id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
1600691 | import re
class MagCard(object):
    """Parser for magnetic-stripe card data (track 1, IATA format).

    Data may arrive in chunks: each assignment to ``data`` appends to an
    internal buffer and re-parses it, so partial swipes accumulate until a
    full track-1 record is present.
    """

    # Track 1 layout: PAN digits ^ LAST/FIRST ^ YYMM expiration.
    track_1 = r"\B(\d+?)\^([^\^]+?)\^(.{4})"

    def __init__(self, data):
        # Parsed fields; None until a full track-1 record has been seen.
        self._info = {
            "PAN": None,
            "First": None,
            "Last": None,
            "Expiration": None,
        }
        self._data = ""
        self.data = data

    @property
    def data(self):
        """Return the dict of parsed fields (not the raw buffer)."""
        return self._info

    @data.setter
    def data(self, text):
        # Append, because swipe data may be delivered incrementally.
        self._data += text
        self.parse_data(self._data)

    def parse_data(self, text):
        """Extract PAN, name and expiration from *text* if track 1 matches.

        Fixes over the original: robust to a name field without a '/'
        separator (first name defaults to ""), and correctly named
        (``pharse_data`` is kept below as a backward-compatible alias).
        """
        match = re.search(self.track_1, text)
        if match is not None:
            name_parts = match.group(2).split("/")
            last_name = name_parts[0]
            # Original indexed [1] unconditionally and raised IndexError
            # when the separator was absent.
            first_name = name_parts[1] if len(name_parts) > 1 else ""
            exp = match.group(3)
            self._info["PAN"] = int(match.group(1))
            self._info["First"] = first_name
            self._info["Last"] = last_name
            # Expiration is stored as (YY, MM), matching track-1 order.
            self._info["Expiration"] = (exp[:2], exp[2:])

    def pharse_data(self, text):
        """Backward-compatible alias for the original misspelled name."""
        self.parse_data(text)

    @property
    def populated(self):
        """True once every parsed field has been filled in."""
        return all(value is not None for value in self._info.values())

    def __repr__(self):
        # Explicit None check instead of catching TypeError from indexing.
        expiration = self.data["Expiration"]
        if expiration is None:
            exp = "None"
        else:
            exp = "{} 20{}".format(expiration[1], expiration[0])
        return "PAN: {}\nName: {}, {}\nExpiration: {}".format(
            self.data["PAN"], self.data["Last"], self.data["First"], exp)
if __name__ == '__main__':
    # Fixed for Python 3: raw_input() and print statements replaced with
    # input() and print() calls (the original was Python-2-only).
    card = MagCard(input("Card Data: "))
    print(card.populated)
    print(card)
| StarcoderdataPython |
79922 | <filename>tf_semseg/model/esanet.py
import tensorflow as tf
from .util import *
from . import resnet, erfnet, pspnet, config, senet, shortcut
def stem(rgb, depth, se_reduction=16, name=None, config=config.Config()):
    """Fused dual-branch stem for ESANet.

    Runs a ResNet-style 7x7/stride-2 conv stem on the RGB and depth tensors
    separately, fuses the two branches by summing their channel
    squeeze-and-excitation (SE) outputs into the RGB path, then max-pools
    both branches (stride 2). Returns the ``(rgb, depth)`` pair.
    """
    def resnet_stem_b_no_pool(x, name):
        # ResNet "B" stem without the trailing max-pool (applied below).
        x = conv_norm_act(x, filters=64, kernel_size=7, stride=2, name=name, config=config)
        return x
    rgb = resnet_stem_b_no_pool(rgb, name=join(name, "stem_rgb"))
    depth = resnet_stem_b_no_pool(depth, name=join(name, "stem_depth"))
    # Channel-attention fusion: re-weight each modality, sum into RGB path.
    rgb_se = senet.squeeze_excite_channel(rgb, reduction=se_reduction, name=join(name, "se_rgb"), config=config)
    depth_se = senet.squeeze_excite_channel(depth, reduction=se_reduction, name=join(name, "se_depth"), config=config)
    rgb = rgb_se + depth_se
    rgb = pool(rgb, kernel_size=3, stride=2, mode="max", config=config)
    depth = pool(depth, kernel_size=3, stride=2, mode="max", config=config)
    return rgb, depth
def upsample(x, factor, name=None, config=config.Config()):
    """Nearest-neighbor upsampling by ``factor``, smoothed by a depthwise
    3x3 convolution (channel count is unchanged)."""
    channels = x.shape[-1]
    spatial_shape = tf.shape(x)[1:-1]
    x = resize(x, factor * spatial_shape, method="nearest", config=config)
    return conv(x, channels, kernel_size=3, groups=channels, name=join(name, "conv"), bias=True, config=config)
def esanet(rgb, depth, classes, num_residual_units, filters, dilation_rates, strides, name=None, psp_bin_sizes=[1, 5], block=erfnet.non_bottleneck_block_1d, se_reduction=16, decoder_filters=[512, 256, 128], num_decoder_units=[3, 3, 3], config=config.Config()):
    """ESANet: efficient RGB-D semantic segmentation network.

    Two parallel encoders (RGB and depth) are fused after every encoder
    stage via channel squeeze-and-excitation attention; a PSP pyramid
    pooling module caps the encoder; a three-stage decoder with learned
    upsampling and encoder shortcuts produces per-pixel class logits,
    followed by two final 2x upsamplings.

    NOTE(review): this block came from a whitespace-stripped dump and the
    loop nesting was reconstructed (SE fusion once per encoder stage, as in
    the ESANet paper) — confirm against the original source.
    """
    # globals()["stem"] resolves the module-level `stem` by name —
    # presumably to keep it patchable / avoid shadowing; confirm intent.
    rgb, depth = globals()["stem"](rgb, depth, se_reduction=se_reduction, name=join(name, "stem_b"), config=config)
    encoder_blocks = []
    # Encoder
    for block_index in range(len(num_residual_units)):
        for unit_index in range(num_residual_units[block_index]):
            def unit(x, name):
                # Only the first unit of a stage strides; its dilation uses
                # the previous stage's rate (1 for the first stage).
                return block(x,
                             filters=filters[block_index],
                             stride=strides[block_index] if unit_index == 0 else 1,
                             dilation_rate=(dilation_rates[block_index - 1] if block_index > 0 else 1) if unit_index == 0 else dilation_rates[block_index],
                             name=join(name, f"block{block_index + 1}", f"unit{unit_index + 1}"),
                             config=config)
            rgb = unit(rgb, name=join(name, "encode_rgb"))
            depth = unit(depth, name=join(name, "encode_depth"))
        # SE fusion of the two modalities at the end of each stage.
        rgb_se = senet.squeeze_excite_channel(rgb, reduction=se_reduction, name=join(name, f"block{block_index + 1}", "se_rgb"), config=config)
        depth_se = senet.squeeze_excite_channel(depth, reduction=se_reduction, name=join(name, f"block{block_index + 1}", "se_depth"), config=config)
        rgb = rgb_se + depth_se
        encoder_blocks.append(rgb)
    # Context module
    encoder_blocks[-1] = pspnet.psp(
        encoder_blocks[-1],
        resize_method="nearest",
        name=join(name, "psp"),
        bin_sizes=psp_bin_sizes,
        config=config
    )
    # Halve the channel count after pyramid pooling.
    encoder_blocks[-1] = conv_norm_act(encoder_blocks[-1], filters=encoder_blocks[-1].shape[-1] // 2, kernel_size=1, stride=1, name=join(name, "psp", "final"), config=config)
    # Decoder
    x = encoder_blocks[-1]
    for block_index in range(len(num_decoder_units)):
        x = conv_norm_act(x, filters=decoder_filters[block_index], kernel_size=3, name=join(name, "decode", f"block{block_index + 1}", "initial"), config=config)
        for unit_index in range(num_decoder_units[block_index]):
            x = block(x, name=join(name, "decode", f"block{block_index + 1}", f"unit{unit_index + 1}"), config=config)
        x = upsample(x, factor=2, name=join(name, "decode", f"block{block_index + 1}", "upsample"), config=config)
        # Skip connection from the matching (mirrored) encoder stage.
        x = shortcut.add(x, encoder_blocks[-(block_index + 2)], name=join(name, "decode", f"block{block_index + 1}", "shortcut"), config=config)
    x = conv(x, classes, kernel_size=3, name=join(name, "decode", "final", "conv"), bias=True, config=config)  # TODO: this should be initialized differently for training: https://github.com/TUI-NICR/ESANet/blob/56b7aff77e3fc05ce4ffe55142dc805b07956f22/src/models/model.py#L385
    x = upsample(x, factor=2, name=join(name, "decode", "final", "upsample1"), config=config)
    x = upsample(x, factor=2, name=join(name, "decode", "final", "upsample2"), config=config)
    return x
| StarcoderdataPython |
227020 | <reponame>best-coloc-ever/globibot
from .plugin import UrbanDictionary
plugin_cls = UrbanDictionary
| StarcoderdataPython |
3383381 | from flake8_alphabetize.core import Alphabetize
from ._version import get_versions
__version__ = get_versions()["version"]
del get_versions
Alphabetize.version = __version__
__all__ = ["Alphabetize", "__version__"]
| StarcoderdataPython |
5148752 | <filename>scripts/pad-script.py
#!C:/local/Python37/python.exe
#
from tkinter import *
import tkinter.ttk as ttk
import serial
# US(ANSI)-to-JIS keyboard translation table: maps the symbol Tk reports on
# a US layout to the character the JIS-layout host should receive. Integer
# values are raw key-code bytes sent as-is (see TouchPad.keyin) —
# presumably device-specific codes; confirm against the receiver's firmware.
E2J = {'"': '@', '&': '^', '\'' : '&', '(' : '*', ')' : '(',
       '=' : '_', '^' : '=', '~' : '+', '@' : '[', '`' : '{' , '[' : ']' , '{' : '}',
       '+' : ':', ':' : '\'', '*' : '"', ']' : '\\', '}' : '|' ,
       '\\' : 0x89, '|' : 0x8a, '_' : 0x87}
class TouchPad(ttk.Frame):
    """300x300 Tk touchpad that forwards pointer, mouse-button and keyboard
    events to a remote host over a serial line at 38400 baud.

    Pointer state is encoded by ``mouse_move_cmd`` as
    ``ESC [ m <button> <x> <y> <wheel> 0x7e``; keystrokes are sent as plain
    characters or VT-style escape sequences while "KeyIn" mode is active.

    NOTE(review): this block came from a whitespace-stripped dump; the
    indentation below was reconstructed — confirm against the original.
    """

    def __init__(self, master, port="COM3", mode=True):
        # mode=True enables US->JIS symbol translation via the E2J table.
        super().__init__(master, width=300, height=300)
        self.start_xy = None   # pointer position at the previous drag event
        self.x_y = None        # widget placement captured at drag start
        self.create_pane()
        self.propagate(False)  # keep the fixed 300x300 geometry
        self.pack()
        self.keyin = False     # keyboard forwarding toggle (see keyin_set)
        self.pressBtn = None   # 'L'/'R' while an emulated button is held
        self.enable_jis = mode
        self.serial = serial.Serial(port, 38400)

    def create_pane(self):
        """Build the drag surface plus the L/R/Zen/Han/KeyIn buttons."""
        # Central drag surface; all three pointer events feed the mover.
        self.pane = ttk.Frame(self, width=300, height=300, relief="groove")
        self.pane.bind("<Button-1>", self.move_start)
        self.pane.bind("<B1-Motion>", self.move_now)
        self.pane.bind("<ButtonRelease-1>", self.move_end)
        self.pane.place(x=0, y=0)
        # Left mouse button; also draggable like the pad itself.
        self.buttonL = ttk.Button(self, text="\n\nL\n\n", width=8, takefocus=0)
        self.buttonL.place(x=0, y=100)
        self.buttonL.bind("<Button-1>", self.move_start)
        self.buttonL.bind("<B1-Motion>", self.move_now)
        self.buttonL.bind("<ButtonRelease-1>", self.move_end)
        # Right mouse button.
        self.buttonR = ttk.Button(self, text="\n\nR\n\n", width=8, takefocus=0)
        self.buttonR.place(x=240, y=100)
        self.buttonR.bind("<Button-1>", self.move_start)
        self.buttonR.bind("<B1-Motion>", self.move_now)
        self.buttonR.bind("<ButtonRelease-1>", self.move_end)
        # Zenkaku (full-width) / Hankaku (half-width) IME toggle keys.
        self.buttonZen = ttk.Button(self, text="Zen", width=8, takefocus=0)
        self.buttonZen.place(x=0, y=0)
        self.buttonZen.bind("<Button-1>", self.zenkaku)
        self.buttonHan = ttk.Button(self, text="Han", width=8, takefocus=0)
        self.buttonHan.place(x=240, y=0)
        self.buttonHan.bind("<Button-1>", self.hankaku)
        # Styles for the KeyIn toggle button.
        s_r = ttk.Style()
        s_r.configure('Key.TButton', background="red")
        s_b = ttk.Style()
        s_b.map("KeyB.TButton",
                foreground=[('pressed', 'red'), ('active', 'blue')],
                background=[('pressed', '!disabled', 'black'), ('active', 'green')]
                )
        # KeyIn toggles keyboard forwarding and receives key events while
        # it has focus.
        self.buttonKey = ttk.Button(self, text="KeyIn", style='KeyB.TButton')
        self.buttonKey.place(x=110, y=0)
        self.buttonKey.bind("<Button-1>", self.keyin_set)
        self.buttonKey.bind("<KeyPress>", self.keyin)

    def keyin_set(self, event):
        """Toggle keyboard forwarding on/off (bound to the KeyIn button)."""
        if self.keyin:
            self.keyin = False
            print("keyin_set 0")
        else:
            self.keyin = True
            print("keyin_set 1")

    def keyin(self, event):
        """Forward one key press over serial while forwarding is enabled.

        Printable characters are sent literally (optionally translated via
        E2J when in JIS mode and state is plain/Shift); non-character keys
        are mapped by keycode to VT-style escape sequences. event.state
        values 0/1 are plain/Shift and 4 is Control — TODO confirm these
        Tk modifier-mask assumptions on the target platform.
        """
        if self.keyin:
            if event.char:
                if event.state == 0 or event.state == 1:
                    if self.enable_jis and event.char in E2J:
                        ch = E2J[event.char]
                        if type(ch) is int:
                            # Raw key-code byte rather than a character.
                            self.serial.write(ch.to_bytes(1, 'big'))
                        else:
                            self.serial.write(bytes(ch, 'UTF-8'))
                    else:
                        self.serial.write(bytes(event.char, 'UTF-8'))
                elif event.state == 4:
                    self.serial.write(bytes(event.char, 'UTF-8'))
                else:
                    print(event.char, event.state)
            else:
                # Ignore bare modifier presses (Shift/Ctrl/Alt keycodes).
                if not event.keycode in (16, 17, 18):
                    if event.keycode == 37:  # A_LEFT
                        self.serial.write(b'\x1b\x5b\x44')
                    elif event.keycode == 38:  # A_UP
                        self.serial.write(b'\x1b\x5b\x41')
                    elif event.keycode == 39:  # A_RIGHT
                        self.serial.write(b'\x1b\x5b\x43')
                    elif event.keycode == 40:  # A_DOWN
                        self.serial.write(b'\x1b\x5b\x42')
                    elif event.keycode == 33:  # PageUp
                        self.serial.write(b'\x1b\x5b\x34\x7e')
                    elif event.keycode == 34:  # PageDown
                        self.serial.write(b'\x1b\x5b\x35\x7e')
                    elif event.keycode == 35:  # END
                        self.serial.write(b'\x1b\x5b\x33\x7e')
                    elif event.keycode == 36:  # HOME
                        self.serial.write(b'\x1b\x5b\x31\x7e')
                    elif event.keycode == 45:  # INSERT
                        self.serial.write(b'\x1b\x5b\x32\x7e')
                    elif event.keycode == 46:  # DEL
                        self.serial.write(b'\x7f')
                    elif event.keycode == 112:  # F1
                        self.serial.write(b'\x1b\x5b\x31\x31\x7e')
                    elif event.keycode == 113:  # F2
                        self.serial.write(b'\x1b\x5b\x31\x32\x7e')
                    elif event.keycode == 114:  # F3
                        self.serial.write(b'\x1b\x5b\x31\x33\x7e')
                    elif event.keycode == 115:  # F4
                        self.serial.write(b'\x1b\x5b\x31\x34\x7e')
                    elif event.keycode == 116:  # F5
                        # NOTE(review): payload was redacted to '\<KEY>' in
                        # this dump; given F1-F4 and F7 use 11~..14~ and 17~,
                        # F5/F6 were likely ESC[15~/ESC[16~ — confirm
                        # against the receiving device's protocol.
                        self.serial.write(b'\<KEY>')
                    elif event.keycode == 117:  # F6
                        # NOTE(review): redacted payload — see F5 above.
                        self.serial.write(b'\<KEY>')
                    elif event.keycode == 118:  # F7
                        self.serial.write(b'\x1b\x5b\x31\x37\x7e')
                    elif event.keycode == 119:  # F8
                        self.serial.write(b'\x1b\x5b\x31\x38\x7e')
                    elif event.keycode == 120:  # F9
                        self.serial.write(b'\x1b\x5b\x32\x30\x7e')
                    else:
                        print(event.keycode)

    def zenkaku(self, event):
        # Send the zenkaku (full-width) key code while forwarding is on.
        if self.keyin:
            self.serial.write(b'\x88')

    def hankaku(self, event):
        # Send the hankaku (half-width) key code while forwarding is on.
        if self.keyin:
            self.serial.write(b'\x8b')

    def move_start(self, event):
        # Capture the mouse cursor position (drag origin).
        self.start_xy = (event.x_root, event.y_root)
        # Capture the widget's placement info.
        place_info = event.widget.place_info()
        x = int(place_info['x'])
        y = int(place_info['y'])
        self.x_y = (x, y)
        try:
            # cget('text') raises for the plain pad Frame, so pressBtn ends
            # up 'L'/'R' only when a button widget started the drag.
            # NOTE(review): bare except also hides unrelated errors.
            #print("Push", event.widget.cget('text'))
            self.pressBtn = event.widget.cget('text').strip()
            self.button_press(event)
        except:
            self.pressBtn = None

    def move_now(self, event):
        if self.start_xy is None:
            return
        # Compute the drag delta since the last event (5x gain).
        distance = (event.x_root - self.start_xy[0], event.y_root - self.start_xy[1])
        x = (event.x_root - self.start_xy[0]) * 5
        y = (event.y_root - self.start_xy[1]) * 5
        data = self.mouse_move_cmd(x, y, 0)
        self.serial.write(data)
        #print(data)
        #print(distance, self.pressBtn)
        self.start_xy = (event.x_root, event.y_root)

    def move_end(self, event):
        # Reset drag state and report the button release.
        self.start_xy = None
        self.x_y = None
        place_info = event.widget.place_info()  # NOTE(review): unused
        self.button_release(event)

    def button_press(self, event):
        # x=-255 triggers the sentinel (no-movement) payload, so this just
        # reports the new button state held in self.pressBtn.
        data = self.mouse_move_cmd(-255, 0, 0)
        self.serial.write(data)

    def button_release(self, event):
        # Same sentinel payload; pressBtn is cleared afterwards so the
        # button byte reads as released on the next command.
        data = self.mouse_move_cmd(-255, 0, 0)
        self.serial.write(data)
        self.pressBtn = None

    def mouse_move_cmd(self, x, y, w=0):
        """Encode a pointer command: ESC [ m, button byte (0/1/2), then
        x/y/wheel offset by +128 and clamped to 1..255 (or a 0,0,0 sentinel
        when any component is < -128), terminated by 0x7e."""
        data = b'\x1b\x5b\x6d'
        if self.pressBtn == 'L':
            data += b'\x01'
        elif self.pressBtn == 'R':
            data += b'\x02'
        else:
            data += b'\x00'
        if x < -128 or y < -128 or w < -128:
            # Sentinel payload: button-state change without movement.
            data += b'\x00\x00\x00\x7e'
        else:
            x = min(max(x + 128, 1), 255)
            y = min(max(y + 128, 1), 255)
            w = min(max(w + 128, 1), 255)
            data += x.to_bytes(1, 'big')
            data += y.to_bytes(1, 'big')
            data += w.to_bytes(1, 'big')
            data += b'\x7e'
        return data
if __name__ == '__main__':
    # BUG FIX: sys.argv was used below but `sys` was never imported, which
    # raised NameError whenever command-line arguments were checked.
    import sys

    port = "COM1"
    mode = False
    # argv[1]: serial port name; argv[2] == 'JP' enables JIS translation.
    if len(sys.argv) > 1:
        port = sys.argv[1]
    if len(sys.argv) > 2:
        mode = (sys.argv[2] == 'JP')
    master = Tk()
    master.title("TouchPad")
    master.geometry("300x300")
    TouchPad(master, port, mode)
    master.mainloop()
| StarcoderdataPython |
6641616 | import sys
import os.path
sys.path.append(os.path.abspath(os.pardir))
import disaggregator as da
import unittest
class ApplianceTypeTestCase(unittest.TestCase):
    """Placeholder test case for disaggregator appliance types.

    Both hooks are empty stubs — real fixtures and assertions are still
    to be written.
    """

    def setUp(self):
        # No fixtures needed yet.
        pass

    def test_(self):
        # TODO: add real assertions for appliance-type behaviour.
        pass
if __name__ == "__main__":
    # Run the (currently stubbed) test case with the unittest runner.
    unittest.main()
| StarcoderdataPython |
6422734 | <reponame>SmilingHeretic/ethernaut-solutions-brownie
from brownie import (
network,
accounts,
config,
interface,
Contract,
)
from scripts.helpful_scripts import (
get_account,
get_new_instance,
submit_instance,
)
from web3 import Web3
def main():
    """Solve Ethernaut level 22 ("Dex").

    Repeatedly swaps the player's full balance back and forth between the
    two tokens; the dex's naive price formula over-quotes each round, so
    one of its reserves is eventually drained, after which the instance is
    submitted.
    """
    player = get_account()
    instance_address = get_new_instance(level_id=22, player=player)
    dex_contract = interface.IDex(instance_address)
    token1 = dex_contract.token1()
    token2 = dex_contract.token2()
    # One-time allowance covering every swap below — presumably 110 exceeds
    # any balance reached during this exploit; confirm for other setups.
    dex_contract.approve(dex_contract, 110, {"from": player})
    from_token = token1
    to_token = token2
    # Loop until the dex has a zero balance of either token.
    while (dex_contract.balanceOf(token1, dex_contract) and dex_contract.balanceOf(token2, dex_contract)):
        # warning: there's probably risk of infinite loop with different starting balances
        print_state(dex_contract, player)
        # Alternate the swap direction each round.
        from_token, to_token = to_token, from_token
        # Swap either everything we hold or exactly enough to drain the dex,
        # whichever is smaller.
        num_tokens_to_swap = min(
            dex_contract.balanceOf(from_token, player),
            get_num_tokens_to_drain_dex(dex_contract, from_token, to_token)
        )
        tx = dex_contract.swap(from_token, to_token, num_tokens_to_swap, {"from": player})
        tx.wait(1)
    submit_instance(instance_address, player)
def get_num_tokens_to_drain_dex(dex_contract, from_token, to_token):
    """Return the smallest swap amount whose quoted output reaches the
    dex's entire balance of ``to_token`` (i.e. the amount that drains it),
    found by counting upward from zero."""
    amount = 0
    while True:
        quoted = dex_contract.get_swap_price(from_token, to_token, amount)
        if quoted >= dex_contract.balanceOf(to_token, dex_contract):
            return amount
        amount += 1
def print_state(dex_contract, player):
    """Print both parties' balances of token1/token2 and the quotes for
    swapping the player's full balance in each direction."""
    token1 = dex_contract.token1()
    token2 = dex_contract.token2()
    player_t1 = dex_contract.balanceOf(token1, player)
    player_t2 = dex_contract.balanceOf(token2, player)
    print('Player balances:')
    print(f'token1: {player_t1}')
    print(f'token2: {player_t2}')
    print()
    dex_t1 = dex_contract.balanceOf(token1, dex_contract)
    dex_t2 = dex_contract.balanceOf(token2, dex_contract)
    print('Dex balances:')
    print(f'token1: {dex_t1}')
    print(f'token2: {dex_t2}')
    print()
    print('Possible swaps:')
    all_t1_quote = dex_contract.get_swap_price(token1, token2, player_t1)
    print(f'{player_t1} token1 -> {all_t1_quote} token2')
    all_t2_quote = dex_contract.get_swap_price(token2, token1, player_t2)
    print(f'{player_t2} token2 -> {all_t2_quote} token1')
    print()
| StarcoderdataPython |
4804205 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
from secretpy import SimpleSubstitution, CryptMachine, alphabets as al
from secretpy.cmdecorators import Block, SaveAll
# Demo 1: use SimpleSubstitution directly — encrypt a German sentence with
# the German alphabet, then decrypt it back, printing all three strings.
cipher = SimpleSubstitution()
alphabet = al.GERMAN
plaintext = u"schweißgequältvomödentextzürnttypografjakob"
# NOTE(review): the key literal was redacted to "<KEY>" in this dump; a
# real run needs a permutation of the German alphabet here.
key = u"<KEY>"
print(plaintext)
enc = cipher.encrypt(plaintext, key, alphabet)
print(enc)
dec = cipher.decrypt(enc, key, alphabet)
print(dec)
#######################################################
def encdec(machine, plaintext):
    """Print a separator, the plaintext, its encryption and the round-trip
    decryption using the given crypt machine."""
    print("--------------------------------------------------------------------")
    print(plaintext)
    ciphertext = machine.encrypt(plaintext)
    print(ciphertext)
    recovered = machine.decrypt(ciphertext)
    print(recovered)
# Demo 2: drive the same cipher through CryptMachine and its decorators.
cm0 = CryptMachine(cipher, key)
cm = cm0
cm.set_alphabet(al.ENGLISH)
# NOTE(review): key literal redacted to "<KEY>" in this dump.
cm.set_key("<KEY>")
plaintext = "I don't love non-alphabet characters. I will remove all of them: ^,&@$~(*;?&#. Great!"
encdec(cm, plaintext)
# Block decorator: group ciphertext into blocks of 5, joined by ")(".
cm = Block(cm, length=5, sep=")(")
plaintext = "This text is divided by blocks of length 5!"
encdec(cm, plaintext)
# SaveAll decorator: keep case and non-alphabet characters in the output
# (shown by the transcript at the bottom of this file).
cm = SaveAll(cm0)
plaintext = "I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!"
encdec(cm, plaintext)
# 5x5 alphabet where I and J share a cell; key is the cell ordering.
cm.set_alphabet(al.ENGLISH_SQUARE_IJ)
key = (
    "n", "g", "a", "b", "l",
    "s", "t", "u", "v", "c",
    "m", "o", "p", "q", "h",
    "ij", "k", "w", "x", "y",
    "r", "d", "e", "f", "z"
)
cm.set_key(key)
plaintext = "Jj becomes Ii because we use ENGLISH_SQUARE_IJ!"
encdec(cm, plaintext)
# Custom mixed-case alphabet with a matching (redacted) key.
alphabet = u"abcdABCDEfghijFGHIJ"
key = u"<KEY>"
cm.set_alphabet(alphabet)
cm.set_key(key)
plaintext = u"Text aBcdHijf"
encdec(cm, plaintext)
# Expected console output of this demo script, kept as a module-level
# string literal (effectively a no-op at runtime).
'''
schweißgequältvomödentextzürnttypografjakob
qxßuäüzeänsobrtlciyägrävrjkpgrrhmlepfödfalw
schweißgequältvomödentextzürnttypografjakob
--------------------------------------------------------------------
I don't love non-alphabet characters. I will remove all of them: ^,&@$~(*;?&#. Great!
kbnmxfnpcmnmyftjyzcxajyvyaxcvwkqkffvclnpcyffnhxjclivcyx
idontlovenonalphabetcharactersiwillremoveallofthemgreat
--------------------------------------------------------------------
This text is divided by blocks of length 5!
xjkwx-crxkw-bkpkb-cbzsz-fnaew-nhfcm-ixj
thistextisdividedbyblocksoflength
--------------------------------------------------------------------
I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!
K fnpc mnm-yftjyzcx ajyvyaxcvw. Xjcwc yvc : ^,&@$~(*;?&#. Xjyx'w kx!
I love non-alphabet characters. These are : ^,&@$~(*;?&#. That's it!
--------------------------------------------------------------------
Jj becomes Ii because we use ENGLISH_SQUARE_IJ!
Vv glaqolw Vv glanywl dl ywl LPTMVWU_WIYNKL_VV!
Ii becomes Ii because we use ENGLISH_SQUARE_II!
--------------------------------------------------------------------
Text aBcdHijf
Text dIBDiabg
Text aBcdHijf
'''
| StarcoderdataPython |
5178789 | from .pvserver import pvserver_connect
from .post import get_csv_data
from .post import get_case_parameters
from .post import get_case_parameters_str
from .post import print_html_parameters
from .post import get_case_root, get_case_report
from .post import get_fw_csv_data
from .post import for_each
from .post import ProgressBar
from .post import get_status_dict
from .post import cp_profile
from .post import cf_profile
from .post import calc_lift_centre_of_action
from .post import calc_drag_centre_of_action
from .post import get_monitor_data
from .post import calc_force_wall
from .post import cp_profile_wall_from_file
from .post import cf_profile_wall_from_file
from .post import get_num_procs
from .post import rotate_vector
| StarcoderdataPython |
8152520 | <reponame>mitpokerbots/scrimmage<filename>migrations/versions/4b3121fe5023_.py
"""Add more values to enum
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2019-01-21 19:42:37.230599
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
# NOTE(review): revision ids were redacted to "<KEY>" in this dump and
# must be restored for the migration chain to resolve.
revision = '<KEY>'
down_revision = '<KEY>'
branch_labels = None
depends_on = None
def upgrade():
    """Extend the ``tournamentstatus`` enum with additional values.

    PostgreSQL cannot redefine an enum in place, so the migration renames
    the existing type, creates the replacement, re-points the
    ``tournaments.status`` column at it (casting through text), and drops
    the old type. Other database backends are a no-op.
    """
    if op.get_bind().engine.name == 'postgresql':
        # Keep the old type alive under a temporary name while the column
        # still references it.
        op.execute('ALTER TYPE tournamentstatus RENAME TO tournamentstatus_old;')
        tournamentstatus = postgresql.ENUM('created', 'spawned', 'spawning', 'done', name='tournamentstatus')
        tournamentstatus.create(op.get_bind())
        # Re-point the column, casting existing values via text.
        op.alter_column('tournaments', column_name='status',
                        type_=sa.Enum('created', 'spawned', 'spawning', 'done', name='tournamentstatus'),
                        postgresql_using='status::text::tournamentstatus')
        op.execute('DROP TYPE tournamentstatus_old;')
def downgrade():
    # Deliberate no-op: removing values from a PostgreSQL enum is not
    # supported, so this migration is irreversible.
    pass
| StarcoderdataPython |
11273306 | <filename>finappservice/migrations/0019_currency.py<gh_stars>0
# Generated by Django 3.1.4 on 2021-04-21 13:23
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: create the ``currency`` table
    backing the ``Currency`` model (code, name, active flag, timestamp)."""

    dependencies = [
        ('finappservice', '0018_internaltransacthistory_date_created'),
    ]

    operations = [
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Currency code — presumably an ISO 4217-style code; confirm.
                ('cur_code', models.CharField(max_length=100)),
                ('cur_name', models.TextField(blank=True, null=True)),
                ('active', models.BooleanField(default=False)),
                # auto_now: updated on every save, not only on creation.
                ('date_created', models.DateTimeField(auto_now=True, null=True)),
            ],
            options={
                'db_table': 'currency',
            },
        ),
    ]
| StarcoderdataPython |
1723143 | <reponame>j4ckstraw/pybrowscap<filename>pybrowscap/test/loader/csv/test_loader.py
import unittest
import os
from datetime import datetime
from pybrowscap.loader.csv import load_file
from pybrowscap.loader import Browscap, TYPE_CSV
class LoaderTest(unittest.TestCase):
    """Tests for the pybrowscap CSV loader against two snapshot data files
    (browscap releases of 14 and 21 May 2012)."""

    browscap_file1 = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'browscap_14_05_2012.csv')
    browscap_file2 = os.path.join(os.path.dirname(__file__), '..', '..', 'data', 'browscap_21_05_2012.csv')

    def test_load_csv_browscap(self):
        """Loading populates metadata, parsed rows and the regex cache."""
        browscap = load_file(self.browscap_file1)
        self.assertIsInstance(browscap, Browscap)
        self.assertEqual(browscap.browscap_file_path, self.browscap_file1)
        self.assertEqual(browscap.type, TYPE_CSV)
        self.assertGreaterEqual(datetime.now(), browscap.loaded_at)
        self.assertIsNone(browscap.reloaded_at)
        # The 14 May snapshot contains four entries.
        self.assertEqual(len(browscap.data), 4)
        self.assertEqual(len(browscap.regex_cache), 4)
        self.assertEqual(browscap.version, 5003)
        self.assertEqual(browscap.release_date, datetime.strptime('Mon, 14 May 2012 22:20:20', '%a, %d %b %Y %H:%M:%S'))

    def test_reload_original_csv_browscap_file(self):
        """Reloading without arguments re-reads the same file unchanged."""
        browscap = load_file(self.browscap_file1)
        self.assertEqual(len(browscap.data), 4)
        self.assertEqual(len(browscap.regex_cache), 4)
        self.assertEqual(browscap.version, 5003)
        self.assertEqual(browscap.release_date, datetime.strptime('Mon, 14 May 2012 22:20:20', '%a, %d %b %Y %H:%M:%S'))
        browscap.reload()
        self.assertEqual(len(browscap.data), 4)
        self.assertEqual(len(browscap.regex_cache), 4)
        self.assertEqual(browscap.version, 5003)
        self.assertEqual(browscap.release_date, datetime.strptime('Mon, 14 May 2012 22:20:20', '%a, %d %b %Y %H:%M:%S'))

    def test_reload_new_csv_browscap_file(self):
        """Reloading with a new path switches to that file's contents."""
        browscap = load_file(self.browscap_file1)
        self.assertEqual(len(browscap.data), 4)
        self.assertEqual(len(browscap.regex_cache), 4)
        self.assertEqual(browscap.version, 5003)
        self.assertEqual(browscap.release_date, datetime.strptime('Mon, 14 May 2012 22:20:20', '%a, %d %b %Y %H:%M:%S'))
        browscap.reload(self.browscap_file2)
        # The 21 May snapshot contains three entries.
        self.assertEqual(len(browscap.data), 3)
        self.assertEqual(len(browscap.regex_cache), 3)
        self.assertEqual(browscap.version, 5003)
        self.assertEqual(browscap.release_date, datetime.strptime('Mon, 21 May 2012 15:48:39', '%a, %d %b %Y %H:%M:%S'))

    def test_load_browscap_no_file(self):
        """A non-path argument makes load_file raise."""
        self.assertRaises(Exception, load_file, ('www.codescale.net'))
if __name__ == '__main__':
unittest.main() | StarcoderdataPython |
3391918 | <gh_stars>0
"""
Implements the Nelder-Mead algorithm for maximizing a function with one or more
variables.
test changes
"""
import numpy as np
from numba import njit
from collections import namedtuple
results = namedtuple('results', 'x fun success nit final_simplex')
@njit
def nelder_mead(fun, x0, bounds=np.array([[], []]).T, args=(), tol_f=1e-10,
                tol_x=1e-10, max_iter=1000):
    """Maximize a scalar function with the Nelder-Mead simplex method.

    Builds the initial simplex around ``x0`` and delegates to
    ``_nelder_mead_algorithm``. JIT-compiled in nopython mode.

    Parameters
    ----------
    fun : callable
        Objective ``fun(x, *args) -> float`` to maximize; must itself be
        JIT-compiled in nopython mode.
    x0 : ndarray(float, ndim=1)
        Initial guess of length n.
    bounds : ndarray(float, ndim=2), optional
        (n, 2) array of (min, max) pairs per coordinate; the default
        (0, 2)-shaped array means no bounds.
    args : tuple, optional
        Extra arguments forwarded to the objective.
    tol_f : scalar(float), optional(default=1e-10)
        Function-value convergence tolerance.
    tol_x : scalar(float), optional(default=1e-10)
        Simplex-size (domain) convergence tolerance.
    max_iter : scalar(float), optional(default=1000)
        Maximum number of iterations.

    Returns
    -------
    results : namedtuple
        Fields: ``x`` (approximate maximizer), ``fun`` (approximate
        maximum), ``success``, ``nit``, ``final_simplex``.
    """
    simplex = _initialize_simplex(x0, bounds)
    return _nelder_mead_algorithm(fun, simplex, bounds, args=args,
                                  tol_f=tol_f, tol_x=tol_x,
                                  max_iter=max_iter)
@njit
def _nelder_mead_algorithm(fun, vertices, bounds=np.array([[], []]).T,
                           args=(), ρ=1., χ=2., γ=0.5, σ=0.5, tol_f=1e-8,
                           tol_x=1e-8, max_iter=1000):
    """Core Nelder-Mead loop (Lagarias et al. 1998), adapted to maximize
    by minimizing ``-fun`` via ``_neg_bounded_fun``. JIT-compiled in
    nopython mode.

    Parameters
    ----------
    fun : callable
        Objective ``fun(x, *args) -> float`` to maximize (nopython-jitted).
    vertices : ndarray(float, ndim=2)
        Initial simplex of shape (n+1, n), modified in place.
    bounds : ndarray(float, ndim=2), optional
        (n, 2) bounds per coordinate, or the empty (0, 2) placeholder.
    args : tuple, optional
        Extra arguments forwarded to the objective.
    ρ, χ, γ, σ : float, optional
        Reflection (> 0), expansion (> max(1, ρ)), contraction (0..1) and
        shrinkage (0..1) coefficients.
    tol_f, tol_x, max_iter :
        Termination controls (function spread, linearized volume ratio,
        iteration cap).

    Returns
    -------
    results : namedtuple
        ``x``, ``fun``, ``success`` (False only on hitting max_iter),
        ``nit``, ``final_simplex``.
    """
    n = vertices.shape[1]
    _check_params(ρ, χ, γ, σ, bounds, n)

    nit = 0

    # Precomputed coefficient products used for the linearized-volume
    # bookkeeping of each move type.
    ργ = ρ * γ
    ρχ = ρ * χ
    σ_n = σ ** n

    # Negated (minimization-form) objective value at each vertex.
    f_val = np.empty(n+1, dtype=np.float64)
    for i in range(n+1):
        f_val[i] = _neg_bounded_fun(fun, bounds, vertices[i], args=args)

    # Step 1: Sort (indices of vertices ordered best to worst).
    sort_ind = f_val.argsort()
    LV_ratio = 1

    # Compute centroid of the n best vertices.
    x_bar = vertices[sort_ind[:n]].sum(axis=0) / n

    while True:
        shrink = False

        # Check termination.
        fail = nit >= max_iter

        best_val_idx = sort_ind[0]
        worst_val_idx = sort_ind[n]

        # Spread of (negated) objective values across the simplex.
        term_f = f_val[worst_val_idx] - f_val[best_val_idx] < tol_f

        # Linearized volume ratio test (see reference [2] in the module).
        term_x = LV_ratio < tol_x

        if term_x or term_f or fail:
            break

        # Step 2: Reflection of the worst vertex through the centroid.
        x_r = x_bar + ρ * (x_bar - vertices[worst_val_idx])
        f_r = _neg_bounded_fun(fun, bounds, x_r, args=args)

        if f_r >= f_val[best_val_idx] and f_r < f_val[sort_ind[n-1]]:
            # Accept reflection (strictly between best and second worst).
            vertices[worst_val_idx] = x_r
            LV_ratio *= ρ

        # Step 3: Expansion (reflection beat the current best).
        elif f_r < f_val[best_val_idx]:
            x_e = x_bar + χ * (x_r - x_bar)
            f_e = _neg_bounded_fun(fun, bounds, x_e, args=args)
            if f_e < f_r:  # Greedy minimization: keep whichever is better.
                vertices[worst_val_idx] = x_e
                LV_ratio *= ρχ
            else:
                vertices[worst_val_idx] = x_r
                LV_ratio *= ρ

        # Step 4 & 5: Contraction and Shrink.
        else:
            # Step 4: Contraction.
            if f_r < f_val[worst_val_idx]:  # Step 4.a: Outside Contraction
                x_c = x_bar + γ * (x_r - x_bar)
                LV_ratio_update = ργ
            else:  # Step 4.b: Inside Contraction
                x_c = x_bar - γ * (x_r - x_bar)
                LV_ratio_update = γ
            f_c = _neg_bounded_fun(fun, bounds, x_c, args=args)
            if f_c < min(f_r, f_val[worst_val_idx]):  # Accept contraction
                vertices[worst_val_idx] = x_c
                LV_ratio *= LV_ratio_update

            # Step 5: Shrink every non-best vertex toward the best one.
            else:
                shrink = True
                for i in sort_ind[1:]:
                    vertices[i] = vertices[best_val_idx] + σ * \
                                  (vertices[i] - vertices[best_val_idx])
                    f_val[i] = _neg_bounded_fun(fun, bounds, vertices[i],
                                                args=args)

                sort_ind[1:] = f_val[sort_ind[1:]].argsort() + 1

                # Incremental centroid update after the shrink.
                x_bar = vertices[best_val_idx] + σ * \
                        (x_bar - vertices[best_val_idx]) + \
                        (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n

                LV_ratio *= σ_n

        if not shrink:  # Nonshrink ordering rule: insert the new vertex.
            f_val[worst_val_idx] = _neg_bounded_fun(fun, bounds,
                                                    vertices[worst_val_idx],
                                                    args=args)

            # Insertion-sort the replaced vertex into the ordered indices.
            for i, j in enumerate(sort_ind):
                if f_val[worst_val_idx] < f_val[j]:
                    sort_ind[i+1:] = sort_ind[i:-1]
                    sort_ind[i] = worst_val_idx
                    break

            # Incremental centroid update for the new worst vertex.
            x_bar += (vertices[worst_val_idx] - vertices[sort_ind[n]]) / n

        nit += 1

    # Negate back to the maximization value; success unless max_iter hit.
    return results(vertices[sort_ind[0]], -f_val[sort_ind[0]], not fail, nit,
                   vertices)
@njit
def _initialize_simplex(x0, bounds):
    """Build the (n+1, n) starting simplex for Nelder-Mead.

    Row 0 is ``x0`` itself; row i+1 perturbs coordinate i by +5% (or sets
    it to 2.5e-4 when that coordinate is zero). ``bounds`` is accepted for
    interface compatibility but not used here — out-of-bounds vertices are
    penalized later by ``_neg_bounded_fun``.
    """
    dim = x0.size
    simplex = np.empty((dim + 1, dim), dtype=np.float64)

    # Broadcast x0 into every row before perturbing the diagonal.
    simplex[:] = x0

    rel_step = 0.05
    abs_step = 0.00025

    for j in range(dim):
        coord = simplex[j + 1, j]
        if coord == 0.:
            simplex[j + 1, j] = abs_step
        else:
            simplex[j + 1, j] = coord * (1 + rel_step)

    return simplex
@njit
def _check_params(ρ, χ, γ, σ, bounds, n):
    """Validate Nelder-Mead coefficients and the bounds array, raising
    ValueError on the first violated constraint. JIT-compiled in nopython
    mode.

    Parameters
    ----------
    ρ : scalar(float)
        Reflection parameter; must be greater than 0.
    χ : scalar(float)
        Expansion parameter; must be at least 1 and at least ρ.
    γ : scalar(float)
        Contraction parameter; must lie in [0, 1].
    σ : scalar(float)
        Shrinkage parameter; must lie in [0, 1].
    bounds : ndarray(float, ndim=2)
        Either the empty (0, 2) placeholder or an (n, 2) array of
        (min, max) pairs with min <= max in every row.
    n : scalar(int)
        Number of independent variables.
    """
    if ρ < 0:
        raise ValueError("ρ must be strictly greater than 0.")
    if χ < 1:
        raise ValueError("χ must be strictly greater than 1.")
    if χ < ρ:
        raise ValueError("χ must be strictly greater than ρ.")
    if γ < 0 or γ > 1:
        raise ValueError("γ must be strictly between 0 and 1.")
    if σ < 0 or σ > 1:
        raise ValueError("σ must be strictly between 0 and 1.")

    if not (bounds.shape == (0, 2) or bounds.shape == (n, 2)):
        raise ValueError("The shape of `bounds` is not valid.")
    # BUG FIX: the original message stated the constraint backwards
    # ("Lower bounds must be greater than upper bounds.") even though the
    # condition correctly rejects rows where lower > upper.
    if (np.atleast_2d(bounds)[:, 0] > np.atleast_2d(bounds)[:, 1]).any():
        raise ValueError("Lower bounds must not be greater than upper bounds.")
@njit
def _check_bounds(x, bounds):
    """Return True when ``x`` lies inside ``bounds`` component-wise, or
    when ``bounds`` is the empty (0, 2) placeholder meaning no bounds."""
    if bounds.shape != (0, 2):
        lower = np.atleast_2d(bounds)[:, 0]
        upper = np.atleast_2d(bounds)[:, 1]
        return (lower <= x).all() and (x <= upper).all()
    return True
@njit
def _neg_bounded_fun(fun, bounds, x, args=()):
    """
    Wrapper for bounding and taking the negative of `fun` for the
    Nelder-Mead algorithm. JIT-compiled in `nopython` mode using Numba.
    Parameters
    ----------
    fun : callable
        The objective function to be minimized.
        `fun(x, *args) -> float`
        where x is an 1-D array with shape (n,) and args is a tuple of the
        fixed parameters needed to completely specify the function. This
        function must be JIT-compiled in `nopython` mode using Numba.
    bounds: ndarray(float, ndim=2)
        Sequence of (min, max) pairs for each element in x.
    x : ndarray(float, ndim=1)
        1-D array with shape (n,) of independent variables at which `fun` is
        to be evaluated.
    args : tuple, optional
        Extra arguments passed to the objective function.
    Returns
    ----------
    scalar
        `-fun(x, *args)` if x is within `bounds`, `np.inf` otherwise.
    """
    # Out-of-bounds points are penalized with +inf so the (maximizing)
    # Nelder-Mead driver never accepts them.
    if not _check_bounds(x, bounds):
        return np.inf
    return -fun(x, *args)
| StarcoderdataPython |
class DificilJogadas:
    """Hard-difficulty ("dificil") tic-tac-toe player.

    Moves are produced from hard-coded lookup tables keyed on the sequence
    of moves played so far; board cells are numbered 0..8 (row-major,
    presumably — TODO confirm against the Ambiente implementation).
    Collaborators are injected so the player can read the board, the move
    history and an auxiliary win/block heuristic (auxVerificaVitoria).
    """
    def __init__(self, ambiente, entrada, dificilSensores,
                 getJogadorUmPeca, simboloCampoVazio):
        # Injected collaborators: board environment, move-history input,
        # heuristic sensors, player-one piece symbol and empty-cell symbol.
        self.IAmbiente = ambiente
        self.IEntrada = entrada
        self.IDificilSensores = dificilSensores
        self.IGetJogadorUmPeca = getJogadorUmPeca
        self.ISimboloCampoVazio = simboloCampoVazio
        # Once True, the opening tables are exhausted and all further moves
        # are delegated to auxVerificaVitoria().
        self.Fodase = False
    # Player that never loses; its whole knowledge base is hard-coded below.
    def dificilJoga(self):
        """Return the next cell (0-8) to play, dispatching on move order."""
        pecaPcDificil, pecaOutroJogador = self.IDificilSensores.auxPecas()
        campo = self.IAmbiente.getCampo()
        if pecaPcDificil == self.IGetJogadorUmPeca:
            return self.dificilPrimeiro(campo, pecaPcDificil, pecaOutroJogador)
        else:
            return self.dificilSegundo()
    def dificilSegundo(self):
        """Choose a move when this player moves second, by round number."""
        todasJogadas = self.IEntrada.getTodasJogadasDaRodada()
        tamanho = len(todasJogadas)
        verifica = self.IDificilSensores.auxVerificaVitoria()
        if self.Fodase:
            # Tables exhausted: rely on the win/block heuristic.
            return verifica
        if tamanho == 1:
            return self.primeiraRodada(todasJogadas)
        if tamanho == 3:
            return self.segundaRodada(todasJogadas, verifica)
        if tamanho == 5:
            return self.terceiraRodada(todasJogadas, verifica)
        print("Erro no jogador dificil quando é o segundo")
    def primeiraRodada(self, todasJogadas):
        """First reply: take a corner if the center is taken, else the center."""
        if todasJogadas == [4]:
            return 6
        else:
            return 4
    def segundaRodada(self, todasJogadas, verifica):
        """Second reply, looked up from the opening table.

        `todasJogadas` is the move sequence [opponent, us, opponent];
        `verifica` (win/block heuristic) is returned where the table
        delegates to it. `Fodase` is set so later rounds use the heuristic,
        except for the two sequences handled by terceiraRodada.
        """
        if todasJogadas[0] == 0:
            self.Fodase = True
            if todasJogadas == [0, 4, 1]:
                return verifica
            elif todasJogadas == [0, 4, 2]:
                return verifica
            elif todasJogadas == [0, 4, 3]:
                return verifica
            elif todasJogadas == [0, 4, 5]:
                return 8
            elif todasJogadas == [0, 4, 6]:
                return verifica
            elif todasJogadas == [0, 4, 7]:
                return 8
            elif todasJogadas == [0, 4, 8]:
                return 1
        elif todasJogadas[0] == 1:
            self.Fodase = True
            if todasJogadas == [1, 4, 0]:
                return verifica
            elif todasJogadas == [1, 4, 2]:
                return verifica
            elif todasJogadas == [1, 4, 3]:
                return 2
            elif todasJogadas == [1, 4, 5]:
                return 0
            elif todasJogadas == [1, 4, 6]:
                return 0
            elif todasJogadas == [1, 4, 7]:
                return 6
            elif todasJogadas == [1, 4, 8]:
                return 2
        elif todasJogadas[0] == 2:
            self.Fodase = True
            if todasJogadas == [2, 4, 0]:
                return verifica
            elif todasJogadas == [2, 4, 1]:
                return verifica
            elif todasJogadas == [2, 4, 3]:
                return 6
            elif todasJogadas == [2, 4, 5]:
                return verifica
            elif todasJogadas == [2, 4, 6]:
                return 1
            elif todasJogadas == [2, 4, 7]:
                return 6
            elif todasJogadas == [2, 4, 8]:
                return verifica
        elif todasJogadas[0] == 3:
            self.Fodase = True
            if todasJogadas == [3, 4, 0]:
                return verifica
            elif todasJogadas == [3, 4, 1]:
                return 2
            elif todasJogadas == [3, 4, 2]:
                return 0
            elif todasJogadas == [3, 4, 5]:
                return 2
            elif todasJogadas == [3, 4, 6]:
                return verifica
            elif todasJogadas == [3, 4, 7]:
                return 8
            elif todasJogadas == [3, 4, 8]:
                return 6
        elif todasJogadas[0] == 4:
            self.Fodase = True
            if todasJogadas == [4, 6, 0]:
                return verifica
            elif todasJogadas == [4, 6, 1]:
                return verifica
            elif todasJogadas == [4, 6, 2]:
                return 0
            elif todasJogadas == [4, 6, 3]:
                # Sequence continues in terceiraRodada: keep using tables.
                self.Fodase = False
                return 5
            elif todasJogadas == [4, 6, 5]:
                return verifica
            elif todasJogadas == [4, 6, 7]:
                # Sequence continues in terceiraRodada: keep using tables.
                self.Fodase = False
                return 1
            elif todasJogadas == [4, 6, 8]:
                return verifica
        elif todasJogadas[0] == 5:
            self.Fodase = True
            if todasJogadas == [5, 4, 0]:
                return 2
            elif todasJogadas == [5, 4, 1]:
                return 0
            elif todasJogadas == [5, 4, 2]:
                return verifica
            elif todasJogadas == [5, 4, 3]:
                return 2
            elif todasJogadas == [5, 4, 6]:
                return 8
            elif todasJogadas == [5, 4, 7]:
                return 6
            elif todasJogadas == [5, 4, 8]:
                return verifica
        elif todasJogadas[0] == 6:
            self.Fodase = True
            if todasJogadas == [6, 4, 0]:
                return verifica
            elif todasJogadas == [6, 4, 1]:
                return 2
            elif todasJogadas == [6, 4, 2]:
                return 1
            elif todasJogadas == [6, 4, 3]:
                return verifica
            elif todasJogadas == [6, 4, 5]:
                return 2
            elif todasJogadas == [6, 4, 7]:
                return verifica
            elif todasJogadas == [6, 4, 8]:
                return verifica
        elif todasJogadas[0] == 7:
            self.Fodase = True
            if todasJogadas == [7, 4, 0]:
                return 6
            elif todasJogadas == [7, 4, 1]:
                return 6
            elif todasJogadas == [7, 4, 2]:
                return 8
            elif todasJogadas == [7, 4, 3]:
                return 8
            elif todasJogadas == [7, 4, 5]:
                return 6
            elif todasJogadas == [7, 4, 6]:
                return verifica
            elif todasJogadas == [7, 4, 8]:
                return verifica
        elif todasJogadas[0] == 8:
            self.Fodase = True
            if todasJogadas == [8, 4, 0]:
                return 1
            elif todasJogadas == [8, 4, 1]:
                return 0
            elif todasJogadas == [8, 4, 2]:
                return verifica
            elif todasJogadas == [8, 4, 3]:
                return 0
            elif todasJogadas == [8, 4, 5]:
                return verifica
            elif todasJogadas == [8, 4, 6]:
                return verifica
            elif todasJogadas == [8, 4, 7]:
                return verifica
    def terceiraRodada(self, todasJogadas, verifica):
        """Third reply for the two openings left unresolved by segundaRodada."""
        self.Fodase = True
        if todasJogadas[:-1] == [4, 6, 3, 5]:
            if todasJogadas[-1] == 2:
                return 0
            else:
                return verifica
        elif todasJogadas[:-1] == [4, 6, 7, 1]:
            if todasJogadas[-1] == 2:
                return 8
            else:
                return verifica
    def dificilPrimeiro(self, campo, pecaPcInteligente, pecaOutroJogador):
        """Choose moves when this player opens (always starts at cell 6)."""
        simboloVazio = self.ISimboloCampoVazio
        verifica = self.IDificilSensores.auxVerificaVitoria()
        # The PC is the first to play
        if campo[6] == simboloVazio:
            # Move 1
            return 6
        else:
            # Opponent's first reply decides which scripted line to follow.
            primeiraJogadorDois = self.IEntrada.getTodasJogadasDaRodada()[1]
            if primeiraJogadorDois == 0:
                if campo[7] == pecaPcInteligente:
                    if campo[8] == pecaOutroJogador:
                        if campo[4] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 4
                    else:
                        # Move 3.1
                        return 8
                else:
                    # Move 2
                    return 7
            elif primeiraJogadorDois == 1:
                if campo[8] == pecaPcInteligente:
                    if campo[7] == pecaOutroJogador:
                        if campo[4] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 4
                    else:
                        # Move 3.1
                        return 7
                else:
                    # Move 2
                    return 8
            elif primeiraJogadorDois == 2:
                if campo[0] == pecaPcInteligente:
                    if campo[3] == pecaOutroJogador:
                        if campo[8] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 8
                    else:
                        # Move 3.1
                        return 3
                else:
                    # Move 2
                    return 0
            elif primeiraJogadorDois == 3:
                if campo[7] == pecaPcInteligente:
                    if campo[8] == pecaOutroJogador:
                        if campo[4] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 4
                    else:
                        # Move 3.1
                        return 8
                else:
                    # Move 2
                    return 7
            elif primeiraJogadorDois == 4:
                if campo[2] == simboloVazio:
                    return 2
                else:
                    return verifica
            elif primeiraJogadorDois == 5:
                if campo[0] == pecaPcInteligente:
                    if campo[3] == pecaOutroJogador:
                        if campo[4] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 4
                    else:
                        # Move 3.1
                        return 3
                else:
                    # Move 2
                    return 0
            elif primeiraJogadorDois == 7:
                if campo[3] == pecaPcInteligente:
                    if campo[0] == pecaOutroJogador:
                        if campo[4] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 4
                    else:
                        # Move 3.1
                        return 0
                else:
                    # Move 2
                    return 3
            elif primeiraJogadorDois == 8:
                if campo[3] == pecaPcInteligente:
                    if campo[0] == pecaOutroJogador:
                        if campo[4] == pecaPcInteligente:
                            # Move 4
                            return verifica
                        else:
                            # Move 3.2
                            return 4
                    else:
                        # Move 3.1
                        return 0
                else:
                    # Move 2
                    return 3
| StarcoderdataPython |
8083244 | import hashlib
import json
import logging
import os
import numpy as np
from ecdsa import VerifyingKey, SECP256k1
from abc import ABC, abstractmethod
from datetime import datetime
from sklearn.cluster import KMeans
from .merkle_tree import MerkleTree
from db.mapper import Mapper
class Serializable(ABC):
    """Base class for objects that serialize to canonical JSON bytes.

    Subclasses implement to_dict(); serialize() renders that dict with
    sorted keys so equal objects always produce identical bytes (needed
    for stable hashing).
    """
    def serialize(self):
        """Return the object's dict form as UTF-8 JSON with sorted keys."""
        payload = self.to_dict()
        return json.dumps(payload, sort_keys=True).encode("utf-8")
    @abstractmethod
    def to_dict(self):
        """Return a JSON-serializable dict representation of the object."""
class Transaction(Serializable):
    """A transfer of `amount` coins from `source` to `target`.

    The transaction is hashed over its core fields (to_dict) and carries an
    ECDSA public key and signature (to_full_dict) so peers can verify it.
    """
    def __init__(self, source=None, target=None, amount=0, timestamp=None,
                 pubkey=None, sig=None):
        """Build a transaction, optionally restoring a hex pubkey/signature.

        Bug fix: the previous default `timestamp=datetime.now()` was
        evaluated once at class-definition time, so every transaction built
        without an explicit timestamp shared the same stale module-load
        time. The default is now resolved per call.
        """
        self.source = source
        self.target = target
        self.amount = amount
        self.timestamp = datetime.now() if timestamp is None else timestamp
        if pubkey and sig:
            # Restore the key/signature from their hex string representation.
            self.pubkey = VerifyingKey.from_string(bytes.fromhex(pubkey), SECP256k1, hashlib.sha256,
                                                   valid_encodings=['raw'])
            self.sig = bytes.fromhex(sig)
        # NOTE(review): when pubkey/sig are omitted, the attributes are left
        # unset; to_full_dict()/validate() raise AttributeError until
        # set_pubkey()/set_signature() are called — confirm this is intended.
    def set_pubkey(self, pubkey):
        self.pubkey = pubkey
    def set_signature(self, sig):
        self.sig = sig
    @staticmethod
    def from_dict(transaction_dict):
        """Rebuild a Transaction from a dict (timestamp may be str or datetime)."""
        if type(transaction_dict["timestamp"]) == str:
            timestamp = datetime.strptime(
                transaction_dict["timestamp"], '%m/%d/%Y, %H:%M:%S'
            )
        else:
            timestamp = transaction_dict["timestamp"]
        return Transaction(transaction_dict["source"], transaction_dict["target"],
                           transaction_dict["amount"], timestamp, transaction_dict["pubkey"],
                           transaction_dict["sig"])
    def to_dict(self):
        """Core fields only — this is the payload that gets hashed/signed."""
        return {
            "source": self.source,
            "target": self.target,
            "amount": self.amount,
            "timestamp": self.timestamp.strftime("%m/%d/%Y, %H:%M:%S")
        }
    def to_full_dict(self):
        ''' Create a dict that also contains the pubkey and the signature for the transaction '''
        return {
            "source": self.source,
            "target": self.target,
            "amount": self.amount,
            "timestamp": self.timestamp.strftime("%m/%d/%Y, %H:%M:%S"),
            # get the text string representation of the ECDSA key binary blobs with hex()
            "pubkey": self.pubkey.to_string().hex(),
            "sig": self.sig.hex()
        }
    def hash(self):
        """SHA-256 hex digest of the canonical serialized core fields."""
        return hashlib.sha256(self.serialize()).hexdigest()
    def get_balance(self):
        """Compute the sender's balance by scanning all blocks on disk.

        Every account is seeded with a +100 testing credit.
        """
        balance = 100  # +100 balance for testing
        cwd = os.getcwd()
        if cwd.endswith('tests'):
            # if in directory 'tests', go one directory up
            cwd = os.path.dirname(os.getcwd())
        local_block_hashes = os.listdir(cwd + "/db/blocks/")
        for block_hash in local_block_hashes:
            block_dict = Mapper().read_block(block_hash)
            block: Block = Block().from_dict(block_dict, block_hash)
            try:
                for transaction in block.transactions:
                    if transaction.source == self.source:
                        balance -= transaction.amount
                    if transaction.target == self.source:
                        balance += transaction.amount
            except AttributeError:
                logging.error("no transaction")
        return balance
    def validate(self):
        """Validate structure, funds and signature; return True if all pass."""
        # verify the transaction data structure
        # we just assume if the keys are present, the values are also valid
        expected_keys = set(["amount", "source", "target", "timestamp"])
        if set(self.to_dict()) != expected_keys:
            logging.error("Transaction key fields invalid")
            return False
        balance = self.get_balance()
        if balance < self.amount:
            logging.error(f"Not valid: {self.source} can't send {self.amount} "
                          f"with balance of {balance}")
            return False
        # verify the transaction signature
        # NOTE(review): ecdsa's VerifyingKey.verify raises BadSignatureError
        # on a mismatch instead of returning False — an invalid signature
        # propagates as an exception here; confirm callers expect that.
        tx_hash = self.hash()
        if not self.pubkey.verify(self.sig, tx_hash.encode("utf-8")):
            logging.error("Cannot verify transaction signature")
            return False
        logging.info("Transaction is valid")
        return True
class Block(Serializable):
    """A block of transactions linked to its predecessor by hash.

    Provides (de)serialization, a Merkle-tree based hash over the
    transactions plus nonce, validation, and three proof-of-work mining
    strategies ('bruteforce', 'nonce-skip', 'bitshift').
    """
    def __init__(self, pred=None, transactions=None, saved_hash=None, nonce=None):
        if not transactions:
            transactions = list()
        self.predecessor = pred
        self.transactions = transactions
        self.nonce = nonce
        self.saved_hash = saved_hash
        # Mining-loop control flag; stop_mining() clears it from outside.
        self.is_mining = True
        # Previously mined nonces read from disk ('nonce-skip' strategy).
        self.nonce_list = []
    def set_nonce(self, nonce):
        self.nonce = nonce
    def get_nonce(self):
        return self.nonce
    def set_saved_hash(self, saved_hash):
        self.saved_hash = saved_hash
    @staticmethod
    def from_dict(block_dict, block_hash):
        """Rebuild a Block (and its Transaction objects) from a dict."""
        block = Block(block_dict["predecessor"],
                      block_dict["transactions"], block_hash, int(block_dict["nonce"]))
        transaction_objects = []
        for transaction_dict in block.transactions:
            transaction_objects.append(Transaction.from_dict(transaction_dict))
        block.transactions = transaction_objects
        return block
    def to_dict(self):
        """Dict form with full (signed) transactions; used by serialize()."""
        transactions = list()
        for t in self.transactions:
            transactions.append(t.to_full_dict())
        return {
            "predecessor": self.predecessor,
            "transactions": transactions,
            "nonce": self.nonce
        }
    def to_dict_with_hash(self):
        """Like to_dict(), but also includes this block's own hash."""
        transactions = list()
        for t in self.transactions:
            transactions.append(t.to_full_dict())
        return {
            "hash": self.hash(),
            "predecessor": self.predecessor,
            "transactions": transactions,
            "nonce": self.nonce
        }
    def hash(self):
        """Hash predecessor + Merkle root of (transactions, nonce) + nonce.

        Logs an error and implicitly returns None when no nonce is set yet.
        """
        if self.nonce is not None:
            transactions = list()
            for t in self.transactions:
                transactions.append(json.dumps(t.to_dict()))
            # The nonce participates in the Merkle tree as one more leaf.
            transactions.append(str(self.nonce))
            if len(transactions) != 0:
                mtree = MerkleTree(transactions)
                t_hash = mtree.getRootHash()
            else:
                t_hash = transactions
            block_dict = {
                "predecessor": self.predecessor,
                "transactions": t_hash,
                "nonce": self.nonce
            }
            serialized_block = json.dumps(
                block_dict, sort_keys=True).encode("utf-8")
            return hashlib.sha256(serialized_block).hexdigest()
        else:
            logging.error("No Nonce available jet. Mine it first!")
    def add_transaction(self, t):
        self.transactions.append(t)
    def validate(self):
        """Validate all transactions, the stored hash, and the nonce."""
        transactions = list()
        for transaction in self.transactions:  # Validating each transaction
            if transaction.validate() is False:
                return False
            transactions.append(json.dumps(transaction.to_dict()))
        if self.saved_hash != self.hash():
            logging.error("Not valid: recalculating the hash results in a different hash")
            return False
        if not self.validate_nonce(transactions, self.nonce):
            logging.error(f"Not valid: Nonce {self.nonce} does not fullfill the difficulty")
            return False
        logging.info("Block is valid")
        return True
    def write_to_file(self):
        """Persist the serialized block under its hash via the Mapper."""
        # NOTE(review): local name `hash` shadows the builtin within this
        # method; harmless here but worth renaming eventually.
        hash = self.hash()
        block = self.serialize()
        Mapper().write_block(hash, block)
    def stop_mining(self) -> None:
        self.is_mining = False
    def get_mining_status(self) -> bool:
        return self.is_mining
    def get_iterations(self) -> int:
        # NOTE(review): self.iterations is only set by the 'nonce-skip' and
        # 'bitshift' strategies; calling this after 'bruteforce' raises
        # AttributeError — confirm intended.
        return self.iterations
    def determine_start_nonce(self) -> int:
        """Heuristic starting nonce from previously mined nonces on disk.

        Every 5th block (once 15+ nonces exist) the nonce history is
        clustered with KMeans and the start is the midpoint between the
        smallest centroid and the smallest known nonce; otherwise the last
        persisted start nonce is reused.
        """
        # convert the read data into a list of integers
        data = Mapper().read_nonce_list()
        if data:
            data = data.split('\n')  # str.split() returns a list of strings
            data.pop(-1)  # remove the last element, since it is an empty string
            self.nonce_list = sorted(list(map(int, data)))  # convert all str to int and sort
        # only cluster every 5 blocks and when we have enough initial values
        if len(self.nonce_list) >= 15 and len(self.nonce_list) % 5 == 0:
            data = np.array(self.nonce_list)
            kmeans = KMeans(n_clusters=3).fit(data.reshape(-1, 1))
            kmeans.predict(data.reshape(-1, 1))
            # return the mean value between the first centroid and the smallest nonce in the list
            # FIXME using the standard deviation might make more sense here
            return int((int(min(kmeans.cluster_centers_)[0]) + min(self.nonce_list)) / 2)
        # else return the last determined start nonce
        return int(Mapper().read_latest_start_nonce())
    def find_nonce(self, difficulty=4, method='bruteforce'):
        """Mine a nonce satisfying `difficulty` leading zero hex digits.

        method='bruteforce': linear scan from 0.
        method='nonce-skip': start from a heuristic nonce and skip nonces
        already used by earlier blocks; persists results via Mapper.
        method='bitshift': from each start value, probe by repeated left
        then right bit-shifts before advancing the start value.
        """
        transactions = list()
        for t in self.transactions:
            transactions.append(json.dumps(t.to_dict()))
        if method == 'bruteforce':
            nonce = 0
            while self.is_mining:
                # Try with this nonce
                if self.validate_nonce(transactions, nonce, difficulty):
                    logging.info(f"successfull at {nonce}")
                    self.nonce = nonce
                    return nonce
                else:
                    logging.debug(f"not successfull at {nonce}")
                    nonce += 1
        elif method == 'nonce-skip':
            nonce = self.determine_start_nonce()
            print(nonce)
            Mapper().write_latest_start_nonce(str(nonce).encode())
            # remove all elements from the nonce_list that are smaller than the starting nonce
            self.nonce_list = list(filter(lambda x: x >= nonce, self.nonce_list))
            index = 0
            iterations = 0
            while self.is_mining:
                # check if the nonce has already been used
                if self.nonce_list and nonce == self.nonce_list[index]:
                    logging.info(f"skipped {nonce}")
                    nonce += 1
                    # check if we reached the end of the list
                    if not (index + 1 == len(self.nonce_list)):
                        index += 1
                    continue
                elif self.validate_nonce(transactions, nonce, difficulty):
                    logging.info(f"successfull at {nonce}")
                    self.nonce = nonce
                    self.iterations = iterations + 1
                    Mapper().append_to_nonce_list(nonce)
                    return nonce
                else:
                    logging.debug(f"not successfull at {nonce}")
                    nonce += 1
                    iterations += 1
                # stop when we reach the limit of a 32-bit integer
                if nonce > 2**32:
                    self.is_mining = False
        elif method == 'bitshift':
            # when starting at 33000, we have 17 values until 2*32 and 16 values until 1
            start_value = 33000
            iterations = 0
            used_numbers = []
            while self.is_mining:
                nonce = start_value
                while True:
                    if nonce not in used_numbers:
                        if self.validate_nonce(transactions, nonce, difficulty):
                            logging.info(f"successfull at {nonce}")
                            logging.debug(f"{start_value=}\t{iterations=}")
                            self.iterations = iterations
                            return nonce
                        used_numbers.append(nonce)
                        iterations += 1
                        logging.debug(f"not successfull at {nonce}")
                    nonce = nonce << 1
                    if nonce > 2**32:  # make sure we stay in our bounds
                        break
                nonce = start_value  # reset the nonce
                while True:
                    if nonce not in used_numbers:
                        if self.validate_nonce(transactions, nonce, difficulty):
                            logging.info(f"successfull at {nonce}")
                            logging.debug(f"{start_value=}\t{iterations=}")
                            self.iterations = iterations
                            return nonce
                        used_numbers.append(nonce)
                        iterations += 1
                        logging.debug(f"not successfull at {nonce}")
                    nonce = nonce >> 1
                    if nonce < 1:  # check for 1, since we never reach 0
                        break
                start_value += 1
                # at 130000 we have 16 values until 2*32 and 17 values until 1
                if start_value > 130000:
                    self.is_mining = False
    def validate_nonce(self, transactions, nonce, difficulty=4):
        """True if the Merkle root of transactions+nonce starts with
        `difficulty` zero hex digits. Temporarily appends the nonce to the
        caller's list and pops it afterwards (list is restored)."""
        transactions.append(str(nonce))
        mtree = MerkleTree(transactions)
        t_hash = mtree.getRootHash()
        transactions.pop()
        # check the result
        important_digits = t_hash[0:difficulty]
        not_null_digits = important_digits.replace("0", "")
        if len(not_null_digits) == 0:
            return True
        return False
| StarcoderdataPython |
6450597 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Decompose 0/1 loss into bias variance using meny learning methods in classification
Reference
http://www-bcf.usc.edu/~gareth/research/bv.pdf
"""
import os
import numpy as np
from sklearn.utils import resample
from sklearn.grid_search import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from sklearn.ensemble import RandomForestClassifier, BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from classes.ensemble_classifier import EnsembleClassifier
from xgboost import XGBClassifier
from sklearn.linear_model import SGDClassifier
def original_cross_val_score(estimator, sample_X, sample_y):
    """Accuracy over pooled out-of-fold predictions of a manual 5-fold CV."""
    predicted = []
    actual = []
    for train_index, test_index in KFold(sample_X.shape[0], 5):
        # Split the resampled data into this fold's train/test partitions.
        fold_train_X = sample_X[train_index]
        fold_test_X = sample_X[test_index]
        fold_train_y = sample_y[train_index]
        fold_test_y = sample_y[test_index]
        estimator.fit(fold_train_X, fold_train_y)
        predicted.extend(estimator.predict(fold_test_X).tolist())
        actual.extend(fold_test_y.tolist())
    return accuracy_score(predicted, actual)
class xgbTuner:
    """Hyperopt (TPE) hyper-parameter tuner with a GridSearchCV-like API.

    Exposes fit/predict plus best_score_/best_params_ so it can be used
    interchangeably with GridSearchCV in the evaluation loop below.
    """
    def __init__(self, estimator, tuned_parameters, cv=5):
        self.estimator = estimator
        self.tuned_parameters = tuned_parameters
        self.cv = cv
    def score(self, parameter):
        """Objective for hyperopt: negated CV accuracy (hyperopt minimizes)."""
        # hp.quniform yields floats, but XGBoost needs an integer max_depth.
        parameter['max_depth'] = int(parameter['max_depth'])
        self.estimator.set_params(**parameter)
        return -cross_val_score(self.estimator, self.sample_X, self.sample_y, cv=self.cv, scoring="accuracy").mean()
    def predict(self, sample_X):
        """Predict with the estimator refitted on the best parameters."""
        pred = self.estimator.predict(sample_X)
        return pred
    def fit(self, sample_X, sample_y):
        """Run 200 TPE trials, then refit the estimator on the best params."""
        self.sample_X = sample_X
        self.sample_y = sample_y
        best_parameter = fmin(self.score, self.tuned_parameters, algo=tpe.suggest, max_evals=200)
        # Re-add the fixed (non-searched) parameters before the final refit.
        best_parameter.update({"objective":'binary:logistic'})
        best_parameter.update({'silent': 1})
        best_parameter.update({'nthread': -1})
        self.best_score_ = self.score(best_parameter)
        self.best_params_ = best_parameter
        self.estimator.set_params(**best_parameter)
        self.estimator.fit(self.sample_X, self.sample_y)
# Number of bootstrap resamples used for the bias/variance decomposition.
n_iter = 10
dataset = np.loadtxt(os.path.join("data", "pima-indians-diabetes.data"), delimiter=',')
sample_X = dataset[:, 0:8]
sample_y = dataset[:, 8]
X_train, X_test, y_train, y_test = train_test_split(sample_X, sample_y, test_size=0.2)
print "number of sample ", X_train.shape, X_test.shape
# Hyperopt search space for XGBoost (used by xgbTuner).
xbg_tuning_parameters = {
    'objective': 'binary:logistic',
    'learning_rate': hp.quniform('learning_rate', 0.01, 1, 0.01),
    'gamma': hp.quniform('gamma', 0, 2, 0.1),
    'min_child_weight': hp.quniform('min_child_weight', 0, 10, 1),
    'max_depth': hp.quniform('max_depth', 1, 20, 1),
    'subsample': hp.quniform('subsample', 0.5, 1, 0.1),
    #'colsample_bytree': hp.quniform('colsample_bytree', 0.1, 1, 0.1),
    'colsample_bytree': hp.quniform('colsample_bytree', 0.3, 1, 0.1),
    'reg_lambda': hp.quniform('reg_lambda', 0.0, 1.5, 0.1),
    'reg_alpha': hp.quniform('reg_alpha', 0.0, 1.0, 0.1),
    'nthread': -1,
    'silent': 1,
}
# GridSearchCV parameter grid for the SGD (perceptron-loss) classifier.
SGD_parameters = [{ 'loss':['perceptron'],
                    'penalty':['l1', 'l2', 'elasticnet'],
                    'alpha': np.linspace(1e-2, 1e+2, 40),
                    'l1_ratio': np.linspace(1e-1, 1, 5),
                    'epsilon':np.linspace(1e-1, 1, 5)
                  }]
# Each entry: estimator, optional tuning grid ("tuned_parameters"),
# optional custom tuner ("tuner"), and a display name.
estimators = [
    {"context": SGDClassifier(n_iter=50), "tuned_parameters": SGD_parameters, "name": "SGD"},
    {"context": XGBClassifier(), "tuned_parameters": xbg_tuning_parameters, "name": "XGBoost", "tuner": xgbTuner(XGBClassifier(), xbg_tuning_parameters)},
    {"context": EnsembleClassifier(), "name": "EnsembleClassifier"},
    {"context": BaggingClassifier(DecisionTreeClassifier(max_depth=14), max_samples=0.9, max_features=0.5, n_estimators=50), "name": "Bagging Tree"},
    {"context": RandomForestClassifier(n_estimators=50), "tuned_parameters": [{'max_depth': range(8, 20, 2)}], "name": "random forest"},
    {"context": DecisionTreeClassifier(), "tuned_parameters": [{'max_depth': range(8, 20, 2)}], "name": "decision tree"},
    {"context": LogisticRegression(), "tuned_parameters": [{'C': np.linspace(1e-8, 1e+2, 20)}], "name": "logistic"},
    {"context": KNeighborsClassifier(), "tuned_parameters": [{'n_neighbors': range(1, 10, 1)}], "name": "KNN"},
]
# For each estimator: fit on n_iter bootstrap resamples, collect test-set
# predictions, then decompose the 0/1 loss into variance and bias terms.
for estimator in estimators:
    print "==========%s========"%(estimator['name'])
    pred = np.zeros((X_test.shape[0], n_iter))
    for i in range(n_iter):
        sample_X, sample_y = resample(X_train, y_train)
        if estimator.get('tuned_parameters') != None:
            if estimator.get('tuner') != None:
                context = estimator['tuner']
            else:
                context = GridSearchCV(estimator['context'], estimator['tuned_parameters'], cv=5, n_jobs=-1, scoring="accuracy")
            context.fit(sample_X, sample_y)
            if i == 0:
                print "grid search results:"
                print "\tbest_params", context.best_params_
                print "\tcross_val_score", context.best_score_ # overestimated: bootstrap resampling duplicates samples across CV folds
        else:
            context = estimator["context"]
            context.fit(sample_X, sample_y)
            if i == 0:
                print "cross_val_score", original_cross_val_score(context, sample_X, sample_y)
        pred[:, i] = context.predict(X_test)
    tot_var = 0.
    tot_bias = 0.
    for i in range(X_test.shape[0]):
        # Majority vote across resamples is the "main prediction";
        # disagreement with it is variance, its error is bias.
        target = pred[i, :]
        estimate = 1 if target[target==1].shape[0] > target[target==0].shape[0] else 0
        tot_var += float(target[target!=estimate].shape[0]) / target.shape[0]
        tot_bias += 0 if estimate == y_test[i] else 1
    print "variance", float(tot_var) / X_test.shape[0]
    print "bias", float(tot_bias) / X_test.shape[0]
| StarcoderdataPython |
9621984 | <gh_stars>0
#
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import common
import unittest
from collections import OrderedDict
from blockimgdiff import BlockImageDiff, EmptyImage, DataImage, Transfer
from rangelib import RangeSet
class BlockImageDiffTest(unittest.TestCase):
  """Tests for blockimgdiff.BlockImageDiff digraph generation."""
  def test_GenerateDigraphOrder(self):
    """Make sure GenerateDigraph preserves the order.
    t0: <0-5> => <...>
    t1: <0-7> => <...>
    t2: <0-4> => <...>
    t3: <...> => <0-10>
    t0, t1 and t2 must go before t3, i.e. t3.goes_after =
    { t0:..., t1:..., t2:... }. But the order of t0-t2 must be preserved.
    """
    src = EmptyImage()
    tgt = EmptyImage()
    block_image_diff = BlockImageDiff(tgt, src)
    transfers = block_image_diff.transfers
    # Transfers register themselves in `transfers` on construction, so the
    # construction order below defines the evaluation order.
    t0 = Transfer(
        "t1", "t1", RangeSet("10-15"), RangeSet("0-5"), "move", transfers)
    t1 = Transfer(
        "t2", "t2", RangeSet("20-25"), RangeSet("0-7"), "move", transfers)
    t2 = Transfer(
        "t3", "t3", RangeSet("30-35"), RangeSet("0-4"), "move", transfers)
    t3 = Transfer(
        "t4", "t4", RangeSet("0-10"), RangeSet("40-50"), "move", transfers)
    block_image_diff.GenerateDigraph()
    t3_goes_after_copy = t3.goes_after.copy()
    # Elements in the set must be in the transfer evaluation order.
    elements = list(t3_goes_after_copy)
    self.assertEqual(t0, elements[0])
    self.assertEqual(t1, elements[1])
    self.assertEqual(t2, elements[2])
    # Now switch the order of t0, t1 and t2.
    transfers[0], transfers[1], transfers[2] = (
        transfers[2], transfers[0], transfers[1])
    t3.goes_after.clear()
    t3.goes_before.clear()
    block_image_diff.GenerateDigraph()
    # The goes_after must be different from last run.
    self.assertNotEqual(t3_goes_after_copy, t3.goes_after)
    # Assert that each element must agree with the transfer order.
    elements = list(t3.goes_after)
    self.assertEqual(t2, elements[0])
    self.assertEqual(t0, elements[1])
    self.assertEqual(t1, elements[2])
| StarcoderdataPython |
384538 | #!/usr/bin/python2
import rospy as rp
import rospkg
import yaml
import math
from upo_msgs.msg import PersonPoseUPO
from upo_msgs.msg import PersonPoseArrayUPO
#from hri_feedback_msgs.msg import HRIFeedbackFromInterface
from upo_decision_making.msg import ControlEvent, IDArray, HRIFeedbackFromInterface
class EventPublisher(object):
    """Interactive tester that publishes HRI feedback and control events.

    Reads waypoint definitions from a YAML file resolved relative to the
    `upo_launchers` ROS package and keeps the latest detected people list
    from the "people/navigation" topic.
    """
    def __init__(self):
        waypoint_defs_path = rp.get_param('/waypoint_defs')
        rospack = rospkg.RosPack()
        base_path = rospack.get_path("upo_launchers") #"teresa_common"
        if base_path[-1] != "/":
            base_path += "/"
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted input; acceptable here only if the file is trusted.
        with open(base_path + waypoint_defs_path, 'r') as f:
            self.wayp_dict = yaml.load(f)
        self.gif_pub = rp.Publisher("teresa_pilot_feedback", HRIFeedbackFromInterface, queue_size=1)
        self.cev_pub = rp.Publisher("behavior_manager/control_events", ControlEvent, queue_size=1)
        self.people_sub = rp.Subscriber("people/navigation",
                                        PersonPoseArrayUPO,
                                        self.people_cb,
                                        queue_size=1)
        self._people = []
    def people_cb(self, msg):
        """Cache the most recent list of detected people."""
        self._people = msg.personPoses
    def publish_interactiontarget_req(self):
        """Ask for a person id and publish a navigate-to-person request."""
        per_selected = False
        while not per_selected:
            print 'Select person Id:'
            req_id = int(raw_input('> '))
            req_idx = -1
            for a in range(len(self._people)):
                if self._people[a].id == req_id:
                    req_idx = a
                    per_selected = True
            if per_selected == False:
                print 'There is not such a person Id.'
        gif = HRIFeedbackFromInterface()
        gif.header.stamp = rp.Time.now()
        gif.type = HRIFeedbackFromInterface.NAV_TO_PERSON_REQUESTED
        print "Sending request to IT id '"+str(req_id) + "'"
        gif.data = str(req_id)
        self.gif_pub.publish(gif)
    def publish_walkside_req(self):
        """Ask for a person id and publish a walk-side-by-side request."""
        per_selected = False
        while not per_selected:
            print 'Select person Id:'
            req_id = int(raw_input('> '))
            req_idx = -1
            for a in range(len(self._people)):
                if self._people[a].id == req_id:
                    req_idx = a
                    per_selected = True
            if per_selected == False:
                print 'There is not such a person Id.'
        gif = HRIFeedbackFromInterface()
        gif.header.stamp = rp.Time.now()
        gif.type = HRIFeedbackFromInterface.WALK_WITH_PERSON_REQUESTED
        print "Sending request to walk with IT id '%s'" % req_id
        gif.data = str(req_id)
        self.gif_pub.publish(gif)
    def publish_waypoint_req(self):
        """Ask for a waypoint index and publish a navigate-to-waypoint request."""
        wayp_selected = False
        while not wayp_selected:
            try:
                print 'Select waypoint index (starts at 0):'
                a = int(raw_input('> '))
                if a in range(len(self.wayp_dict)):
                    wayp_selected = True
            except ValueError:
                rp.logwarn("Not a valid input.")
            finally:
                if not wayp_selected:
                    print 'Incorrect waypoint index.'
                    print 'There are only %d registered waypoints.' % len(self.wayp_dict)
                    print 'Check cfg/waypoint_defs.yaml.'
        # NOTE(review): dict key order is not guaranteed in Python 2, so the
        # index-to-waypoint mapping may vary between runs — confirm intended.
        k = self.wayp_dict.keys()[a]
        print "Sending request to waypoint '%s'" % k
        gif = HRIFeedbackFromInterface()
        gif.header.stamp = rp.Time.now()
        gif.type = HRIFeedbackFromInterface.NAV_TO_WAYPOINT_REQUESTED
        gif.data = k
        self.gif_pub.publish(gif)
    def publish_low_batt(self):
        """Publish a LOW_BATT control event."""
        cev = ControlEvent()
        cev.stamp = rp.Time.now()
        cev.type = ControlEvent.LOW_BATT
        self.cev_pub.publish(cev)
    def publish_call_over(self):
        """Publish a CALL_OVER interface-feedback event."""
        gif = HRIFeedbackFromInterface()
        gif.header.stamp = rp.Time.now()
        gif.type = HRIFeedbackFromInterface.CALL_OVER
        self.gif_pub.publish(gif)
    def publish_hearing_probs(self):
        """Publish a HEARING_PROBLEMS_INDICATED interface-feedback event."""
        gif = HRIFeedbackFromInterface()
        gif.header.stamp = rp.Time.now()
        gif.type = HRIFeedbackFromInterface.HEARING_PROBLEMS_INDICATED
        self.gif_pub.publish(gif)
if __name__ == '__main__':
    rp.init_node('upo_behavior_tester')
    gp = EventPublisher()
    rp.loginfo('test node running')
    # Dispatch table: menu choice -> publishing method.
    option_dict = {'1': gp.publish_waypoint_req,
                   '2': gp.publish_interactiontarget_req,
                   '3': gp.publish_walkside_req,
                   '4': gp.publish_low_batt,
                   '5': gp.publish_call_over,
                   '6': gp.publish_hearing_probs}
    while not rp.is_shutdown():
        print 'Select an event type:'
        print '1: Navigate to Waypoint Request'
        print '2: Navigate to Interaction Target Request'
        print '3: Walk side-by-side Request'
        print '4: Low Battery'
        print '5: Call Over'
        print '6: Hearing Problems Indicated'
        a = raw_input('> ')
        try:
            option_dict[a]()
        except KeyError:
            print 'Option not recognized. Please select an option from the list'
    # Reached only after shutdown; spin() then returns immediately.
    rp.spin()
| StarcoderdataPython |
4979869 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import time
import requests
import ui
# check data every 10 minutes
SLEEP = 10 * 60
# WAQI feed endpoint; the API token comes from the AQI_TOKEN environment
# variable ('<PASSWORD>' is a placeholder fallback, not a working token).
URL = 'http://api.waqi.info/feed/shanghai/us-consulate/?token=' + os.getenv('AQI_TOKEN', '<PASSWORD>')
def main():
    """Poll the AQI API forever and push each reading to the display thread."""
    # wait until network is ready
    time.sleep(10)
    thread = ui.MyThread()
    thread.start()
    while True:
        # Bug fix: requests.get had no timeout, so a hung connection stalled
        # the monitor forever, and any transient network/HTTP/JSON error
        # crashed the daemon loop. Failures now skip this cycle and retry.
        try:
            r = requests.get(URL, timeout=30)
            aqi = r.json()['data']['aqi']
            thread.set(aqi)
        except (requests.RequestException, KeyError, ValueError):
            # Network error, unexpected payload shape, or invalid JSON:
            # keep the previous reading and try again next cycle.
            pass
        time.sleep(SLEEP)
def fork():
    """Daemonize: fork, record the child's PID in a pidfile, run main().

    The parent writes the child's PID to a fixed path and exits; the child
    process runs the monitoring loop.
    """
    pid = os.fork()
    if pid > 0:
        # Parent: persist the child's PID for later management, then exit.
        with open('/home/pi/client/mama-ai.pid','w') as f:
            f.write(str(pid)+"\n")
        sys.exit()
    if pid == 0:
        # Child: run the monitor loop.
        main()
if __name__=='__main__':
    # Entry point: daemonize and start the AQI monitor loop.
    fork()
| StarcoderdataPython |
5147936 | <filename>predict.py
from __future__ import print_function
import argparse
import skimage
import skimage.io
import skimage.transform
from PIL import Image
from tqdm import tqdm
import os
import torch
import torch.nn.parallel
import torch.distributed as dist
from torch.autograd import Variable
import numpy as np
import warnings
warnings.simplefilter("ignore")
def parse_args(argv=None):
    """Parse command-line options for GANet prediction.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; defaults to ``sys.argv[1:]``. Accepting an
        explicit list generalizes the function so it can be driven
        programmatically (e.g. from tests) without touching ``sys.argv``.
        Existing callers using ``parse_args()`` are unaffected.

    Returns
    -------
    argparse.Namespace
        The parsed options (also printed for logging).
    """
    # Training settings
    parser = argparse.ArgumentParser(description="PyTorch GANet Example")
    parser.add_argument("--crop_height", type=int, required=True, help="crop height")
    parser.add_argument("--crop_width", type=int, required=True, help="crop width")
    parser.add_argument("--max_disp", type=int, default=192, help="max disp")
    parser.add_argument("--resume", type=str, default="", help="resume from saved model")
    # NOTE(review): type=bool is an argparse footgun — bool("False") is True,
    # so any non-empty value enables cuda; kept for backward compatibility.
    parser.add_argument("--cuda", type=bool, default=True, help="use cuda?")
    parser.add_argument("--kitti", type=int, default=0, help="kitti dataset? Default=False")
    parser.add_argument("--kitti2015", type=int, default=0, help="kitti 2015? Default=False")
    parser.add_argument("--cityscapes", type=int, default=0, help="Cityscapes? Default=False")
    parser.add_argument("--data_path", type=str, required=True, help="data root")
    parser.add_argument("--test_list", type=str, required=True, help="training list")
    parser.add_argument("--save_path", type=str, default="./result/", help="location to save result")
    parser.add_argument("--model", type=str, default="GANet_deep", help="model to train")
    parser.add_argument("--local_rank", type=int, default=0)
    opt = parser.parse_args(argv)
    print(opt)
    return opt
def test_transform(temp_data, crop_height, crop_width):
_, h, w = np.shape(temp_data)
if h <= crop_height and w <= crop_width:
temp = temp_data
temp_data = np.zeros([6, crop_height, crop_width], "float32")
temp_data[:, crop_height - h : crop_height, crop_width - w : crop_width] = temp
else:
start_x = int((w - crop_width) / 2)
start_y = int((h - crop_height) / 2)
temp_data = temp_data[
:, start_y : start_y + crop_height, start_x : start_x + crop_width
]
left = np.ones([1, 3, crop_height, crop_width], "float32")
left[0, :, :, :] = temp_data[0:3, :, :]
right = np.ones([1, 3, crop_height, crop_width], "float32")
right[0, :, :, :] = temp_data[3:6, :, :]
return torch.from_numpy(left).float(), torch.from_numpy(right).float(), h, w
def load_data(leftname, rightname):
    """Load a stereo pair and standardize every color channel independently.

    Returns a float32 array of shape (6, H, W): channels 0..2 hold the
    normalized left image, channels 3..5 the normalized right image.
    """
    left = np.asarray(Image.open(leftname))
    right = np.asarray(Image.open(rightname))
    height = np.shape(left)[0]
    width = np.shape(left)[1]
    temp_data = np.zeros([6, height, width], "float32")
    # Zero-mean / unit-variance per channel, per image.
    for base, image in ((0, left), (3, right)):
        for c in range(3):
            channel = image[:, :, c]
            temp_data[base + c, :, :] = (channel - np.mean(channel[:])) / np.std(channel[:])
    return temp_data
def test(model, leftname, rightname, savename, crop_height, crop_width, cuda):
    """Run stereo inference on one image pair and save the disparity map.

    Loads and normalizes the pair, pads/crops it to the network input size,
    runs the model, undoes the padding when the source image was smaller than
    the crop, and writes the result as a 16-bit PNG scaled by 256
    (presumably the KITTI disparity convention -- confirm for other datasets).
    """
    input1, input2, height, width = test_transform(
        load_data(leftname, rightname), crop_height, crop_width
    )
    input1 = Variable(input1, requires_grad=False)
    input2 = Variable(input2, requires_grad=False)
    if cuda:
        input1 = input1.cuda()
        input2 = input2.cuda()
    with torch.no_grad():
        prediction = model(input1, input2)
    temp = prediction.cpu()
    temp = temp.detach().numpy()
    if height <= crop_height and width <= crop_width:
        # The image was zero-padded into the bottom-right corner:
        # cut that corner back out to recover the original resolution.
        temp = temp[
            0,
            crop_height - height : crop_height,
            crop_width - width : crop_width,
        ]
    else:
        # The image was center-cropped: only the cropped region is saved.
        temp = temp[0, :, :]
    skimage.io.imsave(savename, (temp * 256).astype("uint16"))
def main():
    """GANet inference driver: build the model, load a checkpoint, and predict
    disparities for every file in --test_list, optionally sharded across GPUs
    via torch.distributed (NCCL, env:// rendezvous)."""
    opt = parse_args()
    # Select the network variant by name.
    if opt.model == "GANet11":
        from models.GANet11 import GANet
    elif opt.model == "GANet_deep":
        from models.GANet_deep import GANet
    else:
        raise Exception("No suitable model found...")
    cuda = opt.cuda
    if cuda and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --cuda")
    # WORLD_SIZE is set by the torch.distributed launcher; absent => single process.
    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    distributed = num_gpus > 1
    if distributed:
        assert cuda, "Distributed inference only works with GPUs"
        torch.cuda.set_device(opt.local_rank)
        torch.distributed.init_process_group(
            backend="nccl", init_method="env://"
        )
        dist.barrier()
    print("===> Building model")
    model = GANet(opt.max_disp)
    if distributed:
        model.to('cuda')
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[opt.local_rank], output_device=[opt.local_rank])
    elif cuda:
        model = torch.nn.DataParallel(model)
        model.to('cuda')
    model.eval()
    # Restore weights; strict=False tolerates missing/unexpected keys.
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            checkpoint = torch.load(opt.resume)
            model.load_state_dict(checkpoint["state_dict"], strict=False)
        else:
            print("=> no checkpoint found at '{}'".format(opt.resume))
    # Inference
    file_path = opt.data_path
    file_list = opt.test_list
    with open(file_list, "r") as f:
        filelist = [line.strip() for line in f.readlines()]
    if distributed:
        # Shard the work list round-robin across ranks.
        filelist = filelist[opt.local_rank::num_gpus]
    for current_file in tqdm(filelist):
        # Dataset flags select the directory layout; NOTE(review): if several
        # flags are set, the later block wins (they are independent ifs).
        if opt.kitti2015:
            leftname = os.path.join(file_path, "image_2", current_file)
            rightname = os.path.join(file_path, "image_3", current_file)
            savename = os.path.join(opt.save_path, current_file)
        if opt.kitti:
            leftname = os.path.join(file_path, "colored_0", current_file)
            rightname = os.path.join(file_path, "colored_1", current_file)
            savename = os.path.join(opt.save_path, current_file)
        if opt.cityscapes:
            file_id = current_file.split("_leftImg8bit.png")[0]
            leftname = os.path.join(
                file_path, "leftImg8bit", file_id + "_leftImg8bit.png"
            )
            rightname = os.path.join(
                file_path, "rightImg8bit", file_id + "_rightImg8bit.png"
            )
            savename = os.path.join(
                opt.save_path, os.path.basename(file_id) + "_Disp16bit.png"
            )
        test(model, leftname, rightname, savename, opt.crop_height, opt.crop_width, cuda)
| StarcoderdataPython |
8067307 | <reponame>sbonner0/temporal-offset-reconstruction<filename>src/models/GAE.py<gh_stars>1-10
from __future__ import print_function
from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from .layers import GraphConvolution
class GAE(nn.Module):
    """Graph Auto-Encoder (see: https://arxiv.org/abs/1611.07308) - Probabilistic Version.

    Two-layer GCN encoder followed by an inner-product decoder that
    reconstructs the adjacency matrix.
    """

    def __init__(self, data, n_hidden, n_latent, dropout, bias, xavier_init=True):
        super().__init__()
        # Graph tensors.
        self.x = data['features']
        self.adj_norm = data['adj_norm']
        self.adj_labels = data['adj_labels']
        # Dimensions.
        num_nodes, num_feats = data['features'].shape
        self.n_edges = self.adj_labels.sum()
        self.input_dim = num_feats
        self.n_hidden = n_hidden
        self.n_latent = n_latent
        self.bias = bias
        # Loss re-weighting terms for the sparse adjacency reconstruction.
        self.pos_weight = float(num_nodes * num_nodes - self.n_edges) / self.n_edges
        self.norm = float(num_nodes * num_nodes) / ((num_nodes * num_nodes - self.n_edges) * 2)
        # Encoder layers.
        self.gc1 = GraphConvolution(self.input_dim, self.n_hidden, self.bias)
        self.gc2 = GraphConvolution(self.n_hidden, self.n_latent, self.bias)
        self.dropout = dropout
        # Adding PReLU seemingly made a difference for TO
        self.prelu1 = nn.PReLU()
        if xavier_init:
            # Start both GCN weight matrices from Xavier-uniform.
            torch.nn.init.xavier_uniform_(self.gc1.weight)
            torch.nn.init.xavier_uniform_(self.gc2.weight)

    def encode_graph(self, x, adj):
        """Two-layer GCN encoder: PReLU(GC1) -> dropout -> GC2."""
        hidden = self.prelu1(self.gc1(x, adj))
        hidden = F.dropout(hidden, self.dropout, training=self.training)
        return self.gc2(hidden, adj)

    def forward(self, x, adj):
        """Encode the graph and decode a dense adjacency reconstruction."""
        latent = self.encode_graph(x, adj)
        latent = F.dropout(latent, self.dropout, training=self.training)
        return torch.spmm(latent, latent.t())

    def get_embeddings(self, x, adj):
        """Return the latent node embeddings without decoding."""
        return self.encode_graph(x, adj)
315955 | from dropbeat.models import User
from django.contrib.auth import SESSION_KEY
from django.core.exceptions import ObjectDoesNotExist
def auth_required(f):
    """Decorator for view methods: resolve the session user or reject the request.

    On success, attaches the authenticated user to ``request.user`` and calls
    the wrapped handler; otherwise delegates to ``self.on_unauthorized()``.
    """
    from functools import wraps

    @wraps(f)  # preserve the handler's __name__/__doc__ for introspection and debugging
    def wrap(self, request, *args, **kwargs):
        if SESSION_KEY not in request.session:
            # Malformed session detected.
            return self.on_unauthorized()
        try:
            request.user = User.objects.get(email=request.session['email'])
            return f(self, request, *args, **kwargs)
        except ObjectDoesNotExist:
            # Session references a user that no longer exists.
            return self.on_unauthorized()
    return wrap
class AuthBackend(object):
    """Email/password authentication backend backed by the dropbeat User model."""

    def authenticate(self, email=None, password=None):
        """Return the matching user when the password checks out, else None."""
        try:
            candidate = User.objects.get(email=email)
        except ObjectDoesNotExist:
            return None
        if candidate.check_password(password):
            return candidate
        return None

    def get_user(self, user_id):
        """Look up a user by primary key; None when absent."""
        try:
            return User.objects.get(id=user_id)
        except ObjectDoesNotExist:
            return None
| StarcoderdataPython |
3274227 | <filename>src/data/build_data.py
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 1 19:00:55 2018
@author: sandr
"""
import numpy as np
import pandas as pd
import get_raw_data as grd
import logging
import os
from dotenv import find_dotenv, load_dotenv
import data_classes
import balanced_data_classes
import Normalizer
import datetime
import glob
from os.path import abspath
from pathlib import Path
from inspect import getsourcefile
from datetime import datetime
import math
import argparse
import sys
import tensorflow as tf
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import OneHotEncoder
# Numeric dtypes used throughout preprocessing (booleans are stored as uint8).
DT_FLOAT = np.float32
DT_BOOL = np.uint8
# Seed for reproducible sampling.
RANDOM_SEED = 123
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
# logger.propagate = False # it will not log to console.
# Project data directories, resolved relative to this source file:
# parents[2] climbs from src/data/ to the project root.
RAW_DIR = os.path.join(Path(abspath(getsourcefile(lambda:0))).parents[2], 'data', 'raw')
PRO_DIR = os.path.join(Path(abspath(getsourcefile(lambda:0))).parents[2], 'data', 'processed')
def drop_descrip_cols(data):
    """Drop, in place, the purely descriptive columns that must not feed a model.

    Removes LOAN_ID, ASOFMONTH, MONTH, TIME, CURRENT_INVESTOR_CODE_253,
    LLMA2_VINTAGE_2005, MBA_DELINQUENCY_STATUS and STATE.

    Args:
        data (DataFrame): input dataset, modified in place.

    Returns:
        None
    """
    logger.name = 'drop_descrip_cols'
    descriptive = [
        'LOAN_ID',
        'ASOFMONTH',
        'MONTH',
        'TIME',
        'CURRENT_INVESTOR_CODE_253',
        'LLMA2_VINTAGE_2005',
        'MBA_DELINQUENCY_STATUS',
        'STATE',
    ]
    data.drop(descriptive, axis=1, inplace=True)
    logger.info('...Descriptive columns Excluded from dataset...')
    return None
def allfeatures_drop_cols(data, columns):
    """Drop, in place, the given columns from the dataset.

    Args:
        data (DataFrame): input dataset, modified in place.
        columns: column labels to remove.

    Returns:
        None
    """
    logger.name = 'allfeatures_drop_cols'
    data.drop(columns, axis=1, inplace=True)
    logger.info('...Columns Excluded from dataset...')
    return None
def drop_low_variation(data, low_freq_cols=None):
    """Drop, in place, boolean columns whose population shows almost no variation.

    When ``low_freq_cols`` is None, every DT_BOOL column whose mean (in
    percent) is below 0.05 or within 0.05 of 100 is dropped and collected;
    otherwise exactly the listed columns are dropped.

    Args:
        data (DataFrame): input dataset, modified in place.
        low_freq_cols (list of str or None): columns to drop, or None to detect.

    Returns:
        list of str: names of the dropped columns.
    """
    logger.name = 'drop_low_variation'
    if low_freq_cols is None:
        low_freq_cols = []
        thresh = 0.05  # this is 5 basis points.
        # Iterate over a snapshot of the column names so in-loop drops are safe.
        for name in data.columns.values:
            if data[name].dtype != DT_BOOL:
                continue
            pct = 100 * data[name].mean()
            if pct < thresh or abs(100 - pct) < thresh:
                data.drop(name, axis=1, inplace=True)
                low_freq_cols.append(name)
    else:
        for name in low_freq_cols:
            data.drop(name, axis=1, inplace=True)
            logger.info('Dropped: ' + name)
    logger.info('...Columns Excluded from dataset...')
    logger.info('Size of the database after drop_low_variation:' + str(data.shape))
    return low_freq_cols
def drop_paidoff_reo(data):
    """Remove paid-off (status 0) and REO (status R) loans, in place.

    Keeps only rows where both indicator columns are below 1, then drops
    'MBA_DELINQUENCY_STATUS_0' and 'MBA_DELINQUENCY_STATUS_R' themselves.

    Args:
        data (DataFrame): input dataset, modified in place.

    Returns:
        None
    """
    logger.name = 'drop_paidoff_reo'
    logger.info('Size Dataset: ' + str(data.shape))
    data.query('MBA_DELINQUENCY_STATUS_0<1 & MBA_DELINQUENCY_STATUS_R<1', inplace=True)
    data.drop(['MBA_DELINQUENCY_STATUS_0', 'MBA_DELINQUENCY_STATUS_R'], axis=1, inplace=True)
    logger.info('New Size Dataset: ' + str(data.shape))
    return None
def extract_numeric_labels(data, label_column='MBA_DELINQUENCY_STATUS_next'):
    """Pop the label column from the dataset and map its values to integer codes.

    Distinct labels are sorted alphabetically and mapped to 0..K-1; the label
    column is removed from ``data`` in place.

    Args:
        data (DataFrame): input dataset, modified in place.
        label_column (str): name of the label column.

    Returns:
        Series: numeric labels aligned with ``data``.
    """
    logger.name = 'extract_numeric_labels'
    labels = data[label_column]
    distinct = sorted(labels.value_counts().index.tolist())
    mapping = dict(zip(distinct, np.arange(len(distinct))))
    labels = labels.map(mapping)
    logger.info('mapped labels: ' + str(mapping))
    data.drop(label_column, axis=1, inplace=True)
    logger.info('...Labels extracted from Dataset...')
    logger.info('Size of the dataset after extract_labels:' + str(data.shape))
    return labels
def allfeatures_extract_labels(data, columns='MBA_DELINQUENCY_STATUS_next'):
    """Pop label column(s) from ``data`` in place and return them.

    When ``columns`` is a string, every column whose name contains it is
    selected (columns are alphabetically ordered); otherwise ``columns`` is
    taken to be a list of positional indices.

    Args:
        data (DataFrame): input dataset, modified in place.
        columns (str or list of int): substring to match or column positions.

    Returns:
        DataFrame of extracted labels, or None when nothing matches.
    """
    logger.name = 'allfeatures_extract_labels'
    if isinstance(columns, str):
        positions = [i for i, name in enumerate(data.columns) if columns in name]
    else:
        positions = columns
    if not positions:
        return None
    labels = data[data.columns[positions]]
    data.drop(data.columns[positions], axis=1, inplace=True)
    logger.info('...Labels extracted from Dataset...')
    return labels
def oneHotEncoder_np(column, typ=DT_FLOAT):
    """One-hot encode an integer-coded label column (numpy implementation).

    Args:
        column (Series): integer category codes in 0..K-1.
        typ: kept for interface compatibility; the output dtype is always
            DT_FLOAT, as in the original implementation.

    Returns:
        ndarray: (n, K) one-hot matrix.
    """
    logger.name = 'oneHotEncoder_np'
    num_classes = len(column.value_counts())
    one_hot = (np.arange(num_classes) == column[:, None]).astype(DT_FLOAT)
    logger.info('...labels changed to one-hot-encoding (numpy)....')
    return one_hot
def oneHotEncoder_sklearn(column):
    """One-hot encode an integer-coded label column via sklearn's OneHotEncoder.

    Args:
        column (Series): integer category codes.

    Returns:
        ndarray: dense (n, K) float one-hot matrix.
    """
    logger.name = 'oneHotEncoder_sklearn'
    encoder = OneHotEncoder()
    values = column.values.reshape(-1, 1)
    encoder.fit(values)
    encoded = encoder.transform(values).toarray()
    logger.info('...labels changed to one-hot-encoding (sklearn)....')
    return encoded
def reformat_data_labels(data, labels, typ=DT_FLOAT):
    """Convert an aligned (DataFrame, Series) pair to numpy arrays of dtype ``typ``.

    Args:
        data (DataFrame): numeric feature matrix.
        labels (Series): numeric labels sharing ``data``'s index.
        typ: target numpy dtype (cast uses 'same_kind' checking).

    Returns:
        (ndarray, ndarray): feature matrix and label vector.

    Raises:
        ValueError: when the two indexes are not aligned.
    """
    logger.name = 'reformat'
    if not (data.index == labels.index).all():
        raise ValueError('data and labels have to be aligned!')
    feature_mat = data.values.astype(typ, casting='same_kind')
    label_vec = labels.values.astype(typ, casting='same_kind')
    logger.info('...Reformatted Dataset...')
    return feature_mat, label_vec
def reformat(data, typ=DT_FLOAT):
    """Convert a DataFrame or Series to a numpy array of dtype ``typ``.

    Args:
        data (DataFrame or Series): numeric data (categoricals must already
            be one-hot encoded).
        typ: target numpy dtype.

    Returns:
        ndarray of dtype ``typ``.
    """
    logger.name = 'reformat'
    converted = data.values.astype(typ)  # note: no 'same_kind' casting check here
    logger.info('...Reformatted data...')
    return converted
def normalize(data):
    """Map each feature to a normal distribution using quantile information.

    The transform is applied per feature and is non-linear: it may distort
    linear correlations between variables on the same scale, but makes
    differently-scaled variables directly comparable.

    Args:
        data: numeric feature matrix.

    Returns:
        ndarray: quantile-normalized copy of ``data``.
    """
    logger.name = 'normalize'
    transformer = QuantileTransformer(output_distribution='normal')  # alternative: 'uniform'
    logger.info('...Normalized Dataset...')
    return transformer.fit_transform(data)
def oneHotDummies_column(column, categories):
    """Expand a string categorical column into 0/1 dummy columns.

    Dummy columns come out in the fixed ``categories`` order (alphabetically
    ordered by convention) and are prefixed with the source column name.

    Args:
        column (Series): raw categorical values.
        categories (list of str): full category vocabulary.

    Returns:
        DataFrame: indicator matrix, one column per category.
    """
    logger.name = 'oneHotDummies_column: ' + column.name
    as_categorical = pd.Categorical(column.astype('str'), categories=categories)
    dummies = pd.get_dummies(as_categorical)  # column order follows `categories`
    dummies = dummies.add_prefix(column.name + '_')
    if dummies.isnull().any().any():
        # Diagnostic dump: values outside `categories` surface as NaN rows.
        null_cols = dummies.columns[dummies.isnull().any()]
        print(dummies[null_cols].isnull().sum())
        print(dummies[dummies.isnull().any(axis=1)][null_cols].head(50))
    return dummies
def encode_binary_to_labeled_column(sparse_data):
    """Collapse a one-hot DataFrame into a single column of column-name labels.

    Each row is labeled with the name of its maximum-valued column; a row of
    all zeros therefore falls back to the first column's name.

    Args:
        sparse_data (DataFrame): 0/1 indicator matrix.

    Returns:
        Series: one label (a column name) per row.
    """
    labeled = sparse_data.idxmax(axis=1)
    return labeled
def encode_sparsematrix(data, x):
    """Select the dummy columns whose names contain ``x`` and collapse them to labels.

    Args:
        data (DataFrame): dataset containing one-hot column groups.
        x (str): substring identifying one categorical group.

    Returns:
        Series: single labeled column for that group.
    """
    matching = np.where(pd.DataFrame(data.columns.values)[0].str.contains(x))
    group = data.iloc[:, matching[0]]
    return encode_binary_to_labeled_column(group)
def get_datasets(data, train_num, valid_num, test_num, weight_flag=False, stratified_flag=False, refNorm=True):
    '''Sample and transform the data and split it into training, test and validation sets.

    Pipeline: drop_paidoff_reo -> (stratified_)sample_data -> drop_descrip_cols
    -> drop_low_variation -> extract_numeric_labels -> oneHotEncoder_np
    -> (optionally) reformat + normalize -> contiguous train/valid/test split.

    Args:
        data (DataFrame): Input Dataset.
        train_num (int): Number of training samples.
        valid_num (int): Number of validation samples.
        test_num (int): Number of testing samples.
        weight_flag (bool): Default False. True to execute a pondered (weighted) sampling.
        stratified_flag (bool): Default False. True to sample preserving class proportions.
        refNorm (bool): Default True. True to reformat to numpy and quantile-normalize.

    Returns:
        Three (data, labels) tuples for training, validation and testing,
        plus the list of feature column names.

    Raises:
        ValueError: 'data and labels have to be aligned!'
    '''
    # Dropping paid-off and REO loans.
    drop_paidoff_reo(data)

    print('Sampling the dataset and shuffling the results....')
    np.random.seed(RANDOM_SEED)
    if (stratified_flag == False):
        data_df = grd.sample_data(data, train_num + valid_num + test_num, weight_flag)
    else:
        # Stratified sampling takes a fraction, not a count.
        data_df = grd.stratified_sample_data(data, float(train_num + valid_num + test_num)/float(data.shape[0]))
    logger.info('Size of the database after sampling: ' + str(data_df.shape))

    drop_descrip_cols(data_df)

    print('Droppping low-variation variables.....')
    drop_low_variation(data_df, None)

    print('Getting the numeric labels...')
    labels_df = extract_numeric_labels(data_df)
    if not (data_df.index == labels_df.index).all():
        raise ValueError('data and labels have to be aligned!')

    labels = oneHotEncoder_np(labels_df)
    if (refNorm==True):
        print('Reformating and normalizing the data.....')
        data = reformat(data_df)
        data = normalize(data)
        logger.name = 'get_datasets'
        # Contiguous split: [0, train) / [train, train+valid) / [train+valid, end).
        train = (data[:train_num, ], labels[:train_num, ])
        logger.info('Training labels shape: ' + str(train[1].shape))
        valid = (data[train_num:train_num + valid_num, ],
                 labels[train_num:train_num + valid_num, ])
        logger.info('Validation labels shape: ' + str(valid[1].shape))
        test = (data[train_num + valid_num:, ], labels[train_num + valid_num:, ])
        logger.info('Test labels shape: ' + str(test[1].shape))
        return train, valid, test, data_df.columns.values.tolist()
    else:
        logger.name = 'get_datasets'
        # Same split, but features stay un-normalized (raw DataFrame values).
        train = (np.array(data_df.iloc[:train_num, ]), labels[:train_num, ])
        logger.info('Training labels shape: ' + str(train[1].shape))
        valid = (np.array(data_df.iloc[train_num:train_num + valid_num, ]),
                 labels[train_num:train_num + valid_num, ])
        logger.info('Validation labels shape: ' + str(valid[1].shape))
        test = (np.array(data_df.iloc[train_num + valid_num:, ]), labels[train_num + valid_num:, ])
        logger.info('Test labels shape: ' + str(test[1].shape))
        return train, valid, test, data_df.columns.values.tolist()
def drop_invalid_delinquency_status(data, gflag, log_file):
    """Drop, in place, loan records from invalid delinquency-status sequences.

    Per loan whose history contains a terminal/invalid status: everything from
    the first S/T/X/Z record onward is dropped, and everything AFTER the first
    paid-off (0) / REO (R) record is dropped. ``gflag`` carries the LOAN_ID of
    a loan whose history straddles the previous chunk boundary: its remaining
    rows in this chunk are dropped too, and the returned flag is this chunk's
    last LOAN_ID when that loan is itself affected (else '').

    Args:
        data (DataFrame): chunk with LOAN_ID / PERIOD / MBA_DELINQUENCY_STATUS
            / DELINQUENCY_STATUS_NEXT columns; modified in place.
        gflag (str): carry-over LOAN_ID from the previous chunk ('' if none).
        log_file: open text file for audit logging.

    Returns:
        str: the carry-over LOAN_ID for the next chunk.
    """
    logger.name = 'drop_invalid_delinquency_status'
    delinq_ids = data[data['MBA_DELINQUENCY_STATUS'].isin(['0', 'R', 'S', 'T', 'X', 'Z'])]['LOAN_ID']
    groups = data[data['LOAN_ID'].isin(delinq_ids)][['LOAN_ID', 'PERIOD', 'MBA_DELINQUENCY_STATUS', 'DELINQUENCY_STATUS_NEXT']].groupby('LOAN_ID')
    groups_list = list(groups)
    iuw= pd.Index([])
    if gflag != '':
        # Previous chunk ended inside this loan's history: drop its remainder.
        try:
            iuw= iuw.union(groups.get_group(gflag).index[0:])
        except Exception as e:
            print(str(e))
    # Propagate the flag when the chunk's last loan is itself affected.
    if data.iloc[-1]['LOAN_ID'] in groups.groups.keys():
        gflag = data.iloc[-1]['LOAN_ID']
    else:
        gflag = ''
    for k, group in groups_list:
        # Drop from the first terminal/invalid status (S/T/X/Z) onwards.
        li= group.index[(group['MBA_DELINQUENCY_STATUS'] =='S') | (group['MBA_DELINQUENCY_STATUS'] =='T')
            | (group['MBA_DELINQUENCY_STATUS'] =='X') | (group['MBA_DELINQUENCY_STATUS'] =='Z')].tolist()
        if li: iuw= iuw.union(group.index[group.index.get_loc(li[0]):])
        # In case of REO or Paid-Off, we need to exclude since the next record:
        df_delinq_01 = group[(group['MBA_DELINQUENCY_STATUS'] =='0') | (group['MBA_DELINQUENCY_STATUS'] =='R')]
        if df_delinq_01.shape[0]>0:
            track_i = df_delinq_01.index[0]
            iuw= iuw.union(group.index[group.index.get_loc(track_i)+1:])
    if iuw!=[]:
        log_file.write('drop_invalid_delinquency_status - Total rows: %d\r\n' % len(iuw))
        data.drop(iuw, inplace=True)
        logger.info('invalid_delinquency_status dropped')
    return gflag
def custom_robust_normalizer(ncols, dist_file, normalizer_type='robust_scaler_sk', center_value='median'):
    """Build a robust (IQR-based) normalizer from a precomputed distribution file.

    For each candidate column with quartile statistics (<col>_Q1/_Q3/_MEDIAN)
    in ``dist_file`` and a non-zero interquartile range, collects scale = IQR
    and center = median (or Q1, per ``center_value``).

    Args:
        ncols (list of str): candidate numeric column names.
        dist_file (DataFrame): per-column distribution statistics.
        normalizer_type (str): 'robust_scaler_sk' builds an sklearn
            RobustScaler with its scale_/center_ set directly (no fit);
            'percentile_scaler' builds the project's Normalizer; anything
            else yields None.
        center_value (str): 'median' to center on the median, otherwise Q1.

    Returns:
        (list of str, scaler or None): columns that qualified, and the scaler.
    """
    norm_cols = []
    scales = []
    centers = []
    for i, x in enumerate (ncols):
        x_frame = dist_file.iloc[:, np.where(pd.DataFrame(dist_file.columns.values)[0].str.contains(x+'_Q'))[0]]
        if not x_frame.empty:
            # Interquartile range; coercion guards against non-numeric stats.
            iqr = float(pd.to_numeric(x_frame[x+'_Q3'], errors='coerce').subtract(pd.to_numeric(x_frame[x+'_Q1'], errors='coerce')))
            if iqr!=0:
                norm_cols.append(x)
                scales.append(iqr)
                if center_value == 'median':
                    centers.append( float(x_frame[x+'_MEDIAN']) )
                else:
                    centers.append( float(x_frame[x+'_Q1']) )
    if (normalizer_type == 'robust_scaler_sk'):
        # NOTE(review): scale_/center_ are assigned without calling fit().
        normalizer = RobustScaler()
        normalizer.scale_ = scales
        normalizer.center_ = centers
    elif (normalizer_type == 'percentile_scaler'):
        normalizer = Normalizer.Normalizer(scales, centers)
    else: normalizer=None
    return norm_cols, normalizer
def custom_minmax_normalizer(ncols, scales, dist_file):
    """Build a min-max Normalizer from the _MIN/_MAX stats of a distribution file.

    Args:
        ncols (list of str): candidate numeric column names.
        scales: unused here; kept for interface compatibility with the
            robust-normalizer variant.
        dist_file (DataFrame): single-row frame with <col>_MIN / <col>_MAX stats.

    Returns:
        (list of str, Normalizer): the columns that had both statistics
        available, and a Normalizer scaling by (max - min) around min.
    """
    norm_cols = []
    spans = []
    mins = []
    col_names = pd.DataFrame(dist_file.columns.values)[0]
    for x in ncols:
        lo = dist_file.iloc[0, np.where(col_names.str.contains(x + '_MIN'))[0]]
        hi = dist_file.iloc[0, np.where(col_names.str.contains(x + '_MAX'))[0]]
        if lo.empty or hi.empty:
            continue
        lo = np.float32(lo.values[0])
        hi = np.float32(hi.values[0])
        spans.append(hi - lo)
        mins.append(lo)
        norm_cols.append(x)
    normalizer = Normalizer.Normalizer(spans, mins)
    return norm_cols, normalizer
def imputing_nan_values(nan_dict, distribution):
    """Resolve per-column NaN fill values.

    Entries whose value is the string 'median' are replaced by the column's
    <col>_MEDIAN statistic from ``distribution``; all other values pass
    through unchanged.

    Args:
        nan_dict (dict): column name -> fill value or the string 'median'.
        distribution: mapping/frame exposing <col>_MEDIAN convertible to float.

    Returns:
        dict: column name -> concrete fill value.
    """
    return {
        col: float(distribution[col + '_MEDIAN']) if fill == 'median' else fill
        for col, fill in nan_dict.items()
    }
def splitDataFrameIntoSmaller(df, chunkSize = 1200):
    """Slice a DataFrame (or any sliceable sequence) into consecutive chunks.

    Args:
        df: object supporting len() and integer slicing.
        chunkSize (int): maximum rows per chunk.

    Returns:
        list: ceil(len(df) / chunkSize) consecutive slices of ``df``.
    """
    num_chunks = math.ceil(len(df) / chunkSize)
    return [df[i * chunkSize:(i + 1) * chunkSize] for i in range(num_chunks)]
def _int64_feature(value):
    # Wrap an iterable of ints as a TFRecord Example int64 feature.
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
    # Wrap an iterable of byte strings as a TFRecord Example bytes feature.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))
def _float_feature(value):
    # Wrap an iterable of floats as a TFRecord Example float feature.
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def tag_chunk(tag, label, chunk, chunk_periods, tag_period, log_file, with_index, tag_index, hdf=None, tfrec=None):
    """Route the rows of a chunk that fall inside a period window to one split.

    Selects the rows whose PERIOD lies in [tag_period[0], tag_period[1]],
    separates features from labels, and appends them either to an HDF5 store
    (``hdf``) or to a TFRecord writer (``tfrec``).

    Args:
        tag (str): split name ('train', 'valid' or 'test'), used as key prefix.
        label: label column selector passed to allfeatures_extract_labels.
        chunk (DataFrame): multi-indexed by (DELINQUENCY_STATUS_NEXT, PERIOD).
        chunk_periods (set): PERIOD values present in the chunk.
        tag_period (tuple): inclusive (start, end) period range for this split.
        log_file: open text file for audit logging.
        with_index (bool): keep DELINQUENCY_STATUS_NEXT as the output index
            (dropping PERIOD); otherwise reset to a plain integer index.
        tag_index (int): running row count for this split.
        hdf: open pandas HDFStore, or None.
        tfrec: open tf.python_io TFRecord writer, or None.

    Returns:
        int: ``tag_index`` advanced by the number of rows written.
    """
    inter_periods = list(chunk_periods.intersection(set(range(tag_period[0], tag_period[1]+1))))
    log_file.write('Periods corresponding to ' + tag +' period: %s\r\n' % str(inter_periods))
    p_chunk = chunk.loc[(slice(None), inter_periods), :]
    log_file.write('Records for ' + tag + ' Set - Number of rows: %d\r\n' % (p_chunk.shape[0]))
    print('Records for ' + tag + ' Set - Number of rows:', p_chunk.shape[0])
    if (p_chunk.shape[0] > 0):
        if (with_index==True):
            # Drop PERIOD and keep only DELINQUENCY_STATUS_NEXT as index.
            p_chunk.reset_index(inplace=True)
            allfeatures_drop_cols(p_chunk, ['PERIOD'])
            p_chunk.set_index('DELINQUENCY_STATUS_NEXT', inplace=True) #1 index
        else:
            p_chunk.reset_index(drop=True, inplace=True)
        labels = allfeatures_extract_labels(p_chunk, columns=label)
        p_chunk = p_chunk.astype(DT_FLOAT)
        labels = labels.astype(np.int8)
        if (p_chunk.shape[0] != labels.shape[0]) :
            print('Error in shapes:', p_chunk.shape, labels.shape)
        else :
            if (hdf!=None):
                hdf.put(tag + '/features', p_chunk, append=True, data_columns=['DELINQUENCY_STATUS_NEXT'], index=True)
                hdf.put(tag + '/labels', labels, append=True, data_columns=['DELINQUENCY_STATUS_NEXT'], index=True)
                hdf.flush()
            elif (tfrec!=None):
                for row, lab in zip(p_chunk.values, labels.values):
                    feature = {tag + '/labels': _int64_feature(lab),
                               tag + '/features': _float_feature(row)}
                    # Create an example protocol buffer
                    example = tf.train.Example(features=tf.train.Features(feature=feature))
                    tfrec.write(example.SerializeToString())
                tfrec.flush()
            tag_index += p_chunk.shape[0]
    return tag_index
def zscore(x, mean, stdd):
    """Standard score of ``x`` for the given mean and standard deviation."""
    centered = x - mean
    return centered / stdd
def zscore_apply(dist_file, data):
    """Z-score each column of ``data`` in place using _MEAN/_STDD statistics.

    Columns without statistics in ``dist_file`` are reported and left
    untouched; columns whose standard deviation is zero are collected and
    reported instead of dividing by zero.

    Args:
        dist_file (DataFrame): single-row frame of <col>_MEAN / <col>_STDD stats.
        data (DataFrame): numeric dataset, modified in place.

    Returns:
        DataFrame: the same ``data`` object, normalized where possible.
    """
    stddv_0 = []
    stat_names = pd.DataFrame(dist_file.columns.values)[0]
    for col_name in data.columns.values:
        mean = pd.Series(dist_file.iloc[0, np.where(stat_names.str.contains(col_name + '_MEAN'))[0]], dtype='float32')
        stddev = dist_file.iloc[0, np.where(stat_names.str.contains(col_name + '_STDD'))[0]]
        if mean.empty or stddev.empty:
            print('Column not normalized: ', col_name)
            continue
        mean = np.float32(mean.values[0])
        stddev = np.float32(stddev.values[0])
        if stddev == 0:
            stddv_0.append(col_name)
        else:
            data[col_name] = data[col_name].apply(lambda x: zscore(x, mean, stddev))
    print('STANDARD DEV zero: ', stddv_0)
    return data
def prepro_chunk(file_name, file_path, chunksize, label, log_file, nan_cols, categorical_cols, descriptive_cols, time_cols, robust_cols, minmax_cols,
                 robust_normalizer, minmax_normalizer, dist_file, with_index, refNorm, train_period, valid_period, test_period, hdf=None, tfrec=None):
    """Stream a raw CSV through the full preprocessing pipeline, chunk by chunk.

    Per chunk: drop null-label and invalid-transition rows, drop invalid
    delinquency-status sequences (carrying loan state across chunk boundaries
    via ``gflag``), impute NaNs, one-hot encode categoricals, drop descriptive
    and time columns, z-score the robust columns, and write the rows into
    train/valid/test splits by PERIOD via tag_chunk (to HDF5 or TFRecord).

    Args:
        file_name (str): name of the input file (used in error messages).
        file_path (str): path of the CSV to read.
        chunksize (int): rows per pd.read_csv chunk.
        label: label column selector forwarded to tag_chunk.
        log_file: open text file for audit logging.
        nan_cols (dict): column -> fill value or 'median' (see imputing_nan_values).
        categorical_cols (dict): column -> category vocabulary for one-hot encoding.
        descriptive_cols (list): columns dropped as descriptive.
        time_cols (list): columns dropped as temporal.
        robust_cols (list): numeric columns to z-score.
        minmax_cols, minmax_normalizer, robust_normalizer: currently unused
            here (min-max path is disabled; z-scoring uses dist_file directly).
        dist_file (DataFrame): per-column distribution statistics.
        with_index (bool): forwarded to tag_chunk.
        refNorm (bool): True to apply the z-score normalization step.
        train_period, valid_period, test_period (tuple): inclusive PERIOD ranges.
        hdf: tuple of three open HDFStores (train, valid, test), or None.
        tfrec: tuple of three open TFRecord writers, or None.

    Returns:
        (int, int, int): row counts written for train, valid, test.

    Raises:
        ValueError: when NaNs remain after imputation/normalization.
    """
    gflag = ''
    i = 1
    train_index = 0
    valid_index = 0
    test_index = 0
    for chunk in pd.read_csv(file_path, chunksize = chunksize, sep=',', low_memory=False):
        print('chunk: ', i, ' chunk size: ', chunk.shape[0])
        log_file.write('chunk: %d, chunk size: %d \n' % (i, chunk.shape[0]))
        chunk.columns = chunk.columns.str.upper()
        # Rows with no label are useless for supervised training.
        log_df = chunk[chunk[label].isnull()]
        log_file.write('Dropping Rows with Null Labels - Number of rows: %d\r\n' % (log_df.shape[0]))
        chunk.drop(chunk.index[chunk[label].isnull()], axis=0, inplace=True)
        log_df = chunk[chunk['INVALID_TRANSITIONS']==1]
        log_file.write('Dropping Rows with Invalid Transitions - Number of rows: %d\r\n' % (log_df.shape[0]))
        chunk.drop(chunk.index[chunk['INVALID_TRANSITIONS']==1], axis=0, inplace=True)
        gflag = drop_invalid_delinquency_status(chunk, gflag, log_file)
        null_columns=chunk.columns[chunk.isnull().any()]
        log_df = chunk[chunk.isnull().any(axis=1)][null_columns]
        log_file.write('Filling NULL values - (rows, cols) : %d, %d\r\n' % (log_df.shape[0], log_df.shape[1]))
        log_df = chunk[null_columns].isnull().sum().to_frame().reset_index()
        log_df.to_csv(log_file, index=False, mode='a')
        nan_cols = imputing_nan_values(nan_cols, dist_file)
        chunk.fillna(value=nan_cols, inplace=True)
        chunk.drop_duplicates(inplace=True) # Follow this instruction!!
        logger.info('dropping invalid transitions and delinquency status, fill nan values, drop duplicates')
        log_file.write('Drop duplicates - new size : %d\r\n' % (chunk.shape[0]))
        chunk.reset_index(drop=True, inplace=True) #don't remove this line! otherwise NaN values appears.
        # Bucket pre-1995 vintages; NOTE(review): chained assignment — relies on
        # pandas writing through to the original frame.
        chunk['ORIGINATION_YEAR'][chunk['ORIGINATION_YEAR']<1995] = "B1995"
        # One-hot encode every categorical column against its fixed vocabulary.
        for k,v in categorical_cols.items():
            old_len = len(chunk.columns.values)
            chunk[k] = chunk[k].astype('str')
            chunk[k] = chunk[k].str.strip()
            chunk[k].replace(['\.0$'], [''], regex=True, inplace=True)
            new_cols = oneHotDummies_column(chunk[k], v)
            if (chunk[k].value_counts().sum()!=new_cols.sum().sum()):
                # Some values fell outside the vocabulary: log, but keep going.
                print('Error at categorization, different categories', k)
                print(chunk[k].value_counts(), new_cols.sum())
                log_file.write('Error at categorization, different categories %s\r\n' % str(k))
                chunk[new_cols.columns] = new_cols
            else:
                chunk[new_cols.columns] = new_cols
                log_file.write('New columns added: %s\r\n' % str(new_cols.columns.values))
            if (len(chunk.columns.values)!=(len(new_cols.columns.values) + old_len)):
                log_file.write('Error adding new columns to chunk : %s\r\n' % str(new_cols.columns.values))
            if (len(v)!=len(new_cols.columns.values)):
                log_file.write('Error adding new columns to chunk : %s\r\n' % str(new_cols.columns.values))
        allfeatures_drop_cols(chunk, descriptive_cols)
        log_file.write('descriptive_cols dropped: %s\r\n' % str(descriptive_cols))
        allfeatures_drop_cols(chunk, time_cols)
        log_file.write('time_cols dropped: %s\r\n' % str(time_cols))
        # Drop raw categorical columns (their dummies remain); the label
        # column is kept for tag_chunk.
        cat_list = list(categorical_cols.keys())
        cat_list.remove('DELINQUENCY_STATUS_NEXT')
        log_file.write('categorical_cols dropped: %s\r\n' % str(cat_list))
        allfeatures_drop_cols(chunk, cat_list)
        chunk.reset_index(drop=True, inplace=True)
        chunk.set_index(['DELINQUENCY_STATUS_NEXT', 'PERIOD'], append=False, inplace=True) #2 indexes
        log_file.write('Indexes created: %s\r\n' % str(chunk.index.names))
        if chunk.isnull().any().any(): raise ValueError('There are null values...File: ' + file_name)
        if (refNorm==True):
            norm_df = zscore_apply(dist_file, chunk[robust_cols]) #robust_normalizer.transform(chunk[robust_cols])
            chunk[robust_cols] = norm_df
            log_file.write('robust_cols normalized: %s\r\n' % str(robust_cols))
        if chunk.isnull().any().any(): raise ValueError('There are null values...File: ' + file_name)
        chunk_periods = set(list(chunk.index.get_level_values('PERIOD')))
        # Write each period window to its split (TFRecord takes precedence).
        if (tfrec!=None):
            train_index = tag_chunk('train', label, chunk, chunk_periods, train_period, log_file, with_index, train_index, tfrec=tfrec[0])
            valid_index = tag_chunk('valid', label, chunk, chunk_periods, valid_period, log_file, with_index, valid_index, tfrec=tfrec[1])
            test_index = tag_chunk('test', label, chunk, chunk_periods, test_period, log_file, with_index, test_index, tfrec=tfrec[2])
            sys.stdout.flush()
        elif (hdf!=None):
            train_index = tag_chunk('train', label, chunk, chunk_periods, train_period, log_file, with_index, train_index, hdf=hdf[0])
            valid_index = tag_chunk('valid', label, chunk, chunk_periods, valid_period, log_file, with_index, valid_index, hdf=hdf[1])
            test_index = tag_chunk('test', label, chunk, chunk_periods, test_period, log_file, with_index, test_index, hdf=hdf[2])
        # Rows beyond the test window are only counted and logged, never written.
        inter_periods = list(chunk_periods.intersection(set(range(test_period[1]+1,355))))
        log_file.write('Periods greater than test_period: %s\r\n' % str(inter_periods))
        p_chunk = chunk.loc[(slice(None), inter_periods), :]
        log_file.write('Records greater than test_period - Number of rows: %d\r\n' % (p_chunk.shape[0]))
        del chunk
        i += 1
    return train_index, valid_index, test_index
def allfeatures_prepro_file(RAW_DIR, file_path, raw_dir, file_name, target_path, train_period, valid_period, test_period, log_file, dividing='percentage', chunksize=500000,
                            refNorm=True, label='DELINQUENCY_STATUS_NEXT', with_index=True, output_hdf=True):
    """Preprocess one raw loan-performance file into train/valid/test sets.

    Declares the full column layout (descriptive, numeric, NaN-fill policy,
    categorical vocabularies, time columns), builds robust and min-max
    normalizers from a precomputed percentile CSV, then delegates the chunked
    work to ``prepro_chunk`` — writing HDF5 stores when ``output_hdf`` is True,
    or TFRecord files otherwise.

    Args:
        RAW_DIR: Directory holding "percentile features3-test.csv".
        file_path: Full path of the raw input file to preprocess.
        raw_dir: Raw sub-directory name (accepted but not referenced here).
        file_name: Base name of the input file (used by prepro_chunk logging).
        target_path: Output path prefix; '-train-pp.h5' etc. are appended.
        train_period/valid_period/test_period: [start, end] period ranges.
        log_file: Open, writable log file object.
        dividing: Split-strategy label (accepted but not used in this body).
        chunksize: Rows per chunk read from the raw file.
        refNorm: Whether prepro_chunk applies reference normalization.
        label: Target column name.
        with_index: Whether output rows keep a MultiIndex.
        output_hdf: True -> HDF5 output, False -> TFRecord output.
    """
    # Identifier / bookkeeping columns with no predictive value; dropped by
    # prepro_chunk before writing features.
    descriptive_cols = [
        'LOAN_ID',
        'ASOFMONTH',
        'PERIOD_NEXT',
        'MOD_PER_FROM',
        'MOD_PER_TO',
        'PROPERTY_ZIP',
        'INVALID_TRANSITIONS'
    ]
    # Numeric feature columns; *_NAN columns are indicator flags marking which
    # values were originally missing.
    numeric_cols = ['MBA_DAYS_DELINQUENT', 'MBA_DAYS_DELINQUENT_NAN',
                    'CURRENT_INTEREST_RATE', 'CURRENT_INTEREST_RATE_NAN', 'LOANAGE', 'LOANAGE_NAN',
                    'CURRENT_BALANCE', 'CURRENT_BALANCE_NAN', 'SCHEDULED_PRINCIPAL',
                    'SCHEDULED_PRINCIPAL_NAN', 'SCHEDULED_MONTHLY_PANDI',
                    'SCHEDULED_MONTHLY_PANDI_NAN',
                    'LLMA2_CURRENT_INTEREST_SPREAD', 'LLMA2_CURRENT_INTEREST_SPREAD_NAN',
                    'LLMA2_C_IN_LAST_12_MONTHS',
                    'LLMA2_30_IN_LAST_12_MONTHS', 'LLMA2_60_IN_LAST_12_MONTHS',
                    'LLMA2_90_IN_LAST_12_MONTHS', 'LLMA2_FC_IN_LAST_12_MONTHS',
                    'LLMA2_REO_IN_LAST_12_MONTHS', 'LLMA2_0_IN_LAST_12_MONTHS',
                    'LLMA2_HIST_LAST_12_MONTHS_MIS',
                    'NUM_MODIF', 'NUM_MODIF_NAN', 'P_RATE_TO_MOD', 'P_RATE_TO_MOD_NAN', 'MOD_RATE',
                    'MOD_RATE_NAN', 'DIF_RATE', 'DIF_RATE_NAN', 'P_MONTHLY_PAY',
                    'P_MONTHLY_PAY_NAN', 'MOD_MONTHLY_PAY', 'MOD_MONTHLY_PAY_NAN',
                    'DIF_MONTHLY_PAY', 'DIF_MONTHLY_PAY_NAN', 'CAPITALIZATION_AMT',
                    'CAPITALIZATION_AMT_NAN', 'MORTGAGE_RATE', 'MORTGAGE_RATE_NAN',
                    'FICO_SCORE_ORIGINATION', 'INITIAL_INTEREST_RATE', 'ORIGINAL_LTV',
                    'ORIGINAL_BALANCE', 'BACKEND_RATIO', 'BACKEND_RATIO_NAN',
                    'ORIGINAL_TERM', 'ORIGINAL_TERM_NAN', 'SALE_PRICE', 'SALE_PRICE_NAN',
                    'PREPAY_PENALTY_TERM', 'PREPAY_PENALTY_TERM_NAN',
                    'NUMBER_OF_UNITS', 'NUMBER_OF_UNITS_NAN', 'MARGIN',
                    'MARGIN_NAN', 'PERIODIC_RATE_CAP', 'PERIODIC_RATE_CAP_NAN',
                    'PERIODIC_RATE_FLOOR', 'PERIODIC_RATE_FLOOR_NAN', 'LIFETIME_RATE_CAP',
                    'LIFETIME_RATE_CAP_NAN', 'LIFETIME_RATE_FLOOR',
                    'LIFETIME_RATE_FLOOR_NAN', 'RATE_RESET_FREQUENCY',
                    'RATE_RESET_FREQUENCY_NAN', 'PAY_RESET_FREQUENCY',
                    'PAY_RESET_FREQUENCY_NAN', 'FIRST_RATE_RESET_PERIOD',
                    'FIRST_RATE_RESET_PERIOD_NAN',
                    'LLMA2_PRIME',
                    'LLMA2_SUBPRIME', 'LLMA2_APPVAL_LT_SALEPRICE', 'LLMA2_ORIG_RATE_SPREAD',
                    'LLMA2_ORIG_RATE_SPREAD_NAN', 'AGI', 'AGI_NAN', 'UR', 'UR_NAN', 'LLMA2_ORIG_RATE_ORIG_MR_SPREAD',
                    'LLMA2_ORIG_RATE_ORIG_MR_SPREAD_NAN', 'COUNT_INT_RATE_LESS', 'NUM_PRIME_ZIP', 'NUM_PRIME_ZIP_NAN'
                    ]
    # NaN-fill policy per column: 'median' -> fill with the column median,
    # 0 -> fill with zero (modification-related columns default to "no
    # modification").  NOTE: an earlier variant filled every column with 0.
    nan_cols = {'MBA_DAYS_DELINQUENT': 'median', 'CURRENT_INTEREST_RATE': 'median', 'LOANAGE': 'median',
                'CURRENT_BALANCE' : 'median', 'SCHEDULED_PRINCIPAL': 'median', 'SCHEDULED_MONTHLY_PANDI': 'median',
                'LLMA2_CURRENT_INTEREST_SPREAD': 'median', 'NUM_MODIF': 0, 'P_RATE_TO_MOD': 0, 'MOD_RATE': 0,
                'DIF_RATE': 0, 'P_MONTHLY_PAY': 0, 'MOD_MONTHLY_PAY': 0, 'DIF_MONTHLY_PAY': 0, 'CAPITALIZATION_AMT': 0,
                'MORTGAGE_RATE': 'median', 'FICO_SCORE_ORIGINATION': 'median', 'INITIAL_INTEREST_RATE': 'median', 'ORIGINAL_LTV': 'median',
                'ORIGINAL_BALANCE': 'median', 'BACKEND_RATIO': 'median', 'ORIGINAL_TERM': 'median', 'SALE_PRICE': 'median', 'PREPAY_PENALTY_TERM': 'median',
                'NUMBER_OF_UNITS': 'median', 'MARGIN': 'median', 'PERIODIC_RATE_CAP': 'median', 'PERIODIC_RATE_FLOOR': 'median', 'LIFETIME_RATE_CAP': 'median',
                'LIFETIME_RATE_FLOOR': 'median', 'RATE_RESET_FREQUENCY': 'median', 'PAY_RESET_FREQUENCY': 'median',
                'FIRST_RATE_RESET_PERIOD': 'median', 'LLMA2_ORIG_RATE_SPREAD': 'median', 'AGI': 'median', 'UR': 'median',
                'LLMA2_C_IN_LAST_12_MONTHS': 'median', 'LLMA2_30_IN_LAST_12_MONTHS': 'median', 'LLMA2_60_IN_LAST_12_MONTHS': 'median',
                'LLMA2_90_IN_LAST_12_MONTHS': 'median', 'LLMA2_FC_IN_LAST_12_MONTHS': 'median',
                'LLMA2_REO_IN_LAST_12_MONTHS': 'median', 'LLMA2_0_IN_LAST_12_MONTHS': 'median',
                'LLMA2_ORIG_RATE_ORIG_MR_SPREAD':0, 'COUNT_INT_RATE_LESS' :'median', 'NUM_PRIME_ZIP':'median'
                }
    # Closed vocabularies for each categorical column; prepro_chunk encodes
    # these (the target DELINQUENCY_STATUS_NEXT is excluded from dropping).
    categorical_cols = {'MBA_DELINQUENCY_STATUS': ['0','3','6','9','C','F','R'], 'DELINQUENCY_STATUS_NEXT': ['0','3','6','9','C','F','R'], #,'S','T','X'
                        'BUYDOWN_FLAG': ['N','U','Y'], 'NEGATIVE_AMORTIZATION_FLAG': ['N','U','Y'], 'PREPAY_PENALTY_FLAG': ['N','U','Y'],
                        'OCCUPANCY_TYPE': ['1','2','3','U'], 'PRODUCT_TYPE': ['10','20','30','40','50','51','52','53','54','5A','5Z',
                        '60','61','62','63','6Z','70','80','81','82','83','84','8Z','U'],
                        'PROPERTY_TYPE': ['1','2','3','4','5','6','7','8','9','M','U','Z'], 'LOAN_PURPOSE_CATEGORY': ['P','R','U'],
                        'DOCUMENTATION_TYPE': ['1','2','3','U'], 'CHANNEL': ['1','2','3','4','5','6','7','8','9','A','B','C','D','U'],
                        'LOAN_TYPE': ['1','2','3','4','5','6','U'], 'IO_FLAG': ['N','U','Y'],
                        'CONVERTIBLE_FLAG': ['N','U','Y'], 'POOL_INSURANCE_FLAG': ['N','U','Y'], 'STATE': ['AK', 'AL', 'AR', 'AZ', 'CA', 'CO',
                        'CT', 'DC', 'DE', 'FL', 'GA', 'HI', 'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA',
                        'MD', 'ME', 'MI', 'MN', 'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV',
                        'NY', 'OH', 'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VT',
                        'WA', 'WI', 'WV', 'WY'],
                        'CURRENT_INVESTOR_CODE': ['240', '250', '253', 'U'], 'ORIGINATION_YEAR': ['B1995','1995','1996','1997','1998','1999','2000','2001','2002','2003',
                        '2004','2005','2006','2007','2008','2009','2010','2011','2012','2013','2014','2015','2016','2017','2018']}
    time_cols = ['YEAR', 'MONTH'] #, 'PERIOD'] #no nan values
    # Assemble the full expected raw column list (sanity check only).
    total_cols = numeric_cols.copy()
    total_cols.extend(descriptive_cols)
    total_cols.extend(categorical_cols.keys())
    total_cols.extend(time_cols)
    print('raw total_cols size: ', len(total_cols)) #110 !=112?? set(chunk_cols) - set(total_cols): {'LOAN_ID', 'PERIOD'}
    pd.set_option('io.hdf.default_format','table')
    # Precomputed per-column distribution statistics used to build the
    # normalizers without a full pass over the raw data.
    dist_file = pd.read_csv(os.path.join(RAW_DIR, "percentile features3-test.csv"), sep=';', low_memory=False)
    dist_file.columns = dist_file.columns.str.upper()
    # Normalize only the real-valued columns, not the *_NAN indicator flags.
    ncols = [x for x in numeric_cols if x.find('NAN')<0]
    robust_cols, robust_normalizer = custom_robust_normalizer(ncols, dist_file, center_value='quantile', normalizer_type='percentile_scaler')
    minmax_cols, minmax_normalizer = custom_minmax_normalizer(ncols, robust_normalizer.scale_, dist_file)
    # Columns handled by the robust scaler must be removed from the min-max
    # scaler (and its fitted scale_/center_ arrays kept in sync by position).
    inters = set(robust_cols).intersection(minmax_cols)
    to_delete = [i for x,i in zip(minmax_cols,range(len(minmax_cols))) if x in inters]
    minmax_normalizer.scale_ = np.delete(minmax_normalizer.scale_,to_delete, 0)
    minmax_normalizer.center_ = np.delete(minmax_normalizer.center_,to_delete, 0)
    minmax_cols = np.delete(minmax_cols,to_delete, 0)
    if (output_hdf == True):
        #with pd.HDFStore(target_path +'-pp.h5', complib='lzo', complevel=9) as hdf: #complib='lzo', complevel=9
        # One compressed store per split.
        # NOTE(review): these three writers are never closed in this branch —
        # they are only flushed implicitly; confirm whether an explicit
        # close() should be added.
        train_writer = pd.HDFStore(target_path +'-train-pp.h5', complib='lzo', complevel=9)
        valid_writer = pd.HDFStore(target_path +'-valid-pp.h5', complib='lzo', complevel=9)
        test_writer = pd.HDFStore(target_path +'-test-pp.h5', complib='lzo', complevel=9)
        print('generating: ', target_path +'-pp.h5')
        train_index, valid_index, test_index = prepro_chunk(file_name, file_path, chunksize, label, log_file, nan_cols, categorical_cols, descriptive_cols,
                                                            time_cols, robust_cols, minmax_cols, robust_normalizer, minmax_normalizer, dist_file, with_index,
                                                            refNorm, train_period, valid_period, test_period, hdf=[train_writer, valid_writer, test_writer], tfrec=None)
        print(train_index, valid_index, test_index)
        # Features and labels must stay row-aligned within each split.
        if train_writer.get_storer('train/features').nrows != train_writer.get_storer('train/labels').nrows:
            raise ValueError('Train-DataSet: Sizes should match!')
        if valid_writer.get_storer('valid/features').nrows != valid_writer.get_storer('valid/labels').nrows:
            raise ValueError('Valid-DataSet: Sizes should match!')
        if test_writer.get_storer('test/features').nrows != test_writer.get_storer('test/labels').nrows:
            raise ValueError('Test-DataSet: Sizes should match!')
        print('train/features size: ', train_writer.get_storer('train/features').nrows)
        print('valid/features size: ', valid_writer.get_storer('valid/features').nrows)
        print('test/features size: ', test_writer.get_storer('test/features').nrows)
        log_file.write('***SUMMARY***\n')
        log_file.write('train/features size: %d\r\n' %(train_writer.get_storer('train/features').nrows))
        log_file.write('valid/features size: %d\r\n' %(valid_writer.get_storer('valid/features').nrows))
        log_file.write('test/features size: %d\r\n' %(test_writer.get_storer('test/features').nrows))
        logger.info('training, validation and testing set into .h5 file')
    else:
        # TFRecord output path.  NOTE(review): tf.python_io is the TF 1.x API
        # (tf.io.TFRecordWriter in TF 2.x) — confirm the pinned TF version.
        train_writer = tf.python_io.TFRecordWriter(target_path +'-train-pp.tfrecords')
        valid_writer = tf.python_io.TFRecordWriter(target_path +'-valid-pp.tfrecords')
        test_writer = tf.python_io.TFRecordWriter(target_path +'-test-pp.tfrecords')
        train_index, valid_index, test_index = prepro_chunk(file_name, file_path, chunksize, label, log_file, nan_cols, categorical_cols, descriptive_cols, time_cols,
                                                            robust_cols, minmax_cols, robust_normalizer, minmax_normalizer, dist_file, with_index, refNorm, train_period,
                                                            valid_period, test_period, hdf=None, tfrec=[train_writer, valid_writer, test_writer])
        print(train_index, valid_index, test_index)
        train_writer.close()
        valid_writer.close()
        test_writer.close()
def get_other_set_slice(prep_dir, init_period, end_period, set_dir, file_name, chunk_size=8000000):
    """Copy rows whose PERIOD is in [init_period, end_period] from every .h5
    file under PRO_DIR/prep_dir into rolled-over output stores.

    Output files are named ``<file_name>_<k>.h5`` under PRO_DIR/set_dir; a new
    file is started once roughly ``chunk_size`` rows have been written.  The
    features/labels row counts are verified before each output file is closed.

    Args:
        prep_dir: Input sub-directory (relative to PRO_DIR) of .h5 files.
        init_period, end_period: Inclusive PERIOD range to extract.
        set_dir: Output sub-directory (relative to PRO_DIR).
        file_name: Base name for the generated output files.
        chunk_size: Approximate row count per output file.
    """
    pd.set_option('io.hdf.default_format', 'table')
    try:
        chunk_ind = 0
        target_path = os.path.join(PRO_DIR, set_dir, file_name + '_{:d}.h5'.format(chunk_ind))
        hdf_target = pd.HDFStore(target_path)
        print('Target Path: ', target_path)
        total_rows = 0
        for file_path in glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5")):
            # BUG FIX: do not clobber the `file_name` parameter with the input
            # basename — doing so made every rolled-over output file inherit
            # the last input file's name (including its '.h5' extension).
            input_name = os.path.basename(file_path)
            with pd.HDFStore(file_path) as hdf_input:
                period_range = set(range(init_period, end_period + 1))
                # PERIOD is assumed to be level 2 of the MultiIndex.
                period_features = set(list(hdf_input['features'].index.get_level_values(2)))
                # BUG FIX: the original kept the intersection as a *set* and
                # later indexed it with [-1], which raises TypeError.  Sorting
                # also makes the iteration order deterministic.
                period_inter = sorted(period_features.intersection(period_range))
                for i in period_inter:
                    df_features = hdf_input['features'].loc[(slice(None), slice(None), i), :]
                    df_labels = hdf_input['labels'].loc[(slice(None), slice(None), i), :]
                    hdf_target.put('features', df_features, append=True)
                    hdf_target.put('labels', df_labels, append=True)
                    hdf_target.flush()
                    total_rows += df_features.shape[0]
                    num_columns = len(df_features.columns.values)
                    del df_features
                    del df_labels
                    # Roll over when the size threshold is reached, and always
                    # finalize on the last selected period of this input file.
                    if (total_rows >= chunk_size or i == period_inter[-1]):
                        if hdf_target.get_storer('features').nrows != hdf_target.get_storer('labels').nrows:
                            raise ValueError('DataSet: Sizes should match!')
                        hdf_target.get_storer('features').attrs.num_columns = num_columns
                        hdf_target.close()
                        total_rows = 0
                        chunk_ind += 1
                        if (i != period_inter[-1]):
                            target_path = os.path.join(PRO_DIR, set_dir, file_name + '_{:d}.h5'.format(chunk_ind))
                            hdf_target = pd.HDFStore(target_path)
                            print('Target Path: ', target_path)
        if hdf_target.is_open: hdf_target.close()
    except Exception as e:
        hdf_target.close()
        print(e)
def get_other_set(prep_dir, init_period, end_period, set_dir, chunk_size=8000000):
    """Extract rows with PERIOD in [init_period, end_period] from each .h5
    file under PRO_DIR/prep_dir into per-chunk output stores under
    PRO_DIR/set_dir, using queryable (table-format) HDF5 selects.

    Args:
        prep_dir: Input sub-directory (relative to PRO_DIR).
        init_period, end_period: Inclusive PERIOD range to extract.
        set_dir: Output sub-directory (relative to PRO_DIR).
        chunk_size: Rows per features chunk (and per output file).
    """
    pd.set_option('io.hdf.default_format','table')
    try:
        chunk_ind = 0
        for file_path in glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5")):
            file_name = os.path.basename(file_path)
            print(file_name)
            with pd.HDFStore(file_path) as hdf_input:
                file_index = 0
                # Stream the PERIOD-filtered features in chunks.
                for df_features in hdf_input.select('features', "PERIOD>=" + str(init_period) + ' & PERIOD<=' + str(end_period), chunksize = chunk_size):
                    try:
                        # One output file per features chunk.
                        target_path = os.path.join(PRO_DIR, set_dir,file_name[:-4]+'_{:d}.h5'.format(chunk_ind))
                        hdf_target = pd.HDFStore(target_path)
                        print('Target Path: ', target_path)
                        # NOTE(review): labels are re-selected with the same
                        # where-clause but windowed by start/stop computed from
                        # the *unfiltered* table size — presumably this keeps
                        # labels row-aligned with the features chunk only when
                        # both tables are identically ordered; confirm.
                        if file_index + chunk_size <= hdf_input.get_storer('features').nrows:
                            df_labels = hdf_input.select('labels', "PERIOD>=" + str(init_period) + ' & PERIOD<=' + str(end_period), start = file_index, stop = file_index + chunk_size)
                            file_index += chunk_size
                        else:
                            # Last (short) chunk of this input file.
                            df_labels = hdf_input.select('labels', "PERIOD>=" + str(init_period) + ' & PERIOD<=' + str(end_period), start = file_index)
                            file_index = 0
                        hdf_target.put('features', df_features, append=True)
                        hdf_target.put('labels', df_labels, append=True)
                        hdf_target.flush()
                        num_columns = len(df_features.columns.values)
                        # Record the feature width on the store for readers.
                        hdf_target.get_storer('features').attrs.num_columns = num_columns
                        if hdf_target.get_storer('features').nrows != hdf_target.get_storer('labels').nrows:
                            raise ValueError('DataSet: Sizes should match!')
                        hdf_target.close()
                        del df_labels
                        del df_features
                        chunk_ind += 1
                    except Exception as e:
                        # NOTE(review): this swallows per-chunk errors silently
                        # (including the size-mismatch ValueError above) and
                        # continues with the next chunk; consider logging `e`.
                        if hdf_target.is_open: hdf_target.close()
    except Exception as e:
        print(e)
def slice_fixed_sets(prep_dir, set_dir, tag, chunk_size=400000):
    """Re-slice '<tag>/features' and '<tag>/labels' from each .h5 file under
    PRO_DIR/prep_dir into fixed-format (non-queryable, faster) output stores
    of at most ``chunk_size`` rows each, renumbering the first index level to
    a 0-based position within each output file.

    Args:
        prep_dir: Input sub-directory (relative to PRO_DIR).
        set_dir: Output sub-directory (relative to PRO_DIR).
        tag: Group prefix inside the stores ('train', 'valid' or 'test').
        chunk_size: Maximum rows per output file.
    """
    pd.set_option('io.hdf.default_format','fixed') #'table')
    try:
        chunk_ind = 0
        for file_path in glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5")):
            file_name = os.path.basename(file_path)
            print(file_name)
            with pd.HDFStore(file_path) as hdf_input:
                file_index = 0
                for df_features in hdf_input.select(tag + '/features', chunksize = chunk_size):
                    try:
                        target_path = os.path.join(PRO_DIR, set_dir,file_name[:-4]+'_{:d}.h5'.format(chunk_ind))
                        hdf_target = pd.HDFStore(target_path, complib='lzo', complevel=9, chunkshape='auto')
                        print('Target Path: ', target_path)
                        # Labels are windowed by position to stay row-aligned
                        # with the current features chunk.
                        df_labels = hdf_input.select(tag + '/labels', start = file_index, stop = file_index + df_features.shape[0])
                        # Rebuild the 4-level MultiIndex with a fresh 0-based
                        # first level; the remaining three levels are kept.
                        df_features.index = pd.MultiIndex.from_tuples([(i, x[1], x[2],x[3]) for x,i in zip(df_features.index, range(0, df_features.shape[0]))])
                        df_labels.index = pd.MultiIndex.from_tuples([(i, x[1], x[2],x[3]) for x,i in zip(df_labels.index, range(0, df_labels.shape[0]))])
                        file_index += df_features.shape[0]
                        hdf_target.put(tag + '/features', df_features)
                        hdf_target.put(tag + '/labels', df_labels)
                        hdf_target.flush()
                        if hdf_target.get_storer(tag+'/features').shape[0] != hdf_target.get_storer(tag + '/labels').shape[0]:
                            raise ValueError('DataSet: Sizes should match!')
                        hdf_target.close()
                        del df_labels
                        del df_features
                        chunk_ind += 1
                    except Exception as e:
                        # NOTE(review): per-chunk errors are swallowed silently;
                        # consider logging `e` before continuing.
                        if hdf_target.is_open: hdf_target.close()
    except Exception as e:
        print(e)
def slice_table_sets(prep_dir, set_dir, tag, target_name, input_chunk_size=1200, target_size = 70000, with_index=True, index=0):
    '''Concatenate '<tag>/features' and '<tag>/labels' from every .h5 file
    under PRO_DIR/prep_dir into table-format output files of roughly
    ``target_size`` rows named ``<target_name>_<k>.h5`` (or
    ``<target_name>_non_index_<k>.h5`` when ``with_index`` is False).

    The input directory must not be the same as the output directory, because
    the .h5 output files can be confused with the input files.

    Args:
        prep_dir: Input sub-directory (relative to PRO_DIR).
        set_dir: Output sub-directory (relative to PRO_DIR).
        tag: Group prefix inside the stores ('train', 'valid' or 'test').
        target_name: Base name for the generated output files.
        input_chunk_size: Rows read per iteration from each input file.
        target_size: Approximate rows per output file before rolling over.
        with_index: True to renumber the MultiIndex first level sequentially;
            False to drop the index entirely.
        index: Starting numeric suffix for the output file names.
    '''
    pd.set_option('io.hdf.default_format', 'table')
    all_files = glob.glob(os.path.join(PRO_DIR, prep_dir, "*.h5"))
    chunk_ind = index
    if (with_index==True):
        target_path = os.path.join(PRO_DIR, set_dir,target_name+'_{:d}.h5'.format(chunk_ind))
    else:
        target_path = os.path.join(PRO_DIR, set_dir,target_name+'_non_index_{:d}.h5'.format(chunk_ind))
    hdf_target = pd.HDFStore(target_path, complib='lzo', complevel=9)
    print('Target Path: ', target_path)
    try:
        total_rows = 0
        target_index = 0
        for i, file_path in enumerate(all_files):
            file_name = os.path.basename(file_path)
            print('Input File: ', file_name)
            with pd.HDFStore(file_path) as hdf_input:
                file_index = 0
                for df_features in hdf_input.select(tag + '/features', chunksize = input_chunk_size):
                    try:
                        # Labels windowed by position to stay row-aligned with
                        # the current features chunk.
                        df_labels = hdf_input.select(tag + '/labels', start = file_index, stop = file_index + df_features.shape[0])
                        if (with_index==True):
                            # Renumber the first MultiIndex level so rows are
                            # sequential across the whole output file.
                            df_features.index = pd.MultiIndex.from_tuples([(i, x[1], x[2],x[3]) for x,i in zip(df_features.index, range(target_index, target_index + df_features.shape[0]))])
                            df_labels.index = pd.MultiIndex.from_tuples([(i, x[1], x[2],x[3]) for x,i in zip(df_labels.index, range(target_index, target_index + df_labels.shape[0]))])
                        else:
                            df_features.reset_index(drop=True, inplace=True)
                            df_labels.reset_index(drop=True, inplace=True)
                        file_index += df_features.shape[0]
                        target_index += df_features.shape[0]
                        hdf_target.put(tag + '/features', df_features, append=True)
                        hdf_target.put(tag + '/labels', df_labels, append=True)
                        hdf_target.flush()
                        total_rows += df_features.shape[0]
                        print('total_rows: ', total_rows)
                        if (total_rows >= target_size):
                            # Finalize the current output file and roll over.
                            if hdf_target.get_storer(tag+'/features').nrows != hdf_target.get_storer(tag + '/labels').nrows:
                                raise ValueError('DataSet: Sizes should match!')
                            hdf_target.close()
                            total_rows = 0
                            chunk_ind += 1
                            # Open a new output file unless we just consumed
                            # the final (short) chunk of the last input file.
                            if ((i+1<len(all_files)) or (i+1==len(all_files) and df_features.shape[0]>=input_chunk_size)):
                                if (with_index==True):
                                    target_path = os.path.join(PRO_DIR, set_dir,target_name+'_{:d}.h5'.format(chunk_ind))
                                else:
                                    target_path = os.path.join(PRO_DIR, set_dir,target_name+'_non_index_{:d}.h5'.format(chunk_ind))
                                hdf_target = pd.HDFStore(target_path, complib='lzo', complevel=9)
                                print('Target Path: ', target_path)
                                target_index = 0
                        del df_labels
                        del df_features
                    except Exception as e:
                        # NOTE(review): per-chunk errors are swallowed silently
                        # (the loop continues); consider logging `e`.
                        if hdf_target.is_open: hdf_target.close()
        if hdf_target.is_open: hdf_target.close()
    except Exception as e:
        if hdf_target.is_open: hdf_target.close()
        print(e)
def get_h5_dataset(PRO_DIR, architecture, train_dir, valid_dir, test_dir, train_period=None, valid_period=None, test_period=None, cols=None, remainder=False):
    """Build a data_classes.Dataset from optional train/valid/test .h5 dirs.

    Each of train_dir/valid_dir/test_dir may be None, in which case the
    corresponding path passed to the Dataset is None as well.
    """
    def _resolve(subdir):
        # Map a sub-directory name to its full path, propagating None.
        return None if subdir is None else os.path.join(PRO_DIR, subdir)

    return data_classes.Dataset(
        architecture,
        train_path=_resolve(train_dir),
        valid_path=_resolve(valid_dir),
        test_path=_resolve(test_dir),
        train_period=train_period,
        valid_period=valid_period,
        test_period=test_period,
        cols=cols,
        remainder=remainder)
def get_h5_bdataset(PRO_DIR, architecture, train_dir, valid_dir, test_dir, train_period=None, valid_period=None, test_period=None, cols=None):
    """Build a balanced_data_classes.Dataset from optional train/valid/test
    .h5 directories (None directories yield None paths)."""
    def _resolve(subdir):
        # Map a sub-directory name to its full path, propagating None.
        return None if subdir is None else os.path.join(PRO_DIR, subdir)

    return balanced_data_classes.Dataset(
        architecture,
        train_path=_resolve(train_dir),
        valid_path=_resolve(valid_dir),
        test_path=_resolve(test_dir),
        train_period=train_period,
        valid_period=valid_period,
        test_period=test_period,
        cols=cols)
def allfeatures_preprocessing(RAW_DIR, PRO_DIR, raw_dir, train_num, valid_num, test_num, dividing='percentage', chunksize=500000, refNorm=True, with_index=True, output_hdf=True):
    """Run allfeatures_prepro_file over every *.txt file in RAW_DIR/raw_dir.

    For each input file a per-file log ('<target>-log.txt') is opened
    line-buffered, the preprocessing is timed, and the elapsed time is
    recorded both on stdout and in the log.
    """
    for raw_path in glob.glob(os.path.join(RAW_DIR, raw_dir, "*.txt")):
        base_name = os.path.basename(raw_path)
        stem = base_name[:-4]  # strip the '.txt' extension
        suffix = '' if with_index == True else '_non_index'
        out_prefix = os.path.join(PRO_DIR, raw_dir, stem + suffix)
        # Line-buffered log so progress survives a crash mid-file.
        prep_log = open(out_prefix + '-log.txt', 'w+', 1)
        print('Preprocessing File: ' + raw_path)
        prep_log.write('Preprocessing File: %s\r\n' % raw_path)
        started_at = datetime.now()
        allfeatures_prepro_file(
            RAW_DIR, raw_path, raw_dir, base_name, out_prefix,
            train_num, valid_num, test_num, prep_log,
            dividing=dividing, chunksize=chunksize, refNorm=refNorm,
            with_index=with_index, output_hdf=output_hdf)
        elapsed = datetime.now() - started_at
        print('Preprocessing Time: ', elapsed)
        prep_log.write('Preprocessing Time: %s\r\n' % str(elapsed))
        prep_log.close()
def read_data_sets(num_examples, valid_num, test_num, weight_flag=False, stratified_flag=False, refNorm=True):
    """Load the raw dataset from disk and split it via get_datasets(...).

    Args:
        num_examples (int): Number of examples to draw from the raw data
            (forwarded to get_datasets; confirm exact semantics there).
        valid_num (int): Number of validation samples.
        test_num (int): Number of testing samples.
        weight_flag (bool): Default False. True to perform a weighted sampling.
        stratified_flag (bool): Default False. True to sample stratified.
        refNorm (bool): Default True. True to apply reference normalization.
    Returns:
        data_classes.Dataset object wrapping the train/valid/test splits.
    """
    print('Reading the data from disk....')
    # NOTE(review): magic argument 45 — presumably a file/chunk id consumed by
    # grd.read_df; confirm and name it.
    all_data = grd.read_df(45)
    print('Size of the database:', all_data.shape)
    train, valid, test, feature_columns = get_datasets(all_data, num_examples, valid_num, test_num,
                                                       weight_flag=weight_flag, stratified_flag=stratified_flag, refNorm=refNorm)
    return data_classes.Dataset(train, valid, test, feature_columns)
def _str2bool(value):
    """Parse a CLI string into a real bool.

    ``argparse`` with ``type=bool`` treats ANY non-empty string — including
    "False" — as True; this converter fixes that well-known pitfall.
    Falsy spellings (case-insensitive): 'false', '0', 'no', 'n', ''.
    """
    if isinstance(value, bool):
        return value
    return value.strip().lower() not in ('false', '0', 'no', 'n', '')


def update_parser(parser):
    """Parse the arguments from the CLI and update the parser.

    Registers the preprocessing and slicing options on *parser* and returns
    the ``(namespace, unparsed)`` pair from ``parse_known_args()``.
    """
    parser.add_argument(
        '--prepro_step',
        type=str,
        default='preprocessing', #'slicing', 'preprocessing'
        help='To execute a preprocessing method')
    #this is for allfeatures_preprocessing:
    parser.add_argument(
        '--train_period',
        type=int,
        nargs='*',
        default=[121,323], #[156, 180], [121,143], # 279],
        help='Training Period')
    parser.add_argument(
        '--valid_period',
        type=int,
        nargs='*',
        default=[324,329], #[181,185], [144,147],
        help='Validation Period')
    parser.add_argument(
        '--test_period',
        type=int,
        nargs='*',
        default= [330, 342], # [186,191], [148, 155],
        help='Testing Period')
    parser.add_argument(
        '--prepro_dir',
        type=str,
        default='chuncks_random_c1mill',
        help='Directory with raw data inside data/raw/ and it will be the output directory inside data/processed/')
    parser.add_argument(
        '--prepro_chunksize',
        type=int,
        default=500000,
        help='Chunk size to put into the h5 file...')
    parser.add_argument(
        '--prepro_with_index',
        type=_str2bool,  # BUG FIX: was type=bool, which parses "False" as True
        default=True,
        help='To keep indexes for each record')
    parser.add_argument(
        '--ref_norm',
        type=_str2bool,  # BUG FIX: was type=bool
        default=True,
        help='To execute the normalization over the raw inputs')
    #to execute slice_table_sets:
    parser.add_argument(
        '--slice_input_dir',
        type=str,
        default='chuncks_random_c1mill',
        help='Input data directory')
    parser.add_argument(
        '--slice_output_dir',
        type=str,
        nargs='*',
        default=['chuncks_random_c1mill_train', 'chuncks_random_c1mill_valid', 'chuncks_random_c1mill_test'],
        help='Output data directory. Input and output could be the same per group, it is recommendable different directories...')
    parser.add_argument(
        '--slice_tag',
        type=str,
        nargs='*',
        default=['train', 'valid', 'test'],
        help='features group to be extracted')
    parser.add_argument(
        '--slice_target_name',
        type=str,
        nargs='*',
        default=['c1mill99-01_train', 'c1mill99-01_valid', 'c1mill99-01_test'],
        help='file name root inside output directory')
    parser.add_argument(
        '--slice_chunksize',
        type=int,
        default=1000,
        help='Chunk size to put into the h5 output files...')
    parser.add_argument(
        '--slice_target_size',
        type=int,
        default=36000000,
        help='Output file size')
    parser.add_argument(
        '--slice_with_index',
        type=_str2bool,  # BUG FIX: was type=bool
        default=False,
        help='To keep indexes for each record')
    parser.add_argument(
        '--slice_index',
        type=int,
        default=0,
        help='index to label each output file')
    return parser.parse_known_args()
def main(project_dir):
    """Entry point: dispatch on --prepro_step.

    'preprocessing' runs allfeatures_preprocessing over the raw directory;
    'slicing' runs slice_table_sets once per configured tag.  Any other value
    is rejected with a message.
    """
    def _ensure_dir(path):
        # Create the output directory on first use.
        if not os.path.exists(path):
            os.makedirs(path)

    logger.name = '__main__'
    logger.info('Retrieving DataFrame from Raw Data, Data Sampling')
    print("Run the main program.")
    flags, unparsed = update_parser(argparse.ArgumentParser())
    print("UNPARSED", unparsed)
    step = flags.prepro_step
    if step == 'preprocessing':
        t0 = datetime.now()
        _ensure_dir(os.path.join(PRO_DIR, flags.prepro_dir))
        allfeatures_preprocessing(
            RAW_DIR, PRO_DIR, flags.prepro_dir,
            flags.train_period, flags.valid_period, flags.test_period,
            dividing='percentage', chunksize=flags.prepro_chunksize,
            refNorm=flags.ref_norm, with_index=flags.prepro_with_index,
            output_hdf=True)
        print('Preprocessing - Time: ', datetime.now() - t0)
    elif step == 'slicing':
        for idx in range(len(flags.slice_tag)):
            t0 = datetime.now()
            _ensure_dir(os.path.join(PRO_DIR, flags.slice_output_dir[idx]))
            slice_table_sets(
                flags.slice_input_dir, flags.slice_output_dir[idx],
                flags.slice_tag[idx], flags.slice_target_name[idx],
                target_size=flags.slice_target_size,
                with_index=flags.slice_with_index,
                index=flags.slice_index,
                input_chunk_size=flags.slice_chunksize)
            print('Dividing .h5 files - Time: ', datetime.now() - t0)
    else:
        print('Invalid prepro_step...')
if __name__ == '__main__':
    # not used in this stub but often useful for finding various files:
    # project root is two levels above this module.
    project_dir = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
    # find .env automagically by walking up directories until it's found, then
    # load up the .env entries as environment variables
    load_dotenv(find_dotenv())
    # Avoid emitting every record twice via the root logger's handlers.
    logger.propagate = False
    main(project_dir)
| StarcoderdataPython |
5011661 | # -*- coding: utf-8 -*-
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
import libscanbuild.clang as sut
from . import fixtures
import os.path
class GetClangArgumentsTest(fixtures.TestCase):
    """Tests for libscanbuild.clang.get_arguments (requires a clang binary)."""

    def test_get_clang_arguments(self):
        """get_arguments preserves -D macro definitions, including quoted ones."""
        with fixtures.TempDir() as tmpdir:
            # An empty but real source file so clang's driver accepts it.
            filename = os.path.join(tmpdir, 'test.c')
            with open(filename, 'w') as handle:
                handle.write('')
            result = sut.get_arguments(
                ['clang', '-c', filename, '-DNDEBUG', '-Dvar="this is it"'],
                tmpdir)
            self.assertIn('NDEBUG', result)
            self.assertIn('var="this is it"', result)

    def test_get_clang_arguments_fails(self):
        """get_arguments raises when the compilation cannot be analyzed."""
        self.assertRaises(
            Exception, sut.get_arguments,
            ['clang', '-###', '-fsyntax-only', '-x', 'c', 'notexist.c'], '.')
class GetCheckersTest(fixtures.TestCase):
    """Smoke tests for checker enumeration (requires a clang binary)."""

    def test_get_checkers(self):
        # this test only verifies that the call does not crash and
        # returns a non-empty result
        result = sut.get_checkers('clang', [])
        self.assertTrue(len(result))

    def test_get_active_checkers(self):
        # this test only verifies that the call does not crash and
        # returns a non-empty result
        result = sut.get_active_checkers('clang', [])
        self.assertTrue(len(result))
| StarcoderdataPython |
4836341 | <reponame>wilsaj/flask-admin-old
# -*- coding: utf-8 -*-
"""
flask.ext.datastore.mongoalchemy
~~~~~~~~~~~~~~
:copyright: (c) 2011 by wilsaj.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import types
import mongoalchemy as ma
from mongoalchemy.document import Document
from wtforms import fields as f
from wtforms import form, validators, widgets
from wtforms.form import Form
from flask.ext.admin.datastore import AdminDatastore
from flask.ext.admin import wtforms as admin_wtf
from flask.ext.admin import util
class MongoAlchemyDatastore(AdminDatastore):
"""A datastore for accessing MongoAlchemy document models.
The `models` parameter should be either a module or an iterable
that contains the MongoAlchemy models that will be made available
through the admin interface.
`db_session` should be an initialized MongoAlchemy session
object. See the `MongoAlchemy documentation`_ for information on
how to do that.
By default, a form for adding and editing data will be
automatically generated for each MongoAlchemy model. Only
primitive MongoAlchemy types are supported so if you need to
support other fields you will need to create custom forms. You can
also use custom forms if you want more control over form behavior.
To use custom forms, set the `model_forms` parameter to be a dict
with model names as keys matched to custom forms for the forms you
want to override. Forms should be WTForms form objects; see the
`WTForms documentation`_ for more information on how to configure
forms.
A dict with model names as keys, mapped to WTForm Form objects
that should be used as forms for creating and editing instances of
these models.
.. _MongoAlchemy documentation: http://www.mongoalchemy.org/api/session.html
.. _WTForms documentation: http://wtforms.simplecodes.com/
"""
def __init__(self, models, db_session, model_forms=None):
self.model_classes = {}
self.model_forms = model_forms
self.db_session = db_session
if not self.model_forms:
self.model_forms = {}
if type(models) == types.ModuleType:
self.model_classes = dict(
[(k, v) for k, v in models.__dict__.items()
if issubclass(v, Document)])
else:
self.model_classes = dict(
[(model.__name__, model)
for model in models
if issubclass(model, Document)])
if self.model_classes:
self.form_dict = dict(
[(k, _form_for_model(v, db_session,))
for k, v in self.model_classes.items()])
for model_name, form in self.model_forms.items():
if model_name in self.form_dict:
self.form_dict[model_name] = form
def create_model_pagination(self, model_name, page, per_page=25):
"""Returns a pagination object for the list view."""
model_class = self.get_model_class(model_name)
query = self.db_session.query(model_class).skip(
(page - 1) * per_page).limit(per_page)
return MongoAlchemyPagination(page, per_page, query)
def delete_model_instance(self, model_name, model_keys):
"""Deletes a model instance. Returns True if model instance
was successfully deleted, returns False otherwise.
"""
model_class = self.get_model_class(model_name)
try:
model_instance = self.find_model_instance(model_name, model_keys)
self.db_session.remove(model_instance)
return True
except ma.query.BadResultException:
return False
def find_model_instance(self, model_name, model_keys):
"""Returns a model instance, if one exists, that matches
model_name and model_keys. Returns None if no such model
instance exists.
"""
model_key = model_keys[0]
model_class = self.get_model_class(model_name)
return self.db_session.query(model_class).filter(
model_class.mongo_id == model_key).one()
def get_model_class(self, model_name):
"""Returns a model class, given a model name."""
return self.model_classes.get(model_name, None)
def get_model_form(self, model_name):
"""Returns a form, given a model name."""
return self.form_dict.get(model_name, None)
    def get_model_keys(self, model_instance):
        """Returns the keys for a given model instance."""
        # Mongo documents have a single primary key; wrap it in a list
        # to satisfy the datastore key-list interface.
        return [model_instance.mongo_id]
def list_model_names(self):
"""Returns a list of model names available in the datastore."""
return self.model_classes.keys()
    def save_model(self, model_instance):
        """Persists a model instance to the datastore. Note: this
        could be called when a model instance is added or edited.
        """
        # Commit writes straight through to the underlying pymongo db.
        return model_instance.commit(self.db_session.db)
    def update_from_form(self, model_instance, form):
        """Returns a model instance whose values have been updated
        with the values from a given form.
        """
        for field in form:
            # handle FormFields that were generated for mongoalchemy
            # TupleFields as a special case: re-pack the subfield data
            # into a tuple, in declaration order
            if field.__class__ == f.FormField:
                data_tuple = tuple([subfield.data for subfield in field])
                setattr(model_instance, field.name, data_tuple)
                continue
            # don't use the mongo id from the form - it comes from the
            # key/url and if someone tampers with the form somehow, we
            # should ignore that
            elif field.name != 'mongo_id':
                setattr(model_instance, field.name, field.data)
        return model_instance
class MongoAlchemyPagination(util.Pagination):
    """Adapts a mongoalchemy query to the generic Pagination interface."""
    def __init__(self, page, per_page, query, *args, **kwargs):
        # Eagerly evaluates both count() and all() for the given page.
        super(MongoAlchemyPagination, self).__init__(
            page, per_page, total=query.count(), items=query.all(),
            *args, **kwargs)
def _form_for_model(document_class, db_session):
    """returns a wtform Form object for a given document model class.
    """
    # db_session is currently unused; kept for interface parity until
    # the XXX below is resolved.
    #XXX: needs to be implemented
    return model_form(document_class)
#-----------------------------------------------------------------------
# mongo alchemy form generation: to be pushed upstream
#-----------------------------------------------------------------------
class DisabledTextInput(widgets.TextInput):
    """Text input rendered with the HTML ``disabled`` attribute set."""
    def __call__(self, field, **kwargs):
        # Force the attribute on every render so read-only fields such
        # as mongo_id are displayed but never editable.
        kwargs['disabled'] = 'disabled'
        return super(DisabledTextInput, self).__call__(field, **kwargs)
def converts(*args):
    """Decorator marking a converter method as handling the given
    mongoalchemy field-type names (stored as ``_converter_for``).
    """
    def decorate(func):
        func._converter_for = frozenset(args)
        return func
    return decorate
class ModelConverterBase(object):
    """Maps mongoalchemy field types to wtforms field constructors.

    Converter callables are discovered on the instance via the
    ``_converter_for`` marker applied by the ``converts`` decorator.
    """
    def __init__(self, converters, use_mro=True):
        self.use_mro = use_mro
        if not converters:
            converters = {}
        # Index every @converts-tagged method by the field-type name(s)
        # it declares.
        for name in dir(self):
            obj = getattr(self, name)
            if hasattr(obj, '_converter_for'):
                for classname in obj._converter_for:
                    converters[classname] = obj
        self.converters = converters
    def convert(self, model, ma_field, field_args):
        """Build a wtforms field for *ma_field*, or return None when no
        converter is registered for its type.
        """
        # mongoalchemy uses UNSET as a "no default" sentinel; normalise
        # it to None for wtforms.
        default = getattr(ma_field, 'default', None)
        if default == ma.util.UNSET:
            default = None
        kwargs = {
            'validators': [],
            'filters': [],
            'default': default,
        }
        if field_args:
            kwargs.update(field_args)
        if not ma_field.required:
            kwargs['validators'].append(validators.Optional())
        types = [type(ma_field)]
        converter = None
        for ma_field_type in types:
            type_string = '%s.%s' % (
                ma_field_type.__module__, ma_field_type.__name__)
            # Strip the 'mongoalchemy.fields.' prefix (20 chars) so
            # lookups can use the bare field-class name.
            if type_string.startswith('mongoalchemy.fields'):
                type_string = type_string[20:]
            if type_string in self.converters:
                converter = self.converters[type_string]
                break
        else:
            # Fall back to matching on the class name alone.
            for ma_field_type in types:
                if ma_field_type.__name__ in self.converters:
                    converter = self.converters[ma_field_type.__name__]
                    break
            else:
                return
        return converter(model=model, ma_field=ma_field, field_args=kwargs)
class ModelConverter(ModelConverterBase):
    """Default converter set: one @converts method per supported
    mongoalchemy field type. Unlisted converter parameters (``model``,
    ``ma_field``) are absorbed by ``**extra`` where unused.
    """
    def __init__(self, extra_converters=None):
        super(ModelConverter, self).__init__(extra_converters)
    @converts('BoolField')
    def conv_Bool(self, ma_field, field_args, **extra):
        return f.BooleanField(**field_args)
    @converts('DateTimeField')
    def conv_DateTime(self, ma_field, field_args, **extra):
        # TODO: add custom validator for date range
        field_args['widget'] = admin_wtf.DateTimePickerWidget()
        return f.DateTimeField(**field_args)
    @converts('EnumField')
    def conv_Enum(self, model, ma_field, field_args, **extra):
        # Convert the underlying item type, then restrict its values.
        converted_field = self.convert(model, ma_field.item_type, {})
        converted_field.kwargs['validators'].append(
            validators.AnyOf(ma_field.values, values_formatter=str))
        return converted_field
    @converts('FloatField')
    def conv_Float(self, ma_field, field_args, **extra):
        if ma_field.min or ma_field.max:
            field_args['validators'].append(
                validators.NumberRange(min=ma_field.min, max=ma_field.max))
        return f.FloatField(**field_args)
    @converts('IntField')
    def conv_Int(self, ma_field, field_args, **extra):
        if ma_field.min or ma_field.max:
            field_args['validators'].append(
                validators.NumberRange(min=ma_field.min, max=ma_field.max))
        return f.IntegerField(**field_args)
    @converts('ObjectIdField')
    def conv_ObjectId(self, field_args, **extra):
        # Rendered read-only: the id comes from the URL, not the user.
        widget = DisabledTextInput()
        return f.TextField(widget=widget, **field_args)
    @converts('StringField')
    def conv_String(self, ma_field, field_args, **extra):
        if ma_field.min or ma_field.max:
            min = ma_field.min or -1
            max = ma_field.max or -1
            field_args['validators'].append(
                validators.Length(min=min, max=max))
        return f.TextField(**field_args)
    @converts('TupleField')
    def conv_Tuple(self, model, ma_field, field_args, **extra):
        # One sub-field per tuple element, named '<field>_<index>'.
        def convert_field(field):
            return self.convert(model, field, {})
        fields = map(convert_field, ma_field.types)
        fields_dict = dict([('%s_%s' % (ma_field._name, i), field)
                            for i, field in enumerate(fields)])
        class ConvertedTupleForm(Form):
            def process(self, formdata=None, obj=None, **kwargs):
                # if the field is being populated from a mongoalchemy
                # TupleField, obj will be a tuple object so we can set
                # the fields by reversing the field name to get the
                # index and then passing that along to wtforms in the
                # kwargs dict
                if type(obj) == tuple:
                    for name, field in self._fields.items():
                        tuple_index = int(name.split('_')[-1])
                        kwargs[name] = obj[tuple_index]
                super(ConvertedTupleForm, self).process(
                    formdata, obj, **kwargs)
        fields_form = type(ma_field._name + 'Form', (ConvertedTupleForm,), fields_dict)
        return f.FormField(fields_form)
def model_fields(model, only=None, exclude=None, field_args=None,
                 converter=None):
    """
    Generate a dictionary of fields for a given MongoAlchemy model.
    See `model_form` docstring for description of parameters.
    """
    if not issubclass(model, Document):
        raise TypeError('model must be a mongoalchemy document model')
    converter = converter or ModelConverter()
    field_args = field_args or {}
    ma_fields = ((name, field) for name, field in model.get_fields().items())
    # 'only' takes precedence over 'exclude' when both are given.
    if only:
        ma_fields = (x for x in ma_fields if x[0] in only)
    elif exclude:
        ma_fields = (x for x in ma_fields if x[0] not in exclude)
    field_dict = {}
    for name, field in ma_fields:
        # convert() returns None for field types with no converter;
        # those are silently skipped.
        wtfield = converter.convert(model, field, field_args.get(name))
        if wtfield is not None:
            field_dict[name] = wtfield
    return field_dict
def model_form(model, base_class=Form, only=None, exclude=None,
               field_args=None, converter=None):
    """
    Create a wtforms Form for a given MongoAlchemy model class::
        from wtforms.ext.mongoalchemy.orm import model_form
        from myapp.models import User
        UserForm = model_form(User)
    :param model:
        A MongoAlchemy mapped model class.
    :param base_class:
        Base form class to extend from. Must be a ``wtforms.Form`` subclass.
    :param only:
        An optional iterable with the property names that should be included in
        the form. Only these properties will have fields.
    :param exclude:
        An optional iterable with the property names that should be excluded
        from the form. All other properties will have fields.
    :param field_args:
        An optional dictionary of field names mapping to keyword arguments used
        to construct each field object.
    :param converter:
        A converter to generate the fields based on the model properties. If
        not set, ``ModelConverter`` is used.
    """
    field_dict = model_fields(model, only, exclude, field_args, converter)
    # Build the Form subclass dynamically, named '<Model>Form'.
    return type(model.__name__ + 'Form', (base_class, ), field_dict)
| StarcoderdataPython |
6536980 | <reponame>korenlev/calipso-cvim<gh_stars>0
###############################################################################
# Copyright (c) 2017-2020 <NAME> (Cisco Systems), #
# <NAME> (Cisco Systems), <NAME> (Cisco Systems) and others #
# #
# All rights reserved. This program and the accompanying materials #
# are made available under the terms of the Apache License, Version 2.0 #
# which accompanies this distribution, and is available at #
# http://www.apache.org/licenses/LICENSE-2.0 #
###############################################################################
from scan.validators.validator_base import ValidatorBase
class TreeValidator(ValidatorBase):
    """Validates the inventory object tree of one environment: object
    ids must be unique and every non-root object must reference an
    existing parent.
    """
    def run(self, save_result: bool = False) -> (bool, list):
        # Fetch only the fields needed for the tree checks.
        objects_list = self.inv.find(search={"environment": self.env},
                                     projection=['_id', 'id', 'type', 'parent_id', 'parent_type'])
        errors = []
        objects_dict = {}
        # Pass 1: index by id, flagging duplicates.
        for obj in objects_list:
            if obj['id'] in objects_dict:
                errors.append("Duplicate id: '{}' for object: '{}' of type '{}'".format(obj['id'],
                                                                                       obj['_id'],
                                                                                       obj['type']))
            else:
                objects_dict[obj['id']] = obj
        # Pass 2: every object must have a known parent, unless its
        # parent is the environment itself (tree root).
        for obj_id, obj in objects_dict.items():
            if obj['parent_id'] not in objects_dict and obj['parent_type'] != 'environment':
                errors.append("Missing parent object with id: '{}' for object: '{}' of type '{}'".format(obj['parent_id'],
                                                                                                         obj['_id'],
                                                                                                         obj['type']))
        # TODO: search for cycles?
        return self.process_result(errors=errors, save_to_db=save_result)
| StarcoderdataPython |
4895065 | <reponame>penguinnnnn/HKJCData
import random
import numpy as np
import matplotlib.pyplot as plt
# Fixed stake placed on every race.
fix_bet = 10
with open('data/odds.txt') as f:
    odds_data = f.read().splitlines()
# Each line is "<whitespace-separated odds>\t<winner index>":
# X holds per-horse odds for each race, Y the winning horse's index.
X = [[float(i) for i in d.split('\t')[0].split()] for d in odds_data]
Y = [int(d.split('\t')[1]) for d in odds_data]
# Set up the cumulative-gain chart; one line is added per strategy.
plt.style.use('ggplot')
plt.figure(figsize=(16, 8))
plt.title('Betting Strategy')
plt.xlabel('Race ID')
plt.ylabel('Gain')
# Strategy: always back the horse with the highest odds (long shot).
capital = 0
plot = [0]
for x, y in zip(X, Y):
    choice = np.argmax(np.array(x))
    if choice == y:
        capital += x[choice] * fix_bet
    # The stake is spent on every race regardless of the outcome.
    capital -= fix_bet
    plot.append(capital)
index = [i for i in range(len(plot))]
plt.plot(index, plot, '-', color='r', label='Highest')
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# c = np.array(x)
# c[np.argmax(c)] = np.min(c)
# choice = np.argmax(c)
# if choice == y:
# capital += x[choice] * fix_bet
# capital -= fix_bet
# plot.append(capital)
# index = [i for i in range(len(plot))]
# plt.plot(index, plot, '-', color='g', label='2nd Highest')
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# c = np.array(x)
# choice1 = np.argmax(c)
# c[np.argmax(c)] = np.min(c)
# choice2 = np.argmax(c)
# if choice1 == y:
# capital += x[choice1] * fix_bet * 0.5
# if choice2 == y:
# capital += x[choice2] * fix_bet * 0.5
# capital -= fix_bet
# plot.append(capital)
# index = [i for i in range(len(plot))]
# plt.plot(index, plot, '-', color='b', label='Two Highest')
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# c = np.array(x)
# c[np.argmax(c)] = np.min(c)
# c[np.argmax(c)] = np.min(c)
# choice = np.argmax(c)
# if choice == y:
# capital += x[choice] * fix_bet
# capital -= fix_bet
# plot.append(capital)
# index = [i for i in range(len(plot))]
# plt.plot(index, plot, '-', color='c', label='3rd Highest')
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# c = np.array(x)
# c[np.argmin(c)] = np.max(c)
# c[np.argmin(c)] = np.max(c)
# choice = np.argmin(c)
# if choice == y:
# capital += x[choice] * fix_bet
# capital -= fix_bet
# plot.append(capital)
# index = [i for i in range(len(plot))]
# plt.plot(index, plot, '-', color='b', label='3rd Lowest')
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# c = np.array(x)
# choice1 = np.argmin(c)
# c[np.argmin(c)] = np.max(c)
# choice2 = np.argmin(c)
# if choice1 == y:
# capital += x[choice1] * fix_bet * 0.5
# if choice2 == y:
# capital += x[choice2] * fix_bet * 0.5
# capital -= fix_bet
# plot.append(capital)
# index = [i for i in range(len(plot))]
# plt.plot(index, plot, '-', color='r', label='Two Lowest')
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# c = np.array(x)
# c[np.argmin(c)] = np.max(c)
# choice = np.argmin(c)
# if choice == y:
# capital += x[choice] * fix_bet
# capital -= fix_bet
# plot.append(capital)
# index = [i for i in range(len(plot))]
# plt.plot(index, plot, '-', color='g', label='2nd Lowest')
# Strategy: always back the favourite (lowest odds).
capital = 0
plot = [0]
for x, y in zip(X, Y):
    choice = np.argmin(np.array(x))
    if choice == y:
        capital += x[choice] * fix_bet
    capital -= fix_bet
    plot.append(capital)
index = [i for i in range(len(plot))]
plt.plot(index, plot, '-', color='g', label='Lowest')
# Strategy: bet on a uniformly random horse; the gain curve (and the
# number of winning picks) is averaged over 100 independent runs.
random_run = []
avg_count = []
for run_index in range(100):
    print('Run random %d' % (run_index + 1))
    capital = 0
    plot = [0]
    count = 0
    for x, y in zip(X, Y):
        choice = random.randint(0, len(x) - 1)
        if choice == y:
            count += 1
            capital += x[choice] * fix_bet
        capital -= fix_bet
        plot.append(capital)
    random_run.append(plot)
    avg_count.append(count)
random_run = np.array(random_run)
random_run = np.average(random_run, axis=0)
index = [i for i in range(len(random_run))]
plt.plot(index, random_run, '-', color='b', label='Random')
# Average number of winning picks per run.
print(np.average(np.array(avg_count)))
# sampling_run = []
# for run_index in range(10):
# print('Run sampling %d' % (run_index + 1))
# capital = 0
# plot = [0]
# for x, y in zip(X, Y):
# prob = 1 / np.array(x)
# prob = prob / np.sum(prob)
# prob_bin = [np.sum(prob[:i + 1]) for i in range(len(prob))]
# choice = random.random()
# for i in range(len(prob_bin)):
# if prob_bin[i] > choice:
# choice = i
# break
# if choice == y:
# capital += x[choice] * fix_bet
# capital -= fix_bet
# plot.append(capital)
# sampling_run.append(plot)
# sampling_run = np.array(sampling_run)
# sampling_run = np.average(sampling_run, axis=0)
# index = [i for i in range(len(sampling_run))]
# plt.plot(index, sampling_run, '-', color='g', label='Sampling')
# Write the comparison chart to disk.
plt.legend()
plt.savefig('results/my.png')
| StarcoderdataPython |
11207115 | <reponame>mineo/beets<gh_stars>0
# This file is part of beets.
# Copyright 2012, <NAME>.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Fetches album art.
"""
import urllib
import re
import logging
import os
from beets.plugins import BeetsPlugin
from beets import importer
from beets import ui
# File extensions treated as candidate album-art images.
IMAGE_EXTENSIONS = ['png', 'jpg', 'jpeg']
# Lowercase filename prefixes that mark a "preferred" cover file.
COVER_NAMES = ['cover', 'front', 'art', 'album', 'folder']
# Content types accepted when downloading remote art.
CONTENT_TYPES = ('image/jpeg',)
log = logging.getLogger('beets')
# ART SOURCES ################################################################
def _fetch_image(url):
    """Downloads an image from a URL and checks whether it seems to
    actually be an image. If so, returns a path to the downloaded image.
    Otherwise, returns None.
    """
    log.debug('Downloading art: %s' % url)
    try:
        # urlretrieve stores the response in a temporary file.
        fn, headers = urllib.urlretrieve(url)
    except IOError:
        log.debug('error fetching art')
        return
    # Make sure it's actually an image (reject HTML error pages etc.)
    # by checking the response content type.
    if headers.gettype() in CONTENT_TYPES:
        log.debug('Downloaded art to: %s' % fn)
        return fn
    else:
        log.debug('Not an image.')
# Cover Art Archive.
# Cover Art Archive front-cover endpoint (500px variant).
CAA_URL = 'http://coverartarchive.org/release/{mbid}/front-500'
def caa_art(release_id):
    """Fetch album art from the Cover Art Archive given a MusicBrainz
    release ID.
    """
    url = CAA_URL.format(mbid=release_id)
    return _fetch_image(url)
# Art from Amazon.
# Amazon image URL template; the numeric index selects among the
# image variants tried below.
AMAZON_URL = 'http://images.amazon.com/images/P/%s.%02i.LZZZZZZZ.jpg'
AMAZON_INDICES = (1, 2)
def art_for_asin(asin):
    """Fetch art for an Amazon ID (ASIN) string.

    Tries each known image index in turn; returns the first hit or
    None if none succeed.
    """
    for index in AMAZON_INDICES:
        # Fetch the image.
        url = AMAZON_URL % (asin, index)
        fn = _fetch_image(url)
        if fn:
            return fn
# AlbumArt.org scraper.
# AlbumArt.org scraper: detail-page URL and the regex locating the
# "View larger image" link on that page.
AAO_URL = 'http://www.albumart.org/index_detail.php'
AAO_PAT = r'href\s*=\s*"([^>"]*)"[^>]*title\s*=\s*"View larger image"'
def aao_art(asin):
    """Fetch art from AlbumArt.org.

    Scrapes the detail page for *asin* and downloads the linked
    full-size image; returns a local path or None.
    """
    # Get the page from albumart.org.
    url = '%s?%s' % (AAO_URL, urllib.urlencode({'asin': asin}))
    try:
        log.debug('Scraping art URL: %s' % url)
        page = urllib.urlopen(url).read()
    except IOError:
        log.debug('Error scraping art page')
        return
    # Search the page for the image URL.
    m = re.search(AAO_PAT, page)
    if m:
        image_url = m.group(1)
        return _fetch_image(image_url)
    else:
        log.debug('No image found on page')
# Art from the filesystem.
def art_in_path(path):
    """Look for album art files in a specified directory.

    Well-named files (cover/front/...) win; otherwise the first image
    found is used. Returns a full path or None.
    """
    if not os.path.isdir(path):
        return
    # Collect every file whose extension looks like an image.
    suffixes = tuple('.' + ext for ext in IMAGE_EXTENSIONS)
    images = [fn for fn in os.listdir(path)
              if fn.lower().endswith(suffixes)]
    # A file starting with a well-known cover prefix is preferred.
    for fn in images:
        lowered = fn.lower()
        if any(lowered.startswith(name) for name in COVER_NAMES):
            log.debug('Using well-named art file %s' % fn)
            return os.path.join(path, fn)
    # Otherwise settle for the first image in the folder.
    if images:
        log.debug('Using fallback art file %s' % images[0])
        return os.path.join(path, images[0])
# Try each source in turn.
def art_for_album(album, path, local_only=False):
    """Given an Album object, returns a path to downloaded art for the
    album (or None if no art is found). If `local_only`, then only local
    image files from the filesystem are returned; no network requests
    are made.
    """
    # Local art always wins when present.
    if isinstance(path, basestring):
        out = art_in_path(path)
        if out:
            return out
    if local_only:
        # Abort without trying Web sources.
        return
    # CoverArtArchive.org.
    if album.mb_albumid:
        log.debug('Fetching album art for MBID {0}.'.format(album.mb_albumid))
        out = caa_art(album.mb_albumid)
        if out:
            return out
    # Amazon and AlbumArt.org.
    if album.asin:
        log.debug('Fetching album art for ASIN %s.' % album.asin)
        out = art_for_asin(album.asin)
        if out:
            return out
        return aao_art(album.asin)
    # All sources failed.
    log.debug('No ASIN available: no art found.')
    return None
# PLUGIN LOGIC ###############################################################
def batch_fetch_art(lib, albums, force):
    """Fetch album art for each of the albums. This implements the manual
    fetchart CLI command.

    With *force* set, art is re-fetched even for albums that already
    have an artpath.
    """
    for album in albums:
        if album.artpath and not force:
            message = 'has album art'
        else:
            path = art_for_album(album, None)
            if path:
                album.set_art(path, False)
                message = 'found album art'
            else:
                message = 'no art found'
        log.info(u'{0} - {1}: {2}'.format(album.albumartist, album.album,
                                          message))
class FetchArtPlugin(BeetsPlugin):
    """Beets plugin wiring art fetching into the import pipeline and
    exposing the manual ``fetchart`` command.
    """
    def __init__(self):
        super(FetchArtPlugin, self).__init__()
        self.autofetch = True
        # Holds paths to downloaded images between fetching them and
        # placing them in the filesystem.
        self.art_paths = {}
    def configure(self, config):
        self.autofetch = ui.config_val(config, 'fetchart',
                                       'autofetch', True, bool)
        if self.autofetch:
            # Enable two import hooks when fetching is enabled.
            self.import_stages = [self.fetch_art]
            self.register_listener('import_task_files', self.assign_art)
    # Asynchronous; after music is added to the library.
    def fetch_art(self, config, task):
        """Find art for the album being imported."""
        if task.is_album: # Only fetch art for full albums.
            if task.choice_flag == importer.action.ASIS:
                # For as-is imports, don't search Web sources for art.
                local = True
            elif task.choice_flag == importer.action.APPLY:
                # Search everywhere for art.
                local = False
            else:
                # For any other choices (e.g., TRACKS), do nothing.
                return
            album = config.lib.get_album(task.album_id)
            path = art_for_album(album, task.path, local_only=local)
            if path:
                self.art_paths[task] = path
    # Synchronous; after music files are put in place.
    def assign_art(self, config, task):
        """Place the discovered art in the filesystem."""
        if task in self.art_paths:
            path = self.art_paths.pop(task)
            album = config.lib.get_album(task.album_id)
            # Copy the art in unless the source file is being moved or
            # deleted anyway.
            album.set_art(path, not (config.delete or config.move))
            if config.delete or config.move:
                task.prune(path)
    # Manual album art fetching.
    def commands(self):
        cmd = ui.Subcommand('fetchart', help='download album art')
        cmd.parser.add_option('-f', '--force', dest='force',
                              action='store_true', default=False,
                              help='re-download art when already present')
        def func(lib, config, opts, args):
            batch_fetch_art(lib, lib.albums(ui.decargs(args)), opts.force)
        cmd.func = func
        return [cmd]
| StarcoderdataPython |
279112 | # -*- coding: utf-8 -*-
import collections
# A key set to zero is still stored in the Counter...
c = collections.Counter('extremely')
c['z'] = 0
print(c)
# ...but elements() skips counts <= 0, so 'z' does not appear here.
print(list(c.elements()))
| StarcoderdataPython |
8121754 | <filename>apim-migration-testing-tool/Python/ApiMangerConfigUtil/remove_files.py
import os
from properties import *
def remove_tenant_loaderJar():
    """Delete tenantloader-1.0.jar from the dropins directory of the
    new APIM distribution, reporting success or failure.
    """
    path = '%s/wso2am-%s/repository/components/dropins/tenantloader-1.0.jar' % (APIM_HOME_PATH, NEW_VERSION)
    # os.remove() returns None, so checking its return value can never
    # detect failure (the old "Failed" branch was unreachable); failures
    # are signalled via OSError instead.
    try:
        os.remove(path)
        print("Successfully removed tenantloader-1.0.jar from it\'s locations.")
    except OSError:
        print("Failed to remove tenantloader-1.0.jar from it\'s locations!!!")
def remove_client_migration_zip():
    """Delete wso2-api-migration-client.zip from the dropins directory
    of the new APIM distribution, if it exists.
    """
    path = '%s/wso2am-%s/repository/components/dropins/wso2-api-migration-client.zip' % (APIM_HOME_PATH, NEW_VERSION)
    if os.path.isfile(path):
        # os.remove() returns None, so its return value cannot signal
        # failure (the old "Failed" branch was unreachable); rely on
        # OSError instead.
        try:
            os.remove(path)
            print("Successfully removed wso2-api-migration-client.zip from it\'s locations.")
        except OSError:
            print("Failed to remove wso2-api-migration-client.zip from it\'s locations!!!")
| StarcoderdataPython |
6666560 | # -*- coding: utf-8 -*-
def write(text=' '):
    """Append *text* to today's minute-stamped log file under
    ./log/new/YYYY/MM/DD/, creating the date directory tree on demand.
    """
    import datetime, os
    time = datetime.datetime.today()
    path = "./log/new/" + time.strftime("%Y//%m//%d")
    if not os.path.exists(path):
        os.makedirs(path)
    # 'with' guarantees the handle is closed even if the write fails.
    with open(time.strftime("./log/new/%Y/%m/%d/%H:%M.log"), 'a') as f:
        f.write(text)
def wprint(text=' '):
    """Print *text* and record the output event, with a timestamp, in
    the log file via write().
    """
    import datetime
    time = datetime.datetime.today()
    # The logged suffix (Japanese) reads: "printed <text>".
    write("[" + time.strftime("%H:%M.%S") + "] " + text + "と出力しました\n")
    print(text)
def wprintf(text=' '):
    """Like wprint(), but rewrites the current console line in place
    (carriage return + flush) instead of emitting a newline.
    """
    import datetime, sys
    time = datetime.datetime.today()
    # The logged suffix (Japanese) reads: "printed <text>".
    write("[" + time.strftime("%H:%M.%S") + "] " + text + "と出力しました\n")
    sys.stdout.write("\r" + text)
    sys.stdout.flush()
def archive():
    """Zip ./log/new/ into ./log/archive/old.zip, replacing any
    previous archive.
    """
    import shutil, os
    # Both branches of the old if/else performed the same archive+log
    # calls; only the removal of a stale zip differed, so do that first.
    if os.path.exists("./log/archive/old.zip"):
        os.remove("./log/archive/old.zip")
    shutil.make_archive('./log/archive/old', 'zip', root_dir='./log/', base_dir='./new/')
    # Logged message (Japanese): "compressed the log files".
    write("ログファイルを圧縮しました")
4954225 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
from requests import HTTPError
from typing import Dict, Any
from json.decoder import JSONDecodeError
import json
import traceback
import requests
import math
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBALS/PARAMS '''
# Integration instance parameters (configured per Demisto instance).
TOKEN = demisto.params().get('token')
# Remove trailing slash to prevent wrong URL path to service
SERVER = demisto.params().get('url')[:-1] \
    if ('url' in demisto.params() and demisto.params()['url'].endswith('/')) else demisto.params().get('url', '')
# Should we use SSL
USE_SSL = not demisto.params().get('insecure', False)
# Headers to be sent in requests
HEADERS = {
    'Authorization': f'Token {TOKEN}',
    'Content-Type': 'application/json',
    'Accept': 'application/json'
}
# Error messages
INVALID_ID_ERR_MSG = 'Error in API call. This may be happen if you provided an invalid id.'
API_ERR_MSG = 'Error in API call to AttackIQ. '
DEFAULT_PAGE_SIZE = 10
# Transformation dicts: map raw AttackIQ API keys (dotted keys denote
# nested fields) to the CamelCase context keys used by Demisto. See
# build_transformed_dict() for how dotted keys and nested lists are
# interpreted.
ASSESSMENTS_TRANS = {
    'id': 'Id',
    'name': 'Name',
    'user': 'User',
    'users': 'Users',
    'owner': 'Owner',
    'groups': 'Groups',
    'creator': 'Creator',
    'created': 'Created',
    'end_date': 'EndDate',
    'modified': 'Modified',
    'start_date': 'StartDate',
    'description': 'Description',
    'project_state': 'AssessmentState',
    'master_job_count': 'MasterJobCount',
    'default_schedule': 'DefaultSchedule',
    'default_asset_count': 'DefaultAssetCount',
    'project_template.id': 'AssessmentTemplateId',
    'default_asset_group_count': 'DefaultAssetGroupCount',
    'project_template.company': 'AssessmentTemplateCompany',
    'project_template.created': 'AssessmentTemplateCreated',
    'project_template.modified': 'AssessmentTemplateModified',
    'project_template.template_name': 'AssessmentTemplateName',
    'project_template.default_schedule': 'AssessmentTemplateDefaultSchedule',
    'project_template.template_description': 'AssessmentTemplateDescription'
}
TESTS_TRANS = {
    'id': 'Id',
    'name': 'Name',
    'description': 'Description',
    'project': 'Assessment',
    'total_asset_count': 'TotalAssetCount',
    'cron_expression': 'CronExpression',
    'runnable': 'Runnable',
    'last_result': 'LastResult',
    'user': 'User',
    'created': 'Created',
    'modified': 'Modified',
    'using_default_schedule': 'UsingDefaultSchedule',
    'using_default_assets': 'UsingDefaultAssets',
    'latest_instance_id': 'LatestInstanceId',
    # Nested dicts describe per-item key mappings for list fields.
    'scenarios': {
        'name': 'Name',
        'id': 'Id'
    },
    'assets': {
        'id': 'Id',
        'ipv4_address': 'Ipv4Address',
        'hostname': 'Hostname',
        'product_name': 'ProductName',
        'modified': 'Modified',
        'status': 'Status'
    }
}
TEST_STATUS_TRANS = {
    'detected': 'Detected',
    'failed': 'Failed',
    'finished': 'Finished',
    'passed': 'Passed',
    'errored': 'Errored',
    'total': 'Total'
}
TEST_RESULT_TRANS = {
    'id': 'Id',
    'modified': 'Modified',
    'project_id': 'Assessment.Id',
    'project_name': 'Assessment.Name',
    'scenario.id': 'Scenario.Id',
    'scenario.name': 'Scenario.Name',
    'scenario.description': 'Scenario.Description',
    'asset.id': 'Asset.Id',
    'asset.ipv4_address': 'Asset.Ipv4Address',
    'asset.hostname': 'Asset.Hostname',
    'asset.product_name': 'Asset.ProductName',
    'asset.modified': 'Asset.Modified',
    'asset_group': 'Asset.AssetGroup',
    'job_state_name': 'JobState',
    'outcome_name': 'Outcome'
}
''' HELPER FUNCTIONS '''
def http_request(method, url_suffix, params=None, data=None):
    """Issue a request to the AttackIQ API and return the parsed JSON
    body ('' for 204 responses).

    Raises HTTPError for non-200/201 status codes; connection errors
    and unparsable bodies are routed through return_error().
    """
    url = urljoin(SERVER, url_suffix)
    LOG(f'AttackIQ is attempting {method} request sent to {url} with params:\n{json.dumps(params, indent=4)} \n '
        f'data:\n"{json.dumps(data)}')
    try:
        res = requests.request(
            method,
            url,
            verify=USE_SSL,
            params=params,
            data=data,
            headers=HEADERS
        )
        # 204 No Content -- nothing to parse.
        if res.status_code == 204:
            return ''
        # Handle error responses gracefully
        if res.status_code not in {200, 201}:
            error_reason = get_http_error_reason(res)
            raise HTTPError(f'[{res.status_code}] - {error_reason}')
        try:
            return res.json()
        except JSONDecodeError:
            return_error('Response contained no valid body. See logs for more information.',
                         error=f'AttackIQ response body:\n{res.content!r}')
    except requests.exceptions.ConnectionError as e:
        LOG(str(e))
        return_error('Encountered issue reaching the endpoint, please check that you entered the URL correctly.')
def get_http_error_reason(res):
    """
    Get error reason from an AttackIQ http error
    Args:
        res: AttackIQ response
    Returns: Reason for http error
    """
    err_reason = res.reason
    # An unparsable body just means there is no extra detail to append.
    try:
        body = res.json()
    except JSONDecodeError:
        return err_reason
    if 'detail' in body:
        err_reason = f'{err_reason}. {body["detail"]}'
    return err_reason
def build_transformed_dict(src, trans_dict):
    """Builds a dictionary according to a conversion map
    Args:
        src (dict): original dictionary to build from
        trans_dict (dict): dict in the format { 'OldKey': 'NewKey', ...}
    Returns: src copy with changed keys
    """
    # A list source is transformed element-wise.
    if isinstance(src, list):
        return [build_transformed_dict(x, trans_dict) for x in src]
    res: Dict[str, Any] = {}
    for key, val in trans_dict.items():
        if isinstance(val, dict):
            # handle nested list: the value dict is the per-item map,
            # and the (camel-cased, dotted) key is where the resulting
            # list is placed in the output.
            sub_res = res
            item_val = [build_transformed_dict(item, val) for item in (demisto.get(src, key) or [])]
            key = underscoreToCamelCase(key)
            for sub_key in key.split('.')[:-1]:
                if sub_key not in sub_res:
                    sub_res[sub_key] = {}
                sub_res = sub_res[sub_key]
            sub_res[key.split('.')[-1]] = item_val
        elif '.' in val:
            # handle nested vals: 'A.B' targets res['A']['B'].
            update_nested_value(res, val, to_val=demisto.get(src, key))
        else:
            res[val] = demisto.get(src, key)
    return res
def create_invalid_id_err_msg(orig_err, error_codes):
    """
    Creates an 'invalid id' error message
    Args:
        orig_err (str): The original error message
        error_codes (list): List of error codes to look for
    Returns (str): Error message for invalid id
    """
    parts = [API_ERR_MSG]
    for err_code in error_codes:
        if err_code in orig_err:
            # Any matching code means the id was probably invalid.
            parts.append('This may be happen if you provided an invalid id.\n')
            break
    parts.append(orig_err)
    return ''.join(parts)
def update_nested_value(src_dict, to_key, to_val):
    """
    Updates nested value according to transformation dict structure where 'a.b' key will create {'a': {'b': val}}
    Args:
        src_dict (dict): The original dict
        to_key (str): Key to transform to (expected to contain '.' to mark nested)
        to_val: The value that'll be put under the nested key
    """
    *parent_keys, leaf_key = to_key.split('.')
    node = src_dict
    # Walk/create the chain of intermediate dicts, then set the leaf.
    for part in parent_keys:
        node = node.setdefault(part, {})
    node[leaf_key] = to_val
def get_page_number_and_page_size(args):
    """
    Get arguments page_number and page_size from args
    Args:
        args (dict): Argument dictionary, with possible page_number and page_size keys
    Returns (int, int): Return a tuple of (page_number, page_size)
    """
    page = args.get('page_number', 1)
    page_size = args.get('page_size', DEFAULT_PAGE_SIZE)
    err_msg_format = 'Error: Invalid {arg} value. "{val}" Is not a valid value. Please enter a positive integer.'
    # NOTE(review): the code below assumes return_error() aborts
    # execution (raises/exits); otherwise an invalid value would fall
    # through and be returned unvalidated. Confirm against
    # CommonServerPython's return_error semantics.
    try:
        page = int(page)
        if page <= 0:
            raise ValueError()
    except (ValueError, TypeError):
        return_error(err_msg_format.format(arg='page_number', val=page))
    try:
        page_size = int(page_size)
        if page_size <= 0:
            raise ValueError()
    except (ValueError, TypeError):
        return_error(err_msg_format.format(arg='page_size', val=page_size))
    return page, page_size
''' COMMANDS + REQUESTS FUNCTIONS '''
def test_module():
    """
    Performs basic get request to get item samples
    """
    # Any authenticated listing proves connectivity + token validity;
    # http_request raises on failure, so reaching results() means 'ok'.
    http_request('GET', '/v1/assessments')
    demisto.results('ok')
''' COMMANDS MANAGER / SWITCH PANEL '''
def activate_assessment_command():
    """ Implements attackiq-activate-assessment command
    """
    ass_id = demisto.getArg('assessment_id')
    try:
        raw_res = http_request('POST', f'/v1/assessments/{ass_id}/activate')
        # Prefer the API's own message when it returns one.
        hr = raw_res['message'] if 'message' in raw_res else f'Assessment {ass_id} activation was sent successfully.'
        demisto.results(hr)
    except HTTPError as e:
        # 403 on this endpoint typically indicates a bad assessment id.
        return_error(create_invalid_id_err_msg(str(e), ['403']))
def get_assessment_execution_status_command():
    """ Implements attackiq-get-assessment-execution-status command
    """
    ass_id = demisto.getArg('assessment_id')
    try:
        raw_res = http_request('GET', f'/v1/assessments/{ass_id}/is_on_demand_running')
        # The API reports the running flag in the 'message' field.
        ex_status = raw_res.get('message')
        hr = f'Assessment {ass_id} execution is {"" if ex_status else "not "}running.'
        ec = {
            'AttackIQ.Assessment(val.Id === obj.Id)': {
                'Running': ex_status,
                'Id': ass_id
            }
        }
        return_outputs(hr, ec, raw_res)
    except HTTPError as e:
        return_error(create_invalid_id_err_msg(str(e), ['403']))
def get_test_execution_status_command():
    """ Implements attackiq-get-test-execution-status command
    """
    test_id = demisto.getArg('test_id')
    try:
        raw_test_status = http_request('GET', f'/v1/tests/{test_id}/get_status')
        # Rename raw status keys to the CamelCase context convention.
        test_status = build_transformed_dict(raw_test_status, TEST_STATUS_TRANS)
        test_status['Id'] = test_id
        hr = tableToMarkdown(f'Test {test_id} status', test_status)
        return_outputs(hr, {'AttackIQTest(val.Id === obj.Id)': test_status}, raw_test_status)
    except HTTPError as e:
        # This endpoint answers 500 for unknown test ids.
        return_error(create_invalid_id_err_msg(str(e), ['500']))
def build_test_results_hr(test_results, test_id, page, tot_pages):
    """
    Creates test results human readable
    Args:
        page (int): Current page
        tot_pages (int): Total pages
        test_results (list): Results of the test (after being transformed)
        test_id (str): ID of the test
    Returns: Human readable of test results
    """
    keys = ['Assessment Name', 'Scenario Name', 'Hostname', 'Asset IP', 'Job State', 'Modified', 'Outcome']
    test_results_mod = []
    for t_res in test_results:
        # Each entry was shaped by TEST_RESULT_TRANS, so nested dicts
        # Assessment/Asset/Scenario are expected to be present.
        assessment = t_res.get('Assessment')
        asset = t_res.get('Asset')
        scenario = t_res.get('Scenario')
        hr_items = {
            keys[0]: assessment.get('Name'),
            keys[1]: scenario.get('Name'),
            keys[2]: asset.get('Hostname'),
            keys[3]: asset.get('Ipv4Address'),
            keys[4]: demisto.get(t_res, 'JobState'),
            keys[5]: t_res.get('Modified'),
            keys[6]: demisto.get(t_res, 'Outcome.Name')
        }
        test_results_mod.append(hr_items)
    return tableToMarkdown(f'Test Results for {test_id}\n ### Page {page}/{tot_pages}', test_results_mod, keys)
def get_test_results(page, page_size, test_id, show_last_res):
    """Fetch one page of test results from the AttackIQ API.

    Args:
        page (int): Page number.
        page_size (int): Number of results per page.
        test_id (int): ID of the test.
        show_last_res (bool): Whether to return only the last result.

    Returns:
        dict: Raw API response holding the test results.
    """
    query_params = {
        'page': page,
        'page_size': page_size,
        'test_id': test_id,
        'show_last_result': show_last_res,
    }
    return http_request('GET', '/v1/results', params=query_params)
def get_test_results_command(args=None):
    """ Implements attackiq-get-test-results command

    Args:
        args (dict): Command arguments; defaults to ``demisto.args()``.
            The default is resolved at call time — the original
            ``args=demisto.args()`` default was evaluated once at import
            time, freezing the arguments of the first command invocation.
    """
    if args is None:
        args = demisto.args()
    test_id = args.get('test_id')
    outcome_filter = args.get('outcome_filter')
    # Use the same ``args`` dict for paging instead of re-reading demisto.args().
    page, page_size = get_page_number_and_page_size(args)
    try:
        raw_test_res = get_test_results(page, page_size, test_id, args.get('show_last_result') == 'True')
        test_cnt = raw_test_res.get('count')
        if test_cnt == 0:
            return_outputs('No results were found', {})
        else:
            total_pages = math.ceil(test_cnt / page_size)
            # Requesting a page past the end would otherwise go negative.
            remaining_pages = max(total_pages - page, 0)
            test_res = build_transformed_dict(raw_test_res['results'], TEST_RESULT_TRANS)
            if outcome_filter:
                test_res = list(filter(lambda x: x.get('Outcome') == outcome_filter, test_res))
            context = {
                'AttackIQTestResult(val.Id === obj.Id)': test_res,
                'AttackIQTestResult(val.Count).Count': test_cnt,
                'AttackIQTestResult(val.RemainingPages).RemainingPages': remaining_pages
            }
            hr = build_test_results_hr(test_res, test_id, page, total_pages)
            return_outputs(hr, context, raw_test_res)
    except HTTPError as e:
        return_error(create_invalid_id_err_msg(str(e), ['500']))
def get_assessments(page='1', assessment_id=None, page_size=DEFAULT_PAGE_SIZE):
    """Fetch assessments from AttackIQ.

    Args:
        page (str or int): Page number to fetch.
        assessment_id (str): Optional; when given, only the assessment with
            the matching ID is fetched and paging parameters are ignored.
        page_size (int): Number of assessments per page.

    Returns:
        dict: Raw API response.
    """
    if assessment_id:
        return http_request('GET', f'/v1/assessments/{assessment_id}')
    query_params = {
        'page_size': page_size,
        'page': page
    }
    return http_request('GET', '/v1/assessments', params=query_params)
def list_assessments_command():
    """Implement the attackiq-list-assessments command.

    Lists one page of assessments and reports how many pages remain.
    """
    page, page_size = get_page_number_and_page_size(demisto.args())
    raw_response = get_assessments(page=page, page_size=page_size)
    assessments = build_transformed_dict(raw_response.get('results'), ASSESSMENTS_TRANS)
    count = raw_response.get('count')
    total_pages = math.ceil(count / page_size)
    remaining_pages = max(total_pages - page, 0)
    context = {
        'AttackIQ.Assessment(val.Id === obj.Id)': assessments,
        'AttackIQ.Assessment(val.Count).Count': count,
        'AttackIQ.Assessment(val.RemainingPages).RemainingPages': remaining_pages
    }
    readable = tableToMarkdown(f'AttackIQ Assessments Page {page}/{total_pages}', assessments,
                               headers=['Id', 'Name', 'Description', 'User', 'Created', 'Modified'])
    return_outputs(readable, context, raw_response)
def get_assessment_by_id_command():
    """Implement the attackiq-get-assessment-by-id command.

    Fetches a single assessment by ID and posts it to the war room.
    """
    assessment_id = demisto.getArg('assessment_id')
    try:
        raw_response = get_assessments(assessment_id=assessment_id)
        assessment = build_transformed_dict(raw_response, ASSESSMENTS_TRANS)
        readable = tableToMarkdown(f'AttackIQ Assessment {assessment_id}', assessment,
                                   headers=['Id', 'Name', 'Description', 'User', 'Created', 'Modified'])
        return_outputs(readable, {'AttackIQ.Assessment(val.Id === obj.Id)': assessment}, raw_response)
    except HTTPError as http_err:
        return_error(create_invalid_id_err_msg(str(http_err), ['403']))
def build_tests_hr(tests_res, ass_id, page_num, tot_pages):
    """
    Creates tests human readable

    Args:
        tot_pages (int): Total pages
        page_num (int): Current page
        ass_id (str): Assessment ID
        tests_res (list): Transformed result of test

    Returns: Human readable string (md format) of tests
    """
    if not tests_res:
        # ``hr`` below always starts with a non-empty header, so the original
        # ``if not hr`` fallback could never fire; test the list instead.
        return 'Found no tests'
    hr = f'# Assessment {ass_id} tests\n## Page {page_num} / {tot_pages}\n'
    for test in tests_res:
        # Copy so popping Assets/Scenarios does not mutate the caller's data.
        test = dict(test)
        assets = test.pop('Assets', {})
        scenarios = test.pop('Scenarios', {})
        test_name = test.get('Name')
        hr += tableToMarkdown(f'Test - {test_name}', test,
                              headers=['Id', 'Name', 'Created', 'Modified', 'Runnable', 'LastResult'],
                              headerTransform=pascalToSpace)
        hr += tableToMarkdown(f'Assets ({test_name})', assets)
        hr += tableToMarkdown(f'Scenarios ({test_name})', scenarios)
    return hr
def list_tests_by_assessment(params):
    """Thin wrapper over GET /v1/tests; ``params`` becomes the query string."""
    return http_request('GET', '/v1/tests', params=params)
def list_tests_by_assessment_command():
    """Implement the attackiq-list-tests-by-assessment command.

    Lists one page of tests belonging to an assessment, together with the
    remaining page count.
    """
    page, page_size = get_page_number_and_page_size(demisto.args())
    assessment_id = demisto.getArg('assessment_id')
    raw_response = list_tests_by_assessment({
        'project': assessment_id,
        'page_size': page_size,
        'page': page
    })
    count = raw_response.get('count')
    if count == 0:
        return_outputs('No results were found', {})
        return
    tests = build_transformed_dict(raw_response.get('results'), TESTS_TRANS)
    total_pages = math.ceil(count / page_size)
    remaining_pages = max(total_pages - page, 0)
    context = {
        'AttackIQTest(val.Id === obj.Id)': tests,
        'AttackIQTest(val.Count).Count': count,
        'AttackIQTest(val.RemainingPages).RemainingPages': remaining_pages
    }
    readable = build_tests_hr(tests, assessment_id, page, total_pages)
    return_outputs(readable, context, raw_response)
def run_all_tests_in_assessment_command():
    """Implement the attackiq-run-all-tests-in-assessment command.

    Triggers a run of every test in the given assessment.
    """
    assessment_id = demisto.args().get('assessment_id')
    on_demand_only = demisto.args().get('on_demand_only')
    try:
        raw_response = http_request('POST', f'/v1/assessments/{assessment_id}/run_all_tests',
                                    params={'on_demand_only': on_demand_only == 'True'})
        fallback = f'Request to run all tests for assessment {assessment_id} was sent successfully.'
        # ``get`` only falls back when the key is absent, matching the
        # original membership test.
        demisto.results(raw_response.get('message', fallback))
    except HTTPError as http_err:
        return_error(create_invalid_id_err_msg(str(http_err), ['403']))
@logger
def list_templates_command():
    """List every assessment template known to AttackIQ."""
    raw_response = http_request('GET', '/v1/project_template_types')
    # Templates are nested one level below their template group.
    templates = [
        {
            'ID': template.get('id'),
            'Name': template.get('template_name'),
            'Description': template.get('template_description'),
            'ProjectName': template.get('project_name'),
            'ProjectDescription': template.get('project_description'),
            'Hidden': template.get('hidden')
        }
        for template_group in raw_response.get('results', [])
        for template in template_group.get('project_templates', [])
    ]
    context = {'AttackIQ.Template(val.ID && val.ID === obj.ID)': templates}
    readable = tableToMarkdown('Templates:', templates,
                               ['ID', 'Name', 'Description', 'ProjectName', 'ProjectDescription'])
    return_outputs(readable, context, raw_response)
@logger
def list_assets_command():
    """List every asset configured in AttackIQ."""
    raw_response = http_request('GET', '/v1/assets')
    assets = []
    for asset in raw_response.get('results', []):
        entry = {
            'ID': asset.get('id', ''),
            'Description': asset.get('description', ''),
            'IPv4': asset.get('ipv4_address', ''),
            'IPv6': asset.get('ipv6_address', ''),
            'MacAddress': asset.get('mac_address', ''),
            'ProcessorArch': asset.get('processor_arch', ''),
            'ProductName': asset.get('product_name', ''),
            'Hostname': asset.get('hostname', ''),
            'Domain': asset.get('domain_name', ''),
            'User': asset.get('user', ''),
            'Status': asset.get('status', ''),
            # Collapse the asset group objects down to ID and name.
            'Groups': [{'ID': group.get('id'), 'Name': group.get('name')}
                       for group in asset.get('asset_groups', [])]
        }
        assets.append(entry)
    context = {'AttackIQ.Asset(val.ID && val.ID === obj.ID)': assets}
    readable = tableToMarkdown('Assets:', assets, ['ID', 'Hostname', 'IPv4', 'MacAddress', 'Domain',
                                                   'Description', 'User', 'Status'])
    return_outputs(readable, context, raw_response)
@logger
def create_assessment_command():
    """Create a new assessment from a template.

    Uses the ``name`` and ``template_id`` command arguments, then fetches
    the created assessment and posts it to the war room.
    """
    payload = {
        "project_name": demisto.args().get('name'),
        "template": demisto.args().get('template_id')
    }
    try:
        response = http_request('POST', '/v1/assessments/project_from_template', data=json.dumps(payload))
    except Exception as err:
        raise ValueError(f"Could not create an assessment. Specifically: {str(err)}")
    assessment_id = response.get('project_id')
    raw_assessment = get_assessments(assessment_id=assessment_id)
    assessment = build_transformed_dict(raw_assessment, ASSESSMENTS_TRANS)
    readable = tableToMarkdown(f'Created Assessment: {assessment_id} successfully.', assessment,
                               headers=['Id', 'Name', 'Description', 'User', 'Created', 'Modified'])
    return_outputs(readable, {'AttackIQ.Assessment(val.Id === obj.Id)': assessment}, raw_assessment)
@logger
def add_assets_to_assessment():
    """Attach assets and/or asset groups to an assessment's defaults."""
    assessment_id = demisto.args().get('assessment_id')
    payload = {}
    assets = demisto.args().get('assets')
    if assets:
        payload['assets'] = assets
    asset_groups = demisto.args().get('asset_groups')
    if asset_groups:
        payload['asset_groups'] = asset_groups
    if not payload:
        raise ValueError("No asset or asset groups were specified.")
    try:
        res = http_request('POST', f'/v1/assessments/{assessment_id}/update_defaults', data=json.dumps(payload))
        demisto.results(res.get('message', ''))
    except Exception as err:
        # The API answers 403 both for unknown assessments and unknown assets.
        if '403' in str(err):
            raise ValueError("Could not find either the assessment or one of the assets/asset groups.")
        raise
@logger
def delete_assessment_command():
    """Delete an assessment by its ID."""
    assessment_id = demisto.args().get('assessment_id')
    try:
        http_request('DELETE', f'/v1/assessments/{assessment_id}')
        demisto.results(f"Deleted assessment {assessment_id} successfully.")
    except Exception as err:
        # A 403 from the API indicates the assessment does not exist.
        if '403' in str(err):
            raise ValueError(f"Could not find the assessment {assessment_id}")
        raise
def main():
    """Route the incoming Demisto command to its implementation."""
    handle_proxy()
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        # Dispatch table instead of an if/elif chain; an unknown function
        # name raises inside the try, just as a direct call would have.
        commands = {
            'test-module': test_module,
            'attackiq-activate-assessment': activate_assessment_command,
            'attackiq-get-assessment-execution-status': get_assessment_execution_status_command,
            'attackiq-get-test-execution-status': get_test_execution_status_command,
            'attackiq-get-test-results': get_test_results_command,
            'attackiq-list-assessments': list_assessments_command,
            'attackiq-get-assessment-by-id': get_assessment_by_id_command,
            'attackiq-list-tests-by-assessment': list_tests_by_assessment_command,
            'attackiq-run-all-tests-in-assessment': run_all_tests_in_assessment_command,
            'attackiq-list-assessment-templates': list_templates_command,
            'attackiq-list-assets': list_assets_command,
            'attackiq-create-assessment': create_assessment_command,
            'attackiq-add-assets-to-assessment': add_assets_to_assessment,
            'attackiq-delete-assessment': delete_assessment_command,
        }
        if command in commands:
            commands[command]()
        else:
            return_error(f'Command {command} is not supported.')
    except HTTPError as e:
        # e is expected to contain parsed error message
        err = f'{API_ERR_MSG}{str(e)}'
        return_error(err)
    except Exception as e:
        message = f'Unexpected error: {str(e)}, traceback: {traceback.format_exc()}'
        return_error(message)
# python2 uses __builtin__ python3 uses builtins
# (XSOAR presumably runs integration code via exec, so __name__ is the
# builtins module name rather than "__main__" — hence the triple check.)
if __name__ in ("__builtin__", "builtins", "__main__"):
    main()
| StarcoderdataPython |
1779314 | """
String generation functions.
"""
import binascii
import hashlib
import os
import random
import string
def generate_password(length=20):
    """
    Generate a random password from ``length`` bytes of urandom data.

    Beware that the result is returned as a hexadecimal string of twice
    the given ``length`` (hexlify encodes each byte as two characters).
    """
    return binascii.hexlify(os.urandom(length))
def generate_random_alphanum(length=10):
    """Generate a random string of ``length`` ASCII letters and digits.

    Uses ``range`` instead of the Python-2-only ``xrange`` so the helper
    works on both Python 2 and Python 3.
    """
    charset = string.ascii_letters + string.digits
    return ''.join(random.choice(charset) for _ in range(length))
def gen_random_id(length=10):
    """Generate a random id of ``length`` lowercase ASCII letters and digits.

    Uses ``range`` instead of the Python-2-only ``xrange`` so the helper
    works on both Python 2 and Python 3.
    """
    charset = string.ascii_lowercase + string.digits
    return ''.join(random.choice(charset) for _ in range(length))
def gen_dataset_name():
    """Generate a random dataset name of the form ``dataset-<random id>``."""
    return 'dataset-' + gen_random_id()
def gen_picture(s, size=200):
    """Generate URL to picture from some text hash (currently robohash)."""
    return gen_robohash(s, size)
def gen_gravatar(s, size=200):
    """Return the gravatar identicon URL for the md5 hash of ``s``."""
    digest = hashlib.md5(s).hexdigest()
    url_template = 'http://www.gravatar.com/avatar/%s.jpg?d=identicon&f=y&s=%s'
    return url_template % (digest, size)
def gen_robohash(s, size=200):
    """Return the robohash picture URL for the sha1 hash of ``s``."""
    digest = hashlib.sha1(s).hexdigest()
    # The size appears twice in the query string (``<size>x<size>``).
    return ('http://robohash.org/%(h)s.png?size=%(s)sx%(s)s&bgset=bg2&set=set1'
            % {'h': digest, 's': size})
| StarcoderdataPython |
5102950 | import ast
import macropy.core.macros
from macropy.core.hquotes import macros, hq, unhygienic
from macropy.core import Captured
# Macro registry for this module; users import ``macros`` from here.
macros = macropy.core.macros.Macros()
# Module-level constant captured hygienically by the hq-quoted macros below.
value = 2
def double(x):
    """Return ``x`` multiplied by the module-level ``value``."""
    return x * value
@macros.expr
def expand(tree, gen_sym, **kw):
    """Expression macro: rewrite the target expression ``e`` into
    ``str(value) + "x: " + double(e)``.

    ``hq`` captures ``value`` and ``double`` hygienically from this module,
    so the expansion also works inside the macro user's module.
    """
    tree = hq[str(value) + "x: " + double(ast_literal[tree])]
    return tree
@macros.block
def expand(tree, gen_sym, **kw):
    """Block macro: discard the block body and expand to ``return v``,
    where ``v`` is hygienically bound to 5.

    NOTE(review): this redefines the ``expand`` name from the expression
    macro above; macropy registers each via its decorator, but confirm
    the shadowing of the module-level name is intentional.
    """
    v = 5
    with hq as new_tree:
        return v
    return new_tree
@macros.block
def expand_unhygienic(tree, gen_sym, **kw):
    """Block macro expanding to ``x = x + v`` where ``x`` is deliberately
    unhygienic (it resolves in the macro user's scope) while ``v`` is
    hygienically bound to 5.
    """
    v = 5
    with hq as new_tree:
        unhygienic[x] = unhygienic[x] + v
    return new_tree
| StarcoderdataPython |
4856117 | <gh_stars>0
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import six
from django.utils.functional import lazy
from fancy_cronfield.fields import CronField
from fancy_cronfield.utils.compat import DJANGO_1_6, DJANGO_1_5
from fancy_cronfield.widgets import CronWidget
class CronFieldTests(TestCase):
    """Unit tests for ``fancy_cronfield.fields.CronField``."""
    def test_get_internal_type(self):
        # The field is stored as a plain CharField at the database level.
        field = CronField()
        self.assertEqual(field.get_internal_type(), 'CharField')
    def test_to_python(self):
        # A valid 5-part cron expression is returned unchanged.
        field = CronField()
        self.assertEqual(field.to_python('0 0 1 1 *'), '0 0 1 1 *')
    def test_get_prep_value(self):
        field = CronField()
        # The lazy-object path is only exercised on newer Django versions,
        # presumably because older ones cannot resolve lazy values here —
        # confirm against fancy_cronfield.utils.compat.
        if DJANGO_1_5 or DJANGO_1_6:
            value = field.get_prep_value(six.text_type('0 0 1 1 *'))
        else:
            lazy_func = lazy(lambda: u'0 0 1 1 *', six.text_type)
            value = field.get_prep_value(lazy_func())
        self.assertIsInstance(value, six.string_types)
    def test_get_prep_value_int(self):
        # Integers (lazy or not) must be coerced to text for storage.
        field = CronField()
        if DJANGO_1_5 or DJANGO_1_6:
            value = field.get_prep_value(int(0))
        else:
            lazy_func = lazy(lambda: 0, int)
            value = field.get_prep_value(lazy_func())
        self.assertIsInstance(value, six.text_type)
    def test_max_length_passed_to_formfield(self):
        """
        Test that CronField passes its max_length attribute to
        form fields created using its .formfield() method.
        """
        cf1 = CronField()
        cf2 = CronField(max_length=256)
        # 120 is the field's default max_length.
        self.assertEqual(cf1.formfield().max_length, 120)
        self.assertEqual(cf2.formfield().max_length, 256)
    def test_widget_passed_to_formfield(self):
        # The custom CronWidget is the default form widget.
        f = CronField()
        self.assertEqual(f.formfield().widget.__class__, CronWidget)
    def test_raises_error_on_empty_string(self):
        f = CronField()
        self.assertRaises(ValidationError, f.clean, "", None)
    def test_cleans_empty_string_when_blank_true(self):
        f = CronField(blank=True)
        self.assertEqual('', f.clean('', None))
    def test_raises_error_on_invalid_input(self):
        f = CronField()
        self.assertRaises(ValidationError, f.clean, 'test', None)
    def test_raises_error_on_empty_input(self):
        f = CronField(null=False)
        self.assertRaises(ValidationError, f.clean, None, None)
| StarcoderdataPython |
311184 | # Please setup this dependent package 'https://github.com/BorealisAI/advertorch'
import torch.nn as nn
from advertorch.attacks import LinfPGDAttack, FABAttack, LinfFABAttack
from advertorch.attacks.utils import multiple_mini_batch_attack
from advertorch_examples.utils import get_cifar10_test_loader
from models.wideresnet import *
import argparse
parser = argparse.ArgumentParser(description='Adversarial Training')
parser.add_argument('--model-path', type=str, help='model path')
parser.add_argument('--loss', type=str, default='pgdHE')
parser.add_argument('--attack', type=str, default='FAB')
parser.add_argument('--iters', type=int, default=20)
parser.add_argument('--norm', type=str, default='Linf')
args = parser.parse_args()
# L-infinity perturbation budget: the standard CIFAR-10 setting of 8/255.
eps=8./255.
# Models trained with trades/pgd/alp use the plain WideResNet; other losses
# presumably used feature/weight normalization (use_FNandWN) — confirm
# against the training code.
if args.loss == 'trades' or args.loss == 'pgd' or args.loss == 'alp':
    print("normalize False")
    model = WideResNet()
else:
    print("normalize True")
    model = WideResNet(use_FNandWN = True)
model.load_state_dict(torch.load(args.model_path))
loader = get_cifar10_test_loader(batch_size=100)
# FAB searches for minimal-norm adversarial perturbations; PGD uses a fixed
# 20-step attack within the eps ball.
if args.attack == 'FAB':
    adversary = LinfFABAttack(model, n_restarts=1, n_iter=args.iters,
                              alpha_max=0.1, eta=1.05, beta=0.9, loss_fn=None, verbose=False)
else:
    adversary = LinfPGDAttack(
        model, loss_fn=nn.CrossEntropyLoss(reduction="sum"), eps=eps,
        nb_iter=20, eps_iter=2. / 255, rand_init=False, clip_min=0.0, clip_max=1.0,
        targeted=False)
label, pred, advpred, dist = multiple_mini_batch_attack(
    adversary, loader, device="cuda",norm='Linf')
# Robust accuracy counts the samples whose adversarial distance exceeds eps.
print("Natural Acc: {:.2f}, Robust Acc: {:.2f}".format(100. * (label == pred).sum().item() / len(label),
                                                       100. * (dist > eps).sum().item()/len(dist)))
| StarcoderdataPython |
3460592 | <gh_stars>0
"""
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
import dj_database_url
from dotenv import load_dotenv
import django_heroku
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Load environment variables from a local .env file, overriding existing ones.
load_dotenv(override=True)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG is enabled only when the env var is the literal string 'True'.
DEBUG = os.environ['DEBUG'] == 'True'
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'visutrader.netlify.app', 'visutrader-backend.herokuapp.com']
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # Third party apps
    'allauth',
    'allauth.account',
    'dj_rest_auth.registration',
    'rest_framework',
    'rest_framework.authtoken',
    'dj_rest_auth',
    'corsheaders',
    # Local apps
    'backend',
    'users',
    'wallets',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # Third party middlewares
    'corsheaders.middleware.CorsMiddleware',
    'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'backend/templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Connection details come from the DATABASE_URL env var (dj-database-url).
DATABASES = {
    'default': dj_database_url.config(conn_max_age=600)
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Singapore'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Additional custom configurations
AUTH_USER_MODEL = 'users.UserAccount'
AUTHENTICATION_BACKENDS = [
    # allauth specific authentication methods, such as login by e-mail
    'allauth.account.auth_backends.AuthenticationBackend',
    # Needed to login by username in Django admin, regardless of allauth
    'django.contrib.auth.backends.ModelBackend',
]
# Email settings
# Email verification via Django's console
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# The "real" email verification
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = os.environ['EMAIL_ADDRESS']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_PASSWORD']
# Third party settings
CORS_ALLOWED_ORIGINS = [
    # Local development
    "http://127.0.0.1:3000",
    "http://localhost:3000",
    # Deployment websites
    "https://visutrader.netlify.app",
    "https://visutrader-backend.herokuapp.com",
]
# Not a good practice, but quick fix for CORS issue.
# SECURITY: this overrides the CORS_ALLOWED_ORIGINS allowlist above and
# permits requests from any origin.
# TODO: Fix this
CORS_ALLOW_ALL_ORIGINS = True
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.TokenAuthentication',
    ]
}
REST_AUTH_SERIALIZERS = {
    'USER_DETAILS_SERIALIZER': 'users.serializers.UserSerializer',
}
REST_AUTH_REGISTER_SERIALIZERS = {
    'REGISTER_SERIALIZER': 'users.serializers.UserRegistrationSerializer',
}
# Some issues regarding SSL when using django_heroku
# Quick fix: Only run it when it's on deployment environment
if (os.environ['ENV'] != 'development'):
    django_heroku.settings(locals())
SITE_ID = 1
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_EMAIL_SUBJECT_PREFIX = '[VisuTrader] - '
ACCOUNT_USERNAME_REQUIRED = True
OLD_PASSWORD_FIELD_ENABLED = True
# Frontend login page; the host depends on the deployment environment.
LOGIN_URL = ('http://localhost:3000' if os.environ['ENV'] == 'development' else 'https://visutrader.netlify.app') + '/login'
3486347 | <reponame>DeuroIO/Deuro-scikit-learn
"""
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
# Fix the RNG seed so the generated dataset is reproducible.
np.random.seed(1)
def f(x):
    """The ground-truth function to predict: ``x * sin(x)``."""
    return np.sin(x) * x
#----------------------------------------------------------------------
#  First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
# Heteroscedastic noise: the noise scale itself varies per sample.
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
# Quantile level: alpha=0.95 gives the upper bound, 1-alpha=0.05 the lower
# bound, so together they form a 90% prediction interval.
alpha = 0.95
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
                                n_estimators=250, max_depth=3,
                                learning_rate=.1, min_samples_leaf=9,
                                min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
# Refit with least-squares loss to get the mean prediction.
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% confidence interval based on
# the MSE
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
         np.concatenate([y_upper, y_lower[::-1]]),
         alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| StarcoderdataPython |
4932256 | # -*- coding: utf-8 -*-
# 模式-设备数据
# 作者: 三石
# 时间: 2022-01-21
from pydantic import BaseModel, conint
class SchemaUpdateCurrent(BaseModel):
    """Mode - update current device data (模式-更新当前数据)."""
    # Address, constrained to the range 0..9999.
    address: conint(ge=0, le=9999)
    # Latest value, constrained to the range 0..99.
    value: conint(ge=0, le=99)
| StarcoderdataPython |
3556362 | <reponame>c-yan/atcoder
# Union Find 木
from sys import setrecursionlimit
def find(parent, i):
    """Return the root representative of ``i``, compressing the path.

    A root ``r`` is marked by ``parent[r] < 0`` (the value is minus the
    component size).  Every node visited is re-pointed directly at the
    root, exactly as the recursive formulation would do.
    """
    root = i
    while parent[root] >= 0:
        root = parent[root]
    # Second pass: point each node on the walked path straight at the root.
    while parent[i] >= 0:
        parent[i], i = root, parent[i]
    return root
def unite(parent, i, j):
    """Merge the components containing ``i`` and ``j`` (union by size).

    Roots store the negated component size.  The original always attached
    ``i`` under ``j`` despite tracking sizes; attaching the smaller tree
    under the larger one keeps trees shallow, which together with path
    compression in ``find`` gives near-O(1) amortized operations.
    No-op when both nodes are already in the same component.
    """
    i = find(parent, i)
    j = find(parent, j)
    if i == j:
        return
    # Sizes are negative, so the more negative root owns the larger tree;
    # swap so that ``j`` (the surviving root) is the larger one.
    if parent[i] < parent[j]:
        i, j = j, i
    parent[j] += parent[i]
    parent[i] = j
setrecursionlimit(10 ** 6)
N, M = map(int, input().split())
# Edges as 0-based vertex pairs, in removal order.
AB = [[int(c) - 1 for c in input().split()] for _ in range(M)]
parent = [-1] * N
# With no edges at all, every pair is disconnected: C(N, 2) "inconvenience".
inconvenience = N * (N - 1) // 2
result = []
# Offline trick: process the removals as additions in reverse order.
for a, b in AB[::-1]:
    result.append(inconvenience)
    pa, pb = find(parent, a), find(parent, b)
    if pa != pb:
        # Root values are negated sizes, so their product is the number of
        # newly connected (no longer inconvenient) pairs.
        inconvenience -= parent[pa] * parent[pb]
        unite(parent, a, b)
print(*result[::-1])
| StarcoderdataPython |
9670438 | import sys
def main():
    """Program entry point.

    Imports and runs the application; any exception (including a failed
    import of ``src.run``) terminates the process with exit code 1.
    The original ended with a redundant bare ``return``, removed here.
    """
    try:
        from src.run import run
        run()
    except Exception:
        # Collapse every failure into a non-zero exit status.
        # NOTE(review): the exception detail is discarded — consider
        # logging the traceback before exiting.
        sys.exit(1)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
# Emit the greeting twice, matching the original duplicated statements.
for _ in range(2):
    print('niahi')
1896438 | """`Thompson2003Model`, `Thompson2003Spatial` [Thompson2003]_"""
import numpy as np
import copy
from ..utils import Curcio1990Map, sample
from ..models import Model, SpatialModel
from ._thompson2003 import fast_thompson2003
# Log all warnings.warn() at the WARNING level:
import warnings
import logging
# Route warnings.warn() messages through the logging system ("py.warnings").
logging.captureWarnings(True)
class Thompson2003Spatial(SpatialModel):
    """Scoreboard model of [Thompson2003]_ (spatial module only)
    Implements the scoreboard model described in [Thompson2003]_, where all
    percepts are circular disks of a given size, and a fraction of electrodes
    may randomly drop out.
    .. note ::
        Use this class if you want to combine the spatial model with a temporal
        model.
        Use :py:class:`~pulse2percept.models.Thompson2003Model` if you want
        a standalone model.
    Parameters
    ----------
    radius : double, optional
        Disk radius describing phosphene size (microns).
        If None, disk diameter is chosen as the electrode-to-electrode spacing
        (works only for implants with a ``shape`` attribute) with a 5% gap.
    dropout : int or float, optional
        If an int, number of electrodes to randomly drop out every frame.
        If a float between 0 and 1, the fraction of electrodes to randomly drop
        out every frame.
    xrange : (x_min, x_max), optional
        A tuple indicating the range of x values to simulate (in degrees of
        visual angle). In a right eye, negative x values correspond to the
        temporal retina, and positive x values to the nasal retina. In a left
        eye, the opposite is true.
    yrange : tuple, (y_min, y_max), optional
        A tuple indicating the range of y values to simulate (in degrees of
        visual angle). Negative y values correspond to the superior retina,
        and positive y values to the inferior retina.
    xystep : int, double, tuple, optional
        Step size for the range of (x,y) values to simulate (in degrees of
        visual angle). For example, to create a grid with x values [0, 0.5, 1]
        use ``xrange=(0, 1)`` and ``xystep=0.5``.
    grid_type : {'rectangular', 'hexagonal'}, optional
        Whether to simulate points on a rectangular or hexagonal grid
    retinotopy : :py:class:`~pulse2percept.utils.VisualFieldMap`, optional
        An instance of a :py:class:`~pulse2percept.utils.VisualFieldMap`
        object that provides ``ret2dva`` and ``dva2ret`` methods.
        By default, :py:class:`~pulse2percept.utils.Curcio1990Map` is
        used.
    n_gray : int, optional
        The number of gray levels to use. If an integer is given, k-means
        clustering is used to compress the color space of the percept into
        ``n_gray`` bins. If None, no compression is performed.
    .. important ::
        If you change important model parameters outside the constructor (e.g.,
        by directly setting ``model.xrange = (-10, 10)``), you will have to call
        ``model.build()`` again for your changes to take effect.
    """

    def get_default_params(self):
        """Returns all settable parameters of the model"""
        base_params = super(Thompson2003Spatial, self).get_default_params()
        # Model-specific defaults are merged on top of the base defaults.
        params = {'radius': None, 'dropout': None,
                  'retinotopy': Curcio1990Map()}
        return {**base_params, **params}

    def _predict_spatial(self, earray, stim):
        """Predicts the brightness at spatial locations"""
        if not np.allclose([e.z for e in earray.electrode_objects], 0):
            msg = ("Nonzero electrode-retina distances do not have any effect "
                   "on the model output.")
            warnings.warn(msg)
        radius = self.radius
        if radius is None:
            # radius = 0.45 * spacing makes the disk diameter 0.9 * spacing,
            # i.e. the electrode pitch with a 5% gap on either side.
            if not hasattr(earray, 'spacing'):
                raise NotImplementedError
            radius = 0.45 * earray.spacing
        dropout = np.zeros(stim.shape, dtype=np.uint8)
        if self.dropout is not None:
            # Per time step, flag a random subset of electrodes as dropped.
            # The value 255 appears to be the "dropped" marker consumed by
            # the Cython kernel — confirm against fast_thompson2003.
            for t in range(dropout.shape[1]):
                dropout[sample(np.arange(stim.shape[0]), k=self.dropout),
                        t] = 255
        # This does the expansion of a compact stimulus and a list of
        # electrodes to activation values at X,Y grid locations:
        return fast_thompson2003(stim.data,
                                 np.array([earray[e].x for e in stim.electrodes],
                                          dtype=np.float32),
                                 np.array([earray[e].y for e in stim.electrodes],
                                          dtype=np.float32),
                                 self.grid.xret.ravel(),
                                 self.grid.yret.ravel(),
                                 dropout.astype(np.uint8),
                                 radius,
                                 self.thresh_percept)
class Thompson2003Model(Model):
    """Scoreboard model of [Thompson2003]_ (standalone model)
    Implements the scoreboard model described in [Thompson2003]_, where all
    percepts are circular disks of a given size, and a fraction of electrodes
    may randomly drop out.
    .. note ::
        Use this class if you want a standalone model.
        Use :py:class:`~pulse2percept.models.Thompson2003Spatial` if you want
        to combine the spatial model with a temporal model.
    radius : double, optional
        Disk radius describing phosphene size (microns).
        If None, disk diameter is chosen as the electrode-to-electrode spacing
        (works only for implants with a ``shape`` attribute) with a 5% gap.
    dropout : int or float, optional
        If an int, number of electrodes to randomly drop out every frame.
        If a float between 0 and 1, the fraction of electrodes to randomly drop
        out every frame.
    xrange : (x_min, x_max), optional
        A tuple indicating the range of x values to simulate (in degrees of
        visual angle). In a right eye, negative x values correspond to the
        temporal retina, and positive x values to the nasal retina. In a left
        eye, the opposite is true.
    yrange : tuple, (y_min, y_max), optional
        A tuple indicating the range of y values to simulate (in degrees of
        visual angle). Negative y values correspond to the superior retina,
        and positive y values to the inferior retina.
    xystep : int, double, tuple, optional
        Step size for the range of (x,y) values to simulate (in degrees of
        visual angle). For example, to create a grid with x values [0, 0.5, 1]
        use ``xrange=(0, 1)`` and ``xystep=0.5``.
    grid_type : {'rectangular', 'hexagonal'}, optional
        Whether to simulate points on a rectangular or hexagonal grid
    retinotopy : :py:class:`~pulse2percept.utils.VisualFieldMap`, optional
        An instance of a :py:class:`~pulse2percept.utils.VisualFieldMap`
        object that provides ``ret2dva`` and ``dva2ret`` methods.
        By default, :py:class:`~pulse2percept.utils.Curcio1990Map` is
        used (via :py:class:`Thompson2003Spatial`'s defaults; the previous
        mention of ``Watson2014Map`` here did not match the code).
    n_gray : int, optional
        The number of gray levels to use. If an integer is given, k-means
        clustering is used to compress the color space of the percept into
        ``n_gray`` bins. If None, no compression is performed.
    .. important ::
        If you change important model parameters outside the constructor (e.g.,
        by directly setting ``model.xrange = (-10, 10)``), you will have to call
        ``model.build()`` again for your changes to take effect.
    """

    def __init__(self, **params):
        # Compose the spatial scoreboard model with no temporal model.
        super(Thompson2003Model, self).__init__(spatial=Thompson2003Spatial(),
                                                temporal=None,
                                                **params)
| StarcoderdataPython |
377949 | <reponame>QiangZiBro/stacked_capsule_autoencoders.pytorch
# -*- coding: UTF-8 -*-
"""
@Project -> File :cnn_models_comparation.pytorch -> __init__
@IDE :PyCharm
@Author :QiangZiBro
@Date :2020/5/23 12:56 下午
@Desc : use factory method to add model,loss,Metrics,Optimizer
"""
import torch
import model.models as module_models
import model.loss as module_loss
import model.metric as module_metric
__all__ = ["makeModel", "makeLoss", "makeMetrics", "makeOptimizer"]
def makeModel(config):
    """Instantiate the network architecture declared under the "arch" config key."""
    architecture = config.init_obj("arch", module_models)
    return architecture
def makeLoss(config):
    """Look up the loss function named by config["loss"] in model.loss."""
    loss_name = config["loss"]
    return getattr(module_loss, loss_name)
def makeMetrics(config):
    """Resolve each metric name in config["metrics"] to a callable from model.metric."""
    names = config["metrics"]
    return [getattr(module_metric, name) for name in names]
def makeOptimizer(config, model):
    """Instantiate the configured optimizer over the model's trainable parameters only."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return config.init_obj("optimizer", torch.optim, trainable)
| StarcoderdataPython |
class ApiResponseMessage:
    """Catalog of canned API response strings.

    Two templates per FHIR-style resource: an "*_EXECUTED" success message and
    a "*_NOT_IMPLEMENT" message whose ``{method}`` placeholder is filled with
    the unsupported HTTP method at format time.
    """
    # Patient
    PATIENT_NOT_IMPLEMENT = 'Patient {method} method is not support on this site.'
    PATIENT_EXECUTED = 'Patient has been executed.'
    PATIENT_IMAGE_EXECUTED = 'Patient image has been executed.'
    PATIENT_IMAGE_NOT_IMPLEMENT = 'Patient image {method} method is not support on this site.'
    # Encounter
    ENCOUNTER_EXECUTED = 'Encounter has been executed.'
    ENCOUNTER_NOT_IMPLEMENT = 'Encounter {method} method is not support on this site.'
    # Appointment
    APPOINTMENT_EXECUTED = 'Appointment has been executed.'
    APPOINTMENT_NOT_IMPLEMENT = 'Appointment {method} method is not support on this site.'
    # Practitioner
    PRACTITIONER_NOT_IMPLEMENT = 'Practitioner {method} method is not support on this site.'
    PRACTITIONER_EXECUTED = 'Practitioner has been executed.'
    # Procedure
    PROCEDURE_EXECUTED = 'Procedure has been executed.'
    PROCEDURE_NOT_IMPLEMENT = 'Procedure {method} method is not support on this site.'
    # Observation (and its specialized sub-kinds below)
    OBSERVATION_EXECUTED = 'Observation has been executed.'
    OBSERVATION_NOT_IMPLEMENT = 'Observation {method} method is not support on this site.'
    VITALSIGNS_EXECUTED = 'Vital Signs has been executed.'
    VITALSIGNS_NOT_IMPLEMENT = 'Vital Signs {method} method is not support on this site.'
    PHYSICALEXAM_EXECUTED = 'Physical Exam has been executed.'
    PHYSICALEXAM_NOT_IMPLEMENT = 'Physical Exam {method} method is not support on this site.'
    LABORATORY_EXECUTED = 'Laboratory has been executed.'
    LABORATORY_NOT_IMPLEMENT = 'Laboratory {method} method is not support on this site.'
    RADIOLOGY_EXECUTED = 'Radiology has been executed.'
    RADIOLOGY_NOT_IMPLEMENT = 'Radiology {method} method is not support on this site.'
    IPDTRIAGE_EXECUTED = 'IPDTriage has been executed.'
    IPDTRIAGE_NOT_IMPLEMENT = 'IPDTriage {method} method is not support on this site.'
    # Allergy
    ALLERGY_EXECUTED = 'Allergy Intolerance has been executed.'
    ALLERGY_NOT_IMPLEMENT = 'Allergy Intolerance {method} method is not support on this site.'
    # Condition (and condition-derived records below)
    CONDITION_NOT_IMPLEMENT = 'Condition {method} method is not support on this site.'
    CONDITION_EXECUTED = 'Condition has been executed.'
    DIAGNOSIS_NOT_IMPLEMENT = 'Diagnosis {method} method is not support on this site.'
    DIAGNOSIS_EXECUTED = 'Diagnosis has been executed.'
    MEDICALHISTORY_NOT_IMPLEMENT = 'Medical History {method} method is not support on this site.'
    MEDICALHISTORY_EXECUTED = 'Medical History has been executed.'
    MEDICATIONDISPENSE_NOT_IMPLEMENT = 'Medication Dispense {method} method is not support on this site.'
    MEDICATIONDISPENSE_EXECUTED = 'Medication Dispense has been executed.'
    CC_NOT_IMPLEMENT = 'CC {method} method is not support on this site.'
    CC_EXECUTED = 'CC has been executed.'
    # Coverage (no NOT_IMPLEMENT counterpart defined for the entries below)
    COVERAGE_EXECUTED = 'Coverage has been executed.'
    # Schedule
    SCHEDULE_EXECUTED = 'Schedule has been executed.'
    # Care plan
    CAREPLAN_EXECUTED = 'Care Plan has been executed.'
3232785 | <reponame>fcnjd/soundrts
import threading
import time
import pygame
from pygame.locals import KEYDOWN
from soundrts.lib.message import Message
from soundrts.lib.sound import DEFAULT_VOLUME
from soundrts.lib.voicechannel import VoiceChannel
class _Voice(object):
    """Spoken-message queue with history navigation for the game UI.

    Messages accumulate in ``msgs``; each ``Message`` carries a ``said`` flag.
    ``current`` indexes the message being (or about to be) spoken, ``history``
    is True while the user browses already-said messages, and ``active`` is
    True while a queued message is playing. ``init()`` must be called before
    any speaking method: it creates the lock and the underlying VoiceChannel.

    NOTE(review): ``msgs``/``active``/``history``/``current`` are class
    attributes; the module creates a single instance (``voice``), so this
    effectively behaves as singleton state.
    """
    msgs = [] # said and unsaid messages
    active = False # currently talking (not just self.item())
    history = False # in "history" mode
    current = 0 # index of the message currently said
    # == len(self.msgs) if no message
    def get_unsaid(self): # index of the first never said message (== len(self.msgs) if no unsaid message)
        for i, m in enumerate(self.msgs):
            if not m.said:
                return i
        return len(self.msgs)
    unsaid = property(get_unsaid)
    def init(self, *args, **kwargs):
        """Create the lock and the VoiceChannel; args go to VoiceChannel."""
        self.lock = threading.Lock()
        self.channel = VoiceChannel(*args, **kwargs)
    def _start_current(self):
        # Stop whatever is playing and let update() start the new current message.
        self.channel.stop()
        self.active = False
        self.update()
    def previous(self):
        """Step one message back in history and replay it."""
        self.history = True
        if self.current > 0:
            self.current -= 1
        self._start_current()
    def _current_message_is_unsaid(self):
        return self._exists(self.current) and not self.msgs[self.current].said
    def next(self, history_only=False):
        """Skip forward one message (giving up the current unsaid one unless
        history_only is True)."""
        if self.active:
            if self._current_message_is_unsaid():
                if not history_only:
                    self._mark_current_as_said() # give up current message
                    self.current += 1
                else:
                    return
            else:
                self.current += 1
        else:
            self.current += 1
        self._start_current()
    def _exists(self, index):
        return index < len(self.msgs)
    def _unsaid_exists(self):
        return self._exists(self.unsaid)
    def alert(self, *args, **keywords):
        """Say immediately; cannot be interrupted by a keypress."""
        self._say_now(interruptible=False, *args, **keywords)
    def important(self, *args, **keywords):
        """Say immediately; interruptible by a keypress."""
        self._say_now(*args, **keywords)
    def confirmation(self, *args, **keywords):
        """Say immediately but leave the interrupting key in the event queue."""
        self._say_now(keep_key=True, *args, **keywords)
    def menu(self, *args, **keywords):
        """Same contract as confirmation(): interruptible, key kept."""
        self._say_now(keep_key=True, *args, **keywords)
    def info(self, list_of_sound_numbers, *args, **keywords):
        """Say sooner or later (queued; empty input is ignored)."""
        if list_of_sound_numbers:
            self.msgs.append(Message(list_of_sound_numbers, *args, **keywords))
            self.update()
    def _say_now(self, list_of_sound_numbers, lv=DEFAULT_VOLUME, rv=DEFAULT_VOLUME, interruptible=True, keep_key=False):
        """Say now (give up saying sentences not said yet) until the end or a keypress."""
        if list_of_sound_numbers:
            with self.lock:
                self._give_up_current_if_partially_said()
                self.channel.play(Message(list_of_sound_numbers, lv, rv))
                while self.channel.get_busy():
                    if interruptible and self._key_hit(keep_key=keep_key):
                        break
                    time.sleep(.1)
                self.channel.update()
                if not interruptible:
                    pygame.event.get([KEYDOWN])
                self.msgs.append(Message(list_of_sound_numbers, lv, rv, said=True))
                self._go_to_next_unsaid() # or next_current?
                self.active = False
                # self.update()
    def _mark_current_as_said(self):
        self.msgs[self.current].said = True
    def _mark_unsaid_as_said(self):
        self.msgs[self.unsaid].said = True
    def _go_to_next_unsaid(self):
        self.current = self.unsaid
    def _give_up_current_if_partially_said(self): # to avoid to many repetitions
        if self._current_message_is_unsaid() and self.channel.is_almost_done():
            self._mark_current_as_said()
    def item(self, list_of_sound_numbers, lv=DEFAULT_VOLUME, rv=DEFAULT_VOLUME):
        """Say now without recording."""
        if list_of_sound_numbers:
            with self.lock:
                self._give_up_current_if_partially_said()
                self._go_to_next_unsaid()
                self.channel.play(Message(list_of_sound_numbers, lv, rv))
                self.active = False
                self.history = False
    def _expired(self, index):
        """True when msgs[index] is stale and should be dropped from the queue."""
        msg = self.msgs[index]
        if msg.has_expired():
            return True
        # look for a more recent message of the same type
        if msg.update_type is not None:
            for m in self.msgs[index + 1:]:
                if m.update_type == msg.update_type:
                    return True
        # look for a more recent, identical message
        for m in self.msgs[index + 1:]:
            if msg.list_of_sound_numbers == m.list_of_sound_numbers:
                return True
        return False
    def _mark_expired_messages_as_said(self):
        for i, m in enumerate(self.msgs):
            if not m.said and self._expired(i):
                m.said = True
        # limit the size of history
        if len(self.msgs) > 200:
            # truncate the list in place
            del self.msgs[:100]
            self.current -= 100
            self.current = max(0, self.current)
    def update(self):
        """Drive the channel: prune stale messages and start the next one."""
        if self.channel.get_busy():
            self.channel.update()
        else:
            self._mark_expired_messages_as_said()
            if self.active: # one message from the queue has just finished
                self._mark_current_as_said()
                self.current += 1
            if not self.history:
                self._go_to_next_unsaid()
            if self._exists(self.current):
                self.channel.play(self.msgs[self.current])
                self.active = True
            else:
                self.active = False
                self.history = False
    def silent_flush(self):
        """Drop everything pending without saying it."""
        self.channel.stop()
        self.active = False
        self.current = len(self.msgs)
        for m in self.msgs:
            m.said = True
    def flush(self, interruptible=True):
        """Block until every unsaid message has been spoken (or skipped by key)."""
        while True:
            self.update()
            if not (self._unsaid_exists() or self.channel.get_busy()):
                break
            elif interruptible and self._key_hit(): # keep_key=False? (and remove next line?)
                if self._unsaid_exists():
                    self.next()
                pygame.event.get([KEYDOWN]) # consequence: _key_hit() == False
            else:
                break
            time.sleep(.1)
        if not interruptible:
            pygame.event.get([KEYDOWN])
    def _key_hit(self, keep_key=True):
        """True if a KEYDOWN event is pending; optionally reposts it."""
        l = pygame.event.get([KEYDOWN])
        if keep_key:
            for e in l: # put events back in queue
                pygame.event.post(e) # will the order be preserved?
        return len(l) != 0
# Module-level singleton shared by all importers; call voice.init() first
# (init() creates the lock and channel the other methods rely on).
voice = _Voice()
| StarcoderdataPython |
9615770 | '''
Create a AlexNet-Style network.
Exact network parameters are from (for sake of comparision): https://arxiv.org/abs/1801.01423
author: <NAME>
'''
import tensorflow as tf
from tensorflow.keras import layers as lyrs
# Shared keyword arguments for every Conv2D layer in AlexNet below.
conv_params = {
    "padding": "same",
    #"kernel_initializer": tf.keras.initializers.glorot_normal(),
    "kernel_initializer": tf.keras.initializers.he_normal(),
    "bias_initializer": "zeros"
}
# Shared keyword arguments for the Dense layers.
dense_params = {
    "kernel_initializer": tf.keras.initializers.glorot_normal(),
    "bias_initializer": "zeros"
}
class AlexNet(tf.keras.Model):
    '''AlexNet-style feature extractor: three conv/pool/dropout stages
    followed by two 2048-unit dense layers. No classification head is
    included; `call` returns the 2048-d activation of the last dense layer.

    Args:
        shape (tuple): Image input size (H, W, C). H and W should be
            divisible by 8 so the Reshape below matches the pooled feature
            map exactly (each of the three 2x2 poolings halves H and W).
    '''
    def __init__(self, shape=(32, 32, 3)):
        super(AlexNet, self).__init__()
        # generate all items; `comp` tracks the cumulative spatial downscale
        # factor (doubled after every pooling stage, ending at 8).
        comp = 1
        self.conv1 = lyrs.Conv2D(64, (4, 4), (1, 1), **conv_params, activation=tf.nn.relu)
        self.pool2 = lyrs.MaxPool2D((2, 2), padding='same')
        comp *= 2
        self.dropout3 = lyrs.Dropout(0.2)
        self.conv4 = lyrs.Conv2D(128, (3, 3), (1, 1), **conv_params, activation=tf.nn.relu)
        self.pool5 = lyrs.MaxPool2D((2, 2), padding='same')
        comp *= 2
        self.dropout6 = lyrs.Dropout(0.2)
        self.conv7 = lyrs.Conv2D(256, (2, 2), (1, 1), **conv_params, activation=tf.nn.relu)
        self.pool8 = lyrs.MaxPool2D((2, 2), padding='same')
        comp *= 2
        self.dropout9 = lyrs.Dropout(0.5)
        # calculate exact reshape: flatten (H/8) * (W/8) * 256 channels.
        # NOTE(review): assumes H and W divide evenly by 8 — for other
        # shapes the 'same'-padded pools round up and this size is wrong.
        self.reshape = lyrs.Reshape(( int((shape[0] / comp) * (shape[1] / comp) * 256), ))
        # create dense layers
        self.fc10 = lyrs.Dense(2048, **dense_params, activation=tf.nn.relu)
        self.dropout11 = lyrs.Dropout(0.5)
        self.fc12 = lyrs.Dense(2048, **dense_params, activation=tf.nn.relu)
        self.dropout13 = lyrs.Dropout(0.5)
    def call(self, input_tensor, training=None):
        # Forward pass; `training` toggles the Dropout layers.
        x = self.dropout3(self.pool2(self.conv1(input_tensor)), training=training)
        x = self.dropout6(self.pool5(self.conv4(x)), training=training)
        x = self.dropout9(self.pool8(self.conv7(x)), training=training)
        x = self.reshape(x)
        x = self.dropout11(self.fc10(x), training=training)
        x = self.dropout13(self.fc12(x), training=training)
        return x
| StarcoderdataPython |
6566894 | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
import pymysql
import pytest
from datadog_checks.dev import WaitFor, docker_run
from . import common, tags
# Test-matrix selectors injected by the environment (None when unset).
MYSQL_FLAVOR = os.getenv('MYSQL_FLAVOR')
MYSQL_VERSION = os.getenv('MYSQL_VERSION')
COMPOSE_FILE = os.getenv('COMPOSE_FILE')
@pytest.fixture(scope='session')
def dd_environment(instance_basic):
    """Session-wide docker environment with a MySQL master and slave.

    Waits for both servers to come up (master gets the monitoring user,
    the slave must accept it), seeds the test database on the master,
    then yields the basic instance config.
    """
    with docker_run(
        os.path.join(common.HERE, 'compose', COMPOSE_FILE),
        env_vars={
            'MYSQL_DOCKER_REPO': _mysql_docker_repo(),
            'MYSQL_PORT': str(common.PORT),
            'MYSQL_SLAVE_PORT': str(common.SLAVE_PORT),
            'WAIT_FOR_IT_SCRIPT_PATH': _wait_for_it_script(),
        },
        conditions=[WaitFor(init_master, wait=2), WaitFor(init_slave, wait=2)],
    ):
        master_conn = pymysql.connect(host=common.HOST, port=common.PORT, user='root')
        _populate_database(master_conn)
        yield instance_basic
@pytest.fixture(scope='session')
def instance_basic():
    """Minimal check instance: connection settings only."""
    return {'server': common.HOST, 'user': common.USER, 'pass': common.PASS, 'port': common.PORT}
@pytest.fixture
def instance_complex():
    """Full-featured check instance: replication, extra metric groups,
    tags, and two custom queries against the seeded testdb.users table."""
    return {
        'server': common.HOST,
        'user': common.USER,
        'pass': common.PASS,
        'port': common.PORT,
        'options': {
            'replication': True,
            'extra_status_metrics': True,
            'extra_innodb_metrics': True,
            'extra_performance_metrics': True,
            'schema_size_metrics': True,
        },
        'tags': tags.METRIC_TAGS,
        'queries': [
            {
                'query': "SELECT * from testdb.users where name='Alice' limit 1;",
                'metric': 'alice.age',
                'type': 'gauge',
                'field': 'age',
            },
            {
                'query': "SELECT * from testdb.users where name='Bob' limit 1;",
                'metric': 'bob.age',
                'type': 'gauge',
                'field': 'age',
            },
        ],
    }
@pytest.fixture(scope='session')
def instance_error():
    """Instance with an unknown username, for negative-path tests."""
    return {'server': common.HOST, 'user': 'unknown', 'pass': common.PASS}
def init_master():
    """Connect to the master as root and create the monitoring user."""
    conn = pymysql.connect(host=common.HOST, port=common.PORT, user='root')
    _add_dog_user(conn)
def init_slave():
    """Probe the replica: succeeds only once it accepts the 'dog' user."""
    pymysql.connect(host=common.HOST, port=common.SLAVE_PORT, user=common.USER, passwd=common.PASS)
def _add_dog_user(conn):
    """Create the 'dog' monitoring user with replication/process grants.

    NOTE(review): on MySQL 8.0 the connection limit is applied via
    ALTER USER instead of on the GRANT itself — presumably because the
    GRANT ... WITH MAX_USER_CONNECTIONS form is not accepted there; confirm
    against the target server versions.
    """
    cur = conn.cursor()
    cur.execute("CREATE USER 'dog'@'%' IDENTIFIED BY 'dog';")
    if MYSQL_FLAVOR == 'mysql' and MYSQL_VERSION == '8.0':
        cur.execute("GRANT REPLICATION CLIENT ON *.* TO 'dog'@'%';")
        cur.execute("ALTER USER 'dog'@'%' WITH MAX_USER_CONNECTIONS 5;")
    else:
        cur.execute("GRANT REPLICATION CLIENT ON *.* TO 'dog'@'%' WITH MAX_USER_CONNECTIONS 5;")
    cur.execute("GRANT PROCESS ON *.* TO 'dog'@'%';")
    cur.execute("GRANT SELECT ON performance_schema.* TO 'dog'@'%'")
cur.execute("GRANT SELECT ON performance_schema.* TO 'dog'@'%'")
def _populate_database(conn):
    """Seed testdb.users with two rows and grant SELECT to the 'dog' user."""
    cur = conn.cursor()
    cur.execute("USE mysql;")
    cur.execute("CREATE DATABASE testdb;")
    cur.execute("USE testdb;")
    cur.execute("CREATE TABLE testdb.users (name VARCHAR(20), age INT);")
    cur.execute("INSERT INTO testdb.users (name,age) VALUES('Alice',25);")
    cur.execute("INSERT INTO testdb.users (name,age) VALUES('Bob',20);")
    cur.execute("GRANT SELECT ON testdb.users TO 'dog'@'%';")
    cur.close()
def _wait_for_it_script():
    """Absolute path to the bundled wait-for-it.sh helper script.

    FIXME: relying on the filesystem layout is a bad idea, the testing helper
    should expose its path through the api instead
    """
    return os.path.abspath(
        os.path.join(common.TESTS_HELPER_DIR, 'scripts', 'wait-for-it.sh'))
def _mysql_docker_repo():
    """Pick the docker image repo for the MYSQL_FLAVOR/MYSQL_VERSION matrix.

    Returns None (implicitly) for combinations outside the test matrix.
    """
    if MYSQL_FLAVOR == 'mariadb':
        return 'bitnami/mariadb'
    if MYSQL_FLAVOR == 'mysql':
        if MYSQL_VERSION == '5.5':
            return 'jfullaondo/mysql-replication'
        if MYSQL_VERSION in ('5.6', '5.7'):
            return 'bergerx/mysql-replication'
        if MYSQL_VERSION == '8.0':
            return 'bitnami/mysql'
| StarcoderdataPython |
3584250 | # -*- coding: utf-8 -*-
"""
Created on Thu Dec 3 08:48:50 2020
@author: ebuit
Inspired in examples taken from:
https://dash.plotly.com/dash-core-components/input
https://stackoverflow.com/questions/51407191/python-dash-get-value-from-input-text
https://github.com/AdamSpannbauer/app_rasa_chat_bot/blob/master/dash_demo_app.py
"""
import dash
from dash.dependencies import Input, Output, State
import dash_html_components as html
import dash_core_components as dcc
import nltk
from nltk.stem import WordNetLemmatizer
# Model artifacts are loaded once at import time.
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
from keras.models import load_model
# Trained intent-classification network.
model = load_model('chatbot_model.h5')
import json
import random
# Intent definitions: tags, patterns and canned 'respuestas' (responses).
intents = json.loads(open('intenciones.json').read())
# Vocabulary and intent-class lists produced at training time.
# NOTE(review): the open() handles above are never closed explicitly.
words = pickle.load(open('words.pkl','rb'))
classes = pickle.load(open('classes.pkl','rb'))
# External Tableau dashboard linked from the page footer.
tableau = 'https://public.tableau.com/profile/sonia.ardila#!/vizhome/Pacientes_Dashboard/Dashboard1?publish=yes'
"""
-----------------------------------------------------
# Funciones requeridas para el chatbot
-----------------------------------------------------
"""
# Preprocessing helper.
def clean_up_sentence(sentence):
    """Tokenize *sentence* with the Spanish tokenizer, then lower-case and
    lemmatize every token."""
    # Tokenize the pattern - split the words into an array.
    sentence_words = nltk.word_tokenize(sentence, 'spanish')
    # Reduce each word to its lemma (base form).
    sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
    return sentence_words
# Bag-of-words encoder: 0 or 1 per vocabulary word present in the sentence.
def bow(sentence, words, show_details=True):
    """Encode *sentence* as a bag-of-words vector over the *words* vocabulary.

    Returns a numpy array of 0/1 flags, one per vocabulary entry; a flag is 1
    when the (tokenized, lemmatized) sentence contains that word. When
    show_details is true, every vocabulary hit is printed.
    """
    tokens = clean_up_sentence(sentence)
    flags = [0] * len(words)
    for token in tokens:
        for position, vocab_word in enumerate(words):
            if vocab_word == token:
                # Mark the vocabulary slot as present.
                flags[position] = 1
                if show_details:
                    print("found in bag: %s" % vocab_word)
    return np.array(flags)
# Prediction helper.
def predict_class(sentence, model):
    """Classify *sentence* with the intent model.

    Returns a list of {'intent': tag, 'probability': str} dicts, sorted by
    descending probability, keeping only scores above 0.25.
    """
    # Drop predictions below the threshold.
    p = bow(sentence, words,show_details=False)
    res = model.predict(np.array([p]))[0]
    ERROR_THRESHOLD = 0.25
    results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
    # Sort by probability, best first.
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
    return return_list
# Response lookup helper.
def getResponse(ints, intents_json):
    """Pick a canned reply for the top predicted intent.

    ints: prediction list as returned by predict_class(); only the first
        (highest-probability) entry is consulted.
    intents_json: parsed intents file: {'intents': [{'tag', 'respuestas', ...}]}.

    Returns a random response string from the matching tag's 'respuestas'.
    Raises ValueError when the predicted tag is missing from the intents
    file (the original crashed with UnboundLocalError in that case).
    """
    tag = ints[0]['intent']
    for intent in intents_json['intents']:
        if intent['tag'] == tag:
            return random.choice(intent['respuestas'])
    raise ValueError("unknown intent tag: %s" % tag)
# End-to-end reply helper.
def chatbot_response(msg):
    """Classify *msg* with the global model and return a canned response
    for the winning intent."""
    predictions = predict_class(msg, model)
    return getResponse(predictions, intents)
"""
-----------------------------------------------------
Aplicación dash
-----------------------------------------------------
"""
app = dash.Dash(__name__)
app.layout = html.Div([
# Selector title
html.H1(
"Elison: tu amigo en el diagnóstico de riesgo cardiovascular",
id="title",
style={
"text-align": "left",
"font-weight": "bold",
"display": "block",
"color": "black"},
),
dcc.Textarea(
id='textarea-conversacion',
value='¡Bienvenido! Soy Elison.\
\n Te ayudaré en la prevención de riesgo cardiovascular.',
style={'width': '50%', 'height': 200},
),
dcc.Input(
id='input-usuario',
value='',
type='text',
children='value',
style={'width': '50%', 'height': 60},
),
html.Div(
html.Button('Enviar tu respuesta a Elison', id='input-usuario-button', n_clicks=0),
),
html.Div(
html.H3(
"Elison recomienda:",
id="title_recomendacion",
style={
"text-align": "left",
"font-weight": "bold",
"display": "block",
"color": "green"},
),
),
dcc.Link('¿Quieres aprender sobre estadísticas de riesgo cardiovascular? Sigue este enlace para visualizar algunas gráficas que he preparado para ti.', href=tableau),
])
@app.callback(
    Output('textarea-conversacion', 'value'),
    Input('input-usuario-button', 'n_clicks'),
    State('input-usuario', 'value')
)
def update_output(n_clicks, value):
    """Replace the conversation textarea with the bot's reply to *value*.

    Before any click (n_clicks == 0) a greeting is shown instead.
    NOTE(review): this greeting differs from the layout's initial textarea
    text — confirm whether that is intentional.
    """
    if n_clicks > 0:
        value = str(chatbot_response(value))
    else:
        value='¡Hola! Soy Elison. \nTe ayudaré en la prevención de riesgo cardiovascular.'
    return value
@app.callback(
    Output('input-usuario', 'value'),
    [Input('textarea-conversacion', 'value')]
)
def clear_input(_):
    """Empty the input box whenever the conversation textarea changes."""
    return ''
if __name__ == '__main__':
    # NOTE(review): binds to all interfaces with debug=True — do not expose
    # the debug reloader/console on untrusted networks.
    app.run_server(host='0.0.0.0', port='8051', debug=True)
| StarcoderdataPython |
5070422 | <filename>webqq/views.py
from django.shortcuts import render
from django.shortcuts import HttpResponse
import json
import datetime
from webqq import utils
from bbs import models
# Create your views here.
# In-memory message queues, keyed by recipient user id.
# NOTE(review): module-level mutable state — lost on restart and not shared
# across worker processes; confirm single-process deployment.
global_msg_dic = {}
def dashboard(request):
    """Render the webqq dashboard template."""
    return render(request, 'webqq/dashboard.html', locals())
def send_msg(request):
    """Accept a chat message via POST and queue it for the recipient.

    Expects POST['data'] to be a JSON object with from_id, to_id,
    contact_type and msg; a 'date' timestamp is stamped server-side.
    Only 'single' (one-to-one) messages are queued; other contact types
    fall through and still return 'success'.
    NOTE(review): from_id is read but unused, and the print() calls look
    like leftover debugging output.
    """
    # print(request.POST)
    data = request.POST['data']
    data = json.loads(data)
    print(data)
    from_id = data['from_id']
    to_id = data['to_id']
    contact_type = data['contact_type']
    data['date'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if contact_type == 'single':
        # Lazily create the recipient's queue on first message.
        if to_id not in global_msg_dic:
            global_msg_dic[to_id] = utils.Chat()
        global_msg_dic[to_id].put_msg(data)
        print("push msg [%s] to user:%s's que" % (data['msg'],
                                                  models.UserProfile.objects.get(id=to_id),
                                                  ))
    # print(global_msg_dic)
    return HttpResponse('success')
def get_msg(request):
    """Return (as JSON) the queued messages for GET['user_id'].

    Yields null when the user has no queue yet; when user_id is missing or
    empty, returns the placeholder string 'chucuole'.
    NOTE(review): 'chucuole' looks like leftover debug output — confirm no
    client depends on it before changing.
    """
    # print(request.GET)
    # print('global_msg_dic:', global_msg_dic)
    uid = request.GET['user_id']
    # print('uid:', uid)
    if uid:
        res = None
        if uid in global_msg_dic:
            res = global_msg_dic[uid].get_msg()
        return HttpResponse(json.dumps(res))
    return HttpResponse(json.dumps('chucuole'))
3421696 | import pytest
import redriver
from mock import call
@pytest.fixture
def mock_sqs_helper(mocker):
    """Patch redriver.sqs_helper with a mock and return it for assertions."""
    mocker.patch.object(redriver, 'sqs_helper')
    return redriver.sqs_helper
def test_missing_dlq_url():
    """redrive() must reject an event without the DLQName key."""
    with pytest.raises(ValueError):
        redriver.redrive({'MaxMessageCount': 10}, None)
def test_missing_max_message_count():
    """redrive() must reject an event without the MaxMessageCount key."""
    with pytest.raises(ValueError):
        redriver.redrive({'DLQName': 'mydlq'}, None)
def test_max_message_count_not_integer():
    """redrive() must reject a non-integer MaxMessageCount."""
    with pytest.raises(ValueError):
        redriver.redrive({'DLQName': 'mydlq', 'MaxMessageCount': 'string'}, None)
def test_max_message_count_negative_integer():
    """redrive() must reject a negative MaxMessageCount."""
    with pytest.raises(ValueError):
        redriver.redrive({'DLQName': 'mydlq', 'MaxMessageCount': -1}, None)
def test_redrive_no_message(mock_sqs_helper):
    """With an empty DLQ, nothing is sent or deleted."""
    dlq_name = 'dlqName'
    dlq_url = 'mydlq'
    max_message_count = 10
    mock_sqs_helper.get_queue_url.return_value = dlq_url
    mock_sqs_helper.receive_messages.return_value = []
    redriver.redrive({'DLQName': dlq_name, 'MaxMessageCount': max_message_count}, None)
    mock_sqs_helper.get_queue_url.assert_called_once_with(dlq_name)
    mock_sqs_helper.get_source_queues.assert_called_once_with(dlq_url)
    mock_sqs_helper.receive_messages.assert_called_once_with(dlq_url, max_message_count)
    mock_sqs_helper.send_messages.assert_not_called()
    mock_sqs_helper.delete_messages.assert_not_called()
def test_redrive(mock_sqs_helper):
    """One DLQ message is re-sent to the source queue and then deleted;
    the remaining budget (count - 1) is requested on the next poll."""
    dlq_name = 'dlqName'
    dlq_url = 'mydlq'
    max_message_count = 10
    source_queue = 'mySource'
    mock_sqs_helper.get_queue_url.return_value = dlq_url
    mock_sqs_helper.get_source_queues.return_value = [source_queue]
    mock_sqs_helper.receive_messages.side_effect = [[{
        'MessageId': 'myId',
        'Body': 'This is my message',
        'MessageAttributes': [],
        'ReceiptHandle': 'myHandle'
    }], []]
    redriver.redrive({'DLQName': dlq_name, 'MaxMessageCount': max_message_count}, None)
    mock_sqs_helper.get_queue_url.assert_called_once_with(dlq_name)
    mock_sqs_helper.get_source_queues.assert_called_once_with(dlq_url)
    mock_sqs_helper.receive_messages.assert_has_calls([call(dlq_url, max_message_count), call(dlq_url, max_message_count - 1)])
    mock_sqs_helper.send_messages.assert_called_once_with(source_queue, [{
        'Id': 'myId',
        'MessageBody': 'This is my message',
        'MessageAttributes': []
    }])
    mock_sqs_helper.delete_messages.assert_called_once_with(dlq_url, [{
        'Id': 'myId',
        'ReceiptHandle': 'myHandle'
    }])
def test_redrive_no_message_attributes(mock_sqs_helper):
    """A message without MessageAttributes is forwarded without that key."""
    dlq_name = 'dlqName'
    dlq_url = 'mydlq'
    max_message_count = 10
    source_queue = 'mySource'
    mock_sqs_helper.get_queue_url.return_value = dlq_url
    mock_sqs_helper.get_source_queues.return_value = [source_queue]
    mock_sqs_helper.receive_messages.side_effect = [[{
        'MessageId': 'myId',
        'Body': 'This is my message',
        'ReceiptHandle': 'myHandle'
    }], []]
    redriver.redrive({'DLQName': dlq_name, 'MaxMessageCount': max_message_count}, None)
    mock_sqs_helper.get_queue_url.assert_called_once_with(dlq_name)
    mock_sqs_helper.get_source_queues.assert_called_once_with(dlq_url)
    mock_sqs_helper.receive_messages.assert_has_calls([call(dlq_url, max_message_count), call(dlq_url, max_message_count - 1)])
    mock_sqs_helper.send_messages.assert_called_once_with(source_queue, [{
        'Id': 'myId',
        'MessageBody': 'This is my message'
    }])
    mock_sqs_helper.delete_messages.assert_called_once_with(dlq_url, [{
        'Id': 'myId',
        'ReceiptHandle': 'myHandle'
    }])
| StarcoderdataPython |
3277528 | <filename>gplay_apk_download_multidex/apk_multidex.py
#!/usr/bin/env python3
import subprocess
import os
# NOTE(review): hard-coded, machine-specific Android SDK path — override
# for other hosts/CI.
APKANALYZER = "/Users/amitseal/Android/Sdk/tools/bin/apkanalyzer"
APKANALYZER_COMMAND = "{} dex list {}"
def is_multidex(apk_path: str):
    """Return True when the APK at *apk_path* contains two or more .dex files.

    Runs `apkanalyzer dex list <apk>` and counts output lines ending in
    ".dex".

    Fix: the original formatted the path into a shell command string and ran
    it through a shell (subprocess.getoutput), so a path containing spaces or
    shell metacharacters was split/interpreted by the shell. The command is
    now passed as an argument list with no shell involved. (Failure mode
    change: a missing apkanalyzer binary now raises FileNotFoundError
    instead of silently returning False.)
    """
    command = [APKANALYZER, "dex", "list", apk_path]
    print(command)
    result = subprocess.run(command, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, universal_newlines=True)
    count_dex = 0
    for line in result.stdout.splitlines():
        if line.endswith(".dex"):
            count_dex += 1
            if count_dex >= 2:  # two dex files already prove multidex
                return True
    return False
def count_multidex_in_dir(dir_path: str):
    """Scan *dir_path* (non-recursively) for .apk files.

    Returns a (multidex_count, apk_count) tuple.
    """
    apk_paths = [
        dir_path + os.sep + entry.name
        for entry in os.scandir(dir_path)
        if entry.is_file() and entry.name.endswith(".apk")
    ]
    multidex_count = sum(1 for path in apk_paths if is_multidex(path))
    return (multidex_count, len(apk_paths))
if __name__ == "__main__":
    # Ad-hoc manual run against a machine-specific directory.
    print(count_multidex_in_dir("/Users/amitseal/workspaces/apks"))
391809 | <filename>app/models/base.py
from app import db
class Base(db.Model):
    """Polymorphic base row; subclasses are discriminated by the `type` column."""
    __tablename__ = 'base_tbl'
    # Surrogate primary key shared by all subclasses.
    id = db.Column(db.Integer, primary_key=True)
    # Discriminator column driving SQLAlchemy polymorphic loading.
    type = db.Column(db.String(), nullable=False)
    __mapper_args__ = {'polymorphic_on': type}

    def __init__(self):
        # NOTE(review): this no-op __init__ overrides SQLAlchemy's default
        # keyword constructor; kept as-is in case callers rely on it.
        pass

    def __repr__(self):
        # Fix: the original returned the constant '<>' for every row, which
        # made logging/debugging output useless.
        return '<{} {}>'.format(self.type, self.id)
| StarcoderdataPython |
6492647 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from . import classification_engine_test
class TestCocompilationModelPythonAPI(classification_engine_test.ClassificationEngineTestCase):
  """Checks that co-compiled Edge TPU classification models keep producing
  the expected top labels/scores on the reference cat image."""

  def test_various_cocompiled_models(self):
    # Mobilenet V1 and Mobilenet V2.
    self._test_classify_cat(
        'cocompilation/mobilenet_v1_1.0_224_quant_cocompiled_with_mobilenet_v2_1.0_224_quant_edgetpu.tflite',
        [('Egyptian cat', 0.78), ('tiger cat', 0.128)]
    )
    self._test_classify_cat(
        'cocompilation/mobilenet_v2_1.0_224_quant_cocompiled_with_mobilenet_v1_1.0_224_quant_edgetpu.tflite',
        [('Egyptian cat', 0.84)]
    )
    # Inception V1 and Inception V4.
    self._test_classify_cat(
        'cocompilation/inception_v1_224_quant_cocompiled_with_inception_v4_299_quant_edgetpu.tflite',
        [('tabby, tabby cat', 0.41),
         ('Egyptian cat', 0.35),
         ('tiger cat', 0.156)]
    )
    self._test_classify_cat(
        'cocompilation/inception_v4_299_quant_cocompiled_with_inception_v1_224_quant_edgetpu.tflite',
        [('Egyptian cat', 0.45),
         ('tabby, tabby cat', 0.3),
         ('tiger cat', 0.15)]
    )
if __name__ == '__main__':
  # Run the co-compilation classification tests directly.
  unittest.main()
| StarcoderdataPython |
3335893 |
class DatabaseResource(object):
    """Aggregated reporting queries over non-main (sub-resource) requests.

    Wraps a DB-API connection to a database containing the devtools_request,
    devtools_resource and devtools_request_error tables.
    """

    def __init__(self, conn):
        self.conn = conn

    def resource_list_only(self):
        """One dict per distinct resource referenced by a non-main request."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT DISTINCT RS.id, RS.url, RS.url_blacklisted, RS.is_truncated, RS.is_external
            FROM devtools_request AS RQ
            JOIN devtools_resource AS RS
            ON RQ.resource_id = RS.id
            WHERE RQ.is_main = 0
        """)
        return [
            {
                "id": resource_id,
                "url": url,
                "url_blacklisted": url_blacklisted,
                "is_truncated": is_truncated,
                "is_external": is_external,
            }
            for resource_id, url, url_blacklisted, is_truncated, is_external
            in cursor.fetchall()
        ]

    # Only finished: requests.http_status is not null
    # - exclude requests that didn't receive Network.loadingFinished or Network.loadingFailed event
    # - unfinished requests are not included in stats, because we don't know if this is an error
    def request_stat_for_resources(self):
        """Per-resource request statistics, keyed by resource id."""
        stats = self._all_finished_requests()
        self._append_non_cached_requests(stats)
        self._append_unfinished_requests(stats)
        return stats

    def _append_non_cached_requests(self, data):
        """Merge avg size/load time of non-cached finished requests into *data*."""
        for resource_id, non_cached in self._non_cached_finished_requests().items():
            entry = data.setdefault(resource_id, {})
            entry["avg_size"] = non_cached["avg_size"]
            entry["avg_load_time"] = non_cached["avg_load_time"]

    def _append_unfinished_requests(self, data):
        """Merge unfinished-request counts into *data*."""
        for resource_id, unfinished in self._unfinished_requests().items():
            data.setdefault(resource_id, {})["requests_unfinished"] = unfinished

    def _all_finished_requests(self):
        """Count finished requests (and how many came from cache) per resource."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT resource_id, COUNT(*), SUM(from_cache)
            FROM devtools_request
            WHERE is_main = 0 AND http_status IS NOT NULL
            GROUP BY resource_id
        """)
        return {
            resource_id: {
                "requests_finished": requests_finished,
                "from_cache": from_cache,
            }
            for resource_id, requests_finished, from_cache in cursor.fetchall()
        }

    # avg_load_time (only non-cached requests)
    # avg_size (this is resource size, so cached requests are not included)
    def _non_cached_finished_requests(self):
        """Average response size / load time per resource, cache hits excluded."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT resource_id, AVG(data_received), AVG(time_load)
            FROM devtools_request
            WHERE is_main = 0 AND http_status IS NOT NULL AND from_cache = 0
            GROUP BY resource_id
        """)
        return {
            resource_id: {"avg_size": avg_size, "avg_load_time": avg_load_time}
            for resource_id, avg_size, avg_load_time in cursor.fetchall()
        }

    def _unfinished_requests(self):
        """Number of never-finished requests per resource."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT resource_id, COUNT(*)
            FROM devtools_request
            WHERE is_main = 0 AND http_status IS NULL
            GROUP BY resource_id
        """)
        return dict(cursor.fetchall())

    def request_error_for_resources(self):
        """Failed-request summary (HTTP >= 400 or status 0) per resource."""
        cursor = self.conn.cursor()
        cursor.execute("""
            SELECT R.page_id, R.resource_id, R.http_status, E.name
            FROM devtools_request AS R
            LEFT JOIN devtools_request_error AS E
            ON E.id = R.error_id
            WHERE R.is_main = 0 AND (R.http_status >= 400 OR R.http_status = 0)
        """)
        summary = {}
        for row in cursor.fetchall():
            self._append_error_info_to_resources(summary, row)
        return summary

    # top 10 occurrences for every failed request
    def _append_error_info_to_resources(self, resources, row):
        """Fold one failed-request row into the per-resource summary,
        keeping at most 10 distinct [page_id, status, error] samples."""
        page_id, resource_id, http_status, error_name = row
        entry = resources.setdefault(
            resource_id, {"requests_errors": 0, "pages_with_error": []})
        entry["requests_errors"] += 1
        sample = [page_id, http_status, error_name]
        samples = entry["pages_with_error"]
        if len(samples) < 10 and sample not in samples:
            samples.append(sample)
| StarcoderdataPython |
3527265 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.core.validators
class Migration(migrations.Migration):
    """Auto-generated Django migration: creates the BirthdaysPluginModel
    table (a CMS plugin with caption, max_days <= 365, and optional
    max_entries). Do not hand-edit beyond documentation."""

    dependencies = [
        ('cms', '0013_urlconfrevision'),
    ]

    operations = [
        migrations.CreateModel(
            name='BirthdaysPluginModel',
            fields=[
                ('cmsplugin_ptr', models.OneToOneField(to='cms.CMSPlugin', serialize=False, auto_created=True, primary_key=True, parent_link=True)),
                ('caption', models.CharField(max_length=32, default='Happy birthday to...')),
                ('max_days', models.PositiveSmallIntegerField(null=True, blank=True, validators=[django.core.validators.MaxValueValidator(365)], default=7)),
                ('max_entries', models.PositiveSmallIntegerField(null=True, blank=True, default=None)),
            ],
            options={
                'abstract': False,
            },
            bases=('cms.cmsplugin',),
        ),
    ]
| StarcoderdataPython |
9734081 | import logging
from datetime import timedelta
from enum import Enum
import celery
from celery import Celery
from celery.apps.beat import Beat
from celery.apps.worker import Worker
from celery.schedules import crontab
from spaceone.core import config
DEFAULT_SPACEONE_BEAT = 'spaceone.core.celery.schedulers.SpaceOneScheduler'
@celery.signals.after_setup_logger.connect
def on_after_setup_logger(**kwargs):
    """After Celery wires up its loggers, force DEBUG level and propagation
    when the CELERY.debug_mode config flag is set.

    Fix: the original duplicated the same three lines per logger; they are
    now configured in one loop, using the documented setLevel() API instead
    of assigning the `level` attribute directly.
    """
    if config.get_global('CELERY', {}).get('debug_mode'):
        for logger_name in ('celery', 'celery.app.trace'):
            logger = logging.getLogger(logger_name)
            logger.propagate = True
            logger.setLevel(logging.DEBUG)
app = Celery('spaceone')
class SERVER_MODE_ENUM(Enum):
    # Server roles selected via the CELERY 'mode' config (see serve()).
    WORKER = 'WORKER'  # runs a celery Worker
    BEAT = 'BEAT'  # runs the stock celery Beat scheduler
    SPACEONE_BEAT = 'SPACEONE_BEAT'  # runs Beat with the SpaceONE custom scheduler
def update_celery_config(app):
    """Apply SpaceONE global configuration to *app* and discover its tasks."""
    global_conf = config.get_global()
    celery_conf = global_conf.get('CELERY', {})

    # Queue name is derived from the service name, e.g. "spaceone_q".
    queue_name = f"{global_conf.get('SERVICE', 'spaceone')}_q"
    app.conf.update(task_default_queue=queue_name)
    app.conf.update(task_cls='spaceone.core.celery.tasks:BaseTask')
    app.conf.update(**celery_conf.get('config', {}))

    app.conf.mode = celery_conf.get('mode')
    if app.conf.mode == 'BEAT':
        register_beat_schedules(app)

    # Default tasks shipped with the core package plus the service package.
    app.autodiscover_tasks(['spaceone.core.celery', global_conf["PACKAGE"]], force=True)
    # Custom scheduler tasks living in <package>.task modules.
    app.autodiscover_tasks([global_conf["PACKAGE"]], related_name='task', force=True)
def parse_schedule(rule_type: str, rule: dict):
    """Translate a schedule rule from config into a celery schedule object.

    'interval' rules become a datetime.timedelta of rule['every'] units of
    rule['period'] (default 'seconds'); 'cron' rules become a
    celery.schedules.crontab built from the rule mapping.

    Raises:
        NotImplementedError: for any other rule_type.
    """
    if rule_type == 'cron':
        return crontab(**rule)
    if rule_type == 'interval':
        period = rule.get('period', 'seconds')
        return timedelta(**{period: rule['every']})
    raise NotImplementedError('UNSUPPORTED RULE_TYPE')
def register_beat_schedules(app):
    """Register every CELERY.schedules entry from global config on app.conf.beat_schedule."""
    schedules = config.get_global().get('CELERY', {}).get('schedules', {})
    for name, info in schedules.items():
        entry = {
            "task": info['task'],
            "schedule": parse_schedule(info['rule_type'], info['rule']),
        }
        # args/kwargs are optional; falsy values are omitted, matching celery's defaults.
        for key in ('args', 'kwargs'):
            if value := info.get(key):
                entry[key] = value
        app.conf.beat_schedule[name] = entry
def serve():
    """Start the configured celery process: worker, beat, or spaceone beat."""
    update_celery_config(app)
    mode = app.conf.get('mode', SERVER_MODE_ENUM.WORKER.value)
    beat_modes = (SERVER_MODE_ENUM.BEAT.value, SERVER_MODE_ENUM.SPACEONE_BEAT.value)
    if mode == SERVER_MODE_ENUM.SPACEONE_BEAT.value:
        # Swap in the SpaceONE scheduler before starting beat.
        app.conf.update(beat_scheduler=DEFAULT_SPACEONE_BEAT)
    if mode in beat_modes:
        Beat(app=app, loglevel='DEBUG').run()
    else:
        # Anything else (including the default) runs a worker.
        Worker(app=app).start()
| StarcoderdataPython |
6493572 | <reponame>abhinavDhulipala/SAM-URL
from flask import Flask, render_template, redirect, request, jsonify, url_for, flash
import secrets
import boto_utils
from local_constants import DEPLOYED_GATEWAY
import requests
from urllib3.exceptions import HTTPError
from requests.exceptions import RequestException
app = Flask(__name__)
app.secret_key = secrets.token_hex(128)
# warning: firefox behavior is undetermined. Use chrome for most consistent performance
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Render the shortener form and handle new short-link submissions.

    GET renders the form. POST validates the submitted URL by fetching it
    (must answer 200 within 3s), stores a random 7-byte token for it, and
    re-renders the form with the shortened link.
    """
    if request.method == 'GET':
        return render_template('home.html')
    url = request.form.get('basic-url')
    try:
        not_valid = requests.get(url, timeout=3).status_code != 200
    except (RequestException, HTTPError):
        # BUG FIX: the original `except RequestException or HTTPError:`
        # evaluated to `except RequestException:` (the `or` of two classes
        # is just the first class), so urllib3's HTTPError escaped.
        not_valid = True
    if not_valid:
        flash(u'Misformatted url', 'error')
        return redirect(url_for('home'))
    random_token = secrets.token_urlsafe(7)
    # collision prob = 64**7 = 4.3e12 Nearly impossible collision rate
    boto_utils.put(url, random_token, 'unimplemented', 'no user field')
    return render_template('home.html', link=DEPLOYED_GATEWAY + random_token)
@app.errorhandler(404)
def page_not_found(_error):
    """Send any unknown path back to the home page instead of a 404 page."""
    return redirect(url_for('home'))
if __name__ == '__main__':
    # NOTE(review): debug=True is for local development only — confirm it is
    # disabled in any deployed configuration.
    app.run(debug=True)
| StarcoderdataPython |
11331080 | """Main entry points to tentaclio-io."""
from typing import ContextManager
from tentaclio import protocols
from tentaclio.credentials import authenticate
from .stream_registry import STREAM_HANDLER_REGISTRY
__all__ = ["open"]
VALID_MODES = ("", "rb", "wb", "rt", "wt", "r", "w", "b", "t")
def open(url: str, mode: str = None, **kwargs) -> ContextManager[protocols.AnyReaderWriter]:
    """Open a url and return a reader or writer depending on mode.

    Arguments:
        :mode: similar to built-in open; allowed modes are combinations of
        "w" for writing, "r" for reading, "t" for text resources, and "b"
        for binary. The default is "rt".

    Examples:
        >>> open(path, 'b')   # opens binary reader
        >>> open(path, 'wb')  # opens binary writer
        >>> open(path, 'wt')  # opens text writer
    """
    mode = mode or ""
    _assert_mode(mode)
    # Any mode containing "w" is a write; everything else is a read.
    opener = _open_writer if "w" in mode else _open_reader
    return opener(url=url, mode=mode, **kwargs)
# Helpers
def _assert_mode(mode: str):
    """Raise ValueError unless *mode* is one of VALID_MODES."""
    if mode in VALID_MODES:
        return
    valid_modes = ",".join(VALID_MODES)
    raise ValueError(f"Mode {mode} is not allowed. Valid modes are {valid_modes}")
def _open_writer(url: str, mode: str, **kwargs) -> ContextManager[protocols.Writer]:
    """Open a url and return a writer."""
    return STREAM_HANDLER_REGISTRY.open_stream_writer(
        authenticate(url), mode, extras=kwargs)
def _open_reader(url: str, mode: str, **kwargs) -> ContextManager[protocols.Reader]:
    """Open a url and return a reader."""
    return STREAM_HANDLER_REGISTRY.open_stream_reader(
        authenticate(url), mode, extras=kwargs)
| StarcoderdataPython |
4829482 | '''
Instance of a standard Python type does not have a '__dict__'.
Instance of the subclass of that type has '__dict__'.
object().__dict__ # -> AttributeError
class MyObject(object): pass
MyObject().__dict__
'''
import builtins
import sys
# === Not tested ===
# cell
# TextFile
# === Basic ===
# object
try:
o = object()
o.__dict__
assert False, 'object'
except AttributeError:
pass
class MyObject(object):
pass
o = MyObject()
o.__dict__
# type
int.__dict__
# NoneType
try:
None.__dict__
assert False, 'NoneType'
except AttributeError:
pass
# ellipsis
try:
(...).__dict__
assert False, 'NoneType'
except AttributeError:
pass
# NotImplementedType
try:
NotImplemented.__dict__
assert False, 'NoneType'
except AttributeError:
pass
# types.SimpleNamespace
# Not done
# =================================
# === int, bool, float, complex ===
# =================================
# int
try:
(1).__dict__
assert False, 'int'
except AttributeError:
pass
class MyInt(int):
pass
i = MyInt()
i.__dict__
# bool
try:
(True).__dict__
assert False, 'bool'
except AttributeError:
pass
try:
(False).__dict__
assert False, 'bool'
except AttributeError:
pass
# float
try:
(1.42).__dict__
assert False, 'float'
except AttributeError:
pass
class MyFloat(float):
pass
f = MyFloat()
f.__dict__
# complex
try:
(1.42j).__dict__
assert False, 'complex'
except AttributeError:
pass
class MyComplex(complex):
pass
c = MyComplex()
c.__dict__
# ===================
# === list, tuple ===
# ===================
# list
try:
[].__dict__
assert False, 'list'
except AttributeError:
pass
class MyList(list):
pass
l = MyList()
l.__dict__
# list_iterator
try:
[].__iter__.__dict__
assert False, 'list_iterator'
except AttributeError:
pass
# list_reverseiterator
try:
[].__reversed__.__dict__
assert False, 'list_reverseiterator'
except AttributeError:
pass
# tuple
try:
().__dict__
assert False, 'tuple'
except AttributeError:
pass
class MyTuple(tuple):
pass
t = MyTuple()
t.__dict__
# tuple_iterator
try:
().__iter__.__dict__
assert False, 'tuple_iterator'
except AttributeError:
pass
# =============================
# === str, bytes, bytearray ===
# =============================
# str
try:
''.__dict__
assert False, 'str'
except AttributeError:
pass
class MyStr(str):
pass
s = MyStr()
s.__dict__
# str_iterator
try:
''.__iter__.__dict__
assert False, 'str_iterator'
except AttributeError:
pass
# bytes
try:
b''.__dict__
assert False, 'bytes'
except AttributeError:
pass
class MyBytes(bytes):
pass
s = MyBytes()
s.__dict__
# bytes_iterator
try:
b''.__iter__.__dict__
assert False, 'bytes_iterator'
except AttributeError:
pass
# bytearray
try:
bytearray().__dict__
assert False, 'bytearray'
except AttributeError:
pass
class MyByteArray(bytearray):
pass
s = MyByteArray()
s.__dict__
# bytearray_iterator
try:
bytearray().__iter__.__dict__
assert False, 'bytes_iterator'
except AttributeError:
pass
# ============
# === dict ===
# ============
# dict
try:
dict().__dict__
assert False, 'dict'
except AttributeError:
pass
class MyDict(dict):
pass
d = MyDict()
d.__dict__
# dict_items
try:
dict().items().__dict__
assert False, 'dict_items'
except AttributeError:
pass
# dict_itemiterator
try:
dict().items().__iter__.__dict__
assert False, 'dict_itemiterator'
except AttributeError:
pass
# dict_keys
try:
dict().keys().__dict__
assert False, 'dict_keys'
except AttributeError:
pass
# dict_keyiterator
try:
dict().keys().__iter__.__dict__
assert False, 'dict_keyiterator'
except AttributeError:
pass
# dict_values
# fixed: the expressions under 'dict_values' and 'dict_valueiterator' were
# swapped relative to the dict_keys/dict_items tests above.
try:
    dict().values().__dict__
    assert False, 'dict_values'
except AttributeError:
    pass
# dict_valueiterator
try:
    dict().values().__iter__.__dict__
    assert False, 'dict_valueiterator'
except AttributeError:
    pass
# ======================
# === set, frozenset ===
# ======================
# set
try:
set().__dict__
assert False, 'set'
except AttributeError:
pass
class MySet(set):
pass
s = MySet()
s.__dict__
# frozenset
try:
frozenset().__dict__
assert False, 'frozenset'
except AttributeError:
pass
class MyFrozenSet(frozenset):
pass
s = MyFrozenSet()
s.__dict__
# set_iterator
try:
set().__iter__.__dict__
assert False, 'set_iterator'
except AttributeError:
pass
# === range, slice ===
try:
range(5).__dict__
assert False, 'range'
except AttributeError:
pass
try:
range(5).__iter__.__dict__
assert False, 'range_iterator'
except AttributeError:
pass
try:
l = [1, 2, 3]
l[1:2].__dict__
assert False, 'slice'
except AttributeError:
pass
# === map, filter, zip, enumerate ===
# You had one job!
def fn_that_returns_true(i): return False
try:
o = map(fn_that_returns_true, [1, 2, 3])
o.__dict__
assert False, 'map'
except AttributeError:
pass
try:
o = filter(fn_that_returns_true, [1, 2, 3])
o.__dict__
assert False, 'filter'
except AttributeError:
pass
try:
o = zip((1, 2, 3), [1, 2, 3])
o.__dict__
assert False, 'zip'
except AttributeError:
pass
try:
o = enumerate([1, 2, 3])
o.__dict__
assert False, 'enumerate'
except AttributeError:
pass
# === iterator, reversed, callable_iterator ===
class DummyIterable:
"""
We need a new class, if we used tuple/array that would return the type-specific '__iter__'.
"""
def __len__(self): return 1
def __getitem__(self): return 1
# iterator
try:
o = iter(DummyIterable())
o.__dict__
assert False, 'iterator'
except AttributeError:
pass
# reversed
try:
o = reversed(DummyIterable())
o.__dict__
assert False, 'reversed'
except AttributeError:
pass
# callable_iterator
def iter_fn_that_returns_1():
return 1
try:
sentinel_that_is_not_1 = 2
o = iter(iter_fn_that_returns_1, sentinel_that_is_not_1)
o.__dict__
assert False, 'callable_iterator'
except AttributeError:
pass
# === builtin function/method ===
try:
iter.__dict__
assert False, 'builtinFunction'
except AttributeError:
pass
try:
# We can't just use 'str.count',
# we have to bind it to specific instance
method = ''.count
method.__dict__
assert False, 'builtinMethod'
except AttributeError:
pass
# === function, method, classmethod, staticmethod ===
def dummy_fn(): pass
dummy_fn.__dict__
class DummyClass:
def method(self): pass
# We can't just use 'DummyClass.method',
# we have to bind it to specific instance
c = DummyClass()
method = c.method
method.__dict__
# We can't use ' @classmethod DummyClass.class_method',
# because then getter ('DummyClass.class_method' thingy)
# would bind it to 'cls' ('DummyClass') creating 'method'.
class_method = classmethod(dummy_fn)
class_method.__dict__
# While we could use '@staticmethod' inside 'DummyClass',
# we will just use '__init__' for symmetry with 'classmethod'.
static_method_ = staticmethod(dummy_fn)
static_method_.__dict__
# === property ===
class DummyClass:
@property
def x(self): return 1
# Test it, because I never remember the syntax
c = DummyClass()
assert c.x == 1
try:
DummyClass.x.__dict__
assert False, 'property'
except AttributeError:
pass
# === code, frame, module, super, traceback ===
def dummy_fn(): pass
class DummyClass:
pass
# code
try:
dummy_fn.__code__.__dict__
assert False, 'code'
except AttributeError:
pass
# frame
try:
f = sys._getframe
f.__dict__
assert False, 'frame'
except AttributeError:
pass
# module
builtins.__dict__
# super
try:
s = super(int)
s.__dict__
assert False, 'super'
except AttributeError:
pass
# traceback
def give_me_traceback():
try:
raise ValueError('dummy')
except ValueError as e:
return e.__traceback__
try:
tb = give_me_traceback()
tb.__dict__
assert False, 'traceback'
except AttributeError:
pass
# === Errors ===
BaseException('').__dict__
SystemExit('').__dict__
KeyboardInterrupt('').__dict__
GeneratorExit('').__dict__
Exception('').__dict__
StopIteration('').__dict__
StopAsyncIteration('').__dict__
ArithmeticError('').__dict__
FloatingPointError('').__dict__
OverflowError('').__dict__
ZeroDivisionError('').__dict__
AssertionError('').__dict__
AttributeError('').__dict__
BufferError('').__dict__
EOFError('').__dict__
ImportError('').__dict__
ModuleNotFoundError('').__dict__
LookupError('').__dict__
IndexError('').__dict__
KeyError('').__dict__
MemoryError('').__dict__
NameError('').__dict__
UnboundLocalError('').__dict__
OSError('').__dict__
BlockingIOError('').__dict__
ChildProcessError('').__dict__
ConnectionError('').__dict__
BrokenPipeError('').__dict__
ConnectionAbortedError('').__dict__
ConnectionRefusedError('').__dict__
ConnectionResetError('').__dict__
FileExistsError('').__dict__
FileNotFoundError('').__dict__
InterruptedError('').__dict__
IsADirectoryError('').__dict__
NotADirectoryError('').__dict__
PermissionError('').__dict__
ProcessLookupError('').__dict__
TimeoutError('').__dict__
ReferenceError('').__dict__
RuntimeError('').__dict__
NotImplementedError('').__dict__
RecursionError('').__dict__
SyntaxError('').__dict__
IndentationError('').__dict__
TabError('').__dict__
SystemError('').__dict__
TypeError('').__dict__
ValueError('').__dict__
UnicodeError('').__dict__
UnicodeDecodeError('ascii', b'', 0, 1, '?').__dict__
UnicodeEncodeError('ascii', '', 0, 1, '?').__dict__
UnicodeTranslateError('ascii', 0, 1, '?').__dict__
Warning('').__dict__
DeprecationWarning('').__dict__
PendingDeprecationWarning('').__dict__
RuntimeWarning('').__dict__
SyntaxWarning('').__dict__
UserWarning('').__dict__
FutureWarning('').__dict__
ImportWarning('').__dict__
UnicodeWarning('').__dict__
BytesWarning('').__dict__
ResourceWarning('').__dict__
| StarcoderdataPython |
1781419 | <gh_stars>1-10
# this file is to merge the jupyter notebooks into one.
# source: https://towardsdatascience.com/how-to-easily-merge-multiple-jupyter-notebooks-into-one-e464a22d2dc4
import json
import copy
# functions
def read_ipynb(notebook_path):
    """Load a Jupyter notebook (JSON on disk) and return it as a dict."""
    with open(notebook_path, 'r', encoding='utf-8') as fh:
        return json.load(fh)
def write_ipynb(notebook, notebook_path):
    """Serialize *notebook* (a dict) as JSON to *notebook_path*."""
    with open(notebook_path, 'w', encoding='utf-8') as fh:
        json.dump(notebook, fh)
# Load the three contributors' notebooks (paths are relative to this script's location).
karel_notebook = read_ipynb('../Karel/milestone2_EDA.ipynb')
kaitlyn_notebook = read_ipynb('../Kaitlyn/milestone2.ipynb')
shaheer_notebook = read_ipynb('../Shaheer/milestone2.ipynb')
# Use Karel's notebook as the template (its metadata/kernelspec are kept),
# then replace its cell list with all three notebooks' cells concatenated.
final_notebook = copy.deepcopy(karel_notebook)
final_notebook['cells'] = karel_notebook['cells'] + kaitlyn_notebook['cells'] + shaheer_notebook['cells']
# Saving the resulting notebook
write_ipynb(final_notebook, '../submitted/milestone2.ipynb')
| StarcoderdataPython |
3378210 | <reponame>LucasRouckhout/microCTImageAnalyser<gh_stars>1-10
#!/usr/bin/env python3
import sys
def ask():
    """Prompt the user once; exit the process on 'quit', otherwise return the reply."""
    answer = input(">> ")
    if answer == 'quit':
        sys.exit()
    return answer
| StarcoderdataPython |
8079922 | <reponame>PlasticMem/tencentcloud-sdk-python<filename>tencentcloud/mgobe/v20201014/errorcodes.py
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# CAM signature/authentication error.
AUTHFAILURE = 'AuthFailure'
# DryRun operation: the request would have succeeded, but the DryRun parameter was passed.
DRYRUNOPERATION = 'DryRunOperation'
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Connection info is empty.
FAILEDOPERATION_ACCESSACCESSINFOEMPTY = 'FailedOperation.AccessAccessInfoEmpty'
# Failed to add COMM connection info.
FAILEDOPERATION_ACCESSADDCOMMCONNERR = 'FailedOperation.AccessAddCommConnErr'
# 添加心跳连接信息失败。
FAILEDOPERATION_ACCESSADDHEARTCONNERR = 'FailedOperation.AccessAddHeartConnErr'
# 添加Relay连接信息失败。
FAILEDOPERATION_ACCESSADDRELAYCONNERR = 'FailedOperation.AccessAddRelayConnErr'
# 获取Token失败。
FAILEDOPERATION_ACCESSCMDGETTOKENERR = 'FailedOperation.AccessCmdGetTokenErr'
# 命令字无效错误。
FAILEDOPERATION_ACCESSCMDINVALIDERR = 'FailedOperation.AccessCmdInvalidErr'
# Token无效或过期。
FAILEDOPERATION_ACCESSCMDINVALIDTOKEN = 'FailedOperation.AccessCmdInvalidToken'
# Token即将过期。
FAILEDOPERATION_ACCESSCMDTOKENPREEXPIRE = 'FailedOperation.AccessCmdTokenPreExpire'
# 查找连接信息出错。
FAILEDOPERATION_ACCESSCONNERR = 'FailedOperation.AccessConnErr'
# 获取COMM连接信息失效。
FAILEDOPERATION_ACCESSGETCOMMCONNECTERR = 'FailedOperation.AccessGetCommConnectErr'
# 获取RELAY连接信息失效。
FAILEDOPERATION_ACCESSGETRELAYCONNECTERR = 'FailedOperation.AccessGetRelayConnectErr'
# 获取Relay的RS_IP或RS_PORT出错。
FAILEDOPERATION_ACCESSGETRSIPERR = 'FailedOperation.AccessGetRsIpErr'
# 心跳包解析出错。
FAILEDOPERATION_ACCESSHEARTBODYPARSEERR = 'FailedOperation.AccessHeartBodyParseErr'
# 登录用户中心回包解析出错。
FAILEDOPERATION_ACCESSLOGINBODYPARSEERR = 'FailedOperation.AccessLoginBodyParseErr'
# 转发SVR名字错误,不是relay_svr或state_svr。
FAILEDOPERATION_ACCESSNOERELAYORSTATESVR = 'FailedOperation.AccessNoeRelayOrStateSvr'
# 用户已经登录,不能重复登录。
FAILEDOPERATION_ACCESSPLAYERDUPLICATELOGIN = 'FailedOperation.AccessPlayerDuplicateLogin'
# PUSH序列化包失败。
FAILEDOPERATION_ACCESSPUSHSERIALIZEERR = 'FailedOperation.AccessPushSerializeErr'
# 计费类型相关错误。
FAILEDOPERATION_BILLINGERROR = 'FailedOperation.BillingError'
# 非法命令字。
FAILEDOPERATION_CMDINVALID = 'FailedOperation.CmdInvalid'
# 解散房间失败。
FAILEDOPERATION_DISMISSROOMFAILED = 'FailedOperation.DismissRoomFailed'
# 发送消息频率达到限制。
FAILEDOPERATION_GROUPCHATFREQUENCYLIMIT = 'FailedOperation.GroupChatFrequencyLimit'
# 无权限修改队组队长。
FAILEDOPERATION_GROUPMODIFYOWNERNOPERMISSION = 'FailedOperation.GroupModifyOwnerNoPermission'
# 队组不存在。
FAILEDOPERATION_GROUPNOTEXIST = 'FailedOperation.GroupNotExist'
# 队组操作失败。
FAILEDOPERATION_GROUPOPERATIONFAILED = 'FailedOperation.GroupOperationFailed'
# 对组中人数超过限制。
FAILEDOPERATION_GROUPPLAYERNUMLIMITEXCEED = 'FailedOperation.GroupPlayerNumLimitExceed'
# 没有权限移除玩家。
FAILEDOPERATION_GROUPREMOVEPLAYERNOPERMISSION = 'FailedOperation.GroupRemovePlayerNoPermission'
# 服务器内部错误。
FAILEDOPERATION_INNERERROR = 'FailedOperation.InnerError'
# 无效的修改选项。
FAILEDOPERATION_INVALIDCHANGEOPTION = 'FailedOperation.InvalidChangeOption'
# 参数错误change_room_option_list。
FAILEDOPERATION_INVALIDCHANGEROOMOPTION = 'FailedOperation.InvalidChangeRoomOption'
# 业务参数错误。
FAILEDOPERATION_INVALIDPARAMS = 'FailedOperation.InvalidParams'
# 参数错误create_room_type。
FAILEDOPERATION_INVALIDPARAMSCREATEROOMTYPE = 'FailedOperation.InvalidParamsCreateRoomType'
# 参数错误device_id。
FAILEDOPERATION_INVALIDPARAMSDEVICEID = 'FailedOperation.InvalidParamsDeviceId'
# 参数错误game_id。
FAILEDOPERATION_INVALIDPARAMSGAMEID = 'FailedOperation.InvalidParamsGameId'
# 队组自定义属性参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPCUSTOMPROPERTIES = 'FailedOperation.InvalidParamsGroupCustomProperties'
# 队组id参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPID = 'FailedOperation.InvalidParamsGroupId'
# 队组名称参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPNAME = 'FailedOperation.InvalidParamsGroupName'
# 队组owner参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPOWNER = 'FailedOperation.InvalidParamsGroupOwner'
# 队组玩家自定义属性参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPPLAYERCUSTOMPROPERTIES = 'FailedOperation.InvalidParamsGroupPlayerCustomProperties'
# 队组玩家自定义状态参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPPLAYERCUSTOMSTATUS = 'FailedOperation.InvalidParamsGroupPlayerCustomStatus'
# 队组玩家名称参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPPLAYERNAME = 'FailedOperation.InvalidParamsGroupPlayerName'
# 队组接收消息类型参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPRECVTYPE = 'FailedOperation.InvalidParamsGroupRecvType'
# 队组类型参数错误。
FAILEDOPERATION_INVALIDPARAMSGROUPTYPE = 'FailedOperation.InvalidParamsGroupType'
# 参数错误match_code。
FAILEDOPERATION_INVALIDPARAMSMATCHCODE = 'FailedOperation.InvalidParamsMatchCode'
# 参数错误match_type。
FAILEDOPERATION_INVALIDPARAMSMATCHTYPE = 'FailedOperation.InvalidParamsMatchType'
# 最大玩家数量参数错误。
FAILEDOPERATION_INVALIDPARAMSMAXPLAYER = 'FailedOperation.InvalidParamsMaxPlayer'
# 参数错误max_players。
FAILEDOPERATION_INVALIDPARAMSMAXPLAYERS = 'FailedOperation.InvalidParamsMaxPlayers'
# 参数错误message。
FAILEDOPERATION_INVALIDPARAMSMESSAGE = 'FailedOperation.InvalidParamsMessage'
# 消息长度超过限制。
FAILEDOPERATION_INVALIDPARAMSMESSAGELENGTH = 'FailedOperation.InvalidParamsMessageLength'
# 消息队列消息decode参数错误。
FAILEDOPERATION_INVALIDPARAMSMSGQDECODE = 'FailedOperation.InvalidParamsMsgqDecode'
# 消息队列消息encode参数错误。
FAILEDOPERATION_INVALIDPARAMSMSGQENCODE = 'FailedOperation.InvalidParamsMsgqEncode'
# 参数错误network_state。
FAILEDOPERATION_INVALIDPARAMSNETWORKSTATE = 'FailedOperation.InvalidParamsNetworkState'
# 参数错误nonce。
FAILEDOPERATION_INVALIDPARAMSNONCE = 'FailedOperation.InvalidParamsNonce'
# 参数错误open_id。
FAILEDOPERATION_INVALIDPARAMSOPENID = 'FailedOperation.InvalidParamsOpenId'
# 参数错误owner。
FAILEDOPERATION_INVALIDPARAMSOWNER = 'FailedOperation.InvalidParamsOwner'
# 参数错误owner_open_id。
FAILEDOPERATION_INVALIDPARAMSOWNEROPENID = 'FailedOperation.InvalidParamsOwnerOpenId'
# 参数错误page_no。
FAILEDOPERATION_INVALIDPARAMSPAGENO = 'FailedOperation.InvalidParamsPageNo'
# 参数错误page_size。
FAILEDOPERATION_INVALIDPARAMSPAGESIZE = 'FailedOperation.InvalidParamsPageSize'
# 参数错误platform。
FAILEDOPERATION_INVALIDPARAMSPLATFORM = 'FailedOperation.InvalidParamsPlatform'
# 玩法协议规则表达式错误。
FAILEDOPERATION_INVALIDPARAMSPLAYMODEEXPRESSION = 'FailedOperation.InvalidParamsPlayModeExpression'
# 玩法协议规则类型错误。
FAILEDOPERATION_INVALIDPARAMSPLAYMODERULETYPE = 'FailedOperation.InvalidParamsPlayModeRuletype'
# 玩法协议规则团队表达式错误。
FAILEDOPERATION_INVALIDPARAMSPLAYMODETEAM = 'FailedOperation.InvalidParamsPlayModeTeam'
# 玩法协议版本号错误。
FAILEDOPERATION_INVALIDPARAMSPLAYMODEVERSION = 'FailedOperation.InvalidParamsPlayModeVersion'
# 参数错误player_id。
FAILEDOPERATION_INVALIDPARAMSPLAYERID = 'FailedOperation.InvalidParamsPlayerId'
# 参数错误player_info。
FAILEDOPERATION_INVALIDPARAMSPLAYERINFO = 'FailedOperation.InvalidParamsPlayerInfo'
# 参数错误playerlist。
FAILEDOPERATION_INVALIDPARAMSPLAYERLIST = 'FailedOperation.InvalidParamsPlayerList'
# 玩家不在队组中不允许操作。
FAILEDOPERATION_INVALIDPARAMSPLAYERNOTINGROUP = 'FailedOperation.InvalidParamsPlayerNotInGroup'
# 队组接收消息的玩家中存在不在队组中的玩家。
FAILEDOPERATION_INVALIDPARAMSRECVPLAYERID = 'FailedOperation.InvalidParamsRecvPlayerId'
# 参数错误region。
FAILEDOPERATION_INVALIDPARAMSREGION = 'FailedOperation.InvalidParamsRegion'
# 参数错误create_type。
FAILEDOPERATION_INVALIDPARAMSROOMCREATETYPE = 'FailedOperation.InvalidParamsRoomCreateType'
# 参数错误room_name。
FAILEDOPERATION_INVALIDPARAMSROOMNAME = 'FailedOperation.InvalidParamsRoomName'
# 参数错误room_type。
FAILEDOPERATION_INVALIDPARAMSROOMTYPE = 'FailedOperation.InvalidParamsRoomType'
# 参数错误sign。
FAILEDOPERATION_INVALIDPARAMSSIGN = 'FailedOperation.InvalidParamsSign'
# 参数错误timestamp。
FAILEDOPERATION_INVALIDPARAMSTIMESTAMP = 'FailedOperation.InvalidParamsTimestamp'
# 参数错误token。
FAILEDOPERATION_INVALIDPARAMSTOKEN = 'FailedOperation.InvalidParamsToken'
# [rm]当前大区找不到合适的匹配,内部接口用。
FAILEDOPERATION_MATCHCANNOTFOUND = 'FailedOperation.MatchCanNotFound'
# 取消匹配失败。
FAILEDOPERATION_MATCHCANCELFAILED = 'FailedOperation.MatchCancelFailed'
# 匹配创建房间失败。
FAILEDOPERATION_MATCHCREATEROOMERR = 'FailedOperation.MatchCreateRoomErr'
# 匹配创房有玩家已经在房间中。
FAILEDOPERATION_MATCHCREATEROOMPLAYERALREADYINROOM = 'FailedOperation.MatchCreateRoomPlayerAlreadyInRoom'
# 匹配失败。
FAILEDOPERATION_MATCHERR = 'FailedOperation.MatchErr'
# 游戏信息不存在。
FAILEDOPERATION_MATCHGAMEINFONOTEXIST = 'FailedOperation.MatchGameInfoNotExist'
# 获取匹配信息失败。
FAILEDOPERATION_MATCHGETMATCHINFOERR = 'FailedOperation.MatchGetMatchInfoErr'
# 匹配获取玩家属性失败。
FAILEDOPERATION_MATCHGETPLAYERATTRFAIL = 'FailedOperation.MatchGetPlayerAttrFail'
# 查询匹配队列信息失败。
FAILEDOPERATION_MATCHGETPLAYERLISTINFOERR = 'FailedOperation.MatchGetPlayerListInfoErr'
# 匹配获取队伍属性失败。
FAILEDOPERATION_MATCHGETTEAMATTRFAIL = 'FailedOperation.MatchGetTeamAttrFail'
# 匹配小组人数超过队伍上限。
FAILEDOPERATION_MATCHGROUPNUMEXCEEDLIMIT = 'FailedOperation.MatchGroupNumExceedLimit'
# 匹配无效参数。
FAILEDOPERATION_MATCHINVALIDPARAMS = 'FailedOperation.MatchInvalidParams'
# 匹配加入房间失败。
FAILEDOPERATION_MATCHJOINROOMERR = 'FailedOperation.MatchJoinRoomErr'
# 匹配逻辑错误。
FAILEDOPERATION_MATCHLOGICERR = 'FailedOperation.MatchLogicErr'
# 匹配失败,无任何房间。
FAILEDOPERATION_MATCHNOROOM = 'FailedOperation.MatchNoRoom'
# 玩家属性无法决定队伍类别。
FAILEDOPERATION_MATCHNONETEAMTYPEFIT = 'FailedOperation.MatchNoneTeamTypeFit'
# 匹配参数不完整。
FAILEDOPERATION_MATCHPLAYATTRNOTFOUND = 'FailedOperation.MatchPlayAttrNotFound'
# 匹配规则获取属性匹配区间失败。
FAILEDOPERATION_MATCHPLAYRULEATTRSEGMENTNOTFOUND = 'FailedOperation.MatchPlayRuleAttrSegmentNotFound'
# 匹配规则算法错误。
FAILEDOPERATION_MATCHPLAYRULEFUNCERR = 'FailedOperation.MatchPlayRuleFuncErr'
# 匹配规则不存在。
FAILEDOPERATION_MATCHPLAYRULENOTFOUND = 'FailedOperation.MatchPlayRuleNotFound'
# 匹配规则不可用。
FAILEDOPERATION_MATCHPLAYRULENOTRUNNING = 'FailedOperation.MatchPlayRuleNotRunning'
# 玩家属性不存在。
FAILEDOPERATION_MATCHPLAYERATTRNOTFOUND = 'FailedOperation.MatchPlayerAttrNotFound'
# 匹配小组中玩家ID重复。
FAILEDOPERATION_MATCHPLAYERIDISREPEATED = 'FailedOperation.MatchPlayerIdIsRepeated'
# 用户信息不存在。
FAILEDOPERATION_MATCHPLAYERINFONOTEXIST = 'FailedOperation.MatchPlayerInfoNotExist'
# 用户已经在匹配中。
FAILEDOPERATION_MATCHPLAYERISINMATCH = 'FailedOperation.MatchPlayerIsInMatch'
# 用户不在匹配状态。
FAILEDOPERATION_MATCHPLAYERNOTINMATCH = 'FailedOperation.MatchPlayerNotInMatch'
# 查询游戏信息失败。
FAILEDOPERATION_MATCHQUERYGAMEERR = 'FailedOperation.MatchQueryGameErr'
# 查询用户信息失败。
FAILEDOPERATION_MATCHQUERYPLAYERERR = 'FailedOperation.MatchQueryPlayerErr'
# 查询大区信息失败。
FAILEDOPERATION_MATCHQUERYREGIONERR = 'FailedOperation.MatchQueryRegionErr'
# 无大区信息。
FAILEDOPERATION_MATCHREGIONINFONOTEXIST = 'FailedOperation.MatchRegionInfoNotExist'
# 匹配已经取消。
FAILEDOPERATION_MATCHREQUESTCANCELED = 'FailedOperation.MatchRequestCanceled'
# 匹配请求ID已经存在。
FAILEDOPERATION_MATCHREQUESTIDISEXIST = 'FailedOperation.MatchRequestIdIsExist'
# 匹配请求ID不存在。
FAILEDOPERATION_MATCHREQUESTIDNOTEXIST = 'FailedOperation.MatchRequestIdNotExist'
# 匹配机器人Group不正确。
FAILEDOPERATION_MATCHROBOTGROUPNOTRIGHT = 'FailedOperation.MatchRobotGroupNotRight'
# 匹配机器人Team不正确。
FAILEDOPERATION_MATCHROBOTTEAMNOTRIGHT = 'FailedOperation.MatchRobotTeamNotRight'
# 团队匹配失败。
FAILEDOPERATION_MATCHTEAMFAIL = 'FailedOperation.MatchTeamFail'
# 队伍匹配失败。
FAILEDOPERATION_MATCHTEAMMATCHFAIL = 'FailedOperation.MatchTeamMatchFail'
# 玩家伍类别非法。
FAILEDOPERATION_MATCHTEAMTYPEINVALID = 'FailedOperation.MatchTeamTypeInvalid'
# 匹配超时。
FAILEDOPERATION_MATCHTIMEOUT = 'FailedOperation.MatchTimeout'
# 更新匹配信息失败。
FAILEDOPERATION_MATCHUPDATEMATCHINFOERR = 'FailedOperation.MatchUpdateMatchInfoErr'
# 没有队组操作权限。
FAILEDOPERATION_NOGROUPOPERATIONPERMISSION = 'FailedOperation.NoGroupOperationPermission'
# 没有权限请求。
FAILEDOPERATION_NORIGHT = 'FailedOperation.NoRight'
# 队组禁止玩家加入。
FAILEDOPERATION_OPERATIONFAILEDGROUPFORBIDJOIN = 'FailedOperation.OperationFailedGroupForbidJoin'
# 参数错误。
FAILEDOPERATION_PARAMSINVALID = 'FailedOperation.ParamsInvalid'
# 持久化队组数量超过限制。
FAILEDOPERATION_PERSISTENCEGROUPNUMEXCEEDTHELIMIT = 'FailedOperation.PersistenceGroupNumExceedTheLimit'
# 新增用户信息失败。
FAILEDOPERATION_PLAYERADDPLAYERFAIL = 'FailedOperation.PlayerAddPlayerFail'
# 清除token缓存失败。
FAILEDOPERATION_PLAYERCLEARTOKENFAIL = 'FailedOperation.PlayerClearTokenFail'
# 重复请求。
FAILEDOPERATION_PLAYERDUPLICATEREQ = 'FailedOperation.PlayerDuplicateReq'
# game不存在。
FAILEDOPERATION_PLAYERGAMENOTEXIST = 'FailedOperation.PlayerGameNotExist'
# 游戏已停止服务。
FAILEDOPERATION_PLAYERGAMEOUTOFSERVICE = 'FailedOperation.PlayerGameOutOfService'
# 查询token失败。
FAILEDOPERATION_PLAYERGETTOKENFAIL = 'FailedOperation.PlayerGetTokenFail'
# 玩家加入的对组个数超过限制。
FAILEDOPERATION_PLAYERGROUPNUMLIMITEXCEED = 'FailedOperation.PlayerGroupNumLimitExceed'
# 玩家已经在队组中。
FAILEDOPERATION_PLAYERISEXISTGROUP = 'FailedOperation.PlayerIsExistGroup'
# 玩家不在该队组中。
FAILEDOPERATION_PLAYERISNOTEXISTGROUP = 'FailedOperation.PlayerIsNotExistGroup'
# 获取分布式锁失败。
FAILEDOPERATION_PLAYERLOCKFAIL = 'FailedOperation.PlayerLockFail'
# 查询game信息失败。
FAILEDOPERATION_PLAYERQUERYGAMEFAIL = 'FailedOperation.PlayerQueryGameFail'
# 查询用户信息失败。
FAILEDOPERATION_PLAYERQUERYPLAYERFAIL = 'FailedOperation.PlayerQueryPlayerFail'
# 用户记录数不正确。
FAILEDOPERATION_PLAYERRECORDNUMERR = 'FailedOperation.PlayerRecordNumErr'
# 保存token缓存失败。
FAILEDOPERATION_PLAYERSAVETOKENFAIL = 'FailedOperation.PlayerSaveTokenFail'
# 查询secret_key失败。
FAILEDOPERATION_PLAYERSECRETKEYFAIL = 'FailedOperation.PlayerSecretKeyFail'
# sign校验失败。
FAILEDOPERATION_PLAYERSIGNERR = 'FailedOperation.PlayerSignErr'
# timestamp非法。
FAILEDOPERATION_PLAYERTIMESTAMPINVALID = 'FailedOperation.PlayerTimestampInvalid'
# token非法。
FAILEDOPERATION_PLAYERTOKENINVALID = 'FailedOperation.PlayerTokenInvalid'
# token不存在。
FAILEDOPERATION_PLAYERTOKENNOTEXIST = 'FailedOperation.PlayerTokenNotExist'
# 释放分布式锁失败。
FAILEDOPERATION_PLAYERUNLOCKFAIL = 'FailedOperation.PlayerUnlockFail'
# 重复创建。
FAILEDOPERATION_RELAYALREADYEXISTS = 'FailedOperation.RelayAlreadyExists'
# 清理房间对局数据失败。
FAILEDOPERATION_RELAYCLEANRELAYROOMFAIL = 'FailedOperation.RelayCleanRelayRoomFail'
# data长度超限制。
FAILEDOPERATION_RELAYDATAEXCEEDLIMITED = 'FailedOperation.RelayDataExceedLimited'
# 转发到client-sdk失败。
FAILEDOPERATION_RELAYFORWARDTOCLIENTFAIL = 'FailedOperation.RelayForwardToClientFail'
# 转发到自定义逻辑svr失败。
FAILEDOPERATION_RELAYFORWARDTOGAMESVRFAIL = 'FailedOperation.RelayForwardToGamesvrFail'
# gamesvr查不到房间信息报错。
FAILEDOPERATION_RELAYGAMESVRNOTFOUNDROOMFAIL = 'FailedOperation.RelayGamesvrNotFoundRoomFail'
# 自定义扩展服务(gamesvr)未开通。
FAILEDOPERATION_RELAYGAMESVRSERVICENOTOPEN = 'FailedOperation.RelayGamesvrServiceNotOpen'
# 查询帧缓存失败。
FAILEDOPERATION_RELAYGETFRAMECACHEFAIL = 'FailedOperation.RelayGetFrameCacheFail'
# 共享内存缓存错误。
FAILEDOPERATION_RELAYHKVCACHEERROR = 'FailedOperation.RelayHkvCacheError'
# 帧率非法。
FAILEDOPERATION_RELAYINVALIDFRAMERATE = 'FailedOperation.RelayInvalidFrameRate'
# 成员已存在。
FAILEDOPERATION_RELAYMEMBERALREADYEXISTS = 'FailedOperation.RelayMemberAlreadyExists'
# 成员不存在。
FAILEDOPERATION_RELAYMEMBERNOTEXISTS = 'FailedOperation.RelayMemberNotExists'
# 无可用的pod。
FAILEDOPERATION_RELAYNOAVAILABLEPOD = 'FailedOperation.RelayNoAvailablePod'
# 没任何成员。
FAILEDOPERATION_RELAYNOMEMBERS = 'FailedOperation.RelayNoMembers'
# 没权限,401开头是权限相关错误。
FAILEDOPERATION_RELAYNOPERMISSION = 'FailedOperation.RelayNoPermission'
# 服务不存在。
FAILEDOPERATION_RELAYNOTEXISTS = 'FailedOperation.RelayNotExists'
# 通知自定义服务gamesvr失败,402开头,是自定义gamesvr相关的错误。
FAILEDOPERATION_RELAYNOTIFYGAMESVRFAIL = 'FailedOperation.RelayNotifyGamesvrFail'
# 通知relayworker失败。
FAILEDOPERATION_RELAYNOTIFYRELAYWORKERFAIL = 'FailedOperation.RelayNotifyRelayworkerFail'
# redis缓存错误。
FAILEDOPERATION_RELAYREDISCACHEERROR = 'FailedOperation.RelayRedisCacheError'
# 补帧的时候游戏没有开始。
FAILEDOPERATION_RELAYREQFRAMEGAMENOTSTARTED = 'FailedOperation.RelayReqFrameGameNotStarted'
# 请求分配pod失败。
FAILEDOPERATION_RELAYREQPODFAIL = 'FailedOperation.RelayReqPodFail'
# 重置房间对局失败。
FAILEDOPERATION_RELAYRESETRELAYROOMFAIL = 'FailedOperation.RelayResetRelayRoomFail'
# 开局状态下,G不允许修改帧率。
FAILEDOPERATION_RELAYSETFRAMERATEFORBIDDEN = 'FailedOperation.RelaySetFrameRateForbidden'
# 状态异常。
FAILEDOPERATION_RELAYSTATEINVALID = 'FailedOperation.RelayStateInvalid'
# 被移除的玩家Id为空。
FAILEDOPERATION_REMOVEPLAYERIDISEMPTY = 'FailedOperation.RemovePlayerIdIsEmpty'
# 请求包格式错误。
FAILEDOPERATION_REQBADPKG = 'FailedOperation.ReqBadPkg'
# ctrlsvr分配relaysvr失败。
FAILEDOPERATION_ROOMALLOCATERELAYSVRIPPORTERR = 'FailedOperation.RoomAllocateRelaysvrIpPortErr'
# 检查登录失败。
FAILEDOPERATION_ROOMCHECKLOGINSESSIONERR = 'FailedOperation.RoomCheckLoginSessionErr'
# 创建房间失败。
FAILEDOPERATION_ROOMCREATEFAIL = 'FailedOperation.RoomCreateFail'
# 创建房间无权限。
FAILEDOPERATION_ROOMCREATENOPERMISSION = 'FailedOperation.RoomCreateNoPermission'
# 销毁房间无权限。
FAILEDOPERATION_ROOMDESTORYNOPERMISSION = 'FailedOperation.RoomDestoryNoPermission'
# 无解散房间权限。
FAILEDOPERATION_ROOMDISSMISSNOPERMISSION = 'FailedOperation.RoomDissmissNoPermission'
# 游戏信息不存在。
FAILEDOPERATION_ROOMGAMEINFONOTEXIST = 'FailedOperation.RoomGameInfoNotExist'
# 查询用户信息失败。
FAILEDOPERATION_ROOMGETPLAYERINFOERR = 'FailedOperation.RoomGetPlayerInfoErr'
# 获取房间信息失败。
FAILEDOPERATION_ROOMGETROOMINFOERR = 'FailedOperation.RoomGetRoomInfoErr'
# 房间信息不存在。
FAILEDOPERATION_ROOMINFOUNEXIST = 'FailedOperation.RoomInfoUnexist'
# 房间teamId无效。
FAILEDOPERATION_ROOMINVALIDPARAMSTEAMID = 'FailedOperation.RoomInvalidParamsTeamId'
# 无权限加入房间。
FAILEDOPERATION_ROOMJOINNOPERMISSION = 'FailedOperation.RoomJoinNoPermission'
# 房间不允许加入用户。
FAILEDOPERATION_ROOMJOINNOTALLOW = 'FailedOperation.RoomJoinNotAllow'
# 最大用户数值设置非法。
FAILEDOPERATION_ROOMMAXPLAYERSINVALID = 'FailedOperation.RoomMaxPlayersInvalid'
# 房间数量超过限制。
FAILEDOPERATION_ROOMMAXROOMNUMBEREXCEEDLIMIT = 'FailedOperation.RoomMaxRoomNumberExceedLimit'
# 修改房主失败。
FAILEDOPERATION_ROOMMODIFYOWNERERR = 'FailedOperation.RoomModifyOwnerErr'
# 玩家信息操作繁忙,请重试。
FAILEDOPERATION_ROOMMODIFYPLAYERBUSY = 'FailedOperation.RoomModifyPlayerBusy'
# 无修改房间属性权限。
FAILEDOPERATION_ROOMMODIFYPROPERTIESNOPEMISSION = 'FailedOperation.RoomModifyPropertiesNoPemission'
# 页号、页数大小参数不合法,可能实际大小没这么大。
FAILEDOPERATION_ROOMPARAMPAGEINVALID = 'FailedOperation.RoomParamPageInvalid'
# 用户已经在房间内,不能操作创建房间、加房等操作。
FAILEDOPERATION_ROOMPLAYERALREADYINROOM = 'FailedOperation.RoomPlayerAlreadyInRoom'
# 用户信息不存在。
FAILEDOPERATION_ROOMPLAYERINFONOTEXIST = 'FailedOperation.RoomPlayerInfoNotExist'
# 用户目前不在房间内,不能操作更改房间属性、踢人等操作。
FAILEDOPERATION_ROOMPLAYERNOTINROOM = 'FailedOperation.RoomPlayerNotInRoom'
# 用户在房间中掉线,不能开始游戏等操作。
FAILEDOPERATION_ROOMPLAYEROFFLINE = 'FailedOperation.RoomPlayerOffline'
# 房间内用户数已经达到最大人数不能再加入了。
FAILEDOPERATION_ROOMPLAYERSEXCEEDLIMIT = 'FailedOperation.RoomPlayersExceedLimit'
# 游戏信息失败。
FAILEDOPERATION_ROOMQUERYGAMEERR = 'FailedOperation.RoomQueryGameErr'
# 查询用户信息失败。
FAILEDOPERATION_ROOMQUERYPLAYERERR = 'FailedOperation.RoomQueryPlayerErr'
# 查询地域信息失败。
FAILEDOPERATION_ROOMQUERYREGIONERR = 'FailedOperation.RoomQueryRegionErr'
# 查询不到accessRegion信息。
FAILEDOPERATION_ROOMREGIONINFONOTEXIST = 'FailedOperation.RoomRegionInfoNotExist'
# 无踢人权限。
FAILEDOPERATION_ROOMREMOVEPLAYERNOPERMISSION = 'FailedOperation.RoomRemovePlayerNoPermission'
# 被踢玩家不在房间中。
FAILEDOPERATION_ROOMREMOVEPLAYERNOTINROOM = 'FailedOperation.RoomRemovePlayerNotInRoom'
# 无踢出自己权限。
FAILEDOPERATION_ROOMREMOVESELFNOPERMISSION = 'FailedOperation.RoomRemoveSelfNoPermission'
# 房间团队人员已满。
FAILEDOPERATION_ROOMTEAMMEMBERLIMITEXCEED = 'FailedOperation.RoomTeamMemberLimitExceed'
# 编码失败。
FAILEDOPERATION_SDKENCODEPARAMFAIL = 'FailedOperation.SdkEncodeParamFail'
# 参数错误。
FAILEDOPERATION_SDKINVALIDPARAMS = 'FailedOperation.SdkInvalidParams'
# 帧同步鉴权错误。
FAILEDOPERATION_SDKNOCHECKLOGIN = 'FailedOperation.SdkNoCheckLogin'
# 登录态错误。
FAILEDOPERATION_SDKNOLOGIN = 'FailedOperation.SdkNoLogin'
# 无房间。
FAILEDOPERATION_SDKNOROOM = 'FailedOperation.SdkNoRoom'
# 消息响应超时。
FAILEDOPERATION_SDKRESTIMEOUT = 'FailedOperation.SdkResTimeout'
# 消息发送失败。
FAILEDOPERATION_SDKSENDFAIL = 'FailedOperation.SdkSendFail'
# Socket断开。
FAILEDOPERATION_SDKSOCKETCLOSE = 'FailedOperation.SdkSocketClose'
# 网络错误。
FAILEDOPERATION_SDKSOCKETERROR = 'FailedOperation.SdkSocketError'
# SDK未初始化。
FAILEDOPERATION_SDKUNINIT = 'FailedOperation.SdkUninit'
# 服务器繁忙。
FAILEDOPERATION_SERVERBUSY = 'FailedOperation.ServerBusy'
# 标签添加失败。
FAILEDOPERATION_TAGADDFAILED = 'FailedOperation.TagAddFailed'
# 标签接口调用失败,请稍后再试。若无法解决,请在线咨询。
FAILEDOPERATION_TAGCALLERFAILED = 'FailedOperation.TagCallerFailed'
# 后端超时错误。
FAILEDOPERATION_TIMEOUT = 'FailedOperation.TimeOut'
# 内部错误。
INTERNALERROR = 'InternalError'
# 配置房间id管理模块错误。
INTERNALERROR_CONFROOMIDBUCKETERR = 'InternalError.ConfRoomIdBucketErr'
# 数据格式转化失败。
INTERNALERROR_DATAFORMATERR = 'InternalError.DataFormatErr'
# hashcode解码失败。
INTERNALERROR_HASHIDDECODEERR = 'InternalError.HashidDecodeErr'
# hashcode编码失败。
INTERNALERROR_HASHIDENCODEERR = 'InternalError.HashidEncodeErr'
# hashcode生成失败。
INTERNALERROR_HASHIDERR = 'InternalError.HashidErr'
# 参数错误recordId。
INTERNALERROR_INVALIDPARAMSRECOREID = 'InternalError.InvalidParamsRecoreId'
# JSON数据格式转化失败。
INTERNALERROR_JSONFORMATERR = 'InternalError.JsonFormatErr'
# 玩法数据格式转化失败。
INTERNALERROR_JSONPLAYMODEFORMATERR = 'InternalError.JsonPlayModeFormatErr'
# 玩法数据格式转化失败。
INTERNALERROR_JSONPLAYMODEPARISEERR = 'InternalError.JsonPlayModePariseErr'
# 匹配内部逻辑错误。
INTERNALERROR_MATCHINNERLOGICERR = 'InternalError.MatchInnerLogicErr'
# 匹配内部参数错误。
INTERNALERROR_MATCHINNERPARAMSERR = 'InternalError.MatchInnerParamsErr'
# 匹配不是GSE类型查询匹配结果失败。
INTERNALERROR_MATCHRESULTTYPENOTGSE = 'InternalError.MatchResultTypeNotGse'
# 匹配房间添加节点失败。
INTERNALERROR_MATCHROOMINNERADDNODEERR = 'InternalError.MatchRoomInnerAddNodeErr'
# 匹配房间删除节点失败。
INTERNALERROR_MATCHROOMINNERDELNODEERR = 'InternalError.MatchRoomInnerDelNodeErr'
# myspp框架返回-1000。
INTERNALERROR_MYSPPSYSTEMERR = 'InternalError.MysppSystemErr'
# 删除失败。
INTERNALERROR_MYSQLDELETEFAIL = 'InternalError.MysqlDeleteFail'
# 插入失败。
INTERNALERROR_MYSQLINSERTFAIL = 'InternalError.MysqlInsertFail'
# 查询为空。
INTERNALERROR_MYSQLMULTIROWFOUND = 'InternalError.MysqlMultiRowFound'
# 查询为空。
INTERNALERROR_MYSQLNOROWFOUND = 'InternalError.MysqlNoRowFound'
# 查询失败。
INTERNALERROR_MYSQLQUERYSFAIL = 'InternalError.MysqlQuerysFail'
# 更新失败。
INTERNALERROR_MYSQLUPDATEFAIL = 'InternalError.MysqlUpdateFail'
# 反序列化失败。
INTERNALERROR_PBPARSEFROMSTRERR = 'InternalError.PbParseFromStrErr'
# 序列化失败。
INTERNALERROR_PBSERIALIZETOSTRERR = 'InternalError.PbSerializeToStrErr'
# redisdel类操作失败。
INTERNALERROR_REDISDELOPERR = 'InternalError.RedisDelOpErr'
# redis操作异常。
INTERNALERROR_REDISEXPIREOPERR = 'InternalError.RedisExpireOpErr'
# redisget类操作失败。
INTERNALERROR_REDISGETOPERR = 'InternalError.RedisGetOpErr'
# redisKEY不存在。
INTERNALERROR_REDISKEYNOTEXIST = 'InternalError.RedisKeyNotExist'
# redislist操作失败。
INTERNALERROR_REDISLISTOPERR = 'InternalError.RedisListOpErr'
# redislistpop空结果。
INTERNALERROR_REDISLISTPOPEMPTY = 'InternalError.RedisListPopEmpty'
# redis加锁冲突类操作失败。
INTERNALERROR_REDISLOCKALREADYEXIST = 'InternalError.RedisLockAlreadyExist'
# redis加锁类操作失败。
INTERNALERROR_REDISLOCKOPERR = 'InternalError.RedisLockOpErr'
# redis操作参数不合法。
INTERNALERROR_REDISOPINVALIDPARAMS = 'InternalError.RedisOpInvalidParams'
# redis实例池获取实例失败。
INTERNALERROR_REDISPOOLGETINSTANCEFAIL = 'InternalError.RedisPoolGetInstanceFail'
# redisset内为空。
INTERNALERROR_REDISSETISEMPTY = 'InternalError.RedisSetIsEmpty'
# redisset类操作失败。
INTERNALERROR_REDISSETOPERR = 'InternalError.RedisSetOpErr'
# 申请service失败。
INTERNALERROR_ROOMALLOCATESERVICEFAIL = 'InternalError.RoomAllocateServiceFail'
# mysql数据库插入历史房间信息失败。
INTERNALERROR_ROOMHISTORYINFOINSERTERR = 'InternalError.RoomHistoryInfoInsertErr'
# 检查锁失败,一般是过期。
INTERNALERROR_ROOMREDISCHECKLOCKERR = 'InternalError.RoomRedisCheckLockErr'
# 删除锁失败。
INTERNALERROR_ROOMREDISDELLOCKERR = 'InternalError.RoomRedisDelLockErr'
# 获取锁失败。
INTERNALERROR_ROOMREDISGETLOCKERR = 'InternalError.RoomRedisGetLockErr'
# 数据库更新失败。
INTERNALERROR_ROOMREDISUPDATEERR = 'InternalError.RoomRedisUpdateErr'
# 删除用户房间映射表信息失败。
INTERNALERROR_ROOMREMOVEREDISPLAYERROOMMATCHERR = 'InternalError.RoomRemoveRedisPlayerRoomMatchErr'
# 删除房间信息表信息失败。
INTERNALERROR_ROOMREMOVEREDISROOMINFOERR = 'InternalError.RoomRemoveRedisRoomInfoErr'
# 参数错误。
INVALIDPARAMETER = 'InvalidParameter'
# 玩家ID在玩家列表中重复。
INVALIDPARAMETER_DUPLICATEPLAYERIDINPLAYERS = 'InvalidParameter.DuplicatePlayerIdInPlayers'
# 无效的游戏描述长度。
INVALIDPARAMETER_GAMEDESCLENGTH = 'InvalidParameter.GameDescLength'
# 无效的游戏名称长度。
INVALIDPARAMETER_GAMENAMELENGTH = 'InvalidParameter.GameNameLength'
# 无效的游戏平台。
INVALIDPARAMETER_GAMEPLATFORM = 'InvalidParameter.GamePlatform'
# 无效的游戏类型。
INVALIDPARAMETER_GAMETYPE = 'InvalidParameter.GameType'
# 无效的自定义房间属性。
INVALIDPARAMETER_INVALIDCUSTOMPROPERTIES = 'InvalidParameter.InvalidCustomProperties'
# 无效的最大玩家数量。
INVALIDPARAMETER_INVALIDMAXPLAYERS = 'InvalidParameter.InvalidMaxPlayers'
# 无效的最小玩家数量。
INVALIDPARAMETER_INVALIDMINPLAYERS = 'InvalidParameter.InvalidMinPlayers'
# 无效的OpenId长度。
INVALIDPARAMETER_INVALIDOPENIDLENGTH = 'InvalidParameter.InvalidOpenIdLength'
# 无效的自定义玩家属性长度。
INVALIDPARAMETER_INVALIDPLAYERCUSTOMPROFILELENGTH = 'InvalidParameter.InvalidPlayerCustomProfileLength'
# 无效的自定义玩家状态。
INVALIDPARAMETER_INVALIDPLAYERCUSTOMPROFILESTATUS = 'InvalidParameter.InvalidPlayerCustomProfileStatus'
# 无效的玩家昵称长度。
INVALIDPARAMETER_INVALIDPLAYERNAMELENGTH = 'InvalidParameter.InvalidPlayerNameLength'
# 无效的玩家数量。
INVALIDPARAMETER_INVALIDPLAYERSSIZE = 'InvalidParameter.InvalidPlayersSize'
# 错误的机器人匹配模式参数。
INVALIDPARAMETER_INVALIDROBOTMATCHMODELPARAM = 'InvalidParameter.InvalidRobotMatchModelParam'
# 无效的房间名称。
INVALIDPARAMETER_INVALIDROOMNAME = 'InvalidParameter.InvalidRoomName'
# 无效的房间类型长度。
INVALIDPARAMETER_INVALIDROOMTYPELENGTH = 'InvalidParameter.InvalidRoomTypeLength'
# 无效的队伍Id长度。
INVALIDPARAMETER_INVALIDTEAMIDLENGTH = 'InvalidParameter.InvalidTeamIdLength'
# 无效的队伍昵称长度。
INVALIDPARAMETER_INVALIDTEAMNAMELENGTH = 'InvalidParameter.InvalidTeamNameLength'
# 无效的队伍大小。
INVALIDPARAMETER_INVALIDTEAMSSIZE = 'InvalidParameter.InvalidTeamsSize'
# 无效的开通联网对战服务选项。
INVALIDPARAMETER_OPENONLINESERVICE = 'InvalidParameter.OpenOnlineService'
# 房主信息不在玩家列表中。
INVALIDPARAMETER_OWNERNOTINPLAYERS = 'InvalidParameter.OwnerNotInPlayers'
# 玩家数量不在队伍可容纳范围。
INVALIDPARAMETER_PLAYERNUMNOTINTEAMRANGE = 'InvalidParameter.PlayerNumNotInTeamRange'
# 玩家ID在玩家列表中重复。
INVALIDPARAMETER_PLAYEROPENIDINPLAYERSDUPLICATE = 'InvalidParameter.PlayerOpenIdInPlayersDuplicate'
# 创建满员房间但是玩家数量不足。
INVALIDPARAMETER_PLAYERSIZENOTENOUGH = 'InvalidParameter.PlayerSizeNotEnough'
# 玩家队伍ID不在队伍列表中。
INVALIDPARAMETER_PLAYERTEAMIDNOTINTEAMS = 'InvalidParameter.PlayerTeamIdNotInTeams'
# 无效的标签列表。
INVALIDPARAMETER_TAGS = 'InvalidParameter.Tags'
# 参数取值错误。
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# 超过配额限制。
LIMITEXCEEDED = 'LimitExceeded'
# 日志集数量超出限制。
LIMITEXCEEDED_CLSLOGSETEXCEED = 'LimitExceeded.CLSLogsetExceed'
# 日志主题数量超出限制。
LIMITEXCEEDED_CLSTOPICEXCEED = 'LimitExceeded.CLSTopicExceed'
# 游戏数超过限额。
LIMITEXCEEDED_GAMERESOURCELIMIT = 'LimitExceeded.GameResourceLimit'
# 缺少参数错误。
MISSINGPARAMETER = 'MissingParameter'
# 操作被拒绝。
OPERATIONDENIED = 'OperationDenied'
# 请求的次数超过了频率限制。
REQUESTLIMITEXCEEDED = 'RequestLimitExceeded'
# 操作过于频繁,请稍等几秒后重试。
REQUESTLIMITEXCEEDED_FREQUENCYLIMIT = 'RequestLimitExceeded.FrequencyLimit'
# 资源被占用。
RESOURCEINUSE = 'ResourceInUse'
# 资源不足。
RESOURCEINSUFFICIENT = 'ResourceInsufficient'
# 资源不存在。
RESOURCENOTFOUND = 'ResourceNotFound'
# 房间不存在。
RESOURCENOTFOUND_ROOMNOTEXIST = 'ResourceNotFound.RoomNotExist'
# 标签资源未找到。
RESOURCENOTFOUND_TAGNOTFOUND = 'ResourceNotFound.TagNotFound'
# 资源不可用。
RESOURCEUNAVAILABLE = 'ResourceUnavailable'
# 日志服务(CLS)不可用,请确保您已在日志服务控制台开通服务。若无法解决,请在线咨询。
RESOURCEUNAVAILABLE_CLSNOTALLOWED = 'ResourceUnavailable.CLSNotAllowed'
# 游戏已被冻结,操作失败。
RESOURCEUNAVAILABLE_GAMEFROZEN = 'ResourceUnavailable.GameFrozen'
# 资源售罄。
RESOURCESSOLDOUT = 'ResourcesSoldOut'
# 未授权操作。
UNAUTHORIZEDOPERATION = 'UnauthorizedOperation'
# 需要授权CAM权限操作。
UNAUTHORIZEDOPERATION_CAMUNAUTHORIZEDOPERATION = 'UnauthorizedOperation.CAMUnauthorizedOperation'
# 需要授权GSE的CAM权限操作。
UNAUTHORIZEDOPERATION_GSECAMUNAUTHORIZEDOPERATION = 'UnauthorizedOperation.GseCAMUnauthorizedOperation'
# 无操作权限。
UNAUTHORIZEDOPERATION_NOTACTIONPERMISSION = 'UnauthorizedOperation.NotActionPermission'
# 无项目权限。
UNAUTHORIZEDOPERATION_NOTPROJECTPERMISSION = 'UnauthorizedOperation.NotProjectPermission'
# 未知参数错误。
UNKNOWNPARAMETER = 'UnknownParameter'
# 操作不支持。
UNSUPPORTEDOPERATION = 'UnsupportedOperation'
# 标签键不允许重复。
UNSUPPORTEDOPERATION_TAGKEYDUPLICATE = 'UnsupportedOperation.TagKeyDuplicate'
| StarcoderdataPython |
151314 | <reponame>rodelrebucas/dev-overload-starterpack
"""
Time complexity O(n)
"""
# Search for maximum number
l = [2, 4, 5, 1, 80, 5, 99]
maximum = l[0]
for item in l:
if item > maximum:
maximum = item
print(maximum)
| StarcoderdataPython |
3397713 | import bpy
from sklearn.cluster import DBSCAN, OPTICS
# Blender add-on metadata shown in Preferences > Add-ons.
bl_info = {
    "name": "Clustering",
    "author": "<NAME>, <NAME> (Wakeone)",
    "version": (0, 1),
    "blender": (2, 80, 0),
    "description": "Runs a clustering algorithm on the selected objects.",
    "category": "Object",
}
# Names of the supported scikit-learn clustering algorithms; used to build
# the Mode enum shown in the UI panel and dispatched on in Fit().
clustering_algorithm = [
    'DBSCAN',
    'OPTICS',
]
def Fit(context, positions):
    """Run the configured clustering algorithm over *positions*.

    Reads the algorithm choice and its hyper-parameters from
    ``context.scene.clustering_group`` and returns the fitted scikit-learn
    estimator, or None when the configured mode is unknown.
    """
    settings = context.scene.clustering_group
    if settings.Mode == 'DBSCAN':
        return DBSCAN(eps=settings.Eps, min_samples=settings.MinSamples).fit(positions)
    elif settings.Mode == 'OPTICS':
        # Forward MinClusterSize too: the UI panel exposes it for OPTICS but
        # it was previously never passed to the estimator.
        return OPTICS(min_samples=settings.MinSamples, xi=settings.Xi,
                      min_cluster_size=settings.MinClusterSize).fit(positions)
    else:
        print("Unsupported clustering mode.")
        return None
def ProcessClustering(self, context):
    """Cluster the scene's objects by location and join each cluster.

    Driven by the ClusterGroupProperty settings on ``context.scene``:
    optionally restricts to the current selection, optionally buckets the
    objects per material first, runs Fit() on each bucket, then joins every
    resulting cluster into a single object.

    NOTE(review): works through bpy.ops, so it depends on the current
    selection/active-object state and mutates the scene in place.
    """
    if not context.scene.clustering_group.SelectedOnly:
        bpy.ops.object.select_all(action='SELECT')
    # Preprocess the model: unparent (keeping transforms), recenter origins,
    # bake visual transforms, and make object/mesh data single-user so the
    # later join does not corrupt shared data blocks.
    bpy.ops.object.parent_clear(type='CLEAR_KEEP_TRANSFORM')
    bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='BOUNDS')
    bpy.ops.object.visual_transform_apply()
    bpy.ops.object.make_single_user(type='ALL', object=True, obdata=True, material=False, animation=False)
    print("Processing clusters...")
    # Split the objects by material if needed; otherwise one "Default" bucket.
    objects_to_cluster = {}
    if context.scene.clustering_group.ByMaterial:
        objects_to_cluster = SplitByMaterial(bpy.context.selected_objects)
    else:
        objects_to_cluster["Default"] = bpy.context.selected_objects
    # material name -> {cluster label -> [objects]} for every processed bucket.
    processed_clusters = {}
    for material, objcluster in objects_to_cluster.items():
        positions = [obj.location for obj in objcluster]
        # If the bucket has fewer objects than min_samples, clustering cannot
        # run; treat the whole bucket as a single cluster labelled '0'.
        if len(positions) < context.scene.clustering_group.MinSamples:
            x = {}
            x['0'] = objcluster
            processed_clusters[material] = x
            continue
        else:
            db = Fit(context,positions)
        print(str(len(objcluster)) + ' objects with the material ' + material)
        # Group the bucket's objects by the label sklearn assigned them.
        # NOTE(review): label -1 (noise in DBSCAN/OPTICS) becomes its own
        # cluster and gets joined like any other -- confirm that is intended.
        labels = db.labels_
        clusters = {}
        for cn, obj in zip(labels,objcluster):
            if cn not in clusters:
                clusters[cn] = [obj]
            else:
                clusters[cn].append(obj)
        processed_clusters[material] = clusters
        for x,y in clusters.items():
            print("Cluster " + str(x) + " has " + str(len(y)) + " items")
    # Join each cluster: select its members, make the first member the active
    # object, then merge them with object.join.
    for material, objcluster in processed_clusters.items():
        for cn, clusters in objcluster.items():
            bpy.ops.object.select_all(action='DESELECT')
            for obj in clusters:
                obj.select_set(state=True)
            bpy.context.view_layer.objects.active = clusters[0]
            print("Merging " + str(len(clusters)) + " items.")
            bpy.ops.object.join()
def SplitByMaterial(objects):
    """Group *objects* into buckets keyed by their single material's name.

    Objects carrying more than one material slot are reported on stdout and
    left out of the result. Returns a dict mapping material name to the list
    of objects that use it.
    """
    buckets = {}
    print("Total object count: " + str(len(objects)))
    for obj in objects:
        slots = obj.material_slots
        # Multi-material objects are not supported yet; report and skip.
        if len(slots) > 1:
            print(str(len(slots)) + ' materials in ' + obj.name + '. Ignoring.')
            continue
        for mat in slots:
            buckets.setdefault(mat.name, []).append(obj)
    print("\nFound " + str(len(buckets)) + " different materials. \n")
    return buckets
class ClusterGroupProperty(bpy.types.PropertyGroup):
    """Scene-level settings for the clustering operator and panel.

    Attached at ``bpy.types.Scene.clustering_group`` by register().
    """
    # One EnumProperty item per supported algorithm name.
    mode_options = [(value, value, '', idx) for idx, value in enumerate(clustering_algorithm)]
    Mode: bpy.props.EnumProperty(
        items=mode_options,
        name="Clustering mode",
        description="Clustering mode",
        default='DBSCAN'
    )
    # When False, every object in the scene is processed.
    SelectedOnly: bpy.props.BoolProperty(name="Only process selected objects")
    # When True, objects are bucketed per material before clustering.
    ByMaterial: bpy.props.BoolProperty(name="Split clusters by material")
    # min_samples hyper-parameter shared by DBSCAN and OPTICS.
    MinSamples: bpy.props.IntProperty(name="Min samples",default=3)
    # DBSCAN neighborhood radius (eps).
    Eps: bpy.props.FloatProperty(name="EPS",default=0.5)
    # OPTICS steepness threshold (xi).
    Xi: bpy.props.FloatProperty(name="XI",default=0.05)
    # OPTICS minimum cluster size; shown in the panel for OPTICS mode.
    MinClusterSize: bpy.props.FloatProperty(name="Min cluster size",default=0.05)
class OBJECT_OT_ClusteringCore(bpy.types.Operator):
    """Object clustering"""
    # bl_idname is referenced by the panel's operator("object.cluster") button.
    bl_idname = "object.cluster"
    bl_label = "Cluster and merge"
    bl_options = {'REGISTER','UNDO','INTERNAL'}
    def execute(self,context):
        # All the work happens in the module-level ProcessClustering().
        ProcessClustering(self, context)
        return {'FINISHED'}
class CLUSTERING_PT_ClusteringPanel(bpy.types.Panel):
    """UI panel exposing the clustering settings and the run button."""
    bl_label = 'Clustering'
    bl_space_type = 'VIEW_3D'
    bl_region_type = 'TOOLS'

    def draw(self, context):
        props = context.scene.clustering_group
        column = self.layout.column()
        # Settings common to every algorithm.
        for prop_name in ("Mode", "SelectedOnly", "ByMaterial", "MinSamples"):
            column.prop(props, prop_name)
        # Algorithm-specific hyper-parameters.
        if props.Mode == 'DBSCAN':
            column.prop(props, "Eps")
        elif props.Mode == 'OPTICS':
            column.prop(props, "Xi")
            column.prop(props, "MinClusterSize")
        button_row = self.layout.row()
        button_row.scale_y = 3.0
        button_row.operator("object.cluster")
# Every class this add-on contributes to Blender.
classes = (
    ClusterGroupProperty,
    OBJECT_OT_ClusteringCore,
    CLUSTERING_PT_ClusteringPanel,
)


def register():
    """Register every add-on class and attach the settings property group."""
    for cls in classes:
        bpy.utils.register_class(cls)
    bpy.types.Scene.clustering_group = bpy.props.PointerProperty(type=ClusterGroupProperty)


def unregister():
    """Unregister classes in reverse order and detach the settings group."""
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)
    del bpy.types.Scene.clustering_group


if __name__ == "__main__":
    register()
| StarcoderdataPython |
199700 | <reponame>Chrispresso/PyGenoCar
from Box2D import *
from settings import get_boxcar_constant
class Wheel(object):
    """A circular Box2D wheel body.

    Creates a dynamic body with a single circle fixture in *world*. The
    fixture uses group index -1 so wheels sharing that group never collide
    with each other.
    """

    def __init__(self, world: b2World, radius: float, density: float, restitution: float = 0.2):
        # Keep a reference to the world so clone() can build a sibling wheel.
        # (Previously never stored, which made clone() raise AttributeError.)
        self.world = world
        self.radius = radius
        self.density = density
        self.restitution = restitution

        # Create body def
        body_def = b2BodyDef()
        body_def.type = b2_dynamicBody
        body_def.position = b2Vec2(0, 1)
        self.body = world.CreateBody(body_def)

        # Create fixture def + circle for wheel
        fixture_def = b2FixtureDef()
        circle = b2CircleShape()
        circle.radius = self.radius
        fixture_def.shape = circle
        fixture_def.density = self.density
        fixture_def.friction = 10.0  # high friction so the wheel grips the ground
        fixture_def.restitution = self.restitution
        fixture_def.groupIndex = -1  # same negative group -> wheels never collide

        # Create fixture on body
        self.body.CreateFixture(fixture_def)
        self._mass = self.body.mass
        self._torque = 0.0

    @property
    def mass(self):
        """Mass of the wheel body, cached at construction time."""
        return self._mass

    @mass.setter
    def mass(self, value):
        raise Exception('Wheel mass is read-only. If you need to change it, do so through Wheel.body.mass or Wheel._mass')

    @property
    def torque(self):
        """Motor torque currently associated with this wheel."""
        return self._torque

    @torque.setter
    def torque(self, value):
        self._torque = value

    def clone(self) -> 'Wheel':
        """Return a new Wheel with the same physical parameters in the same world."""
        return Wheel(self.world, self.radius, self.density, self.restitution)
5033711 | """Defines the anvil logging module.
"""
__author__ = '<EMAIL>'
from anvil import enums
from anvil import util
class WorkUnit(object):
  """A class used to keep track of work progress.

  WorkUnits track progress with total/complete counts and can be chained in
  parent-child relationships: the total, complete and exception properties
  aggregate over all child units, so an update on a child is visible on
  every ancestor. Once an exception is set on a unit, its status (and that
  of every ancestor) becomes FAILED.

  Change listeners attached via add_change_listener are notified -- through
  listener.update(work_unit, attribute, value) -- whenever total, complete
  or exception is updated on that unit or any of its descendants.
  """

  def __init__(self, name):
    self._complete = None
    self._total = None
    self._waiting = True
    self._exception = None
    self.name = name
    self.parent = None
    self.children = []
    self.listeners = []
    self.start_time = None
    self.end_time = None

  @property
  def complete(self):
    """Number of completed work units, aggregated over all children."""
    complete = sum(child.complete for child in self.children)
    if self._complete:
      complete += self._complete
    return complete

  @complete.setter
  def complete(self, complete):
    """Sets the completed count on this WorkUnit and validates all values.

    Args:
      complete: A number value corresponding to completed units of work.
    """
    self._complete = complete
    self._validate_and_update('complete')

  @property
  def total(self):
    """Total number of work units, aggregated over all children."""
    total = sum(child.total for child in self.children)
    if self._total:
      total += self._total
    return total

  @total.setter
  def total(self, total):
    """Sets the total count on this WorkUnit and validates all values.

    Args:
      total: A number value corresponding to total units of work.
    """
    self._total = total
    self._validate_and_update('total')

  @property
  def exception(self):
    """The exception set on this WorkUnit or any child, or None."""
    for child in self.children:
      if child.exception:
        return child.exception
    return self._exception

  @exception.setter
  def exception(self, exception):
    """Sets an exception on this work unit and validates all values.

    Args:
      exception: A Python exception object.
    """
    self._exception = exception
    self._validate_and_update('exception')

  def get_status(self):
    """Returns the enums.Status of this WorkUnit.

    WAITING until this unit or a descendant is updated; FAILED as soon as an
    exception is set anywhere in the subtree; SKIPPED when both total and
    complete are zero; RUNNING while completed units are fewer than total
    units; SUCCEEDED when they are equal and non-zero.
    """
    if self._is_waiting():
      return enums.Status.WAITING
    elif self.exception is not None:
      return enums.Status.FAILED
    elif self.total == 0 and self.complete == 0:
      return enums.Status.SKIPPED
    # The aggregate properties always return numbers, never None.
    diff = self.total - self.complete
    if diff > 0:
      return enums.Status.RUNNING
    elif diff == 0:
      return enums.Status.SUCCEEDED

  def add_child(self, child):
    """Adds a child WorkUnit whose values aggregate into this one.

    Args:
      child: A child WorkUnit.
    """
    self.children.append(child)
    child.parent = self

  def add_change_listener(self, listener):
    """Adds a change listener to this WorkUnit.

    Listeners receive updates for this WorkUnit and all of its descendants.
    A listener must implement:

      should_listen(work_unit) -> bool: called once here; returning False
        opts the listener out of updates for this unit.
      update(work_unit, attribute, value): called when `attribute` changes
        on a unit, with the unit, the attribute name and the new value.

    Args:
      listener: The listener object to attach.
    """
    if listener.should_listen(self):
      self.listeners.append(listener)

  def _validate_and_update(self, attribute):
    """Validates current values, updates timing state and notifies listeners.

    Records start_time on the first update and end_time once the aggregate
    total equals the aggregate complete count, then propagates the change
    notification to all listeners and up the parent chain.

    Args:
      attribute: Name of the attribute that was updated.

    Raises:
      ValueError: If this unit's own completed count exceeds its own total.
    """
    if (self._total is not None and
        self._complete is not None and
        self._complete > self._total):
      raise ValueError('Complete tasks cannot be more than the total tasks.')
    if not self.start_time:
      self.start_time = util.timer()
    # total/complete are always numeric, so no None-guard is needed here.
    if self.total == self.complete:
      self.end_time = util.timer()
    self._waiting = False
    for listener in self.listeners:
      listener.update(self, attribute, getattr(self, attribute))
    if self.parent:
      self.parent._validate_and_update(attribute)

  def _is_waiting(self):
    """Returns True iff this WorkUnit and all its child units are WAITING."""
    if self._waiting:
      for child in self.children:
        if not child._waiting:
          self._waiting = False
          break
    return self._waiting
class LogSource(object):
  """Buffers and filters log messages by a Verbosity level.

  A LogSource records messages at DEBUG/INFO/WARNING/ERROR severity,
  filtered by its effective Verbosity. Sources can form parent-child
  chains: a child with INHERIT verbosity uses its parent's level, and a
  parent's LogSinks are shared with its children so all child messages
  are received.

  Messages that pass the filter are delivered to every attached LogSink,
  or buffered in buffered_messages until the first sink is attached.
  """

  def __init__(self, verbosity=enums.Verbosity.INHERIT):
    """Initializes the LogSource.

    Args:
      verbosity: An enums.Verbosity value, defaulting to INHERIT. Without a
        parent, INHERIT resolves to NORMAL.
    """
    self._verbosity = verbosity
    self.buffered_messages = []
    self.log_sinks = []
    self.parent = None

  @property
  def verbosity(self):
    """The effective enums.Verbosity level of this LogSource.

    INHERIT resolves to the parent's effective level, or NORMAL when there
    is no parent.
    """
    if self._verbosity == enums.Verbosity.INHERIT:
      if self.parent is not None:
        return self.parent.verbosity
      return enums.Verbosity.NORMAL
    return self._verbosity

  @verbosity.setter
  def verbosity(self, verbosity):
    """Sets the enums.Verbosity level of this LogSource.

    Args:
      verbosity: An enums.Verbosity level.
    """
    self._verbosity = verbosity

  def add_child(self, child):
    """Adds a child LogSource and shares this source's sinks with it.

    Args:
      child: A LogSource that will be a child of this LogSource.
    """
    child.parent = self
    for log_sink in self.log_sinks:
      child.add_log_sink(log_sink)

  def add_log_sink(self, log_sink):
    """Adds a LogSink, replaying any buffered messages to it synchronously.

    Args:
      log_sink: An object with a log(message) method; ignored if already
        attached.
    """
    if log_sink in self.log_sinks:
      return
    self.log_sinks.append(log_sink)
    for message in self.buffered_messages:
      log_sink.log(message)

  def log_debug(self, message, name=None):
    """Logs a message at DEBUG level (recorded only under VERBOSE).

    Args:
      message: A string message to be logged.
      name: A string name representing the source of the message.
    """
    self._log(enums.LogLevel.DEBUG, message, name)

  def log_info(self, message, name=None):
    """Logs a message at INFO level (recorded under VERBOSE or NORMAL).

    Args:
      message: A string message to be logged.
      name: A string name representing the source of the message.
    """
    self._log(enums.LogLevel.INFO, message, name)

  def log_warning(self, message, name=None):
    """Logs a message at WARNING level (recorded under VERBOSE or NORMAL).

    Args:
      message: A string message to be logged.
      name: A string name representing the source of the message.
    """
    self._log(enums.LogLevel.WARNING, message, name)

  def log_error(self, message, name=None):
    """Logs a message at ERROR level (recorded at any verbosity).

    Args:
      message: A string message to be logged.
      name: A string name representing the source of the message.
    """
    self._log(enums.LogLevel.ERROR, message, name)

  def _log(self, level, message, name):
    """Shared implementation behind the public log_* methods.

    Filters by verbosity, prefixes the message with its level tag and
    records it as a (level, timestamp, name, message) tuple.
    """
    if not self._should_log(level):
      return
    message = '[%s] %s' % (enums.log_level_to_string(level), message)
    self._log_internal((level, util.timer(), name, message))

  def _should_log(self, level):
    """Returns True iff `level` passes this source's verbosity filter."""
    # Errors are always shown, regardless of verbosity.
    if level == enums.LogLevel.ERROR:
      return True
    verbosity = self.verbosity
    if verbosity == enums.Verbosity.SILENT:
      return False
    if verbosity == enums.Verbosity.NORMAL:
      return level != enums.LogLevel.DEBUG
    return verbosity == enums.Verbosity.VERBOSE

  def _log_internal(self, message):
    """Delivers a message tuple to all sinks, or buffers it if none exist.

    Args:
      message: A tuple of (level, timestamp, name, formatted message).
    """
    if self.log_sinks:
      for log_sink in self.log_sinks:
        log_sink.log(message)
    else:
      self.buffered_messages.append(message)
class WorkUnitLogSource(LogSource):
  """A LogSource that doubles as a change listener for WorkUnits.

  Attach instances via WorkUnit.add_change_listener; every WorkUnit update
  is turned into DEBUG/INFO log messages on this source.
  """

  def __init__(self, verbosity=enums.Verbosity.INHERIT):
    super(WorkUnitLogSource, self).__init__(verbosity)

  def should_listen(self, work_unit):
    """All work_units should be listened to.

    Logs the attachment at DEBUG and INFO level and always accepts.

    Args:
      work_unit: The WorkUnit this listener is being asked to listen to.

    Returns:
      Always True: every WorkUnit is observed by this LogSource.
    """
    self.log_debug(
        'Adding listener to WorkUnit named \'%s\' with a status of %s.' %
        (work_unit.name, enums.status_to_string(work_unit.get_status())),
        work_unit.name)
    self.log_info(
        '%s: Logging %s' % (
            enums.status_to_string(work_unit.get_status()), work_unit.name),
        work_unit.name)
    return True

  def update(self, work_unit, attribute, value):
    """Receives updates from monitored WorkUnits.

    Logs the raw update at DEBUG level, then a formatted status line at
    INFO level: RUNNING units get a "complete of total" progress line, all
    other statuses get a simple "STATUS: name" line.

    Args:
      work_unit: The WorkUnit that was updated.
      attribute: The attribute whose value was updated.
      value: The new attribute value.
    """
    self.log_debug(
        'Received an update - WorkUnit: %s, Attr: %s, Value: %s' % (
            work_unit.name, attribute, value), work_unit.name)
    if work_unit.get_status() == enums.Status.RUNNING:
      running = enums.status_to_string(work_unit.get_status())
      self.log_info(
          '%s: %s - %s of %s' % (
              running, work_unit.name, work_unit.complete, work_unit.total),
          work_unit.name)
    else:
      status_string = enums.status_to_string(work_unit.get_status())
      self.log_info(
          '%s: %s' % (status_string, work_unit.name), work_unit.name)
| StarcoderdataPython |
11398527 | <filename>JellyBot/api/responses/ar/__init__.py
from .add import AutoReplyAddResponse, AutoReplyAddExecodeResponse
from .validate import ContentValidationResponse
from .tag import AutoReplyTagPopularityResponse
| StarcoderdataPython |
8197846 | <gh_stars>0
import lxml.objectify
import lxml.etree
from flexi.xml import serializer_registry
def xml_element_injector(element):
    """Build a class decorator that registers the class as an XML serializer
    and attaches *element* to it as ``xml_element``."""
    def decorate(target_cls):
        # Register first, then annotate the class with its template element.
        serializer_registry.xml_serializers.append(target_cls)
        target_cls.xml_element = element
        return target_cls
    return decorate
# Class decorators
def xml_element_string(element_string):
    """Class decorator factory: parse *element_string* with lxml and delegate
    to :func:`xml_element_injector`."""
    return xml_element_injector(lxml.etree.fromstring(element_string))
def xml_element(tag, **attributes):
    """Class decorator factory: build an un-annotated lxml element named *tag*
    with the given attributes and delegate to :func:`xml_element_injector`."""
    factory = getattr(lxml.objectify.ElementMaker(annotate=False), tag)
    return xml_element_injector(factory(**attributes))
def python_type(_python_type):
    """Build a class decorator that tags the class with *_python_type* and
    records it in the tree-serializer registry under that type."""
    def decorate(target_cls):
        target_cls.python_type = _python_type
        serializer_registry.tree_serializers[_python_type] = target_cls
        return target_cls
    return decorate
| StarcoderdataPython |
5031109 | <filename>tests/test_integration_workflows.py
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import shutil
import sys
import tempfile
import unittest
from glob import glob
import nibabel as nib
import numpy as np
import torch
from ignite.metrics import Accuracy
import monai
from monai.data import create_test_image_3d
from monai.engines import SupervisedEvaluator, SupervisedTrainer
from monai.handlers import (
CheckpointLoader,
CheckpointSaver,
LrScheduleHandler,
MeanDice,
SegmentationSaver,
StatsHandler,
TensorBoardImageHandler,
TensorBoardStatsHandler,
ValidationHandler,
)
from monai.inferers import SimpleInferer, SlidingWindowInferer
from monai.transforms import (
Activationsd,
AsChannelFirstd,
AsDiscreted,
Compose,
KeepLargestConnectedComponentd,
LoadNiftid,
RandCropByPosNegLabeld,
RandRotate90d,
ScaleIntensityd,
ToTensord,
)
from monai.utils import set_determinism
from tests.utils import skip_if_quick
def run_training_test(root_dir, device="cuda:0", amp=False):
    """Run a short supervised 3D segmentation training on synthetic data.

    Args:
        root_dir: directory holding img*.nii.gz / seg*.nii.gz pairs; also the
            destination for TensorBoard logs and checkpoints.
        device: torch device string used for training.
        amp: when True, run with automatic mixed precision.

    Returns:
        The best key validation metric (mean Dice) observed by the evaluator.
    """
    images = sorted(glob(os.path.join(root_dir, "img*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    # first 20 image/label pairs train, the last 20 validate
    train_files = [{"image": img, "label": seg} for img, seg in zip(images[:20], segs[:20])]
    val_files = [{"image": img, "label": seg} for img, seg in zip(images[-20:], segs[-20:])]
    # define transforms for image and segmentation
    train_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            RandCropByPosNegLabeld(
                keys=["image", "label"], label_key="label", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4
            ),
            RandRotate90d(keys=["image", "label"], prob=0.5, spatial_axes=[0, 2]),
            ToTensord(keys=["image", "label"]),
        ]
    )
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            ToTensord(keys=["image", "label"]),
        ]
    )
    # create a training data loader
    train_ds = monai.data.CacheDataset(data=train_files, transform=train_transforms, cache_rate=0.5)
    # use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
    train_loader = monai.data.DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4)
    # create a validation data loader
    val_ds = monai.data.CacheDataset(data=val_files, transform=val_transforms, cache_rate=1.0)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    loss = monai.losses.DiceLoss(sigmoid=True)
    opt = torch.optim.Adam(net.parameters(), 1e-3)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(opt, step_size=2, gamma=0.1)
    # post-processing for predictions: sigmoid -> threshold -> largest component
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        TensorBoardStatsHandler(log_dir=root_dir, output_transform=lambda x: None),
        TensorBoardImageHandler(
            log_dir=root_dir, batch_transform=lambda x: (x["image"], x["label"]), output_transform=lambda x: x["pred"]
        ),
        CheckpointSaver(save_dir=root_dir, save_dict={"net": net}, save_key_metric=True),
    ]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        amp=True if amp else False,
    )
    train_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    train_handlers = [
        LrScheduleHandler(lr_scheduler=lr_scheduler, print_lr=True),
        # run validation every 2 epochs
        ValidationHandler(validator=evaluator, interval=2, epoch_level=True),
        StatsHandler(tag_name="train_loss", output_transform=lambda x: x["loss"]),
        TensorBoardStatsHandler(log_dir=root_dir, tag_name="train_loss", output_transform=lambda x: x["loss"]),
        CheckpointSaver(save_dir=root_dir, save_dict={"net": net, "opt": opt}, save_interval=2, epoch_level=True),
    ]
    trainer = SupervisedTrainer(
        device=device,
        max_epochs=5,
        train_data_loader=train_loader,
        network=net,
        optimizer=opt,
        loss_function=loss,
        inferer=SimpleInferer(),
        post_transform=train_post_transforms,
        key_train_metric={"train_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        train_handlers=train_handlers,
        amp=True if amp else False,
    )
    trainer.run()
    return evaluator.state.best_metric
def run_inference_test(root_dir, model_file, device="cuda:0", amp=False):
    """Run sliding-window inference with a saved checkpoint.

    Args:
        root_dir: directory with im*.nii.gz / seg*.nii.gz pairs; segmentation
            outputs are also written back under this directory.
        model_file: checkpoint path loaded into the UNet before evaluation.
        device: torch device string to evaluate on.
        amp: when True, run with automatic mixed precision.

    Returns:
        The best key validation metric (mean Dice) of the evaluation run.
    """
    images = sorted(glob(os.path.join(root_dir, "im*.nii.gz")))
    segs = sorted(glob(os.path.join(root_dir, "seg*.nii.gz")))
    val_files = [{"image": img, "label": seg} for img, seg in zip(images, segs)]
    # define transforms for image and segmentation
    val_transforms = Compose(
        [
            LoadNiftid(keys=["image", "label"]),
            AsChannelFirstd(keys=["image", "label"], channel_dim=-1),
            ScaleIntensityd(keys=["image", "label"]),
            ToTensord(keys=["image", "label"]),
        ]
    )
    # create a validation data loader
    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
    val_loader = monai.data.DataLoader(val_ds, batch_size=1, num_workers=4)
    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    ).to(device)
    # post-processing: sigmoid -> threshold -> keep largest connected component
    val_post_transforms = Compose(
        [
            Activationsd(keys="pred", sigmoid=True),
            AsDiscreted(keys="pred", threshold_values=True),
            KeepLargestConnectedComponentd(keys="pred", applied_labels=[1]),
        ]
    )
    val_handlers = [
        StatsHandler(output_transform=lambda x: None),
        CheckpointLoader(load_path=f"{model_file}", load_dict={"net": net}),
        # persist each predicted segmentation next to its source image
        SegmentationSaver(
            output_dir=root_dir,
            batch_transform=lambda batch: batch["image_meta_dict"],
            output_transform=lambda output: output["pred"],
        ),
    ]
    evaluator = SupervisedEvaluator(
        device=device,
        val_data_loader=val_loader,
        network=net,
        inferer=SlidingWindowInferer(roi_size=(96, 96, 96), sw_batch_size=4, overlap=0.5),
        post_transform=val_post_transforms,
        key_val_metric={
            "val_mean_dice": MeanDice(include_background=True, output_transform=lambda x: (x["pred"], x["label"]))
        },
        additional_metrics={"val_acc": Accuracy(output_transform=lambda x: (x["pred"], x["label"]))},
        val_handlers=val_handlers,
        amp=True if amp else False,
    )
    evaluator.run()
    return evaluator.state.best_metric
class IntegrationWorkflows(unittest.TestCase):
    """End-to-end regression test of the training + inference engine workflows."""

    def setUp(self):
        """Generate 40 synthetic image/segmentation NIfTI pairs in a temp dir."""
        set_determinism(seed=0)
        self.data_dir = tempfile.mkdtemp()
        for i in range(40):
            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
            n = nib.Nifti1Image(im, np.eye(4))
            nib.save(n, os.path.join(self.data_dir, f"img{i:d}.nii.gz"))
            n = nib.Nifti1Image(seg, np.eye(4))
            nib.save(n, os.path.join(self.data_dir, f"seg{i:d}.nii.gz"))
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu:0")
        monai.config.print_config()
        logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    def tearDown(self):
        """Reset determinism and delete the synthetic dataset."""
        set_determinism(seed=None)
        shutil.rmtree(self.data_dir)

    @skip_if_quick
    def test_training(self):
        """Train then infer over several rounds; the third round uses AMP.

        Compares best/inference metrics and the mean of each saved segmentation
        against recorded reference values, and checks that the two
        deterministic non-AMP rounds reproduce identical results.
        """
        repeated = []
        # AMP requires torch >= 1.6; only then run a third (AMP) round
        test_rounds = 3 if monai.config.get_torch_version_tuple() >= (1, 6) else 2
        for i in range(test_rounds):
            set_determinism(seed=0)
            repeated.append([])
            best_metric = run_training_test(self.data_dir, device=self.device, amp=(i == 2))
            print("best metric", best_metric)
            if i == 2:
                np.testing.assert_allclose(best_metric, 0.9219996750354766, rtol=1e-2)
            else:
                np.testing.assert_allclose(best_metric, 0.921965891122818, rtol=1e-2)
            repeated[i].append(best_metric)
            model_file = sorted(glob(os.path.join(self.data_dir, "net_key_metric*.pt")))[-1]
            infer_metric = run_inference_test(self.data_dir, model_file, device=self.device, amp=(i == 2))
            print("infer metric", infer_metric)
            # check inference properties
            if i == 2:
                np.testing.assert_allclose(infer_metric, 0.9217855930328369, rtol=1e-2)
            else:
                np.testing.assert_allclose(infer_metric, 0.9217526227235794, rtol=1e-2)
            repeated[i].append(infer_metric)
            output_files = sorted(glob(os.path.join(self.data_dir, "img*", "*.nii.gz")))
            # reference mean intensities for each of the 40 saved segmentations
            # (one table for the AMP round, one for the non-AMP rounds)
            if i == 2:
                sums = [
                    0.14183807373046875,
                    0.15151405334472656,
                    0.13811445236206055,
                    0.1336650848388672,
                    0.1842341423034668,
                    0.16353750228881836,
                    0.14104795455932617,
                    0.16643333435058594,
                    0.15668964385986328,
                    0.1764383316040039,
                    0.16112232208251953,
                    0.1641840934753418,
                    0.14401578903198242,
                    0.11075973510742188,
                    0.16075706481933594,
                    0.19603967666625977,
                    0.1743607521057129,
                    0.05361223220825195,
                    0.19009971618652344,
                    0.19875097274780273,
                    0.19498729705810547,
                    0.2027440071105957,
                    0.16035127639770508,
                    0.13188838958740234,
                    0.15143728256225586,
                    0.1370086669921875,
                    0.22630071640014648,
                    0.16111421585083008,
                    0.14713764190673828,
                    0.10443782806396484,
                    0.11977195739746094,
                    0.13068008422851562,
                    0.11225223541259766,
                    0.15175437927246094,
                    0.1594991683959961,
                    0.1894702911376953,
                    0.21605825424194336,
                    0.17748403549194336,
                    0.18474626541137695,
                    0.03627157211303711,
                ]
            else:
                sums = [
                    0.14183568954467773,
                    0.15139484405517578,
                    0.13803958892822266,
                    0.13356733322143555,
                    0.18455982208251953,
                    0.16363763809204102,
                    0.14090299606323242,
                    0.16649341583251953,
                    0.15651702880859375,
                    0.17655181884765625,
                    0.1611647605895996,
                    0.1644759178161621,
                    0.14383649826049805,
                    0.11055231094360352,
                    0.16080236434936523,
                    0.19629907608032227,
                    0.17441368103027344,
                    0.053577423095703125,
                    0.19043731689453125,
                    0.19904851913452148,
                    0.19525957107543945,
                    0.20304203033447266,
                    0.16030073165893555,
                    0.13170528411865234,
                    0.15118885040283203,
                    0.13686418533325195,
                    0.22668886184692383,
                    0.1611466407775879,
                    0.1472468376159668,
                    0.10427331924438477,
                    0.11962461471557617,
                    0.1305699348449707,
                    0.11204767227172852,
                    0.15171241760253906,
                    0.1596231460571289,
                    0.18976259231567383,
                    0.21649408340454102,
                    0.17761707305908203,
                    0.1851673126220703,
                    0.036365509033203125,
                ]
            for (output, s) in zip(output_files, sums):
                ave = np.mean(nib.load(output).get_fdata())
                np.testing.assert_allclose(ave, s, rtol=1e-2)
                repeated[i].append(ave)
        # the two deterministic non-AMP rounds must match exactly
        np.testing.assert_allclose(repeated[0], repeated[1])
# Allow running this integration test module directly.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
323187 | # Copyright 2017: Orange
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import utils
from rally.common import validation
from rally.task import context
from rally_openstack.cleanup import manager as resource_manager
from rally_openstack import consts
from rally_openstack.scenarios.neutron import utils as neutron_utils
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@context.configure(name="router", platform="openstack", order=351)
class Router(context.Context):
    """Create networking resources.

    This creates router for all tenants.
    """

    # JSON schema validating the context configuration options below
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "routers_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "admin_state_up ": {
                "description": "A human-readable description for the resource",
                "type": "boolean",
            },
            "external_gateway_info": {
                "description": "The external gateway information .",
                "type": "object",
                "properties": {
                    "network_id": {"type": "string"},
                    "enable_snat": {"type": "boolean"}
                },
                "additionalProperties": False
            },
            "network_id": {
                "description": "Network ID",
                "type": "string"
            },
            "external_fixed_ips": {
                "description": "Ip(s) of the external gateway interface.",
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "ip_address": {"type": "string"},
                        "subnet_id": {"type": "string"}
                    },
                    "additionalProperties": False,
                }
            },
            "distributed": {
                "description": "Distributed router. Require dvr extension.",
                "type": "boolean"
            },
            "ha": {
                "description": "Highly-available router. Require l3-ha.",
                "type": "boolean"
            },
            "availability_zone_hints": {
                "description": "Require router_availability_zone extension.",
                "type": "boolean"
            }
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "routers_per_tenant": 1,
    }

    def setup(self):
        """Create the configured number of routers for every tenant."""
        kwargs = {}
        # forward only the optional router attributes the user actually set
        parameters = ("admin_state_up", "external_gateway_info", "network_id",
                      "external_fixed_ips", "distributed", "ha",
                      "availability_zone_hints")
        for parameter in parameters:
            if parameter in self.config:
                kwargs[parameter] = self.config[parameter]
        for user, tenant_id in (utils.iterate_per_tenants(
                self.context.get("users", []))):
            self.context["tenants"][tenant_id]["routers"] = []
            scenario = neutron_utils.NeutronScenario(
                context={"user": user, "task": self.context["task"],
                         "owner_id": self.context["owner_id"]}
            )
            for i in range(self.config["routers_per_tenant"]):
                router = scenario._create_router(kwargs)
                self.context["tenants"][tenant_id]["routers"].append(router)

    def cleanup(self):
        """Delete every router created by this context."""
        resource_manager.cleanup(
            names=["neutron.router"],
            users=self.context.get("users", []),
            superclass=neutron_utils.NeutronScenario,
            task_id=self.get_owner_id())
| StarcoderdataPython |
6698586 | <gh_stars>1-10
import discord
from discord.ext import commands
from discord import app_commands
from discord.app_commands import Choice, choices, Group, checks
import aiohttp
class webhook(commands.Cog, name="webhook", description="Manage the servers webhooks"):
    """Cog exposing slash commands for creating and sending through webhooks."""

    def __init__(self, client):
        self.client = client

    # /webhook slash-command group: guild-only and hidden by default permissions
    slash_webhook = Group(
        name="webhook",
        guild_only=True,
        default_permissions=False,
        description="Manage webhooks in server"
    )

    @slash_webhook.command(
        name="create",
        description="Create a webhook in a channel"
    )
    @checks.has_permissions(administrator=True)
    async def slash_webhook_create(
        self,
        interaction,
        name:str,
        avatar:discord.Attachment=None,
        channel:discord.TextChannel=None
    ):
        """Create a webhook in *channel* (defaults to the invoking channel).

        NOTE(review): the *avatar* parameter is accepted but never used below
        — confirm whether it should be passed to create_webhook.
        """
        channel = channel or interaction.channel
        webhook = await channel.create_webhook(name=name)
        await interaction.response.send_message(
            f"**Webhook was successfully created in {channel.mention}**\n`{webhook.url}`\n\n*Remember to not shere this with anyone*",
            ephemeral=True
        )

    @slash_webhook.command(
        name="send",
        description="Send message with a webhook"
    )
    @checks.has_permissions(administrator=True)
    async def slash_webhook_send(
        self,
        interaction,
        webhook_url:str,
        content:str
    ):
        """Send *content* through the webhook located at *webhook_url*."""
        async with aiohttp.ClientSession() as session:
            webhook = discord.Webhook.from_url(webhook_url, session=session)
            message = await webhook.send(content=content, wait=True)
            await session.close()
        await interaction.response.send_message(
            f"Sent message <[here]({message.jump_url})>.\n{content}",
            ephemeral=True
        )
# Extension entry point used by discord.py to register this cog.
async def setup(client):
    await client.add_cog(webhook(client))
11212726 | """
Mako based Configuration Generator
"""
import logging
import re
from mako.exceptions import CompileException, SyntaxException
from mako.template import Template
logger = logging.getLogger("confgen")
class TemplateSyntaxException(Exception):
    """Raised when rendering of the mako template fails.

    Subclasses ``Exception`` (not ``BaseException``) so that generic
    ``except Exception`` handlers treat it as an ordinary, recoverable error;
    ``BaseException`` is reserved for system-exiting exceptions such as
    ``KeyboardInterrupt`` and ``SystemExit``.
    """
    pass
class MakoConfigGenerator:
    """Config generator based on the Mako template engine.

    Template variables of the form ``${ name }`` are discovered automatically
    from the template text and can be assigned values before rendering.
    """
    # matches "${ name }" style variables; named group "name" holds the identifier
    _variable_name_regex = r"(\$\{[ ]*(?P<name>[a-zA-Z0-9_]+)[ ]*\})"
    # template content
    _template_string = None
    # variable name -> value mapping used when rendering
    _template_variable_dict = dict()

    @property
    def template_string(self):
        """The raw template text."""
        return self._template_string

    @template_string.setter
    def template_string(self, value):
        self._template_string = value
        # re-discover the variables whenever the template changes
        self._parse_variable_from_template_string()

    @property
    def template_variables(self):
        """Sorted list of variable names found in the template."""
        return sorted(self._template_variable_dict.keys())

    def __init__(self, template_string=""):
        if not isinstance(template_string, str):
            raise ValueError("template string must be a string type")
        # assigning through the property already triggers variable parsing,
        # so the former second, redundant parse call was dropped
        self.template_string = template_string

    def _parse_variable_from_template_string(self):
        """Rebuild the variable dictionary from the current template string."""
        self._template_variable_dict = dict()
        if self.template_string:
            for var in re.findall(self._variable_name_regex, self.template_string):
                logger.debug("found variable %s" % var[1])
                self.add_variable(var[1])

    def add_variable(self, variable):
        """Create *variable* with an empty value.

        :param variable: name of the template variable
        """
        self.set_variable_value(variable, "")

    def set_variable_value(self, variable, value=""):
        """Change the value of *variable*, creating it if it doesn't exist.

        :param variable: name of the template variable
        :param value: value substituted when rendering
        """
        self._template_variable_dict[variable] = value

    def get_variable_value(self, variable):
        """Return the current value of *variable*.

        :param variable: name of the template variable
        :raises KeyError: if the variable is unknown
        """
        return self._template_variable_dict[variable]

    def get_rendered_result(self, remove_empty_lines=True):
        """Render the template with the current variable values.

        :param remove_empty_lines: drop blank lines from the rendered output
        :raises TemplateSyntaxException: if mako fails to compile or render
        :return: the rendered configuration as a string
        """
        try:
            result = Template(self.template_string).render(**self._template_variable_dict)
        except SyntaxException as ex:
            msg = "Template Syntax error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg) from ex
        except CompileException as ex:
            msg = "Template Compile error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg) from ex
        except AttributeError as ex:
            msg = "Template Attribute error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg) from ex
        except Exception as ex:
            # was mislabeled "Template Attribute error" (copy/paste slip)
            msg = "Template error: %s" % str(ex)
            logger.error(msg, exc_info=True)
            raise TemplateSyntaxException(msg) from ex
        # remove empty lines
        if remove_empty_lines:
            # NOTE: keeps the historical quirk that the trailing newline is only
            # omitted when the rendered output contained no blank lines at all
            lines = result.splitlines()
            result = ""
            counter = 1
            for line in lines:
                if line != "":
                    result += line
                    if len(lines) != counter:
                        result += "\n"
                    counter += 1
        return result
| StarcoderdataPython |
4884764 | <gh_stars>100-1000
from textbox.evaluator.base_evaluator import *
| StarcoderdataPython |
4833528 | __all__ = ('VERSION',)
VERSION = '0.3'
| StarcoderdataPython |
11274431 | <reponame>KonikaChaurasiya-GSLab/j2lint
"""statement.py - Class and variables for jinja statements.
"""
import re
JINJA_STATEMENT_TAG_NAMES = [
('for', 'else', 'endfor'),
('if', 'elif', 'else', 'endif'),
]
class JinjaStatement:
    """Represents a single jinja statement extracted from a template.

    ``line`` is expected to be a tuple of
    ``(text, start_line_no, end_line_no, start_delimeter, end_delimeter)``.
    """
    # class-level defaults; every instance overwrites these in __init__
    begin = None
    words = []

    def __init__(self, line):
        # raw string avoids the invalid "\s" escape warning on modern Python 3
        whitespaces = re.findall(r'\s*', line[0])
        # column at which the statement text starts (width of leading whitespace)
        self.begin = len(whitespaces[0])
        self.line = line[0]
        self.words = line[0].split()
        self.start_line_no = line[1]
        self.end_line_no = line[2]
        self.start_delimeter = line[3]
        self.end_delimeter = line[4]
9668500 | <filename>core/migrations/0003_pontoturistico_attraction_list.py
# Generated by Django 2.2.5 on 2019-09-14 22:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the many-to-many ``attraction_list`` field to PontoTuristico."""

    dependencies = [
        ('attractions', '0001_initial'),
        ('core', '0002_auto_20190914_2202'),
    ]
    operations = [
        migrations.AddField(
            model_name='pontoturistico',
            name='attraction_list',
            field=models.ManyToManyField(to='attractions.Attraction'),
        ),
    ]
| StarcoderdataPython |
5173508 | <filename>project_2/launch/bringup_thebot.launch.py
from ament_index_python.packages import get_package_share_path
import launch
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
from launch_ros.parameter_descriptions import ParameterValue
from launch_ros.actions import Node
from launch.substitutions import Command, LaunchConfiguration
from launch.launch_description_sources import PythonLaunchDescriptionSource
def generate_launch_description():
    """Assemble the bringup launch description for "the bot".

    Starts the robot/joint state publishers, the camera, odometry and IMU
    publisher nodes, an EKF from robot_localization, and includes the RPLidar
    driver launch file.
    """
    pkg_share = get_package_share_path("project_2")
    default_model_path = pkg_share / "urdf/the_bot.urdf"
    rplidar_pkg = get_package_share_path("rplidar_ros2")
    # launch argument: toggle simulated clock
    sim_time_arg = DeclareLaunchArgument(
        name="use_sim_time",
        default_value="false",
        choices=["true", "false"],
        description="Flag to enable use simulation time",
    )
    # launch argument: path to the robot model description
    model_arg = DeclareLaunchArgument(
        name="model",
        default_value=str(default_model_path),
        description="Absolute path to robot urdf file",
    )
    # run xacro on the chosen model to produce the URDF string at launch time
    robot_description = ParameterValue(
        Command(["xacro ", LaunchConfiguration("model")]), value_type=str
    )
    robot_state_publisher_node = Node(
        package="robot_state_publisher",
        executable="robot_state_publisher",
        parameters=[
            {
                "use_sim_time": LaunchConfiguration("use_sim_time"),
                "robot_description": robot_description,
            }
        ],
    )
    joint_state_publisher_node = Node(
        package="joint_state_publisher",
        executable="joint_state_publisher",
        name="joint_state_publisher",
    )
    # webcam image publisher from the cv_basics package
    camera_pub_node = Node(
        package="cv_basics",
        executable="img_publisher",
        name="webcam_pub",
    )
    odom_pub_node = Node(
        package="project_2",
        executable="odom_publisher",
        name="odom_publisher",
    )
    imu_pub_node = Node(
        package="project_2",
        executable="imu_publisher",
        name="imu_pub",
    )
    # EKF sensor fusion configured from configs/ekf.yaml
    robot_localization_node = Node(
        package="robot_localization",
        executable="ekf_node",
        name="ekf_filter_node",
        output="screen",
        parameters=[
            str(pkg_share / "configs/ekf.yaml"),
            {"use_sim_time": LaunchConfiguration("use_sim_time")},
        ],
    )
    # include the RPLidar driver launch file, pinning its frame id
    launch_rplidar = IncludeLaunchDescription(
        PythonLaunchDescriptionSource(
            str(rplidar_pkg / "launch/rplidar_launch.py")
        ),
        launch_arguments={
            "frame_id": "lidar_link"
        }.items()
    )
    return launch.LaunchDescription(
        [
            sim_time_arg,
            model_arg,
            odom_pub_node,
            camera_pub_node,
            imu_pub_node,
            joint_state_publisher_node,
            robot_state_publisher_node,
            robot_localization_node,
            launch_rplidar,
        ]
    )
| StarcoderdataPython |
1849428 | import numpy as np
from scipy import interpolate
##
# filter a list given indices
# @param alist a list
# @param indices indices in that list to select
def filter(alist, indices):
    """Return the elements of *alist* found at the given *indices*.

    NOTE: intentionally keeps the historical name even though it shadows the
    ``filter`` builtin inside this module.
    """
    return [alist[position] for position in indices]
##
# Given a list of 1d time arrays, find the sequence that started first and
# subtract all sequences from its first time recording.
#
# @param list_of_time_arrays a list of 1d arrays
# @return list_of_time_arrays adjusted so that time arrays would start at 0
def equalize_times(list_of_time_arrays):
    """Shift a set of 1xN time arrays onto a common clock.

    Finds the sequence that started first and subtracts its first recorded
    time from every array, so the earliest sequence begins at 0.

    @param list_of_time_arrays a list of 1xN arrays (first row holds times)
    @return (adjusted_arrays, min_start, max_end): the shifted arrays plus the
            earliest start time and latest end time over all inputs
    """
    start_times = [tarray[0, 0] for tarray in list_of_time_arrays]
    end_times = [tarray[0, -1] for tarray in list_of_time_arrays]
    min_start = np.min(start_times)
    max_end = np.max(end_times)
    adjusted_list_of_time_arrays = [tarray - min_start
                                    for tarray in list_of_time_arrays]
    return adjusted_list_of_time_arrays, min_start, max_end
##
# calc dx/dt
# @param t matrix 1xn
# @param x matrix mxn
def gradient(t, x):
    """Central-difference estimate of dx/dt.

    @param t 1xn array of sample times
    @param x mxn array of samples
    @return mxn array of derivatives; the first and last interior columns are
            duplicated to restore the full width
    """
    delta_x = x[:, 2:] - x[:, 0:-2]
    delta_t = t[0, 2:] - t[0, 0:-2]
    interior = np.multiply(delta_x, 1 / delta_t)
    # pad by replicating the edge derivative columns
    return np.column_stack((interior[:, 0], interior, interior[:, -1]))
##
# 1D interpolation
#
# @param x 1xn mat x to interpolate from
# @param y 1xn mat y to interpolate from
# @param xquery 1xn mat of query x's
def interpolate_1d(x, y, xquery):
    """1-D linear interpolation with clamped extrapolation.

    @param x 1xn matrix of sample x's
    @param y 1xn matrix of sample y's
    @param xquery 1xn matrix of query x's
    @return interpolated values at xquery; queries outside [min(x), max(x)]
            clamp to the boundary y values

    NOTE: the previous Python-2 ``except ValueError`` handler (which also
    referenced an un-imported ``pdb``) was removed; ValueError now propagates.
    """
    x = x.A1
    y = y.A1
    xquery = xquery.A1
    minx = np.min(x)
    minx_query = np.min(xquery)
    maxx = np.max(x)
    maxx_querry = np.max(xquery)
    # pad the sample range so out-of-range queries clamp to the edge values
    if minx_query <= minx:
        x = np.concatenate((np.array([minx_query - .01]), x))
        y = np.concatenate((np.array([y[0]]), y))
    if maxx <= maxx_querry:
        x = np.concatenate((x, np.array([maxx_querry + .01])))
        y = np.concatenate((y, np.array([y[-1]])))
    f = interpolate.interp1d(x, y)
    return f(xquery)
##
# Given a histogram with params, calculate
def histogram_get_bin_numb(n, min_index, bin_size, nbins):
    """Map value *n* onto a bin index; the top edge folds into the last bin."""
    bin_numb = int(np.floor((n - min_index) / bin_size))
    return nbins - 1 if bin_numb == nbins else bin_numb
##
#
#
# @param index_list_list a list of list of indices to histogram by
# @param elements_list_list a list of list of elements to place in histogram bins
# @param bin_size size of bins in index_list_list units
# @param min_index optional argument for mininum index to create histogram over
# @param max_index optional argument for maximum index to create histogram over
def histogram(index_list_list, elements_list_list, bin_size, min_index=None, max_index=None):
    """Bin elements by their associated index values.

    @param index_list_list one list of index values per trial
    @param elements_list_list one "stream" per entry; each stream holds one
           list of elements per trial, parallel to index_list_list
    @param bin_size size of bins in index_list_list units
    @param min_index optional minimum index to create the histogram over
    @param max_index optional maximum index to create the histogram over
    @return (bins, edges) where bins[b] is a list of per-stream element lists
            and edges are the bin start positions
    """
    if min_index is None:
        min_index = np.min(np.concatenate(index_list_list))
    if max_index is None:
        max_index = np.max(np.concatenate(index_list_list))
    index_range = (max_index - min_index)
    nbins = int(np.ceil(index_range / bin_size))
    bins = []
    for i in range(nbins):
        bins.append([])
    #Each slice contains the data for one trial, idx is the trial number
    for trial_number, element_list_slice in enumerate(zip(*elements_list_list)):
        #Iterate by using the length of the first set of data in the given trial
        for i in range(len(element_list_slice[0])):
            bin_numb = histogram_get_bin_numb(index_list_list[trial_number][i], min_index, bin_size, nbins)
            elements = [el_list[i] for el_list in element_list_slice]
            # out-of-range samples are dropped
            if bin_numb < 0 or bin_numb > nbins:
                continue
            bins[bin_numb].append(elements)
    return bins, np.arange(min_index, max_index, bin_size)
##
# smooth the data using a window with requested size.
#
# This method is based on the convolution of a scaled window with the signal.
# The signal is prepared by introducing reflected copies of the signal
# (with the window size) in both ends so that transient parts are minimized
# in the begining and end part of the output signal.
#
# output:
# the smoothed signal
#
# example:
#
# t=linspace(-2,2,0.1)
# x=sin(t)+randn(len(t))*0.1
# y=smooth(x)
#
# see also:
#
# numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
# scipy.signal.lfilter
#
# Copied from http://www.scipy.org/Cookbook/SignalSmooth
#
# @param x the input signal
# @param window_len the dimension of the smoothing window; should be an odd integer
# @param window the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
# flat window will produce a moving average smoothing.
# @return the smoothed signal function
def signal_smooth(x, window_len=11, window='hamming'):
    """Smooth a 1-D signal by convolving it with a scaled window.

    The signal is padded with reflected copies at both ends so transients are
    minimized at the boundaries (http://www.scipy.org/Cookbook/SignalSmooth).

    @param x 1-D input signal
    @param window_len dimension of the smoothing window; should be odd
    @param window one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman';
           'flat' produces a moving average
    @return the smoothed signal, same length as x
    @raises ValueError on non-1-D input, too-short input, or unknown window
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window is on of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # reflect the signal at both ends before convolving
    s = np.r_[x[window_len:1:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # look the window function up on the numpy module instead of eval()
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode='same')
    return y[window_len - 1:-window_len + 1]
##
# Returns the variance of the series x given mean function y
# over a window of size window_len.
# @param x the original signal
# @param y the smoothed signal function
# @param window_len size of the window to calculate variances over
# @return the variance function
def signal_variance(x, y, window_len=10):
    """Variance of series *x* around the mean function *y* in a sliding window.

    @param x the original signal
    @param y the smoothed (mean) signal, same length as x
    @param window_len size of the window variances are computed over
    @return list of variances; NOTE it contains len(x) + 1 entries because the
            last value is duplicated (kept for backward compatibility)
    @raises ValueError when x and y differ in length

    NOTE: converted from Python 2 — ``raise`` statement syntax fixed and the
    half-window now uses integer division (``//``) so slicing keeps working.
    """
    if len(x) != len(y):
        raise ValueError("Must have same length")
    variances = []
    for i in range(len(x)):
        total = 0.
        lo = max(i - window_len // 2, 0)
        hi = min(i + window_len // 2, len(x))
        for xval in x[lo:hi]:
            total += (xval - y[i]) ** 2
        variances.append(total / (hi - lo))
    # duplicate the final value (historical behavior relied upon by callers)
    variances.append(variances[-1])
    return variances
##
# TODO docs
# Returns the variance of the series x given mean function y
# over a window of size window_len.
# @param x the original signal
# @param y the smoothed signal function
# @param window_len size of the window to calculate variances over
# @return the variance function
def signal_list_variance(x_list, means, window_len=10, num_samples=30, resample=1):
    """Estimate the spread of several signals around a common mean function.

    For every (resampled) index of *means*, draws samples from each signal in
    *x_list* within a window and accumulates their squared deviation from the
    mean; each output entry is sqrt(sum of squares) / sample count.

    @param x_list list of signals (sequences of floats)
    @param means the mean function the signals are compared against
    @param window_len size of the window to sample within
    @param num_samples total number of samples to draw across all signals
    @param resample step between evaluated indices of means
    @return np.array of deviation estimates

    NOTE: converted from Python 2 — the divisions that produce slice steps now
    use ``//`` (under Python 3 ``/`` yields floats, which break slicing), and
    windows too narrow to yield any samples produce 0.0 instead of dividing
    by zero.
    """
    variances = []
    num_samples_in_mean = num_samples // len(x_list)
    for i in range(0, len(means), resample):
        cursum = 0.
        cura = max(i - window_len // 2, 0)
        curb = min(i + window_len // 2, len(means))
        step = (curb - cura) // num_samples_in_mean
        n = 0
        for x in x_list:
            if cura >= len(x):
                continue
            ccurb = curb
            cstep = step
            if ccurb >= len(x):
                ccurb = len(x)
                cstep = (ccurb - cura) // num_samples_in_mean
            if cstep > 0:
                for xval in x[cura:ccurb:cstep]:
                    cursum += (xval - means[i]) ** 2
                    n += 1
        # guard: windows too narrow to yield samples contribute 0.0
        variances.append(np.sqrt(cursum) / n if n else 0.0)
    return np.array(variances)
| StarcoderdataPython |
9798149 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from numpy.testing import assert_array_equal
from sktracker.tracker.lapjv import lapjv
def test_lapjv():
    """LAPJV assignment on a small sparse cost matrix yields the known optimum."""
    # sparse cost matrix given as parallel (row, column, cost) triplets
    i = [0, 0, 0, 0, 0, 0, 1, 1, 1, 2,
         2, 2, 3, 3, 3, 4, 4, 4, 4, 4,
         5, 5, 5, 5, 5, 6, 6, 7, 7, 8, 8]
    j = [0, 1, 2, 3, 4, 5, 0, 1, 6, 0,
         1, 7, 0, 1, 8, 0, 5, 6, 7, 8,
         1, 5, 6, 7, 8, 2, 5, 3, 5, 4, 5]
    costs = [1., 1., 1., 1., 1., 1.,
             1., 1., 1., 1., 1., 1.,
             1., 1., 1., 1., 2., 2.,
             2., 2., 1., 2., 2., 2.,
             2., 1., 2., 1., 2., 1., 2.]
    in_links, out_links = lapjv(i, j, costs)
    # expected optimal row -> column and column -> row assignments
    assert_array_equal(in_links, [5, 6, 7, 8, 0, 1, 2, 3, 4])
    assert_array_equal(out_links, [4, 5, 6, 7, 8, 0, 1, 2, 3])
| StarcoderdataPython |
3344141 | import warnings
from .common import Alignment, CursorMode, ShiftMode, BacklightMode
from .contextmanagers import cursor, cleared
from .gpio import CharLCD as GpioCharLCD
class CharLCD(GpioCharLCD):
    """Deprecated alias for :class:`RPLCD.gpio.CharLCD`.

    Instantiating it emits a ``DeprecationWarning`` and otherwise behaves
    exactly like the GPIO implementation.
    """

    def __init__(self, *args, **kwargs):
        message = ("Using RPLCD.CharLCD directly is deprecated. "
                   "Use RPLCD.gpio.CharLCD instead!")
        warnings.warn(message, DeprecationWarning)
        super(CharLCD, self).__init__(*args, **kwargs)
| StarcoderdataPython |
3424238 | """Grad-CAM class for analyzing CNN network.
- Author: <NAME>
- Contact: <EMAIL>
- Paper: https://arxiv.org/pdf/1610.02391v1.pdf
- Reference: https://github.com/RRoundTable/XAI
"""
from collections import OrderedDict
from typing import Callable
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
# pylint: disable=attribute-defined-outside-init
class CAMBaseWrapper:
    """Common plumbing for CAM-style explainability wrappers.

    Holds the wrapped model, remembers its device, and provides the shared
    forward/backward passes plus hook-handle bookkeeping for subclasses.
    """

    def __init__(self, model: nn.Module):
        """Wrap *model* and record the device of its first parameter."""
        super(CAMBaseWrapper, self).__init__()
        self.device = next(model.parameters()).device
        self.model = model
        self.handlers = []  # forward/backward hook handles registered by subclasses

    def _encode_one_hot(self, ids: torch.Tensor) -> torch.Tensor:
        """Return a zeros-like-logits tensor with *ids* of sample 0 set to 1."""
        encoded = torch.zeros_like(self.logits).to(self.device)
        encoded[0][ids] = 1
        return encoded

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        """Zero gradients, run a plain classification pass, and cache logits."""
        self.model.zero_grad()
        self.logits = self.model(image)
        return self.logits

    def backward(self, ids: torch.Tensor) -> torch.Tensor:
        """Class-specific backpropagation for the classes selected by *ids*.

        Equivalent to ``(self.logits * one_hot).sum().backward(retain_graph=True)``.
        """
        self.logits.backward(gradient=self._encode_one_hot(ids), retain_graph=True)

    def generate(self, target_layer: str):
        """Subclasses produce the class activation map here."""
        raise NotImplementedError

    def remove_hook(self):
        """Detach every registered forward/backward hook handle."""
        for registered in self.handlers:
            registered.remove()
# pylint: disable=attribute-defined-outside-init
class GradCAM(CAMBaseWrapper):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure 2 on page 4
    """

    def __init__(self, model: nn.Module, candidate_layers: list = None):
        """Register hooks that capture feature maps and their gradients.

        Args:
            model: network to analyze.
            candidate_layers: layer names to hook; every layer when None.
        """
        super(GradCAM, self).__init__(model)
        self.fmap_pool = OrderedDict()
        self.grad_pool = OrderedDict()
        self.candidate_layers = candidate_layers  # list

        def forward_hook(key: str) -> Callable:
            def forward_hook_(_, __, output: torch.Tensor):
                # Save featuremaps
                self.fmap_pool[key] = output.detach()

            return forward_hook_

        def backward_hook(key: str) -> Callable:
            def backward_hook_(_, __, grad_out: tuple):
                # Save the gradients correspond to the featuremaps
                self.grad_pool[key] = grad_out[0].detach()

            return backward_hook_

        # If any candidates are not specified, the hook is registered to all
        # the layers.  (Removed a stray debug `print(name, module)` that
        # dumped the whole module tree to stdout on every construction.)
        for name, module in self.model.named_modules():
            if self.candidate_layers is None or name in self.candidate_layers:
                self.handlers.append(module.register_forward_hook(forward_hook(name)))
                # NOTE(review): register_backward_hook is deprecated in recent
                # PyTorch in favor of register_full_backward_hook; kept as-is
                # here to avoid changing gradient-capture behavior -- confirm
                # against the torch version in use.
                self.handlers.append(module.register_backward_hook(backward_hook(name)))

    @staticmethod
    def _find(pool: OrderedDict, target_layer: str) -> torch.Tensor:
        """Get designated layer's cached tensor, or raise on a bad name."""
        if target_layer in pool.keys():
            return pool[target_layer]
        else:
            raise ValueError("Invalid layer name: {}".format(target_layer))

    @staticmethod
    def _compute_grad_weights(grads: torch.Tensor) -> torch.Tensor:
        """Compute per-channel gradient weights via global average pooling."""
        return F.adaptive_avg_pool2d(grads, 1)

    def forward(self, image: np.ndarray) -> torch.Tensor:
        """Forward pass; remembers the spatial input shape for upsampling."""
        self.image_shape = image.shape[1:]
        return super(GradCAM, self).forward(image)

    def generate(self, target_layer: str) -> torch.Tensor:
        """Generate the Grad-CAM heatmap of ``target_layer``.

        Weights the cached feature maps by pooled gradients, applies ReLU,
        upsamples to the input resolution, and min-max normalizes per sample.
        """
        fmaps = self._find(self.fmap_pool, target_layer)
        grads = self._find(self.grad_pool, target_layer)
        weights = self._compute_grad_weights(grads)
        gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
        gcam = F.relu(gcam)
        gcam = F.interpolate(
            gcam, self.image_shape, mode="bilinear", align_corners=False
        )
        B, C, H, W = gcam.shape
        gcam = gcam.view(B, -1)
        gcam -= gcam.min(dim=1, keepdim=True)[0]
        # NOTE(review): an all-zero map has max 0 here, producing NaNs after
        # the division; confirm upstream guarantees a nonzero activation.
        gcam /= gcam.max(dim=1, keepdim=True)[0]
        gcam = gcam.view(B, C, H, W)
        return gcam
| StarcoderdataPython |
9753871 | import pytest
from src.decko.debug import (
raise_error_if,
)
@pytest.mark.parametrize("test_case", [
    (2, 5, 5),
    (200, 500, 600)
])
def test_raise_errors_if(test_case):
    """raise_error_if must raise RuntimeError when the predicate is true."""
    lhs, rhs, limit = test_case

    def exceeds_limit(total):
        # Predicate: the decorated function's result is above the limit.
        return limit < total

    @raise_error_if(exceeds_limit)
    def add(x, y):
        return x + y

    with pytest.raises(RuntimeError):
        add(lhs, rhs)
| StarcoderdataPython |
321453 | """Class to perform over-sampling using ADASYN."""
# Authors: <NAME> <<EMAIL>>
# <NAME>
# License: MIT
import numpy as np
from scipy import sparse
from sklearn.utils import check_random_state
from sklearn.utils import _safe_indexing
from imblearn.over_sampling.base import BaseOverSampler
from imblearn.utils import check_neighbors_object
from imblearn.utils import check_sampling_strategy
class ADASYN():
    """ADASYN over-sampler that keeps a paired 3-D feature array in sync.

    Variant of imbalanced-learn's ADASYN that interpolates two views of the
    data with identical neighbour picks and interpolation steps: the flat
    matrix ``X`` (restricted to columns ``variables``) and the companion 3-D
    array ``X_3d`` (restricted to slices ``variables_3d``).
    """

    def __init__(
        self,
        sampling_strategy="auto",
        variables=None,
        variables_3d=None,
        random_state=None,
        n_neighbors=5,
        n_jobs=None,
    ):
        """Store configuration; validation is deferred to fit_resample."""
        self.sampling_strategy = sampling_strategy
        self.random_state = random_state
        self.n_neighbors = n_neighbors
        self.n_jobs = n_jobs
        self.variables = variables
        self.variables_3d = variables_3d

    def _validate_estimator(self):
        """Create the necessary objects for ADASYN"""
        self.nn_ = check_neighbors_object(
            "n_neighbors", self.n_neighbors, additional_neighbor=1
        )
        self.nn_.set_params(**{"n_jobs": self.n_jobs})

    def fit_resample(self, X, X_3d, y, y_org):
        """Oversample minority classes; return augmented (X, X_3d, y_org).

        ``y`` drives the sampling strategy; ``y_org`` carries the original
        labels alongside each (possibly synthetic) sample.  The encoded
        labels of synthetic rows are computed but intentionally not returned,
        matching the original interface.
        """
        self.sampling_strategy_ = check_sampling_strategy(
            self.sampling_strategy, y, 'over-sampling'
        )
        self._validate_estimator()
        random_state = check_random_state(self.random_state)
        X_resampled = [X.copy()]
        X_3d_resampled = [X_3d.copy()]
        y_resampled = [y.copy()]
        y_org_resampled = [y_org.copy()]
        for class_sample, n_samples in self.sampling_strategy_.items():
            if n_samples == 0:
                continue
            target_class_indices = np.flatnonzero(y == class_sample)
            X_class = _safe_indexing(X, target_class_indices)
            X_class_3d = _safe_indexing(X_3d, target_class_indices)
            y_class_org = _safe_indexing(y_org, target_class_indices)
            # Neighbours over the WHOLE dataset: weight each minority sample
            # by how many of its neighbours belong to other classes.
            # self.nn_.set_params(**{"n_neighbors": self.n_neighbors})
            self.nn_.fit(X[:, self.variables])
            nns = self.nn_.kneighbors(X_class[:, self.variables], return_distance=False)[:, 1:]
            # The ratio is computed using a one-vs-rest manner. Using majority
            # in multi-class would lead to slightly different results at the
            # cost of introducing a new parameter.
            n_neighbors = self.nn_.n_neighbors - 1
            ratio_nn = np.sum(y[nns] != class_sample, axis=1) / n_neighbors
            if not np.sum(ratio_nn):
                raise RuntimeError(
                    "Not any neigbours belong to the majority"
                    " class. This case will induce a NaN case"
                    " with a division by zero. ADASYN is not"
                    " suited for this specific dataset."
                    " Use SMOTE instead."
                )
            ratio_nn /= np.sum(ratio_nn)
            n_samples_generate = np.rint(ratio_nn * n_samples).astype(int)
            # rounding may cause new amount for n_samples
            n_samples = np.sum(n_samples_generate)
            if not n_samples:
                raise ValueError(
                    "No samples will be generated with the"
                    " provided ratio settings."
                )
            # the nearest neighbors need to be fitted only on the current class
            # to find the class NN to generate new samples
            # self.nn_.set_params(**{"n_neighbors": np.minimum(int(X_class.shape[0]-1), self.n_neighbors)})
            self.nn_.fit(X_class[:, self.variables])
            nns = self.nn_.kneighbors(X_class[:, self.variables], return_distance=False)[:, 1:]
            enumerated_class_indices = np.arange(len(target_class_indices))
            rows = np.repeat(enumerated_class_indices, n_samples_generate)
            cols = random_state.choice(n_neighbors, size=n_samples)
            diffs = X_class[nns[rows, cols]][:, self.variables] - X_class[rows][:, self.variables]
            diffs_3d = X_class_3d[nns[rows, cols]][:, self.variables_3d, :] - X_class_3d[rows][:, self.variables_3d, :]
            steps = random_state.uniform(size=(n_samples, 1))
            X_new = X_class[rows]
            X_new_3d = X_class_3d[rows]
            y_new_org = y_class_org[rows]
            if sparse.issparse(X):
                sparse_func = type(X).__name__
                steps = getattr(sparse, sparse_func)(steps)
                X_new[:, self.variables] = X_class[rows][:, self.variables] + steps.multiply(diffs)
                # BUGFIX: this line previously multiplied by the 2-D `diffs`
                # (copy/paste from the row above) instead of `diffs_3d`,
                # desynchronizing the sparse path from the dense branch below.
                X_new_3d[:, self.variables_3d, :] = X_class_3d[rows][:, self.variables_3d, :] + steps[:, :,
                                                    np.newaxis].multiply(diffs_3d)
            else:
                X_new[:, self.variables] = X_class[rows][:, self.variables] + steps * diffs
                X_new_3d[:, self.variables_3d, :] = X_class_3d[rows][:, self.variables_3d, :] + steps[:, :,
                                                    np.newaxis] * diffs_3d
            X_new = X_new.astype(X.dtype)
            X_new_3d = X_new_3d.astype(X.dtype)
            y_new = np.full(n_samples, fill_value=class_sample, dtype=y.dtype)
            X_resampled.append(X_new)
            X_3d_resampled.append(X_new_3d)
            y_resampled.append(y_new)
            y_org_resampled.append(y_new_org)
        if sparse.issparse(X):
            X_resampled = sparse.vstack(X_resampled, format=X.format)
            X_3d_resampled = sparse.vstack(X_3d_resampled, format=X.format)
        else:
            X_resampled = np.vstack(X_resampled)
            X_3d_resampled = np.vstack(X_3d_resampled)
        y_resampled = np.hstack(y_resampled)
        y_org_resampled = np.hstack(y_org_resampled)
        return X_resampled, X_3d_resampled, y_org_resampled
| StarcoderdataPython |
1605281 | <filename>src/server/import/parser/docInfo.py
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 14:39:35 2015
@author: smichel
"""
def get(soup, docType=None):
    """Return the tag-name configuration for a known legal-document type.

    Decides between CFR, USC, HR, and Public Law documents either from the
    explicit *docType* hint or by probing *soup* for signature tags, and
    returns the 15-tuple used by downstream parsing:
    (docType, docNumTag, urlTag, urlAttr, sectionTag, secNumTag, secValueTag,
     headingTag, headingAltTag, contextTags, citationTag, noteTag, verTag,
     originalDateTag, verDateTag).
    Returns None (after printing diagnostics) when the type is unrecognized.
    """
    other = False
    # CFR criteria
    if docType == "cfr" or soup.find("cfrdoc"):
        docType = "cfr"
        docNumTag = "titlenum"  # use [x for x in soup.find(docNumTag).text.split(" ") if x[0].isdigit()][0]
        urlTag = ""
        urlAttr = ""
        sectionTag = "section"
        secNumTag = "sectno"
        secValueTag = ""
        headingTag = "subject"
        headingAltTag = "reserved"
        contextTags = ["p"]
        citationTag = "cita"
        noteTag = "auth"
        verTag = ""
        originalDateTag = "amddate"
        verDateTag = "date"
    # USC criteria
    elif docType == "usc" or soup.find("docnumber"):
        docType = "usc"
        docNumTag = "docnumber"
        urlTag = "ref"
        urlAttr = "href"
        sectionTag = "section"
        secNumTag = "num"
        secValueTag = "value"
        headingTag = "heading"
        headingAltTag = ""
        contextTags = ["chapeau", "clause", "content", "item", "p", "subclause", "subitem", "subparagraph", "subsection"]
        citationTag = "sourcecredit"
        noteTag = "note"
        verTag = "docpublicationname"
        originalDateTag = ""
        verDateTag = "dcterms:created"
    # HR criteria
    elif docType == "hr" or soup.find("legis-num"):
        docType = "hr"
        docNumTag = "legis-num"
        urlTag = "external-xref"
        urlAttr = "parsable-cite"
        sectionTag = "section"
        secNumTag = "enum"
        secValueTag = ""
        headingTag = ""
        headingAltTag = ""
        contextTags = ["text"]
        citationTag = ""
        noteTag = ""
        verTag = ""
        originalDateTag = ""
        verDateTag = "dc:date"
    # Public Law criteria
    elif docType == "pl" or soup.find("legis-num"):
        # NOTE(review): this branch maps PL documents onto the HR tag set and
        # reports docType "hr"; the soup probe is also shadowed by the HR
        # branch above.  Confirm the PL->HR mapping is intentional rather
        # than a copy/paste of the HR branch.
        docType = "hr"
        docNumTag = "legis-num"
        urlTag = "external-xref"
        urlAttr = "parsable-cite"
        sectionTag = "section"
        secNumTag = "enum"
        secValueTag = ""
        headingTag = ""
        headingAltTag = ""
        contextTags = ["text"]
        citationTag = ""
        noteTag = ""
        verTag = ""
        originalDateTag = ""
        verDateTag = "dc:date"
    # Other criteria
    else:
        print("Unrecognized document type. Looking for available tags...")
        # BUGFIX: this previously read `other == True` -- a no-op comparison,
        # not an assignment -- so the return below raised NameError on the
        # unbound tag variables for unrecognized documents.
        other = True
    if not other:
        # use the following to produce the title number (docNum): [x for x in soup.find(docNumTag).text.split(" ") if x[0].isdigit()][0]
        return docType, docNumTag, urlTag, urlAttr, sectionTag, secNumTag, secValueTag, headingTag, headingAltTag, contextTags, citationTag, noteTag, verTag, originalDateTag, verDateTag
    else:
        try:
            # NOTE(review): BeautifulSoup exposes no `.tags` attribute; this
            # likely resolves to a tag lookup yielding None -- verify.
            docTags = soup.tags
            print("The following tags exist in this document. You can manually specify which tags to use by passing the tags you want for each category (docType, docNumTag, sectionTag, contextTag, citationTag).")
            print(docTags)
        except Exception:  # narrowed from a bare `except:`
            print("Cannot find any tags in this document. \nYou can try again by running this program with the flag RegEx=True.")
3468125 | <reponame>elfido/node-java-c
{
"targets":[
{
"target_name": "fibo",
"sources": ["fib.cc"]
}
]
}
| StarcoderdataPython |
3419176 | <filename>src/plotly.py<gh_stars>0
from dfply import *
import warnings
import numpy as np
import plotly.offline as offline
from plotly.graph_objs import Scatter, Annotation, Heatmap, Trace, Bar
import cufflinks as cf
def init():
    """Configure cufflinks for offline (non-cloud) plotting; call once at startup."""
    #offline.init_notebook_mode()
    cf.set_config_file(offline=True, offline_show_link=False, world_readable=False)
def _solve_intention(obj, df):
    """Recursively evaluate dfply Intention placeholders against *df*.

    Dicts are walked value-by-value, Intention objects are evaluated and
    converted to plain lists, and anything else passes through unchanged.
    """
    if isinstance(obj, dict):
        return {key: _solve_intention(value, df) for key, value in obj.items()}
    if isinstance(obj, base.Intention):
        return obj.evaluate(df).tolist()
    return obj
def _which(lst):
    """Return the indices of truthy entries in *lst* (like R's ``which``)."""
    return list(np.where(lst)[0])
@dfpipe
def iplot(df, *args, **kwargs):
    """Pipe-able passthrough to cufflinks' ``DataFrame.iplot``."""
    return df.iplot(*args, **kwargs)
@pipe
def plot_ly(df):
    """Attach an empty plotly figure ({'data', 'layout'}) to *df*.

    Registers "plotly" in ``DataFrame._metadata`` so the attribute is
    propagated by pandas operations that copy metadata.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        df.plotly = {'data': [], 'layout': {}}
        # _metadata is shared at class level: guard against appending a
        # duplicate entry every time a new pipeline is started.
        if "plotly" not in df._metadata:
            df._metadata.append("plotly")
    return df
@dfpipe
def add_trace(df, *args, **kwargs):
    """Append a generic plotly Trace to the figure attached to *df*."""
    df.plotly['data'].append(Trace(*args, **kwargs))
    return df
@dfpipe
def _add_scatter(df, *args, **kwargs):
    """Internal: append one Scatter trace (used by ``add_scatter``)."""
    df.plotly['data'].append(Scatter(*args, **kwargs))
    return df
@pipe
def add_scatter(df, *args, **kwargs):
    """Append Scatter trace(s) to the figure attached to *df*.

    When a ``color`` Intention is supplied, the frame is split by its
    evaluated values and one named trace is added per distinct value;
    otherwise a single trace is added with the given arguments.
    """
    if "color" not in kwargs:
        return df >> _add_scatter(*args, **kwargs)
    # Evaluate the Intention once (previously evaluated twice) and drop the
    # 'color' key before forwarding kwargs to the per-group trace calls.
    color_values = kwargs.pop("color").evaluate(df).tolist()
    unique_colors = list(dict.fromkeys(color_values))  # order-preserving dedupe
    for color in unique_colors:
        kwargs['name'] = color
        group_rows = _which([value == color for value in color_values])
        df.plotly = (df.iloc[group_rows, :] >>
                     _add_scatter(*args, **kwargs)).plotly
    return df
@dfpipe
def add_bar(df, *args, **kwargs):
    """Append a Bar trace to the figure attached to *df*."""
    df.plotly['data'].append(Bar(*args, **kwargs))
    return df
@dfpipe
def add_heatmap(df, *args, **kwargs):
    """Append a Heatmap trace to the figure attached to *df*."""
    df.plotly['data'].append(Heatmap(*args, **kwargs))
    return df
@dfpipe
def add_annotations(df, *args, **kwargs):
    """Append an Annotation to the layout, creating the list on first use."""
    layout = df.plotly['layout']
    layout.setdefault('annotations', []).append(Annotation(*args, **kwargs))
    return df
@dfpipe
def layout(df, *args, **kwargs):
    """Merge keyword arguments into the figure's layout dict.

    Note: positional *args are accepted for pipe compatibility but unused.
    """
    df.plotly['layout'].update(kwargs)
    return df
@pipe
def show(df):
    """Render the accumulated figure inline; returns None, ending the pipe."""
    offline.iplot(df.plotly, show_link=False)
@pipe
def export(df, filename, format="png"):
    """Write the figure to *filename* and trigger an image export.

    ``format`` shadows the builtin, but is kept for API compatibility.
    """
    offline.plot(df.plotly, filename=filename, image=format)
1870366 | <filename>bot/plugins/gdrive.py<gh_stars>0
import asyncio
import base64
import json
import re
from datetime import datetime, timedelta
from typing import Any, AsyncIterator, ClassVar, Iterable, List, MutableMapping, Optional, Set, Tuple, Union
import pyrogram
from aiopath import AsyncPath
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build, Resource
from googleapiclient.errors import HttpError
from googleapiclient.http import MediaFileUpload
from oauthlib.oauth2.rfc6749.errors import InvalidGrantError
from .. import command, plugin, util
# Google Drive's MIME type for folders.
FOLDER = "application/vnd.google-apps.folder"
# Emoji icon shown per MIME type when rendering Drive listings.
MIME_TYPE = {
    "application/gzip": "📦",
    "application/octet-stream": "⚙️",
    "application/rar": "📦",
    "application/vnd.google-apps.folder": "📁️",
    "application/vnd.rar": "📦",
    "application/x-7z-compressed": "📦",
    "application/x-bzip": "📦",
    "application/x-bzip2": "📦",
    "application/x-rar": "📦",
    "application/x-tar": "📦",
    "application/zip": "📦",
    "audio/aac": "🎵",
    "audio/mp4": "🎵",
    "audio/mpeg": "🎵",
    "audio/ogg": "🎵",
    "audio/wav": "🎵",
    "audio/x-opus+ogg": "🎵",
    "image/gif": "🖼️",
    "image/jpeg": "🖼️",
    "image/png": "🖼️",
    "video/mp4": "🎥️",
    "video/x-matroska": "🎥️"
}
# Matches the id segment of the common Drive URL shapes (folder links,
# file links, and ?id= query parameters, plus their URL-encoded forms).
PATTERN = re.compile(r"(?<=/folders/)([\w-]+)|(?<=%2Ffolders%2F)([\w-]+)|"
                     r"(?<=/file/d/)([\w-]+)|(?<=%2Ffile%2Fd%2F)([\w-]+)|"
                     r"(?<=id=)([\w-]+)|(?<=id%3D)([\w-]+)")
# Extracts the second-level domain name from an http(s) URL.
DOMAIN = re.compile(r"https?:\/\/(?:www\.|:?www\d+\.|(?!www))"
                    r"([a-zA-Z0-9][a-zA-Z0-9-]+[a-zA-Z0-9])\.[^\s]{2,}")


def getIdFromUrl(url: Optional[str]) -> Optional[str]:
    """Extract a Drive file/folder id from *url*.

    Returns None for a falsy url, the first id-looking token when the
    pattern matches, and the input unchanged otherwise (it may already
    be a bare id).
    """
    if not url:
        return None
    found = PATTERN.search(url)
    if found is None:
        return url
    return found[0]
class GoogleDrive(plugin.Plugin):
    """Bot plugin exposing Google Drive mirror/copy/search/remove commands."""

    name: ClassVar[str] = "GoogleDrive"

    # State populated in on_load()/on_start(); annotated here for type checkers.
    configs: MutableMapping[str, Any]  # OAuth client secret from bot config
    credentials: Optional[Credentials]  # stored user OAuth credentials
    db: util.db.AsyncCollection  # "gdrive" collection persisting credentials
    service: Resource  # Drive v3 API client
    aria2: Any  # Aria2 plugin used for mirroring
    cache: MutableMapping[int, int]  # msg_id -> discovered item count (gdcopy)
    copy_tasks: Set[Tuple[int, str]]  # (msg_id, drive id) of running copies
    index_link: Optional[str]  # optional index base URL attached to uploads
    parent_id: Optional[str]  # default destination folder id
    tasks: Set[Tuple[int, asyncio.Task[Any]]]  # (msg_id, task) for aborting
    getDirectLink: util.aria2.DirectLinks  # resolver for file-host links
    async def on_load(self) -> None:
        """Initialize plugin state and build the Drive service when credentials exist."""
        self.credentials = None
        self.db = self.bot.db.get_collection("gdrive")
        self.index_link = self.bot.config["gdrive_index_link"]
        self.parent_id = getIdFromUrl(self.bot.config["gdrive_folder_id"])
        self.tasks = set()
        self.cache = {}
        self.copy_tasks = set()
        data = await self.db.find_one({"_id": 1})
        if not data:
            # No stored credentials yet: keep the OAuth client secret around
            # so the authorization flow can be run later, or bail out.
            self.configs = self.bot.config["gdrive_secret"]
            if not self.configs:
                self.log.warning(f"{self.name} module secret not satisfy.")
                self.bot.unload_plugin(self)
                return
        else:
            credentials = data["credentials"]
            # NOTE(review): self.aria2 is only assigned on this branch --
            # confirm the Aria2 plugin is always loaded before first use.
            self.aria2 = self.bot.plugins["Aria2"]
            self.credentials = Credentials.from_authorized_user_info(credentials)
            # service will be overwrite if credentials is expired
            self.service = await util.run_sync(build,
                                               "drive",
                                               "v3",
                                               credentials=self.credentials,
                                               cache_discovery=False)
    async def on_start(self, _: int) -> None:
        """Create the direct-link resolver once the bot's HTTP client is ready."""
        self.getDirectLink = util.aria2.DirectLinks(self.bot.http)
    async def check_credentials(self, ctx: command.Context) -> None:
        """Refresh expired credentials, or run the OAuth flow when none exist."""
        if not self.credentials or not self.credentials.valid:
            if self.credentials and self.credentials.expired and (
                    self.credentials.refresh_token):
                self.log.info("Refreshing credentials")
                await util.run_sync(self.credentials.refresh, Request())
                # Persist the refreshed token so restarts keep working.
                await self.db.update_one(
                    {"_id": 1},
                    {"$set": {
                        "credentials": json.loads(self.credentials.to_json())
                    }})
            else:
                user = ctx.msg.from_user
                # Only the bot owner is allowed to generate credentials.
                if user.id != self.bot.owner:
                    await ctx.respond("Please, ask the owner to generate the credentials.")
                    return
                await asyncio.gather(
                    ctx.respond("Credentials is empty, generating..."),
                    asyncio.sleep(2.5))
                ret = await self.getAccessToken(ctx)
                await ctx.respond(ret)
                # Reload so the Drive service is rebuilt with the new token.
                await self.on_load()
    @command.desc("Check your GoogleDrive credentials")
    @command.alias("gdauth")
    async def cmd_gdcheck(self, ctx: command.Context) -> None:
        """Respond with a ready message.

        NOTE(review): no validation happens here; presumably
        check_credentials() gates command handling upstream -- confirm.
        """
        await ctx.respond("__You are all set__")
@command.desc("Clear/Reset your GoogleDrive credentials")
@command.alias("gdreset")
async def cmd_gdclear(self, ctx: command.Context) -> Optional[str]:
if not self.credentials:
return "__Credentials already empty.__"
await asyncio.gather(self.db.delete_one({"_id": 1}),
ctx.respond("__Credentials cleared.__"))
await self.on_load()
    async def getAccessToken(self, ctx: command.Context) -> str:
        """Run the interactive OAuth flow with the owner via PM.

        Sends the authorization URL, waits up to 60 seconds for the token
        reply, exchanges it for credentials, and persists them. Returns a
        human-readable status string.
        """
        flow = InstalledAppFlow.from_client_config(
            self.configs, ["https://www.googleapis.com/auth/drive"],
            redirect_uri=self.configs["installed"]["redirect_uris"][0])
        auth_url, _ = flow.authorization_url(access_type="offline",
                                             prompt="consent")
        await ctx.respond("Check the PM from me")
        async with self.bot.conversation(self.bot.owner, timeout=60) as conv:
            request = await conv.send_message(
                f"Please visit the link:\n{auth_url}\n"
                "And reply the token here.\n**You have 60 seconds**.")
            try:
                response = await conv.get_response()
            except asyncio.TimeoutError:
                await request.delete()
                return "⚠️ <u>Timeout no token receive</u>"
            token_response = await request.reply("Token received...")
        token = response.text
        try:
            # Clean up the conversation messages while exchanging the token.
            await asyncio.gather(request.delete(), response.delete(), token_response.delete(),
                                 util.run_sync(flow.fetch_token, code=token))
        except InvalidGrantError:
            return ("⚠️ **Error fetching token**\n\n"
                    "__Refresh token is invalid, expired, revoked, "
                    "or does not match the redirection URI.__")
        self.credentials = flow.credentials
        # upsert=True: create the credentials document on first authorization.
        await self.db.update_one(
            {"_id": 1},
            {"$set": {
                "credentials": json.loads(self.credentials.to_json())
            }}, upsert=True)
        return "Credentials created."
async def getInfo(self, identifier: str,
fields: Iterable[str]) -> MutableMapping[str, Any]:
fields = ", ".join(fields)
return await util.run_sync(self.service.files().get( # type: ignore
fileId=identifier, fields=fields, supportsAllDrives=True).execute)
async def copyFile(self, file_id: str, parent_id: Optional[str] = None) -> str:
metadata = {}
if parent_id is not None:
metadata["parents"] = [parent_id]
elif parent_id is None and self.parent_id is not None:
metadata["parents"] = [self.parent_id]
file = await util.run_sync(self.service.files().copy( # type: ignore
body=metadata, fileId=file_id, supportsAllDrives=True).execute)
return file["id"]
    async def copyFolder(self, target: str, *, parent_id: Optional[str] = None,
                         name: Optional[str] = None, msg_id: Optional[int] = None
                         ) -> AsyncIterator[asyncio.Task]:
        """Recursively copy folder *target*, yielding one task per copied file.

        Sub-folders are recreated (not API-copied) under *parent_id*. When
        *msg_id* is given, self.cache[msg_id] accumulates the number of files
        discovered so the caller can render progress.
        """
        query = f"'{target}' in parents"
        async for contents in self.searchContent(query=query, limit=1000):
            if msg_id is not None:
                self.cache[msg_id] += len(contents)
            for content in contents:
                if content["mimeType"] == FOLDER:
                    # Dont count folder
                    if msg_id is not None:
                        self.cache[msg_id] -= 1
                    childFolder = await self.createFolder(content["name"],
                                                          folderId=parent_id)
                    async for task in self.copyFolder(target=content["id"],
                                                      parent_id=childFolder,
                                                      name=name, msg_id=msg_id):
                        yield task
                else:
                    yield self.bot.loop.create_task(self.copyFile(
                        content["id"],
                        parent_id=parent_id),
                        name=name)
                    # Small delay between copies to ease API rate limits.
                    await asyncio.sleep(0.5)
async def createFolder(self,
folderName: str,
folderId: Optional[str] = None) -> str:
folder_metadata: MutableMapping[str, Any] = {
"name": folderName,
"mimeType": "application/vnd.google-apps.folder"
}
if folderId is not None:
folder_metadata["parents"] = [folderId]
elif folderId is None and self.parent_id is not None:
folder_metadata["parents"] = [self.parent_id]
folder = await util.run_sync(self.service.files().create( # type: ignore
body=folder_metadata, fields="id", supportsAllDrives=True).execute)
return folder["id"]
    async def uploadFolder(
            self,
            sourceFolder: AsyncPath,
            *,
            gid: Optional[str] = None,
            parent_id: Optional[str] = None,
            msg: Optional[pyrogram.types.Message] = None
    ) -> AsyncIterator[asyncio.Task]:
        """Recursively upload a local folder, yielding a progress task per file."""
        async for content in sourceFolder.iterdir():
            if await content.is_dir():
                childFolder = await self.createFolder(content.name, parent_id)
                async for task in self.uploadFolder(content,
                                                    gid=gid,
                                                    parent_id=childFolder,
                                                    msg=msg):
                    yield task
            elif await content.is_file():
                file = util.File(content)
                # NOTE: `content` is rebound here from the path to the upload
                # result; uploadFile returns a str id for zero-byte files.
                content = await self.uploadFile(file, parent_id, msg)
                if isinstance(content, str):  # Skip because file size is 0
                    continue
                yield self.bot.loop.create_task(file.progress(update=False),
                                                name=gid)
                # Small delay between uploads to ease API rate limits.
                await asyncio.sleep(0.5)
    async def uploadFile(self,
                         file: Union[util.File, util.aria2.Download],
                         parent_id: Optional[str] = None,
                         msg: Optional[pyrogram.types.Message] = None
                         ) -> Union[MediaFileUpload, str]:
        """Upload *file* to Drive.

        Non-empty files are prepared as a resumable upload and the pending
        request object is returned (the caller drives it while reporting
        progress); zero-byte files are uploaded immediately and their new
        Drive id (str) is returned instead.
        """
        body: MutableMapping[str, Any] = {"name": file.name, "mimeType": file.mime_type}
        if parent_id is not None:
            body["parents"] = [parent_id]
        elif parent_id is None and self.parent_id is not None:
            body["parents"] = [self.parent_id]
        if (await file.path.stat()).st_size > 0:
            # Resumable upload in 50 MiB chunks; note files().create is NOT
            # executed here -- the request object itself is returned.
            media_body = MediaFileUpload(file.path,
                                         mimetype=file.mime_type,
                                         resumable=True,
                                         chunksize=50 * 1024 * 1024)
            content = await util.run_sync(self.service.files().create,  # type: ignore
                                          body=body,
                                          media_body=media_body,
                                          fields="id, size, webContentLink",
                                          supportsAllDrives=True)
        else:
            # Zero-byte file: a resumable upload would fail, so execute a
            # simple upload right away and return the id.
            media_body = MediaFileUpload(file.path, mimetype=file.mime_type)
            content = await util.run_sync(self.service.files().create(  # type: ignore
                body=body,
                media_body=media_body,
                fields="id, size, webContentLink",
                supportsAllDrives=True).execute)
            return content.get("id")
        # Attach bookkeeping used by the progress reporter onto the pending
        # request / file objects.
        if isinstance(file, util.aria2.Download):
            content.gid, content.name, content.start_time = (file.gid, file.name,
                                                             util.time.sec())
        elif isinstance(file, util.File):
            file.content, file.start_time, file.invoker = (content,
                                                           util.time.sec(),
                                                           msg)
            if self.index_link is not None:
                file.index_link = self.index_link
        return content
    async def downloadFile(self, ctx: command.Context,
                           msg: pyrogram.types.Message) -> Optional[AsyncPath]:
        """Download the media of *msg* to the configured path with live progress.

        Returns the downloaded file's path, or None when pyrogram yields no
        file (e.g. a cancelled/failed download).
        """
        download_path = self.bot.config["download_path"]
        before = util.time.sec()
        last_update_time = None
        human = util.file.human_readable_bytes
        time = util.time.format_duration_td
        # Pick a file name per media type; photos/voice notes have none, so
        # one is synthesized from the message date.
        if msg.document:
            file_name = msg.document.file_name
        elif msg.audio:
            file_name = msg.audio.file_name
        elif msg.video:
            file_name = msg.video.file_name
        elif msg.sticker:
            file_name = msg.sticker.file_name
        elif msg.photo:
            # NOTE(review): assumes msg.photo.date is a unix timestamp --
            # confirm against the pyrogram version in use.
            date = datetime.fromtimestamp(msg.photo.date)
            file_name = f"photo_{date.strftime('%Y-%m-%d_%H-%M-%S')}.jpg"
        elif msg.voice:
            date = datetime.fromtimestamp(msg.voice.date)
            file_name = f"audio_{date.strftime('%Y-%m-%d_%H-%M-%S')}.ogg"
        else:
            file_name = None

        def prog_func(current: int, total: int) -> None:
            # Progress callback invoked by pyrogram; edits the status message.
            nonlocal last_update_time
            percent = current / total
            after = util.time.sec() - before
            now = datetime.now()
            try:
                speed = round(current / after, 2)
                eta = timedelta(seconds=int(round((total - current) / speed)))
            except ZeroDivisionError:
                speed = 0
                eta = timedelta(seconds=0)
            bullets = "●" * int(round(percent * 10)) + "○"
            if len(bullets) > 10:
                bullets = bullets.replace("○", "")
            space = ' ' * (10 - len(bullets))
            progress = (
                f"`{file_name}`\n"
                f"Status: **Downloading**\n"
                f"Progress: [{bullets + space}] {round(percent * 100)}%\n"
                f"__{human(current)} of {human(total)} @ "
                f"{human(speed, postfix='/s')}\neta - {time(eta)}__\n\n")
            # Only edit message once every 5 seconds to avoid ratelimits
            if last_update_time is None or (
                    now - last_update_time).total_seconds() >= 5:
                self.bot.loop.create_task(ctx.respond(progress))
                last_update_time = now

        if file_name is None:
            file_path = await ctx.bot.client.download_media(msg, progress=prog_func)
        else:
            file_path = f"{download_path}/{file_name}"
            file_path = await ctx.bot.client.download_media(msg,
                                                            file_name=file_path,
                                                            progress=prog_func)  # type: ignore
        if file_path is not None:
            return AsyncPath(file_path)
        return
async def searchContent(self, query: str,
limit: int) -> AsyncIterator[List[MutableMapping[str, Any]]]:
fields = "nextPageToken, files(name, id, mimeType, webViewLink)"
pageToken = None
while True:
response = await util.run_sync(self.service.files().list( # type: ignore
supportsAllDrives=True,
includeItemsFromAllDrives=True,
q=query,
spaces="drive",
corpora="allDrives",
fields=fields,
pageSize=limit,
orderBy="folder, modifiedTime desc, name asc",
pageToken=pageToken).execute)
yield response.get("files", [])
pageToken = response.get("nextPageToken", None)
if pageToken is None:
break
@command.desc("Delete your GoogleDrive files/folders, warning this will skip trash")
@command.alias("gdrm")
async def cmd_gdremove(self, ctx: command.Context, *,
identifier: Optional[str] = None
) -> Optional[str]:
if not ctx.input and not identifier:
return "__Pass the id of content to delete it__"
if ctx.input and not identifier:
identifier = getIdFromUrl(ctx.input)
await util.run_sync(self.service.files().delete( # type: ignore
fileId=identifier, supportsAllDrives=True).execute)
return f"__Deleted:__ `{identifier}`"
    @command.desc("Copy public GoogleDrive folder/file into your own")
    @command.usage("[file id or folder id]")
    @command.alias("gdcp")
    async def cmd_gdcopy(self, ctx: command.Context) -> Optional[str]:
        """Copy a public Drive file/folder into the configured destination.

        Replying "abort" to a running copy's message cancels it by deleting
        the destination (the resulting notFound errors stop the loop).
        """
        if not ctx.input and not ctx.msg.reply_to_message:
            return "__Input the id of the file/folder or reply to abort copy task__"
        if ctx.msg.reply_to_message and ctx.input != "abort":
            return "__Replying to message only for aborting task__"
        if ctx.msg.reply_to_message:
            # Abort path: find the task tied to the replied message and
            # delete its Drive target.
            reply_msg_id = ctx.msg.reply_to_message.message_id
            for msg_id, identifier in self.copy_tasks.copy():
                if msg_id == reply_msg_id:
                    await self.cmd_gdremove(ctx, identifier=identifier)
                    break
            else:
                return "__Replied message is not task__"
            return "__Aborted__"
        await ctx.respond("Gathering...")
        identifier = getIdFromUrl(ctx.input)
        if not identifier:
            return "__Invalid id__"
        try:
            content = await self.getInfo(identifier, ["id", "name", "mimeType"])
        except HttpError as e:
            if "'location': 'fileId'" in str(e):
                return "__Invalid input of id.__"
            raise
        if content["mimeType"] == FOLDER:
            cancelled = False
            counter = 0
            progress_string = ""
            last_update_time = None
            self.cache[ctx.msg.message_id] = 0
            self.copy_tasks.add((ctx.msg.message_id, content["id"]))
            parentFolder = await self.createFolder(content["name"])
            async for task in self.copyFolder(target=content["id"],
                                              parent_id=parentFolder,
                                              name=content["name"],
                                              msg_id=ctx.msg.message_id):
                try:
                    await task
                except HttpError as e:
                    # notFound means the destination was deleted -> aborted.
                    if "'reason': 'notFound'" in str(e):
                        cancelled = True
                        break
                    raise
                else:
                    counter += 1
                now = datetime.now()
                length = self.cache[ctx.msg.message_id]
                percent = round(((counter / length) * 100), 2)
                progress_string = (f"__Copying {content['name']}"
                                   f": [{counter}/{length}] {percent}%__")
                # Throttle status edits to once per 5 seconds.
                if last_update_time is None or (now - last_update_time
                                                ).total_seconds() >= 5 and (
                                                    progress_string != ""):
                    await ctx.respond(progress_string)
                    last_update_time = now
            del self.cache[ctx.msg.message_id]
            if cancelled:
                self.copy_tasks.remove((ctx.msg.message_id, content["id"]))
                try:
                    await self.cmd_gdremove(ctx, identifier=parentFolder)
                except Exception:  # skipcq: PYL-W0703
                    return "__Aborted, but failed to delete the content__"
                return "__Transmission aborted__"
            ret = await self.getInfo(parentFolder, ["webViewLink"])
        else:
            task = self.bot.loop.create_task(self.copyFile(content["id"]))
            self.copy_tasks.add((ctx.msg.message_id, content["id"]))
            try:
                await task
            except asyncio.CancelledError:
                return "__Transmission aborted__"
            file_id = task.result()
            ret = await self.getInfo(file_id, ["webViewLink"])
        self.copy_tasks.remove((ctx.msg.message_id, content["id"]))
        return f"Copying success: [{content['name']}]({ret['webViewLink']})"
    @command.desc("Mirror Magnet/Torrent/Link/Message Media into GoogleDrive")
    @command.usage("[Magnet/Torrent/Link or reply to message]")
    async def cmd_gdmirror(self, ctx: command.Context) -> Optional[str]:
        """Mirror a link/magnet/torrent or replied media into Drive.

        Replied media is downloaded and uploaded directly; everything else
        (including .torrent payloads and resolved direct links) is handed to
        the Aria2 plugin.
        """
        if not ctx.input and not ctx.msg.reply_to_message:
            return "__Either link nor media found.__"
        if ctx.input and ctx.msg.reply_to_message:
            return "__Can't pass link while replying to message.__"
        if ctx.msg.reply_to_message:
            reply_msg = ctx.msg.reply_to_message
            if reply_msg.media:
                await ctx.respond("Preparing...")
                # Track the task so it can be aborted via its message id.
                task = self.bot.loop.create_task(
                    self.downloadFile(ctx, reply_msg))
                self.tasks.add((ctx.response.message_id, task))
                try:
                    await task
                except asyncio.CancelledError:
                    return "__Transmission aborted.__"
                else:
                    path = task.result()
                    if path is None:
                        return "__Something went wrong, file probably corrupt__"
                finally:
                    self.tasks.remove((ctx.response.message_id, task))
                if path.suffix == ".torrent":
                    # Torrent files are forwarded to aria2 as base64 payload.
                    types = base64.b64encode(await path.read_bytes())
                else:
                    file = util.File(path)
                    await self.uploadFile(file, msg=ctx.response)
                    task = self.bot.loop.create_task(file.progress())
                    self.tasks.add((ctx.response.message_id, task))
                    try:
                        await task
                    except asyncio.CancelledError:
                        return "__Transmission aborted.__"
                    finally:
                        self.tasks.remove((ctx.response.message_id, task))
                    return
            elif reply_msg.text:
                types = reply_msg.text
            else:
                return "__Unsupported types of download.__"
        else:
            types = ctx.input
        if isinstance(types, str):
            match = DOMAIN.match(types)
            if match:
                # Try to resolve file-host links to direct download URLs.
                direct = await self.getDirectLink(match.group(1), types)
                if direct is not None and isinstance(direct, list):
                    if len(direct) == 1:
                        types = direct[0]["url"]
                    elif len(direct) > 1:
                        # Ask the invoking user to pick a mirror; falls back
                        # to the first one on timeout.
                        text = "Multiple links found, choose one of the following:\n\n"
                        for index, mirror in enumerate(direct):
                            text += f"`{index + 1}`. {mirror['name']}\n"
                        text += "\nSend only the number here."
                        async with self.bot.conversation(ctx.msg.chat.id,
                                                         timeout=60) as conv:
                            request = await conv.send_message(text)
                            try:
                                user = ctx.msg.from_user.id
                                response = await conv.get_response(
                                    filters=pyrogram.filters.user(user))
                            except asyncio.TimeoutError:
                                await request.delete()
                                types = direct[0]["url"]
                            else:
                                await request.delete()
                                index = int(response.text) - 1
                                types = direct[index]["url"]
                elif direct is not None:
                    types = direct
        try:
            ret = await self.aria2.addDownload(types, ctx)
            return ret
        except NameError:
            # NOTE(review): a missing self.aria2 raises AttributeError, not
            # NameError, so this fallback likely never triggers -- confirm.
            return "__Mirroring torrent file/url needs Aria2 loaded.__"
    @command.usage("[parent=\"folderId\"] [name=\"file/folder name\"] "
                   "[limit=number] [filter=file/folder]"
                   "[q=\"search query\"], **single/double quote important for "
                   "parent, name and q parameters**",
                   optional=True)
    @command.desc("Search through all Google Drive by given query/parent/name")
    @command.alias("gdlist", "gdls")
    async def cmd_gdsearch(self, ctx: command.Context) -> Optional[str]:
        """Search Google Drive and list matching files/folders.

        Parses ``key=value`` options out of the message text: ``parent``,
        ``name``, ``limit`` (max 1000, default 15), ``filter`` (``file`` or
        ``folder``) and ``q``. When ``q`` is present it overrides the query
        assembled from the other options. Returns a Markdown-formatted
        result list, or an error string on invalid input.
        """
        # One regex alternative per option; quoted values keep escaped
        # quotes intact ((?:[^"\\]|\\.)* pattern).
        regex = re.compile(
            r"(parent)=(\"(?:[^\"\\]|\\.)*\"|'(?:[^'\\]|\\.)*')|"
            r"(limit)=(\d+)|(filter)=(file|folder)|"
            r"(name)=(\"(?:[^\"\\]|\\.)*\"|'(?:[^'\\]|\\.)*')|"
            r"(q)=(\"(?:[^\"\\]|\\.)*\"|'(?:[^'\\]|\\.)*')"
        )
        matches = regex.finditer(ctx.msg.text)
        await ctx.respond("Collecting...")
        length = 0  # amount of successfully parsed options
        options: MutableMapping[str, Any] = {}
        for match in matches:
            # Groups come in (key, value) pairs, so the value of the key at
            # 0-based `index` is at 1-based group `index + 2`.
            for index, option in enumerate(match.groups()):
                if option is not None and match.group(index + 2) is not None:
                    match = match.group(index + 2)
                    options[option] = match
                    # Remove quote/double quote and override
                    if option not in {"limit", "filter"}:
                        options[option] = match.removesuffix(
                            match[0]).removeprefix(match[0])
                    length += 1
                    break
        if ctx.input and length == 0:
            return "__Invalid parameters of input.__"
        name = options.get("name")
        parent = getIdFromUrl(options.get("parent"))
        limit = int(options.get("limit", 15))
        if limit > 1000:
            return "__Can't use limit more than 1000.__"
        # Translate the user-facing filter keyword into a Drive mimeType clause.
        filters = options.get("filter")
        if filters is not None and filters == "folder":
            filters = f"mimeType = '{FOLDER}'"
        elif filters is not None and filters == "file":
            filters = f"mimeType != '{FOLDER}'"
        else:
            filters = None
        # Assemble the Drive `q` query from whichever options are present.
        if all(x is not None for x in [parent, name, filters]):
            query = f"'{parent}' in parents and (name contains '{name}' and {filters})"
        elif parent is not None and name is not None and filters is None:
            query = f"'{parent}' in parents and (name contains '{name}')"
        elif parent is not None and name is None and filters is not None:
            query = f"'{parent}' in parents and ({filters})"
        elif parent is not None and name is None and filters is None:
            query = f"'{parent}' in parents"
        elif parent is None and name is not None and filters is not None:
            query = f"name contains '{name}' and {filters}"
        elif parent is None and name is not None and filters is None:
            query = f"name contains '{name}'"
        elif parent is None and name is None and filters is not None:
            query = filters
        else:
            query = ""
        try:
            # Ignore given parent, name, filter options if q present
            query = options["q"]
        except KeyError:
            pass
        output = ""
        count = 0
        try:
            async for contents in self.searchContent(query=query, limit=limit):
                for content in contents:
                    if count >= limit:
                        break
                    count += 1
                    output += (
                        MIME_TYPE.get(content["mimeType"], "📄") +
                        f" [{content['name']}]({content['webViewLink']})\n")
                if count >= limit:
                    break
        except HttpError as e:
            # Drive reports bad query/parent values via the error's
            # 'location' field; map those to friendly messages.
            if "'location': 'q'" in str(e):
                return "__Invalid parameters of query.__"
            if "'location': 'fileId'" in str(e):
                return "__Invalid parameters of parent.__"
            raise
        if query == "":
            query = "Not specified"
        return f"**Google Drive Search**:\n{query}\n\n**Result**\n{output}"
| StarcoderdataPython |
11314896 | """ Rabbitai utilities for pandas.DataFrame.
"""
import warnings
from typing import Any, Dict, List
import pandas as pd
from rabbitai.utils.core import JS_MAX_INTEGER
def _convert_big_integers(val: Any) -> Any:
"""
Cast integers larger than ``JS_MAX_INTEGER`` to strings.
:param val: the value to process
:returns: the same value but recast as a string if it was an integer over
``JS_MAX_INTEGER``
"""
return str(val) if isinstance(val, int) and abs(val) > JS_MAX_INTEGER else val
def df_to_records(dframe: pd.DataFrame) -> List[Dict[str, Any]]:
    """
    Convert a DataFrame into a list of row dictionaries.

    Integers too large for JavaScript are converted to strings via
    ``_convert_big_integers``.

    :param dframe: the DataFrame to convert
    :returns: one dict per row, keyed by column name
    """
    if not dframe.columns.is_unique:
        warnings.warn(
            "DataFrame columns are not unique, some columns will be omitted.",
            UserWarning,
            stacklevel=2,
        )
    columns = dframe.columns
    rows = zip(*(dframe[column] for column in columns))
    return [
        {column: _convert_big_integers(value) for column, value in zip(columns, row)}
        for row in rows
    ]
| StarcoderdataPython |
3214942 | <reponame>Zeref-Draganeel/hata<filename>hata/discord/events/handling_helpers.py
__all__ = ('EventHandlerBase', 'EventWaitforBase', 'eventlist', )
import sys
from functools import partial as partial_func
from ...backend.utils import FunctionType, RemovedDescriptor, MethodLike, WeakKeyDictionary, NEEDS_DUMMY_INIT
from ...backend.futures import Task, is_coroutine_function
from ...backend.analyzer import CallableAnalyzer
from ..core import KOKORO
from ..message import Message
from .core import EVENT_HANDLER_NAME_TO_PARSER_NAMES
def _check_name_should_break(name):
"""
Checks whether the passed `name` is type `str`.
Used inside of ``check_name`` to check whether the given variable is usable, so we should stop checking
other alternative cases.
Parameters
----------
name : `Any`
Returns
-------
should_break : `bool`
If non empty `str` is received returns `True`, meanwhile if `None` or empty `str` is received `False`.
Raises
------
TypeError
If `name` was not passed as `None` or type `str`.
"""
if (name is None):
return False
if type(name) is not str:
raise TypeError(f'`name` should be `None` or type `str`, got `{name.__class__.__name__}`.')
if name:
return True
return False
def check_name(func, name):
    """
    Tries to find the given `func`'s preferred name. The check order is the following:
    - Passed `name` parameter.
    - `func.__event_name__`.
    - `func.__name__`.
    - `func.__class__.__name__`.
    If any of these is set (or passed at the case of `name`) as `None` or as an empty string, then those are ignored.
    Parameters
    ----------
    func : `None` or `callable`
        The function, what preferred name we are looking for.
    name : `None` or `str`
        A directly given name value by the user. Defaults to `None` by caller (or at least it should).
    Returns
    -------
    name : `str`
        The preferred name of `func` with lower case characters only.
    Raises
    ------
    TypeError
        - If a checked name is not `None` or `str` instance.
        - If a metaclass was given.
        - If both `name` and `func` are given as `None`.
    """
    if None is func is name:
        raise TypeError(f'Both `func` and `name` are given as `None`')
    # `while True` + `break` lets every lookup below share the single
    # lower-casing step at the end; exactly one candidate must succeed,
    # otherwise we raise inside the loop.
    while True:
        if _check_name_should_break(name):
            break
        if hasattr(func, '__event_name__'):
            name = func.__event_name__
            if _check_name_should_break(name):
                break
        # func or method: use its own name if it has one.
        if hasattr(func, '__name__'):
            name = func.__name__
            if _check_name_should_break(name):
                break
        # Instances of callable types fall back to their type's name.
        func = type(func)
        if not issubclass(func, type) and hasattr(func, '__name__'):
            name = func.__name__
            if _check_name_should_break(name):
                break
        raise TypeError(f'Meta-classes are not allowed, got {func!r}.')
    if not name.islower():
        name = name.lower()
    return name
def check_parameter_count_and_convert(func, expected, *, name='event', can_be_async_generator=False,
        error_message=None):
    """
    If needed converts the given `func` to an async callable and then checks whether it expects the specified
    amount of non reserved positional parameters.
    `func` can be either:
    - An async `callable`.
    - A class with non async `__new__` (neither `__init__` of course) accepting no non reserved parameters,
        meanwhile it's `__call__` is async. This is the convert (or instance) case and it causes the final parameter
        count check to be applied on the type's `__call__`.
    - A class with async `__new__`.
    After the callable was chosen, then the amount of positional parameters are checked what it expects. Reserved
    parameters, like `self` are ignored and if the callable accepts keyword only parameter, then it is a no-go.
    If every check passed, then at the convert case instances the type and returns that, meanwhile at the other cases
    it returns the received `func`.
    Parameters
    ----------
    func : `callable`
        The callable, what's type and parameter count will checked.
    expected : `int`
        The amount of parameters, what would be passed to the given `func` when called at the future.
    name : `str`, Optional (Keyword only)
        The event's name, what is checked and converted. Defaults to `'event'`.
    can_be_async_generator : `bool`, Optional (Keyword only)
        Whether async generators are accepted as well.
    error_message : `str`, Optional (Keyword only)
        A specified error message with what a `TypeError` will be raised at the end, if the given `func` is not async
        and neither cannot be converted to an async callable.
    Returns
    -------
    func : `callable`
    Raises
    ------
    TypeError
        - If `func` was not given as callable.
        - If `func` is not as async and neither cannot be converted to an async one.
        - If `func` expects less or more non reserved positional parameters as `expected` is.
    """
    analyzer = CallableAnalyzer(func)
    # Case 1: `func` itself is already async (or an async generator, if allowed).
    if analyzer.is_async() or (analyzer.is_async_generator() if can_be_async_generator else False):
        min_, max_ = analyzer.get_non_reserved_positional_parameter_range()
        if min_ > expected:
            raise TypeError(f'A `{name}` should accept `{expected!r}` parameters, meanwhile the given callable expects '
                f'at least `{min_!r}`, got `{func!r}`.')
        if min_ == expected:
            return func
        # min < expected
        if max_ >= expected:
            return func
        # `*args` can absorb the extra positional parameters.
        if analyzer.accepts_args():
            return func
        raise TypeError(f'A `{name}` should accept `{expected}` parameters, meanwhile the given callable expects up to '
            f'`{max_!r}`, got `{func!r}`.')
    # Case 2: `func` is a type that can be instanced to an async callable;
    # the parameter count check is applied on its `__call__` instead.
    if analyzer.can_instance_to_async_callable() or \
            (analyzer.can_instance_to_async_generator() if can_be_async_generator else False):
        sub_analyzer = CallableAnalyzer(func.__call__, as_method=True)
        if sub_analyzer.is_async():
            min_, max_ = sub_analyzer.get_non_reserved_positional_parameter_range()
            if min_ > expected:
                raise TypeError(f'A `{name}` should accept `{expected!r}` parameters, meanwhile the given callable '
                    f'after instancing expects at least `{min_!r}`, got `{func!r}`.')
            if min_ == expected:
                func = analyzer.instance()
                return func
            # min < expected
            if max_ >= expected:
                func = analyzer.instance()
                return func
            if sub_analyzer.accepts_args():
                func = analyzer.instance()
                return func
            raise TypeError(f'A `{name}` should accept `{expected}` parameters, meanwhile the given callable after '
                f'instancing expects up to `{max_!r}`, got `{func!r}`.')
        # `__call__` is not a plain coroutine function (async generator case);
        # instance and return as-is.
        func = analyzer.instance()
        return func
    if error_message is None:
        error_message = f'Not async callable type, or cannot be instance to async: `{func!r}`.'
    raise TypeError(error_message)
def compare_converted(converted, non_converted):
    """
    Compares a maybe instance-able type to an instanced object.
    Parameters
    ----------
    converted : `Any`
        The already converted object.
    non_converted : `Any`
        The not yet converted instance to match `converted` on.
    Returns
    -------
    matches : `bool`
        Whether `converted` is matched by `non_converted`.
    Raises
    ------
    TypeError
        If `non_converted` is neither a function, a method, a callable object nor an instantiable type.
    """
    # function, both should be functions
    if isinstance(non_converted, FunctionType):
        return (converted is non_converted)
    # method, both should be methods
    if isinstance(non_converted, MethodLike):
        return (converted is non_converted)
    # callable object, both should be the same
    if not isinstance(non_converted, type) and hasattr(type(non_converted), '__call__'):
        return (converted is non_converted)
    # type, but not metaclass
    if not issubclass(non_converted, type) and isinstance(non_converted, type):
        # async initializer, both is type
        if is_coroutine_function(non_converted.__new__):
            return (converted is non_converted)
        # async call -> should be initialized already, compare the converted's type
        if hasattr(non_converted, '__call__'):
            return (type(converted) is non_converted)
    # Nothing above matched: the input is unsupported.
    raise TypeError(f'Expected function, method or a callable object, got {non_converted!r}')
def _convert_unsafe_event_iterable(iterable, type_=None):
    """
    Converts an iterable to a list of ``EventListElement``-s. This function is called to generate a ``eventlist``
    compatible `list` to avoid handling the same cases everywhere.
    `iterable`'s element's can be:
    - ``EventListElement`` instance.
    - `type_` instance if given.
    - `tuple` of `1`-`3` elements (`func`, `args`, `kwargs`).
    - `func` itself.
    Parameters
    ----------
    iterable : `iterable`
        The iterable, what's elements will be checked.
    type_ : `None` or `type`
        If `type_` was passed, then each element is pre-validated with the given type. Some extension classes might
        support behaviour.
        The given `type_` should implement a `from_args_kwargs` constructor.
    Returns
    -------
    result : `list` of (``EventListElement`` or ``type_``)
    Raises
    ------
    ValueError
        If an element of the received iterable does not matches any of the expected formats.
    """
    result = []
    for element in iterable:
        if type(element) is EventListElement:
            # Already wrapped; re-wrap into `type_` if a validator type is given.
            if (type_ is not None):
                element = type_.from_args_kwargs(element.func, element.args, element.kwargs)
                if isinstance(element, type_):
                    pass
        else:
            # Raw element: unpack `(func, args, kwargs)` tuples, or treat the
            # element itself as the function.
            if isinstance(element, tuple):
                element_len = len(element)
                if element_len > 3 or element_len == 0:
                    raise ValueError(f'Expected `tuple` with length 1 or 2, got `{element!r}`.')
                func = element[0]
                if element_len == 1:
                    args = None
                    kwargs = None
                else:
                    args = element[1]
                    if (args is not None) and not isinstance(args, tuple):
                        raise ValueError(f'Expected `None` or `tuple` instance at index 1 at element: `{element!r}`')
                    if element_len == 2:
                        kwargs = None
                    else:
                        kwargs = element[2]
                        if (kwargs is not None):
                            if (type(kwargs) is not dict):
                                raise ValueError(f'Expected `None` or `dict` instance at index 2 at element: '
                                    f'`{element!r}`')
                            # Empty kwargs are normalized to `None`.
                            if not kwargs:
                                kwargs = None
            else:
                func = element
                args = None
                kwargs = None
            if type_ is None:
                element = EventListElement(func, args, kwargs)
            else:
                element = type_.from_args_kwargs(func, args, kwargs)
        result.append(element)
        continue
    return result
def create_event_from_class(constructor, klass, parameter_names, name_name, event_name):
    """
    Creates an event by collecting the named attributes off `klass` and passing them to `constructor`.

    Parameters
    ----------
    constructor : `callable`
        The constructor invoked with the collected values, in `parameter_names` order.
    klass : `type`
        The type to work with.
    parameter_names : `tuple` of `str`
        The parameter names to pass to the constructor.
    name_name : `str` or `None`
        The event's name's name. When not present on `klass`, it defaults to the class's own name.
    event_name : `str`
        The event's name. If the event is not found, it defaults to the attribute named after the class, if any.

    Returns
    -------
    instance : `Any`
        The created instance.

    Raises
    ------
    BaseException
        Any occurred exception.
    """
    if not isinstance(klass, type):
        raise TypeError(f'Expected `type` instance, got {klass.__class__.__name__}.')
    # Collect each requested attribute as a `(value, found)` pair.
    _missing = object()
    collected = {}
    for parameter_name in parameter_names:
        value = getattr(klass, parameter_name, _missing)
        if value is _missing:
            collected[parameter_name] = (None, False)
        else:
            collected[parameter_name] = (value, True)
    name = klass.__name__
    # Fall back to the class's own name for the "name" parameter.
    if (name_name is not None) and (not collected[name_name][1]):
        collected[name_name] = (name, True)
    # The event itself may be stored under an attribute named like the class.
    if not collected[event_name][1]:
        value = getattr(klass, name, _missing)
        if value is not _missing:
            collected[event_name] = (value, True)
    return constructor(*(collected[parameter_name][0] for parameter_name in parameter_names))
class _EventHandlerManager:
    """
    Gives a decorator functionality to an event handler, because 'rich' event handlers still can not be used a
    decorator, their `__call__` is already allocated for handling their respective event.
    This class is familiar to ``eventlist``, but it directly works with the respective event handler giving an
    easy API to do operations with it.
    Attributes
    ----------
    parent : `Any`
        The ``_EventHandlerManager``'s parent event handler.
    """
    __slots__ = ('parent',)
    def __init__(self, parent):
        """
        Creates an ``_EventHandlerManager`` from the given event handler.
        The `parent` event handler should implement the following methods:
        - `.create_event(func, *args, **kwargs)`
        - `.delete_event(func)`
        And optionally:
        - `.create_event_from_class(klass)`
        Parameters
        ----------
        parent : `Any`
            The respective event handler.
        """
        self.parent = parent
    def __repr__(self):
        """Returns the representation of the event handler manager."""
        return f'<{self.__class__.__name__} of {self.parent!r}>'
    def __call__(self, func=..., *args, **kwargs):
        """
        Adds the given `func` to the event handler manager's parent. If `func` is not passed, then returns a
        partial wrapper to allow using the manager as a decorator with still passing keyword parameters.
        Parameters
        ----------
        func : `callable`, Optional
            The event to be added to the respective event handler.
        *args : Positional parameters
            Additionally passed positional parameters to be passed with the given `func` to the event handler.
        **kwargs : Keyword parameters
            Additionally passed keyword parameters to be passed with the given `func` to the event handler.
        Returns
        -------
        func : `callable`
            - The created instance by the respective event handler.
            - If `func` was not passed, then returns a partial wrapper instead.
        """
        # Called with only configuration (decorator-with-parameters form):
        # defer the registration by returning a partial of ourselves.
        if func is ...:
            return partial_func(self, *args, **kwargs)
        func = self.parent.create_event(func, *args, **kwargs)
        return func
    def from_class(self, klass):
        """
        Allows the event handler manager to be able to capture a class and create add it to the parent event handler
        from it's attributes.
        Parameters
        ----------
        klass : `type`
            The class to capture.
        Returns
        -------
        func : `callable`
            The created instance by the respective event handler.
        Raises
        ------
        TypeError
            If the parent of the event handler manager has no support for `.from_class`.
        """
        from_class_constructor = getattr(type(self.parent), 'create_event_from_class', None)
        if (from_class_constructor is None):
            raise TypeError(f'`.from_class` is not supported by `{self.parent!r}`.')
        return from_class_constructor(self.parent, klass)
    def remove(self, func, *args, **kwargs):
        """
        Removes the given `func` from the event handler manager's parent.
        Parameters
        ----------
        func : `callable`
            The event to be removed to the respective event handler.
        *args : Positional parameters
            Additional positional parameters.
        **kwargs : Keyword parameters
            Additional keyword parameters.
        """
        self.parent.delete_event(func, *args, **kwargs)
    def __getattr__(self, name):
        """Returns the attribute of the event handler manager's parent."""
        return getattr(self.parent, name)
    def extend(self, iterable):
        """
        Extends the respective event handler with the given iterable of events.
        Parameters
        ----------
        iterable : `iterable`
        Raises
        ------
        TypeError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is not accepted by the parent
                event handler.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
        """
        if type(iterable) is eventlist:
            type_ = iterable.type
            if (type_ is not None):
                # Typed eventlist: validate the type once, then add the
                # already-built elements directly.
                parent = self.parent
                supported_types = getattr(parent, 'SUPPORTED_TYPES', None)
                if (supported_types is None) or (type_ not in supported_types):
                    raise TypeError(f'`{parent!r}` does not supports elements of type `{type_!r}`.')
                for element in iterable:
                    parent.create_event(element)
                return
        else:
            # Arbitrary iterable: normalize elements into ``EventListElement``-s first.
            iterable = _convert_unsafe_event_iterable(iterable)
        parent = self.parent
        for element in iterable:
            func = element.func
            args = element.args
            kwargs = element.kwargs
            if args is None:
                if kwargs is None:
                    parent.create_event(func,)
                else:
                    parent.create_event(func, **kwargs)
            else:
                if kwargs is None:
                    parent.create_event(func, *args)
                else:
                    parent.create_event(func, *args, **kwargs)
    def unextend(self, iterable):
        """
        Unextends the respective event handler with the given `iterable`.
        Parameters
        ----------
        iterable : `iterable`
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute not accepted by the parent
                event handler.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
            - If any of the passed element is not stored by the parent event handler. At this case error is raised
                only at the end.
        """
        if type(iterable) is eventlist:
            type_ = iterable.type
            if (type_ is not None):
                parent = self.parent
                supported_types = getattr(parent, 'SUPPORTED_TYPES', None)
                if (supported_types is None) or (type_ not in supported_types):
                    raise TypeError(f'`{parent!r}` does not supports elements of type `{type_!r}`.')
                # Collect every failure and raise them together at the end.
                collected = []
                for element in iterable:
                    try:
                        parent.delete_event(element, None)
                    except ValueError as err:
                        collected.append(err.args[0])
                if collected:
                    raise ValueError('\n'.join(collected)) from None
                return
        else:
            iterable = _convert_unsafe_event_iterable(iterable)
        collected = []
        parent = self.parent
        for element in iterable:
            func = element.func
            args = element.args
            kwargs = element.kwargs
            try:
                if args is None:
                    if kwargs is None:
                        parent.delete_event(func)
                    else:
                        parent.delete_event(func, **kwargs)
                else:
                    if kwargs is None:
                        parent.delete_event(func, *args)
                    else:
                        parent.delete_event(func, *args, **kwargs)
            except ValueError as err:
                collected.append(err.args[0])
        if collected:
            raise ValueError('\n'.join(collected)) from None
class _EventHandlerManagerRouter(_EventHandlerManager):
    """
    Wraps multiple `Client``'s ``_EventHandlerManager`` functionality together.
    Attributes
    ----------
    _getter : `callable`
        A callable what should return the ``_EventHandlerManager``-s of the `_EventHandlerManagerRouter`, on who the
        extension is applied. Receives the ``_EventHandlerManagerRouter`` itself and returns the event handlers.
    _from_class_constructor : `callable` or `None`
        Whether the extension supports `.from_class` method and how exactly it does. If set as `None`, means it not
        supports it. Receives the captured class and returns the created command(s).
    parent : ``ClientWrapper``
        The parent ``ClientWrapper``.
    """
    __slots__ = ('_getter', '_from_class_constructor', 'parent')
    def __init__(self, parent, getter, from_class_constructor):
        """
        Creates an ``_EventHandlerManagerRouter`` routing to all the clients of a ``ClientWrapper``.
        Parameters
        ----------
        parent : ``ClientWrapper``
            The respective routed client wrapper.
        getter : `callable`
            A callable returning the ``_EventHandlerManager``-s on which the extension is applied; receives the
            ``_EventHandlerManagerRouter`` itself.
        from_class_constructor : `None` or `callable`
            Whether the extension supports `.from_class` method and how exactly it does. If given as `None`, then it
            means it not supports it. Receives the captured class and returns the created command(s).
        """
        self.parent = parent
        self._getter = getter
        self._from_class_constructor = from_class_constructor
    def __call__(self, func=..., *args, **kwargs):
        """
        Adds the given `func` to all of the represented client's respective event handler managers.
        Parameters
        ----------
        func : `callable`, Optional
            The event to be added to the respective event handler.
        *args : Positional parameter
            Additionally passed positional parameters to be passed with the given `func` to the event handler.
        **kwargs : Keyword parameters
            Additionally passed keyword parameters to be passed with the given `func` to the event handler.
        Returns
        -------
        func : ``Router``
            The added functions.
        """
        # Decorator-with-parameters form: defer with a partial.
        if func is ...:
            return partial_func(self, *args, **kwargs)
        handlers = self._getter(self)
        if not handlers:
            return
        # Route the parameters and the function itself once per handler, then
        # register each routed copy with its respective handler.
        count = len(handlers)
        routed_args = route_args(args, count)
        routed_kwargs = route_kwargs(kwargs, count)
        routed_func = maybe_route_func(func, count)
        routed = []
        for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
            func = handler.create_event(func_, *args, **kwargs)
            routed.append(func)
        return Router(routed)
    def from_class(self, klass):
        """
        Allows the event handler manager router to be able to capture a class and create and add it to the represented
        event handlers from it's attributes.
        Parameters
        ----------
        klass : `type`
            The class to capture.
        Returns
        -------
        routed : ``Router``
            The routed created instances.
        Raises
        ------
        TypeError
            If the parent of the event handler manager has no support for `.from_class`.
        BaseException
            Any exception raised by any of the event handler.
        """
        from_class_constructor = self._from_class_constructor
        if from_class_constructor is None:
            raise TypeError(f'`.from_class` is not supported by `{self.parent!r}`.')
        handlers = self._getter(self)
        count = len(handlers)
        if not count:
            return
        routed_maybe = from_class_constructor(klass)
        if isinstance(routed_maybe, Router):
            # Already routed: the length must match the amount of handlers.
            if len(routed_maybe) != count:
                raise ValueError(f'The given class is routed to `{len(routed_maybe)}`, meanwhile expected to be routed '
                    f'to `{count}` times, got {klass!r}.')
            routed = routed_maybe
        else:
            # Single instance: duplicate it per handler, preferring the type's
            # `copy` method over sharing the same object.
            copy_method = getattr(type(routed_maybe), 'copy', None)
            if copy_method is None:
                routed = [routed_maybe for _ in range(count)]
            else:
                routed = [copy_method(routed_maybe) for _ in range(count)]
        for handler, event in zip(handlers, routed):
            handler.create_event(event)
        return routed
    def remove(self, func, *args, **kwargs):
        """
        Removes the given `func` from the represented event handler managers.
        Parameters
        ----------
        func : ``Router``, `callable`
            The event to be removed to the respective event handlers.
        *args : `str` or `None`
            Additional positional parameters.
        **kwargs : Keyword parameters
            Additional keyword parameters.
        """
        handlers = self._getter(self)
        count = len(handlers)
        if not count:
            return
        if isinstance(func, Router):
            if len(func) != count:
                raise ValueError(f'The given `func` is routed `{len(func)}` times, meanwhile expected to be routed '
                    f'to `{count}` times, got {func!r}.')
            for func, handler in zip(func, handlers):
                handler.delete_event(func, *args, **kwargs)
        else:
            for handler in handlers:
                handler.delete_event(func, *args, **kwargs)
    def extend(self, iterable):
        """
        Extends the event handler manager router's respective managers with the given iterable of events.
        Parameters
        ----------
        iterable : `iterable`
        Raises
        ------
        TypeError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is not accepted by the parent
                event handler.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
        """
        handlers = self._getter(self)
        count = len(handlers)
        if not count:
            return
        if type(iterable) is eventlist:
            type_ = iterable.type
            if (type_ is not None):
                parent = self.parent
                supported_types = getattr(handlers[0], 'SUPPORTED_TYPES', None)
                if (supported_types is None) or (type_ not in supported_types):
                    raise TypeError(f'`{parent!r}` does not supports elements of type `{type_!r}`.')
                for element in iterable:
                    if isinstance(element, Router):
                        if len(element) != count:
                            raise ValueError(f'The given `func` is routed `{len(element)}` times, meanwhile expected to be routed '
                                f'to `{count}` times, got {element!r}.')
                        for func, handler in zip(element, handlers):
                            handler.create_event(func, None)
                    else:
                        for handler in handlers:
                            handler.create_event(element, None)
                return
        else:
            # Arbitrary iterable: normalize into ``EventListElement``-s first.
            iterable = _convert_unsafe_event_iterable(iterable)
        for element in iterable:
            func = element.func
            args = element.args
            kwargs = element.kwargs
            routed_args = route_args(args, count)
            routed_func = maybe_route_func(func, count)
            routed_kwargs = route_kwargs(kwargs, count)
            for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
                handler.create_event(func_, *args, **kwargs)
    def unextend(self, iterable):
        """
        Unextends the event handler router's represented event handlers with the given `iterable`.
        Parameters
        ----------
        iterable : `iterable`
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute not accepted by the parent
                event handler.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
            - If any of the passed element is not stored by the parent event handler. At this case error is raised
                only at the end.
        """
        handlers = self._getter(self)
        count = len(handlers)
        if not count:
            return
        if type(iterable) is eventlist:
            type_ = iterable.type
            if (type_ is not None):
                parent = self.parent
                supported_types = getattr(handlers[0], 'SUPPORTED_TYPES', None)
                if (supported_types is None) or (type_ not in supported_types):
                    raise TypeError(f'`{parent!r}` does not supports elements of type `{type_!r}`.')
                # Collect every failure and raise them together at the end.
                collected = []
                for element in iterable:
                    if isinstance(element, Router):
                        if len(element) != count:
                            collected.append(f'The given `func` is routed `{len(element)}` times, meanwhile expected '
                                f'to be routed to `{count}` times, got {element!r}.')
                            continue
                        for func, handler in zip(element, handlers):
                            try:
                                handler.delete_event(func, None)
                            except ValueError as err:
                                collected.append(err.args[0])
                    else:
                        for handler in handlers:
                            try:
                                handler.delete_event(element, None)
                            except ValueError as err:
                                collected.append(err.args[0])
                if collected:
                    raise ValueError('\n'.join(collected)) from None
                return
        else:
            iterable = _convert_unsafe_event_iterable(iterable)
        collected = []
        for element in iterable:
            func = element.func
            args = element.args
            kwargs = element.kwargs
            routed_func = maybe_route_func(func, count)
            if kwargs is None:
                for handler, func_ in zip(handlers, routed_func):
                    try:
                        handler.delete_event(func_)
                    except ValueError as err:
                        collected.append(err.args[0])
            else:
                routed_kwargs = route_kwargs(kwargs, count)
                routed_args = route_args(args, count)
                for handler, func_, args, kwargs in zip(handlers, routed_func, routed_args, routed_kwargs):
                    try:
                        handler.delete_event(func_, *args, **kwargs)
                    except ValueError as err:
                        collected.append(err.args[0])
        if collected:
            raise ValueError('\n'.join(collected)) from None
    def __repr__(self):
        """Returns the representation of the event handler manager router."""
        return f'<{self.__class__.__name__} parent={self.parent!r}, getter={self._getter!r}, from_class_constructor=' \
            f'{self._from_class_constructor!r}>'
class EventListElement:
    """
    A single entry of an ``eventlist``: the event callable plus the extra
    positional and keyword parameters it should be registered with.

    Attributes
    ----------
    func : `callable`
        The event callable itself.
    args : `None` or `tuple` of `Any`
        Extra positional parameters for `func`.
    kwargs : `None` or `dict` of (`str`, `Any`) items
        Extra keyword parameters for `func`.
    """
    __slots__ = ('func', 'args', 'kwargs', )

    def __init__(self, func, args, kwargs):
        """
        Creates an ``EventListElement`` from the given parameters.

        Parameters
        ----------
        func : `callable`
            The event callable itself.
        args : `None` or `tuple` of `Any`
            Extra positional parameters for `func`.
        kwargs : `None` or `dict` of (`str`, `Any`) items
            Extra keyword parameters for `func`.
        """
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __repr__(self):
        """Returns the event-list element's representation."""
        return f'{type(self).__name__}({self.func!r}, args={self.args!r}, kwargs={self.kwargs!r})'

    def __len__(self):
        """Helper for unpacking; an element always unpacks to 3 values."""
        return 3

    def __iter__(self):
        """Unpacks the event-list element as `(func, args, kwargs)`."""
        return iter((self.func, self.args, self.kwargs))
class Router(tuple):
    """
    Object used to describe multiple captured created command-like objects.
    """
    def __repr__(self):
        """Returns the router's representation."""
        # Render as `Router(elem_0, elem_1, ...)`; an empty router renders as `Router()`.
        joined_elements = ', '.join(repr(element) for element in self)
        return f'{self.__class__.__name__}({joined_elements})'
def route_value(to_route_value, count, default=None):
    """
    Routes only a single `name` - `value` pair.
    
    Parameters
    ----------
    to_route_value : `Any`
        The respective value to route
    count : `int`
        The expected amount of copies to generate.
    default : `Any`, Optional
        Optional default variable to use. Defaults to `None`.
    
    Returns
    -------
    result : `list` of `Any`
        A list of the routed values
    
    Raises
    ------
    ValueError
        If `to_route_value` is a `tuple` with a length different from `count`.
    """
    if not isinstance(to_route_value, tuple):
        # Scalar case: broadcast one value (or the default) `count` times.
        if (to_route_value is None) or (to_route_value is ...):
            to_route_value = default
        return [to_route_value for _ in range(count)]
    
    if len(to_route_value) != count:
        raise ValueError(f'The represented router has `{count}` applicable clients, meanwhile received only '
            f'`{len(to_route_value)}` routed values, got: {to_route_value!r}.')
    
    routed = []
    # `...` means "repeat the previous value"; `None` means "use the default".
    previous = ...
    for element in to_route_value:
        if element is None:
            element = default
            previous = default
        elif element is ...:
            if previous is ...:
                previous = default
            element = previous
        else:
            previous = element
        routed.append(element)
    
    return routed
def route_parameter(parameter, count):
    """
    Routes a parameter to `count` amount of copies.
    
    This function is an iterable generator.
    
    Parameters
    ----------
    parameter : `Any`
        The parameter to route.
    count : `int`
        The expected amount of copies to generate.
    
    Yields
    ------
    result : `Any`
    
    Raises
    ------
    ValueError
        A value is a `tuple` instance, but it's length is different from `count`.
    """
    if not isinstance(parameter, tuple):
        # Scalar case: yield the very same object `count` times.
        for _ in range(count):
            yield parameter
        return
    
    if len(parameter) != count:
        raise ValueError(f'The represented router has `{count}` applicable clients, meanwhile received only '
            f'`{len(parameter)}` routed values, got: {parameter!r}.')
    
    # `...` repeats the previously yielded non-Ellipsis value; `None` resets it.
    previous = None
    for element in parameter:
        if element is None:
            previous = None
        elif element is ...:
            element = previous
        else:
            previous = element
        yield element
def route_kwargs(kwargs, count):
    """
    Routes the given `kwargs` to the given `count` amount of copies.
    
    If a value of a keyword is given as a `tuple` instance, then it will be routed by element for each
    applicable client.
    
    Parameters
    ----------
    kwargs : `dict` of (`str`, `Any`) items
        Keyword parameter to route.
    count : `int`
        The expected amount of copies to generate.
    
    Returns
    -------
    result : `tuple` of `dict` of (`str`, `Any) items
    
    Raises
    ------
    ValueError
        A value is a `tuple` instance, but it's length is different from `count`.
    """
    routed = tuple({} for _ in range(count))
    if kwargs is not None:
        for parameter_name, parameter_value in kwargs.items():
            # Spread each keyword's (possibly tuple-routed) value across the per-client dicts.
            for target, routed_value in zip(routed, route_parameter(parameter_value, count)):
                target[parameter_name] = routed_value
    
    return routed
def route_args(args, count):
    """
    Routes the given `args` to the given `count` amount of copies.
    
    Parameters
    ----------
    args : `tuple` of `Any`
        Positional parameter to route.
    count : `int`
        The expected amount of copies to generate.
    
    Returns
    -------
    result : `tuple` of `tuple` of `Any`
    
    Raises
    ------
    ValueError
        A value is a `tuple` instance, but it's length is different from `count`.
    """
    if args is None:
        return tuple(() for _ in range(count))
    
    # Collect per-client parameter lists, then freeze them into tuples.
    buckets = tuple([] for _ in range(count))
    for parameter_value in args:
        for bucket, routed_value in zip(buckets, route_parameter(parameter_value, count)):
            bucket.append(routed_value)
    
    return tuple(tuple(bucket) for bucket in buckets)
def route_name(name, count):
    """
    Routes the given `name` to the given `count` amount of copies.
    
    If `name` is given as `tuple`, then each element of it will be returned for each applicable client.
    `None` elements stay `None` (and reset the repeat-value), `...` repeats the previous element.
    
    Parameters
    ----------
    name : `None`, `Ellipsis`, `str`, `tuple` of (`None`, `Ellipsis`, `str`)
        The name to use instead of `func`'s real one.
    count : `int`
        The expected amount of names.
    
    Returns
    -------
    result : `list` of `str`
    
    Raises
    ------
    TypeError
        - If `name` was not given as `None`, `Ellipsis`, `str`, neither as `tuple` of (`None`, `Ellipsis`, `str`).
    ValueError
        If `name` was given as `tuple` but it's length is different from the expected one.
    """
    result = []
    if isinstance(name, tuple):
        # Validate every element before routing anything.
        for index, name_value in enumerate(name):
            if (name_value is not None) and (name_value is not ...) and (not isinstance(name_value, str)):
                raise TypeError(f'`name` was given as a `tuple`, but it\'s {index}th element is not `None`, '
                    f'`Ellipsis`, neither `str` instance, got, {name_value.__class__.__name__}: {name_value}.')
        
        if len(name) != count:
            raise ValueError(f'`name` was given as `tuple`, but it\'s length ({len(name)!r}) not matches the expected '
                f'(`{count}`) one, got {name!r}.')
        
        last = None
        for name_value in name:
            # Bug fix: this condition used to read `if name is None:` — always false inside this
            # branch (`name` is a tuple here), leaving the `None`-reset to fall through to `else`.
            if name_value is None:
                last = None
            elif name_value is ...:
                name_value = last
            else:
                last = name_value
            
            result.append(name_value)
    else:
        if name is None:
            name_value = None
        elif isinstance(name, str):
            name_value = str(name)
        else:
            raise TypeError('`name` can be given as `None` or as `tuple` of (`None, `Ellipsis`, `str`), got: '
                f'{name.__class__.__name__}: {name!r}.')
        
        for _ in range(count):
            result.append(name_value)
    
    return result
def maybe_route_func(func, count):
    """
    Routes the given `func` `count` times if applicable.
    
    If `func`'s type defines a `copy` method, each routed entry is a fresh copy; otherwise the very
    same object is repeated.
    
    Parameters
    ----------
    func : `callable`
        The respective callable to route.
    count : `int`
        The expected amount of functions to return.
    
    Returns
    -------
    result : `list` of `func`
    """
    # Look the `copy` method up on the type, matching normal method resolution.
    copier = getattr(type(func), 'copy', None)
    if copier is None:
        return [func for _ in range(count)]
    
    return [copier(func) for _ in range(count)]
class eventlist(list):
    """
    Represents a container to store events before adding them to a client. Some extension classes might support this
    class as well.
    
    Attributes
    ----------
    kwargs : `None` or `dict` of (`str`, `Any`) items
        Keyword parameters used for each element when extending the client's events with the event-list.
    type : `None` or `type`
        If `type_` was passed when creating the eventlist, then each added element is pre-validated with the given
        type before adding them. Some extension classes might support this behaviour.
    
    Notes
    -----
    Hata's `commands` extension class supports collecting commands in ``eventlist`` and pre-validating as well with
    passing `type_` as `Command`.
    """
    # List mutators which would bypass the eventlist's validation are removed on purpose.
    # (`remove`, `index` and `count` are re-defined with event semantics further below.)
    insert = RemovedDescriptor()
    sort = RemovedDescriptor()
    pop = RemovedDescriptor()
    reverse = RemovedDescriptor()
    remove = RemovedDescriptor()
    index = RemovedDescriptor()
    count = RemovedDescriptor()
    __mul__ = RemovedDescriptor()
    __rmul__ = RemovedDescriptor()
    __imul__ = RemovedDescriptor()
    __add__ = RemovedDescriptor()
    __radd__ = RemovedDescriptor()
    __iadd__ = RemovedDescriptor()
    __setitem__ = RemovedDescriptor()
    __contains__ = RemovedDescriptor()
    
    __slots__ = ('kwargs', 'type')
    
    def __new__(cls, iterable=None, type_=None, **kwargs):
        """
        Creates a new eventlist from the the given parameters.
        
        Parameters
        ----------
        iterable : `iterable`, Optional
            An iterable of events to extend the eventlist with.
        type_ : `type`, Optional
            A type to validate each added element to the eventlist.
        **kwargs : Keyword parameters
            Additional keyword parameters to be used when adding each element.
        
        Raises
        ------
        TypeError
            If `type_` was not given as a `type` instance.
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is different.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
        """
        if (type_ is not None) and (not isinstance(type_, type)):
            raise TypeError(f'`type_` can be given as `None` or as `type` instance, got `{type_!r}`.')
        
        # Empty keyword dicts are normalized to `None`.
        if not kwargs:
            kwargs = None
        
        self = list.__new__(cls)
        self.type = type_
        self.kwargs = kwargs
        
        if (iterable is not None):
            self.extend(iterable)
        
        return self
    
    if NEEDS_DUMMY_INIT:
        def __init__(self, *args, **kwargs):
            pass
    
    def from_class(self, klass):
        """
        Allows the ``eventlist`` to be able to capture a class and create an element from it's attributes.
        
        Parameters
        ----------
        klass : `type`
            The class to capture.
        
        Returns
        -------
        element : `callable`
            The created instance from the eventlist's `.type`.
        
        Raises
        ------
        TypeError
            If the eventlist has no `.type` set, or if it's `.type` is not supporting this method.
        """
        type_ = self.type
        if type_ is None:
            # Bug fix: the message was previously only assigned, never raised, and execution fell
            # through to calling `None` later.
            raise TypeError('On `eventlist` without type `.from_class` method cannot be used.')
        
        from_class = getattr(type_, 'from_class', None)
        if from_class is None:
            raise TypeError(f'The `eventlist`\'s type: `{type_!r}` is not supporting `.from_class`.')
        
        element = from_class(klass)
        list.append(self, element)
        return element
    
    def extend(self, iterable):
        """
        Extends the ``eventlist`` with the given `iterable`.
        
        Parameters
        ----------
        iterable : `iterable`
            An iterable of events to extend the eventlist with.
        
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is different.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
        """
        if type(iterable) is type(self):
            if self.type is not iterable.type:
                raise ValueError(f'Extending {self.__class__.__name__} with an other object of the same type, but with '
                    f'a different type, own: `{self.type!r}`, other\'s: `{iterable.type!r}`.')
        else:
            iterable = _convert_unsafe_event_iterable(iterable, self.type)
        
        list.extend(self, iterable)
    
    def unextend(self, iterable):
        """
        Unextends the eventlist with the given `iterable`.
        
        Parameters
        ----------
        iterable : `iterable`
            An iterable of events to unextend the eventlist with.
        
        Raises
        ------
        ValueError
            - If `iterable` was passed as ``eventlist`` and it's `.type` attribute is different.
            - If `iterable` was not passed as type ``eventlist`` and any of it's element's format is incorrect.
            - If any of the passed elements is not at the ``eventlist``. At this case error is raised only at the end.
        """
        if type(iterable) is not type(self):
            iterable = _convert_unsafe_event_iterable(iterable, self.type)
        else:
            if self.type is not iterable.type:
                raise ValueError(f'Extending {self.__class__.__name__} with an other object of the same type, but with '
                    f'a different type, own: `{self.type!r}`, other\'s: `{iterable.type!r}`.')
        
        collected = []
        for element in iterable:
            try:
                # Elements unpack as `(func, args, kwargs)`; `remove` matches on `func` and ignores
                # the extras (see its signature).
                self.remove(*element)
            except ValueError as err:
                collected.append(err.args[0])
        
        if collected:
            raise ValueError('\n'.join(collected))
    
    def __call__(self, func=..., *args, **kwargs):
        """
        Adds the given `func` to the ``eventlist`` with the other given keyword parameters. If `func` is not passed,
        then returns a ``._wrapper` to allow using the ``eventlist`` as a decorator with still passing keyword
        parameters.
        
        Parameters
        ----------
        func : `callable`, Optional
            The event to be added to the eventlist.
        *args : Positional parameter
            Additionally passed positional parameters to be used when the passed `func` is used up.
        **kwargs : Keyword parameters
            Additionally passed keyword parameters to be used when the passed `func` is used up.
        
        Returns
        -------
        func : `callable`
            - If `func` was passed and the eventlist has no `.type` then returns the passed `func`.
            - If `func` was passed and the eventlist has `.type` set, then returns an instance of that.
            - If `func` was not passed, then returns a ``._wrapper`` instance.
        """
        # Stored keyword parameters act as defaults for the per-call ones.
        own_kwargs = self.kwargs
        if (own_kwargs is not None) and own_kwargs:
            for name_, value_ in own_kwargs.items():
                kwargs.setdefault(name_, value_)
        
        if func is ...:
            return partial_func(self, *args, **kwargs)
        
        type_ = self.type
        if type_ is None:
            # Bug fix: was `EventListElement(func, *args, **kwargs)`, which unpacked the parameters
            # into `__init__`'s 3 positional slots and raised `TypeError` on nearly every call.
            element = EventListElement(func, args, kwargs)
        else:
            element = func = type_(func, *args, **kwargs)
        
        list.append(self, element)
        return func
    
    def remove(self, func, *args, **kwargs):
        """
        Removes an element of the eventlist.
        
        Extra positional and keyword parameters are accepted (and ignored) for compatibility with
        unpacked eventlist elements, which yield `(func, args, kwargs)`.
        
        Parameters
        ----------
        func : `callable`
            The function to remove.
        
        Raises
        ------
        ValueError
            If the passed `func` combination was not found.
        """
        # `remove`/`__iter__` might be overwritten at class level, so go through `list` directly.
        for element in list.__iter__(self):
            if compare_converted(element.func, func):
                # Bug fix: the matching element was previously only located, never removed.
                list.remove(self, element)
                return
        
        raise ValueError(f'Did not find any element, what matched the passed func={func!r}, combination.')
    
    def __repr__(self):
        """Returns the representation of the eventlist."""
        repr_parts = [
            self.__class__.__name__,
            '([',
        ]
        
        limit = list.__len__(self)
        if limit != 0:
            index = 0
            while True:
                element = list.__getitem__(self, index)
                repr_parts.append(repr(element))
                index += 1
                if index == limit:
                    break
                
                repr_parts.append(', ')
                continue
        
        repr_parts.append(']')
        
        type_ = self.type
        if (type_ is not None):
            repr_parts.append(', type=')
            repr_parts.append(repr(type_))
        
        kwargs = self.kwargs
        if (kwargs is not None):
            repr_parts.append(', kwargs=')
            repr_parts.append(repr(kwargs))
        
        repr_parts.append(')')
        return ''.join(repr_parts)
    
    def add_kwargs(self, **kwargs):
        """
        Adds keyword parameters to the ``eventlist`'s.
        
        Parameters
        ----------
        **kwargs : Keyword parameters
            KeyWord parameters to extend the ``eventlist``'s with.
        """
        if not kwargs:
            return
        
        own_kwargs = self.kwargs
        if own_kwargs is None:
            self.kwargs = kwargs
        else:
            own_kwargs.update(kwargs)
    
    def remove_kwargs(self, *names):
        """
        Removes keyword parameters of the ``eventlist`` by their name.
        
        Parameters
        ----------
        *names : Positional parameters
            Keyword parameter's name added to the ``eventlist``.
        """
        if not names:
            return
        
        own_kwargs = self.kwargs
        if own_kwargs is None:
            return
        
        for name in names:
            try:
                del own_kwargs[name]
            except KeyError:
                pass
        
        # Normalize an emptied dict back to `None`.
        if not own_kwargs:
            self.kwargs = None
    
    def clear_kwargs(self):
        """
        Clears the kwargs of the eventlist.
        """
        self.kwargs = None
# This class is a placeholder for the `with` statement support also for the `shortcut` property as well.
class EventHandlerBase:
    """
    Base class for event handlers.
    """
    __slots__ = ()
    
    async def __call__(self, *args):
        """
        Called by the respective parser with a ``Client`` as the first parameter, followed by the
        dispatch event's payload. Subclasses should overwrite this no-op.
        
        This method is a coroutine.
        
        Parameters
        ----------
        *args : Additional positional parameters
        """
        pass
    
    def create_event(self, func, *args, **kwargs):
        """
        Adds the specified event to the event handler. Subclasses should overwrite this no-op and
        may accept additional keyword parameters.
        
        Parameters
        ----------
        func : `callable`
            The callable to be added.
        *args : Positional parameters
            Positional parameters to pass to the created event.
        **kwargs : Keyword parameters
            Keyword parameters to pass to the created event.
        
        Returns
        -------
        func : `callable`
            The created event.
        """
        pass
    
    def delete_event(self, func):
        """
        Removes the specified event from the event handler. Subclasses should overwrite this no-op
        and may accept additional keyword parameters.
        
        Parameters
        ----------
        func : `callable`
            The callable to be removed.
        """
        pass
    
    @property
    def shortcut(self):
        """
        Shortcuts the event handler's event adding and removing functionality to make those operations easier.
        
        Returns
        -------
        event_handler_manager : ``_EventHandlerManager``
        """
        return _EventHandlerManager(self)
class EventWaitforMeta(type):
    """
    Metaclass for `waitfor` event handlers
    
    The defaultly supported events are the following:
    - `message_create`
    - `message_edit`
    - `message_delete`
    - `typing`
    - `channel_create`
    - `channel_edit`
    - `channel_delete`
    - `role_create`
    - `role_edit`
    - `role_delete`
    - `guild_delete`
    - `guild_edit`
    - `emoji_create`
    - `emoji_edit`
    - `emoji_delete`
    - `reaction_add`
    - `reaction_delete`
    
    See Also
    --------
    ``EventWaitforBase`` : Base class to inherit instead of meta-classing ``EventWaitforMeta``.
    """
    def __call__(cls, *args, **kwargs):
        """
        Instances the type.
        
        Auto-adds a `.waitfors` instance attribute to them and also sets it as a `WeakKeyDictionary`, so you would not
        need to bother with that.
        
        Parameters
        ----------
        *args : Additional positional parameters
        **kwargs : Additional keyword parameters
        
        Returns
        -------
        object_ : `Any`
        """
        object_ = cls.__new__(cls, *args, **kwargs)
        # Follow the regular instantiation protocol: if `__new__` returned an instance of an other
        # type, `__init__` must not run on it.
        if type(object_) is not cls:
            return object_
        
        # Target entities are only weakly referenced, so dead entities drop their waiters with them.
        object_.waitfors = WeakKeyDictionary()
        cls.__init__(object_, *args, **kwargs)
        return object_
    
    # Registry mapping event name -> default `call_waitfors` coroutine. It is populated below by
    # defining each coroutine at class-body level, storing it into this dict, then `del`-ing the
    # name, so only the dict entry survives on the class. The statement order is load-bearing.
    _call_waitfors = {}
    
    async def _call_message_create(self, client, message):
        # Waiters can be registered for the message's channel and for its guild (if any).
        args = (client, message)
        channel = message.channel
        self._run_waitfors_for(channel, args)
        guild = channel.guild
        if guild is None:
            return
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['message_create'] = _call_message_create
    del _call_message_create
    
    async def _call_message_edit(self, client, message, old_attributes):
        args = (client, message, old_attributes)
        channel = message.channel
        self._run_waitfors_for(channel, args)
        guild = channel.guild
        if guild is None:
            return
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['message_edit'] = _call_message_edit
    del _call_message_edit
    
    async def _call_message_delete(self, client, message,):
        args = (client, message)
        channel = message.channel
        self._run_waitfors_for(channel, args)
        guild = channel.guild
        if guild is None:
            return
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['message_delete'] = _call_message_delete
    del _call_message_delete
    
    async def _call_typing(self, client, channel, user, timestamp):
        args = (client, channel, user, timestamp)
        self._run_waitfors_for(channel, args)
        guild = channel.guild
        if guild is None:
            return
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['typing'] = _call_typing
    del _call_typing
    
    async def _call_channel_create(self, client, channel):
        # Channel creation is only meaningful inside a guild; private channels are skipped.
        guild = channel.guild
        if guild is None:
            return
        args = (client, channel)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['channel_create'] = _call_channel_create
    del _call_channel_create
    
    async def _call_channel_edit(self, client, channel, old_attributes):
        args = (client, channel, old_attributes)
        self._run_waitfors_for(channel, args)
        guild = channel.guild
        if guild is None:
            return
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['channel_edit'] = _call_channel_edit
    del _call_channel_edit
    
    async def _call_channel_delete(self, client, channel, guild):
        args = (client, channel, guild)
        self._run_waitfors_for(channel, args)
        if guild is None:
            return
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['channel_delete'] = _call_channel_delete
    del _call_channel_delete
    
    async def _call_role_create(self, client, role):
        args = (client, role)
        guild = role.guild
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['role_create'] = _call_role_create
    del _call_role_create
    
    async def _call_role_edit(self, client, role, old_attributes):
        args = (client, role, old_attributes)
        self._run_waitfors_for(role, args)
        guild = role.guild
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['role_edit'] = _call_role_edit
    del _call_role_edit
    
    async def _call_role_delete(self, client, role, guild):
        args = (client, role, guild)
        self._run_waitfors_for(role, args)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['role_delete'] = _call_role_delete
    del _call_role_delete
    
    async def _call_guild_delete(self, client, guild, profile):
        args = (client, guild, profile)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['guild_delete'] = _call_guild_delete
    del _call_guild_delete
    
    async def _call_guild_edit(self, client, guild, old_attributes):
        args = (client, guild, old_attributes)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['guild_edit'] = _call_guild_edit
    del _call_guild_edit
    
    async def _call_emoji_create(self, client, emoji):
        args = (client, emoji)
        guild = emoji.guild
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['emoji_create'] = _call_emoji_create
    del _call_emoji_create
    
    async def _call_emoji_edit(self, client, emoji, old_attributes):
        args = (client, emoji, old_attributes)
        self._run_waitfors_for(emoji, args)
        guild = emoji.guild
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['emoji_edit'] = _call_emoji_edit
    del _call_emoji_edit
    
    async def _call_emoji_delete(self, client, emoji, guild):
        args = (client, emoji, guild)
        self._run_waitfors_for(emoji, args)
        self._run_waitfors_for(guild, args)
    
    _call_waitfors['emoji_delete'] = _call_emoji_delete
    del _call_emoji_delete
    
    async def _call_reaction_add(self, client, event):
        # Reaction events may reference an uncached (partial) message; only real `Message`
        # instances can be waited on.
        message = event.message
        if not isinstance(message, Message):
            return
        
        args = (client, event)
        self._run_waitfors_for(message, args)
    
    _call_waitfors['reaction_add'] = _call_reaction_add
    del _call_reaction_add
    
    async def _call_reaction_delete(self, client, event):
        message = event.message
        if not isinstance(message, Message):
            return
        
        args = (client, event)
        self._run_waitfors_for(message, args)
    
    _call_waitfors['reaction_delete'] = _call_reaction_delete
    del _call_reaction_delete
class EventWaitforBase(EventHandlerBase, metaclass=EventWaitforMeta):
    """
    Base class for event handlers, which implement waiting for a specified action to occur.
    
    Attributes
    ----------
    waitfors : `WeakValueDictionary` of (``DiscordEntity``, `async-callable`) items
        An auto-added container to store `entity` - `async-callable` pairs.
    
    Class Attributes
    ----------------
    __event_name__ : `None` or `str` = `None`
        Predefined name to what the event handler will be added.
    call_waitfors : `None` or `async callable` = `None`
        An added method to subclasses to ensure the waitfors if `__call__` is overwritten. Subclasses can
        also overwrite the `call_waitfors` method as well.
    """
    __slots__ = ('waitfors', )
    __event_name__ = None
    call_waitfors = None
    
    def append(self, target, waiter):
        """
        Adds a new relation to `.waitfors`.
        
        When the respective event is received with the specified `target` entity, then `waiter` will be ensured.
        
        Parameters
        ----------
        target : ``DiscordEntity`` instance
            The target entity, to what relative waiters will be called.
        waiter : `async callable`
            Waiter to call every time a respective event to `target` is received.
        """
        # A lone waiter is stored bare; a second one upgrades the storage to an ``asynclist``.
        try:
            actual = self.waitfors[target]
            if type(actual) is asynclist:
                list.append(actual, waiter)
            else:
                self.waitfors[target] = container = asynclist()
                list.append(container, actual)
                list.append(container, waiter)
        except KeyError:
            self.waitfors[target] = waiter
    
    def remove(self, target, waiter):
        """
        Removes the specified relation from `.waitfors`.
        
        Parameters
        ----------
        target : ``DiscordEntity`` instance
            The entity on what the given waiter waits for the respective event.
        waiter : `async callable`
            The waiter, what is called with the respective parameters if the respective event occurs related to the
            given `target`.
        """
        try:
            container = self.waitfors.pop(target)
        except KeyError:
            return
        
        if type(container) is not asynclist:
            # Bug fix: a lone stored waiter used to be dropped unconditionally; restore it when it
            # is not the waiter being removed.
            if container != waiter:
                self.waitfors[target] = container
            return
        
        try:
            list.remove(container, waiter)
        except ValueError:
            pass
        else:
            # Collapse a single remaining waiter back to bare storage.
            if len(container) == 1:
                self.waitfors[target] = container[0]
                return
        
        self.waitfors[target] = container
    
    def get_waiter(self, target, waiter, by_type=False, is_method=False):
        """
        Looks up whether any of the given `target` - `waiter` relation is stored inside of `.waiters` and if there is
        any, then returns the first find. If non, then returns `None`.
        
        Parameters
        ----------
        target : ``DiscordEntity`` instance
            The target entity.
        waiter : `Any`
            The waiter. `by_type` and `is_method` overwrite the behaviour of checking it.
        by_type : `bool`, Optional
            Whether `waiter` was given as the type of the real waiter. Defaults to `False`.
        is_method : `bool`, Optional
            Whether the real waiter is a method-like, and you want to check it's "self". Applied before `by_type` and
            defaults to `False`.
        
        Returns
        -------
        waiter : `Any`
        """
        try:
            element = self.waitfors[target]
        except KeyError:
            return None
        
        if type(element) is asynclist:
            for element in element:
                if is_method:
                    if not isinstance(element, MethodLike):
                        continue
                    element = element.__self__
                
                if by_type:
                    if type(element) is waiter:
                        return element
                    else:
                        continue
                else:
                    if element == waiter:
                        return element
                    else:
                        continue
            
            return None
        else:
            if is_method:
                if not isinstance(element, MethodLike):
                    return None
                element = element.__self__
            
            if by_type:
                if type(element) is waiter:
                    return element
                else:
                    return None
            else:
                if element == waiter:
                    return element
                else:
                    return None
    
    def get_waiters(self, target, waiter, by_type=False, is_method=False):
        """
        Looks up the waiters of `target` - `waiter` relation stored inside of `.waiters` and returns all the matched
        one.
        
        Parameters
        ----------
        target : ``DiscordEntity`` instance
            The target entity.
        waiter : `Any`
            The waiter. `by_type` and `is_method` overwrite the behaviour of checking it.
        by_type : `bool`, Optional
            Whether `waiter` was given as the type of the real waiter. Defaults to `False`.
        is_method : `bool`, Optional
            Whether the real waiter is a method-like, and you want to check it's "self". Applied before `by_type` and
            defaults to `False`.
        
        Returns
        -------
        waiters : `list` of `Any`
        """
        result = []
        
        try:
            element = self.waitfors[target]
        except KeyError:
            return result
        
        if type(element) is asynclist:
            for element in element:
                if is_method:
                    if not isinstance(element, MethodLike):
                        continue
                    element = element.__self__
                
                if by_type:
                    if type(element) is not waiter:
                        continue
                else:
                    if element != waiter:
                        continue
                
                result.append(element)
                continue
        else:
            if is_method:
                if not isinstance(element, MethodLike):
                    return result
                element = element.__self__
            
            if by_type:
                if type(element) is waiter:
                    result.append(element)
            else:
                if element == waiter:
                    result.append(element)
        
        return result
    
    def _run_waitfors_for(self, target, args):
        """
        Runs the waitfors of the given target.
        
        Parameters
        ----------
        target : ``DiscordEntity`` instance
            The target entity.
        args : `tuple` of `Any`
            Parameters to ensure the waitfors with.
        """
        try:
            event = self.waitfors[target]
        except KeyError:
            pass
        else:
            # Fire-and-forget: every waiter is scheduled as its own task on the client's loop.
            if type(event) is asynclist:
                for event in event:
                    Task(event(*args), KOKORO)
            else:
                Task(event(*args), KOKORO)
def EventWaitforMeta__new__(cls, class_name, class_parents, class_attributes):
    """
    Subclasses ``EventWaitforBase``.
    
    Parameters
    ----------
    class_name : `str`
        The created class's name.
    class_parents : `tuple` of `type` instances
        The superclasses of the creates type.
    class_attributes : `dict` of (`str`, `Any`) items
        The class attributes of the created type.
    
    Returns
    -------
    type : ``EventWaitforMeta`` instance
        The created type.
    
    Raises
    ------
    TypeError
        - If the class do not inherits ``EventWaitforBase``.
        - If `.__event_name__` was not set or was no set correctly. (Note that if was not set, then the class's name
            is used instead.)
        - If there is no predefined `call_waitfors` for the class and it does not defines one either.
    """
    for base in class_parents:
        if issubclass(base, EventWaitforBase):
            break
    else:
        raise TypeError(f'`{cls.__name__} should be only the metaclass of `{EventWaitforBase.__name__}`.')
    
    event_name = class_attributes.get('__event_name__', None)
    if event_name is None:
        event_name = class_name
    
    if event_name not in EVENT_HANDLER_NAME_TO_PARSER_NAMES:
        raise TypeError(f'`{class_name}.__event_name__` is not set, or not set correctly.')
    
    if (class_attributes.get('call_waitfors', None) is None):
        try:
            call_waitfors = cls._call_waitfors[event_name]
        except KeyError:
            raise TypeError(f'The following event name: `{event_name!r}` has no auto `call_waitfor` added. Please '
                'define one.') from None
        
        class_attributes['call_waitfors'] = call_waitfors
        
        # `dict.get` cannot raise `KeyError` — the old `try`/`except KeyError` wrapped around this
        # lookup was dead code.
        call = class_attributes.get('__call__', None)
        if (call is None) or (call is EventHandlerBase.__call__):
            class_attributes['__call__'] = call_waitfors
    
    return type.__new__(cls, class_name, class_parents, class_attributes)

# Install the validating `__new__` only after ``EventWaitforBase`` itself was created, so the base
# class escapes its own validation.
EventWaitforMeta.__new__ = EventWaitforMeta__new__
del EventWaitforMeta__new__
class ChunkWaiter(EventHandlerBase):
    """
    Dispatches received guild user chunk events to the waiter registered for their nonce.
    
    Attributes
    ----------
    waiters : `dict`
        Nonce - waiter callable relations. Interact with this dict directly.
    """
    __slots__ = ('waiters',)
    __event_name__ = 'guild_user_chunk'
    
    def __init__(self):
        self.waiters = {}
    
    def create_event(self, waiter, nonce):
        """
        Not supported; register waiters on ``.waiters`` directly.
        
        Raises
        ------
        RuntimeError
            Interact with self.waiters instead.
        """
        raise RuntimeError('Interact with self.waiters instead.')
    
    def delete_event(self, waiter, nonce):
        """
        Not supported; remove waiters from ``.waiters`` directly.
        
        Raises
        ------
        RuntimeError
            Interact with self.waiters instead.
        """
        raise RuntimeError('Interact with self.waiters instead.')
    
    async def __call__(self, client, event):
        """
        Ensures that the chunk waiter for the specified nonce is called and if it returns `True` it is removed from
        the waiters.
        
        This method is a coroutine.
        
        Parameters
        ----------
        client : ``Client``
            The client, who received the respective dispatch event.
        event : ``GuildUserChunkEvent``
            The received guild user chunk event.
        """
        nonce = event.nonce
        if nonce is None:
            return
        
        try:
            waiter = self.waiters[nonce]
        except KeyError:
            return
        
        # A truthy return value signals that the waiter received its final chunk.
        if waiter(event):
            del self.waiters[nonce]
async def default_error_event(client, name, err):
    """
    Default error event for clients. Renders the given exception to `sys.stderr`.
    
    This function is a coroutine.
    
    Parameters
    ----------
    client : ``client``
        The client who caught the error.
    name : `str`
        Identifier name of the place where the error occurred.
    err : `Any`
        The caught exception. Can be given as non `BaseException` instance as well.
    """
    header_parts = [
        client.full_name,
        ' ignores occurred exception at ',
        name,
        '\n',
    ]
    
    # Real exceptions get their traceback rendered asynchronously on the client's loop.
    if isinstance(err, BaseException):
        await KOKORO.render_exc_async(err, header_parts)
        return
    
    # Anything else is stringified and written out directly.
    representation = err if isinstance(err, str) else repr(err)
    header_parts.append(representation)
    header_parts.append('\n')
    sys.stderr.write(''.join(header_parts))
class asynclist(list):
    """
    Container used by events to call more events and by waitfor events to call more waiters.
    """
    __slots__ = ()
    
    def __init__(self, iterable=None):
        """
        Creates a new asynclist from the given iterable.
        
        Parameters
        ----------
        iterable : `iterable`, Optional
        """
        # `extend` is removed from the public surface (see bottom), so go through `list` directly.
        if (iterable is not None):
            list.extend(self, iterable)
    
    async def __call__(self, *args):
        """
        Ensures the contained async callables on the client's loop.
        
        This method is a coroutine.
        
        Parameters
        ----------
        *args : Additional position parameters
            Parameters to call with the contained async callables.
        """
        # Fire-and-forget: each contained callable becomes its own task.
        for coro in list.__iter__(self):
            Task(coro(*args), KOKORO)
    
    def __repr__(self):
        """Returns the asynclist's representation."""
        result = [
            self.__class__.__name__,
            '([']
        
        limit = list.__len__(self)
        if limit:
            index = 0
            while True:
                element = list.__getitem__(self, index)
                result.append(repr(element))
                index += 1
                if index == limit:
                    break
                
                result.append(', ')
                continue
        
        result.append('])')
        
        return ''.join(result)
    
    def __getattribute__(self, name):
        """Gets the given attribute from the elements of the asynclist."""
        if not isinstance(name, str):
            raise TypeError(f'Attribute name must be string, not `{name.__class__.__name__}`.')
        
        # Real attributes of the instance/type win; anything missing is looked up on the contained
        # elements instead. `...` acts as the "not found" sentinel here — presumably because a
        # contained element could legitimately expose `None`; confirm against `RemovedDescriptor`.
        try:
            attribute = object.__getattribute__(self, name)
        except AttributeError:
            pass
        else:
            if attribute is not ...:
                return attribute
        
        for coro in list.__iter__(self):
            attribute = getattr(coro, name, ...)
            if attribute is ...:
                continue
            
            return attribute
        
        raise AttributeError(f'`{self.__class__.__name__}` object has no attribute `{name}`.')
    
    # The list's mutator/query methods are removed from the public surface; internal code uses the
    # `list.<method>` forms directly.
    append = RemovedDescriptor()
    clear = RemovedDescriptor()
    copy = RemovedDescriptor()
    count = RemovedDescriptor()
    extend = RemovedDescriptor()
    index = RemovedDescriptor()
    insert = RemovedDescriptor()
    pop = RemovedDescriptor()
    remove = RemovedDescriptor()
    reverse = RemovedDescriptor()
    sort = RemovedDescriptor()
async def _with_error(client, task):
    """
    Runs the given awaitable and if it raises, calls `client.events.error` with the exception.
    
    This function is a coroutine.
    
    Parameters
    ----------
    client : ``Client``
        The client, who's `client.events.error` will be called.
    task : `awaitable`
        The awaitable to run.
    """
    try:
        await task
    except BaseException as exception:
        # Route every failure — including `BaseException`s — through the client's error event.
        error_handler = client.events.error
        await error_handler(client, repr(task), exception)
| StarcoderdataPython |
3259483 | <reponame>Sky-zzt/lintcodePractice
class Solution:
    """
    @param triangle: a list of lists of integers
    @return: An integer, minimum path sum
    """
    
    def minimumTotal(self, triangle):
        """
        Return the minimum top-to-bottom path sum of *triangle*.
        
        Bug fix: the original recursive version rebound a local `best` inside `traverse`, so the
        tracked minimum never propagated back (its own comment admitted this) and the method always
        returned `sys.maxsize`. Rewritten as bottom-up DP: O(n^2) time, O(n) extra space instead of
        O(2^n) recursion.
        """
        if not triangle:
            return 0
        
        # Start from the last row and repeatedly fold it into the row above: each cell keeps the
        # cheaper of its two children plus its own value.
        best = list(triangle[-1])
        for row_index in range(len(triangle) - 2, -1, -1):
            row = triangle[row_index]
            best = [row[col] + min(best[col], best[col + 1]) for col in range(len(row))]
        
        return best[0]
s = Solution()
print(s.minimumTotal([[-10]]))
| StarcoderdataPython |
8104178 | <reponame>6un9-h0-Dan/mixbox<filename>mixbox/idgen.py
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
"""Methods for generating IDs"""
import uuid
import contextlib
from .namespaces import Namespace
EXAMPLE_NAMESPACE = Namespace("http://example.com", "example", '')
# Don't register this namespace (yet, at least)
__all__ = ['InvalidMethodError', 'IDGenerator', 'set_id_namespace',
'set_id_method', 'create_id']
class InvalidMethodError(ValueError):
    """Raised when an unsupported ID-generation method is selected."""

    def __init__(self, method):
        super().__init__("invalid method: %s" % method)
class IDGenerator(object):
    """Utility class for generating namespaced IDs for various entities."""

    METHOD_UUID = 1  # random uuid4-based ids
    METHOD_INT = 2   # sequential integer ids
    METHODS = (METHOD_UUID, METHOD_INT,)

    def __init__(self, namespace=None, method=METHOD_UUID):
        # The default namespace is resolved lazily (instead of being a default
        # argument) so importing this module never depends on
        # EXAMPLE_NAMESPACE having been constructed; passing no namespace is
        # backward compatible.
        self.namespace = EXAMPLE_NAMESPACE if namespace is None else namespace
        self.method = method
        self.reset()

    def reset(self):
        """Restart the sequential-integer counter at 1."""
        self.next_int = 1

    @property
    def namespace(self):
        return self._namespace

    @namespace.setter
    def namespace(self, value):
        # Only Namespace objects are accepted; changing the namespace also
        # restarts the integer counter.
        if not isinstance(value, Namespace):
            raise ValueError("Must be a Namespace object")
        self._namespace = value
        self.reset()

    @property
    def method(self):
        return self._method

    @method.setter
    def method(self, value):
        if value not in IDGenerator.METHODS:
            # Bug fix: pass the raw value. InvalidMethodError.__init__ already
            # formats the "invalid method: ..." text, so pre-formatting here
            # produced a doubled "invalid method: invalid method: X" message.
            raise InvalidMethodError(value)
        self._method = value
        self.reset()

    def create_id(self, prefix="guid"):
        """Create an ID of the form "<namespace prefix>:<prefix>-<id>".

        Note that if `prefix` is not provided, it will be `guid`, even if the
        `method` is `METHOD_INT`.
        """
        if self.method == IDGenerator.METHOD_UUID:
            id_ = str(uuid.uuid4())
        elif self.method == IDGenerator.METHOD_INT:
            id_ = self.next_int
            self.next_int += 1
        else:
            raise InvalidMethodError(self.method)
        return "%s:%s-%s" % (self.namespace.prefix, prefix, id_)
#: Singleton instance within this module. It is lazily instantiated, so simply
#: importing the utils module will not create the object.
__generator = None


def _get_generator():
    """Return the default IDGenerator object.

    Only under rare circumstances should this function be called by external
    code. More likely, external code should initialize its own IDGenerator or
    use the `set_id_namespace`, `set_id_method`, or `create_id` functions.
    """
    global __generator
    if __generator is None:
        __generator = IDGenerator()
    return __generator


def set_id_namespace(namespace):
    """Set the namespace for the module-level ID Generator."""
    _get_generator().namespace = namespace


def set_id_method(method):
    """Set the method for the module-level ID Generator."""
    _get_generator().method = method


def get_id_namespace():
    """Return the namespace associated with generated ids."""
    return _get_generator().namespace.name


def get_id_namespace_prefix():
    """Return the namespace prefix associated with generated ids."""
    return _get_generator().namespace.prefix


# For backwards compatibility with old name
get_id_namespace_alias = get_id_namespace_prefix


def create_id(prefix=None):
    """Create an ID using the module-level ID Generator."""
    generator = _get_generator()
    return generator.create_id(prefix) if prefix else generator.create_id()
@contextlib.contextmanager
def temp_id_namespace(namespace):
    """Context manager that temporarily installs `namespace` as the id namespace.

    Bug fix: the original saved a ``{name: prefix}`` dict and passed it back to
    ``set_id_namespace`` on exit; the namespace setter requires a ``Namespace``
    object, so restoring always raised ``ValueError``. Save and restore the
    actual ``Namespace`` instance instead.
    """
    saved_namespace = _get_generator().namespace
    set_id_namespace(namespace)
    try:
        yield
    finally:
        set_id_namespace(saved_namespace)
| StarcoderdataPython |
6458604 | <filename>pythonlearn/python_script/lib/pidfile.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pid file manager
"""
import os
import sys
import fcntl
import atexit
def register_pidfile(pidfile):
    # Acquire an exclusive, non-blocking flock on the pid file so only one
    # instance of the program runs at a time, and write our pid into it.
    # (Python 2 syntax: `except ..., e` and `print >>`.)
    try:
        fd = os.open(pidfile, os.O_RDWR|os.O_CREAT|os.O_NONBLOCK|os.O_DSYNC)
        fcntl.flock(fd, fcntl.LOCK_EX|fcntl.LOCK_NB)
        os.write(fd, str(os.getpid()))
        # Remove the pid file automatically on normal interpreter exit.
        atexit.register(clear_pidfile, pidfile=pidfile)
    except (OSError, IOError), e:
        # errno 35 (BSD EAGAIN) / 11 (Linux EAGAIN): lock already held by a
        # running instance — report its pid and bail out.
        if e.errno in (35, 11):
            print >> sys.stderr, '%s already running: pid=%s' % (' '.join(sys.argv), open(pidfile).read())
        else:
            print >> sys.stderr, e
        sys.exit(-1)
def clear_pidfile(pidfile):
    """Best-effort removal of the pid file; races with other removers are ignored."""
    if not os.path.exists(pidfile):
        return
    try:
        os.unlink(pidfile)
    except OSError:
        # The file disappeared (or is not removable) — nothing more to do.
        pass
if __name__ == '__main__':
    # Manual test: hold the lock forever; a second invocation should report
    # "already running" and exit. (Python 2 print statement.)
    register_pidfile('test.pid')
    print 'pid=%s, running...' % os.getpid()
    import time
    while True:
        time.sleep(1)
| StarcoderdataPython |
84405 | <filename>legal_radar/entrypoints/streamlit_app/__init__.py<gh_stars>0
#!/usr/bin/python3
# __init__.py
# Date: 27.08.2021
# Author: <NAME>
# Email: <EMAIL>
| StarcoderdataPython |
6531231 | <reponame>vishnuyar/supreme-court-data
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_html_components as html
from datetime import datetime as dt
import plotly.graph_objs as go
from joblib import load
import numpy as np
import pandas as pd
from app import app
from joblib import load
# Loading the xgboost model
# NOTE(review): loaded once at import time; the relative path means the app
# must be started from the project root — confirm.
xgboost = load('assets/xgboost.joblib')
# Features used by the model; the prediction frame is indexed by these names
# before being fed to predict_proba.
selected_features = [
    'caseSource',
    'caseOriginState',
    'respondent',
    'lcDisagreement',
    'issueArea',
    'case_reargued',
    'case_argued',
    'lcDisposition',
    'respondentState',
    'caseSourceState',
    'threeJudgeFdc',
    'petitioner',
    'is_adminAction',
    'certReason',
    'petitionerState']
state_dict = {
-1: 'Not Applicable',
1: 'Alabama',
2: 'Alaska',
3: 'American Samoa',
4: 'Arizona',
5: 'Arkansas',
6: 'California',
7: 'Colorado',
8: 'Connecticut',
9: 'Delaware',
10: 'District of Columbia',
11: 'Federated States of Micronesia',
12: 'Florida',
13: 'Georgia',
14: 'Guam',
15: 'Hawaii',
16: 'Idaho',
17: 'Illinois',
18: 'Indiana',
19: 'Iowa',
20: 'Kansas',
21: 'Kentucky',
22: 'Louisiana',
23: 'Maine',
24: 'Marshall Islands',
25: 'Maryland',
26: 'Massachusetts',
27: 'Michigan',
28: 'Minnesota',
29: 'Mississippi',
30: 'Missouri',
31: 'Montana',
32: 'Nebraska',
33: 'Nevada',
34: 'New Hampshire',
35: 'New Jersey',
36: 'New Mexico',
37: 'New York',
38: 'North Carolina',
39: 'North Dakota',
40: 'Northern Mariana Islands',
41: 'Ohio',
42: 'Oklahoma',
43: 'Oregon',
44: 'Palau',
45: 'Pennsylvania',
46: 'Puerto Rico',
47: 'Rhode Island',
48: 'South Carolina',
49: 'South Dakota',
50: 'Tennessee',
51: 'Texas',
52: 'Utah',
53: 'Vermont',
54: 'Virgin Islands',
55: 'Virginia',
56: 'Washington',
57: 'West Virginia',
58: 'Wisconsin',
59: 'Wyoming',
60: 'United States',
61: 'Interstate Compact',
62: 'Philippines',
63: 'Indian',
64: 'Dakota'
}
court_dict = {
-1: 'Not Applicable',
32: 'Appeals, District of Columbia',
28: 'Appeals, Eighth Circuit',
31: 'Appeals, Eleventh Circuit',
8: 'Appeals, Federal Circuit',
25: 'Appeals, Fifth Circuit',
21: 'Appeals, First Circuit',
24: 'Appeals, Fourth Circuit',
29: 'Appeals, Ninth Circuit',
22: 'Appeals, Second Circuit',
27: 'Appeals, Seventh Circuit',
26: 'Appeals, Sixth Circuit',
30: 'Appeals, Tenth Circuit',
23: 'Appeals, Third Circuit',
48: 'CA Central District Court',
50: 'CA Northern District Court',
51: 'CA Southern District Court',
3: 'Court of Federal Claims',
55: 'DC District Court',
58: 'Florida Southern District Court',
66: 'IL Northern District Court',
75: 'LA Eastern District Court',
80: 'MA District Court',
81: 'MI Eastern District Court',
92: 'NJ District Court',
94: 'NY Eastern District Court',
96: 'NY Southern District Court',
109: 'PA Eastern District Court',
301: 'State Appellate Court',
300: 'State Supreme Court',
302: 'State Trial Court',
121: 'TX Southern District Court',
9: 'U.S. Tax Court',
126: 'VA District Court',
9999: 'Other Courts'
}
lc_disposition_dict = {1: 'Stay Granted',
2: 'Affirmed',
3: 'Reversed',
4: 'Reversed and Remanded',
5: 'Vacated and Remanded',
6: 'Affirmed and Reversed in part',
7: 'Affirmed and Remanded in part',
8: 'Vacated',
9: 'Appeal Dismissed',
10: 'Modify',
11: 'Remand',
12: 'Unusual decision'}
cert_labels_dict = {1: 'Cert not granted',
2: 'Federal court conflict',
3: 'Federal court and important question',
4: 'Putative conflict',
5: 'Conflict between Federal and State',
6: 'State court conflict',
7: 'Federal court uncertainty',
8: 'state court uncertainty',
9: 'Federal and State uncertainty',
10: 'To resolve important question',
11: 'To resolve question presented',
12: 'No Reason Given',
13: 'Other reason'}
issue_areas_dict = {1: 'Criminal Procedure',
2: 'Civil Rights',
3: 'First Amendment',
4: 'Due Process',
5: 'Privacy',
6: 'Attorneys',
7: 'Unions',
8: 'Economic Activity',
9: 'Judicial Power',
10: 'Federalism',
11: 'Interstate Relations',
12: 'Federal Taxation',
13: 'Miscellaneous',
14: 'Private Action'}
parties_category = {28: 'State Government',
27: 'United States',
100: 'Person accused of crime',
126: 'Person convicted of crime',
19: 'Govt Official',
145: 'Employee',
151: 'Employer',
249: 'Union',
8: 'Governmental Employee',
3: 'City,Town or Govt Unit',
106: 'Alien',
215: 'Prisoner',
382: 'Labor Board',
195: 'Owner',
240: 'Taxpayer',
9999: 'Others'}
style = {'padding': '1.5em'}
empty_col = dbc.Col(
html.Div(id='prediction-content_values'),
md=2
)
output_col = dbc.Col([
# dbc.Col(
# html.Div(id='prediction-content_values')
# ),
dbc.Col(
html.Div(
dcc.Graph(id='prediction-content'),
style={"position": "fixed", 'width': '40%', 'height': '20%'})
)
] # ,md=3
)
input_col = dbc.Col([
html.Div([
dcc.Markdown("""
### Predict
Use the controls below to update your latest case status details.
"""),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Cert Reason'),
dcc.Dropdown(
id='certReason',
options=[{'label': cert_labels_dict[key], 'value': key} for key in cert_labels_dict],
value=1,
clearable=False
),
dbc.Tooltip(
"Reason give by Supreme Court to grant the petition",
target="certReason",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Issue Area'),
dcc.Dropdown(
id='issueArea',
options=[{'label': issue_areas_dict[key], 'value': key} for key in issue_areas_dict],
value=1,
clearable=False
),
dbc.Tooltip(
"Issue area of this case",
target="issueArea",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Petitioner Category'),
dcc.Dropdown(
id='petitioner',
options=[{'label': parties_category[key], 'value': key} for key in parties_category],
value=27,
clearable=False
),
dbc.Tooltip(
"Petitoner: The one who approaches the Supreme Court",
target="petitioner",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Petitioner State'),
dcc.Dropdown(
id='petitionerState',
options=[{'label': state_dict[key], 'value': key} for key in state_dict],
value=-1,
clearable=False
),
dbc.Tooltip(
"State of the Petitioner: Not Applicable if US Govt is the Petitioner",
target="petitionerState",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Respondent Category'),
dcc.Dropdown(
id='respondent',
options=[{'label': parties_category[key], 'value': key} for key in parties_category],
value=28,
clearable=False
),
dbc.Tooltip(
"Party against whom the petition has been filed by the Petitioner",
target="respondent",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Respondent State'),
dcc.Dropdown(
id='respondentState',
options=[{'label': state_dict[key], 'value': key} for key in state_dict],
value=-1,
clearable=False
),
dbc.Tooltip(
"State of the Respondent: Not Applicable if US Govt is the Respondent",
target="respondentState",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Lower Court Decision'),
dcc.Dropdown(
id='lcDisposition',
options=[{'label': lc_disposition_dict[key], 'value': key} for key in lc_disposition_dict],
value=1,
clearable=False
),
dbc.Tooltip(
"Decision which the Petitioner has approached the Supreme Court to review",
target="lcDisposition",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Dissent in Lower Court decision'),
dcc.RadioItems(
id='lcDisagreement',
options=[{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0}, ],
value=0,
labelStyle={'margin-right': '20px'}
),
dbc.Tooltip(
"Dissent is applicable only when the Lower Court decision is not unanimous",
target="lcDisagreement",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Lower Court'),
dcc.Dropdown(
id='caseSource',
options=[{'label': court_dict[key], 'value': key} for key in court_dict],
value=28,
clearable=False
),
dbc.Tooltip(
"Name of the Lower Court whose decision is being reviewed by the Supreme Court",
target="caseSource",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Lower Court State'),
dcc.Dropdown(
id='caseSourceState',
options=[{'label': state_dict[key], 'value': key} for key in state_dict],
value=-1,
clearable=False
),
dbc.Tooltip(
"Applicable : only when the Lower Court is a State Court",
target="caseSourceState",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Case Origin Court'),
dcc.Dropdown(
id='caseOrigin',
options=[{'label': court_dict[key], 'value': key} for key in court_dict],
value=28,
clearable=False
),
dbc.Tooltip(
"Court in which the case originated, Not Trial Court either a state or federal appellate court",
target="caseOrigin",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Case Origin State'),
dcc.Dropdown(
id='caseOriginState',
options=[{'label': state_dict[key], 'value': key} for key in state_dict],
value=-1,
clearable=False
),
dbc.Tooltip(
"Applicable : only when the Case Origin Court is a State Court",
target="caseOriginState",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Argument Completed ?'),
dcc.RadioItems(
id='case_argued',
options=[
{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0},
],
value=0,
labelStyle={'margin-right': '20px'}
),
dbc.Tooltip(
"Select if Oral Arguments have been heard by the Supreme Court",
target="case_argued",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Re Argument Completed ?'),
dcc.RadioItems(
id='case_reargued',
options=[
{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0},
],
value=0,
labelStyle={'margin-right': '20px'}
),
dbc.Tooltip(
"Rarely : Supreme Court asks Oral Arguments to be presented again.",
target="case_reargued",
),
], style=style),
),
]),
dbc.Row([
dbc.Col(
html.Div([
dcc.Markdown('###### Adminstraion action prior to litigation ?'),
dcc.RadioItems(
id='is_adminAction',
options=[{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0},
],
value=0,
labelStyle={'margin-right': '20px'}
),
dbc.Tooltip(
"Applicable only if there is Administrative activity prior"
"to onset of Litigation "
"Note Administrative action is taken by either a State or Federal Agency",
target="is_adminAction",
),
], style=style),
),
dbc.Col(
html.Div([
dcc.Markdown('###### Three Judge Court ?'),
dcc.RadioItems(
id='threeJudgeFdc',
options=[
{'label': 'Yes', 'value': 1},
{'label': 'No', 'value': 0},
],
value=0,
labelStyle={'margin-right': '20px'}
),
dbc.Tooltip(
"Is the case being heard by Three Judge Court ?",
target="threeJudgeFdc",
),
], style=style),
),
]),
], md=7,
)
@app.callback(
    Output('prediction-content_values', 'children'),
    [Input('threeJudgeFdc', 'value'),
     Input('petitioner', 'value'),
     Input('case_argued', 'value'),
     Input('lcDisposition', 'value'),
     Input('respondent', 'value'),
     Input('certReason', 'value'),
     Input('caseOriginState', 'value'),
     Input('petitionerState', 'value'),
     Input('lcDisagreement', 'value'),
     Input('respondentState', 'value'),
     Input('caseSourceState', 'value'),
     Input('issueArea', 'value'),
     Input('caseSource', 'value'),
     Input('is_adminAction', 'value'),
     Input('case_reargued', 'value'),
     ])
def send_outcomes(
        threeJudgeFdc,
        petitioner,
        case_argued,
        lcDisposition,
        respondent,
        certReason,
        caseOriginState,
        petitionerState,
        lcDisagreement,
        respondentState,
        caseSourceState,
        issueArea,
        caseSource,
        is_adminAction,
        case_reargued):
    """Dash callback: return the outcome probabilities as a printable string.

    Returns
    -------
    str
        "[favorable %, unfavorable %]" for the current control values.
    """
    feature_names = ['threeJudgeFdc', 'petitioner', 'case_argued',
                     'lcDisposition', 'respondent', 'certReason',
                     'caseOriginState', 'petitionerState', 'lcDisagreement',
                     'respondentState', 'caseSourceState', 'issueArea',
                     'caseSource', 'is_adminAction', 'case_reargued']
    feature_values = [threeJudgeFdc, petitioner, case_argued, lcDisposition,
                      respondent, certReason, caseOriginState, petitionerState,
                      lcDisagreement, respondentState, caseSourceState,
                      issueArea, caseSource, is_adminAction, case_reargued]
    predict_data = pd.DataFrame(columns=feature_names, data=[feature_values])
    # Bug fix: the original read `predict_dataselected_features` (a NameError —
    # the "[...]" indexing was missing); the model must be fed the selected
    # feature columns of the frame. Also removed the unused `graphdata` and
    # `inputvalues` locals.
    y_proba = xgboost.predict_proba(predict_data[selected_features])[:, 1][0]
    favorable_outcome = 100 * y_proba
    unfavorable_outcome = 100 - favorable_outcome
    return str([favorable_outcome, unfavorable_outcome])
@app.callback(
    Output('prediction-content', 'figure'),
    [Input('threeJudgeFdc', 'value'),
     Input('petitioner', 'value'),
     Input('case_argued', 'value'),
     Input('lcDisposition', 'value'),
     Input('respondent', 'value'),
     Input('certReason', 'value'),
     Input('caseOriginState', 'value'),
     Input('petitionerState', 'value'),
     Input('lcDisagreement', 'value'),
     Input('respondentState', 'value'),
     Input('caseSourceState', 'value'),
     Input('issueArea', 'value'),
     Input('caseSource', 'value'),
     Input('is_adminAction', 'value'),
     Input('case_reargued', 'value'),
     ])
def send_piechart(
        threeJudgeFdc,
        petitioner,
        case_argued,
        lcDisposition,
        respondent,
        certReason,
        caseOriginState,
        petitionerState,
        lcDisagreement,
        respondentState,
        caseSourceState,
        issueArea,
        caseSource,
        is_adminAction,
        case_reargued):
    """Dash callback: build the favorable/unfavorable outcome pie chart figure."""
    feature_names = ['threeJudgeFdc', 'petitioner', 'case_argued',
                     'lcDisposition', 'respondent', 'certReason',
                     'caseOriginState', 'petitionerState', 'lcDisagreement',
                     'respondentState', 'caseSourceState', 'issueArea',
                     'caseSource', 'is_adminAction', 'case_reargued']
    feature_values = [threeJudgeFdc, petitioner, case_argued, lcDisposition,
                      respondent, certReason, caseOriginState, petitionerState,
                      lcDisagreement, respondentState, caseSourceState,
                      issueArea, caseSource, is_adminAction, case_reargued]
    predict_data = pd.DataFrame(columns=feature_names, data=[feature_values])
    # Probability of the "favorable" class (column 1 of predict_proba).
    y_proba = xgboost.predict_proba(predict_data[selected_features])[:, 1][0]
    favorable_outcome = 100 * y_proba
    unfavorable_outcome = 100 - favorable_outcome
    graphdata = go.Pie(
        values=[favorable_outcome, unfavorable_outcome],
        labels=['Favorable', 'Unfavorable'],
        marker=dict(colors=['ForestGreen', 'Crimson'],
                    line=dict(color='#000000', width=1)))
    return {
        'data': [graphdata],
        'layout': {
            'titlefont': {
                'size': 24,
                'color': '#287D95',
                'family': 'Raleway'},
            'title': 'Case Outcome Probability'}}
# Page layout: input controls on the left, live prediction chart on the right.
layout = dbc.Row([
    input_col, output_col
])
| StarcoderdataPython |
321781 | import os
from flask import Flask
from flask_appconfig import HerokuConfig
from flask_bootstrap import Bootstrap
from .frontend import frontend
def create_app(configfile=None):
    """Build and configure the Flask application (Heroku config + Bootstrap)."""
    app = Flask(__name__)
    HerokuConfig(app, configfile)
    Bootstrap(app)
    app.register_blueprint(frontend)
    app.config['BOOTSTRAP_SERVE_LOCAL'] = True
    # Pull the runtime settings straight from the environment.
    for key in ('EMBEDLY_KEY', 'TWTXT_FEED', 'TWTXT_NICK'):
        app.config[key] = os.environ.get(key)
    return app
app = create_app()
# Bind to the port provided by the platform (Heroku sets PORT); 5000 locally.
port = int(os.environ.get("PORT", 5000))
# NOTE(review): debug=True on a publicly bound host is unsafe in production —
# confirm this is intended.
app.run(host='0.0.0.0', port=port, debug=True)
| StarcoderdataPython |
3526797 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from command import PagedCommand
class Status(PagedCommand):
common = True
helpSummary = "Show the working tree status"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
'%prog' compares the working tree to the staging area (aka index),
and the most recent commit on this branch (HEAD), in each project
specified. A summary is displayed, one line per file where there
is a difference between these three states.
Status Display
--------------
The status display is organized into three columns of information,
for example if the file 'subcmds/status.py' is modified in the
project 'repo' on branch 'devwork':
project repo/ branch devwork
-m subcmds/status.py
The first column explains how the staging area (index) differs from
the last commit (HEAD). Its values are always displayed in upper
case and have the following meanings:
-: no difference
A: added (not in HEAD, in index )
M: modified ( in HEAD, in index, different content )
D: deleted ( in HEAD, not in index )
R: renamed (not in HEAD, in index, path changed )
C: copied (not in HEAD, in index, copied from another)
T: mode changed ( in HEAD, in index, same content )
U: unmerged; conflict resolution required
The second column explains how the working directory differs from
the index. Its values are always displayed in lower case and have
the following meanings:
-: new / unknown (not in index, in work tree )
m: modified ( in index, in work tree, modified )
d: deleted ( in index, not in work tree )
"""
def Execute(self, opt, args):
    # Print one "# on branch X" header per checked-out branch, then the
    # working-tree status of every selected project; if every project is
    # clean, say so. (Python 2 print statements.)
    all = self.GetProjects(args)  # NOTE(review): `all` shadows the builtin
    clean = 0
    on = {}
    # Group projects by their currently checked-out branch (detached
    # projects have no CurrentBranch and are skipped here).
    for project in all:
        cb = project.CurrentBranch
        if cb:
            if cb not in on:
                on[cb] = []
            on[cb].append(project)
    branch_names = list(on.keys())
    branch_names.sort()
    for cb in branch_names:
        print '# on branch %s' % cb
    # PrintWorkTreeStatus prints the per-project status itself and returns
    # 'CLEAN' when there is nothing to report.
    for project in all:
        state = project.PrintWorkTreeStatus()
        if state == 'CLEAN':
            clean += 1
    if len(all) == clean:
        print 'nothing to commit (working directory clean)'
| StarcoderdataPython |
3409959 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 18 12:10:06 2021
@author: philippbst
"""
import os
import numpy as np
def getProjectPaths(pathToProject):
    """Return a dict mapping logical project-location names to paths.

    Parameters
    ----------
    pathToProject : str
        Root directory of the project.

    Returns
    -------
    dict
        Keys such as 'pathToSrc' mapped to subdirectories of the root.

    Improvements: removed the unused ``cwd = os.getcwd()`` call and the
    no-op ``pathToProject = pathToProject`` assignment, and replaced the
    parallel-list index loop with a single mapping.
    """
    subdirs = {
        'pathToProject': '',
        'pathToSrc': '/src',
        'pathToRawFEMData': '/data/raw_FEM_data',
        'pathToProcessedFEMData': '/data/processed_FEM_data',
        'pathToBMWData': '/data/BMW_data',
        'pathToTrainedModels': '/models/trained_models',
        'pathToModelRuns': '/models/runs',
        'pathToModelStudies': '/models/studies',
    }
    return {name: pathToProject + suffix for name, suffix in subdirs.items()}
def scale3dDataPoints(X):
    """Min-max scale each column of an (N, 3) point array to [0, 1].

    Returns the scaled array and a dict of the per-axis minima/maxima
    ('x_min', ..., 'z_max') needed to undo the scaling (see
    rescale3DDataPoints).
    """
    minmaxDict = {}
    scaled_columns = []
    for axis, name in enumerate(('x', 'y', 'z')):
        column = X[:, axis]
        lo, hi = min(column), max(column)
        scaled_columns.append((column - lo) / (hi - lo))
        minmaxDict[name + '_min'] = lo
        minmaxDict[name + '_max'] = hi
    X_scaled = np.array(scaled_columns).T
    return X_scaled, minmaxDict
def rescale3DDataPoints(X, minmaxDict):
    """Invert scale3dDataPoints: map [0, 1]-scaled columns back to their
    original per-axis ranges stored in `minmaxDict`."""
    rescaled_columns = []
    for axis, name in enumerate(('x', 'y', 'z')):
        lo = minmaxDict[name + '_min']
        hi = minmaxDict[name + '_max']
        rescaled_columns.append(X[:, axis] * (hi - lo) + lo)
    return np.array(rescaled_columns).T
def pointsInRange3D(numPoints, x_range, y_range, z_range):
    '''
    Takes the range for x,y and z coordinates as input and samples the
    desired number of points within the domain defined by the coordinate ranges
    '''
    coordinate_ranges = (x_range, y_range, z_range)
    columns = [np.random.uniform(min(r), max(r), numPoints)
               for r in coordinate_ranges]
    return np.array(columns).transpose()
def surfPoints(numPoints, range1, range2, fixpoint, fixCoord='x'):
    '''
    Takes two coordinate ranges as input, a fixed constant and the information
    which coordinate direction is fixed.
    Samples random points in the given ranges with the third coordinate set to
    the fixed value.
    The return array always has the order of:
    r1 = x or y
    r2 = y or z
    depending on the coordinate of the fixed coordinate which can be x,y,z

    Bug fix: the original offset the stretched samples with
    ``- abs(min(range))``, which is only correct when ``min(range) <= 0``; for
    a range like (2, 5) the points landed in [-2, 1]. The correct offset is
    ``+ min(range)`` (identical for non-positive minima).
    '''
    def _stretch(rng):
        # Draw uniform samples and min-max stretch them so the returned
        # coordinates exactly span [min(rng), max(rng)] (endpoints included,
        # matching the original's normalization scheme).
        # NOTE(review): numPoints == 1 divides by zero, as in the original.
        lo, hi = min(rng), max(rng)
        r = np.random.rand(numPoints)
        return (r - r.min()) / (r.max() - r.min()) * (hi - lo) + lo

    r1_norm = _stretch(range1)
    r2_norm = _stretch(range2)
    fix = np.full(numPoints, fixpoint, dtype=float)

    if fixCoord == 'x':
        columns = (fix, r1_norm, r2_norm)
    elif fixCoord == 'y':
        columns = (r1_norm, fix, r2_norm)
    elif fixCoord == 'z':
        columns = (r1_norm, r2_norm, fix)
    else:
        raise ValueError("fixCoord must be 'x', 'y' or 'z', got %r" % (fixCoord,))
    return np.array(columns).transpose()
def getSurfPointsOf3DBlock(numSamplesPerSurf, x_range, y_range, z_range):
    '''
    Samples points on all sides of the surface of a rectangular defined by x,y and z range
    '''
    # One (range1, range2, fixed value, fixed axis) tuple per box face.
    faces = [
        (x_range, y_range, max(z_range), 'z'),
        (x_range, y_range, min(z_range), 'z'),
        (x_range, z_range, max(y_range), 'y'),
        (x_range, z_range, min(y_range), 'y'),
        (y_range, z_range, max(x_range), 'x'),
        (y_range, z_range, min(x_range), 'x'),
    ]
    return np.concatenate(
        [surfPoints(numSamplesPerSurf, r1, r2, fp, axis)
         for r1, r2, fp, axis in faces],
        axis=0)
| StarcoderdataPython |
9716817 | <reponame>TomNicholas/xBOUT-1
from pathlib import Path
import re
import pytest
import numpy as np
from xarray import DataArray, Dataset, concat
from xarray.tests.test_dataset import create_test_data
import xarray.testing as xrt
from natsort import natsorted
from xbout.load import _check_filetype, _expand_wildcards, _expand_filepaths,\
_arrange_for_concatenation, _trim, _strip_metadata, \
_auto_open_mfboutdataset
def test_check_extensions(tmpdir):
    # _check_filetype should map .nc -> 'netcdf4', .h5netcdf -> 'h5netcdf',
    # and raise IOError for any other extension.
    files_dir = tmpdir.mkdir("data")

    example_nc_file = files_dir.join('example.nc')
    example_nc_file.write("content_nc")
    filetype = _check_filetype(Path(str(example_nc_file)))
    assert filetype == 'netcdf4'

    example_hdf5_file = files_dir.join('example.h5netcdf')
    example_hdf5_file.write("content_hdf5")
    filetype = _check_filetype(Path(str(example_hdf5_file)))
    assert filetype == 'h5netcdf'

    # Bug fix: the original wrote "content_txt" to example_hdf5_file, so the
    # invalid-extension file was never actually created.
    example_invalid_file = files_dir.join('example.txt')
    example_invalid_file.write("content_txt")
    with pytest.raises(IOError):
        filetype = _check_filetype(Path(str(example_invalid_file)))
class TestPathHandling:
def test_glob_expansion_single(self, tmpdir):
files_dir = tmpdir.mkdir("data")
example_file = files_dir.join('example.0.nc')
example_file.write("content")
path = Path(str(example_file))
filepaths = _expand_wildcards(path)
assert filepaths[0] == Path(str(example_file))
path = Path(str(files_dir.join('example.*.nc')))
filepaths = _expand_wildcards(path)
assert filepaths[0] == Path(str(example_file))
@pytest.mark.parametrize("ii, jj", [(1, 1), (1, 4), (3, 1), (5, 3), (12, 1),
(1, 12), (121, 2), (3, 111)])
def test_glob_expansion_both(self, tmpdir, ii, jj):
files_dir = tmpdir.mkdir("data")
filepaths = []
for i in range(ii):
example_run_dir = files_dir.mkdir('run' + str(i))
for j in range(jj):
example_file = example_run_dir.join('example.' + str(j) + '.nc')
example_file.write("content")
filepaths.append(Path(str(example_file)))
expected_filepaths = natsorted(filepaths,
key=lambda filepath: str(filepath))
path = Path(str(files_dir.join('run*/example.*.nc')))
actual_filepaths = _expand_wildcards(path)
assert actual_filepaths == expected_filepaths
def test_no_files(self, tmpdir):
files_dir = tmpdir.mkdir("data")
with pytest.raises(IOError):
path = Path(str(files_dir.join('run*/example.*.nc')))
actual_filepaths = _expand_filepaths(path)
print(actual_filepaths)
@pytest.fixture()
def create_filepaths():
    # Indirection fixture: hands the plain helper to tests so they can call it
    # with custom nxpe/nype/nt arguments.
    return _create_filepaths
def _create_filepaths(nxpe=1, nype=1, nt=1):
filepaths = []
for t in range(nt):
for i in range(nype):
for j in range(nxpe):
file_num = (j + nxpe * i)
path = './run{}'.format(str(t)) \
+ '/BOUT.dmp.{}.nc'.format(str(file_num))
filepaths.append(path)
return filepaths
class TestArrange:
def test_arrange_single(self, create_filepaths):
paths = create_filepaths(nxpe=1, nype=1, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=1, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, None, None]
def test_arrange_along_x(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=1, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc',
'./run0/BOUT.dmp.1.nc',
'./run0/BOUT.dmp.2.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=3, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, None, 'x']
def test_arrange_along_y(self, create_filepaths):
paths = create_filepaths(nxpe=1, nype=3, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc'],
['./run0/BOUT.dmp.1.nc'],
['./run0/BOUT.dmp.2.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=1, nype=3)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, 'y', None]
def test_arrange_along_t(self, create_filepaths):
paths = create_filepaths(nxpe=1, nype=1, nt=3)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc']],
[['./run1/BOUT.dmp.0.nc']],
[['./run2/BOUT.dmp.0.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=1, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == ['t', None, None]
def test_arrange_along_xy(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=2, nt=1)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc'],
['./run0/BOUT.dmp.3.nc', './run0/BOUT.dmp.4.nc', './run0/BOUT.dmp.5.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=3, nype=2)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == [None, 'y', 'x']
def test_arrange_along_xt(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=1, nt=2)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc']],
[['./run1/BOUT.dmp.0.nc', './run1/BOUT.dmp.1.nc', './run1/BOUT.dmp.2.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(
paths, nxpe=3, nype=1)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == ['t', None, 'x']
def test_arrange_along_xyt(self, create_filepaths):
paths = create_filepaths(nxpe=3, nype=2, nt=2)
expected_path_grid = [[['./run0/BOUT.dmp.0.nc', './run0/BOUT.dmp.1.nc', './run0/BOUT.dmp.2.nc'],
['./run0/BOUT.dmp.3.nc', './run0/BOUT.dmp.4.nc', './run0/BOUT.dmp.5.nc']],
[['./run1/BOUT.dmp.0.nc', './run1/BOUT.dmp.1.nc', './run1/BOUT.dmp.2.nc'],
['./run1/BOUT.dmp.3.nc', './run1/BOUT.dmp.4.nc', './run1/BOUT.dmp.5.nc']]]
actual_path_grid, actual_concat_dims = _arrange_for_concatenation(paths, nxpe=3, nype=2)
assert expected_path_grid == actual_path_grid
assert actual_concat_dims == ['t', 'y', 'x']
@pytest.fixture()
def bout_xyt_example_files(tmpdir_factory):
    # Indirection fixture: hands tests the helper that writes mock BOUT++
    # dump files into a fresh temporary directory.
    return _bout_xyt_example_files
def _bout_xyt_example_files(tmpdir_factory, prefix='BOUT.dmp', lengths=(2, 4, 7, 6),
                            nxpe=4, nype=2, nt=1, ghosts=None, guards=None,
                            syn_data_type='random'):
    """
    Mocks up a set of BOUT-like netCDF files in a fresh temporary directory.

    Parameters
    ----------
    tmpdir_factory : pytest TempdirFactory
        Used to create the directory the files are written into.
    prefix : str
        File name prefix, e.g. 'BOUT.dmp' gives files 'BOUT.dmp.<proc>.nc'.
    lengths : tuple of int
        Lengths of the (x, y, z, t) dimensions of each dataset.
    nxpe, nype : int
        Number of processors in the x and y directions.
    nt : int
        Number of runs (affects how many numbers are globbed in the result).
    ghosts, guards : dict, optional
        Number of ghost/guard cells per dimension; defaults to none.
    syn_data_type : str or int
        Kind of synthetic data to fill the variables with.

    Returns
    -------
    str
        A glob-like pattern matching all files created, with the varying
        numeric path components replaced by '*'.
    """
    # Use None sentinels instead of mutable default arguments, which would be
    # shared across calls.
    if ghosts is None:
        ghosts = {}
    if guards is None:
        guards = {}

    save_dir = tmpdir_factory.mktemp("data")

    ds_list, file_list = create_bout_ds_list(prefix=prefix, lengths=lengths,
                                             nxpe=nxpe, nype=nype, nt=nt,
                                             ghosts=ghosts, guards=guards,
                                             syn_data_type=syn_data_type)

    for ds, file_name in zip(ds_list, file_list):
        ds.to_netcdf(str(save_dir.join(str(file_name))))

    # Return a glob-like path to all files created, which has the varying file
    # numbers replaced with a single asterisk.  The processor number always
    # varies; one extra number varies as well when there are multiple runs.
    path = str(save_dir.join(str(file_list[-1])))

    count = 1
    if nt > 1:
        count += 1
    # We have to reverse the path before limiting the number of numbers
    # replaced so that only the *trailing* numbers are substituted -- pytest's
    # persistent temporary directories are also designated by numbers, which
    # must be left intact.
    glob_pattern = (re.sub(r'\d+', '*', path[::-1], count=count))[::-1]
    return glob_pattern
def create_bout_ds_list(prefix, lengths=(2, 4, 7, 6), nxpe=4, nype=2, nt=1,
                        ghosts=None, guards=None, syn_data_type='random'):
    """
    Mocks up a set of BOUT-like datasets.

    Structured as though they were produced by an x-y parallelised run with
    multiple restarts.

    Parameters
    ----------
    prefix : str
        File name prefix; files are named '<prefix>.<num>.nc'.
    lengths : tuple of int
        Lengths of the (x, y, z, t) dimensions of each dataset.
    nxpe, nype : int
        Number of processors in the x and y directions.
    nt : int
        Number of runs (currently unused when building the datasets).
    ghosts, guards : dict, optional
        Number of ghost/guard cells per dimension; defaults to none.
    syn_data_type : str or int
        Kind of synthetic data to fill the variables with.

    Returns
    -------
    (list, list)
        The datasets and their corresponding file names, sorted consistently
        by file name.
    """
    # Use None sentinels instead of mutable default arguments, which would be
    # shared across calls.
    if ghosts is None:
        ghosts = {}
    if guards is None:
        guards = {}

    file_list = []
    ds_list = []
    for i in range(nxpe):
        for j in range(nype):
            # Processor number increases fastest along x
            num = (i + nxpe * j)
            filename = prefix + "." + str(num) + ".nc"
            file_list.append(filename)

            # Every processor carries ghost cells on both sides of each
            # ghosted dimension
            upper_bndry_cells = {dim: ghosts.get(dim) for dim in ghosts.keys()}
            lower_bndry_cells = {dim: ghosts.get(dim) for dim in ghosts.keys()}

            # Processors at the edge of the domain also carry guard cells.
            # NOTE(review): both dims are gated on the x-processor index i;
            # for 'y' this looks like it should use j and nype -- confirm.
            for dim in ['x', 'y']:
                if dim in guards.keys():
                    if i == 0:
                        lower_bndry_cells[dim] = guards[dim]
                    if i == nxpe - 1:
                        upper_bndry_cells[dim] = guards[dim]

            ds = create_bout_ds(syn_data_type=syn_data_type, num=num,
                                lengths=lengths, nxpe=nxpe, nype=nype,
                                upper_bndry_cells=upper_bndry_cells,
                                lower_bndry_cells=lower_bndry_cells,
                                guards=guards, ghosts=ghosts)
            ds_list.append(ds)

    # Sort the (filename, dataset) pairs by file name so the returned ordering
    # carries no BOUT-specific structure
    ds_list_sorted = [ds for filename, ds in sorted(zip(file_list, ds_list))]
    file_list_sorted = [filename for filename, ds in sorted(zip(file_list, ds_list))]

    return ds_list_sorted, file_list_sorted
def create_bout_ds(syn_data_type='random', lengths=(2, 4, 7, 6), num=0, nxpe=1, nype=1,
                   upper_bndry_cells=None, lower_bndry_cells=None,
                   guards=None, ghosts=None):
    """
    Mocks up a single BOUT-like xarray.Dataset containing variables 'n' and
    'T' plus scalar metadata variables.

    Parameters
    ----------
    syn_data_type : str or int
        'random' for seeded random noise (identical in every dataset),
        'stepped' to fill with the constant `num`, or an int to fill with that
        constant.  'linear' is reserved and raises NotImplementedError.
    lengths : tuple of int
        Base lengths of the (x, y, z, t) dimensions, before boundary cells.
    num : int
        Processor number of this dataset (used by the 'stepped' fill).
    nxpe, nype : int
        Processor-split metadata stored in the dataset.
    upper_bndry_cells, lower_bndry_cells : dict, optional
        Extra cells to add at the upper/lower end of each dimension.
    guards, ghosts : dict, optional
        Guard/ghost cell counts stored as metadata (MXG/MYG, MXSUB/MYSUB).

    Returns
    -------
    xarray.Dataset

    Raises
    ------
    ValueError
        If `syn_data_type` is not one of the recognised choices.
    """
    # Use None sentinels instead of mutable default arguments, which would be
    # shared across calls.
    if upper_bndry_cells is None:
        upper_bndry_cells = {}
    if lower_bndry_cells is None:
        lower_bndry_cells = {}
    if guards is None:
        guards = {}
    if ghosts is None:
        ghosts = {}

    # Set the shape of the data in this dataset, including boundary cells
    x_length, y_length, z_length, t_length = lengths
    x_length += upper_bndry_cells.get('x', 0) + lower_bndry_cells.get('x', 0)
    y_length += upper_bndry_cells.get('y', 0) + lower_bndry_cells.get('y', 0)
    z_length += upper_bndry_cells.get('z', 0) + lower_bndry_cells.get('z', 0)
    t_length += upper_bndry_cells.get('t', 0) + lower_bndry_cells.get('t', 0)
    shape = (x_length, y_length, z_length, t_length)

    # Fill with some kind of synthetic data.
    # Compare strings with '==', not 'is': identity comparison of literals is
    # an implementation detail of string interning (and a SyntaxWarning on
    # modern CPython).
    if syn_data_type == 'random':
        # Each dataset contains the same random noise
        np.random.seed(seed=0)
        data = np.random.randn(*shape)
    elif syn_data_type == 'linear':
        # Variables increase linearly across entire domain
        raise NotImplementedError
    elif syn_data_type == 'stepped':
        # Each dataset contains a different number depending on the filename
        data = np.ones(shape) * num
    elif isinstance(syn_data_type, int):
        data = np.ones(shape) * syn_data_type
    else:
        raise ValueError('Not a recognised choice of type of synthetic bout data.')

    T = DataArray(data, dims=['x', 'y', 'z', 't'])
    n = DataArray(data, dims=['x', 'y', 'z', 't'])
    ds = Dataset({'n': n, 'T': T})

    # Include scalar metadata variables, as BOUT++ output files do
    ds['NXPE'] = nxpe
    ds['NYPE'] = nype
    ds['MXG'] = guards.get('x', 0)
    ds['MYG'] = guards.get('y', 0)
    ds['nx'] = x_length
    ds['MXSUB'] = ghosts.get('x', 0)
    ds['MYSUB'] = ghosts.get('y', 0)
    ds['MZ'] = z_length

    return ds
# Scalar metadata variables attached to datasets by create_bout_ds; tests
# expect _strip_metadata to separate exactly these from the data variables.
METADATA_VARS = ['NXPE', 'NYPE', 'MXG', 'MYG', 'nx', 'MXSUB', 'MYSUB',
                 'MZ']
class TestStripMetadata():
    def test_strip_metadata(self):
        """Metadata variables are split out of the dataset into a dict."""
        original = create_bout_ds()
        assert original['NXPE'] == 1

        stripped, metadata = _strip_metadata(original)

        # The metadata values survive in the returned dict, and the remaining
        # dataset equals the original minus the metadata variables
        assert metadata['NXPE'] == 1
        assert original.drop(METADATA_VARS).equals(stripped)
# TODO also test loading multiple files which have ghost cells
class TestCombineNoTrim:
    def test_single_file(self, tmpdir_factory, bout_xyt_example_files):
        """A lone dump file round-trips unchanged (minus metadata vars)."""
        path = bout_xyt_example_files(tmpdir_factory, nxpe=1, nype=1, nt=1)

        actual, metadata = _auto_open_mfboutdataset(datapath=path)

        expected = create_bout_ds()
        xrt.assert_equal(actual.load(), expected.drop(METADATA_VARS))

    def test_combine_along_x(self, tmpdir_factory, bout_xyt_example_files):
        """Four x-processor files are concatenated in order along 'x'."""
        path = bout_xyt_example_files(tmpdir_factory, nxpe=4, nype=1, nt=1,
                                      syn_data_type='stepped')

        actual, metadata = _auto_open_mfboutdataset(datapath=path)

        # create_bout_ds(n) (positional) fills the dataset with the constant n,
        # matching the 'stepped' fill of processor n
        expected = concat([create_bout_ds(n) for n in range(4)], dim='x')
        xrt.assert_equal(actual.load(), expected.drop(METADATA_VARS))

    def test_combine_along_y(self, tmpdir_factory, bout_xyt_example_files):
        """Three y-processor files are concatenated in order along 'y'."""
        path = bout_xyt_example_files(tmpdir_factory, nxpe=1, nype=3, nt=1,
                                      syn_data_type='stepped')

        actual, metadata = _auto_open_mfboutdataset(datapath=path)

        expected = concat([create_bout_ds(n) for n in range(3)], dim='y')
        xrt.assert_equal(actual.load(), expected.drop(METADATA_VARS))

    @pytest.mark.skip
    def test_combine_along_t(self):
        ...

    def test_combine_along_xy(self, tmpdir_factory, bout_xyt_example_files):
        """A 4x3 processor grid is concatenated along 'x' then 'y'."""
        path = bout_xyt_example_files(tmpdir_factory, nxpe=4, nype=3, nt=1,
                                      syn_data_type='stepped')

        actual, metadata = _auto_open_mfboutdataset(datapath=path)

        # Row j of the processor grid holds files 4j..4j+3
        rows = [concat([create_bout_ds(4 * j + i) for i in range(4)], dim='x')
                for j in range(3)]
        expected = concat(rows, dim='y')
        xrt.assert_equal(actual.load(), expected.drop(METADATA_VARS))

    @pytest.mark.skip
    def test_combine_along_tx(self):
        ...
class TestTrim:
    def test_no_trim(self):
        """With no ghost cells given, the dataset is returned unchanged."""
        ds = create_test_data(0)
        xrt.assert_equal(_trim(ds), ds)

    def test_trim_ghosts(self):
        """Ghost cells are sliced off both ends of the named dimension."""
        ds = create_test_data(0)

        expected = ds.isel(time=slice(2, -2))
        actual = _trim(ds, ghosts={'time': 2})
        xrt.assert_equal(expected, actual)
| StarcoderdataPython |
import pytest
from app_data.selectors.amazon import NEXT_BUTTON
from helpers import dom
from helpers.amazon import do_search, verify_search_result_summary
# Landing page under test, together with the expected page title (used by the
# open_url fixture / navigation checks to confirm the page loaded).
URL = {
    'link': 'https://www.amazon.com/',
    'title': 'Amazon.com: Online Shopping for Electronics, Apparel, Computers, Books, DVDs & more'
}
@pytest.mark.smoke
@pytest.mark.usefixtures("open_url")
@pytest.mark.parametrize("search_term", ("gardening tools", "plush animals", "pots",))
def test_amazon_search_summary(selenium, search_term):
    """
    Validate the result-count summary shown on the first and second pages of
    search results, for each parametrized search term.
    """
    RESULTS_PER_PAGE = 48

    # Run the search, then check the summary on page 1 and, after paging
    # forward, on page 2 (results 1-48 and 49-96 respectively)
    do_search(selenium, search_term)
    for page in (1, 2):
        if page > 1:
            dom.click_element(selenium, NEXT_BUTTON)
        first_result = RESULTS_PER_PAGE * (page - 1) + 1
        last_result = RESULTS_PER_PAGE * page
        verify_search_result_summary(selenium, low=first_result, high=last_result,
                                     expected_search_term=search_term)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.