import torch
import numpy as np
import pandas as pd
from os.path import join
from pathlib import Path
from torch.utils.data import Dataset
from torch.nn.utils.rnn import pad_sequence
class DSet(Dataset):
''' This is the WSJ parser '''
def __init__(self, path, split):
# Setup
self.path = path
self.wav_form = join(path, 'wav', '{}.wav')
self.phn_form = join(path, 'phn', '{}.pt')
# List all wave files
self.file_list = []
for s in split:
s_list = pd.read_csv(join(path,'meta',s+'_phn.csv'),header=None)[0].tolist()
assert len(s_list) > 0, "No data found @ {}".format(join(path,s))
self.file_list += s_list
def __getitem__(self, index):
fid = self.file_list[index]
return self.wav_form.format(fid), self.phn_form.format(fid)
def __len__(self):
return len(self.file_list)
def collect_batch(batch, audio_transform, audio_max_frames, mode):
    '''Collate a batch: a list of (wav_path, phn_path) string tuples.'''
# Load Batch
file_id, audio_feat, phn_seq, audio_len = [], [], [], []
with torch.no_grad():
for wav,phn in batch:
file_id.append(wav.rsplit('/',1)[-1].replace('.wav',''))
# Audio feature (sequence) on-the-fly
x = audio_transform(filepath=wav)
# Phn label sequence (test set shouldn't be cropped)
            if mode == 'test':
phn = phn.replace('.pt','_nocrop.pt')
y = torch.load(phn)+1 # 0 = pad
# Crop to avoid batch too large
x,y = _crop(x,y,audio_max_frames, mode)
audio_feat.append(x)
audio_len.append(len(x))
phn_seq.append(y[:len(x)])
# Descending audio length within each batch
audio_len, audio_feat, phn_seq, file_id = zip(*[(fl, f, phn, fid)
for fl, f, phn, fid in sorted(zip(audio_len, audio_feat, phn_seq, file_id),
reverse=True, key=lambda x:x[0])])
# Zero padding
audio_feat = pad_sequence(audio_feat, batch_first=True)
phn_seq = pad_sequence(phn_seq, batch_first=True)
return file_id, audio_feat, audio_len, phn_seq
def _crop(x, y, max_len, mode):
    if len(x) > len(y):
        if mode == 'test':
            raise NotImplementedError('Test set is not supposed to be cropped')
else:
# Crop files that are too long
x = x[:len(y)]
if len(x) > max_len:
return x[:max_len],y[:max_len]
else:
return x,y
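# A minimal usage sketch (hypothetical dataset root, split name, and
# audio_transform; the 1600 max-frames value is an arbitrary illustration):
#
#   ds = DSet('/data/wsj', ['train'])
#   loader = torch.utils.data.DataLoader(
#       ds, batch_size=8,
#       collate_fn=lambda b: collect_batch(b, audio_transform, 1600, 'train'))
#   file_id, audio_feat, audio_len, phn_seq = next(iter(loader))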
#!/usr/bin/env python
import astropy.units as u
__all__ = ['toltec_info', ]
toltec_info = {
'instru': 'toltec',
'name': 'TolTEC',
'name_long': 'TolTEC Camera',
'array_physical_diameter': 127.049101 << u.mm,
'fov_diameter': 4. << u.arcmin,
'fg_names': ['fg0', 'fg1', 'fg2', 'fg3'],
'fg0': {
'index': 0,
'det_pa': 0. << u.deg,
},
'fg1': {
'index': 1,
'det_pa': 45. << u.deg,
},
'fg2': {
'index': 2,
'det_pa': 90. << u.deg,
},
'fg3': {
'index': 3,
'det_pa': 135. << u.deg,
},
'array_names': ['a1100', 'a1400', 'a2000'],
'a1100': {
'index': 0,
'name': 'a1100',
'name_long': 'TolTEC 1.1 mm array',
'wl_center': 1.1 << u.mm,
'array_mounting_angle': 90. << u.deg
},
'a1400': {
'index': 1,
'name': 'a1400',
'name_long': 'TolTEC 1.4 mm array',
'wl_center': 1.4 << u.mm,
'array_mounting_angle': -90. << u.deg
},
'a2000': {
'index': 2,
'name': 'a2000',
'name_long': 'TolTEC 2.0 mm array',
'wl_center': 2.0 << u.mm,
'array_mounting_angle': -90. << u.deg
},
'nws': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
'interfaces': [
'toltec0', 'toltec1', 'toltec2', 'toltec3',
'toltec4', 'toltec5', 'toltec6',
'toltec7', 'toltec8', 'toltec9', 'toltec10',
'toltec11', 'toltec12',
'hwpr', 'wyatt', 'tel', 'toltec_hk'],
'toltec0': {
'name': 'toltec0',
'nw': 0,
'array_name': 'a1100',
},
'toltec1': {
'name': 'toltec1',
'nw': 1,
'array_name': 'a1100',
},
'toltec2': {
'name': 'toltec2',
'nw': 2,
'array_name': 'a1100',
},
'toltec3': {
'name': 'toltec3',
'nw': 3,
'array_name': 'a1100',
},
'toltec4': {
'name': 'toltec4',
'nw': 4,
'array_name': 'a1100',
},
'toltec5': {
'name': 'toltec5',
'nw': 5,
'array_name': 'a1100',
},
'toltec6': {
'name': 'toltec6',
'nw': 6,
'array_name': 'a1100',
},
'toltec7': {
'name': 'toltec7',
'nw': 7,
'array_name': 'a1400',
},
'toltec8': {
'name': 'toltec8',
'nw': 8,
'array_name': 'a1400',
},
'toltec9': {
'name': 'toltec9',
'nw': 9,
'array_name': 'a1400',
},
'toltec10': {
'name': 'toltec10',
'nw': 10,
'array_name': 'a1400',
},
'toltec11': {
'name': 'toltec11',
'nw': 11,
'array_name': 'a2000',
},
'toltec12': {
'name': 'toltec12',
'nw': 12,
'array_name': 'a2000',
},
'hwpr': {
'name': 'hwpr',
},
'wyatt': {
'name': 'wyatt',
},
'tel': {
'name': 'tel',
},
'toltec_hk': {
'name': 'toltec_hk'
},
}
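# Example lookup against the table above (illustrative): resolve a readout
# interface to the wavelength of the array it belongs to.
#
#   array_name = toltec_info['toltec7']['array_name']   # 'a1400'
#   wl_center = toltec_info[array_name]['wl_center']    # 1.4 mm (astropy Quantity)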
"""\
Examples
For the development.ini you must supply the paster app name:
%(prog)s development.ini --app-name app --init --clear
"""
from pyramid.paster import get_app
import atexit
import logging
import os.path
import select
import shutil
import sys
EPILOG = __doc__
logger = logging.getLogger(__name__)
def main():
import argparse
parser = argparse.ArgumentParser(
description="Run development servers", epilog=EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument('--app-name', help="Pyramid app name in configfile")
parser.add_argument('config_uri', help="path to configfile")
parser.add_argument('--clear', action="store_true", help="Clear existing data")
parser.add_argument('--init', action="store_true", help="Init database")
parser.add_argument('--load', action="store_true", help="Load test set")
parser.add_argument('--datadir', default='/tmp/clincoded', help="path to datadir")
args = parser.parse_args()
logging.basicConfig()
    # Loading the app will have configured logging from the config file. Reconfigure here:
logging.getLogger('clincoded').setLevel(logging.DEBUG)
from clincoded.tests import elasticsearch_fixture, postgresql_fixture
from contentbase.elasticsearch import create_mapping
datadir = os.path.abspath(args.datadir)
pgdata = os.path.join(datadir, 'pgdata')
esdata = os.path.join(datadir, 'esdata')
if args.clear:
for dirname in [pgdata, esdata]:
if os.path.exists(dirname):
shutil.rmtree(dirname)
if args.init:
postgresql_fixture.initdb(pgdata, echo=True)
postgres = postgresql_fixture.server_process(pgdata, echo=True)
elasticsearch = elasticsearch_fixture.server_process(esdata, echo=True)
processes = [postgres, elasticsearch]
@atexit.register
def cleanup_process():
for process in processes:
if process.poll() is None:
process.terminate()
for process in processes:
try:
for line in process.stdout:
sys.stdout.write(line.decode('utf-8'))
except IOError:
pass
process.wait()
if args.init:
app = get_app(args.config_uri, args.app_name)
create_mapping.run(app)
if args.load:
from webtest import TestApp
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
testapp = TestApp(app, environ)
from clincoded.loadxl import load_all
from pkg_resources import resource_filename
inserts = resource_filename('clincoded', 'tests/data/inserts/')
docsdir = [resource_filename('clincoded', 'tests/data/documents/')]
load_all(testapp, inserts, docsdir)
print('Started. ^C to exit.')
stdouts = [p.stdout for p in processes]
    # Ugly; should probably use threads instead
while True:
readable, writable, err = select.select(stdouts, [], stdouts, 5)
for stdout in readable:
for line in iter(stdout.readline, b''):
sys.stdout.write(line.decode('utf-8'))
if err:
for stdout in err:
for line in iter(stdout.readline, b''):
sys.stdout.write(line.decode('utf-8'))
break
if __name__ == '__main__':
main()
import logging
import os.path
DEFAULT_LOG_PATH = None
DEFAULT_LOG_DIR = os.path.join(os.path.dirname(__file__), "logs")
if not os.path.exists(DEFAULT_LOG_DIR):
try:
os.mkdir(DEFAULT_LOG_DIR)
except OSError:
DEFAULT_LOG_DIR = None
if DEFAULT_LOG_DIR:
DEFAULT_LOG_PATH = os.path.join(DEFAULT_LOG_DIR, "search.log")
def with_logging_methods(methods):
"""
Class decorator to add logging methods like info(), warning(), ... to logger class
:param methods: A list of string method names
:return: Class decorator
"""
def logger_decorator(clazz):
def create_log_method(name):
def inner(self, msg, force_console_print=False):
if logging.root.isEnabledFor(self.log_level_mappings()[name]):
getattr(logging, name)(msg)
elif force_console_print:
print(msg)
return inner
for level in methods:
setattr(clazz, level, create_log_method(level))
return clazz
return logger_decorator
@with_logging_methods(("info", "error", "warning", "debug", "critical"))
class SearchLogger:
_instance = None
@classmethod
def get_logger(cls):
if not cls._instance:
raise RuntimeError(
"Logger should be initialized before the first use. Use SearchLogger.init_logger() to do so."
)
return cls._instance
@classmethod
def init_logger(cls, *args, **kwargs):
if not cls._instance:
cls._instance = cls(*args, **kwargs)
@classmethod
def log_level_mappings(cls):
return {
"info": logging.INFO,
"error": logging.ERROR,
"warning": logging.WARNING,
"debug": logging.DEBUG,
"critical": logging.CRITICAL
}
@classmethod
def get_actual_log_level(cls, level):
return cls.log_level_mappings().get(level, logging.INFO)
def __init__(self, path=DEFAULT_LOG_PATH, log_to_console=True, level="info"):
log_level = self.__class__.get_actual_log_level(level)
handlers = []
if path:
handlers.append(logging.FileHandler(path, mode='w'))
if log_to_console or not path:
handlers.append(logging.StreamHandler())
logging.root.handlers = []
logging.basicConfig(
level=log_level,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=handlers
)
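# A minimal usage sketch; the level chosen here is illustrative:
#
#   SearchLogger.init_logger(log_to_console=True, level="debug")
#   log = SearchLogger.get_logger()
#   log.info("index loaded")                        # added by the class decorator
#   log.debug("details", force_console_print=True)  # prints even when the level filters it out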
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def addTwoNumbers(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
# Headnode of the return list
headNode = ListNode(0)
        # A pointer that walks the result list; we keep headNode itself
        # so we don't lose the starting point
        dummyPointer = headNode
        # A placeholder for the carry
        carry = 0
        # carry stays in the loop condition in case l1 and l2 each have one
        # element and their sum is 10 or greater
while l1 or l2 or carry:
val = (l1.val if l1 else 0) + (l2.val if l2 else 0) + carry
dummyPointer.next = ListNode(val % 10)
dummyPointer = dummyPointer.next
            carry = val // 10  # integer division so carry stays an int in Python 3
# Move on to the next node
l1 = l1.next if l1 else None
l2 = l2.next if l2 else None
headNode = headNode.next
return headNode
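# Worked example (hypothetical driver, assuming the ListNode class sketched in
# the comment above): 342 + 465 = 807, with digits stored in reverse order.
#
#   def build(digits):
#       head = ListNode(0)
#       cur = head
#       for d in digits:
#           cur.next = ListNode(d)
#           cur = cur.next
#       return head.next
#
#   result = Solution().addTwoNumbers(build([2, 4, 3]), build([5, 6, 4]))
#   # traversing result yields 7 -> 0 -> 8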
# coding: utf-8
from mhw_armor_edit.ftypes import StructFile, Struct
class WpDatEntry(Struct):
STRUCT_SIZE = 65
id: "<I"
unk1: "<H"
base_model_id: "<H"
part1_id: "<H"
part2_id: "<H"
color: "<B"
tree_id: "<B"
is_fixed_upgrade: "<B"
crafting_cost: "<I"
rarity: "<B"
kire_id: "<B"
handicraft: "<B"
raw_damage: "<H"
defense: "<H"
affinity: "<b"
element_id: "<B"
element_damage: "<H"
hidden_element_id: "<B"
hidden_element_damage: "<H"
elderseal: "<B"
num_gem_slots: "<B"
gem_slot1_lvl: "<B"
gem_slot2_lvl: "<B"
gem_slot3_lvl: "<B"
wep1_id: "<H"
wep2_id: "<H"
unk2: "<I"
unk3: "<I"
unk4: "<I"
tree_position: "<B"
order: "<H"
gmd_name_index: "<H"
gmd_description_index: "<H"
skill_id: "<H"
unk5: "<H"
class WpDat(StructFile):
EntryFactory = WpDatEntry
MAGIC = 0x0186
# Generated by Django 3.2.4 on 2021-06-20 12:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('auth', '0012_alter_user_first_name_max_length'),
]
operations = [
migrations.CreateModel(
name='Token',
fields=[
('id', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='auth.user')),
('token', models.JSONField()),
],
),
migrations.CreateModel(
name='WalletIncome',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('regular', 'Regular'), ('casual', 'Casual')], default='regular', max_length=32)),
('name', models.CharField(max_length=64)),
('value', models.DecimalField(decimal_places=2, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalletHouse',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('value', models.DecimalField(decimal_places=3, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalletExpense',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('life', 'Life'), ('tickets', 'Ticket'), ('house rent', 'House rent')], default='life', max_length=32)),
('name', models.CharField(max_length=64)),
('value', models.DecimalField(decimal_places=2, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalletDeposit',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('value', models.DecimalField(decimal_places=2, max_digits=20)),
('rate', models.DecimalField(decimal_places=2, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalletCredit',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('value', models.DecimalField(decimal_places=2, max_digits=20)),
('rate', models.DecimalField(decimal_places=2, max_digits=20)),
('balance', models.DecimalField(decimal_places=2, max_digits=20)),
('interest', models.DecimalField(decimal_places=2, max_digits=20)),
('capital', models.DecimalField(decimal_places=2, max_digits=20)),
('insurance', models.DecimalField(decimal_places=2, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalletCar',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('car', models.CharField(max_length=64)),
('exploitation', models.IntegerField()),
('payment', models.DecimalField(decimal_places=2, max_digits=20)),
('refuelling', models.DecimalField(decimal_places=2, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='WalletAccount',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('wallet', 'Wallet'), ('bank account', 'Bank account'), ('mobile account', 'Mobile account')], default='wallet', max_length=32)),
('name', models.CharField(max_length=64)),
('value', models.DecimalField(decimal_places=2, max_digits=20)),
('year', models.IntegerField(choices=[(2010, 2010), (2011, 2011), (2012, 2012), (2013, 2013), (2014, 2014), (2015, 2015), (2016, 2016), (2017, 2017), (2018, 2018), (2019, 2019), (2020, 2020), (2021, 2021), (2022, 2022), (2023, 2023), (2024, 2024), (2025, 2025), (2026, 2026), (2027, 2027), (2028, 2028), (2029, 2029), (2030, 2030), (2031, 2031), (2032, 2032), (2033, 2033), (2034, 2034), (2035, 2035), (2036, 2036), (2037, 2037), (2038, 2038), (2039, 2039), (2040, 2040), (2041, 2041), (2042, 2042), (2043, 2043), (2044, 2044), (2045, 2045), (2046, 2046), (2047, 2047), (2048, 2048), (2049, 2049), (2050, 2050), (2051, 2051), (2052, 2052), (2053, 2053), (2054, 2054), (2055, 2055), (2056, 2056), (2057, 2057), (2058, 2058), (2059, 2059), (2060, 2060), (2061, 2061), (2062, 2062), (2063, 2063), (2064, 2064), (2065, 2065), (2066, 2066), (2067, 2067), (2068, 2068), (2069, 2069), (2070, 2070), (2071, 2071), (2072, 2072), (2073, 2073), (2074, 2074), (2075, 2075), (2076, 2076), (2077, 2077), (2078, 2078), (2079, 2079), (2080, 2080), (2081, 2081), (2082, 2082), (2083, 2083), (2084, 2084), (2085, 2085), (2086, 2086), (2087, 2087), (2088, 2088), (2089, 2089), (2090, 2090), (2091, 2091), (2092, 2092), (2093, 2093), (2094, 2094), (2095, 2095), (2096, 2096), (2097, 2097), (2098, 2098), (2099, 2099)], default=2021)),
('month', models.IntegerField(choices=[(1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10), (11, 11), (12, 12)], default=6)),
('currency', models.CharField(choices=[('zł', 'zł'), ('eu', 'eu'), ('$', '$'), ('gbp', 'gbp'), ('kc', 'kc')], default='zł', max_length=8)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ReminderGroup',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.CharField(choices=[('red', 'Red'), ('orange', 'Orange'), ('blue', 'Blue'), ('green', 'Green'), ('black', 'Black'), ('grey', 'Grey'), ('brown', 'Brown'), ('yellow', 'Yellow'), ('magenta', 'Magenta')], default='orange', max_length=32)),
('name', models.CharField(max_length=64)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Reminder',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('when', models.DateTimeField(blank=True, null=True)),
('repeat', models.CharField(blank=True, choices=[('1d', 'Every day'), ('7d', 'Every week'), ('14d', 'Every 2 weeks'), ('30d', 'Every 30 days'), ('1m', 'Every month'), ('2m', 'Every 2 months'), ('3m', 'Every 3 months'), ('1y', 'Every year')], max_length=32, null=True)),
('priority', models.CharField(choices=[('low', 'Low'), ('normal', 'Normal'), ('high', 'High')], default='normal', max_length=32)),
('modified', models.DateTimeField(auto_now=True)),
('group', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.remindergroup')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Log',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(choices=[('wallet', 'Wallet'), ('calendar', 'Calendar'), ('reminder', 'Reminder'), ('note', 'Note')], default='note', max_length=32)),
('info', models.CharField(max_length=64)),
('json', models.TextField(blank=True, max_length=512, null=True)),
('modified', models.DateTimeField(auto_now=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
from setuptools import setup

with open('README.md') as f:
    long_description = f.read()

setup(
    name='itrcnt',
    py_modules=['itrcnt'],
    version='0.1.2',
    license='BSD',
    author='mao2009',
    url='https://github.com/mao2009/Python_Counter',
    description='Alternative for Range and Enumerator',
    long_description=long_description,
    long_description_content_type='text/markdown',
    keywords='range enumerator'
)
"""
testing for agent's config
"""
import os
import pytest
import yaml
from eha.agent.config import load
@pytest.mark.parametrize('content, envs, result', (
(
"""
foo: 123
bar: 234
""",
{},
{
'foo': 123,
'bar': 234,
}
),
(
"""
foo: 123
bar: 234
""",
{
'EHA_AGENT_FOO': 'abc',
'EHA_AGENT_BAR': '234',
},
{
'foo': 'abc',
'bar': '234',
}
),
))
def test_load(content, envs, result, mocker, monkeypatch):
patched_open = mocker.mock_open(read_data=content)
mocker.patch('builtins.open', patched_open)
    mocker.patch('os.path.isfile', bool)  # bool(path) is True for any non-empty path
with monkeypatch.context() as patch:
for key, value in envs.items():
patch.setenv(key, value)
config = load()
assert config == result
default_app_config = 'kolibri.content.apps.KolibriContentConfig'
import os
import pandas as pd
def read_parquet(data_path, num_partitions=None, random=False, verbose=True, columns=None):
files = os.listdir(data_path)
    if random:
        import random  # the module shadows the boolean parameter from here on
        random.shuffle(files)
if num_partitions is None:
num_partitions = len(files)
data = []
num_reads = 0
for file_path in files:
if num_reads >= num_partitions:
break
root, ext = os.path.splitext(file_path)
# exclude non-parquet files (e.g. gitkeep, other folders)
if ext == '.parquet':
fp = os.path.join(data_path, file_path)
if verbose:
print('Reading in data from {}'.format(fp))
data.append(pd.read_parquet(os.path.join(data_path, file_path), columns=columns))
if verbose:
print('Data of shape {}'.format(data[-1].shape))
num_reads += 1
else:
continue
data = pd.concat(data, axis=0)
if verbose:
print('Total dataframe of shape {}'.format(data.shape))
return data
def feature_label_split(data, model_features, label='label', qid='qid'):
# assumes data of same QIDs are grouped together
X = data[model_features]
y = data[label]
qid = data[qid].value_counts(sort=False).sort_index()
return X, y, qid
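# A minimal usage sketch (hypothetical paths and column names):
#
#   df = read_parquet('data/train', num_partitions=4, random=True)
#   X, y, qid = feature_label_split(df, ['f0', 'f1'], label='label', qid='qid')
#   # qid holds per-query row counts, e.g. for a learning-to-rank group vector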
#!/usr/bin/env python
import os
import sys

fn_read_keys = None
dn_sstable_keys = None
read_keys = []
key_sstgen = {}

def LoadReadKeys():
    global read_keys
    print("loading read keys from %s ..." % fn_read_keys)
    with open(fn_read_keys) as fo:
        for line in fo.readlines():
            read_keys.append(line.strip().lower())

def LoadSSTableKeys():
    global dn_sstable_keys
    global key_sstgen
    print("loading sstable keys from %s ..." % dn_sstable_keys)
    sst_gen = 0
    while True:
        sst_gen += 1
        fn = "%s/keys-%d" % (dn_sstable_keys, sst_gen)
        if not os.path.isfile(fn):
            break
        with open(fn) as fo:
            for line in fo.readlines():
                key = line.strip()
                if key not in key_sstgen:
                    key_sstgen[key] = []
                key_sstgen[key].append(sst_gen)
    print("len(key_sstgen)=%d" % len(key_sstgen))

def CheckDupKeys():
    print("Checking duplicate keys ...")
    for k, v in key_sstgen.items():
        if len(v) > 1:
            print(k, v)

def CountReadsBySSTables():
    sstgen_readcnt_first = {}
    sstgen_readcnt_all = {}
    memtable_read_cnt = 0
    print("len(read_keys)=%d" % len(read_keys))
    for rk in read_keys:
        # If a read key is not in any of the sstables, it may be in the memtable
        if rk not in key_sstgen:
            memtable_read_cnt += 1
            continue
        # Get the youngest sstable, which is the last one in the list
        sstgen = key_sstgen[rk][-1]
        if sstgen not in sstgen_readcnt_first:
            sstgen_readcnt_first[sstgen] = 1
        else:
            sstgen_readcnt_first[sstgen] += 1
        for sstgen in key_sstgen[rk]:
            if sstgen not in sstgen_readcnt_all:
                sstgen_readcnt_all[sstgen] = 1
            else:
                sstgen_readcnt_all[sstgen] += 1
    print("memtable_read_cnt=%d" % memtable_read_cnt)
    print("sstable_readcnt: sstgen first_hit all_hit:")
    for k, v in sorted(sstgen_readcnt_first.items()):
        print("  %2d %6d %6d" % (k, v, sstgen_readcnt_all[k]))

def main(argv):
    if len(argv) != 3:
        print("Usage: %s fn_read_keys dn_sstable_keys" % (argv[0]))
        print("  E.g.: %s data/read-keys-15-11-26-18:15:55 ../check-keys-in-sstables/standard1-2d180380949311e5945a1d822de6a4f1" % (argv[0]))
        sys.exit(1)
    global fn_read_keys
    global dn_sstable_keys
    fn_read_keys = argv[1]
    dn_sstable_keys = argv[2]
    LoadReadKeys()
    LoadSSTableKeys()
    #CheckDupKeys()
    CountReadsBySSTables()
# Stopping when the max timestamp of a sstable is older than the current timestamp is not simulated.
# We assume that bigger sstable gens have younger keys (records)
if __name__ == "__main__":
sys.exit(main(sys.argv))
#
# Copyright (c) Sinergise, 2019 -- 2021.
#
# This file belongs to subproject "field-delineation" of project NIVA (www.niva4cap.eu).
# All rights reserved.
#
# This source code is licensed under the MIT license found in the LICENSE
# file in the root directory of this source tree.
#
from typing import Callable, List, Any
from concurrent.futures import ProcessPoolExecutor
from fs_s3fs import S3FS
from dataclasses import dataclass
from tqdm.auto import tqdm
from sentinelhub import SHConfig
@dataclass
class BaseConfig:
bucket_name: str
aws_access_key_id: str
aws_secret_access_key: str
aws_region: str
def prepare_filesystem(config: BaseConfig) -> S3FS:
return S3FS(bucket_name=config.bucket_name,
aws_access_key_id=config.aws_access_key_id,
aws_secret_access_key=config.aws_secret_access_key,
region=config.aws_region)
def set_sh_config(config: BaseConfig) -> SHConfig:
""" Set AWS and SH credentials in SHConfig file to allow usage of download and io tasks """
sh_config = SHConfig()
sh_config.aws_access_key_id = config.aws_access_key_id
sh_config.aws_secret_access_key = config.aws_secret_access_key
if all(key in config.__annotations__.keys() for key in ['sh_client_id', 'sh_client_secret']):
sh_config.sh_client_id = config.sh_client_id
sh_config.sh_client_secret = config.sh_client_secret
sh_config.save()
return sh_config
def multiprocess(process_fun: Callable, arguments: List[Any], max_workers: int = 4) -> List[Any]:
"""
Executes multiprocessing with tqdm.
Parameters
----------
process_fun: A function that processes a single item.
arguments: Arguments with which te function is called.
max_workers: Max workers for the process pool executor.
Returns A list of results.
-------
"""
with ProcessPoolExecutor(max_workers=max_workers) as executor:
results = list(tqdm(executor.map(process_fun, arguments), total=len(arguments)))
return results
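# Example call (illustrative); the mapped function must be picklable, i.e.
# defined at module level, for ProcessPoolExecutor to work:
#
#   def square(x):
#       return x * x
#
#   results = multiprocess(square, [1, 2, 3, 4], max_workers=2)  # [1, 4, 9, 16]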
# Copyright 2016
# Drewan Tech, LLC
# ALL RIGHTS RESERVED
db_user = 'web_service_admin'
db_password = 'web_service_admin'
db_host = 'postgres'
db_port = '5432'
users_to_manage = {
    'random_matrix': {
        'authorized_databases': ['matrix_database'],
        'password': 'random_matrix',
    },
    'matrix_mult': {
        'authorized_databases': ['matrix_database'],
        'password': 'matrix_mult',
    },
}
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# (C) 2014 Arulalan.T <arulalant@gmail.com>
#
# This file is part of 'open-tamil/txt2ipa' package examples
#
import sys
sys.path.append("../..")
from tamil.txt2ipa.ipaconvert import ipa, broad
from tamil.txt2ipa.transliteration import tam2lat
text = "வணக்கம் தமிழகம் "
t1 = tam2lat(text)
t2 = " " + t1 + " "
t2 = ipa(t2)
t3 = broad(t2)
print("after tam2lat", t1)
print("after ipa", t2)
print("after broad", t3)
import tkinter as tk
import tkinter.messagebox as msg
import socket
import configparser
import threading
import time
import os
def warning(message):
    msg.showwarning("Warning", message)
def error(message, error=None):
    msg.showerror("Error", message)
    print(error)
class Server(socket.socket):
def __init__(self, host, port, max_connections, warning_disconnect=0, warning_connection_attempt=0):
self.connections = []
self.max_connections = max_connections
self.warning_disconnect = warning_disconnect
self.warning_connection_attempt = warning_connection_attempt
self.address = host, port
try:
super().__init__(socket.AF_INET, socket.SOCK_STREAM)
self.bind(self.address)
self.listen(max_connections)
except OSError as e:
error(f"{e}\n{':'.join(map(str, self.address))} - Этот адрес уже используется или IP не действительный\nПопробуйте сменить порт или IP")
print(f"{e}\n{':'.join(map(str, self.address))} - Этот адрес уже используется или IP не действительный")
def start(self):
while True:
connection, sockname = self.accept()
if len(self.connections) + 1 > self.max_connections:
connection.close()
if self.warning_connection_attempt:
warning(f"Попытка подключения, превышающее макс. кол-во подключений\nID({sockname[1]})")
continue
self.connections.append((connection, sockname[1]))
print(f"\nID({sockname[1]}) подключился")
def send_data(self, data):
for connection in self.connections:
try:
connection[0].send(data.encode("utf-8"))
except ConnectionError as e:
print(f"\n{e}\nID({connection[1]}) закрыл соединение")
self.connections.remove(connection)
if self.warning_disconnect:
warning(f"ID({connection[1]}) закрыл соединение")
continue
class App:
def __init__(self):
self.buffer_text = None
self.config = configparser.ConfigParser()
self.config.read(os.path.split(__file__)[0] + "/settings.conf")
try:
self.SHOW_WINDOW = int(self.config["APP"]["SHOW_WINDOW"])
self.SHOW_DATA = int(self.config["APP"]["SHOW_DATA"])
self.SERVER_PORT = int(self.config["SERVER"]["SERVER_PORT"])
self.SERVER_HOST = self.config["SERVER"]["SERVER_HOST"]
self.MAX_CONNECTIONS = int(self.config["CONNECTIONS"]["MAX_CONNECTIONS"])
self.STUDENT_DISCONNECTION = int(self.config["SHOW_WARNINGS"]["STUDENT_DISCONNECTION"])
self.CONNECTION_ATTEMPT = int(self.config["SHOW_WARNINGS"]["CONNECTION_ATTEMPT"])
        except KeyError as e:
            error("The settings.conf file is malformed or missing", error=e)
        except ValueError as e:
            error("Invalid parameter values in settings.conf", error=e)
self.server = Server(
self.SERVER_HOST,
self.SERVER_PORT,
max_connections=self.MAX_CONNECTIONS,
warning_disconnect=self.STUDENT_DISCONNECTION,
warning_connection_attempt=self.CONNECTION_ATTEMPT
)
self.root = tk.Tk()
self.root.withdraw()
self.root.resizable(False, False)
if self.SHOW_WINDOW:
self.root.deiconify()
self.root.title("App")
self.root.geometry(f"200x{100*self.SHOW_DATA}")
self.root.wm_attributes("-topmost", True)
if self.SHOW_DATA:
self.root.resizable(True, True)
            self.text_data = tk.Text(self.root, state="disabled")  # tk.Text has no 'text' option; content is set via insert()
self.text_data.config(bd=0, highlightthickness=0)
self.text_data.pack(expand=True, fill="both")
threading.Thread(target=self.check_buffer, daemon=True).start()
threading.Thread(target=self.server.start, daemon=True).start()
self.root.mainloop()
def check_buffer(self):
while True:
time.sleep(0.1)
try:
self.server.send_data("¤")
clipboard_text = self.root.clipboard_get()
if clipboard_text != self.buffer_text:
self.buffer_text = clipboard_text
self._send_buffer()
except tk.TclError:
continue
def _send_buffer(self):
self.buffer_text = self.root.clipboard_get()
self.server.send_data(self.buffer_text)
if self.SHOW_DATA:
self._show_data()
def _show_data(self):
self.text_data.configure(state="normal")
self.text_data.delete(1.0, "end")
self.text_data.insert(1.0, self.buffer_text)
self.text_data.configure(state="disabled")
if __name__ == "__main__":
try:
App()
    except KeyboardInterrupt:
        print("\nApplication forcibly stopped")
    except Exception as e:
        print(f"Error: {e}")
import pytest
from text_normalizer.tokenization import replace_bigrams
@pytest.mark.benchmark(group='ivr_convert')
def test_benchmark_replace_synonyms(benchmark, tokenize, benchmark_text):
tokens = list(tokenize(benchmark_text))
benchmark(lambda: list(replace_bigrams(tokens)))
from product import product
from company import company
from pathlib import Path
# Loading products info
products = []
with open(str(Path(__file__).resolve().parent) + "/products_list.txt", "r") as products_list_file:
    for p in products_list_file:
        p = p.replace("\n", "")
        p = p.split(",")
        products.append(product(p[0], float(p[1]), float(p[2])))
# Loading companies info
companies = []
with open(str(Path(__file__).resolve().parent) + "/companies_list.txt", "r") as companies_list_file:
    for c in companies_list_file:
        c = c.replace("\n", "")
        c = c.split(",")
        if len(c) == 6:
            companies.append(company(c[0], float(c[1]), float(c[2]), float(c[3]), float(c[4]), float(c[5])))
        else:
            companies.append(company(c[0], float(c[1]), float(c[2])))
# Showing results
for p in products:
print("Product info:")
print("Name: {}".format(p.get_name()))
print("Distance: {:.2f}".format(p.get_distance()))
print("Weight: {:.2f}".format(p.get_weight()))
print()
print("Budgets:")
for c in companies:
print("{}: R$ {:.2f}".format(c.get_name(), c.calculate_budget(p)))
print("---")
# Title: Trapping Rain Water
# Link: https://leetcode.com/problems/trapping-rain-water/
import sys
from heapq import heappop, heappush
sys.setrecursionlimit(10 ** 6)
class Solution():
def trap(self, heights: list) -> int:
water = 0
walls = []
for i, height in enumerate(heights):
last_level = 0
while walls:
left_height, left_index = heappop(walls)
if left_height <= height:
water += (i - left_index - 1) * (left_height - last_level)
last_level = left_height
else:
water += (i - left_index - 1) * (height - last_level)
heappush(walls, (left_height, left_index))
break
heappush(walls, (height, i))
return water
def main():
solution = Solution()
height = [4,2,0,3,2,5]
print(solution.trap(height))
if __name__ == '__main__':
main()
from __future__ import unicode_literals
from django.contrib import admin
from authtools.admin import NamedUserAdmin
from .models import Profile, TokenFirebase
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from import_export.admin import ImportExportModelAdmin
from import_export import resources
User = get_user_model()
class UserProfileInline(admin.StackedInline):
model = Profile
#model = Persona
class NewUserAdmin(NamedUserAdmin):
inlines = [UserProfileInline]
list_display = ('is_active', 'email', 'name', 'permalink',
'is_superuser', 'is_staff',)
# 'View on site' didn't work since the original User model needs to
# have get_absolute_url defined. So showing on the list display
# was a workaround.
def permalink(self, obj):
url = reverse("profiles:show",
kwargs={"slug": obj.profile.slug})
# Unicode hex b6 is the Pilcrow sign
return '<a href="{}">{}</a>'.format(url, '\xb6')
permalink.allow_tags = True
class ProfileResource(resources.ModelResource):
class Meta:
model = Profile
exclude = ('id',)
import_id_fields = ('id_persona',)
skip_unchanged = True
fields = ['id_persona', 'email_verified', 'nombre', 'segundo_nombre', 'apellido_pa', 'apellido_ma', 'tipo_documento',
'numero_documento', 'sexo', 'correo', 'fecha_nac']
class ProfileAdmin(ImportExportModelAdmin):
resource_class = ProfileResource
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(TokenFirebase)
from decimal import Decimal
from django.apps import apps
from rest_framework import serializers
from rest_flex_fields import FlexFieldsModelSerializer
from ....checkout.utils import get_taxes_for_checkout
from ....glovo.utils import glovo_get_lowest_price
from ....runningbox.utils import runningbox_order_estimate
from ...fields import MoneyField, TaxedMoneyField
from ..shipping_method import ShippingMethodSerializer
from .checkout_line import CheckoutLineSerializer
from .glovo_order import GlovoOrderSerializer
from .runningbox_order import RunningBoxOrderSerializer
__all__ = [
'CheckoutSerializer',
]
Checkout = apps.get_model(*'checkout.Checkout'.split())
CheckoutLine = apps.get_model(*'checkout.CheckoutLine'.split())
Address = apps.get_model(*'account.Address'.split())
ShippingMethod = apps.get_model(*'shipping.ShippingMethod'.split())
PhysicalStore = apps.get_model(*'store.PhysicalStore'.split())
GlovoOrder = apps.get_model(*'glovo.GlovoOrder'.split())
RunningBoxOrder = apps.get_model(*'runningbox.RunningBoxOrder'.split())
class CheckoutSerializer(FlexFieldsModelSerializer):
"""Serializer for :model:`checkout.Checkout`:
`**Fields:**`
01. `billing_address` : `ForeignKey` [:model:`account.Address`]
02. `created` : `DateTimeField`
03. `discount_amount` : `DecimalField`
04. `discount_name` : `CharField`
05. `email` : `CharField`
06. `last_change` : `DateTimeField`
07. `note` : `TextField`
08. `quantity` : `PositiveIntegerField`
09. `shipping_address` : `ForeignKey` [:model:`account.Address`]
10. `shipping_method` : `ForeignKey` [:model:`shipping.ShippingMethod`]
11. `token` : `UUIDField`
12. `translated_discount_name` : `CharField`
13. `user` : `ForeignKey` [:model:`account.User`]
14. `voucher_code` : `CharField`
**Reverse Fields:**
01. `lines` : `ForeignKey` [:model:`checkout.CheckoutLine`]
02. `payments` : `ForeignKey` [:model:`payment.Payment`]
"""
lines = serializers.PrimaryKeyRelatedField(
queryset=CheckoutLine.objects.all(),
allow_null=False,
required=False,
many=True,
)
shipping_address = serializers.PrimaryKeyRelatedField(
queryset=Address.objects.all(),
allow_null=True,
required=True
)
billing_address = serializers.PrimaryKeyRelatedField(
queryset=Address.objects.all(),
allow_null=True,
required=False
)
glovo_order = serializers.PrimaryKeyRelatedField(
queryset=GlovoOrder.objects.all(),
allow_null=True,
required=False
)
runningbox_order = serializers.PrimaryKeyRelatedField(
queryset=RunningBoxOrder.objects.all(),
allow_null=True,
required=False
)
discount_amount = MoneyField()
total = serializers.SerializerMethodField()
subtotal = serializers.SerializerMethodField()
shipping_price = serializers.SerializerMethodField()
applicable_shipping_methods = serializers.SerializerMethodField()
expandable_fields = {
'lines': (
CheckoutLineSerializer, {
'fields': [
'id',
'quantity',
'variant',
],
'many': True
}
),
'shipping_method': (
ShippingMethodSerializer, {
'fields': [
'id',
'name',
'price',
]
}
),
'glovo_order': (
GlovoOrderSerializer, {
'fields': [
'id',
'price',
]
}
),
        'runningbox_order': (
            RunningBoxOrderSerializer, {
'fields': [
'id',
'price',
]
}
)
}
class Meta:
model = Checkout
fields = [
# Fields
'token',
'created',
'user',
'email',
'quantity',
'voucher_code',
'discount_name',
'discount_amount',
'shipping_type',
'shipping_address',
'shipping_method',
'billing_type',
'billing_address',
'note',
# 'last_change',
# 'translated_discount_name',
# Reverse Fields
'lines',
# 'payments',
'glovo_order',
'runningbox_order',
# other fields
'subtotal',
'total',
'shipping_price',
'applicable_shipping_methods',
]
read_only_fields = []
# def create(self, validated_data):
# return super().create(validated_data)
# def update(self, instance, validated_data):
# return super().update(instance, validated_data)
def get_subtotal(self, obj):
discounts = None
taxes = None
context = self.context.get('request', None)
if context is not None:
discounts = context.discounts
taxes = context.taxes
subtotal = obj.get_subtotal(discounts, taxes)
return TaxedMoneyField().to_representation(subtotal)
def get_shipping_price(self, obj):
taxes = None
context = self.context.get('request', None)
if context is not None:
taxes = context.taxes
shipping_price = obj.get_shipping_price(taxes)
return TaxedMoneyField().to_representation(shipping_price)
def get_total(self, obj):
discounts = None
taxes = None
context = self.context.get('request', None)
if context is not None:
discounts = context.discounts
taxes = context.taxes
total = obj.get_total(discounts, taxes)
return TaxedMoneyField().to_representation(total)
def get_applicable_shipping_methods(self, obj):
if obj.shipping_address is None:
return None
        request = self.context.get('request', None)
        discounts = None
        taxes = None
        if request is not None:
            discounts = request.discounts
            taxes = get_taxes_for_checkout(obj, request.taxes)
        # country_code = obj.shipping_address.country.code
        shipping_methods = ShippingMethod.objects.applicable_shipping_methods(
            price=obj.get_subtotal(discounts, taxes).gross,
            weight=obj.get_total_weight(),
            address=obj.shipping_address
        )
        shipping_methods = ShippingMethodSerializer(shipping_methods, many=True)
        shipping_methods = shipping_methods.data
        stores = PhysicalStore.objects.filter(
            glovo_delivery_permission__glovo_enabled=True)
        if stores.exists():
            if getattr(obj.shipping_address, 'position', None):
                glovo_shipping_method = glovo_get_lowest_price(
                    stores, obj.shipping_address)
                if glovo_shipping_method is not None:
                    glovo_shipping_method['price']['amount'] = Decimal(
                        str(glovo_shipping_method['price']['amount'] / 100))
                    glovo_shipping_method['name'] = 'Glovo'
                    glovo_shipping_method['id'] = 'shipping-with-glovo'
                    shipping_methods.append(glovo_shipping_method)
        stores = PhysicalStore.objects.filter(
            runningbox_delivery_permission__runningbox_enabled=True)
        if stores.exists():
            if getattr(obj.shipping_address, 'ubigeo', None):
                runningbox_shipping_method = runningbox_order_estimate(
                    obj.get_total_weight().value,
                    obj.shipping_address.ubigeo,
                    'EXPRESS'
                )
                if runningbox_shipping_method is not None:
                    runningbox_shipping_method['name'] = 'RunningBox'
                    runningbox_shipping_method['id'] = 'shipping-with-runningbox'
                    shipping_methods.append(runningbox_shipping_method)
        return shipping_methods
import requests
import json
import re
class RestApi(object):
# base_url example http://aaa.co.com/webhdfs
def __init__(self, base_url, username, password):
self.name = "nhso core api" + base_url
self.base_url = base_url
self.username = username
self.password = password
self.token = ""
def __check_over_retry(self, retry):
if retry < 0:
raise Exception("Retry lost")
    # On error, retry again up to the given number of retries
def __request_retry(self, http_method, url, headers, data, retry=5):
try:
return requests.request(http_method, url, headers=headers, data=data)
except Exception as ex:
if retry <= 0:
raise ex
else:
print("Req retry " + str(retry) + " " + url)
return self.__request_retry(http_method, url, headers, data, retry - 1)
    # On error, retry again up to the given number of retries
def __request_retry_file(self, http_method, url, headers, data, file, retry=5):
try:
return requests.request(
http_method, url, headers=headers, data=data, files=file
)
except Exception as ex:
if retry <= 0:
raise ex
else:
print("Req retry " + str(retry) + " " + url)
return self.__request_retry_file(
http_method, url, headers, data, file, retry - 1
)
    # Used to request a token for the user
# return token
def __auth_core(self):
api_url = self.base_url + "/auth-jwt"
print(api_url)
payload = json.dumps({"username": self.username, "password": self.password})
headers = {"Content-Type": "application/json"}
response = self.__request_retry("POST", api_url, headers=headers, data=payload)
status = response.status_code
if status == 200:
token = response.json()["token"]
return token
else:
raise Exception(api_url + " code " + str(status))
    # Check whether the token is still valid
# return bool
def __verify_token_core(self):
api_url = self.base_url + "/auth-jwt-verify"
payload = json.dumps({"token": self.token})
headers = {"Content-Type": "application/json"}
response = self.__request_retry("POST", api_url, headers=headers, data=payload)
status = response.status_code
print(api_url + " status code " + str(status))
if status == 200:
return True
else:
return False
    # Verify the token first; if verification fails, request a new token
def __auth(self):
verify = self.__verify_token_core()
if verify == False:
self.token = self.__auth_core()
    # List files
def __list_file(self, dir_parth, retry=3):
self.__check_over_retry(retry)
api_url = self.base_url + "/v1/" + dir_parth + "?op=LISTSTATUS"
print(api_url + " deep:" + str(retry))
payload = {}
headers = {"Authorization": "JWT " + self.token}
response = self.__request_retry("GET", api_url, headers=headers, data=payload)
status = response.status_code
if status == 200:
return response.json()
elif status == 401:
self.__auth()
return self.__list_file(dir_parth, retry - 1)
else:
raise Exception(api_url + " code " + str(status))
def list_file(self, dir_parth):
return self.__list_file(dir_parth, 5)
    # Create directories like 'mkdir -p'; no return value
def __mkdirs(self, dir_parth, retry=3):
self.__check_over_retry(retry)
api_url = self.base_url + "/v1/" + dir_parth + "?op=MKDIRS"
print(api_url + " deep:" + str(retry))
payload = {}
headers = {"Authorization": "JWT " + self.token}
response = self.__request_retry("PUT", api_url, headers=headers, data=payload)
status = response.status_code
# if status != 200:
# raise Exception(api_url + " code " + str(status))
if status == 401:
self.__auth()
self.__mkdirs(dir_parth, retry - 1)
def mkdirs(self, dir_parth):
self.__mkdirs(dir_parth, 5)
    # Check whether the given file or directory exists
def exists(self, dir_or_file_parth):
print("call Check exists file")
try:
self.list_file(dir_or_file_parth)
print("Check exists file true")
return True
except:
print("Check exists file false")
return False
def __move_file_and_rename(self, source_path, destination_path, retry=3):
self.__check_over_retry(retry)
api_url = (
self.base_url
+ "/v1/"
+ source_path
+ "?op=RENAME&destination=/"
+ destination_path
)
print(api_url + " deep:" + str(retry))
payload = {}
headers = {"Authorization": "JWT " + self.token}
response = self.__request_retry("PUT", api_url, headers=headers, data=payload)
status = response.status_code
if status == 401:
self.__auth()
self.__move_file_and_rename(source_path, destination_path, retry - 1)
def move_file_and_rename(self, source_path, destination_path):
self.__move_file_and_rename(source_path, destination_path, 5)
def __delete(self, dir_or_file_parth, retry=3):
self.__check_over_retry(retry)
api_url = self.base_url + "/v1/" + dir_or_file_parth + "?op=DELETE"
print(api_url + " deep:" + str(retry))
payload = {}
headers = {"Authorization": "JWT " + self.token}
response = self.__request_retry(
"DELETE", api_url, headers=headers, data=payload
)
status = response.status_code
if status == 401:
            # 401 Unauthorized
self.__auth()
self.__delete(dir_or_file_parth, retry - 1)
elif status == 500:
            # 500: a file or folder still exists and cannot be deleted
raise Exception(api_url + " code " + str(status))
elif status == 200:
            # File not found: 200 with {"boolean": false}
pass
def delete(self, dir_or_file_parth):
self.__delete(dir_or_file_parth)
    # Extract the file name from the full path
def __get_file_name(self, full_parth):
p = re.compile("/?.+/(.+)$")
return p.match(full_parth).groups()[0]
    # Upload a file
def __upload_and_overwrite(self, local_file_path, nhso_file_path, retry=3):
self.__check_over_retry(retry)
        self.__auth()  # called upfront because this is a slow function
api_url = self.base_url + "/v1/" + nhso_file_path + "?op=CREATE"
print(api_url + " deep:" + str(retry))
filename = self.__get_file_name(local_file_path)
payload = {}
headers = {"Authorization": "JWT " + self.token}
files = [
(
"file",
(
filename,
open(local_file_path, "rb"),
"application/octet-stream",
),
)
]
response = self.__request_retry_file(
"PUT", api_url, headers=headers, data=payload, file=files
)
status = response.status_code
if status == 401:
            # 401 Unauthorized
self.__auth()
self.__upload_and_overwrite(local_file_path, nhso_file_path, retry - 1)
def upload_and_overwrite(self, local_file_path, nhso_file_path):
self.__upload_and_overwrite(local_file_path, nhso_file_path, 3)
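# A minimal usage sketch (hypothetical endpoint and credentials):
#
#   api = RestApi("http://example.org/webhdfs", "user", "secret")
#   api.mkdirs("tmp/incoming")
#   api.upload_and_overwrite("/local/report.csv", "tmp/incoming/report.csv")
#   print(api.list_file("tmp/incoming"))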
import threading
import time
import socket
import sys
import copy
import pprint
pp = pprint.PrettyPrinter(indent=2)
# global variables
turn = 1
convergence = 0
round = 1
update_occured = 0
nodes = {
"0" : {"name": "A", "index": 0, "port": 10000, "update": 1},
"1" : {"name": "B", "index": 1, "port": 10001, "update": 1},
"2" : {"name": "C", "index": 2, "port": 10002, "update": 1},
"3" : {"name": "D", "index": 3, "port": 10003, "update": 1},
"4" : {"name": "E", "index": 4, "port": 10004, "update": 1}
}
updates = [1,1,1,1,1]
final_output = {"0":None, "1":None, "2":None, "3":None, "4":None}
final_round = 0
def set_next(next):
global turn
turn = next
def set_unset_update(value):
global update_occured
update_occured = value
def server_thread_task(port, old_dv_matrix, updated_dv_matrix, node_index):
global round
global final_round
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the port
server_address = ('localhost', port)
#print('node %d server thread : starting up on %s port %s' % (node_index, server_address[0], server_address[1]))
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
last_updated_dv_matrix = copy.deepcopy(updated_dv_matrix)
while True:
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(1024)
if data:
message = data.decode()
from_node_index = int(message.split(":")[0])
received_dv_estimate = message.split(":")[1].split(",")
receiver = nodes[str(node_index)]["name"]
sender = nodes[str(from_node_index)]["name"]
print("Node %s received DV from %s" % (receiver, sender))
for i in range(len(received_dv_estimate)):
received_dv_estimate[i] = int(received_dv_estimate[i])
#------------update neighbor's row-------------------
updated_dv_matrix[from_node_index] = received_dv_estimate
#print(updated_dv_matrix[from_node_index])
#------------recalculate own dv estimate-------------
self_row = updated_dv_matrix[node_index]
for i in range(len(self_row)):
if(i != node_index):
new_value = updated_dv_matrix[from_node_index][node_index] + updated_dv_matrix[from_node_index][i]
existing_value = updated_dv_matrix[node_index][i]
updated_dv_matrix[node_index][i] = min(new_value, existing_value)
#----------check if DV estimate is different---------
if(updated_dv_matrix[node_index] == last_updated_dv_matrix[node_index]):
print("No change in DV at node %s" % (receiver))
else:
updates[node_index] = 1
print("Updating DV matrix at node %s" % (receiver))
print("New DV matrix at node %s = " % (receiver))
pp.pprint(updated_dv_matrix)
last_updated_dv_matrix = copy.deepcopy(updated_dv_matrix)
final_round = round
#-----------sending data back to the client----------
connection.sendall(data)
else:
break
finally:
# Clean up the connection
connection.close()
def send_dv_to_neighbor(neighbor_name, port, fromNodeIndex, message):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = ('localhost', port)
#print('connecting to %s port %s' % server_address)
sock.connect(server_address)
try:
# Send data
print("\nSending DV to node %s" % (neighbor_name))
sock.sendall(message.encode())
# response from server
response = sock.recv(1024).decode()
#print('received "%s"' % response)
finally:
#print('closing socket')
sock.close()
#print(response)
return response
def init_nxn_matrix(n):
initial_nxn_matrix = []
for i in range(n):
row = []
for j in range(n):
row.append(999)
initial_nxn_matrix.append(row)
return initial_nxn_matrix
def populate_nxn_matrix(updated_dv_matrix, node_index, neighbor_info):
for i in range(len(updated_dv_matrix[node_index])):
if(i in neighbor_info.keys()):
updated_dv_matrix[node_index][i] = neighbor_info[i]
updated_dv_matrix[node_index][node_index] = 0
def create_server_thread(port, old_dv_matrix, updated_dv_matrix, node_index):
server_thread = threading.Thread(target=server_thread_task, args=(port, old_dv_matrix, updated_dv_matrix, node_index), daemon=True)
server_thread.start()
time.sleep(2)
def create_dv_msg(updated_dv_matrix, node_index):
weight_list = []
for value in updated_dv_matrix[node_index]:
weight_list.append(str(value))
delimeter = ","
dv_msg = str(node_index) + ":" + delimeter.join(weight_list)
return dv_msg
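# Example (derived from the format above): node 0 with DV row
# [0, 2, 5, 999, 1] produces the message "0:0,2,5,999,1".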
def send_update(node_index, neighbor_info, updated_dv_matrix):
dv_msg = create_dv_msg(updated_dv_matrix, node_index)
neighbors = []
for key in neighbor_info.keys():
neighbors.append(key)
neighbors.sort()
#---------------
bigger = []
smaller = []
for value in neighbors:
if value > node_index:
bigger.append(value)
else:
smaller.append(value)
neighbors_list = bigger + smaller
#---------------
i = 0
while i < len(neighbors_list):
neighbor_port = nodes[str(neighbors_list[i])]["port"]
neighbor_name = nodes[str(neighbors_list[i])]["name"]
response = send_dv_to_neighbor(neighbor_name, neighbor_port, node_index, dv_msg)
if(response):
i += 1
def print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round):
print("---------------------------------------------------------------------")
print("Round %d : %s" % (round, nodes[str(node_index)]["name"]))
#print("Current DV matrix = ", str(updated_dv_matrix))
print("Current DV matrix = ")
pp.pprint(updated_dv_matrix)
#print("Last DV matrix = ", str(old_dv_matrix))
print("Last DV matrix = ")
pp.pprint(old_dv_matrix)
def node_thread(lock, node_index, port, neighbor_info):
global turn
global convergence
global round
global final_round
print("node %s started" % nodes[str(node_index)]["name"])
#initialize nxn matrix
old_dv_matrix = init_nxn_matrix(5)
#populate nxn matrix with neighbor info
populate_nxn_matrix(old_dv_matrix, node_index, neighbor_info)
updated_dv_matrix = copy.deepcopy(old_dv_matrix)
#--------------server thread-------------------------
create_server_thread(port, old_dv_matrix, updated_dv_matrix, node_index)
#--------------server thread-------------------------
while True:
lock.acquire()
if(turn == 1 and node_index == 0):
if(convergence == 1):
#print('nothing to do %s' % nodes[str(node_index)]["name"])
set_next(2)
final_output["0"] = updated_dv_matrix
lock.release()
break
print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round)
if(updates[node_index] == 1):
print("Updated from last DV matrix or the same? Updated")
#------------send data to neighbors----------------
send_update(node_index, neighbor_info, updated_dv_matrix)
#------------update 'update' flag------------------
updates[node_index] = 0
#-----------update the old dv----------------------
old_dv_matrix = copy.deepcopy(updated_dv_matrix)
else:
print("Updated from last DV matrix or the same? Same")
if(sum(updates) == 0):
convergence = 1
set_next(2)
final_output["0"] = updated_dv_matrix
lock.release()
break
else:
set_next(2)
round += 1
time.sleep(1)
if(turn == 2 and node_index == 1):
if(convergence == 1):
#print('nothing to do %s' % nodes[str(node_index)]["name"])
set_next(3)
final_output["1"] = updated_dv_matrix
lock.release()
break
print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round)
if(updates[node_index] == 1):
print("Updated from last DV matrix or the same? Updated")
#------------send data to neighbors----------------
send_update(node_index, neighbor_info, updated_dv_matrix)
#------------update 'update' flag------------------
updates[node_index] = 0
#-----------update the old dv----------------------
old_dv_matrix = copy.deepcopy(updated_dv_matrix)
else:
print("Updated from last DV matrix or the same? Same")
if(sum(updates) == 0):
convergence = 1
set_next(3)
final_output["1"] = updated_dv_matrix
lock.release()
break
else:
set_next(3)
round += 1
time.sleep(1)
if(turn == 3 and node_index == 2):
if(convergence == 1):
#print('nothing to do %s' % nodes[str(node_index)]["name"])
set_next(4)
final_output["2"] = updated_dv_matrix
lock.release()
break
print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round)
if(updates[node_index] == 1):
print("Updated from last DV matrix or the same? Updated")
#------------send data to neighbors----------------
send_update(node_index, neighbor_info, updated_dv_matrix)
#------------update 'update' flag------------------
updates[node_index] = 0
#-----------update the old dv----------------------
old_dv_matrix = copy.deepcopy(updated_dv_matrix)
else:
print("Updated from last DV matrix or the same? Same")
if(sum(updates) == 0):
convergence = 1
set_next(4)
final_output["2"] = updated_dv_matrix
lock.release()
break
else:
set_next(4)
round += 1
time.sleep(1)
if(turn == 4 and node_index == 3):
if(convergence == 1):
#print('nothing to do %s' % nodes[str(node_index)]["name"])
set_next(5)
final_output["3"] = updated_dv_matrix
lock.release()
break
print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round)
if(updates[node_index] == 1):
print("Updated from last DV matrix or the same? Updated")
#------------send data to neighbors----------------
send_update(node_index, neighbor_info, updated_dv_matrix)
#------------update 'update' flag------------------
updates[node_index] = 0
#-----------update the old dv----------------------
old_dv_matrix = copy.deepcopy(updated_dv_matrix)
else:
print("Updated from last DV matrix or the same? Same")
if(sum(updates) == 0):
convergence = 1
set_next(5)
final_output["3"] = updated_dv_matrix
lock.release()
break
else:
set_next(5)
round += 1
time.sleep(1)
if(turn == 5 and node_index == 4):
if(convergence == 1):
#print('nothing to do %s' % nodes[str(node_index)]["name"])
set_next(1)
final_output["4"] = updated_dv_matrix
lock.release()
break
print_node_current_old_dv(node_index, updated_dv_matrix, old_dv_matrix, round)
if(updates[node_index] == 1):
print("Updated from last DV matrix or the same? Updated")
#------------send data to neighbors----------------
send_update(node_index, neighbor_info, updated_dv_matrix)
#------------update 'update' flag------------------
updates[node_index] = 0
#-----------update the old dv----------------------
old_dv_matrix = copy.deepcopy(updated_dv_matrix)
else:
print("Updated from last DV matrix or the same? Same")
if(sum(updates) == 0):
convergence = 1
set_next(1)
final_output["4"] = updated_dv_matrix
lock.release()
break
else:
set_next(1)
round += 1
time.sleep(1)
lock.release()
def get_adjacency_matrix():
file_name = "network1.txt"
adjacency_matrix = []
with open(file_name) as fp:
lines = fp.readlines()
for line in lines:
row = line.strip().split()
for i in range(len(row)):
row[i] = int(row[i])
adjacency_matrix.append(row)
return adjacency_matrix
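# Illustrative network1.txt for this 5-node demo (the actual file is not
# shown here). Whitespace-separated link weights, one row per node, with 0
# meaning "no direct link":
# 0 2 0 0 1
# 2 0 3 0 0
# 0 3 0 1 0
# 0 0 1 0 4
# 1 0 0 4 0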
def get_neighbor_info_list(adjacency_matrix):
neighbor_info_list = []
for node_index in range(len(adjacency_matrix)):
neighbor_info = {}
for adj_node_index in range(len(adjacency_matrix[node_index])):
if(adjacency_matrix[node_index][adj_node_index] != 0):
neighbor_info[adj_node_index] = adjacency_matrix[node_index][adj_node_index]
neighbor_info_list.append(neighbor_info)
return neighbor_info_list
def main_task():
#adjacency matrix & neighbor info
adjacency_matrix= get_adjacency_matrix()
neighbor_info_list = get_neighbor_info_list(adjacency_matrix)
#print(neighbor_info_list)
#node index
nodeA_index = nodes["0"]["index"]
nodeB_index = nodes["1"]["index"]
nodeC_index = nodes["2"]["index"]
nodeD_index = nodes["3"]["index"]
nodeE_index = nodes["4"]["index"]
#node ports
nodeA_port = nodes["0"]["port"]
nodeB_port = nodes["1"]["port"]
nodeC_port = nodes["2"]["port"]
nodeD_port = nodes["3"]["port"]
nodeE_port = nodes["4"]["port"]
# creating a lock
lock = threading.Lock()
# creating threads
thread_A = threading.Thread(target=node_thread, args=(lock, nodeA_index, nodeA_port, neighbor_info_list[0]))
thread_B = threading.Thread(target=node_thread, args=(lock, nodeB_index, nodeB_port, neighbor_info_list[1]))
thread_C = threading.Thread(target=node_thread, args=(lock, nodeC_index, nodeC_port, neighbor_info_list[2]))
thread_D = threading.Thread(target=node_thread, args=(lock, nodeD_index, nodeD_port, neighbor_info_list[3]))
thread_E = threading.Thread(target=node_thread, args=(lock, nodeE_index, nodeE_port, neighbor_info_list[4]))
# start threads
thread_A.start()
thread_B.start()
thread_C.start()
thread_D.start()
thread_E.start()
# wait until threads finish their job
thread_A.join()
thread_B.join()
thread_C.join()
thread_D.join()
thread_E.join()
#final output
print("---------------------------------------------------------------------")
print("Final output: \n")
print("Node A DV = ")
pp.pprint(final_output["0"])
print("Node B DV = ")
pp.pprint(final_output["1"])
print("Node C DV = ")
pp.pprint(final_output["2"])
print("Node D DV = ")
pp.pprint(final_output["3"])
print("Node E DV = ")
pp.pprint(final_output["4"])
print("\nNumber of rounds till convergence (Round # when one of the nodes last updated its DV) = %d" % (final_round))
if __name__ == "__main__":
main_task()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""Setup script for the package."""
import os
import sys
import setuptools
PACKAGE_NAME = 'api'
MINIMUM_PYTHON_VERSION = 3, 6
def check_python_version():
"""Exit when the Python version is too low."""
if sys.version_info < MINIMUM_PYTHON_VERSION:
sys.exit("Python {}.{}+ is required.".format(*MINIMUM_PYTHON_VERSION))
def read_package_variable(key):
"""Read the value of a variable from the package without importing."""
module_path = os.path.join(PACKAGE_NAME, '__init__.py')
with open(module_path) as module:
for line in module:
parts = line.strip().split(' ')
if parts and parts[0] == key:
return parts[-1].strip("'")
assert 0, "'{0}' not found in '{1}'".format(key, module_path)
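# read_package_variable expects simple top-level assignments in
# api/__init__.py, e.g. (illustrative values):
# __project__ = 'coverage-space'
# __version__ = '1.0.0'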
def read_descriptions():
"""Build a description for the project from documentation files."""
try:
readme = open("README.rst").read()
changelog = open("CHANGELOG.rst").read()
except IOError:
return "<placeholder>"
else:
return readme + '\n' + changelog
check_python_version()
setuptools.setup(
name=read_package_variable('__project__'),
version=read_package_variable('__version__'),
description="A place to track your code coverage metrics.",
url='https://github.com/jacebrowning/coverage-space',
author='Jace Browning',
author_email='jacebrowning@gmail.com',
packages=setuptools.find_packages(),
entry_points={'console_scripts': []},
long_description=read_descriptions(),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Flask',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
)
|
nilq/baby-python
|
python
|
# Back compatibility -- use broad subdirectory for new code
from bcbio.broad.metrics import *
|
nilq/baby-python
|
python
|
import copy
import torch
import numpy as np
from PIL import Image
from torchvision import transforms
class BlackBoxAttack(object):
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
def __init__(self, model, input_size=224, epsilon=16, num_iters=10000,
early_stopping=False, use_cuda=False, random_state=None):
'''__INIT__
reference:
Guo C, Gardner J R, You Y, et al.
Simple black-box adversarial attacks[J].
arXiv preprint arXiv:1905.07121, 2019.
model: model instance or list of model instances
        input_size: int, size of input tensor to model
epsilon: int, limit on the perturbation size
num_iters: int, number of iterations
early_stopping: bool, if True, stop at once if
adversarial image has been found
use_cuda: bool, True or False, whether to use GPU
random_state: int or None, for reproducing
'''
self.num_iters = num_iters
self.epsilon = epsilon
# self.epsilon = epsilon / 255
self.early_stopping = early_stopping
self.use_cuda = torch.cuda.is_available() and use_cuda
self.nbits = int(np.ceil(np.log10(num_iters)) + 1)
self.preprocess = transforms.Compose([
transforms.Resize(input_size),
transforms.CenterCrop(input_size),
transforms.ToTensor(),
transforms.Normalize(self.MEAN, self.STD),
])
if not isinstance(model, list):
model = [model]
model = [copy.deepcopy(m) for m in model]
for m in model:
m.eval()
if self.use_cuda:
m.cuda()
self.model = model
if random_state is not None:
np.random.seed(seed=random_state)
return
def __call__(self, image_path, label, target=False):
'''__CALL__
image_path: string, path of uint8 input image
label: int, the true label of input image if target is False,
the target label to learn if target is True
target: bool, if True, perform target adversarial attack;
if False, perform non-target adversarial attack
'''
self.target = target
src_image = np.array(Image.open(image_path))
adv_image = self.forward(src_image, label)
return adv_image.astype(np.uint8)
def forward(self, src_image, label):
image = src_image.copy().astype(float)
n_dims = len(image.flatten())
perm = np.random.permutation(n_dims)
last_prob, _ = self.__predict(image, label)
is_better = np.greater if self.target else np.less
num_iters = min([self.num_iters, len(perm)])
for i in range(num_iters):
diff = np.zeros((n_dims))
diff[perm[i]] = self.epsilon
diff = diff.reshape(image.shape)
left_image = np.clip(image - diff, 0.0, 255.0)
left_prob, is_stop = self.__predict(left_image, label)
if is_stop or is_better(left_prob, last_prob):
image = left_image.copy()
last_prob = left_prob
if is_stop:
break
else:
right_image = np.clip(image + diff, 0.0, 255.0)
right_prob, is_stop = self.__predict(right_image, label)
if is_stop or is_better(right_prob, last_prob):
image = right_image.copy()
last_prob = right_prob
if is_stop:
break
iter_msg = '[Running]-[Step:{}/{}]-[Prob:{:.6f}]'
print(iter_msg.format(i + 1, num_iters, last_prob), end='\r')
iter_msg = '\n[Stopped]-[Step:{}/{}]-[Prob:{:.6f}]'
print(iter_msg.format(i + 1, num_iters, last_prob))
return image
def __predict(self, image, label):
def get_prob(model, image_norm):
pred = model(image_norm)
probs = torch.softmax(pred, dim=1)
probs = probs.data.cpu().detach().numpy().flatten()
pred = np.argmax(probs)
return probs[label], pred
image_norm = self.__norm(image)
if self.use_cuda:
image_norm = image_norm.cuda()
prob_preds = [get_prob(model, image_norm) for model in self.model]
probs = [item[0] for item in prob_preds]
prob = min(probs) if self.target else max(probs)
preds = [item[1] for item in prob_preds]
is_stop = False
if self.early_stopping:
if self.target and preds.count(label) == len(preds):
is_stop = True
elif (not self.target) and preds.count(label) == 0:
is_stop = True
return prob, is_stop
def __norm(self, image):
image_cp = Image.fromarray(image.astype(np.uint8))
image_norm = self.preprocess(image_cp)
image_norm = image_norm.unsqueeze(0)
return image_norm
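# Minimal usage sketch (illustrative): the classifier and image file below
# are assumptions, not part of this module. Runs a non-target SimBA-style
# attack and saves the resulting adversarial image.
if __name__ == '__main__':
    import torchvision.models as models
    classifier = models.resnet18(pretrained=True)
    attack = BlackBoxAttack(classifier, input_size=224, epsilon=16,
                            num_iters=1000, early_stopping=True)
    adv = attack('example.jpg', label=207, target=False)  # hypothetical inputs
    Image.fromarray(adv).save('example_adv.jpg')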
|
nilq/baby-python
|
python
|
import RPi.GPIO
import sys
import random
sys.path.append("../../")
from gfxlcd.driver.nju6450.gpio import GPIO
from gfxlcd.driver.nju6450.nju6450 import NJU6450
RPi.GPIO.setmode(RPi.GPIO.BCM)
def hole(o, x, y):
o.draw_pixel(x+1, y)
o.draw_pixel(x+2, y)
o.draw_pixel(x+3, y)
o.draw_pixel(x+1, y + 4)
o.draw_pixel(x+2, y + 4)
o.draw_pixel(x+3, y + 4)
o.draw_pixel(x, y + 1)
o.draw_pixel(x+4, y + 1)
o.draw_pixel(x, y + 2)
o.draw_pixel(x+4, y + 2)
o.draw_pixel(x, y + 3)
o.draw_pixel(x+4, y + 3)
def draw_points(o):
for _ in range(0, 50):
hole(o, random.randint(2, o.width-10), random.randint(2, o.height-10))
def draw_net(o):
s = 0
while s < o.width-1:
o.draw_line(s, 0, s, o.height-1)
s += 10
s = 0
while s < o.height-1:
o.draw_line(0, s, o.width-1, s)
s += 10
lcd_nju = NJU6450(122, 32, GPIO())
lcd_nju.init()
lcd_nju.auto_flush = False
lcd_nju.draw_circle(60, 15, 15)
lcd_nju.draw_circle(53, 10, 3)
lcd_nju.draw_circle(67, 10, 3)
lcd_nju.draw_arc(60, 15, 10, 45, 135)
lcd_nju.draw_line(60, 12, 57, 17)
lcd_nju.draw_line(60, 12, 63, 17)
lcd_nju.draw_arc(60, 15, 3, 45, 135)
lcd_nju.fill_rect(2, 2, 42, 29)
lcd_nju.fill_rect(119, 2, 109, 12)
lcd_nju.fill_rect(119, 17, 109, 19)
lcd_nju.draw_rect(77, 6, 105, 16)
lcd_nju.fill_rect(77, 16, 105, 25)
lcd_nju.flush(True)
|
nilq/baby-python
|
python
|
from utils.functions import get_env
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'HOST': get_env("POSTGRES_HOST", "db"),
'PORT': get_env("POSTGRES_PORT", "5432"),
'NAME': get_env("POSTGRES_DB"),
'USER': get_env("POSTGRES_USER"),
'PASSWORD': get_env("POSTGRES_PASSWORD")
}
}
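# Expected environment for this settings module (illustrative values):
# POSTGRES_HOST=db POSTGRES_PORT=5432 POSTGRES_DB=app
# POSTGRES_USER=app POSTGRES_PASSWORD=secret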
|
nilq/baby-python
|
python
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import utilities: Utilities related to imports and our lazy inits.
"""
import importlib.util
import json
import os
import sys
from collections import OrderedDict
from functools import wraps
from itertools import chain
from types import ModuleType
from typing import Any
from packaging import version
from transformers.utils.versions import importlib_metadata
from . import logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})
USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
_torch_version = "N/A"
if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
_torch_available = importlib.util.find_spec("torch") is not None
if _torch_available:
try:
_torch_version = importlib_metadata.version("torch")
logger.info(f"PyTorch version {_torch_version} available.")
except importlib_metadata.PackageNotFoundError:
_torch_available = False
else:
logger.info("Disabling PyTorch because USE_TF is set")
_torch_available = False
_tf_version = "N/A"
if USE_TF in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TORCH not in ENV_VARS_TRUE_VALUES:
_tf_available = importlib.util.find_spec("tensorflow") is not None
if _tf_available:
candidates = (
"tensorflow",
"tensorflow-cpu",
"tensorflow-gpu",
"tf-nightly",
"tf-nightly-cpu",
"tf-nightly-gpu",
"intel-tensorflow",
"intel-tensorflow-avx512",
"tensorflow-rocm",
"tensorflow-macos",
)
_tf_version = None
# For the metadata, we have to look for both tensorflow and tensorflow-cpu
for pkg in candidates:
try:
_tf_version = importlib_metadata.version(pkg)
break
except importlib_metadata.PackageNotFoundError:
pass
_tf_available = _tf_version is not None
if _tf_available:
if version.parse(_tf_version) < version.parse("2"):
logger.info(f"TensorFlow found but with version {_tf_version}. Transformers requires version 2 minimum.")
_tf_available = False
else:
logger.info(f"TensorFlow version {_tf_version} available.")
else:
logger.info("Disabling Tensorflow because USE_TORCH is set")
_tf_available = False
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
_flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
if _flax_available:
try:
_jax_version = importlib_metadata.version("jax")
_flax_version = importlib_metadata.version("flax")
logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
except importlib_metadata.PackageNotFoundError:
_flax_available = False
else:
_flax_available = False
_datasets_available = importlib.util.find_spec("datasets") is not None
try:
# Check we're not importing a "datasets" directory somewhere but the actual library by trying to grab the version
# AND checking it has an author field in the metadata that is HuggingFace.
_ = importlib_metadata.version("datasets")
_datasets_metadata = importlib_metadata.metadata("datasets")
if _datasets_metadata.get("author", "") != "HuggingFace Inc.":
_datasets_available = False
except importlib_metadata.PackageNotFoundError:
_datasets_available = False
_detectron2_available = importlib.util.find_spec("detectron2") is not None
try:
_detectron2_version = importlib_metadata.version("detectron2")
logger.debug(f"Successfully imported detectron2 version {_detectron2_version}")
except importlib_metadata.PackageNotFoundError:
_detectron2_available = False
_faiss_available = importlib.util.find_spec("faiss") is not None
try:
_faiss_version = importlib_metadata.version("faiss")
logger.debug(f"Successfully imported faiss version {_faiss_version}")
except importlib_metadata.PackageNotFoundError:
try:
_faiss_version = importlib_metadata.version("faiss-cpu")
logger.debug(f"Successfully imported faiss version {_faiss_version}")
except importlib_metadata.PackageNotFoundError:
_faiss_available = False
_ftfy_available = importlib.util.find_spec("ftfy") is not None
try:
_ftfy_version = importlib_metadata.version("ftfy")
logger.debug(f"Successfully imported ftfy version {_ftfy_version}")
except importlib_metadata.PackageNotFoundError:
_ftfy_available = False
_coloredlogs_available = importlib.util.find_spec("coloredlogs") is not None
try:
    _coloredlogs_available = importlib_metadata.version("coloredlogs")
    logger.debug(f"Successfully imported coloredlogs version {_coloredlogs_available}")
except importlib_metadata.PackageNotFoundError:
_coloredlogs_available = False
_sympy_available = importlib.util.find_spec("sympy") is not None
try:
_sympy_available = importlib_metadata.version("sympy")
logger.debug(f"Successfully imported sympy version {_sympy_available}")
except importlib_metadata.PackageNotFoundError:
_sympy_available = False
_tf2onnx_available = importlib.util.find_spec("tf2onnx") is not None
try:
_tf2onnx_version = importlib_metadata.version("tf2onnx")
logger.debug(f"Successfully imported tf2onnx version {_tf2onnx_version}")
except importlib_metadata.PackageNotFoundError:
_tf2onnx_available = False
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
try:
_onxx_version = importlib_metadata.version("onnx")
logger.debug(f"Successfully imported onnx version {_onxx_version}")
except importlib_metadata.PackageNotFoundError:
_onnx_available = False
_scatter_available = importlib.util.find_spec("torch_scatter") is not None
try:
_scatter_version = importlib_metadata.version("torch_scatter")
logger.debug(f"Successfully imported torch-scatter version {_scatter_version}")
except importlib_metadata.PackageNotFoundError:
_scatter_available = False
_pytorch_quantization_available = importlib.util.find_spec("pytorch_quantization") is not None
try:
_pytorch_quantization_version = importlib_metadata.version("pytorch_quantization")
logger.debug(f"Successfully imported pytorch-quantization version {_pytorch_quantization_version}")
except importlib_metadata.PackageNotFoundError:
_pytorch_quantization_available = False
_soundfile_available = importlib.util.find_spec("soundfile") is not None
try:
_soundfile_version = importlib_metadata.version("soundfile")
logger.debug(f"Successfully imported soundfile version {_soundfile_version}")
except importlib_metadata.PackageNotFoundError:
_soundfile_available = False
_tensorflow_probability_available = importlib.util.find_spec("tensorflow_probability") is not None
try:
_tensorflow_probability_version = importlib_metadata.version("tensorflow_probability")
logger.debug(f"Successfully imported tensorflow-probability version {_tensorflow_probability_version}")
except importlib_metadata.PackageNotFoundError:
_tensorflow_probability_available = False
_timm_available = importlib.util.find_spec("timm") is not None
try:
_timm_version = importlib_metadata.version("timm")
logger.debug(f"Successfully imported timm version {_timm_version}")
except importlib_metadata.PackageNotFoundError:
_timm_available = False
_torchaudio_available = importlib.util.find_spec("torchaudio") is not None
try:
_torchaudio_version = importlib_metadata.version("torchaudio")
logger.debug(f"Successfully imported torchaudio version {_torchaudio_version}")
except importlib_metadata.PackageNotFoundError:
_torchaudio_available = False
_phonemizer_available = importlib.util.find_spec("phonemizer") is not None
try:
_phonemizer_version = importlib_metadata.version("phonemizer")
logger.debug(f"Successfully imported phonemizer version {_phonemizer_version}")
except importlib_metadata.PackageNotFoundError:
_phonemizer_available = False
_pyctcdecode_available = importlib.util.find_spec("pyctcdecode") is not None
try:
_pyctcdecode_version = importlib_metadata.version("pyctcdecode")
logger.debug(f"Successfully imported pyctcdecode version {_pyctcdecode_version}")
except importlib_metadata.PackageNotFoundError:
_pyctcdecode_available = False
_librosa_available = importlib.util.find_spec("librosa") is not None
try:
_librosa_version = importlib_metadata.version("librosa")
logger.debug(f"Successfully imported librosa version {_librosa_version}")
except importlib_metadata.PackageNotFoundError:
_librosa_available = False
# This is the version of torch required to run torch.fx features and torch.onnx with dictionary inputs.
TORCH_FX_REQUIRED_VERSION = version.parse("1.10")
TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION = version.parse("1.8")
def is_torch_available():
return _torch_available
def is_pyctcdecode_available():
return _pyctcdecode_available
def is_librosa_available():
return _librosa_available
def is_torch_cuda_available():
if is_torch_available():
import torch
return torch.cuda.is_available()
else:
return False
def is_torch_bf16_gpu_available():
if not is_torch_available():
return False
import torch
# since currently no utility function is available we build our own.
# some bits come from https://github.com/pytorch/pytorch/blob/2289a12f21c54da93bf5d696e3f9aea83dd9c10d/torch/testing/_internal/common_cuda.py#L51
# with additional check for torch version
# to succeed:
# 1. torch >= 1.10 (1.9 should be enough for AMP API has changed in 1.10, so using 1.10 as minimal)
# 2. the hardware needs to support bf16 (GPU arch >= Ampere, or CPU)
# 3. if using gpu, CUDA >= 11
# 4. torch.autocast exists
# XXX: one problem here is that it may give invalid results on mixed gpus setup, so it's
# really only correct for the 0th gpu (or currently set default device if different from 0)
if version.parse(torch.__version__) < version.parse("1.10"):
return False
if torch.cuda.is_available() and torch.version.cuda is not None:
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split(".")[0]) < 11:
return False
if not hasattr(torch.cuda.amp, "autocast"):
return False
else:
return False
return True
def is_torch_bf16_cpu_available():
if not is_torch_available():
return False
import torch
if version.parse(torch.__version__) < version.parse("1.10"):
return False
try:
# multiple levels of AttributeError depending on the pytorch version so do them all in one check
_ = torch.cpu.amp.autocast
except AttributeError:
return False
return True
def is_torch_bf16_available():
return is_torch_bf16_cpu_available() or is_torch_bf16_gpu_available()
def is_torch_tf32_available():
if not is_torch_available():
return False
import torch
if not torch.cuda.is_available() or torch.version.cuda is None:
return False
if torch.cuda.get_device_properties(torch.cuda.current_device()).major < 8:
return False
if int(torch.version.cuda.split(".")[0]) < 11:
return False
if version.parse(torch.__version__) < version.parse("1.7"):
return False
return True
torch_version = None
_torch_fx_available = _torch_onnx_dict_inputs_support_available = False
if _torch_available:
torch_version = version.parse(importlib_metadata.version("torch"))
_torch_fx_available = (torch_version.major, torch_version.minor) >= (
TORCH_FX_REQUIRED_VERSION.major,
TORCH_FX_REQUIRED_VERSION.minor,
)
_torch_onnx_dict_inputs_support_available = torch_version >= TORCH_ONNX_DICT_INPUTS_MINIMUM_VERSION
def is_torch_fx_available():
return _torch_fx_available
def is_torch_onnx_dict_inputs_support_available():
return _torch_onnx_dict_inputs_support_available
def is_tf_available():
return _tf_available
def is_coloredlogs_available():
return _coloredlogs_available
def is_tf2onnx_available():
return _tf2onnx_available
def is_onnx_available():
return _onnx_available
def is_flax_available():
return _flax_available
def is_ftfy_available():
return _ftfy_available
def is_torch_tpu_available():
if not _torch_available:
return False
# This test is probably enough, but just in case, we unpack a bit.
if importlib.util.find_spec("torch_xla") is None:
return False
if importlib.util.find_spec("torch_xla.core") is None:
return False
return importlib.util.find_spec("torch_xla.core.xla_model") is not None
def is_torchdynamo_available():
return importlib.util.find_spec("torchdynamo") is not None
def is_datasets_available():
return _datasets_available
def is_detectron2_available():
return _detectron2_available
def is_rjieba_available():
return importlib.util.find_spec("rjieba") is not None
def is_psutil_available():
return importlib.util.find_spec("psutil") is not None
def is_py3nvml_available():
return importlib.util.find_spec("py3nvml") is not None
def is_apex_available():
return importlib.util.find_spec("apex") is not None
def is_ipex_available():
return importlib.util.find_spec("intel_extension_for_pytorch") is not None
def is_bitsandbytes_available():
return importlib.util.find_spec("bitsandbytes") is not None
def is_faiss_available():
return _faiss_available
def is_scipy_available():
return importlib.util.find_spec("scipy") is not None
def is_sklearn_available():
if importlib.util.find_spec("sklearn") is None:
return False
return is_scipy_available() and importlib.util.find_spec("sklearn.metrics")
def is_sentencepiece_available():
return importlib.util.find_spec("sentencepiece") is not None
def is_protobuf_available():
if importlib.util.find_spec("google") is None:
return False
return importlib.util.find_spec("google.protobuf") is not None
def is_accelerate_available():
return importlib.util.find_spec("accelerate") is not None
def is_tokenizers_available():
return importlib.util.find_spec("tokenizers") is not None
def is_vision_available():
return importlib.util.find_spec("PIL") is not None
def is_pytesseract_available():
return importlib.util.find_spec("pytesseract") is not None
def is_spacy_available():
return importlib.util.find_spec("spacy") is not None
def is_in_notebook():
try:
# Test adapted from tqdm.autonotebook: https://github.com/tqdm/tqdm/blob/master/tqdm/autonotebook.py
get_ipython = sys.modules["IPython"].get_ipython
if "IPKernelApp" not in get_ipython().config:
raise ImportError("console")
if "VSCODE_PID" in os.environ:
raise ImportError("vscode")
if "DATABRICKS_RUNTIME_VERSION" in os.environ:
raise ImportError("databricks")
return importlib.util.find_spec("IPython") is not None
except (AttributeError, ImportError, KeyError):
return False
def is_scatter_available():
return _scatter_available
def is_pytorch_quantization_available():
return _pytorch_quantization_available
def is_tensorflow_probability_available():
return _tensorflow_probability_available
def is_pandas_available():
return importlib.util.find_spec("pandas") is not None
def is_sagemaker_dp_enabled():
# Get the sagemaker specific env variable.
sagemaker_params = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
sagemaker_params = json.loads(sagemaker_params)
if not sagemaker_params.get("sagemaker_distributed_dataparallel_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
def is_sagemaker_mp_enabled():
# Get the sagemaker specific mp parameters from smp_options variable.
smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
try:
# Parse it and check the field "partitions" is included, it is required for model parallel.
smp_options = json.loads(smp_options)
if "partitions" not in smp_options:
return False
except json.JSONDecodeError:
return False
# Get the sagemaker specific framework parameters from mpi_options variable.
mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
try:
# Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
mpi_options = json.loads(mpi_options)
if not mpi_options.get("sagemaker_mpi_enabled", False):
return False
except json.JSONDecodeError:
return False
# Lastly, check if the `smdistributed` module is present.
return importlib.util.find_spec("smdistributed") is not None
def is_training_run_on_sagemaker():
return "SAGEMAKER_JOB_NAME" in os.environ
def is_soundfile_availble():
return _soundfile_available
def is_timm_available():
return _timm_available
def is_torchaudio_available():
return _torchaudio_available
def is_speech_available():
# For now this depends on torchaudio but the exact dependency might evolve in the future.
return _torchaudio_available
def is_phonemizer_available():
return _phonemizer_available
def torch_only_method(fn):
def wrapper(*args, **kwargs):
if not _torch_available:
raise ImportError(
"You need to install pytorch to use this method or class, "
"or activate it with environment variables USE_TORCH=1 and USE_TF=0."
)
else:
return fn(*args, **kwargs)
return wrapper
# docstyle-ignore
DATASETS_IMPORT_ERROR = """
{0} requires the 🤗 Datasets library but it was not found in your environment. You can install it with:
```
pip install datasets
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install datasets
```
then restarting your kernel.
Note that if you have a local folder named `datasets` or a local python file named `datasets.py` in your current
working directory, python may try to import this instead of the 🤗 Datasets library. You should rename this folder or
that python file if that's the case.
"""
# docstyle-ignore
TOKENIZERS_IMPORT_ERROR = """
{0} requires the 🤗 Tokenizers library but it was not found in your environment. You can install it with:
```
pip install tokenizers
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install tokenizers
```
"""
# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the SentencePiece library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/google/sentencepiece#installation and follow the ones
that match your environment.
"""
# docstyle-ignore
PROTOBUF_IMPORT_ERROR = """
{0} requires the protobuf library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/protocolbuffers/protobuf/tree/master/python#installation and follow the ones
that match your environment.
"""
# docstyle-ignore
FAISS_IMPORT_ERROR = """
{0} requires the faiss library but it was not found in your environment. Checkout the instructions on the
installation page of its repo: https://github.com/facebookresearch/faiss/blob/master/INSTALL.md and follow the ones
that match your environment.
"""
# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""
# docstyle-ignore
SKLEARN_IMPORT_ERROR = """
{0} requires the scikit-learn library but it was not found in your environment. You can install it with:
```
pip install -U scikit-learn
```
In a notebook or a colab, you can install it by executing a cell with
```
!pip install -U scikit-learn
```
"""
# docstyle-ignore
TENSORFLOW_IMPORT_ERROR = """
{0} requires the TensorFlow library but it was not found in your environment. Checkout the instructions on the
installation page: https://www.tensorflow.org/install and follow the ones that match your environment.
"""
# docstyle-ignore
DETECTRON2_IMPORT_ERROR = """
{0} requires the detectron2 library but it was not found in your environment. Checkout the instructions on the
installation page: https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md and follow the ones
that match your environment.
"""
# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
"""
# docstyle-ignore
FTFY_IMPORT_ERROR = """
{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the
installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
that match your environment.
"""
# docstyle-ignore
SCATTER_IMPORT_ERROR = """
{0} requires the torch-scatter library but it was not found in your environment. You can install it with pip as
explained here: https://github.com/rusty1s/pytorch_scatter.
"""
# docstyle-ignore
PYTORCH_QUANTIZATION_IMPORT_ERROR = """
{0} requires the pytorch-quantization library but it was not found in your environment. You can install it with pip:
`pip install pytorch-quantization --extra-index-url https://pypi.ngc.nvidia.com`
"""
# docstyle-ignore
TENSORFLOW_PROBABILITY_IMPORT_ERROR = """
{0} requires the tensorflow_probability library but it was not found in your environment. You can install it with pip as
explained here: https://github.com/tensorflow/probability.
"""
# docstyle-ignore
PANDAS_IMPORT_ERROR = """
{0} requires the pandas library but it was not found in your environment. You can install it with pip as
explained here: https://pandas.pydata.org/pandas-docs/stable/getting_started/install.html.
"""
# docstyle-ignore
PHONEMIZER_IMPORT_ERROR = """
{0} requires the phonemizer library but it was not found in your environment. You can install it with pip:
`pip install phonemizer`
"""
# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip:
`pip install scipy`
"""
# docstyle-ignore
SPEECH_IMPORT_ERROR = """
{0} requires the torchaudio library but it was not found in your environment. You can install it with pip:
`pip install torchaudio`
"""
# docstyle-ignore
TIMM_IMPORT_ERROR = """
{0} requires the timm library but it was not found in your environment. You can install it with pip:
`pip install timm`
"""
# docstyle-ignore
VISION_IMPORT_ERROR = """
{0} requires the PIL library but it was not found in your environment. You can install it with pip:
`pip install pillow`
"""
# docstyle-ignore
PYTESSERACT_IMPORT_ERROR = """
{0} requires the PyTesseract library but it was not found in your environment. You can install it with pip:
`pip install pytesseract`
"""
# docstyle-ignore
PYCTCDECODE_IMPORT_ERROR = """
{0} requires the pyctcdecode library but it was not found in your environment. You can install it with pip:
`pip install pyctcdecode`
"""
# docstyle-ignore
ACCELERATE_IMPORT_ERROR = """
{0} requires the accelerate library but it was not found in your environment. You can install it with pip:
`pip install accelerate`
"""
BACKENDS_MAPPING = OrderedDict(
[
("datasets", (is_datasets_available, DATASETS_IMPORT_ERROR)),
("detectron2", (is_detectron2_available, DETECTRON2_IMPORT_ERROR)),
("faiss", (is_faiss_available, FAISS_IMPORT_ERROR)),
("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
("pandas", (is_pandas_available, PANDAS_IMPORT_ERROR)),
("phonemizer", (is_phonemizer_available, PHONEMIZER_IMPORT_ERROR)),
("protobuf", (is_protobuf_available, PROTOBUF_IMPORT_ERROR)),
("pyctcdecode", (is_pyctcdecode_available, PYCTCDECODE_IMPORT_ERROR)),
("pytesseract", (is_pytesseract_available, PYTESSERACT_IMPORT_ERROR)),
("scatter", (is_scatter_available, SCATTER_IMPORT_ERROR)),
("pytorch_quantization", (is_pytorch_quantization_available, PYTORCH_QUANTIZATION_IMPORT_ERROR)),
("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
("sklearn", (is_sklearn_available, SKLEARN_IMPORT_ERROR)),
("speech", (is_speech_available, SPEECH_IMPORT_ERROR)),
("tensorflow_probability", (is_tensorflow_probability_available, TENSORFLOW_PROBABILITY_IMPORT_ERROR)),
("tf", (is_tf_available, TENSORFLOW_IMPORT_ERROR)),
("timm", (is_timm_available, TIMM_IMPORT_ERROR)),
("tokenizers", (is_tokenizers_available, TOKENIZERS_IMPORT_ERROR)),
("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
("vision", (is_vision_available, VISION_IMPORT_ERROR)),
("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
("accelerate", (is_accelerate_available, ACCELERATE_IMPORT_ERROR)),
]
)
def requires_backends(obj, backends):
if not isinstance(backends, (list, tuple)):
backends = [backends]
name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
checks = (BACKENDS_MAPPING[backend] for backend in backends)
failed = [msg.format(name) for available, msg in checks if not available()]
if failed:
raise ImportError("".join(failed))
class DummyObject(type):
"""
Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
`requires_backend` each time a user tries to access any method of that class.
"""
def __getattr__(cls, key):
if key.startswith("_"):
return super().__getattr__(cls, key)
requires_backends(cls, cls._backends)
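# Illustrative use (mirrors the generated dummy objects): accessing any
# non-underscore attribute of the class below raises the PyTorch ImportError
# when torch is missing.
# class MyTorchOnlyClass(metaclass=DummyObject):
#     _backends = ["torch"]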
def torch_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_torch_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires PyTorch.")
return wrapper
def tf_required(func):
# Chose a different decorator name than in tests so it's clear they are not the same.
@wraps(func)
def wrapper(*args, **kwargs):
if is_tf_available():
return func(*args, **kwargs)
else:
raise ImportError(f"Method `{func.__name__}` requires TF.")
return wrapper
def is_torch_fx_proxy(x):
if is_torch_fx_available():
import torch.fx
return isinstance(x, torch.fx.Proxy)
return False
class _LazyModule(ModuleType):
"""
Module class that surfaces all objects but only performs associated imports when the objects are requested.
"""
# Very heavily inspired by optuna.integration._IntegrationModule
# https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
super().__init__(name)
self._modules = set(import_structure.keys())
self._class_to_module = {}
for key, values in import_structure.items():
for value in values:
self._class_to_module[value] = key
# Needed for autocompletion in an IDE
self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
self.__file__ = module_file
self.__spec__ = module_spec
self.__path__ = [os.path.dirname(module_file)]
self._objects = {} if extra_objects is None else extra_objects
self._name = name
self._import_structure = import_structure
# Needed for autocompletion in an IDE
def __dir__(self):
result = super().__dir__()
# The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
# they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
for attr in self.__all__:
if attr not in result:
result.append(attr)
return result
def __getattr__(self, name: str) -> Any:
if name in self._objects:
return self._objects[name]
if name in self._modules:
value = self._get_module(name)
elif name in self._class_to_module.keys():
module = self._get_module(self._class_to_module[name])
value = getattr(module, name)
else:
raise AttributeError(f"module {self.__name__} has no attribute {name}")
setattr(self, name, value)
return value
def _get_module(self, module_name: str):
try:
return importlib.import_module("." + module_name, self.__name__)
except Exception as e:
raise RuntimeError(
f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
f" traceback):\n{e}"
) from e
def __reduce__(self):
return (self.__class__, (self._name, self.__file__, self._import_structure))
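# Typical use in a package __init__.py (illustrative import structure):
# import sys
# _import_structure = {"models": ["MyModel"], "utils": []}
# sys.modules[__name__] = _LazyModule(
#     __name__, globals()["__file__"], _import_structure, module_spec=__spec__
# )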
class OptionalDependencyNotAvailable(BaseException):
"""Internally used error class for signalling an optional dependency was not found."""
|
nilq/baby-python
|
python
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Volume utilities for virt drivers.
"""
from os_brick.initiator import connector
from oslo_concurrency import processutils as putils
from jacket.compute import utils
def get_iscsi_initiator(execute=None):
"""Get iscsi initiator name for this machine."""
root_helper = utils.get_root_helper()
# so we can mock out the execute itself
# in unit tests.
if not execute:
execute = putils.execute
iscsi = connector.ISCSIConnector(root_helper=root_helper,
execute=execute)
return iscsi.get_initiator()
|
nilq/baby-python
|
python
|
import tensorflow as tf
class Model:
def __init__(self, image_size = 224, n_classes = 16, fc_size = 1024):
self.n_classes = n_classes
tf.compat.v1.disable_eager_execution()
self.dropout = tf.compat.v1.placeholder(tf.float32, name="dropout_rate")
self.input_images = tf.compat.v1.placeholder(tf.float32, shape=[None, image_size, image_size, 3], name="input_images")
## First convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3,3,3,16],stddev=1e-1), name="conv1_weights")
conv = tf.nn.conv2d(self.input_images, kernel, [1,2,2,1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([16]))
conv_with_bias = tf.nn.bias_add(conv, bias)
# Rectifier see: https://en.wikipedia.org/wiki/Rectifier_(neural_networks)
conv1 = tf.nn.leaky_relu(conv_with_bias, name="conv1")
# local response normalization see: https://prateekvjoshi.com/2016/04/05/what-is-local-response-normalization-in-convolutional-neural-networks/
lrn1 = tf.nn.lrn(conv1, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
pooled_conv1 = tf.nn.max_pool2d(lrn1, ksize=[1,3,3,1], strides=[1,2,2,1], padding="SAME", name="pool1")
## Second convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 16, 64],stddev=1e-1),
name="conv2_weights")
conv = tf.nn.conv2d(pooled_conv1, kernel, [1, 2, 2, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([64]), name="conv2_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv2 = tf.nn.leaky_relu(conv_with_bias, name="conv2")
lrn2 = tf.nn.lrn(conv2, alpha=1e-4, beta=0.75, depth_radius=2, bias=2.0)
pooled_conv2 = tf.nn.max_pool2d(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding="SAME", name="pool2")
## Third convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 64, 128],stddev=1e-1), name="conv3_weights")
conv = tf.nn.conv2d(pooled_conv2, kernel, [1, 1, 1, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([128]), name="conv3_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv3 = tf.nn.leaky_relu(conv_with_bias, name="conv3")
## Fourth convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 128, 256],stddev=1e-1), name="conv4_weights")
conv = tf.nn.conv2d(conv3, kernel, [1, 1, 1, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([256]), name="conv4_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv4 = tf.nn.leaky_relu(conv_with_bias, name="conv4")
## Fifth convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 256, 384],stddev=1e-1), name="conv5_weights")
conv = tf.nn.conv2d(conv4, kernel, [1, 2, 2, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([384]), name="conv5_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv5 = tf.nn.leaky_relu(conv_with_bias, name="conv5")
## 6th convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 384, 512],stddev=1e-1), name="conv6_weights")
conv = tf.nn.conv2d(conv5, kernel, [1, 2, 2, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([512]), name="conv6_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv6 = tf.nn.leaky_relu(conv_with_bias, name="conv6")
## 7th convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 512, 768],stddev=1e-1), name="conv7_weights")
conv = tf.nn.conv2d(conv6, kernel, [1, 2, 2, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([768]), name="conv7_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv7 = tf.nn.leaky_relu(conv_with_bias, name="conv7")
## 8th convolutional layer
kernel = tf.Variable(tf.random.truncated_normal([3, 3, 768, 768],stddev=1e-1), name="conv8_weights")
conv = tf.nn.conv2d(conv7, kernel, [1, 2, 2, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([768]), name="conv8_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv8 = tf.nn.leaky_relu(conv_with_bias, name="conv8")
## 9th convolutional layer
        kernel = tf.Variable(tf.random.truncated_normal([3, 3, 768, 768],stddev=1e-1), name="conv9_weights")
conv = tf.nn.conv2d(conv8, kernel, [1, 2, 2, 1], padding="SAME")
bias = tf.Variable(tf.random.truncated_normal([768]), name="conv9_bias")
conv_with_bias = tf.nn.bias_add(conv, bias)
conv9 = tf.nn.leaky_relu(conv_with_bias, name="conv9")
## Fully connected layers
conv9 = tf.keras.layers.Flatten()(conv9) # tf.flatten
# fc_size_in = 768
fc_size_in = conv9.shape[-1]
# First fully connected layer
weights = tf.Variable(tf.random.truncated_normal([fc_size_in, fc_size]), name="fc1_weights")
bias = tf.Variable(tf.random.truncated_normal([fc_size]), name="fc1_bias")
fc1 = tf.matmul(conv9, weights) + bias
fc1 = tf.nn.leaky_relu(fc1, name="fc1")
fc1 = tf.nn.dropout(fc1, rate = (self.dropout))
# Second fully connected layer
weights = tf.Variable(tf.random.truncated_normal([fc_size, fc_size]), name="fc2_weights")
bias = tf.Variable(tf.random.truncated_normal([fc_size]), name="fc2_bias")
fc2 = tf.matmul(fc1, weights) + bias
fc2 = tf.nn.leaky_relu(fc2, name="fc2")
fc2 = tf.nn.dropout(fc2, rate = (self.dropout))
# Output layer
weights = tf.Variable(tf.zeros([fc_size, n_classes]), name="output_weight")
bias = tf.Variable(tf.random.truncated_normal([n_classes]), name="output_bias")
self.out = tf.matmul(fc2, weights) + bias
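# Minimal usage sketch (illustrative): build the graph and run one forward
# pass on random data in a TF1-style session.
if __name__ == '__main__':
    import numpy as np
    model = Model(image_size=224, n_classes=16)
    with tf.compat.v1.Session() as sess:
        sess.run(tf.compat.v1.global_variables_initializer())
        batch = np.random.rand(2, 224, 224, 3).astype('float32')
        logits = sess.run(model.out, feed_dict={model.input_images: batch,
                                                model.dropout: 0.0})
        print(logits.shape)  # (2, 16)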
|
nilq/baby-python
|
python
|
'''
based on the noise model of https://github.com/paninski-lab/yass
'''
import numpy as np
from scipy.spatial.distance import pdist, squareform
import os
import torch
def make_noise(n, spatial_SIG, temporal_SIG):
"""Make noise
Parameters
----------
n: int
Number of noise events to generate
Returns
------
numpy.ndarray
Noise
"""
n_neigh, _ = spatial_SIG.shape
waveform_length, _ = temporal_SIG.shape
# get noise
noise = np.random.normal(size=(n, waveform_length, n_neigh))
for c in range(n_neigh):
noise[:, :, c] = np.matmul(noise[:, :, c], temporal_SIG)
reshaped_noise = np.reshape(noise, (-1, n_neigh))
the_noise = np.reshape(np.matmul(reshaped_noise, spatial_SIG),
(n, waveform_length, n_neigh))
return the_noise
def make_noise_torch(n, spatial_SIG, temporal_SIG):
"""Make noise in Pytorch
Parameters
----------
n: int
Number of noise events to generate
spatial_SIG: torch array
Spatial covariance matrix
temporal_SIG: torch array
Temporal covariance matrix
Returns
------
torch array
Noise
"""
n_neigh, _ = spatial_SIG.shape
waveform_length, _ = temporal_SIG.shape
# get noise
dist = torch.distributions.normal.Normal(0.0, 1.0)
noise = dist.sample(sample_shape=(n, waveform_length, n_neigh))
# multiple random gaussian; then multiply by covariance
for c in range(n_neigh):
noise[:, :, c] = torch.matmul(noise[:, :, c], temporal_SIG)
reshaped_noise = torch.reshape(noise, (-1, n_neigh))
the_noise = torch.reshape(torch.matmul(reshaped_noise, spatial_SIG),
(n, waveform_length, n_neigh))
return the_noise
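# Illustrative end-to-end sketch (assumes `recordings` is a (T, C) float
# array of raw traces; parameter values are arbitrary):
# spatial_SIG, temporal_SIG = noise_cov(recordings, temporal_size=61,
#                                       window_size=10)
# noise = make_noise(100, spatial_SIG, temporal_SIG)  # (100, 61, C)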
def kill_signal(recordings, threshold, window_size):
"""
Thresholds recordings, values above 'threshold' are considered signal
(set to 0), a window of size 'window_size' is drawn around the signal
points and those observations are also killed
Returns
-------
recordings: numpy.ndarray
The modified recordings with values above the threshold set to 0
is_noise_idx: numpy.ndarray
        A boolean array with the same shape as 'recordings' indicating if the
observation is noise (1) or was killed (0).
"""
recordings = np.copy(recordings)
# print("rec shape:", recordings.shape)
T, C = recordings.shape
R = int((window_size-1)/2)
    # this will hold a flag 1 (noise), 0 (signal) for every observation in the
# recordings
is_noise_idx = np.zeros((T, C))
# go through every neighboring channel
for c in range(C):
        # get observations where the value is above threshold
idx_temp = np.where(np.abs(recordings[:, c]) > threshold)[0]
# shift every index found
for j in range(-R, R+1):
# shift
idx_temp2 = idx_temp + j
# remove indexes outside range [0, T]
idx_temp2 = idx_temp2[np.logical_and(idx_temp2 >= 0,
idx_temp2 < T)]
            # kill the shifted (signal-window) indices by setting them to nan
recordings[idx_temp2, c] = np.nan
        # noise indices are the ones that are not nan; NaN != NaN, so the
        # self-comparison below is False exactly at the killed positions
        is_noise_idx_temp = (recordings[:, c] == recordings[:, c])
        # standardize data, ignoring nans
        recordings[:, c] = recordings[:, c]/np.nanstd(recordings[:, c])
        # set non-noise (killed) indices to 0 in the recordings
        recordings[~is_noise_idx_temp, c] = 0
# save noise indexes
is_noise_idx[is_noise_idx_temp, c] = 1
# print("recordings after kill sig:", recordings.shape, is_noise_idx.shape)
# print("number of zeros:", np.sum(recordings==0))
return recordings, is_noise_idx
def noise_cov(recordings, temporal_size, window_size, sample_size=1000,
threshold=3.0, max_trials_per_sample=100,
allow_smaller_sample_size=False):
"""Compute noise temporal and spatial covariance
Parameters
----------
recordings: numpy.ndarray
Recordings
temporal_size:
Waveform size
sample_size: int
Number of noise snippets of temporal_size to search
threshold: float
Observations below this number are considered noise
Returns
-------
spatial_SIG: numpy.ndarray
temporal_SIG: numpy.ndarray
"""
#logger = logging.getLogger(__name__)
# kill signal above threshold in recordings
rec, is_noise_idx = kill_signal(recordings, threshold, window_size)
# compute spatial covariance, output: (n_channels, n_channels)
spatial_cov = np.divide(np.matmul(rec.T, rec),
np.matmul(is_noise_idx.T, is_noise_idx))
# compute spatial sig
w_spatial, v_spatial = np.linalg.eig(spatial_cov)
spatial_SIG = np.matmul(np.matmul(v_spatial,
np.diag(np.sqrt(w_spatial))),
v_spatial.T)
# apply spatial whitening to recordings
spatial_whitener = np.matmul(np.matmul(v_spatial,
np.diag(1/np.sqrt(w_spatial))),
v_spatial.T)
rec = np.matmul(rec, spatial_whitener)
# search single noise channel snippets
noise_wf = search_noise_snippets(
rec, is_noise_idx, sample_size,
temporal_size,
channel_choices=None,
max_trials_per_sample=max_trials_per_sample,
allow_smaller_sample_size=allow_smaller_sample_size)
print ('Computing temporal sig...')
w, v = np.linalg.eig(np.cov(noise_wf.T))
temporal_SIG = np.matmul(np.matmul(v, np.diag(np.sqrt(w))), v.T)
return spatial_SIG, temporal_SIG
def search_noise_snippets(recordings, is_noise_idx, sample_size,
temporal_size, channel_choices=None,
max_trials_per_sample=100,
allow_smaller_sample_size=False):
"""
Randomly search noise snippets of 'temporal_size'
Parameters
----------
channel_choices: list
List of sets of channels to select at random on each trial
max_trials_per_sample: int, optional
Maximum random trials per sample
allow_smaller_sample_size: bool, optional
If 'max_trials_per_sample' is reached and this is True, the noise
snippets found up to that time are returned
Raises
------
ValueError
if after 'max_trials_per_sample' trials, no noise snippet has been
found this exception is raised
Notes
-----
Channels selected at random using the random module from the standard
library (not using np.random)
"""
#logger = logging.getLogger(__name__)
T, C = recordings.shape
if channel_choices is None:
noise_wf = np.zeros((sample_size, temporal_size))
else:
        lengths = set([len(ch) for ch in channel_choices])
        if len(lengths) > 1:
            raise ValueError('All elements in channel_choices must have '
                             'the same length, got {}'.format(lengths))
n_channels = len(channel_choices[0])
noise_wf = np.zeros((sample_size, temporal_size, n_channels))
count = 0
#logger.debug('Starting to search noise snippets...')
trial = 0
# repeat until you get sample_size noise snippets
while count < sample_size:
# random number for the start of the noise snippet
t_start = np.random.randint(T-temporal_size)
if channel_choices is None:
            # random channel (upper bound of np.random.randint is exclusive)
            ch = np.random.randint(0, C)
else:
ch = np.random.choice(channel_choices)
t_slice = slice(t_start, t_start+temporal_size)
# get a snippet from the recordings and the noise flags for the same
# location
snippet = recordings[t_slice, ch]
snipped_idx_noise = is_noise_idx[t_slice, ch]
# check if all observations in snippet are noise
if snipped_idx_noise.all():
# add the snippet and increase count
noise_wf[count] = snippet
count += 1
trial = 0
#logger.debug('Found %i/%i...', count, sample_size)
trial += 1
if trial == max_trials_per_sample:
if allow_smaller_sample_size:
return noise_wf[:count]
else:
raise ValueError("Couldn't find snippet {} of size {} after "
"{} iterations (only {} found)"
.format(count + 1, temporal_size,
max_trials_per_sample,
count))
return noise_wf
def order_channels_by_distance(reference, channels, geom):
"""Order channels by distance using certain channel as reference
Parameters
----------
reference: int
Reference channel
channels: np.ndarray
Channels to order
geom
Geometry matrix
Returns
-------
numpy.ndarray
1D array with the channels ordered by distance using the reference
channels
numpy.ndarray
1D array with the indexes for the ordered channels
"""
coord_main = geom[reference]
coord_others = geom[channels]
idx = np.argsort(np.sum(np.square(coord_others - coord_main), axis=1))
return channels[idx], idx
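# --- Minimal usage sketch (illustrative, not from the original project) ---
# Estimate noise covariances from toy Gaussian "recordings" of shape (T, C),
# then draw synthetic noise events with make_noise.
def _demo_noise_pipeline():
    recordings = np.random.normal(size=(10000, 7))  # stand-in for real traces
    spatial_SIG, temporal_SIG = noise_cov(recordings,
                                          temporal_size=61,
                                          window_size=10,
                                          sample_size=100)
    noise = make_noise(5, spatial_SIG, temporal_SIG)
    print(noise.shape)  # (5, 61, 7)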
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import numpy as np
p_guess = [0.5,0.55,0.6,0.7]
repeat_experiment = 30
n = 32
k = 5
plt.title('n = 32, k = 5')
plt.xlabel("Number of CRPs", fontsize=12)
plt.ylabel("Accuracy (x100%)", fontsize=12)
crps = np.load('./xorpuf'+str(k)+'_n'+str(n)+'_reps'+str(repeat_experiment)+'_crps.npy')
for i in range(len(p_guess)):
accuracy_hpuf = np.load('./xorpuf'+str(k)+'_n'+str(n)+'_p'+str(p_guess[i])+'_reps'+str(repeat_experiment)+'_accuracy.npy')
plt.plot(crps,accuracy_hpuf, label = 'p_guess = '+str(p_guess[i]))
plt.legend()
plt.show()
|
nilq/baby-python
|
python
|
# Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestLargeOpsScenario(manager.ScenarioTest):
"""
Test large operations.
This test below:
* Spin up multiple instances in one nova call, and repeat three times
* as a regular user
* TODO: same thing for cinder
"""
@classmethod
def resource_setup(cls):
if CONF.scenario.large_ops_number < 1:
raise cls.skipException("large_ops_number not set to multiple "
"instances")
cls.set_network_resources()
super(TestLargeOpsScenario, cls).resource_setup()
def _wait_for_server_status(self, status):
for server in self.servers:
# Make sure nova list keeps working throughout the build process
self.servers_client.list_servers()
self.servers_client.wait_for_server_status(server['id'], status)
def nova_boot(self):
name = data_utils.rand_name('scenario-server-')
flavor_id = CONF.compute.flavor_ref
secgroup = self._create_security_group()
self.servers_client.create_server(
name,
self.image,
flavor_id,
min_count=CONF.scenario.large_ops_number,
security_groups=[secgroup])
# needed because of bug 1199788
params = {'name': name}
_, server_list = self.servers_client.list_servers(params)
self.servers = server_list['servers']
for server in self.servers:
# after deleting all servers - wait for all servers to clear
# before cleanup continues
self.addCleanup(self.servers_client.wait_for_server_termination,
server['id'])
for server in self.servers:
self.addCleanup_with_wait(
waiter_callable=(self.servers_client.
wait_for_server_termination),
thing_id=server['id'], thing_id_param='server_id',
cleanup_callable=self.delete_wrapper,
cleanup_args=[self.servers_client.delete_server, server['id']])
self._wait_for_server_status('ACTIVE')
def _large_ops_scenario(self):
self.glance_image_create()
self.nova_boot()
@test.services('compute', 'image')
def test_large_ops_scenario_1(self):
self._large_ops_scenario()
@test.services('compute', 'image')
def test_large_ops_scenario_2(self):
self._large_ops_scenario()
@test.services('compute', 'image')
def test_large_ops_scenario_3(self):
self._large_ops_scenario()
|
nilq/baby-python
|
python
|
# -*- coding: UTF-8 -*-
# @yasinkuyu
import sys
import time
import config
from BinanceAPI import *
# trader.py --quantity --symbol --profit --wait_time
# ex: trader.py 1 IOTABTC 1.3 1
# int(sys.argv[1])  # quantity
# sys.argv[2]  # symbol
# sys.argv[3]  # percentage of profit
# sys.argv[4]  # wait_time
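# A minimal argv-parsing sketch for the call signature documented above
# (illustrative only; the original script leaves argument handling commented
# out and relies on the hard-coded defaults below). Pass sys.argv[1:] as args.
def parse_args(args, quantity, symbol, profit, wait_time):
    if len(args) >= 4:
        quantity = int(args[0])
        symbol = args[1]
        profit = float(args[2])
        wait_time = int(args[3])
    return quantity, symbol, profit, wait_time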
TEST_MODE = False
PROFIT = 1.3 #percentage of profit
ORDER_ID = None
TARGET_PRICE = 0
QUANTITY = 2
INCREASING = 0.00000001
TARGET_PROFITABLE_PRICE = None
WAIT_TIME = 3 # default 3 seconds
client = BinanceAPI(config.api_key, config.api_secret)
def buy_limit(symbol, quantity, buyPrice):
global TEST_MODE
if not TEST_MODE:
ret = client.buy_limit(symbol, quantity, buyPrice)
if 'msg' in ret:
errexit(ret['msg'])
orderId = ret['orderId']
file = open("ORDER", "w")
file.write("{}\n".format([symbol, orderId, quantity, buyPrice]))
print "******************"
print 'Order Id: %d' % orderId
else:
orderId = "100000"
return orderId
def sell_limit(symbol, quantity, orderId):
global TEST_MODE
global ORDER_ID
global TARGET_PRICE
global TARGET_PROFITABLE_PRICE
ret = client.get_open_orders(symbol)
if 'msg' in ret:
errexit(ret['msg'])
print "Orders"
for order in ret:
price = float(order['price'])
origQty = float(order['origQty'])
executedQty = float(order['executedQty'])
if order['orderId'] == orderId:
print "Order: %d: %lf\t%lf\t%lf" % (order['orderId'], price, origQty, executedQty)
TARGET_PROFITABLE_PRICE = None
ORDER_ID = None
if not TEST_MODE:
ret = client.sell_limit(symbol, quantity, TARGET_PRICE)
print 'Sales were made at %s price.' % (TARGET_PRICE)
print '---------------------------------------------'
if 'msg' in ret:
errexit(ret['msg'])
print ret
else:
print "Order Id: %s. The test order is complete. Price %s" % (orderId, TARGET_PRICE)
def cancel_order(symbol, orderId):
global TEST_MODE
if orderId is not None:
if not TEST_MODE:
ret = client.cancel(symbol, orderId)
if 'msg' in ret:
errexit(ret['msg'])
print 'Order has been canceled.'
def get_ticker(symbol):
ret = client.get_ticker(symbol)
return float(ret["lastPrice"])
def errexit(msg):
print("Error: " + msg)
exit(1)
def action(symbol):
global ORDER_ID
global QUANTITY
global TARGET_PRICE
global TARGET_PROFITABLE_PRICE
file = open("ORDER", "r")
#print file.read()
lastPrice = get_ticker(symbol)
ret = client.get_orderbooks(symbol, 5)
lastBid = float(ret['bids'][0][0])
lastAsk = float(ret['asks'][0][0])
btcPrice = get_ticker("BTCUSDT")
buyPrice = lastBid + INCREASING
sellPrice = lastAsk - INCREASING
profitablePrice = buyPrice + (buyPrice * PROFIT / 100)
earnTotal = sellPrice - buyPrice
TARGET_PRICE = sellPrice
if ORDER_ID is None:
print 'price:%.8f buyp:%.8f sellp:%.8f-bid:%.8f ask:%.8f BTC:$%.1f' % (lastPrice, buyPrice, sellPrice, lastBid, lastAsk, btcPrice)
if lastAsk >= profitablePrice:
TARGET_PROFITABLE_PRICE = profitablePrice
ORDER_ID = buy_limit(symbol, QUANTITY, buyPrice)
print "Percentage of %s profit. Order created from %.8f. Profit: %.8f BTC" % (PROFIT, sellPrice, earnTotal)
print "#####################"
else:
TARGET_PROFITABLE_PRICE = None
cancel_order(symbol, ORDER_ID)
else:
print "Target sell price: %.8f " % TARGET_PROFITABLE_PRICE
if lastAsk >= TARGET_PROFITABLE_PRICE:
sell_limit(symbol, QUANTITY, ORDER_ID)
def main():
symbol = 'IOTABTC'
print "@yasinkuyu, 2017"
print "Auto Trading for Binance.com (Beta). Enter your symbol. Ex: %s" % symbol
name = raw_input()
if name != "":
symbol = name
print '%%%s profit for scanning %s' % (PROFIT, symbol)
if TEST_MODE:
print "Test mode active"
while True:
startTime = time.time()
action(symbol)
endTime = time.time()
if endTime - startTime < WAIT_TIME:
time.sleep(WAIT_TIME - (endTime - startTime))
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from typing import List, Dict, Optional, Set, Any, Tuple, Type
from Dataset import GraphDataset
from Models.EmbeddingLayers import EmbeddingLayer
from Models.GnnLayers import GCNLayer, GATLayer, HGCNLayer, IHGNNLayer
from Models.PredictionLayers import HemPredictionLayer
from Helpers.Torches import *
from Helpers.GlobalSettings import Gs, Gsv
class RawGnn(nn.Module):
_saved_output_feature: Tensor = None
def __init__(self,
device: torch.device,
dataset: GraphDataset,
embedding_size: int,
gnn_layer_type: Type,
gnn_layer_count: int,
feature_interaction_order: int,
phase2_attention: bool,
predictions: Type,
lambda_muq: float):
"""初始化一个模型。
参数:
device: 设备。
embedding_size: 对 user, query 或 item 做 embedding 时的目标向量维度。
gcn_layer_count: GCN 网络层数。
users_onehot: 一维张量。
queries_multihot: 二维稀疏张量,每行表示一个 query,列表示 one-hot 维度。
items_onehot: 一维张量。
adjacency: 二维稀疏张量,表示邻接矩阵。对角线上的元素须为零。
vocabulary_onehot: query 所用的词典,是一维张量。
"""
super().__init__()
        # Store the arguments
self.device = device
self.dataset = dataset
self.embedding_size = embedding_size
self.gnn_layer_type = gnn_layer_type
self.gnn_layer_count = gnn_layer_count
self.feature_interaction_order = feature_interaction_order
self.phase2_attention = phase2_attention
self.prediction_layer_type = predictions
self.output_feature_size = embedding_size * (1 + self.gnn_layer_count)
        # Embedding layer
self.embeddings = EmbeddingLayer(
dataset=dataset,
embedding_size=embedding_size
)
        # GNN layers (stacked)
self.gnns = []
for layer in range(self.gnn_layer_count):
if gnn_layer_type in [
GCNLayer,
GATLayer,
HGCNLayer
]:
self.gnns.append(
gnn_layer_type(
device=device,
dataset=dataset,
input_dimension=embedding_size,
output_dimension=embedding_size
)
)
elif gnn_layer_type in [
IHGNNLayer
]:
fi_order_here = feature_interaction_order
if fi_order_here > 1 and layer > 0:
fi_order_here = 1
self.gnns.append(
gnn_layer_type(
device=device,
dataset=dataset,
input_dimension=embedding_size,
output_dimension=embedding_size,
feature_interaction_order=fi_order_here,
phase2_attention=phase2_attention
)
)
else:
                raise NotImplementedError(f'Unsupported GNN layer type: {gnn_layer_type}')
for i, gnn in enumerate(self.gnns): self.add_module(f'gnn_{i}', gnn)
        # Prediction layer
if predictions == HemPredictionLayer:
self.prediction_layer = HemPredictionLayer(
feature_dimension=self.output_feature_size,
lambda_muq=lambda_muq,
item_count=dataset.item_count
)
else:
            raise NotImplementedError(f'Unsupported prediction layer type: {predictions}')
def forward(self, user_indices: Tensor, query_indices: Tensor, item_indices: Optional[Tensor] = None):
"""参数:这里的 u q i 并非其在邻接矩阵的结点列表中的索引,而是从 0 开始的。\n
返回值:由每个 interaction 发生的可能性分数构成的一维张量。"""
        # Training mode
if self._saved_output_feature is None:
            # Compute the features of all graph nodes, producing the node feature matrix X
input_features = torch.cat(self.embeddings(None, None, None))
            # Run the GNN layers and concatenate every output into the output node feature matrix X'
gnn_outputs = [input_features]
gnn_output = input_features
for gnn in self.gnns:
gnn_output = gnn(gnn_output)
gnn_outputs.append(gnn_output)
            # When measuring high-order feature weights, stop right here
if Gs.Debug._calculate_highorder_info:
return
output_feature = torch.cat(gnn_outputs, 1)
        # Evaluation (test) mode
else:
output_feature = self._saved_output_feature
        # Extract the user, query and item features separately
output_user_feature = output_feature[user_indices]
output_query_feature = output_feature[query_indices + self.dataset.query_start_index_in_graph]
if item_indices is not None:
output_item_feature = output_feature[item_indices + self.dataset.item_start_index_in_graph]
else:
output_item_feature = output_feature[self.dataset.item_start_index_in_graph:]
        # Make the prediction
if self.prediction_layer_type == HemPredictionLayer:
similarity: Tensor = self.prediction_layer(
output_user_feature,
output_query_feature,
output_item_feature,
item_indices
)
return similarity
def save_features_for_test(self) -> None:
        '''In test mode (no gradients), cache the outputs of all GNN layers to speed up evaluation.'''
input_features = torch.cat(self.embeddings(None, None, None))
gnn_outputs = [input_features]
gnn_output = input_features
for gnn in self.gnns:
gnn_output = gnn(gnn_output)
gnn_outputs.append(gnn_output)
self._saved_output_feature = torch.cat(gnn_outputs, 1)
def clear_saved_feature(self) -> None:
self._saved_output_feature = None
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import numpy as np
# Define a main() function that prints data statistics.
def main():
data = np.loadtxt('data/populations.txt')
year, hares, lynxes, carrots = data.T # trick: columns to variables
plt.axes([0.1, 0.1, 0.5, 0.8])
plt.plot(year, hares, year, lynxes, year, carrots)
plt.legend(('Hare', 'Lynx', 'Carrot'), loc=(1.05, 0.5))
plt.show()
    # calculate mean and std population for each species (column); doing it
    # separately is slower: each per-column mean takes ~3.06 µs (so ~9 µs for
    # three species), while one axis=0 call over the whole array takes ~4.68 µs
populations = data[:, 1:]
print("Means by species: {}".format(populations.mean(axis=0)))
print("Standard deviation by species: {}".format(populations.std(axis=0)))
    # calculate the year when each species had the largest population
max_populations = np.argmax(populations, axis=0)
print("Max populations in years: {}".format(year[max_populations]))
    # calculate the species with the largest population in each year
max_species_idx = np.argmax(populations, axis=1)
max_species = np.array(['H', 'L', 'C'])[max_species_idx]
print("Max species: {}".format(tuple(zip(year, max_species))))
# calculate years when any of the populations is above 50000
above_mask = np.any(np.greater(populations, 50000), axis=1)
print("Years any population above 50000: {}".format(year[above_mask]))
# find the top 2 years for each species when they had the lowest populations
sorted_indices = populations.argsort(axis=0)
years_sorted = year[sorted_indices]
print("Two smallest years: {}".format(years_sorted[:2, :]))
# compare (plot) the change in hare population and the number of lynxes
hare_gradients = np.gradient(hares)
plt.axes([0.1, 0.1, 0.7, 0.8])
plt.plot(year, hare_gradients, year, lynxes)
plt.legend(('Hare', 'Lynx'), loc=(1.05, 0.5))
plt.show()
# calculate correlation
print("Hares and lynxes correlation: {}".format(np.corrcoef(hare_gradients, lynxes)[0, 1]))
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from ..datapack import DataPack
from ..logging import logging
from .data_utils import make_coord_array
import numpy as np
import os
import astropy.time as at
def make_example_datapack(Nd,Nf,Nt,pols=None, time_corr=50.,dir_corr=0.5*np.pi/180.,tec_scale=0.02,tec_noise=1e-3,name='test.hdf5',clobber=False):
logging.info("=== Creating example datapack ===")
name = os.path.abspath(name)
if os.path.isfile(name) and clobber:
os.unlink(name)
datapack = DataPack(name,readonly=False)
with datapack:
datapack.add_antennas()
datapack.add_sources(np.random.normal(np.pi/4.,np.pi/180.*2.5,size=[Nd,2]))
_, directions = datapack.sources
_, antennas = datapack.antennas
ref_dist = np.linalg.norm(antennas - antennas[0:1,:],axis=1)[None,None,:,None]#1,1,Na,1
        times = at.Time(np.linspace(0,Nt*8,Nt)[:,None],format='gps').mjd*86400.# modified Julian date in seconds
freqs = np.linspace(120,160,Nf)*1e6
if pols is not None:
use_pols = True
assert isinstance(pols,(tuple,list))
else:
use_pols = False
pols = ['XX']
tec_conversion = -8.440e9/freqs #Nf
X = make_coord_array(directions/dir_corr, times/time_corr)# Nd*Nt, 3
X2 = np.sum((X[:,:,None] - X.T[None,:,:])**2, axis=1)#N,N
K = tec_scale**2 * np.exp(-0.5*X2)
L = np.linalg.cholesky(K + 1e-6*np.eye(K.shape[0]))#N,N
Z = np.random.normal(size=(K.shape[0],len(pols)))#N,npols
tec = np.einsum("ab,bc->ac",L,Z)#N,npols
tec = tec.reshape((Nd,Nt,len(pols))).transpose((2,0,1))#Npols,Nd,Nt
tec = tec[:,:,None,:]*(0.2+ref_dist/np.max(ref_dist))#Npols,Nd,Na,Nt
# print(tec)
tec += tec_noise*np.random.normal(size=tec.shape)
phase = tec[:,:,:,None,:]*tec_conversion[None,None,None,:,None]##Npols,Nd,Na,Nf,Nt
# print(phase)
phase = np.angle(np.exp(1j*phase))
if not use_pols:
phase = phase[0,...]
pols = None
datapack.add_freq_dep_tab('phase',times=times[:,0],freqs=freqs,pols=pols,vals=phase)
datapack.phase = phase
return datapack
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# If you ever need to modify example JSON data that is shown in the sampleData.js file, you can use this script to generate it.
import sys
import os
from pathlib import Path
sys.path.append(str(Path(os.path.dirname(__file__)).parent))
import json
from cloudsplaining.shared.validation import check_authorization_details_schema
from cloudsplaining.scan.authorization_details import AuthorizationDetails
account_authorization_details_file = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"examples",
"files",
"example.json",
)
)
with open(account_authorization_details_file) as json_file:
account_authorization_details_cfg = json.load(json_file)
results_file = os.path.abspath(os.path.join(
os.path.dirname(__file__),
"example-iam-data.json",
)
)
def generate_example_iam_data():
check_authorization_details_schema(account_authorization_details_cfg)
authorization_details = AuthorizationDetails(account_authorization_details_cfg)
results = authorization_details.results
print(f"Top-level keys of results dictionary: {results.keys()}")
# Write the results
if os.path.exists(results_file):
os.remove(results_file)
with open(results_file, "w") as file:
json.dump(results, file, indent=4)
print(f"Wrote new example IAM data file to: {results_file}")
# print(json.dumps(results, indent=4))
return results
def replace_sample_data_js(results):
sample_data_js_file = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
"cloudsplaining", "output", "src", "sampleData.js"
))
content = f"""var sample_iam_data = {json.dumps(results, indent=4)}
exports.sample_iam_data = sample_iam_data;
"""
if os.path.exists(sample_data_js_file):
print(f"Removing existing file and replacing its contents")
os.remove(sample_data_js_file)
with open(sample_data_js_file, "w") as f:
f.write(content)
if __name__ == '__main__':
results = generate_example_iam_data()
print("Replacing sampleData.js content with the most recent content")
replace_sample_data_js(results)
print("Replaced sampleData.js content")
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# id3v1.py
# From the stagger project: http://code.google.com/p/stagger/
#
# Copyright (c) 2009-2011 Karoly Lorentey <karoly@lorentey.hu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# This test automatically downloads the ID3v1 test suite by Martin Nilsson,
# and runs stagger's id3v1 decoder on all 274 test cases, comparing
# decoded field values to expected values listed in the test suite's
# generation.log file.
#
# Nilsson's tests are rather strict -- stagger intentionally accepts broken
# id3v1 tags, so it only complains on test case 3 (bad tag header).
#
# Test cases 7 and 8 (junk after string terminator) include NUL characters
# in field values in the log file, which is likely a mistake. Their
# description prescribes that the NULs and the data after them should
# not show up for the user, so I override the test case's field values to check that.
#
# Test case 12 has leading spaces in the year field which are intentionally
# stripped by stagger.
#
# In two test cases, Nilsson uses genre names that differ from most other
# sources/implementations:
#
# Test case Genre # Genre in test Genre elsewhere
# 151 136 Christian Christian Gangsta Rap
# 155 140 Contemporary Contemporary Christian
#
# Stagger follows the de facto ID3v1 standard and resolves 136 and 140 to
# the insane genres on the right.
import unittest
import os
import os.path
import re
import string
import urllib.request
import tarfile
import random
import io
import warnings
from stagger.errors import *
import stagger.id3v1
testsuite_url = r"http://id3.org/Developer%20Information?action=AttachFile&do=get&target=id3v1_test_suite.tar.gz"
testsuite_file = os.path.join(os.path.dirname(__file__), "id3v1_test_suite.tar.gz")
testsuite_log = "id3v1/generation.log"
def download_testsuite():
try:
with open(testsuite_file, "rb") as file:
pass
except IOError:
urllib.request.urlretrieve(testsuite_url, testsuite_file)
class ID3v1TestCase(unittest.TestCase):
def parse_log(self):
log = self.tar.extractfile(testsuite_log)
try:
tests = []
tag = {}
for bline in log:
line = bline.decode('iso-8859-1')
m = re.match(r'^Test case ([0-9]+)$', line)
if m is not None:
tag["id"] = int(m.group(1))
continue
m = re.match(r'^Generated test file "([a-zA-Z0-9_.]+)"$', line)
if m is not None:
tag["filename"] = m.group(1)
continue
m = re.match(r'^([a-z]+) *: "([^"]*)"$', line)
if m is not None:
tag[m.group(1)] = m.group(2)
continue
m = re.match(r'^version: (1\.[01])$', line)
if m is not None:
tag["version"] = m.group(1)
continue
m = re.match(r'^genre : ([0-9]+ \(.*\))$', line)
if m is not None:
tag["genre"] = m.group(1)
continue
m = re.match(r'^$', line)
if m is not None and tag:
tests.append(tag)
tag = {}
return tests
finally:
log.close()
def setUp(self):
download_testsuite()
self.tar = tarfile.open(testsuite_file)
def tearDown(self):
self.tar.close()
def testID3v1Conformance(self):
for test in self.parse_log():
# Fix expected values in test cases 7-8 (junk after string terminator).
if test["id"] in [7, 8]:
for field in ["title", "artist", "album", "comment"]:
test[field] = "12345"
# Fix expected value in test case 12 (strip year field).
if test["id"] == 12:
test["year"] = test["year"].strip(string.whitespace)
# Fix expected genre names in test cases 151 and 155 to de-facto standard values.
if test["id"] == 151:
test["genre"] = '136 (Christian Gangsta Rap)'
if test["id"] == 155:
test["genre"] = '140 (Contemporary Christian)'
filename = 'id3v1/' + test["filename"]
file = self.tar.extractfile(filename)
try:
# Test case 3 contains no valid ID3v1 tag.
if test["id"] == 3:
self.assertRaises(NoTagError, stagger.id3v1.Tag1.read, file)
continue
tag = stagger.id3v1.Tag1.read(file)
for field in ["title", "artist", "album",
"year", "comment", "track", "genre"]:
if field in test:
self.assertEqual(test[field], getattr(tag, field),
"Value mismatch in field " + field
+ " of testcase " + str(test["id"])
+ ": '" + test[field] + "' vs '"
+ getattr(tag, field) + "'")
# Try encoding the tag and comparing binary data
if test["id"] not in [7, 8, 12]:
data = tag.encode()
file.seek(-128, 2)
data2 = file.read(128)
self.assertEqual(data, data2, "Data mismatch in testcase " + str(test["id"]))
finally:
file.close()
suite = unittest.TestLoader().loadTestsFromTestCase(ID3v1TestCase)
if __name__ == "__main__":
warnings.simplefilter("always", stagger.Warning)
unittest.main(defaultTest="suite")
|
nilq/baby-python
|
python
|
import numpy as np

a = np.arange(30).reshape((2, 3, 5))
a[a > 5]  # boolean indexing: returns the elements greater than 5 as a 1-D array
|
nilq/baby-python
|
python
|
import os
import sys
from cseg import cut_file
msr_test = 'corpus/msr_test.utf8'
msr_test_gold = 'corpus/msr_test_gold.utf8'
msr_out = ['output/msr_test_2_add1', 'output/msr_test_2_ad', 'output/msr_test_2_kn', 'output/msr_test_1',
'output/msr_test_2_add1_hmm', 'output/msr_test_2_ad_hmm',
'output/msr_test_2_kn_hmm', 'output/msr_test_1_hmm']
pku_test = 'corpus/pku_test.utf8'
pku_test_gold = 'corpus/pku_test_gold.utf8'
pku_out = ['output/pku_test_2_add1', 'output/pku_test_2_ad', 'output/pku_test_2_kn', 'output/pku_test_1',
'output/pku_test_2_add1_hmm', 'output/pku_test_2_ad_hmm', 'output/pku_test_2_kn_hmm', 'output/pku_test_1_hmm']
weibo_test = 'corpus/nlpcc2016-wordseg-dev.dat'
weibo_test_gold = 'corpus/nlpcc2016-wordseg-dev_gold.dat'
weibo_out = ['output/weibo_test_2_add1', 'output/weibo_test_2_ad', 'output/weibo_test_2_kn', 'output/weibo_test_1',
'output/weibo_test_2_add1_hmm', 'output/weibo_test_2_ad_hmm', 'output/weibo_test_2_kn_hmm', 'output/weibo_test_1_hmm']
tips = ["2-gram, +1平滑:", "2-gram, 绝对减值平滑:", "2-gram, Kneser-Ney平滑:", "1-gram:", "HMM: 2-gram, +1平滑:",
"HMM: 2-gram, 绝对减值平滑:", "HMM: 2-gram, Kneser-Ney平滑:", "HMM: 1-gram:"]
use_hmm = [False, False, False, False, True, True, True, True]
use_2gram = [True, True, True, False, True, True, True, False]
smooth = ['add1', 'abs', 'kneser_ney', '', 'add1', 'abs', 'kneser_ney', '']
tests = {'msr': msr_test, 'pku': pku_test, 'weibo': weibo_test }
test_golds = {'msr': msr_test_gold, 'pku': pku_test_gold, 'weibo': weibo_test_gold }
outs = {'msr': msr_out, 'pku': pku_out, 'weibo': weibo_out }
if __name__ == '__main__':
argv = sys.argv[1:]
if len(argv) < 1:
print('test.py msr|pku|weibo')
sys.exit()
else:
if argv[0] not in ['msr', 'pku', 'weibo']:
print('test.py msr|pku|weibo')
sys.exit()
print("开始切分... ")
test = tests[argv[0]]
test_gold = test_golds[argv[0]]
out = outs[argv[0]]
for i in range(len(out)):
cut_file(test, out[i], use_hmm[i], use_2gram[i], smooth[i])
print("%s 测试结果: " % argv[0])
for i in range(len(out)):
print(tips[i])
os.system("python eval.py %s %s %s" % (test_gold, out[i], out[i]+'_err'))
|
nilq/baby-python
|
python
|
"""
created by ldolin
"""
"""
正则表达式
动机:
1.经常性文本处理
2.文本内容的快速搜索,定位,提取比较复杂
3.产生正则表达式
定义:
正则即是文本的高级匹配模式,提供搜索,替代,查找等功能,
本质是由一系列特殊符号和字符组成的字符串
特点:
1.方便检索和修改文本内容的操作
2.支持多种编程语言
3.灵活多样
目标:
1.能够看懂并编写基本简单的正则表达式
2.能够使用python操作正则表达式
设计原则:
1.正确性:能够正确匹配要求内容
2.唯一性:只要需要的
3.全面性:对目标特征考虑全面
"""
# import re
#
#
# def main():
#     tel = input("Enter a mobile number: ")
#     # ret = re.match(r"1[35678]\d{9}", tel)
#     # Numbers longer than 11 digits would also match, so anchor the pattern:
#     ret = re.match(r"^1[35678]\d{9}$", tel)
#     if ret:
#         print("Match succeeded")
#     else:
#         print("Match failed")
#
#
# if __name__ == "__main__":
#     main()
"""
python操作正则表达式-re模块-处理正则表达式
1.导入模块 import re
元字符:在正则表达式中有一定特殊含义的符号
1.re.findall(pattern.string):
功能:使用正则表达式匹配字符串
参数:
pattern:表达式字符串
string:目标字符串
返回值:返回匹配到的字符串
1.普通字符:
元字符:a b c & #
匹配规则:匹配字符本身
"""
import re
# s = 'abcdefg'
# str1 = re.findall('abc', s)
# print(str1)
# s1 = '你们好'
# str2 = re.findall('你好', s1)
# print(str2)
"""
2.或
元字符:|
匹配规则:匹配|两边任意一个正则表达式
注意:1.竖线的两边不要有空格
2.匹配过的不会再匹配
"""
# s3 = '你好你们好'
# str3 = re.findall('你好|你们好', s3)
# print(str3)
"""
3.匹配单一字符
元字符:.
匹配规则:匹配除了\n以外任意的字符
比如:
a.c --> abc adc a@c a!c
"""
# s3 = 'abc adc a@c a!c'
# str3 = re.findall('a.c', s3)
# print(str3)
"""
4.匹配开始位置
元字符:^
匹配规则:匹配一个字符的开始位置
"""
# s4 = 'hello python'
# str5 = re.findall('^hello', s4)
# print(str5)
"""
5.匹配结尾位置
元字符:$
匹配规则:匹配字符串的结尾位置
"""
# s5 = 'regx_demo.py'
# str6 = re.findall('py$', s5)
# print(str6)
"""
6.匹配重复字符
元字符:*
匹配规则:匹配前面的正则表达式,重复0到多次
"""
# s5 = 'abababcdfghacbca'
# str6 = re.findall('ab*', s5)
# print(str6)
"""
7.匹配重复
元字符:+
匹配规则:匹配前面的正则表达式,重复1到多次
"""
# s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbb'
# str6 = re.findall('a+b+', s5)
# print(str6)
"""
8.匹配重复
元字符:?
匹配规则:匹配前面出现的元字符,重复0或1次
"""
# s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbb'
# str6 = re.findall('ab?', s5)
# print(str6)
"""
9.匹配重复
元字符:{n}
匹配规则:匹配前面的正则表达式n次
"""
# s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbbab'
# str6 = re.findall('ab{3}', s5)  # b repeated 3 times
# print(str6)
"""
10.匹配重复
元字符:{m,n}
匹配规则:匹配前面的正则表达式出现m到n次
"""
# s5 = 'abbbbbbbbbbbbdecbcccaaaaaabbbbbbabbbb'
# str6 = re.findall('ab{3,15}', s5)  # b repeated 3-15 times, a once
# print(str6)
"""
11.匹配字符集合
元字符:[字符集]
匹配规则:匹配括号内任意一个字符
"""
# s5 = 'abcd 123456'
# str6 = re.findall('[ab12]', s5)
# print(str6)
"""
12.匹配字符集合
元字符:[^字符集]
匹配规则:匹配除了字符集中任意一个字符
"""
# s5 = 'abcd 1 2 3 4 5 6'
# str6 = re.findall('[^ab 12]', s5)
# print(str6)
"""
13.匹配任意数字字符(非数字)
元字符:\d \D
匹配规则:
\d : 匹配任意数字字符[0-9]
\D : 匹配任意非数字字符[^0-9]
"""
# s5 = '13789721034'
# str6 = re.findall('^1[3567]\d{9}$', s5)
# print(str6)
"""
14.匹配任意普通字符---数字字母下划线
元字符:\w \W
匹配规则:
\w : 匹配任意一个普通字符 [_0-9a-zA-Z]
\W : 匹配任意一个非普通字符 [^_0-9a-zA-Z]
"""
# s5 = 'hello 中国 @ $'
# str6 = re.findall('\W+', s5)
# print(str6)
"""
15.匹配空与非空字符---空格 \r \t \n \0
元字符:\s \S
匹配规则:
\s : 匹配任意空字符
\S : 匹配任意非空字符
"""
# s5 = 'hello python'
# str6 = re.findall('\s+', s5)
# str7 = re.findall('\S+', s5)
# print(str6)
# print(str7)
"""
元字符:
1.匹配单个字符:a . \d \D \w \W \s \S
[] [^]
匹配重复:* + ? {n} {m,n}
匹配位置:^ $
其他:|
如果匹配?咋匹配
可使用\进行转义,也可在表达式前加小写r,代表原生字符,不进行转义
"""
# s5 = 'hello python?你\好'
# str6 = re.findall('\?', s5)
# str7 = re.findall(r'\\', s5)
# print(str6)
# print(str7)
"""
贪婪与非贪婪:
贪婪模式:
正则表达式在匹配正确的情况下,尽可能多的向后匹配
如:* + ? {m,n}
非贪婪模式:
正则表达式在匹配正确的情况下,尽可能少的向后匹配
贪婪转非贪婪
如:*? +? ?? {m,n}?
"""
# s5 = 'abbbbcd 1 2 3 4 5 6'
# str6 = re.findall('ab+?', s5)
# print(str6)
"""
2.re.split:
格式:re.split(pattern, string)
功能:通过正则表达式分割字符串
参数:
pattern:正则表达式
string:目标字符串
返回分割后的字符串
"""
# s5 = 'abbbbcd 1 2 3 4 5 6'
# print(re.split('\s', s5))
# print(re.split('[ ]', s5))
# print(re.split(' ', s5))
"""
3.re.sub:
格式:re.sub(pattern, replaceStr, string, max)
替换正则匹配到的内容
参数:
pattern:正则
replaceStr:要换入的新的内容
string:目标字符串
max:最多替换几处
"""
# s5 = 'abbbbcd,1,2,3,4,5,6'
# a = re.sub('[,]', '#', s5, 2)
# b = re.sub('[,]', '?', a, 4)
# print(b)
"""
4.re.match:
匹配目标字符串开头
格式:re.match(pattern, string)
参数:
pattern:正则
string:目标字符串
返回值:返回匹配到的内容
注意:取值需要通过group(1)来取值,取第一个,如果失败返回None,则不能返回match对象的属性
"""
# s = """hello,python
# hello,java
# hello,c++
# """
# str1 = re.match('hello', s).group()
# print(str1)
"""
5.re.search(pattern, string)
功能:匹配目标字符串,只能到第一处
"""
s = 'AabbbbAc'
b = re.findall('[A-Z][a-z]*', s)
print(b)
s1 = 'a0,1,2,3,4,5,6,9,88s'
b1 = re.findall('[1-98]{1,2}', s1)
print(b1)
s2 = 'a01234569sAss_sssssssssssssss'
s3 = re.search('[a-zA-Z0-9_]{8,20}', s2).group()
print(s3)
s4 = 'python = 9999, c = 7890, c++ = 12345'
s5 = re.findall('[0-9]{4,5}', s4)
print(s5)
s6 = '阅读次数为 9999'
s7 = re.search(r'\d+', s6).group()
print(s7)
s8 = 'python = 997'
s9 = re.sub('[0-9]{3}', '998', s8, 1)
print(s9)
s10 = 'info:xiaozhang 33 shandong'
s11 = re.split('[: ]', s10)
s14 = re.split('\W', s10)
print(s11)
print(s14)
# search() and match() return match objects (use .group() for the string); sub() returns a string
# findall() and split() return lists
import random
L = []
random.shuffle(L)
# a = re.findall('<title>(.*)</title>', html)
|
nilq/baby-python
|
python
|
from datetime import timezone, timedelta, datetime, date, time
import databases
import pytest
import sqlalchemy
import ormar
from tests.settings import DATABASE_URL
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
class DateFieldsModel(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
created_date: datetime = ormar.DateTime(
default=datetime.now(tz=timezone(timedelta(hours=3))), timezone=True
)
updated_date: datetime = ormar.DateTime(
default=datetime.now(tz=timezone(timedelta(hours=3))),
name="modification_date",
timezone=True,
)
class SampleModel(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
updated_at: datetime = ormar.DateTime()
class TimeModel(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
elapsed: time = ormar.Time()
class DateModel(ormar.Model):
class Meta:
database = database
metadata = metadata
id: int = ormar.Integer(primary_key=True)
creation_date: date = ormar.Date()
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.drop_all(engine)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
@pytest.mark.asyncio
async def test_model_crud_with_timezone():
async with database:
datemodel = await DateFieldsModel().save()
assert datemodel.created_date is not None
assert datemodel.updated_date is not None
@pytest.mark.asyncio
async def test_query_with_datetime_in_filter():
async with database:
creation_dt = datetime(2021, 5, 18, 0, 0, 0, 0)
sample = await SampleModel.objects.create(updated_at=creation_dt)
current_dt = datetime(2021, 5, 19, 0, 0, 0, 0)
outdated_samples = await SampleModel.objects.filter(
updated_at__lt=current_dt
).all()
assert outdated_samples[0] == sample
@pytest.mark.asyncio
async def test_query_with_time_in_filter():
async with database:
sample = await TimeModel.objects.create(elapsed=time(0, 20, 20))
await TimeModel.objects.create(elapsed=time(0, 12, 0))
await TimeModel.objects.create(elapsed=time(0, 19, 55))
sample4 = await TimeModel.objects.create(elapsed=time(0, 21, 15))
threshold = time(0, 20, 0)
samples = await TimeModel.objects.filter(TimeModel.elapsed >= threshold).all()
assert len(samples) == 2
assert samples[0] == sample
assert samples[1] == sample4
@pytest.mark.asyncio
async def test_query_with_date_in_filter():
async with database:
await DateModel.objects.create(creation_date=date(2021, 5, 18))
sample2 = await DateModel.objects.create(creation_date=date(2021, 5, 19))
sample3 = await DateModel.objects.create(creation_date=date(2021, 5, 20))
outdated_samples = await DateModel.objects.filter(
creation_date__in=[date(2021, 5, 19), date(2021, 5, 20)]
).all()
assert len(outdated_samples) == 2
assert outdated_samples[0] == sample2
assert outdated_samples[1] == sample3
|
nilq/baby-python
|
python
|
import json
import os
from pathlib import Path
import shutil
from appdirs import user_data_dir
from elpis.engines.common.objects.fsobject import FSObject
from elpis.engines.common.utilities import hasher
from elpis.engines.common.utilities.logger import Logger
from elpis.engines.common.errors import InterfaceError
from elpis.engines.common.objects.dataset import Dataset
from elpis.engines.common.objects.pron_dict import PronDict
class Interface(FSObject):
_config_file = 'interface.json'
def __init__(self, path: Path = None, use_existing=False):
"""
:param Boolean use_existing: If this flag is enabled and an interface
already exists at the specified ``path``, then load the interface
at the ``path``. When ``path`` is not specified or if the
interface is not at the ``path``, then a new interface is created.
"""
path_was_none = False
if path is None:
path_was_none = True
name = hasher.new()
parent_path = Path(user_data_dir('elpis')).joinpath('interfaces')
path = parent_path.joinpath(name)
# super().__init__(
# parent_path=Path(user_data_dir('elpis')),
# dir_name=name,
# pre_allocated_hash=name,
# name=name
# )
path = Path(path).absolute()
# === Check if the existing interface is valid ===================
# If any of the below nested if-statements fail, the existing (if
# it exists) interface is not valid. In that case, wipe the
# path directory and start a new interface directory.
class InvalidInterfaceError(Exception):
pass
config_file_path = path.joinpath(Interface._config_file)
try:
if (use_existing is True
and path.exists()
and path.is_dir()
and config_file_path.exists()
and config_file_path.is_file()):
# a valid interface exists. (this is a shallow check)
pass
else:
raise InvalidInterfaceError
# === Create a new interface object ==============================
except InvalidInterfaceError:
# Must wipe the interface and make a new one
if path.exists():
# Tempted to use shutil.rmtree? It breaks if we have mounted /state from
# local filesystem into the docker container.
# Error is "Device or resource busy: '/state'"
# We need to keep the dir and delete the contents...
for root, subdirectories, files in os.walk(path):
for file_ in files:
os.unlink(os.path.join(root, file_))
for directory in subdirectories:
shutil.rmtree(os.path.join(root, directory))
super().__init__(
parent_path=path.parent,
dir_name=path.name,
pre_allocated_hash=(path.name if path_was_none else None),
name=(path.name if path_was_none else None)
)
self.config['loggers'] = []
self.config['datasets'] = {}
self.config['pron_dicts'] = {}
self.config['models'] = {}
self.config['transcriptions'] = {}
# === Use existing interface object ==============================
else:
# Create a new interface without wiping the directory.
# Uses existing _config_file.
super().__init__(
parent_path=path.parent,
dir_name=path.name
)
# ensure object directories exist
self.datasets_path = self.path.joinpath('datasets')
self.datasets_path.mkdir(parents=True, exist_ok=True)
self.pron_dicts_path = self.path.joinpath('pron_dicts')
self.pron_dicts_path.mkdir(parents=True, exist_ok=True)
self.models_path = self.path.joinpath('models')
self.models_path.mkdir(parents=True, exist_ok=True)
self.loggers_path = self.path.joinpath('loggers')
self.loggers_path.mkdir(parents=True, exist_ok=True)
self.transcriptions_path = self.path.joinpath('transcriptions')
# config objects
self.loggers = []
self.datasets = {}
self.pron_dicts = {}
self.models = {}
self.transcriptions = {}
# make a default logger
self.new_logger(default=True)
# set during runtime
self.engine = None
@classmethod
def load(cls, base_path: Path):
self = super().load(base_path)
self.datasets_path = self.path.joinpath('datasets')
self.datasets_path.mkdir(parents=True, exist_ok=True)
self.pron_dicts_path = self.path.joinpath('pron_dicts')
self.pron_dicts_path.mkdir(parents=True, exist_ok=True)
self.models_path = self.path.joinpath('models')
self.models_path.mkdir(parents=True, exist_ok=True)
self.loggers_path = self.path.joinpath('loggers')
self.loggers_path.mkdir(parents=True, exist_ok=True)
self.transcriptions_path = self.path.joinpath('transcriptions')
# config objects
self.loggers = []
self.datasets = {}
self.pron_dicts = {}
self.models = {}
self.transcriptions = {}
return self
def new_logger(self, default=False):
logger = Logger(self.loggers_path)
self.config['loggers'] += [logger.hash]
if default:
self.logger = logger
return logger
def new_dataset(self, dsname):
existing_names = self.list_datasets()
if dsname in self.config['datasets'].keys():
raise InterfaceError(
f'Tried adding \'{dsname}\' which is already in {existing_names} with hash {self.config["datasets"][dsname]}.',
human_message=f'Dataset with name "{dsname}" already exists'
)
ds = Dataset(parent_path=self.datasets_path, name=dsname)
datasets = self.config['datasets']
datasets[dsname] = ds.hash
self.config['datasets'] = datasets
return ds
def get_dataset(self, dsname):
if dsname not in self.list_datasets():
raise InterfaceError(f'Tried to load a dataset called "{dsname}" that does not exist')
hash_dir = self.config['datasets'][dsname]
return Dataset.load(self.datasets_path.joinpath(hash_dir))
def list_datasets(self):
names = [name for name in self.config['datasets'].keys()]
return names
def new_pron_dict(self, pdname):
existing_names = self.list_pron_dicts()
if pdname in self.config['pron_dicts'].keys():
raise InterfaceError(
f'Tried adding \'{pdname}\' which is already in {existing_names} with hash {self.config["pron_dicts"][pdname]}.',
human_message=f'Pronunciation dictionary with name "{pdname}" already exists'
)
pd = PronDict(parent_path=self.pron_dicts_path, name=pdname)
pron_dicts = self.config['pron_dicts']
pron_dicts[pdname] = pd.hash
self.config['pron_dicts'] = pron_dicts
return pd
def get_pron_dict(self, pdname):
if pdname not in self.list_pron_dicts():
raise InterfaceError(f'Tried to load a pron dict called "{pdname}" that does not exist')
hash_dir = self.config['pron_dicts'][pdname]
pd = PronDict.load(self.pron_dicts_path.joinpath(hash_dir))
pd.dataset = self.get_dataset(pd.config['dataset_name'])
return pd
def list_pron_dicts(self):
names = [name for name in self.config['pron_dicts'].keys()]
return names
def list_pron_dicts_verbose(self):
pron_dicts = []
names = [name for name in self.config['pron_dicts'].keys()]
for name in names:
pd = self.get_pron_dict(name)
pron_dicts.append({"name": name, "dataset_name": pd.dataset.name})
return pron_dicts
def new_model(self, mname):
if self.engine is None:
raise RuntimeError("Engine must be set before model creation")
existing_names = self.list_models()
if mname in self.config['models'].keys():
raise InterfaceError(
f'Tried adding \'{mname}\' which is already in {existing_names} with hash {self.config["models"][mname]}.',
human_message=f'Model with name "{mname}" already exists'
)
m = self.engine.model(parent_path=self.models_path, name=mname)
models = self.config['models']
models[mname] = m.hash
self.config['models'] = models
return m
def get_model(self, mname):
if self.engine is None:
raise RuntimeError("Engine must be set to get a model")
if mname not in self.list_models():
raise InterfaceError(f'Tried to load a model called "{mname}" that does not exist')
hash_dir = self.config['models'][mname]
m = self.engine.model.load(self.models_path.joinpath(hash_dir))
m.dataset = self.get_dataset(m.config['dataset_name'])
if m.config['pron_dict_name'] is not None:
m.pron_dict = self.get_pron_dict(m.config['pron_dict_name'])
return m
def list_models(self):
models = []
for hash_dir in os.listdir(f'{self.models_path}'):
if not hash_dir.startswith('.'):
with self.models_path.joinpath(hash_dir, "model.json").open() as fin:
name = json.load(fin)['name']
models.append(name)
return models
def list_models_verbose(self):
models = []
for hash_dir in os.listdir(f'{self.models_path}'):
if not hash_dir.startswith('.'):
config_file_path = self.models_path.joinpath(hash_dir, "model.json")
if os.path.isfile(config_file_path):
with config_file_path.open() as model_config_file:
model = json.load(model_config_file)
model_info = {
'name': model['name'],
'dataset_name': model['dataset_name'],
'engine_name': model['engine_name'],
'pron_dict_name': model['pron_dict_name'],
'status': model['status'],
'results': model['results']
}
models.append(model_info)
return models
def new_transcription(self, tname):
if self.engine is None:
raise RuntimeError("Engine must be set prior to transcription")
print("{}".format(self.engine))
t = self.engine.transcription(parent_path=self.transcriptions_path, name=tname)
transcriptions = self.config['transcriptions']
transcriptions[tname] = t.hash
self.config['transcriptions'] = transcriptions
return t
def get_transcription(self, tname):
if tname not in self.list_transcriptions():
raise InterfaceError(f'Tried to load a transcription called "{tname}" that does not exist')
hash_dir = self.config['transcriptions'][tname]
t = self.engine.transcription.load(self.transcriptions_path.joinpath(hash_dir))
t.model = self.get_model(t.config['model_name'])
return t
def list_transcriptions(self):
if self.engine is None:
raise RuntimeError("Engine must be set to list transcriptions")
names = []
if not Path(f'{self.transcriptions_path}').exists():
return names # no directory -> no items in list
for hash_dir in os.listdir(f'{self.transcriptions_path}'):
if not hash_dir.startswith('.'):
with self.transcriptions_path.joinpath(
hash_dir, self.engine.transcription._config_file).open() as fin:
name = json.load(fin)['name']
names.append(name)
return names
def set_engine(self, engine):
self.engine = engine
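if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the module): create or
    # load an interface in the default location and register a hypothetical
    # dataset. new_dataset() raises InterfaceError if the name already exists.
    interface = Interface(use_existing=True)
    dataset = interface.new_dataset('demo-dataset')
    print(interface.list_datasets())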
|
nilq/baby-python
|
python
|
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader, sampler
import h5py
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from itertools import cycle
import seaborn as sns
from matplotlib.colors import ListedColormap
import matplotlib as mpl
from matplotlib.font_manager import FontProperties
class HDF5Dataset(Dataset):
"""
Args:
h5data_path(str): path of h5 file
train(boolean): whether use train data or not
transform(optional)
"""
def __init__(self, h5data_path,train=True, transform=None):
self.h5data = h5py.File(h5data_path,'r')
self.transform = transform
self.train = train
self.train_x = np.array(self.h5data["train_in_seq"])
self.train_y = np.array(self.h5data["train_out"])
self.valid_x = np.array(self.h5data["valid_in_seq"])
self.valid_y = np.array(self.h5data["valid_out"])
def __getitem__(self, index):
if self.train:
x = self.train_x[index,...]
y = self.train_y[index,...]
else:
x = self.valid_x[index,...]
y = self.valid_y[index,...]
if self.transform:
x = self.transform(x)
else:
x = torch.from_numpy(x)
y = torch.from_numpy(y)
# convert datatype
x = x.type('torch.cuda.FloatTensor')
y = y.type('torch.cuda.FloatTensor')
return (x, y)
def __len__(self):
if self.train:
return self.train_x.shape[0]
else:
return self.valid_x.shape[0]
# test purpose
# def __len__(self):
# return 200
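# Illustrative helper (not part of the original module): wrap HDF5Dataset in a
# DataLoader. The h5 path is a hypothetical placeholder and the file must
# contain the train_in_seq / train_out datasets expected by HDF5Dataset.
def _demo_hdf5_loader(h5data_path='data/train.h5', batch_size=64):
    dataset = HDF5Dataset(h5data_path, train=True)
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)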
class RMdata(Dataset):
def __init__(self, data_path, use_embedding,length,mode):
"""
Inputs:
mode: train, valid, test
"""
self.data_path = data_path
self.mode = mode
self.use_embedding = use_embedding
self.radius = length // 2
if self.mode == 'train':
self.train_x = pd.read_hdf(self.data_path,'train_in')
self.train_y = pd.read_hdf(self.data_path,'train_out').to_numpy()
self.valid_x = pd.read_hdf(self.data_path,'valid_in')
self.valid_y = pd.read_hdf(self.data_path,'valid_out').to_numpy()
if self.use_embedding:
print('Using pre-trained embeddings!'+'-' * 60)
self.train_x = pd.read_hdf(self.data_path,'train_in_3_mers')
self.valid_x = pd.read_hdf(self.data_path,'valid_in_3_mers')
total_length = self.train_x.shape[1]
middle_index = total_length // 2
self.train_x = self.train_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy()
# print(self.train_x.shape[1])
self.valid_x = self.valid_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy()
else:
                # crop the one-hot encoded sequence
total_length = self.train_x.shape[1]
middle_index = total_length // 2
# print(middle_index)
self.train_x = self.train_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy()
self.valid_x = self.valid_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy()
else:
self.valid_x = pd.read_hdf(self.data_path,'valid_in')
self.valid_y = pd.read_hdf(self.data_path,'valid_out').to_numpy()
self.test_x = pd.read_hdf(self.data_path,'test_in')
self.test_y = pd.read_hdf(self.data_path,'test_out').to_numpy()
if self.use_embedding:
self.valid_x = pd.read_hdf(self.data_path,'valid_in_3_mers')
self.test_x = pd.read_hdf(self.data_path,'test_in_3_mers')
                # crop the sequence according to its length
total_length = self.valid_x.shape[1]
middle_index = total_length // 2
# print(self.train_x.shape[1])
self.valid_x = self.valid_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy()
self.test_x = self.test_x.iloc[:,middle_index-self.radius+1:middle_index+self.radius-1+1].to_numpy()
else:
                # crop the one-hot encoded sequence
total_length = self.valid_x.shape[1]
middle_index = total_length // 2
# print(middle_index)
self.valid_x = self.valid_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy()
# print(self.train_x.shape[1])
self.test_x = self.test_x.iloc[:,2000-self.radius*4:2004+self.radius*4].to_numpy()
self.class_name = list(pd.read_hdf(self.data_path,'test_out').columns)
def __getitem__(self,index):
if self.mode == 'train':
x = self.train_x[index,...]
y = self.train_y[index,...]
elif self.mode == 'valid':
x = self.valid_x[index,...]
y = self.valid_y[index,...]
elif self.mode == 'test':
x = self.test_x[index,...]
y = self.test_y[index,...]
x = torch.from_numpy(x)
y = torch.from_numpy(y)
x = x.type('torch.cuda.FloatTensor')
y = y.type('torch.cuda.FloatTensor')
return (x, y)
def __len__(self):
if self.mode == 'train':
return self.train_x.shape[0]
elif self.mode == 'valid':
return self.valid_x.shape[0]
elif self.mode == 'test':
return self.test_x.shape[0]
def load_RM_data(path,batch_size,length,use_embedding,balanced_sampler=False):
train = RMdata(path,use_embedding=use_embedding,
length= length,mode='train')
valid = RMdata(path,use_embedding=use_embedding,
length=length, mode='valid')
if not balanced_sampler:
train_loader = DataLoader(dataset=train,batch_size=batch_size,shuffle=True)
else:
weights_train = make_weights_for_balanced_classes(train)
# weights_valid = make_weights_for_balanced_classes(valid)
weights_train = torch.cuda.DoubleTensor(weights_train)
# weights_valid = torch.cuda.DoubleTensor(weights_valid)
sampler_train = sampler.WeightedRandomSampler(weights_train, len(weights_train))
# sampler_valid = sampler.WeightedRandomSampler(weights_valid, len(weights_valid))
train_loader = DataLoader(dataset=train,batch_size=batch_size,sampler=sampler_train)
# valid_loader = DataLoader(dataset=valid,batch_size=batch_size,sampler=sampler_valid)
valid_loader = DataLoader(dataset=valid,batch_size=batch_size,shuffle=True)
return train_loader, valid_loader
def make_weights_for_balanced_classes(dataset):
X, y = dataset[:]
num_examples = len(y)
nclasses = len(y[1]) + 1
count = np.zeros(nclasses)
y = y.cpu().numpy()
for i in range(num_examples):
count[np.concatenate([np.squeeze(y[i,:]),np.array([0])])==1] += 1
# negative class weight
count[-1] = num_examples - np.sum([count[i] for i in range(nclasses)])
weight_per_class = np.zeros(nclasses)
N = float(sum(count))
for i in range(nclasses):
weight_per_class[i] = N/float(count[i])
weight = [0] * num_examples
for i in range(num_examples):
if not list(np.squeeze(y[i,:])) == list(np.zeros(len(y[1]))):
weight[i] = np.mean(weight_per_class[np.concatenate([np.squeeze(y[i,:]),np.array([0])])==1])
else:
# negative cases
weight[i] = weight_per_class[-1]
return weight
def cal_precision(y_true, y_pred,eps=1e-7):
true_positives = torch.sum(torch.round(torch.clamp(y_true * y_pred, 0, 1)))
predicted_positives = torch.sum(torch.round(torch.clamp(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + eps)
return precision
def cal_recall(y_true, y_pred,eps=1e-7):
true_positives = torch.sum(torch.round(torch.clamp(y_true * y_pred, 0, 1)))
possible_positives = torch.sum(torch.round(torch.clamp(y_true, 0, 1)))
recall = true_positives / (possible_positives + eps)
return recall
def cal_accuary(y_true, y_pred):
acc = torch.mean((torch.round(torch.clamp(y_pred,0,1))==y_true).type('torch.cuda.FloatTensor'))
return acc
def precision_multi(y_true,y_pred):
"""
Input: y_true, y_pred with shape: [n_samples, n_classes]
Output: example-based precision
"""
n_samples = y_true.shape[0]
result = 0
for i in range(n_samples):
if not (y_pred[i] == 0).all():
true_posi = y_true[i] * y_pred[i]
n_true_posi = np.sum(true_posi)
n_pred_posi = np.sum(y_pred[i])
result += n_true_posi / n_pred_posi
return result / n_samples
def recall_multi(y_true,y_pred):
"""
Input: y_true, y_pred with shape: [n_samples, n_classes]
Output: example-based recall
"""
n_samples = y_true.shape[0]
result = 0
for i in range(n_samples):
if not (y_true[i] == 0).all():
true_posi = y_true[i] * y_pred[i]
n_true_posi = np.sum(true_posi)
n_ground_true = np.sum(y_true[i])
result += n_true_posi / n_ground_true
return result / n_samples
def f1_multi(y_true,y_pred):
"""
Input: y_true, y_pred with shape: [n_samples, n_classes]
    Output: example-based F1 score
"""
n_samples = y_true.shape[0]
result = 0
for i in range(n_samples):
if not ((y_true[i] == 0).all() and (y_pred[i] == 0).all()):
true_posi = y_true[i] * y_pred[i]
n_true_posi = np.sum(true_posi)
n_ground_true = np.sum(y_true[i])
n_pred_posi = np.sum(y_pred[i])
f1 = 2*(n_true_posi) / (n_ground_true+n_pred_posi)
result += f1
return result / n_samples
def hamming_loss(y_true,y_pred):
"""
Input: y_true, y_pred with shape: [n_samples, n_classes]
Output: hamming loss
"""
n_samples = y_true.shape[0]
n_classes = y_true.shape[1]
loss = 0
for i in range(n_samples):
xor = np.sum((y_true[i] + y_pred[i]) % 2)
loss += xor / n_classes
return loss / n_samples
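# Illustrative only (not part of the original module): the example-based
# metrics above on a toy 3-sample / 3-class prediction.
def _demo_multilabel_metrics():
    y_true = np.array([[1, 0, 1], [0, 1, 0], [1, 1, 0]])
    y_pred = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 1]])
    print('precision:', precision_multi(y_true, y_pred))  # ~0.833
    print('recall:   ', recall_multi(y_true, y_pred))     # ~0.667
    print('f1:       ', f1_multi(y_true, y_pred))         # ~0.722
    print('hamming:  ', hamming_loss(y_true, y_pred))     # ~0.333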
def cal_metrics(model_out,label,plot=False,class_names=None,plot_name=None):
"""
Inputs:
        class_names: class labels (used for plotting)
"""
from sklearn.metrics import recall_score,precision_score,roc_auc_score,roc_curve, average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
num_task = len(model_out)
# threshold_list = [0.5 for i in range(num_task)] # thresholds standard
threshold_list = [0.002887,0.004897,0.001442,0.010347,0.036834,0.028677,
0.009135,0.095019,0.001394,0.007883,0.113931,0.125591] # thresholds for multirm #
# threshold_list = [0.004554,0.014769,0.005969,0.043316,0.076438,0.091157,
# 0.121174,0.175164,0.006239,0.001260,0.051128,0.255274] # thresholds for hmm
# threshold_list = [0.007389,0.050478,0.046165,0.068021,0.088967,0.150652, # thresholds for CNN+Lstm
# 0.080001,0.317348,0.003866,0.013430,0.090117,0.256765]
metrics = {'recall':[],'precision':[],'accuracy':[],'auc':[],'auc_2':[],
'sn':[],'sp':[],'acc_2':[],'mcc':[], 'ap':[], 'ap_2':[]}
# auc_2: auc across all samples
# auc: auc across one single class
metrics_avg = {'recall':0, 'precision':0,'accuracy':0,'auc':0,'auc_2':0}
# Compute ROC curve and ROC area for each class
fpr,tpr = dict(), dict()
fpr_2,tpr_2 = dict(), dict()
precisions, recalls = dict(), dict()
precisions_m, recalls_m = dict(), dict()
label = label.cpu().numpy()
Y_pred = np.zeros(label.shape)
for i in range(num_task):
y_true = label[:,i]
y_pred = torch.clamp(model_out[i].cpu().detach(),0,1).numpy()
y_pred = np.array([0 if instance < threshold_list[i] else 1 for instance in list(y_pred)])
Y_pred[:,i] = y_pred
y_score = model_out[i].cpu().detach().numpy()
# if i==0:
# print(y_pred[y_true==1])
# recall = recall_score(y_true,y_pred,zero_division=1)
# precision = precision_score(y_true,y_pred,zero_division=1)
acc = np.mean(y_true==y_pred)
# handle one_class problem
# test binary auc
auc = roc_auc_score(y_true[i*100:(i+1)*100],y_score[i*100:(i+1)*100])
# test binary ap
ap = average_precision_score(y_true[i*100:(i+1)*100],y_score[i*100:(i+1)*100])
# test multiclass auc
auc_2 = roc_auc_score(y_true,y_score)
# test multi ap
ap_2 = average_precision_score(y_true,y_score)
fpr[i], tpr[i], thresholds = roc_curve(y_true[i*100:(i+1)*100], y_score[i*100:(i+1)*100])
fpr_2[i], tpr_2[i], thresholds_2 = roc_curve(y_true, y_score)
precisions[i], recalls[i], _ = precision_recall_curve(y_true[i*100:(i+1)*100], y_score[i*100:(i+1)*100])
precisions_m[i], recalls_m[i], _ = precision_recall_curve(y_true, y_score)
gmeans = np.sqrt(tpr_2[i] * (1-fpr_2[i]))
# locate the index of the largest g-mean
ix = np.argmax(gmeans)
print('Best Threshold=%f, G-Mean=%.3f' % (thresholds_2[ix], gmeans[ix]))
best_threshold = thresholds_2[ix]
y_pred_new = np.array([0 if instance < best_threshold else 1 for instance in list(y_score)])
# binary based confusion_matrix
# tn, fp, fn, tp = confusion_matrix(y_true[i*100:(i+1)*100], y_pred_new[i*100:(i+1)*100]).ravel()
# multiclass based confusion_matrix
tn, fp, fn, tp = confusion_matrix(y_true, y_pred_new).ravel()
pp = tp+fn
pn = tn+fp
sensitivity = tp / pp
specificity = tn / pn
recall = sensitivity
precision = tp / (tp + fp)
acc_2 = (tp+tn) / (pp+pn)
# mcc = acc_2 / np.sqrt((1+(fp-fn)/pp)*(1+(fn-fp)/pn))
mcc = ((tp*tn)-(fp*fn))/np.sqrt((tp+fp)*(tp+fn)*(tn+fp)*(tn+fn))
# update dictionary
metrics['auc_2'].append(auc_2)
metrics['sn'].append(sensitivity)
metrics['sp'].append(specificity)
metrics['acc_2'].append(acc_2)
metrics['mcc'].append(mcc)
metrics['ap'].append(ap)
metrics['ap_2'].append(ap_2)
metrics['recall'].append(recall)
metrics['precision'].append(precision)
metrics['accuracy'].append(acc)
metrics['auc'].append(auc)
metrics_avg['recall'] += recall
metrics_avg['precision'] += precision
metrics_avg['accuracy'] += acc
# metrics_avg['auc'] += auc
precision_multi_ = precision_multi(label,Y_pred)
recall_multi_ = recall_multi(label,Y_pred)
f1_multi_ = f1_multi(label,Y_pred)
hamming_loss_ = hamming_loss(label,Y_pred)
print("precision multi: %f"%(precision_multi_))
print("recall multi: %f"%(recall_multi_))
print("f1 multi: %f"%(f1_multi_))
print("hamming loss: %f"%(hamming_loss_))
metrics_avg['recall'] /= num_task
metrics_avg['precision'] /= num_task
metrics_avg['accuracy'] /= num_task
# metrics_avg['auc'] /= num_task
if plot:
# define colors
colors = [(39,64,139),(0,128,128),(31, 119, 180), (44, 160, 44), (152, 223, 138), (174, 199, 232),
(255, 127, 14), (255, 187, 120),(214, 39, 40), (255, 152, 150), (148, 103, 189), (197, 176, 213)]
for i in range(len(colors)):
r, g, b = colors[i]
colors[i] = (r / 255., g / 255., b / 255.)
# modifying parameters for plot
        from math import sqrt
        import matplotlib as mpl  # assumed not already imported at module level
        import matplotlib.pyplot as plt
        golden_mean = (sqrt(5)-1.0)/2.0  # golden ratio, used to set the figure aspect
fig_width = 6 # fig width in inches
fig_height = fig_width*golden_mean # fig height in inches
mpl.rcParams['axes.labelsize'] = 10
mpl.rcParams['axes.titlesize'] = 10
mpl.rcParams['font.size'] = 10
mpl.rcParams['legend.fontsize'] = 10
mpl.rcParams['xtick.labelsize'] = 8
mpl.rcParams['ytick.labelsize'] = 8
mpl.rcParams['text.usetex'] = False
mpl.rcParams['font.family'] = 'serif'
# params = {'axes.labelsize': 10, # fontsize for x and y labels (was 10)
# 'axes.titlesize': 10,
# 'font.size': 10,
# 'legend.fontsize': 10,
# 'xtick.labelsize': 8,
# 'ytick.labelsize': 8,
# 'text.usetex': False,
# 'font.family': 'serif'
# }
lw = 2
#fig, axes = plt.subplots(nrows=1,ncols=2,figsize=(13,4),gridspec_kw={'width_ratios': [1, 2.2]})
# roc curve
fig, axes = plt.subplots(nrows=1,ncols=2,figsize=(fig_width*2+0.7,fig_height+0.1))
# PR curve
fig_2, axes_2 = plt.subplots(nrows=1,ncols=2,figsize=(fig_width*2+0.7,fig_height+0.1))
# matplotlib.rcParams.update(params)
# set color palettes
for i, class_name in zip(range(num_task), class_names):
axes[0].plot(fpr[i], tpr[i], color=colors[i],lw=lw)
axes[0].plot([0, 1], [0, 1], 'k--', lw=lw)
axes[0].set_xlim([0.0, 1.0])
axes[0].set_ylim([0.0, 1.0])
axes[0].tick_params(axis='x',which='both',top=False)
axes[0].tick_params(axis='y',which='both',right=False)
axes[0].set_aspect('equal', adjustable='box')
axes[0].set_xlabel('False Positive Rate')
axes[0].set_ylabel('True Positive Rate')
axes[0].set_title('ROC curves (binary)')
axes_2[0].plot(recalls[i], precisions[i], color=colors[i],lw=lw)
axes_2[0].plot([0, 1], [0.5, 0.5], 'k--', lw=lw)
axes_2[0].set_xlim([0.0, 1.0])
axes_2[0].set_ylim([0.45, 1.0])
axes_2[0].tick_params(axis='x',which='both',top=False)
axes_2[0].tick_params(axis='y',which='both',right=False)
xmin, xmax = axes_2[0].get_xlim()
ymin, ymax = axes_2[0].get_ylim()
axes_2[0].set_aspect(abs((xmax-xmin)/(ymax-ymin)), adjustable='box')
axes_2[0].set_xlabel('Recall')
axes_2[0].set_ylabel('Precision')
axes_2[0].set_title('PR curves (binary)')
if class_name == 'Atol':
class_name = 'A-to-I'
elif class_name == 'hPsi':
class_name = 'Psi'
elif class_name[-1] == 'm':
class_name = class_name[1:]
else:
# tmp = class_name[2:]
# num = class_name[1]
# class_name = 'm^{%s}%s'%(num,tmp)
class_name = class_name[1:]
axes[1].plot(fpr_2[i], tpr_2[i], color=colors[i],lw=lw,
label ='%s ($AUC_{b}$ = %.2f, $AUC_{m}$ = %.2f)'%(class_name,
metrics['auc'][i],metrics['auc_2'][i]))
axes[1].set_xlim([0.0, 1.0])
axes[1].set_ylim([0.0, 1.0])
axes[1].tick_params(axis='x',which='both',top=False)
axes[1].tick_params(axis='y',which='both',right=False,left=False,labelleft=False)
axes[1].set_aspect('equal', adjustable='box')
axes[1].set_xlabel('False Positive Rate')
axes[1].set_ylabel('True Positive Rate')
axes[1].set_title('ROC curves (multiple)')
axes_2[1].plot(recalls_m[i], precisions_m[i], color=colors[i],lw=lw,
label ='%s ($AP_{b}$ = %.2f, $AP_{m}$ = %.2f)'%(class_name,
metrics['ap'][i],metrics['ap_2'][i]))
axes_2[1].set_xlim([0.0, 1.0])
axes_2[1].set_ylim([0.0, 1.0])
axes_2[1].tick_params(axis='x',which='both',top=False)
axes_2[1].tick_params(axis='y',which='both',right=False,left=False,labelleft=True)
xmin, xmax = axes_2[1].get_xlim()
ymin, ymax = axes_2[1].get_ylim()
axes_2[1].set_aspect(abs((xmax-xmin)/(ymax-ymin)), adjustable='box')
axes_2[1].set_xlabel('Recall')
axes_2[1].set_ylabel('Precision')
axes_2[1].set_title('PR curves (multiple)')
# Shrink current axis by 20%
# box = axes[1].get_position()
# print(box)
# axes[1].set_position([box.x0, box.y0, box.x1-box.width * 0.5, box.height])
# print(axes[1].get_position())
axes[1].plot([0, 1], [0, 1], 'k--', lw=lw, label='no skill')
axes_2[1].plot([0, 1], [0.04, 0.04], 'k--', lw=lw, label = 'no skill')
# Put a legend to the right of the current axis
axes[1].legend(loc='upper left', bbox_to_anchor=(1.05, 1),borderaxespad=0.,frameon=False)
axes_2[1].legend(loc='upper left', bbox_to_anchor=(1.05, 1),borderaxespad=0.,frameon=False)
fig.tight_layout()
fig_2.tight_layout()
fig.savefig('../Figs/roc_curve_%s.pdf'%(plot_name))
fig_2.savefig('../Figs/precision_recall_curve_%s.pdf'%(plot_name))
print('Successfully save figure to ../Figs/roc_curve_%s.pdf'%(plot_name))
print('Successfully save figure to ../Figs/precision_recall_curve_%s.pdf'%(plot_name))
return metrics,metrics_avg
def cal_metrics_sampling(model_out,label):
from sklearn.metrics import recall_score,precision_score,roc_auc_score,roc_curve, average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_recall_curve
label = label.cpu().numpy()
Y_pred = np.zeros(label.shape)
num_task = len(model_out)
metrics = {i : {'acc':[],'auc':[], 'ap':[], 'fprs':[],
'tprs':[],'precisions':[],'recalls':[]} for i in range(num_task)}
total_num = 304661
posi_num = np.array([1591, 1878,1471,2253,16346,3207,3696,65178,2447,1036,3137,52618])
neg_num = total_num - posi_num
ratio = np.round(neg_num / posi_num).astype(int)
iterations = 2000
for i in range(num_task):
y_true_pos = label[label[:,i]==1,i]
y_true_neg = label[label[:,i]!=1,i]
y_pred = model_out[i].cpu().detach().numpy()
y_pred_pos = y_pred[label[:,i]==1]
y_pred_neg = y_pred[label[:,i]!=1]
for iter in range(iterations):
            pos_num = len(y_true_pos)  # number of positive samples for this class
pos_idx = np.random.randint(0,len(y_true_pos),pos_num)
neg_idx = np.random.randint(0, len(y_true_neg),pos_num*ratio[i])
y_true = np.concatenate([y_true_pos[pos_idx], y_true_neg[neg_idx]])
y_score = np.concatenate([y_pred_pos[pos_idx], y_pred_neg[neg_idx]])
y_pred_label = y_score > 0.5
acc = np.mean(y_true==y_pred_label)
auc = roc_auc_score(y_true,y_score)
ap = average_precision_score(y_true,y_score)
fprs, tprs, thresholds = roc_curve(y_true, y_score)
precisions, recalls, _ = precision_recall_curve(y_true, y_score)
metrics[i]['acc'].append(acc)
metrics[i]['auc'].append(auc)
metrics[i]['ap'].append(ap)
metrics[i]['fprs'] = fprs.tolist()
metrics[i]['tprs'] = tprs.tolist()
metrics[i]['precisions'] = precisions.tolist()
metrics[i]['recalls'] = recalls.tolist()
metrics_avg = dict()
metrics_avg['acc'] = [np.mean(metrics[i]['acc']) for i in range(num_task)]
metrics_avg['auc'] = [np.mean(metrics[i]['auc']) for i in range(num_task)]
metrics_avg['ap'] = [np.mean(metrics[i]['ap']) for i in range(num_task)]
return metrics, metrics_avg
|
nilq/baby-python
|
python
|
from django import forms
from .models import Ore
class CreateNewOreupdate(forms.ModelForm):
class Meta:
model = Ore
fields = ('oret','contrattiok','contrattiko','nomecognome','statuse',)
widgets = {
'data' : forms.DateInput(
attrs={
'class': 'md-form'
}
),
'user' : forms.Select(
attrs={
'class': 'custom-select',
}
),
'contrattiok' : forms.NumberInput(
attrs={
'class': 'form-control',
}
),
'oret' : forms.NumberInput(
attrs={
'class': 'form-control'
}
),
'nomecognome' : forms.TextInput(
attrs={
'class': 'form-control'
}
),
}
class CreateNewOre(forms.ModelForm):
class Meta:
model = Ore
        exclude = ('statuse', 'contrattiko', )
        fields = ('user', 'oret', 'contrattiok', 'nomecognome',)
widgets = {
'data' : forms.DateInput(
attrs={
'class': 'md-form'
}
),
'user' : forms.Select(
attrs={
'class': 'custom-select',
}
),
'contrattiok' : forms.NumberInput(
attrs={
'class': 'form-control'
}
),
'oret' : forms.NumberInput(
attrs={
'class': 'form-control'
}
),
'nomecognome' : forms.TextInput(
attrs={
'class': 'form-control'
}
),
}
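# A minimal usage sketch (hypothetical view code, not part of this app):
# form = CreateNewOre(request.POST or None)
# if form.is_valid():
#     form.save()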
|
nilq/baby-python
|
python
|
import pytest
import pandas as pd
from hypper.data import (
read_banking,
read_breast_cancer_data,
read_churn,
read_congressional_voting_records,
read_german_data,
read_hr,
read_phishing,
read_spect_heart,
)
@pytest.mark.parametrize(
"read_fun",
[
read_banking,
read_breast_cancer_data,
read_churn,
read_congressional_voting_records,
read_german_data,
read_hr,
read_phishing,
read_spect_heart,
],
)
def test_reading_data_types(read_fun):
df, label, cat_cols = read_fun()
assert type(df) == pd.DataFrame
assert type(label) == str
assert type(cat_cols) == list
|
nilq/baby-python
|
python
|
"""Base camera module
This file contains the class definition for the Camera class on which
all subsequent cameras should be based on.
"""
from __future__ import print_function, division
import numpy.random as npr
from .log import logger
# from .ringbuffer import RingBuffer
from .camprops import CameraProperties
# from .exceptions import CameraError
class CameraError(Exception):
"""Generic camera error"""
class Camera(object):
"""Base class for all cameras. New camera implementations should
subclass this and override all methods necessary for use.
Attributes
----------
clib : WinDLL or CDLL
A ctypes library reference
roi : list
The defined region of interest in the form [x1, y1, x2, y2].
t_ms : float
Exposure time in ms.
gain : int or float
Gain setting. The type is dependent on the camera used.
shape : tuple
Number of pixels (x, y)
bins : int
Bin size to use.
crop : list
Crop specifications. Should be of the form::
[horiz start, horiz end, vert start, vert end]
        with indices starting from 1.
shutter_open : bool
For cameras that are equipped with an integrated shutter: is the
shutter open?
cooler_active : bool
True if the cooler is on.
temperature_set_point : int
Temperature set point for the cooler if present.
acq_mode : str
Camera acquisition mode.
trigger_mode : int
Camera triggering mode. These are obviously defined
differently depending on the particular camera's SDK.
rbuffer : RingBuffer
The RingBuffer object for autosaving of images.
props : CameraProperties
A CameraProperties object defining several generic settings of
the camera as well as flags indicating if certain
functionality is available.
"""
def __init__(self, **kwargs):
"""Initialize a camera. Additional keyword arguments may also
be passed and checked for the initialize function to be
defined by child classes.
Keyword arguments
-----------------
bins : int
Binning to use.
buffer_dir : str
Directory to store the ring buffer file to. Default:
'.'.
log_level : int
Logging level to use. Default: ``logging.INFO``.
"""
self.clib = None
self.roi = [1, 1, 10, 10]
self.t_ms = 100.
self.gain = 0
self.shape = (512, 512)
self.bins = 1
self.crop = (1, self.shape[0], 1, self.shape[1])
self.shutter_open = False
self.cooler_active = False
self.temperature_set_point = 0
self.acq_mode = "single"
self.trigger_mode = 0
self.rbuffer = None
self.props = CameraProperties()
# Get kwargs and set defaults
bins = kwargs.get('bins', 1)
buffer_dir = kwargs.get('buffer_dir', '.')
recording = kwargs.get('recording', True)
# Check kwarg types are correct
assert isinstance(bins, int)
assert isinstance(buffer_dir, str)
# Configure logging
logger.info("Connecting to camera")
# Initialize
        try:
            # self.rbuffer = RingBuffer(
            #     directory=buffer_dir, recording=recording, roi=self.roi)
            raise ValueError  # RingBuffer disabled above; fall through to the simulated setup
        except ValueError:
            # logger.warn('Error opening the ring buffer. This is expected with a remote camera server.')
            self.rbuffer = None
x0 = npr.randint(self.shape[0]/4, self.shape[0]/2)
y0 = npr.randint(self.shape[1]/4, self.shape[1]/2)
self.sim_img_center = (x0, y0)
self.initialize(**kwargs)
self.get_camera_properties()
def initialize(self, **kwargs):
"""Any extra initialization required should be placed in this
function for child camera classes.
"""
def get_camera_properties(self):
"""Code for getting camera properties should go here."""
logger.warning(
"Properties not being set. " +
"Did you forget to override get_camera_properties?")
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
logger.info("Shutting down camera.")
if self.rbuffer is not None:
self.rbuffer.close()
self.close()
def close(self):
"""Close the camera safely. Anything necessary for doing so
should be defined here.
"""
raise NotImplementedError
def set_acquisition_mode(self, mode):
"""Set the image acquisition mode."""
raise NotImplementedError
def get_num_available_images(self, mode):
"""Get num of available images."""
raise NotImplementedError
def get_image(self):
"""Acquire the current image from the camera.
"""
img = self.acquire_image_data()
return img
def acquire_image_data(self):
"""Code for getting image data from the camera should be
placed here. This must return a numpy array.
"""
raise NotImplementedError
def get_images_as_buffer(self, first, last):
"""Acquire the current image from the camera and write it to
the ring buffer. This function should *not* be overwritten by
child classes. Instead, everything necessary to acquire an
image from the camera should be added to the
:meth:`acquire_image_data` method.
"""
buffer, size = self.acquire_images()
return buffer, size
def acquire_images(self, first, last):
"""Code for getting image data from the camera should be
placed here. This must return a numpy array.
"""
raise NotImplementedError
def get_trigger_mode(self):
"""Query the current trigger mode."""
raise NotImplementedError
def set_trigger_mode(self, mode):
"""Setup trigger mode."""
raise NotImplementedError
def start(self):
"""Code needed for getting the camera to begin triggering
should be placed here.
"""
raise NotImplementedError
def stop(self):
"""Code needed to stop accepting triggering should be placed
here.
"""
raise NotImplementedError
# Not all cameras have builtin shutters, so the next few functions
# should have no actual effect in that case. Child classes should
# override the set_shutter function to set the shutter state.
def open_shutter(self):
"""Open the shutter."""
self.shutter_open = True
logger.info('Opening shutter.')
self.set_shutter('open')
def close_shutter(self):
"""Close the shutter."""
self.shutter_open = False
logger.info('Closing shutter.')
self.set_shutter('closed')
def set_shutter(self, state):
"""This will set the shutter to the given state ('open' or
'closed'). Since not all cameras have a built in shutter, this
will simply do nothing if not overridden.
"""
logger.debug("set_shutter not overridden")
def toggle_shutter(self, state):
"""Toggle the shutter state from open to closed and vice versa."""
if self.shutter_open:
self.close_shutter()
else:
self.open_shutter()
def get_exposure_time(self):
"""Query for the current exposure time. Default is to just
return what is stored in the instantiation.
"""
return self.t_ms
def set_exposure_time(self, t):
"""Set the exposure time."""
self.t_ms = t
timings = self.update_exposure_time(t)
return timings
def update_exposure_time(self, t):
"""Camera-specific code for setting the exposure time should
go here.
"""
raise NotImplementedError
def get_gain(self):
"""Query the current gain settings."""
raise NotImplementedError
def set_gain(self, **kwargs):
"""Set the camera gain."""
raise NotImplementedError
# Don't override :meth:`set_cooler`, but rather the
# :meth:`cooler_on` and :meth:`cooler_off`.
def cooler_on(self):
"""Turn on the TEC."""
def cooler_off(self):
"""Turn off the TEC."""
def set_cooler(self, mode):
assert isinstance(mode, (bool, int))
self.cooler_active = mode
if mode:
self.cooler_on()
else:
self.cooler_off()
def get_cooler_temperature(self):
"""Check the TEC temperature."""
logger.warn("No action: get_cooler_temperature not overriden.")
def set_cooler_temperature(self, temp):
"""Set the cooler temperature to temp."""
logger.warn("No action: set_cooler_temperature not overriden.")
raise NotImplementedError("No cooler?")
def set_roi(self, roi):
"""Define the region of interest. Since ROI stuff is handled
entirely in software, this function does not need to be
implemented in inheriting classes.
"""
if len(roi) != 4:
raise CameraError("roi must be a length 4 list.")
if roi[0] >= roi[2] or roi[1] >= roi[3] or roi[0] < 0 or roi[1] < 0:
logger.error(
'Invalid ROI: {0}. Keeping old ROI.'.format(roi))
return
old = self.roi
self.roi = roi
if self.rbuffer is not None:
self.rbuffer.roi = roi
logger.info(
'Adjusting ROI: {0} --> {1}'.format(str(old), str(self.roi)))
def get_crop(self):
"""Get the current CCD crop settings. If this function is not
overloaded, it will simply return the value stored in the crop
attribute.
"""
return self.crop
def set_crop(self, crop):
"""Define the portion of the CCD to actually collect data
from. Using a reduced sensor area typically allows for faster
readout. Derived classes should define :meth:`update_crop`
instead of overriding this one.
"""
        if len(crop) != 4:
            raise CameraError("crop must be a length 4 array.")
        assert crop[1] > crop[0]
        assert crop[3] > crop[2]
self.crop = crop
self.update_crop(self.crop)
def reset_crop(self):
"""Reset the crop to the maximum size."""
self.crop = [1, self.shape[0], 1, self.shape[1]]
self.update_crop(self.crop)
def update_crop(self, crop):
"""Camera-specific code for setting the crop should go
here.
"""
logger.debug("update_crop not implemented.")
def get_bins(self):
"""Query the current binning. If this function is not
overloaded, it will simply return the value stored in the bins
attribute.
"""
return self.bins
def set_bins(self, bins):
"""Set binning to bins x bins."""
logger.debug("set_bins not implemented.")
|
nilq/baby-python
|
python
|
from .copy import files_copy
from .delete import files_delete
from .download import files_download
from .history import files_history
from .import_files import files_import
from .list import files_list
from .mkdir import files_mkdir
from .move import files_move
from .pems_delete import files_pems_delete
from .pems_list import files_pems_list
from .pems_update import files_pems_update
from .upload import files_upload
|
nilq/baby-python
|
python
|
import turtle
def draw_piece(row, col, color):
x = offset_x + 25 + col * 2 * (radius + gap)
y = offset_y - 25 - row * 2 * (radius + gap)
t.up()
t.home()
t.goto(x,y)
t.down()
t.color(color)
t.begin_fill()
t.circle(radius)
t.end_fill()
def draw(x, y):
    # incomplete click handler; a possible completion is sketched below
    global board, rb, winner
    col = int((x - offset_x) // square_size)
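# A possible completion of draw(), given as a hedged sketch only: drop a
# piece into the lowest empty row of the clicked column, then alternate
# the player colour.
# def draw(x, y):
#     global board, rb, winner
#     col = int((x - offset_x) // square_size)
#     if winner or col < 0 or col > 6:
#         return
#     for row in range(5, -1, -1):
#         if board[row][col] is None:
#             board[row][col] = rb
#             draw_piece(row, col, rb)
#             rb = "black" if rb == "red" else "red"
#             break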
def check_winner():
pass
def draw_board():
# TODO: use a for loop to simplify the code
t.color("purple")
t.begin_fill()
t.up()
t.goto(190, -180)
t.down()
t.left(90)
t.forward(310)
t.left(90)
t.forward(380)
t.left(90)
t.forward(310)
t.left(90)
t.forward(380)
t.end_fill()
for row in range(6):
for col in range(7):
draw_piece(row,col,"white")
radius = 23
gap = 2
square_size = 2 * (radius + gap)
offset_x = -180
offset_y = 100
board = [
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
[None, None, None, None, None, None, None],
]
winner = ""
rb = "red"
t = turtle.Turtle()
t.ht()
t.speed(0)  # 0 disables animation; values outside 0.5-10 are treated as 0 anyway
draw_board()
#draw_piece(0, 0, "blue")
#draw_piece(0, 1, "red")
#draw_piece(3, 5, "purple")
t.up()
t.home()
t.down()
wn = turtle.Screen()
wn.onclick(draw)
wn.mainloop()
|
nilq/baby-python
|
python
|
from machine.tokenization import ZwspWordDetokenizer
def test_detokenize_empty() -> None:
detokenizer = ZwspWordDetokenizer()
assert detokenizer.detokenize([]) == ""
def test_detokenize_space() -> None:
detokenizer = ZwspWordDetokenizer()
assert (
detokenizer.detokenize(["គែស", "មាង់", " ", "អី", "នៃ", "ជេង", "នារ", "ត៝ល់", "ព្វាន់", "។"])
== "គែស\u200bមាង់ អី\u200bនៃ\u200bជេង\u200bនារ\u200bត៝ល់\u200bព្វាន់។"
)
def test_detokenize_guillemet() -> None:
detokenizer = ZwspWordDetokenizer()
assert detokenizer.detokenize(["ឞ្ក្នៃ", "រាញា", "«", "នារ", "»", "ជេសរី"]) == "ឞ្ក្នៃ\u200bរាញា «នារ» ជេសរី"
def test_detokenize_punctuation() -> None:
detokenizer = ZwspWordDetokenizer()
assert (
detokenizer.detokenize(["ไป", "ไหน", "มา", "?", "เขา", "ถาม", "ผม", "."])
== "ไป\u200bไหน\u200bมา? เขา\u200bถาม\u200bผม."
)
assert detokenizer.detokenize(["ช้าง", ",", "ม้า", ",", "วัว", ",", "กระบือ"]) == "ช้าง, ม้า, วัว, กระบือ"
def test_detokenize_punctuation_inside_word() -> None:
detokenizer = ZwspWordDetokenizer()
assert (
detokenizer.detokenize(["เริ่ม", "ต้น", "ที่", " ", "7,999", " ", "บาท"]) == "เริ่ม\u200bต้น\u200bที่ 7,999 บาท"
)
def test_detokenize_multiple_spaces() -> None:
detokenizer = ZwspWordDetokenizer()
assert (
detokenizer.detokenize(["គែស", "មាង់", " ", "អី", "នៃ", "ជេង", "នារ", "ត៝ល់", "ព្វាន់", "។"])
== "គែស\u200bមាង់ អី\u200bនៃ\u200bជេង\u200bនារ\u200bត៝ល់\u200bព្វាន់។"
)
|
nilq/baby-python
|
python
|
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2017-2020 German Aerospace Center (DLR) and others.
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License 2.0 which is available at
# https://www.eclipse.org/legal/epl-2.0/
# This Source Code may also be made available under the following Secondary
# Licenses when the conditions for such availability set forth in the Eclipse
# Public License 2.0 are satisfied: GNU General Public License, version 2
# or later which is available at
# https://www.gnu.org/licenses/old-licenses/gpl-2.0-standalone.html
# SPDX-License-Identifier: EPL-2.0 OR GPL-2.0-or-later
# @file __init__.py
# @author Leonhard Luecken
# @date 2017-04-09
"""
simpla - A simple platooning plugin for TraCI
simpla is a configurable, simple platooning plugin for TraCI.
A platooning configuration has to be created before using.
Its possible elements are given in the example configuration file
'simpla_example.cfg.xml'
Information about vType mappings between original and
platooning vTypes has to be supplied. This can be done directly
in the configuration xml-file by using 'vTypeMapLeader', 'vTypeMapFollower' and 'vTypeMapCatchup'
elements or by reference to separate files which define the mappings as
'originalVType : mappedVType'
All specified vTypes should be available within the simulation. The "default" type
is optional and is used whenever information is missing for some original type;
if no default is specified, the original type remains unchanged within the platoon.
For the definition of platooning vTypes for existing basic vTypes,
and generating vTypeMapping-files see the script generateModifiedVTypes.py.
Usage:
1) import simpla into your traci script.
2) After establishing a connection to SUMO with traci, call simpla.load(<configuration_filename>)
3) Only applies to SUMO version < 0.30: After starting simpla, call simpla.update() after each call to
traci.simulationStep()
Notes:
1) simpla changes the vehicle types, speedfactors, and lane changemodes of all connected vehicles.
If your application does so as well, this might have unintended consequences.
2) Currently, steps of lengths other than DeltaT are not supported (i.e. if traci.simulationStep()
is called with an argument while simpla is running, this may yield undesired behaviour).
3) simpla adds subscriptions to VAR_ROAD_ID, VAR_LANE_INDEX (and currently VAR_LANE_ID) and removes them when stopped
"""
import sys
import os
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci # noqa
from ._utils import openGap # noqa
from ._utils import SimplaException # noqa
import simpla._config # noqa
import simpla._reporting as rp # noqa
import simpla._platoonmanager # noqa
warn = rp.Warner("simpla")
_mgr = None
_mgr_listenerID = None
_useStepListener = 'addStepListener' in dir(traci)
_emergencyDecelImplemented = 'VAR_EMERGENCY_DECEL' in dir(traci.constants)
if not _emergencyDecelImplemented:
# Old traci version. No emergency decel present.
if rp.VERBOSITY >= 1:
warn("Using old traci version assuming emergency decel == decel", True)
# Set emergency decel to decel
traci.constants.VAR_EMERGENCY_DECEL = 0x7b
traci.vehicletype.getEmergencyDecel = traci.vehicletype.getDecel
def load(config_filename):
'''
Load the config from file and create a Platoon Manager
'''
global _mgr, _mgr_listenerID
simpla._config.load(config_filename)
_mgr = simpla._platoonmanager.PlatoonManager()
if _useStepListener:
# For SUMO version >= 0.30
_mgr_listenerID = traci.addStepListener(_mgr)
def stop():
'''
Stop the PlatoonManager
'''
global _mgr, _mgr_listenerID
if _mgr is not None:
_mgr.stop()
traci.removeStepListener(_mgr_listenerID)
_mgr = None
def update():
'''
Function called each simulation step. Only to be used for SUMO version < 1.0
'''
global _mgr, warn
if _mgr is not None:
_mgr.step()
else:
if rp.VERBOSITY >= 1:
warn("call simpla.init(<config_file>) before simpla.update()!")
|
nilq/baby-python
|
python
|
#!/bin/python3
# name: vignette_testing.py
# author: nbehrnd@yahoo.com
# license: 2019, MIT
# date: 2019-12-02 (YYYY-MM-DD)
# edit: 2019-12-03 (YYYY-MM-DD)
#
""" Probe for gnuplot palettes' differences
Script 'palette_decomposition.py' provides rapid access to visualize
the channels of R, G, B of RGB color space subsequently deposit e.g.
as a .png file.
compare by ImageMagick allows the superposition of two .png files to
identify differences between the two; this is then highlighted by red
pixels. Because 'palette_decomposition.py' names the diagnostic files
coherently, their inspection with ImageMagick may be be automated.
Place the script in the same folder already containing the vignette /
decomposition plots to scrutinize as .png. Launch without provision
of parameters by
python3 vignette_comparsion.py
The script will point ImageMagick which files to check against each
other. This for example allows to discern quickly palettes with a
similar name, but from different repositories (e.g., magma). """
import fnmatch
import os
import shutil
import subprocess as sub
import sys
def identify():
""" Learn about the vignette .png to process at all. """
global register
register = []
for file in os.listdir("."):
if fnmatch.fnmatch(file, "*_vig.png"):
register.append(file)
register.sort()
def probe():
""" Call compare by ImageMagick to work on the data. """
print("\nProbing for differences in the palette files' plots.")
while len(register) > 1:
for entry in register[1:]:
reference = register[0]
probe = entry
difference = str("diff_{}_{}".format(str(reference[:-4]), probe))
test = str("compare {} {} {}".format(reference, entry, difference))
try:
sub.call(test, shell=True)
except IOError:
print("Possibly no callable instance of ImageMagick.")
sys.exit(0)
del register[0]
print("Probing for differences in the palette files' plots complete.")
def stamp():
""" Add an 'inner stamp' to ease the visual discern of the plots. """
print("\nProvision of 'inner stamps' in the difference plots.")
diff_register = []
for file in os.listdir("."):
if fnmatch.fnmatch(file, "diff*.png"):
diff_register.append(file)
diff_register.sort()
for entry in diff_register:
intermediate = str(entry)[:-4] + str("_intermediate.png")
stamping = str(
"convert {} label:'{}' -gravity Center -append {}".format(
entry, entry, intermediate))
try:
sub.call(stamping, shell=True)
# Cleaning; retain only the stamped file filed by old name.
os.remove(entry)
shutil.move(intermediate, entry)
except IOError:
print("problem with {}".format(entry))
sys.exit(0)
# action calls:
identify()
probe()
stamp()
sys.exit(0)
|
nilq/baby-python
|
python
|
from learnml.metrics import mean_squared_error
import numpy as np
import unittest
class Test(unittest.TestCase):
def test_mean_squared_error(self):
expected_results = [0, 1]
for i, y_pred in enumerate(np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6]])):
self.assertEqual(expected_results[i], mean_squared_error(np.array([1, 2, 3, 4, 5]), y_pred))
if __name__ == "__main__":
unittest.main()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import json
from TM1py.Objects.User import User
from TM1py.Services.ObjectService import ObjectService
class SecurityService(ObjectService):
""" Service to handle Security stuff
"""
def __init__(self, rest):
super().__init__(rest)
def create_user(self, user):
""" Create a user on TM1 Server
:param user: instance of TM1py.User
:return: response
"""
request = '/api/v1/Users'
self._rest.POST(request, user.body)
def get_user(self, user_name):
""" Get user from TM1 Server
:param user_name:
:return: instance of TM1py.User
"""
request = '/api/v1/Users(\'{}\')?$expand=Groups'.format(user_name)
response = self._rest.GET(request)
return User.from_json(response)
def update_user(self, user):
""" Update user on TM1 Server
:param user: instance of TM1py.User
:return: response
"""
for current_group in self.get_groups(user.name):
if current_group not in user.groups:
self.remove_user_from_group(current_group, user.name)
request = '/api/v1/Users(\'{}\')'.format(user.name)
return self._rest.PATCH(request, user.body)
def delete_user(self, user_name):
""" Delete user on TM1 Server
:param user_name:
:return: response
"""
request = '/api/v1/Users(\'{}\')'.format(user_name)
return self._rest.DELETE(request)
def get_all_users(self):
""" Get all users from TM1 Server
:return: List of TM1py.User instances
"""
request = '/api/v1/Users?$expand=Groups'
response = self._rest.GET(request)
response_as_dict = json.loads(response)
users = [User.from_dict(user) for user in response_as_dict['value']]
return users
def get_users_from_group(self, group_name):
""" Get all users from group
:param group_name:
:return: List of TM1py.User instances
"""
request = '/api/v1/Groups(\'{}\')?$expand=Users($expand=Groups)'.format(group_name)
response = self._rest.GET(request)
response_as_dict = json.loads(response)
users = [User.from_dict(user) for user in response_as_dict['Users']]
return users
def get_groups(self, user_name):
""" Get the groups of a user in TM1 Server
:param user_name:
:return: List of strings
"""
request = '/api/v1/Users(\'{}\')/Groups'.format(user_name)
response = self._rest.GET(request)
groups = json.loads(response)['value']
return [group['Name'] for group in groups]
def remove_user_from_group(self, group_name, user_name):
""" Remove user from group in TM1 Server
:param group_name:
:param user_name:
:return: response
"""
request = '/api/v1/Users(\'{}\')/Groups?$id=Groups(\'{}\')'.format(user_name, group_name)
return self._rest.DELETE(request)
def get_all_groups(self):
""" Get all groups from TM1 Server
:return: List of strings
"""
request = '/api/v1/Groups?$select=Name'
response = self._rest.GET(request)
response_as_dict = json.loads(response)
groups = [entry['Name'] for entry in response_as_dict['value']]
return groups
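# A minimal offline sketch using a mocked REST layer (illustrative only;
# it assumes ObjectService merely stores the rest client it is given):
def _demo_get_groups():
    from unittest.mock import Mock
    rest = Mock()
    rest.GET = Mock(return_value='{"value": [{"Name": "ADMIN"}]}')
    service = SecurityService(rest)
    print(service.get_groups("alice"))  # ['ADMIN']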
|
nilq/baby-python
|
python
|
from abc import abstractmethod
from typing import Callable, Tuple
import numpy as np
from ._func import Func
class OriFunc(Func):
@abstractmethod
def __call__(self, t: float) -> float:
"""
:param t: Time.
:return: Orientation in degrees.
"""
pass
class Tangential(OriFunc):
def __init__(
self,
pos_func: Callable[[float], Tuple[float, float]],
dt: float = 1e-2,
init_ori: float = 0,
):
"""Orient the stimulus tangentially to its trajectory.
:param pos_func: Position as function of time.
:param dt: Approximate time between consecutive frames.
"""
self.__pos_func = pos_func
self.__dt = dt
self.__init_ori = init_ori
self.__prev_ori = init_ori
def __call__(self, t: float):
old_value = np.array(self.__pos_func(t - self.__dt))
new_value = np.array(self.__pos_func(t))
if all(old_value == new_value):
return self.__prev_ori
ori = (
np.rad2deg(np.arctan2(*(new_value - old_value))) + self.__init_ori
)
self.__prev_ori = ori
return ori
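# A small usage sketch (assumes the imports above; the trajectory is a unit
# circle, so the tangential orientation advances steadily with t):
def _demo_tangential():
    pos = lambda t: (np.cos(t), np.sin(t))
    ori_func = Tangential(pos, dt=1e-2, init_ori=0.0)
    for t in (0.1, 0.2, 0.3):
        print(round(ori_func(t), 1))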
|
nilq/baby-python
|
python
|
a = ["1", 1, "1", 2]
# ex-14: Remove duplicates from list a
a = list(set(a))
print(a)
# ex-15: Create a dictionary that contains the keys a and b and their
# respective values 1 and 2.
my_dict = {"a":1, "b":2}
print(my_dict)
print(type(my_dict))
# Add "c":3 to dictionary
my_dict["c"] = 3
print(my_dict)
my_dict2 = dict([("a",1), ("b",2)])
print(my_dict2)
# ex-16: Please complete the script so that it prints out the value of key b .
d = {"a": 1, "b": 2}
print(d["b"])
# ex-17: Calculate the sum of the values of keys a and b .
d = {"a": 1, "b": 2, "c": 3}
sum = d["a"] + d["b"]
print(sum)
# ex-19: Add a new pair of key (e.g. c ) and value (e.g. 3 ) to the dictionary
# and print out the new dictionary.
d = {"a": 1, "b": 2}
d["c"] = 3
print(d)
# ex-20: Calculate the sum of all dictionary values.
d = {"a": 1, "b": 2, "c": 3}
total = 0
for key in d.keys():
    total += d[key]
print(total)
## There is a simple one-liner:
# print(sum(d.values()))
d = {'key1': 1, 'key2': 14, 'key3': 47}
values = [d[key] for key in d.keys()]
print(values)
|
nilq/baby-python
|
python
|
from task_grounding.task_grounding import TaskGrounding, TaskGroundingReturn, TaskErrorType
from database_handler.database_handler import DatabaseHandler
import unittest
from unittest.mock import Mock
from ner_lib.ner import EntityType
from ner_lib.command_builder import Task, TaskType, ObjectEntity, SpatialType, SpatialDescription
################################# ISOLATED UNIT TESTS ----- BEGIN ##########################################################
class SimpleSkillTest(unittest.TestCase):
def setUp(self):
self.db_mock = Mock()
self.task_grounding = TaskGrounding(db=self.db_mock)
self.entities = [
(EntityType.COLOUR, "blue"),
(EntityType.OBJECT, "cover"),
(EntityType.LOCATION, "next"),
(EntityType.COLOUR, "black"),
(EntityType.OBJECT, "bottom cover"),
(EntityType.LOCATION, "above"),
(EntityType.OBJECT, "bottom cover")
]
def test_get_specific_task_from_task__task_is_pick_up__returns_task_with_pick_up(self):
self.db_mock.get_task = Mock(return_value=(1, "pick up"))
task = Task(name="pick up")
task.objects_to_execute_on = [ObjectEntity()]
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertEqual(TaskType.PICK, returned.task_info[0].task_type)
def test_get_specific_task_from_task__task_is_move__returns_task_with_move(self):
self.db_mock.get_task = Mock(return_value=(1, "move"))
task = Task(name="move")
task.objects_to_execute_on = [ObjectEntity()]
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertEqual(TaskType.MOVE, returned.task_info[0].task_type)
def test_get_specific_task_from_task__task_is_place__returns_task_with_place(self):
self.db_mock.get_task = Mock(return_value=(1, "place"))
task = Task(name="place")
task.objects_to_execute_on = [ObjectEntity()]
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertEqual(TaskType.PLACE, returned.task_info[0].task_type)
def test_get_specific_task_from_task__task_is_find__returns_task_with_find(self):
self.db_mock.get_task = Mock(return_value=(1, "find"))
task = Task(name="find")
task.objects_to_execute_on = [ObjectEntity()]
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertEqual(TaskType.FIND, returned.task_info[0].task_type)
def test_get_specific_task_from_task__task_is_unknown__returns_error_code_unknown(self):
self.db_mock.get_task = Mock(return_value=(1, None))
task = Task(name="asdasd")
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertFalse(returned.is_success)
self.assertEqual(TaskErrorType.UNKNOWN, returned.error.error_code)
def test_get_specific_task_from_task__task_has_no_object__returns_error_code_no_object(self):
self.db_mock.get_task = Mock(return_value=(1, "pick up"))
task = Task(name="pick up")
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertFalse(returned.is_success)
self.assertEqual(TaskErrorType.NO_OBJECT, returned.error.error_code)
class AdvancedTaskTest(unittest.TestCase):
def setUp(self):
self.db_mock = Mock()
self.task_grounding = TaskGrounding(db=self.db_mock)
self.entities = [
(EntityType.COLOUR, "blue"),
(EntityType.OBJECT, "cover"),
(EntityType.LOCATION, "next"),
(EntityType.COLOUR, "black"),
(EntityType.OBJECT, "bottom cover"),
(EntityType.LOCATION, "above"),
(EntityType.OBJECT, "bottom cover")
]
def test_get_specific_task_from_task__task_is_custom_task__returns_list_of_primary_skills(self):
pick_up_task = Task("pick up")
pick_up_task.task_type = TaskType.PICK
pick_up_task.objects_to_execute_on = [ObjectEntity()]
move_task = Task("pick up")
move_task.task_type = TaskType.MOVE
move_task.objects_to_execute_on = [ObjectEntity()]
place_task = Task("pick up")
place_task.task_type = TaskType.PICK
place_task.objects_to_execute_on = [ObjectEntity()]
sub_tasks = [[1, 2, 3], ["pick up", "move", "place"], [pick_up_task, move_task, place_task]]
tasks = [TaskType.PICK, TaskType.MOVE, TaskType.PLACE]
self.db_mock.get_task = Mock(return_value=(1, "clear table"))
self.db_mock.get_sub_tasks = Mock(return_value=sub_tasks)
task = Task("tidy")
returned = self.task_grounding.get_specific_task_from_task(task)
returned_tasks = [returned.task_info[0].task_type,
returned.task_info[1].task_type,
returned.task_info[2].task_type]
self.assertEqual(tasks, returned_tasks)
def test_get_specific_task_from_tasks__task_is_custom_task_without_sub_tasks__returns_error_code_no_sub_tasks(self):
self.db_mock.get_task = Mock(return_value=(1, "clear table"))
self.db_mock.get_sub_tasks = Mock(return_value=None)
task = Task("tidy")
returned = self.task_grounding.get_specific_task_from_task(task)
self.assertFalse(returned.is_success)
self.assertEqual(TaskErrorType.NO_SUBTASKS, returned.error.error_code)
class TeachSystemTest(unittest.TestCase):
def setUp(self):
self.db_mock = Mock()
self.task_grounding = TaskGrounding(db=self.db_mock)
def test_teach_new_task__valid_input__returns_success(self):
self.db_mock.add_task = Mock()
self.db_mock.get_task = Mock()
self.db_mock.get_task.side_effect = [(1, None), (2, None), (3, None)]
self.db_mock.add_sub_task = Mock()
returned = self.task_grounding.teach_new_task("nice task name", [Task("take"), Task("move"), Task("put")], "nice task keyword")
self.assertTrue(returned.is_success)
def test_teach_new_task__contains_unknown_task__returns_unknown_error_code(self):
self.db_mock.add_task = Mock()
self.db_mock.get_task = Mock()
self.db_mock.get_task.side_effect = [(None, None)]
self.db_mock.add_sub_task = Mock()
returned = self.task_grounding.teach_new_task("nice task name", [Task("take"), Task("move"), Task("put")], "nice task keyword")
self.assertFalse(returned.is_success)
self.assertEqual(TaskErrorType.UNKNOWN, returned.error.error_code)
def test_add_sub_task__valid_input__returns_success(self):
self.db_mock.get_task = Mock()
self.db_mock.add_sub_task = Mock()
self.db_mock.get_task.side_effect = [(5, "clear table"), (1, "pick up")]
returned = self.task_grounding.add_sub_task("tidy", ["get"])
self.assertTrue(returned.is_success)
################################# ISOLATED UNIT TESTS ----- END ##########################################################
################################# INTEGRATION TESTS ----- BEGIN ##########################################################
class SimpleSkillIntegration(unittest.TestCase):
def setUp(self):
self.task_grounding = TaskGrounding(DatabaseHandler("test_grounding.db"))
self.returned = TaskGroundingReturn()
self.entities = [
(EntityType.COLOUR, "blue"),
(EntityType.OBJECT, "cover"),
(EntityType.LOCATION, "next"),
(EntityType.COLOUR, "black"),
(EntityType.OBJECT, "bottom cover"),
(EntityType.LOCATION, "above"),
(EntityType.OBJECT, "bottom cover")
]
def test_Pick(self):
self.returned = self.task_grounding.get_specific_task_from_task("take", self.entities)
self.assertEqual(self.returned.task_info[0].get_name(), "PickUpTask")
def test_Move(self):
self.returned = self.task_grounding.get_specific_task_from_task("relocate", self.entities)
self.assertEqual(self.returned.task_info[0].get_name(), "MoveTask")
def test_Place(self):
self.returned = self.task_grounding.get_specific_task_from_task("put", self.entities)
self.assertEqual(self.returned.task_info[0].get_name(), "PlaceTask")
def test_Find(self):
self.returned = self.task_grounding.get_specific_task_from_task("locate", self.entities)
self.assertEqual(self.returned.task_info[0].get_name(), "FindTask")
def test_UnknownObject(self):
self.returned = self.task_grounding.get_specific_task_from_task("asdasd")
self.assertFalse(self.returned.is_success)
self.assertEqual(self.returned.error_code, TaskErrorType.UNKNOWN)
def test_NoObjectSpecified(self):
self.returned = self.task_grounding.get_specific_task_from_task("take")
self.assertFalse(self.returned.is_success)
self.assertEqual(self.returned.error_code, TaskErrorType.NO_OBJECT)
class AdvancedTaskIntegration(unittest.TestCase):
def setUp(self):
self.task_grounding = TaskGrounding(DatabaseHandler("test_grounding.db"))
self.returned = TaskGroundingReturn()
self.entities = [
(EntityType.COLOUR, "blue"),
(EntityType.OBJECT, "cover"),
(EntityType.LOCATION, "next"),
(EntityType.COLOUR, "black"),
(EntityType.OBJECT, "bottom cover"),
(EntityType.LOCATION, "above"),
(EntityType.OBJECT, "bottom cover")
]
def test_MoveBlue(self):
tasks = [TaskType.PICK, TaskType.PLACE]
returned = self.task_grounding.get_specific_task_from_task(Task("blue1"))
returned_tasks = [returned.task_info[0].task_type,
returned.task_info[1].task_type]
self.assertEqual(tasks, returned_tasks)
def test_ClearTable(self):
tasks = ["PickUpTask", "MoveTask", "PlaceTask"]
self.returned = self.task_grounding.get_specific_task_from_task("tidy", self.entities)
returned_tasks = [self.returned.task_info[0].get_name(),
self.returned.task_info[1].get_name(),
self.returned.task_info[2].get_name()]
self.assertEqual(tasks, returned_tasks)
class TeachSystemIntegration(unittest.TestCase):
def setUp(self):
self.db = DatabaseHandler("test_grounding.db")
self.task_grounding = TaskGrounding(self.db)
self.returned = TaskGroundingReturn()
def test_TeachTask(self):
returned = self.task_grounding.teach_new_task("test_task1", ["take", "move", "put"], ["test1-1", "test1-2"])
self.assertTrue(returned.is_success)
self.clean_test_db("test_task1")
def test_AddWord(self):
returned = self.task_grounding.add_word_to_task("blue1", "blue2")
self.assertTrue(returned.is_success)
self.db.conn.execute("delete from TASK_WORDS where WORD='blue2';")
self.db.conn.commit()
def test_TeachTaskUnknownSubTask(self):
returned = self.task_grounding.teach_new_task("test_task2", ["UNKNOWN TASK"], ["test1", "test2-1"])
self.assertFalse(returned.is_success)
self.assertEqual(returned.error_code, TaskErrorType.UNKNOWN)
self.clean_test_db("test_task2")
def test_AddWordsToTask(self):
#self.task_grounding.teach_new_task("test_task3", ["take", "move", "put"], ["test3-1", "test3-2"])
#returned = self.task_grounding.add_word_to_task("test_task3-1", "TEST WORD")
#self.assertTrue(returned.is_success)
self.clean_test_db("test_task3")
def test_AddSubTask(self):
self.task_grounding.teach_new_task("test_task4", ["take", "move", "put"], ["test4-1", "test4-2"])
returned = self.task_grounding.add_sub_task("test_task4", ["get"])
self.assertTrue(returned.is_success)
self.clean_test_db("test_task4")
def clean_test_db(self, task_name):
task_id = self.db.get_task_id(task_name)
self.db.conn.execute("delete from TASK_WORDS where TASK_ID=?;", (task_id,))
self.db.conn.execute("delete from TASK_INFO where TASK_NAME=?;", (task_name,))
self.db.conn.commit()
################################# INTEGRATION TESTS ----- END ##########################################################
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.5 on 2022-01-24 05:22
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('metrics', '0002_initial'),
]
operations = [
migrations.CreateModel(
name='FeedbackResponseKeyword',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=64)),
],
options={
'ordering': ['name'],
},
),
migrations.CreateModel(
name='ImportLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(auto_now_add=True, db_index=True)),
('responses_imported_count', models.PositiveIntegerField(blank=True, null=True)),
('projects_affected_count', models.PositiveIntegerField(blank=True, null=True)),
('run_time_seconds', models.FloatField()),
('import_type', models.CharField(choices=[('beeheard', 'BeeHeard'), ('usabilla', 'Usabilla')], max_length=12)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='usabilla_import_log_user', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date'],
},
),
migrations.AddField(
model_name='campaign',
name='feedback_response_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='campaign',
name='latest_feedback_response_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='campaign',
name='latest_other_response_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='campaign',
name='other_response_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='campaign',
name='vote_response_count',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='feedbackresponse',
name='assignees',
field=models.ManyToManyField(blank=True, related_name='feedback_response_assignees', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='feedbackresponse',
name='notes',
field=models.TextField(blank=True, max_length=3000),
),
migrations.AlterField(
model_name='domainyearsnapshot',
name='year',
field=models.PositiveIntegerField(default=2022),
),
migrations.AlterField(
model_name='project',
name='admins',
field=models.ManyToManyField(blank=True, help_text='Admins have full edit access to the project info, can add other admins and editors, and can see emails in responses', related_name='project_admins', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='project',
name='contact',
field=models.ForeignKey(blank=True, help_text='Contact is the owner and has full edit access to the project info, can add other admins and editors, and can see emails in responses', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='project_contact', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='project',
name='editors',
field=models.ManyToManyField(blank=True, help_text='Editors can only create/edit manual snapshots and can see emails in responses', related_name='project_editors', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='projectyearsetting',
name='year',
field=models.PositiveIntegerField(default=2022),
),
migrations.DeleteModel(
name='UsabillaImportLog',
),
migrations.AddField(
model_name='feedbackresponse',
name='keywords',
field=models.ManyToManyField(blank=True, related_name='feedback_response_keywords', to='metrics.FeedbackResponseKeyword'),
),
]
|
nilq/baby-python
|
python
|
from itertools import count
CARD_PUBLIC_KEY = 14205034
DOOR_PUBLIC_KEY = 18047856
def transform_one_step(value, subject_number):
return (value * subject_number) % 20201227
def transform(loop_size, subject_number=7):
value = 1
for _ in range(loop_size):
value = transform_one_step(value, subject_number)
return value
def get_loop_size(target, subject_number=7):
value = 1
for loop_size in count(1):
value = transform_one_step(value, subject_number)
if value == target:
return loop_size
card_loop_size = get_loop_size(CARD_PUBLIC_KEY)
print(transform(card_loop_size, DOOR_PUBLIC_KEY))
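# Equivalent one-liner using Python's built-in three-argument pow for
# modular exponentiation (same result, without the explicit loop):
print(pow(DOOR_PUBLIC_KEY, card_loop_size, 20201227))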
|
nilq/baby-python
|
python
|
# MIT License
# Copyright (c) 2017 MassChallenge, Inc.
from __future__ import unicode_literals
import swapper
from factory import (
Sequence,
SubFactory,
post_generation,
)
from accelerator.tests.factories.core_profile_factory import CoreProfileFactory
from accelerator.tests.factories.expert_category_factory import (
ExpertCategoryFactory
)
from accelerator.tests.factories.industry_factory import IndustryFactory
from accelerator.tests.factories.program_family_factory import (
ProgramFamilyFactory
)
ExpertProfile = swapper.load_model('accelerator', 'ExpertProfile')
class ExpertProfileFactory(CoreProfileFactory):
class Meta:
model = ExpertProfile
salutation = Sequence(lambda x: "Expert Title %d" % x)
title = Sequence(lambda x: "Expert title %d" % x)
company = Sequence(lambda x: "Expert Company %d" % x)
expert_category = SubFactory(ExpertCategoryFactory)
primary_industry = SubFactory(IndustryFactory)
privacy_email = "finalists and staff"
privacy_phone = "finalists and staff"
privacy_web = "finalists and staff"
public_website_consent = True
public_website_consent_checked = True
judge_interest = False
mentor_interest = False
speaker_interest = False
speaker_topics = ""
office_hours_interest = False
office_hours_topics = ""
expert_group = ""
reliability = None
referred_by = ""
other_potential_experts = ""
internal_notes = ""
bio = Sequence(lambda x: "Bio text %d" % x)
home_program_family = SubFactory(ProgramFamilyFactory)
@post_generation
def functional_expertise(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for expertise in extracted:
self.functional_expertise.add(expertise)
@post_generation
def additional_industries(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for industry in extracted:
self.additional_industries.add(industry)
@post_generation
def mentoring_specialties(self, create, extracted, **kwargs):
if not create:
return
if extracted:
for specialty in extracted:
self.mentoring_specialties.add(specialty)
|
nilq/baby-python
|
python
|
from typing import Callable, Dict, Tuple, Text
from recommenders.datasets import Dataset
import numpy as np
import tensorflow as tf
import tensorflow_recommenders as tfrs
from pathlib import Path
SAVE_PATH = Path(__file__).resolve().parents[1] / "weights"
class RankingModel(tfrs.models.Model):
def __init__(
self,
dataset: Dataset,
network_fn: Callable,
network_args: Dict = None
):
super().__init__()
self._name = f"{self.__class__.__name__}_{network_fn.__name__}"
if network_args is None:
network_args = {}
self.ranking_model: tf.keras.Model = network_fn(
unique_user_ids = dataset.unique_user_ids,
unique_item_ids = dataset.unique_movie_ids, **network_args)
self.task = tfrs.tasks.Ranking(
loss = tf.keras.losses.MeanSquaredError(),
metrics=[tf.keras.metrics.RootMeanSquaredError()]
)
def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
prediction = self.ranking_model(**features)
        return self.task(labels=features['rating'], predictions=prediction)
def call(self, features: Dict[Text, tf.Tensor]):
return self.ranking_model(**features)
    def print_summary(self):
        self.ranking_model.summary()  # Keras models expose summary(), which prints directly
def save_weights(self, save_dir):
if save_dir is None:
save_dir = SAVE_PATH
save_dir.mkdir(parents=True, exist_ok=True)
self.ranking_model.save_weights(str(Path(save_dir) /'ranking'))
|
nilq/baby-python
|
python
|
"""Convert Noorlib library html to OpenITI mARkdown.
This script subclasses the generic MarkdownConverter class
from the html2md module (based on python-markdownify,
https://github.com/matthewwithanm/python-markdownify),
which uses BeautifulSoup to create a flexible converter.
The subclass in this module, NoorlibHtmlConverter,
adds methods specifically for the conversion of books from
the eShia library to OpenITI mARkdown:
* Span, div and p conversion: span, div and p classes needed to be converted
are defined in self.class_dict.
Inheritance schema of the NoorlibHtmlConverter:
======================== ==========================
MarkdownConverter NoorlibHtmlConverter
======================== ==========================
Options (inherited)
DefaultOptions (inherited)
__init__ (inherited)
__getattr__ (inherited)
convert (inherited)
process_tag (inherited)
process_text (inherited)
fill_out_columns (inherited)
post_process_md (inherited)
should_convert_tag (inherited)
indent (inherited)
underline (inherited)
create_underline_line (inherited)
convert_a (inherited)
convert_b (inherited)
convert_blockquote (inherited)
convert_br (inherited)
convert_em (inherited)
convert_hn (inherited)
convert_i (inherited)
convert_img (inherited)
convert_list (inherited)
convert_li (inherited)
convert_ol (inherited)
convert_p convert_p
convert_table (inherited)
convert_tr (inherited)
convert_ul (inherited)
convert_strong (inherited)
convert_span
convert_div
======================== ==========================
"""
import re
if __name__ == '__main__':
    import sys
    from os import path
root_folder = path.dirname(path.dirname(path.abspath(__file__)))
root_folder = path.dirname(path.dirname(path.dirname(root_folder)))
sys.path.append(root_folder)
from openiti.new_books.convert.helper import html2md
from openiti.new_books.convert.helper.html2md import * # import all constants!
class NoorlibHtmlConverter(html2md.MarkdownConverter):
"""Convert Noorlib library html to OpenITI mARkdown.
Examples:
>>> import html2md_noorlib
>>> h = '<img class="libimages" src="/images/books/86596/01/cover.jpg">'
>>> html2md_noorlib.markdownify(h)
''
>>> import html2md_noorlib
>>> h = 'abc <a href="www.example.com">def</a> ghi'
>>> html2md_noorlib.markdownify(h)
'abc def ghi'
"""
def __init__(self, **options):
super().__init__(**options)
self.class_dict = dict()
self.class_dict["rightpome"] = "\n# {} %~% " # <span class>
self.class_dict["leftpome"] = "{}\n" # <span class>
self.class_dict["footnote"] = "{}\n" # <div class>
## ##old:
## self.class_dict["Titr3"] = "\n\n### ||| {}\n\n" # <span class>
## self.class_dict["KalamateKhas2"] = "\n\n### || {}\n\n" # <p class>
## self.class_dict["KalamateKhas"] = "\n\n### ||| {}\n\n" # <p class>
## self.class_dict["TextsStyles3"] = "\n\n### ||| {}\n\n" # <p class>
## self.class_dict["TextsStyles1"] = "@QUR@ {}\n" # <span class>
## self.class_dict["Aye"] = "@QUR@ {}\n" # <span class>
## self.class_dict["tdfehrest2"] = "\t{}" # <td class>
## self.class_dict["list3"] = "\t{}" # <div class>
## self.class_dict["sher"] = "# {}\n" # <p class>
## self.class_dict["#6C3934"] = "\n\n# {}\n\n" # <span class>
self.options["image_link_regex"] = "/?images/books"
## self.options["image_folder"] = "img"
self.options["strip"] = ["a", "img"]
def convert_span(self, el, text):
"""Converts html <span> tags, depending on their class attribute.
Supported span classes should be stored in self.class_dict
(key: span class (str); value: formatting string)
E.g., {"quran": "@QUR@ {}\\n"}
Example:
>>> import html2md_noorlib
>>> h = 'abc <span>def</span> ghi'
>>> html2md_noorlib.markdownify(h)
'abc def ghi'
>>> h = 'abc <span class="unknown_span_class">def</span> ghi'
>>> html2md_noorlib.markdownify(h)
'abc def ghi'
#>>> h = 'abc <span class="Aya">def ghi</span> jkl'
#>>> html2md_noorlib.markdownify(h)
#'abc @QUR02 def ghi jkl'
# the @QUR@ example outputs are a result of post-processing;
# the function itself will produce:
# 'abc @QUR@ def ghi\\njkl'
>>> h = '<span class="rightpome">abc def</span><span class="leftpome">ghi jkl</span>'
>>> html2md_noorlib.markdownify(h)
'\\n# abc def %~% ghi jkl'
"""
        try:  # will fail if el has no class attribute
            for c in el["class"]:
                if c in self.class_dict:
                    return self.class_dict[c].format(text) if text else ''
                if c == "ayah":
                    try:
                        sura = el["surah"]
                    except KeyError:
                        sura = "0"
                    try:
                        aya = el["ayah"]
                    except KeyError:
                        aya = "0"
                    return "@QUR{}.{}@ {}\n".format(sura, aya, text)
        except (KeyError, TypeError):
            pass
        return text
def convert_div(self, el, text):
"""Converts html <div> tags, depending on their class attribute.
Supported div classes should be stored in self.class_dict
(key: div class (str); value: formatting string)
Example:
>>> import html2md_noorlib
>>> h = 'abc <div>def</div> ghi'
>>> html2md_noorlib.markdownify(h)
'abc def ghi'
>>> h = 'abc <div class="unknown_div_class">def</div> ghi'
>>> html2md_noorlib.markdownify(h)
'abc def ghi'
>>> h = '<div class="ClssDivMeesage">Page Is Empty</div>'
>>> html2md_noorlib.markdownify(h)
''
"""
try: # will fail if el has no class attribute
for c in el["class"]:
if c in self.class_dict:
return self.class_dict[c].format(text) if text else ''
if c == "ClssDivMeesage":
return ""
        except (KeyError, TypeError):
pass
return text
def convert_p(self, el, text):
"""Converts <p> tags according to their class.
Supported p classes should be stored in self.class_dict
(key: span class (str); value: formatting string)
E.g., {"quran": "@QUR@ {}\\n"}
<p> tags without class attribute, or unsupported class,
will be converted according to the markdown style
as defined in the self.options["md_style"] value
(from super().DefaultOptions)
Examples:
>>> import html2md_noorlib
>>> h = "<p>abc</p>"
>>> html2md_noorlib.markdownify(h)
'\\n\\n# abc\\n\\n'
>>> h = "<p>abc</p>"
>>> html2md_noorlib.markdownify(h, md_style=ATX)
'\\n\\nabc\\n\\n'
>>> h = "<p></p>"
>>> html2md_noorlib.markdownify(h, md_style=ATX)
''
"""
if self.options['md_style'] == OPENITI:
return '\n\n# %s\n\n' % text if text else ''
else:
return '\n\n%s\n\n' % text if text else ''
def convert_sup(self, el, text):
"""Converts <sup> tags (used for footnote markers)."""
return "({})".format(text.strip())
def markdownify(html, **options):
"""Shortcut to the convert method of the HindawiConverter class."""
return NoorlibHtmlConverter(**options).convert(html)
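# Editorial sketch: spans with class "ayah" carry surah/ayah attributes and
# are converted by convert_span above into @QUR@ tags, e.g.
#
#   h = '<span class="ayah" surah="2" ayah="255">some text</span>'
#   markdownify(h)  # -> '@QUR2.255@ some text' (the converter appends a
#                   #    newline that may be trimmed in post-processing)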
if __name__ == "__main__":
import doctest
doctest.testmod()
|
nilq/baby-python
|
python
|
import pytest
from bot.haiku.models import HaikuMetadata
@pytest.fixture()
def haiku_metadata(data_connection):
"""Create a haiku metadata."""
HaikuMetadata.client = data_connection
return HaikuMetadata
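def test_haiku_metadata_client_is_bound(haiku_metadata):
    # Editorial sketch (not part of the original source): the fixture above
    # binds the shared data connection to the model class, so the class
    # attribute should be populated for every test that consumes it.
    assert haiku_metadata.client is not None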
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import argparse
import os
import re
import sys
from itertools import product
import h5py
import numpy as np
if __name__ == "__main__":
ORIG_WIDTH = 512
ORIG_NUM_PARAMS = 4
parser = argparse.ArgumentParser()
parser.add_argument("hdf5_files", nargs="*",
help="Path to a CosmoFlow HDF5 file.")
parser.add_argument("--out_dir", type=str, default="dataset",
help="An optional value.")
parser.add_argument("--width", type=int, default=128,
help="The output spatial width.")
parser.add_argument("--datatype", type=str, default="float32",
help="The data type for universe data.")
args = parser.parse_args()
    if not os.path.isdir(args.out_dir):
sys.stderr.write("The output directory does not exist: {}\n"
.format(args.out_dir))
exit(1)
if (ORIG_WIDTH % args.width) != 0:
sys.stderr.write("The output width is not a divisor of the original width({}): {}\n"
.format(ORIG_WIDTH, args.width))
exit(1)
    if args.datatype not in ["float", "float32", "int16"]:
        sys.stderr.write("Unrecognized data type: {}\n".format(args.datatype))
        exit(1)
    data_type = getattr(np, args.datatype)
sub_cube_count = ORIG_WIDTH // args.width
for hdf5_file in args.hdf5_files:
m = re.compile("(.*)\\.hdf5$").match(os.path.basename(hdf5_file))
if m is None:
sys.stderr.write("Unrecognized file name: {}\n".format(hdf5_file))
exit(1)
hdf5_file_wo_ext = m.group(1)
        with h5py.File(hdf5_file, "r") as h:
            full = h["full"][()]        # Dataset.value was removed in h5py >= 3.0
            unitPar = h["unitPar"][()]
        assert full.shape == tuple([ORIG_WIDTH]*3+[ORIG_NUM_PARAMS])
        assert unitPar.shape == (ORIG_NUM_PARAMS,)
        full_transposed = full.transpose().astype(data_type)
for ix, iy, iz in product(range(sub_cube_count),
range(sub_cube_count),
range(sub_cube_count)):
cube = full_transposed[
:,
(args.width*ix):(args.width*(ix+1)),
(args.width*iy):(args.width*(iy+1)),
(args.width*iz):(args.width*(iz+1)),
]
assert cube.shape == tuple([ORIG_NUM_PARAMS]+[args.width]*3)
out_path = os.path.join(
args.out_dir,
"{}_{}_{}_{}.hdf5".format(hdf5_file_wo_ext, ix, iy, iz))
with h5py.File(out_path, "w-") as hw:
hw["full"] = cube
hw["unitPar"] = unitPar.value
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : EINDEX Li
@File : __init__.py.py
@Created : 26/12/2017
"""
from aiospider.tools.singleton import OnlySingleton
class AIOSpider(metaclass=OnlySingleton):
def __init__(self, loop=None):
self.config = dict()
self.loop = loop
def __eq__(self, other):
return id(self.config) == id(other.config)
    def __repr__(self):
        return "<AIOSpider loop={!r}>".format(self.loop)
|
nilq/baby-python
|
python
|
# @author: Michael Vorotyntsev
# @email: linkofwise@gmail.com
# @github: unaxfromsibiria
import logging
import string
from enum import Enum
from hashlib import sha256, md5
from random import SystemRandom
_cr_methods = {
'sha256': sha256,
'md5': md5,
}
class ServiceGroup(Enum):
service = 1
server = 2
web = 3
class CommandTargetType(Enum):
exit = 0
auth_request = 1
auth = 2
client_data = 3
wait_command = 4
server_status = 5
methods_registration = 6
call_method = 7
wait_free = 8
problem = 9
ok = 10
server_call = 11
ping = 12
get_result = 13
class Protocol(object):
_handlers = {}
_options = {}
_logger = None
def __init__(self, **options):
self._options.update(**options)
@classmethod
def add_handler(cls, target, handler):
assert callable(handler)
cls._handlers[target] = handler
def processing(self, command, manager):
if not self._logger:
self._logger = logging.getLogger(
self._options.get('logger_name'))
handler = self._handlers.get(command.target)
if not callable(handler):
raise NotImplementedError(
'Unknown target {}!'.format(command.target))
return handler(command, manager, self._options, self._logger)
# # handlers # #
def auth_request(command, manager, options, logger):
    key = command.data
    variants = string.digits + string.ascii_letters
    rand = SystemRandom()
    size = len(key)
    client_salt = ''.join(rand.choice(variants) for _ in range(size))
    content = '{}{}{}'.format(options.get('secret'), client_salt, key)
    _hash = _cr_methods.get(options.get('hash_method'))
    if _hash:  # the original tested the builtin `hash`, which is always truthy
        content = _hash(bytes(content, 'utf-8')).hexdigest()
    else:
        content = 'no method'
    return command.create(
        target=CommandTargetType.auth,
        data='{}:{}'.format(content, client_salt))
def send_client_data(command, manager, options, logger):
manager.setup_cid(command.cid)
return command.create(
target=CommandTargetType.client_data,
data={
'workers': options.get('workers') or 1,
'group': ServiceGroup.server.value,
})
def send_api_methods(command, manager, options, logger):
return command.create(
target=CommandTargetType.methods_registration,
data={
'methods': manager.get_public_methods(),
})
def start_info(command, manager, options, logger):
return None
def send_status(command, manager, options, logger):
return command.create(
target=CommandTargetType.server_status,
data={
'status': manager.get_status().value,
})
# # link # #
Protocol.add_handler(CommandTargetType.auth_request, auth_request)
Protocol.add_handler(CommandTargetType.client_data, send_client_data)
Protocol.add_handler(CommandTargetType.methods_registration, send_api_methods)
Protocol.add_handler(CommandTargetType.wait_command, start_info)
Protocol.add_handler(CommandTargetType.server_status, send_status)
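# Editorial sketch (not part of the original source): an additional handler is
# wired exactly like the links above; the ping reply below is hypothetical.
#
#   def answer_ping(command, manager, options, logger):
#       return command.create(target=CommandTargetType.ok, data=None)
#
#   Protocol.add_handler(CommandTargetType.ping, answer_ping)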
|
nilq/baby-python
|
python
|
N = int(input())
# N*(N-1)/2, computed with integer arithmetic (the Decimal import was unused)
print((N - 1) * N // 2)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 eval: (yapf-mode 1) -*-
#
# January 13 2019, Christian E. Hopps <chopps@labn.net>
#
# Copyright (c) 2019, LabN Consulting, L.L.C.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, division, unicode_literals, print_function, nested_scopes
import argparse
import fcntl
import logging
import io
import os
import socket
import struct
import sys
import threading
from . import iptfs
TUNSETIFF = 0x400454ca
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
logger = logging.getLogger(__file__)
def usage():
    print("usage: {} [-c|--connect server] [-p|--port service]".format(sys.argv[0]))
    sys.exit(1)
def tun_alloc(devname):
fd = os.open("/dev/net/tun", os.O_RDWR)
rfd = io.open(fd, "rb", buffering=0)
wfd = io.open(fd, "wb", buffering=0)
# ff = io.open(fd, "rb")
# f = io.open("/dev/net/tun", "rb", buffering=0)
ifs = fcntl.ioctl(fd, TUNSETIFF, struct.pack("16sH", devname.encode(), IFF_TUN | IFF_NO_PI))
devname = ifs[:16]
devname = devname.strip(b"\x00")
return rfd, wfd, devname
def connect(sname, service, isudp):
# stype = socket.SOCK_DGRAM if isudp else socket.SOCK_STREAM
proto = socket.IPPROTO_UDP if isudp else socket.IPPROTO_TCP
for hent in socket.getaddrinfo(sname, service, 0, 0, proto):
try:
s = socket.socket(*hent[0:3])
if isudp:
# Save the peer address
iptfs.peeraddr = hent[4]
s.connect(hent[4])
return s
except socket.error:
continue
return None
def accept(sname, service, isudp):
# stype = socket.SOCK_DGRAM if isudp else socket.SOCK_STREAM
proto = socket.IPPROTO_UDP if isudp else socket.IPPROTO_TCP
for hent in socket.getaddrinfo(sname, service, 0, 0, proto):
try:
logger.info("Get socket")
s = socket.socket(*hent[0:3])
logger.info("Set socketopt")
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logger.info("Try to bind to: %s", str(hent[4]))
s.bind(hent[4])
break
except socket.error as e:
logger.info("Got exception for %s: %s", str(hent), str(e))
continue
else:
logger.info("Can't bind to %s:%s", sname, service)
return None
if isudp:
# Do PEEK to get first UDP address from client.
logger.info("Server: waiting on initial UDP packet %s:%s:%s", sname, service, str(hent)) # pylint: disable=W0631
b = bytearray(9170)
(n, iptfs.peeraddr) = s.recvfrom_into(b, 0, socket.MSG_PEEK)
logger.info("Server: Got UDP packet from %s of len %d", iptfs.peeraddr, n)
s.connect(iptfs.peeraddr)
return (s, iptfs.peeraddr)
logger.info("Listen 5 on %s", str(iptfs.peeraddr))
s.listen(5)
logger.info("Doing accept.")
return s.accept()
def checked_main(*margs):
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--ack-rate", type=float, default=1.0, help="Rate in float seconds to send ACK info")
parser.add_argument("-c", "--connect", help="Connect to server")
parser.add_argument(
"--congest-rate", type=float, default=0, help="Forced maximum egress rate in Kilobits")
parser.add_argument("-d", "--dev", default="vtun%d", help="Name of tun interface.")
parser.add_argument("--debug", action="store_true", help="Debug logging and checks.")
parser.add_argument(
"--no-egress", action="store_true", help="Do not create tunnel egress endpoint")
parser.add_argument(
"--no-ingress", action="store_true", help="Do not create tunnel ingress endpoint")
parser.add_argument("-l", "--listen", default="::", help="Server listen on this address")
parser.add_argument("-p", "--port", default="8001", help="TCP port to use.")
# parser.add_argument("-u", "--udp", action="store_true", help="Use UDP instead of TCP")
parser.add_argument("-r", "--rate", type=float, default=0, help="Tunnel rate in Kilobits")
parser.add_argument("--trace", action="store_true", help="Trace logging.")
parser.add_argument("-v", "--verbose", action="store_true", help="Verbose logging.")
args = parser.parse_args(*margs)
FORMAT = '%(asctime)-15s %(threadName)s %(message)s'
if args.trace:
iptfs.TRACE = True
iptfs.DEBUG = True
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
elif args.debug:
iptfs.DEBUG = True
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
elif args.verbose:
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
else:
logging.basicConfig(format=FORMAT, level=logging.INFO)
riffd, wiffd, devname = tun_alloc(args.dev)
logger.info("Opened tun device: %s", devname)
if not args.connect:
s, _ = accept(args.listen, args.port, True)
logger.info("Accepted from client: %s", str(s))
else:
s = connect(args.connect, args.port, True)
logger.info("Connected to server: %s", str(s))
send_lock = threading.Lock()
threads = []
if not args.no_ingress:
threads.extend(iptfs.tunnel_ingress(riffd, s, send_lock, int(args.rate * 1000)))
if not args.no_egress:
threads.extend(
iptfs.tunnel_egress(s, send_lock, wiffd, args.ack_rate, int(args.congest_rate * 1000)))
for thread in threads:
thread.join()
return 0
def main(*margs):
try:
return checked_main(*margs)
except Exception as e: # pylint: disable=W0703
logger.critical("Unexpected exception: %s", str(e))
sys.exit(1)
__author__ = "Christian E. Hopps"
__date__ = "January 13 2019"
__version__ = "1.0"
__docformat__ = "restructuredtext en"
|
nilq/baby-python
|
python
|
from pyrete.settings import settings
from . import (
get_attr_name,
ParserLiterals,
)
class DataLayer(object):
"""
The DataLayer is responsible for fetching data from the database.
It parses the provided rules and fetches only the data required for running the rules.
Example:
.. code-block:: python
from pyrete.core.nodes import ReteGraph
from pyrete.core.data_layer import DataLayer
rule = {
'key': 'some_rule',
'description': 'Some awesome description',
...
}
graph = ReteGraph()
graph.load_rule(rule)
# ---------------------- Fetch data from DB
data = DataLayer().get_data(
rules=[rule],
filter={},
limit=10)
"""
def get_all_collections(self):
"""
Gets list of all collections in the database.
"""
return settings.DB.collection_names()
def _get_keys(self, doc, parent=None):
"""
Gets list of all the keys in a dict, including nested dicts and dicts inside a list.
Example:
demo_dict = {
"subtotal_price": "51.00",
"billing_address": {
"province" : "North Carolina",
"city" : "Franklinton"
},
"note_attributes": [
{
"name": "address-type",
"value": "residential",
},
{
""name": "transit-time",
"value": "1",
}
],
"token" : "384779c27a35e8fcc0c948ad87f0ac35"
}
Converts above into:
['subtotal_price',
'billing_address',
'billing_address.province',
'billing_address.city',
'note_attributes.name',
'note_attributes.value',
'token']
"""
key_list = []
for key in list(doc.keys()):
# Add parent.key if parent is present
if parent:
key_list.append(parent + '.' + key)
else:
key_list.append(key)
if isinstance(doc[key], dict):
# If nested dict, call this method again
new_parent = parent + '.' + key if parent else key
key_list.extend(
self._get_keys(doc[key], new_parent))
elif isinstance(doc[key], list):
if len(doc[key]) > 0 and isinstance(doc[key][0], dict):
# If nested dict inside a list, call this method again
new_parent = parent + '.' + key if parent else key
key_list.extend(
self._get_keys(doc[key][0], new_parent))
return key_list
def get_collection_fields(self, collection_name):
"""
        Gets the list of all field names in the given collection.
**Args:**
* **collection_name** *(str)*: The name of the collection for which field names are to be fetched.
**Returns:**
Returns the list of field names of the given **collection_name**.
"""
        doc = settings.DB[collection_name].find_one()
        if doc:
            return self._get_keys(doc)
        else:
            return {}
def _get_collection_data(self, rule, collection_name, filter={}, skip=0, limit=0):
"""
Gets only required data attributes from the database collection by evaluating projection
for the given **collection_name**.
**Args:**
* **rule** *(dict)*: The rule dictionary.
* **collection_name** *(str)*: The Collection Name for which projection needs to be evaluated.
* **filter** *(dict)*: Optional. Dictionary of filter for querying filtered data.
* **skip** *(int)*: Optional. The number of documents to be skipped while fetching the data.
* **limit** *(int)*: Optional. The maximum number of records to be fetched.
**Returns:**
Data dictionary of the provided **collection_name**, fetched from the database.
"""
projection = []
for variable in rule['variables']:
# Getting field names from "variables"
coll_name, attr_name, fn_name, fn_type = get_attr_name(variable['value'])
if attr_name and coll_name == collection_name:
projection.append(attr_name)
for condition in rule['when']['any']:
# Getting field names from "any"
coll_name, attr_name, fn_name, fn_type = get_attr_name(condition['name'])
if attr_name and coll_name == collection_name:
projection.append(attr_name)
for condition in rule['when']['any']:
# Getting field names from "value" if it is a "join condition"
condition_value = condition['value']
if isinstance(
condition_value, str) and condition_value.startswith(
ParserLiterals.OBJECT_VALUE_IDENTIFIER):
condition_value = condition['value'].replace(ParserLiterals.OBJECT_VALUE_IDENTIFIER, '')
coll_name, attr_name, fn_name, fn_type = get_attr_name(condition_value)
if attr_name and coll_name == collection_name:
projection.append(attr_name)
for condition in rule['when']['all']:
# Getting field names from "all"
coll_name, attr_name, fn_name, fn_type = get_attr_name(condition['name'])
if attr_name and coll_name == collection_name:
projection.append(attr_name)
for condition in rule['when']['all']:
# Getting field names from "value" if it is a "join condition"
condition_value = condition['value']
if isinstance(
condition_value, str) and condition_value.startswith(
ParserLiterals.OBJECT_VALUE_IDENTIFIER):
condition_value = condition['value'].replace(ParserLiterals.OBJECT_VALUE_IDENTIFIER, '')
coll_name, attr_name, fn_name, fn_type = get_attr_name(condition_value)
if attr_name and coll_name == collection_name:
projection.append(attr_name)
for action in rule['then']:
# Getting field names from "then"
for param in action['params']:
coll_name, attr_name, fn_name, fn_type = get_attr_name(param['value'])
if attr_name and coll_name == collection_name:
projection.append(attr_name)
projection.append('email')
cursor = settings.DB[collection_name].find(
filter=filter,
projection=projection,
skip=skip,
limit=limit)
# Return data instead of the cursor
data = []
for datum in cursor:
data.append(datum)
return data
def get_data(self, rules, filter={}, skip=0, limit=0):
"""
Gets the required data from the database. All the collections listed in the **collections** key
of the rule.
**Args:**
* **rules** *(list of dict)*: The list of rules.
* **filter** *(dict)*: Optional. Dictionary of filter for querying filtered data.
* **skip** *(int)*: Optional. The number of documents to be skipped while fetching the data.
* **limit** *(int)*: Optional. The maximum number of records to be fetched.
**Returns:**
Data dictionary of the provided **collection_name**, fetched from the database.
"""
data = {}
for rule in rules:
for collection_name in rule['collections']:
                data[collection_name] = self._get_collection_data(
                    rule,
                    collection_name,
                    filter=filter,
                    skip=skip,
                    limit=limit)
return data
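# Editorial sketch (not part of the original source): with the caller's filter
# now passed through, a filtered fetch for one rule looks like this; the
# collection and field names are hypothetical.
#
#   data = DataLayer().get_data(
#       rules=[rule],
#       filter={'billing_address.province': 'North Carolina'},
#       limit=100)
#   orders = data['orders']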
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""
Raven-django
============
Raven-Django is a Raven extension that provides full out-of-the-box support
for `Django <https://www.djangoproject.com>`_ framework.
Raven itself is a Python client for `Sentry <http://www.getsentry.com/>`_.
"""
# Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error
# in multiprocessing/util.py _exit_function when running `python
# setup.py test` (see
# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)
for m in ('multiprocessing', 'billiard'):
try:
__import__(m)
except ImportError:
pass
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sys
dev_requires = [
'flake8>=2.0,<2.1',
]
tests_require = [
'Django>=1.4',
'mock',
'pep8',
'pytz',
'pytest',
'pytest-cov>=1.4',
'pytest-django',
'python-coveralls',
]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='raven',
version='0.0.0',
author='Xavier Ordoquy',
author_email='xordoquy@linovia.com',
url='http://github.com/getsentry/raven-django',
description='Raven-django is a Django extension for Raven (https://www.getsentry.com)',
long_description=__doc__,
packages=find_packages(exclude=("tests", "tests.*",)),
zip_safe=False,
install_requires=['raven'],
extras_require={
'tests': tests_require,
'dev': dev_requires,
},
license='BSD',
tests_require=tests_require,
cmdclass={'test': PyTest},
include_package_data=True,
entry_points={},
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
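# Editorial sketch: typical developer workflow for this setup script:
#   pip install -e .[dev,tests]   # extras as declared in extras_require above
#   python setup.py test          # dispatches to the PyTest command class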
|
nilq/baby-python
|
python
|
from typing import List
import allure
from markupsafe import Markup
from overhave.entities import OverhaveDescriptionManagerSettings
class DescriptionManager:
""" Class for test-suit custom description management and setting to Allure report. """
def __init__(self, settings: OverhaveDescriptionManagerSettings):
self._settings = settings
self._description: List[str] = []
def apply_description(self) -> None:
if self._description:
joined_description = self._settings.blocks_delimiter.join(self._description)
if not self._settings.html:
allure.dynamic.description(joined_description)
return
allure.dynamic.description_html(Markup(joined_description))
def add_description(self, value: str) -> None:
self._description.append(value)
def add_description_above(self, value: str) -> None:
self._description.insert(0, value)
|
nilq/baby-python
|
python
|
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
_RTOS_NONE = "//pw_build/constraints/rtos:none"
# Common select for tagging a target as only compatible with host OSes. This
# select implements the logic '(Windows or macOS or Linux) and not RTOS'.
# Example usage:
# load("//pw_build:selects.bzl","TARGET_COMPATIBLE_WITH_HOST_SELECT")
# pw_cc_library(
# name = "some_host_only_lib",
# hdrs = ["host.h"],
# target_compatible_with = select(TARGET_COMPATIBLE_WITH_HOST_SELECT),
# )
TARGET_COMPATIBLE_WITH_HOST_SELECT = {
"@platforms//os:windows": [_RTOS_NONE],
"@platforms//os:macos": [_RTOS_NONE],
"@platforms//os:linux": [_RTOS_NONE],
"//conditions:default": ["@platforms//:incompatible"],
}
|
nilq/baby-python
|
python
|
from django.apps import AppConfig
class ExternalLinksConfig(AppConfig):
name = 'wagtail_external_menu_items'
|
nilq/baby-python
|
python
|
# Standard
import logging
# Third Party
import six
import pygame as pg
from pytmx.util_pygame import load_pygame
from pytmx import TiledImageLayer, TiledTileLayer
# Project
from harren.utils import color
LOG = logging.getLogger(__name__)
class Renderer(object):
"""This object renders tile maps from Tiled."""
def __init__(self, filename):
tm = load_pygame(filename, pixelalpha=True)
self.size = tm.width * tm.tilewidth, tm.height * tm.tileheight
self.tmx_data = tm
def render(self, surface):
tw = self.tmx_data.tilewidth
th = self.tmx_data.tileheight
gt = self.tmx_data.get_tile_image_by_gid
bg_color = self.tmx_data.background_color
if isinstance(bg_color, six.string_types):
bg_color = color.hex_to_rgb(bg_color)
if bg_color:
surface.fill(bg_color)
for layer in self.tmx_data.visible_layers:
if isinstance(layer, TiledTileLayer):
for x, y, gid in layer:
tile = gt(gid)
if tile:
surface.blit(tile, (x * tw, y * th))
elif isinstance(layer, TiledImageLayer):
image = gt(layer.gid)
if image:
surface.blit(image, (0, 0))
def make_2x_map(self):
temp_surface = pg.Surface(self.size)
self.render(temp_surface)
temp_surface = pg.transform.scale2x(temp_surface)
return temp_surface
|
nilq/baby-python
|
python
|
from discord.ext import commands
import discord, typing, random
import utils
from discord.ext.commands.cooldowns import BucketType
import collections, itertools
class Test(commands.Cog):
"""A cog to have people test new commands, or wip ones"""
def __init__(self, bot):
self.bot = bot
@commands.command()
async def ticket_make(self, ctx):
await ctx.send("WIP, will make ticket soon.. Please Contact the owner with the support command")
@commands.command(brief="this command will error by sending no content")
async def te(self, ctx):
await ctx.send("this command will likely error...")
await ctx.send("")
@commands.command(brief = "WIP command to verify")
async def verify(self, ctx):
await ctx.send("WIP will make this soon..")
async def cog_check(self, ctx):
return ctx.author.id in self.bot.testers
@commands.command(brief = "a command to email you(work in progress)", help = "This command will email your email, it will automatically delete in guilds, but not in DMs(as it's not necessary")
async def email(self, ctx, *args):
print(args)
await ctx.send("WIP")
@commands.command(brief="make a unique prefix for this guild(other prefixes still work)")
async def setprefix(self, ctx, *, arg = None):
await ctx.send("WIP")
@commands.command(brief = "WIP thing for birthday set up lol")
async def birthday_setup(self, ctx):
await ctx.send("WIP")
@commands.command(brief ="sleep time")
async def set_sleeptime(self, ctx):
await ctx.send("WIP")
@commands.command(brief = "wakeup time")
async def set_wakeuptime(self, ctx):
await ctx.send("WIP")
@commands.command(brief = "gets tweets from a username")
async def tweet(self, ctx, *, args = None):
await ctx.send("WIP")
    #look at the JDJG Bot original
@commands.command(brief = "add emoji to your guild lol")
async def emoji_add(self, ctx):
await ctx.send("WIP")
    #look at the JDJG Bot original
@commands.command(brief = "scans statuses to see if there is any bad ones.")
async def scan_status(self, ctx):
await ctx.send("will scan statuses in a guild to see if there is a bad one.")
@commands.command(brief = "sets logs for a guild", name = "logging")
async def _logging(self, ctx):
await ctx.send("logging wip.")
#look at global_chat stuff for global_chat features, rank for well rank, add an update system too, add cc_ over. nick too, as well as kick and ban, ofc unban and other guild ban moderation stuff. Port over emoji_check but public and make that do it's best to upload less than 256 kB, try to and ofc an os emulation mode, as well as update mode, and nick.
#make the bot be able to lock commands to owners only, for testing purposes or not respond to commands.
#Unrelated to Urban:
#https://discordpy.readthedocs.io/en/master/api.html?highlight=interaction#discord.InteractionResponse.send_message
#https://discordpy.readthedocs.io/en/latest/api.html#discord.Guild.query_members
#guild_prefixes table in my sql database
#spyco data table in my sql database
@commands.group(brief = "list of commands of plans of stuff to do in the future", invoke_without_command = True)
async def todo(self, ctx):
page = "\n".join(f"{c.name}" for c in ctx.command.commands)
await ctx.send(f"Please run the subcommands with the prefix {ctx.prefix}: \n{page}")
#needs to be a bit better, and to embed it.
@todo.command(brief = "lists stuff in todo")
async def list(self, ctx):
values = await self.bot.db.fetch("SELECT * FROM todo WHERE user_id = $1 ORDER BY added_time ASC", ctx.author.id)
if not values:
embed = discord.Embed(description = "No items in your Todo List", color = 1246983, timestamp = ctx.message.created_at)
embed.set_footer(text = f"Requested by {ctx.author}", icon_url = ctx.author.display_avatar.url)
return await ctx.send(embed = embed)
pag = commands.Paginator()
#Idk from here
@todo.command(brief = "adds items to todo")
async def add(self, ctx, *, text : commands.clean_content = None):
if not text:
return await ctx.send("Please tell me what to add")
value = await self.bot.db.fetchrow("SELECT * FROM todo WHERE user_id = $1 AND TEXT = $2", ctx.author.id, text)
if value:
return await ctx.send("What?")
await self.bot.db.execute("INSERT INTO todo (user_id, text, jump_url, added_time) VALUES ($1, $2, $3, $4)", ctx.author.id, text[0:4000], ctx.message.jump_url, ctx.message.created_at)
await ctx.send("ADDED")
@todo.command(brief = "edits items in todo")
async def edit(self, ctx):
await ctx.send("WIP")
@todo.command(brief = "removes items in todo")
async def remove(self, ctx):
await ctx.send("WIP")
@todo.command(brief = "removes all your items in todo")
async def clear(self, ctx):
await ctx.send("WIP")
#add support for https://discordpy.readthedocs.io/en/master/api.html#discord.Member.mobile_status
#https://discordpy.readthedocs.io/en/master/api.html#discord.Member.desktop_status
#https://discordpy.readthedocs.io/en/master/api.html#discord.Member.web_status
#do something with this: https://discordpy.readthedocs.io/en/master/api.html#discord.Member.status
class Slash(commands.Cog):
"""A Testing Category for Slash Commands"""
def __init__(self, bot):
self.bot = bot
def setup(bot):
bot.add_cog(Test(bot))
bot.add_cog(Slash(bot))
|
nilq/baby-python
|
python
|
"""Built-in reducer function."""
# pylint: disable=redefined-builtin
from __future__ import absolute_import
import sys
from .base import BuiltinFunction, TargetCode
from ..runtime import ir
from ..runtime.ir import var
class ReduceFunction(BuiltinFunction):
"""Base builtin reduce function class."""
def _invoke(self, graph, edge_frame, out_size, edge_map=None,
out_map=None):
"""Symbolic computation of this builtin function to create
runtime.executor
"""
raise NotImplementedError
@property
def name(self):
"""Return the name of this builtin function."""
raise NotImplementedError
class SimpleReduceFunction(ReduceFunction):
"""Builtin reduce function that aggregates a single field into another
single field."""
def __init__(self, name, msg_field, out_field):
self._name = name
self.msg_field = msg_field
self.out_field = out_field
def _invoke(self, graph, edge_frame, out_size, edge_map=None,
out_map=None):
"""Symbolic execution of this builtin function"""
reducer = self._name
graph = var.GRAPH(graph)
edge_map = var.MAP(edge_map)
out_map = var.MAP(out_map)
edge_data = ir.READ_COL(edge_frame, var.STR(self.msg_field))
return ir.COPY_REDUCE(reducer, graph, TargetCode.EDGE, edge_data,
out_size, edge_map, out_map)
@property
def name(self):
return self._name
###############################################################################
# Generate all following reducer functions:
# sum, max, min, prod
def _gen_reduce_builtin(reducer):
docstring = """Builtin reduce function that aggregates messages by {0}.
Parameters
----------
msg : str
The message field.
out : str
The output node feature field.
Examples
--------
>>> import dgl
>>> reduce_func = dgl.function.{0}('m', 'h')
The above example is equivalent to the following user defined function
(if using PyTorch):
>>> import torch
>>> def reduce_func(nodes):
>>> return {{'h': torch.{0}(nodes.mailbox['m'], dim=1)}}
""".format(reducer)
def func(msg, out):
return SimpleReduceFunction(reducer, msg, out)
func.__name__ = reducer
func.__doc__ = docstring
return func
__all__ = []
def _register_builtin_reduce_func():
"""Register builtin reduce functions"""
for reduce_op in ["max", "min", "sum", "prod"]:
builtin = _gen_reduce_builtin(reduce_op)
setattr(sys.modules[__name__], reduce_op, builtin)
__all__.append(reduce_op)
_register_builtin_reduce_func()
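# Editorial note: after _register_builtin_reduce_func() runs, this module
# exposes sum/max/min/prod, so e.g. dgl.function.sum('m', 'h') builds a
# SimpleReduceFunction whose name selects the COPY_REDUCE executor above.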
|
nilq/baby-python
|
python
|
### packages
import os
import numpy as np
import torch
import pickle as pkl
from copy import deepcopy
### sys relative to root dir
import sys
from os.path import dirname, realpath
sys.path.append(dirname(dirname(realpath(__file__))))
### absolute imports wrt root
from problems.problem_definition import ProblemDefinition_Abstract, welless_check_decorator
from codes.factory import Factory_SimGAN
from data.data_tools import simganData
from codes.utilities.custom_logging import ezLogging
from codes.utilities.gan_tournament_selection import get_graph_ratings
import codes.utilities.simgan_feature_eval as feature_eval
from codes.utilities.simgan_fid_metric import get_fid_scores
from codes.utilities.simgan_support_size_eval import get_support_size
from codes.block_definitions.shapemeta.block_shapemeta import BlockShapeMeta_SimGAN_Network, BlockShapeMeta_SimGAN_Train_Config
from codes.block_definitions.operators.block_operators import BlockOperators_SimGAN_Refiner, BlockOperators_SimGAN_Discriminator, BlockOperators_SimGAN_Train_Config
from codes.block_definitions.arguments.block_arguments import BlockArguments_Auto
from codes.block_definitions.evaluate.block_evaluate_pytorch import BlockEvaluate_SimGAN_Refiner, BlockEvaluate_SimGAN_Discriminator, BlockEvaluate_SimGAN_Train_Config
from codes.block_definitions.mutate.block_mutate import BlockMutate_OptB_No_Single_Ftn, BlockMutate_OptB, BlockMutate_ArgsOnly
from codes.block_definitions.mate.block_mate import BlockMate_WholeOnly
from codes.individual_definitions.individual_mutate import IndividualMutate_RollOnEachBlock_LimitedMutants
from codes.individual_definitions.individual_mate import IndividualMate_RollOnEachBlock
from codes.individual_definitions.individual_evaluate import IndividualEvaluate_SimGAN
from post_process import save_things
from post_process import plot_things
from post_process import plot_signals
from codes.utilities import decorators
class Problem(ProblemDefinition_Abstract):
    '''
    Not intended to check whether this evolves anything well; rather, it is a
    quick way to test out the different mating, mutating, operators, etc.
    with multiple blocks.
    '''
def __init__(self):
population_size = 4 #must be divisible by 4 if doing mating
number_universe = 1
factory = Factory_SimGAN
mpi = False
genome_seeds = [["misc/IndivSeed_SimGAN_Seed0/RefinerBlock_lisp.txt",
"misc/IndivSeed_SimGAN_Seed0/DiscriminatorBlock_lisp.txt",
"misc/IndivSeed_SimGAN_Seed0/ConfigBlock_lisp.txt"]]*population_size
hall_of_fame_flag = True
super().__init__(population_size, number_universe, factory, mpi, genome_seeds, hall_of_fame_flag)
self.relativeScoring = True # this will force universe to be instance of RelativePopulationUniverseDefinition() in main.py
refiner_def = self.construct_block_def(nickname = "refiner_block",
shape_def = BlockShapeMeta_SimGAN_Network,
operator_def = BlockOperators_SimGAN_Refiner,
argument_def = BlockArguments_Auto(BlockOperators_SimGAN_Refiner().operator_dict, 10),
evaluate_def = BlockEvaluate_SimGAN_Refiner,
mutate_def=BlockMutate_OptB_No_Single_Ftn(prob_mutate=0.2, num_mutants=2),
mate_def=BlockMate_WholeOnly(prob_mate=1/3)
)
discriminator_def = self.construct_block_def(nickname = "discriminator_block",
shape_def = BlockShapeMeta_SimGAN_Network,
operator_def = BlockOperators_SimGAN_Discriminator,
argument_def = BlockArguments_Auto(BlockOperators_SimGAN_Discriminator().operator_dict, 15),
evaluate_def = BlockEvaluate_SimGAN_Discriminator,
mutate_def=BlockMutate_OptB(prob_mutate=0.2, num_mutants=2),
mate_def=BlockMate_WholeOnly(prob_mate=1/3)
)
train_config_def = self.construct_block_def(nickname = "train_config",
shape_def = BlockShapeMeta_SimGAN_Train_Config,
operator_def = BlockOperators_SimGAN_Train_Config,
argument_def = BlockArguments_Auto(BlockOperators_SimGAN_Train_Config().operator_dict, 10),
evaluate_def = BlockEvaluate_SimGAN_Train_Config,
mutate_def=BlockMutate_ArgsOnly(prob_mutate=0.1, num_mutants=2),
mate_def=BlockMate_WholeOnly(prob_mate=1/3)
)
self.construct_individual_def(block_defs = [refiner_def, discriminator_def, train_config_def],
mutate_def = IndividualMutate_RollOnEachBlock_LimitedMutants,
mate_def = IndividualMate_RollOnEachBlock,
evaluate_def = IndividualEvaluate_SimGAN
)
self.construct_dataset()
@decorators.stopwatch_decorator
def construct_dataset(self):
'''
Constructs a train and validation 1D signal datasets
'''
# Can configure the real and simulated sizes + batch size, but we will use default
train_config_dict = {'device': 'cuda', # was gpu but that didn't work anymore
'offline_mode': False} # see Issue #268 to get pretrained models working offline
self.training_datalist = [simganData.SimGANDataset(real_size=512, sim_size=128**2, batch_size=4),
train_config_dict]
self.validating_datalist = [simganData.SimGANDataset(real_size=128, sim_size=int((128**2)/4), batch_size=4)]
def set_optimization_goals(self):
self.maximize_objectives = [False, False, False, True]
self.objective_names = ["FID", "KS stat", "Significant Count", "Avg Feature P-value"] # will be helpful for plotting later
@decorators.stopwatch_decorator
@welless_check_decorator
def objective_functions(self, population):
'''
Get the best refiner and discriminator from each individual in the population and do a tournament selection to rate them
# TODO: add in the support size as a metric
'''
n_individuals = len(population.population)
refiners = []
discriminators = []
alive_individual_index = []
for i, indiv in enumerate(population.population):
if not indiv.dead:
alive_individual_index.append(i)
R, D = indiv.output
refiners.append(R.cpu())
discriminators.append(D.cpu())
# Run tournament and add ratings
if len(alive_individual_index) > 0:
# Objective #1 - NO LONGER AN OBJECTIVE FOR POPULATION SELECTION
ezLogging.info("Calculating Objective 1")
refiner_ratings, _ = get_graph_ratings(refiners,
discriminators,
self.validating_datalist[0],
'cpu')
# Objective #2
ezLogging.info("Calculating Objective 2")
refiner_fids, mses = get_fid_scores(refiners, self.validating_datalist[0], offline_mode=self.training_datalist[1]['offline_mode'])
#refiner_fids, mses = (np.random.random(size=len(refiners)), np.random.random(size=len(refiners))) #<-sometimes i get a gpu memory error on above step so i replace with this in testing
# Objective #3, #4, #5
ezLogging.info("Calculating Objective 3,4,5")
refiner_feature_dist = feature_eval.calc_feature_distances(refiners, self.validating_datalist[0], 'cpu')
# Objective #6, #7
ezLogging.info("Calculating Objective 6,7")
refiner_t_tests = feature_eval.calc_t_tests(refiners, self.validating_datalist[0], 'cpu')
# Objective #8
#ezLogging.info("Calculating Objective 8")
#support_size = get_support_size(refiners, self.validating_datalist[0], 'cpu')
for indx, rating, fid, kl_div, wasserstein_dist, ks_stat, num_sig, avg_feat_pval, mse \
in zip(alive_individual_index,
refiner_ratings['r'],
refiner_fids,
refiner_feature_dist['kl_div'],
refiner_feature_dist['wasserstein_dist'],
refiner_feature_dist['ks_stat'],
refiner_t_tests['num_sig'],
refiner_t_tests['avg_feat_pval'],
mses):
# since refiner rating is a 'relative' score, we are not going to set it to fitness value to be used in population selection
# BUT we will keep it available as metadata
if hasattr(population.population[indx], 'refiner_rating'):
population.population[indx].refiner_rating.append(rating)
else:
population.population[indx].refiner_rating = [rating]
# mse is used to eval eval functions, we are not going to set it to fitness value to be used in population selection
# BUT we will keep it available as metadata
if hasattr(population.population[indx], 'mse'):
population.population[indx].mse.append(mse)
else:
population.population[indx].mse = [mse]
# Issue 219 - filtering down to only 4 objectives:
# fid (#2), ks_stat (#5), num_sig (#6), and avg_feat_pval (#7)
population.population[indx].fitness.values = (fid, ks_stat, num_sig, avg_feat_pval)
def check_convergence(self, universe):
'''
TODO: add code for determining whether convergence has been reached
'''
GENERATION_LIMIT = 1 # TODO
if universe.generation >= GENERATION_LIMIT:
ezLogging.warning("TERMINATING...reached generation limit.")
universe.converged = True
def population_selection(self, universe):
for i, indiv in enumerate(universe.population.population):
ezLogging.warning("Final Population Scores: (%i) %s %s" % (i, indiv.id, indiv.fitness.values))
next_pop = super().population_selection(universe)
for i, indiv in enumerate(next_pop):
ezLogging.warning("Next Population Scores: (%i) %s %s" % (i, indiv.id, indiv.fitness.values))
return next_pop
def save_pytorch_individual(self, universe, original_individual):
'''
can't use save_things.save_population() because can't pickle nn.Module,
so we're going to save the block and individual outputs into a folder for each individual,
then delete those outputs so we can use save_things.save_population() normally.
'''
ezLogging.debug("Saving individual %s from generation %i" % (original_individual.id, universe.generation))
# deepcopy in-case we still plan on using individual for another evolution
individual = deepcopy(original_individual)
# handle file names and locations
name = "gen_%04d_indiv_%s" % (universe.generation, individual.id)
attachment_folder = os.path.join(universe.output_folder, name)
os.makedirs(attachment_folder, exist_ok=False)
# save models
# NOTE if indiv.dead then some of these values may not be filled
if not individual[0].dead:
torch.save(individual[0].output[0].state_dict(),
os.path.join(attachment_folder, 'untrained_refiner'))
if not individual[1].dead:
torch.save(individual[1].output[0].state_dict(),
os.path.join(attachment_folder, 'untrained_discriminator'))
if not individual[2].dead:
with open(os.path.join(attachment_folder, 'trainconfig_dict.pkl'), 'wb') as f:
pkl.dump(individual[2].output, f)
if not individual.dead:
torch.save(individual.output[0].state_dict(),
os.path.join(attachment_folder, 'trained_refiner'))
torch.save(individual.output[1].state_dict(),
os.path.join(attachment_folder, 'trained_discriminator'))
# now overwrite
individual[0].output = []
individual[1].output = []
individual[2].output = []
individual.output = []
individual.blocks[1].local_graph = None
# save individual
indiv_file = os.path.join(universe.output_folder, name+".pkl")
with open(indiv_file, "wb") as f:
pkl.dump(individual, f)
def postprocess_generation(self, universe):
'''
Save fitness scores and the refiners on the pareto front of fitness scroes
'''
ezLogging.info("Post Processing Generation Run")
save_things.save_fitness_scores(universe)
save_things.save_HOF_scores(universe)
# to be used later to extract features
# ...note that we allow features to be turned on/off in evolution but we still plot all features
fe = feature_eval.FeatureExtractor()
for individual in universe.population.population:
if not individual.dead:
self.save_pytorch_individual(universe, individual)
plot_things.draw_genome(universe, self, individual)
# the rest is just to plot signals
num_signals = 5
#sample_index_sim = np.random.choice(np.arange(len(self.validating_datalist[0].simulated_raw)), size=num_signals)
sample_index_sim = np.arange(num_signals) #not letting it be random so we can easily compare between refiners
simulated_batch = torch.tensor(self.validating_datalist[0].simulated_raw[sample_index_sim], dtype=torch.float, device='cpu')
sample_index_real = np.random.choice(np.arange(len(self.validating_datalist[0].real_raw)), size=num_signals)
real_batch = torch.tensor(self.validating_datalist[0].real_raw[sample_index_real], dtype=torch.float, device='cpu')
R, D = individual.output
refined_sim_batch = R.cpu()(simulated_batch)
refined_sim_preds = D.cpu()(refined_sim_batch)
real_preds = D.cpu()(real_batch)
attachment_folder = os.path.join(universe.output_folder, "gen_%04d_indiv_%s_signals.png" % (universe.generation, individual.id))
plot_signals.generate_img_batch(simulated_batch.data.cpu(),
refined_sim_batch.data.cpu(),
real_batch.data.cpu(),
attachment_folder,
refined_sim_preds,
real_preds)
# now plot the feature distributions...but use full batch
simulated_batch = torch.tensor(self.validating_datalist[0].simulated_raw, dtype=torch.float, device='cpu')
real_batch = torch.tensor(self.validating_datalist[0].real_raw, dtype=torch.float, device='cpu')
refined_sim_batch = R.cpu()(simulated_batch)
simulated_features = fe.get_features(np.squeeze(simulated_batch.cpu().detach().numpy())).T
refined_sim_features = fe.get_features(np.squeeze(refined_sim_batch.cpu().detach().numpy())).T
real_features = fe.get_features(np.squeeze(real_batch.cpu().detach().numpy())).T
                # get_features() returns features-by-signals; the transposes
                # above give signals-by-features arrays
for ith_feature, feature_name in enumerate(fe.feature_names):
fig, axes = plot_things.plot_init(1, 1)
data = [simulated_features[:,ith_feature], refined_sim_features[:,ith_feature], real_features[:,ith_feature]]
labels = ["Simulated", "Refined Sim", "Real"]
plot_things.violin(axes[0,0], data, labels)
axes[0,0].set_title("%s feature distributions" % feature_name)
name = os.path.join(universe.output_folder, "gen_%04d_indiv_%s_%s_distribution.png" % (universe.generation, individual.id, feature_name))
plot_things.plot_save(fig, name)
# Pareto Plot for each objective combo at current HOF:
for i in range(len(self.maximize_objectives)-1):
for j in range(i+1,len(self.maximize_objectives)):
x_obj = self.objective_names[i]
y_obj = self.objective_names[j]
# Grab Pareto front and visualize secondary waveforms...do it for each combo of objectives
pareto_fig, pareto_axis = plot_things.plot_init(nrow=1, ncol=1, figsize=None, xlim=None, ylim=None)
pareto_fronts = plot_things.get_pareto_front(universe.population.hall_of_fame.items,
self.maximize_objectives,
x_objective_index=i,
y_objective_index=j,
first_front_only=False)
plot_things.plot_pareto_front2(pareto_axis[0,0],
pareto_fronts,
color=None, label='',
x_objective_index=0, y_objective_index=1,
xlabel=x_obj, ylabel=y_obj,
min_x=None, max_x=None,
min_y=None, max_y=None)
#plot_things.plot_legend(pareto_fig)
plot_things.plot_save(pareto_fig,
os.path.join(universe.output_folder,
"pareto_front_gen%04d_%s_vs_%s.png" % (universe.generation, x_obj, y_obj)))
# Best Pareto Plot Over time
for i in range(len(self.maximize_objectives)-1):
for j in range(i+1,len(self.maximize_objectives)):
x_obj = self.objective_names[i]
y_obj = self.objective_names[j]
pareto_fig, pareto_axis = plot_things.plot_init(nrow=1, ncol=1, figsize=None, xlim=None, ylim=None)
for gen in range(universe.generation+1):
hof_fitness_file = os.path.join(universe.output_folder, "gen%04d_hof_fitness.npz" % gen)
hof_fitness = np.load(hof_fitness_file)['fitness']
pareto_fronts = plot_things.get_pareto_front(hof_fitness,
self.maximize_objectives,
x_objective_index=i,
y_objective_index=j,
first_front_only=True)
plot_things.plot_pareto_front2(pareto_axis[0,0],
pareto_fronts,
color=None, label="HOF Gen %i" % (gen),
x_objective_index=0, y_objective_index=1,
xlabel=x_obj, ylabel=y_obj,
min_x=None, max_x=None,
min_y=None, max_y=None)
plot_things.plot_legend(pareto_fig)
plot_things.plot_save(pareto_fig,
os.path.join(universe.output_folder,
"pareto_front_overtime_gen%04d_%s_vs_%s.png" % (universe.generation, x_obj, y_obj)))
# AUC over time:
# get files:
all_hof_scores = []
for gen in range(universe.generation+1):
hof_fitness_file = os.path.join(universe.output_folder, "gen%04d_hof_fitness.npz" % gen)
hof_fitness = np.load(hof_fitness_file)['fitness']
all_hof_scores.append(hof_fitness)
# now for each combo of objectives, make a plot
for i in range(len(self.maximize_objectives)-1):
for j in range(i+1,len(self.maximize_objectives)):
x_obj = self.objective_names[i]
y_obj = self.objective_names[j]
all_auc = plot_things.calc_auc_multi_gen(self.maximize_objectives, i, j, *all_hof_scores)
auc_fig, auc_axis = plot_things.plot_init(nrow=1, ncol=1, figsize=None, xlim=None, ylim=None)
auc_axis[0,0].plot(all_auc, marker='*')
auc_axis[0,0].set_xlabel("ith Generation")
auc_axis[0,0].set_title("AUC over time\n%s_vs_%s" % (x_obj, y_obj))
plot_things.plot_save(auc_fig,
os.path.join(universe.output_folder, "AUC_overtime_gen%04d_%s_vs_%s.png" % (universe.generation, x_obj, y_obj)))
def postprocess_universe(self, universe):
'''
TODO: add code for universe postprocessing
'''
# ezLogging.info("Post Processing Universe Run")
# save_things.save_population(universe)
# save_things.save_population_asLisp(universe, self.indiv_def)
pass
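# Editorial note: this Problem subclass is consumed by the framework's main
# entry point (main.py), which constructs the universe; with
# self.relativeScoring = True the run uses RelativePopulationUniverseDefinition,
# as noted in __init__ above.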
|
nilq/baby-python
|
python
|
import config
from metaL import *
app = App('metaL')
app['host'] = Ip(config.HOST)
app['port'] = Port(config.PORT)
app.eval(glob)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""This module is for running the measurement process multiple times.
Requires the use of batch_table.csv"""
__author__ = "Camellia Magness"
__email__ = "cmagness@stsci.edu"
import sys
import glob
import logging
import pandas as pd
from tqdm import tqdm
from astropy import constants
from . import SETTINGS
from .measure_aod import *
from .format_data import build_spectrum
from .visualization import Visualizer
INPUTS = SETTINGS["inputs"]
DATADIR = INPUTS["datadir"]
PARAMETERS = SETTINGS["parameters"]
DEFAULTS = SETTINGS["defaults"]
LOGGER = logging.getLogger(__name__)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
LOGGER.addHandler(console)
C = constants.c.to('km/s').value # km/s
N = 3.768e14 # proportionality constant -> (m_e * c)/(pi * e**2)
# TODO: check that outdir (and datadir) exist; create outdir if missing
# --------------------------------------------------------------------------- #
def main():
# look at the table for info-turn into df, return df
LOGGER.info("Entering batch mode...")
if DEFAULTS["batch_table"]:
batch_dataframe = read_table(DEFAULTS["batch_table"])
else:
LOGGER.error("Please provide a path to your batch table in your "
"settings file if you would like to perform multiple "
"measurements. Exiting...")
sys.exit()
# use rootnames to collect the files that need to be run on
batch_dataframe = collect_files(batch_dataframe)
# for each file, essentially run a slightly modified version of
# measure_aod.main
batch_run(batch_dataframe)
return 0
def read_table(filename):
    # thin wrapper around pd.read_csv, kept for symmetry with the other steps
return pd.read_csv(filename)
def collect_files(dataframe):
# get the column of the basenames as a list or w/e
rootnames = dataframe["ROOTNAME"]
# get the files in the data directory
all_files_in_dir = glob.glob(DATADIR + "*")
# for each file in the column, check to see if a file matches
batch_files = []
for rootname in rootnames:
for filename in all_files_in_dir:
if rootname in filename:
batch_files += [filename]
break
        else:  # for/else: reached only when the inner loop did not break,
            # i.e. no file matched this rootname
LOGGER.warning("No file was found matching the rootname: {}. "
"Continuing...".format(rootname))
if batch_files:
LOGGER.info("Found {} files to measure. This might take a while."
.format(len(batch_files)))
dataframe["FILENAME"] = batch_files
else:
LOGGER.warning("Found no files to measure. Exiting...")
sys.exit()
return dataframe
def batch_run(dataframe):
# for each file in the list, do:
    for index, file_row in tqdm(dataframe.iterrows(), total=len(dataframe)):
# collect the arguments
args = {"datadir": file_row["FILENAME"],
"ins": file_row["INSTRUMENT"].upper(),
"file": file_row["FILETYPE"].upper(),
"grating": file_row["GRATING"].upper(),
"redshift": file_row["REDSHIFT"]}
# build a spectrum object
spectrum = build_spectrum(**args)
spectrum.target = file_row["TARGET"] # this is a hack to get around
# stuff in build_spectrum
LOGGER.info("Spectrum object successfully built.")
# pass that on and do everything exactly the same
# set up visualizer
visualizer = Visualizer()
visualizer.set_target(spectrum.target)
visualizer.set_raw_flux(spectrum.flux)
# LSR correction
# need to add ion in to the args here
args["ion"] = file_row["ION"]
spectrum = lsr_correct(args, spectrum)
LOGGER.info("Spectrum LSR corrected.")
visualizer.set_raw_velocity(spectrum.raw_velocity[0])
visualizer.set_lsr_velocity(spectrum.velocity[0])
# continuum fit
spectrum, left_indices, right_indices = continuum_fit(spectrum)
LOGGER.info("Continuum fit calculated.")
visualizer.set_contadjspec(spectrum)
visualizer.set_indices(left_indices, right_indices)
# measure aod/acd/ew
# set measurements back in spectrum object from helper object
# need to add the vel_min & max here
args["vel_min"] = file_row["VEL_MIN"]
args["vel_max"] = file_row["VEL_MAX"]
spectrum, helper = measure(args, spectrum)
visualizer.set_helper(helper)
# generate table
spectrum.generate_table(args["vel_min"], args["vel_max"])
visualizer.plot()
LOGGER.info("Finished measurements for {}"
.format(file_row["FILENAME"]))
# finish entire list
LOGGER.info("spectrAOD complete.")
# --------------------------------------------------------------------------- #
if __name__ == "__main__":
status = main()
sys.exit(status)
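# Editorial sketch of batch_table.csv, with columns inferred from the code
# above (the values shown are hypothetical):
#
#   ROOTNAME,TARGET,INSTRUMENT,FILETYPE,GRATING,REDSHIFT,ION,VEL_MIN,VEL_MAX
#   lcxv13040,MRK817,COS,X1DSUM,G130M,0.03145,SiII,-100,100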
|
nilq/baby-python
|
python
|
from flask import Flask
app = Flask(__name__)
from flask import render_template, url_for, send_file
import pkg_resources
import base64
logo = None
integration_data = None
packagename = None
@app.route('/')
def index():
global logo, integration_data, packagename
return render_template('index.html', name=packagename, logo=logo, data=integration_data)
@app.route('/dashboard/')
def dashboard():
global logo, integration_data, packagename
return render_template('integration_dashboard.html', name=packagename, logo=logo, data=integration_data)
def main(name, integration):
    global logo, integration_data, packagename
    integration_data = integration
    packagename = name
    # read the logo as bytes and embed it as base64; str.encode("base64")
    # only worked on Python 2
    with open(packagename + '/assets/' + integration_data['logo_file_name'], 'rb') as f:
        data = f.read()
    logo = base64.b64encode(data).decode('ascii')
    app.debug = True
    app.run()
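# Editorial sketch: launching the dashboard; only 'logo_file_name' is known
# from the code above, other keys in the integration dict depend on the
# templates.
#
#   main('my_integration', {'logo_file_name': 'logo.png'})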
|
nilq/baby-python
|
python
|
import pymongo
import nltk
from nltk.stem.porter import *
from nltk.corpus import stopwords
from collections import Counter
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer,TfidfTransformer
from itertools import islice
from sklearn import preprocessing
import numpy as np
import pandas as pa
client = pymongo.MongoClient('localhost', 27017)
db = client['db']
#Remove punctuation from a column of a dataframe and put the results to a new column
def remove_punctions(df,column,newColumn):
    df[newColumn]=df[column].str.replace(r'[^\w\s]', '', regex=True)
return df;
#Steemming a column of a dataframe and put the results to a new column
def stemming(df,column,newColumn):
porter_stemmer = PorterStemmer()
df["tokenized column"] =df[column].apply(lambda x: filter(None, x.split(" ")))
df['stemmed column'] = df["tokenized column"].apply(lambda x: [porter_stemmer.stem(y) for y in x])
df[newColumn]=df['stemmed column'].apply(lambda x : " ".join(x))
return df;
#Remove stopwords from a column of a dataframe and put the results in a new column
def remove_stopword(df, column, newColumn):
    stop = set(stopwords.words('english'))
    df[newColumn] = df[column].apply(lambda x: ' '.join([word for word in x.split() if word not in stop]))
    return df
#Lowercase a column of a dataframe and put the results in a new column
def upper_to_lower(df, column, newColumn):
    df[newColumn] = df[column].str.lower()
    return df
def textTFIDF(df):
    tvec = TfidfVectorizer(min_df=.0025, max_df=.1, ngram_range=(1, 2))  # initialize TF-IDF vectorizer
    tvec_weights = tvec.fit_transform(df.finalReviews.dropna())  # fit and transform
    weights = np.asarray(tvec_weights.mean(axis=0)).ravel().tolist()
    # get_feature_names_out() replaces get_feature_names(), which was removed in scikit-learn 1.2
    weights_df = pa.DataFrame({'term': tvec.get_feature_names_out(), 'weight': weights})
    print(weights_df.sort_values(by='weight', ascending=True).head(20))
    X_normalized = preprocessing.normalize(tvec_weights, norm='l2')
    print(X_normalized)
    return X_normalized
def textCountVec(df):
cvec = CountVectorizer(min_df=.0025, max_df=.1, ngram_range=(1,2))
cvec.fit(df.finalReviews)
print(list(islice(cvec.vocabulary_.items(), 20)))
cvec_counts = cvec.transform(df.finalReviews)
print(cvec_counts.shape)
transformer = TfidfTransformer()
transformed_weights = transformer.fit_transform(cvec_counts)
print(transformed_weights)
print(transformed_weights.shape)
X_normalized = preprocessing.normalize(cvec_counts, norm='l2')
print(X_normalized)
return X_normalized
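# Hedged usage sketch (column names are illustrative; "finalReviews" matches
# what textTFIDF/textCountVec expect). Requires nltk.download("stopwords").
if __name__ == "__main__":
    reviews = pa.DataFrame({"review": ["Great product, works well!", "Bad; broke after two days."]})
    reviews = remove_punctions(reviews, "review", "clean")
    reviews = upper_to_lower(reviews, "clean", "lower")
    reviews = remove_stopword(reviews, "lower", "nostop")
    reviews = stemming(reviews, "nostop", "finalReviews")
    print(reviews["finalReviews"])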
|
nilq/baby-python
|
python
|
# Code from - https://github.com/Cartucho/mAP
import glob
import json
import os
import shutil
import operator
import sys
import argparse
import math
import cv2
import matplotlib.pyplot as plt
import numpy as np
def log_average_miss_rate(prec, rec, num_images):
"""
log-average miss rate:
Calculated by averaging miss rates at 9 evenly spaced FPPI points
between 10e-2 and 10e0, in log-space.
output:
lamr | log-average miss rate
mr | miss rate
fppi | false positives per image
references:
[1] Dollar, Piotr, et al. "Pedestrian Detection: An Evaluation of the
State of the Art." Pattern Analysis and Machine Intelligence, IEEE
Transactions on 34.4 (2012): 743 - 761.
"""
# if there were no detections of that class
if prec.size == 0:
lamr = 0
mr = 1
fppi = 0
return lamr, mr, fppi
fppi = (1 - prec)
mr = (1 - rec)
fppi_tmp = np.insert(fppi, 0, -1.0)
mr_tmp = np.insert(mr, 0, 1.0)
# Use 9 evenly spaced reference points in log-space
ref = np.logspace(-2.0, 0.0, num = 9)
for i, ref_i in enumerate(ref):
# np.where() will always find at least 1 index, since min(ref) = 0.01 and min(fppi_tmp) = -1.0
j = np.where(fppi_tmp <= ref_i)[-1][-1]
ref[i] = mr_tmp[j]
# log(0) is undefined, so we use the np.maximum(1e-10, ref)
lamr = math.exp(np.mean(np.log(np.maximum(1e-10, ref))))
return lamr, mr, fppi
"""
throw error and exit
"""
def error(msg):
print(msg)
sys.exit(0)
"""
check if the number is a float strictly between 0.0 and 1.0 (endpoints excluded)
"""
def is_float_between_0_and_1(value):
try:
val = float(value)
if val > 0.0 and val < 1.0:
return True
else:
return False
except ValueError:
return False
"""
Calculate the AP given the recall and precision array
1st) We compute a version of the measured precision/recall curve with
precision monotonically decreasing
2nd) We compute the AP as the area under this curve by numerical integration.
"""
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
    rec.insert(0, 0.0) # insert 0.0 at beginning of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
    prec.insert(0, 0.0) # insert 0.0 at beginning of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre)-2, -1, -1):
mpre[i] = max(mpre[i], mpre[i+1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i-1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i]-mrec[i-1])*mpre[i])
return ap, mrec, mpre
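# Quick sanity check with toy numbers (not from the original repo). Copies
# are passed because voc_ap mutates its argument lists in place:
#   ap, mrec, mpre = voc_ap([0.1, 0.2, 0.3][:], [1.0, 0.5, 0.67][:])
#   -> ap == 0.1*1.0 + 0.1*0.67 + 0.1*0.67 + 0.7*0.0 ~= 0.234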
"""
Convert the lines of a file to a list
"""
def file_lines_to_list(path):
# open txt file lines to a list
with open(path) as f:
content = f.readlines()
# remove whitespace characters like `\n` at the end of each line
content = [x.strip() for x in content]
return content
"""
Draws text in image
"""
def draw_text_in_image(img, text, pos, color, line_width):
font = cv2.FONT_HERSHEY_PLAIN
fontScale = 1
lineType = 1
bottomLeftCornerOfText = pos
cv2.putText(img, text,
bottomLeftCornerOfText,
font,
fontScale,
color,
lineType)
text_width, _ = cv2.getTextSize(text, font, fontScale, lineType)[0]
return img, (line_width + text_width)
"""
Plot - adjust axes
"""
def adjust_axes(r, t, fig, axes):
# get text width for re-scaling
bb = t.get_window_extent(renderer=r)
text_width_inches = bb.width / fig.dpi
# get axis width in inches
current_fig_width = fig.get_figwidth()
new_fig_width = current_fig_width + text_width_inches
    proportion = new_fig_width / current_fig_width
    # get axis limit
    x_lim = axes.get_xlim()
    axes.set_xlim([x_lim[0], x_lim[1]*proportion])
"""
Draw plot using Matplotlib
"""
def draw_plot_func(dictionary, n_classes, window_title, plot_title, x_label, output_path, to_show, plot_color, true_p_bar):
# sort the dictionary by decreasing value, into a list of tuples
sorted_dic_by_value = sorted(dictionary.items(), key=operator.itemgetter(1))
# unpacking the list of tuples into two lists
sorted_keys, sorted_values = zip(*sorted_dic_by_value)
#
if true_p_bar != "":
"""
Special case to draw in:
- green -> TP: True Positives (object detected and matches ground-truth)
- red -> FP: False Positives (object detected but does not match ground-truth)
- pink -> FN: False Negatives (object not detected but present in the ground-truth)
"""
fp_sorted = []
tp_sorted = []
for key in sorted_keys:
fp_sorted.append(dictionary[key] - true_p_bar[key])
tp_sorted.append(true_p_bar[key])
plt.barh(range(n_classes), fp_sorted, align='center', color='crimson', label='False Positive')
plt.barh(range(n_classes), tp_sorted, align='center', color='forestgreen', label='True Positive', left=fp_sorted)
# add legend
plt.legend(loc='lower right')
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
fp_val = fp_sorted[i]
tp_val = tp_sorted[i]
fp_str_val = " " + str(fp_val)
tp_str_val = fp_str_val + " " + str(tp_val)
# trick to paint multicolor with offset:
# first paint everything and then repaint the first number
t = plt.text(val, i, tp_str_val, color='forestgreen', va='center', fontweight='bold')
plt.text(val, i, fp_str_val, color='crimson', va='center', fontweight='bold')
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
else:
plt.barh(range(n_classes), sorted_values, color=plot_color)
"""
Write number on side of bar
"""
fig = plt.gcf() # gcf - get current figure
axes = plt.gca()
r = fig.canvas.get_renderer()
for i, val in enumerate(sorted_values):
str_val = " " + str(val) # add a space before
if val < 1.0:
str_val = " {0:.2f}".format(val)
t = plt.text(val, i, str_val, color=plot_color, va='center', fontweight='bold')
# re-set axes to show number inside the figure
if i == (len(sorted_values)-1): # largest bar
adjust_axes(r, t, fig, axes)
    # set window title (newer matplotlib exposes this via fig.canvas.manager)
    fig.canvas.set_window_title(window_title)
# write classes in y axis
tick_font_size = 12
plt.yticks(range(n_classes), sorted_keys, fontsize=tick_font_size)
"""
Re-scale height accordingly
"""
init_height = fig.get_figheight()
    # compute the matrix height in points and inches
dpi = fig.dpi
height_pt = n_classes * (tick_font_size * 1.4) # 1.4 (some spacing)
height_in = height_pt / dpi
# compute the required figure height
top_margin = 0.15 # in percentage of the figure height
bottom_margin = 0.05 # in percentage of the figure height
figure_height = height_in / (1 - top_margin - bottom_margin)
# set new height
if figure_height > init_height:
fig.set_figheight(figure_height)
# set plot title
plt.title(plot_title, fontsize=14)
# set axis titles
# plt.xlabel('classes')
plt.xlabel(x_label, fontsize='large')
# adjust size of window
fig.tight_layout()
# save the plot
fig.savefig(output_path)
# show image
if to_show:
plt.show()
# close the plot
plt.close()
|
nilq/baby-python
|
python
|
import ujson as json
def read_json_file(file_name: str) -> None:
    try:
        # context manager guarantees the handle is closed even if parsing fails
        with open(file_name, 'r') as fp:
            config = json.load(fp)
        print(json.dumps(config))
    except Exception as e:
        print(f'exception: {e}')
read_json_file('C:\\Users\\s\\Desktop\\config.txt')
|
nilq/baby-python
|
python
|
# Copyright (c) 2012-2013 SHIFT.com
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
class Row(object):
    def __init__(self, data):
        # items() rather than Python 2's iteritems(), so this runs on Python 3
        for k, v in data.items():
            setattr(self, k, v)
class Table(object):
"""
    A table accepts the results of a GremlinMethod in its
    constructor.
    It can be iterated over like a normal list, but within the rows
    the dictionary values are accessible via dot notation
For example:
# returns a table of people & my friend edge to them
# the edge contains my nickname for that person
friends = thunderdome.GremlinMethod()
def get_friends_and_my_nickname(self):
result = self.friends()
result = Table(result)
        for i in result:
            print("{}:{}".format(i.friend_edge.nickname, i.person.name))
"""
def __init__(self, gremlin_result):
if gremlin_result == [[]]:
gremlin_result = []
self._gremlin_result = gremlin_result
self._position = 0
def __getitem__(self, key):
"""
returns an enhanced dictionary
"""
if key >= len(self._gremlin_result):
raise IndexError()
return Row(self._gremlin_result[key])
def __iter__(self):
return self
    def next(self):
        if self._position == len(self._gremlin_result):
            self._position = 0
            raise StopIteration()
        tmp = self._gremlin_result[self._position]
        self._position += 1
        return Row(tmp)
    __next__ = next  # Python 3 iterator protocol
def __len__(self):
return len(self._gremlin_result)
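# Minimal usage sketch: a plain list of dicts stands in for a real
# GremlinMethod result.
if __name__ == '__main__':
    table = Table([{'name': 'alice', 'score': 10}, {'name': 'bob', 'score': 7}])
    for row in table:
        print(row.name, row.score)
    print(len(table), table[0].name)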
|
nilq/baby-python
|
python
|
"""Tests for accounts.views."""
# pylint: disable=no-value-for-parameter,maybe-no-member,invalid-name
from datetime import datetime
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.test import Client, TestCase, RequestFactory
from django.test.utils import override_settings
from mock import patch
from model_mommy import mommy
from open_connect.accounts import views
from open_connect.accounts.models import Invite, User
from open_connect.connectmessages.tests import ConnectMessageTestCase
from open_connect.media.tests import (
get_in_memory_image_file, get_in_memory_image_instance
)
from open_connect.connect_core.utils.basetests import ConnectTestMixin
class UserDetailViewTest(ConnectTestMixin, TestCase):
"""Tests for the user detail view."""
def setUp(self):
"""Handy things."""
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/')
def test_context_object_name(self):
"""Test that the object name is account."""
user_detail_view = views.UserDetailView.as_view()
user = self.create_user()
self.request.user = user
response = user_detail_view(self.request, user_uuid=user.uuid)
self.assertTrue('account' in response.context_data.keys())
def test_user_property(self):
"""Test that the user property returns the user."""
view = views.UserDetailView()
user = self.create_user()
view.kwargs = {'user_uuid': user.uuid}
self.assertEqual(view.user, user)
    def test_non_existent_404(self):
        """Test that a UUID that does not exist causes a 404"""
view = views.UserDetailView()
view.kwargs = {'user_uuid': 'does-not-exist'}
with self.assertRaises(Http404):
# pylint: disable=W0104
view.user
def test_direct_message_regular_user(self):
"""
Test that a regular user cannot send a direct message to regular users
"""
visitor = self.create_user()
recipient = self.create_user()
self.login(visitor)
self.assertFalse(visitor.can_direct_message_user(recipient))
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
self.assertEqual(response.status_code, 200)
self.assertNotContains(
response,
reverse(
'create_direct_message',
kwargs={
'user_uuid': recipient.uuid
}
)
)
def test_direct_message_staff(self):
"""
Test that a regular user can direct message staff
"""
visitor = self.create_user()
recipient = self.create_user(is_staff=True)
self.login(visitor)
self.assertTrue(visitor.can_direct_message_user(recipient))
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
reverse(
'create_direct_message',
kwargs={
'user_uuid': recipient.uuid
}
)
)
def test_direct_message_regular_user_by_staff(self):
"""
Test that a staff member can send a direct message to regular users
"""
visitor = self.create_user(is_staff=True)
recipient = self.create_user()
self.login(visitor)
self.assertTrue(visitor.can_direct_message_user(recipient))
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
reverse(
'create_direct_message',
kwargs={
'user_uuid': recipient.uuid
}
)
)
def test_direct_message_regular_user_by_superuser(self):
"""
Test that a superuser can send a direct message to regular users
"""
visitor = self.create_user(is_superuser=True)
recipient = self.create_user()
self.login(visitor)
self.assertTrue(visitor.can_direct_message_user(recipient))
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
reverse(
'create_direct_message',
kwargs={
'user_uuid': recipient.uuid
}
)
)
def test_direct_message_regular_user_by_permission(self):
"""
Test that someone with the correct permission can message a user
"""
visitor = self.create_user()
self.add_perm(
visitor, 'can_initiate_direct_messages', 'accounts', 'user')
recipient = self.create_user()
self.login(visitor)
self.assertTrue(visitor.can_direct_message_user(recipient))
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': recipient.uuid}))
self.assertEqual(response.status_code, 200)
self.assertContains(
response,
reverse(
'create_direct_message',
kwargs={
'user_uuid': recipient.uuid
}
)
)
def test_show_banned_warning_user_is_banned(self):
"""Banned warning should be shown if the user is banned."""
request_user = self.create_superuser()
banned_user = self.create_user(is_banned=True)
self.client.login(username=request_user.email, password='moo')
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': banned_user.uuid}))
self.assertTrue(response.context['show_banned_warning'])
def test_show_banned_warning_user_is_not_banned(self):
"""Banned warning should not show if the user is not banned."""
request_user = self.create_user()
unbanned_user = self.create_user()
self.client.login(username=request_user.email, password='moo')
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': unbanned_user.uuid}))
self.assertFalse(response.context['show_banned_warning'])
def test_show_banned_warning_to_self_banned(self):
"""Banned warning should not show to the user that is banned."""
banned_user = self.create_user(is_banned=True)
self.client.login(username=banned_user.email, password='moo')
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': banned_user.uuid}))
self.assertFalse(response.context['show_banned_warning'])
def test_show_banned_warning_to_self_not_banned(self):
"""Banned warning should not show to an unbanned user."""
unbanned_user = self.create_user()
self.client.login(username=unbanned_user.email, password='moo')
response = self.client.get(
reverse('user_details', kwargs={'user_uuid': unbanned_user.uuid}))
self.assertFalse(response.context['show_banned_warning'])
def test_get_context_data(self):
"""Context should have nav_active_item and show_banned_warning."""
user = self.create_user()
self.client.login(username=user.email, password='moo')
response = self.client.get(
reverse('user_details',
kwargs={'user_uuid': user.uuid})
)
context = response.context
self.assertEqual(context['nav_active_item'], user)
self.assertEqual(context['show_banned_warning'], False)
self.assertQuerysetItemsEqual(
context['groups_joined'], user.groups_joined)
def test_get_object(self):
"""get_object should return the correct user."""
view = views.UserDetailView()
view.request = self.request_factory.get('/')
user = self.create_user()
view.request.user = user
view.kwargs = {'user_uuid': user.uuid}
self.assertEqual(view.get_object(), user)
@patch('open_connect.accounts.views.messages')
def test_get_object_user_is_banned(self, mock_messages):
"""should return the user and add a warning if user is banned."""
user = mommy.make('accounts.User', is_banned=True)
view = views.UserDetailView()
view.request = self.request
view.request.user = self.create_superuser()
view.kwargs = {'user_uuid': user.uuid}
self.assertEqual(view.get_object(), user)
self.assertEqual(
mock_messages.warning.call_args_list[0][0][1],
'This is a banned account.'
)
def test_get_object_user_is_banned_no_permission_to_view_profile(self):
"""should raise Http404 if user is banned and you don't have perms."""
user = mommy.make('accounts.User', is_banned=True)
view = views.UserDetailView()
view.request = self.request_factory.get('/')
view.request.user = self.create_user(is_staff=True)
view.kwargs = {'user_uuid': user.uuid}
self.assertRaises(Http404, view.get_object)
class UserUpdateViewTest(ConnectTestMixin, TestCase):
"""Tests for the user update view."""
def setUp(self):
"""Setup the UserUpdateViewTest TestCase"""
self.user = self.create_user(password='test')
self.client.login(username=self.user.username, password='test')
def test_authenticated_user_own_profile(self):
"""Test that an authenticated user can access their own update view."""
response = self.client.get(
reverse('update_user', args=(self.user.uuid,)))
self.assertEqual(response.context_data['object'], self.user)
def test_admin_access_view(self):
"""
Test that admins with the `accounts.change_user` permission can view
"""
admin_user = self.create_user(password='admintest')
admin_client = Client()
admin_client.login(username=admin_user.username, password='admintest')
        unprivileged_result = admin_client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertEqual(unprivileged_result.status_code, 404)
        change_user_permission = Permission.objects.get(
            content_type__app_label='accounts', codename='change_user')
        admin_user.user_permissions.add(change_user_permission)
        privileged_result = admin_client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertEqual(privileged_result.status_code, 200)
        self.assertContains(privileged_result, self.user)
@override_settings(LOGIN_URL=reverse('login'))
def test_update_anonymous_user(self):
"""Unauthenticated users should be redirected to the login page."""
client = Client()
update_url = reverse('update_user', args=(self.user.uuid,))
response = client.get(update_url)
self.assertRedirects(
response,
'%s?next=%s' % (reverse('login'), update_url)
)
def test_with_image(self):
"""Make sure the user's image gets set when it is provided."""
data = {
'image': get_in_memory_image_file(),
'timezone': 'US/Central',
'group_notification_period': 'none',
'email': self.user.email
}
response = self.client.post(
reverse('update_user', args=(self.user.uuid,)), data)
self.assertRedirects(
response,
reverse('user_profile'),
target_status_code=302
)
user = User.objects.get(pk=self.user.pk)
data['image'].seek(0)
self.assertEqual(user.image.image.read(), data['image'].read())
def test_clear_image(self):
"""A user's image should be removed if clear is selected."""
self.user.image = get_in_memory_image_instance(self.user)
self.user.save()
data = {
'image-clear': True,
'image': None,
'timezone': 'US/Central',
'group_notification_period': 'none',
'email': self.user.email
}
response = self.client.post(
reverse('update_user', args=(self.user.uuid,)), data)
self.assertRedirects(
response,
reverse('user_profile'),
target_status_code=302
)
user = User.objects.get(pk=self.user.pk)
self.assertIsNone(user.image)
    def test_non_group_owner_does_not_have_receive_group_join_field(self):
        """A user who owns no groups should not see the field."""
        response = self.client.get(
            reverse('update_user', args=(self.user.uuid,)))
        self.assertNotIn(
            'receive_group_join_notifications',
            response.context['user_form'].fields.keys()
        )
    def test_group_owner_has_receive_group_join_notifications_field(self):
        """A user who owns any groups should see the field."""
        user = self.create_user()
        group = mommy.make('groups.Group')
        group.owners.add(user)
        client = Client()
        client.login(username=user.email, password='moo')
        response = client.get(
            reverse('update_user', args=(user.uuid,)))
        self.assertIn(
            'receive_group_join_notifications',
            response.context['user_form'].fields.keys()
        )
class UpdateUserPermissionViewTest(ConnectTestMixin, TestCase):
"""Tests for UpdateUserPermissionView"""
def setUp(self):
"""Handy things."""
self.request_factory = RequestFactory()
# Add 2 permissions to the test, one valid and visible, one hidden
demo_content_type = ContentType.objects.create(
app_label='demo-app-label', model='DemoModel')
self.valid_permission = mommy.make(
Permission,
codename='viewable-permission',
name='Viewable Permission',
content_type=demo_content_type)
self.hidden_permission = mommy.make(
Permission,
codename='hidden-permission',
name='Hidden Permission',
content_type=demo_content_type)
# Create a view class that contains those permissions
self.view_class = views.UpdateUserPermissionView
self.view_class.editable_permissions = (
('demo-app-label', 'viewable-permission'),
)
def tearDown(self):
"""
Tear down the test
Cleanup the test by deleting the test permissions, then verify the
cleanup
"""
self.valid_permission.delete()
self.hidden_permission.delete()
self.assertNotIn(self.valid_permission, Permission.objects.all())
self.assertNotIn(self.hidden_permission, Permission.objects.all())
def test_no_impersonation(self):
"""Test that the view will reject those actively impersonating"""
# Create a user who is actively impersonating another user
user = self.create_user()
user.impersonating = True
# Create a request
request = self.request_factory.get('/')
request.user = user
# Instead of testing the dispatch() method directly or creating a
# django test client that is both logged in and impersonating, we can
# pass a pre-made request directly into the view.
with self.assertRaises(PermissionDenied):
self.view_class.as_view()(request)
def test_get_queryset(self):
"""
Test the view's get_queryset() method
Test that neither the requesting User nor a superuser User are in the
queryset of User objects returned by the view's get_queryset()
"""
requesting_user = self.create_user()
regular_user = self.create_user()
superuser = self.create_superuser()
view = self.view_class()
view.request = self.request_factory.get('/')
view.request.user = requesting_user
queryset = view.get_queryset()
# The regular user should be in the queryset
self.assertIn(regular_user, queryset)
# Superusers cannot be in the possible queryset
self.assertNotIn(superuser, queryset)
# The requesting user cannot be in the possible queryset
self.assertNotIn(requesting_user, queryset)
def test_get_editable_permissions(self):
"""
Test the `get_editable_permissions` method on the view.
"""
view = self.view_class()
editable_permissions_queryset = view.get_editable_permissions()
self.assertEqual(editable_permissions_queryset.count(), 1)
self.assertIn(self.valid_permission, editable_permissions_queryset)
self.assertNotIn(self.hidden_permission, editable_permissions_queryset)
def test_get_permissions_queryset(self):
"""
Test the get_permissions_queryset() method.
"""
view = self.view_class()
view.request = self.request_factory.get('/')
view.request.user = self.create_user()
# Create a new "target" user, who is the user the view will be set to
# edit during a regular request.
target_user = self.create_user()
view.object = target_user
# Get the existing queryset of changeable permissions. This should only
# include permissions set in the `view.editable_permissions` attribute.
permissions_queryset = view.get_permissions_queryset()
self.assertEqual(permissions_queryset.count(), 1)
self.assertIn(self.valid_permission, permissions_queryset)
self.assertNotIn(self.hidden_permission, permissions_queryset)
# Add the hidden permission to the user's list of permissions. This
# should cause the hidden permission to appear in the queryset
target_user.user_permissions.add(self.hidden_permission)
# Re-generate a queryset of editable views
extended_permissions_queryset = view.get_permissions_queryset()
self.assertEqual(extended_permissions_queryset.count(), 2)
self.assertIn(self.valid_permission, extended_permissions_queryset)
self.assertIn(self.hidden_permission, extended_permissions_queryset)
def test_get_form(self):
"""
Test the `get_form` method for users with and without extra permissions
"""
admin = self.create_superuser()
self.client.login(username=admin.email, password='moo')
# Ensure that by default 'Viewable Permission' is found in the form
# field and 'Hidden Permission' is not
user = self.create_user()
response = self.client.get(
reverse('update_user_permissions', args=[user.uuid]))
form = response.context['form']
user_permissions_field = form['user_permissions']
self.assertIn(u'Viewable Permission', unicode(user_permissions_field))
self.assertNotIn(u'Hidden Permission', unicode(user_permissions_field))
# Ensure that if a user has 'Hidden Permission' it is displayed in the
# form field
user.user_permissions.add(self.hidden_permission)
expanded_response = self.client.get(
reverse('update_user_permissions', args=[user.uuid]))
expanded_form = expanded_response.context['form']
expanded_user_permissions_field = expanded_form['user_permissions']
self.assertIn(
u'Viewable Permission', unicode(expanded_user_permissions_field))
self.assertIn(
u'Hidden Permission', unicode(expanded_user_permissions_field))
class UserProfileRedirectTest(ConnectTestMixin, TestCase):
"""Tests for the user profile redirect view."""
def test_redirects_to_user_details(self):
"""User profile should redirect to user detais."""
user = self.create_user()
self.client.login(username=user.email, password='moo')
response = self.client.get(reverse('user_profile'))
self.assertRedirects(
response,
reverse('user_details', args=[user.uuid]),
)
@override_settings(LOGIN_URL=reverse('login'))
def test_anonymous_user(self):
"""Unauthenticated user should be redirected to login."""
client = Client()
user_profile_url = reverse('user_profile')
response = client.get(user_profile_url)
self.assertRedirects(
response,
'%s?next=%s' % (reverse('login'), user_profile_url)
)
class InviteCreateViewTest(ConnectTestMixin, TestCase):
"""Tests for InviteCreateView."""
def setUp(self):
"""Handy things."""
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/')
def test_get_success_url(self):
"""get_success_url should return the URL of the invite list page."""
view = views.InviteCreateView()
view.request = self.request
result = view.get_success_url()
self.assertEqual(result, reverse('invites'))
def test_form_valid(self):
"""form_valid should set created_by to the current user."""
user = self.create_superuser()
self.client.login(username=user.email, password='moo')
group = mommy.make('groups.Group')
response = self.client.post(
reverse('create_invite'),
{'emails': 'testuser@dj.local', 'groups': [group.pk]}
)
invite = Invite.objects.get(email='testuser@dj.local')
self.assertEqual(invite.created_by, user)
self.assertRedirects(response, reverse('invites'))
def test_get_form_non_superuser(self):
"""get_form should remove is_superuser from form and update groups."""
user = self.create_user(is_staff=True)
permission = Permission.objects.get_by_natural_key(
'add_invite', 'accounts', 'invite')
user.user_permissions.add(permission)
self.client.login(username=user.email, password='moo')
response = self.client.get(reverse('create_invite'))
form = response.context['form']
self.assertNotIn('is_superuser', form.fields.keys())
self.assertIn('is_staff', form.fields.keys())
self.assertQuerysetItemsEqual(
form.fields['groups'].queryset, user.groups.all())
def test_get_form_non_staff(self):
"""get_form should remove superuser & staff fields and update groups."""
user = self.create_user()
permission = Permission.objects.get_by_natural_key(
'add_invite', 'accounts', 'invite')
user.user_permissions.add(permission)
self.client.login(username=user.email, password='moo')
response = self.client.get(reverse('create_invite'))
form = response.context['form']
self.assertNotIn('is_superuser', form.fields.keys())
self.assertNotIn('is_staff', form.fields.keys())
self.assertQuerysetItemsEqual(
form.fields['groups'].queryset, user.groups.all())
def test_get_form_superuser(self):
"""get_form should have all fields and all groups."""
user = self.create_superuser()
self.client.login(username=user.email, password='moo')
response = self.client.get(reverse('create_invite'))
form = response.context['form']
self.assertIn('is_superuser', form.fields.keys())
self.assertIn('is_staff', form.fields.keys())
self.assertQuerysetItemsEqual(
form.fields['groups'].queryset, Group.objects.all())
class InviteListViewTest(ConnectTestMixin, TestCase):
"""Tests for InviteListView."""
def test_query(self):
"""Test searching."""
user = self.create_superuser()
find_me = Invite.objects.create(email='hi@dj.local', created_by=user)
dont_find_me = Invite.objects.create(
email='bye@dj.local', created_by=user)
self.client.login(username=user.email, password='moo')
response = self.client.get(reverse('invites'), {'q': 'hi'})
self.assertIn(find_me, response.context['invites'])
self.assertNotIn(dont_find_me, response.context['invites'])
class BanUnBanViewBaseTest(ConnectMessageTestCase):
"""Tests for BanUnBanViewBase."""
def test_user_property(self):
"""Test that the correct user is returned."""
view = views.BanUnBanViewBase()
view.kwargs = {'user_uuid': self.normal_user.uuid}
self.assertEqual(view.user, self.normal_user)
def test_get_initial(self):
"""Test that the user is added to the form's initial data."""
view = views.BanUnBanViewBase()
view.kwargs = {'user_uuid': self.normal_user.uuid}
self.assertEqual(view.get_initial()['user'], self.normal_user)
def test_get_context_data(self):
"""Test that the user is added to the context."""
view = views.BanUnBanViewBase()
view.kwargs = {'user_uuid': self.normal_user.uuid}
self.assertEqual(view.get_context_data()['account'], self.normal_user)
def test_get_success_url(self):
"""Test that the success url is the user's profile."""
view = views.BanUnBanViewBase()
view.kwargs = {'user_uuid': self.normal_user.uuid}
self.assertEqual(
view.get_success_url(),
reverse('user_details', kwargs={'user_uuid': self.normal_user.uuid})
)
class BanUserViewTest(ConnectMessageTestCase):
"""Tests for BanUserView."""
def test_form_valid_confirm(self):
"""Test that a user is banned when confirm=True."""
user = mommy.make('accounts.User', is_banned=False)
self.assertFalse(user.is_banned)
response = self.client.post(
reverse('ban_user', kwargs={'user_uuid': user.uuid}),
{'user': user.pk, 'confirm': 1}
)
self.assertRedirects(
response, reverse('user_details', kwargs={'user_uuid': user.uuid}))
user = User.objects.get(pk=user.pk)
self.assertTrue(user.is_banned)
def test_form_valid_not_confirmed(self):
"""Test that a user is not banned when confirm=False"""
user = mommy.make('accounts.User', is_banned=False)
self.assertFalse(user.is_banned)
response = self.client.post(
reverse('ban_user', kwargs={'user_uuid': user.uuid}),
{'user': user.pk}
)
self.assertRedirects(
response, reverse('user_details', kwargs={'user_uuid': user.uuid}))
user = User.objects.get(pk=user.pk)
self.assertFalse(user.is_banned)
class UnBanUserViewTest(ConnectMessageTestCase):
"""Tests for UnBanUserView."""
def test_form_valid_confirm(self):
"""Test that a user is unbanned when confirm=True."""
user = mommy.make('accounts.User', is_banned=True)
self.assertTrue(user.is_banned)
response = self.client.post(
reverse('unban_user', kwargs={'user_uuid': user.uuid}),
{'user': user.pk, 'confirm': 1}
)
self.assertRedirects(
response, reverse('user_details', kwargs={'user_uuid': user.uuid}))
user = User.objects.get(pk=user.pk)
self.assertFalse(user.is_banned)
def test_form_valid_not_confirmed(self):
"""Test that a user is not banned when confirm=False"""
user = mommy.make('accounts.User', is_banned=True)
self.assertTrue(user.is_banned)
response = self.client.post(
reverse('unban_user', kwargs={'user_uuid': user.uuid}),
{'user': user.pk}
)
self.assertRedirects(
response, reverse('user_details', kwargs={'user_uuid': user.uuid}))
user = User.objects.get(pk=user.pk)
self.assertTrue(user.is_banned)
class BecomeUserViewTest(ConnectMessageTestCase):
"""Tests for the BecomeUserView."""
def test_get_success_url(self):
"""Test get_success_url redirects to the right place."""
view = views.BecomeUserView()
view.request = self.request_factory.get('/')
self.assertEqual(view.get_success_url(), reverse('threads'))
def test_get_success_url_with_next(self):
"""Test get_success_url redirects to next if in the request GET."""
view = views.BecomeUserView()
view.request = self.request_factory.get('/?next=meow')
self.assertEqual(view.get_success_url(), 'meow')
def test_user_to_become(self):
"""Should return a user object corresponding to the user_uuid."""
view = views.BecomeUserView()
view.kwargs = {'user_uuid': self.normal_user.uuid}
self.assertEqual(
view.user_to_become,
self.normal_user
)
def test_form_valid_updates_session(self):
"""form_valid should add impersonate_id to the session."""
session = self.client.session
self.assertNotIn('impersonate_id', session)
self.client.post(
reverse('become_user', kwargs={'user_uuid': self.normal_user.uuid}),
{'user_to_become': self.normal_user.pk}
)
session = self.client.session
self.assertEqual(session['impersonate_id'], self.normal_user.pk)
def test_form_valid_does_not_update_session_without_permission(self):
"""form_valid should only update the session if user has permission."""
client = Client()
client.post(
reverse('login'),
{'username': 'staffuser@razzmatazz.local', 'password': 'moo'}
)
session = client.session
self.assertNotIn('impersonate_id', session)
client.post(
reverse('become_user', kwargs={'user_uuid': self.normal_user.uuid}),
{'user_to_become': self.normal_user.pk}
)
session = client.session
self.assertNotIn('impersonate_id', session)
def test_get_context_adds_user_to_become(self):
"""user_to_become should be added to the context."""
response = self.client.get(
reverse('become_user', kwargs={'user_uuid': self.normal_user.uuid}))
self.assertEqual(response.context['user_to_become'], self.normal_user)
class UnbecomeUserTest(ConnectMessageTestCase):
"""Tests for unbecome_user view."""
def test_unbecome_user(self):
"""View should remove impersonate_id from session and redirect."""
session = self.client.session
session['impersonate_id'] = self.normal_user.pk
session.save()
response = self.client.get(reverse('unbecome_user'))
session = self.client.session
self.assertNotIn('impersonate_id', session)
self.assertRedirects(response, reverse('threads'))
def test_unbecome_user_redirects_to_next(self):
"""If next is in GET, user should be redirected."""
session = self.client.session
session['impersonate_id'] = self.normal_user.pk
session.save()
user_profile = reverse(
'user_details', kwargs={'user_uuid': self.normal_user.uuid})
response = self.client.get(
'%s?next=%s' % (reverse('unbecome_user'), user_profile))
self.assertRedirects(response, user_profile)
def test_unbecome_user_impersonate_id_not_in_session(self):
"""Fail silently if impersonate_id is not in the session."""
session = self.client.session
self.assertNotIn('impersonate_id', session)
response = self.client.get(reverse('unbecome_user'))
self.assertRedirects(response, reverse('threads'))
class TermsAndConductViewTest(ConnectTestMixin, TestCase):
"""Tests for accepting terms of service and code of conduct."""
def test_user_accepted_terms_and_conduct(self):
"""Test that posting a valid form updates user and redirects."""
user = self.create_user(tos_accepted_at=None, ucoc_accepted_at=None)
self.assertIsNone(user.tos_accepted_at)
self.assertIsNone(user.ucoc_accepted_at)
self.client.login(username=user.email, password='moo')
response = self.client.post(
reverse('accept_terms_and_conduct'),
{'accept_tos': True, 'accept_ucoc': True, 'next': '/?ok'}
)
# Target status code is 302 because / will redirect user to another page
self.assertRedirects(response, '/?ok', target_status_code=302)
user = User.objects.get(pk=user.pk)
self.assertIsInstance(user.tos_accepted_at, datetime)
self.assertIsInstance(user.ucoc_accepted_at, datetime)
class TutorialStatusViewTest(ConnectTestMixin, TestCase):
"""Tests for user_tutorial_view."""
def setUp(self):
"""Setup the test"""
self.request_factory = RequestFactory()
def test_user_tutorial_view(self):
"""view should change the user's status and
return the expected response."""
request = self.request_factory.post('/')
user = self.create_user()
request.user = user
self.assertEqual(user.has_viewed_tutorial, False)
views.user_tutorial_view(request)
self.assertEqual(user.has_viewed_tutorial, True)
views.user_tutorial_view(request)
self.assertEqual(user.has_viewed_tutorial, False)
|
nilq/baby-python
|
python
|
def fetch_base_view(context, next):
base_blocks = [
{
"type": "input",
"block_id": "block_packages",
"element": {
"type": "plain_text_input",
"action_id": "package_input",
"placeholder": {
"type": "plain_text",
"text": "300"
}
},
"label": {
"type": "plain_text",
"text": "Packages"
}
},
{
"type": "input",
"block_id": "block_weight",
"element": {
"type": "plain_text_input",
"action_id": "weight_input",
"placeholder": {
"type": "plain_text",
"text": "750"
}
},
"label": {
"type": "plain_text",
"text": "Weight"
}
},
{
"type": "input",
"block_id": "block_items",
"element": {
"type": "plain_text_input",
"action_id": "item_input",
"placeholder": {
"type": "plain_text",
"text": "450"
}
},
"label": {
"type": "plain_text",
"text": "Items"
}
},
{
"type": "input",
"block_id": "block_hours",
"element": {
"type": "plain_text_input",
"action_id": "hour_input",
"placeholder": {
"type": "plain_text",
"text": "7.5"
}
},
"label": {
"type": "plain_text",
"text": "Hours"
}
}
]
base_view = {
"type": "modal",
"callback_id": "production_calc_submission",
"title": {
"type": "plain_text",
"text": "Production Calculator"
},
"submit": {
"type": "plain_text",
"text": "Calculate"
},
"blocks": base_blocks
}
context['base_blocks'] = base_blocks
context['base_view'] = base_view
next()
def get_input_values(context, body, next):
input_block_values = {
"block_packages": body['view']['state']['values']['block_packages']['package_input']['value'].strip(' '),
"block_weight": body['view']['state']['values']['block_weight']['weight_input']['value'].strip(' '),
"block_items": body['view']['state']['values']['block_items']['item_input']['value'].strip(' '),
"block_hours": body['view']['state']['values']['block_hours']['hour_input']['value'].strip(' ')
}
context['input_block_values'] = input_block_values
next()
def create_score_blocks(context, next):
if 'stats' in context:
stats = context['stats']
score = context['score']
score_block = [
{
"type": "section",
"fields": [
{
"type": "mrkdwn",
"text": f"*Packages/Hour:* `{stats['pkg_per_hour']:.2f}`"
},
{
"type": "mrkdwn",
"text": f"*Weight/Package:* `{stats['weight_per_pkg']:.2f}`"
},
{
"type": "mrkdwn",
"text": f"*Items/Package:* `{stats['items_per_pkg']:.2f}`"
},
{
"type": "mrkdwn",
"text": f"*Productivity Score:* `{score:.2f}` :dash:"
},
]
}
]
context['new_block'] = score_block
next()
else:
next()
def update_base_view(context, next):
if 'new_block' in context:
# create new view key
context['view'] = context['base_view']
# create new blocks key with updated blocks lst
context['blocks'] = context['base_blocks'] + context['new_block']
# update new view key to point to new blocks key
context['view']['blocks'] = context['blocks']
next()
else:
next()
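# Hedged wiring sketch (not part of the original module). The functions above
# follow slack_bolt's listener-middleware signature (context, body, next);
# the callback_id matches the modal defined in fetch_base_view, while the
# handler body computing context['stats'] / context['score'] is hypothetical:
#
# from slack_bolt import App
# app = App()  # reads SLACK_BOT_TOKEN / SLACK_SIGNING_SECRET from the env
#
# @app.view("production_calc_submission", middleware=[get_input_values])
# def handle_submission(ack, context):
#     ack()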
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from fireworks import LaunchPad, Firework, Workflow, PyTask
import glob
launchpad = LaunchPad(
host = 'localhost',
port = 27017, # REPLACE
authsource = 'admin',
name = 'fireworks',
password = None,
ssl = False,
username = None
)
for inp in glob.glob('eda*.inp'):
label = inp[0:-4]
t0 = PyTask(
func='qcfw.functions.run_QChem',
kwargs={'label':label},
outputs = ['output_encoding']
)
    fw0 = Firework([t0], spec={'_priority': 1}, name=label, fw_id=1)
    wf = Workflow([fw0], name=label)
    launchpad.add_wf(wf)
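# Once the workflows are added, they can be executed with FireWorks' rocket
# launcher, e.g. `rlaunch rapidfire` from a shell on a machine that can reach
# the same LaunchPad.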
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# DO NOT EDIT THIS FILE!
# This file has been autogenerated by dephell <3
# https://github.com/dephell/dephell
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import os.path
readme = ''
here = os.path.abspath(os.path.dirname(__file__))
readme_path = os.path.join(here, 'README.rst')
if os.path.exists(readme_path):
with open(readme_path, 'rb') as stream:
readme = stream.read().decode('utf8')
setup(
long_description=readme,
name='petri',
version='0.24.1',
description='Free your python code from 12-factor boilerplate.',
python_requires='==3.*,>=3.6.0',
project_urls={'homepage': 'https://pypi.org/project/petri/', 'repository': 'https://github.com/pwoolvett/petri', 'documentation': 'https://petri.readthedocs.io/en/stable/'},
author='Pablo Woolvett',
author_email='pablowoolvett@gmail.com',
license='Unlicense',
keywords='dotenv boilerplate 12-factor pydantic structlog',
packages=['petri'],
package_data={},
install_requires=['importlib-metadata==0.*,>=0.23.0', 'pydantic==0.*,>=0.32.2', 'python-dotenv==0.*,>=0.10.3', 'structlog==19.*,>=19.1.0'],
extras_require={'dev': ['autopep8==1.*,>=1.4.4', 'bandit==1.*,>=1.6.2', 'blackini==0.*,>=0.1.6', 'doc8==0.*,>=0.8.0', 'docformatter==1.*,>=1.3.0', 'docutils==0.*,>=0.15.2', 'flake8==3.*,>=3.7.8', 'flake8-bugbear==19.*,>=19.8.0', 'isort==4.*,>=4.3.21', 'mypy==0.*,>=0.720.0', 'pip==19.*,>=19.2.3', 'pylint==2.*,>=2.4.1', 'pytest==5.*,>=5.1.2', 'pytest-cov==2.*,>=2.7.1', 'pytest-html==2.*,>=2.0.0', 'sphinx==1.*,>=1.8.0', 'sphinx-autodoc-typehints==1.*,>=1.5.0', 'sphinx-rtd-theme==0.*,>=0.4.2', 'sphinxcontrib-apidoc==0.*,>=0.3.0'], 'lint': ['bandit==1.*,>=1.6.2', 'flake8==3.*,>=3.7.8', 'flake8-bugbear==19.*,>=19.8.0', 'mypy==0.*,>=0.720.0', 'pylint==2.*,>=2.4.1'], 'color': ['colorama==0.*,>=0.4.1'], 'docs': ['doc8==0.*,>=0.8.0', 'docutils==0.*,>=0.15.2', 'sphinx==1.*,>=1.8.0', 'sphinx-autodoc-typehints==1.*,>=1.5.0', 'sphinx-rtd-theme==0.*,>=0.4.2', 'sphinxcontrib-apidoc==0.*,>=0.3.0'], 'pytest': ['pytest==5.*,>=5.1.2', 'pytest-cov==2.*,>=2.7.1', 'pytest-html==2.*,>=2.0.0']},
)
|
nilq/baby-python
|
python
|
from .paths import get_backup_path, get_resources_path
from .logging import initialize_logging
|
nilq/baby-python
|
python
|
from collections import defaultdict
import numpy as np
from yt.funcs import mylog
from yt.utilities.exceptions import YTDomainOverflow
from yt.utilities.io_handler import BaseIOHandler
from yt.utilities.lib.geometry_utils import compute_morton
from yt.utilities.on_demand_imports import _h5py as h5py
class IOHandlerGadgetFOFHDF5(BaseIOHandler):
_dataset_type = "gadget_fof_hdf5"
def __init__(self, ds):
super(IOHandlerGadgetFOFHDF5, self).__init__(ds)
self.offset_fields = set([])
def _read_fluid_selection(self, chunks, selector, fields, size):
raise NotImplementedError
def _read_particle_coords(self, chunks, ptf):
# This will read chunks and yield the results.
chunks = list(chunks)
data_files = set([])
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):
with h5py.File(data_file.filename, mode="r") as f:
for ptype in sorted(ptf):
coords = data_file._get_particle_positions(ptype, f=f)
if coords is None:
continue
x = coords[:, 0]
y = coords[:, 1]
z = coords[:, 2]
yield ptype, (x, y, z)
def _yield_coordinates(self, data_file):
ptypes = self.ds.particle_types_raw
with h5py.File(data_file.filename, "r") as f:
for ptype in sorted(ptypes):
pcount = data_file.total_particles[ptype]
if pcount == 0:
continue
coords = f[ptype][f"{ptype}Pos"][()].astype("float64")
coords = np.resize(coords, (pcount, 3))
yield ptype, coords
def _read_offset_particle_field(self, field, data_file, fh):
field_data = np.empty(data_file.total_particles["Group"], dtype="float64")
fofindex = (
np.arange(data_file.total_particles["Group"])
+ data_file.index_start["Group"]
)
for offset_file in data_file.offset_files:
if fh.filename == offset_file.filename:
ofh = fh
else:
ofh = h5py.File(offset_file.filename, mode="r")
subindex = np.arange(offset_file.total_offset) + offset_file.offset_start
substart = max(fofindex[0] - subindex[0], 0)
subend = min(fofindex[-1] - subindex[0], subindex.size - 1)
fofstart = substart + subindex[0] - fofindex[0]
fofend = subend + subindex[0] - fofindex[0]
field_data[fofstart : fofend + 1] = ofh["Subhalo"][field][
substart : subend + 1
]
return field_data
def _read_particle_fields(self, chunks, ptf, selector):
# Now we have all the sizes, and we can allocate
chunks = list(chunks)
data_files = set([])
for chunk in chunks:
for obj in chunk.objs:
data_files.update(obj.data_files)
for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):
si, ei = data_file.start, data_file.end
with h5py.File(data_file.filename, mode="r") as f:
for ptype, field_list in sorted(ptf.items()):
pcount = data_file.total_particles[ptype]
if pcount == 0:
continue
coords = data_file._get_particle_positions(ptype, f=f)
x = coords[:, 0]
y = coords[:, 1]
z = coords[:, 2]
mask = selector.select_points(x, y, z, 0.0)
del x, y, z
if mask is None:
continue
for field in field_list:
if field in self.offset_fields:
field_data = self._read_offset_particle_field(
field, data_file, f
)
else:
if field == "particle_identifier":
field_data = (
np.arange(data_file.total_particles[ptype])
+ data_file.index_start[ptype]
)
elif field in f[ptype]:
field_data = f[ptype][field][()].astype("float64")
else:
fname = field[: field.rfind("_")]
field_data = f[ptype][fname][()].astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
findex = int(field[field.rfind("_") + 1 :])
field_data = field_data[:, findex]
data = field_data[si:ei][mask]
yield (ptype, field), data
def _initialize_index(self, data_file, regions):
if self.index_ptype == "all":
ptypes = self.ds.particle_types_raw
pcount = sum(data_file.total_particles.values())
else:
ptypes = [self.index_ptype]
pcount = data_file.total_particles[self.index_ptype]
morton = np.empty(pcount, dtype="uint64")
if pcount == 0:
return morton
mylog.debug(
"Initializing index % 5i (% 7i particles)", data_file.file_id, pcount
)
ind = 0
with h5py.File(data_file.filename, mode="r") as f:
if not f.keys():
return None
dx = np.finfo(f["Group"]["GroupPos"].dtype).eps
dx = 2.0 * self.ds.quan(dx, "code_length")
for ptype in ptypes:
if data_file.total_particles[ptype] == 0:
continue
pos = data_file._get_particle_positions(ptype, f=f)
pos = self.ds.arr(pos, "code_length")
if np.any(pos.min(axis=0) < self.ds.domain_left_edge) or np.any(
pos.max(axis=0) > self.ds.domain_right_edge
):
raise YTDomainOverflow(
pos.min(axis=0),
pos.max(axis=0),
self.ds.domain_left_edge,
self.ds.domain_right_edge,
)
regions.add_data_file(pos, data_file.file_id)
morton[ind : ind + pos.shape[0]] = compute_morton(
pos[:, 0],
pos[:, 1],
pos[:, 2],
self.ds.domain_left_edge,
self.ds.domain_right_edge,
)
ind += pos.shape[0]
return morton
def _count_particles(self, data_file):
si, ei = data_file.start, data_file.end
pcount = {
"Group": data_file.header["Ngroups_ThisFile"],
"Subhalo": data_file.header["Nsubgroups_ThisFile"],
}
if None not in (si, ei):
for ptype in pcount:
pcount[ptype] = np.clip(pcount[ptype] - si, 0, ei - si)
return pcount
def _identify_fields(self, data_file):
fields = []
pcount = data_file.total_particles
if sum(pcount.values()) == 0:
return fields, {}
with h5py.File(data_file.filename, mode="r") as f:
for ptype in self.ds.particle_types_raw:
if data_file.total_particles[ptype] == 0:
continue
fields.append((ptype, "particle_identifier"))
my_fields, my_offset_fields = subfind_field_list(
f[ptype], ptype, data_file.total_particles
)
fields.extend(my_fields)
self.offset_fields = self.offset_fields.union(set(my_offset_fields))
return fields, {}
class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5):
_dataset_type = "gadget_fof_halo_hdf5"
def _read_particle_coords(self, chunks, ptf):
pass
def _read_particle_selection(self, dobj, fields):
rv = {}
ind = {}
# We first need a set of masks for each particle type
ptf = defaultdict(list) # ON-DISK TO READ
fsize = defaultdict(lambda: 0) # COUNT RV
field_maps = defaultdict(list) # ptypes -> fields
unions = self.ds.particle_unions
# What we need is a mapping from particle types to return types
for field in fields:
ftype, fname = field
fsize[field] = 0
# We should add a check for p.fparticle_unions or something here
if ftype in unions:
for pt in unions[ftype]:
ptf[pt].append(fname)
field_maps[pt, fname].append(field)
else:
ptf[ftype].append(fname)
field_maps[field].append(field)
# Now we allocate
psize = {dobj.ptype: dobj.particle_number}
for field in fields:
if field[0] in unions:
for pt in unions[field[0]]:
fsize[field] += psize.get(pt, 0)
else:
fsize[field] += psize.get(field[0], 0)
for field in fields:
if field[1] in self._vector_fields:
shape = (fsize[field], self._vector_fields[field[1]])
elif field[1] in self._array_fields:
shape = (fsize[field],) + self._array_fields[field[1]]
elif field in self.ds.scalar_field_list:
shape = (1,)
else:
shape = (fsize[field],)
rv[field] = np.empty(shape, dtype="float64")
ind[field] = 0
# Now we read.
for field_r, vals in self._read_particle_fields(dobj, ptf):
# Note that we now need to check the mappings
for field_f in field_maps[field_r]:
my_ind = ind[field_f]
rv[field_f][my_ind : my_ind + vals.shape[0], ...] = vals
ind[field_f] += vals.shape[0]
# Now we need to truncate all our fields, since we allow for
# over-estimating.
for field_f in ind:
rv[field_f] = rv[field_f][: ind[field_f]]
return rv
def _read_scalar_fields(self, dobj, scalar_fields):
all_data = {}
if not scalar_fields:
return all_data
pcount = 1
with h5py.File(dobj.scalar_data_file.filename, mode="r") as f:
for ptype, field_list in sorted(scalar_fields.items()):
for field in field_list:
if field == "particle_identifier":
field_data = (
np.arange(dobj.scalar_data_file.total_particles[ptype])
+ dobj.scalar_data_file.index_start[ptype]
)
elif field in f[ptype]:
field_data = f[ptype][field][()].astype("float64")
else:
fname = field[: field.rfind("_")]
field_data = f[ptype][fname][()].astype("float64")
my_div = field_data.size / pcount
if my_div > 1:
findex = int(field[field.rfind("_") + 1 :])
field_data = field_data[:, findex]
data = np.array([field_data[dobj.scalar_index]])
all_data[(ptype, field)] = data
return all_data
def _read_member_fields(self, dobj, member_fields):
all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64))
if not member_fields:
return all_data
field_start = 0
for i, data_file in enumerate(dobj.field_data_files):
start_index = dobj.field_data_start[i]
end_index = dobj.field_data_end[i]
pcount = end_index - start_index
if pcount == 0:
continue
field_end = field_start + end_index - start_index
with h5py.File(data_file.filename, mode="r") as f:
for ptype, field_list in sorted(member_fields.items()):
for field in field_list:
field_data = all_data[(ptype, field)]
if field in f["IDs"]:
my_data = f["IDs"][field][start_index:end_index].astype(
"float64"
)
else:
fname = field[: field.rfind("_")]
my_data = f["IDs"][fname][start_index:end_index].astype(
"float64"
)
my_div = my_data.size / pcount
if my_div > 1:
findex = int(field[field.rfind("_") + 1 :])
my_data = my_data[:, findex]
field_data[field_start:field_end] = my_data
field_start = field_end
return all_data
def _read_particle_fields(self, dobj, ptf):
# separate member particle fields from scalar fields
scalar_fields = defaultdict(list)
member_fields = defaultdict(list)
for ptype, field_list in sorted(ptf.items()):
for field in field_list:
if (ptype, field) in self.ds.scalar_field_list:
scalar_fields[ptype].append(field)
else:
member_fields[ptype].append(field)
all_data = self._read_scalar_fields(dobj, scalar_fields)
all_data.update(self._read_member_fields(dobj, member_fields))
for field, field_data in all_data.items():
yield field, field_data
def _identify_fields(self, data_file):
fields = []
scalar_fields = []
id_fields = {}
with h5py.File(data_file.filename, mode="r") as f:
for ptype in self.ds.particle_types_raw:
fields.append((ptype, "particle_identifier"))
scalar_fields.append((ptype, "particle_identifier"))
my_fields, my_offset_fields = subfind_field_list(
f[ptype], ptype, data_file.total_particles
)
fields.extend(my_fields)
scalar_fields.extend(my_fields)
if "IDs" not in f:
continue
id_fields = [(ptype, field) for field in f["IDs"]]
fields.extend(id_fields)
return fields, scalar_fields, id_fields, {}
def subfind_field_list(fh, ptype, pcount):
fields = []
offset_fields = []
for field in fh.keys():
if isinstance(fh[field], h5py.Group):
            my_fields, my_offset_fields = subfind_field_list(fh[field], ptype, pcount)
            fields.extend(my_fields)
            # merge child offset fields into the running list (the original
            # extended in the wrong direction and discarded them)
            offset_fields.extend(my_offset_fields)
else:
if not fh[field].size % pcount[ptype]:
my_div = fh[field].size / pcount[ptype]
fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1 :]
if my_div > 1:
for i in range(int(my_div)):
fields.append((ptype, "%s_%d" % (fname, i)))
else:
fields.append((ptype, fname))
elif (
ptype == "Subhalo"
and not fh[field].size % fh["/Subhalo"].attrs["Number_of_groups"]
):
# These are actually Group fields, but they were written after
# a load balancing step moved halos around and thus they do not
# correspond to the halos stored in the Group group.
my_div = fh[field].size / fh["/Subhalo"].attrs["Number_of_groups"]
fname = fh[field].name[fh[field].name.find(ptype) + len(ptype) + 1 :]
if my_div > 1:
for i in range(int(my_div)):
fields.append(("Group", "%s_%d" % (fname, i)))
else:
fields.append(("Group", fname))
offset_fields.append(fname)
else:
mylog.warning(
"Cannot add field (%s, %s) with size %d.",
ptype,
fh[field].name,
fh[field].size,
)
continue
return fields, offset_fields
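# Illustration of the expansion above (hypothetical shapes): if
# pcount["Group"] == N and an HDF5 dataset "GroupPos" has size 3*N, then
# my_div == 3 and subfind_field_list yields ("Group", "GroupPos_0"),
# ("Group", "GroupPos_1"), ("Group", "GroupPos_2").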
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-03-11 08:42
from __future__ import unicode_literals
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('laptimes', '0007_auto_20180225_2055'),
]
operations = [
migrations.AlterField(
model_name='laptime',
name='splits',
field=django.contrib.postgres.fields.ArrayField(base_field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)]), size=None, validators=[django.core.validators.MinLengthValidator(1)]),
),
migrations.AlterField(
model_name='laptime',
name='time',
field=models.PositiveIntegerField(validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='track',
name='sectors',
field=models.PositiveSmallIntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(10)]),
),
]
|
nilq/baby-python
|
python
|
import numpy as np
import imageio
infile = 'rawtext'
outfile = 'map_{:03d}{}_{}.png'
#
# 0 land
# 1 water
# 2 deepwater
# 3 void
#
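# Input file format (assumed from process_data below): repeated blocks of a
# header line "width height zoom name" followed by `height` rows of digit
# codes from the legend above, e.g.
#   4 2 1.0 Example Map
#   0011
#   0123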
colors = [
np.array([206, 169, 52], dtype = np.uint8),
np.array([0, 40, 220], dtype = np.uint8),
np.array([0, 20, 140], dtype = np.uint8),
np.array([0, 0, 0], dtype = np.uint8)
]
drawzoombox = True
box_color = np.array([255, 0, 0], dtype = np.uint8)
def make_image(data, width, height, zoom, idx, name):
    name_ = ''.join('_' if c in ' ;:()[].' else c for c in name)
if zoom > 1.001:
filename = outfile.format(idx, '_zoom', name_)
else:
filename = outfile.format(idx, '', name_)
image = np.zeros((height, width, 3), dtype = np.uint8)
for row in range(height):
for col in range(width):
image[row, col, :] = colors[int(data[row][col])]
if zoom > 1.3 and drawzoombox:
x0 = int(width * (1 - 1 / zoom) / 2)
x1 = int(width * (1 + 1 / zoom) / 2)
y0 = int(height * (1 - 1 / zoom) / 2)
y1 = int(height * (1 + 1 / zoom) / 2)
for x in range(x0, x1 + 1):
image[y0, x, :] = box_color
image[y1, x, :] = box_color
for y in range(y0, y1 + 1):
image[y, x0, :] = box_color
image[y, x1, :] = box_color
imageio.imwrite(filename, image)
def process_data():
idx = 0
with open(infile) as f:
while True:
header = f.readline().strip()
if len(header) > 0:
pieces = header.split(maxsplit=3)
width = int(pieces[0])
height = int(pieces[1])
zoom = float(pieces[2])
name = pieces[3]
data = []
for row in range(height):
data.append(f.readline().strip())
make_image(data, width, height, zoom, idx, name)
idx += 1
else:
break
if __name__ == "__main__":
process_data()
|
nilq/baby-python
|
python
|
import os
from glob import glob
from torch import Tensor
from typing import Any, Tuple
from clmr.datasets import Dataset
class AUDIO(Dataset):
"""Create a Dataset for any folder of audio files.
Args:
root (str): Path to the directory where the dataset is found or downloaded.
folder_in_archive (str, optional): The top-level directory of the dataset.
subset (str, optional): Which subset of the dataset to use.
One of ``"training"``, ``"validation"``, ``"testing"`` or ``None``.
If ``None``, the entire dataset is used. (default: ``None``).
"""
_ext_audio = ".wav"
def __init__(
self,
root: str,
src_ext_audio: str = ".wav",
n_classes: int = 1,
) -> None:
super(AUDIO, self).__init__(root)
self._path = root
self._src_ext_audio = src_ext_audio
self.n_classes = n_classes
self.fl = glob(
os.path.join(self._path, "**", "*{}".format(self._src_ext_audio)),
recursive=True,
)
if len(self.fl) == 0:
raise RuntimeError(
"Dataset not found. Please place the audio files in the {} folder.".format(
self._path
)
)
def file_path(self, n: int) -> str:
fp = self.fl[n]
return fp
    def __getitem__(self, n: int) -> Tuple[Tensor, Any]:
        """Load the n-th sample from the dataset.
        Args:
            n (int): The index of the sample to be loaded
        Returns:
            Tuple[Tensor, Any]: ``(waveform, label)``; this dataset is
                unlabelled, so the label is always an empty list.
        """
        audio, _ = self.load(n)
        label = []
        return audio, label
def __len__(self) -> int:
return len(self.fl)
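# Usage sketch (assumptions: a folder of .wav files exists at `root`, and the
# clmr Dataset base class provides `load`):
#     dataset = AUDIO(root="./audio_dir")
#     waveform, label = dataset[0]  # label is [] for this unlabelled dataset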
|
nilq/baby-python
|
python
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Microbiomeutil(MakefilePackage, SourceforgePackage):
"""Microbiome analysis utilities"""
homepage = "http://microbiomeutil.sourceforge.net/"
sourceforge_mirror_path = "microbiomeutil/microbiomeutil-r20110519.tgz"
version('20110519', sha256='9233de80ea57bfb9e9371cbe7e3bfad2d4a51168fddaf60fa144c4046c80d823')
depends_on('perl', type=('build', 'run'))
depends_on('blast-plus')
depends_on('cdbfasta')
def install(self, spec, prefix):
install_tree('ChimeraSlayer', prefix.ChimeraSlayer)
install_tree('NAST-iEr', join_path(prefix, 'NAST-iEr'))
install_tree('TreeChopper', prefix.TreeChopper)
install_tree('WigeoN', prefix.WigeoN)
install_tree('docs', prefix.docs)
install_tree('RESOURCES', prefix.resources)
install_tree('AmosCmp16Spipeline', prefix.AmosCmp16Spipeline)
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix.ChimeraSlayer)
env.prepend_path('PATH', join_path(self.prefix, 'NAST-iEr'))
env.prepend_path('PATH', self.prefix.TreeChopper)
env.prepend_path('PATH', self.prefix.WigeoN)
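# Assumed Spack CLI usage once this recipe is registered in a repo:
#     spack install microbiomeutil@20110519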
|
nilq/baby-python
|
python
|
n = int(input())
num_list = [int(num) for num in input().strip().split()][:n]
for i in range(len(num_list) - 1):
    if (num_list[i] > 0 and num_list[i + 1] > 0) or (num_list[i] < 0 and num_list[i + 1] < 0):
print("YES")
exit()
print("NO")
|
nilq/baby-python
|
python
|
import pytest
from mockito import mock, unstub, when
from SeleniumLibrary.keywords import ElementKeywords
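# These tests stub ElementKeywords' own finder methods with mockito, so no real
# browser is needed; the assertions only inspect the raised failure messages.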
@pytest.fixture(scope='function')
def element():
ctx = mock()
ctx._browser = mock()
return ElementKeywords(ctx)
def teardown_function():
unstub()
def test_locator_should_match_x_times(element):
locator = '//div'
when(element).find_elements(locator).thenReturn([])
with pytest.raises(AssertionError) as error:
element.locator_should_match_x_times(locator, 1)
assert 'should have matched' in str(error.value)
with pytest.raises(AssertionError) as error:
element.locator_should_match_x_times(locator, 1, 'foobar')
assert 'foobar' in str(error.value)
def test_element_text_should_be(element):
locator = '//div'
webelement = mock()
webelement.text = 'text'
when(element).find_element(locator).thenReturn(webelement)
with pytest.raises(AssertionError) as error:
element.element_text_should_be(locator, 'not text')
assert 'should have been' in str(error.value)
with pytest.raises(AssertionError) as error:
element.element_text_should_be(locator, 'not text', 'foobar')
assert 'foobar' in str(error.value)
|
nilq/baby-python
|
python
|
from django.apps import AppConfig as DjangoAppConfig
from django.utils.translation import gettext_lazy as _
class AppConfig(DjangoAppConfig):
name = 'account'
verbose_name = _('Bank account management')
|
nilq/baby-python
|
python
|
import bpy
from dotbimpy import File
from collections import defaultdict
def convert_dotbim_mesh_to_blender(dotbim_mesh, mesh_id):
vertices = [
(dotbim_mesh.coordinates[counter], dotbim_mesh.coordinates[counter + 1], dotbim_mesh.coordinates[counter + 2])
for counter in range(0, len(dotbim_mesh.coordinates), 3)
]
faces = [
(dotbim_mesh.indices[counter], dotbim_mesh.indices[counter + 1], dotbim_mesh.indices[counter + 2])
for counter in range(0, len(dotbim_mesh.indices), 3)
]
mesh = bpy.data.meshes.new(f"Mesh {mesh_id}")
mesh.from_pydata(vertices, [], faces)
mesh.update()
return mesh
def import_from_file(filepath):
scene = bpy.context.scene
file = File.read(filepath)
meshes_users = defaultdict(list)
for elt in file.elements:
meshes_users[elt.mesh_id].append(elt)
for mesh_id, elts in meshes_users.items():
dotbim_mesh = next((m for m in file.meshes if m.mesh_id == mesh_id), None)
mesh = convert_dotbim_mesh_to_blender(dotbim_mesh, mesh_id)
for elt in elts:
obj = bpy.data.objects.new(elt.type, mesh)
obj.location = [elt.vector.x, elt.vector.y, elt.vector.z]
obj.rotation_mode = "QUATERNION"
obj.rotation_quaternion = [elt.rotation.qw, elt.rotation.qx, elt.rotation.qy, elt.rotation.qz]
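            # Blender limits custom property names to 63 characters, so the
            # info keys are truncated below to stay within that cap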
            for key, value in elt.info.items():
                obj[key[:62]] = value
obj.color = [elt.color.r / 255.0, elt.color.g / 255.0, elt.color.b / 255.0, elt.color.a / 255.0]
scene.collection.objects.link(obj)
if __name__ == "__main__":
    import_from_file(r'House.bim')  # change this path to your .bim file
|
nilq/baby-python
|