| id | content |
|---|---|
1710151
|
from astropy.coordinates import SkyCoord, Distance
import astropy.units as u
from astropy.time import Time
from astroquery.ned import Ned
from astroquery.simbad import Simbad
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import os
import numpy as np
from spectractor import parameters
from spectractor.config import set_logger
from spectractor.extractor.spectroscopy import (Lines, HGAR_LINES, HYDROGEN_LINES, ATMOSPHERIC_LINES,
ISM_LINES, STELLAR_LINES)
if os.getenv("PYSYN_CDBS"):
import pysynphot as S
Simbad.add_votable_fields('flux(U)', 'flux(B)', 'flux(V)', 'flux(R)', 'flux(I)', 'flux(J)', 'sptype')
def load_target(label, verbose=False):
"""Load the target properties according to the type set by parameters.OBS_OBJECT_TYPE.
Currently, the type can be either "STAR", "HG-AR" or "MONOCHROMATOR". The label parameter gives the
name of the source and allows its specific properties to be loaded.
Parameters
----------
label: str
The label of the target.
verbose: bool, optional
If True, more verbosity (default: False).
Examples
--------
>>> parameters.OBS_OBJECT_TYPE = "STAR"
>>> t = load_target("HD111980", verbose=False)
>>> print(t.label)
HD111980
>>> print(t.radec_position.dec)
-18d31m20.009s
>>> parameters.OBS_OBJECT_TYPE = "MONOCHROMATOR"
>>> t = load_target("XX", verbose=False)
>>> print(t.label)
XX
>>> parameters.OBS_OBJECT_TYPE = "HG-AR"
>>> t = load_target("XX", verbose=False)
>>> print([line.wavelength for line in t.lines.lines][:5])
[253.652, 296.728, 302.15, 313.155, 334.148]
"""
if parameters.OBS_OBJECT_TYPE == 'STAR':
return Star(label, verbose)
elif parameters.OBS_OBJECT_TYPE == 'HG-AR':
return ArcLamp(label, verbose)
elif parameters.OBS_OBJECT_TYPE == 'MONOCHROMATOR':
return Monochromator(label, verbose)
else:
raise ValueError(f'Unknown parameters.OBS_OBJECT_TYPE: {parameters.OBS_OBJECT_TYPE}')
class Target:
def __init__(self, label, verbose=False):
"""Initialize Target class.
Parameters
----------
label: str
String label to name the target
verbose: bool, optional
Set True to increase verbosity (default: False)
"""
self.my_logger = set_logger(self.__class__.__name__)
self.label = label
self.type = None
self.wavelengths = []
self.spectra = []
self.verbose = verbose
self.emission_spectrum = False
self.hydrogen_only = False
self.sed = None
self.lines = None
self.radec_position = None
self.radec_position_after_pm = None
self.redshift = 0
self.image = None
self.image_x0 = None
self.image_y0 = None
class ArcLamp(Target):
def __init__(self, label, verbose=False):
"""Initialize ArcLamp class.
Parameters
----------
label: str
String label to name the lamp.
verbose: bool, optional
Set True to increase verbosity (default: False)
Examples
--------
Mercury-Argon lamp:
>>> t = ArcLamp("HG-AR", verbose=False)
>>> print([line.wavelength for line in t.lines.lines][:5])
[253.652, 296.728, 302.15, 313.155, 334.148]
>>> print(t.emission_spectrum)
True
"""
Target.__init__(self, label, verbose=verbose)
self.my_logger = set_logger(self.__class__.__name__)
self.emission_spectrum = True
self.lines = Lines(HGAR_LINES, emission_spectrum=True, orders=[1, 2])
def load(self): # pragma: no cover
pass
class Monochromator(Target):
def __init__(self, label, verbose=False):
"""Initialize Monochromator class.
Parameters
----------
label: str
String label to name the monochromator.
verbose: bool, optional
Set True to increase verbosity (default: False)
Examples
--------
>>> t = Monochromator("XX", verbose=False)
>>> print(t.label)
XX
>>> print(t.emission_spectrum)
True
"""
Target.__init__(self, label, verbose=verbose)
self.my_logger = set_logger(self.__class__.__name__)
self.emission_spectrum = True
self.lines = Lines([], emission_spectrum=True, orders=[1, 2])
def load(self): # pragma: no cover
pass
class Star(Target):
def __init__(self, label, verbose=False):
"""Initialize Star class.
Parameters
----------
label: str
String label to name the target
verbose: bool, optional
Set True to increase verbosity (default: False)
Examples
--------
Emission line object:
>>> s = Star('3C273')
>>> print(s.label)
3C273
>>> print(s.radec_position.dec)
2d03m08.598s
>>> print(s.emission_spectrum)
True
Standard star:
>>> s = Star('HD111980')
>>> print(s.label)
HD111980
>>> print(s.radec_position.dec)
-18d31m20.009s
>>> print(s.emission_spectrum)
False
"""
Target.__init__(self, label, verbose=verbose)
self.my_logger = set_logger(self.__class__.__name__)
self.simbad = None
self.load()
def load(self):
"""Load the coordinates of the target.
Examples
--------
>>> s = Star('3C273')
>>> print(s.radec_position.dec)
2d03m08.598s
"""
Simbad.add_votable_fields('flux(U)', 'flux(B)', 'flux(V)', 'flux(R)', 'flux(I)', 'flux(J)', 'sptype',
'parallax', 'pm', 'z_value')
simbad = Simbad.query_object(self.label)
self.simbad = simbad
if simbad is not None:
if self.verbose:
self.my_logger.info(f'\n\tSimbad:\n{simbad}')
self.radec_position = SkyCoord(simbad['RA'][0] + ' ' + simbad['DEC'][0], unit=(u.hourangle, u.deg))
else:
self.my_logger.warning('Target {} not found in Simbad'.format(self.label))
self.get_radec_position_after_pm(date_obs="J2000")
if not np.ma.is_masked(simbad['Z_VALUE']):
self.redshift = float(simbad['Z_VALUE'])
else:
self.redshift = 0
self.load_spectra()
def load_spectra(self):
"""Load reference spectra from Pysynphot database or NED database.
If the object redshift is >0.2, the LAMBDA_MIN and LAMBDA_MAX parameters
are redshifted accordingly.
Examples
--------
>>> s = Star('3C273')
>>> print(s.spectra[0][:4])
[0.0000000e+00 2.5048577e-14 2.4238061e-14 2.4088789e-14]
>>> s = Star('HD111980')
>>> print(s.spectra[0][:4])
[2.16890002e-13 2.66480010e-13 2.03540011e-13 2.38780004e-13]
>>> s = Star('PKS1510-089')
>>> print(s.redshift)
0.36
>>> print(f'{parameters.LAMBDA_MIN:.1f}, {parameters.LAMBDA_MAX:.1f}')
408.0, 1496.0
>>> print(s.spectra[0][:4])
[117.34012 139.27621 87.38032 143.0816 ]
"""
self.wavelengths = [] # in nm
self.spectra = []
# first try with pysynphot
file_names = []
is_calspec = False
if os.getenv("PYSYN_CDBS") is not None:
dirname = os.path.expandvars('$PYSYN_CDBS/calspec/')
for fname in os.listdir(dirname):
if os.path.isfile(dirname + fname):
if self.label.lower() in fname.lower():
file_names.append(dirname + fname)
if len(file_names) > 0:
is_calspec = True
self.emission_spectrum = False
self.hydrogen_only = False
self.lines = Lines(HYDROGEN_LINES + ATMOSPHERIC_LINES + STELLAR_LINES,
redshift=self.redshift, emission_spectrum=self.emission_spectrum,
hydrogen_only=self.hydrogen_only)
for k, f in enumerate(file_names):
if '_mod_' in f:
continue
if self.verbose:
self.my_logger.info('\n\tLoading %s' % f)
data = S.FileSpectrum(f, keepneg=True)
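# CALSPEC spectra are tabulated in Angstrom; convert the wavelengths to nm
# and rescale the flux density (per Angstrom -> per nm) accordingly.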
if isinstance(data.waveunits, S.units.Angstrom):
self.wavelengths.append(data.wave / 10.)
self.spectra.append(data.flux * 10.)
else:
self.wavelengths.append(data.wave)
self.spectra.append(data.flux)
elif 'HD' in self.label: # it is a star
self.emission_spectrum = False
self.hydrogen_only = False
self.lines = Lines(ATMOSPHERIC_LINES + HYDROGEN_LINES + STELLAR_LINES,
redshift=self.redshift, emission_spectrum=self.emission_spectrum,
hydrogen_only=self.hydrogen_only)
else:
if 'PNG' not in self.label:
# Try with NED query
# print 'Loading target %s from NED...' % self.label
ned = Ned.query_object(self.label)
hdulists = Ned.get_spectra(self.label, show_progress=False)
self.redshift = ned['Redshift'][0]
self.emission_spectrum = True
self.hydrogen_only = False
if self.redshift > 0.2:
self.hydrogen_only = True
parameters.LAMBDA_MIN *= 1 + self.redshift
parameters.LAMBDA_MAX *= 1 + self.redshift
self.lines = Lines(ATMOSPHERIC_LINES+ISM_LINES+HYDROGEN_LINES,
redshift=self.redshift, emission_spectrum=self.emission_spectrum,
hydrogen_only=self.hydrogen_only)
for k, h in enumerate(hdulists):
if h[0].header['NAXIS'] == 1:
self.spectra.append(h[0].data)
else:
for d in h[0].data:
self.spectra.append(d)
wave_n = len(h[0].data)
if h[0].header['NAXIS'] == 2:
wave_n = len(h[0].data.T)
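# Rebuild the wavelength axis from the FITS WCS keywords: CRVAL1 is the value
# at reference pixel CRPIX1 (1-indexed) and CDELT1 is the step per pixel.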
wave_step = h[0].header['CDELT1']
wave_start = h[0].header['CRVAL1'] - (h[0].header['CRPIX1'] - 1) * wave_step
wave_end = wave_start + wave_n * wave_step
waves = np.linspace(wave_start, wave_end, wave_n)
is_angstrom = False
for key in list(h[0].header.keys()):
if 'angstrom' in str(h[0].header[key]).lower():
is_angstrom = True
if is_angstrom:
waves *= 0.1
if h[0].header['NAXIS'] > 1:
for i in range(h[0].header['NAXIS'] + 1):
self.wavelengths.append(waves)
else:
self.wavelengths.append(waves)
else:
self.emission_spectrum = True
self.lines = Lines(ATMOSPHERIC_LINES+ISM_LINES+HYDROGEN_LINES,
redshift=self.redshift, emission_spectrum=self.emission_spectrum,
hydrogen_only=self.hydrogen_only)
self.build_sed()
self.my_logger.debug(f"\n\tTarget label: {self.label}"
f"\n\tCalspec? {is_calspec}"
f"\n\tNumber of spectra: {len(self.spectra)}"
f"\n\tRedshift: {self.redshift}"
f"\n\tEmission spectrum ? {self.emission_spectrum}"
f"\n\tLines: {[l.label for l in self.lines.lines]}")
def get_radec_position_after_pm(self, date_obs):
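"""Propagate the J2000 ICRS position to the observation date using the Simbad
proper motions and parallax. NaN proper motions are treated as zero and a tiny
parallax is substituted when none is available."""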
target_pmra = self.simbad[0]['PMRA'] * u.mas / u.yr
if np.isnan(target_pmra):
target_pmra = 0 * u.mas / u.yr
target_pmdec = self.simbad[0]['PMDEC'] * u.mas / u.yr
if np.isnan(target_pmdec):
target_pmdec = 0 * u.mas / u.yr
target_parallax = self.simbad[0]['PLX_VALUE'] * u.mas
if target_parallax == 0 * u.mas:
target_parallax = 1e-4 * u.mas
target_coord = SkyCoord(ra=self.radec_position.ra, dec=self.radec_position.dec,
distance=Distance(parallax=target_parallax),
pm_ra_cosdec=target_pmra, pm_dec=target_pmdec, frame='icrs', equinox="J2000",
obstime="J2000")
self.radec_position_after_pm = target_coord.apply_space_motion(new_obstime=Time(date_obs))
return self.radec_position_after_pm
def build_sed(self, index=0):
"""Interpolate the database reference spectra and return self.sed as a function of the wavelength.
Parameters
----------
index: int
Index of the spectrum stored in the self.spectra list
Examples
--------
>>> s = Star('HD111980')
>>> s.build_sed(index=0)
>>> s.sed(550)
array(1.67605113e-11)
"""
if len(self.spectra) == 0:
self.sed = lambda x: np.zeros_like(x)
else:
self.sed = interp1d(self.wavelengths[index], self.spectra[index], kind='linear', bounds_error=False,
fill_value=0.)
def plot_spectra(self):
""" Plot the spectra stored in the self.spectra list.
Examples
--------
>>> s = Star('HD111980')
>>> s.plot_spectra()
"""
# target.load_spectra() ## No global target object available here (SDC)
plt.figure() # necessary to create a new plot (SDC)
for isp, sp in enumerate(self.spectra):
plt.plot(self.wavelengths[isp], sp, label='Spectrum %d' % isp)
plt.xlim((300, 1100))
plt.xlabel(r'$\lambda$ [nm]')
plt.ylabel('Flux')
plt.title(self.label)
plt.legend()
if parameters.DISPLAY: # pragma: no cover
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
|
1710187
|
from django.urls import reverse
from django.conf import settings
from django.contrib.auth.hashers import make_password
from django.utils import timezone
from rest_framework import status
from datetime import timedelta
from mock import patch
import random
import string
import binascii
import os
import hashlib
import time
import json
import nacl.encoding
import nacl.utils
import nacl.secret
from restapi import models
from .base import APITestCaseExtended
from ..utils import encrypt_with_db_secret
class DuoVerifyTests(APITestCaseExtended):
def setUp(self):
self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '<EMAIL>'
self.test_email_bcrypt = 'a'
self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '<EMAIL>'
self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
self.test_public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_user_sauce = '33afce78b0152075457e2a4d58b80312162f08ee932551c833b3d08d58574f03'
self.test_user_obj = models.User.objects.create(
email=self.test_email,
email_bcrypt=self.test_email_bcrypt,
username=self.test_username,
authkey=make_password(self.test_authkey),
public_key=self.test_public_key,
private_key=self.test_private_key,
private_key_nonce=self.test_private_key_nonce,
secret_key=self.test_secret_key,
secret_key_nonce=self.test_secret_key_nonce,
user_sauce=self.test_user_sauce,
is_email_active=True
)
self.token = ''.join(random.choice(string.ascii_lowercase) for _ in range(64))
self.session_secret_key = hashlib.sha256(settings.DB_SECRET.encode()).hexdigest()
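# Only the SHA-512 hash of the token is stored in the Token row; the raw token
# is what the client later sends in the Authorization header (see the tests below).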
self.token_obj = models.Token.objects.create(
key= hashlib.sha512(self.token.encode()).hexdigest(),
user=self.test_user_obj,
secret_key=self.session_secret_key,
valid_till = timezone.now() + timedelta(seconds=10)
)
models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
)
# encrypt authorization validator with session key
secret_box = nacl.secret.SecretBox(self.session_secret_key, encoder=nacl.encoding.HexEncoder)
authorization_validator_nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
authorization_validator_nonce_hex = nacl.encoding.HexEncoder.encode(authorization_validator_nonce)
encrypted = secret_box.encrypt(json.dumps({}).encode("utf-8"), authorization_validator_nonce)
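# SecretBox.encrypt() prepends the nonce to the ciphertext, so strip it off here
# and transmit nonce and ciphertext as separate hex-encoded fields.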
authorization_validator = encrypted[len(authorization_validator_nonce):]
authorization_validator_hex = nacl.encoding.HexEncoder.encode(authorization_validator)
self.authorization_validator = json.dumps({
'text': authorization_validator_hex.decode(),
'nonce': authorization_validator_nonce_hex.decode(),
})
def test_get_authentication_duo_verify(self):
"""
Tests GET method on authentication_duo_verify
"""
url = reverse('authentication_duo_verify')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.get(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def test_put_authentication_duo_verify(self):
"""
Tests PUT method on authentication_duo_verify
"""
url = reverse('authentication_duo_verify')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
def mock_check(self):
return {
'time': int(time.time())
}
def mock_enroll_status(self, user_id=None, activation_code=None):
return 'success'
def mock_enroll_status_invalid(self, user_id=None, activation_code=None):
return 'invalid'
def mock_enroll_status_waiting(self, user_id=None, activation_code=None):
return 'waiting'
def mock_auth_valid(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'result': 'allow'
}
def mock_auth_invalid(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'result': 'deny'
}
def mock_auth_status_msg(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'result': 'deny',
'status_msg': 'Deny it!'
}
def mock_auth_error(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'result': 'deny',
'error': 'Funny error'
}
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_post_authentication_duo_verify_success_with_passcode(self):
"""
Tests POST method on authentication_duo_verify
"""
url = reverse('authentication_duo_verify')
data = {
'token': self.token,
'duo_token': '<PASSWORD>'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_post_authentication_duo_verify_success_without_passcode(self):
"""
Tests POST method on authentication_duo_verify
"""
url = reverse('authentication_duo_verify')
data = {
'token': self.token,
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_post_authentication_duo_verify_invalid_token(self):
"""
Tests POST method on authentication_duo_verify with invalid token
"""
url = reverse('authentication_duo_verify')
data = {
'token': '<PASSWORD>',
'duo_token': '<PASSWORD>'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + '<PASSWORD>', HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.auth', mock_auth_invalid)
def test_post_authentication_duo_verify_invalid_duo_token(self):
"""
Tests POST method on authentication_duo_verify with an invalid duo_token
"""
url = reverse('authentication_duo_verify')
data = {
'token': self.token,
'duo_token': '<PASSWORD>'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertNotEqual(response.data.get('non_field_errors', False), False)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.auth', mock_auth_status_msg)
def test_post_authentication_duo_verify_error_with_status_message(self):
"""
Tests POST method on authentication_duo_verify with a status message error
"""
url = reverse('authentication_duo_verify')
data = {
'token': self.token,
'duo_token': '<PASSWORD>'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.auth', mock_auth_error)
def test_post_authentication_duo_verify_error_with_error_message(self):
"""
Tests POST method on authentication_duo_verify with an error message
"""
url = reverse('authentication_duo_verify')
data = {
'token': self.token,
'duo_token': '<PASSWORD>'
}
self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token, HTTP_AUTHORIZATION_VALIDATOR=self.authorization_validator)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_authentication_duo_verify(self):
"""
Tests DELETE method on authentication_duo_verify
"""
url = reverse('authentication_duo_verify')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)
class DuoTests(APITestCaseExtended):
def setUp(self):
self.test_email = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '<EMAIL>'
self.test_email_bcrypt = 'a'
self.test_username = ''.join(random.choice(string.ascii_lowercase) for _ in range(10)) + '<EMAIL>'
self.test_authkey = binascii.hexlify(os.urandom(settings.AUTH_KEY_LENGTH_BYTES)).decode()
self.test_public_key = binascii.hexlify(os.urandom(settings.USER_PUBLIC_KEY_LENGTH_BYTES)).decode()
self.test_private_key = binascii.hexlify(os.urandom(settings.USER_PRIVATE_KEY_LENGTH_BYTES)).decode()
self.test_private_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_secret_key = binascii.hexlify(os.urandom(settings.USER_SECRET_KEY_LENGTH_BYTES)).decode()
self.test_secret_key_nonce = binascii.hexlify(os.urandom(settings.NONCE_LENGTH_BYTES)).decode()
self.test_user_sauce = '6df1f310730e5464ce23e05fa4eca0de3fe30805fc8cc1d6b37389262e4bd9c3'
self.test_user_obj = models.User.objects.create(
email=self.test_email,
email_bcrypt=self.test_email_bcrypt,
username=self.test_username,
authkey=make_password(self.test_authkey),
public_key=self.test_public_key,
private_key=self.test_private_key,
private_key_nonce=self.test_private_key_nonce,
secret_key=self.test_secret_key,
secret_key_nonce=self.test_secret_key_nonce,
user_sauce=self.test_user_sauce,
is_email_active=True
)
def test_get_user_duo(self):
"""
Tests GET method on user_duo
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
)
url = reverse('user_duo')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.get(url, data)
self.assertEqual(response.data, {
"duos":[{
"id":duo.id,
"active":duo.active,
"title":"My Sweet Title"
}]
})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def mock_check(self):
return {
'time': int(time.time())
}
def mock_enroll(self, username=None):
return {
'expiration': int(time.time()) + 86400,
'user_id': '1234',
'activation_code': '123456',
}
def mock_check_error(self):
return {
'error': 'Some Error'
}
def mock_enroll_error(self, username=None):
return {
'error': 'Some Error'
}
def mock_preauth_enroll(self, username=None):
return {
'result': 'enroll'
}
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll', mock_enroll)
@patch('duo_client.Auth.preauth', mock_preauth_enroll)
def test_put_user_duo(self):
"""
Tests PUT method on user_duo to create a new duo
"""
url = reverse('user_duo')
data = {
'title': 'asdu5zz53',
'integration_key': 'integration_key',
'secret_key': 'secret_key',
'host': 'host',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertNotEqual(response.data.get('id', False), False)
self.assertNotEqual(response.data.get('activation_code', False), False)
@patch('duo_client.Auth.check', mock_check_error)
@patch('duo_client.Auth.enroll', mock_enroll)
@patch('duo_client.Auth.preauth', mock_preauth_enroll)
def test_put_user_duo_error_in_check(self):
"""
Tests PUT method on user_duo to create a new duo with an error in duo check call
"""
url = reverse('user_duo')
data = {
'title': 'asdu5zz53',
'integration_key': 'integration_key',
'secret_key': 'secret_key',
'host': 'host',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll', mock_enroll_error)
@patch('duo_client.Auth.preauth', mock_preauth_enroll)
def test_put_user_duo_error_in_enroll(self):
"""
Tests PUT method on user_duo to create a new duo with an error in duo enroll call
"""
url = reverse('user_duo')
data = {
'title': 'asdu5zz53',
'integration_key': 'integration_key',
'secret_key': 'secret_key',
'host': 'host',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll', mock_enroll)
@patch('duo_client.Auth.preauth', mock_preauth_enroll)
def test_put_user_duo_error_already_exists(self):
"""
Tests PUT method on user_duo to create a new (second) duo
"""
models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'title': 'asdu5zz53',
'integration_key': 'integration_key',
'secret_key': 'secret_key',
'host': 'host',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.put(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_post_user_duo_no_parameters(self):
"""
Tests POST method on user_duo
"""
url = reverse('user_duo')
data = {}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def mock_enroll_status(self, user_id=None, activation_code=None):
return 'success'
def mock_enroll_status_invalid(self, user_id=None, activation_code=None):
return 'invalid'
def mock_enroll_status_waiting(self, user_id=None, activation_code=None):
return 'waiting'
def mock_auth_valid(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'result': 'allow'
}
def mock_auth_error(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'error': 'Some Error Message'
}
def mock_auth_status_msg(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return {
'status_msg': 'Some Status Error Message'
}
def mock_auth_undefined_error(self, username=None, factor=None, device=None, pushinfo=None, passcode=None, async_txn=False):
return 'Undefined problem'
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_activate_duo_success_with_passcode(self):
"""
Tests POST method on user_duo to activate a duo
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
'duo_token': '<PASSWORD>',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
db_duo = models.Duo.objects.get(pk=duo.id)
self.assertTrue(db_duo.active)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_activate_duo_success_without_passcode(self):
"""
Tests POST method on user_duo to activate a duo
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
db_duo = models.Duo.objects.get(pk=duo.id)
self.assertTrue(db_duo.active)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status)
@patch('duo_client.Auth.auth', mock_auth_error)
def test_activate_duo_failure_error(self):
"""
Tests POST method on user_duo to activate a duo and auth returns an error
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status)
@patch('duo_client.Auth.auth', mock_auth_status_msg)
def test_activate_duo_failure_error_with_status_msg(self):
"""
Tests POST method on user_duo to activate a duo and auth returns an error with status msg
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status)
@patch('duo_client.Auth.auth', mock_auth_undefined_error)
def test_activate_duo_failure_error_unknown(self):
"""
Tests POST method on user_duo to activate a duo and auth returns an undefined error
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_activate_duo_failure_duo_id_does_not_exist(self):
"""
Tests POST method on user_duo to activate a duo
"""
url = reverse('user_duo')
data = {
'duo_id': 'e3208de5-0f79-46d0-a34c-5d701e48b4b5',
'duo_token': '<PASSWORD>',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status_invalid)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_activate_duo_failure_enrollment_status_invalid(self):
"""
Tests POST method on user_duo to activate a duo with an activation code that has expired
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
'duo_token': '<PASSWORD>',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
@patch('duo_client.Auth.check', mock_check)
@patch('duo_client.Auth.enroll_status', mock_enroll_status_waiting)
@patch('duo_client.Auth.auth', mock_auth_valid)
def test_activate_duo_failure_enrollment_status_waiting(self):
"""
Tests POST method on user_duo to activate a duo with an activation code that has not yet been scanned by a mobile device
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
active = False,
)
url = reverse('user_duo')
data = {
'duo_id': duo.id,
'duo_token': '<PASSWORD>',
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.post(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_user_duo(self):
"""
Tests DELETE method on user_duo
"""
duo = models.Duo.objects.create(
user=self.test_user_obj,
title= 'My Sweet Title',
duo_integration_key = 'duo_integration_key',
duo_secret_key = encrypt_with_db_secret('duo_secret_key'),
duo_host = 'duo_secret_key',
enrollment_user_id = 'enrollment_user_id',
enrollment_activation_code = 'enrollment_activation_code',
enrollment_expiration_date = timezone.now() + timedelta(seconds=600),
)
url = reverse('user_duo')
data = {
'duo_id': duo.id
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.get(url, data)
self.assertEqual(response.data, {
"duos":[]
})
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_delete_user_duo_no_duo_id (self):
"""
Tests DELETE method on user_duo with no duo_id
"""
url = reverse('user_duo')
data = {
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_user_duo_duo_id_no_uuid(self):
"""
Tests DELETE method on user_duo with duo_id not being a uuid
"""
url = reverse('user_duo')
data = {
'duo_id': '12345'
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
def test_delete_user_duo_duo_id_not_exist(self):
"""
Tests DELETE method on user_duo with duo_id not existing
"""
url = reverse('user_duo')
data = {
'duo_id': '7e866c32-3e4d-4421-8a7d-3ac62f980fd3'
}
self.client.force_authenticate(user=self.test_user_obj)
response = self.client.delete(url, data)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
|
1710189
|
import os
import time
from collections import namedtuple
class SourceFile(namedtuple('SourceFile', 'destination source process prefix')):
def __new__(cls, destination, source, process=True, prefix=None):
return super(SourceFile, cls).__new__(cls,
source=source,
destination=destination,
process=process,
prefix=prefix)
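# A minimal usage sketch (hypothetical paths): `process` and `prefix` have
# defaults, so only the two paths are required, e.g.
#   SourceFile(destination='build/index.html', source='docs/index.md')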
class cached_property(object):
"""
Descriptor (non-data) for building an attribute on-demand on first use.
"""
def __init__(self, factory):
"""
<factory> is called as factory(instance) to build the attribute.
"""
self._attr_name = factory.__name__
self._factory = factory
def __get__(self, instance, owner):
# Build the attribute.
attr = self._factory(instance)
# Cache the value; hide ourselves.
setattr(instance, self._attr_name, attr)
return attr
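# A minimal usage sketch (hypothetical `Page` class and factory): the first
# access to `page.body` calls the factory; the cached value then shadows the
# descriptor on the instance, so later accesses are plain attribute lookups.
#
#   class Page(object):
#       @cached_property
#       def body(self):
#           return render_expensive_template(self)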
def shift(array, default):
"""
Pop and return the first item of `array`; once the array is empty, return
`default`.
"""
try:
return array.pop(0)
except IndexError:
return default
def ensure_directory(directory):
""" ### Ensure directory
Ensure that the destination directory exists."""
if not os.path.isdir(directory):
os.makedirs(directory)
def monitor(path, file_modified, file_changed):
"""Monitor each source file and re-generate documentation on change."""
# The watchdog modules are imported in `main()` but we need to re-import
# here to bring them into the local namespace.
import watchdog.events
import watchdog.observers
path = os.path.normpath(path)
class RegenerateHandler(watchdog.events.FileSystemEventHandler):
"""A handler for recompiling files which triggered watchdog events"""
def dispatch(self, event):
# Skip files and directories with any path component starting with a dot ('.')
if any([f.startswith('.')
for f in os.path.relpath(event.src_path, path).split(os.sep)]):
return
task = None
if event.event_type == "modified":
if not event.is_directory:
task = file_modified
else:
return
else:
task = file_changed
if task:
print("\n")
print("{} \"{}\" was {}, generating documentation...".format(
"Directory" if event.is_directory else "File",
event.src_path,
event.event_type
))
task()
# Set up an observer which monitors all directories for files given on
# the command line and notifies the handler defined above.
event_handler = RegenerateHandler()
observer = watchdog.observers.Observer()
observer.schedule(event_handler, path=path, recursive=True)
# Run the file change monitoring loop until the user hits Ctrl-C.
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
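# A minimal usage sketch (hypothetical callbacks, each taking no arguments):
#   monitor('docs/',
#           file_modified=lambda: rebuild_page(),
#           file_changed=lambda: rebuild_everything())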
|
1710194
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Watchers(models.Model):
"""Watcher objects table"""
wid = models.AutoField(primary_key=True)
name = models.TextField()
class WatcherLogs(models.Model):
"""Watcher logs table"""
class Meta:
unique_together = ("wid", "log")
wid = models.IntegerField()
log = models.TextField()
class WatcherRules(models.Model):
"""Table which tracks all LogWatch object tables"""
class Meta:
unique_together = ("wid", "rule_id", "rule")
wid = models.IntegerField()
rule_id = models.IntegerField()
rule = models.TextField()
|
1710277
|
import json
import os
import six
import time
from sh import chmod
from os_brick.initiator import connector
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from oslo_utils import netutils
from oslo_utils import units
from twisted.python.filepath import FilePath
import hpedockerplugin.exception as exception
import hpedockerplugin.fileutil as fileutil
import math
import re
import hpedockerplugin.hpe.array_connection_params as acp
import datetime
from hpedockerplugin.hpe import volume
from hpedockerplugin.hpe import utils
from hpedockerplugin.i18n import _, _LE, _LI, _LW
import hpedockerplugin.synchronization as synchronization
LOG = logging.getLogger(__name__)
PRIMARY = 1
PRIMARY_REV = 1
SECONDARY = 2
CONF = cfg.CONF
VolumeOwnedAndMounted = 0
VolumeOwnedAndNotMounted = 1
VolumeNotOwned = 2
class VolumeManager(object):
def __init__(self, host_config, hpepluginconfig, etcd_util,
node_id,
backend_name):
self._host_config = host_config
self._hpepluginconfig = hpepluginconfig
self._my_ip = netutils.get_my_ipv4()
# Override the settings of use_multipath, enforce_multipath
# This will be a workaround until Issue #50 is fixed.
msg = (_('Overriding the value of multipath flags to True'))
LOG.info(msg)
self._use_multipath = True
self._enforce_multipath = True
self._etcd = etcd_util
self._initialize_configuration()
self._pwd_decryptor = utils.PasswordDecryptor(backend_name,
self._etcd)
self._pwd_decryptor.decrypt_password(self.src_bkend_config)
self._pwd_decryptor.decrypt_password(self.tgt_bkend_config)
# TODO: When multiple backends come into the picture, consider
# lazy initialization of individual driver
try:
LOG.info("Initializing 3PAR driver...")
self._primary_driver = self._initialize_driver(
host_config, self.src_bkend_config, self.tgt_bkend_config)
self._hpeplugin_driver = self._primary_driver
LOG.info("Initialized 3PAR driver!")
except Exception as ex:
msg = "Failed to initialize 3PAR driver for array: %s!" \
" Exception: %s"\
% (self.src_bkend_config.hpe3par_api_url,
six.text_type(ex))
LOG.info(msg)
raise exception.HPEPluginStartPluginException(
reason=msg)
# If replication enabled, then initialize secondary driver
if self.tgt_bkend_config:
LOG.info("Replication enabled!")
try:
LOG.info("Initializing 3PAR driver for remote array...")
self._remote_driver = self._initialize_driver(
host_config, self.tgt_bkend_config,
self.src_bkend_config)
except Exception as ex:
msg = "Failed to initialize 3PAR driver for remote array %s!" \
" Exception: %s"\
% (self.tgt_bkend_config.hpe3par_api_url,
six.text_type(ex))
LOG.info(msg)
raise exception.HPEPluginStartPluginException(reason=msg)
self._connector = self._get_connector(hpepluginconfig)
# Volume fencing requirement
self._node_id = node_id
def _initialize_configuration(self):
self.src_bkend_config = self._get_src_bkend_config()
self.tgt_bkend_config = None
if self._hpepluginconfig.replication_device:
self.tgt_bkend_config = acp.ArrayConnectionParams(
self._hpepluginconfig.replication_device)
if self.tgt_bkend_config:
# Copy all the source configuration to target
hpeconf = self._hpepluginconfig
for key in hpeconf.keys():
if not self.tgt_bkend_config.is_param_present(key):
value = getattr(hpeconf, key)
self.tgt_bkend_config.__setattr__(key, value)
self.tgt_bkend_config.hpe3par_cpg = self._extract_remote_cpgs(
self.tgt_bkend_config.cpg_map)
if not self.tgt_bkend_config.hpe3par_cpg:
LOG.exception("Failed to initialize driver - cpg_map not "
"defined for replication device")
raise exception.HPEPluginMountException(
"Failed to initialize driver - cpg_map not defined for"
"replication device")
if self.tgt_bkend_config.snap_cpg_map:
self.tgt_bkend_config.hpe3par_snapcpg = \
self._extract_remote_cpgs(
self.tgt_bkend_config.snap_cpg_map)
if not self.tgt_bkend_config.hpe3par_snapcpg:
self.tgt_bkend_config.hpe3par_snapcpg = \
self.tgt_bkend_config.hpe3par_cpg
if 'iscsi' in self.src_bkend_config.hpedockerplugin_driver:
iscsi_ips = self.tgt_bkend_config.hpe3par_iscsi_ips
self.tgt_bkend_config.hpe3par_iscsi_ips = iscsi_ips.split(
';')
def _get_src_bkend_config(self):
LOG.info("Getting source backend configuration...")
hpeconf = self._hpepluginconfig
config = acp.ArrayConnectionParams()
for key in hpeconf.keys():
value = getattr(hpeconf, key)
config.__setattr__(key, value)
if hpeconf.hpe3par_snapcpg:
config.hpe3par_snapcpg = hpeconf.hpe3par_snapcpg
else:
# config.hpe3par_snapcpg = hpeconf.hpe3par_cpg
# if 'hpe3par_snapcpg' is NOT given in hpe.conf, this should default to
# an empty list & the volume's snap_cpg will be populated later with the
# value given with '-o cpg'
config.hpe3par_snapcpg = hpeconf.hpe3par_cpg
LOG.info("Got source backend configuration!")
return config
@staticmethod
def _extract_remote_cpgs(cpg_map):
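# cpg_map is a space-separated list of colon-separated CPG pairs (hypothetical
# example: 'LOCAL_CPG1:REMOTE_CPG1 LOCAL_CPG2:REMOTE_CPG2'); only the second,
# presumably remote, half of each pair is collected here.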
hpe3par_cpgs = []
cpg_pairs = cpg_map.split(' ')
for cpg_pair in cpg_pairs:
cpgs = cpg_pair.split(':')
hpe3par_cpgs.append(cpgs[1])
return hpe3par_cpgs
def _initialize_driver(self, host_config, src_config, tgt_config):
hpeplugin_driver_class = src_config.hpedockerplugin_driver
hpeplugin_driver = importutils.import_object(
hpeplugin_driver_class, host_config, src_config, tgt_config)
if hpeplugin_driver is None:
msg = (_('hpeplugin_driver import driver failed'))
LOG.error(msg)
raise exception.HPEPluginNotInitializedException(reason=msg)
try:
hpeplugin_driver.do_setup(timeout=30)
hpeplugin_driver.check_for_setup_error()
return hpeplugin_driver
except Exception as ex:
msg = (_('hpeplugin_driver do_setup failed, error is: %s'),
six.text_type(ex))
LOG.error(msg)
raise exception.HPEPluginNotInitializedException(reason=msg)
def _get_connector(self, hpepluginconfig):
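# Build an os-brick initiator connector: default to iSCSI and switch to
# Fibre Channel when the FC driver is configured; 'sudo' is used as the
# privilege-escalation helper.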
protocol = 'ISCSI'
if 'HPE3PARFCDriver' in hpepluginconfig.hpedockerplugin_driver:
protocol = 'FIBRE_CHANNEL'
root_helper = 'sudo'
return connector.InitiatorConnector.factory(
protocol, root_helper, use_multipath=self._use_multipath,
device_scan_attempts=5, transport='default')
@synchronization.synchronized_volume('{volname}')
def create_volume(self, volname, vol_size, vol_prov,
vol_flash, compression_val, vol_qos,
fs_owner, fs_mode,
mount_conflict_delay, cpg, snap_cpg,
current_backend, rcg_name):
LOG.info('In _volumedriver_create')
# NOTE: Since Docker passes user supplied names and not a unique
# uuid, we can't allow duplicate volume names to exist
vol = self._etcd.get_vol_byname(volname)
if vol is not None:
return json.dumps({u"Err": ''})
# if qos-name is given, check vvset is associated with qos or not
if vol_qos is not None:
try:
self._hpeplugin_driver.get_qos_detail(vol_qos)
# if vol_flash is not given in option & with qos
# if vvset is having flash-cache enabled, then set
# vol_flash=True
if vol_flash is None:
vvset_detail = self._hpeplugin_driver.get_vvset_detail(
vol_qos)
if(vvset_detail.get('flashCachePolicy') is not None and
vvset_detail.get('flashCachePolicy') == 1):
vol_flash = True
except Exception as ex:
msg = (_('Create volume failed because vvset is not present or '
'is not associated with qos: %s'), six.text_type(ex))
LOG.exception(msg)
return json.dumps({u"Err": six.text_type(ex)})
undo_steps = []
vol = volume.createvol(volname, vol_size, vol_prov,
vol_flash, compression_val, vol_qos,
mount_conflict_delay, False, cpg, snap_cpg,
False, current_backend)
bkend_vol_name = ""
try:
bkend_vol_name = self._create_volume(vol, undo_steps)
self._apply_volume_specs(vol, undo_steps)
if rcg_name:
# bkend_rcg_name = self._get_3par_rcg_name(rcg_name)
try:
rcg_info = self._find_rcg(rcg_name)
except exception.HPEDriverRemoteCopyGroupNotFound:
rcg_info = self._create_rcg(rcg_name, undo_steps)
self._add_volume_to_rcg(vol, rcg_name, undo_steps)
vol['rcg_info'] = rcg_info
# For now just track volume to uuid mapping internally
# TODO: Save volume name and uuid mapping in etcd as well
# This will make get_vol_byname more efficient
vol['fsOwner'] = fs_owner
vol['fsMode'] = fs_mode
vol['3par_vol_name'] = bkend_vol_name
self._etcd.save_vol(vol)
except Exception as ex:
msg = (_('Create volume failed with error: %s'), six.text_type(ex))
LOG.exception(msg)
self._rollback(undo_steps)
return json.dumps({u"Err": six.text_type(ex)})
else:
LOG.info('Volume: %(name)s was successfully saved to etcd',
{'name': volname})
return json.dumps({u"Err": ''})
def map_3par_volume_time_to_docker(self, vol, expiration=True):
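# Return the expiration (or retention, when expiration=False) period in hours,
# computed as the difference between the volume's creationTime8601 timestamp
# and its expirationTime8601/retentionTime8601 timestamp.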
try:
date_format = "%Y-%m-%d %H:%M:%S"
if expiration:
find_flag = "expirationTime8601"
else:
find_flag = "retentionTime8601"
start_groups = re.search(r'(\d+-\d+-\d+)[A-Za-z](\d+:\d+:\d+)',
str(vol["creationTime8601"]))
startdate = start_groups.group(1) + " " + start_groups.group(2)
startt = datetime.datetime.strptime(startdate, date_format)
end_groups = re.search(r'(\d+-\d+-\d+)[A-Za-z](\d+:\d+:\d+)',
str(vol[find_flag]))
enddate = end_groups.group(1) + " " + end_groups.group(2)
endd = datetime.datetime.strptime(enddate, date_format)
diff = endd - startt
diff_hour = diff.total_seconds() / 3600
return diff_hour
except Exception as ex:
msg = (_(
'Failed to map expiration hours of 3par volume: %(vol)s error'
' is: %(ex)s'), {'vol': vol, 'ex': six.text_type(ex)})
LOG.error(msg)
raise exception.HPEPluginMapHourException(reason=msg)
def map_3par_volume_size_to_docker(self, vol):
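# 3PAR reports the size in MiB; Docker volume sizes are expressed in GiB,
# so divide by 1024 (units.Ki) and round up.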
try:
return int(math.ceil(float(vol['sizeMiB']) / units.Ki))
except Exception as ex:
msg = (_('Failed to map size of 3par volume: %(vol)s, error is: '
'%(ex)s'), {'vol': vol, 'ex': six.text_type(ex)})
LOG.error(msg)
raise exception.HPEPluginMapSizeException(reason=msg)
def map_3par_volume_prov_to_docker(self, vol):
try:
prov = volume.PROVISIONING.get(vol.get('provisioningType'))
if not prov:
return volume.DEFAULT_PROV
return prov
except Exception as ex:
msg = (_(
'Failed to map provisioning of 3par volume: %(vol)s, error'
' is: %(ex)s'), {'vol': vol, 'ex': six.text_type(ex)})
LOG.error(msg)
raise exception.HPEPluginMapProvisioningException(reason=msg)
def map_3par_volume_compression_to_docker(self, vol):
# no need to raise exception here, because compression in docker
# environment can be either True or False
if volume.COMPRESSION.get(vol.get('compressionState')):
return True
return volume.DEFAULT_COMPRESSION_VAL
def _get_vvset_by_volume_name(self, backend_vol_name):
return self._hpeplugin_driver.get_vvset_from_volume(
backend_vol_name)
def _set_flash_cache_policy(self, vol, vvset_detail):
if vvset_detail is not None:
vvset_name = vvset_detail.get('name')
LOG.info('vvset_name: %(vvset)s' % {'vvset': vvset_name})
# check and set the flash-cache if exists
flash_cache_pol = vvset_detail.get('flashCachePolicy')
if flash_cache_pol is not None:
vol['flash_cache'] = (flash_cache_pol == 1)
else:
vol['flash_cache'] = None
def _set_qos_info(self, vol, vvset_name):
LOG.info("Getting QOS info by vv-set-name '%s' for volume'%s'..."
% (vvset_name, vol['display_name']))
self._hpeplugin_driver.get_qos_detail(vvset_name)
LOG.info("QOS info found for Docker volume '%s'. Setting QOS name"
"for the volume." % vol['display_name'])
vol["qos_name"] = vvset_name
def _set_qos_and_flash_cache_info(self, backend_vol_name, vol):
vvset_detail = self._get_vvset_by_volume_name(backend_vol_name)
if vvset_detail:
self._set_flash_cache_policy(vol, vvset_detail)
vvset_name = vvset_detail.get('name')
try:
if vvset_name:
self._set_qos_info(vol, vvset_name)
except Exception as ex:
if not vol['flash_cache']:
msg = (_("ERROR: No QOS or flash-cache found for a volume"
" '%s' present in vvset '%s'" % (backend_vol_name,
vvset_name)))
log_msg = msg + "error: %s" % six.text_type(ex)
LOG.error(log_msg)
# Error message to be displayed in inspect command
vol["qos_name"] = msg
def manage_existing(self, volname, existing_ref, backend='DEFAULT',
manage_opts=None):
LOG.info('Managing a %(vol)s' % {'vol': existing_ref})
# NOTE: Since Docker passes user supplied names and not a unique
# uuid, we can't allow duplicate volume names to exist
vol = self._etcd.get_vol_byname(volname)
if vol is not None:
return json.dumps({u"Err": ''})
is_snap = False
# Make sure the reference is not in use.
if existing_ref.startswith('dcv-') or existing_ref.startswith('dcs-'):
msg = (_('target: %s is already in-use') % existing_ref)
LOG.error(msg)
return json.dumps({u"Err": six.text_type(msg)})
vol = volume.createvol(volname)
vol['backend'] = backend
vol['fsOwner'] = None
vol['fsMode'] = None
vol['Options'] = manage_opts
parent_vol = ""
try:
# check target volume exists in 3par
existing_ref_details = \
self._hpeplugin_driver.get_volume_detail(existing_ref)
except Exception as ex:
msg = (_(
'Volume: %(existing_ref)s does not exist. Error: %(ex)s')
% {'existing_ref': existing_ref, 'ex': six.text_type(ex)})
LOG.exception(msg)
return json.dumps({u"Err": six.text_type(msg)})
if ('rcopyStatus' in existing_ref_details and
existing_ref_details['rcopyStatus'] != 1):
msg = 'ERROR: Volume associated with a replication group '\
'cannot be imported'
raise exception.InvalidInput(reason=msg)
self._set_qos_and_flash_cache_info(existing_ref_details['name'], vol)
# since we have only 'importVol' option for importing,
# both volume and snapshot
# throw error when user tries to manage snapshot,
# before managing parent
copyType = existing_ref_details.get('copyType')
if volume.COPYTYPE.get(copyType) == 'virtual':
# it's a snapshot, so check whether its parent is managed or not ?
try:
# convert parent volume name to its uuid,
# which is then check in etcd for existence
vol_id = utils.get_vol_id(existing_ref_details["copyOf"])
LOG.info('parent volume ID: %(parent_vol_id)s'
% {'parent_vol_id': vol_id})
# check parent uuid is present in etcd, or not ?
parent_vol = self._etcd.get_vol_by_id(vol_id)
vol['flash_cache'] = parent_vol['flash_cache']
# parent vol is present so manage a snapshot now
is_snap = True
except Exception as ex:
msg = (_(
'Manage snapshot failed because parent volume: '
'%(parent_volume)s is unmanaged.') % {
'parent_volume': existing_ref_details["copyOf"]})
LOG.exception(msg)
return json.dumps({u"Err": six.text_type(msg)})
try:
volume_detail_3par = self._hpeplugin_driver.manage_existing(
vol, existing_ref_details, is_snap=is_snap)
except Exception as ex:
msg = (_('Manage volume failed Error: %s') % six.text_type(ex))
LOG.exception(msg)
return json.dumps({u"Err": six.text_type(msg)})
try:
# mapping
vol['size'] = \
self.map_3par_volume_size_to_docker(volume_detail_3par)
vol['provisioning'] = \
self.map_3par_volume_prov_to_docker(volume_detail_3par)
vol['compression'] = \
self.map_3par_volume_compression_to_docker(volume_detail_3par)
vol['cpg'] = volume_detail_3par.get('userCPG')
vol['snap_cpg'] = volume_detail_3par.get('snapCPG')
if is_snap:
if vol['3par_vol_name'].startswith("dcv-"):
vol['3par_vol_name'] = \
str.replace(vol['3par_vol_name'], "dcv-", "dcs-", 1)
# managing a snapshot
if volume_detail_3par.get("expirationTime8601"):
expiration_hours = \
self.map_3par_volume_time_to_docker(volume_detail_3par)
else:
expiration_hours = None
if volume_detail_3par.get("retentionTime8601"):
retention_hours = self.map_3par_volume_time_to_docker(
volume_detail_3par, expiration=False)
else:
retention_hours = None
db_snapshot = {
'name': volname,
'id': vol['id'],
'parent_name': parent_vol['display_name'],
'parent_id': parent_vol['id'],
'fsOwner': parent_vol['fsOwner'],
'fsMode': parent_vol['fsMode'],
'expiration_hours': expiration_hours,
'retention_hours': retention_hours}
if 'snapshots' not in parent_vol:
parent_vol['snapshots'] = []
parent_vol['snapshots'].append(db_snapshot)
vol['is_snap'] = is_snap
vol['snap_metadata'] = db_snapshot
self._etcd.save_vol(parent_vol)
self._etcd.save_vol(vol)
except Exception as ex:
msg = (_('Manage volume failed Error: %s') % six.text_type(ex))
LOG.exception(msg)
undo_steps = []
undo_steps.append(
{'undo_func': self._hpeplugin_driver.manage_existing,
'params': {
'volume': volume_detail_3par,
'existing_ref': volume_detail_3par.get('name'),
'is_snap': is_snap,
'target_vol_name': existing_ref_details.get('name'),
'comment': existing_ref_details.get('comment')},
'msg': 'Cleaning up manage'})
self._rollback(undo_steps)
return json.dumps({u"Err": six.text_type(ex)})
return json.dumps({u"Err": ''})
@synchronization.synchronized_volume('{src_vol_name}')
def clone_volume(self, src_vol_name, clone_name,
size=None, cpg=None, snap_cpg=None,
current_backend='DEFAULT', clone_opts=None):
# Check if volume is present in database
LOG.info('hpedockerplugin : clone options 5 %s ' % clone_opts)
src_vol = self._etcd.get_vol_byname(src_vol_name)
mnt_conf_delay = volume.DEFAULT_MOUNT_CONFLICT_DELAY
if src_vol is None:
msg = 'source volume: %s does not exist' % src_vol_name
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
# TODO(sonivi): remove below conversion to 3par volume name, once we
# have code in place to store 3par volume name in etcd vol object
volume_3par = utils.get_3par_vol_name(src_vol.get('id'))
# check if the volume has any active task; if yes, return with an error
# add prefix '*' because offline copy task name have pattern like
# e.g. dcv-m0o5ZAwPReaZVoymnLTrMA->dcv-N.9ikeA.RiaxPP4LzecaEQ
# this will check both offline as well as online copy task
if self._hpeplugin_driver.is_vol_having_active_task(
"*%s" % volume_3par):
msg = 'source volume: %s / %s has an active task ' \
'running on the array' % (src_vol_name, volume_3par)
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
if not size:
size = src_vol['size']
if not cpg:
cpg = src_vol.get('cpg', self._hpeplugin_driver.get_cpg
(src_vol, False, allowSnap=True))
if not snap_cpg:
snap_cpg = src_vol.get('snap_cpg', self._hpeplugin_driver.
get_snapcpg(src_vol, False))
if size < src_vol['size']:
msg = 'clone volume size %s is less than source ' \
'volume size %s' % (size, src_vol['size'])
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
if 'is_snap' in src_vol and src_vol['is_snap']:
msg = 'cloning a snapshot %s is not allowed ' \
% (src_vol_name)
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
if 'snapshots' not in src_vol:
src_vol['compression'] = None
src_vol['qos_name'] = None
src_vol['mount_conflict_delay'] = mnt_conf_delay
src_vol['snapshots'] = []
self._etcd.save_vol(src_vol)
return self._clone_volume(clone_name, src_vol, size, cpg,
snap_cpg, current_backend, clone_opts)
def _create_snapshot_record(self, snap_vol, snapshot_name, undo_steps):
self._etcd.save_vol(snap_vol)
undo_steps.append({'undo_func': self._etcd.delete_vol,
'params': {'vol': snap_vol},
'msg': "Cleaning up snapshot record for '%s'"
" from ETCD..." % snapshot_name})
@synchronization.synchronized_volume('{snapshot_name}')
def create_snapshot(self, src_vol_name, schedName, snapshot_name,
snapPrefix, expiration_hrs, exphrs, retention_hrs,
rethrs, mount_conflict_delay, has_schedule,
schedFrequency, current_backend='DEFAULT'):
# Check if volume is present in database
snap = self._etcd.get_vol_byname(snapshot_name)
if snap:
msg = 'snapshot %s already exists' % snapshot_name
LOG.info(msg)
response = json.dumps({'Err': msg})
return response
return self._create_snapshot(src_vol_name, schedName, snapshot_name,
snapPrefix, expiration_hrs, exphrs,
retention_hrs, rethrs,
mount_conflict_delay, has_schedule,
schedFrequency, current_backend)
@synchronization.synchronized_volume('{src_vol_name}')
def _create_snapshot(self, src_vol_name, schedName, snapshot_name,
snapPrefix, expiration_hrs, exphrs, retention_hrs,
rethrs, mount_conflict_delay, has_schedule,
schedFrequency, current_backend):
vol = self._etcd.get_vol_byname(src_vol_name)
if vol is None:
msg = 'source volume: %s does not exist' % src_vol_name
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
volid = vol['id']
if 'has_schedule' not in vol:
vol_sched_flag = volume.DEFAULT_SCHEDULE
vol['has_schedule'] = vol_sched_flag
self._etcd.update_vol(volid, 'has_schedule', vol_sched_flag)
        # TODO(sonivi): remove below conversion to 3par volume name, once
        # we have code in place to store 3par volume name in etcd vol object
volume_3par = utils.get_3par_vol_name(volid)
        # check if the volume has any active task; if yes, return with error
# add prefix '*' because offline copy task name have pattern like
# e.g. dcv-m0o5ZAwPReaZVoymnLTrMA->dcv-N.9ikeA.RiaxPP4LzecaEQ
# this will check both offline as well as online copy task
if self._hpeplugin_driver.is_vol_having_active_task(
"*%s" % volume_3par):
            msg = 'source volume: %s / %s has an active task ' \
                  'running on the array' % (src_vol_name, volume_3par)
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
# Check if this is an old volume type. If yes, add is_snap flag to it
if 'is_snap' not in vol:
vol_snap_flag = volume.DEFAULT_TO_SNAP_TYPE
vol['is_snap'] = vol_snap_flag
self._etcd.update_vol(volid, 'is_snap', vol_snap_flag)
if 'snapshots' not in vol:
vol['snapshots'] = []
vol['compression'] = None
vol['qos_name'] = None
vol['mount_conflict_delay'] = mount_conflict_delay
vol['backend'] = current_backend
# Check if instead of specifying parent volume, user incorrectly
# specified snapshot as virtualCopyOf parameter. If yes, return error.
if 'is_snap' in vol and vol['is_snap']:
msg = 'source volume: %s is a snapshot, creating hierarchy ' \
'of snapshots is not allowed.' % src_vol_name
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
snap_cpg = None
if 'snap_cpg' in vol and vol['snap_cpg']:
snap_cpg = vol['snap_cpg']
else:
            snap_cpg = vol.get('snap_cpg', self._hpeplugin_driver.get_snapcpg(
                vol, False))
snap_size = vol['size']
snap_prov = vol['provisioning']
snap_flash = vol['flash_cache']
snap_compression = vol['compression']
snap_qos = volume.DEFAULT_QOS
is_snap = True
snap_vol = volume.createvol(snapshot_name, snap_size, snap_prov,
snap_flash, snap_compression, snap_qos,
mount_conflict_delay, is_snap, None,
snap_cpg, has_schedule,
current_backend)
snapshot_id = snap_vol['id']
if snap_vol['has_schedule']:
try:
src_3par_vol_name = utils.get_3par_vol_name(vol['id'])
self._hpeplugin_driver.create_snap_schedule(src_3par_vol_name,
schedName,
snapPrefix,
exphrs, rethrs,
schedFrequency)
except Exception as ex:
msg = (_('create snapshot failed, error is: %s')
% six.text_type(ex))
LOG.error(msg)
return json.dumps({u"Err": six.text_type(ex)})
        # this 'snapshot dict' is for creating the snap at 3par
snapshot = {'id': snapshot_id,
'display_name': snapshot_name,
'volume_id': vol['id'],
'volume_name': src_vol_name,
'expirationHours': expiration_hrs,
'retentionHours': retention_hrs,
'display_description': 'snapshot of volume %s'
% src_vol_name}
undo_steps = []
bkend_snap_name = ""
try:
bkend_snap_name = self._hpeplugin_driver.create_snapshot(
snapshot)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.delete_volume,
'params': {'volume': snapshot,
'is_snapshot': True},
'msg': 'Cleaning up backend snapshot: %s...'
% bkend_snap_name})
except Exception as ex:
msg = (_('create snapshot failed, error is: %s')
% six.text_type(ex))
LOG.error(msg)
return json.dumps({u"Err": six.text_type(ex)})
# Add back reference to child snapshot in volume metadata
db_snapshot = {'name': snapshot_name,
'id': snapshot_id,
'parent_name': src_vol_name,
'parent_id': vol['id'],
'fsMode': vol.get('fsMode'),
'fsOwner': vol.get('fsOwner'),
'expiration_hours': expiration_hrs,
'retention_hours': retention_hrs}
if has_schedule:
snap_schedule = {
'schedule_name': schedName,
'snap_name_prefix': snapPrefix,
'sched_frequency': schedFrequency,
'sched_snap_exp_hrs': exphrs,
'sched_snap_ret_hrs': rethrs}
db_snapshot['snap_schedule'] = snap_schedule
vol['snapshots'].append(db_snapshot)
snap_vol['snap_metadata'] = db_snapshot
snap_vol['backend'] = current_backend
snap_vol['3par_vol_name'] = bkend_snap_name
try:
self._create_snapshot_record(snap_vol,
snapshot_name,
undo_steps)
# For now just track volume to uuid mapping internally
# TODO: Save volume name and uuid mapping in etcd as well
# This will make get_vol_byname more efficient
self._etcd.save_vol(vol)
LOG.debug('snapshot: %(name)s was successfully saved '
'to etcd', {'name': snapshot_name})
except Exception as ex:
            msg = (_('save volume to etcd failed, error is: %s')
                   % six.text_type(ex))
LOG.error(msg)
self._rollback(undo_steps)
response = json.dumps({u"Err": six.text_type(ex)})
else:
response = json.dumps({u"Err": ''})
return response
@synchronization.synchronized_volume('{volname}')
def remove_volume(self, volname):
# Only 1 node in a multinode cluster can try to remove the volume.
        # Grab lock for volume name. If lock is in use, just return with no
        # error.
# Expand lock code inline as function based lock causes
# unexpected behavior
vol = self._etcd.get_vol_byname(volname)
if vol is None:
# Just log an error, but don't fail the docker rm command
msg = 'Volume name to remove not found: %s' % volname
LOG.error(msg)
return json.dumps({u"Err": msg})
parent_name = None
is_snap = False
if 'is_snap' in vol and vol['is_snap']:
is_snap = True
parent_name = vol['snap_metadata']['parent_name']
try:
if 'snapshots' in vol and vol['snapshots']:
msg = (_LE('Err: Volume %s has one or more child '
'snapshots - volume cannot be deleted!'
% volname))
LOG.error(msg)
response = json.dumps({u"Err": msg})
return response
else:
if 'has_schedule' in vol and vol['has_schedule']:
schedule_info = vol['snap_metadata']['snap_schedule']
sched_name = schedule_info['schedule_name']
self._hpeplugin_driver.force_remove_3par_schedule(
sched_name)
self._hpeplugin_driver.delete_volume(vol, is_snap)
                LOG.info(_LI('volume: %(name)s, was successfully deleted'),
                         {'name': volname})
if is_snap:
self.remove_snapshot(parent_name, volname)
except Exception as ex:
            msg = (_LE('Err: Failed to remove volume %s, error is %s')
                   % (volname, six.text_type(ex)))
LOG.error(msg)
return json.dumps({u"Err": six.text_type(ex)})
try:
self._etcd.delete_vol(vol)
except KeyError:
            msg = (_LW('Warning: Failed to delete volume key: %s from '
                       'etcd due to KeyError') % volname)
LOG.warning(msg)
pass
return json.dumps({u"Err": ''})
@synchronization.synchronized_volume('{volname}')
def remove_snapshot(self, volname, snapname):
LOG.info("volumedriver_remove_snapshot - getting volume %s"
% volname)
vol = self._etcd.get_vol_byname(volname)
if vol is None:
# Just log an error, but don't fail the docker rm command
            msg = (_LE('snapshot remove - parent volume name not found '
                       '%s') % volname)
LOG.error(msg)
return json.dumps({u"Err": msg})
if snapname:
snapshots = vol['snapshots']
LOG.info("Getting snapshot by name: %s" % snapname)
snapshot, idx = self._get_snapshot_by_name(snapshots,
snapname)
if snapshot:
LOG.info("Found snapshot by name: %s" % snapname)
LOG.info("Deleting snapshot in ETCD - %s" % snapname)
# Remove snapshot entry from list and save it back to
# ETCD DB
del snapshots[idx]
try:
LOG.info("Updating volume in ETCD after snapshot "
"removal - vol-name: %s" % volname)
# For now just track volume to uuid mapping internally
# TODO: Save volume name and uuid mapping in etcd as
# well. This will make get_vol_byname more efficient
self._etcd.update_vol(vol['id'],
'snapshots',
snapshots)
LOG.info('snapshot: %(name)s was successfully '
'removed', {'name': snapname})
response = json.dumps({u"Err": ''})
return response
except Exception as ex:
                    msg = (_('remove snapshot from etcd failed, error is:'
                             ' %s') % six.text_type(ex))
LOG.error(msg)
response = json.dumps({u"Err": six.text_type(ex)})
return response
else:
msg = (_LE('snapshot %s does not exist!' % snapname))
LOG.error(msg)
response = json.dumps({u"Err": msg})
return response
@synchronization.synchronized_volume('{clone_name}')
def _clone_volume(self, clone_name, src_vol, size, cpg,
snap_cpg, current_backend, clone_opts):
# Create clone volume specification
undo_steps = []
clone_vol = volume.createvol(clone_name, size,
src_vol['provisioning'],
src_vol['flash_cache'],
src_vol['compression'],
src_vol['qos_name'],
src_vol['mount_conflict_delay'],
False, cpg, snap_cpg, False,
current_backend)
try:
bkend_clone_name = self.__clone_volume__(src_vol,
clone_vol,
undo_steps)
self._apply_volume_specs(clone_vol, undo_steps)
# For now just track volume to uuid mapping internally
# TODO: Save volume name and uuid mapping in etcd as well
# This will make get_vol_byname more efficient
clone_vol['fsOwner'] = src_vol.get('fsOwner')
clone_vol['fsMode'] = src_vol.get('fsMode')
clone_vol['3par_vol_name'] = bkend_clone_name
if clone_opts is not None:
clone_vol['Options'] = clone_opts
self._etcd.save_vol(clone_vol)
except Exception as ex:
            msg = (_('Clone volume failed, error is: %s')
                   % six.text_type(ex))
LOG.error(msg)
self._rollback(undo_steps)
return json.dumps({u"Err": six.text_type(ex)})
else:
return json.dumps({u"Err": ''})
# Commenting out unused function to increase coverage
# @synchronization.synchronized_volume('{volumename}')
# def revert_to_snapshot(self, volumename, snapname):
# volume = self._etcd.get_vol_byname(volumename)
# if volume is None:
# msg = (_LE('Volume: %s does not exist' % volumename))
# LOG.info(msg)
# response = json.dumps({u"Err": msg})
# return response
#
# snapshots = volume['snapshots']
# LOG.info("Getting snapshot by name: %s" % snapname)
# snapshot, idx = self._get_snapshot_by_name(snapshots,
# snapname)
# if snapshot:
# try:
# LOG.info("Found snapshot by name %s" % snapname)
# self._hpeplugin_driver.revert_snap_to_vol(volume, snapshot)
# response = json.dumps({u"Err": ''})
# return response
# except Exception as ex:
# msg = (_('revert snapshot failed, error is: %s'),
# six.text_type(ex))
# LOG.error(msg)
# return json.dumps({u"Err": six.text_type(ex)})
# else:
# msg = (_LE('snapshot: %s does not exist!' % snapname))
# LOG.info(msg)
# response = json.dumps({u"Err": msg})
# return response
def _get_snapshot_response(self, snapinfo, snapname):
err = ''
mountdir = ''
devicename = ''
path_info = self._etcd.get_vol_path_info(snapname)
LOG.debug('Value of path info in snapshot response is %s', path_info)
if path_info is not None:
mountdir = path_info['mount_dir']
devicename = path_info['path']
# use volinfo as volname could be partial match
snapshot = {'Name': snapname,
'Mountpoint': mountdir,
'Devicename': devicename,
'Status': {}}
metadata = snapinfo['snap_metadata']
parent_name = metadata['parent_name']
parent_id = metadata['parent_id']
expiration_hours = metadata['expiration_hours']
retention_hours = metadata['retention_hours']
snap_detail = {}
snap_detail['id'] = snapinfo.get('id')
snap_detail['size'] = snapinfo.get('size')
snap_detail['compression'] = snapinfo.get('compression')
snap_detail['provisioning'] = snapinfo.get('provisioning')
snap_detail['is_snap'] = snapinfo.get('is_snap')
snap_detail['parent_volume'] = parent_name
snap_detail['parent_id'] = parent_id
snap_detail['fsOwner'] = snapinfo['snap_metadata'].get('fsOwner')
snap_detail['fsMode'] = snapinfo['snap_metadata'].get('fsMode')
snap_detail['expiration_hours'] = expiration_hours
snap_detail['retention_hours'] = retention_hours
snap_detail['mountConflictDelay'] = snapinfo.get(
'mount_conflict_delay')
snap_detail['snap_cpg'] = snapinfo.get('snap_cpg')
snap_detail['backend'] = snapinfo.get('backend')
if 'snap_schedule' in metadata:
snap_detail['snap_schedule'] = metadata['snap_schedule']
LOG.info('_get_snapshot_response: adding 3par vol info')
if '3par_vol_name' in snapinfo:
snap_detail['3par_vol_name'] = snapinfo.get('3par_vol_name')
else:
snap_detail['3par_vol_name'] = utils.get_3par_name(snapinfo['id'],
True)
snapshot['Status'].update({'snap_detail': snap_detail})
response = json.dumps({u"Err": err, u"Volume": snapshot})
LOG.debug("Get volume/snapshot: \n%s" % str(response))
return response
def _get_snapshot_etcd_record(self, parent_volname, snapname):
volumeinfo = self._etcd.get_vol_byname(parent_volname)
snapshots = volumeinfo.get('snapshots', None)
if 'snap_cpg' in volumeinfo:
snapshot_cpg = volumeinfo.get('snap_cpg')
else:
snapshot_cpg = self._hpeplugin_driver.get_snapcpg(volumeinfo,
False)
if snapshots:
self._sync_snapshots_from_array(volumeinfo['id'],
volumeinfo['snapshots'],
snapshot_cpg)
snapinfo = self._etcd.get_vol_byname(snapname)
LOG.debug('value of snapinfo from etcd read is %s', snapinfo)
if snapinfo is None:
                msg = (_LE('Snapshot_get: snapname not found after sync %s')
                       % snapname)
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
snapinfo['snap_cpg'] = snapshot_cpg
self._etcd.update_vol(snapinfo['id'], 'snap_cpg', snapshot_cpg)
return self._get_snapshot_response(snapinfo, snapname)
else:
            msg = (_LE('Snapshot_get: snapname not found after sync %s')
                   % snapname)
LOG.debug(msg)
response = json.dumps({u"Err": msg})
return response
def get_volume_snap_details(self, volname, snapname, qualified_name):
volinfo = self._etcd.get_vol_byname(volname)
LOG.info("Value of volinfo is: %s", volinfo)
if volinfo is None:
            msg = (_LE('Volume Get: Volume name not found %s') % volname)
LOG.warning(msg)
response = json.dumps({u"Err": ""})
return response
if 'is_snap' in volinfo and volinfo['is_snap']:
LOG.debug('type of is_snap is %s', type(volinfo['is_snap']))
snap_metadata = volinfo['snap_metadata']
parent_volname = snap_metadata['parent_name']
snapname = snap_metadata['name']
return self._get_snapshot_etcd_record(parent_volname, snapname)
if 'snap_cpg' not in volinfo:
snap_cpg = self._hpeplugin_driver.get_snapcpg(volinfo, False)
if snap_cpg:
volinfo['snap_cpg'] = snap_cpg
self._etcd.update_vol(volinfo['id'], 'snap_cpg', snap_cpg)
if 'cpg' not in volinfo:
volinfo['cpg'] = self._hpeplugin_driver.get_cpg(volinfo, False,
allowSnap=False)
self._etcd.update_vol(volinfo['id'], 'cpg', volinfo['cpg'])
err = ''
mountdir = ''
devicename = ''
path_info = self._etcd.get_vol_path_info(volname)
if path_info is not None:
mountdir = path_info['mount_dir']
devicename = path_info['path']
# use volinfo as volname could be partial match
volume = {'Name': qualified_name,
'Mountpoint': mountdir,
'Devicename': devicename,
'Status': {}}
snapshot_cpg = volinfo.get('snap_cpg', volinfo.get('cpg'))
if volinfo.get('snapshots') and volinfo.get('snapshots') != '':
self._sync_snapshots_from_array(volinfo['id'],
volinfo['snapshots'], snapshot_cpg)
# Is this request for snapshot inspect?
if snapname:
# Any snapshots left after synchronization with array?
if volinfo['snapshots']:
snapshot, idx = \
self._get_snapshot_by_name(
volinfo['snapshots'],
snapname)
settings = {"Settings": {
'expirationHours': snapshot['expiration_hours'],
'retentionHours': snapshot['retention_hours']}}
volume['Status'].update(settings)
else:
                msg = (_LE('Snapshot Get: Snapshot name not found %s')
                       % qualified_name)
LOG.warning(msg)
# Should error be returned here or success?
response = json.dumps({u"Err": ""})
return response
else:
snapshots = volinfo.get('snapshots', None)
if snapshots:
ss_list_to_show = []
for s in snapshots:
snapshot = {'Name': s['name'],
'ParentName': volname}
# metadata = s['snap_metadata']
if 'snap_schedule' in s:
snapshot['snap_schedule'] = s['snap_schedule']
ss_list_to_show.append(snapshot)
volume['Status'].update({'Snapshots': ss_list_to_show})
backend_vol_name = utils.get_3par_vol_name(volinfo['id'])
self._set_qos_and_flash_cache_info(backend_vol_name, volinfo)
qos_name = volinfo.get('qos_name')
if qos_name is not None:
try:
qos_detail = self._hpeplugin_driver.get_qos_detail(
qos_name)
qos_filter = self._get_required_qos_field(qos_detail)
volume['Status'].update({'qos_detail': qos_filter})
except Exception as ex:
msg = "ERROR: Failed to retrieve QoS '%s' from 3PAR" \
% qos_name
volume['Status'].update({'qos_detail': msg})
msg += ' %s' % six.text_type(ex)
LOG.error(msg)
flash_cache = volinfo.get('flash_cache')
if flash_cache is not None:
flash_cache = 'true' if flash_cache else 'false'
vol_detail = {}
vol_detail['id'] = volinfo.get('id')
vol_detail['size'] = volinfo.get('size')
vol_detail['flash_cache'] = flash_cache
vol_detail['compression'] = volinfo.get('compression')
vol_detail['provisioning'] = volinfo.get('provisioning')
vol_detail['fsOwner'] = volinfo.get('fsOwner')
vol_detail['fsMode'] = volinfo.get('fsMode')
vol_detail['mountConflictDelay'] = volinfo.get(
'mount_conflict_delay')
vol_detail['cpg'] = volinfo.get('cpg')
vol_detail['snap_cpg'] = volinfo.get('snap_cpg')
vol_detail['backend'] = volinfo.get('backend')
vol_detail['domain'] = self._hpeplugin_driver.get_domain(
vol_detail['cpg'])
LOG.info(' get_volume_snap_details : adding 3par vol info')
if '3par_vol_name' in volinfo:
vol_detail['3par_vol_name'] = volinfo['3par_vol_name']
else:
vol_detail['3par_vol_name'] = \
utils.get_3par_name(volinfo['id'],
False)
if 'Options' in volinfo:
vol_detail['Options'] = volinfo['Options']
if volinfo.get('rcg_info'):
vol_detail['secondary_cpg'] = \
self.tgt_bkend_config.hpe3par_cpg[0]
vol_detail['secondary_snap_cpg'] = \
self.tgt_bkend_config.hpe3par_snapcpg[0]
# fetch rcg details and display
rcg_name = volinfo['rcg_info']['local_rcg_name']
try:
rcg_detail = self._hpeplugin_driver.get_rcg(rcg_name)
rcg_filter = self._get_required_rcg_field(rcg_detail)
volume['Status'].update({'rcg_detail': rcg_filter})
except Exception as ex:
msg = "ERROR: Failed to retrieve RCG '%s' from 3PAR" \
% rcg_name
volume['Status'].update({'rcg_detail': msg})
msg += ' %s' % six.text_type(ex)
LOG.error(msg)
volume['Status'].update({'volume_detail': vol_detail})
response = json.dumps({u"Err": err, u"Volume": volume})
LOG.debug("Get volume/snapshot: \n%s" % str(response))
return response
def list_volumes(self):
volumes = self._etcd.get_all_vols()
volumelist = []
for volinfo in volumes:
path_info = self._etcd.get_path_info_from_vol(volinfo)
if path_info is not None and 'mount_dir' in path_info:
mountdir = path_info['mount_dir']
devicename = path_info['path']
else:
mountdir = ''
devicename = ''
volume = {'Name': volinfo['display_name'],
'Devicename': devicename,
'size': volinfo['size'],
'Mountpoint': mountdir,
'Status': {}}
volumelist.append(volume)
return volumelist
def get_path(self, volname):
volinfo = self._etcd.get_vol_byname(volname)
if volinfo is None:
            msg = (_LE('Volume Path: Volume name not found %s') % volname)
LOG.warning(msg)
response = json.dumps({u"Err": "No Mount Point",
u"Mountpoint": ""})
return response
path_name = ''
path_info = self._etcd.get_vol_path_info(volname)
if path_info is not None:
path_name = path_info['mount_dir']
response = json.dumps({u"Err": '', u"Mountpoint": path_name})
return response
@staticmethod
def _is_vol_not_mounted(vol):
return 'node_mount_info' not in vol
@staticmethod
def _is_first_mount(node_mount_info):
return (len(node_mount_info) == 0)
def _is_vol_mounted_on_this_node(self, node_mount_info, vol):
if self._node_id in node_mount_info:
            # get the information from etcd about where the volume should be mounted
            path_info = self._etcd.get_path_info_from_vol(vol)
            # what matters here is the device that should be mounted...
path_name = path_info['path']
# ... and the target it should be mounted to!
mount_dir = path_info['mount_dir']
# now check if this mount is really present on the node
if fileutil.check_if_mounted(path_name, mount_dir):
# Multiple containers mounting the same volume on same node
return VolumeOwnedAndMounted
else:
# This is a case of node reboot or deleted Stateful-set POD
return VolumeOwnedAndNotMounted
else:
# Failover case where volume is evicted from other node to this one
return VolumeNotOwned
def _update_mount_id_list(self, vol, mount_id):
node_mount_info = vol['node_mount_info']
# Check if mount_id is unique
if mount_id in node_mount_info[self._node_id]:
LOG.info("Received duplicate mount-id: %s. Ignoring"
% mount_id)
return
LOG.info("Adding new mount-id %s to node_mount_info..."
% mount_id)
node_mount_info[self._node_id].append(mount_id)
LOG.info("Updating etcd with modified node_mount_info: %s..."
% node_mount_info)
self._etcd.update_vol(vol['id'],
'node_mount_info',
node_mount_info)
LOG.info("Updated etcd with modified node_mount_info: %s!"
% node_mount_info)
def _get_success_response(self, vol):
path_info = json.loads(vol['path_info'])
path = FilePath(path_info['device_info']['path']).realpath()
response = json.dumps({"Err": '', "Name": vol['display_name'],
"Mountpoint": path_info['mount_dir'],
"Devicename": path.path})
return response
def _wait_for_graceful_vol_unmount(self, vol):
unmounted = False
vol_id = vol['id']
volname = vol['display_name']
mount_conflict_delay = vol['mount_conflict_delay']
for checks in range(0, mount_conflict_delay):
time.sleep(1)
LOG.info("Checking if volume %s got unmounted #%s..."
% (volname, checks))
vol = self._etcd.get_vol_by_id(vol_id)
# Check if unmount that was in progress has cleared the
# node entry from ETCD database
if 'node_mount_info' not in vol:
LOG.info("Volume %s got unmounted after %s "
"checks!!!" % (volname, checks))
unmounted = True
break
LOG.info("Volume %s still unmounting #%s..."
% (volname, checks))
return unmounted
def _force_remove_vlun(self, vol, is_snap):
bkend_vol_name = utils.get_3par_name(vol['id'], is_snap)
# Check if replication is configured and volume is
# populated with the RCG
if (self.tgt_bkend_config and 'rcg_info' in vol and
vol['rcg_info'] is not None):
if self.tgt_bkend_config.quorum_witness_ip:
LOG.info("Peer Persistence setup: Removing VLUNs "
"forcefully from remote backend...")
self._primary_driver.force_remove_volume_vlun(bkend_vol_name)
self._remote_driver.force_remove_volume_vlun(bkend_vol_name)
LOG.info("Peer Persistence setup: VLUNs forcefully "
"removed from remote backend!")
else:
LOG.info("Active/Passive setup: Getting active driver...")
try:
driver = self._get_target_driver(vol['rcg_info'])
if driver:
LOG.info("Active/Passive setup: Got active driver!")
LOG.info("Active/Passive setup: Removing VLUNs "
"forcefully from remote backend...")
driver.force_remove_volume_vlun(bkend_vol_name)
LOG.info("Active/Passive setup: VLUNs forcefully "
"removed from remote backend!")
else:
msg = "Failed to force remove VLUN(s) " \
"Could not determine the target array based on" \
"state of RCG %s." % \
vol['rcg_info']['local_rcg_name']
LOG.error(msg)
raise exception.HPEDriverForceRemoveVLUNFailed(
reason=msg)
except Exception as ex:
msg = "Failed to force remove VLUN(s). " \
"Exception: %s" % six.text_type(ex)
LOG.error(msg)
raise exception.HPEDriverForceRemoveVLUNFailed(
reason=six.text_type(ex))
else:
LOG.info("Removing VLUNs forcefully from remote backend...")
self._primary_driver.force_remove_volume_vlun(bkend_vol_name)
LOG.info("VLUNs forcefully removed from remote backend!")
def mount_volume(self, volname, vol_mount, mount_id):
vol = self._etcd.get_vol_byname(volname)
if vol is None:
            msg = (_LE('Volume mount name not found %s') % volname)
LOG.error(msg)
raise exception.HPEPluginMountException(reason=msg)
node_mount_info = vol.get('node_mount_info')
if node_mount_info:
is_vol_owned = self._is_vol_mounted_on_this_node(
node_mount_info, vol
)
if is_vol_owned == VolumeNotOwned:
# Volume mounted on different node
LOG.info("Volume mounted on a different node. Waiting for "
"other node to gracefully unmount the volume...")
self._wait_for_graceful_vol_unmount(vol)
# Grab lock on volume name and continue with mount
return self._synchronized_mount_volume(volname, vol_mount, mount_id)
@synchronization.synchronized_volume('{volname}')
def _synchronized_mount_volume(self, volname, vol_mount, mount_id):
root_helper = 'sudo'
connector_info = connector.get_connector_properties(
root_helper, self._my_ip, multipath=self._use_multipath,
enforce_multipath=self._enforce_multipath)
def _mount_volume(driver):
LOG.info("Entered _mount_volume")
try:
# Call driver to initialize the connection
driver.create_export(vol, connector_info, is_snap)
connection_info = \
driver.initialize_connection(
vol, connector_info, is_snap)
LOG.debug("Initialized Connection Successful!")
LOG.debug('connection_info: %(connection_info)s, '
'was successfully retrieved',
{'connection_info': json.dumps(connection_info)})
undo_steps.append(
{'undo_func': driver.terminate_connection,
'params': (vol, connector_info, is_snap),
'msg': 'Terminating connection to volume: %s...'
% volname})
except Exception as ex:
                msg = (_('Initialize Connection Failed: '
                         'connection info retrieval failed, error is: %s')
                       % six.text_type(ex))
LOG.error(msg)
self._rollback(undo_steps)
raise exception.HPEPluginMountException(reason=msg)
# Call OS Brick to connect volume
try:
LOG.debug("OS Brick Connector Connecting Volume...")
device_info = self._connector.connect_volume(
connection_info['data'])
undo_steps.append(
{'undo_func': self._connector.disconnect_volume,
'params': (connection_info['data'], None),
'msg': 'Undoing connection to volume: %s...' % volname})
except Exception as ex:
                msg = (_('OS Brick connect volume failed, error is: %s')
                       % six.text_type(ex))
LOG.error(msg)
self._rollback(undo_steps)
raise exception.HPEPluginMountException(reason=msg)
return device_info, connection_info
# Check for volume's existence once again after lock has been
# acquired. This is just to ensure another thread didn't delete
# the volume before reaching this point in mount-volume flow
vol = self._etcd.get_vol_byname(volname)
if vol is None:
            msg = (_LE('Volume mount name not found %s') % volname)
LOG.error(msg)
raise exception.HPEPluginMountException(reason=msg)
undo_steps = []
volid = vol['id']
# Update volume metadata with the fields that may not be
# there due to the fact that this volume might have been
# created using an older version of plugin
is_snap = False
if 'is_snap' not in vol:
vol['is_snap'] = volume.DEFAULT_TO_SNAP_TYPE
self._etcd.update_vol(volid, 'is_snap', is_snap)
elif vol['is_snap']:
is_snap = vol['is_snap']
vol['fsOwner'] = vol['snap_metadata'].get('fsOwner')
vol['fsMode'] = vol['snap_metadata'].get('fsMode')
if 'mount_conflict_delay' not in vol:
m_conf_delay = volume.DEFAULT_MOUNT_CONFLICT_DELAY
vol['mount_conflict_delay'] = m_conf_delay
self._etcd.update_vol(volid, 'mount_conflict_delay',
m_conf_delay)
# Initialize node-mount-info if volume is being mounted
# for the first time
if self._is_vol_not_mounted(vol):
LOG.info("Initializing node_mount_info... adding first "
"mount ID %s" % mount_id)
node_mount_info = {self._node_id: [mount_id]}
vol['node_mount_info'] = node_mount_info
else:
# Volume is in mounted state - Volume fencing logic begins here
node_mount_info = vol['node_mount_info']
flag = self._is_vol_mounted_on_this_node(node_mount_info, vol)
# If mounted on this node itself then just append mount-id
if flag == VolumeOwnedAndMounted:
self._update_mount_id_list(vol, mount_id)
return self._get_success_response(vol)
elif flag == VolumeNotOwned:
# Volume mounted on different node
LOG.info("Volume not gracefully unmounted by other node")
LOG.info("%s" % vol)
self._force_remove_vlun(vol, is_snap)
# Since VLUNs exported to previous node were forcefully
# removed, cache the connection information so that it
# can be used later when user tries to un-mount volume
# from the previous node
if 'path_info' in vol:
path_info = vol['path_info']
old_node_id = list(node_mount_info.keys())[0]
old_path_info = vol.get('old_path_info', [])
# Check if old_node_id is already present in old_path_info
# If found, replace it by removing the existing ones and
# appending the new one
if old_path_info:
LOG.info("Old path info found! Removing any "
"duplicate entries...")
                        # Iterate without a break statement so that every
                        # duplicate (node_id, path_info) tuple with the same
                        # node_id is removed, not just the first one
updated_list = []
for opi in old_path_info:
node_id = opi[0]
if old_node_id == node_id:
LOG.info("Found old-path-info tuple "
"having node-id %s for volume %s. "
"Skipping it..."
% (node_id, volname))
continue
updated_list.append(opi)
old_path_info = updated_list
old_path_info.append((old_node_id, path_info))
self._etcd.update_vol(volid, 'old_path_info',
old_path_info)
node_mount_info = {self._node_id: [mount_id]}
LOG.info("New node_mount_info set: %s" % node_mount_info)
elif flag == VolumeOwnedAndNotMounted:
LOG.info("This might be the case of reboot...")
LOG.info("Volume %s is owned by this node %s but it is not "
"in mounted state" % (volname, self._node_id))
# We need to simply mount the volume using the information
# in ETCD
path_info = self._etcd.get_path_info_from_vol(vol)
if path_info:
dev_sym_link = path_info['device_info']['path']
etcd_dev_path = path_info['path']
real_dev_path = os.path.realpath(dev_sym_link)
if etcd_dev_path != real_dev_path:
LOG.info("Multipath device remapped for %s. "
"[Old-dev: %s, New-dev: %s]. "
"Using new device for mounting!" %
(dev_sym_link, etcd_dev_path, real_dev_path))
                        # Blindly assign real_dev_path as the new device path
path_info['path'] = real_dev_path
mount_dir = path_info['mount_dir']
# Ensure:
# 1. we have a multi-path device
# 2. mount dir is present
# 3. device symlink is not broken
if 'dm-' in real_dev_path and \
fileutil.check_if_file_exists(mount_dir):
if fileutil.check_if_file_exists(real_dev_path):
LOG.info("Case of reboot confirmed! Mounting "
"device %s on path %s"
% (dev_sym_link, mount_dir))
try:
fileutil.mount_dir(dev_sym_link, mount_dir)
self._etcd.update_vol(vol['id'],
'path_info',
json.dumps(path_info))
except Exception as ex:
msg = "Mount volume failed: %s" % \
six.text_type(ex)
LOG.error(msg)
self._rollback(undo_steps)
response = json.dumps({"Err": '%s' % msg})
return response
else:
mount_ids = node_mount_info[self._node_id]
if mount_id not in mount_ids:
# In case of reboot, mount-id list will
# have a previous stale mount-id which
# if not cleaned will disallow actual
# unmount of the volume forever. Hence
# creating new mount-id list with just
# the new mount_id received
node_mount_info[self._node_id] = \
[mount_id]
self._etcd.update_vol(vol['id'],
'node_mount_info',
node_mount_info)
return self._get_success_response(vol)
else:
LOG.info("Symlink %s exists but corresponding "
"device %s does not" %
(dev_sym_link, real_dev_path))
pri_connection_info = None
sec_connection_info = None
# Check if replication is configured and volume is
# populated with the RCG
if (self.tgt_bkend_config and 'rcg_info' in vol and
vol['rcg_info'] is not None):
LOG.info("This is a replication setup")
# Check if this is Active/Passive based replication
if self.tgt_bkend_config.quorum_witness_ip:
LOG.info("Peer Persistence has been configured")
# This is Peer Persistence setup
LOG.info("Mounting volume on primary array...")
device_info, pri_connection_info = _mount_volume(
self._primary_driver)
LOG.info("Volume successfully mounted on primary array!"
"pri_connection_info: %s" % pri_connection_info)
LOG.info("Mounting volume on secondary array...")
sec_device_info, sec_connection_info = _mount_volume(
self._remote_driver)
LOG.info("Volume successfully mounted on secondary array!"
"sec_connection_info: %s" % sec_connection_info)
else:
# In case failover/failback has happened at the backend, while
# mounting the volume, the plugin needs to figure out the
# target array
LOG.info("Active/Passive replication has been configured")
driver = self._get_target_driver(vol['rcg_info'])
device_info, pri_connection_info = _mount_volume(driver)
LOG.info("Volume successfully mounted on active array!"
"active_connection_info: %s" % pri_connection_info)
else:
# hpeplugin_driver will always point to the currently active array
# Post-failover, it will point to secondary_driver
LOG.info("Single array setup has been configured")
device_info, pri_connection_info = _mount_volume(
self._hpeplugin_driver)
LOG.info("Volume successfully mounted on the array!"
"pri_connection_info: %s" % pri_connection_info)
# Make sure the path exists
path = FilePath(device_info['path']).realpath()
if path.exists is False:
            msg = (_('path: %s, does not exist') % path)
LOG.error(msg)
self._rollback(undo_steps)
raise exception.HPEPluginMountException(reason=msg)
LOG.debug('path for volume: %(name)s, was successfully created: '
'%(device)s realpath is: %(realpath)s',
{'name': volname, 'device': device_info['path'],
'realpath': path.path})
# Create filesystem on the new device
if fileutil.has_filesystem(path.path) is False:
fileutil.create_filesystem(path.path)
LOG.debug('filesystem successfully created on : %(path)s',
{'path': path.path})
# Determine if we need to mount the volume
if vol_mount == volume.DEFAULT_MOUNT_VOLUME:
# mkdir for mounting the filesystem
if self._host_config.mount_prefix:
mount_prefix = self._host_config.mount_prefix
else:
mount_prefix = None
mount_dir = fileutil.mkdir_for_mounting(device_info['path'],
mount_prefix)
LOG.debug('Directory: %(mount_dir)s, '
'successfully created to mount: '
'%(mount)s',
{'mount_dir': mount_dir, 'mount': device_info['path']})
undo_steps.append(
{'undo_func': fileutil.remove_dir,
'params': mount_dir,
'msg': 'Removing mount directory: %s...' % mount_dir})
# mount the directory
try:
fileutil.mount_dir(path.path, mount_dir)
LOG.debug('Device: %(path)s successfully mounted on %(mount)s',
{'path': path.path, 'mount': mount_dir})
undo_steps.append(
{'undo_func': fileutil.umount_dir,
'params': mount_dir,
'msg': 'Unmounting directory: %s...' % mount_dir})
except Exception as ex:
msg = "Mount volume failed: %s" % six.text_type(ex)
LOG.error(msg)
self._rollback(undo_steps)
response = json.dumps({"Err": '%s' % msg})
return response
# TODO: find out how to invoke mkfs so that it creates the
# filesystem without the lost+found directory
# KLUDGE!!!!!
lostfound = mount_dir + '/lost+found'
lfdir = FilePath(lostfound)
if lfdir.exists and fileutil.remove_dir(lostfound):
LOG.debug('Successfully removed : '
'%(lost)s from mount: %(mount)s',
{'lost': lostfound, 'mount': mount_dir})
else:
mount_dir = ''
try:
if 'fsOwner' in vol and vol['fsOwner']:
fs_owner = vol['fsOwner'].split(":")
uid = int(fs_owner[0])
gid = int(fs_owner[1])
os.chown(mount_dir, uid, gid)
if 'fsMode' in vol and vol['fsMode']:
mode = str(vol['fsMode'])
chmod(mode, mount_dir)
path_info = {}
path_info['name'] = volname
path_info['path'] = path.path
path_info['device_info'] = device_info
path_info['connection_info'] = pri_connection_info
path_info['mount_dir'] = mount_dir
if sec_connection_info:
path_info['remote_connection_info'] = sec_connection_info
LOG.info("Updating node_mount_info in etcd with mount_id %s..."
% mount_id)
self._etcd.update_vol(volid,
'node_mount_info',
node_mount_info)
LOG.info("node_mount_info updated successfully in etcd with "
"mount_id %s" % mount_id)
self._etcd.update_vol(volid, 'path_info', json.dumps(path_info))
response = json.dumps({u"Err": '', u"Name": volname,
u"Mountpoint": mount_dir,
u"Devicename": path.path})
except Exception as ex:
self._rollback(undo_steps)
response = json.dumps({"Err": '%s' % six.text_type(ex)})
return response
def _get_target_driver(self, rcg_info):
local_rcg = None
rcg_name = rcg_info.get('local_rcg_name')
try:
LOG.info("Getting local RCG: %s" % rcg_name)
local_rcg = self._primary_driver.get_rcg(rcg_name)
local_role_reversed = local_rcg['targets'][0]['roleReversed']
except Exception as ex:
msg = "There was an error fetching the remote copy " \
"group %s from primary array: %s" % \
(rcg_name, six.text_type(ex))
LOG.error(msg)
remote_rcg = None
remote_rcg_name = rcg_info.get('remote_rcg_name')
try:
LOG.info("Getting remote RCG: %s" % remote_rcg_name)
remote_rcg = self._remote_driver.get_rcg(remote_rcg_name)
remote_role_reversed = remote_rcg['targets'][0]['roleReversed']
except Exception as ex:
msg = "There was an error fetching the remote copy " \
"group %s from secondary array: %s" % \
(remote_rcg_name, six.text_type(ex))
LOG.error(msg)
# Both arrays are up - this could just be a group fail-over
if local_rcg and remote_rcg:
LOG.info("Got both local and remote RCGs! Checking roles...")
# State before to fail-over
if local_rcg['role'] == PRIMARY and not local_role_reversed and \
remote_rcg['role'] == SECONDARY and not remote_role_reversed:
LOG.info("Primary array is the active array")
return self._primary_driver
# Primary array is either down or RCG under maintenance
# Allow remote target driver to take over
if local_rcg['role'] == PRIMARY and not local_role_reversed and \
remote_rcg['role'] == PRIMARY_REV and remote_role_reversed:
msg = "Secondary array is the active array"
LOG.info(msg)
return self._remote_driver
# State post recover
if remote_rcg['role'] == PRIMARY and remote_role_reversed and \
local_rcg['role'] == SECONDARY and local_role_reversed:
LOG.info("Secondary array is the active array")
return self._remote_driver
msg = (_("Remote copy group %s is being failed over or failed "
"back. Unable to determine RCG location") % rcg_name)
LOG.error(msg)
raise exception.RcgStateInTransitionException(reason=msg)
if local_rcg:
if local_rcg['role'] == PRIMARY and not local_role_reversed:
LOG.info("Primary array is the active array")
return self._primary_driver
if remote_rcg:
if remote_rcg['role'] == PRIMARY and remote_role_reversed:
LOG.info("Secondary array is the active array")
return self._remote_driver
msg = (_("Failed to get RCG %s. Unable to determine RCG location")
% rcg_name)
LOG.error(msg)
raise exception.HPEDriverRemoteCopyGroupNotFound(name=rcg_name)
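    # The role checks above boil down to (illustrative summary):
    #   local PRIMARY (not reversed) + remote SECONDARY (not reversed) -> primary driver
    #   local PRIMARY (not reversed) + remote PRIMARY_REV (reversed)   -> remote driver
    #   local SECONDARY (reversed)   + remote PRIMARY (reversed)       -> remote driver
    #   any other combination with both RCGs reachable -> RcgStateInTransitionException
    #   only one RCG reachable -> its driver if it holds the PRIMARY role,
    #   otherwise HPEDriverRemoteCopyGroupNotFound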
@synchronization.synchronized_volume('{volname}')
def unmount_volume(self, volname, vol_mount, mount_id):
vol = self._etcd.get_vol_byname(volname)
if vol is None:
            msg = (_LE('Volume unmount name not found %s') % volname)
LOG.error(msg)
raise exception.HPEPluginUMountException(reason=msg)
volid = vol['id']
is_snap = vol['is_snap']
path_info = None
node_owns_volume = True
# Start of volume fencing
LOG.info('Unmounting volume: %s' % vol)
if 'node_mount_info' in vol:
node_mount_info = vol['node_mount_info']
# Check if this node still owns the volume. If not, then it is
# not possible to proceed with cleanup as the volume meta-data
# context was modified by other node and it's not relevant for
# this node anymore.
# TODO: To solve the above issue, when a volume is re-mounted
# forcibly on other node, that other node should save the volume
# meta-data in a different ETCD root for it to be accessible by
# this node. Once this node discovers that the volume is owned
# by some other node, it can go to that different ETCD root to
# fetch the volume meta-data and do the cleanup.
if self._node_id not in node_mount_info:
if 'old_path_info' in vol:
LOG.info("Old path info present in volume: %s"
% path_info)
for pi in vol['old_path_info']:
node_id = pi[0]
if node_id == self._node_id:
LOG.info("Found matching old path info for old "
"node ID: %s" % six.text_type(pi))
path_info = pi
node_owns_volume = False
break
if path_info:
LOG.info("Removing old path info for node %s from ETCD "
"volume meta-data..." % self._node_id)
vol['old_path_info'].remove(path_info)
if len(vol['old_path_info']) == 0:
LOG.info("Last old_path_info found. "
"Removing it too...")
vol.pop('old_path_info')
LOG.info("Updating volume meta-data: %s..." % vol)
self._etcd.save_vol(vol)
LOG.info("Volume meta-data updated: %s" % vol)
path_info = json.loads(path_info[1])
LOG.info("Cleaning up devices using old_path_info: %s"
% path_info)
else:
LOG.info("Volume '%s' is mounted on another node. "
"No old_path_info is present on ETCD. Unable"
"to cleanup devices!" % volname)
return json.dumps({u"Err": ""})
else:
LOG.info("node_id '%s' is present in vol mount info"
% self._node_id)
mount_id_list = node_mount_info[self._node_id]
LOG.info("Current mount_id_list %s " % mount_id_list)
try:
mount_id_list.remove(mount_id)
except ValueError as ex:
LOG.exception('Ignoring exception: %s' % ex)
pass
LOG.info("Updating node_mount_info '%s' in etcd..."
% node_mount_info)
# Update the mount_id list in etcd
self._etcd.update_vol(volid, 'node_mount_info',
node_mount_info)
LOG.info("Updated node_mount_info '%s' in etcd!"
% node_mount_info)
if len(mount_id_list) > 0:
# Don't proceed with unmount
LOG.info("Volume still in use by %s containers... "
"no unmounting done!" % len(mount_id_list))
return json.dumps({u"Err": ''})
else:
# delete the node_id key from node_mount_info
LOG.info("Removing node_mount_info %s",
node_mount_info)
vol.pop('node_mount_info')
LOG.info("Saving volume to etcd: %s..." % vol)
self._etcd.save_vol(vol)
LOG.info("Volume saved to etcd: %s!" % vol)
# TODO: Requirement #5 will bring the flow here but the below flow
# may result into exception. Need to ensure it doesn't happen
if not path_info:
path_info = self._etcd.get_vol_path_info(volname)
# path_info = vol.get('path_info', None)
if path_info:
path_name = path_info['path']
connection_info = path_info['connection_info']
mount_dir = path_info['mount_dir']
else:
            msg = (_LE('Volume unmount path info not found %s') % volname)
LOG.error(msg)
raise exception.HPEPluginUMountException(reason=msg)
# Get connector info from OS Brick
# TODO: retrieve use_multipath and enforce_multipath from config file
root_helper = 'sudo'
connector_info = connector.get_connector_properties(
root_helper, self._my_ip, multipath=self._use_multipath,
enforce_multipath=self._enforce_multipath)
# Determine if we need to unmount a previously mounted volume
if vol_mount is volume.DEFAULT_MOUNT_VOLUME:
# unmount directory
fileutil.umount_dir(mount_dir)
# remove directory
fileutil.remove_dir(mount_dir)
# Changed asynchronous disconnect_volume to sync call
# since it causes a race condition between unmount and
# mount operation on the same volume. This scenario is
# more noticed in case of repeated mount & unmount
# operations on the same volume. Refer Issue #64
if connection_info:
LOG.info(_LI('sync call os brick to disconnect volume'))
self._connector.disconnect_volume(connection_info['data'], None)
LOG.info(_LI('end of sync call to disconnect volume'))
remote_connection_info = path_info.get('remote_connection_info')
# Issue#272 Fix: Don't allow disconnect_volume on secondary array
# for ISCSI. OS-Brick cleans up all the devices in the above call
# only for ISCSI. If we allow the below disconnect-volume to
# execute, OS-Brick throws exception aborting the remaining steps
# thereby leaving behind VLUN and ETCD entries
if remote_connection_info and \
remote_connection_info['driver_volume_type'] != 'iscsi':
LOG.info('sync call os brick to disconnect remote volume')
self._connector.disconnect_volume(
remote_connection_info['data'], None)
LOG.info('end of sync call to disconnect remote volume')
def _unmount_volume(driver):
try:
# Call driver to terminate the connection
driver.terminate_connection(vol, connector_info,
is_snap)
LOG.info(_LI('connection_info: %(connection_info)s, '
'was successfully terminated'),
{'connection_info': json.dumps(connection_info)})
except Exception as ex:
                msg = (_LE('connection info termination failed %s')
                       % six.text_type(ex))
LOG.error(msg)
# Not much we can do here, so just continue on with unmount
# We need to ensure we update etcd path_info so the stale
# path does not stay around
# raise exception.HPEPluginUMountException(reason=msg)
_unmount_volume(self._hpeplugin_driver)
# In case of Peer Persistence, volume is mounted on the secondary
# array as well. It should be unmounted too
if self.tgt_bkend_config:
_unmount_volume(self._remote_driver)
# TODO: Create path_info list as we can mount the volume to multiple
# hosts at the same time.
# If this node owns the volume then update path_info
if node_owns_volume:
self._etcd.update_vol(volid, 'path_info', None)
LOG.info(_LI('path for volume: %(name)s, was successfully removed: '
'%(path_name)s'), {'name': volname,
'path_name': path_name})
response = json.dumps({u"Err": ''})
return response
def _create_volume(self, vol_specs, undo_steps):
bkend_vol_name = self._hpeplugin_driver.create_volume(vol_specs)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.delete_volume,
'params': {'volume': vol_specs},
'msg': 'Cleaning up backend volume: %s...' % bkend_vol_name})
return bkend_vol_name
def __clone_volume__(self, src_vol, clone_vol, undo_steps):
bkend_vol_name = self._hpeplugin_driver.create_cloned_volume(
clone_vol, src_vol)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.delete_volume,
'params': {'volume': clone_vol},
'msg': 'Cleaning up backend volume: %s...' % bkend_vol_name})
return bkend_vol_name
def _apply_volume_specs(self, vol, undo_steps):
vvs_name = vol.get('qos_name')
if vol['flash_cache']:
# If not a pre-created VVS, create one
if not vvs_name:
vvs_name = self._create_vvs(vol['id'], undo_steps)
if vvs_name is not None:
self._set_flash_cache_for_volume(vvs_name,
vol['flash_cache'])
# This can be either an existing VVSet with desired QoS
# or a new VVSet that got created for flash-cache use case
# Just add the volume to it
if vvs_name:
self._add_volume_to_vvset(vvs_name, vol, undo_steps)
def _add_volume_to_vvset(self, vvs_name, vol, undo_steps):
bkend_vol_name = self._hpeplugin_driver.add_volume_to_volume_set(
vol, vvs_name)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.remove_volume_from_volume_set,
'params': {'vol_name': bkend_vol_name,
'vvs_name': vvs_name},
'msg': 'Removing VV %s from VVS %s...'
% (bkend_vol_name, vvs_name)})
def _create_vvs(self, id, undo_steps):
vvs_name = self._hpeplugin_driver.create_vvs(id)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.delete_vvset,
'params': {'id': id},
'msg': 'Cleaning up VVS: %s...' % vvs_name})
return vvs_name
def _remove_snap_record(self, snap_name):
snap_info = self._etcd.get_vol_byname(snap_name)
self._etcd.delete_vol(snap_info)
def _set_flash_cache_for_volume(self, vvs_name, flash_cache):
self._hpeplugin_driver.set_flash_cache_policy_on_vvs(
flash_cache,
vvs_name)
@staticmethod
def _rollback(rollback_list):
LOG.info("Rolling back...")
for undo_action in reversed(rollback_list):
LOG.info(undo_action['msg'])
try:
params = undo_action['params']
if type(params) is dict:
undo_action['undo_func'](**undo_action['params'])
elif type(params) is tuple:
undo_action['undo_func'](*undo_action['params'])
else:
undo_action['undo_func'](undo_action['params'])
except Exception as ex:
# TODO: Implement retry logic
LOG.warning('Ignoring exception: %s' % six.text_type(ex))
pass
LOG.info("Roll back complete!")
@staticmethod
def _get_snapshot_by_name(snapshots, snapname):
idx = 0
for s in snapshots:
if s['name'] == snapname:
return s, idx
idx = idx + 1
return None, None
@staticmethod
def _get_snapshots_to_be_deleted(db_snapshots, bkend_snapshots):
ss_list = []
for db_ss in db_snapshots:
found = False
bkend_ss_name = utils.get_3par_snap_name(db_ss['id'])
for bkend_ss in bkend_snapshots:
if bkend_ss_name == bkend_ss:
found = True
break
if not found:
ss_list.append(db_ss)
return ss_list
def _sync_snapshots_from_array(self, vol_id, db_snapshots, snap_cpg):
bkend_snapshots = \
self._hpeplugin_driver.get_snapshots_by_vol(vol_id, snap_cpg)
ss_list_remove = self._get_snapshots_to_be_deleted(db_snapshots,
bkend_snapshots)
if ss_list_remove:
for ss in ss_list_remove:
db_snapshots.remove(ss)
self._remove_snap_record(ss['name'])
self._etcd.update_vol(vol_id, 'snapshots',
db_snapshots)
@staticmethod
def _get_required_rcg_field(rcg_detail):
rcg_filter = {}
msg = 'get_required_rcg_field: %s' % rcg_detail
LOG.info(msg)
rcg_filter['rcg_name'] = rcg_detail.get('name')
# TODO(sonivi): handle in case of multiple target
rcg_filter['policies'] = rcg_detail['targets'][0].get('policies')
rcg_filter['role'] = volume.RCG_ROLE.get(rcg_detail.get('role'))
return rcg_filter
@staticmethod
def _get_required_qos_field(qos_detail):
qos_filter = {}
msg = 'get_required_qos_field: %s' % qos_detail
LOG.info(msg)
qos_filter['enabled'] = qos_detail.get('enabled')
bwMaxLimitKB = qos_detail.get('bwMaxLimitKB')
if bwMaxLimitKB:
qos_filter['maxBWS'] = str(bwMaxLimitKB / 1024) + " MB/sec"
bwMinGoalKB = qos_detail.get('bwMinGoalKB')
if bwMinGoalKB:
qos_filter['minBWS'] = str(bwMinGoalKB / 1024) + " MB/sec"
ioMaxLimit = qos_detail.get('ioMaxLimit')
if ioMaxLimit:
qos_filter['maxIOPS'] = str(ioMaxLimit) + " IOs/sec"
ioMinGoal = qos_detail.get('ioMinGoal')
if ioMinGoal:
qos_filter['minIOPS'] = str(ioMinGoal) + " IOs/sec"
latencyGoal = qos_detail.get('latencyGoal')
if latencyGoal:
qos_filter['Latency'] = str(latencyGoal) + " sec"
priority = qos_detail.get('priority')
if priority:
qos_filter['priority'] = volume.QOS_PRIORITY[priority]
qos_filter['vvset_name'] = qos_detail['name']
return qos_filter
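    # Example of the conversions above with illustrative numbers: a QoS rule with
    # bwMaxLimitKB=204800, ioMaxLimit=5000 and priority=2 is reported back as
    # maxBWS='200.0 MB/sec' (204800 / 1024 under Python 3 division),
    # maxIOPS='5000 IOs/sec' and priority=volume.QOS_PRIORITY[2].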
# TODO: Place holder for now
def _get_3par_rcg_name(self, rcg_name):
return rcg_name
def _find_rcg(self, rcg_name):
rcg = self._hpeplugin_driver.get_rcg(rcg_name)
rcg_info = {'local_rcg_name': rcg_name,
'remote_rcg_name': rcg['remoteGroupName']}
return rcg_info
# TODO: Need RCG lock in different namespace. To be done later
@synchronization.synchronized_rcg('{rcg_name}')
def _create_rcg(self, rcg_name, undo_steps):
rcg_info = self._hpeplugin_driver.create_rcg(
rcg_name=rcg_name)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.delete_rcg,
'params': {'rcg_name': rcg_name},
'msg': 'Undo create RCG: Deleting Remote Copy Group %s...'
% (rcg_name)})
return rcg_info
# TODO: Need RCG lock in different namespace. To be done later
# @synchronization.synchronized_rcg('{rcg_name}')
def _add_volume_to_rcg(self, vol, rcg_name, undo_steps):
bkend_vol_name = utils.get_3par_vol_name(vol['id'])
self._hpeplugin_driver.add_volume_to_rcg(
bkend_vol_name=bkend_vol_name,
rcg_name=rcg_name)
undo_steps.append(
{'undo_func': self._hpeplugin_driver.remove_volume_from_rcg,
'params': {'vol_name': bkend_vol_name,
'rcg_name': rcg_name},
'msg': 'Removing VV %s from Remote Copy Group %s...'
% (bkend_vol_name, rcg_name)})
|
1710330
|
from .util import *
## unify binds variables in the search to their counterparts in the tree.
## It takes two pl_expr objects and tries to match the uppercased terms in lh
## (or lh.domain) with their corresponding values in rh itself or in its domain.
def unify(lh, rh, lh_domain = None, rh_domain = None):
    if rh_domain is None:
        rh_domain = {}  # dict(zip(rh.terms, rh.terms))
    if lh_domain is None:
        lh_domain = {}
nterms = len(rh.terms)
if unifiable_check(nterms, rh, lh) == False:
return False
for i in range(nterms):
rh_arg = rh.terms[i]
lh_arg = lh.terms[i]
if lh_arg == "_":
continue
rh_val = rh_val_get(rh_arg, lh_arg, rh_domain)
if rh_val: # fact or variable in search
if lh_eval(rh_val, lh_arg, lh_domain) == False:
return False
return True
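# Illustrative (hypothetical) behaviour, assuming uppercased terms denote variables:
# unifying a search expression parent(X, bob) against a fact parent(alice, bob)
# should return True and record the binding X -> alice in lh_domain, whereas
# unifying it against parent(alice, carol) should return False.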
|
1710340
|
from api.utils.helpers import stubify
def test_stubify_empty():
"""Stubify logic must return None if it doesn't have a name"""
assert stubify("") is None
assert stubify(" ") is None
|
1710360
|
from django import template
register = template.Library()
@register.filter(name='addcss')
def addcss(field, css):
class_old = field.field.widget.attrs.get('class', '')
if class_old:
class_new = class_old + ' ' + css
else:
class_new = css
return field.as_widget(attrs={"class": class_new})
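# Illustrative template usage, e.g. for a form field named 'email':
#   {{ form.email|addcss:"form-control" }}
# which appends "form-control" to any CSS classes already set on the widget.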
|
1710399
|
import click
import logging
from . import commands
logger = logging.getLogger(__name__)
@click.group(context_settings={"help_option_names": ["--help", "-h"]})
@click.option("--verbose", "-v", is_flag=True, help="Set log level to DEBUG.")
def docketparser_cli(verbose):
"""Parsing structured information from PACER Dockets."""
log_level = logging.DEBUG if verbose else logging.INFO
logger.setLevel(log_level)
subcommands = [
commands.parse_all
]
for subcommand in subcommands:
docketparser_cli.add_command(subcommand)
if __name__ == "__main__":
docketparser_cli()
|
1710402
|
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import fiona
# STEP 2
COUNTRIES_POPULATION = {
'Spain': 47.2,
'Portugal': 10.6,
'United Kingdom': 63.8,
'Ireland': 4.7,
'France': 64.9,
'Italy': 61.1,
'Germany': 82.6,
'Netherlands': 16.8,
'Belgium': 11.1,
'Denmark': 5.6,
'Slovenia': 2,
'Austria': 8.5,
'Luxembourg': 0.5,
'Andorra': 0.077,
'Switzerland': 8.2,
'Liechtenstein': 0.038,
}
MAX_POPULATION = max(COUNTRIES_POPULATION.values())
MIN_POPULATION = min(COUNTRIES_POPULATION.values())
# STEP 3
colormap = cm.get_cmap('Greens')
COUNTRY_COLOUR = {
country_name: colormap(
(population - MIN_POPULATION) / (MAX_POPULATION - MIN_POPULATION)
)
for country_name, population in COUNTRIES_POPULATION.items()
}
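# For example, Spain (47.2) maps to colormap((47.2 - 0.038) / (82.6 - 0.038)) ≈ colormap(0.57),
# a fairly dark green, while Liechtenstein (the minimum) maps to colormap(0.0), the lightest shade.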
# STEP 4
with fiona.open('europe.geojson') as fd:
full_data = [data for data in fd]
# STEP 5
full_data = [data for data in full_data
if data['properties']['NAME'] in COUNTRIES_POPULATION]
for data in full_data:
country_name = data['properties']['NAME']
colour = COUNTRY_COLOUR[country_name]
# Draw the ISO3 code of each country
long, lat = data['properties']['LON'], data['properties']['LAT']
iso3 = data['properties']['ISO3']
plt.text(long, lat, iso3, horizontalalignment='center')
geo_type = data['geometry']['type']
if geo_type == 'Polygon':
data_x = [x for x, y in data['geometry']['coordinates'][0]]
data_y = [y for x, y in data['geometry']['coordinates'][0]]
plt.fill(data_x, data_y, c=colour)
# Draw a line surrounding the area
plt.plot(data_x, data_y, c='black', linewidth=0.2)
elif geo_type == 'MultiPolygon':
for coordinates in data['geometry']['coordinates']:
data_x = [x for x, y in coordinates[0]]
data_y = [y for x, y in coordinates[0]]
plt.fill(data_x, data_y, c=colour)
# Draw a line surrounding the area
plt.plot(data_x, data_y, c='black', linewidth=0.2)
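# Note on the geometry handling above: in GeoJSON a Polygon's 'coordinates' is a list
# of rings whose first entry is the exterior ring, and a MultiPolygon is a list of such
# polygons, so indexing [0] plots only each exterior outline and ignores interior holes.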
# Set the background to light blue
axes = plt.gca()
axes.set_facecolor('xkcd:light blue')
# Set the proper aspect to avoid distortions
axes.set_aspect('equal', adjustable='box')
# Remove labels from axes
plt.xticks([])
plt.yticks([])
# STEP 6
plt.show()
|
1710411
|
from twisted.application.service import ServiceMaker
TxFixClient = ServiceMaker(
'txfixclient',
'txfixclient.tap',
'Run the Fix Client service',
'txfixclient'
)
|
1710459
|
import numpy as np
import os
import torch
import cv2
from torch.utils.data import Dataset
class OnTheFlySMPLTrainDataset(Dataset):
def __init__(self,
poses_path,
textures_path,
backgrounds_dir_path,
params_from='all',
grey_tex_prob=0.05,
img_wh=256):
assert params_from in ['all', 'h36m', 'up3d', '3dpw', 'amass', 'not_amass']
# Load SMPL poses
data = np.load(poses_path)
self.fnames = data['fnames']
self.poses = data['poses']
if params_from != 'all':
if params_from == 'not_amass':
indices = [i for i, x in enumerate(self.fnames)
if (x.startswith('h36m') or x.startswith('up3d') or x.startswith('3dpw'))]
self.fnames = [self.fnames[i] for i in indices]
self.poses = [self.poses[i] for i in indices]
elif params_from == 'amass':
indices = [i for i, x in enumerate(self.fnames)
if not (x.startswith('h36m') or x.startswith('up3d') or x.startswith('3dpw'))]
self.fnames = [self.fnames[i] for i in indices]
self.poses = [self.poses[i] for i in indices]
else:
indices = [i for i, x in enumerate(self.fnames) if x.startswith(params_from)]
self.fnames = [self.fnames[i] for i in indices]
self.poses = [self.poses[i] for i in indices]
self.poses = np.stack(self.poses, axis=0)
# Load SMPL textures
textures = np.load(textures_path)
self.grey_textures = textures['grey']
self.nongrey_textures = textures['nongrey']
self.grey_tex_prob = grey_tex_prob
# Load LSUN backgrounds
self.backgrounds_paths = sorted([os.path.join(backgrounds_dir_path, f)
for f in os.listdir(backgrounds_dir_path)
if f.endswith('.jpg')])
self.img_wh = img_wh
def __len__(self):
return len(self.poses)
def __getitem__(self, index):
if torch.is_tensor(index):
index = index.tolist()
if isinstance(index, list):
num_samples = len(index)
else:
num_samples = 1
pose = self.poses[index]
pose = torch.from_numpy(pose.astype(np.float32))
sample = {'pose': pose}
# Randomly sample texture
texture_samples = []
for _ in range(num_samples):
if torch.rand(1).item() < self.grey_tex_prob:
tex_idx = torch.randint(low=0, high=len(self.grey_textures), size=(1,)).item()
texture = self.grey_textures[tex_idx]
else:
tex_idx = torch.randint(low=0, high=len(self.nongrey_textures), size=(1,)).item()
texture = self.nongrey_textures[tex_idx]
texture_samples.append(texture)
texture_samples = np.stack(texture_samples, axis=0).squeeze()
assert texture_samples.shape[-3:] == (1200, 800, 3), "Texture shape is wrong: {}".format(texture_samples.shape)
sample['texture'] = torch.from_numpy(texture_samples / 255.).float() # (1200, 800, 3) or (num samples, 1200, 800, 3)
# Randomly sample background if rendering RGB
bg_samples = []
for _ in range(num_samples):
bg_idx = torch.randint(low=0, high=len(self.backgrounds_paths), size=(1,)).item()
bg_path = self.backgrounds_paths[bg_idx]
background = cv2.cvtColor(cv2.imread(bg_path), cv2.COLOR_BGR2RGB)
background = cv2.resize(background, (self.img_wh, self.img_wh), interpolation=cv2.INTER_LINEAR)
background = background.transpose(2, 0, 1)
bg_samples.append(background)
bg_samples = np.stack(bg_samples, axis=0).squeeze()
        assert bg_samples.shape[-3:] == (3, self.img_wh, self.img_wh), "BG shape is wrong: {}".format(bg_samples.shape)
sample['background'] = torch.from_numpy(bg_samples / 255.).float() # (3, img_wh, img_wh) or (num samples, 3, img_wh, img_wh)
return sample
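# Hypothetical usage sketch (not part of the original; the .npz and background paths below are
# placeholders): the dataset plugs into a standard PyTorch DataLoader for on-the-fly training batches.
#
#     from torch.utils.data import DataLoader
#     dataset = OnTheFlySMPLTrainDataset(poses_path='smpl_poses.npz',
#                                        textures_path='smpl_textures.npz',
#                                        backgrounds_dir_path='lsun_backgrounds/',
#                                        params_from='all', img_wh=256)
#     loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=4)
#     batch = next(iter(loader))  # dict with 'pose', 'texture' and 'background' tensors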
|
1710484
|
import glob,os
dirlist = glob.glob('../data/N_*')
for dirname in dirlist:
flist = glob.glob(dirname+'/20140418_seqs*_d_0.5.dat')
for fname in flist:
        print(fname)
os.remove(fname)
|
1710499
|
import unittest
from datetime import datetime
import pandas as pd
import numpy as np
from msticpy.analysis.anomalous_sequence.utils.data_structures import Cmd
from msticpy.analysis.anomalous_sequence import anomalous
class TestAnomalous(unittest.TestCase):
def setUp(self) -> None:
self.sessions1 = [
["Set-User", "Set-User"],
["Set-Mailbox", "Set-User", "Set-User"],
]
self.sessions2 = [
[
Cmd("Set-User", {"Identity"}),
Cmd("Set-User", {"Identity", "City", "Name"}),
],
[
Cmd("Set-Mailbox", {"Identity"}),
Cmd("Set-User", {"Identity", "City"}),
Cmd("Set-User", {"Identity"}),
],
]
self.sessions3 = [
[
Cmd("Set-User", {"Identity": "blah"}),
Cmd("Set-User", {"Identity": "haha", "City": "york", "Name": "bob"}),
],
[
Cmd("Set-Mailbox", {"Identity": "blah"}),
Cmd("Set-User", {"Identity": "blah", "City": "london"}),
Cmd("Set-User", {"Identity": "haha"}),
],
]
self.times = [datetime(2019, 3, 1), datetime(2019, 5, 6)]
self.data1 = pd.DataFrame({"session": self.sessions1, "time": self.times})
self.data2 = pd.DataFrame({"session": self.sessions2, "time": self.times})
self.data3 = pd.DataFrame({"session": self.sessions3, "time": self.times})
def tearDown(self) -> None:
self.sessions1 = None
self.sessions2 = None
self.sessions3 = None
self.times = None
self.data1 = None
self.data2 = None
self.data3 = None
def test_score_sessions(self):
actual = anomalous.score_sessions(
data=self.data1, session_column="session", window_length=3
)
self.assertTrue(isinstance(actual, pd.DataFrame))
for col in self.data1.columns:
self.assertTrue(col in actual.columns)
self.assertEqual(len(actual.columns), len(self.data1.columns) + 2)
self.assertEqual(len(actual), len(self.data1))
window = actual["rarest_window3"].iloc[0]
self.assertTrue(isinstance(window, list))
self.assertTrue(isinstance(window[0], str))
actual = anomalous.score_sessions(
data=self.data2, session_column="session", window_length=3
)
window = actual["rarest_window3"].iloc[0]
cmd = window[0]
self.assertTrue(isinstance(window, list))
self.assertTrue("name" in dir(cmd))
self.assertTrue("params" in dir(cmd))
self.assertTrue(isinstance(cmd.params, set))
actual = anomalous.score_sessions(
data=self.data3, session_column="session", window_length=3
)
window = actual["rarest_window3"].iloc[0]
cmd = window[0]
self.assertTrue(isinstance(window, list))
self.assertTrue("name" in dir(cmd))
self.assertTrue("params" in dir(cmd))
self.assertTrue(isinstance(cmd.params, dict))
actual = anomalous.score_sessions(
data=self.data3, session_column="session", window_length=5
)
window = actual["rarest_window5"].iloc[0]
lik = actual["rarest_window5_likelihood"].iloc[0]
self.assertTrue(isinstance(window, list))
self.assertEqual(len(window), 0)
self.assertTrue(np.isnan(lik))
if __name__ == "__main__":
unittest.main()
|
1710542
|
class Console(object):
""" Represents the standard input,output,and error streams for console applications. This class cannot be inherited. """
@staticmethod
def Beep(frequency=None,duration=None):
"""
Beep(frequency: int,duration: int)
Plays the sound of a beep of a specified frequency and duration through the console speaker.
frequency: The frequency of the beep,ranging from 37 to 32767 hertz.
duration: The duration of the beep measured in milliseconds.
Beep()
Plays the sound of a beep through the console speaker.
"""
pass
@staticmethod
def Clear():
"""
Clear()
Clears the console buffer and corresponding console window of display information.
"""
pass
@staticmethod
def MoveBufferArea(sourceLeft,sourceTop,sourceWidth,sourceHeight,targetLeft,targetTop,sourceChar=None,sourceForeColor=None,sourceBackColor=None):
"""
MoveBufferArea(sourceLeft: int,sourceTop: int,sourceWidth: int,sourceHeight: int,targetLeft: int,targetTop: int,sourceChar: Char,sourceForeColor: ConsoleColor,sourceBackColor: ConsoleColor)
Copies a specified source area of the screen buffer to a specified destination area.
sourceLeft: The leftmost column of the source area.
sourceTop: The topmost row of the source area.
sourceWidth: The number of columns in the source area.
sourceHeight: The number of rows in the source area.
targetLeft: The leftmost column of the destination area.
targetTop: The topmost row of the destination area.
sourceChar: The character used to fill the source area.
sourceForeColor: The foreground color used to fill the source area.
sourceBackColor: The background color used to fill the source area.
MoveBufferArea(sourceLeft: int,sourceTop: int,sourceWidth: int,sourceHeight: int,targetLeft: int,targetTop: int)
Copies a specified source area of the screen buffer to a specified destination area.
sourceLeft: The leftmost column of the source area.
sourceTop: The topmost row of the source area.
sourceWidth: The number of columns in the source area.
sourceHeight: The number of rows in the source area.
targetLeft: The leftmost column of the destination area.
targetTop: The topmost row of the destination area.
"""
pass
@staticmethod
def OpenStandardError(bufferSize=None):
"""
OpenStandardError(bufferSize: int) -> Stream
Acquires the standard error stream,which is set to a specified buffer size.
bufferSize: The internal stream buffer size.
Returns: The standard error stream.
OpenStandardError() -> Stream
Acquires the standard error stream.
Returns: The standard error stream.
"""
pass
@staticmethod
def OpenStandardInput(bufferSize=None):
"""
OpenStandardInput(bufferSize: int) -> Stream
Acquires the standard input stream,which is set to a specified buffer size.
bufferSize: The internal stream buffer size.
Returns: The standard input stream.
OpenStandardInput() -> Stream
Acquires the standard input stream.
Returns: The standard input stream.
"""
pass
@staticmethod
def OpenStandardOutput(bufferSize=None):
"""
OpenStandardOutput(bufferSize: int) -> Stream
Acquires the standard output stream,which is set to a specified buffer size.
bufferSize: The internal stream buffer size.
Returns: The standard output stream.
OpenStandardOutput() -> Stream
Acquires the standard output stream.
Returns: The standard output stream.
"""
pass
@staticmethod
def Read():
"""
Read() -> int
Reads the next character from the standard input stream.
Returns: The next character from the input stream,or negative one (-1) if there are currently no more
characters to be read.
"""
pass
@staticmethod
def ReadKey(intercept=None):
"""
ReadKey(intercept: bool) -> ConsoleKeyInfo
Obtains the next character or function key pressed by the user. The pressed key is optionally
displayed in the console window.
intercept: Determines whether to display the pressed key in the console window. true to not display the
pressed key; otherwise,false.
Returns: A System.ConsoleKeyInfo object that describes the System.ConsoleKey constant and Unicode
character,if any,that correspond to the pressed console key. The System.ConsoleKeyInfo object
also describes,in a bitwise combination of System.ConsoleModifiers values,whether one or more
SHIFT,ALT,or CTRL modifier keys was pressed simultaneously with the console key.
ReadKey() -> ConsoleKeyInfo
Obtains the next character or function key pressed by the user. The pressed key is displayed in
the console window.
Returns: A System.ConsoleKeyInfo object that describes the System.ConsoleKey constant and Unicode
character,if any,that correspond to the pressed console key. The System.ConsoleKeyInfo object
also describes,in a bitwise combination of System.ConsoleModifiers values,whether one or more
SHIFT,ALT,or CTRL modifier keys was pressed simultaneously with the console key.
"""
pass
@staticmethod
def ReadLine():
"""
ReadLine() -> str
Reads the next line of characters from the standard input stream.
Returns: The next line of characters from the input stream,or null if no more lines are available.
"""
pass
@staticmethod
def ResetColor():
"""
ResetColor()
Sets the foreground and background console colors to their defaults.
"""
pass
@staticmethod
def SetBufferSize(width,height):
"""
SetBufferSize(width: int,height: int)
Sets the height and width of the screen buffer area to the specified values.
width: The width of the buffer area measured in columns.
height: The height of the buffer area measured in rows.
"""
pass
@staticmethod
def SetCursorPosition(left,top):
"""
SetCursorPosition(left: int,top: int)
Sets the position of the cursor.
left: The column position of the cursor.
top: The row position of the cursor.
"""
pass
@staticmethod
def SetError(newError):
"""
SetError(newError: TextWriter)
Sets the System.Console.Error property to the specified System.IO.TextWriter object.
newError: A stream that is the new standard error output.
"""
pass
@staticmethod
def SetIn(newIn):
"""
SetIn(newIn: TextReader)
Sets the System.Console.In property to the specified System.IO.TextReader object.
newIn: A stream that is the new standard input.
"""
pass
@staticmethod
def SetOut(newOut):
"""
SetOut(newOut: TextWriter)
Sets the System.Console.Out property to the specified System.IO.TextWriter object.
newOut: A stream that is the new standard output.
"""
pass
@staticmethod
def SetWindowPosition(left,top):
"""
SetWindowPosition(left: int,top: int)
Sets the position of the console window relative to the screen buffer.
left: The column position of the upper left corner of the console window.
top: The row position of the upper left corner of the console window.
"""
pass
@staticmethod
def SetWindowSize(width,height):
"""
SetWindowSize(width: int,height: int)
Sets the height and width of the console window to the specified values.
width: The width of the console window measured in columns.
height: The height of the console window measured in rows.
"""
pass
@staticmethod
def Write(*__args):
"""
Write(value: Single)
Writes the text representation of the specified single-precision floating-point value to the
standard output stream.
value: The value to write.
Write(value: int)
Writes the text representation of the specified 32-bit signed integer value to the standard
output stream.
value: The value to write.
Write(value: float)
Writes the text representation of the specified double-precision floating-point value to the
standard output stream.
value: The value to write.
Write(value: Decimal)
Writes the text representation of the specified System.Decimal value to the standard output
stream.
value: The value to write.
Write(value: UInt32)
Writes the text representation of the specified 32-bit unsigned integer value to the standard
output stream.
value: The value to write.
Write(value: object)
Writes the text representation of the specified object to the standard output stream.
value: The value to write,or null.
Write(value: str)
Writes the specified string value to the standard output stream.
value: The value to write.
Write(value: Int64)
Writes the text representation of the specified 64-bit signed integer value to the standard
output stream.
value: The value to write.
Write(value: UInt64)
Writes the text representation of the specified 64-bit unsigned integer value to the standard
output stream.
value: The value to write.
Write(format: str,arg0: object,arg1: object,arg2: object)
Writes the text representation of the specified objects to the standard output stream using the
specified format information.
format: A composite format string (see Remarks).
arg0: The first object to write using format.
arg1: The second object to write using format.
arg2: The third object to write using format.
Write(format: str,arg0: object,arg1: object,arg2: object,arg3: object)
Writes the text representation of the specified objects and variable-length parameter list to
the standard output stream using the specified format information.
format: A composite format string (see Remarks).
arg0: The first object to write using format.
arg1: The second object to write using format.
arg2: The third object to write using format.
arg3: The fourth object to write using format.
Write(format: str,arg0: object)
Writes the text representation of the specified object to the standard output stream using the
specified format information.
format: A composite format string (see Remarks).
arg0: An object to write using format.
Write(format: str,arg0: object,arg1: object)
Writes the text representation of the specified objects to the standard output stream using the
specified format information.
format: A composite format string (see Remarks).
arg0: The first object to write using format.
arg1: The second object to write using format.
Write(format: str,*arg: Array[object])
Writes the text representation of the specified array of objects to the standard output stream
using the specified format information.
format: A composite format string (see Remarks).
arg: An array of objects to write using format.
Write(buffer: Array[Char])
Writes the specified array of Unicode characters to the standard output stream.
buffer: A Unicode character array.
Write(buffer: Array[Char],index: int,count: int)
Writes the specified subarray of Unicode characters to the standard output stream.
buffer: An array of Unicode characters.
index: The starting position in buffer.
count: The number of characters to write.
Write(value: bool)
Writes the text representation of the specified Boolean value to the standard output stream.
value: The value to write.
Write(value: Char)
Writes the specified Unicode character value to the standard output stream.
value: The value to write.
"""
pass
@staticmethod
def WriteLine(*__args):
"""
WriteLine(value: object)
Writes the text representation of the specified object,followed by the current line terminator,
to the standard output stream.
value: The value to write.
WriteLine(value: str)
Writes the specified string value,followed by the current line terminator,to the standard
output stream.
value: The value to write.
WriteLine(value: Int64)
Writes the text representation of the specified 64-bit signed integer value,followed by the
current line terminator,to the standard output stream.
value: The value to write.
WriteLine(value: UInt64)
Writes the text representation of the specified 64-bit unsigned integer value,followed by the
current line terminator,to the standard output stream.
value: The value to write.
WriteLine(format: str,arg0: object)
Writes the text representation of the specified object,followed by the current line terminator,
to the standard output stream using the specified format information.
format: A composite format string (see Remarks).
arg0: An object to write using format.
WriteLine(format: str,arg0: object,arg1: object,arg2: object,arg3: object)
Writes the text representation of the specified objects and variable-length parameter list,
followed by the current line terminator,to the standard output stream using the specified
format information.
format: A composite format string (see Remarks).
arg0: The first object to write using format.
arg1: The second object to write using format.
arg2: The third object to write using format.
arg3: The fourth object to write using format.
WriteLine(format: str,*arg: Array[object])
Writes the text representation of the specified array of objects,followed by the current line
terminator,to the standard output stream using the specified format information.
format: A composite format string (see Remarks).
arg: An array of objects to write using format.
WriteLine(format: str,arg0: object,arg1: object)
Writes the text representation of the specified objects,followed by the current line
terminator,to the standard output stream using the specified format information.
format: A composite format string (see Remarks).
arg0: The first object to write using format.
arg1: The second object to write using format.
WriteLine(format: str,arg0: object,arg1: object,arg2: object)
Writes the text representation of the specified objects,followed by the current line
terminator,to the standard output stream using the specified format information.
format: A composite format string (see Remarks).
arg0: The first object to write using format.
arg1: The second object to write using format.
arg2: The third object to write using format.
WriteLine(value: UInt32)
Writes the text representation of the specified 32-bit unsigned integer value,followed by the
current line terminator,to the standard output stream.
value: The value to write.
WriteLine(value: Char)
Writes the specified Unicode character,followed by the current line terminator,value to the
standard output stream.
value: The value to write.
WriteLine(buffer: Array[Char])
Writes the specified array of Unicode characters,followed by the current line terminator,to
the standard output stream.
buffer: A Unicode character array.
WriteLine()
Writes the current line terminator to the standard output stream.
WriteLine(value: bool)
Writes the text representation of the specified Boolean value,followed by the current line
terminator,to the standard output stream.
value: The value to write.
WriteLine(buffer: Array[Char],index: int,count: int)
Writes the specified subarray of Unicode characters,followed by the current line terminator,to
the standard output stream.
buffer: An array of Unicode characters.
index: The starting position in buffer.
count: The number of characters to write.
WriteLine(value: Single)
Writes the text representation of the specified single-precision floating-point value,followed
by the current line terminator,to the standard output stream.
value: The value to write.
WriteLine(value: int)
Writes the text representation of the specified 32-bit signed integer value,followed by the
current line terminator,to the standard output stream.
value: The value to write.
WriteLine(value: Decimal)
Writes the text representation of the specified System.Decimal value,followed by the current
line terminator,to the standard output stream.
value: The value to write.
WriteLine(value: float)
Writes the text representation of the specified double-precision floating-point value,followed
by the current line terminator,to the standard output stream.
value: The value to write.
"""
pass
BackgroundColor=None
BufferHeight=1000
BufferWidth=127
CancelKeyPress=None
CapsLock=False
CursorLeft=0
CursorSize=25
CursorTop=999
CursorVisible=True
Error=None
ForegroundColor=None
In=None
InputEncoding=None
IsErrorRedirected=False
IsInputRedirected=False
IsOutputRedirected=False
KeyAvailable=False
LargestWindowHeight=60
LargestWindowWidth=127
NumberLock=True
Out=None
OutputEncoding=None
Title='cmd - ipy -m ironstubs make System --folder=stubs2 --overwrite'
TreatControlCAsInput=False
WindowHeight=60
WindowLeft=0
WindowTop=940
WindowWidth=127
__all__=[
'Beep',
'CancelKeyPress',
'Clear',
'MoveBufferArea',
'OpenStandardError',
'OpenStandardInput',
'OpenStandardOutput',
'Read',
'ReadKey',
'ReadLine',
'ResetColor',
'SetBufferSize',
'SetCursorPosition',
'SetError',
'SetIn',
'SetOut',
'SetWindowPosition',
'SetWindowSize',
'Write',
'WriteLine',
]
|
1710552
|
text = "This is some generic text"
index = 0
while index < len(text):
print(text[index])
index += 1
for character in text:
print(character)
print("\n".join(text))
|
1710553
|
import os
from typing import List, Union
import numpy as np
from ConfigSpace.configuration_space import Configuration
from smac.runhistory.runhistory import RunHistory
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.plot.scatter import plot_scatter_plot
from cave.utils.helpers import get_cost_dict_for_config, NotApplicable
from cave.utils.hpbandster_helpers import format_budgets
class PlotScatter(BaseAnalyzer):
"""
    Scatter plots show the costs of the default and optimized parameter configuration on each instance. Since
    aggregated cost values in tables lose the detailed information about the individual cost on each instance,
    scatter plots provide a more detailed picture. They provide insights into whether overall performance
improvements can be explained only by some outliers or whether they are due to improvements on the entire
instance set. On the left side the training-data is scattered, on the right side the test-data is scattered.
"""
def __init__(self,
runscontainer,
):
"""
Creates a scatterplot of the two configurations on the given set of instances.
Saves plot to file.
"""
super().__init__(runscontainer)
formatted_budgets = format_budgets(self.runscontainer.get_budgets())
for budget, run in zip(self.runscontainer.get_budgets(),
self.runscontainer.get_aggregated(keep_budgets=True, keep_folders=False)):
self.result[formatted_budgets[budget]] = self._plot_scatter(
default=run.default,
incumbent=run.incumbent,
rh=run.epm_runhistory,
train=run.scenario.train_insts,
test=run.scenario.test_insts,
run_obj=run.scenario.run_obj,
cutoff=run.scenario.cutoff,
output_dir=run.output_dir,
)
def get_name(self):
return "Scatter Plot"
def _plot_scatter(self,
default: Configuration,
incumbent: Configuration,
rh: RunHistory,
train: List[str],
test: Union[List[str], None],
run_obj: str,
cutoff,
output_dir):
"""
Parameters
----------
default, incumbent: Configuration
configurations to be compared
rh: RunHistory
runhistory to use for cost-estimations
train[, test]: list(str)
instance-names
run_obj: str
run-objective (time or quality)
cutoff: float
maximum runtime of ta
output_dir: str
output directory
"""
out_fn_base = os.path.join(output_dir, 'scatter_')
self.logger.info("... plotting scatter")
metric = run_obj
timeout = cutoff
labels = ["default {}".format(run_obj), "incumbent {}".format(run_obj)]
def_costs = get_cost_dict_for_config(rh, default).items()
inc_costs = get_cost_dict_for_config(rh, incumbent).items()
out_fns = []
if len(train) <= 1 and len(test) <= 1:
raise NotApplicable("No instances, so no scatter-plot.")
for insts, name in [(train, 'train'), (test, 'test')]:
if len(insts) <= 1:
self.logger.debug("No %s instances, skipping scatter", name)
continue
default = np.array([v for k, v in def_costs if k in insts])
incumbent = np.array([v for k, v in inc_costs if k in insts])
min_val = min(min(default), min(incumbent))
out_fn = out_fn_base + name + '.png'
out_fns.append(plot_scatter_plot((default,), (incumbent,), labels, metric=metric,
min_val=min_val, max_val=timeout, out_fn=out_fn))
self.logger.debug("Plotted scatter to %s", out_fn)
return {'figure' : out_fns if len(out_fns) > 0 else None}
|
1710560
|
from newspaper import Article
from sqlalchemy.orm.exc import NoResultFound
from .base import BaseCrawler
from ...models import Entity, Author, AuthorType
class GenericCrawler(BaseCrawler):
def offer(self, url):
""" Can this crawler process this URL? """
return True
def crawl(self, doc):
""" Crawl this document. """
# instantiate and download article
article = Article(url=doc.url, language='en', fetch_images=False, request_timeout=10)
article.download()
# extract content
self.extract(doc, article)
def extract(self, doc, article):
""" Extract text and other things from this document. """
super(GenericCrawler, self).extract(doc, article)
article.parse()
doc.title = article.title
doc.text = article.text
# todo: handle multiple authors
authors = article.authors
if authors:
author = authors[0]
doc.author = Author.get_or_create(author, AuthorType.journalist())
else:
doc.author = Author.unknown()
doc.published_at = self.parse_timestamp(article.published_date)
|
1710630
|
import unittest
from mox3.mox import MoxTestBase, IsA
import gevent
from gevent.pywsgi import WSGIServer as GeventWSGIServer
from slimta.http.wsgi import WsgiServer, log
class TestWsgiServer(MoxTestBase, unittest.TestCase):
def test_build_server(self):
w = WsgiServer()
server = w.build_server(('0.0.0.0', 0))
self.assertIsInstance(server, GeventWSGIServer)
def test_handle_unimplemented(self):
w = WsgiServer()
with self.assertRaises(NotImplementedError):
w.handle(None, None)
def test_call(self):
class FakeWsgiServer(WsgiServer):
def handle(self, environ, start_response):
start_response('200 Test', 13)
return ['test']
w = FakeWsgiServer()
environ = {}
start_response = self.mox.CreateMockAnything()
self.mox.StubOutWithMock(log, 'wsgi_request')
self.mox.StubOutWithMock(log, 'wsgi_response')
log.wsgi_request(environ)
start_response('200 Test', 13)
log.wsgi_response(environ, '200 Test', 13)
self.mox.ReplayAll()
self.assertEqual(['test'], w(environ, start_response))
# vim:et:fdm=marker:sts=4:sw=4:ts=4
|
1710634
|
import multiprocessing
import importlib
import sys
import os
class State:
''' Handles the Unicorn HAT state'''
def __init__(self, is_hd=True):
self._process = None
self.set_model(is_hd)
def set_model(self, is_hd):
self.is_hd = is_hd
if self.is_hd is True:
import unicornhathd
import app.programs.hd
self._unicornhat = unicornhathd
self._app_programs = app.programs.hd.list
else:
import unicornhat
import app.programs.original
self._unicornhat = unicornhat
self._app_programs = app.programs.original.list
def start_program(self, name, params={}):
program = self._get_program(name)
self.stop_program()
self._set_rotation(params)
self._set_brightness(params)
self._start_process(program, params)
def stop_program(self):
if self._process is not None:
self._process.terminate()
self._unicornhat.show()
def _get_program(self, name):
try:
return self._app_programs[name]
except KeyError:
raise ProgramNotFound(name)
def _set_brightness(self, params):
if params.get("brightness") is not None:
brightness = float(params["brightness"])
if 0 <= brightness <= 1:
self._unicornhat.brightness(brightness)
else:
raise ValueError("Brightness must be between 0.0 and 1.0")
def _set_rotation(self, params):
if params.get("rotation") is not None:
rotation = int(params["rotation"])
if rotation in [0, 90, 180, 270]:
self._unicornhat.rotation(rotation)
else:
raise ValueError("Rotation must be 0, 90, 180 or 270 degrees")
def _start_process(self, program, params):
        def run_program(params):
            importlib.import_module(program.location).run(params)
self._process = multiprocessing.Process(target=run_program, args=(params,))
self._process.start()
state = State()
class ProgramNotFound(Exception):
pass
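# Hypothetical usage sketch (not part of the original; the program name and parameters are made up,
# real names come from app.programs.hd.list or app.programs.original.list, and running it requires
# the Unicorn HAT libraries):
#
#     state.start_program("rainbow", {"rotation": 180, "brightness": 0.5})
#     state.stop_program()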
|
1710652
|
import glob
import os
import random
import numpy as np
from PIL import Image
from datasets.BaseDataset import VideoDataset, INFO, IMAGES_, TARGETS
from utils.Resize import ResizeMode
class Davis(VideoDataset):
def __init__(self, root, mode='train', resize_mode=None, resize_shape=None, tw=8, max_temporal_gap=8, num_classes=2,
imset=None):
self.imset = imset
self.videos = []
self.num_frames = {}
self.num_objects = {}
self.shape = {}
self.raw_samples = []
super(Davis, self).__init__(root, mode, resize_mode, resize_shape, tw, max_temporal_gap, num_classes)
def filter_samples(self, video):
filtered_samples = [s for s in self.raw_samples if s[INFO]['video'] == video]
self.samples = filtered_samples
def set_video_id(self, video):
self.current_video = video
self.start_index = self.get_start_index(video)
self.filter_samples(video)
def get_video_ids(self):
# shuffle the list for training
return random.sample(self.videos, len(self.videos)) if self.is_train() else self.videos
def get_support_indices(self, index, sequence):
# index should be start index of the clip
if self.is_train():
index_range = np.arange(index, min(self.num_frames[sequence],
(index + max(self.max_temporal_gap, self.tw))))
else:
index_range = np.arange(index,
min(self.num_frames[sequence], (index + self.tw)))
support_indices = np.random.choice(index_range, min(self.tw, len(index_range)), replace=False)
support_indices = np.sort(np.append(support_indices, np.repeat([index],
self.tw - len(support_indices))))
# print(support_indices)
return support_indices
def create_sample_list(self):
image_dir = os.path.join(self.root, 'JPEGImages', '480p')
mask_dir = os.path.join(self.root, 'Annotations_unsupervised', '480p')
if self.is_train():
_imset_f = '2017/train.txt'
elif self.imset:
_imset_f = self.imset
else:
_imset_f = '2017/val.txt'
with open(os.path.join(self.root, "ImageSets",_imset_f), "r") as lines:
for line in lines:
_video = line.rstrip('\n')
self.videos += [_video]
img_list = list(glob.glob(os.path.join(image_dir, _video, '*.jpg')))
img_list.sort()
# self.videos.append(_video)
num_frames = len(glob.glob(os.path.join(image_dir, _video, '*.jpg')))
self.num_frames[_video] = num_frames
_mask_file = os.path.join(mask_dir, _video, '00000.png')
_mask = np.array(Image.open(os.path.join(mask_dir, _video, '00000.png')).convert("P"))
num_objects = np.max(_mask)
self.num_objects[_video] = num_objects
self.shape[_video] = np.shape(_mask)
for i, img in enumerate(img_list):
sample = {INFO: {}, IMAGES_: [], TARGETS: []}
support_indices = self.get_support_indices(i, _video)
sample[INFO]['support_indices'] = support_indices
images = [os.path.join(image_dir, _video, '{:05d}.jpg'.format(s)) for s in np.sort(support_indices)]
targets = [os.path.join(mask_dir, _video, '{:05d}.png'.format(s)) for s in np.sort(support_indices)]
sample[IMAGES_] = images
sample[TARGETS] = targets
sample[INFO]['video'] = _video
sample[INFO]['num_frames'] = num_frames
sample[INFO]['num_objects'] = num_objects
sample[INFO]['shape'] = np.shape(_mask)
self.samples+=[sample]
self.raw_samples = self.samples
if __name__ == '__main__':
davis = Davis(root="/globalwork/data/DAVIS-Unsupervised/DAVIS/",
resize_shape=(480, 854), resize_mode=ResizeMode.FIXED_SIZE, mode="train", max_temporal_gap=32)
# davis.set_video_id('cat-girl')
print("Dataset size: {}".format(davis.__len__()))
for i, _input in enumerate(davis):
print(_input['info'])
print("Image Max {}, Image Min {}".format(_input['images'].max(), _input['images'].min()),
"Target max {}, Target Min {}".format(_input['target']['mask'].max(), _input['target']['mask'].min()))
|
1710728
|
import os
from ..models import DocumentType
from ..permissions import (
permission_document_properties_edit,
permission_document_type_create, permission_document_type_delete,
permission_document_type_edit, permission_document_type_view,
)
from .base import GenericDocumentViewTestCase
from .literals import (
TEST_DOCUMENT_TYPE_LABEL, TEST_DOCUMENT_TYPE_QUICK_LABEL,
TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED
)
from .mixins.document_type_mixins import (
DocumentQuickLabelViewTestMixin,
DocumentTypeDeletionPoliciesViewTestMixin,
DocumentTypeFilenameGeneratorViewTestMixin,
DocumentTypeQuickLabelTestMixin, DocumentTypeQuickLabelViewTestMixin,
DocumentTypeViewTestMixin
)
class DocumentTypeDeletionPoliciesViewTestCase(
DocumentTypeDeletionPoliciesViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def test_document_type_deletion_policies_get_view_no_permission(self):
response = self._request_test_document_type_policies_get_view()
self.assertEqual(response.status_code, 404)
def test_document_type_deletion_policies_get_view_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
response = self._request_test_document_type_policies_get_view()
self.assertEqual(response.status_code, 200)
def test_document_type_deletion_policies_post_view_no_permission(self):
response = self._request_test_document_type_policies_post_view()
self.assertEqual(response.status_code, 404)
def test_document_type_deletion_policies_post_view_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
response = self._request_test_document_type_policies_post_view()
self.assertEqual(response.status_code, 302)
class DocumentTypeFilenameGeneratorViewTestCase(
DocumentTypeFilenameGeneratorViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def test_document_type_filename_generator_get_view_no_permission(self):
response = self._request_test_document_type_filename_generator_get_view()
self.assertEqual(response.status_code, 404)
def test_document_type_filename_generator_get_view_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
response = self._request_test_document_type_filename_generator_get_view()
self.assertEqual(response.status_code, 200)
def test_document_type_filename_generator_post_view_no_permission(self):
response = self._request_test_document_type_filename_generator_post_view()
self.assertEqual(response.status_code, 404)
def test_document_type_filename_generator_post_view_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
response = self._request_test_document_type_filename_generator_post_view()
self.assertEqual(response.status_code, 302)
class DocumentTypeViewsTestCase(
DocumentTypeViewTestMixin, GenericDocumentViewTestCase
):
auto_upload_test_document = False
def test_document_type_create_view_no_permission(self):
self.test_document_type.delete()
response = self._request_test_document_type_create_view()
self.assertEqual(response.status_code, 403)
self.assertEqual(DocumentType.objects.count(), 0)
def test_document_type_create_view_with_permission(self):
self.test_document_type.delete()
self.grant_permission(permission=permission_document_type_create)
response = self._request_test_document_type_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(DocumentType.objects.count(), 1)
self.assertEqual(
DocumentType.objects.first().label, TEST_DOCUMENT_TYPE_LABEL
)
def test_document_type_delete_view_no_permission(self):
response = self._request_test_document_type_delete_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(DocumentType.objects.count(), 1)
def test_document_type_delete_view_with_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_delete
)
response = self._request_test_document_type_delete_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(DocumentType.objects.count(), 0)
def test_document_type_edit_view_no_permission(self):
test_document_type_label = self.test_document_type.label
response = self._request_test_document_type_edit_view()
self.assertEqual(response.status_code, 404)
self.test_document_type.refresh_from_db()
self.assertEqual(
self.test_document_type.label, test_document_type_label
)
def test_document_type_edit_view_with_access(self):
test_document_type_label = self.test_document_type.label
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
response = self._request_test_document_type_edit_view()
self.assertEqual(response.status_code, 302)
self.test_document_type.refresh_from_db()
self.assertNotEqual(
self.test_document_type.label, test_document_type_label
)
def test_document_type_list_view_no_permission(self):
response = self._request_test_document_type_list_view()
self.assertNotContains(
response=response, status_code=200, text=self.test_document_type
)
def test_document_type_list_view_with_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_view
)
response = self._request_test_document_type_list_view()
self.assertContains(
response=response, status_code=200, text=self.test_document_type
)
class DocumentTypeQuickLabelViewsTestCase(
DocumentTypeQuickLabelTestMixin, DocumentTypeQuickLabelViewTestMixin,
GenericDocumentViewTestCase
):
auto_upload_test_document = False
def test_document_type_quick_label_create_no_permission(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_view
)
response = self._request_test_quick_label_create_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(self.test_document_type.filenames.count(), 0)
def test_document_type_quick_label_create_with_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
response = self._request_test_quick_label_create_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(self.test_document_type.filenames.count(), 1)
def test_document_type_quick_label_delete_no_permission(self):
self._create_test_document_type_quick_label()
response = self._request_test_quick_label_delete_view()
self.assertEqual(response.status_code, 404)
self.assertEqual(
self.test_document_type.filenames.count(), 1
)
def test_document_type_quick_label_delete_with_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
self._create_test_document_type_quick_label()
response = self._request_test_quick_label_delete_view()
self.assertEqual(response.status_code, 302)
self.assertEqual(
self.test_document_type.filenames.count(), 0
)
def test_document_type_quick_label_edit_no_permission(self):
self._create_test_document_type_quick_label()
response = self._request_test_quick_label_edit_view()
self.assertEqual(response.status_code, 404)
self.test_document_type_quick_label.refresh_from_db()
self.assertEqual(
self.test_document_type_quick_label.filename,
TEST_DOCUMENT_TYPE_QUICK_LABEL
)
def test_document_type_quick_label_edit_with_access(self):
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_edit
)
self._create_test_document_type_quick_label()
response = self._request_test_quick_label_edit_view()
self.assertEqual(response.status_code, 302)
self.test_document_type_quick_label.refresh_from_db()
self.assertEqual(
self.test_document_type_quick_label.filename,
TEST_DOCUMENT_TYPE_QUICK_LABEL_EDITED
)
def test_document_type_quick_label_list_no_permission(self):
self._create_test_document_type_quick_label()
response = self._request_test_quick_label_list_view()
self.assertEqual(response.status_code, 404)
def test_document_type_quick_label_list_with_access(self):
self._create_test_document_type_quick_label()
self.grant_access(
obj=self.test_document_type,
permission=permission_document_type_view
)
response = self._request_test_quick_label_list_view()
self.assertContains(
response, status_code=200, text=self.test_document_type_quick_label
)
class DocumentsQuickLabelViewTestCase(
DocumentQuickLabelViewTestMixin, DocumentTypeQuickLabelTestMixin,
GenericDocumentViewTestCase
):
def test_document_quick_label_no_permission(self):
self._create_test_document_type_quick_label()
response = self._request_test_document_quick_label_edit_view()
self.assertEqual(response.status_code, 404)
def test_document_quick_label_with_access(self):
self._create_test_document_type_quick_label()
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
response = self._request_test_document_quick_label_edit_view()
self.assertEqual(response.status_code, 302)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.label,
self.test_document_type_quick_label.filename
)
def test_document_quick_label_preserve_extension_with_access(self):
self._create_test_document_type_quick_label()
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
filename, extension = os.path.splitext(self.test_document.label)
response = self._request_test_document_quick_label_edit_view(
extra_data={'preserve_extension': True}
)
self.assertEqual(response.status_code, 302)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.label, '{}{}'.format(
self.test_document_type_quick_label.filename, extension
)
)
def test_document_quick_label_no_preserve_extension_with_access(self):
self._create_test_document_type_quick_label()
self.grant_access(
obj=self.test_document,
permission=permission_document_properties_edit
)
filename, extension = os.path.splitext(self.test_document.label)
response = self._request_test_document_quick_label_edit_view(
extra_data={'preserve_extension': False}
)
self.assertEqual(response.status_code, 302)
self.test_document.refresh_from_db()
self.assertEqual(
self.test_document.label, self.test_document_type_quick_label.filename
)
|
1710741
|
import pandas as pd
df = pd.DataFrame({'A': [1, 2, 3], 'B': [10, 20, 30], 'C': [100, 200, 300]},
index=['One', 'Two', 'Three'])
print(df)
# A B C
# One 1 10 100
# Two 2 20 200
# Three 3 30 300
print(df.reindex(index=['Two', 'Three', 'One']))
# A B C
# Two 2 20 200
# Three 3 30 300
# One 1 10 100
print(df.reindex(columns=['B', 'C', 'A']))
# B C A
# One 10 100 1
# Two 20 200 2
# Three 30 300 3
print(df.reindex(index=['Two', 'Three', 'One'], columns=['B', 'C', 'A']))
# B C A
# Two 20 200 2
# Three 30 300 3
# One 10 100 1
print(df.reindex(columns=['B', 'A'], index=['Three', 'One']))
# B A
# Three 30 3
# One 10 1
print(df.reindex(['Two', 'Three', 'One'], axis=0))
# A B C
# Two 2 20 200
# Three 3 30 300
# One 1 10 100
print(df.reindex(['B', 'C', 'A'], axis='columns'))
# B C A
# One 10 100 1
# Two 20 200 2
# Three 30 300 3
print(df[['B', 'C', 'A']])
# B C A
# One 10 100 1
# Two 20 200 2
# Three 30 300 3
print(df.reindex(columns=['B', 'X', 'C'], index=['Two', 'One', 'Four']))
# B X C
# Two 20.0 NaN 200.0
# One 10.0 NaN 100.0
# Four NaN NaN NaN
print(df.reindex(columns=['B', 'X', 'C'], index=['Two', 'One', 'Four'],
fill_value=0))
# B X C
# Two 20 0 200
# One 10 0 100
# Four 0 0 0
df = pd.DataFrame({'A': [1, 2], 'B': [10, 20], 'C': [100, 200]},
index=[10, 20])
print(df)
# A B C
# 10 1 10 100
# 20 2 20 200
print(df.reindex(index=[5, 10, 15, 20, 25]))
# A B C
# 5 NaN NaN NaN
# 10 1.0 10.0 100.0
# 15 NaN NaN NaN
# 20 2.0 20.0 200.0
# 25 NaN NaN NaN
print(df.reindex(index=[5, 10, 15, 20, 25], method='bfill'))
# A B C
# 5 1.0 10.0 100.0
# 10 1.0 10.0 100.0
# 15 2.0 20.0 200.0
# 20 2.0 20.0 200.0
# 25 NaN NaN NaN
print(df.reindex(index=[5, 10, 15, 20, 25], method='ffill'))
# A B C
# 5 NaN NaN NaN
# 10 1.0 10.0 100.0
# 15 1.0 10.0 100.0
# 20 2.0 20.0 200.0
# 25 2.0 20.0 200.0
print(df.reindex(index=[5, 10, 15, 20, 25], method='nearest'))
# A B C
# 5 1 10 100
# 10 1 10 100
# 15 2 20 200
# 20 2 20 200
# 25 2 20 200
print(df.reindex(index=[10, 12, 14, 16, 18, 20]))
# A B C
# 10 1.0 10.0 100.0
# 12 NaN NaN NaN
# 14 NaN NaN NaN
# 16 NaN NaN NaN
# 18 NaN NaN NaN
# 20 2.0 20.0 200.0
print(df.reindex(index=[10, 12, 14, 16, 18, 20], method='bfill', limit=2))
# A B C
# 10 1.0 10.0 100.0
# 12 NaN NaN NaN
# 14 NaN NaN NaN
# 16 2.0 20.0 200.0
# 18 2.0 20.0 200.0
# 20 2.0 20.0 200.0
print(df.reindex(index=[25, 5, 15], method='bfill'))
# A B C
# 25 NaN NaN NaN
# 5 1.0 10.0 100.0
# 15 2.0 20.0 200.0
print(df.reindex(index=[5, 15, 25], method='bfill'))
# A B C
# 5 1.0 10.0 100.0
# 15 2.0 20.0 200.0
# 25 NaN NaN NaN
print(df.reindex(index=[5, 10, 15, 20, 25]).fillna(method='bfill'))
# A B C
# 5 1.0 10.0 100.0
# 10 1.0 10.0 100.0
# 15 2.0 20.0 200.0
# 20 2.0 20.0 200.0
# 25 NaN NaN NaN
print(df.reindex(index=[5, 10, 15, 20, 25]).interpolate())
# A B C
# 5 NaN NaN NaN
# 10 1.0 10.0 100.0
# 15 1.5 15.0 150.0
# 20 2.0 20.0 200.0
# 25 2.0 20.0 200.0
print(df.reindex(columns=['A', 'X', 'C'], method='bfill'))
# A X C
# 10 1 NaN 100
# 20 2 NaN 200
print(df.reindex(columns=['A', 'X', 'C']).fillna(method='bfill', axis=1))
# A X C
# 10 1.0 100.0 100.0
# 20 2.0 200.0 200.0
print(df.reindex(columns=['A', 'X', 'C']).interpolate(axis=1))
# A X C
# 10 1.0 50.5 100.0
# 20 2.0 101.0 200.0
df = pd.DataFrame({'A': [1, 2, 3], 'B': [10, 20, 30], 'C': [100, 200, 300]},
index=[20, 10, 30])
print(df)
# A B C
# 20 1 10 100
# 10 2 20 200
# 30 3 30 300
# print(df.reindex(index=[10, 15, 20], method='ffill'))
# ValueError: index must be monotonic increasing or decreasing
print(df.reindex(index=[10, 15, 20]))
# A B C
# 10 2.0 20.0 200.0
# 15 NaN NaN NaN
# 20 1.0 10.0 100.0
print(df.reindex(index=[10, 15, 20]).fillna(method='bfill'))
# A B C
# 10 2.0 20.0 200.0
# 15 1.0 10.0 100.0
# 20 1.0 10.0 100.0
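# A possible alternative to the fillna() workaround above (not in the original): sort the index
# first, so that reindex() with method='ffill' can be used directly on the non-monotonic DataFrame.
print(df.sort_index().reindex(index=[10, 15, 20], method='ffill'))
#      A   B    C
# 10   2  20  200
# 15   2  20  200
# 20   1  10  100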
|
1710801
|
import numpy as np
import scipy as sp
import matplotlib
matplotlib.use('Agg')
from math import ceil
import matplotlib.pyplot as plt
from sys import exit
from scipy.stats import gaussian_kde
import sklearn
import pandas as pd
from scipy.integrate import simps
# sklearn.neighbors.kde was removed in scikit-learn 0.24; use the new import path when available
try:
    from sklearn.neighbors import KernelDensity
except ImportError:
    from sklearn.neighbors.kde import KernelDensity
def kde(z, cdf=False, bandwidth=0.3):
#print(z)
z = np.array(z)
#Set NaN values to 0
z[np.isnan(z)]=0
std = z.std(axis=0)
    # guard against zero or NaN standard deviations (works for both 1-D and 2-D input)
    std = np.where((std == 0) | np.isnan(std), 1, std)
    z = (z - z.mean(axis=0)) / std
factor=1
#euc_dist = np.array([np.sqrt(np.sum((p-np.min(z,axis=0))**2)) for p in z] ).reshape(-1,1)
if len(z.shape) == 2 :
euc_dist = np.mean(z, axis=1).reshape(-1,1)
else :
euc_dist = z.reshape(-1,1)
#print(euc_dist)
kde = KernelDensity(bandwidth=bandwidth).fit(euc_dist)
density = np.exp(kde.score_samples(euc_dist)).reshape(-1,1)
min_euc_dist = min(euc_dist) #* -factor #0
max_euc_dist = max(euc_dist) #* factor
dd = np.linspace(min_euc_dist,max_euc_dist).reshape(-1,1)
n=int(len(dd))
ddx=(max_euc_dist-min_euc_dist)/n
lin_density=np.exp(kde.score_samples(dd)).reshape(-1,1)
n=len(density)
cum_dense=np.zeros(n).reshape(-1,1)
dd_range = range(len(dd))
if not cdf :
for ed,i in zip(euc_dist,range(n)):
cum_dense[i] = np.sum([ lin_density[j] for j in dd_range if abs(dd[j]) > abs(ed) ]) * ddx
return (cum_dense)
for ed,i in zip(euc_dist,range(n)):
cum_dense[i] = np.sum([ lin_density[j] for j in dd_range if dd[j] < ed]) * ddx
return(cum_dense)
def MAD(z):
z = np.array(z)
if len(z.shape) == 1 :
z=z.reshape(-1,1)
z=(z - z.mean(axis=0))/z.std(axis=0)
z=np.apply_along_axis( lambda x : np.sqrt(np.sum(x**2)) , 1, z)
z=abs((z - np.median(z)) / (0.001+np.median(np.abs(z - np.median(z)))))
z= 1/(0.1 + z)
return z
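# Small sanity check (not part of the original): both helpers accept 1-D or 2-D input arrays.
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    samples = rng.normal(size=(100, 3))
    print(kde(samples).shape)   # (100, 1): estimated tail density mass per sample
    print(MAD(samples).shape)   # (100,): inverse robust z-score weights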
|
1710818
|
def sort_k(arr, k):
arr = sorted(arr[:k]) + arr[k:]
for i in range(k, len(arr)):
start, end = i-k+1, i+1
p, n = arr[:start], arr[end:]
sub = sorted(arr[start:end])
arr = p + sub + n
return arr
# Test
assert sort_k([1, 0, 2, 4, 3], 2) == [0, 1, 2, 3, 4]
|
1710825
|
import os
import os.path
from unittest.mock import patch
from programy.rdf.collection import RDFCollection
from programy.storage.stores.file.config import FileStorageConfiguration
from programy.storage.stores.file.config import FileStoreConfiguration
from programy.storage.stores.file.engine import FileStorageEngine
from programy.storage.stores.file.store.rdfs import FileRDFStore
from programytest.storage.asserts.store.assert_rdfs import RDFStoreAsserts
class FileRDFStoreTests(RDFStoreAsserts):
def test_initialise(self):
config = FileStorageConfiguration()
engine = FileStorageEngine(config)
engine.initialise()
store = FileRDFStore(engine)
self.assertEqual(store.storage_engine, engine)
def test_storage_path(self):
config = FileStorageConfiguration()
engine = FileStorageEngine(config)
engine.initialise()
store = FileRDFStore(engine)
        self.assertEqual(['/tmp/rdfs'], store._get_storage_path())
self.assertIsInstance(store.get_storage(), FileStoreConfiguration)
def test_load_from_test_dir_no_subdir(self):
config = FileStorageConfiguration()
config._rdf_storage = FileStoreConfiguration(dirs=[os.path.dirname(__file__) + os.sep + "data" + os.sep + "rdfs" + os.sep + "text"], extension="rdf", subdirs=False, fileformat="text", encoding="utf-8", delete_on_start=False)
engine = FileStorageEngine(config)
engine.initialise()
store = FileRDFStore(engine)
map_collection = RDFCollection()
store.load_all(map_collection)
self.assertTrue(map_collection.contains('ACTIVITY'))
def test_load_from_test_dir_with_subdir(self):
config = FileStorageConfiguration()
config._rdf_storage = FileStoreConfiguration(dirs=[os.path.dirname(__file__) + os.sep + "data" + os.sep + "rdfs" + os.sep + "text"], extension="rdf", subdirs=True, fileformat="text", encoding="utf-8", delete_on_start=False)
engine = FileStorageEngine(config)
engine.initialise()
store = FileRDFStore(engine)
map_collection = RDFCollection()
store.load_all(map_collection)
self.assertTrue(map_collection.contains('ACTIVITY'))
self.assertTrue(map_collection.contains('ANIMAL'))
def patch_load_rdfs_from_file(self, filename, the_set):
raise Exception("Mock Exception")
@patch("programy.storage.stores.file.store.rdfs.FileRDFStore._load_rdfs_from_file", patch_load_rdfs_from_file)
def test_load_with_exception(self):
config = FileStorageConfiguration()
config._rdf_storage = FileStoreConfiguration(dirs=[os.path.dirname(__file__) + os.sep + "data" + os.sep + "rdfs" + os.sep + "text"], extension="rdf", subdirs=True, fileformat="text", encoding="utf-8", delete_on_start=False)
engine = FileStorageEngine(config)
engine.initialise()
store = FileRDFStore(engine)
map_collection = RDFCollection()
store.load_all(map_collection)
self.assertFalse(map_collection.contains('ACTIVITY'))
self.assertFalse(map_collection.contains('ANIMAL'))
def test_reload(self):
config = FileStorageConfiguration()
config._rdf_storage = FileStoreConfiguration(dirs=[os.path.dirname(__file__) + os.sep + "data" + os.sep + "rdfs" + os.sep + "text"], extension="rdf", subdirs=False, fileformat="text", encoding="utf-8", delete_on_start=False)
engine = FileStorageEngine(config)
engine.initialise()
store = FileRDFStore(engine)
map_collection = RDFCollection()
store.load_all(map_collection)
self.assertTrue(map_collection.contains('ACTIVITY'))
store.reload(map_collection, 'ACTIVITY')
self.assertTrue(map_collection.contains('ACTIVITY'))
|
1710826
|
import gossip
@gossip.register('slash.session_start')
def session_start():
raise KeyboardInterrupt()
def test_1():
pass
|
1710835
|
import nltk
from collections import defaultdict
class PcfgEstimator:
"""
Estimates the production probabilities of a PCFG
"""
def __init__(self):
self._counts = defaultdict(nltk.FreqDist)
def add_sentence(self, sentence):
"""
Add the sentence to the dataset
"""
assert isinstance(sentence, nltk.tree.Tree), "Can only add counts from a tree"
# FINISH THIS!
def query(self, lhs, rhs):
"""
Returns the MLE probability of this production
"""
return self._counts[lhs].freq(rhs)
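# A possible way to finish add_sentence (a sketch, not necessarily the intended solution):
# count every production of the parse tree, so that query() returns its relative frequency.
#
#     for production in sentence.productions():
#         self._counts[production.lhs()][production.rhs()] += 1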
|
1710870
|
def pytest_addoption(parser):
parser.addoption("--corpus-file", action="store", default="Corpus to test")
parser.addoption("--morphem-file", action="store", default="Morphem dict to test")
def pytest_generate_tests(metafunc):
option_value = metafunc.config.option.corpus_file
if 'corpus_file' in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("corpus_file", [option_value])
option_value = metafunc.config.option.morphem_file
if 'morphem_file' in metafunc.fixturenames and option_value is not None:
metafunc.parametrize("morphem_file", [option_value])
|
1710886
|
class Topology:
def is_circular(self):
"""
Is the path circular? In this case the number of CIGARs must be
equal to the number of segments.
Returns
-------
bool
"""
return len(self.overlaps) == len(self.segment_names)
def is_linear(self):
"""
Is the path linear? This is the case when the number of CIGARs
is equal to the number of segments minus 1, or the CIGARs are
represented by a single "*".
"""
return not self.is_circular()
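# A minimal sketch (not part of the original; it assumes `segment_names` and `overlaps` are plain
# lists filled in elsewhere, e.g. by a GFA path parser): equal counts mean circular, otherwise linear.
if __name__ == "__main__":
    path = Topology()
    path.segment_names = ["s1+", "s2-", "s3+"]
    path.overlaps = ["10M", "8M", "12M"]   # one CIGAR per segment -> circular
    assert path.is_circular()
    path.overlaps = ["10M", "8M"]          # one CIGAR fewer than segments -> linear
    assert path.is_linear()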
|
1710889
|
import numpy as np
import pytest
import scico.scipy.special as ss
from scico.random import randn
# these are functions that take only a single ndarray as input
one_arg_funcs = [
ss.digamma,
ss.entr,
ss.erf,
ss.erfc,
ss.erfinv,
ss.expit,
ss.gammaln,
ss.i0,
ss.i0e,
ss.i1,
ss.i1e,
ss.ndtr,
ss.log_ndtr,
ss.logit,
ss.ndtri,
]
@pytest.mark.parametrize("func", one_arg_funcs)
def test_one_arg_funcs(func):
# blockarray array
x, key = randn(((8, 8), (4,)), key=None)
Fx = func(x)
fx0 = func(x[0])
fx1 = func(x[1])
np.testing.assert_allclose(Fx[0].ravel(), fx0.ravel(), rtol=1e-4)
np.testing.assert_allclose(Fx[1].ravel(), fx1.ravel(), rtol=1e-4)
def test_betainc():
a, key = randn(((8, 8), (4,)), key=None)
b, key = randn(((8, 8), (4,)), key=key)
x, key = randn(((8, 8), (4,)), key=key)
Fx = ss.betainc(a, b, x)
fx0 = ss.betainc(a[0], b[0], x[0])
fx1 = ss.betainc(a[1], b[1], x[1])
np.testing.assert_allclose(Fx[0].ravel(), fx0.ravel(), rtol=1e-4)
np.testing.assert_allclose(Fx[1].ravel(), fx1.ravel(), rtol=1e-4)
@pytest.mark.parametrize("func", [ss.gammainc, ss.gammaincc])
def test_gammainc(func):
a, key = randn(((8, 8), (4,)), key=None)
b, key = randn(((8, 8), (4,)), key=key)
x, key = randn(((8, 8), (4,)), key=key)
    Fx = func(a, x)
    fx0 = func(a[0], x[0])
    fx1 = func(a[1], x[1])
np.testing.assert_allclose(Fx[0].ravel(), fx0.ravel(), rtol=1e-4)
np.testing.assert_allclose(Fx[1].ravel(), fx1.ravel(), rtol=1e-4)
def test_multigammaln():
x, key = randn(((8, 8), (4,)), key=None)
d = 2
Fx = ss.multigammaln(x, d)
fx0 = ss.multigammaln(x[0], d)
fx1 = ss.multigammaln(x[1], d)
np.testing.assert_allclose(Fx[0].ravel(), fx0.ravel(), rtol=1e-4)
np.testing.assert_allclose(Fx[1].ravel(), fx1.ravel(), rtol=1e-4)
@pytest.mark.parametrize("func", [ss.xlog1py, ss.xlogy])
def test_logs(func):
x, key = randn(((8, 8), (4,)), key=None)
y, key = randn(((8, 8), (4,)), key=key)
Fx = func(x, y)
fx0 = func(x[0], y[0])
fx1 = func(x[1], y[1])
np.testing.assert_allclose(Fx[0].ravel(), fx0.ravel(), rtol=1e-4)
np.testing.assert_allclose(Fx[1].ravel(), fx1.ravel(), rtol=1e-4)
def test_zeta():
x, key = randn(((8, 8), (4,)), key=None)
y, key = randn(((8, 8), (4,)), key=None)
Fx = ss.zeta(x, y)
fx0 = ss.zeta(x[0], y[0])
fx1 = ss.zeta(x[1], y[1])
np.testing.assert_allclose(Fx[0].ravel(), fx0.ravel(), rtol=1e-4)
np.testing.assert_allclose(Fx[1].ravel(), fx1.ravel(), rtol=1e-4)
|
1710904
|
import pandas as pd
from sklearn.model_selection import KFold
if __name__ == "__main__":
train_df = pd.read_csv("./crohme-train/train.csv")
kfold = KFold(n_splits=10, shuffle=True, random_state=1337)
train_idx, val_idx = list(kfold.split(train_df))[0]
train_df, val_df = (
train_df.iloc[train_idx].reset_index(),
train_df.iloc[val_idx].reset_index(),
)
train_df.to_csv("./crohme-train/train.csv")
val_df.to_csv("./crohme-train/val.csv")
|
1710981
|
from __future__ import unicode_literals
from django.apps import AppConfig
class ThreadsConfig(AppConfig):
name = 'threads'
|
1711011
|
import os
import sys
root_path = os.path.dirname(os.path.dirname(os.getcwd()))
if root_path not in sys.path: sys.path.append(root_path)
import numpy as np
import tensorflow as tf
from DeepSparseCoding.tf1x.data.dataset import Dataset
import DeepSparseCoding.tf1x.utils.data_processing as dp
import DeepSparseCoding.tf1x.analysis.analysis_picker as ap
import response_contour_analysis.utils.model_handling as model_handling
import response_contour_analysis.utils.dataset_generation as iso_data
import response_contour_analysis.utils.histogram_analysis as hist_funcs
def get_dsc_activations_cell(analyzer, images, neuron, batch_size=10, activation_operation=None):
"""
Returns the activations from a model for given input images
Parameters:
analyzer [DSC analyzer object] an object from the DeepSparseCoding library
images [np.ndarray] of size NumImages x W x H
neuron [int or vector of ints] that points to the neuron index
        batch_size [int] specifying the batch size to use for getting the neuron activations
activation_operation [function] to be used if the DSC model has a unique function handle for getting neuron activations (e.g. in the case of lca_subspace)
Output:
activations [np.ndarray] vector of length len(neuron)
"""
images = dp.reshape_data(images[..., None], flatten=analyzer.model.params.vectorize_data)[0]
activations = analyzer.compute_activations(images, batch_size, activation_operation)[:, neuron]
return activations
def load_analyzer(params):
analyzer = ap.get_analyzer(params.model_type)
analyzer.setup(params)
analyzer.model.setup(analyzer.model_params)
analyzer.load_analysis(save_info=params.save_info)
return analyzer
class lca_512_vh_params(object):
def __init__(self):
self.model_type = "lca"
self.model_name = "lca_512_vh"
self.display_name = "Sparse Coding 512"
self.version = "0.0"
#self.save_info = "analysis_train_carlini_targeted"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class lca_768_vh_params(object):
def __init__(self):
self.model_type = "lca"
self.model_name = "lca_768_vh"
self.display_name = "Sparse Coding 768"
self.version = "0.0"
#self.save_info = "analysis_train_carlini_targeted"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class lca_1024_vh_params(object):
def __init__(self):
self.model_type = "lca"
self.model_name = "lca_1024_vh"
self.display_name = "Sparse Coding 1024"
self.version = "0.0"
#self.save_info = "analysis_train_carlini_targeted"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class lca_2560_vh_params(object):
def __init__(self):
self.model_type = "lca"
self.model_name = "lca_2560_vh"
self.display_name = "Sparse Coding 2560"
self.version = "0.0"
#self.save_info = "analysis_train_kurakin_targeted"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class ae_768_vh_params(object):
def __init__(self):
self.model_type = "ae"
self.model_name = "ae_768_vh"
self.display_name = "ReLU Autoencoder 768"
self.version = "1.0"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class sae_768_vh_params(object):
def __init__(self):
self.model_type = "sae"
self.model_name = "sae_768_vh"
self.display_name = "Sparse Autoencoder 768"
self.version = "1.0"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class rica_768_vh_params(object):
def __init__(self):
self.model_type = "rica"
self.model_name = "rica_768_vh"
self.display_name = "Linear Autoencoder 768"
self.version = "0.0"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class lca_768_mnist_params(object):
def __init__(self):
self.model_type = "lca"
self.model_name = "lca_768_mnist"
self.display_name = "Sparse Coding 768"
self.version = "0.0"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class lca_1536_mnist_params(object):
def __init__(self):
self.model_type = "lca"
self.model_name = "lca_1536_mnist"
self.display_name = "Sparse Coding 1536"
self.version = "0.0"
self.save_info = "analysis_test_carlini_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class ae_768_mnist_params(object):
def __init__(self):
self.model_type = "ae"
self.model_name = "ae_768_mnist"
self.display_name = "ReLU Autoencoder 768"
self.version = "0.0"
self.save_info = "analysis_test_carlini_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class sae_768_mnist_params(object):
def __init__(self):
self.model_type = "sae"
self.model_name = "sae_768_mnist"
self.display_name = "Sparse Autoencoder 768"
self.version = "0.0"
self.save_info = "analysis_test_carlini_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class rica_768_mnist_params(object):
def __init__(self):
self.model_type = "rica"
self.model_name = "rica_768_mnist"
self.display_name = "Linear Autoencoder 768"
self.version = "0.0"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class ae_deep_mnist_params(object):
def __init__(self):
self.model_type = "ae"
self.model_name = "ae_deep_mnist"
self.display_name = "ReLU Autoencoder 768"
self.version = "0.0"
self.save_info = "analysis_test_carlini_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = False
self.model_dir = (root_path+'/Projects/'+self.model_name)
class lca_subspace_params(object):
def __init__(self):
self.model_type = "lca_subspace"
self.model_name = "lca_subspace_vh"
self.display_name = "SSC"
self.version = "3.0"
self.save_info = "analysis_train_kurakin_targeted"
self.overwrite_analysis_log = False
self.use_group_activations = True
self.model_dir = (root_path+'/Projects/'+self.model_name)
if __name__ == "__main__":
print("Loading models...")
cont_analysis = {}
cont_analysis['min_angle'] = 15
cont_analysis['batch_size'] = 100
cont_analysis['vh_image_scale'] = 31.773287 # Mean of the l2 norm of the training set
cont_analysis['comparison_method'] = 'closest' # rand or closest
cont_analysis['num_neurons'] = 100 # How many neurons to plot
cont_analysis['num_comparisons'] = 300 # How many planes to construct (None is all of them)
cont_analysis['x_range'] = [-2.0, 2.0]
cont_analysis['y_range'] = [-2.0, 2.0]
cont_analysis['num_images'] = int(30**2)
cont_analysis['params_list'] = [lca_512_vh_params()]
#cont_analysis['params_list'] = [lca_768_vh_params()]
#cont_analysis['params_list'] = [lca_1024_vh_params()]
#cont_analysis['params_list'] = [lca_2560_vh_params()]
#cont_analysis['iso_save_name'] = "iso_curvature_xrange1.3_yrange-2.2_"
#cont_analysis['iso_save_name'] = "iso_curvature_ryan_"
cont_analysis['iso_save_name'] = "rescaled_closecomp_"
#cont_analysis['iso_save_name'] = ''
# (note: `save_root` and `params` are only defined later, inside the per-analyzer loop below)
np.savez(save_root+'iso_params_'+cont_analysis['iso_save_name']+params.save_info+".npz",
data=cont_analysis)
analyzer_list = [load_analyzer(params) for params in cont_analysis['params_list']]
for analyzer, params in zip(analyzer_list, cont_analysis['params_list']):
print(analyzer.analysis_params.display_name)
print("Computing the iso-response vectors...")
cont_analysis['target_neuron_ids'] = iso_data.get_rand_target_neuron_ids(
cont_analysis['num_neurons'], analyzer.model.params.num_neurons)
neuron_weights = [analyzer.bf_stats["basis_functions"][idx]
for idx in range(len(analyzer.bf_stats["basis_functions"]))]
analyzer.target_neuron_ids = cont_analysis['target_neuron_ids']
rand_outputs = iso_data.compute_rand_vectors(
neuron_weights,
cont_analysis["num_comparisons"])
analyzer.rand_target_vectors = rand_outputs[0]
analyzer.rand_orth_vectors = rand_outputs[1]
comp_outputs = iso_data.compute_comp_vectors(
neuron_weights,
cont_analysis['target_neuron_ids'],
cont_analysis['min_angle'],
cont_analysis['num_comparisons'],
cont_analysis['comparison_method'])
analyzer.comparison_neuron_ids = comp_outputs[0]
analyzer.comparison_target_vectors = comp_outputs[1]
analyzer.comparison_vectors = comp_outputs[2]
analyzer.target_vectors = analyzer.comparison_target_vectors
assert len(analyzer.comparison_neuron_ids) == cont_analysis['num_neurons'], (
"Incorrect number of comparison vectors")
for comparison_ids_list in analyzer.comparison_neuron_ids:
assert len(comparison_ids_list) >= cont_analysis['num_comparisons'], (
"Not enough comparison vectors.")
key_list = ["target_neuron_ids", "comparison_neuron_ids", "target_vectors",
"rand_orth_vectors", "comparison_vectors"]
val_list = [analyzer.target_neuron_ids, analyzer.comparison_neuron_ids, analyzer.target_vectors,
analyzer.rand_orth_vectors, analyzer.comparison_vectors]
iso_vectors = dict(zip(key_list, val_list))
np.savez(analyzer.analysis_out_dir+"savefiles/iso_vectors_"+cont_analysis['iso_save_name']+params.save_info+".npz",
data=iso_vectors)
for use_rand_orth_vects, rand_str in zip([True, False], ["rand", "comparison"]):
print("Generating "+rand_str+" dataset...")
comp_vects = analyzer.rand_orth_vectors if use_rand_orth_vects else analyzer.comparison_vectors
contour_dataset, datapoints = iso_data.get_contour_dataset(
analyzer.target_vectors, comp_vects, cont_analysis['x_range'], cont_analysis['y_range'],
cont_analysis['num_images'], cont_analysis['vh_image_scale'])
print("Computing network activations for "+rand_str+" dataset...")
if params.use_group_activations:
activation_operation = analyzer.model.get_reshaped_group_activity
else:
activation_operation = None
activation_function_kwargs = {
'activation_operation': activation_operation,
'batch_size': cont_analysis['batch_size']
}
activations = model_handling.get_normalized_activations(
analyzer,
cont_analysis["target_neuron_ids"],
datapoints,
get_dsc_activations_cell,
activation_function_kwargs)
save_root=analyzer.analysis_out_dir+'savefiles/'
if use_rand_orth_vects:
np.savez(save_root+'iso_rand_activations_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
data=activations)
np.savez(save_root+'iso_rand_contour_dataset_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
data=contour_dataset)
else:
np.savez(save_root+'iso_comp_activations_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
data=activations)
np.savez(save_root+'iso_comp_contour_dataset_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
data=contour_dataset)
cont_analysis['comparison_neuron_ids'] = analyzer.comparison_neuron_ids
cont_analysis['contour_dataset'] = contour_dataset
# (note: 'activations' and 'target_act' are not stored in cont_analysis above; the
# activations computed in the loop, and a target activation level, are expected here)
curvatures, fits = hist_funcs.iso_response_curvature_poly_fits(
cont_analysis['activations'],
target_act=cont_analysis['target_act'],
measure_upper_right=False
)
cont_analysis['curvatures'] = np.stack(np.stack(curvatures, axis=0), axis=0)
np.savez(save_root+'group_iso_vectors_'+cont_analysis['iso_save_name']+params.save_info+'.npz',
data=cont_analysis)
|
1711028
|
pv = NXPlotView('Chopper Plots')
phi=np.linspace(5.,95.,10)
chopper.entry.data[phi[0]:phi[0]+10].sum(0).plot(xmin=1900,xmax=2600,ymax=6000)
for i in range(10):
(chopper.entry.data[phi[i]:phi[i]+10].sum(0)+500*i).oplot()
|
1711031
|
import boto3
import paramiko
import json
import os
import random
import sys
import time
client = boto3.client('ec2')
# if len(sys.argv) != 2:
# print 'usage: {} <number_of_instances>'.format(sys.argv[0])
# sys.exit(1)
# BEGIN CONFIGURABLE
# config stuff
port = 2720
# num_instances = int(sys.argv[1])
chain_len = 3
payload_filename = 'benchmark.conf'
git_key_filename = 'derek-git-token.txt'
key_filename = 'derek-stadium.pem'
# END CONFIGURABLE
outstanding_requests = client.describe_spot_fleet_requests()['SpotFleetRequestConfigs']
active = [x for x in outstanding_requests if x['SpotFleetRequestState'] == 'active']
assert len(active) == 1
rid = active[0]['SpotFleetRequestId']
response = client.describe_spot_fleet_instances(
SpotFleetRequestId = rid
)
instances = response['ActiveInstances']
iids = [x['InstanceId'] for x in instances]
response = client.describe_instances(
InstanceIds = iids
)
reservations = response['Reservations']
instances = []
for r in reservations:
i = r['Instances']
instances = instances + i
print len(instances)
# assert len(reservations) == 1
# print 1 / 0
# instances = response['Reservations'][0]['Instances']
# assert len(instances) == num_instances
print 'instance IPs:'
ips = [x['PublicIpAddress'] for x in instances]
for ip in ips:
print ip
from multiprocessing.dummy import Pool
from threading import Lock
blacklist = ['172.16.58.3']
def benchmark_one_stadium_round(ips, chain_len, num_machines, num_msgs, result_filename):
if num_machines > len(ips):
raise Exception('not enough machines for test! given {} but need {}'.format(len(ips), num_machines))
ips = [x for x in ips if x not in blacklist]
ips = random.sample(ips, num_machines)
listen_ips = [x + ':2720' for x in ips]
config = {
"ListenAddrs": listen_ips,
"ChainLen": chain_len,
"NumMsgs": num_msgs,
}
sys.stderr.write('running round with config {}\n'.format(config))
with open(payload_filename, 'w') as f:
json.dump(config, f)
with open(git_key_filename) as f:
token = f.read().strip()
server_commands = ''' # paramiko: remote server commands
export PATH=$PATH:$HOME/.local/bin:$HOME/bin:/usr/local/go/bin;
export GOPATH=$HOME/go;
export LD_LIBRARY_PATH=/usr/local/lib/:$HOME/go/src/stadium/groth;
cd ~/go/src/stadium/groth;
git config remote.origin.url https://derbear:{}@github.com/nirvantyagi/stadium.git
git fetch;
git checkout -- ..;
git checkout master;
git branch -D benchmark;
git reset --hard origin/benchmark;
git checkout benchmark;
git checkout -- ..;
# make &> output.log;
go install stadium/groth stadium/stadium stadium/coordinator stadium/server;
cd ~/go/src/stadium/groth;
'''.format(token)
k = paramiko.RSAKey.from_private_key_file(key_filename)
def copyover(args):
i, ip = args
try:
c = paramiko.SSHClient()
c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
c.connect(hostname = ip, username = 'ec2-user', pkey = k)
c.exec_command('pkill server')
c.exec_command('pkill python')
c.exec_command('pkill collectl')
c.exec_command('rm {}'.format(payload_filename))
c.exec_command('rm round.out')
cmd = 'scp -i {} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {} ec2-user@{}:~'.format(key_filename, payload_filename, ip)
print cmd
os.system(cmd)
run_cmd = 'nohup python ~/go/src/stadium/aws/aws_benchmark.py ~/{} {} > ~/benchmark.out 2> ~/benchmark.err'.format(payload_filename, i)
cmd = server_commands + run_cmd
# print cmd
c.exec_command(cmd)
time.sleep(5)
c.close()
print 'execute remote command (omitted from print) on server', i, ":", ip
except Exception as e:
print 'failure on ', i, ':', ip
raise e
p = Pool(len(ips))
p.map(copyover, enumerate(ips))
print 'remote servers (hopefully) set up; launching stadium round now!'
cmd = 'cp {} {}.conf'.format(payload_filename, result_filename)
print cmd
os.system(cmd)
cmd = '~/go/bin/coordinator -conf {} > {}'.format(payload_filename, result_filename)
print cmd
os.system(cmd)
cmd = 'cp roundstats.json {}.roundstats.json'.format(result_filename)
print cmd
os.system(cmd)
# for i, ip in enumerate(ips):
# cmd = 'scp -i {} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null ec2-user@{}:~/benchmark.collectl net_{}.dat'.format(key_filename, ip, ip)
# print cmd
# collectl is huge and not really useful
# os.system(cmd)
# os.system('tail -n +1 net*.dat > {}.net.dat'.format(result_filename))
# os.system('rm net*.dat')
time.sleep(2)
def experimentM():
'Server upper bound probe'
ips0 = [x for x in ips if x not in blacklist]
M = len(ips0)
chain_len = 8
trial_num = 0
num_msgs = 300000
benchmark_one_stadium_round(ips0, chain_len, M, num_msgs, "results-{:02}-{:02}-{:02}.txt".format(chain_len, M, trial_num))
def experimentT():
'Cheap dry run to build blacklist'
ips0 = [x for x in ips if x not in blacklist]
M = len(ips0)
chain_len = 3
trial_num = 0
num_msgs = 10000
benchmark_one_stadium_round(ips0, chain_len, M, num_msgs, "results-{:02}-{:02}-{:02}.txt".format(chain_len, M, trial_num))
def experiment1():
'Varying chain lengths with 12 machines'
num_machines = 12
num_msgs = 100000
for chain_len in range(12, 2, -1):
for trial_num in range(1, 4):
benchmark_one_stadium_round(ips, chain_len, num_machines, num_msgs, "results-{:08}-{:02}-{:02}-{:02}.txt".format(num_msgs, chain_len, num_machines, trial_num))
try:
os.system('mkdir experiment/1')
except:
pass
os.system('mv results* experiment/1')
def experiment2():
'Varying number of machines with chain lengths 3, 8, and 12'
num_msgs = 200000
for chain_len in [3, 8, 12]:
for num_machines in range(chain_len, 13):
for trial_num in range(1, 4):
benchmark_one_stadium_round(ips, chain_len, num_machines, num_msgs, "results-{:08}-{:02}-{:02}-{:02}.txt".format(num_msgs, chain_len, num_machines, trial_num))
try:
os.system('mkdir experiment/2')
except:
pass
os.system('mv results* experiment/2')
def experiment3():
'''
Varying total number of messages (100K, 150K, 200K, 500K, 750K, 1M, 2M, 5M, 7.5M, 10M),
fixed chain length of 8,
fixed number of servers 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 100
'''
num_msgs = [100, 150, 200, 500, 750, 1000, 2000, 5000, 7500, 10000]
num_msgs = [x * 1000 for x in num_msgs]
num_msgs = num_msgs[::-1]
# num_machines = [10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 100]
num_machines = [10, 20, 25, 30]
num_machines = num_machines[::-1]
chain_len = 8
trial_num = 0
for m in num_machines:
for n0 in num_msgs:
n = n0 / m
if n > 330000:
continue
print m, n
benchmark_one_stadium_round(ips, chain_len, m, n, "results-{:08}-{:02}-{:02}-{:02}.txt".format(n0, chain_len, m, trial_num))
try:
os.system('mkdir experiment/3')
except:
pass
os.system('mv results* experiment/3')
def experiment4():
'Varying number of cores for M messages'
pass
import datetime
now = datetime.datetime.now
print 'starting experiments. time is', now()
try:
os.system('mkdir experiment')
except:
pass
print 'starting experiment 1. time is', now()
experiment1()
# print 'starting experiment 2. time is', now()
# experiment2()
# print 'starting experiment 3. time is', now()
# experiment3()
# print 'starting light experiment. time is', now()
# experimentT()
# print 'starting probe experiment. time is', now()
# experimentM()
print 'finished experiments. time is', now()
|
1711037
|
import RutishauserLabtoNWB.events.newolddelay.python.analysis.helper as helper
import scipy.stats as stats
import logging
import os
import matplotlib.pyplot as plt
import numpy as np
def plot_behavioral_graphs(NWBFilePath):
"""
Behavioral analysis, plotting six graphs:
1. Probability of responses
2. ROC curves for different sessions
3. Overall performance
4. Histogram of AUC
5. Accuracy over confidence high low
6. Confidence level over correctness of responses
"""
# Get NWB files
allNWBfiles = os.listdir(NWBFilePath)
# Get NWB file paths, append them
filenames = []
for singleNWBfile in allNWBfiles:
NWBfile = os.path.join(NWBFilePath, singleNWBfile)
if not os.path.exists((NWBfile)):
print('This file does not exist: {}'.format(NWBfile))
else:
filenames.append(str(NWBfile))
# Get all the nwb file names from the data folder
#path_analysis = os.path.abspath('analysis')
#path_data = (os.path.dirname(('{}').format(os.path.dirname(path_analysis))))
#path_data = ('{}' + '/' + 'data').format(path_data)
#filenames = helper.get_nwbfile_names(path_data)
n = 0
# make the subplots
fig, axs = plt.subplots(nrows=2, ncols=3, sharex=False, sharey=False, figsize=(20, 10))
# Place holder ready to store separate the new and old response
response_1_old = []
response_2_old = []
response_3_old = []
response_4_old = []
response_5_old = []
response_6_old = []
response_1_new = []
response_2_new = []
response_3_new = []
response_4_new = []
response_5_new = []
response_6_new = []
# Placeholder for overall performance
all_performances = []
# Placeholder for aucs
all_auc = []
# Placeholder for accuracies for different confidence level
accuracies_high = []
accuracies_low = []
accuracies_all = []
# Placeholder for mean confidences over correctness
m_conf_all = []
for filename in filenames:
try:
print('processing file: ' + filename)
nwbfile = helper.read(filename)
except ValueError as e:
print('Problem opening the file: ' + str(e))
logging.warning('Error File: ' + filename + ':' + str(e))
continue
except OSError as e:
print('Problem opening the file:' + str(e))
logging.warning('Error File ' + filename + ':' + str(e))
continue
recog_response = helper.extract_recog_responses(nwbfile)
ground_truth = helper.extract_new_old_label(nwbfile)
if len(recog_response) != len(ground_truth):
print('response length not equal to ground truth, skipped this session')
continue
else:
recog_response_old = recog_response[ground_truth == 1]
n = n + 1
# Calculate the percentage of each responses
response_1_old.append(np.sum(recog_response_old == 1) / len(recog_response_old))
response_2_old.append(np.sum(recog_response_old == 2) / len(recog_response_old))
response_3_old.append(np.sum(recog_response_old == 3) / len(recog_response_old))
response_4_old.append(np.sum(recog_response_old == 4) / len(recog_response_old))
response_5_old.append(np.sum(recog_response_old == 5) / len(recog_response_old))
response_6_old.append(np.sum(recog_response_old == 6) / len(recog_response_old))
recog_response_new = recog_response[ground_truth == 0]
response_1_new.append(np.sum(recog_response_new == 1) / len(recog_response_new))
response_2_new.append(np.sum(recog_response_new == 2) / len(recog_response_new))
response_3_new.append(np.sum(recog_response_new == 3) / len(recog_response_new))
response_4_new.append(np.sum(recog_response_new == 4) / len(recog_response_new))
response_5_new.append(np.sum(recog_response_new == 5) / len(recog_response_new))
response_6_new.append(np.sum(recog_response_new == 6) / len(recog_response_new))
# Calculate the cumulative d and plot the cumulative ROC curve
stats_all = helper.cal_cumulative_d(nwbfile)
x = stats_all[0:5, 4]
y = stats_all[0:5, 3]
axs[0, 1].plot(x, y, marker='.', color='grey', alpha=0.5)
axs[0, 1].set_ylim(0, 1)
axs[0, 1].set_xlim(0, 1)
# Get the overall performance
all_performances.append([stats_all[2, 4], stats_all[2, 3]])
# Calculate the auc
auc = helper.cal_auc(stats_all)
all_auc.append(auc)
# Check if this session should be included in the accuracies over high low section
is_included = helper.check_inclusion(recog_response, auc)
# Calculate the accuracies for high low confidence
if is_included:
split_status, split_mode, ind_TP_high, ind_TP_low, ind_FP_high, ind_FP_low, ind_TN_high, \
ind_TN_low, ind_FN_high, ind_FN_low, n_response = helper.dynamic_split(recog_response, ground_truth)
nr_TN_high = len(ind_TN_high[0])
nr_TP_high = len(ind_TP_high[0])
nr_TN_all = len(ind_TN_high[0]) + len(ind_TN_low[0])
nr_TP_all = len(ind_TP_high[0]) + len(ind_TP_low[0])
nr_TP_low = len(ind_TP_low[0])
nr_TN_low = len(ind_TN_low[0])
nr_high_response = len(ind_TN_high[0]) + len(ind_TP_high[0]) + len(ind_FN_high[0]) + len(ind_FP_high[0])
nr_low_response = len(ind_TN_low[0]) + len(ind_TP_low[0]) + len(ind_FN_low[0]) + len(ind_FP_low[0])
# print(nr_low_response)
# print(len(ind_TN_low[0]))
# print(len(ind_TP_low[0]))
# print(len(ind_FN_low[0]))
# print(len(ind_FP_low[0]))
per_accuracy_high = (nr_TN_high + nr_TP_high) / nr_high_response
per_accuracy_low = (nr_TN_low + nr_TP_low) / nr_low_response
per_accuracy_all = (nr_TN_all + nr_TP_all) / n_response
accuracies_high.append(per_accuracy_high * 100)
accuracies_low.append(per_accuracy_low * 100)
accuracies_all.append(per_accuracy_all * 100)
# get correct/incorrect indexes
correct_inds, incorrect_inds = helper.correct_incorrect_indexes(recog_response, ground_truth)
# remap response
remapped_response = helper.remap_response(recog_response)
# Get the mean confidence for correctness
m_conf_all.append([np.mean(remapped_response[correct_inds]), np.mean(remapped_response[incorrect_inds])])
# Plot the percentage responses
response_old = np.asarray([response_1_old, response_2_old, response_3_old, response_4_old,
response_5_old, response_6_old])
response_new = np.asarray([response_1_new, response_2_new, response_3_new, response_4_new,
response_5_new, response_6_new])
response_percentage_old = np.mean(response_old, axis=1)
std_old = np.std(response_old, axis=1)
se_old = std_old/np.sqrt(n)
response_percentage_new = np.mean(response_new, axis=1)
std_new = np.std(response_new, axis=1)
se_new = std_new/np.sqrt(n)
x = [i for i in range(1, 7, 1)]
axs[0, 0].errorbar(x, response_percentage_old, yerr=se_old, color='blue', label='old stimuli')
axs[0, 0].errorbar(x, response_percentage_new, yerr=se_new, color='red', label='new stimuli')
axs[0, 0].legend()
axs[0, 0].set_xlabel('Confidence')
axs[0, 0].set_ylabel('Probability of Response')
axs[0, 0].set_title('n=' + str(len(filenames)) + ' sessions')
# Other settings for cumulative ROC
axs[0, 1].plot([0, 1], [0, 1], color='black', alpha=0.7)
axs[0, 1].set_xlabel('false alarm rate')
axs[0, 1].set_ylabel('hit rate')
axs[0, 1].set_title('average roc')
# Calculate the average and overall performance
avg_performance = np.average(all_performances, axis=0)
std_performance = np.std(all_performances, axis=0)
# Plot the overall performance
for performance in all_performances:
axs[0, 2].plot(performance[0], performance[1], marker='.', color='grey', alpha=0.6)
axs[0, 2].set_ylim(0, 1)
axs[0, 2].set_xlim(0, 1)
axs[0, 2].plot([0, 1], [0, 1], color='black', alpha=0.7)
axs[0, 2].errorbar(avg_performance[0], avg_performance[1], std_performance[1], std_performance[0])
axs[0, 2].set_xlabel('false alarm rate')
axs[0, 2].set_ylabel('hit rate')
axs[0, 2].set_title('Overall Performance mTP=' + str(avg_performance[0]) + ' mFP=' + str(avg_performance[1]))
# Plot AUC histogram
m_auc = np.mean(all_auc)
axs[1, 0].hist(all_auc, 15, histtype='bar')
axs[1, 0].set_xlim(0.5, 1)
axs[1, 0].set_xlabel('AUC')
axs[1, 0].set_ylabel('nr of subjects')
axs[1, 0].set_title('AUC m=' + str(m_auc))
# Plot the accuracies of different confidence level
p1 = stats.ttest_1samp(accuracies_high, 50)[1]
p2 = stats.ttest_1samp(accuracies_low, 50)[1]
x_axis_label_high = 'high p=' + str(p1)
x_axis_label_low = 'low p=' + str(p2)
x_axis = [x_axis_label_high, x_axis_label_low]
for i in range(len(accuracies_high)):
axs[1, 1].plot(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o', alpha=0.5)
axs[1, 1].plot(x_axis, [50, 50], color='black')
axs[1, 1].set_ylim([0, 100])
tstat, p_val = stats.ttest_ind(accuracies_high, accuracies_low, equal_var=False)
axs[1, 1].set_title('p=' + str(p_val))
axs[1, 1].set_xlabel('confidence p vs. 50%')
axs[1, 1].set_ylabel('accuracy % correct')
# Calculate the mean and standard deviation for the confidence for correctness level
m_conf_all = np.asarray(m_conf_all)
m_conf = np.mean(m_conf_all, axis=0)
std_conf = np.std(m_conf_all, axis=0)
n = m_conf_all.shape[0]
se_conf = std_conf/np.sqrt(n)
tstat, p_val = stats.ttest_ind(m_conf_all[:, 0], m_conf_all[:, 1], equal_var=False)
axs[1, 2].bar(['correct', 'incorrect'], m_conf, yerr=se_conf)
axs[1, 2].set_ylabel('confidence 1=high, 3=guess')
axs[1, 2].set_title('pT2=' + str(p_val) + ' n=' + str(n))
plt.show()
# Functions that plot the graphs separately.
def plot_prob_response():
"""
Plot single graph of probability of response
"""
filenames = helper.get_nwbfile_names("../data")
x = [i for i in range(1, 7, 1)]
response_percentage_old, std_old, response_percentage_new, std_new = helper.extract_probability_response(filenames)
#type="old")
plt.errorbar(x, response_percentage_old, yerr=std_old, color='blue', label='old stimuli')
plt.errorbar(x, response_percentage_new, yerr=std_new, color='red', label='new stimuli')
plt.legend(bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
plt.xlabel('Confidence')
plt.ylabel('Probability of Response')
plt.title('n=' + str(len(filenames)) + ' sessions')
plt.show()
def plot_cumulative_roc():
"""
Plot the cumulative roc
"""
filenames = helper.get_nwbfile_names("../data")
for filename in filenames:
nwbfile = helper.read(filename)
stats_all = helper.cal_cumulative_d(nwbfile)
x = stats_all[0:5, 4]
y = stats_all[0:5, 3]
plt.plot(x, y, marker='.', color='grey', alpha=0.5)
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.plot([0, 1], [0, 1], color='black', alpha=0.7)
plt.xlabel('false alarm rate')
plt.ylabel('hit rate')
plt.title('average roc')
plt.show()
def plot_overall_performance():
"""
Plot overall performance
"""
filenames = helper.get_nwbfile_names("../data")
all_performances = []
for filename in filenames:
nwbfile = helper.read(filename)
stats_all = helper.cal_cumulative_d(nwbfile)
all_performances.append([stats_all[2, 4], stats_all[2, 3]])
avg_performance = np.average(all_performances, axis=0)
std_performance = np.std(all_performances, axis=0)
for performance in all_performances:
plt.plot(performance[0], performance[1], marker='.', color='grey', alpha=0.6)
plt.ylim(0, 1)
plt.xlim(0, 1)
plt.plot([0, 1], [0, 1], color='black', alpha=0.7)
plt.errorbar(avg_performance[0], avg_performance[1], std_performance[1], std_performance[0])
plt.xlabel('false alarm rate')
plt.ylabel('hit rate')
plt.title('Overall Performance mTP=' + str(avg_performance[0]) + ' mFP=' + str(avg_performance[1]))
plt.show()
def plot_auc():
"""
Plot histogram of AUC
"""
filenames = helper.get_nwbfile_names("../data")
all_auc = []
for filename in filenames:
nwbfile = helper.read(filename)
stats_all = helper.cal_cumulative_d(nwbfile)
auc = helper.cal_auc(stats_all)
all_auc.append(auc)
m_auc = np.mean(all_auc)
plt.hist(all_auc, 15, histtype='bar')
plt.xlim(0, 1)
plt.xlabel('AUC')
plt.ylabel('nr of subjects')
plt.title('AUC m=' + str(m_auc))
plt.show()
def plot_confidence_accuracy():
"""
Plot accuracy over confidence high low.
"""
filenames = helper.get_nwbfile_names("../data")
accuracies_high = []
accuracies_low = []
accuracies_all = []
for filename in filenames:
nwbfile = helper.read(filename)
recog_response = helper.extract_recog_responses(nwbfile)
ground_truth = helper.extract_new_old_label(nwbfile)
split_status, split_mode, ind_TP_high, ind_TP_low, ind_FP_high, ind_FP_low, ind_TN_high, \
ind_TN_low, ind_FN_high, ind_FN_low, n_response = helper.dynamic_split(recog_response, ground_truth)
nr_TN_high = len(ind_TN_high[0])
nr_TP_high = len(ind_TP_high[0])
nr_TN_all = len(ind_TN_high[0]) + len(ind_TN_low[0])
nr_TP_all = len(ind_TP_high[0]) + len(ind_TP_low[0])
nr_TP_low = len(ind_TP_low[0])
nr_TN_low = len(ind_TN_low[0])
nr_high_response = len(ind_TN_high[0]) + len(ind_TP_high[0]) + len(ind_FN_high[0]) + len(ind_FP_high[0])
nr_low_response = len(ind_TN_low[0]) + len(ind_TP_low[0]) + len(ind_FN_low[0]) + len(ind_FP_low[0])
per_accuracy_high = (nr_TN_high + nr_TP_high) / nr_high_response
per_accuracy_low = (nr_TN_low + nr_TP_low) / nr_low_response
per_accuracy_all = (nr_TN_all + nr_TP_all) / n_response
accuracies_high.append(per_accuracy_high*100)
accuracies_low.append(per_accuracy_low*100)
accuracies_all.append(per_accuracy_all*100)
p1 = stats.ttest_1samp(accuracies_high, 50)[1]
p2 = stats.ttest_1samp(accuracies_low, 50)[1]
x_axis_label_high = 'high p=' + str(p1)
x_axis_label_low = 'low p=' + str(p2)
x_axis = [x_axis_label_high, x_axis_label_low]
for i in range(len(accuracies_high)):
plt.plot(x_axis, [accuracies_high[i], accuracies_low[i]], marker='o')
plt.plot(x_axis, [50, 50], color='black')
plt.ylim([0, 100])
tstat, p_val = stats.ttest_ind(accuracies_high, accuracies_low, equal_var=False)
plt.title('p=' + str(p_val))
plt.xlabel('confidence p vs. 50%')
plt.ylabel('accuracy % correct')
plt.show()
|
1711058
|
import cv2
import numpy as np
# we will use linear_assignment to quickly write experiments;
# later a customized KM algorithm with various optimizations in C++ will be employed,
# see https://github.com/berhane/LAP-solvers
# This is used for "Complete Matching"; we can remove unreasonable "workers" first and then apply it
import scipy.optimize as Optimizer
# This is used for "Maximum Matching". There is a desired algorithm implementation for our references
import scipy.sparse.csgraph as Graph
from pysvso.lib.maths.nputil import IoU_numeric, UIoU_numeric, cosine_dist
from pysvso.config import Settings
settings = Settings()
# setting debug variable
DEBUG = settings.DEBUG
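# A minimal sketch of the "Complete Matching" step used in ROIMatcher.mtch below:
# scipy's linear_sum_assignment returns the row/column indices of the minimum-cost
# assignment for a cost matrix (hypothetical values).
# >>> import numpy as np
# >>> from scipy.optimize import linear_sum_assignment
# >>> cost = np.array([[0.1, 0.9, 0.5],
# ...                  [0.8, 0.2, 0.6]])
# >>> rows, cols = linear_sum_assignment(cost)
# >>> [(int(r), int(c)) for r, c in zip(rows, cols)]
# [(0, 0), (1, 1)]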
# Linear Assignment Problems Solver Wrapper
class ROIMatcher:
from enum import Enum
class Algorithm(Enum):
COMPLETE_MATCHING = 0
MAXIMUM_MATCHING = 1
def __init__(self):
self.algorithm = ROIMatcher.Algorithm.COMPLETE_MATCHING
self.THR = 0.85 # 0.75
pass
def mtch(self, trackList, detected_objects, product="composite"):
N = len(trackList)
M = len(detected_objects)
weights = np.zeros((N, M))
distance = np.zeros((N, M))
corr = np.zeros((N, M))
def make_standard_tf_box(box):
y1, x1, y2, x2 = box
return np.array([x1, y1, x2, y2])
def compose_feat_vec(roi_feats, encodedId, score):
new_feats = np.concatenate([roi_feats, encodedId, np.array([score])], axis=0)
return new_feats
INF = float("inf")
EPILON = 1e-9
column_names = list(map(lambda detection: str(detection), detected_objects))
row_names = list(map(lambda landmark: str(landmark), trackList))
for i in range(N):
for j in range(M):
obj1 = trackList[i]
obj2 = detected_objects[j]
# deep feature score
ext_feat1 = compose_feat_vec(obj1.roi_features['roi_feature'],
obj1.roi_features['class_id'],
obj1.roi_features['score'])
ext_feat2 = compose_feat_vec(obj2.roi_features['roi_feature'],
obj2.roi_features['class_id'],
obj2.roi_features['score'])
# compute cosine distance
score = cosine_dist(ext_feat1, ext_feat2)
if np.isinf(score) or np.isnan(score):
raise Exception("Wrong Value!")
corr[i, j] = score
# must hold the same semantic meaning if we believe our detectron
if obj1.roi_features['label'] != obj2.roi_features['label']:
weights[i, j] = 1000
continue
box1 = obj1.predicted_states
box2 = make_standard_tf_box(obj2.projected_pos)
left_area = (box1[2] - box1[0]) * (box1[3] - box1[1])
right_area = (box2[2] - box2[0]) * (box2[3] - box2[1])
# 0 ~ 1
# iou = IoU_numeric(box1, box2, left_area, right_area)
# distance[i,j] = 1. - iou
uiou = UIoU_numeric(box1, box2, left_area, right_area)
distance[i, j] = (1 + uiou) / 2.0
if np.isinf(uiou) or np.isnan(uiou):
raise Exception("Wrong Value!")
# assign IoU distance
# weights[i,j] = 1. - iou
# assign UIoU distance
if product == "composite":
weights[i, j] = (1 + uiou) / 2.0
# compute total score
weights[i, j] *= score
weights[i, j] = 1 - weights[i, j]
elif product == "feature_only":
weights[i, j] = 1 - score
else:
raise Exception("Not Implemented Yet!")
mtched, unmtched_landmarks, unmtched_detections = ([], [], [])
row_indice, col_indice = [], []
np.set_printoptions(precision=3)
if self.algorithm is ROIMatcher.Algorithm.COMPLETE_MATCHING:
if DEBUG:
# print weight matrix
# print("%d landmarks, %d detections, forms %d x %d cost matrix :" % (N, M, N, M))
# print(weights)
pass
# remove rows if there are no reasonable matches from cols so that we could
# apply maximum match here. I have to say that this is very important!
# @todo : TODO
# see http://csclab.murraystate.edu/~bob.pilgrim/445/munkres.html,
# also see https://www.kaggle.com/c/santa-workshop-tour-2019/discussion/120020
try:
row_indice, col_indice = Optimizer.linear_sum_assignment(weights)
except Exception as e:
print(e)
import pandas as pd
# iou scores
df = pd.DataFrame(distance, index=row_names, columns=column_names)
# print("UIoUs:")
# print(df)
# entropy scores
df = pd.DataFrame(corr, index=row_names, columns=column_names)
# print("Corr:")
# print(df)
raise (e)
else:
raise Exception("Not Implemented Yet!")
# use maximum matching strategy
assignment = np.zeros((N, M))
for i, col in enumerate(col_indice):
row = row_indice[i]
print("landmark %s +--> Observation %s : score %f, uiou %f, corr %f" % (
row_names[row], column_names[col], weights[row, col], distance[row, col], corr[row, col]
))
# the solver has probability to produce unmatched pairs with different labels
if trackList[row].roi_features['label'] != detected_objects[col].roi_features['label']:
unmtched_landmarks.append(row)
unmtched_detections.append(col)
continue
if weights[row, col] > self.THR or (product == "composite" and distance[row, col] < 0.5): # 0.5 !important
unmtched_landmarks.append(row)
unmtched_detections.append(col)
continue
mtched.append((row, col, weights[row, col], distance[row, col]))
assignment[row, col] = 1
for i in range(N):
if i not in row_indice:
unmtched_landmarks.append(i)
for j in range(M):
if j not in col_indice:
unmtched_detections.append(j)
import pandas as pd
# iou scores
df = pd.DataFrame(distance, index=row_names, columns=column_names)
# print("UIoUs:")
# print(df)
# entropy scores
df = pd.DataFrame(corr, index=row_names, columns=column_names)
# print("Corr:")
# print(df)
# draw matches
df = pd.DataFrame(np.array(assignment), index=row_names, columns=column_names)
# print("assignment:")
# print(df)
return mtched, unmtched_landmarks, unmtched_detections
|
1711064
|
from cloudaux.gcp.iam import get_iam_policy, get_serviceaccount, get_serviceaccount_keys
from cloudaux.decorators import modify_output
from flagpole import FlagRegistry, Flags
registry = FlagRegistry()
FLAGS = Flags('BASE', 'KEYS', 'POLICY')
@registry.register(flag=FLAGS.KEYS, key='keys')
def get_keys(service_account, **conn):
return get_serviceaccount_keys(service_account=service_account, **conn)
@registry.register(flag=FLAGS.POLICY, key='policy')
def get_policy(service_account, **conn):
return get_iam_policy(service_account=service_account, **conn)
@registry.register(flag=FLAGS.BASE)
def _get_base(service_account, **conn):
sa = get_serviceaccount(service_account=service_account, **conn)
sa['_version'] = 1
return sa
@modify_output
def get_serviceaccount_complete(service_account, flags=FLAGS.ALL, **conn):
return registry.build_out(flags, service_account, **conn)
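# Hypothetical usage sketch (the service account resource name and any connection
# kwargs are placeholders, not part of this module): fetch the base record plus keys
# and IAM policy in a single call.
# >>> sa = get_serviceaccount_complete(
# ...     "projects/my-proj/serviceAccounts/sa@my-proj.iam.gserviceaccount.com",
# ...     flags=FLAGS.ALL)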
|
1711079
|
import hashlib
import hmac
def sign_payload(payload: str, key: str):
return hmac.new(
key=key.encode(), msg=payload.encode(), digestmod=hashlib.sha256
).hexdigest()
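# Example usage (hypothetical payload and key): the result is the hex-encoded
# HMAC-SHA256 digest of the payload, e.g. for verifying a webhook signature.
# >>> sig = sign_payload('{"event": "ping"}', key="secret")
# >>> len(sig)  # SHA-256 hex digest -> 64 characters
# 64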
|
1711105
|
from os import listdir
from os.path import isfile, join
import pickle
import numpy as np
TASK_DICT = {'MRPC': 'mrpc', 'STS-B': 'STSBenchmark', 'SST-2': 'SST2'}
class BaseEncoder():
def __init__(self, model_name, encode_capacity, path_cache):
self.model_name = model_name
self.encode_capacity = encode_capacity
self.path_cache = path_cache
self.model = None
self.tokenizer = None
self.count = 0
def parse_model_name_to_cache_name(self, model_name, task, location):
if '/' in model_name:
temp = model_name.split('/')
task, model, exp_name, seed, ckpt = temp[5:]
task = TASK_DICT[task]
return "{}_{}_{}_{}_{}.pickle".format(task, model, exp_name, seed, ckpt)
else:
return "{}_{}_{}.pickle".format(model_name, task, location)
def load_cache(self, task, location):
cache_name = self.parse_model_name_to_cache_name(self.model_name, task, location)
onlyfiles = [f for f in listdir(self.path_cache) if isfile(join(self.path_cache, f))]
# ====== Look Up existing cache ====== #
if cache_name in onlyfiles:
print("cache Found {}".format(cache_name))
with open(join(self.path_cache, cache_name), 'rb') as f:
cache = pickle.load(f)
print("cache Loaded")
self.flag_cache_save = False
return cache
else:
print("cache not Found {}".format(cache_name))
self.flag_cache_save = True
return {}
def save_cache(self, task, location):
if self.flag_cache_save:
print("Start saving cache")
cache_name = self.parse_model_name_to_cache_name(self.model_name, task, location)
with open(join(self.path_cache, cache_name), 'wb') as f:
pickle.dump(self.cache, f, pickle.HIGHEST_PROTOCOL)
print("Saved cache {}".format(cache_name))
else:
print("Skipping saving cache")
def prepare(self, task, location):
self.cache = self.load_cache(task, location)
if bool(self.cache):
self.model = None
self.tokenizer = None
self.count = 0
else:
self.model, self.tokenizer = self.construct_encoder()
def get_mini_batch_size(self, sentences):
seq_length = max([len(tokens) for tokens in sentences])
mini_batch_size = self.encode_capacity // seq_length + 1
return mini_batch_size
def get_head_embedding(self, output, layer, head, head_size):
if head == -1:
embedding = output[:, layer, :]
else:
embedding = output[:, layer, head * head_size:(head + 1) * head_size]
return embedding
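# For example (hypothetical sizes): with head_size=64, layer=2 and head=3, the slice
# above selects columns 192:256 of output[:, 2, :], i.e. the 64-dimensional embedding
# of the fourth attention head in the third layer; head=-1 keeps the whole layer output.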
def get_multi_head_embedding(self, output, heads, head_size):
if len(heads) == 1: # If single attention head is probed
layer, head = heads[0]
embedding = self.get_head_embedding(output, layer, head, head_size)
else: # If multiple attention heads are selected
list_embedding = []
for layer, head in heads:
embedding = self.get_head_embedding(output, layer, head, head_size)
list_embedding.append(embedding)
embedding = np.concatenate(list_embedding, axis=1)
return embedding
def construct_encoder(self):
raise NotImplementedError
def convert_sentences_to_features(self, sentences, seq_length):
raise NotImplementedError
def encode(self, sentences, heads, head_size, location):
raise NotImplementedError
if __name__ == '__main__':
# NOTE: BERTEncoder is a subclass defined in a separate module (it is not imported here),
# and BaseEncoder.prepare() expects both a task and a location argument.
model = BERTEncoder('bert-base-uncased')
model.prepare('Length')
model.construct_encoder()
|
1711119
|
from flask import Flask, Response, request, render_template, send_from_directory
import re
import subprocess
import urllib
from markupsafe import Markup
from gen import get_profiles
app = Flask(__name__)
@app.route('/')
def index():
clothing_items = get_profiles()
return render_template("index.html", clothing_items=clothing_items)
@app.route('/favicon.ico')
def favicon():
return send_from_directory("static", "favicon.ico")
@app.route('/js/<path:path>')
def send_js(path):
return send_from_directory('static/js', path)
@app.route('/imgs/<path:path>')
def send_imgs(path):
return send_from_directory('static/imgs', path)
def stream_template(template_name, **context):
app.update_template_context(context)
t = app.jinja_env.get_template(template_name)
rv = t.stream(context)
rv.disable_buffering()
return rv
bad_regex = re.compile(
"^http(s|)://(www.|)github.com/(.*?/.*?)/blob/(.*)$", re.IGNORECASE)
def handle_non_raw_code_urls(code_url):
"""Some people will give us links to the non raw view"""
match = bad_regex.match(code_url)
if match is None:
return code_url
else:
return "https://raw.githubusercontent.com/{0}/{1}".format(
match.group(3), match.group(4))
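# For example (hypothetical URL), a GitHub "blob" view is rewritten to its raw form:
# >>> handle_non_raw_code_urls("https://github.com/alice/project/blob/master/main.py")
# 'https://raw.githubusercontent.com/alice/project/master/main.py'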
gh_raw_re = re.compile(
"^https://raw.githubusercontent.com/(.*?)/(.*?)/.*/(.*?)$")
file_domain_re = re.compile("^https://(.*?)/.*/(.*?)$")
def extract_dress_name(code_url, clothing_type):
"""Try and turn a URL into a dress name"""
match = gh_raw_re.match(code_url)
if match is None:
match = file_domain_re.match(code_url)
if match is None:
return re.sub("^.*//.*/", " ", code_url)
else:
return match.group(1) + "'s " + match.group(2) + " glitch code dress"
else:
# Some folks have the group and repo name as the same
if (match.group(1) != match.group(2) and
not match.group(2).startswith(match.group(1))):
return match.group(1) + " " + match.group(2) + "'s " + match.group(3) + " glitch code " + clothing_type
else:
return match.group(2) + "'s " + match.group(3) + " glitch code " + clothing_type
def clean_name(name):
return re.sub("\.", "-", name)[0:200]
@app.template_filter('urlencode')
def urlencode_filter(s):
if isinstance(s, Markup):
s = s.unescape()
s = s.encode('utf8')
s = urllib.parse.quote(s)
return Markup(s)
@app.route('/generate_dress', methods=["POST"])
def generate_dress():
if request.form["url"] is None or len(request.form["url"]) == 0:
return send_from_directory("static", "missing_url.html")
else:
requested_code_url = request.form["url"]
clothing_type = request.form["clothing_type"] or "dress_with_pockets"
code_url = handle_non_raw_code_urls(requested_code_url)
dress_name = clean_name(extract_dress_name(code_url, clothing_type))
dress_dir = re.sub("[^a-zA-Z]", "_", dress_name)[0:10]
proc = subprocess.Popen(
["./wrapwork.sh",
clothing_type,
dress_dir,
dress_name,
code_url],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=False, bufsize=1)
return Response(
stream_template('generated.html',
dress_name=dress_name,
code_url=code_url,
rows=proc.stdout))
|
1711155
|
import collections
from torch import nn
import torch as th
from ..base import NodeClassifierBase
from .FeedForwardNNLayer import FeedForwardNNLayer
class FeedForwardNN(NodeClassifierBase):
r"""Specific class for node classification task.
Parameters
----------
input_size : int
The length of input node embeddings
num_class : int
The number of node categories for classification
hidden_size : list of int type values
Example for a two-layer FeedForwardNN: [50, 20]
activation: the activation function class for each fully connected layer
Default: nn.ReLU()
Example: nn.ReLU(),nn.Sigmoid().
"""
def __init__(self, input_size, num_class, hidden_size,activation=None):
super(FeedForwardNN, self).__init__()
if not activation:
activation = nn.ReLU()
self.classifier = FeedForwardNNLayer(input_size, num_class, hidden_size, activation)
def forward(self, input_graph):
r"""
Forward functions to compute the logits tensor for node classification.
Parameters
----------
input graph : GraphData
The tensors stored in the node feature field named "node_emb" in the
input_graph are used for classification.
Returns
---------
output_graph : GraphData
The computed logit tensor for each node in the graph is stored
in the node feature field named "logits".
The logit tensor shape for each node is: [num_class]
"""
node_emb=input_graph.ndata['node_emb']
input_graph.ndata['logits']=self.classifier(node_emb)
return input_graph
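# A minimal usage sketch (assumed shapes; GraphData comes from the surrounding library):
# >>> clf = FeedForwardNN(input_size=50, num_class=3, hidden_size=[50, 20])
# >>> graph = clf(graph)  # expects graph.ndata['node_emb'] of shape [num_nodes, 50]
# >>> graph.ndata['logits'].shape  # -> [num_nodes, 3]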
|
1711161
|
from django.conf import settings
# Getting project settings
PROJECT_PROVIDER_SETTINGS = getattr(settings, "NOTIFICATIONS", {})
# Internal module settings, to be merged with project settings
PROVIDERS_SETTINGS = {
**{
"telegram": {
"enabled": False,
"max_retries": 5,
},
"twitter": {
"enabled": False,
"max_retries": 5,
},
},
**PROJECT_PROVIDER_SETTINGS,
}
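# Note that the merge above is shallow at the provider level: a project-level setting
# such as (hypothetical) NOTIFICATIONS = {"telegram": {"enabled": True, "max_retries": 3}}
# replaces the whole "telegram" entry, while "twitter" keeps the defaults defined here.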
NOTIFICATIONS_ENABLED = any([provider["enabled"] for provider in PROVIDERS_SETTINGS.values()])
ASYNC_QUEUE_NAME = getattr(settings, "NOTIFICATIONS_ASYNC_QUEUE_NAME", "default")
|
1711167
|
import warnings
#from ethereum import utils
#from ethereum.abi import encode_abi, decode_abi
from octopus.platforms.ETH.constants import DEFAULT_GAS_PER_TX, DEFAULT_GAS_PRICE, BLOCK_TAGS, BLOCK_TAG_LATEST
from octopus.platforms.ETH.util import hex_to_dec, clean_hex, validate_block
from octopus.engine.explorer import Explorer
"""
This code is adapted from: ethjsonrpc
https://github.com/ConsenSys/ethjsonrpc
"""
GETH_DEFAULT_RPC_PORT = 8545
ETH_DEFAULT_RPC_PORT = 8545
PARITY_DEFAULT_RPC_PORT = 8545
PYETHAPP_DEFAULT_RPC_PORT = 4000
INFURA_MAINNET = "mainnet.infura.io/"
INFURA_ROPSTEN = "ropsten.infura.io/"
INFURA_INFURANET = "infuranet.infura.io/"
INFURA_KOVAN = "kovan.infura.io/"
INFURA_RINKEBY = "rinkeby.infura.io/"
INFURA_RPC_PORT = 8545
INFURA_APIKEY = "<KEY>"
class EthereumExplorerRPC(Explorer):
"""
Ethereum JSON-RPC client class
"""
def __init__(self, host='localhost', port=GETH_DEFAULT_RPC_PORT, tls=False, max_retries=3):
Explorer.__init__(self, host=host, port=port, tls=tls, max_retries=max_retries)
def call(self, method, params=None, jsonrpc='2.0', _id=1):
return super().call(method, params, jsonrpc, _id)
'''
def _encode_function(self, signature, param_values):
prefix = utils.big_endian_to_int(utils.sha3(signature)[:4])
if signature.find('(') == -1:
raise RuntimeError('Invalid function signature. Missing "(" and/or ")"...')
if signature.find(')') - signature.find('(') == 1:
return utils.encode_int(prefix)
types = signature[signature.find('(') + 1: signature.find(')')].split(',')
encoded_params = encode_abi(types, param_values)
return utils.zpad(utils.encode_int(prefix), 4) + encoded_params
'''
######################
# HIGH-LEVEL METHODS #
######################
def get_transaction(self, transaction_id, verbosity=None):
""" Return transaction informations
.. seealso::
:method:`eth_getTransactionByHash`
"""
return self.eth_getTransactionByHash(transaction_id)
def get_block_by_number(self, block_number):
""" Return block information using given block number
.. seealso::
:method:`eth_getBlockByNumber`
"""
return self.eth_getBlockByNumber(block_number)
def get_block_by_hash(self, block_hash):
""" Return block information using given block hash
.. seealso::
:method:`eth_getBlockByHash`
"""
return self.eth_getBlockByHash(block_hash)
def decode_tx(self, transaction_id):
""" Return dict with important information about
the given transaction
"""
tx_data = self.eth_getTransactionByHash(transaction_id)
return tx_data
#TODO
#########################
# HIGH-LEVEL METHODS #2 #
#########################
def transfer(self, from_address, to_address, amount):
"""Send wei from one address to another
TODO
.. note:: https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction
.. seealso:: :method:`eth_sendTransaction`
"""
return self.eth_sendTransaction(from_address=from_address, to_address=to_address, value=amount)
def create_contract(self, from_, code, gas, sig=None, args=None):
"""
Create a contract on the blockchain from compiled EVM code. Returns the
transaction hash.
"""
'''
from_ = from_ or self.eth_coinbase()
if sig is not None and args is not None:
types = sig[sig.find('(') + 1: sig.find(')')].split(',')
encoded_params = encode_abi(types, args)
code += encoded_params.encode('hex')
return self.eth_sendTransaction(from_address=from_, gas=gas, data=code)
'''
raise NotImplementedError()
def get_contract_address(self, tx):
"""
Get the address for a contract from the transaction that created it
"""
receipt = self.eth_getTransactionReceipt(tx)
return receipt['contractAddress']
def call_without_transaction(self, address, sig, args, result_types):
"""
Call a contract function on the RPC server, without sending a
transaction (useful for reading data)
"""
'''
data = self._encode_function(sig, args)
data_hex = data.encode('hex')
response = self.eth_call(to_address=address, data=data_hex)
return decode_abi(result_types, response[2:].decode('hex'))
'''
raise NotImplementedError()
def call_with_transaction(self, from_, address, sig, args, gas=None, gas_price=None, value=None):
"""
Call a contract function by sending a transaction (useful for storing
data)
"""
'''
gas = gas or DEFAULT_GAS_PER_TX
gas_price = gas_price or DEFAULT_GAS_PRICE
data = self._encode_function(sig, args)
data_hex = data.encode('hex')
return self.eth_sendTransaction(from_address=from_, to_address=address, data=data_hex, gas=gas,
gas_price=gas_price, value=value)
'''
raise NotImplementedError()
####################
# JSON-RPC METHODS #
####################
# ressources :
# * https://github.com/ethereum/wiki/wiki/JSON-RPC
# *
# *
def web3_clientVersion(self):
""" Returns the current client version.
:return: The current client version
:rtype: str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.web3_clientVersion()
'Geth/v1.8.0-unstable-952482d5/linux-amd64/go1.9.2'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#web3_clientversion
.. todo::
TESTED
"""
return self.call('web3_clientVersion')
def web3_sha3(self, data):
""" Returns Keccak-256 (not the standardized SHA3-256) of the given data.
:param data: the data to convert into a SHA3 hash
:type data: hex string
:return: The SHA3 result of the given string.
:rtype: hex string
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.web3_sha3('0x' + b'hello world'.hex())
'0x47173285a8d7341e5e972fc677286384f802f8ef42a5ec5f03bbfa254cb01fad'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#web3_sha3
.. todo::
TESTED
"""
#data = str(data).encode('hex')
return self.call('web3_sha3', [data])
def net_version(self):
""" Returns the current network id.
:return: The current network id.
"1": Ethereum Mainnet
"2": Morden Testnet (deprecated)
"3": Ropsten Testnet
"4": Rinkeby Testnet
"42": Kovan Testnet
:rtype: str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.net_version()
'1'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#net_version
.. todo::
TESTED
"""
return self.call('net_version')
def net_listening(self):
""" Returns true if client is actively listening for network connections.
:return: The current network id.
true when listening otherwise false.
:rtype: Boolean
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.net_listening()
True
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#net_listening
.. todo::
TESTED
"""
return self.call('net_listening')
def net_peerCount(self):
""" Returns number of peers currently connected to the client.
:return: integer of the number of connected peers.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.net_peerCount()
25
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#net_peercount
.. todo::
TESTED
"""
return hex_to_dec(self.call('net_peerCount'))
def eth_protocolVersion(self):
""" Returns the current ethereum protocol version.
:return: The current ethereum protocol version
:rtype: hex str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_protocolVersion()
'0x3f'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_protocolversion
.. todo::
TESTED
"""
return self.call('eth_protocolVersion')
def eth_syncing(self):
""" Returns an object with data about the sync status or false.
:return: An object with sync status data or FALSE, when not syncing
startingBlock: QUANTITY - The block at which the import started (will only be reset after the sync reaches its head)
currentBlock: QUANTITY - The current block, same as eth_blockNumber
highestBlock: QUANTITY - The estimated highest block
:rtype: Boolean or object
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_syncing()
False
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_syncing
.. todo::
TESTED
"""
return self.call('eth_syncing')
def eth_coinbase(self):
""" Returns the client coinbase address.
:return: 20 bytes - the current coinbase address.
:rtype: hex str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_coinbase()
'0x407d73d8a49eeb85d32cf465507dd71d507100c1'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_coinbase
.. todo::
TESTED
"""
return self.call('eth_coinbase')
def eth_mining(self):
""" Returns true if client is actively mining new blocks.
:return: returns true of the client is mining, otherwise false
:rtype: Boolean
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_mining()
False
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_mining
.. todo::
TESTED
"""
return self.call('eth_mining')
def eth_hashrate(self):
""" Returns the number of hashes per second that the node is mining with.
:return: number of hashes per second.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_hashrate()
0
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_hashrate
.. todo::
TESTED
"""
return hex_to_dec(self.call('eth_hashrate'))
def eth_gasPrice(self):
""" Returns the current price per gas in wei.
:return: integer of the current gas price in wei.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_gasPrice()
4000000000
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gasprice
.. todo::
TESTED
"""
return hex_to_dec(self.call('eth_gasPrice'))
def eth_accounts(self):
""" Returns a list of addresses owned by client.
:return: 20 Bytes - addresses owned by the client.
:rtype: list
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_accounts()
["<KEY>"]
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_accounts
.. todo::
TESTED
"""
return self.call('eth_accounts')
def eth_blockNumber(self):
""" Returns the number of most recent block.
:return: integer of the current block number the client is on.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_blockNumber()
5100196
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_blocknumber
.. todo::
TESTED
"""
return hex_to_dec(self.call('eth_blockNumber'))
def eth_getBalance(self, address=None, block=BLOCK_TAG_LATEST):
""" Returns the balance of the account of given address.
:param address: 20 Bytes - address to check for balance.
:type address: str
:param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the current balance in wei.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBalance("0x956b6B7454884b734B29A8115F045a95179ea00C")
17410594678300000000
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getbalance
.. todo::
TESTED
"""
address = address or self.eth_coinbase()
block = validate_block(block)
v = hex_to_dec(self.call('eth_getBalance', [address, block]))
return (v if v else 0)
def eth_getStorageAt(self, address=None, position=0, block=BLOCK_TAG_LATEST):
""" Returns the value from a storage position at a given address.
:param address: 20 Bytes - address to check for balance.
:type address: str
:param position: (optional) integer of the position in the storage. Default is 0
:type position: int
:param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: the value at this storage position.
:rtype: str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getStorageAt("0x295a70b2de5e3953354a6a8344e616ed314d7251", 0, "latest")
'0x0000000000000000000000000000000000000000000000000000000000000000'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getstorageat
.. todo::
TESTED
"""
block = validate_block(block)
return self.call('eth_getStorageAt', [address, hex(position), block])
def eth_getTransactionCount(self, address, block=BLOCK_TAG_LATEST):
""" Returns the number of transactions sent from an address.
:param address: 20 Bytes - address.
:type address: str
:param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the number of transactions send from this address.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getTransactionCount("0x956b6B7454884b734B29A8115F045a95179ea00C")
12891
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactioncount
.. todo::
TESTED
"""
block = validate_block(block)
return hex_to_dec(self.call('eth_getTransactionCount', [address, block]))
def eth_getBlockTransactionCountByHash(self, block_hash):
""" Returns the number of transactions in a block from a block matching the given block hash.
:param block_hash: 32 Bytes - hash of a block
:type block_hash: str
:return: integer of the number of transactions in this block.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBlockTransactionCountByHash('0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b')
69
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblocktransactioncountbyhash
.. todo::
TESTED
"""
return hex_to_dec(self.call('eth_getBlockTransactionCountByHash', [block_hash]))
def eth_getBlockTransactionCountByNumber(self, block=BLOCK_TAG_LATEST):
""" Returns the number of transactions in a block matching the given block number.
        :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the number of transactions in this block.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBlockTransactionCountByNumber(5100196)
69
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblocktransactioncountbynumber
.. todo::
TESTED
"""
block = validate_block(block)
return hex_to_dec(self.call('eth_getBlockTransactionCountByNumber', [block]))
def eth_getUncleCountByBlockHash(self, block_hash):
""" Returns the number of transactions in a block matching the given block number.
:param block_hash: 32 Bytes - hash of a block
:type block_hash: str
:return: integer of the number of uncles in this block.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getUncleCountByBlockHash('0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b')
0
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclecountbyblockhash
.. todo::
TESTED
"""
return hex_to_dec(self.call('eth_getUncleCountByBlockHash', [block_hash]))
def eth_getUncleCountByBlockNumber(self, block=BLOCK_TAG_LATEST):
""" Returns the number of uncles in a block from a block matching the given block number.
        :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:return: integer of the number of uncles in this block.
:rtype: int
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getUncleCountByBlockNumber(5100196)
0
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclecountbyblocknumber
.. todo::
TESTED
"""
block = validate_block(block)
return hex_to_dec(self.call('eth_getUncleCountByBlockNumber', [block]))
def eth_getCode(self, address, default_block=BLOCK_TAG_LATEST):
""" Returns code at a given address.
:param address: 20 Bytes - address.
:type address: str
        :param default_block: (optional) integer block number, or the string "latest", "earliest" or "pending"
        :type default_block: int or str
:return: the code from the given address.
:rtype: hex str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getCode("0xBB9bc244D798123fDe783fCc1C72d3Bb8C189413")
'0x6060604052361561020e5760e060020a6000350463013cf08b[...]62f93160ef3e563'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcode
.. todo::
TESTED
"""
default_block = validate_block(default_block)
return self.call('eth_getCode', [address, default_block])
def eth_sign(self, address, data):
""" The sign method calculates an Ethereum specific signature with: sign(keccak256("\x19Ethereum Signed Message:\n" + len(message) + message))).
By adding a prefix to the message makes the calculated signature recognisable as an Ethereum specific signature.
This prevents misuse where a malicious DApp can sign arbitrary data (e.g. transaction) and use the signature to impersonate the victim.
:param address: 20 Bytes - address.
:type address: str
:param data: N Bytes - message to sign
:type data: hex str
:return: Signature
:rtype: hex str
.. note::
the address to sign with must be unlocked.
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sign
.. todo::
NOT TESTED
"""
return self.call('eth_sign', [address, data])
def eth_sendTransaction(self, to_address=None, from_address=None, gas=None, gas_price=None, value=None, data=None,
nonce=None):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendtransaction
NEEDS TESTING
"""
params = {}
params['from'] = from_address or self.eth_coinbase()
if to_address is not None:
params['to'] = to_address
if gas is not None:
params['gas'] = hex(gas)
if gas_price is not None:
params['gasPrice'] = clean_hex(gas_price)
if value is not None:
params['value'] = clean_hex(value)
if data is not None:
params['data'] = data
if nonce is not None:
params['nonce'] = hex(nonce)
return self.call('eth_sendTransaction', [params])
def eth_sendRawTransaction(self, data):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_sendrawtransaction
NEEDS TESTING
"""
return self.call('eth_sendRawTransaction', [data])
def eth_call(self, to_address, from_address=None, gas=None, gas_price=None, value=None, data=None,
default_block=BLOCK_TAG_LATEST):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_call
NEEDS TESTING
"""
default_block = validate_block(default_block)
obj = {}
obj['to'] = to_address
if from_address is not None:
obj['from'] = from_address
if gas is not None:
obj['gas'] = hex(gas)
if gas_price is not None:
obj['gasPrice'] = clean_hex(gas_price)
if value is not None:
obj['value'] = value
if data is not None:
obj['data'] = data
return self.call('eth_call', [obj, default_block])
def eth_estimateGas(self, to_address=None, from_address=None, gas=None, gas_price=None, value=None, data=None,
default_block=BLOCK_TAG_LATEST):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_estimategas
NEEDS TESTING
"""
        if isinstance(default_block, str):
if default_block not in BLOCK_TAGS:
raise ValueError
obj = {}
if to_address is not None:
obj['to'] = to_address
if from_address is not None:
obj['from'] = from_address
if gas is not None:
obj['gas'] = hex(gas)
if gas_price is not None:
obj['gasPrice'] = clean_hex(gas_price)
if value is not None:
obj['value'] = value
if data is not None:
obj['data'] = data
return hex_to_dec(self.call('eth_estimateGas', [obj, default_block]))
def eth_getBlockByHash(self, block_hash, tx_objects=True):
""" Returns information about a block by hash.
:param block_hash: 32 Bytes - Hash of a block.
:type block_hash: str
        :param tx_objects: (optional) If true it returns the full transaction objects, if false only the hashes of the transactions.
:type tx_objects: Boolean
:return: A block object, or null when no block was found
:rtype: dict
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBlockByHash('0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b')
{'difficulty': '0xaa41aea7beb9e',
'extraData': '0x6e616e6f706f6f6c2e6f7267',
'gasLimit': '0x7a121d',
'gasUsed': '0x614398',
'hash': '0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b',
'logsBloom': '0x000001052440040410040000008006000020000002a11000308045410029410802804801080040c00880000002010c0201804010100900b0000001000240000010800040080044910000010c0000204a041140220008000040000040808800404020802226400018144000400484880012000408000401400000211000c000e2040000209000040080c00000c000890080001090008000001000000102000100002400082240104010400000420080041004050a1080c0042000000000080ac000000802020400001009088021040230000000249208020621000022001820180500200000820002600004888840810420200080100400080000ac0004100000',
'miner': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5',
'mixHash': '0xa79e0692e7056ea2af26a78a1ed42ac7f3049eb322041c073e5d5a08f6c7e053',
'nonce': '0x6677371ca8459875',
'number': '0x4dd2a4',
[...]
'totalDifficulty': '0x8b344e12294352eee8',
'transactions': [{
[...]
'value': '0x0'}],
'transactionsRoot': '0x184cd24c9f45c66ff3846c48fb63e24094aa5909cabfd38211c1d8209128cbc0',
'uncles': []}
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbyhash
.. todo::
TESTED
"""
return self.call('eth_getBlockByHash', [block_hash, tx_objects])
def eth_getBlockByNumber(self, block=BLOCK_TAG_LATEST, tx_objects=True):
""" Returns information about a block by hash.
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:param tx_objects: (optionnal) If true it returns the full transaction objects, if false only the hashes of the transactions.
:type tx_objects: Boolean
:return: A block object, or null when no block was found
:rtype: dict
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getBlockByNumber(5100196)
{'difficulty': '0xaa41aea7beb9e',
'extraData': '0x6e616e6f706f6f6c2e6f7267',
'gasLimit': '0x7a121d',
'gasUsed': '0x614398',
'hash': '0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b',
'logsBloom': '0x000001052440040410040000008006000020000002a11000308045410029410802804801080040c00880000002010c0201804010100900b0000001000240000010800040080044910000010c0000204a041140220008000040000040808800404020802226400018144000400484880012000408000401400000211000c000e2040000209000040080c00000c000890080001090008000001000000102000100002400082240104010400000420080041004050a1080c0042000000000080ac000000802020400001009088021040230000000249208020621000022001820180500200000820002600004888840810420200080100400080000ac0004100000',
'miner': '0x52bc44d5378309ee2abf1539bf71de1b7d7be3b5',
'mixHash': '0xa79e0692e7056ea2af26a78a1ed42ac7f3049eb322041c073e5d5a08f6c7e053',
'nonce': '0x6677371ca8459875',
'number': '0x4dd2a4',
[...]
'totalDifficulty': '0x8b344e12294352eee8',
'transactions': [{
[...]
'value': '0x0'}],
'transactionsRoot': '0x184cd24c9f45c66ff3846c48fb63e24094aa5909cabfd38211c1d8209128cbc0',
'uncles': []}
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbynumber
.. todo::
TESTED
"""
block = validate_block(block)
return self.call('eth_getBlockByNumber', [block, tx_objects])
def eth_getTransactionByHash(self, tx_hash):
""" Returns the information about a transaction requested by transaction hash.
:param tx_hash: 32 Bytes - hash of a transaction
:type tx_hash: str
:return: A transaction object, or null when no transaction was found
:rtype: dict
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getTransactionByHash('0x345303843c2f3041d12f0c5e6075fd294c2e2ca8cd9b4a9addca3f8caf4380ff')
{'blockHash': '0xaedd5193cb2b2d9de4c371404277468c7e30eb96f8f9990bd964ca535d88ebc4',
'blockNumber': '0x4dd3b0',
'from': '0xc5ff88c3e2902c56c0278fa0e7062d4b5c7e9358',
'gas': '0x5208',
'gasPrice': '0xbebc200',
'hash': '0x345303843c2f3041d12f0c5e6075fd294c2e2ca8cd9b4a9addca3f8caf4380ff',
'input': '0x',
'nonce': '0x1',
'r': '0x5b43f56b69bc78571de39b1b1fc33905a4af588a2ce59f31c54bc54391a255b8',
's': '0x359e7889447314fb8d9a7e526852471c7bccee006f47222499f9353074107910',
'to': '0xf954bbabe7bf9a2a4c98dceb293bf437e3863a4e',
'transactionIndex': '0x66',
'v': '0x25',
'value': '0x3202c66793000'}
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyhash
.. todo::
TESTED
"""
return self.call('eth_getTransactionByHash', [tx_hash])
def eth_getTransactionByBlockHashAndIndex(self, block_hash, index=0):
""" Returns information about a transaction by block hash and transaction index position.
        :param block_hash: 32 Bytes - hash of a block
        :type block_hash: str
        :param index: (optional) integer of the transaction index position.
:type index: int
:return: A transaction object, or null when no transaction was found
:rtype: dict
.. seealso::
:method:`eth_getTransactionByHash`
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblockhashandindex
.. todo::
NOT TESTED
"""
return self.call('eth_getTransactionByBlockHashAndIndex', [block_hash, hex(index)])
def eth_getTransactionByBlockNumberAndIndex(self, block=BLOCK_TAG_LATEST, index=0):
""" Returns information about a transaction by block number and transaction index position.
        :param block: (optional) integer block number, or the string "latest", "earliest" or "pending"
        :type block: int or str
        :param index: (optional) integer of the transaction index position.
:type index: int
:return: A transaction object, or null when no transaction was found
:rtype: dict
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getTransactionByBlockNumberAndIndex(5100196, 1)
{'blockHash': '0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b',
'blockNumber': '0x4dd2a4',
'from': '0xb01cb49fe0d6d6e47edf3a072d15dfe73155331c',
'gas': '0x5208',
'gasPrice': '0xe33e22200',
'hash': '0xf02ffa405bae96e62a9e36fbd781362ca378ec62353d5e2bd0585868d3deaf61',
'input': '0x',
'nonce': '0x1908f',
'r': '0xcad900a5060ba9bb646a7f6965f98e945d71a19b3e30ff53d03b9797c6153d07',
's': '0x53b11a48758fc383df878a9b5468c83b033f5036b124b16dbb0a5167aee7fc4f',
'to': '0x26cd018553871f2e887986bc24c68a0ce622bb8f',
'transactionIndex': '0x1',
'v': '0x25',
'value': '0x1bc16d674ec80000'}
.. seealso::
:method:`eth_getTransactionByHash`
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionbyblocknumberandindex
.. todo::
TESTED
"""
block = validate_block(block)
return self.call('eth_getTransactionByBlockNumberAndIndex', [block, hex(index)])
def eth_getTransactionReceipt(self, tx_hash):
""" Returns the receipt of a transaction by transaction hash.
:param tx_hash: 32 Bytes - hash of a transaction
:type tx_hash: str
:return: A transaction receipt object, or null when no receipt was found
:rtype: dict
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_getTransactionReceipt('0xf02ffa405bae96e62a9e36fbd781362ca378ec62353d5e2bd0585868d3deaf61')
{'blockHash': '0x98a548cbd0cd385f46c9bf28c16bc36dc6ec27207617e236f527716e617ae91b',
'blockNumber': '0x4dd2a4',
'contractAddress': None,
'cumulativeGasUsed': '0xe2d7',
'from': '0xb01cb49fe0d6d6e47edf3a072d15dfe73155331c',
'gasUsed': '0x5208',
'logs': [],
'logsBloom': '0x00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000',
'status': '0x1',
'to': '0x26cd018553871f2e887986bc24c68a0ce622bb8f',
'transactionHash': '0xf02ffa405bae96e62a9e36fbd781362ca378ec62353d5e2bd0585868d3deaf61',
'transactionIndex': '0x1'}
.. note::
            The receipt is not available for pending transactions.
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactionreceipt
.. todo::
TESTED
"""
return self.call('eth_getTransactionReceipt', [tx_hash])
def eth_getUncleByBlockHashAndIndex(self, block_hash, index=0):
""" Returns information about a uncle of a block by hash and uncle index position.
:param tx_hash: 32 Bytes - hash of a transaction
:type tx_hash: str
:param index: (optionnal) the uncle's index position.
:type index: int
:return: A block object, or null when no block was found
:rtype: dict
.. note::
An uncle doesn't contain individual transactions.
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclebyblockhashandindex
.. todo::
NOT TESTED
"""
return self.call('eth_getUncleByBlockHashAndIndex', [block_hash, hex(index)])
def eth_getUncleByBlockNumberAndIndex(self, block=BLOCK_TAG_LATEST, index=0):
""" Returns information about a uncle of a block by number and uncle index position.
:param block: (optionnal) integer block number, or the string "latest", "earliest" or "pending"
:type block: int or str
:param index: (optionnal) the uncle's index position.
:type index: int
:return: A block object, or null when no block was found
:rtype: dict
.. note::
An uncle doesn't contain individual transactions.
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getunclebyblocknumberandindex
.. todo::
NOT TESTED
"""
block = validate_block(block)
return self.call('eth_getUncleByBlockNumberAndIndex', [block, hex(index)])
def eth_getCompilers(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getcompilers
NOT WORKING
"""
return self.call('eth_getCompilers')
def eth_compileSolidity(self, code):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_compilesolidity
NOT WORKING
"""
return self.call('eth_compileSolidity', [code])
def eth_compileLLL(self, code):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_compilelll
N/A
"""
return self.call('eth_compileLLL', [code])
def eth_compileSerpent(self, code):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_compileserpent
N/A
"""
return self.call('eth_compileSerpent', [code])
def eth_newFilter(self, from_block=BLOCK_TAG_LATEST, to_block=BLOCK_TAG_LATEST, address=None, topics=None):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newfilter
NEEDS TESTING
"""
_filter = {
'fromBlock': from_block,
'toBlock': to_block,
'address': address,
'topics': topics,
}
return self.call('eth_newFilter', [_filter])
def eth_newBlockFilter(self):
""" Creates a filter in the node, to notify when a new block arrives. To check if the state has changed, call eth_getFilterChanges.
:return: A filter id.
:rtype: hex str
:Example:
>>> explorer = EthereumExplorerRPC()
>>> explorer.eth_newBlockFilter()
'0x1d21d3c44b9a1501d4358a44bdb6da1d'
.. seealso::
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newblockfilter
.. todo::
TESTED
"""
return self.call('eth_newBlockFilter')
def eth_newPendingTransactionFilter(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_newpendingtransactionfilter
TESTED
"""
return hex_to_dec(self.call('eth_newPendingTransactionFilter'))
def eth_uninstallFilter(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_uninstallfilter
NEEDS TESTING
"""
return self.call('eth_uninstallFilter', [filter_id])
def eth_getFilterChanges(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterchanges
NEEDS TESTING
"""
return self.call('eth_getFilterChanges', [filter_id])
def eth_getFilterLogs(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getfilterlogs
NEEDS TESTING
"""
return self.call('eth_getFilterLogs', [filter_id])
def eth_getLogs(self, filter_object):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getlogs
NEEDS TESTING
"""
return self.call('eth_getLogs', [filter_object])
def eth_getWork(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getwork
TESTED
"""
return self.call('eth_getWork')
def eth_submitWork(self, nonce, header, mix_digest):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_submitwork
NEEDS TESTING
"""
return self.call('eth_submitWork', [nonce, header, mix_digest])
def eth_submitHashrate(self, hash_rate, client_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_submithashrate
TESTED
"""
return self.call('eth_submitHashrate', [hex(hash_rate), client_id])
def db_putString(self, db_name, key, value):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_putstring
TESTED
"""
warnings.warn('deprecated', DeprecationWarning)
return self.call('db_putString', [db_name, key, value])
def db_getString(self, db_name, key):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_getstring
TESTED
"""
warnings.warn('deprecated', DeprecationWarning)
return self.call('db_getString', [db_name, key])
def db_putHex(self, db_name, key, value):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_puthex
TESTED
"""
if not value.startswith('0x'):
value = '0x{}'.format(value)
warnings.warn('deprecated', DeprecationWarning)
return self.call('db_putHex', [db_name, key, value])
def db_getHex(self, db_name, key):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#db_gethex
TESTED
"""
warnings.warn('deprecated', DeprecationWarning)
return self.call('db_getHex', [db_name, key])
def shh_version(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_version
N/A
"""
return self.call('shh_version')
def shh_post(self, topics, payload, priority, ttl, from_=None, to=None):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_post
NEEDS TESTING
"""
whisper_object = {
'from': from_,
'to': to,
'topics': topics,
'payload': payload,
'priority': hex(priority),
'ttl': hex(ttl),
}
return self.call('shh_post', [whisper_object])
def shh_newIdentity(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_newidentity
N/A
"""
return self.call('shh_newIdentity')
def shh_hasIdentity(self, address):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_hasidentity
NEEDS TESTING
"""
return self.call('shh_hasIdentity', [address])
def shh_newGroup(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_newgroup
N/A
"""
return self.call('shh_newGroup')
def shh_addToGroup(self):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_addtogroup
NEEDS TESTING
"""
return self.call('shh_addToGroup')
def shh_newFilter(self, to, topics):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_newfilter
NEEDS TESTING
"""
_filter = {
'to': to,
'topics': topics,
}
return self.call('shh_newFilter', [_filter])
def shh_uninstallFilter(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_uninstallfilter
NEEDS TESTING
"""
return self.call('shh_uninstallFilter', [filter_id])
def shh_getFilterChanges(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_getfilterchanges
NEEDS TESTING
"""
return self.call('shh_getFilterChanges', [filter_id])
def shh_getMessages(self, filter_id):
"""
https://github.com/ethereum/wiki/wiki/JSON-RPC#shh_getmessages
NEEDS TESTING
"""
return self.call('shh_getMessages', [filter_id])
class EthereumInfuraExplorer(EthereumExplorerRPC):
"""
EthereumExplorer subclass for using Infura
"""
def __init__(self, key=INFURA_APIKEY, network=INFURA_MAINNET):
EthereumExplorerRPC.__init__(self, host=network + key,
port=INFURA_RPC_PORT, tls=True)
class EthereumParityExplorer(EthereumExplorerRPC):
"""
EthereumExplorer subclass for Parity-specific methods
"""
def __init__(self, host='localhost', port=PARITY_DEFAULT_RPC_PORT, tls=False):
EthereumExplorerRPC.__init__(self, host=host, port=port, tls=tls)
def trace_filter(self, from_block=None, to_block=None, from_addresses=None, to_addresses=None):
"""
https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_filter
TESTED
"""
params = {}
if from_block is not None:
from_block = validate_block(from_block)
params['fromBlock'] = from_block
if to_block is not None:
to_block = validate_block(to_block)
params['toBlock'] = to_block
if from_addresses is not None:
if not isinstance(from_addresses, list):
from_addresses = [from_addresses]
params['fromAddress'] = from_addresses
if to_addresses is not None:
if not isinstance(to_addresses, list):
to_addresses = [to_addresses]
params['toAddress'] = to_addresses
return self.call('trace_filter', [params])
def trace_get(self, tx_hash, positions):
"""
https://wiki.parity.io/JSONRPC
https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_get
NEEDS TESTING
"""
if not isinstance(positions, list):
positions = [positions]
return self.call('trace_get', [tx_hash, positions])
def trace_transaction(self, tx_hash):
"""
https://wiki.parity.io/JSONRPC
https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_transaction
NEEDS TESTING
"""
return self.call('trace_transaction', [tx_hash])
def trace_block(self, block=BLOCK_TAG_LATEST):
"""
https://wiki.parity.io/JSONRPC
https://github.com/ethcore/parity/wiki/JSONRPC-trace-module#trace_block
NEEDS TESTING
"""
block = validate_block(block)
return self.call('trace_block', [block])
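# Minimal usage sketch (not part of the library itself): a few read-only calls
# against a locally running node, mirroring the docstring examples above. The
# default constructor arguments are assumed to point at a reachable JSON-RPC
# endpoint; without one these calls will simply fail.
if __name__ == '__main__':
    explorer = EthereumExplorerRPC()
    print(explorer.eth_blockNumber())
    print(explorer.eth_getBalance('0x956b6B7454884b734B29A8115F045a95179ea00C'))
    print(explorer.eth_getBlockByNumber('latest', tx_objects=False))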
|
1711170
|
from plugins.adversary.app.commands import wmic
from plugins.adversary.app.operation.operation import Step, OPUser, OPDomain, OPFile, OPCredential, OPHost, OPRat, OPVar
class WMIRemoteProcessCreate(Step):
"""
Description:
        This step starts a process on a remote machine, using Windows Management Instrumentation (via wmic). This allows
for lateral movement throughout the network.
Requirements:
Requires domain enumeration, access to a copy of the RAT on the target machine (usually accomplished using
Copy or Xcopy), and credentials for an administrator on the target machine (needs both administrator enumeration
'GetAdmin', and credential data 'Credentials').
"""
attack_mapping = [('T1047', 'Execution'), ('T1078', 'Persistence'), ('T1078', 'Defense Evasion'),
('T1106', 'Execution')]
display_name = "remote_process(WMI)"
summary = "Use WMI to start a process on a remote computer"
value = 20
preconditions = [("rat", OPRat),
('dest_host', OPHost),
('rat_file', OPFile({'host': OPVar('dest_host'), 'use_case': 'rat'})),
("cred", OPCredential({'$in': {'user': OPVar("dest_host.admins")}})),
('user', OPUser(OPVar("cred.user"))),
('domain', OPDomain(OPVar("user.domain")))]
postconditions = [("rat_g", OPRat({"host": OPVar("dest_host"), "elevated": True,
"executable": OPVar("rat_file.path")}))]
not_equal = [('dest_host', 'rat.host')]
preproperties = ['rat_file.path', 'domain.windows_domain', 'dest_host.fqdn', 'user.username', 'cred.password']
deterministic = True
cddl = """
Knowns:
rat: OPRat[host]
dest_host: OPHost
rat_file: OPFile[path, host]
cred: OPCredential[user[domain[windows_domain]], password]
Where:
rat.host != dest_host
rat_file.host == dest_host
Effects:
if not exist rat {
forget rat
} elif cred.user in dest_host.admins {
create OPRat[host=dest_host, elevated=True, executable=rat_file.path]
}
"""
@staticmethod
def description(rat, dest_host):
return "Starting a remote process on {} using WMI.".format(dest_host.fqdn)
@staticmethod
async def action(operation, rat, dest_host, user, rat_file, cred, domain, rat_g):
await operation.execute_shell_command(rat, *wmic.create(rat_file.path, arguments='-d -f',
remote_host=dest_host.fqdn, user=user.username,
user_domain=domain.windows_domain,
password=<PASSWORD>))
await rat_g()
return True
|
1711174
|
class StarData:
def __init__(self, ra, dec, mag, label=None):
self.ra = ra
self.dec = dec
self.mag = mag
self.label = label
self.ra_angle = None
self.dec_angle = None
self.x = None
self.y = None
class StarDataList:
def __init__(self, data):
self.data = data
self.min_x = self.max_x = self.min_y = self.max_y = None
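# Illustrative construction only; the coordinates, magnitudes and labels below are
# made-up placeholder values.
if __name__ == "__main__":
    stars = StarDataList([
        StarData(ra=10.68, dec=41.27, mag=3.4, label="example-A"),
        StarData(ra=83.82, dec=-5.39, mag=4.0, label="example-B"),
    ])
    print(len(stars.data))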
|
1711211
|
import xcffib
import struct
import six
_events = {}
_errors = {}
class ClientMessageData(xcffib.Union):
def __init__(self, unpacker):
if isinstance(unpacker, xcffib.Protobj):
unpacker = xcffib.MemoryUnpacker(unpacker.pack())
xcffib.Union.__init__(self, unpacker)
self.data8 = xcffib.List(unpacker.copy(), "B", 20)
self.data16 = xcffib.List(unpacker.copy(), "H", 10)
self.data32 = xcffib.List(unpacker.copy(), "I", 5)
def pack(self):
buf = six.BytesIO()
buf.write(xcffib.pack_list(self.data8, "B"))
return buf.getvalue()
xcffib._add_ext(key, unionExtension, _events, _errors)
|
1711254
|
from ..remote import RemoteModel
from infoblox_netmri.utils.utils import check_api_availability
class SwitchPortFwdRemote(RemoteModel):
"""
The switch forwarding table entries per device, per switch port.
| ``SwitchPortFwdID:`` The internal NetMRI identifier for this switch port forwarding entry.
| ``attribute type:`` number
| ``DataSourceID:`` The internal NetMRI identifier for the collector NetMRI that collected this data record.
| ``attribute type:`` number
| ``DeviceID:`` The internal NetMRI identifier for the device from which this switch port forwarding entry was collected.
| ``attribute type:`` number
| ``SwitchPortNumber:`` The switch port number for the port on which this switch forwarding entry was found. This is as reported by the SNMP BRIDGE MIB, and is not the same as the SNMP interface index.
| ``attribute type:`` string
| ``InterfaceID:`` The internal NetMRI identifier for the interface on which this switch forwarding entry was found.
| ``attribute type:`` number
| ``SwitchPortFwdStartTime:`` The starting effective time of this record.
| ``attribute type:`` datetime
| ``SwitchPortFwdEndTime:`` The ending effective time of this record, or empty if still in effect.
| ``attribute type:`` datetime
| ``SwitchPortFwdChangedCols:`` The fields that changed between this revision of the record and the previous revision.
| ``attribute type:`` string
| ``SwitchPortFwdTimestamp:`` The date and time this record was collected or calculated.
| ``attribute type:`` datetime
| ``SwitchPortFwdMAC:`` The MAC address that is being forwarded.
| ``attribute type:`` string
| ``SwitchPortFwdStatus:`` The status of this entry; indicates how the entry was entered in the switch forwarding table.
| ``attribute type:`` string
| ``SwitchPortFwdVlanIndex:`` The VLAN number for which this MAC address is forwarded.
| ``attribute type:`` number
| ``SwitchPortFwdVlanID:`` The internal NetMRI identifier for the VLAN for which this MAC address is forwarded.
| ``attribute type:`` number
| ``SwitchPortFwdInterfaceID:`` The internal NetMRI identifier of the interface to which the MAC address corresponds (that is, the destination interface).
| ``attribute type:`` number
| ``SwitchPortFwdDeviceID:`` The internal NetMRI identifier of the device to which the MAC address corresponds (that is, the destination device).
| ``attribute type:`` number
"""
properties = ("SwitchPortFwdID",
"DataSourceID",
"DeviceID",
"SwitchPortNumber",
"InterfaceID",
"SwitchPortFwdStartTime",
"SwitchPortFwdEndTime",
"SwitchPortFwdChangedCols",
"SwitchPortFwdTimestamp",
"SwitchPortFwdMAC",
"SwitchPortFwdStatus",
"SwitchPortFwdVlanIndex",
"SwitchPortFwdVlanID",
"SwitchPortFwdInterfaceID",
"SwitchPortFwdDeviceID",
)
@property
@check_api_availability
def data_source(self):
"""
The collector NetMRI that collected this data record.
``attribute type:`` model
"""
return self.broker.data_source(**{"SwitchPortFwdID": self.SwitchPortFwdID})
@property
@check_api_availability
def device(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.device(**{"SwitchPortFwdID": self.SwitchPortFwdID})
@property
@check_api_availability
def interface(self):
"""
The interface on which the switch port forwarding entry was found.
``attribute type:`` model
"""
return self.broker.interface(**{"SwitchPortFwdID": self.SwitchPortFwdID})
@property
@check_api_availability
def vlan(self):
"""
The VLAN on which the switch port forwarding entry was found.
``attribute type:`` model
"""
return self.broker.vlan(**{"SwitchPortFwdID": self.SwitchPortFwdID})
@property
@check_api_availability
def infradevice(self):
"""
The device from which this data was collected.
``attribute type:`` model
"""
return self.broker.infradevice(**{"SwitchPortFwdID": self.SwitchPortFwdID})
|
1711291
|
from typing import List
from collections import deque
from sys import stdin
class Solution:
def tomato(self, M: int, N: int, H: int, graph: List[List[List[int]]]):
queue = deque()
count = 0
answer = 0
visited = [[[False] * (M + 1) for i in range(N + 1)] for j in range(H + 1)]
        # print(graph)
        # If no cell anywhere is still unripe (0), every tomato is already ripe and the answer is 0.
        tomato_complete = True
        for h in range(H):
            for n in range(N):
                if 0 in graph[h][n]:
                    tomato_complete = False
        if tomato_complete:
            print(0)
            return
for h in range(H):
for n in range(N):
for m in range(M):
if graph[h][n][m] == 1:
queue.append([h, n, m, count])
if graph[h][n][m] == -1:
visited[h][n][m] = True
if not queue:
print(-1)
return
while queue:
h, n, m, count = queue.popleft()
if not visited[h][n][m]:
visited[h][n][m] = True
answer = max(answer, count)
count += 1
if h - 1 >= 0:
if graph[h - 1][n][m] == 0:
graph[h - 1][n][m] = 1
queue.append([h - 1, n, m, count])
if h + 1 < H:
if graph[h + 1][n][m] == 0:
graph[h + 1][n][m] = 1
queue.append([h + 1, n, m, count])
if n - 1 >= 0:
if graph[h][n - 1][m] == 0:
graph[h][n - 1][m] = 1
queue.append([h, n - 1, m, count])
if n + 1 < N:
if graph[h][n + 1][m] == 0:
graph[h][n + 1][m] = 1
queue.append([h, n + 1, m, count])
if m - 1 >= 0:
if graph[h][n][m - 1] == 0:
graph[h][n][m - 1] = 1
queue.append([h, n, m - 1, count])
if m + 1 < M:
if graph[h][n][m + 1] == 0:
graph[h][n][m + 1] = 1
queue.append([h, n, m + 1, count])
tomato_false = False
for h in range(H):
for n in range(N):
if 0 in graph[h][n]:
tomato_false = True
if tomato_false:
print(-1)
return
print(answer)
# [[[0, -1, 0, 0, 0], [-1, -1, 0, 1, 1], [0, 0, 0, 1, 1]]]
# [[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# [[0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]]]
# Solution().tomato(4, 3, 2, [[[1,1,1,1],[1,1,1,1],[1,1,1,1]],[[1,1,1,1],[-1,-1,-1,-1],[1,1,1,-1]]])
graph = []
M, N, H = map(int, stdin.readline().split())
for h in range(H):
tmp = []
for n in range(N):
tmp.append(list(map(int, stdin.readline().split())))
graph.append(tmp)
Solution().tomato(M, N, H, graph)
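# Expected stdin layout (inferred from the parsing above): a first line "M N H",
# followed by H blocks of N lines with M integers each, where 1 marks a ripe tomato,
# 0 an unripe one, and -1 an empty cell.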
|
1711352
|
import torch
import numpy as np
from skimage import io
from .fit import forward_pass
from .notebook_utils import draw_pcd_bg, show_nb
from utils.common import tti, to_sigm, itt, dict2device
def infer_pid(dataloader, outfit_codes_dict, image_paths_dict,
draping_network, converter, ndesc_stack, renderer,
pid, device='cuda:0'):
'''
Infer the draping network and renderer (rasterizer) to predict the clothing point cloud with visible points.
'''
# > predict
outfit_code = torch.from_numpy(outfit_codes_dict[pid]).to(device)
print(f'Current style: pid={pid}, shape={outfit_code.shape}')
for i, (data_dict, target_dict) in enumerate(dataloader):
data_dict = dict2device(data_dict, device)
data_dict['zrotMatrix_c3d'] = None
source_pcd = data_dict['source_pcd'][0]
out_dict = forward_pass(data_dict, draping_network, converter, ndesc_stack, renderer,
device=device, outfit_code=outfit_code)
# > visualize
K = data_dict['K'].squeeze(0).to(device).float()
source_pcd = source_pcd @ K.T
source_pcd[:, :2] /= source_pcd[:, 2:]
cloth_pcd = out_dict['cloth_pcd'][0]
cloth_pcd = cloth_pcd[out_dict['visibilty_mask'][0]]
cloth_pcd = cloth_pcd @ K.T
cloth_pcd[:, :2] /= cloth_pcd[:, 2:]
cloth_mask = tti(to_sigm(target_dict['real_segm']))
cloth_mask = np.tile(cloth_mask[:,:,None], (1,1,3))
smpl_img = draw_pcd_bg(cloth_mask, source_pcd[:,:2])
cloth_img = draw_pcd_bg(cloth_mask, cloth_pcd[:,:2])
rgb_img = io.imread(image_paths_dict[pid])
show_nb([rgb_img, smpl_img, cloth_img],
title=f'Outfit point cloud fitted to a single image',
titles=['rgb', 'source pcd (cutted smpl)', 'outfit pcd (visible points only)'], n_cols=3)
|
1711399
|
import os
import math
import pandas as pd
import sys
import numpy as np
src=sys.argv[1]
dest=sys.argv[2]
for subdir, dirs, files in os.walk(src):
for file in files:
df = pd.DataFrame()
try:
df = pd.read_table(src+file, sep='\t', names=['id', 'position', 'depth'])
except:
print(src+file)
continue
if df.empty:
continue
log_scale = []
for i in df['depth']:
if i != 0:
log_scale.append(math.log(i))
else:
log_scale.append(0)
df['log(depth)'] = log_scale
depth = np.mean(df['depth'])
per_cov = (len(df[df['depth']>=1])/len(df['depth'])) * 100
print('|'+file+' | '+str(per_cov)+'|' +str(depth)+ '|')
# plt.clf()
# df['log(depth)'].plot()
# plt.xlabel('Position', fontsize=16)
# plt.ylabel('ln(depth)', fontsize=16)
# plt.savefig(dest+file.replace(".tsv", ".png"))
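# Illustrative invocation (the script name and directories are placeholders; both
# paths must end with a trailing slash because src+file and dest+file are concatenated
# directly above):
#   python depth_report.py /data/depth_tables/ /data/plots/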
|
1711407
|
from django import db
from django.apps import AppConfig
from orchestra.core import administration
from orchestra.utils.db import database_ready
class ResourcesConfig(AppConfig):
name = 'orchestra.contrib.resources'
verbose_name = 'Resources'
def ready(self):
if database_ready():
from .models import create_resource_relation
try:
create_resource_relation()
except db.utils.OperationalError:
                # Not ready after all
pass
from .models import Resource, ResourceData, MonitorData
administration.register(Resource, icon='gauge.png')
administration.register(ResourceData, parent=Resource, icon='monitor.png')
administration.register(MonitorData, parent=Resource, dashboard=False)
from . import signals
def reload_relations(self):
from .admin import insert_resource_inlines
from .models import create_resource_relation
from .serializers import insert_resource_serializers
insert_resource_inlines()
insert_resource_serializers()
create_resource_relation()
|
1711434
|
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from codecs import open
if sys.version_info[:3] < (3, 0, 0):
print("Requires Python 3 to run.")
sys.exit(1)
with open("README.md", encoding="utf-8") as file:
readme = file.read()
setup(
name="communities",
description="Library for detecting and visualizing community structure in graphs",
long_description=readme,
long_description_content_type="text/markdown",
version="v3.0.0",
packages=["communities", "communities.algorithms", "communities.visualization"],
python_requires=">=3",
url="https://github.com/shobrook/communities",
author="shobrook",
author_email="<EMAIL>",
# classifiers=[],
install_requires=["networkx", "numpy", "matplotlib", "scipy"],
keywords=["graph", "louvain", "community", "clustering", "detection", "girvan-newman", "hierarchical", "visualization"],
license="MIT"
)
|
1711477
|
import pytest
import os.path
import subprocess
import sys
import os
import conftest
from herbstluftwm.types import Rectangle
def test_example(hlwm):
# test the example.py shipped with the bindings
example_py = os.path.join(os.path.dirname(__file__), '..', 'python', 'example.py')
# make 'herbstclient' binary available in the PATH
os.environ['PATH'] = conftest.BINDIR + ':' + os.environ['PATH']
assert subprocess.call([sys.executable, example_py]) == 0
def test_attr_get(hlwm):
assert hlwm.attr.tags.focus.index() == 0
assert str(hlwm.attr.tags.focus.index) == '0'
def test_multi_objects_format(hlwm):
tag = hlwm.attr.tags.focus
assert f'{tag.index} / {tag.frame_count}' == '0 / 1'
def test_attr_set(hlwm):
hlwm.attr.tags.focus.name = 'newname'
assert hlwm.call('get_attr tags.focus.name').stdout == 'newname'
assert hlwm.attr.tags.focus.name() == 'newname'
def test_attr_custom_attribute(hlwm):
hlwm.attr.monitors.my_new_attr = 'myvalue'
assert hlwm.call('get_attr monitors.my_new_attr').stdout == 'myvalue'
assert hlwm.attr.monitors.my_new_attr() == 'myvalue'
def test_attr_get_dict_child(hlwm):
assert hlwm.attr.monitors['0'].index() == 0
assert hlwm.attr.monitors[0].index() == 0
def test_attr_get_dict_attribute(hlwm):
assert hlwm.attr.monitors['0']['index']() == 0
def test_attr_set_dict_attribute(hlwm):
hlwm.attr.monitors['0']['name'] = 'newname'
assert hlwm.call('get_attr monitors.0.name').stdout == 'newname'
def test_attr_set_dict_custom_attribute(hlwm):
hlwm.attr.monitors['0']['my_test'] = 'value'
assert hlwm.call('get_attr monitors.0.my_test').stdout == 'value'
@pytest.mark.parametrize('value', ['a', 'b', 'c'])
def test_chain_commands_if_then_else(hlwm, value):
from herbstluftwm import chain
# perform the comparison value == 'a' in hlwm:
hlwm.attr.my_attr = value
cmd = chain('or', [
chain('and', [
['compare', 'my_attr', '=', 'a'],
chain('chain', [
['echo', 'then branch 1'],
['echo', 'then branch 2'],
]),
]),
chain('chain', [
['echo', 'else branch 1'],
['echo', 'else branch 2'],
]),
])
if value == 'a':
expected = 'then branch 1\nthen branch 2\n'
else:
expected = 'else branch 1\nelse branch 2\n'
output = hlwm.call(cmd).stdout
assert output == expected
def test_implicit_type_conversion_bool(hlwm):
hlwm.attr.my_bool = True # implicitly creates an attribute
assert hlwm.attr.my_bool() is True
hlwm.attr.my_bool = False
assert hlwm.attr.my_bool() is False
hlwm.attr.my_bool = 'toggle'
assert hlwm.attr.my_bool() is True
def test_implicit_type_conversion_int(hlwm):
hlwm.attr.my_int = 32
assert hlwm.attr.my_int() == 32
hlwm.attr.my_int = '-=40'
assert hlwm.attr.my_int() == -8
def test_implicit_type_conversion_uint(hlwm):
hlwm.call('new_attr uint my_uint 32')
assert hlwm.attr.my_uint() == 32
hlwm.attr.my_uint = '-=40'
assert hlwm.attr.my_uint() == 0
def test_implicit_type_conversion_string(hlwm):
hlwm.attr.my_str = "test"
assert hlwm.attr.my_str() == "test"
hlwm.attr.my_str = "foo"
assert hlwm.attr.my_str() == "foo"
def test_implicit_type_conversion_rectangle(hlwm):
# TODO: change as soon as custom attributes support Rectangle!
geo = Rectangle(10, 20, 400, 500)
hlwm.attr.monitors.focus.geometry = geo
assert hlwm.attr.monitors.focus.geometry() == geo
geo = Rectangle(20, 30, 422, 522)
hlwm.attr.monitors.focus.geometry = geo
assert hlwm.attr.monitors.focus.geometry() == geo
|
1711480
|
from typing import Iterator, Optional, Union as TypingUnion
from genpy import (
Function as FunctionOriginal,
Generable,
Suite,
Class as BrokenClass,
FromImport,
Assign,
)
class Class(BrokenClass):
def generate(self) -> Iterator[str]:
bases = self.bases
if not bases:
bases = []
yield "class {}({}):".format(self.name, ", ".join(bases))
for f in self.attributes:
yield from (" " + f_line for f_line in f.generate())
class TypedParam(Generable):
def __init__(self, name: str, type_: Optional[str]) -> None:
self.name = name
self.type = type_
def generate(self) -> Iterator[str]:
if self.type is None:
yield self.name
else:
yield f"{self.name}: {self.type}"
class Break(Generable):
def generate(self):
yield "break"
class Continue(Generable):
def generate(self):
yield "continue"
class Union(Generable):
def __init__(self, members: list[str]) -> None:
self.members = members
def generate(self) -> Iterator[str]:
joined = ",".join(self.members)
yield f"typing.Union[{joined}]"
class Tuple(Generable):
def __init__(self, members: list[str]) -> None:
self.members = members
def generate(self) -> Iterator[str]:
joined = ",".join(self.members)
yield f"({joined},)"
class List(Generable):
def __init__(self, members: list[str]) -> None:
self.members = members
def generate(self) -> Iterator[str]:
joined = ",".join(self.members)
yield f"[{joined}]"
class TupleTypeAlias(Generable):
def __init__(self, name: str, members: list[str]) -> None:
self.name = name
self.members = members
def generate(self) -> Iterator[str]:
yield str(Assign(self.name, f"tuple{List(self.members)}"))
class StrDictEntry(Generable):
def __init__(self, key: str, val: TypingUnion[str, "StrDict"]) -> None:
self.key = key
self.val = val
def generate(self) -> Iterator[str]:
yield f'"{self.key}": {self.val},'
class NamedArg(Generable):
def __init__(self, key: str, val: str) -> None:
self.key = key
self.val = val
def generate(self) -> Iterator[str]:
yield f"{self.key}={self.val},"
class Call(Generable):
def __init__(self, func: str, args: list[NamedArg]) -> None:
self.func = func
self.args = args
def generate(self) -> Iterator[str]:
formatted_args = "".join(str(arg) for arg in self.args)
yield f"{self.func}({formatted_args})"
class StrDict(Generable):
def __init__(self, items: list[StrDictEntry]) -> None:
self.items = items
def generate(self) -> Iterator[str]:
formatted_items = "".join(str(item) for item in self.items)
yield "{" + formatted_items + "}"
class IntDictEntry(Generable):
def __init__(self, key: int, val: str) -> None:
self.key = key
self.val = val
def generate(self) -> Iterator[str]:
yield f"{self.key}: {self.val},"
class IntDict(Generable):
def __init__(self, items: list[IntDictEntry]) -> None:
self.items = items
def generate(self) -> Iterator[str]:
formatted_items = "".join(str(item) for item in self.items)
yield "{" + formatted_items + "}"
class Function(FunctionOriginal):
def __init__(
self,
name: str,
args: list[TypedParam],
body: Generable,
return_type: str,
decorators: tuple[str, ...] = (),
is_async: bool = False,
) -> None:
super().__init__(name, args, body, decorators)
self.return_type = return_type
self.is_async = is_async
def generate(self) -> Iterator[str]:
yield from self.decorators
arg_strings = []
for arg in self.args:
annotation = "" if arg.type is None else f": {arg.type}"
arg_strings.append(f"{arg.name}{annotation}")
def_base = "async def" if self.is_async else "def"
yield "{} {}({}) -> {}:".format(
def_base, self.name, ", ".join(arg_strings), self.return_type
)
yield from self.body.generate()
class StaticMethod(Function):
def __init__(
self, name: str, args: list[TypedParam], body: Generable, return_type: str
) -> None:
super().__init__(name, args, body, return_type, ("@staticmethod",))
class ClassMethod(Function):
def __init__(
self,
name: str,
extra_args: list[TypedParam],
body: Generable,
return_type: str,
is_async: bool = False,
) -> None:
args = [TypedParam("cls", None), *extra_args]
super().__init__(name, args, body, return_type, ("@classmethod",), is_async)
class Method(Function):
def __init__(
self, name: str, extra_args: list[TypedParam], body: Generable, return_type: str
) -> None:
args = [TypedParam("self", None), *extra_args]
super().__init__(name, args, body, return_type)
class InitMethod(Method):
def __init__(self, extra_args: list[TypedParam], body: Generable) -> None:
super().__init__("__init__", extra_args, body, "None")
class Dataclass(Class):
def __init__(
self,
name,
attributes: list[TypingUnion[TypedParam, Assign, ClassMethod, Method]],
) -> None:
super().__init__(name, None, attributes)
def generate(self) -> Iterator[str]:
yield "@dataclass"
yield from super().generate()
class TypedDict(Class):
def __init__(self, name, params: list[TypedParam]) -> None:
super().__init__(name, ["typing.TypedDict"], params)
def generate(self) -> Iterator[str]:
yield from super().generate()
class Try(Generable):
def __init__(self, try_body, to_catch, except_body):
if not isinstance(try_body, Suite):
try_body = Suite(try_body)
if not isinstance(except_body, Suite):
except_body = Suite(except_body)
self.try_body = try_body
self.to_catch = to_catch
self.except_body = except_body
def generate(self):
yield "try:"
yield from self.try_body.generate()
yield f"except {self.to_catch}:"
yield from self.except_body.generate()
ANNOTATIONS_IMPORT = FromImport("__future__", ["annotations"])
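# Minimal usage sketch of the helpers above (the method name, argument and body are
# purely illustrative): build a typed method and print its generated source.
if __name__ == "__main__":
    example = Method(
        "greet",
        [TypedParam("name", "str")],
        Suite([Assign("message", 'f"hello {name}"')]),
        "None",
    )
    print("\n".join(example.generate()))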
|
1711505
|
import numpy as np
from sklearn.preprocessing import normalize
from utils.data_utils import convert_dir_vec_to_pose, dir_vec_pairs
def convert_pose_to_line_segments(pose):
line_segments = np.zeros((len(dir_vec_pairs) * 2, 3))
for j, pair in enumerate(dir_vec_pairs):
line_segments[2 * j] = pose[pair[0]]
line_segments[2 * j + 1] = pose[pair[1]]
line_segments[:, [1, 2]] = line_segments[:, [2, 1]] # swap y, z
line_segments[:, 2] = -line_segments[:, 2]
return line_segments
def convert_dir_vec_to_line_segments(dir_vec):
joint_pos = convert_dir_vec_to_pose(dir_vec)
line_segments = np.zeros((len(dir_vec_pairs) * 2, 3))
for j, pair in enumerate(dir_vec_pairs):
line_segments[2 * j] = joint_pos[pair[0]]
line_segments[2 * j + 1] = joint_pos[pair[1]]
line_segments[:, [1, 2]] = line_segments[:, [2, 1]] # swap y, z
line_segments[:, 2] = -line_segments[:, 2]
return line_segments
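# Layout note for consumers of the two helpers above: the returned array has shape
# (2 * len(dir_vec_pairs), 3); rows 2j and 2j+1 are the two endpoints of the j-th bone,
# with the y and z columns swapped and the new z negated, presumably so the pose
# plots upright in 3D viewers.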
|
1711557
|
import random
from ledger.util import has_nth_bit_set
def test_nth_bit_set():
"""
Check if bits of randomly chosen integers are set or unset and compare
with the binary representation.
"""
for _ in range(0, 10000):
number = random.randint(0, 100000000)
bits = bin(number)[2:]
for i, b in enumerate(reversed(bits)):
assert has_nth_bit_set(number, i) == (int(b) == 1)
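# For reference, the behaviour this test assumes of ledger.util.has_nth_bit_set
# (a sketch, not the actual implementation):
#   def has_nth_bit_set(number: int, n: int) -> bool:
#       return (number >> n) & 1 == 1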
|
1711594
|
from django.urls import path, re_path
from .views import (MessageCreateView, ThreadDetailView, ThreadListView,
SantaThreadDetailView, SanteeThreadDetailView)
app_name = 'inbox'
urlpatterns = [
re_path(r'^@(?P<recipient>[a-zA-Z0-9_]+)/$', ThreadDetailView.as_view(),
name='thread-detail'),
path('<int:pk>/new/', MessageCreateView.as_view(),
name='new-message'),
path('santa/', SantaThreadDetailView.as_view(), name='santa-detail'),
path('santee/', SanteeThreadDetailView.as_view(), name='santee-detail'),
path('', ThreadListView.as_view(), name='threads')
]
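# Illustrative lookups for the route names declared above, e.g.
#   reverse('inbox:thread-detail', kwargs={'recipient': 'alice'})
#   reverse('inbox:new-message', kwargs={'pk': 3})
# resolve to the '@<recipient>/' and '<pk>/new/' patterns respectively, relative to
# wherever this app's URLconf is included.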
|
1711602
|
import mnist
training_images, training_labels, test_images, test_labels = mnist.load()
print("We have successfully loaded the MNIST data!")
|
1711611
|
from tljh.yaml import yaml
def test_no_empty_flow(tmpdir):
path = tmpdir.join("config.yaml")
with path.open("w") as f:
f.write("{}")
# load empty config file
with path.open("r") as f:
config = yaml.load(f)
# set a value
config["key"] = "value"
# write to a file
with path.open("w") as f:
yaml.dump(config, f)
# verify that it didn't use compact '{}' flow-style
with path.open("r") as f:
content = f.read()
assert content.strip() == "key: value"
|
1711617
|
import numpy as np
from . import _librootnumpy
__all__ = [
'array',
]
def array(arr, copy=True):
"""Convert a ROOT TArray into a NumPy array.
Parameters
----------
arr : ROOT TArray
A ROOT TArrayD, TArrayF, TArrayL, TArrayI or TArrayS
copy : bool, optional (default=True)
If True (the default) then copy the underlying array, otherwise the
NumPy array will view (and not own) the same memory as the ROOT array.
Returns
-------
arr : NumPy array
A NumPy array
Examples
--------
>>> from root_numpy import array
>>> from ROOT import TArrayD
>>> a = TArrayD(5)
>>> a[3] = 3.141
>>> array(a)
array([ 0. , 0. , 0. , 3.141, 0. ])
"""
import ROOT
if isinstance(arr, ROOT.TArrayD):
arr = _librootnumpy.array_d(ROOT.AsCObject(arr))
elif isinstance(arr, ROOT.TArrayF):
arr = _librootnumpy.array_f(ROOT.AsCObject(arr))
elif isinstance(arr, ROOT.TArrayL):
arr = _librootnumpy.array_l(ROOT.AsCObject(arr))
elif isinstance(arr, ROOT.TArrayI):
arr = _librootnumpy.array_i(ROOT.AsCObject(arr))
elif isinstance(arr, ROOT.TArrayS):
arr = _librootnumpy.array_s(ROOT.AsCObject(arr))
elif isinstance(arr, ROOT.TArrayC):
arr = _librootnumpy.array_c(ROOT.AsCObject(arr))
else:
raise TypeError(
"unable to convert object of type {0} "
"into a numpy array".format(type(arr)))
if copy:
return np.copy(arr)
return arr
|
1711632
|
from itertools import zip_longest
import itertools
import tensorflow as tf
from functools import reduce
from operator import mul
import numpy as np
import re
VERY_BIG_NUMBER = 1e30
VERY_SMALL_NUMBER = 1e-30
VERY_POSITIVE_NUMBER = VERY_BIG_NUMBER
VERY_NEGATIVE_NUMBER = -VERY_BIG_NUMBER
def add_summary_zero_fraction(t, threshold=0.0):
tf.summary.scalar(t.op.name+'/sparsity',
tf.nn.zero_fraction(tf.cast(tf.greater(tf.abs(t), threshold), tf.int8))
)
def get_initializer(matrix):
def _initializer(shape, dtype=None, partition_info=None, **kwargs): return matrix
return _initializer
def variable_on_cpu(name, shape, initializer):
"""Helper to create a Variable stored on CPU memory.
Args:
name: name of the variable
shape: list of ints
initializer: initializer for Variable
Returns:
Variable Tensor
"""
with tf.device('/cpu:0'):
var = tf.get_variable(name, shape, initializer=initializer)
return var
def variable_with_weight_decay(name, shape, stddev, wd):
"""Helper to create an initialized Variable with weight decay.
Note that the Variable is initialized with a truncated normal distribution.
A weight decay is added only if one is specified.
Args:
name: name of the variable
shape: list of ints
stddev: standard deviation of a truncated Gaussian
wd: add L2Loss weight decay multiplied by this float. If None, weight
decay is not added for this Variable.
Returns:
Variable Tensor
"""
var = variable_on_cpu(name, shape,
tf.truncated_normal_initializer(stddev=stddev))
if wd:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
tf.add_to_collection('losses', weight_decay)
return var
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, var in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
assert g is not None, var.name
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(axis=0, values=grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
def mask(val, mask, name=None):
if name is None:
name = 'mask'
return tf.multiply(val, tf.cast(mask, 'float'), name=name)
def exp_mask(val, mask, name=None):
"""Give very negative number to unmasked elements in val.
For example, [-3, -2, 10], [True, True, False] -> [-3, -2, -1e9].
Typically, this effectively masks in exponential space (e.g. softmax)
Args:
val: values to be masked
mask: masking boolean tensor, same shape as tensor
name: name for output tensor
Returns:
Same shape as val, where some elements are very small (exponentially zero)
"""
if name is None:
name = "exp_mask"
return tf.add(val, (1 - tf.cast(mask, 'float')) * VERY_NEGATIVE_NUMBER, name=name)
def flatten(tensor, keep):
fixed_shape = tensor.get_shape().as_list()
start = len(fixed_shape) - keep
left = reduce(mul, [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start)])
out_shape = [left] + [fixed_shape[i] or tf.shape(tensor)[i] for i in range(start, len(fixed_shape))]
flat = tf.reshape(tensor, out_shape)
return flat
def reconstruct(tensor, ref, keep):
ref_shape = ref.get_shape().as_list()
tensor_shape = tensor.get_shape().as_list()
ref_stop = len(ref_shape) - keep
tensor_start = len(tensor_shape) - keep
pre_shape = [ref_shape[i] or tf.shape(ref)[i] for i in range(ref_stop)]
keep_shape = [tensor_shape[i] or tf.shape(tensor)[i] for i in range(tensor_start, len(tensor_shape))]
# pre_shape = [tf.shape(ref)[i] for i in range(len(ref.get_shape().as_list()[:-keep]))]
# keep_shape = tensor.get_shape().as_list()[-keep:]
target_shape = pre_shape + keep_shape
out = tf.reshape(tensor, target_shape)
return out
def add_wd(wd, scope=None):
scope = scope or tf.get_variable_scope().name
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
with tf.name_scope("weight_decay"):
for var in variables:
weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name="{}/wd".format(var.op.name))
tf.add_to_collection('losses', weight_decay)
def _excluded_var_pattern():
#return "(main/logits)|(main/p0/bi_attention)|(prepro/u1)"
return "(thisisapatternwetrytoexcludenothing)"
def add_sparsity_regularization(wd, collection_name=None, scope=None):
orig_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
variables = []
for eachvar in orig_variables:
if not re.match(_excluded_var_pattern(), eachvar.op.name):
variables.append(eachvar)
with tf.name_scope("sparsity_regular"):
if len(variables):
the_regularizer = tf.contrib.layers.l1_regularizer(scale=wd, scope=scope)
reg_loss = tf.contrib.layers.apply_regularization(the_regularizer, variables)
tf.add_to_collection('losses', reg_loss)
# add to collections
collection_name = collection_name or 'sparse_vars'
for eachvar in variables:
tf.add_to_collection(collection_name, eachvar)
def reduce_square_sum(var, start=0, end=0, axis=0):
the_shape = var.get_shape().as_list()
if len(the_shape) == 2:
t = tf.square(var)
t = tf.reduce_sum(t, axis=axis)
assert(end>start and axis<2)
t = tf.gather(t,tf.range(start, end))
return t
else:
raise NotImplementedError('variables with shapes != 2 is not implemented.')
def add_mixedlasso(groupwd, l1wd, coef_scaling=False, collection_name=None, scope=None):
orig_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope)
variables = []
for eachvar in orig_variables:
if not re.match(_excluded_var_pattern(), eachvar.op.name):
variables.append(eachvar)
with tf.name_scope("DimenGroupLasso"):
collection_name = collection_name or 'sparse_vars'
for eachvar in variables:
the_shape = eachvar.get_shape().as_list()
if len(the_shape)<=1: # l1 is group lasso when the group size is 1
the_regularizer = tf.contrib.layers.l1_regularizer(scale=l1wd, scope=scope)
reg = tf.contrib.layers.apply_regularization(the_regularizer, [eachvar])
elif len(the_shape)==2:
reg = 0.0
for s, axis in zip(the_shape, range(len(the_shape))):
if s != np.prod(the_shape):
if s == 1:
the_regularizer = tf.contrib.layers.l1_regularizer(scale=l1wd, scope=scope)
reg = reg + tf.contrib.layers.apply_regularization(the_regularizer, [eachvar])
else:
t = tf.square(eachvar)
t = tf.reduce_sum(t, axis=axis) + tf.constant(1.0e-8)
t = tf.sqrt(t)
if coef_scaling:
reg = reg + tf.reduce_sum(t) * groupwd * np.sqrt(s)
else:
reg = reg + tf.reduce_sum(t) * groupwd
else:
raise NotImplementedError('variables with shapes > 2 is not implemented.')
tf.add_to_collection('losses', reg)
tf.add_to_collection(collection_name, eachvar)
def grouper(iterable, n, fillvalue=None, shorten=False, num_groups=None):
args = [iter(iterable)] * n
out = zip_longest(*args, fillvalue=fillvalue)
out = list(out)
if num_groups is not None:
default = (fillvalue, ) * n
assert isinstance(num_groups, int)
out = list(each for each, _ in zip_longest(out, range(num_groups), fillvalue=default))
if shorten:
assert fillvalue is None
out = (tuple(e for e in each if e is not None) for each in out)
return out
def padded_reshape(tensor, shape, mode='CONSTANT', name=None):
paddings = [[0, shape[i] - tf.shape(tensor)[i]] for i in range(len(shape))]
return tf.pad(tensor, paddings, mode=mode, name=name)
def get_num_params():
num_params = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
num_params += reduce(mul, [dim.value for dim in shape], 1)
return num_params
def zerout_gradients_for_zero_weights(grads_and_vars, zero_threshold=0.0, mode='element'):
""" zerout gradients for weights with zero values, so as to freeze zero weights.
(make sure the history gradients are zeros too, otherwise, zero weights can still be updated in adam etc)
Args:
grads_and_vars: Lists of (gradient, variable).
mode: the mode to freeze weights.
'element': freeze all zero weights
'group': freeze rows/columns that are fully zeros
"""
gradients, variables = zip(*grads_and_vars)
zerout_gradients = []
for gradient, variable in zip(gradients, variables):
if gradient is None:
zerout_gradients.append(None)
continue
if mode=='element':
where_cond = tf.less_equal(tf.abs(variable), zero_threshold)
elif mode=='group':
raise NotImplementedError('Group wise freezing is not implemented yet.')
else:
raise ValueError('Unsupported mode == %s' % mode)
zerout_gradient = tf.where(where_cond,
tf.zeros_like(gradient),
gradient)
zerout_gradients.append(zerout_gradient)
return list(zip(zerout_gradients, variables))
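# Typical usage (a sketch, assuming the standard TF1 optimizer workflow): apply it between
# compute_gradients and apply_gradients so that weights already pruned to zero stay frozen, e.g.
#   grads_and_vars = optimizer.compute_gradients(loss)
#   train_op = optimizer.apply_gradients(zerout_gradients_for_zero_weights(grads_and_vars))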
|
1711643
|
import os
PYLIBJUJU_DEV_FEATURE_FLAG = "PYLIBJUJU_DEV_FEATURE_FLAGS"
DEFAULT_VALUES_FLAG = "default_values"
def feature_enabled(name):
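    """Return True if the feature ``name`` appears in the comma-separated
    PYLIBJUJU_DEV_FEATURE_FLAGS environment variable,
    e.g. PYLIBJUJU_DEV_FEATURE_FLAGS="default_values,other_flag"."""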
flags = os.environ.get(PYLIBJUJU_DEV_FEATURE_FLAG)
if flags is not None:
parts = [s.strip() for s in flags.split(",")]
return name in parts
return False
|
1711674
|
from plugin.preferences.options.o_sync.lists.liked import SyncListsLikedOption, SyncListsLikedPlaylistsOption
from plugin.preferences.options.o_sync.lists.personal import SyncListsPersonalOption, SyncListsPersonalPlaylistsOption
from plugin.preferences.options.o_sync.lists.watchlist import SyncListsWatchlistOption, SyncListsWatchlistPlaylistsOption
|
1711681
|
import json
import multiprocessing
import pickle
from argparse import ArgumentParser
def collect_stats(line):
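    """Collect, from one JSON-encoded sample, the node types that act as terminals
    (direct parents of tokens), the node types that act as nonterminals, and a map
    from each parent type to the set of child types observed under it."""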
local_terminal_types = set()
local_nonterminal_types = set()
local_possible_children = {}
obj = json.loads(line)
relative_paths = obj['relative_paths']
head_root_path = obj['head_root_path']
tokens = [target for target, is_token in zip(obj['targets'], obj['is_token']) if is_token == 1]
for i, path in enumerate(relative_paths):
prev_node = None
for node in head_root_path['nodes'] + path['nodes']:
node = node['node']
type = node[0]
            if prev_node is not None and type in tokens and prev_node[0] not in tokens:
local_terminal_types.add(prev_node[0])
elif type not in tokens and prev_node is not None:
local_nonterminal_types.add(prev_node[0])
cur_possible_children_for_parent = local_possible_children.get(prev_node[0], set())
cur_possible_children_for_parent.add(type)
local_possible_children[prev_node[0]] = cur_possible_children_for_parent
prev_node = node
if obj['is_token'][i] == 1 and prev_node[0] not in tokens:
local_terminal_types.add(prev_node[0])
return local_terminal_types, local_nonterminal_types, local_possible_children
if __name__ == '__main__':
parser = ArgumentParser()
parser.add_argument("--json", dest="json", required=True)
parser.add_argument("--output", dest="output", required=False)
args = parser.parse_args()
terminal_types = set()
nonterminal_types = set()
possible_children = {}
with open(args.json, 'r') as file:
with multiprocessing.Pool(64) as pool:
local_results = pool.imap_unordered(collect_stats, file, chunksize=100)
#local_results = [collect_stats(line) for line in file]
for local_terminal_types, local_nonterminal_types, local_possible_children in local_results:
terminal_types = terminal_types.union(local_terminal_types)
nonterminal_types = nonterminal_types.union(local_nonterminal_types)
for key, val in local_possible_children.items():
cur_possible_children_for_parent = possible_children.get(key, set())
cur_possible_children_for_parent = cur_possible_children_for_parent.union(val)
possible_children[key] = cur_possible_children_for_parent
terminal_and_nonterminal_nodes = terminal_types.intersection(nonterminal_types)
if len(terminal_and_nonterminal_nodes) == 0:
print('No sets that are both terminals and nonterminals were found (this is a good sign).')
else:
print('WARNING: the following nodes were found to be both terminals and nonterminals.')
print('This is not necessarily a problem, but might be more difficult to generate trees: ')
for n in terminal_and_nonterminal_nodes:
print('\t', n)
if args.output:
with open(args.output, 'wb') as file:
pickle.dump(terminal_types, file)
pickle.dump(nonterminal_types, file)
pickle.dump(possible_children, file)
else:
print('Terminal types: ', terminal_types)
print('Nonterminal types: ', nonterminal_types)
print('Max child id for node: ')
print('Possible child nodes: ')
for node, children in possible_children.items():
print('{}: {}'.format(node, children))
|
1711682
|
from pathlib import Path
BASE_DIR = Path(__file__).resolve(strict=True).parents[1]
GODOT_PROJECT = BASE_DIR / 'script_runner' / 'project'
PYTHON_PACKAGE = 'script_runner'
GDNATIVE_LIBRARY = 'script_runner.gdnlib'
|
1711696
|
import cv2
import cv2.aruco as aruco
import numpy as np
import os
from pathlib import Path
import math
def findArucoMarkers(img, arucoDict, arucoParam, intrinsics, distortion):
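    """Detect ArUco markers on a BGR frame; returns (corner boxes, marker ids, rejected candidates)."""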
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bboxs, ids, rejected = aruco.detectMarkers(gray, arucoDict, parameters=arucoParam,
cameraMatrix=intrinsics,
distCoeff=distortion)
return bboxs, ids, rejected
def detectAruco(markerSize, totalMarkers, intrinsics: np.ndarray, distortion: np.ndarray, should_draw_axis=False):
key = getattr(aruco, f'DICT_{markerSize}X{markerSize}_{totalMarkers}')
arucoDict = aruco.Dictionary_get(key)
arucoParam = aruco.DetectorParameters_create()
cap = cv2.VideoCapture(0)
while True:
success, img = cap.read()
# img = cv2.flip(img, 1)
if success:
corners, ids, rejected = findArucoMarkers(img, arucoDict, arucoParam, intrinsics, distortion)
if should_draw_axis and len(corners) > 0:
for i in range(0, len(ids)):
# Estimate pose of each marker and return the values rvec and tvec---
# (different from those of camera coefficients)
rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(corners[i], 0.02,
intrinsics,
distortion)
# Draw a square around the markers
cv2.aruco.drawDetectedMarkers(img, corners)
# Draw Axis
cv2.aruco.drawAxis(img, intrinsics, distortion, rvec, tvec, 0.01)
# print(f"id = {ids[i]} --> tvec = {tvec}, rvec = {rvec}")
R = np.array(cv2.Rodrigues(rvec)[0])
P = constructTransformation(R, tvec)
euler_angles_rad = rotationMatrixToEulerAngles(R)
euler_angles_deg = np.rad2deg(euler_angles_rad)
print(f"roll: {euler_angles_deg[2]}, pitch: {euler_angles_deg[0]}, yaw: {euler_angles_deg[1]}")
# print(matrix)
cv2.imshow('img', img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R):
assert (isRotationMatrix(R))
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
def constructTransformation(R, T):
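    """Build a 4x4 homogeneous transformation matrix from a 3x3 rotation R and a translation vector T."""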
transformation_matrix = np.zeros([4, 4])
transformation_matrix[0:3, 0:3] = R
transformation_matrix[0:3, 3] = T
transformation_matrix[3, 3] = 1
return transformation_matrix
def loadCalib(calibration_file: Path):
    npzfile = np.load(calibration_file.as_posix())
return npzfile['intrinsics'], npzfile['distortion'], \
npzfile['new_intrinsics'], npzfile['roi']
def warpImageOnAruco(markerSize, totalMarkers, intrinsics, distortion, should_draw_axis):
key = getattr(aruco, f'DICT_{markerSize}X{markerSize}_{totalMarkers}')
arucoDict = aruco.Dictionary_get(key)
arucoParam = aruco.DetectorParameters_create()
cap = cv2.VideoCapture(0)
    pts_dst = np.array([[921, 731], [113, 732], [1127, 909], [927, 905]]) # calibrate this on the first scene
while True:
success, img = cap.read()
# img = cv2.flip(img, 1)
if success:
corners, ids, rejected = findArucoMarkers(img, arucoDict, arucoParam, intrinsics, distortion)
if should_draw_axis and len(corners) > 0:
for i in range(0, len(ids)):
# Estimate pose of each marker and return the values rvec and tvec---
# (different from those of camera coefficients)
rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(corners[i], 0.02,
intrinsics,
distortion)
# Draw a square around the markers
cv2.aruco.drawDetectedMarkers(img, corners)
# Draw Axis
cv2.aruco.drawAxis(img, intrinsics, distortion, rvec, tvec, 0.01)
# print(corners[0][0])
h, status = cv2.findHomography(np.array(corners[0][0]), pts_dst)
im_out = cv2.warpPerspective(img, h, (img.shape[0], img.shape[1]))
cv2.imshow("imout", im_out)
# print(matrix)
cv2.imshow('img', img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
cap.release()
cv2.destroyAllWindows()
def main(markerSize, totalMarkers, intrinsics: np.ndarray, distortion: np.ndarray, should_draw_axis=False):
detectAruco(markerSize, totalMarkers, intrinsics, distortion, should_draw_axis)
# warpImageOnAruco(markerSize, totalMarkers, intrinsics, distortion, should_draw_axis)
if __name__ == '__main__':
intrinsics, distortion, new_intrinsics, roi = loadCalib(Path("calib.npz"))
# print(type(intrinsics), type(distortion))
main(markerSize=5, totalMarkers=250, should_draw_axis=True,
intrinsics=intrinsics, distortion=distortion)
|
1711727
|
from django.contrib.auth import get_user_model
from django.core import mail
from django.test import TestCase
from herald.contrib.auth.forms import HeraldPasswordResetForm
class ContribAuthTests(TestCase):
def test_save_form(self):
User = get_user_model()
User.objects.create_user(username='<EMAIL>', email='<EMAIL>', password='password')
form = HeraldPasswordResetForm({'email': '<EMAIL>'})
form.is_valid()
form.save()
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
def test_save_form_domain_override(self):
User = get_user_model()
User.objects.create_user(username='<EMAIL>', email='<EMAIL>', password='password')
form = HeraldPasswordResetForm({'email': '<EMAIL>'})
form.is_valid()
form.save(domain_override='foo')
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, ['<EMAIL>'])
|
1711740
|
import copy
import logging
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import gui
from settings.config_objs.constants import NotSet
logger = logging.getLogger(__name__)
class AbstractConfigWidget:
def get_value(self):
raise NotImplementedError()
def set_value(self, value):
raise NotImplementedError()
def update_widget(self):
pass
class LineEdit(QWidget, AbstractConfigWidget):
def __init__(self, config_obj):
super().__init__()
self.config_obj = config_obj
self.line_edit = QLineEdit()
self.data_changed_signal = self.line_edit.textChanged
if config_obj.get() is not None:
self.set_value(config_obj.get())
self.layout = QHBoxLayout()
self.layout.setContentsMargins(1, 1, 1, 1)
self.layout.addWidget(QLabel(f"{config_obj.get_gui_name()}: "))
self.layout.addWidget(self.line_edit)
self.setLayout(self.layout)
def get_value(self):
value = self.line_edit.text().strip()
if value == "":
return None
return value
def set_value(self, value):
if value is not None:
self.line_edit.setText(str(value))
class ErrorLabel(QLabel):
def __init__(self):
super().__init__()
self.setStyleSheet("QLabel { color : red; }")
def changeEvent(self, event: QEvent):
super().changeEvent(event)
if QEvent.EnabledChange == event.type():
if self.isEnabled():
self.setStyleSheet("QLabel { color : red; }")
else:
self.setStyleSheet("QLabel { color : gray; }")
class WidgetWrapper(QWidget):
data_changed_signal = pyqtSignal()
def __init__(self, config_widget, hint_text=None, parent=None):
super().__init__(parent=parent)
self.layout = QVBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layout.setSpacing(0)
self.setLayout(self.layout)
self.config_widget = config_widget
if self.config_widget.data_changed_signal is not None:
self.config_widget.data_changed_signal.connect(self.data_changed_emit)
self.layout.addWidget(self.config_widget)
if hint_text is not None:
hint = gui.DynamicRichLabel(hint_text)
hint.setStyleSheet("QLabel { color : gray; }")
self.layout.addWidget(hint)
self.error_label = ErrorLabel()
self.error_label.hide()
self.layout.addWidget(self.error_label)
def get_value(self):
return self.config_widget.get_value()
def set_value(self, value):
self.config_widget.set_value(value)
def _set_error_msg(self):
if self.config_widget.config_obj.is_valid_from_widget():
return False
msg = self.config_widget.config_obj.msg
self.error_label.setText(msg)
return True
def update_widget(self):
self.config_widget.update_widget()
if self._set_error_msg():
self.error_label.show()
else:
self.error_label.hide()
def data_changed_emit(self, *args, **kwargs):
self.data_changed_signal.emit()
class ConfigString(object):
def __init__(self,
default=None,
active_func=lambda instance, from_widget, parent: True,
optional=False,
gui_name=None,
hint_text=None,
gray_out=False,
require_restart=False):
self.default = default
self.gui_name = gui_name
self.hint_text = hint_text
self.gray_out = gray_out
self.require_restart = require_restart
self._value = None
self.active_func = active_func
self.name = None # will be set on runtime
self.instance = None # will be set on runtime
self.optional = optional
self.msg = ""
self.widget = None
self.parent = None
def _get_new_widget(self) -> WidgetWrapper:
return WidgetWrapper(self.init_widget(), hint_text=self.hint_text)
def get_widget(self) -> WidgetWrapper:
if self.widget is None:
self.widget = self._get_new_widget()
return self.widget
def init_widget(self):
return LineEdit(self)
def get(self):
return self._get()
def _get(self):
return self._value
def get_from_widget(self):
if self.widget is None:
raise ValueError("Widget not active")
return self.widget.get_value()
def get_gui_name(self):
name = self.gui_name
if name is None:
name = self.name
return name + (" (Requires Restart)" if self.require_restart else "")
def set(self, value):
if not self.is_valid(value):
raise ValueError(f"Can not set invalid value. msg: {self.msg}")
self._set(value)
def _set(self, value):
self._value = value
def set_to_widget(self, value):
if self.widget is not None:
self.widget.set_value(value)
def set_from_widget(self):
value = self.widget.get_value()
self.set(value)
def is_valid_from_widget(self) -> bool:
if self.widget is None:
return False
value = self.widget.get_value()
return self.is_valid(value, from_widget=True)
def test(self, value, from_widget: bool = False) -> bool:
if value is None:
return True
try:
self._test(value, from_widget)
return True
except ValueError as e:
self.msg = str(e)
return False
def _test(self, value, from_widget: bool) -> bool:
return True
def load(self, value):
if not value:
return
result = self._load(value)
if not self.test(result):
return
self._value = result
def _load(self, value):
return value
def save(self):
if self._value is None:
return None
if self.test(self._value):
return self._save()
raise ValueError("Tried to save an invalid ConfigObject")
def _save(self):
return self._value
def is_active(self, from_widget: bool = False):
return self.active_func(self.instance, from_widget, self.parent)
def is_set(self, value=NotSet):
if value is NotSet:
value = self._value
if isinstance(value, str) and value == "":
value = None
return value is not None or self.optional
def is_valid(self, value=NotSet, from_widget: bool = False) -> bool:
if value is NotSet:
value = self._value
result = self._is_valid(value, from_widget)
return result
def _is_valid(self, value, from_widget: bool) -> bool:
if not self.is_active(from_widget):
return True
if not self.is_set(value):
self.msg = "Can not be empty"
return False
if not self.test(value, from_widget):
return False
return True
def set_parser(self, parser):
parser.add_argument(f"--{self.name.replace('_', '-')}")
def reset_widget(self):
self.set_to_widget(self.get())
def update_widget(self):
if self.widget is None:
return
self.widget.update_widget()
def update_visibility(self):
if self.widget is None:
return
is_active = self.is_active(from_widget=True)
if self.gray_out:
self.widget.setEnabled(is_active)
else:
self.widget.setVisible(is_active)
def cancel(self):
pass
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if v is self.widget:
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
|
1711776
|
from .utils.compatibility import *
from .utils.der import fromPem, removeSequence, removeObject, removeBitString, toPem, encodeSequence, encodeOid, encodeBitString
from .utils.binary import BinaryAscii
from .point import Point
from .curve import curvesByOid, supportedCurves, secp256k1
class PublicKey:
def __init__(self, point, curve):
self.point = point
self.curve = curve
def toString(self, encoded=False):
xString = BinaryAscii.stringFromNumber(
number=self.point.x,
length=self.curve.length(),
)
yString = BinaryAscii.stringFromNumber(
number=self.point.y,
length=self.curve.length(),
)
return "\x00\x04" + xString + yString if encoded else xString + yString
def toDer(self):
oidEcPublicKey = (1, 2, 840, 10045, 2, 1)
encodeEcAndOid = encodeSequence(
encodeOid(*oidEcPublicKey),
encodeOid(*self.curve.oid),
)
return encodeSequence(encodeEcAndOid, encodeBitString(self.toString(encoded=True)))
def toPem(self):
return toPem(der=toBytes(self.toDer()), name="PUBLIC KEY")
@classmethod
def fromPem(cls, string):
return cls.fromDer(fromPem(string))
@classmethod
def fromDer(cls, string):
s1, empty = removeSequence(string)
if len(empty) != 0:
raise Exception("trailing junk after DER public key: {}".format(
BinaryAscii.hexFromBinary(empty)
))
s2, pointBitString = removeSequence(s1)
oidPk, rest = removeObject(s2)
oidCurve, empty = removeObject(rest)
if len(empty) != 0:
raise Exception("trailing junk after DER public key objects: {}".format(
BinaryAscii.hexFromBinary(empty)
))
if oidCurve not in curvesByOid:
raise Exception(
"Unknown curve with oid %s. Only the following are available: %s" % (
oidCurve,
", ".join([curve.name for curve in supportedCurves])
)
)
curve = curvesByOid[oidCurve]
pointStr, empty = removeBitString(pointBitString)
if len(empty) != 0:
raise Exception(
"trailing junk after public key point-string: " +
BinaryAscii.hexFromBinary(empty)
)
return cls.fromString(pointStr[2:], curve)
@classmethod
def fromString(cls, string, curve=secp256k1, validatePoint=True):
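        """Parse an uncompressed point string: the x and y coordinates concatenated,
        each ``curve.length()`` bytes long (optionally validating that the point lies on the curve)."""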
baseLen = curve.length()
xs = string[:baseLen]
ys = string[baseLen:]
p = Point(
x=BinaryAscii.numberFromString(xs),
y=BinaryAscii.numberFromString(ys),
)
if validatePoint and not curve.contains(p):
raise Exception(
"point ({x},{y}) is not valid for curve {name}".format(
x=p.x, y=p.y, name=curve.name
)
)
return PublicKey(point=p, curve=curve)
|
1711788
|
from time import sleep
from typing import List, Union, Dict
from privex.loghelper import LogHelper
from tests.base import PrivexBaseCase
from privex.helpers import thread as modthread, LockConflict, random_str, OrderedDictObject
from privex.helpers.thread import BetterEvent, event_multi_wait_all, event_multi_wait_any, lock_acquire_timeout, SafeLoopThread
from collections import namedtuple
from threading import Event, Lock
import threading
import queue
import logging
LOG_FORMATTER = logging.Formatter('[%(asctime)s]: %(name)-25s -> %(funcName)-35s : %(levelname)-8s:: %(message)s')
_lh = LogHelper(__name__, handler_level=logging.DEBUG, formatter=LOG_FORMATTER)
_lh.add_console_handler()
_lh.copy_logger('privex.helpers.thread')
log = logging.getLogger(__name__)
# release_lock = BetterEvent(name='Global Release Lock event')
shared_lock = threading.Lock()
shared_queue = queue.Queue()
stop_threads = BetterEvent(name='Global stop_threads')
LockCheck = namedtuple('LockCheck', 'thread_id was_locked lock_exception thread_name')
UnlockEvent = namedtuple('UnlockEvent', 'thread_id thread_name')
class LockerThread(SafeLoopThread):
loop_sleep = 0.05
def __init__(self, lock: threading.Lock, timeout=2, fail=True, hold_lock_start=True, **kwargs):
kwargs = dict(kwargs)
# Arguments passed to lock_acquire_timeout
self.lock = lock
self.timeout = timeout
self.fail = fail
# Amount of time to wait between each `release_lock` check after the lock is acquired.
# self.lock_hold_sleep = kwargs.get('lock_hold_sleep', 0.2)
# When the release_lock is in the SET position, the thread will hold the lock until release_lock is cleared.
self.release_lock = BetterEvent(name='Release Lock')
if not hold_lock_start:
log.info("hold_lock_start is False. Triggering event self.release_lock (do not hold lock)")
self.release_lock.set()
self.event_change_lock = Lock()
self.pause_if_locked = kwargs.pop('pause_if_locked', True)
super().__init__(stop_events=[stop_threads], **kwargs)
@property
def should_lock(self):
return not self.release_lock.is_set()
def emit_lock(self, event_lock_timeout=None):
with lock_acquire_timeout(self.event_change_lock, event_lock_timeout, fail=True, block=event_lock_timeout is not None):
return self.release_lock.clear()
def emit_unlock(self, event_lock_timeout=None):
with lock_acquire_timeout(self.event_change_lock, event_lock_timeout, fail=True, block=event_lock_timeout is not None):
return self.release_lock.set()
def loop(self):
if not self.should_lock:
log.debug(f" [{self.name}] Waiting for release_lock event to be cleared...")
ev_trig = event_multi_wait_any(self.release_lock, *self.stop_events, invert_idx=[0], wait_timeout=20)
return log.debug(f" [{self.name}] Finished waiting due to events: {ev_trig}")
log.info(f" [{self.name}] Acquiring lock: %s", self.lock)
try:
with modthread.lock_acquire_timeout(self.lock, self.timeout, fail=self.fail) as locked:
if not locked:
log.debug(f" [{self.name}] did not acquire lock. not waiting to hold lock open.")
if self.pause_if_locked:
log.debug(f" [{self.name}] pause_if_locked is True. setting release_lock to pause lock acquisition attempts.")
try:
self.emit_unlock()
except LockConflict:
log.debug(f" [{self.name}] got lock conflict while setting release_lock...")
self.out_queue.put(LockCheck(self.ident, locked, lock_exception=None, thread_name=self.name))
if not locked:
return log.debug(f" [{self.name}] lock not acquired, returning loop...")
log.debug(f" [{self.name}] waiting until release_lock or any event in stop_events is triggered...")
ev_trig = event_multi_wait_any(self.release_lock, *self.stop_events)
log.debug(f" [{self.name}] finished waiting to release lock due to events: {ev_trig}")
if locked:
log.debug(f" [{self.name}] release_lock released or thread stop event fired. releasing previously acquired lock.")
was_locked = bool(locked)
log.debug(f" [{self.name}] finished lock_acquire_timeout context manager block. lock will be released if we held it...")
if was_locked:
self.out_queue.put(UnlockEvent(self.ident, self.name))
except LockConflict as e:
log.debug(f" [{self.name}] Lock conflict / timeout exception was raised: %s %s", type(e), str(e))
self.out_queue.put(LockCheck(self.ident, None, e, thread_name=self.name))
except Exception as e:
log.exception(f" [{self.name}] Exception raised while acquiring lock: %s %s", type(e), str(e))
self.out_queue.put(LockCheck(self.ident, None, e, thread_name=self.name))
ThreadTypes = Union[threading.Thread, LockerThread]
class TestThreading(PrivexBaseCase):
"""Test cases for :mod:`privex.helpers.thread` functions/classes"""
threads: Union[OrderedDictObject, Dict[str, ThreadTypes]] = OrderedDictObject()
def tearDown(self) -> None:
if len(self.threads) > 0:
stop_threads.set()
sleep(0.3)
thread_keys = list(self.threads.keys())
for name in thread_keys:
t = self.threads[name]
if not t.is_alive():
log.debug("Thread '%s' is dead. Removing from thread dict...", name)
del self.threads[name]
continue
log.debug("Thread '%s' is still alive. Joining and waiting for it to shutdown...", name)
if hasattr(t, 'emit_stop'):
log.debug("Thread '%s' has emit_stop method. Calling emit_stop before joining.", name)
t.emit_stop()
t.join(3)
log.debug("Removing stopped thread %s", name)
del self.threads[name]
log.debug("Successfully removed stopped thread %s", name)
# Reset global event thread signals to their default, empty queues, and release any leftover locks.
# if release_lock.is_set(): release_lock.clear()
if shared_lock.locked(): shared_lock.release()
while not shared_queue.empty():
shared_queue.get_nowait()
if stop_threads.is_set(): stop_threads.clear()
@classmethod
def _mk_locker(cls, lock: threading.Lock, timeout=2, fail=True, hold_lock_start=False, name=None, **kwargs) -> LockerThread:
"""
:param threading.Lock lock:
:param int|float timeout:
:param bool fail:
:param bool hold_lock_start:
:param str name:
:param kwargs:
:return:
"""
auto_start = kwargs.pop('auto_start', True)
name = random_str(8) if name is None else name
t = LockerThread(lock, timeout=timeout, fail=fail, hold_lock_start=hold_lock_start, **kwargs)
t.name = name
t.daemon = kwargs.pop('daemon', False)
if auto_start:
t.start()
cls.threads[name] = t
return t
@staticmethod
def _cleanup_lockers(*lockers: LockerThread):
for l in lockers:
l.emit_unlock() # Release any lock they might be holding
l.emit_stop() # Stop the locker thread
if l.is_alive(): # Join the thread if it's alive so that it can shutdown correctly.
l.join(1)
def test_lock_acquire_timeout_basic(self):
# First we test that we can successfully acquire an unlocked lock
t1 = self._mk_locker(shared_lock, timeout=2, fail=False, name="acquire_lock_timeout_t1")
self.assertFalse(shared_lock.locked())
self.assertTrue(t1.emit_lock(), msg="emit_lock should've returned True to acknowledge release_lock flipping")
# Check the LockCheck result from the thread queue
res: LockCheck = t1.out_queue.get(block=True, timeout=2)
self.assertTrue(res.was_locked)
self.assertTrue(shared_lock.locked())
self.assertIsNone(res.lock_exception)
self.assertEqual(res.thread_name, "acquire_lock_timeout_t1")
# Ask t1 to release the lock
t1.emit_unlock()
res: UnlockEvent = t1.out_queue.get(block=True, timeout=2)
self.assertEqual(res.thread_name, 'acquire_lock_timeout_t1')
self.assertFalse(shared_lock.locked())
# Stop t1
t1.emit_stop()
t1.join(1)
def test_lock_acquire_timeout_timed_out(self):
self.assertFalse(shared_lock.locked())
# First we acquire a lock using our first thread
log.debug(" >>> thread 1 acquire")
t1 = self._mk_locker(shared_lock, timeout=4, fail=False, name="timeout_t1")
t1.emit_lock()
# Check the LockCheck result from the thread queue
res: LockCheck = t1.out_queue.get(block=True, timeout=2)
self.assertTrue(res.was_locked)
self.assertTrue(shared_lock.locked()) # Confirm our lock is locked
# Now we try and acquire the lock with a second thread
log.debug(" >>> thread 2 acquire (test lock timeout fail)")
t2 = self._mk_locker(shared_lock, timeout=2, fail=False, name="timeout_t2")
t2.emit_lock()
# Confirm that t2 failed to get the lock
res: LockCheck = t2.out_queue.get(block=True, timeout=4)
self.assertFalse(res.was_locked)
self.assertTrue(shared_lock.locked())
self.assertIsNone(res.lock_exception)
# Now we'll ask t2 to try and get the lock again, wait 200ms and release the lock
log.debug(" >>> thread 2 acquiring (unlocking thread 1)")
t2.emit_lock()
sleep(0.2)
log.debug(" >>> thread 1 unlocking")
t1.emit_unlock()
# If the lock wait timeout was being acknowledged, t2 should now have the lock.
log.debug(" >>> get thread 2 out_queue")
res: LockCheck = t2.out_queue.get(block=True, timeout=4)
self.assertTrue(res.was_locked)
self.assertTrue(shared_lock.locked())
# Now we'll release the lock and confirm the lock is unlocked again
log.debug(" >>> thread 2 unlock")
t2.emit_unlock()
res: UnlockEvent = t2.out_queue.get(block=True, timeout=4)
self.assertEqual(res.thread_name, 'timeout_t2')
self.assertFalse(shared_lock.locked())
log.debug(" >>> cleanup")
# If we got this far - everything is fine :) - stop the threads and cleanup
self._cleanup_lockers(t1, t2)
|
1711807
|
passes = """
const char* PrimitiveCoordinatesPass::NAME = "PrimitiveCoords";
const char* MapCoordsPass::NAME = "MapCoords";
const char* GeometryNormalPass::NAME = "NormalsGeometry";
const char* ShadingNormalPass::NAME = "NormalsShading";
const char* DotProductPass::NAME = "NormalsDotProduct";
const char* OpacityColorPass::NAME = "OpacityColor";
const char* AlphaPass::NAME = "Alpha";
const char* DebugPass::NAME = "DEBUG";
const char* VelocityPass::NAME = "Velocity";
const char* ZDepthPass::NAME = "ZDepth";
const char* MapPass::NAME = "Texmap";
const char* NormalsDiscrepancyPass::NAME = "NormalsDiscrepancy";
const char* EmissionPass::NAME = "Emission";
const char* WorldPositionPass::NAME = "WorldPosition";
const char* IndirectLightingPass::NAME = "IndirectLighting";
const char* DirectLightingPass::NAME = "DirectLighting";
const char* ShadowsPass::NAME = "Shadows";
const char* SourceNodeIdPass::NAME = "SourceNodeId";
const char* PrimitiveIdPass::NAME = "PrimitiveId";
const char* MaterialIdPass::NAME = "MaterialId";
const char* InstanceIdPass::NAME = "InstanceId";
const char* DiffuseColorPass::NAME = "DiffuseColor";
const char* ReflectColorPass::NAME = "ReflectColor";
const char* RefractColorPass::NAME = "RefractColor";
const char* TranslucencyColorPass::NAME = "TranslucencyColor";
const char* DiffusePass::NAME = "Diffuse";
const char* RefractPass::NAME = "Refract";
const char* ReflectPass::NAME = "Reflect";
const char* TranslucencyPass::NAME = "Translucency";
const char* GiReflectPass::NAME = "ReflectGi";
const char* GiDiffusePass::NAME = "DiffuseGi";
const char* GiTranslucencyPass::NAME = "TranslucencyGi";
const char* GiRefractPass::NAME = "RefractGi";
"""
data="""
{ T_BOOL, PARAM_EXPORT_ONLY, "exportOnly", false },
{ T_BOOL, PARAM_SAVE_SECONDARY_GI, "gi.saveSecondary", false },
{ T_BOOL, PARAM_LOAD_SECONDARY_GI, "gi.loadSecondary", false },
{ T_BOOL, PARAM_LOW_PRIORITY, "lowThreadPriority", true },
{ T_BOOL, PARAM_SHOW_BUCKET_ORDER, "vfb.showBucketOrder", false },
{ T_BOOL, PARAM_ENABLE_SHADING, "doShading", true },
{ T_BOOL, PARAM_DO_AA, "doAa", true },
{ T_INT, PARAM_RENDERER, "renderer", RENDERER_PROGRESSIVE, RENDERER_BUCKET, RENDERER_PPM, },
{ T_INT, PARAM_ACCELERATION_STRUCTURE, "accelerationStructure", STRUCTURE_EMBREE_BVH4_SPATIAL, STRUCTURE_NONE, STRUCTURE_EMBREE_BVH4_SPATIAL },
{ T_INT, PARAM_GI_PRIMARY_SOLVER, "gi.primarySolver", GISOLVER_PATHTRACING, GISOLVER_NONE, GISOLVER_IRRADIANCE_CACHE },
{ T_INT, PARAM_GI_SECONDARY_SOLVER, "gi.secondarySolver", GISOLVER_PATHTRACING, GISOLVER_NONE, GISOLVER_VPL },
{ T_INT, PARAM_IMAGE_FILTER, "imageFilter", IMAGE_FILTER_NONE, IMAGE_FILTER_NONE, IMAGE_FILTER_TENT },
{ T_INT, PARAM_DIRECT_LIGHT_SOLVER, "lights.solver", DSOLVER_COMBINED, DSOLVER_COMBINED, DSOLVER_PHOTON, },
{ T_INT, PARAM_ENVIRO_SOLVER, "enviroSolver", ENVIRO_FAST, ENVIRO_FAST, ENVIRO_FAST_COMPENSATE, },
{ T_INT, PARAM_EMBREE_TRIANGLES, "embree.triangles", EMBREE_FAST, EMBREE_FAST, EMBREE_AVX, },
{ T_INT, PARAM_RANDOM_SAMPLER, "random.sampler", RANDOM_5D_HIGHD, RANDOM_SHARED, RANDOM_MAXIMAL_VALUE, },
{ T_INT, PARAM_PROGRESSIVE_MAX_PASSES, "progressive.maxPasses", 0, 0, 9999 },
{ T_INT, PARAM_PROGRESSIVE_TIME_LIMIT, "progressive.timeLimit", 60*000, 0, 10*24*60*60*1000 },
{ T_FLOAT, PARAM_LIGHTS_AREA_SAMPLES_MULT, "lights.areaSamplesMult", 1.0, 0.0001, 100.0 },
{ T_INT, PARAM_LIGHTS_ENV_RESOLUTION, "lights.env.resolution", 1000, 1, 100000 },
{ T_INT, PARAM_PT_SAMPLES, "pathtracingSamples", 16, 1, PT_MAX_SAMPLES },
{ T_INT, PARAM_LIGHTS_AREA_METHOD, "lights.areaMethod", AREALIGHT_REPROJECT, AREALIGHT_SIMPLE, AREALIGHT_REPROJECT, },
{ T_INT, PARAM_RAYCASTER_MAX_DEPTH, "raycaster.maxDepth", 25, 1, RAYCASTER_MAX_DEPTH },
{ T_INT, PARAM_MIN_DEPTH, "raycaster.minDepth", 0, 0, RAYCASTER_MAX_DEPTH },
{ T_INT, PARAM_VFB_UPDATE_INTERVAL, "buffer.updateInterval", 1000, 0, 1000*3600 },
{ T_INT, PARAM_IMAGE_REGION_START_X, "image.region.startX", 0, 0, 10000000 },
{ T_INT, PARAM_IMAGE_REGION_START_Y, "image.region.startY", 0, 0, 10000000 },
{ T_INT, PARAM_IMAGE_REGION_END_X, "image.region.endX", 0, 0, 10000000 },
{ T_INT, PARAM_IMAGE_REGION_END_Y, "image.region.endY", 0, 0, 10000000 },
{ T_INT, PARAM_IMAGE_WIDTH, "image.width", 640, 1, 10000000 },
{ T_INT, PARAM_IMAGE_HEIGHT, "image.height", 480, 1, 10000000 },
{ T_STR, PARAM_GI_PRIMARY_FILE, "gi.primaryFile", "C:/primaryGI.dat" },
{ T_STR, PARAM_GI_SECONDARY_FILE, "gi.secondaryFile", "C:/secondaryGI.dat" },
{ T_INT, PARAM_IMAGE_BUCKETSIZE, "image.bucketSize", 32, 1, 1024 },
{ T_VEC3, PARAM_COLOR_EXIT, "color.exit", Rgb(0.0, 0.0, 1.0)},
{ T_INT, PARAM_INTERNAL_RES_MULTIPLIER, "fb.internalResolutionMult", 1, 1, MAX_INTERNAL_RESOLUTION_MULT, },
{ T_FLOAT, PARAM_IMAGEFILTER_WIDTH, "imagefilter.width", 1.5, 1.0, float(MAX_IMAGEFILTER_WIDTH) },
{ T_FLOAT, PARAM_IMAGEFILTER_BLURRING, "imagefilter.blurring", 0.5, 0.0, 1.0 },
{ T_FLOAT, PARAM_MAX_NORMAL_DEV, "system.maxNormalDev", 0.55, 0.001, 2.0},
{ T_INT, PARAM_PROGRESSIVE_RECALCULATE_EVERY, "progressive.recalculateEvery", 0, 0, 999},
{ T_FLOAT, PARAM_PROGRESSIVE_ADAPTIVITY, "progressive.adaptivity", 0.0, 0.0, 99.0},
{ T_FLOAT, PARAM_PT_MAX_SAMPLE_INTENSITY, "maxPtSampleIntensity", 0.0, 0.0, 99999.0},
{ T_FLOAT, PARAM_SUBDIV_ENVIRO_THRESHOLD, "subdivEnviroThreshold", 0.005, 0.0, 1.0},
{ T_FLOAT, PARAM_TEXTURED_LIGHT_RES, "lights.texturedResolution", 0.3, 0.0001, 99.0},
{ T_INT, PARAM_RANDOM_SEED, "random.seed", 1234, INT_MIN, INT_MAX, },
{ T_INT, PARAM_NUM_THREADS, "numThreads", 0, 0, 128, },
{ T_FLOAT, PARAM_LIGHTSOLVER_FRAC_LOCAL, "lightSolver.localFrac", 0.33, 0.0, 1.0, },
{ T_FLOAT, PARAM_LIGHTSOLVER_FRAC_GLOBAL, "lightSolver.globalFrac", 0.33, 0.0, 1.0, },
{ T_FLOAT, PARAM_PORTAL_SAMPLE_AMOUNT, "portals.sampleAmount", 0.75, 0.0, 1.0, },
{ T_FLOAT, PARAM_SHADOW_BIAS, "shadowBias", -6.07, -8.0, -2.0, },
{ T_BOOL, PARAM_RESUME_RENDERING, "resumeRendering", false, },
{ T_INT, PARAM_INSTANCE_MIN_SAVING, "instance.minSize", 1, 1, 999999},
{ T_BOOL, PARAM_IC_INCREMENTAL_BUILD, "gi.ic.incrementalBuild", false },
{ T_STR, PARAM_IC_INCREMENTAL_FILENAME, "gi.ic.incrementalFilename", "incrementalIc.dat" },
{ T_INT, PARAM_IC_HEMISPHERE_SUBDIV, "gi.ic.hemisphereSubdiv", 7, 1, 100 },
{ T_FLOAT, PARAM_IC_PRECOMP_AMOUNT, "gi.ic.precompAmount", 1.0, 0.0, 99.0 },
{ T_BOOL, PARAM_IC_PATHTRACING_CORNERS, "gi.ic.pathtracingCorners", true },
{ T_FLOAT, PARAM_IC_MAX_ERROR, "gi.ic.maxGeomError", 0.6, 0.01, 10.0 },
{ T_FLOAT, PARAM_IC_SMOOTHING, "gi.ic.smoothing", 1.8, 1.0, 10.0 },
{ T_FLOAT, PARAM_IC_COLOR_THRESHOLD, "gi.ic.colorThreshold", 10.0, 0.01, 10.0 },
{ T_FLOAT, PARAM_IC_RECORD_SPACING_MIN, "gi.ic.recordSpacingMin", 1.0, 0.01, 500.0 },
{ T_FLOAT, PARAM_IC_RECORD_SPACING_MAX, "gi.ic.recordSpacingMax", 20.0, 0.1, 500.0 },
{ T_BOOL, PARAM_IC_USE_ROTGRAD, "gi.ic.useRotationGradient", true },
{ T_BOOL, PARAM_IC_USE_TRANSGRAD, "gi.ic.useTranslationGradient", true },
{ T_INT, PARAM_IC_INTERPOLATION_SCHEMA, "gi.ic.interpolationSchema", IC_INTERPOLATION_TABELLION, IC_INTERPOLATION_WARD, IC_INTERPOLATION_TABELLION },
{ T_INT, PARAM_IC_SEARCH_STRUCTURE, "gi.ic.searchStructure", ICSTRUCTURE_MULTIPLE_OCTREE, ICSTRUCTURE_MULTIPLE_OCTREE, ICSTRUCTURE_BVH},
{ T_BOOL, PARAM_IC_RELAXED_INTERPOLATION,"gi.ic.relaxedInterpolation", true },
{ T_INT, PARAM_IC_VIZ, "gi.ic.vizualization", ICVIZ_INDIRECT, ICVIZ_OFF, ICVIZ_INDIRECT},
{ T_INT, PARAM_IC_MIN_INTERP_SAMPLES, "gi.ic.minInterpSamples", 2, 1, 20},
{ T_FLOAT, PARAM_HDCACHE_PRECOMP_MULTIPLIER, "gi.hdCache.precompMult", 1.0, 0.0, 99.0},
{ T_INT, PARAM_HDCACHE_INTERPOLATION_COUNT, "gi.hdCache.interpolationCount", 3, 1, HdCacheConfig::MAX_INTERP_SAMPLES},
{ T_FLOAT, PARAM_HDCACHE_DIR_SENSITIVITY, "gi.hdCache.dirSensitivity", 2.0, 0.001, 100.0},
{ T_FLOAT, PARAM_HDCACHE_POS_SENSITIVITY, "gi.hdCache.posSensitivity", 20.0, 0.0, 100.0},
{ T_FLOAT, PARAM_HDCACHE_NORMAL_SENSITIVITY, "gi.hdCache.normalSensitivity", 3.0, 0.0, 10.0},
{ T_INT, PARAM_HDCACHE_PT_SAMPLES, "gi.hdCache.ptSamples", 256, 1, PT_MAX_SAMPLES},
{ T_FLOAT, PARAM_HDCACHE_SMOOTHING, "gi.hdCache.smoothing", 2.0, 1.0, 10.0},
{ T_FLOAT, PARAM_HDCACHE_GLOSSINESS_THRESHOLD, "gi.hdCache.glossyThreshold", 0.9, 0.0, 1.0},
{ T_INT, PARAM_HDCACHE_MAX_RECORDS, "gi.hdCache.maxRecords", 100000, 1000, 999000 },
{ T_INT, PARAM_HDCACHE_WRITE_PASSES, "gi.hdCache.writePasses", 0, 0, 9999999 },
{ T_INT, PARAM_PHOTONS_EMITTED, "gi.photons.emitted", 5000000, 1000, 100000000 },
{ T_BOOL, PARAM_PHOTONS_STORE_DIRECT, "gi.photons.storeDirect", true },
{ T_INT, PARAM_PHOTONS_DEPTH, "gi.photons.depth", -1, -1, 100 },
{ T_INT, PARAM_PHOTONS_LOOKUP_COUNT, "gi.photons.lookupCount", 50, 1, PHOTONS_INTERPOLATION_MAX_SAMPLES },
{ T_INT, PARAM_PHOTONS_FILTER, "gi.photons.0ilter", KERNEL_LINEAR, KERNEL_CONSTANT, KERNEL_GAUSSIAN },
{ T_INT, PARAM_VPL_EMITTED, "gi.vpl.emittedCount", 1000000, 1, 9999999 },
{ T_INT, PARAM_VPL_USED, "gi.vpl.usedCount", 150, 1, 9999 },
{ T_INT, PARAM_VPL_PROGRESSIVE_BATCH, "gi.vpl.progressiveBatch", 150, 1, 9999 },
{ T_FLOAT, PARAM_VPL_CLAMPING, "gi.vpl.clamping", 50.0, 0.0, 999.0 },
{ T_INT, PARAM_PATHTRACING_DIRECTMODE, "gi.pathtracing.directMode", PTDIRECT_MIS, PTDIRECT_SAMPLE_LIGHTS, PTDIRECT_MIS},
{ T_INT, PARAM_BUCKET_INITIAL_SAMPLES, "buckets.initialSamples", 1, 1, 999 },
{ T_INT, PARAM_BUCKET_ADAPTIVE_STEPS, "buckets.adaptiveSteps", 2, 1, 10 },
{ T_FLOAT, PARAM_BUCKETS_ADAPTIVE_THRESHOLD, "buckets-adaptiveThreshold", 0.03, 0.001, 1.0 },
{ T_FLOAT, PARAM_BVH_COST_ITERATION, "bvh.cost.iteration", 1.0, 0.01, 1000.0 },
{ T_FLOAT, PARAM_BVH_COST_TRIANGLE, "bvh.cost.triangle", 1.0, 0.01, 1000.0 },
{ T_INT, PARAM_BVH_LEAF_SIZE_MIN, "bvh.leafSizeMin", 2, 1, 1000 },
{ T_INT, PARAM_BVH_LEAF_SIZE_MAX, "bvh.leafSizeMax", 6, 2, 1000 },
{ T_FLOAT, PARAM_COLORMAP_EXPONENT, "colorMapping.exponent", 0.0, -100.0, 100.0 },
{ T_FLOAT, PARAM_COLORMAP_GAMMA, "colorMapping.gamma", 2.2, 0.1, 10.0 },
{ T_FLOAT, PARAM_COLORMAP_WHITEMULT, "colorMapping.whiteMultiplier", 1.0, 0.01, 999.0 },
{ T_FLOAT, PARAM_COLORMAP_DISPLAY_T, "colorMapping.displayTemperature", 6500.0, 1000.0, 99999.0 },
{ T_FLOAT, PARAM_COLORMAP_SCENE_T, "colorMapping.sceneTemperature", 6500.0, 1000.0, 99999.0 },
{ T_FLOAT, PARAM_COLORMAP_R_BALANCE, "colorMapping.rBalance", 1.0, 0.0, 999.0 },
{ T_FLOAT, PARAM_COLORMAP_G_BALANCE, "colorMapping.gBalance", 1.0, 0.0, 999.0 },
{ T_FLOAT, PARAM_COLORMAP_B_BALANCE, "colorMapping.bBalance", 1.0, 0.0, 999.0 },
{ T_INT, PARAM_COLORMAP_SPACE, "colorMapping.workingSpace", CAT_RGB, CAT_LMS, CAT_RGB},
{ T_FLOAT, PARAM_COLORMAP_CONTRAST, "colorMapping.contrast", 1.0, 1.0, 99.0 },
{ T_INT, PARAM_PPM_PT_SAMPLES_PER_ITER, "ppm.samplesPerIter", 1, 1, 50 },
{ T_INT, PARAM_PPM_PHOTONS_PER_ITER, "ppm.photonsPerIter", 5000000, 1000, 99000000 },
{ T_FLOAT, PARAM_PPM_ALPHA, "ppm.alpha", 0.666, 0.01, 1.0 },
{ T_FLOAT, PARAM_PPM_INITIAL_RADIUS, "ppm.initialRadius", 2.0, 0.0001, 200.0 },
{ T_BOOL, PARAM_BIDIR_DO_MIS, "bidir.doMis", true },
{ T_INT, PARAM_VCM_MODE, "vcm.mode", VCM_BIDIR, VCM_PT, VCM_VCM },
{ T_BOOL, PARAM_DISPLACE_USE_PROJ_SIZE, "displace.useProjectionSize", true, },
{ T_FLOAT, PARAM_DISPLACE_MAX_SIZE_PROJ, "displace.maxProjectSize", 2.0, 0.01, 100.0 },
{ T_FLOAT, PARAM_DISPLACE_MAX_SIZE_WORLD, "displace.maxWorldSize", 1.0, 0.00001, INFINITY },
{ T_INT, PARAM_DISPLACE_MAX_SUBDIV, "displace.maxSubdiv", 100, 1, 9999, },
// for export only
{ T_BOOL, PARAM_RENDERSTAMP_USE, "renderstamp.use", true },
{ T_STR, PARAM_RENDERSTAMP, "renderStamp", "Corona Renderer Alpha | %c | Time: %t | Passes: %p | Primitives: %o | Rays/s: %r", },
{ T_INT, PARAM_VFB_TYPE, "vfb.type", VFB_WX, VFB_NONE, VFB_WX },
"""
typeDict = {'T_BOOL' : 'bool',
'T_STR' : 'string',
'T_INT' : 'int',
'T_FLOAT': 'float',
'T_VEC3' : 'vector'}
def extract():
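    """Parse the embedded C++ parameter table in ``data`` and print one line per parameter,
    'name, type, label, default[, extra]', where the extra column carries enum/min-max info
    derived from the remaining fields."""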
list = data.split("\n")
parasList = []
for l in list:
parts = l.replace("{", "").replace("}", "").split(",")
a = []
for p in parts:
a.append(p.strip())
parts = a
if len(parts) < 3:
continue
#print parts[0]
paras={}
paras['id'] = list.index(l)
paras['name'] = parts[2].strip().replace('"', '').replace(".", "_")
paras['type'] = typeDict[parts[0].strip()]
paras['default'] = parts[3].strip().lower().capitalize().replace("False", "false").replace("True", "true")
ds = []
for d in parts[4:]:
ds.append(d.replace(".f", ".0").lower().capitalize())
paras['data'] = ":".join(ds)[:-1]
if paras['data'].endswith(":"):
paras['data'] = paras['data'][:-1]
if not paras['default'].isdigit() and not paras['default'].replace(".", "0").isdigit() and not paras['default'].replace("-", "").isdigit():
#print "-"+paras['default']+"-", "-no digit"
if len(ds) > 1:
paras['type'] = 'enum'
else:
if len(paras['data'].split(":")) == 2:
paras['data'] = "minmax:"+paras['data']
parasList.append(paras)
for p in parasList:
ps = "{0}, {1}, {2}, {3}".format(p['name'], p['type'], p['name'].split(".")[-1].capitalize(), p['default'])
        print(ps, end='')
        if len(p['data']) > 0:
            print("," + p['data'])
        else:
            print()
extract()
|
1711826
|
import copy
from datetime import *
from DyCommon.DyCommon import *
from ..DyStockBackTestingCommon import *
from ...Trade.DyStockTradeCommon import *
from ...Trade.AccountManager.DyStockPos import *
from ...Trade.AccountManager.StopMode.DyStockStopLossMaMode import *
from ...Trade.AccountManager.StopMode.DyStockStopProfitMaMode import *
from ...Trade.AccountManager.StopMode.DyStockStopLossPnlRatioMode import *
from ...Trade.AccountManager.StopMode.DyStockStopLossStepMode import *
from ...Trade.AccountManager.StopMode.DyStockStopProfitPnlRatioMode import *
from ...Trade.AccountManager.StopMode.DyStockStopTimeMode import *
class DyStockBackTestingAccountManager:
"""
    Back-testing account manager.
    It simulates the live-trading account manager and keeps the same interface as the live account class.
    The live account manager does not contain stop (loss/profit) or risk-guard modules; generic stop and risk-guard modules are provided here only as a convenience for parameter tuning during back-testing.
    A strategy running live should implement its own stop-loss/stop-profit and risk-control modules.
"""
broker = 'BackTesting'
def __init__(self, eventEngine, info, dataEngine, settings):
self._eventEngine = eventEngine
self._info = info
self._dataEngine = dataEngine
self._daysEngine = self._dataEngine.daysEngine # for easy access
# settings
self._initCash = settings['cash']
self._curCash = settings['cash']
self._initStopMode(settings['stopSettings'])
        # back-testing parameter group and period
self._paramGroupNo = None
self._period = None
        # account data persisted across trades
        self._curPos = {} # current positions, {code: DyStockPos}
        self._deals = [] # historical deals
        # risk guard
        self._riskGuardNbr = settings['riskGuard']
        self._riskGuardCount = 0 # once a closePos happens for certain reasons (liquidation), risk guard kicks in, i.e. buying is forbidden for @self._riskGuardNbr trading days
        # initialization for the current day
self._curInit()
# reset to T+1
DyStockTradeCommon.T1 = True
def _initStopMode(self, stopSettings):
# default
self._stopTimeMode = DyStockStopMode(self)
self._stopLossMode = DyStockStopMode(self)
self._stopProfitMode = DyStockStopMode(self)
if 'stopLoss' in stopSettings:
name, param = stopSettings['stopLoss']
if name == '固定':
self._stopLossMode = DyStockStopLossPnlRatioMode(self, *param)
elif name == '均线':
self._stopLossMode = DyStockStopLossMaMode(self, self._dataEngine, *param)
elif name == '阶梯':
self._stopLossMode = DyStockStopLossStepMode(self, *param)
if 'stopProfit' in stopSettings:
name, param = stopSettings['stopProfit']
if name == '固定':
self._stopProfitMode = DyStockStopProfitPnlRatioMode(self, *param)
elif name == '均线':
self._stopProfitMode = DyStockStopProfitMaMode(self, self._dataEngine, *param)
if 'stopTime' in stopSettings:
name, param = stopSettings['stopTime']
if name == '固定':
self._stopTimeMode = DyStockStopTimeMode(self, *param)
@property
def curPos(self):
return self._curPos
@property
def curCash(self):
return self._curCash
def getCurPosMarketValue(self):
"""
get market value of all positions
"""
value = 0
for _, pos in self._curPos.items():
value += pos.totalVolume * pos.price
return value
def getCurCodePosMarketValue(self, code):
"""
        get market value of position of specified code
"""
value = None
if code in self._curPos:
pos = self._curPos[code]
value = pos.totalVolume * pos.price
return value
def getCurCapital(self):
"""
        Get the current total capital (total assets) of the account
        """
        # buy entrusts lock up cash, so undone buy entrusts need to be handled
entrustCash = 0
for entrust in self._curNotDoneEntrusts:
type = entrust.type
code = entrust.code
price = entrust.price
volume = entrust.totalVolume
if type == DyStockOpType.buy:
tradeCost = DyStockTradeCommon.getTradeCost(code, type, price, volume)
entrustCash += price*volume + tradeCost
return self._curCash + self.getCurPosMarketValue() + entrustCash
def getCurCodePosAvail(self, code):
"""
        Get the available (sellable) volume of the current position of the given stock
"""
return self._curPos[code].availVolume if code in self._curPos else 0
def getCurCodePosCost(self, code):
"""
        Get the cost of the current position of the given stock
"""
return self._curPos[code].cost if code in self._curPos else None
def _processDealedEntrusts(self, dealedEntrusts, ticks):
"""
        Process the dealt (filled) entrusts
"""
for entrust in dealedEntrusts:
# update
entrust.status = DyStockEntrust.Status.allDealed
entrust.dealedVolume = entrust.totalVolume
self.matchedDealedVolume = entrust.totalVolume
type = entrust.type
code = entrust.code
name = entrust.name
price = entrust.price
volume = entrust.totalVolume
strategyCls = entrust.strategyCls
signalInfo = entrust.signalInfo
entrustDatetime = entrust.entrustDatetime
datetime = ticks.get(code).datetime
tradeCost = DyStockTradeCommon.getTradeCost(code, type, price, volume)
# remove from not done entrusts list
self._curNotDoneEntrusts.remove(entrust)
            # add into the list of today's entrusts waiting to be pushed
self._curWaitingPushEntrusts.append(entrust)
# new deal
self._curDealCount += 1
deal = DyStockDeal(datetime, type, code, name, price, volume, tradeCost, signalInfo=signalInfo, entrustDatetime=entrustDatetime)
deal.dyDealId = '{}.{}_{}'.format(self.broker, self._curTDay, self._curDealCount)
self._curDeals.append(deal)
self._deals.append(deal)
self._curWaitingPushDeals.append(deal)
# update positions
            if type == DyStockOpType.buy: # buy
if code in self._curPos:
pos = self._curPos[code]
pos.addPos(datetime, strategyCls, price, volume, tradeCost)
else:
self._curPos[code] = DyStockPos(datetime, strategyCls, code, name, price, volume, tradeCost)
            else: # sell
pos = self._curPos[code]
pnl, pnlRatio = pos.removePos(price, volume, tradeCost, removeAvailVolume=False)
assert pnl is not None
deal.pnl = pnl
deal.pnlRatio = pnlRatio
deal.holdingPeriod = pos.holdingPeriod
deal.xrd = pos.xrd
deal.minPnlRatio = pos.minPnlRatio
cash = price*volume - tradeCost
self._curCash += cash
def _CrossCurNotDoneEntrustsByTicks(self, ticks):
"""
        Match today's undone entrusts on every tick.
        For simplicity, traded volume is not taken into account; it could of course be used for more fine-grained matching.
        """
        # match entrusts
dealedEntrusts = []
for entrust in self._curNotDoneEntrusts:
tick = ticks.get(entrust.code)
if tick is None:
continue
if entrust.type == DyStockOpType.buy:
if tick.price >= entrust.price:
continue
else:
if tick.price <= entrust.price:
continue
dealedEntrusts.append(entrust)
        # process the dealt entrusts
self._processDealedEntrusts(dealedEntrusts, ticks)
def _isBarLimitDown(self, bar):
return bar.low == bar.high and (bar.low - bar.preClose)/bar.preClose*100 <= DyStockCommon.limitDownPct
def _isBarLimitUp(self, bar):
return bar.low == bar.high and (bar.high - bar.preClose)/bar.preClose*100 >= DyStockCommon.limitUpPct
def _CrossCurNotDoneEntrustsByBars(self, bars):
"""
        Match today's undone entrusts on every bar.
        @bars: {code: bar}
        """
        # match entrusts
dealedEntrusts = []
for entrust in self._curNotDoneEntrusts:
bar = bars.get(entrust.code)
if bar is None:
continue
            if bar.mode[-1] == 'd': # daily-bar back-testing: match directly, 2017.11.09
                if entrust.type == DyStockOpType.buy:
                    if (bar.close - bar.preClose)/bar.preClose*100 < DyStockCommon.limitUpPct: # do not buy when the stock is limit-up
                        dealedEntrusts.append(entrust)
                else:
                    if (bar.close - bar.preClose)/bar.preClose*100 > DyStockCommon.limitDownPct: # do not sell when the stock is limit-down
                        dealedEntrusts.append(entrust)
            else: # intraday (minute) back-testing: match when the price is crossed
                if entrust.type == DyStockOpType.buy:
                    if bar.low < entrust.price or (bar.low == entrust.price and self._isBarLimitDown(bar)): # price crossed, or limit-down: buy
                        dealedEntrusts.append(entrust)
                else:
                    if bar.high > entrust.price or (bar.high == entrust.price and self._isBarLimitUp(bar)): # price crossed, or limit-up: sell
                        dealedEntrusts.append(entrust)
        # process the dealt entrusts
self._processDealedEntrusts(dealedEntrusts, bars)
def _newEntrust(self, type, datetime, strategyCls, code, name, price, volume, signalInfo=None, tickOrBar=None):
"""
        Create a new entrust
"""
# create a new entrust
self._curEntrustCount += 1
entrust = DyStockEntrust(datetime, type, code, name, price, volume)
entrust.dyEntrustId = '{}.{}_{}'.format(self.broker, self._curTDay, self._curEntrustCount)
entrust.strategyCls = strategyCls
entrust.status = DyStockEntrust.Status.notDealed
entrust.signalInfo = signalInfo
        self._curWaitingPushEntrusts.append(entrust) # add into the list of today's entrusts waiting to be pushed
        self._curNotDoneEntrusts.append(entrust) # add into the list of undone entrusts
        # for daily-bar back-testing, match directly
if strategyCls.backTestingMode == 'bar1d':
self._CrossCurNotDoneEntrustsByBars({tickOrBar.code: tickOrBar})
return entrust
def buy(self, datetime, strategyCls, code, name, price, volume, signalInfo=None, tickOrBar=None):
"""
        @tickOrBar: mainly for intraday back-testing
"""
if volume < 100:
            return None # must buy at least one lot (100 shares)
if self._riskGuardCount > 0:
            return None # risk guard is active
tradeCost = DyStockTradeCommon.getTradeCost(code, DyStockOpType.buy, price, volume)
cash = price*volume + tradeCost
if self._curCash < cash:
return None
self._curCash -= cash
        # create a new entrust
return self._newEntrust(DyStockOpType.buy, datetime, strategyCls, code, name, price, volume, signalInfo=signalInfo, tickOrBar=tickOrBar)
def sell(self, datetime, strategyCls, code, price, volume, sellReason=None, signalInfo=None, tickOrBar=None):
"""
        @tickOrBar: mainly for intraday back-testing
"""
pos = self._curPos.get(code)
if pos is None:
return None
name = pos.name
if not pos.availVolume >= volume > 0:
return None
pos.availVolume -= volume
        # create a new entrust
return self._newEntrust(DyStockOpType.sell, datetime, strategyCls, code, name, price, volume, signalInfo=signalInfo, tickOrBar=tickOrBar)
def setParamGroupNoAndPeriod(self, paramGroupNo, period):
self._paramGroupNo = paramGroupNo
self._period = period
def _curInit(self, tDay=None):
""" 当日初始化 """
self._curTDay = tDay
self._curDeals = [] # 当日成交
self._curEntrustCount = 0
self._curDealCount = 0
# 需要推送给策略的委托和成交回报
self._curWaitingPushDeals = []
self._curWaitingPushEntrusts = []
self._curNotDoneEntrusts = [] # 当日未成交委托
# 风控
if self._riskGuardCount > 0:
self._riskGuardCount -= 1
def onMonitor(self):
return list(self._curPos)
def onTicks(self, ticks):
        # match entrusts
        self._CrossCurNotDoneEntrustsByTicks(ticks)
        # update positions first
        for code, pos in self._curPos.items():
            tick = ticks.get(code)
            if tick is not None:
                pos.onTick(tick)
        # stop loss
        self._stopLossMode.onTicks(ticks)
        # stop profit
        self._stopProfitMode.onTicks(ticks)
        # time-based stop
self._stopTimeMode.onTicks(ticks)
def onBars(self, bars):
        # match entrusts
        self._CrossCurNotDoneEntrustsByBars(bars)
        # update positions first
        for code, pos in self._curPos.items():
            bar = bars.get(code)
            if bar is not None:
                pos.onBar(bar)
        # stop loss
        self._stopLossMode.onBars(bars)
        # stop profit
        self._stopProfitMode.onBars(bars)
        # time-based stop
self._stopTimeMode.onBars(bars)
def _onCloseCurNotDoneEntrusts(self):
"""
        Handle entrusts still undone after the market close
        """
        # handle undone buy entrusts
for entrust in self._curNotDoneEntrusts:
type = entrust.type
code = entrust.code
name = entrust.name
price = entrust.price
volume = entrust.totalVolume
datetime = entrust.entrustDatetime
            self._info.print('{} undone entrust: {}({}), {}, price {}, {} shares, entrust time {}'.format(self._curTDay,
                                                                                                           code, name, type, price, volume,
                                                                                                           datetime.strftime('%H:%M:%S')), DyLogData.ind1)
if type == DyStockOpType.buy:
tradeCost = DyStockTradeCommon.getTradeCost(code, type, price, volume)
self._curCash += price*volume + tradeCost
def onClose(self):
# remove pos
for code in list(self._curPos):
if self._curPos[code].totalVolume == 0:
del self._curPos[code]
# update positions
for _, pos in self._curPos.items():
pos.onClose()
        # handle entrusts still undone after the close
self._onCloseCurNotDoneEntrusts()
def getCurAckData(self, strategyCls):
""" 获取当日策略回测结果数据 """
ackData = DyStockBackTestingStrategyAckData(datetime.strptime(self._curTDay + ' 15:00:00', '%Y-%m-%d %H:%M:%S'), strategyCls, self._paramGroupNo, self._period, True)
ackData.initCash = self._initCash
ackData.curCash = self._curCash
ackData.curPos = copy.deepcopy(self._curPos)
ackData.day = self._curTDay
ackData.deals = self._curDeals
return ackData
def onOpen(self, date):
        # initialization for the current day
        self._curInit(date)
        # initialize the stop-loss mode
        if not self._stopLossMode.onOpen(date):
            return False
        # initialize the stop-profit mode
        if not self._stopProfitMode.onOpen(date):
            return False
        # initialize current positions
for _, pos in self._curPos.items():
if not pos.onOpen(date, self._dataEngine):
return False
return True
def closePos(self, datetime, code, price, sellReason, signalInfo=None, tickOrBar=None):
entrust = None
if code in self._curPos:
pos = self._curPos[code]
entrust = self.sell(datetime, pos.strategyCls, code, price, pos.availVolume, sellReason, signalInfo, tickOrBar)
        # risk guard, regardless of whether there is a position
        if sellReason == DyStockSellReason.liquidate:
            if self._riskGuardNbr > 0: # risk guard is on
self._riskGuardCount = self._riskGuardNbr + 1
return entrust
def popCurWaitingPushDeals(self):
deals = self._curWaitingPushDeals
self._curWaitingPushDeals = []
return deals
def popCurWaitingPushEntrusts(self):
entrusts = self._curWaitingPushEntrusts
self._curWaitingPushEntrusts = []
return entrusts
def syncStrategyPos(self, strategy):
"""
        Synchronize the strategy's positions, needed because of ex-rights/ex-dividend adjustments.
        Called after every tick/bar of the account back-testing, and it must run before the strategy's onTicks or onBars.
        """
        # build the position synchronization data
syncData = {}
for code, pos in self._curPos.items():
            if pos.sync: # only sync positions that have already been synchronized with the strategy. If the stock is suspended today or earlier tick data is missing, the position will not be synced, so we cannot simply assume suspension and set the adjustment factors to 1.
syncData[code] = {'priceAdjFactor': pos.priceAdjFactor,
'volumeAdjFactor': pos.volumeAdjFactor,
'cost': pos.cost
}
        # sync the strategy's positions
strategy.syncPos(syncData)
|
1711838
|
import FWCore.ParameterSet.Config as cms
process = cms.Process("TEST")
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration/StandardSequences/MagneticField_cff")
process.load("Configuration/StandardSequences/FrontierConditions_GlobalTag_cff")
process.load("Configuration/StandardSequences/RawToDigi_Data_cff")
process.load("RecoMET/METProducers/BeamHaloSummary_cfi")
process.load("RecoMET/METProducers/CSCHaloData_cfi")
process.load("RecoMET/METProducers/EcalHaloData_cfi")
process.load("RecoMET/METProducers/HcalHaloData_cfi")
process.load("RecoMET/METProducers/GlobalHaloData_cfi")
#process.GlobalTag.globaltag = 'STARTUP31X_V1::All'
process.GlobalTag.globaltag ='STARTUP3X_V14::All'
process.DQMStore = cms.Service("DQMStore")
process.load("Configuration/StandardSequences/ReconstructionCosmics_cff")
process.load("RecoMuon/Configuration/RecoMuon_cff")
process.load("DQMOffline/JetMET/BeamHaloAnalyzer_cfi")
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(100) )
process.load("DQMServices.Components.DQMStoreStats_cfi")
#process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(10) )
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(
'/store/relval/CMSSW_3_4_1/RelValBeamHalo/GEN-SIM-RECO/STARTUP3X_V14-v1/0005/DA97EC42-21EE-DE11-A0D1-003048D3750A.root',
'/store/relval/CMSSW_3_4_1/RelValBeamHalo/GEN-SIM-RECO/STARTUP3X_V14-v1/0004/6684C797-C9ED-DE11-BC19-0030487C6090.root'
)
)
process.p = cms.Path(process.BeamHaloId*process.AnalyzeBeamHalo*process.dqmStoreStats)
#### If cosmic muons are not by default in the event, then you should run this sequence
#process.p = cms.Path(process.ctfWithMaterialTracksP5LHCNavigation*process.muonRecoLHC*process.CSCHaloData*process.EcalHaloData*process.HcalHaloData*process.GlobalHaloData*process.AnalyzeBeamHalo)
|
1711942
|
import torch
def swinTransformerObjectDetectionToCustomerModel(num_class=2,model_path='mask_rcnn_swin_tiny_patch4_window7.pth',model_save_dir=""):
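    """Resize the classification/regression/mask head weights of a pretrained
    Mask R-CNN + Swin Transformer checkpoint to match ``num_class`` classes and save
    the result as mask_rcnn_swin_<num_class>.pth in ``model_save_dir``."""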
pretrained_weights = torch.load(model_path)
pretrained_weights['state_dict']['roi_head.bbox_head.fc_cls.weight'].resize_(num_class + 1, 1024)
pretrained_weights['state_dict']['roi_head.bbox_head.fc_cls.bias'].resize_(num_class + 1)
pretrained_weights['state_dict']['roi_head.bbox_head.fc_reg.weight'].resize_(num_class * 4, 1024)
pretrained_weights['state_dict']['roi_head.bbox_head.fc_reg.bias'].resize_(num_class * 4)
pretrained_weights['state_dict']['roi_head.mask_head.conv_logits.weight'].resize_(num_class, 256, 1, 1)
pretrained_weights['state_dict']['roi_head.mask_head.conv_logits.bias'].resize_(num_class)
torch.save(pretrained_weights, "{}/mask_rcnn_swin_{}.pth".format(model_save_dir, num_class))
if __name__ == '__main__':
swinTransformerObjectDetectionToCustomerModel(num_class=10,
model_path="/home/zengxh/workspace/Swin-Transformer-Object-Detection/mask_rcnn_swin_tiny_patch4_window7.pth",
model_save_dir="/home/zengxh/workspace/Swin-Transformer-Object-Detection")
|
1711946
|
from PIL import Image
from .detector import detect_faces
from .align_trans import get_reference_facial_points, warp_and_crop_face
import numpy as np
import os
from tqdm import tqdm
def face_align(data=None, dest='None', crop_size=112):
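    """Align every face image under ``data`` (one sub-folder per identity): detect the five
    facial landmarks, warp and crop each face to ``crop_size`` x ``crop_size`` using the
    reference points, and save the aligned JPEGs under ``dest`` with the same folder structure."""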
source_root = data
dest_root = dest
crop_size = crop_size
scale = crop_size / 112.
reference = get_reference_facial_points(default_square=True) * scale
cwd = os.getcwd()
os.chdir(source_root)
os.system("find . -name '*.DS_Store' -type f -delete")
os.chdir(cwd)
if not os.path.isdir(dest_root):
os.makedirs(dest_root)
for subfolder in tqdm(os.listdir(source_root)):
if not os.path.isdir(os.path.join(dest_root, subfolder)):
os.mkdir(os.path.join(dest_root, subfolder))
for image_name in os.listdir(os.path.join(source_root, subfolder)):
img = Image.open(os.path.join(source_root, subfolder, image_name))
try:
_, landmarks = detect_faces(img)
except Exception:
print("{} is discarded due to exception!".format(os.path.join(source_root, subfolder, image_name)))
continue
if len(landmarks) == 0:
print("{} is discarded due to non-detected landmarks!".format(os.path.join(source_root, subfolder, image_name)))
continue
facial5points = [[landmarks[0][j], landmarks[0][j + 5]] for j in range(5)]
warped_face = warp_and_crop_face(np.array(img), facial5points, reference, crop_size=(crop_size, crop_size))
img_warped = Image.fromarray(warped_face)
if image_name.split('.')[-1].lower() not in ['jpg', 'jpeg']:
image_name = '.'.join(image_name.split('.')[:-1]) + '.jpg'
img_warped.save(os.path.join(dest_root, subfolder, image_name))
|
1711973
|
from autogluon_utils.benchmarking.evaluation.runners import run_generate_clean_openml_full, run_generate_clean_kaggle_full
from autogluon_utils.benchmarking.evaluation.runners import run_evaluation_openml_core, run_evaluation_openml_core10fold, run_evaluation_openml_orig10fold, run_evaluation_openml_orig_vs_core10fold, run_evaluation_openml_core_1h_vs_4h, run_evaluation_openml_ablation, run_evaluation_openml_accuracy
from autogluon_utils.benchmarking.evaluation.runners import run_evaluation_kaggle
from autogluon_utils.benchmarking.evaluation.runners import run_generate_tex_openml
from autogluon_utils.benchmarking.evaluation.runners import run_generate_dataset_x_framework
from autogluon_utils.benchmarking.evaluation.runners import run_move_tex
from autogluon_utils.benchmarking.evaluation.runners import run_generate_plots_openml
# End-to-End results generation, from raw input to finished graphs
def run():
print('Starting full run...')
# Clean raw input to standardized format
run_generate_clean_openml_full.run()
run_generate_clean_kaggle_full.run()
# Evaluate openml and kaggle separately
run_evaluation_openml_core.run()
run_evaluation_openml_core10fold.run()
run_evaluation_openml_orig10fold.run()
run_evaluation_openml_orig_vs_core10fold.run()
run_evaluation_openml_core_1h_vs_4h.run()
run_evaluation_openml_ablation.run()
run_evaluation_openml_accuracy.run()
run_evaluation_kaggle.run()
# TODO: Run 1h, 4h, 8h, ablation -> currently hardcoded to 4h for both
# TODO: Compare original openml to new openml
# Generate DataFrames
run_generate_dataset_x_framework.run()
# Next: Code to generate graphs/plots/etc.
# Code to convert to LaTeX
run_generate_tex_openml.run()
# Code to move tex files to common directory
run_move_tex.run()
# Next: Code to generate graphs/plots/etc.
run_generate_plots_openml.run()
print('Full run complete!')
if __name__ == '__main__':
run()
|
1712109
|
import configparser
import dataclasses
import io
from typing import Type, TypeVar
_HAS_PREFIX = 'has_'
@dataclasses.dataclass(frozen=True)
class IniField:
section: str
name: str = ''
class _IniDescriptor:
def __init__(self, field: IniField):
self._section = field.section
self._name = field.name
def __get__(self, instance, owner=None):
if instance is None:
raise AttributeError(
f'{owner.__name__}.{self._name} is not supported')
try:
return instance._cfg.get(self._section, self._name)
except (configparser.NoOptionError,
configparser.NoSectionError) as e:
raise ValueError(f'{self._name!r} is not defined') from e
def __set__(self, instance, value):
if not instance._cfg.has_section(self._section):
instance._cfg.add_section(self._section)
instance._cfg.set(self._section, self._name, value)
return value
class _IniHasDescriptor:
def __init__(self, field: IniField):
self._section = field.section
self._name = field.name
def __get__(self, instance, owner=None):
if instance is None:
raise AttributeError(
f'{owner.__name__}.{_HAS_PREFIX}{self._name} is not supported')
return instance._cfg.has_option(self._section, self._name)
class _IniMeta(type):
def __new__(cls, name, bases, dct):
patched_dct = {}
for k, v in dct.items():
if not isinstance(v, IniField):
patched_dct[k] = v
continue
field = v if v.name else dataclasses.replace(v, name=k)
patched_dct[k] = _IniDescriptor(field)
patched_dct[_HAS_PREFIX + k] = _IniHasDescriptor(field)
return super().__new__(cls, name, bases, patched_dct)
# https://github.com/python/typing/issues/254#issuecomment-235618152
IniFileType = TypeVar('IniFileType', bound='IniFile')
class IniFile(metaclass=_IniMeta):
__slots__ = ('_cfg',)
def __init__(self, **kwargs):
self._cfg = configparser.ConfigParser(interpolation=None)
for k, v in kwargs.items():
setattr(self, k, v)
@classmethod
def from_string(cls: Type[IniFileType], value: str) -> IniFileType:
inst = cls()
inst.read_string(value)
return inst
@classmethod
def from_file(cls: Type[IniFileType], filename: str) -> IniFileType:
inst = cls()
inst.read_file(filename)
return inst
def read_file(self, filename) -> None:
with open(filename) as f:
self._cfg.read_file(f)
def read_string(self, value) -> None:
self._cfg.read_string(value)
def write(self, fp) -> None:
self._cfg.write(fp)
def write_file(self, filename) -> None:
with open(filename, 'w') as fp:
self._cfg.write(fp)
def write_string(self) -> str:
sio = io.StringIO()
self._cfg.write(sio)
return sio.getvalue()
def has(self, attr: str) -> bool:
# TODO: Python 3.8 make positional only
return getattr(self, 'has_' + attr)
def has_section(self, name: str) -> bool:
return self._cfg.has_section(name)
class WrapFile(IniFile):
SECTION = 'wrap-file'
directory = IniField(SECTION)
lead_directory_missing = IniField(SECTION)
source_url = IniField(SECTION)
source_filename = IniField(SECTION)
source_hash = IniField(SECTION)
patch_url = IniField(SECTION)
patch_filename = IniField(SECTION)
patch_hash = IniField(SECTION)
class WrapMeta(IniFile):
description = IniField('metadata')
homepage = IniField('metadata')
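# Minimal usage sketch (illustrative only, not part of the original module): the
# `_IniMeta` metaclass turns each `IniField` into a value descriptor plus a generated
# `has_<name>` descriptor, so a wrap file can be parsed and queried like plain
# attributes. The ini text below is an assumption made for the example.
def _wrapfile_example() -> str:
    wrap = WrapFile.from_string('[wrap-file]\ndirectory = zlib-1.2.11\n')
    assert wrap.has_directory          # generated by _IniMeta from the IniField
    assert not wrap.has_source_url     # option missing, so the has_ descriptor returns False
    return wrap.directory              # -> 'zlib-1.2.11'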
|
1712140
|
import argparse
import os
import os.path as osp
import torch
import mmcv
from mmaction.apis import init_recognizer
from mmcv.parallel import collate, scatter
from mmaction.datasets.pipelines import Compose
from mmaction.datasets import build_dataloader, build_dataset
from mmcv.parallel import MMDataParallel
import numpy as np
from tqdm import tqdm
from sklearn.metrics import f1_score, roc_auc_score, accuracy_score
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
def parse_args():
"""
experiments/baseline_rpl.py --config configs/recognition/tsm/inference_tsm_rpl.py \
--checkpoint work_dirs/tsm/finetune_ucf101_tsm_rpl/latest.pth \
--train_data data/ucf101/ucf101_train_split_1_videos.txt \
--ind_data
--result_prefix experiments/tsm/results_baselines/rpl/RPL
"""
parser = argparse.ArgumentParser(description='MMAction2 test')
# model and data config
parser.add_argument('--config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file/url')
parser.add_argument('--train_data', help='the split file of in-distribution training data')
parser.add_argument('--batch_size', type=int, default=8, help='the testing batch size')
# test data config
parser.add_argument('--ind_data', help='the split file of in-distribution testing data')
parser.add_argument('--ood_data', help='the split file of out-of-distribution testing data')
    parser.add_argument('--ood_ncls', type=int, help='the number of classes in the unknown dataset')
parser.add_argument('--ood_dataname', choices=['HMDB', 'MiT'], help='the name of out-of-distribution testing data')
# env config
parser.add_argument('--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument('--result_prefix', help='result file prefix')
args = parser.parse_args()
return args
def set_deterministic(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed) # if you are using multi-GPU.
np.random.seed(seed) # Numpy module.
    torch.backends.cudnn.benchmark = False  # disable benchmarking for reproducibility
    torch.backends.cudnn.deterministic = True
def run_inference(config, checkpoint, data_split, batch_size, device):
# initialize recognition model
model = init_recognizer(config, checkpoint, device=device, use_frames=False)
torch.backends.cudnn.benchmark = True
model.cfg.data.test.test_mode = True
    model.cfg.test_cfg.average_clips = 'prob'  # we need the probability score from the softmax layer
model.cfg.data.videos_per_gpu = batch_size # batch size
model.cfg.data.test.ann_file = data_split
model.cfg.data.test.data_prefix = os.path.join(os.path.dirname(data_split), 'videos')
# build the dataloader
dataset = build_dataset(model.cfg.data.test, dict(test_mode=True))
dataloader_setting = dict(
videos_per_gpu=model.cfg.data.get('videos_per_gpu', 1),
workers_per_gpu=model.cfg.data.get('workers_per_gpu', 1),
dist=False,
shuffle=False,
pin_memory=False)
dataloader_setting = dict(dataloader_setting, **model.cfg.data.get('test_dataloader', {}))
data_loader = build_dataloader(dataset, **dataloader_setting)
# running the inference
model = MMDataParallel(model, device_ids=[0])
all_scores, all_labels = [], []
prog_bar = mmcv.ProgressBar(len(data_loader.dataset))
for i, data in enumerate(data_loader):
with torch.no_grad():
scores = model(return_loss=False, **data) # (B, C)
all_scores.append(scores)
# gather labels
labels = data['label'].numpy()
all_labels.append(labels)
# use the first key as main key to calculate the batch size
bs = len(next(iter(data.values())))
for _ in range(bs):
prog_bar.update()
all_scores = np.concatenate(all_scores, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_scores, all_labels
def evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, ood_ncls, thresh, num_rand=10):
ind_ncls = ind_softmax.shape[1]
ind_results = np.argmax(ind_softmax, axis=1)
ood_results = np.argmax(ood_softmax, axis=1)
# close-set accuracy (multi-class)
acc = accuracy_score(ind_labels, ind_results)
# open-set auc-roc (binary class)
ind_conf = np.max(ind_softmax, axis=1)
ood_conf = np.max(ood_softmax, axis=1)
preds = np.concatenate((ind_results, ood_results), axis=0)
confs = np.concatenate((ind_conf, ood_conf), axis=0)
    preds[confs < thresh] = 1  # unknown class
    preds[confs >= thresh] = 0  # known class
labels = np.concatenate((np.zeros_like(ind_labels), np.ones_like(ood_labels)))
auc = roc_auc_score(labels, preds)
print('\nClosedSet Accuracy (multi-class): %.3lf, OpenSet AUC (bin-class): %.3lf'%(acc * 100, auc * 100))
ind_results[ind_conf < thresh] = ind_ncls # incorrect rejection
# open set F1 score (multi-class)
macro_F1_list = [f1_score(ind_labels, ind_results, average='macro')]
std_list = [0]
openness_list = [0]
for n in range(ood_ncls):
ncls_novel = n + 1
openness = (1 - np.sqrt((2 * ind_ncls) / (2 * ind_ncls + ncls_novel))) * 100
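        # e.g. (illustrative numbers) with ind_ncls = 101 known classes and ncls_novel = 1,
        # openness = (1 - sqrt(202 / 203)) * 100 ~= 0.25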
openness_list.append(openness)
        # randomly select the subset of ood samples
macro_F1_multi = np.zeros((num_rand), dtype=np.float32)
for m in range(num_rand):
cls_select = np.random.choice(ood_ncls, ncls_novel, replace=False)
ood_sub_results = np.concatenate([ood_results[ood_labels == clsid] for clsid in cls_select])
ood_sub_labels = np.ones_like(ood_sub_results) * ind_ncls
ood_sub_confs = np.concatenate([ood_conf[ood_labels == clsid] for clsid in cls_select])
ood_sub_results[ood_sub_confs < thresh] = ind_ncls # correct rejection
# construct preds and labels
preds = np.concatenate((ind_results, ood_sub_results), axis=0)
labels = np.concatenate((ind_labels, ood_sub_labels), axis=0)
macro_F1_multi[m] = f1_score(labels, preds, average='macro')
macro_F1 = np.mean(macro_F1_multi)
std = np.std(macro_F1_multi)
macro_F1_list.append(macro_F1)
std_list.append(std)
# draw comparison curves
macro_F1_list = np.array(macro_F1_list)
std_list = np.array(std_list)
w_openness = np.array(openness_list) / 100.
open_maF1_mean = np.sum(w_openness * macro_F1_list) / np.sum(w_openness)
open_maF1_std = np.sum(w_openness * std_list) / np.sum(w_openness)
print('Open macro-F1 score: %.3f, std=%.3lf'%(open_maF1_mean * 100, open_maF1_std * 100))
return openness_list, macro_F1_list, std_list
if __name__ == '__main__':
args = parse_args()
# assign the desired device.
device = torch.device(args.device)
set_deterministic(0)
modelname = os.path.dirname(args.config).split('/')[-1].upper()
######## Compute threshold with training data ########
result_file = os.path.join(os.path.dirname(args.result_prefix), modelname + '_RPL_trainset_softmax.npz')
if not os.path.exists(result_file):
# prepare result path
result_dir = os.path.dirname(result_file)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on training data
trainset_scores, _ = run_inference(args.config, args.checkpoint, args.train_data, args.batch_size, device)
# save
np.savez(result_file[:-4], trainset_scores=trainset_scores)
else:
result = np.load(result_file)
trainset_scores = result['trainset_scores']
max_scores = np.max(trainset_scores, axis=1)
    scores_sort = np.sort(max_scores)[::-1]  # sort the max softmax scores in descending order
    N = max_scores.shape[0]
    threshold = scores_sort[int(N * 0.95) - 1]  # threshold that keeps 95% of training clips above it
print('\nThe RPL softmax threshold on UCF-101 train set: %lf'%(threshold))
######## OOD and IND detection ########
testset_result = os.path.join(os.path.dirname(args.result_prefix), modelname +'_RPL_'+ args.ood_dataname +'_result.npz')
if not os.path.exists(testset_result):
# prepare result path
result_dir = os.path.dirname(testset_result)
if not os.path.exists(result_dir):
os.makedirs(result_dir)
# run the inference on OOD data
ood_softmax, ood_labels = run_inference(args.config, args.checkpoint, args.ood_data, args.batch_size, device)
# run the inference on IND data
ind_softmax, ind_labels = run_inference(args.config, args.checkpoint, args.ind_data, args.batch_size, device)
# save
np.savez(testset_result[:-4], ind_softmax=ind_softmax, ood_softmax=ood_softmax,
ind_label=ind_labels, ood_label=ood_labels)
else:
results = np.load(testset_result, allow_pickle=True)
ind_softmax = results['ind_softmax'] # (N1, C)
ood_softmax = results['ood_softmax'] # (N2, C)
ind_labels = results['ind_label'] # (N1,)
ood_labels = results['ood_label'] # (N2,)
openness_list, macro_F1_list, std_list = evaluate_softmax(ind_softmax, ood_softmax, ind_labels, ood_labels, args.ood_ncls, threshold)
|
1712152
|
from __future__ import print_function
import pickle
import os.path as path
import sklearn.utils
def dump_list(input_list, file_path):
"""
Dump list to file, either in "txt" or binary ("pickle") mode.
    Dump mode is chosen according to the "file_path" extension.
Parameters
----------
input_list: list
List object to dump
file_path: str
Path of the dump file
Returns
-------
None
"""
f_name, f_ext = path.splitext(file_path)
if f_ext != '.txt' and f_ext != '.pickle':
        raise ValueError('File extension not supported. Allowed: ".txt", ".pickle". Provided: "{}"'.format(f_ext))
    # Text must be written in text mode, while pickle requires binary mode.
    mode = 'w' if f_ext == '.txt' else 'wb'
    with open(file_path, mode) as f:
        if f_ext == '.txt':
            for item in input_list:
                f.write('{}\n'.format(item))
        else:
            pickle.dump(input_list, f)
def load_list(file_path):
"""
Load list from file, either in "txt" or binary ("pickle") mode.
    Load mode is chosen according to the "file_path" extension.
Parameters
----------
file_path: str
Path of the dump file
Returns
-------
file_list: list
List loaded from file.
"""
if not path.exists(file_path):
raise IOError('File "{}" does not exist.'.format(file_path))
    f_name, f_ext = path.splitext(file_path)
    if f_ext not in ('.txt', '.pickle'):
        raise ValueError('File extension not supported. Allowed: ".txt", ".pickle". Provided: "{}"'.format(f_ext))
    file_list = []
    # Text is read in text mode, while pickle requires binary mode.
    with open(file_path, 'rt' if f_ext == '.txt' else 'rb') as f:
        if f_ext == '.txt':
            for line in f:
                file_list.append(line.strip())  # remove trailing newline
        else:
            file_list = pickle.load(f)
return file_list
def split_into_chunks(list_in, max_elements, shuffle=False):
"""
    Split a list into a variable number of chunks of at most "max_elements" each.
Parameters
----------
list_in: list
Input list to split into chunks
max_elements: int
Max elements allowed into each chunk
shuffle: bool
If True, input list is shuffled before chunking
Returns
-------
list_out: list
        List of lists in which each element is a chunk of list_in
"""
if not isinstance(list_in, list):
raise ValueError('Input must be a list.')
list_out = []
if shuffle:
list_in = sklearn.utils.shuffle(list_in)
counter = 0
for offset in range(0, len(list_in), max_elements):
list_chunk = list_in[offset:offset + max_elements]
list_out.append(list_chunk)
counter += 1
return list_out
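# Illustrative usage sketch (not part of the original module); the file path and the
# sample list below are assumptions made for the example.
def _list_io_example():
    samples = ['a.jpg', 'b.jpg', 'c.jpg', 'd.jpg', 'e.jpg']
    dump_list(samples, '/tmp/samples.txt')        # one item per line
    restored = load_list('/tmp/samples.txt')      # -> same list of strings
    chunks = split_into_chunks(restored, max_elements=2)
    return chunks                                 # -> [['a.jpg', 'b.jpg'], ['c.jpg', 'd.jpg'], ['e.jpg']]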
|
1712175
|
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support.expected_conditions import (
presence_of_element_located,
title_is,
)
def getWaits(driver):
"""Inspection methods that need a wait
We define methods to get an element and to verify the window title.
They will be invoked on a `wait` object, so that they execute
when the conditions under which they can run have been met.
Parameters
----------
driver
A driver object.
Returns
-------
waits
A dictionary keyed by a short name of the method, and valued
by functions bound to the wait object, that find something
on the page.
"""
def getElem(method, address, maxWait=1):
wait = WebDriverWait(driver, timeout=maxWait)
return wait.until(presence_of_element_located((method, address)))
def getTitle(title, maxWait=1):
wait = WebDriverWait(driver, timeout=maxWait)
return wait.until(title_is(title))
return dict(elem=getElem, title=getTitle)
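# Illustrative usage sketch (not part of the original module): the browser, URL,
# element locator and window title below are assumptions, and a local WebDriver
# binary (e.g. geckodriver for Firefox) is assumed to be installed.
def _waits_example():
    from selenium import webdriver
    from selenium.webdriver.common.by import By
    driver = webdriver.Firefox()
    try:
        driver.get('https://example.com')
        waits = getWaits(driver)
        heading = waits['elem'](By.TAG_NAME, 'h1', maxWait=5)     # wait for the heading element
        has_title = waits['title']('Example Domain', maxWait=5)   # wait for the window title
        return heading.text, has_title
    finally:
        driver.quit()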
|
1712225
|
import os
import numpy as np
import fnmatch
import PIL.Image
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
NCOLS = 5
def check_folder(dir):
'''
create a new directory if it doesn't exist
:param dir:
:return:
'''
if not os.path.exists(dir):
os.makedirs(dir)
return dir
def save_files(save_dir, file_name_list, array_list):
'''
save a list of array with the given name
:param save_dir: the directory for saving the files
:param file_name_list: the list of the file names
:param array_list: the list of arrays to be saved
'''
assert len(file_name_list) == len(array_list)
for i in range(len(file_name_list)):
np.save(os.path.join(save_dir, file_name_list[i]), array_list[i], allow_pickle=False)
def load_model_from_checkpoint(checkpoint_dir, saver, sess):
'''
load a pre-trained model from the checkpoint file directory
:param checkpoint_dir: directory for the checkpoint file
:param saver: tf.saver
:param sess: session
:return:
'''
import tensorflow as tf
print(" [*] Reading checkpoints...", checkpoint_dir)
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(ckpt_name.split('-')[-1])
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
def get_filepaths_from_dir(data_dir, ext):
'''
return all the file paths with extension 'ext' in the given directory 'data_dir'
:param data_dir: the data directory
:param ext: the extension type
:return:
path_list: list of file paths
'''
pattern = '*.' + ext
path_list = []
for d, s, fList in os.walk(data_dir):
for filename in fList:
if fnmatch.fnmatch(filename, pattern):
path_list.append(os.path.join(d, filename))
return sorted(path_list)
def read_image(filepath, resolution=64, cx=89, cy=121):
'''
    read, crop and scale an image given the path
:param filepath: the path of the image file
:param resolution: desired size of the output image
:param cx: x_coordinate of the crop center
:param cy: y_coordinate of the crop center
:return:
image in range [-1,1] with shape (resolution,resolution,3)
'''
img = np.asarray(PIL.Image.open(filepath))
shape = img.shape
if shape == (resolution, resolution, 3):
pass
else:
img = img[cy - 64: cy + 64, cx - 64: cx + 64]
resize_factor = 128 // resolution
img = img.astype(np.float32)
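        # Downsample the 128x128 crop by repeated 2x2 block averaging until it matches
        # the requested resolution (e.g. resolution=64 needs a single averaging pass).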
while resize_factor > 1:
            img = (img[0::2, 0::2, :] + img[0::2, 1::2, :] + img[1::2, 0::2, :] + img[1::2, 1::2, :]) * 0.25
resize_factor -= 1
img = np.rint(img).clip(0, 255).astype(np.uint8)
img = img.astype(np.float32) / 255.
img = img * 2 - 1.
return img
####################################################
## visualize
####################################################
def inverse_transform(imgs):
'''
normalize the image to be of range [0,1]
:param imgs: input images
:return:
images with value range [0,1]
'''
imgs = (imgs + 1.) / 2.
return imgs
def visualize_gt(imgs, save_dir):
'''
visualize the ground truth images and save
:param imgs: input images with value range [-1,1]
:param save_dir: directory for saving the results
'''
plt.figure(1)
num_imgs = len(imgs)
imgs = np.clip(inverse_transform(imgs), 0., 1.)
NROWS = int(np.ceil(float(num_imgs) / float(NCOLS)))
for i in range(num_imgs):
plt.subplot(NROWS, NCOLS, i + 1)
plt.imshow(imgs[i])
plt.axis('off')
plt.savefig(os.path.join(save_dir, 'input.png'))
plt.close()
def visualize_progress(imgs, loss, save_dir, counter):
'''
visualize the optimization results and save
:param imgs: input images with value range [-1,1]
:param loss: the corresponding loss values
:param save_dir: directory for saving the results
:param counter: number of the function evaluation
:return:
'''
plt.figure(2)
num_imgs = len(imgs)
imgs = np.clip(inverse_transform(imgs), 0., 1.)
NROWS = int(np.ceil(float(num_imgs) / float(NCOLS)))
for i in range(num_imgs):
plt.subplot(NROWS, NCOLS, i + 1)
plt.imshow(imgs[i])
plt.title('loss: %.4f' % loss[i], fontdict={'fontsize': 8, 'color': 'blue'})
plt.axis('off')
plt.savefig(os.path.join(save_dir, 'output_%d.png' % counter))
plt.close()
def visualize_samples(img_r01, save_dir):
plt.figure(figsize=(20, 20))
for i in range(64):
plt.subplot(8, 8, i + 1)
plt.imshow(img_r01[i])
plt.axis('off')
plt.tight_layout()
plt.savefig(os.path.join(save_dir, 'samples.png'))
|
1712262
|
import numpy as np
def log_sum_exp_sample(lnp):
"""
    Sample an index from a categorical distribution defined by a vector of
    unnormalized log probabilities, using the log-sum-exp trick for numerical stability.
"""
assert np.ndim(lnp) == 1, "ERROR: logSumExpSample requires a 1-d vector"
lnp = np.ravel(lnp)
N = np.size(lnp)
# Use logsumexp trick to calculate ln(p1 + p2 + ... + pR) from ln(pi)'s
max_lnp = np.max(lnp)
denom = np.log(np.sum(np.exp(lnp-max_lnp))) + max_lnp
p_safe = np.exp(lnp - denom)
# Normalize the discrete distribution over blocks
sum_p_safe = np.sum(p_safe)
if sum_p_safe == 0 or not np.isfinite(sum_p_safe):
print "Total probability for logSumExp is not valid! %f" % sum_p_safe
raise Exception("Invalid input. Probability infinite everywhere.")
# Randomly sample a block
choice = -1
u = np.random.rand()
acc = 0.0
for n in np.arange(N):
acc += p_safe[n]
if u <= acc:
choice = n
break
if choice == -1:
raise Exception("Invalid choice in logSumExp!")
return choice
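# Illustrative sanity check (not part of the original module): the empirical sampling
# frequencies should approximate softmax(lnp); the probabilities below are assumptions.
if __name__ == '__main__':
    probs = np.array([0.1, 0.3, 0.6])
    draws = np.array([log_sum_exp_sample(np.log(probs)) for _ in range(10000)])
    freqs = np.bincount(draws, minlength=probs.size) / float(draws.size)
    print(freqs)  # expected to be close to [0.1, 0.3, 0.6]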
|
1712306
|
import os
import shutil
import torch
import scipy.io as scio
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from data.folder_new import ImageFolder_new
from data.Uniform_folder import ImageFolder_uniform
from data.Uniform_sampler import UniformBatchSampler
import numpy as np
import cv2
def make_weights_for_balanced_classes(images, nclasses):
count = [0] * nclasses
for item in images:
count[item[1]] += 1
weight_per_class = [0.] * nclasses
N = float(sum(count))
for i in range(nclasses):
weight_per_class[i] = N/float(count[i])
weight = [0] * len(images)
# weight_per_class[-1] = weight_per_class[-1] ########### adjust the cate-weight for unknown category.
for idx, val in enumerate(images):
weight[idx] = weight_per_class[val[1]]
return weight
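def _balanced_loader_example(dataset, batch_size=32):
    # Illustrative sketch (not part of the original module): build a class-balanced
    # DataLoader from a torchvision-style dataset whose `imgs` attribute is a list of
    # (path, class_index) tuples; the attribute names are assumptions for the example.
    weights = make_weights_for_balanced_classes(dataset.imgs, len(dataset.classes))
    sampler = torch.utils.data.WeightedRandomSampler(torch.DoubleTensor(weights),
                                                     num_samples=len(weights))
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size, sampler=sampler)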
def _random_affine_augmentation(x):
M = np.float32([[1 + np.random.normal(0.0, 0.1), np.random.normal(0.0, 0.1), 0],
[np.random.normal(0.0, 0.1), 1 + np.random.normal(0.0, 0.1), 0]])
rows, cols = x.shape[1:3]
dst = cv2.warpAffine(np.transpose(x.numpy(), [1, 2, 0]), M, (cols,rows))
dst = np.transpose(dst, [2, 0, 1])
return torch.from_numpy(dst)
def _gaussian_blur(x, sigma=0.1):
ksize = int(sigma + 0.5) * 8 + 1
dst = cv2.GaussianBlur(x.numpy(), (ksize, ksize), sigma)
return torch.from_numpy(dst)
def generate_dataloader(args):
# Data loading code
traindir_source = os.path.join(args.data_path_source, args.src)
traindir_target = os.path.join(args.data_path_source_t, args.src_t)
valdir = os.path.join(args.data_path_target, args.tar)
if not os.path.isdir(traindir_source):
# split_train_test_images(args.data_path)
raise ValueError('Null path of source train data!!!')
# normalize_s = transforms.Normalize(mean=[0.9094, 0.9077, 0.9047],
# std=[0.1977, 0.2013, 0.2081])
# normalize_s = transforms.Normalize(mean=[0.459, 0.459, 0.459], ############# should be all the same ...
# std=[0.226, 0.226, 0.226])
normalize_s = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
normalize_gray = transforms.Normalize(mean=[0.459, 0.459, 0.459], ############# should be all the same ...
std=[0.226, 0.226, 0.226])
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
#################### random sampled source and target dataset for cross-entropy training
if args.img_process_s == 'ours':
source_train_dataset = ImageFolder_uniform(
traindir_source,
transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_s,
])
)
elif args.img_process_s == 'longs':
####################### below is long's preprocess
source_train_dataset = ImageFolder_uniform(
traindir_source,
transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_s,
])
)
elif args.img_process_s == 'simple':
source_train_dataset = ImageFolder_uniform(
traindir_source,
transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize_s,
])
)
source_train_loader_ce = torch.utils.data.DataLoader(
source_train_dataset, batch_size=args.batch_size, shuffle=True,
drop_last=True, num_workers=args.workers, pin_memory=True
)
# uniformbatchsampler = UniformBatchSampler(args.per_category, source_train_dataset.category_index_list,
# source_train_dataset.imgs) ##### select images in the iteration process
# source_train_loader_cas = torch.utils.data.DataLoader(
# source_train_dataset, num_workers=args.workers, pin_memory=True, batch_sampler=uniformbatchsampler
# )
if args.img_process_t == 'ours':
target_train_dataset_ce = ImageFolder_new(
traindir_target,
transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_s,
]),
transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Lambda(lambda x: _random_affine_augmentation(x)),
transforms.Lambda(lambda x: _gaussian_blur(x)),
normalize_s,
])
)
elif args.img_process_t == 'longs':
####################### long's preprocess
target_train_dataset_ce = ImageFolder_new(
traindir_target,
transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_s,
]),
transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Lambda(lambda x: _random_affine_augmentation(x)),
transforms.Lambda(lambda x: _gaussian_blur(x)),
normalize_s,
])
)
elif args.img_process_t == 'simple':
target_train_dataset_ce = ImageFolder_new(
traindir_target,
transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize_s,
]),
transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Lambda(lambda x: _random_affine_augmentation(x)),
transforms.Lambda(lambda x: _gaussian_blur(x)),
normalize_s,
])
)
target_train_loader_ce = torch.utils.data.DataLoader(
target_train_dataset_ce, batch_size=args.batch_size, shuffle=True,
drop_last=True, num_workers=args.workers, pin_memory=True
)
if args.img_process_s == 'ours':
target_train_dataset = ImageFolder_uniform(
traindir_target,
transforms.Compose([
transforms.Resize(256),
transforms.RandomCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_s,
])
)
elif args.img_process_s == 'longs':
####################### long's preprocess
target_train_dataset = ImageFolder_uniform(
traindir_target,
transforms.Compose([
transforms.Resize((256, 256)),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize_s,
])
)
elif args.img_process_s == 'simple':
target_train_dataset = ImageFolder_uniform(
traindir_target,
transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize_s,
])
)
if args.img_process_s == 'ours':
source_val_dataset = ImageFolder_uniform(
traindir_source,
transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize_s,
])
)
elif args.img_process_s == 'longs':
source_val_dataset = ImageFolder_uniform(
traindir_source,
transforms.Compose([
transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize_s,
])
)
elif args.img_process_s == 'simple':
source_val_dataset = ImageFolder_uniform(
traindir_source,
transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize_s,
])
)
source_val_loader = torch.utils.data.DataLoader(
source_val_dataset, batch_size=500, shuffle=False,
num_workers=args.workers, pin_memory=True, sampler=None
)
if args.img_process_t == 'ours':
target_test_dataset = ImageFolder_uniform(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
elif args.img_process_t == 'longs':
target_test_dataset = ImageFolder_uniform(valdir, transforms.Compose([
            transforms.Resize((256, 256)),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
]))
elif args.img_process_t == 'simple':
target_test_dataset = ImageFolder_uniform(valdir, transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
normalize,
]))
target_val_loader = torch.utils.data.DataLoader(
target_test_dataset,
batch_size=500, shuffle=False,
num_workers=args.workers, pin_memory=True
)
return source_train_loader_ce, source_train_dataset, target_train_loader_ce, target_train_dataset, source_val_loader, target_val_loader
|
1712351
|
from unittest.mock import create_autospec, ANY
import pytest
from stack.commands import DatabaseConnection
from stack.commands.list.firmware import Command
from stack.commands.list.firmware.make.plugin_basic import Plugin
class TestListFirmwareMakeBasicPlugin:
"""A test case for the list firmware make basic plugin."""
@pytest.fixture
def basic_plugin(self):
"""A fixture that returns the plugin instance for use in tests.
This sets up the required mocks needed to construct the plugin class.
"""
mock_command = create_autospec(
spec = Command,
instance = True,
)
mock_command.db = create_autospec(
spec = DatabaseConnection,
spec_set = True,
instance = True,
)
return Plugin(command = mock_command)
def test_provides(self, basic_plugin):
"""Ensure that provides returns 'basic'."""
assert basic_plugin.provides() == "basic"
def test_run(self, basic_plugin):
"""Test that run queries the DB as expected when expanded is true."""
basic_plugin.owner.db.select.return_value = [["foo", "bar"], ["baz", "bag"]]
expected_results = {
"keys": ["make", "version_regex_name"],
"values": [(row[0], row[1:]) for row in basic_plugin.owner.db.select.return_value],
}
assert expected_results == basic_plugin.run(args = True)
basic_plugin.owner.db.select.assert_called_once_with(ANY)
def test_run_expanded_false(self, basic_plugin):
"""Test that run queries the DB as expected when expanded is false."""
basic_plugin.owner.db.select.return_value = [["foo"], ["bar"]]
expected_results = {
"keys": ["make"],
"values": [(row[0], []) for row in basic_plugin.owner.db.select.return_value],
}
assert expected_results == basic_plugin.run(args = False)
basic_plugin.owner.db.select.assert_called_once_with(ANY)
|
1712362
|
import os
import os.path
import cv2
import util
from tqdm import tqdm
def main(in_path, Q=25):
# scan image file path
names_hr = sorted(util._get_paths_from_images(os.path.expanduser(in_path)))
base_name = os.path.expanduser(in_path).split('/')[-1]
base_name_q = base_name+'Q{}'.format(Q)
out_path_base = os.path.expanduser(in_path).replace(base_name, base_name_q)
os.makedirs(out_path_base, exist_ok=True)
# make out folders for sequences.
out_folder_names = set(['/'.join(_.split('/')[:-1]).replace(base_name, base_name_q) for _ in names_hr])
for o_folder in out_folder_names:
os.makedirs(o_folder, exist_ok=True)
# read each image and store it to corresponding path
# for hr_path in names_hr:
for hr_path in tqdm(names_hr, ncols=80):
img = cv2.imread(hr_path, cv2.IMREAD_COLOR)
# store it with jpeg compression
o_path = hr_path.replace(base_name, base_name_q)
o_path = o_path.replace('.png', '.jpg')
cv2.imwrite(o_path, img, [int(cv2.IMWRITE_JPEG_QUALITY), Q])
if __name__ == '__main__':
main('~/Datasets/REDS/train/train_sharp', 25)
|
1712403
|
import os
import random
import logging
import ray
from pathlib import Path
from tqdm import tqdm
from .io.video_io import VideoFrameReader, write_img_files_to_vid_ffmpeg, \
resize_and_write_video_ffmpeg, convert_vid_ffmpeg
from .utils.serialization_utils import save_json
from .dataset import GluonCVMotionDataset, DataSample, FieldNames, get_resized_video_location, \
get_vis_gt_location, get_vis_thumb_location, get_vis_video_location
def generate_resized_video(data_sample:DataSample, short_edge_res:int, overwrite=False, cache_dir=None,
upscale=True, use_vis_path=True, force_encode=False, encode_kwargs=None, **kwargs):
if cache_dir is None:
resized_path = get_resized_video_location(data_sample,short_edge_res)
else:
resized_path = data_sample.get_cache_file(cache_dir, extension='.mp4')
if os.path.isfile(resized_path) and not overwrite:
return
os.makedirs(os.path.dirname(resized_path), exist_ok=True)
orig_vid_path = get_vis_video_location(data_sample) if use_vis_path else data_sample.data_path
if not upscale and min(data_sample.width, data_sample.height) <= short_edge_res:
if force_encode:
if encode_kwargs is None:
encode_kwargs = {k: v for k, v in kwargs.items() if k == "addn_args"}
convert_vid_ffmpeg(orig_vid_path, resized_path, **encode_kwargs)
else:
os.symlink(orig_vid_path, resized_path)
else:
resize_and_write_video_ffmpeg(orig_vid_path, resized_path, short_edge_res, **kwargs)
return resized_path
def generate_video(data_sample:DataSample, force_encode=False, overwrite=False, **kwargs):
new_file = get_vis_video_location(data_sample)
if (os.path.isfile(new_file) and not overwrite) and not (force_encode and os.path.islink(new_file)):
return
os.makedirs(os.path.dirname(new_file), exist_ok=True)
video_file = data_sample.data_path
#### Generate Video ####
if os.path.isdir(video_file):
# the data is a set of images, generate a video
img_files = [os.path.join(video_file, f) for f in sorted(os.listdir(video_file))]
write_img_files_to_vid_ffmpeg(out_file=new_file, in_files=img_files, fps=data_sample.metadata['fps'])
else:
if not video_file.endswith(".mp4") or force_encode:
# Convert the video to mp4
convert_vid_ffmpeg(video_file, new_file, **kwargs)
else:
# the data is a video, symlink to it
if os.path.exists(new_file):
os.remove(new_file)
os.symlink(video_file, new_file)
return new_file
def generate_thumbnail(data_sample:DataSample, overwrite=False):
video_thumbnail_frame = get_vis_thumb_location(data_sample)
if os.path.isfile(video_thumbnail_frame) and not overwrite:
return
os.makedirs(os.path.dirname(video_thumbnail_frame), exist_ok=True)
video_file = get_vis_video_location(data_sample)
#### Generate Thumbnail ####
vid = VideoFrameReader(video_file)
img, ts = vid.get_frame(30) if len(vid) > 30 else vid.get_frame(0)
img.thumbnail((300, 300))
img.save(video_thumbnail_frame)
return video_thumbnail_frame
def generate_gt_vis_json(data_sample:DataSample, cache_suffix="", overwrite=False):
gt_file = get_vis_gt_location(data_sample, cache_suffix)
if os.path.isfile(gt_file) and not overwrite:
return
#### Generate GT Track json ####
os.makedirs(os.path.dirname(gt_file), exist_ok=True)
vis_video_file = get_vis_video_location(data_sample)
vis_vid = VideoFrameReader(vis_video_file)
sample_dict = data_sample.to_dict(include_id=True)
sample_dict[FieldNames.METADATA][FieldNames.FPS] = vis_vid.fps
save_json(sample_dict, gt_file, indent=0)
return gt_file
@ray.remote
def generate_files_for_one_sample_ray(data_sample:DataSample, generator_list, overwrite):
generate_files_for_one_sample(data_sample, generator_list, overwrite)
@ray.remote
def generate_files_for_multi_samples_ray(data_samples, generator_list, overwrite):
for id, data_sample in data_samples:
generate_files_for_one_sample(data_sample, generator_list, overwrite)
logging.info('Finished: {}'.format(data_sample.data_path))
def generate_files_for_one_sample(data_sample:DataSample, generator_list, overwrite):
if os.path.isabs(data_sample.data_relative_path):
logging.error("Relative path of sample id: {} is absolute and so we cannot add to the cache, skipping."
" Path: {}".format(data_sample.id, data_sample.data_relative_path))
return
for gen in generator_list:
try:
gen(data_sample, overwrite)
except Exception as e:
try:
gen(data_sample, True)
except Exception as e:
logging.exception('Failed: {}'.format(data_sample.data_path))
return
def generate_preprocess_files(part=0, parts=1,
annotation_file='./kinetics/annotation/anno_400.json',
use_ray=False, num_cpus=4, overwrite=False, force_encode=False, short_edge_res=256,
distributed=False, shuffle_seed=None, dataset=None):
if dataset is None:
dataset = GluonCVMotionDataset(annotation_file)
generator_list = [
lambda sample, overwrite: generate_video(sample, force_encode, overwrite),
generate_thumbnail,
lambda sample, overwrite: generate_gt_vis_json(sample, dataset.get_anno_subpath(), overwrite),
]
samples = sorted(dataset.samples)
if shuffle_seed is not None and shuffle_seed != "None":
random.seed(shuffle_seed)
random.shuffle(samples)
samples = samples[part::parts]
logging.info("Using ray {} , distributed: {}".format(use_ray, distributed))
if use_ray:
num_ray_threads = 500
if distributed:
ray.init(redis_address="localhost:6379")
else:
ray.init(num_cpus=num_cpus)
ray.get([generate_files_for_multi_samples_ray.remote(samples[i::num_ray_threads], generator_list, overwrite) for i in range(num_ray_threads)])
else:
for id, data_sample in tqdm(samples, mininterval=1.0):
generate_files_for_one_sample(data_sample, generator_list, overwrite)
if __name__ == '__main__':
import fire
fire.Fire(generate_preprocess_files)
|
1712420
|
from django.conf import settings
from django.template import defaultfilters
from django.utils.translation import gettext_lazy as _
from django.views.generic import ListView
from django_cradmin import javascriptregistry
from django_cradmin.viewhelpers import listbuilder
from django_cradmin.viewhelpers.mixins import CommonCradminViewMixin
class ViewMixin(object):
"""
    Listbuilder view mixin. Must be mixed in before any Django View subclass.
This is typically used with a Django ListView or TemplateView.
The mixin is not dependent on any specific backend, so it works
no matter where you get your data from (database, mongodb, elasticsearch, ...).
For a ready to use view that extends this to work with Django model objects,
see :class:`.View`.
The ViewMixin works much like :class:`.View`, but you must override/implement:
- :meth:`.get_pagetitle`
- :meth:`.get_listbuilder_list_value_iterable`
- :meth:`.get_no_items_message`
"""
template_name = 'django_cradmin/viewhelpers/listbuilderview/default.django.html'
#: See :meth:`~ViewMixin.get_listbuilder_class`.
listbuilder_class = listbuilder.lists.RowList
#: See :meth:`~ViewMixin.get_value_renderer_class`.
value_renderer_class = listbuilder.itemvalue.FocusBox
#: See :meth:`~ViewMixin.get_frame_renderer_class`.
frame_renderer_class = listbuilder.itemframe.DefaultSpacingItemFrame
#: Set this to True to hide the page header. See :meth:`~.FormViewMixin.get_hide_page_header`.
hide_page_header = False
#: Enable previews? See :meth:`.get_enable_previews`. Defaults to ``False``.
enable_previews = False
def get_pagetitle(self):
"""
Get the page title (the title tag).
Must be implemented in subclasses.
"""
raise NotImplementedError()
def get_pageheading(self):
"""
Get the page heading.
Defaults to :meth:`.get_pagetitle`.
"""
return self.get_pagetitle()
def get_hide_page_header(self):
"""
Return ``True`` if we should hide the page header.
You can override this, or set :obj:`.hide_page_header`, or hide the page header
in all form views with the ``DJANGO_CRADMIN_HIDE_PAGEHEADER_IN_LISTVIEWS`` setting.
"""
return self.hide_page_header or getattr(settings, 'DJANGO_CRADMIN_HIDE_PAGEHEADER_IN_LISTVIEWS', False)
def get_enable_previews(self):
"""
If this returns ``True``, we enable previews.
This is required for the ``django-cradmin-page-preview-open-on-click``
angularJS directive to work.
Defaults to :obj:`.enable_previews`.
"""
return self.enable_previews
def get_listbuilder_class(self):
"""
Get a subclass of :class:`django_cradmin.viewhelpers.listbuilder.base.List`.
Defaults to :obj:`.ViewMixin.listbuilder_class`.
"""
return self.listbuilder_class
def get_listbuilder_list_kwargs(self):
"""
Get kwargs for :meth:`.get_listbuilder_class`.
"""
return {}
def get_value_and_frame_renderer_kwargs(self):
"""
Get kwargs for the classes returned by :meth:`.get_value_renderer_class`
and :meth:`.get_frame_renderer_class`.
Both the frame and value renderer base classes takes ``**kwargs``
and store them in ``self.kwargs`` (just like django class based views),
so even if you just need to send kwargs to one of these classes,
you can still use this.
See the docs for the ``value_and_frame_renderer_kwargs`` argument for
:meth:`django_cradmin.viewhelpers.listbuilder.base.List.extend_with_values`
method for more information. HINT: This can also return a callable to generate
kwargs based on each value in the list!
"""
return {}
def get_value_renderer_class(self):
"""
Get a subclass of :class:`django_cradmin.viewhelpers.listbuilder.base.ItemValueRenderer`.
Defaults to :obj:`.ViewMixin.value_renderer_class`.
"""
return self.value_renderer_class
def get_frame_renderer_class(self):
"""
Get a subclass of :class:`django_cradmin.viewhelpers.listbuilder.base.ItemFrameRenderer`.
Defaults to :obj:`.ViewMixin.frame_renderer_class`.
"""
return self.frame_renderer_class
def get_listbuilder_list_value_iterable(self, context):
"""
Get the value_iterable for the listbuilder list.
Must be overridden in subclasses.
Parameters:
context: The Django template context.
"""
raise NotImplementedError()
def get_listbuilder_list(self, context):
"""
Get the listbuilder List object.
You normally do not have to override this, but instead you should
override:
- :meth:`.get_listbuilder_list_value_iterable`
- :meth:`.get_value_renderer_class`
- :meth:`.get_frame_renderer_class`
- :meth:`.get_value_and_frame_renderer_kwargs`
- :meth:`.get_listbuilder_list_kwargs`
Parameters:
context: The Django template context.
"""
return self.get_listbuilder_class().from_value_iterable(
value_iterable=self.get_listbuilder_list_value_iterable(context),
value_renderer_class=self.get_value_renderer_class(),
frame_renderer_class=self.get_frame_renderer_class(),
value_and_frame_renderer_kwargs=self.get_value_and_frame_renderer_kwargs(),
**self.get_listbuilder_list_kwargs())
def get_no_items_message(self):
"""
Get the message to show when there are no items.
Must be overridden in subclasses.
"""
raise NotImplementedError()
def add_listview_context_data(self, context):
context['listbuilder_list'] = self.get_listbuilder_list(context)
context['pagetitle'] = self.get_pagetitle()
context['hide_pageheader'] = self.get_hide_page_header()
context['pageheading'] = self.get_pageheading()
context['no_items_message'] = self.get_no_items_message()
context['enable_previews'] = self.get_enable_previews()
context['pre_include_template'] = self.get_pre_include_template()
context['buttons_include_template'] = self.get_buttons_include_template()
context['post_include_template'] = self.get_post_include_template()
def get_pre_include_template(self):
"""
You can return a template to include before the listbuilder list here.
"""
return None
def get_buttons_include_template(self):
"""
You can return a template to include buttons above the
listbuilder list here. If you include this template,
we will create a ``<p>`` with ``django-cradmin-listbuilderview-buttons``
as css class, and include your template within that ``<p>``.
"""
return None
def get_post_include_template(self):
"""
You can return a template to include after the listbuilder list here.
"""
return None
def get_context_data(self, **kwargs):
context = super(ViewMixin, self).get_context_data(**kwargs)
self.add_listview_context_data(context)
return context
class ViewCreateButtonMixin(object):
"""
Mixin class that overrides :meth:`.View.get_buttons_include_template`
with a template that renders a create button that assumes the
create view is named ``"create"``.
"""
def get_buttons_include_template(self):
return "django_cradmin/viewhelpers/listbuilderview/includes/create-button.django.html"
class View(javascriptregistry.viewmixin.WithinRoleViewMixin,
CommonCradminViewMixin, ViewMixin, ListView):
"""
View using the :doc:`viewhelpers_listbuilder`.
Examples:
Minimal::
from django_cradmin.viewhelpers import listbuilderview
class MyView(listbuilderview.View):
def get_queryset(self):
return MyModel.objects.all()
"""
#: The model class to list objects for. You do not have to specify
#: this, but if you do not specify this or :meth:`~.ObjectTableView.get_model_class`,
#: you have to override :meth:`~.ObjectTableView.get_pagetitle` and
#: :meth:`~.ObjectTableView.get_no_items_message`.
model = None
#: Set this to ``True`` to make the template not render the menu.
#: Very useful when creating foreign-key select views, and other views
#: where you do not want your users to accidentally click out of the
#: current view.
hide_menu = False
def get_model_class(self):
"""
Get the model class to list objects for.
Defaults to :obj:`~.View.model`. See :obj:`~.View.model` for more info.
"""
return self.model
def get_pagetitle(self):
"""
Get the page title (the title tag).
Defaults to the ``verbose_name_plural`` of the :obj:`~.View.model`
with the first letter capitalized.
"""
return defaultfilters.capfirst(self.get_model_class()._meta.verbose_name_plural)
def get_listbuilder_list_value_iterable(self, context):
return context['object_list']
def get_queryset_for_role(self):
"""
Get a queryset with all objects of :obj:`~.View.model` that
the current role can access.
"""
raise NotImplementedError()
def get_queryset(self):
"""
DO NOT override this. Override :meth:`.get_queryset_for_role`
instead.
"""
return self.get_queryset_for_role()
def get_no_items_message(self):
"""
Get the message to show when there are no items.
"""
return _('No %(modelname_plural)s') % {
'modelname_plural': self.get_model_class()._meta.verbose_name_plural.lower(),
}
def get_context_data(self, **kwargs):
context = super(View, self).get_context_data(**kwargs)
context['cradmin_hide_menu'] = self.hide_menu
self.add_javascriptregistry_component_ids_to_context(context=context)
self.add_common_view_mixin_data_to_context(context=context)
return context
|
1712430
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.messages import constants
from petition.models import Organization, Petition, PytitionUser, Signature, Permission
from .utils import add_default_data
class PetitionViewTest(TestCase):
"""Test index view"""
@classmethod
def setUpTestData(cls):
add_default_data()
def tearDown(self):
# Clean up run after every test method.
pass
def login(self, name, password=None):
self.client.login(username=name, password=password if password else name)
self.pu = PytitionUser.objects.get(user__username=name)
return self.pu
def logout(self):
self.client.logout()
def test_show_signatures(self):
julia = self.login('julia')
# User petition
p = julia.petition_set.first()
response = self.client.get(reverse("show_signatures", args=[p.id]))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/signature_data.html")
# Add signature
self.assertEqual(Signature.objects.count(), 0)
Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition = p)
self.assertEqual(Signature.objects.count(), 1)
response = self.client.get(reverse("show_signatures", args=[p.id]))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/signature_data.html")
# Org petition
max = self.login("max")
org = Organization.objects.get(name='Les Amis de la Terre')
p2 = org.petition_set.first()
response = self.client.get(reverse("show_signatures", args=[p2.id]))
self.assertEqual(response.status_code, 302)
# Ah right, Max does not have access rights for that
perm = Permission.objects.get(organization=org, user=max)
perm.can_view_signatures = True
perm.save()
response = self.client.get(reverse("show_signatures", args=[p2.id]))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, "petition/signature_data.html")
def test_get_csv_signature(self):
julia = self.login('julia')
# User petition
p = julia.petition_set.first()
response = self.client.get(reverse("get_csv_signature", args=[p.id]))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("get_csv_confirmed_signature", args=[p.id]))
self.assertEqual(response.status_code, 200)
# Add signature
self.assertEqual(Signature.objects.count(), 0)
Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition = p)
self.assertEqual(Signature.objects.count(), 1)
response = self.client.get(reverse("get_csv_signature", args=[p.id]))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("get_csv_confirmed_signature", args=[p.id]))
self.assertEqual(response.status_code, 200)
# Org petition
self.login("max")
org = Organization.objects.get(name='Les Amis de la Terre')
p2 = org.petition_set.first()
response = self.client.get(reverse("get_csv_signature", args=[p2.id]))
self.assertEqual(response.status_code, 403)
response = self.client.get(reverse("get_csv_confirmed_signature", args=[p2.id]))
self.assertEqual(response.status_code, 403)
# Ah right, max does not have access rights for that
self.login("julia")
response = self.client.get(reverse("get_csv_signature", args=[p2.id]))
self.assertEqual(response.status_code, 200)
response = self.client.get(reverse("get_csv_confirmed_signature", args=[p2.id]))
self.assertEqual(response.status_code, 200)
def test_show_signatures_post_deleteOK(self):
julia = self.login("julia")
petition = julia.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
sid = signature.id
data = {
'action': 'delete',
'signature_id': [sid],
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("show_signatures", args=[pid]))
self.assertTemplateUsed(response, "petition/signature_data.html")
with self.assertRaises(Signature.DoesNotExist):
Signature.objects.get(pk=sid)
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, False)
def test_show_signatures_post_deleteOK_org(self):
self.login("julia")
org = Organization.objects.get(name="Les Amis de la Terre")
petition = org.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
sid = signature.id
data = {
'action': 'delete',
'signature_id': [sid],
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("show_signatures", args=[pid]))
self.assertTemplateUsed(response, "petition/signature_data.html")
with self.assertRaises(Signature.DoesNotExist):
Signature.objects.get(pk=sid)
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, False)
def test_show_signatures_post_deleteKONoRightsUser(self):
self.login("julia")
max = PytitionUser.objects.get(user__username="max")
petition = max.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
sid = signature.id
data = {
'action': 'delete',
'signature_id': [sid],
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("user_dashboard"))
self.assertTemplateUsed(response, "petition/user_dashboard.html")
s = Signature.objects.get(pk=sid)
self.assertEquals(s.id, sid) # dummy test, we just want the previous line not to raise a DoesNotExist exception
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, True)
def test_show_signatures_post_deleteKONoRightsOrg(self):
self.login("max")
org = Organization.objects.get(name="Les Amis de la Terre")
petition = org.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
sid = signature.id
data = {
'action': 'delete',
'signature_id': [sid],
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("org_dashboard", args=[org.slugname]))
self.assertTemplateUsed(response, "petition/org_dashboard.html")
s = Signature.objects.get(pk=sid)
self.assertEquals(s.id, sid) # dummy test, we just want the previous line not to raise a DoesNotExist exception
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, True)
def test_show_signatures_post_resendOK_org(self):
self.login("julia")
org = Organization.objects.get(name="Les Amis de la Terre")
petition = org.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
sid = signature.id
data = {
'action': 're-send',
'signature_id': [sid],
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("show_signatures", args=[pid]))
self.assertTemplateUsed(response, "petition/signature_data.html")
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, False)
def test_show_signatures_post_resendOK(self):
julia = self.login("julia")
petition = julia.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
sid = signature.id
data = {
'action': 're-send',
'signature_id': [sid],
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("show_signatures", args=[pid]))
self.assertTemplateUsed(response, "petition/signature_data.html")
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, False)
def test_show_signatures_post_resendallOK(self):
julia = self.login("julia")
petition = julia.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
#sid = signature.id
data = {
'action': 're-send-all',
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("show_signatures", args=[pid]))
self.assertTemplateUsed(response, "petition/signature_data.html")
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, False)
def test_show_signatures_post_resendallOK_org(self):
self.login("julia")
org = Organization.objects.get(name="Les Amis de la Terre")
petition = org.petition_set.first()
pid = petition.id
signature = Signature.objects.create(
first_name="Me",
last_name="You",
email="<EMAIL>",
petition=petition)
#sid = signature.id
data = {
'action': 're-send-all',
}
response = self.client.post(reverse("show_signatures", args=[pid]), data, follow=True)
self.assertRedirects(response, reverse("show_signatures", args=[pid]))
self.assertTemplateUsed(response, "petition/signature_data.html")
messages = response.context['messages']
self.assertGreaterEqual(len(messages), 1)
ThereIsAnyError = False
for msg in messages:
if msg.level == constants.ERROR:
ThereIsAnyError = True
self.assertEquals(ThereIsAnyError, False)
|
1712468
|
from __future__ import annotations
import numpy as np
from PIL import Image
_ptrtypestr = np.dtype(np.uintp).str
class _ArrayInterfaceForObject:
__slots__ = ('__array_interface__', 'owner_object')
def __init__(self, array_intf, obj=None):
self.__array_interface__ = array_intf
self.owner_object = obj
class _ArrayInterfaceForPointer:
__slots__ = ('__array_interface__', 'owner_object')
def __init__(self, ptr, shape, typestr='|u8', strides=None, readonly=True, owner_object=None):
self.owner_object = owner_object
self.__array_interface__ = {
'data': (ptr, readonly),
'typestr': typestr,
'shape': shape,
'strides': strides,
}
def asarray(im: Image.Image, padding_channel='stride', allow_copy=True) -> np.ndarray:
"""
    Return a numpy.ndarray that shares the underlying buffer with the given image (if possible).
Parameters
----------
im : PIL.Image.Image
The image.
padding_channel : str
Handling of padding channel in 3-channel image. 'stride' or 'passthrough' or 'copy_remove'.
'stride': The padding channel will be hidden from the result array. NOTE: ndarray with channel stride cannot be passed to OpenCV.
'passthrough': The padding channel will be available with undetermined values in the result array.
'copy_remove': The result array will be a copy of the image buffer with padding channel removed.
allow_copy : bool
Allow copying pixels when buffer sharing is unavailable, or raise an exception instead.
Returns
-------
numpy.ndarray
"""
imdata = im.getdata()
make_copy = False
shape, typestr = Image._conv_type_shape(im)
pilbuf = dict(imdata.unsafe_ptrs)
if pilbuf.get('pixel32', 0) != 0:
pixel_advance = 4
if shape[-1] == 3:
if padding_channel == 'passthrough':
shape = (shape[0], shape[1], 4)
elif padding_channel == 'stride':
pass
elif padding_channel == 'copy_remove':
if not allow_copy:
raise ValueError('cannot remove padding channel without copying')
make_copy = True
else:
raise ValueError('invalid padding_channel')
elif im.mode == 'LA':
pixel_advance = 2
else:
pixel_advance = 1
    # examine whether we can represent the PIL-style buffer as a numpy-style buffer (fixed stride)
line_ptrs = np.asarray(_ArrayInterfaceForPointer(pilbuf['image'], (im.height,), _ptrtypestr, owner_object=imdata))
pil_strides = np.diff(line_ptrs)
if np.all(pil_strides == pil_strides[0]):
# found fixed stride, construct numpy array interface
data = (line_ptrs[0], False)
arr_strides = (pil_strides[0], pixel_advance, 1) if pixel_advance > 1 else (pil_strides[0], 1)
array_intf = {
'version': 3,
'shape': shape,
'typestr': typestr,
'strides': arr_strides,
'data': data,
}
result = np.asarray(_ArrayInterfaceForObject(array_intf, imdata))
if make_copy:
result = result.copy()
return result
elif allow_copy:
# the PIL buffer cannot fit in numpy layout, so we need to copy it
if pixel_advance == 4 and shape[-1] == 3:
arr_strides = (4,1)
else:
arr_strides = None
        # use ndarray for a writable buffer; stack the per-line views so the copy keeps the image shape
        return np.stack([np.asarray(_ArrayInterfaceForPointer(p, shape[1:], typestr, strides=arr_strides, readonly=False, owner_object=imdata)) for p in line_ptrs])
raise ValueError('this PIL buffer cannot be represented as a numpy array')
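# Illustrative sketch (not part of the original module): one way asarray might be used.
# It assumes the Pillow build exposes the pixel pointers via `getdata().unsafe_ptrs`
# as relied on above; 'copy_remove' yields a contiguous array without the padding byte.
def _demo_asarray():  # pragma: no cover
    im = Image.new('RGB', (4, 3), color=(10, 20, 30))
    arr = asarray(im, padding_channel='copy_remove')
    print(arr.shape)  # expected (3, 4, 3): height, width, channels without the padding byte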
# def asarray(im: Image.Image, hide_padding_channel=True):
# """
# Return a ndarray that shares the underlying buffer with the given image.
# NOTE: For 3-channel modes (RGB, HSV, YCbCr, LAB), the returned array has stride in pixel axis, which is not supported by OpenCV.
# """
# return np.asarray(_ArrayInterfaceForObject(_pil_array_interface(im), im.getdata()))
def fromarray(arr, mode=None, must_share_buffer=False) -> Image.Image:
"""
Creates an image memory from an object exporting the array interface
(using the buffer protocol).
:param arr: Object with array interface
:param mode: Mode to use (will be determined from type if None)
See: :ref:`concept-modes`.
:returns: An image object.
.. versionadded:: 1.1.6
"""
arr = np.asarray(arr)
height, width, *channels = arr.shape
if not channels:
channels = 1
else:
channels = channels[0]
|
1712498
|
import datetime
import ckan.tests.factories as factories
import ckanext.hdx_theme.tests.hdx_test_with_inds_and_orgs as hdx_test_with_inds_and_orgs
class TestFreshness(hdx_test_with_inds_and_orgs.HDXWithIndsAndOrgsTest):
def test_is_fresh_flag(self):
dataset_1 = self._get_action('package_show')({}, {'id': 'test_dataset_1'})
assert dataset_1.get('is_fresh') is False
res1 = factories.Resource(package_id='test_dataset_1')
context = {'user': 'testsysadmin'}
result = self._get_action('package_patch')(context, {
'id': 'test_dataset_1',
'data_update_frequency': '7'
})
dataset_2 = self._get_action('package_show')({}, {'id': 'test_dataset_1'})
assert dataset_2.get('is_fresh') is True, 'last_modified is null, so revision_last_modified is used'
res_last_modified = (datetime.datetime.now() - datetime.timedelta(days=15)).isoformat()
self._get_action('resource_patch')(context, {
'id': res1['id'],
'last_modified': res_last_modified
})
dataset_3 = self._get_action('package_show')({}, {'id': 'test_dataset_1'})
assert dataset_3.get('is_fresh') is False, 'needs to be False, last_modified is more than 7+7 days in the past'
|
1712568
|
import json
from typing import Any, Dict, List, Optional
from .api_constants import LABEL, CONFIDENCE, PREDICTIONS
from .signature_constants import (
PREDICTED_LABEL_COMPAT, LABEL_CONFIDENCES, LABEL_CONFIDENCES_COMPAT, SUPPORTED_EXPORT_VERSIONS
)
from .utils import dict_get_compat
class ClassificationResult:
"""
    Data structure exposing the predicted classification result from running a Lobe model.
Exposes the top predicted label, as well as a list of tuples (label, confidence) sorted by highest confidence to lowest.
Sorted list of predictions (label, confidence): ClassificationResult.labels
Top predicted label: ClassificationResult.prediction
These can be batched and contain the results for many examples.
"""
    def __init__(self, results: Dict[str, Any], labels: Optional[List[str]] = None, export_version: Optional[int] = None):
"""
Parse the classification results from a dictionary in the form {Name: val} for each output in the signature
Labels need to be provided to map our confidences, but in the case of the local API they are already returned
with the prediction.
"""
# If `results` comes from the Lobe Connect local API, there will not be an export version and the
# predictions will already be in sorted order. Just need to assign to our 'labels' and 'prediction' variables.
if export_version is None:
api_results = results.get(PREDICTIONS, [])
self.labels = [(prediction.get(LABEL), prediction.get(CONFIDENCE)) for prediction in api_results]
self.prediction = self.labels[0][0]
# Otherwise, results comes from running the ImageModel -- check supported versions of the exported model
elif export_version in SUPPORTED_EXPORT_VERSIONS:
# grab the list of confidences
confidences, _ = dict_get_compat(in_dict=results, current_key=LABEL_CONFIDENCES,
compat_keys=LABEL_CONFIDENCES_COMPAT, default=[])
# zip the labels and confidences together
labels_and_confidences = []
# the model results are batched
for row in confidences:
# zip this row with the labels to make (label, confidence) pairs
if not labels:
raise ValueError(
f"Needed labels to assign the confidences returned. Confidences: {confidences}")
label_conf_pairs = list(zip(labels, row))
# sort them by confidence and add to the batch array
label_conf_pairs = sorted(label_conf_pairs, key=lambda pair: pair[1], reverse=True)
labels_and_confidences.append(label_conf_pairs)
# grab the predicted class if it exists (backwards compatibility)
prediction, _ = dict_get_compat(in_dict=results, current_key=None,
compat_keys=PREDICTED_LABEL_COMPAT)
# if there was no prediction, grab the label with the highest confidence from labels_and_confidences
if prediction is None:
new_prediction = []
for row in labels_and_confidences:
new_prediction.append(row[0][0])
prediction = new_prediction
# un-batch if this is a batch size of 1, so that the return is just the value for the single image
self.labels = _un_batch(labels_and_confidences)
self.prediction = _un_batch(prediction)
# Else the exported model version is not officially supported (but may still work anyway)
# Throw a ValueError with details.
else:
raise ValueError(
f'The model version {export_version} you are using may not be compatible with the supported versions {SUPPORTED_EXPORT_VERSIONS}. Please update both lobe-python and Lobe to latest versions, and try exporting your model again. If the issue persists, please contact us at <EMAIL>'
)
def as_dict(self):
return {
"Labels": self.labels,
"Prediction": self.prediction,
}
def __str__(self) -> str:
return json.dumps(self.as_dict())
def _un_batch(item):
"""
Given an arbitrary input, if it is a list with exactly one item then return that first item
"""
if isinstance(item, list) and len(item) == 1:
item = item[0]
return item
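# Illustrative sketch (not part of the original module): parsing a local-API style
# response, i.e. the `export_version is None` branch above. The PREDICTIONS, LABEL and
# CONFIDENCE constants imported above are used directly as dictionary keys.
def _demo_classification_result():  # pragma: no cover
    api_results = {
        PREDICTIONS: [
            {LABEL: "cat", CONFIDENCE: 0.9},
            {LABEL: "dog", CONFIDENCE: 0.1},
        ]
    }
    result = ClassificationResult(api_results)
    print(result.prediction)  # "cat"
    print(result)             # JSON string produced by as_dict()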
|
1712602
|
from django import forms
from django.urls import reverse_lazy
from django_filters import CharFilter
from django_filters import MultipleChoiceFilter
from common.filters import MultiValueCharFilter
from common.filters import TamatoFilter
from common.filters import TamatoFilterBackend
from quotas import models
from quotas import validators
from quotas.forms import QuotaFilterForm
class OrderNumberFilterBackend(TamatoFilterBackend):
search_fields = ("order_number",) # XXX order is significant
class QuotaFilter(TamatoFilter):
order_number = CharFilter(
label="Order number",
field_name="order_number",
)
origin = MultiValueCharFilter(
label="Geographical area(s)",
field_name="origins__area_id",
)
mechanism = MultipleChoiceFilter(
label="Administration mechanism",
field_name="mechanism",
widget=forms.CheckboxSelectMultiple,
help_text="Select all that apply",
choices=validators.AdministrationMechanism.choices,
)
category = MultipleChoiceFilter(
label="Quota category",
field_name="category",
widget=forms.CheckboxSelectMultiple,
help_text="Select all that apply",
choices=validators.QuotaCategory.choices,
)
clear_url = reverse_lazy("quota-ui-list")
class Meta:
form = QuotaFilterForm
model = models.QuotaDefinition
fields = ["order_number"]
|
1712604
|
import re
test_string = '"the" is the most used word in the English language'
def word_count(s: str) -> int:
"""
>>> word_count(test_string)
10
"""
s = re.sub('[^A-Za-z0-9 ]+', '', s)
return len(s.lower().split())
def unique_word_count(s: str) -> int:
"""
>>> unique_word_count(test_string)
8
"""
s = re.sub('[^A-Za-z0-9 ]+', '', s)
return len(set(s.lower().split()))
for s in ("The Matrix", "To Be or Not to Be", "Kiss Kiss Bang Bang", test_string):
print(s, word_count(s), unique_word_count(s))
|
1712605
|
import pytest
import cupy
if cupy.cuda.runtime.is_hip:
pytest.skip('HIP sparse support is not yet ready',
allow_module_level=True)
|
1712614
|
from typing import Iterable, Optional
from unittest.mock import patch
# This import verifies that the dependencies are available.
import cx_Oracle # noqa: F401
import pydantic
from pydantic.fields import Field
from sqlalchemy import event
from sqlalchemy.dialects.oracle.base import OracleDialect
from sqlalchemy.engine.reflection import Inspector
from datahub.ingestion.api.decorators import (
SourceCapability,
SupportStatus,
capability,
config_class,
platform_name,
support_status,
)
from datahub.ingestion.source.sql.sql_common import (
BasicSQLAlchemyConfig,
SQLAlchemySource,
make_sqlalchemy_type,
)
extra_oracle_types = {
make_sqlalchemy_type("SDO_GEOMETRY"),
make_sqlalchemy_type("SDO_POINT_TYPE"),
make_sqlalchemy_type("SDO_ELEM_INFO_ARRAY"),
make_sqlalchemy_type("SDO_ORDINATE_ARRAY"),
}
assert OracleDialect.ischema_names
def output_type_handler(cursor, name, defaultType, size, precision, scale):
"""Add CLOB and BLOB support to Oracle connection."""
if defaultType == cx_Oracle.CLOB:
return cursor.var(cx_Oracle.LONG_STRING, arraysize=cursor.arraysize)
elif defaultType == cx_Oracle.BLOB:
return cursor.var(cx_Oracle.LONG_BINARY, arraysize=cursor.arraysize)
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
cursor.outputtypehandler = output_type_handler
class OracleConfig(BasicSQLAlchemyConfig):
# defaults
scheme: str = Field(
default="oracle+cx_oracle",
description="Will be set automatically to default value.",
)
service_name: Optional[str] = Field(
default=None, description="Oracle service name. If using, omit `database`."
)
database: Optional[str] = Field(
default=None, description="If using, omit `service_name`."
)
@pydantic.validator("service_name")
def check_service_name(cls, v, values):
if values.get("database") and v:
raise ValueError(
"specify one of 'database' and 'service_name', but not both"
)
return v
def get_sql_alchemy_url(self):
url = super().get_sql_alchemy_url()
if self.service_name:
assert not self.database
url = f"{url}/?service_name={self.service_name}"
return url
@platform_name("Oracle")
@config_class(OracleConfig)
@support_status(SupportStatus.CERTIFIED)
@capability(SourceCapability.DOMAINS, "Enabled by default")
class OracleSource(SQLAlchemySource):
"""
This plugin extracts the following:
- Metadata for databases, schemas, and tables
- Column types associated with each table
- Table, row, and column statistics via optional SQL profiling
Using the Oracle source requires that you've also installed the correct drivers; see the [cx_Oracle docs](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html). The easiest one is the [Oracle Instant Client](https://www.oracle.com/database/technologies/instant-client.html).
"""
def __init__(self, config, ctx):
super().__init__(config, ctx, "oracle")
@classmethod
def create(cls, config_dict, ctx):
config = OracleConfig.parse_obj(config_dict)
return cls(config, ctx)
def get_inspectors(self) -> Iterable[Inspector]:
for inspector in super().get_inspectors():
event.listen(
inspector.engine, "before_cursor_execute", before_cursor_execute
)
yield inspector
def get_workunits(self):
with patch.dict(
"sqlalchemy.dialects.oracle.base.OracleDialect.ischema_names",
{klass.__name__: klass for klass in extra_oracle_types},
clear=False,
):
return super().get_workunits()
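# Illustrative sketch (assumption, not part of the original source): a recipe-style config
# dict for this source. `host_port`, `username` and `password` are assumed to be fields
# inherited from BasicSQLAlchemyConfig; only `scheme`, `service_name` and `database` are
# declared in this module.
_EXAMPLE_ORACLE_CONFIG = {
    "host_port": "oracle-host:1521",   # assumed inherited field
    "username": "datahub",             # assumed inherited field
    "password": "example-password",    # assumed inherited field
    "service_name": "ORCLPDB1",        # omit `database` when `service_name` is set
}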
|
1712642
|
import argparse
import numpy as np
import pandas as pd
import pyproj
from netCDF4 import Dataset
from make_proj_grids import read_ncar_map_file, make_proj_grids
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--start_date", required=True, help="Start date in YYYY-MM-DD format")
parser.add_argument("-e", "--end_date", required=False, help="End date in YYYY-MM-DD format")
parser.add_argument("-o", "--out_path", required=True, help="Path to the destination of MESH verification data")
parser.add_argument("-m", "--map_file", required=True, help="Path to the ensemble mapfile")
args = parser.parse_args()
if args.end_date:
run_dates = pd.date_range(start=args.start_date, end=args.end_date, freq='1D').strftime("%y%m%d")
else:
run_dates = pd.date_range(start=args.start_date, end=args.start_date, freq='1D').strftime("%y%m%d")
out_path = args.out_path
mapfile = args.map_file
LSR_calibration_data(mapfile, out_path, run_dates)
return
def LSR_calibration_data(mapfile, out_path, run_dates, hours=[17, 19, 21], sector=None):
"""
    Using the grid from the input ML forecast (netCDF) data, rasterize SPC storm
    reports with a 25 mile radius around each report.
    The output file contains binary data, where any point
    within the 25 mile radius is 1, and all other points are 0.
    Currently only netCDF files are supported.
"""
hail_threshold = [25, 50]
lsr_dict = dict()
proj_dict, grid_dict = read_ncar_map_file(mapfile)
projection = pyproj.Proj(proj_dict)
mapping_data = make_proj_grids(proj_dict, grid_dict)
forecast_lons = np.array(mapping_data['lon'])
forecast_lats = np.array(mapping_data['lat'])
forecast_x = np.array(mapping_data['x'])
forecast_y = np.array(mapping_data['y'])
for date in run_dates:
print(date)
csv_file = 'https://www.spc.noaa.gov/climo/reports/{0}_rpts_hail.csv'.format(date)
try:
hail_reports = pd.read_csv(csv_file)
        except Exception as exc:
            print('Report CSV file could not be opened:', exc)
continue
for threshold in hail_threshold:
# if os.path.exists(out_path+'{0}_{1}_lsr_mask.nc'.format(date,threshold)):
# print('>{0}mm file already exists'.format(threshold))
# continue
# print('Creating LSR mask >{0}mm'.format(threshold))
# Get size values from hail reports
inches_thresh = round((threshold) * 0.03937) * 100
report_size = hail_reports.loc[:, 'Size'].values
lsr_dict['full_day'] = np.zeros(forecast_lats.shape)
full_day_indices = np.where(report_size >= inches_thresh)[0]
if len(full_day_indices) < 1:
print('No >{0}mm LSRs found'.format(threshold))
continue
reports_lat_full = hail_reports.loc[full_day_indices, 'Lat'].values
reports_lon_full = hail_reports.loc[full_day_indices, 'Lon'].values
lsr_dict['full_day'] = calculate_distance(reports_lat_full, reports_lon_full, forecast_y, forecast_x,
projection)
# Get time values from hail reports
report_time = (hail_reports.loc[:, 'Time'].values) / 100
# Get lat/lon of different time periods and hail sizes
for start_hour in hours:
lsr_dict['{0}'.format(start_hour)] = np.zeros(forecast_lats.shape)
end_hour = (start_hour + 4) % 24
if end_hour > 12:
hour_indices = \
np.where((start_hour <= report_time) & (end_hour >= report_time) & (report_size >= inches_thresh))[
0]
else:
# Find reports before and after 0z
hour_before_0z = np.where((start_hour <= report_time) & (report_size >= inches_thresh))[0]
hour_after_0z = np.where((end_hour >= report_time) & (report_size >= inches_thresh))[0]
# Combine two arrays
hour_indices = np.hstack((hour_before_0z, hour_after_0z))
if len(hour_indices) < 1:
continue
reports_lat = hail_reports.loc[hour_indices, 'Lat'].values
reports_lon = hail_reports.loc[hour_indices, 'Lon'].values
lsr_dict['{0}'.format(start_hour)] = calculate_distance(reports_lat, reports_lon, forecast_y,
forecast_x, projection)
# Create netCDF file
if sector:
out_filename = out_path + '{0}_{1}_{2}_lsr_mask.nc'.format(date, threshold, sector)
else:
out_filename = out_path + '{0}_{1}_lsr_mask.nc'.format(date, threshold)
out_file = Dataset(out_filename, "w")
out_file.createDimension("x", forecast_lons.shape[0])
out_file.createDimension("y", forecast_lons.shape[1])
out_file.createVariable("Longitude", "f4", ("x", "y"))
out_file.createVariable("Latitude", "f4", ("x", "y"))
out_file.createVariable("24_Hour_All_12z_12z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_17z_21z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_19z_23z", "f4", ("x", "y"))
out_file.createVariable("4_Hour_All_21z_25z", "f4", ("x", "y"))
out_file.variables["Longitude"][:, :] = forecast_lons
out_file.variables["Latitude"][:, :] = forecast_lats
out_file.variables["24_Hour_All_12z_12z"][:, :] = lsr_dict['full_day']
out_file.variables["4_Hour_All_17z_21z"][:, :] = lsr_dict['17']
out_file.variables["4_Hour_All_19z_23z"][:, :] = lsr_dict['19']
out_file.variables["4_Hour_All_21z_25z"][:, :] = lsr_dict['21']
out_file.close()
print("Writing to " + out_filename)
print()
return
def calculate_distance(obs_lat, obs_lon, forecast_y, forecast_x, projection):
"""
    Calculate the distance between forecast grid points and observed report locations.
    Returns:
        Binary array where ones mark grid points within a 25 mile (40234 m) radius of a report
"""
x, y = projection(obs_lon, obs_lat)
mask_array = np.zeros(forecast_y.shape)
for index, point in enumerate(obs_lat):
y_diff = (y[index] - forecast_y) ** 2.0
x_diff = (x[index] - forecast_x) ** 2.0
total_dist = np.sqrt(y_diff + x_diff)
row, col = np.where(total_dist < 40234.0)
        mask_array[row, col] = 1.0
return mask_array
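# Illustrative sketch (not part of the original script): calculate_distance on a tiny
# Lambert conformal grid with a single report at the projection origin; only the grid
# point at (0, 0) falls within the 40234 m (25 mile) radius, so the mask sums to 1.
def _demo_calculate_distance():  # pragma: no cover
    proj = pyproj.Proj(proj="lcc", lat_1=33.0, lat_2=45.0, lat_0=39.0, lon_0=-96.0, ellps="WGS84")
    x_coords = np.arange(0.0, 200000.0, 50000.0)
    y_coords = np.arange(0.0, 200000.0, 50000.0)
    forecast_x, forecast_y = np.meshgrid(x_coords, y_coords)
    mask = calculate_distance(np.array([39.0]), np.array([-96.0]), forecast_y, forecast_x, proj)
    print(int(mask.sum()))  # expected 1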
if __name__ == "__main__":
main()
|
1712667
|
from pylabnet.utils.helper_methods import load_device_config, get_ip
from pylabnet.network.core.service_base import ServiceBase
from pylabnet.network.core.generic_server import GenericServer
from pylabnet.network.core.client_base import ClientBase
import os
class Dummy:
pass
class Client(ClientBase):
pass
def launch(**kwargs):
""" Launches a dummy hardware driver and instantiates server """
log = kwargs['logger']
log.info(f'Launching with config {kwargs["config"]}')
config = load_device_config(
os.path.basename(__file__)[:-3],
kwargs['config'],
log
)
dum = Dummy()
log.info(f'Created dummy object with configuration parameters {config}')
dum_service = ServiceBase()
dum_service.assign_module(module=dum)
dum_service.assign_logger(logger=log)
dum_server = GenericServer(
service=dum_service,
host=get_ip(),
port=kwargs['port']
)
dum_server.start()
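# Illustrative sketch (assumption, not part of the original module): the keyword
# arguments launch() expects, inferred from how they are used above. `logger` must
# expose .info(), `config` names a device config resolved by load_device_config, and
# `port` is the port passed to GenericServer. All values below are hypothetical.
_EXAMPLE_LAUNCH_KWARGS = {
    "logger": None,      # replace with a log client or any object exposing .info()
    "config": "dummy",   # hypothetical config name
    "port": 17972,       # hypothetical server port
}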
|
1712672
|
import numpy as np
import pandas as pd
def long_to_short_transformation(df, idx, icds):
"""
Summary:
        Processes a dataframe in long format, rolling up many rows (e.g. many claims per episode) into one row per id
        while aggregating all of the unique ICD codes. This is a typical preprocessing step before calling icd_to_comorbidities.
        REQUIRED: RESET THE INDEX OF df BEFORE CALLING
        REQUIRED: df MUST ALREADY BE SORTED BY THE IDX (AND ANY SECONDARY INDEX) YOU ARE ROLLING UP ON
    Args:
        df (pandas DataFrame): long-format dataframe containing the id column and the columns holding the ICD codes
        idx (str): id column name used to group rows; kept as a column in the output
        icds (list of str): list of columns that contain the ICD codes
    Returns:
        New pandas DataFrame with one row per id and the rolled-up ICD codes appended wide-wise as columns icd_0, icd_1, ... icd_n
"""
info_dict = {}
current_icd_set = set()
id_list = []
unique_icd_count = 0
current_id = ""
last_id = df.loc[0,idx] #Initializing last_id to get over first iteration
#Step 1
#Populate the appropriate info and find the largest unique_icd_count to allocate column space
for row in range(0,len(df)):
#Initialize the new row id
current_id = df.loc[row,idx]
#Know when to switch to a new set and save the temp info
if current_id != last_id:
#update new dataframe column counter
if len(current_icd_set) > unique_icd_count:
unique_icd_count = len(current_icd_set)
#save the icd set casted to a list for faster iteration in the next step
info_dict[last_id] = list(current_icd_set)
#clear the current set
current_icd_set = set()
#Loop over columns, adding to set
for col in icds:
icd = df.loc[row,col]
current_icd_set.add(icd)
#Remember the last id for next loop
last_id = current_id
#Loop is done save out one last time for last record
if len(current_icd_set) > unique_icd_count:
unique_icd_count = len(current_icd_set)
#save the icd set casted to a list for faster iteration in the next step
info_dict[last_id] = list(current_icd_set)
#Step 2
#Create and populate output df
#Need equal length lists if mapping a df from dict, so we pad the lists in the current dict
for key in info_dict.keys():
info_dict[key] += [''] * (unique_icd_count - len(info_dict[key]))
#Populating the out_df
out_df = pd.DataFrame.from_dict(info_dict, orient="index")
#Creating columns list
columns = ["icd_" + str(i) for i in range(0,unique_icd_count)]
out_df.columns = columns #rename to out columns
out_df[idx] = out_df.index.tolist() #Give us a column 'id' in addition in case we want to throw into icd_to_comorbidities next
    return out_df
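# Illustrative sketch (not part of the original module): rolling up two claims per
# episode into one wide row of unique ICD codes. Column and id names are made up.
def _demo_long_to_short():  # pragma: no cover
    df = pd.DataFrame({
        "episode_id": [1, 1, 2],
        "icd_a": ["E11", "E11", "I10"],
        "icd_b": ["I10", "N18", "E78"],
    })
    wide = long_to_short_transformation(df, idx="episode_id", icds=["icd_a", "icd_b"])
    print(wide)  # one row per episode_id with columns icd_0, icd_1, icd_2 plus episode_id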
|