content stringlengths 27 928k | path stringlengths 4 230 | size int64 27 928k | nl_text stringlengths 21 396k | nl_size int64 21 396k | nl_language stringlengths 2 3 | nl_language_score float64 0.04 1 |
|---|---|---|---|---|---|---|
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## | python/pycylon/pycylon/common/__init__.py | 557 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 517 | en | 0.872906 |
"""Support for interface with an Samsung TV."""
import asyncio
from datetime import timedelta
import logging
import socket
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE,
SUPPORT_PLAY, SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_STEP)
from homeassistant.const import (
CONF_HOST, CONF_MAC, CONF_NAME, CONF_PORT, CONF_TIMEOUT, STATE_OFF,
STATE_ON)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Samsung TV Remote'
# 55000 is the port of the legacy Samsung remote-control protocol.
DEFAULT_PORT = 55000
DEFAULT_TIMEOUT = 1
# Seconds to wait between digit key presses when changing channel.
KEY_PRESS_TIMEOUT = 1.2
# hass.data key holding the set of device IPs that were already added.
KNOWN_DEVICES_KEY = 'samsungtv_known_devices'
# Friendly source name -> remote key code sent to select it.
SOURCES = {
    'TV': 'KEY_TV',
    'HDMI': 'KEY_HDMI',
}
# Feature bitmask common to all configurations; SUPPORT_TURN_ON is added
# dynamically in supported_features when a MAC address is configured.
SUPPORT_SAMSUNGTV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \
    SUPPORT_VOLUME_MUTE | SUPPORT_PREVIOUS_TRACK | SUPPORT_SELECT_SOURCE | \
    SUPPORT_NEXT_TRACK | SUPPORT_TURN_OFF | SUPPORT_PLAY | SUPPORT_PLAY_MEDIA
# YAML configuration schema for manual platform setup.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
    vol.Optional(CONF_MAC): cv.string,
    vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
})
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the Samsung TV platform."""
    # Shared registry of already-added device IPs, created on first use.
    known_devices = hass.data.setdefault(KNOWN_DEVICES_KEY, set())
    uuid = None
    if config.get(CONF_HOST) is not None:
        # Manual YAML configuration.
        host = config.get(CONF_HOST)
        port = config.get(CONF_PORT)
        name = config.get(CONF_NAME)
        mac = config.get(CONF_MAC)
        timeout = config.get(CONF_TIMEOUT)
    elif discovery_info is not None:
        # Discovered device: derive the name from discovery metadata and
        # fall back to platform defaults for everything else.
        host = discovery_info.get('host')
        name = "{} ({})".format(discovery_info.get('name'),
                                discovery_info.get('model_name'))
        port = DEFAULT_PORT
        timeout = DEFAULT_TIMEOUT
        mac = None
        udn = discovery_info.get('udn')
        if udn and udn.startswith('uuid:'):
            uuid = udn[len('uuid:'):]
    else:
        _LOGGER.warning("Cannot determine device")
        return
    # Only add a device once, so discovered devices do not override manual
    # config.
    ip_addr = socket.gethostbyname(host)
    if ip_addr in known_devices:
        _LOGGER.info("Ignoring duplicate Samsung TV %s:%d", host, port)
        return
    known_devices.add(ip_addr)
    add_entities([SamsungTVDevice(host, port, name, timeout, mac, uuid)])
    _LOGGER.info("Samsung TV %s:%d added as '%s'", host, port, name)
class SamsungTVDevice(MediaPlayerDevice):
    """Representation of a Samsung TV."""

    def __init__(self, host, port, name, timeout, mac, uuid):
        """Initialize the Samsung device.

        :param host: Hostname or IP address of the TV.
        :param port: Remote-control port (8001/8002 imply the websocket API).
        :param name: Friendly entity name.
        :param timeout: Connection timeout passed to the samsungctl library.
        :param mac: Optional MAC address; enables Wake-on-LAN power-on.
        :param uuid: Optional unique id derived from the discovered UDN.
        """
        from samsungctl import exceptions
        from samsungctl import Remote
        import wakeonlan
        # Save a reference to the imported classes
        self._exceptions_class = exceptions
        self._remote_class = Remote
        self._name = name
        self._mac = mac
        self._uuid = uuid
        self._wol = wakeonlan
        # Assume that the TV is not muted
        self._muted = False
        # Assume that the TV is in Play mode
        self._playing = True
        self._state = None
        self._remote = None
        # Mark the end of a shutdown command (need to wait 15 seconds before
        # sending the next command to avoid turning the TV back ON).
        self._end_of_power_off = None
        # Generate a configuration for the Samsung library
        self._config = {
            'name': 'HomeAssistant',
            'description': name,
            'id': 'ha.component.samsung',
            'port': port,
            'host': host,
            'timeout': timeout,
        }
        # Ports 8001/8002 are the websocket API on newer models; everything
        # else talks the legacy protocol.
        if self._config['port'] in (8001, 8002):
            self._config['method'] = 'websocket'
        else:
            self._config['method'] = 'legacy'

    def update(self):
        """Update state of device.

        Sends a no-op key; send_key() updates _state based on whether the
        TV answers or the connection fails.
        """
        self.send_key("KEY")

    def get_remote(self):
        """Create or return a remote control instance."""
        if self._remote is None:
            # We need to create a new instance to reconnect.
            self._remote = self._remote_class(self._config)
        return self._remote

    def send_key(self, key):
        """Send a key to the tv and handle exceptions.

        Successful (or answered-with-error) sends mark the TV ON; OS-level
        connection failures mark it OFF.
        """
        if self._power_off_in_progress() \
                and key not in ('KEY_POWER', 'KEY_POWEROFF'):
            _LOGGER.info("TV is powering off, not sending command: %s", key)
            return
        try:
            # recreate connection if connection was dead
            retry_count = 1
            for _ in range(retry_count + 1):
                try:
                    self.get_remote().control(key)
                    break
                except (self._exceptions_class.ConnectionClosed,
                        BrokenPipeError):
                    # BrokenPipe can occur when commands are sent too fast
                    self._remote = None
            self._state = STATE_ON
        except (self._exceptions_class.UnhandledResponse,
                self._exceptions_class.AccessDenied):
            # We got a response so it's on.
            self._state = STATE_ON
            self._remote = None
            _LOGGER.debug("Failed sending command %s", key, exc_info=True)
            return
        except OSError:
            self._state = STATE_OFF
            self._remote = None
        # While a shutdown is pending, always report OFF regardless of the
        # send outcome above.
        if self._power_off_in_progress():
            self._state = STATE_OFF

    def _power_off_in_progress(self):
        """Return True while the post-shutdown grace period is running."""
        return self._end_of_power_off is not None and \
            self._end_of_power_off > dt_util.utcnow()

    @property
    def unique_id(self) -> str:
        """Return the unique ID of the device."""
        return self._uuid

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def is_volume_muted(self):
        """Boolean if volume is currently muted."""
        return self._muted

    @property
    def source_list(self):
        """List of available input sources."""
        return list(SOURCES)

    @property
    def supported_features(self):
        """Flag media player features that are supported."""
        # Turning on requires Wake-on-LAN, which needs a MAC address.
        if self._mac:
            return SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON
        return SUPPORT_SAMSUNGTV

    def turn_off(self):
        """Turn off media player."""
        self._end_of_power_off = dt_util.utcnow() + timedelta(seconds=15)
        if self._config['method'] == 'websocket':
            self.send_key('KEY_POWER')
        else:
            self.send_key('KEY_POWEROFF')
        # Force closing of remote session to provide instant UI feedback
        try:
            self.get_remote().close()
            self._remote = None
        except OSError:
            _LOGGER.debug("Could not establish connection.")

    def volume_up(self):
        """Volume up the media player."""
        self.send_key('KEY_VOLUP')

    def volume_down(self):
        """Volume down media player."""
        self.send_key('KEY_VOLDOWN')

    def mute_volume(self, mute):
        """Send mute command."""
        self.send_key('KEY_MUTE')

    def media_play_pause(self):
        """Simulate play pause media player."""
        if self._playing:
            self.media_pause()
        else:
            self.media_play()

    def media_play(self):
        """Send play command."""
        self._playing = True
        self.send_key('KEY_PLAY')

    def media_pause(self):
        """Send media pause command to media player."""
        self._playing = False
        self.send_key('KEY_PAUSE')

    def media_next_track(self):
        """Send next track command."""
        self.send_key('KEY_FF')

    def media_previous_track(self):
        """Send the previous track command."""
        self.send_key('KEY_REWIND')

    async def async_play_media(self, media_type, media_id, **kwargs):
        """Support changing a channel."""
        if media_type != MEDIA_TYPE_CHANNEL:
            _LOGGER.error('Unsupported media type')
            return
        # media_id should only be a channel number
        try:
            cv.positive_int(media_id)
        except vol.Invalid:
            _LOGGER.error('Media ID must be positive integer')
            return
        for digit in media_id:
            await self.hass.async_add_job(self.send_key, 'KEY_' + digit)
            # BUG FIX: the second positional argument of asyncio.sleep() is
            # the *result* value, not the event loop; the original passed
            # self.hass.loop there, which did nothing useful.
            await asyncio.sleep(KEY_PRESS_TIMEOUT)
        await self.hass.async_add_job(self.send_key, 'KEY_ENTER')

    def turn_on(self):
        """Turn the media player on."""
        if self._mac:
            self._wol.send_magic_packet(self._mac)
        else:
            self.send_key('KEY_POWERON')

    async def async_select_source(self, source):
        """Select input source."""
        if source not in SOURCES:
            _LOGGER.error('Unsupported source')
            return
        await self.hass.async_add_job(self.send_key, SOURCES[source])
| homeassistant/components/samsungtv/media_player.py | 9,626 | Representation of a Samsung TV.
Initialize the Samsung device.
Create or return a remote control instance.
Boolean if volume is currently muted.
Send next track command.
Send media pause command to media player.
Send play command.
Simulate play pause media player.
Send the previous track command.
Send mute command.
Return the name of the device.
Send a key to the tv and handles exceptions.
Set up the Samsung TV platform.
List of available input sources.
Return the state of the device.
Flag media player features that are supported.
Turn off media player.
Turn the media player on.
Return the unique ID of the device.
Update state of device.
Volume down media player.
Volume up the media player.
Support for interface with an Samsung TV.
Is this a manual configuration? Only add a device once, so discovered devices do not override manual config. Save a reference to the imported classes Assume that the TV is not muted Assume that the TV is in Play mode Mark the end of a shutdown command (need to wait 15 seconds before sending the next command to avoid turning the TV back ON). Generate a configuration for the Samsung library We need to create a new instance to reconnect. recreate connection if connection was dead BrokenPipe can occur when the commands is sent to fast We got a response so it's on. Force closing of remote session to provide instant UI feedback media_id should only be a channel number | 1,414 | en | 0.871632 |
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django's administrative command-line interface."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'everpro.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Give an actionable hint instead of a bare import failure.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| backend/everpro/manage.py | 627 | Django's command-line utility for administrative tasks.
!/usr/bin/env python | 77 | en | 0.656913 |
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a Container VM with the provided Container manifest."""
from container_helper import GenerateManifest
# Base URL of the GCE v1 REST API; all resource URLs are built on it.
COMPUTE_URL_BASE = 'https://www.googleapis.com/compute/v1/'
def GlobalComputeUrl(project, collection, name):
  """Return the full URL of a global compute resource."""
  return '{}projects/{}/global/{}/{}'.format(
      COMPUTE_URL_BASE, project, collection, name)
def ZonalComputeUrl(project, zone, collection, name):
  """Return the full URL of a zone-scoped compute resource."""
  parts = [COMPUTE_URL_BASE, 'projects/', project,
           '/zones/', zone, '/', collection, '/', name]
  return ''.join(parts)
def GenerateConfig(context):
  """Generate configuration.

  Builds one 'compute.v1.instance' resource that boots a COS image and runs
  the container manifest produced by GenerateManifest(context).
  """
  base_name = context.env['name']
  # Properties for the container-based instance.
  instance = {
      'zone': context.properties['zone'],
      # Machine type is fixed to 'f1-micro' for this template.
      'machineType': ZonalComputeUrl(context.env['project'],
                                     context.properties['zone'],
                                     'machineTypes',
                                     'f1-micro'),
      'metadata': {
          'items': [{
              # COS reads the container manifest from this metadata key.
              'key': 'gce-container-declaration',
              'value': GenerateManifest(context)
          }]
      },
      'disks': [{
          'deviceName': 'boot',
          'type': 'PERSISTENT',
          'autoDelete': True,
          'boot': True,
          'initializeParams': {
              'diskName': base_name + '-disk',
              # Boot image comes from the public 'cos-cloud' project.
              'sourceImage': GlobalComputeUrl('cos-cloud',
                                              'images',
                                              context.properties[
                                                  'containerImage'])
          },
      }],
      'networkInterfaces': [{
          # One ephemeral external IP on the default network.
          'accessConfigs': [{
              'name': 'external-nat',
              'type': 'ONE_TO_ONE_NAT'
          }],
          'network': GlobalComputeUrl(context.env['project'],
                                      'networks',
                                      'default')
      }],
      'serviceAccounts': [{
          'email': 'default',
          # Only logging scope is granted to the instance.
          'scopes': ['https://www.googleapis.com/auth/logging.write']
      }]
  }
  # Resources to return.
  resources = {
      'resources': [{
          'name': base_name,
          'type': 'compute.v1.instance',
          'properties': instance
      }]
  }
  return resources | templates/container_vm.py | 2,909 | Generate configuration.
Creates a Container VM with the provided Container manifest.
Copyright 2016 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Properties for the container-based instance. Resources to return. | 722 | en | 0.849014 |
"""Data handling by server or instance."""
import json
import urllib3
class DataHandler(object):
    """Handle data.

    When ``server_port`` is set, every operation is proxied over HTTP to a
    Monitor Requests server on localhost; otherwise the data is aggregated
    in the ``logged_requests`` and ``analysis`` attributes of this instance.
    """

    def __init__(self, server_port=None):
        """Initialize.

        :param server_port: Int. local port.
        """
        self.server_port = server_port
        # Per-URL details; populated only in instance (serverless) mode.
        self.logged_requests = {}
        # Aggregate counters; populated only in instance (serverless) mode.
        self.analysis = {
            'total_requests': 0, 'domains': set(), 'duration': 0
        }

    def _request(self, method, **kwargs):
        """Send an HTTP request to the local server and validate the status.

        Shared by _delete/_get/_post, which previously triplicated this
        boilerplate.

        :param method: HTTP verb, e.g. 'GET'.
        :param kwargs: Extra keyword arguments for urllib3's request().
        :return: The urllib3 response object.
        :raises Exception: on any non-200 response.
        """
        http = urllib3.PoolManager()
        resp = http.request(
            method,
            'http://localhost:{}/'.format(self.server_port),
            **kwargs
        )
        if resp.status != 200:
            raise Exception('Monitor Requests server error: {}.'.format(
                resp.status
            ))
        return resp

    def _delete(self):
        self._request('DELETE')

    def _get(self):
        return json.loads(self._request('GET').data)

    def _post(self, data):
        self._request(
            'POST',
            headers={'Content-Type': 'application/json'},
            body=json.dumps(data)
        )

    def delete(self):
        """Delete data from server if applicable."""
        if not self.server_port:
            return
        self._delete()

    def log(self, url, domain, method, response, tb_list, duration):
        """Log request, store traceback/response data and update counts."""
        if self.server_port:
            self._post({
                'url': url,
                'domain': domain,
                'method': method,
                'response_content': str(response.content),
                'response_status_code': response.status_code,
                'duration': duration,
                'traceback_list': tb_list
            })
            return
        if url not in self.logged_requests:
            self.logged_requests[url] = {
                'count': 0,
                'methods': set(),
                'tracebacks': set(),
                'responses': set()
            }
        entry = self.logged_requests[url]
        entry['count'] += 1
        entry['methods'].add(method)
        # Tracebacks are stored as tuples so they are hashable set members.
        entry['tracebacks'].add(tuple(tb_list))
        entry['responses'].add((
            response.status_code,
            response.content,
        ))
        self.analysis['duration'] += duration
        self.analysis['total_requests'] += 1
        self.analysis['domains'].add(domain)

    def retrieve(self):
        """Retrieve data from server or instance."""
        if not self.server_port:
            return self.logged_requests, self.analysis
        data = self._get()
        return data.get('logged_requests'), data.get('analysis')
| monitor_requests/data.py | 3,207 | Handle data.
Initialize.
:param server_port: Int. local port.
Delete data from server if applicable.
Log request, store traceback/response data and update counts.
Retrieve data from server or instance.
Data handling by server or instance. | 239 | en | 0.700405 |
from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, Group
)
from phonenumber_field.modelfields import PhoneNumberField
# For the signal
from django.dispatch import receiver
from django.urls import reverse
from django.core.mail import send_mail
from django_rest_passwordreset.signals import reset_password_token_created
@receiver(reset_password_token_created)
def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):
    """Email the password-reset link to the user when a token is created."""
    reset_path = reverse('password_reset:reset-password-request')
    email_plaintext_message = "{}?token={}".format(
        reset_path, reset_password_token.key)
    subject = "Password Reset for {title}".format(title="Some website title")
    send_mail(
        subject,
        email_plaintext_message,
        "noreply@somehost.local",
        [reset_password_token.user.email]
    )
class CustomUserManager(BaseUserManager):
    """Manager that creates users keyed by email instead of a username."""

    def create_user(self, email, password=None):
        """Create, save and return a regular user for the given email."""
        if not email:
            raise ValueError('Users must have an email address')
        new_user = self.model(email=self.normalize_email(email))
        new_user.set_password(password)
        new_user.save(using=self._db)
        return new_user

    def create_superuser(self, email, password=None):
        """Create a user and promote it to admin/staff."""
        superuser = self.create_user(email, password=password)
        superuser.is_admin = True
        superuser.is_staff = True
        superuser.save(using=self._db)
        return superuser
class User(AbstractBaseUser):
    """Application user identified by a unique email address."""

    email = models.EmailField(
        verbose_name='email address',
        max_length=255,
        unique=True,
    )
    is_active = models.BooleanField(default=True)
    is_admin = models.BooleanField(default=False)
    is_staff = models.BooleanField(default=False)
    createdAt = models.DateTimeField(auto_now_add=True)
    updatedAt = models.DateTimeField(auto_now=True)
    USERNAME_FIELD = 'email'
    objects = CustomUserManager()

    class Meta:
        # BUG FIX: this was written ``ordering: ['-createdAt']`` — a bare
        # annotation that Django ignores entirely. It must be an assignment
        # for the default ordering to take effect.
        ordering = ['-createdAt']
        verbose_name_plural = "Users"

    def __str__(self):
        return self.email

    def has_perm(self, perm, obj=None):
        "Does the user have a specific permission?"
        # Simplest possible answer: Yes, always
        return True

    def has_module_perms(self, app_label):
        "Does the user have permissions to view the app `app_label`?"
        # Simplest possible answer: Yes, always
        return True
class UserProfile(models.Model):
    """Per-user profile details, linked one-to-one with User."""

    # Choices for a gender field; currently unreferenced by any field below.
    GENDER = (
        ('M', "Male"),
        ('F', "Female"),
    )
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE,
        primary_key=True,
    )
    firstName = models.CharField(max_length=100)
    lastName = models.CharField(max_length=100)
    phone = PhoneNumberField(null=False, blank=False, unique=True)
    createdAt = models.DateTimeField(auto_now_add=True)
    updatedAt = models.DateTimeField(auto_now=True)

    class Meta:
        # BUG FIX: was ``ordering: ['-createdAt']`` (annotation only, no
        # effect); Django requires an assignment here.
        ordering = ['-createdAt']
        verbose_name_plural = "UserProfiles"

    def userEmail(self):
        """Return the email address of the linked User."""
        return self.user.email

    def fullName(self):
        """Return the profile's 'first last' display name."""
        return f'{self.firstName} {self.lastName}'

    def __str__(self):
        return self.fullName()
| dentalapp-backend/dentalapp/userauth/models.py | 3,401 | Does the user have permissions to view the app `app_label`?
Does the user have a specific permission?
For the signal title: message: from: to: Simplest possible answer: Yes, always Simplest possible answer: Yes, always | 220 | en | 0.949819 |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.math_ops.matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
class MatMulTest(tf.test.TestCase):
  """Compares tf.matmul output against a np.matrix reference product."""
  def _testCpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
    """Check tf.matmul on CPU against NumPy for one (x, y) pair."""
    x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
    y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
    np_ans = x_mat * y_mat
    with self.test_session(use_gpu=False):
      tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
    self.assertAllClose(np_ans, tf_ans)
    self.assertAllEqual(np_ans.shape, tf_ans.shape)
  def _testGpuMatmul(self, x, y, transpose_x=False, transpose_y=False):
    """Check tf.matmul on GPU against NumPy for one (x, y) pair."""
    x_mat = np.matrix(x).T if transpose_x else np.matrix(x)
    y_mat = np.matrix(y).T if transpose_y else np.matrix(y)
    np_ans = x_mat * y_mat
    with self.test_session(use_gpu=True):
      tf_ans = tf.matmul(x, y, transpose_x, transpose_y).eval()
    self.assertAllClose(np_ans, tf_ans)
    self.assertAllEqual(np_ans.shape, tf_ans.shape)
  def _randMatrix(self, rows, cols, dtype):
    """Return a random (rows, cols) matrix with values in [1.0, 100.0).

    complex64 matrices get independent random real and imaginary parts.
    """
    if dtype is np.complex64:
      real = self._randMatrix(rows, cols, np.float32)
      imag = self._randMatrix(rows, cols, np.float32)
      # NOTE(review): np.complex was removed in NumPy 1.24; newer NumPy
      # would need the builtin complex(0, 1) or simply 1j here.
      return real + np.complex(0, 1) * imag
    else:
      return np.random.uniform(low=1.0, high=100.0, size=rows * cols).reshape(
          [rows, cols]).astype(dtype)
  # Basic test:
  # [ [1],
  # [2],
  # [3], * [1, 2]
  # [4] ]
  def testFloatBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.float32)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)
  def testDoubleBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.float64)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.float64)
    self._testCpuMatmul(x, y)
  def testInt32Basic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.int32)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.int32)
    self._testCpuMatmul(x, y)
  def testSComplexBasic(self):
    x = np.arange(1., 5.).reshape([4, 1]).astype(np.complex64)
    y = np.arange(1., 3.).reshape([1, 2]).astype(np.complex64)
    self._testCpuMatmul(x, y)
  # Tests testing random sized matrices.
  def testFloatRandom(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.float32)
      y = self._randMatrix(k, m, np.float32)
      self._testCpuMatmul(x, y)
      self._testGpuMatmul(x, y)
  def testDoubleRandom(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.float64)
      y = self._randMatrix(k, m, np.float64)
      self._testCpuMatmul(x, y)
  def testInt32Random(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.int32)
      y = self._randMatrix(k, m, np.int32)
      self._testCpuMatmul(x, y)
  def testSComplexRandom(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(n, k, np.complex64)
      y = self._randMatrix(k, m, np.complex64)
      self._testCpuMatmul(x, y)
  # Test the cases that transpose the matrices before multiplying.
  # NOTE(keveman): The cases where only one of the inputs is
  # transposed are covered by tf.matmul's gradient function.
  def testFloatRandomTransposeBoth(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(k, n, np.float32)
      y = self._randMatrix(m, k, np.float32)
      self._testCpuMatmul(x, y, True, True)
      self._testGpuMatmul(x, y, True, True)
  def testDoubleRandomTranposeBoth(self):
    for _ in range(10):
      n, k, m = np.random.randint(1, 100, size=3)
      x = self._randMatrix(k, n, np.float64)
      y = self._randMatrix(m, k, np.float64)
      self._testCpuMatmul(x, y, True, True)
  # Degenerate shapes: an empty output dimension or an empty inner
  # (contraction) dimension must still produce a correctly-shaped result.
  def testMatMul_OutEmpty_A(self):
    n, k, m = 0, 8, 3
    x = self._randMatrix(n, k, np.float32)
    y = self._randMatrix(k, m, np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)
  def testMatMul_OutEmpty_B(self):
    n, k, m = 3, 8, 0
    x = self._randMatrix(n, k, np.float32)
    y = self._randMatrix(k, m, np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)
  def testMatMul_Inputs_Empty(self):
    n, k, m = 3, 0, 4
    x = self._randMatrix(n, k, np.float32)
    y = self._randMatrix(k, m, np.float32)
    self._testCpuMatmul(x, y)
    self._testGpuMatmul(x, y)
# TODO(zhifengc): Figures out how to test matmul gradients on GPU.
class MatMulGradientTest(tf.test.TestCase):
  """Numerical gradient checks for tf.matmul (CPU only)."""
  def testGradientInput0(self):
    """Check the gradient with respect to the first matmul input."""
    with self.test_session(use_gpu=False):
      x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
                      dtype=tf.float64, name="x")
      y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
                      shape=[2, 4], dtype=tf.float64, name="y")
      m = tf.matmul(x, y, name="matmul")
      err = gc.ComputeGradientError(x, [3, 2], m, [3, 4])
    print("matmul input0 gradient err = ", err)
    self.assertLess(err, 1e-10)
  def testGradientInput1(self):
    """Check the gradient with respect to the second matmul input."""
    with self.test_session(use_gpu=False):
      x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2],
                      dtype=tf.float64, name="x")
      y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
                      shape=[2, 4], dtype=tf.float64, name="y")
      m = tf.matmul(x, y, name="matmul")
      err = gc.ComputeGradientError(y, [2, 4], m, [3, 4])
    print("matmul input1 gradient err = ", err)
    self.assertLess(err, 1e-10)
  def _VerifyInput0(self, transpose_a, transpose_b):
    """Gradient check w.r.t. input 0 under the given transpose flags."""
    shape_x = [3, 2]
    shape_y = [2, 4]
    # Transposed operands are fed in with reversed shapes so the product
    # shape stays [3, 4].
    if transpose_a:
      shape_x = list(reversed(shape_x))
    if transpose_b:
      shape_y = list(reversed(shape_y))
    with self.test_session(use_gpu=False):
      x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
                      dtype=tf.float64, name="x")
      y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
                      shape=shape_y, dtype=tf.float64, name="y")
      m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
      err = gc.ComputeGradientError(x, shape_x, m, [3, 4])
    print("matmul input0 gradient err = ", err)
    self.assertLess(err, 1e-10)
  def testGradientInput0WithTranspose(self):
    self._VerifyInput0(transpose_a=True, transpose_b=False)
    self._VerifyInput0(transpose_a=False, transpose_b=True)
    self._VerifyInput0(transpose_a=True, transpose_b=True)
  def _VerifyInput1(self, transpose_a, transpose_b):
    """Gradient check w.r.t. input 1 under the given transpose flags."""
    shape_x = [3, 2]
    shape_y = [2, 4]
    if transpose_a:
      shape_x = list(reversed(shape_x))
    if transpose_b:
      shape_y = list(reversed(shape_y))
    with self.test_session(use_gpu=False):
      x = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=shape_x,
                      dtype=tf.float64, name="x")
      y = tf.constant([1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7],
                      shape=shape_y, dtype=tf.float64, name="y")
      m = tf.matmul(x, y, transpose_a, transpose_b, name="matmul")
      err = gc.ComputeGradientError(y, shape_y, m, [3, 4])
    print("matmul input1 gradient err = ", err)
    self.assertLess(err, 1e-10)
  def testGradientInput1WithTranspose(self):
    self._VerifyInput1(transpose_a=True, transpose_b=False)
    self._VerifyInput1(transpose_a=False, transpose_b=True)
    self._VerifyInput1(transpose_a=True, transpose_b=True)
if __name__ == "__main__":
  # Run all the above test cases under the TensorFlow test runner.
  tf.test.main()
| tensorflow/python/kernel_tests/matmul_op_test.py | 8,392 | Tests for tensorflow.ops.math_ops.matmul.
Copyright 2015 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================================== Basic test: [ [1], [2], [3], * [1, 2] [4] ] Tests testing random sized matrices. Test the cases that transpose the matrices before multiplying. NOTE(keveman): The cases where only one of the inputs is transposed are covered by tf.matmul's gradient function. TODO(zhifengc): Figures out how to test matmul gradients on GPU. | 1,032 | en | 0.820769 |
#!/usr/bin/env python
# Copyright 2016 Vijayaditya Peddinti.
# 2016 Vimal Manohar
# Apache 2.0.
""" This script is similar to steps/nnet3/train_dnn.py but trains a
raw neural network instead of an acoustic model.
"""
from __future__ import print_function
from __future__ import division
import argparse
import logging
import pprint
import os
import sys
import traceback
sys.path.insert(0, 'steps')
import libs.nnet3.train.common as common_train_lib
import libs.common as common_lib
import libs.nnet3.train.frame_level_objf as train_lib
import libs.nnet3.report.log_parse as nnet3_log_parse
# Configure the shared 'libs' logger used by the nnet3 helper modules:
# INFO-level messages go to stderr with file/line/function context.
logger = logging.getLogger('libs')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s [%(pathname)s:%(lineno)s - "
                              "%(funcName)s - %(levelname)s ] %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.info('Starting raw DNN trainer (train_raw_dnn.py)')
def get_args():
    """ Get args from stdin.

    The common options are defined in the object
    libs.nnet3.train.common.CommonParser.parser.
    See steps/libs/nnet3/train/common.py

    Returns a two-element list [args, run_opts] after validation by
    process_args().
    """
    parser = argparse.ArgumentParser(
        description="""Trains a feed forward raw DNN (without transition model)
        using frame-level objectives like cross-entropy and mean-squared-error.
        DNNs include simple DNNs, TDNNs and CNNs.""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve',
        parents=[common_train_lib.CommonParser(include_chunk_context=False).parser])
    # egs extraction options
    parser.add_argument("--egs.frames-per-eg", type=int, dest='frames_per_eg',
                        default=8,
                        help="Number of output labels per example")
    parser.add_argument("--image.augmentation-opts", type=str,
                        dest='image_augmentation_opts',
                        default=None,
                        help="Image augmentation options")
    # trainer options
    parser.add_argument("--trainer.input-model", type=str,
                        dest='input_model', default=None,
                        action=common_lib.NullstrToNoneAction,
                        help="""If specified, this model is used as initial
                        raw model (0.raw in the script) instead of initializing
                        the model from xconfig. Configs dir is not expected to
                        exist and left/right context is computed from this
                        model.""")
    parser.add_argument("--trainer.prior-subset-size", type=int,
                        dest='prior_subset_size', default=20000,
                        help="Number of samples for computing priors")
    parser.add_argument("--trainer.num-jobs-compute-prior", type=int,
                        dest='num_jobs_compute_prior', default=10,
                        help="The prior computation jobs are single "
                        "threaded and run on the CPU")
    # Parameters for the optimization
    parser.add_argument("--trainer.optimization.minibatch-size",
                        type=str, dest='minibatch_size', default='512',
                        help="""Size of the minibatch used in SGD training
                        (argument to nnet3-merge-egs); may be a more general
                        rule as accepted by the --minibatch-size option of
                        nnet3-merge-egs; run that program without args to see
                        the format.""")
    parser.add_argument("--compute-average-posteriors",
                        type=str, action=common_lib.StrToBoolAction,
                        choices=["true", "false"], default=False,
                        help="""If true, then the average output of the
                        network is computed and dumped as post.final.vec""")
    # General options
    parser.add_argument("--nj", type=int, default=4,
                        help="Number of parallel jobs")
    parser.add_argument("--use-dense-targets", type=str,
                        action=common_lib.StrToBoolAction,
                        default=True, choices=["true", "false"],
                        help="Train neural network using dense targets")
    parser.add_argument("--feat-dir", type=str, required=False,
                        help="Directory with features used for training "
                        "the neural network.")
    parser.add_argument("--targets-scp", type=str, required=False,
                        help="""Targets for training neural network.
                        This is a kaldi-format SCP file of target matrices.
                        <utterance-id> <extended-filename-of-target-matrix>.
                        The target matrix's column dim must match
                        the neural network output dim, and the
                        row dim must match the number of output frames
                        i.e. after subsampling if "--frame-subsampling-factor"
                        option is passed to --egs.opts.""")
    parser.add_argument("--vad-egs", type=str,
                        action=common_lib.StrToBoolAction,
                        default=False, choices=["true", "false"],
                        help="Get nnet3 egs with vad applied on features.")
    parser.add_argument("--dir", type=str, required=True,
                        help="Directory to store the models and "
                        "all other files.")
    # Echo the invocation so it appears in the experiment logs.
    print(' '.join(sys.argv))
    print(sys.argv)
    args = parser.parse_args()
    # Validation and run-option derivation happen in process_args().
    [args, run_opts] = process_args(args)
    return [args, run_opts]
def process_args(args):
    """ Process the options got from get_args()

    Validates the parsed options, checks that the experiment directory (and
    either its configs/ subdirectory or --trainer.input-model) exists, and
    builds the RunOpts object holding the queue/GPU command options used by
    train().

    Returns:
        [args, run_opts]: the (possibly normalized) args Namespace plus a
        common_train_lib.RunOpts instance.

    Raises:
        Exception: on invalid option values or missing directories/files.
    """

    if args.frames_per_eg < 1:
        raise Exception("--egs.frames-per-eg should have a minimum value of 1")

    if not common_train_lib.validate_minibatch_size_str(args.minibatch_size):
        raise Exception("--trainer.optimization.minibatch-size has an invalid value")

    if (not os.path.exists(args.dir)):
        raise Exception("Directory specified with --dir={0} "
                        "does not exist.".format(args.dir))

    # Either a pre-built input model or a configs/ directory (the output of
    # make_configs.py) must be available to initialize the network from.
    if (not os.path.exists(args.dir + "/configs") and
            (args.input_model is None or not os.path.exists(args.input_model))):
        raise Exception("Either --trainer.input-model option should be supplied, "
                        "and exist; or the {0}/configs directory should exist."
                        "{0}/configs is the output of make_configs.py"
                        "".format(args.dir))

    # set the options corresponding to args.use_gpu
    run_opts = common_train_lib.RunOpts()
    # Normalize legacy "true"/"false" values onto the "yes"/"no" vocabulary
    # understood by the nnet3 binaries' --use-gpu option.
    if args.use_gpu in ["true", "false"]:
        args.use_gpu = ("yes" if args.use_gpu == "true" else "no")
    if args.use_gpu in ["yes", "wait"]:
        if not common_lib.check_if_cuda_compiled():
            logger.warning(
                """You are running with one thread but you have not compiled
                   for CUDA.  You may be running a setup optimized for GPUs.
                   If you have GPUs and have nvcc installed, go to src/ and do
                   ./configure; make""")

        # Request one GPU from the queue for training/combination/prior jobs.
        run_opts.train_queue_opt = "--gpu 1"
        run_opts.parallel_train_opts = "--use-gpu={}".format(args.use_gpu)
        run_opts.combine_gpu_opt = "--use-gpu={}".format(args.use_gpu)
        run_opts.combine_queue_opt = "--gpu 1"
        run_opts.prior_gpu_opt = "--use-gpu={}".format(args.use_gpu)
        run_opts.prior_queue_opt = "--gpu 1"
    else:
        logger.warning("Without using a GPU this will be very slow. "
                       "nnet3 does not yet support multiple threads.")

        # CPU-only variants: no queue GPU request, --use-gpu=no everywhere.
        run_opts.train_queue_opt = ""
        run_opts.parallel_train_opts = "--use-gpu=no"
        run_opts.combine_gpu_opt = "--use-gpu=no"
        run_opts.combine_queue_opt = ""
        run_opts.prior_gpu_opt = "--use-gpu=no"
        run_opts.prior_queue_opt = ""

    run_opts.command = args.command
    # Egs generation may use a different (e.g. CPU-only) command string.
    run_opts.egs_command = (args.egs_command
                            if args.egs_command is not None else
                            args.command)
    run_opts.num_jobs_compute_prior = args.num_jobs_compute_prior

    return [args, run_opts]
def train(args, run_opts):
    """ The main function for training.

    Drives the whole nnet3 raw-model training run: egs generation, LDA-like
    preconditioning, network initialization, the per-iteration SGD loop,
    final model combination, average-posterior computation, cleanup and
    reporting.  Stages (args.stage) allow resuming from a given point.

    Args:
        args: a Namespace object with the required parameters
            obtained from the function process_args()
        run_opts: RunOpts object obtained from the process_args()
    """

    arg_string = pprint.pformat(vars(args))
    logger.info("Arguments for the experiment\n{0}".format(arg_string))

    # Set some variables.
    # note, feat_dim gets set to 0 if args.feat_dir is unset (None).
    feat_dim = common_lib.get_feat_dim(args.feat_dir)
    ivector_dim = common_lib.get_ivector_dim(args.online_ivector_dir)
    ivector_id = common_lib.get_ivector_extractor_id(args.online_ivector_dir)

    config_dir = '{0}/configs'.format(args.dir)
    var_file = '{0}/vars'.format(config_dir)

    if args.input_model is None:
        config_dir = '{0}/configs'.format(args.dir)
        var_file = '{0}/vars'.format(config_dir)
        variables = common_train_lib.parse_generic_config_vars_file(var_file)
    else:
        # If args.input_model is specified, the model left and right contexts
        # are computed using input_model.
        variables = common_train_lib.get_input_model_info(args.input_model)

    # Set some variables.
    try:
        model_left_context = variables['model_left_context']
        model_right_context = variables['model_right_context']
    except KeyError as e:
        raise Exception("KeyError {0}: Variables need to be defined in "
                        "{1}".format(str(e), '{0}/configs'.format(args.dir)))

    left_context = model_left_context
    right_context = model_right_context

    # Initialize as "raw" nnet, prior to training the LDA-like preconditioning
    # matrix.  This first config just does any initial splicing that we do;
    # we do this as it's a convenient way to get the stats for the 'lda-like'
    # transform.
    if (args.stage <= -4) and os.path.exists(args.dir+"/configs/init.config") and \
       (args.input_model is None):
        logger.info("Initializing the network for computing the LDA stats")
        common_lib.execute_command(
            """{command} {dir}/log/nnet_init.log \
                    nnet3-init --srand=-2 {dir}/configs/init.config \
                    {dir}/init.raw""".format(command=run_opts.command,
                                             dir=args.dir))

    default_egs_dir = '{0}/egs'.format(args.dir)
    if (args.stage <= -3) and args.egs_dir is None:
        if args.targets_scp is None or args.feat_dir is None:
            raise Exception("If you don't supply the --egs-dir option, the "
                            "--targets-scp and --feat-dir options are required.")

        logger.info("Generating egs")

        if args.use_dense_targets:
            target_type = "dense"
            try:
                num_targets = int(variables['num_targets'])
                if (common_lib.get_feat_dim_from_scp(args.targets_scp)
                        != num_targets):
                    raise Exception("Mismatch between num-targets provided to "
                                    "script vs configs")
            except KeyError as e:
                # num_targets is optional for dense targets; -1 means "unknown".
                num_targets = -1
        else:
            target_type = "sparse"
            try:
                num_targets = int(variables['num_targets'])
            except KeyError as e:
                raise Exception("KeyError {0}: Variables need to be defined "
                                "in {1}".format(
                                    str(e), '{0}/configs'.format(args.dir)))

        train_lib.raw_model.generate_egs_using_targets(
            data=args.feat_dir, targets_scp=args.targets_scp,
            vad_egs=args.vad_egs,
            egs_dir=default_egs_dir,
            left_context=left_context, right_context=right_context,
            run_opts=run_opts,
            frames_per_eg_str=str(args.frames_per_eg),
            srand=args.srand,
            egs_opts=args.egs_opts,
            cmvn_opts=args.cmvn_opts,
            online_ivector_dir=args.online_ivector_dir,
            samples_per_iter=args.samples_per_iter,
            stage=args.egs_stage,
            target_type=target_type,
            num_targets=num_targets)

    if args.egs_dir is None:
        egs_dir = default_egs_dir
    else:
        egs_dir = args.egs_dir

    # Sanity-check the egs dir against the feature/i-vector setup and get
    # its context / archive-count metadata.
    [egs_left_context, egs_right_context,
     frames_per_eg_str, num_archives] = (
         common_train_lib.verify_egs_dir(egs_dir, feat_dim,
                                         ivector_dim, ivector_id,
                                         left_context, right_context))
    assert str(args.frames_per_eg) == frames_per_eg_str

    if args.num_jobs_final > num_archives:
        raise Exception('num_jobs_final cannot exceed the number of archives '
                        'in the egs directory')

    # copy the properties of the egs to dir for
    # use during decoding
    common_train_lib.copy_egs_properties_to_exp_dir(egs_dir, args.dir)

    if args.stage <= -2 and os.path.exists(args.dir+"/configs/init.config") and \
       (args.input_model is None):
        logger.info('Computing the preconditioning matrix for input features')

        train_lib.common.compute_preconditioning_matrix(
            args.dir, egs_dir, num_archives, run_opts,
            max_lda_jobs=args.max_lda_jobs,
            rand_prune=args.rand_prune)

    if args.stage <= -2:
        logger.info("Computing initial vector for FixedScaleComponent before"
                    " softmax, using priors^{prior_scale} and rescaling to"
                    " average 1".format(
                        prior_scale=args.presoftmax_prior_scale_power))

        # total num of frames per target already prepared
        # NOTE(review): this assumes a target_counts file exists next to the
        # targets scp even when --targets-scp was not supplied -- confirm.
        counts_path = os.path.dirname(args.targets_scp) + '/target_counts'

        common_train_lib.compute_presoftmax_prior_scale_targets(
            args.dir, counts_path,
            presoftmax_prior_scale_power=args.presoftmax_prior_scale_power)

    if args.stage <= -1:
        logger.info("Preparing the initial network.")
        common_train_lib.prepare_initial_network(args.dir, run_opts, args.srand, args.input_model)

    # set num_iters so that as close as possible, we process the data
    # $num_epochs times, i.e. $num_iters*$avg_num_jobs) ==
    # $num_epochs*$num_archives, where
    # avg_num_jobs=(num_jobs_initial+num_jobs_final)/2.
    num_archives_expanded = num_archives * args.frames_per_eg
    num_archives_to_process = int(args.num_epochs * num_archives_expanded)
    num_archives_processed = 0
    num_iters = int((num_archives_to_process * 2) / (args.num_jobs_initial + args.num_jobs_final))

    # If do_final_combination is True, compute the set of models_to_combine.
    # Otherwise, models_to_combine will be none.
    if args.do_final_combination:
        models_to_combine = common_train_lib.get_model_combine_iters(
            num_iters, args.num_epochs,
            num_archives_expanded, args.max_models_combine,
            args.num_jobs_final)
    else:
        models_to_combine = None

    # Exactly one of valid_diagnostic.{scp,egs} must exist; the .scp form
    # indicates multitask egs.
    if os.path.exists('{0}/valid_diagnostic.scp'.format(egs_dir)):
        if os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir)):
            raise Exception('both {0}/valid_diagnostic.egs and '
                            '{0}/valid_diagnostic.scp exist.'
                            'This script expects only one of them to exist.'
                            ''.format(egs_dir))
        use_multitask_egs = True
    else:
        if not os.path.exists('{0}/valid_diagnostic.egs'.format(egs_dir)):
            raise Exception('neither {0}/valid_diagnostic.egs nor '
                            '{0}/valid_diagnostic.scp exist.'
                            'This script expects one of them.'
                            ''.format(egs_dir))
        use_multitask_egs = False

    logger.info("Training will run for {0} epochs = "
                "{1} iterations".format(args.num_epochs, num_iters))

    # Main SGD training loop; each iteration trains current_num_jobs parallel
    # jobs and averages the result.
    for iter in range(num_iters):
        if (args.exit_stage is not None) and (iter == args.exit_stage):
            logger.info("Exiting early due to --exit-stage {0}".format(iter))
            return
        current_num_jobs = common_train_lib.get_current_num_jobs(
            iter, num_iters,
            args.num_jobs_initial, args.num_jobs_step, args.num_jobs_final)

        if args.stage <= iter:
            lrate = common_train_lib.get_learning_rate(iter, current_num_jobs,
                                                       num_iters,
                                                       num_archives_processed,
                                                       num_archives_to_process,
                                                       args.initial_effective_lrate,
                                                       args.final_effective_lrate)

            # Parameter shrinkage proportional to the learning rate; values
            # at or below 0.5 indicate a misconfigured proportional-shrink.
            shrinkage_value = 1.0 - (args.proportional_shrink * lrate)
            if shrinkage_value <= 0.5:
                raise Exception("proportional-shrink={0} is too large, it gives "
                                "shrink-value={1}".format(args.proportional_shrink,
                                                          shrinkage_value))

            percent = num_archives_processed * 100.0 / num_archives_to_process
            epoch = (num_archives_processed * args.num_epochs
                     / num_archives_to_process)
            shrink_info_str = ''
            if shrinkage_value != 1.0:
                shrink_info_str = 'shrink: {0:0.5f}'.format(shrinkage_value)
            logger.info("Iter: {0}/{1} Jobs: {2} "
                        "Epoch: {3:0.2f}/{4:0.1f} ({5:0.1f}% complete) "
                        "lr: {6:0.6f} {7}".format(iter, num_iters - 1,
                                                  current_num_jobs,
                                                  epoch, args.num_epochs,
                                                  percent,
                                                  lrate, shrink_info_str))

            train_lib.common.train_one_iteration(
                dir=args.dir,
                iter=iter,
                srand=args.srand,
                egs_dir=egs_dir,
                num_jobs=current_num_jobs,
                num_archives_processed=num_archives_processed,
                num_archives=num_archives,
                learning_rate=lrate,
                dropout_edit_string=common_train_lib.get_dropout_edit_string(
                    args.dropout_schedule,
                    float(num_archives_processed) / num_archives_to_process,
                    iter),
                train_opts=' '.join(args.train_opts),
                minibatch_size_str=args.minibatch_size,
                frames_per_eg=args.frames_per_eg,
                momentum=args.momentum,
                max_param_change=args.max_param_change,
                shrinkage_value=shrinkage_value,
                shuffle_buffer_size=args.shuffle_buffer_size,
                run_opts=run_opts,
                get_raw_nnet_from_am=False,
                image_augmentation_opts=args.image_augmentation_opts,
                use_multitask_egs=use_multitask_egs,
                backstitch_training_scale=args.backstitch_training_scale,
                backstitch_training_interval=args.backstitch_training_interval)

            if args.cleanup:
                # do a clean up everything but the last 2 models, under certain
                # conditions
                common_train_lib.remove_model(
                    args.dir, iter-2, num_iters, models_to_combine,
                    args.preserve_model_interval,
                    get_raw_nnet_from_am=False)

            if args.email is not None:
                reporting_iter_interval = num_iters * args.reporting_interval
                if iter % reporting_iter_interval == 0:
                    # lets do some reporting
                    [report, times, data] = (
                        nnet3_log_parse.generate_acc_logprob_report(args.dir))
                    message = report
                    subject = ("Update : Expt {dir} : "
                               "Iter {iter}".format(dir=args.dir, iter=iter))
                    common_lib.send_mail(message, subject, args.email)

        num_archives_processed = num_archives_processed + current_num_jobs

    if args.stage <= num_iters:
        if args.do_final_combination:
            logger.info("Doing final combination to produce final.raw")
            train_lib.common.combine_models(
                dir=args.dir, num_iters=num_iters,
                models_to_combine=models_to_combine, egs_dir=egs_dir,
                minibatch_size_str=args.minibatch_size, run_opts=run_opts,
                get_raw_nnet_from_am=False,
                max_objective_evaluations=args.max_objective_evaluations,
                use_multitask_egs=use_multitask_egs)
        else:
            # No combination: the last iteration's model becomes final.raw.
            common_lib.force_symlink("{0}.raw".format(num_iters),
                                     "{0}/final.raw".format(args.dir))

    if args.compute_average_posteriors and args.stage <= num_iters + 1:
        logger.info("Getting average posterior for output-node 'output'.")
        train_lib.common.compute_average_posterior(
            dir=args.dir, iter='final', egs_dir=egs_dir,
            num_archives=num_archives,
            prior_subset_size=args.prior_subset_size, run_opts=run_opts,
            get_raw_nnet_from_am=False)

    if args.cleanup:
        logger.info("Cleaning up the experiment directory "
                    "{0}".format(args.dir))
        remove_egs = args.remove_egs
        if args.egs_dir is not None:
            # this egs_dir was not created by this experiment so we will not
            # delete it
            remove_egs = False

        common_train_lib.clean_nnet_dir(
            nnet_dir=args.dir, num_iters=num_iters, egs_dir=egs_dir,
            preserve_model_interval=args.preserve_model_interval,
            remove_egs=remove_egs,
            get_raw_nnet_from_am=False)

    # do some reporting
    outputs_list = common_train_lib.get_outputs_list("{0}/final.raw".format(
        args.dir), get_raw_nnet_from_am=False)
    if 'output' in outputs_list:
        [report, times, data] = nnet3_log_parse.generate_acc_logprob_report(
            args.dir)
        if args.email is not None:
            common_lib.send_mail(report, "Update : Expt {0} : "
                                         "complete".format(args.dir),
                                 args.email)

        with open("{dir}/accuracy.{output_name}.report".format(dir=args.dir,
                                                               output_name="output"),
                  "w") as f:
            f.write(report)

    common_lib.execute_command("steps/info/nnet3_dir_info.pl "
                               "{0}".format(args.dir))
def main():
    """Entry point: parse options, run training, then wait for any
    background commands.

    Any failure -- including the KeyboardInterrupt a dying background
    thread raises in us -- triggers an optional notification e-mail and a
    nonzero exit.
    """
    [args, run_opts] = get_args()
    try:
        train(args, run_opts)
        common_lib.wait_for_background_commands()
    except BaseException as err:
        if args.email is not None:
            notice = ("Training session for experiment {dir} "
                      "died due to an error.".format(dir=args.dir))
            common_lib.send_mail(notice, notice, args.email)
        # Print a traceback only for genuine errors; a KeyboardInterrupt
        # just means a background thread died, so stay quiet about it.
        if not isinstance(err, KeyboardInterrupt):
            traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()
| egs/wsj/s5/steps/nnet3/train_raw_dnn.py | 23,833 | Get args from stdin.
The common options are defined in the object
libs.nnet3.train.common.CommonParser.parser.
See steps/libs/nnet3/train/common.py
Process the options got from get_args()
The main function for training.
Args:
args: a Namespace object with the required parameters
obtained from the function process_args()
run_opts: RunOpts object obtained from the process_args()
This script is similar to steps/nnet3/train_dnn.py but trains a
raw neural network instead of an acoustic model.
!/usr/bin/env python Copyright 2016 Vijayaditya Peddinti. 2016 Vimal Manohar Apache 2.0. egs extraction options trainer options Parameters for the optimization General options set the options corresponding to args.use_gpu Set some variables. note, feat_dim gets set to 0 if args.feat_dir is unset (None). If args.input_model is specified, the model left and right contexts are computed using input_model. Set some variables. Initialize as "raw" nnet, prior to training the LDA-like preconditioning matrix. This first config just does any initial splicing that we do; we do this as it's a convenient way to get the stats for the 'lda-like' transform. copy the properties of the egs to dir for use during decoding total num of frames per target already prepared set num_iters so that as close as possible, we process the data $num_epochs times, i.e. $num_iters*$avg_num_jobs) == $num_epochs*$num_archives, where avg_num_jobs=(num_jobs_initial+num_jobs_final)/2. If do_final_combination is True, compute the set of models_to_combine. Otherwise, models_to_combine will be none. do a clean up everything but the last 2 models, under certain conditions lets do some reporting this egs_dir was not created by this experiment so we will not delete it do some reporting look for BaseException so we catch KeyboardInterrupt, which is what we get when a background thread dies. | 1,900 | en | 0.770116 |
#!/usr/bin/env python
# Script for grading performance.
# Performance is graded by comparing the student's best wall-clock time
# (not speedup) after running the code in 64, 128, and 240 thread
# configurations for bfs, kbfs, pagerank, and graph decomp against the
# reference solution.
import re
import subprocess
import sys
# Input graphs used for performance timing.  The filename suffix is
# presumably the approximate edge count (30m = ~30 million) -- TODO confirm.
GRAPHS = [
    "/home/15-418/asst3_graphs/soc-pokec_30m.graph",
    "/home/15-418/asst3_graphs/soc-livejournal1_68m.graph",
    "/home/15-418/asst3_graphs/com-orkut_117m.graph",
    "/home/15-418/asst3_graphs/rmat_200m.graph"
]
# runGraph returns the student's score and total possible score for running 3
# algorithms on the given graph.
def runGraph(paraGraph, g):
    """Run `<paraGraph> grade <g> -r` and parse its "Total Grade" line.

    Each line of the grader's output is echoed so the user can see the
    detailed timing breakdown.  Returns (score, possibleScore) as floats,
    or (-1, -1) if the subprocess closes its stdout (EOF) before a grade
    line appears.
    """
    args = [
        paraGraph,
        "grade",
        g,
        "-r" # Don't run ref
    ]
    proc = subprocess.Popen(args, stdout=subprocess.PIPE)
    while True:
        line = proc.stdout.readline()
        if line != '':
            line = line.strip();
            # Print the line so the user can see the detailed timing breakdown.
            print line
            matchObj = re.match(r'Total Grade: ([\d\.]*)\/([\d\.]*)$', line, re.M)
            if matchObj:
                # NOTE(review): we return without proc.wait(), leaving the
                # grader process to finish on its own.
                return float(matchObj.group(1)), float(matchObj.group(2))
        else:
            # readline() returns '' only at EOF: the grader exited (or closed
            # stdout) without ever printing a total grade.
            break
    return -1, -1
def main():
if len(sys.argv) != 2:
print "Usage: ./grade_peformance.py <path to paraGraph>"
sys.exit(1)
paraGraph = sys.argv[1]
score = 0
possibleScore = 0
for g in GRAPHS:
print "Timing " + g
graphScore, pScore = runGraph(paraGraph, g)
if graphScore < 0:
sys.stderr.write("Error parsing total grade for graph " + g + "\n")
score += graphScore
possibleScore += pScore
print ""
print "**************************************************"
print "Final Performance Score: %f/%f" % (score, possibleScore)
print "**************************************************"
if __name__ == "__main__":
main()
| jobs/grade_performance.py | 1,864 | !/usr/bin/env python Script for grading performance. Performance is graded by comparing the student's best wall-clock time (not speedup) after running the code in 64, 128, and 240 thread configurations for bfs, kbfs, pagerank, and graph decomp against the reference solution. runGraph returns the student's score and total possible score for runinng 3 algorithms on the given graph. Don't run ref Print the line so the user can see the detailed timing breakdown. | 462 | en | 0.874077 |
"""
Test module for Strava API reader base module
"""
import os
import json
import pytest
from pandas import DataFrame, Timedelta, Timestamp
from runpandas import read_strava
from runpandas import types
from stravalib.protocol import ApiV3
from stravalib.client import Client
from stravalib.model import Stream
# Mark every test in this module as part of the "stable" suite.
pytestmark = pytest.mark.stable
class MockResponse:
    """Minimal stand-in for a requests-style response.

    Loads a JSON fixture once at construction time and serves the parsed
    payload through ``json()``, mirroring the interface stravalib expects.
    """

    def __init__(self, json_file):
        with open(json_file) as handler:
            self.json_data = json.load(handler)

    def json(self):
        """Return the parsed fixture payload."""
        return self.json_data
def mock_get_activity_streams(streams_file):
    """Build a ``{stream_type: Stream}`` mapping from a canned JSON fixture.

    Stand-in for ``stravalib.client.Client.get_activity_streams``: each
    top-level key of the fixture is injected as the stream's ``type`` field
    before deserialization.  (Mocking the Strava request with a JSON file is
    admittedly not the prettiest approach, but it keeps the tests offline.)
    """
    raw_streams = MockResponse(streams_file).json()
    streams = {}
    for stream_type, payload in raw_streams.items():
        payload["type"] = stream_type
        deserialized = Stream.deserialize(payload)
        streams[deserialized.type] = deserialized
    return streams
@pytest.fixture
def dirpath(datapath):
    # Base directory of the test fixture data (<tests>/io/data).
    return datapath("io", "data")
@pytest.fixture
def strava_activity(dirpath, mocker):
    """runpandas Activity built entirely from canned Strava JSON fixtures."""
    activity_file = os.path.join(dirpath, "strava", "activity.json")
    streams_file = os.path.join(dirpath, "strava", "streams.json")
    mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_file).json())
    mocker.patch.object(
        Client,
        "get_activity_streams",
        return_value=mock_get_activity_streams(streams_file),
    )
    # Tokens are irrelevant: every stravalib call above is mocked.
    return read_strava(
        activity_id=4437021783,
        access_token=None,
        refresh_token=None,
        to_df=False,
    )
@pytest.fixture
def strava_dataframe(dirpath, mocker):
    """Plain pandas DataFrame built from the same canned Strava fixtures."""
    activity_file = os.path.join(dirpath, "strava", "activity.json")
    streams_file = os.path.join(dirpath, "strava", "streams.json")
    mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_file).json())
    mocker.patch.object(
        Client,
        "get_activity_streams",
        return_value=mock_get_activity_streams(streams_file),
    )
    # Tokens are irrelevant: every stravalib call above is mocked.
    return read_strava(
        activity_id=4437021783,
        access_token=None,
        refresh_token=None,
        to_df=True,
    )
def test_read_strava_basic_dataframe(dirpath, mocker):
    """read_strava(to_df=True) returns a DataFrame carrying the raw stream
    columns with their original Strava names."""
    activity_file = os.path.join(dirpath, "strava", "activity.json")
    streams_file = os.path.join(dirpath, "strava", "streams.json")
    mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_file).json())
    mocker.patch.object(
        Client,
        "get_activity_streams",
        return_value=mock_get_activity_streams(streams_file),
    )
    # Tokens are irrelevant: every stravalib call above is mocked.
    frame = read_strava(
        activity_id=4437021783,
        access_token=None,
        refresh_token=None,
        to_df=True,
    )

    assert isinstance(frame, DataFrame)
    expected_columns = {
        "latitude",
        "longitude",
        "altitude",
        "distance",
        "velocity_smooth",
        "heartrate",
        "cadence",
        "moving",
        "grade_smooth",
    }
    assert expected_columns.issubset(frame.columns.to_list())
    assert frame.size == 15723
def test_read_strava_activity(dirpath, mocker):
    """read_strava(to_df=False) yields a runpandas Activity whose columns
    use the shortened runpandas names."""
    activity_file = os.path.join(dirpath, "strava", "activity.json")
    streams_file = os.path.join(dirpath, "strava", "streams.json")
    mocker.patch.object(ApiV3, "get", return_value=MockResponse(activity_file).json())
    mocker.patch.object(
        Client,
        "get_activity_streams",
        return_value=mock_get_activity_streams(streams_file),
    )
    # Tokens are irrelevant: every stravalib call above is mocked.
    activity = read_strava(
        activity_id=4437021783,
        access_token=None,
        refresh_token=None,
        to_df=False,
    )

    assert isinstance(activity, types.Activity)
    expected_columns = {
        "alt",
        "cad",
        "dist",
        "hr",
        "lon",
        "lat",
        "moving",
        "velocity_smooth",
        "grade_smooth",
    }
    assert expected_columns.issubset(activity.columns.to_list())
    assert activity.size == 15723
# Parametrization table for test_strava_values: tuples of
# (activity fixture, column name, positional index, expected value).
# Each column is spot-checked at the first (0) and last (-1) sample; the
# Activity and DataFrame fixtures expose the same data under different
# column names (e.g. "alt" vs "altitude").
test_data = [
    (pytest.lazy_fixture("strava_activity"), "alt", 0, 6.4),
    (pytest.lazy_fixture("strava_activity"), "alt", -1, 6.6),
    (pytest.lazy_fixture("strava_activity"), "cad", 0, 79),
    (pytest.lazy_fixture("strava_activity"), "cad", -1, 86),
    (pytest.lazy_fixture("strava_activity"), "dist", 0, 0.0),
    (pytest.lazy_fixture("strava_activity"), "dist", -1, 12019.7),
    (pytest.lazy_fixture("strava_activity"), "hr", 0, 111),
    (pytest.lazy_fixture("strava_activity"), "hr", -1, 160),
    (pytest.lazy_fixture("strava_activity"), "lat", 0, -8.016994),
    (pytest.lazy_fixture("strava_activity"), "lon", 0, -34.847439),
    (pytest.lazy_fixture("strava_activity"), "lat", -1, -8.016821),
    (pytest.lazy_fixture("strava_activity"), "lon", -1, -34.84716),
    (pytest.lazy_fixture("strava_activity"), "moving", 0, False),
    (pytest.lazy_fixture("strava_activity"), "moving", -1, True),
    (pytest.lazy_fixture("strava_activity"), "velocity_smooth", 0, 0.0),
    (pytest.lazy_fixture("strava_activity"), "velocity_smooth", -1, 3.2),
    (pytest.lazy_fixture("strava_activity"), "grade_smooth", 0, 1.1),
    (pytest.lazy_fixture("strava_activity"), "grade_smooth", -1, -0.6),
    (pytest.lazy_fixture("strava_dataframe"), "altitude", 0, 6.4),
    (pytest.lazy_fixture("strava_dataframe"), "altitude", -1, 6.6),
    (pytest.lazy_fixture("strava_dataframe"), "cadence", 0, 79),
    (pytest.lazy_fixture("strava_dataframe"), "cadence", -1, 86),
    (pytest.lazy_fixture("strava_dataframe"), "distance", 0, 0.0),
    (pytest.lazy_fixture("strava_dataframe"), "distance", -1, 12019.7),
    (pytest.lazy_fixture("strava_dataframe"), "heartrate", 0, 111),
    (pytest.lazy_fixture("strava_dataframe"), "heartrate", -1, 160),
    (pytest.lazy_fixture("strava_dataframe"), "latitude", 0, -8.016994),
    (pytest.lazy_fixture("strava_dataframe"), "longitude", 0, -34.847439),
    (pytest.lazy_fixture("strava_dataframe"), "latitude", -1, -8.016821),
    (pytest.lazy_fixture("strava_dataframe"), "longitude", -1, -34.84716),
    (pytest.lazy_fixture("strava_dataframe"), "moving", 0, False),
    (pytest.lazy_fixture("strava_dataframe"), "moving", -1, True),
    (pytest.lazy_fixture("strava_dataframe"), "velocity_smooth", 0, 0.0),
    (pytest.lazy_fixture("strava_dataframe"), "velocity_smooth", -1, 3.2),
    (pytest.lazy_fixture("strava_dataframe"), "grade_smooth", 0, 1.1),
    (pytest.lazy_fixture("strava_dataframe"), "grade_smooth", -1, -0.6),
]
@pytest.mark.parametrize("activity,column,index,expected", test_data)
def test_strava_values(activity, column, index, expected):
    # Spot-check one sample value, then the recording extent, which is the
    # same for both the Activity and the plain-DataFrame fixtures.
    assert activity[column].iloc[index] == expected
    assert activity.index[-1] == Timedelta("0 days 01:25:45")
    # Only the Activity type carries the session start timestamp.
    if isinstance(activity, types.Activity):
        assert activity.start == Timestamp("2020-12-06 06:36:27")
| runpandas/tests/test_strava_parser.py | 7,371 | @TODO: I needed to mock the behavior the `stravalib.client.get_activity_streams`,
it isn't the best alternative for mock the request from strava by passing a json file.
Test module for Strava API reader base module
we don't use access token here, since we will mock the stravalib json response we don't use access token here, since we will mock the stravalib json response we don't use access token here, since we will mock the stravalib json response we don't use access token here, since we will mock the stravalib json response | 532 | en | 0.836659 |
from flask import g
from app.comm.CompositeOperate import CompositeOperate
from app.comm.SqlExecute import SqlExecute
from app.module_config import table_module_map
class CommentController(CompositeOperate):
    """Comment endpoint: decorates GET results with like status and nested
    replies, and stamps POSTed comments with the creating user's id."""

    def __init__(self, module):
        super().__init__(module)

    def after_deal_get(self):
        """Post-process a GET: attach the current user's like status to every
        comment and nest replies under their root comments."""
        comments = g.result.get("data")

        # Fetch the requesting user's like records, if authenticated.
        user_id = g.flask_httpauth_user.get('id', None) if g.flask_httpauth_user else None
        likes_by_comment = dict()
        if user_id is not None:
            sql_query = table_module_map['bloglikelog'].sql_query_default
            # NOTE(review): user_id is interpolated directly into the SQL
            # string; it comes from the auth layer, but a parameterized query
            # would be safer.
            sql_query = f'{sql_query} where bll_userid={user_id}'
            user_likes = SqlExecute.query_sql_data(sql_query)
            likes_by_comment = {like['bll_blogcommentid']: like['bll_status'] for like in user_likes}

        # Flag every comment with the user's like status, keeping only root
        # comments (those with no bc_commentupid) for the top-level list.
        root_comments = []
        for comment in comments:
            comment['is_like'] = likes_by_comment.get(comment['id']) or 0
            if not comment['bc_commentupid']:
                root_comments.append(comment)

        # Attach each root comment's replies.
        for root in root_comments:
            root['sub'] = [sub for sub in comments if sub['bc_commentupid'] == root['id']]

        g.result['data'] = root_comments

    def before_deal_post(self):
        # Stamp the new comment with the authenticated user's id.
        g.json_data["data"]["bc_createuid"] = g.flask_httpauth_user.get('id')
| blog_server/app/api/general/CommentController.py | 1,600 | 获取用户点赞记录 点赞记录 所有评论根节点,添加用户是否点赞标志 new_comments = [comment for comment in comments if not comment['bc_commentupid']] 获取每个评论的回复 | 124 | zh | 0.560331 |
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
"""
from decimal import Decimal
import http.client
import subprocess
import sys
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
assert_raises,
assert_raises_rpc_error,
assert_is_hex_string,
assert_is_hash_string,
)
from test_framework.blocktools import (
create_block,
create_coinbase,
)
from test_framework.messages import (
msg_block,
)
from test_framework.mininode import (
P2PInterface,
network_thread_start,
)
class BlockchainTest(BitcoinTestFramework):
    def set_test_params(self):
        # Single node on a fresh chain; route the node's stderr to our stdout.
        self.num_nodes = 1
        self.setup_clean_chain = True
        self.stderr = sys.stdout
        # -txindex=0 because pruning is incompatible with txindex (see
        # run_test); -stopatheight=207 is presumably consumed by
        # _test_stopatheight -- that method is defined further below.
        self.extra_args = [['-stopatheight=207', '-prune=1', '-txindex=0']]
    def run_test(self):
        # Have to prepare the chain manually here.
        # txindex=1 by default in Xazab which is incompatible with pruning.
        self.set_genesis_mocktime()
        for i in range(200):
            # Advance mocktime by 156s per block; _test_getchaintxstats
            # relies on this fixed inter-block interval when checking txrate.
            self.bump_mocktime(156)
            self.nodes[0].generate(1)

        # Actual tests
        self._test_getblockchaininfo()
        self._test_getchaintxstats()
        self._test_gettxoutsetinfo()
        self._test_getblockheader()
        self._test_getdifficulty()
        self._test_getnetworkhashps()
        self._test_stopatheight()
        self._test_waitforblockheight()
        # Full verification pass (checklevel=4; nblocks=0, i.e. all blocks
        # per the verifychain RPC).
        assert self.nodes[0].verifychain(4, 0)
    def _test_getblockchaininfo(self):
        """Check getblockchaininfo's key set and pruning fields under manual
        pruning, no pruning, and automatic pruning (prune=550)."""
        self.log.info("Test getblockchaininfo")

        # Keys expected with pruning disabled, listed alphabetically so the
        # sorted() comparisons below can use them directly.
        keys = [
            'bestblockhash',
            'bip9_softforks',
            'blocks',
            'chain',
            'chainwork',
            'difficulty',
            'headers',
            'initialblockdownload',
            'mediantime',
            'pruned',
            'size_on_disk',
            'softforks',
            'verificationprogress',
            'warnings',
        ]
        res = self.nodes[0].getblockchaininfo()

        # result should have these additional pruning keys if manual pruning is enabled
        assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning'] + keys))

        # size_on_disk should be > 0
        assert_greater_than(res['size_on_disk'], 0)

        # pruneheight should be greater or equal to 0
        assert_greater_than_or_equal(res['pruneheight'], 0)

        # check other pruning fields given that prune=1
        assert res['pruned']
        assert not res['automatic_pruning']

        # Restart without pruning: only the base key set should appear.
        self.restart_node(0, ['-stopatheight=207', '-txindex=0'])
        res = self.nodes[0].getblockchaininfo()
        # should have exact keys
        assert_equal(sorted(res.keys()), keys)

        # Restart with automatic pruning at a 550 MiB target.
        self.restart_node(0, ['-stopatheight=207', '-prune=550', '-txindex=0'])
        res = self.nodes[0].getblockchaininfo()
        # result should have these additional pruning keys if prune=550
        assert_equal(sorted(res.keys()), sorted(['pruneheight', 'automatic_pruning', 'prune_target_size'] + keys))

        # check related fields
        assert res['pruned']
        assert_equal(res['pruneheight'], 0)
        assert res['automatic_pruning']
        # 550 MiB = 550 * 1024 * 1024 bytes.
        assert_equal(res['prune_target_size'], 576716800)
        assert_greater_than(res['size_on_disk'], 0)
    def _test_getchaintxstats(self):
        """Exercise getchaintxstats: argument validation, then window stats
        over the 200-block chain generated in run_test."""
        self.log.info("Test getchaintxstats")

        # Test `getchaintxstats` invalid extra parameters
        assert_raises_rpc_error(-1, 'getchaintxstats', self.nodes[0].getchaintxstats, 0, '', 0)

        # Test `getchaintxstats` invalid `nblocks`
        assert_raises_rpc_error(-1, "JSON value is not an integer as expected", self.nodes[0].getchaintxstats, '')
        assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, -1)
        assert_raises_rpc_error(-8, "Invalid block count: should be between 0 and the block's height - 1", self.nodes[0].getchaintxstats, self.nodes[0].getblockcount())

        # Test `getchaintxstats` invalid `blockhash`
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getchaintxstats, blockhash=0)
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getchaintxstats, blockhash='0')

        # A temporarily invalidated block is rejected as not in the main
        # chain; reconsider it afterwards to restore the chain.
        blockhash = self.nodes[0].getblockhash(200)
        self.nodes[0].invalidateblock(blockhash)
        assert_raises_rpc_error(-8, "Block is not in main chain", self.nodes[0].getchaintxstats, blockhash=blockhash)
        self.nodes[0].reconsiderblock(blockhash)

        chaintxstats = self.nodes[0].getchaintxstats(1)
        # 200 txs plus genesis tx
        assert_equal(chaintxstats['txcount'], 201)
        # tx rate should be 1 per ~2.6 minutes (156 seconds), or 1/156
        # we have to round because of binary math
        assert_equal(round(chaintxstats['txrate'] * 156, 10), Decimal(1))

        b1_hash = self.nodes[0].getblockhash(1)
        b1 = self.nodes[0].getblock(b1_hash)
        b200_hash = self.nodes[0].getblockhash(200)
        b200 = self.nodes[0].getblock(b200_hash)
        time_diff = b200['mediantime'] - b1['mediantime']

        # Default call: stats over the whole window ending at the tip.
        chaintxstats = self.nodes[0].getchaintxstats()
        assert_equal(chaintxstats['time'], b200['time'])
        assert_equal(chaintxstats['txcount'], 201)
        assert_equal(chaintxstats['window_final_block_hash'], b200_hash)
        assert_equal(chaintxstats['window_block_count'], 199)
        assert_equal(chaintxstats['window_tx_count'], 199)
        assert_equal(chaintxstats['window_interval'], time_diff)
        assert_equal(round(chaintxstats['txrate'] * time_diff, 10), Decimal(199))

        # With blockhash at height 1 the window is empty, so the rate-related
        # fields are omitted entirely.
        chaintxstats = self.nodes[0].getchaintxstats(blockhash=b1_hash)
        assert_equal(chaintxstats['time'], b1['time'])
        assert_equal(chaintxstats['txcount'], 2)
        assert_equal(chaintxstats['window_final_block_hash'], b1_hash)
        assert_equal(chaintxstats['window_block_count'], 0)
        assert('window_tx_count' not in chaintxstats)
        assert('window_interval' not in chaintxstats)
        assert('txrate' not in chaintxstats)
def _test_gettxoutsetinfo(self):
    """Sanity-check gettxoutsetinfo on the prepared 200-block regtest chain,
    including behaviour across invalidateblock/reconsiderblock."""
    node = self.nodes[0]
    res = node.gettxoutsetinfo()

    assert_equal(res['total_amount'], Decimal('98214.28571450'))
    assert_equal(res['transactions'], 200)
    assert_equal(res['height'], 200)
    assert_equal(res['txouts'], 200)
    # (stray trailing comma removed: it turned this statement into a tuple)
    assert_equal(res['bogosize'], 17000)
    size = res['disk_size']
    assert size > 6400
    assert size < 64000
    assert_equal(len(res['bestblock']), 64)
    assert_equal(len(res['hash_serialized_2']), 64)

    self.log.info("Test that gettxoutsetinfo() works for blockchain with just the genesis block")
    b1hash = node.getblockhash(1)
    node.invalidateblock(b1hash)

    res2 = node.gettxoutsetinfo()
    assert_equal(res2['transactions'], 0)
    assert_equal(res2['total_amount'], Decimal('0'))
    assert_equal(res2['height'], 0)
    assert_equal(res2['txouts'], 0)
    assert_equal(res2['bogosize'], 0)
    assert_equal(res2['bestblock'], node.getblockhash(0))
    assert_equal(len(res2['hash_serialized_2']), 64)

    self.log.info("Test that gettxoutsetinfo() returns the same result after invalidate/reconsider block")
    node.reconsiderblock(b1hash)

    res3 = node.gettxoutsetinfo()
    # The reconsidered chain must reproduce the original UTXO-set stats.
    for key in ('total_amount', 'transactions', 'height', 'txouts',
                'bogosize', 'bestblock', 'hash_serialized_2'):
        assert_equal(res[key], res3[key])
def _test_getblockheader(self):
    """Validate every field returned by getblockheader for the chain tip."""
    node = self.nodes[0]

    # Unknown block hashes must be rejected.
    assert_raises_rpc_error(-5, "Block not found", node.getblockheader, "nonsense")

    tip_hash = node.getbestblockhash()
    prev_hash = node.getblockhash(199)
    header = node.getblockheader(tip_hash)

    # Chain position of the tip.
    assert_equal(header['hash'], tip_hash)
    assert_equal(header['height'], 200)
    assert_equal(header['confirmations'], 1)
    assert_equal(header['previousblockhash'], prev_hash)
    assert_equal(header['nTx'], 1)

    # Hash- and hex-valued fields.
    assert_is_hex_string(header['chainwork'])
    assert_is_hash_string(header['hash'])
    assert_is_hash_string(header['previousblockhash'])
    assert_is_hash_string(header['merkleroot'])
    assert_is_hash_string(header['bits'], length=None)

    # Scalar field types.
    assert isinstance(header['time'], int)
    assert isinstance(header['mediantime'], int)
    assert isinstance(header['nonce'], int)
    assert isinstance(header['version'], int)
    assert isinstance(int(header['versionHex'], 16), int)
    assert isinstance(header['difficulty'], Decimal)
def _test_getdifficulty(self):
    """getdifficulty must report roughly 1/2**31 on this regtest chain."""
    node_difficulty = self.nodes[0].getdifficulty()
    # 1 hash in 2 should be valid, so difficulty is ~1/2**31; scale it back
    # up and compare against 1 with a tolerance that absorbs the
    # binary => decimal => binary float round-trip error.
    scaled = node_difficulty * 2 ** 31
    assert abs(scaled - 1) < 0.0001
def _test_getnetworkhashps(self):
    """getnetworkhashps must report ~1/78 hashes per second."""
    rate = self.nodes[0].getnetworkhashps()
    # Expected: 2 hashes every 2.6 minutes (156 seconds), i.e. 1/78 per
    # second; scale up and compare against 1 with a float tolerance.
    assert abs(rate * 78 - 1) < 0.0001
def _test_stopatheight(self):
    """Verify -stopatheight shuts the node down once the chain reaches the
    configured height (207 here)."""
    assert_equal(self.nodes[0].getblockcount(), 200)
    self.nodes[0].generate(6)
    assert_equal(self.nodes[0].getblockcount(), 206)
    self.log.debug('Node should not stop at this height')
    # At height 206 the node must still be running: waiting on its process
    # for 3 seconds is expected to time out.
    assert_raises(subprocess.TimeoutExpired, lambda: self.nodes[0].process.wait(timeout=3))
    try:
        # Mining the 207th block triggers the shutdown; the RPC response may
        # never arrive because the node can exit before replying.
        self.nodes[0].generate(1)
    except (ConnectionError, http.client.BadStatusLine):
        pass  # The node already shut down before response
    self.log.debug('Node should stop at this height...')
    self.nodes[0].wait_until_stopped()
    # Restart and confirm the node stopped exactly at height 207.
    self.start_node(0, ['-txindex=0'])
    assert_equal(self.nodes[0].getblockcount(), 207)
def _test_waitforblockheight(self):
    """Check waitforblockheight keeps reporting the active tip even after a
    stale fork below the current height is created and invalidated."""
    self.log.info("Test waitforblockheight")
    node = self.nodes[0]
    # Start a P2P connection since we'll need to create some blocks.
    node.add_p2p_connection(P2PInterface())
    network_thread_start()
    node.p2p.wait_for_verack()
    current_height = node.getblock(node.getbestblockhash())['height']
    # Create a fork somewhere below our current height, invalidate the tip
    # of that fork, and then ensure that waitforblockheight still
    # works as expected.
    #
    # (Previously this was broken based on setting
    # `rpc/blockchain.cpp:latestblock` incorrectly.)
    #
    b20hash = node.getblockhash(20)
    b20 = node.getblock(b20hash)

    def solve_and_send_block(prevhash, height, time):
        # Mine a block on top of prevhash and deliver it over P2P, waiting
        # for the node to process it before returning.
        b = create_block(prevhash, create_coinbase(height), time)
        b.solve()
        node.p2p.send_message(msg_block(b))
        node.p2p.sync_with_ping()
        return b

    # Build a two-block fork off block 20, then invalidate its tip.
    b21f = solve_and_send_block(int(b20hash, 16), 21, b20['time'] + 1)
    b22f = solve_and_send_block(b21f.sha256, 22, b21f.nTime + 1)
    node.invalidateblock(b22f.hash)

    def assert_waitforheight(height, timeout=2):
        # Whatever height we wait for, the reported tip must stay at the
        # active chain's height (the fork must not affect it).
        assert_equal(
            node.waitforblockheight(height, timeout)['height'],
            current_height)

    assert_waitforheight(0)
    assert_waitforheight(current_height - 1)
    assert_waitforheight(current_height)
    assert_waitforheight(current_height + 1)
if __name__ == '__main__':
    # Standard functional-test entry point: run the whole test class.
    BlockchainTest().main()
| test/functional/rpc_blockchain.py | 12,336 | Test RPCs related to blockchainstate.
Test the following RPCs:
- getblockchaininfo
- gettxoutsetinfo
- getdifficulty
- getbestblockhash
- getblockhash
- getblockheader
- getchaintxstats
- getnetworkhashps
- verifychain
Tests correspond to code in rpc/blockchain.cpp.
!/usr/bin/env python3 Copyright (c) 2014-2016 The Bitcoin Core developers Distributed under the MIT software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. Have to prepare the chain manually here. txindex=1 by default in Xazab which is incompatible with pruning. Actual tests result should have these additional pruning keys if manual pruning is enabled size_on_disk should be > 0 pruneheight should be greater or equal to 0 check other pruning fields given that prune=1 should have exact keys result should have these additional pruning keys if prune=550 check related fields Test `getchaintxstats` invalid extra parameters Test `getchaintxstats` invalid `nblocks` Test `getchaintxstats` invalid `blockhash` 200 txs plus genesis tx tx rate should be 1 per ~2.6 minutes (156 seconds), or 1/156 we have to round because of binary math 1 hash in 2 should be valid, so difficulty should be 1/2**31 binary => decimal => binary math is why we do this check This should be 2 hashes every 2.6 minutes (156 seconds) or 1/78 The node already shut down before response Start a P2P connection since we'll need to create some blocks. Create a fork somewhere below our current height, invalidate the tip of that fork, and then ensure that waitforblockheight still works as expected. (Previously this was broken based on setting `rpc/blockchain.cpp:latestblock` incorrectly.) | 1,718 | en | 0.759356 |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.plugins import DeepSpeedStrategy
from pytorch_lightning.utilities.deepspeed import convert_zero_checkpoint_to_fp32_state_dict
from tests.helpers.boring_model import BoringModel
from tests.helpers.runif import RunIf
@RunIf(min_gpus=2, deepspeed=True, standalone=True)
def test_deepspeed_collate_checkpoint(tmpdir):
    """Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file."""
    model = BoringModel()
    trainer = Trainer(
        default_root_dir=tmpdir, strategy=DeepSpeedStrategy(stage=3), gpus=2, fast_dev_run=True, precision=16
    )
    trainer.fit(model)
    checkpoint_path = os.path.join(tmpdir, "model.pt")
    # All ranks must agree on the checkpoint path, so rank 0 broadcasts it.
    checkpoint_path = trainer.strategy.broadcast(checkpoint_path)
    trainer.save_checkpoint(checkpoint_path)
    # Wait until every rank has finished writing its shard before collating.
    trainer.strategy.barrier()
    if trainer.is_global_zero:
        # ensure function call works
        output_path = os.path.join(tmpdir, "single_model.pt")
        convert_zero_checkpoint_to_fp32_state_dict(checkpoint_path, output_path)
        _assert_checkpoint_equal(model, output_path)
def _assert_checkpoint_equal(model, output_path):
    """Assert the collated fp32 checkpoint at *output_path* matches *model*.

    NOTE: relies on both state dicts listing parameters in the same order.
    """
    assert os.path.exists(output_path)
    single_output = torch.load(output_path)
    state_dict = model.state_dict()
    for orig_param, saved_model_param in zip(state_dict.values(), single_output["state_dict"].values()):
        if model.dtype == torch.half:
            # the collated checkpoint stores fp32 weights; cast them to half
            # so they compare equal against the fp16 model parameters
            # (the original comment claimed the opposite direction)
            saved_model_param = saved_model_param.half()
        assert torch.equal(orig_param.cpu(), saved_model_param)
| tests/utilities/test_deepspeed_collate_checkpoint.py | 2,267 | Test to ensure that with DeepSpeed Stage 3 we can collate the sharded checkpoints into a single file.
Copyright The PyTorch Lightning team. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ensure function call works moved model to float32 for comparison with single fp32 saved weights | 759 | en | 0.889291 |
"""meiduo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from . import views
from django.urls import re_path
# User-app routes. Order matters: more specific address routes are listed
# before the parameterised ones.
urlpatterns = [
    # Registration duplicate checks for username / mobile number.
    re_path(r'^usernames/(?P<username>\w{5,20})/count/$',views.UsernameCountView.as_view()),
    re_path(r'^mobiles/(?P<mobile>1[3-9]\d{9})/count/$',views.MobileCountView.as_view()),
    # Account lifecycle and profile.
    re_path(r'^register/$',views.RegisterView.as_view()),
    re_path(r'^login/$',views.LoginView.as_view()),
    re_path(r'^logout/$',views.LogoutView.as_view()),
    re_path(r'^info/$',views.UserInfoView.as_view()),
    # E-mail binding and verification.
    re_path(r'^emails/$', views.EmailView.as_view()),
    re_path(r'^emails/verification/$', views.VerifyEmailView.as_view()),
    # Shipping-address CRUD.
    re_path(r'^addresses/create/$', views.CreateAddressView.as_view()),
    re_path(r'^addresses/$', views.AddressView.as_view()),
    re_path(r'^addresses/(?P<address_id>\d+)/$', views.UpdateDestroyAddressView.as_view()),
    re_path(r'^addresses/(?P<address_id>\d+)/default/$', views.DefaultAddressView.as_view()),
    re_path(r'^addresses/(?P<address_id>\d+)/title/$', views.UpdateTitleAddressView.as_view()),
    # Password change and browse history.
    re_path(r'^password/$', views.ChangePasswordView.as_view()),
    re_path(r'^browse_histories/$', views.UserBrowseHistory.as_view()),
]
| meiduo/meiduo/apps/users/urls.py | 1,779 | meiduo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) | 622 | en | 0.608713 |
import pygame
# TODO: make these configurable
# Keyboard-to-controller mapping (pygame keycodes).

# D-pad.
c_UP = pygame.K_UP
c_DOWN = pygame.K_DOWN
c_LEFT = pygame.K_LEFT
c_RIGHT = pygame.K_RIGHT
# Page / navigation controls.
c_PREV = pygame.K_LEFTBRACKET
c_NEXT = pygame.K_RIGHTBRACKET
c_START = pygame.K_RETURN
# Number row and editing keys.
c_1 = pygame.K_1
c_2 = pygame.K_2
c_3 = pygame.K_3
c_4 = pygame.K_4
c_5 = pygame.K_5
c_6 = pygame.K_6
c_7 = pygame.K_7
c_8 = pygame.K_8
c_9 = pygame.K_9
c_0 = pygame.K_0
c_POINT = pygame.K_PERIOD
c_DEL = pygame.K_BACKSPACE
# Face buttons.
# NOTE(review): c_X is bound to the "a" key and c_A to the "x" key — this
# looks like a deliberate keyboard layout rather than a typo, but confirm.
c_X = pygame.K_a
c_A = pygame.K_x
c_B = pygame.K_z
c_Y = pygame.K_LSHIFT
# Shoulder buttons.
c_L = pygame.K_q
c_R = pygame.K_w
def isDown(code):
    """Return a truthy value when the key for *code* is currently held."""
    pressed = pygame.key.get_pressed()
    return pressed[code]


def isUp(code):
    """Return True when the key for *code* is not currently held."""
    return not isDown(code)
| ceControl.py | 664 | TODO: make these configurable | 29 | en | 0.38381 |
import argparse
import cv2
import numpy as np
from onnxruntime.quantization import quantize_static, CalibrationDataReader, QuantType
from onnxruntime.quantization.calibrate import CalibrationMethod
from onnxruntime.quantization.quant_utils import QuantFormat
from dataset import pre_process_vgg
def parse_args(argv=None):
    """Parse command-line options for the quantization tool.

    Args:
        argv: Optional explicit argument list (mainly for testing); when
            None, ``sys.argv[1:]`` is used as before.

    Returns:
        argparse.Namespace with ``input``, ``output``, ``dataset`` and
        ``entropy_calibration`` attributes.
    """
    parser = argparse.ArgumentParser(description="ONNXRuntime quantization tool")
    parser.add_argument("--input", "-i", type=str,
                        help="path to the FP32 ONNX model to quantize")
    parser.add_argument("--output", "-o", type=str,
                        help="path where the quantized model is written")
    parser.add_argument("--dataset", "-d", type=str,
                        help="folder containing calibration images (*.JPEG)")
    parser.add_argument("--entropy-calibration", default=False, action="store_true",
                        help="use entropy calibration instead of min/max")
    return parser.parse_args(argv)
# https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/imagenet_v2/mobilenet.ipynb
def preprocess_image(image_path, height, width, channels=3):
    """Load one image and preprocess it into a single-image batch.

    Delegates the resize/normalize work to ``pre_process_vgg`` from the
    local dataset module, then prepends a batch axis of size 1.
    """
    image = cv2.imread(image_path)
    # assumes pre_process_vgg returns a CHW array given need_transpose=True
    # — TODO confirm against dataset.pre_process_vgg
    image_data = pre_process_vgg(image, dims=[height, width, channels], need_transpose=True)
    image_data = np.expand_dims(image_data, axis=0)
    return image_data
def preprocess_func(images_folder, height, width, size_limit=0):
    """Preprocess the calibration images found in *images_folder*.

    Args:
        images_folder: directory scanned (non-recursively) for ``*.JPEG``.
        height: target image height in pixels.
        width: target image width in pixels.
        size_limit: maximum number of images to use; 0 means no limit.
            (Previously this parameter was accepted but silently ignored.)

    Returns:
        A numpy array stacking the per-image batches produced by
        ``preprocess_image`` along a new leading axis.
    """
    import pathlib
    # Sort for a deterministic calibration set (glob order is filesystem
    # dependent), then honour size_limit.
    image_filepathes = sorted(
        str(path) for path in pathlib.Path(images_folder).glob("*.JPEG")
    )
    if size_limit > 0:
        image_filepathes = image_filepathes[:size_limit]
    unconcatenated_batch_data = [
        preprocess_image(image_filepath, height, width)
        for image_filepath in image_filepathes
    ]
    # Equivalent to the original expand_dims + concatenate combination:
    # stack the (1, C, H, W) per-image batches into one (N, 1, C, H, W) array.
    return np.asarray(unconcatenated_batch_data)
# Spatial input size expected by the model's calibration images.
image_height = 224
image_width = 224
class ResNetDataReader(CalibrationDataReader):
    """Feeds preprocessed calibration images to the static quantizer.

    The image folder is preprocessed lazily on the first ``get_next`` call;
    after that, one feed dict per image is served until exhaustion, when
    ``None`` is returned.
    """

    def __init__(self, calibration_image_folder):
        self.image_folder = calibration_image_folder
        self.preprocess_flag = True
        self.enum_data_dicts = []
        self.datasize = 0

    def get_next(self):
        if self.preprocess_flag:
            # First call: preprocess the whole folder, remember how many
            # samples we have, and wrap them as feed dicts keyed by the
            # model's input name.
            self.preprocess_flag = False
            batch = preprocess_func(
                self.image_folder, image_height, image_width, size_limit=0
            )
            self.datasize = len(batch)
            self.enum_data_dicts = iter(
                {"input_tensor:0": sample} for sample in batch
            )
        return next(self.enum_data_dicts, None)
if __name__ == "__main__":
    args = parse_args()
    # Calibration data is read from a folder of JPEG images.
    dr = ResNetDataReader(args.dataset)
    # Choose the activation-range calibration strategy.
    if args.entropy_calibration:
        method = CalibrationMethod.Entropy
    else:
        method = CalibrationMethod.MinMax
    # Statically quantize the model to QDQ format with per-channel weights.
    quantize_static(
        args.input,
        args.output,
        dr,
        quant_format=QuantFormat.QDQ,
        per_channel=True,
        calibrate_method=method,
    )
| closed/FuriosaAI/code/quantization/mlperf_evaluation/python/ort_quantization.py | 2,771 | https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/imagenet_v2/mobilenet.ipynb image_filepath = images_folder + '/' + image_name | 178 | en | 0.615625 |
# -*- coding: utf-8 -*-
import logging
from dbaas_dbmonitor.provider import DBMonitorProvider
from workflow.steps.util.base import BaseInstanceStep
LOG = logging.getLogger(__name__)
class DBMonitorStep(BaseInstanceStep):
    """Base class for workflow steps that talk to the DBMonitor service.

    Subclasses implement do() (and optionally undo()) via self.provider.
    """

    def __init__(self, instance):
        super(DBMonitorStep, self).__init__(instance)
        # All DBMonitor API calls go through this provider.
        self.provider = DBMonitorProvider()

    def do(self):
        # Subclasses must implement the forward action.
        raise NotImplementedError

    def undo(self):
        # Rollback is a no-op unless a subclass overrides it.
        pass
class DisableMonitoring(DBMonitorStep):
    """Disables DBMonitor monitoring for a single instance."""

    def __unicode__(self):
        return "Disabling DB Monitor..."

    def do(self):
        self.provider.disabled_dbmonitor_monitoring_instance(self.instance)
class EnableMonitoring(DBMonitorStep):
    """Enables DBMonitor monitoring for a single instance."""

    def __unicode__(self):
        return "Enabling DB Monitor..."

    def do(self):
        self.provider.enabled_dbmonitor_monitoring_instance(self.instance)
class CreateMonitoring(DBMonitorStep):
    """Registers instance-level monitoring; rollback disables it again."""

    def __unicode__(self):
        return "Creating DB Monitor..."

    def do(self):
        # instance_number distinguishes the VMs created for the same infra.
        instance_number = self.instance.databaseinfra.last_vm_created
        self.provider.create_dbmonitor_instance_monitoring(
            self.instance, instance_number
        )

    def undo(self):
        DisableMonitoring(self.instance).do()
class DisableInfraMonitoring(DBMonitorStep):
    """Removes infra-level monitoring from DBMonitor."""

    def __unicode__(self):
        return "Disabling DB Monitor..."

    def do(self):
        self.provider.remove_dbmonitor_monitoring(self.infra)
class CreateInfraMonitoring(DBMonitorStep):
    """Creates infra-level monitoring exactly once per infra.

    The work runs only when the step executes for the infra's first
    instance, so the step can be part of a per-instance workflow.
    """

    def __unicode__(self):
        return "Creating DB Monitor..."

    def do(self):
        if self.instance == self.infra.instances.all()[0]:
            # Idempotent: only create when not registered yet.
            if not self.provider.get_dbmonitor_databaseinfra(self.infra):
                self.provider.create_dbmonitor_monitoring(self.infra)

    def undo(self):
        if self.instance == self.infra.instances.all()[0]:
            DisableInfraMonitoring(self.instance).do()
class UpdateInfraVersion(DBMonitorStep):
    """Syncs the engine version registered in DBMonitor during an upgrade
    or patch, and restores the previous version on rollback."""

    def __unicode__(self):
        return "Update version on DB Monitor..."

    @property
    def is_valid(self):
        # Run only while an upgrade/patch is in progress, and only once per
        # infra (on its first instance).
        migrating = bool(self.upgrade or self.upgrade_patch)
        return migrating and self.instance == self.infra.instances.all()[0]

    @property
    def target_version(self):
        if self.upgrade:
            return self.upgrade.target_plan.engine.full_inicial_version
        if self.upgrade_patch:
            return self.upgrade_patch.target_patch_full_version
        return None

    @property
    def source_version(self):
        if self.upgrade:
            return self.upgrade.source_plan.engine.full_inicial_version
        if self.upgrade_patch:
            return self.upgrade_patch.source_patch_full_version
        return None

    def do(self):
        if self.is_valid:
            self.provider.update_dbmonitor_database_version(
                self.infra, self.target_version)

    def undo(self):
        if self.is_valid:
            self.provider.update_dbmonitor_database_version(
                self.infra, self.source_version)
class UpdateInfraCloudDatabaseMigrate(DBMonitorStep):
    """Pushes the environment's cloud name to DBMonitor after a migration."""

    def __unicode__(self):
        return "Update info about cloud on DBMonitor..."

    def do(self):
        self.provider.update_database_cloud(
            self.infra, self.environment.cloud.name)
class UpdateInfraOrganizationName(DBMonitorStep):
    """Pushes an infra's organization name to DBMonitor."""

    def __init__(self, instance, organization_name=None):
        super(UpdateInfraOrganizationName, self).__init__(instance)
        self.organization_name = organization_name

    def __unicode__(self):
        return "Update info about organization on DBMonitor..."

    @property
    def is_valid(self):
        # With an explicit name the step always runs; otherwise only the
        # infra's first instance triggers the update.
        return bool(self.organization_name) or (
            self.instance == self.infra.instances.first()
        )

    def do(self):
        if self.is_valid:
            self.provider.update_database_organization(
                self.infra, self.organization_name)
| dbaas/workflow/steps/util/db_monitor.py | 3,916 | -*- coding: utf-8 -*- | 21 | en | 0.767281 |
import os
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Optional
from yarl import URL
@dataclass(frozen=True)
class ServerConfig:
    """HTTP server settings; defaults bind all interfaces on port 8080."""

    host: str = "0.0.0.0"
    port: int = 8080
    name: str = "Docker Registry"
@dataclass(frozen=True)
class AuthConfig:
    """Connection settings for the platform auth service."""

    # None when configured with the "-" sentinel (see _get_url below).
    server_endpoint_url: Optional[URL]
    service_token: str = field(repr=False)  # secret, kept out of repr
class UpstreamType(str, Enum):
    """How this proxy authenticates against the upstream registry."""

    BASIC = "basic"
    OAUTH = "oauth"
    AWS_ECR = "aws_ecr"
@dataclass(frozen=True)
class UpstreamRegistryConfig:
    """Settings for the upstream Docker registry this service proxies."""

    endpoint_url: URL
    project: str
    type: UpstreamType = UpstreamType.OAUTH

    # Credentials used when type is BASIC; secrets are kept out of repr.
    basic_username: str = field(repr=False, default="")
    basic_password: str = field(repr=False, default="")

    # OAuth token endpoint settings, used when type is OAUTH.
    # TODO: should be derived from the WWW-Authenticate header instead
    token_endpoint_url: URL = URL()
    token_service: str = ""
    token_endpoint_username: str = field(repr=False, default="")
    token_endpoint_password: str = field(repr=False, default="")
    token_registry_catalog_scope: str = "registry:catalog:*"
    token_repository_scope_actions: str = "*"

    # Socket timeouts in seconds; None disables the timeout.
    sock_connect_timeout_s: Optional[float] = 30.0
    sock_read_timeout_s: Optional[float] = 30.0

    # https://github.com/docker/distribution/blob/dcfe05ce6cff995f419f8df37b59987257ffb8c1/registry/handlers/catalog.go#L16
    max_catalog_entries: int = 100

    @property
    def is_basic(self) -> bool:
        return self.type == UpstreamType.BASIC

    @property
    def is_oauth(self) -> bool:
        return self.type == UpstreamType.OAUTH
@dataclass(frozen=True)
class ZipkinConfig:
    """Optional Zipkin tracing settings."""

    url: URL
    app_name: str = "platform-registry"
    sample_rate: float = 0
@dataclass(frozen=True)
class SentryConfig:
    """Optional Sentry error-reporting settings."""

    dsn: URL
    cluster_name: str
    app_name: str = "platform-registry"
    sample_rate: float = 0
@dataclass(frozen=True)
class Config:
    """Top-level application configuration aggregate."""

    server: ServerConfig
    upstream_registry: UpstreamRegistryConfig
    auth: AuthConfig
    cluster_name: str
    # Tracing / error reporting are optional and default to disabled.
    zipkin: Optional[ZipkinConfig] = None
    sentry: Optional[SentryConfig] = None
class EnvironConfigFactory:
    """Builds the application Config from ``NP_*`` environment variables.

    A mapping may be injected for tests; otherwise ``os.environ`` is used.
    """

    def __init__(self, environ: Optional[dict[str, str]] = None) -> None:
        self._environ = environ or os.environ

    def _get_url(self, name: str) -> Optional[URL]:
        """Read a URL variable; the literal "-" means "not configured"."""
        value = self._environ[name]
        if value == "-":
            return None
        else:
            return URL(value)

    def create_server(self) -> ServerConfig:
        # Only the port is configurable; host and name keep their defaults.
        port = int(self._environ.get("NP_REGISTRY_API_PORT", ServerConfig.port))
        return ServerConfig(port=port)

    def create_upstream_registry(self) -> UpstreamRegistryConfig:
        """Assemble upstream-registry settings for the configured auth type."""
        endpoint_url = URL(self._environ["NP_REGISTRY_UPSTREAM_URL"])
        project = self._environ["NP_REGISTRY_UPSTREAM_PROJECT"]
        max_catalog_entries = int(
            self._environ.get(
                "NP_REGISTRY_UPSTREAM_MAX_CATALOG_ENTRIES",
                UpstreamRegistryConfig.max_catalog_entries,
            )
        )
        upstream_type = UpstreamType(
            self._environ.get("NP_REGISTRY_UPSTREAM_TYPE", UpstreamType.OAUTH.value)
        )
        upstream: dict[str, Any] = dict(
            endpoint_url=endpoint_url,
            project=project,
            max_catalog_entries=max_catalog_entries,
            type=upstream_type,
        )
        if upstream_type == UpstreamType.OAUTH:
            # OAuth requires the token endpoint and its credentials.
            upstream.update(
                dict(
                    token_endpoint_url=URL(
                        self._environ["NP_REGISTRY_UPSTREAM_TOKEN_URL"]
                    ),
                    token_service=self._environ["NP_REGISTRY_UPSTREAM_TOKEN_SERVICE"],
                    token_endpoint_username=self._environ[
                        "NP_REGISTRY_UPSTREAM_TOKEN_USERNAME"
                    ],
                    token_endpoint_password=self._environ[
                        "NP_REGISTRY_UPSTREAM_TOKEN_PASSWORD"
                    ],
                )
            )
        # Optional token-scope overrides.
        if "NP_REGISTRY_UPSTREAM_TOKEN_REGISTRY_SCOPE" in self._environ:
            upstream["token_registry_catalog_scope"] = self._environ[
                "NP_REGISTRY_UPSTREAM_TOKEN_REGISTRY_SCOPE"
            ]
        if "NP_REGISTRY_UPSTREAM_TOKEN_REPO_SCOPE_ACTIONS" in self._environ:
            upstream["token_repository_scope_actions"] = self._environ[
                "NP_REGISTRY_UPSTREAM_TOKEN_REPO_SCOPE_ACTIONS"
            ]
        if upstream_type == UpstreamType.BASIC:
            # Basic credentials are optional; unset ones keep the "" default.
            basic_username = self._environ.get("NP_REGISTRY_UPSTREAM_BASIC_USERNAME")
            if basic_username is not None:
                upstream["basic_username"] = basic_username
            basic_password = self._environ.get("NP_REGISTRY_UPSTREAM_BASIC_PASSWORD")
            if basic_password is not None:
                upstream["basic_password"] = basic_password
        return UpstreamRegistryConfig(**upstream)

    def create_auth(self) -> AuthConfig:
        url = self._get_url("NP_REGISTRY_AUTH_URL")
        token = self._environ["NP_REGISTRY_AUTH_TOKEN"]
        return AuthConfig(server_endpoint_url=url, service_token=token)

    def create_zipkin(self) -> Optional[ZipkinConfig]:
        # Zipkin tracing is optional; absence of the URL disables it.
        if "NP_ZIPKIN_URL" not in self._environ:
            return None
        url = URL(self._environ["NP_ZIPKIN_URL"])
        app_name = self._environ.get("NP_ZIPKIN_APP_NAME", ZipkinConfig.app_name)
        sample_rate = float(
            self._environ.get("NP_ZIPKIN_SAMPLE_RATE", ZipkinConfig.sample_rate)
        )
        return ZipkinConfig(url=url, app_name=app_name, sample_rate=sample_rate)

    def create_sentry(self) -> Optional[SentryConfig]:
        # Sentry reporting is optional; absence of the DSN disables it.
        if "NP_SENTRY_DSN" not in self._environ:
            return None
        return SentryConfig(
            dsn=URL(self._environ["NP_SENTRY_DSN"]),
            cluster_name=self._environ["NP_SENTRY_CLUSTER_NAME"],
            app_name=self._environ.get("NP_SENTRY_APP_NAME", SentryConfig.app_name),
            sample_rate=float(
                self._environ.get("NP_SENTRY_SAMPLE_RATE", SentryConfig.sample_rate)
            ),
        )

    def create(self) -> Config:
        """Build the full Config; NP_CLUSTER_NAME is required and non-empty."""
        server_config = self.create_server()
        upstream_registry_config = self.create_upstream_registry()
        auth_config = self.create_auth()
        zipkin_config = self.create_zipkin()
        sentry_config = self.create_sentry()
        cluster_name = self._environ["NP_CLUSTER_NAME"]
        assert cluster_name
        return Config(
            server=server_config,
            upstream_registry=upstream_registry_config,
            auth=auth_config,
            cluster_name=cluster_name,
            zipkin=zipkin_config,
            sentry=sentry_config,
        )
| platform_registry_api/config.py | 6,717 | TODO: should be derived from the WWW-Authenticate header instead https://github.com/docker/distribution/blob/dcfe05ce6cff995f419f8df37b59987257ffb8c1/registry/handlers/catalog.goL16 | 181 | en | 0.57769 |
# Copyright 1997 - 2018 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class WriteActionsMissLearnedInfo(Base):
    """The WriteActionsMissLearnedInfo class encapsulates a system managed writeActionsMissLearnedInfo node in the ixnetwork hierarchy.

    An instance of the class can be obtained by accessing the WriteActionsMissLearnedInfo property from a parent instance.
    The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.

    NOTE: auto-generated SDK accessor class; the upstream data model does not
    document the individual attributes (their docstrings read "NOT DEFINED").
    """

    # Node name in the REST data model, used by the base class to build hrefs.
    _SDM_NAME = 'writeActionsMissLearnedInfo'

    def __init__(self, parent):
        super(WriteActionsMissLearnedInfo, self).__init__(parent)

    @property
    def ExperimenterData(self):
        """NOT DEFINED

        Returns:
            str
        """
        return self._get_attribute('experimenterData')

    @property
    def ExperimenterDataLength(self):
        """NOT DEFINED

        Returns:
            number
        """
        return self._get_attribute('experimenterDataLength')

    @property
    def ExperimenterId(self):
        """NOT DEFINED

        Returns:
            number
        """
        return self._get_attribute('experimenterId')

    @property
    def NextTableIds(self):
        """NOT DEFINED

        Returns:
            str
        """
        return self._get_attribute('nextTableIds')

    @property
    def Property(self):
        """NOT DEFINED

        Returns:
            str
        """
        return self._get_attribute('property')

    @property
    def SupportedField(self):
        """NOT DEFINED

        Returns:
            str
        """
        return self._get_attribute('supportedField')

    def find(self, ExperimenterData=None, ExperimenterDataLength=None, ExperimenterId=None, NextTableIds=None, Property=None, SupportedField=None):
        """Finds and retrieves writeActionsMissLearnedInfo data from the server.

        All named parameters support regex and can be used to selectively retrieve writeActionsMissLearnedInfo data from the server.
        By default the find method takes no parameters and will retrieve all writeActionsMissLearnedInfo data from the server.

        Args:
            ExperimenterData (str): NOT DEFINED
            ExperimenterDataLength (number): NOT DEFINED
            ExperimenterId (number): NOT DEFINED
            NextTableIds (str): NOT DEFINED
            Property (str): NOT DEFINED
            SupportedField (str): NOT DEFINED

        Returns:
            self: This instance with matching writeActionsMissLearnedInfo data retrieved from the server available through an iterator or index

        Raises:
            ServerError: The server has encountered an uncategorized error condition
        """
        # locals() passes the filter arguments straight through to _select.
        return self._select(locals())

    def read(self, href):
        """Retrieves a single instance of writeActionsMissLearnedInfo data from the server.

        Args:
            href (str): An href to the instance to be retrieved

        Returns:
            self: This instance with the writeActionsMissLearnedInfo data from the server available through an iterator or index

        Raises:
            NotFoundError: The requested resource does not exist on the server
            ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
| RestPy/ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/openflow/writeactionsmisslearnedinfo.py | 4,134 | The WriteActionsMissLearnedInfo class encapsulates a system managed writeActionsMissLearnedInfo node in the ixnetwork hierarchy.
An instance of the class can be obtained by accessing the WriteActionsMissLearnedInfo property from a parent instance.
The internal properties list will be empty when the property is accessed and is populated from the server by using the find method.
NOT DEFINED
Returns:
str
NOT DEFINED
Returns:
number
NOT DEFINED
Returns:
number
NOT DEFINED
Returns:
str
NOT DEFINED
Returns:
str
NOT DEFINED
Returns:
str
Finds and retrieves writeActionsMissLearnedInfo data from the server.
All named parameters support regex and can be used to selectively retrieve writeActionsMissLearnedInfo data from the server.
By default the find method takes no parameters and will retrieve all writeActionsMissLearnedInfo data from the server.
Args:
ExperimenterData (str): NOT DEFINED
ExperimenterDataLength (number): NOT DEFINED
ExperimenterId (number): NOT DEFINED
NextTableIds (str): NOT DEFINED
Property (str): NOT DEFINED
SupportedField (str): NOT DEFINED
Returns:
self: This instance with matching writeActionsMissLearnedInfo data retrieved from the server available through an iterator or index
Raises:
ServerError: The server has encountered an uncategorized error condition
Retrieves a single instance of writeActionsMissLearnedInfo data from the server.
Args:
href (str): An href to the instance to be retrieved
Returns:
self: This instance with the writeActionsMissLearnedInfo data from the server available through an iterator or index
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
Copyright 1997 - 2018 by IXIA Keysight Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | 2,922 | en | 0.809345 |
# -*- coding: utf-8 -*-
''' Globals variables '''
# ##################################################################################
# MG ILLUMINATION #
# First Crazy Debroussailleur : jDepoortere #
# Author : cPOTTIER #
# Date : 12-05-2016 #
# ##################################################################################
# Required modules
import sys
import os
# InK modules
import graphs
import nomen
import ink.proto
import ink.query
import ink.io
import nask.sdk
import nask.sdk.casting
import nask.sdk.shots as shots
import nask.sdk.hit
import proj.pipe.ink.graphs as prodgraphs
from subprocess import Popen, PIPE
# Maya modules
import sip
# Optionals modules
import re
import shutil
import time
import datetime
from datetime import datetime
import subprocess
import glob
import json
import shutil
import string
import subprocess
import collections
from collections import OrderedDict
# QT modules
from PyQt4 import QtGui
from PyQt4 import QtGui, QtCore, Qt, QtOpenGL
from PyQt4.QtCore import QThread
# qt module for InK
# NOTE: this file is Python 2 (see the bare `print` statement below).
try:
    # Force a fresh import of sandboxQt: deleting the entry from sys.modules
    # and re-importing acts as a poor man's reload().
    if 'sandboxQt' in sys.modules:
        del(sys.modules["sandboxQt"])
        import sandboxQt
    else:
        import sandboxQt
except:
    # HACK: bare except treats sandboxQt as optional and hides any import
    # failure; consider narrowing to ImportError.
    pass

# Globals
CONNECT_USER_INFOS = ink.io.ConnectUserInfo()
CONNECT_USER0 = CONNECT_USER_INFOS[0]
CONNECT_USER1 = CONNECT_USER_INFOS[1] # todo to ask why ?
PROJECT = CONNECT_USER_INFOS[2].upper() # cf Nomen.GetFilm()
projectLower = PROJECT.lower()
USER = CONNECT_USER_INFOS[1]
# NOTE(review): 'HOSTNAME.illum-mg.fr' looks like a placeholder — confirm.
MAIL_HOSTNAME = 'HOSTNAME.illum-mg.fr'
MAIL_USER = USER+'@illum-mg.fr'
LOCALPATH = '/u/'+projectLower+'/Users/'+USER+'/Presets/Graphs/'

# Useful Classes
# User 'cpottier' gets the dev variant of the helper module; everyone else
# gets the stable one. Both branches use the same delete-and-import reload
# idiom as above.
if str(USER) == 'cpottier': # for dev
    if '__InK__classes_forDev' in sys.modules:
        del(sys.modules["__InK__classes_forDev"])
        import __InK__classes_forDev
        from __InK__classes_forDev import __PIPEIN_GRAPH__
    else:
        import __InK__classes_forDev
        from __InK__classes_forDev import __PIPEIN_GRAPH__
else:
    if '__InK__classes' in sys.modules:
        del(sys.modules["__InK__classes"])
        import __InK__classes
        from __InK__classes import __PIPEIN_GRAPH__
    else:
        import __InK__classes
        from __InK__classes import __PIPEIN_GRAPH__

print sys.modules
| __InK__connect.py | 2,488 | -*- coding: utf-8 -*- MG ILLUMINATION First Crazy Debroussailleur : jDepoortere Author : cPOTTIER Date : 12-05-2016 Required modules InK modules Maya modules Optionals modules QT modules qt module for InK Globals todo to ask why ? cf Nomen.GetFilm() Useful Classes for dev | 505 | en | 0.210847 |
import re
import discord
from discord.ext import commands
from discord.ext.commands import clean_content
from Util import Configuration, GearbotLogging, Permissioncheckers, Translator, Utils
INVITE_MATCHER = re.compile(r"(?:https?:\/\/)?(?:www\.)?(?:discord\.(?:gg|io|me|li)|discordapp\.com\/invite)\/([\w|\d|-]+)", flags=re.IGNORECASE)
async def censor(ctx, code, server_name):
    """Delete a message containing a censored invite and log the removal.

    Swallows discord.NotFound: if the message is already gone (another bot
    or a moderator deleted it first) there is nothing left to do.
    """
    try:
        await ctx.message.delete()
        sanitized_content = await clean_content().convert(ctx, ctx.message.content)
        author_name = Utils.clean_user(ctx.message.author)
        translated = Translator.translate('censored_invite', ctx.guild.id, user=author_name,
                                          code=code, message=sanitized_content,
                                          server_name=server_name)
        await GearbotLogging.log_to(ctx.guild.id, "CENSOR", f":no_entry_sign: {translated}")
    except discord.NotFound:
        # Message vanished before we could delete it -- someone beat us to it.
        pass
class Censor:
    """Cog that removes Discord invite links not on the guild's whitelist."""

    def __init__(self, bot):
        self.bot: commands.Bot = bot

    async def on_message(self, message: discord.Message):
        """Scan each guild message for invite codes and censor the ones that
        point outside the whitelist (and outside this guild itself).

        Moderators (level >= 2), the bot itself and ignored users are exempt.
        An empty whitelist disables censoring entirely.
        """
        if not hasattr(message.channel, "guild") or message.channel.guild is None:
            return
        ctx: commands.Context = await self.bot.get_context(message)
        guild = message.guild
        is_mod = Permissioncheckers.get_user_lvl(ctx) >= 2
        if message.author == guild.me or is_mod or message.author.id in Configuration.get_var(guild.id, "IGNORED_USERS"):
            return
        guilds = Configuration.get_var(message.guild.id, "INVITE_WHITELIST")
        # BUG FIX: was `len(guilds) is not 0`, an identity comparison on an int
        # (emits a SyntaxWarning on modern CPython); use truthiness instead.
        if guilds:
            codes = INVITE_MATCHER.findall(message.content)
            for code in codes:
                try:
                    invite: discord.Invite = await self.bot.get_invite(code)
                except discord.NotFound:
                    pass
                except KeyError:
                    # get_invite raises KeyError for group-DM invites
                    await censor(ctx, code, "DM group")
                else:
                    # BUG FIX: the original dereferenced invite.guild.name even
                    # when invite.guild was None, raising AttributeError.
                    if invite.guild is None:
                        await censor(ctx, code, "DM group")
                    elif invite.guild.id not in guilds and invite.guild.id != guild.id:
                        await censor(ctx, code, invite.guild.name)
def setup(bot):
    # Standard discord.py extension entry point: register the Censor cog.
    bot.add_cog(Censor(bot))
#!/usr/bin/env python
"""
python list_bundles.py prod b6dc9b93-929a-45d0-beb2-5cf8e64872fe
python list_bundles.py staging 3b41f062-621c-46ca-abad-bce09427934d
"""
import argparse
import json
import logging
import sys
from ingest.api.ingestapi import IngestApi
logging.getLogger('ingest').setLevel(logging.DEBUG)
format = ' %(asctime)s - %(name)s - %(levelname)s in %(filename)s:' \
'%(lineno)s %(funcName)s(): %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.WARNING, format=format)
class BundleManifest:
    """Thin wrapper around a bundle-manifest resource dict, exposing the
    bundle's fully-qualified id (uuid plus version)."""

    def __init__(self, resource):
        self._object = resource

    @property
    def fqid(self):
        """Return '<bundleUuid>.<bundleVersion>' for this manifest."""
        manifest = self._object
        return '{}.{}'.format(manifest.get('bundleUuid'), manifest.get('bundleVersion'))
if __name__ == '__main__':
    # Command-line entry point: look up a project in the ingest API, collect
    # the bundle fqids from its bundle manifests and dump them to a JSON file.
    parser = argparse.ArgumentParser(description='Generates a bundle fqid list given a project uuid')
    parser.add_argument('env', choices=['dev', 'integration', 'staging', 'prod'], help='environment')
    parser.add_argument('project_uuid', metavar='project-uuid', type=str, help='Project uuid')
    parser.add_argument('--filename', type=str, help='Output filename')
    args = parser.parse_args()
    project_uuid = args.project_uuid
    filename = args.filename or f'{args.project_uuid}.json'
    env = args.env
    # Production has no environment infix in the API hostname.
    infix = f'.{env}' if env != 'prod' else ''
    url = f'https://api.ingest{infix}.data.humancellatlas.org'
    ingest_api = IngestApi(url)
    project = ingest_api.get_project_by_uuid(project_uuid)
    bundle_manifests = ingest_api.get_related_entities("bundleManifests", project, "bundleManifests")
    bundle_fqids = [BundleManifest(obj).fqid for obj in bundle_manifests]
    with open(filename, 'w') as outfile:
        json.dump(bundle_fqids, outfile, indent=4)
    print(f'Total bundle count: {len(bundle_fqids)}')
    # BUG FIX: the message previously printed the literal text "(unknown)"
    # instead of interpolating the output filename.
    print(f'Saved into file: {filename}')
| list_bundles.py | 1,889 | python list_bundles.py prod b6dc9b93-929a-45d0-beb2-5cf8e64872fe
python list_bundles.py staging 3b41f062-621c-46ca-abad-bce09427934d
!/usr/bin/env python | 154 | en | 0.122515 |
# -*- coding: utf-8 -*-
"""
Code to take template spectra, used for RV fitting, and pass them through 4FS to resample them to 4MOST's resolution.
It then further resamples each arm onto a fixed logarithmic stride.
"""
import argparse
import hashlib
import logging
import numpy as np
import os
from os import path as os_path
from fourgp_fourfs import FourFS
from fourgp_degrade.resample import SpectrumResampler
from fourgp_degrade import SpectrumProperties
from fourgp_speclib import SpectrumLibrarySqlite
def command_line_interface(root_path):
    """Parse the command-line options for the template-resampling tool.

    :param root_path:
        Root of this 4GP installation; used as the default location of the
        4FS binary package.
    :return:
        The parsed argparse namespace.
    """
    arg_parser = argparse.ArgumentParser(description=__doc__.strip())
    arg_parser.add_argument('--templates-in', required=False, dest='templates_in',
                            default='turbospec_rv_templates',
                            help="Library of spectra to use as templates for RV code")
    arg_parser.add_argument('--workspace', dest='workspace', default="",
                            help="Directory where we expect to find spectrum libraries")
    arg_parser.add_argument('--templates-out', required=False, dest="templates_out",
                            default="rv_templates_resampled",
                            help="Library into which to place resampled templates for RV code")
    arg_parser.add_argument('--binary-path', required=False, dest="binary_path",
                            default=root_path,
                            help="Specify a directory where 4FS binary package is installed")
    parsed = arg_parser.parse_args()

    # Configure logging and announce what we are about to do.
    logging.basicConfig(level=logging.INFO,
                        format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
                        datefmt='%d/%m/%Y %H:%M:%S')
    logging.getLogger(__name__).info("Resampling template spectra")
    return parsed
def logarithmic_raster(lambda_min, lambda_max, lambda_step):
    """Build a wavelength raster with uniform logarithmic spacing.

    The pixel size at the short-wavelength end equals ``lambda_step``; every
    subsequent pixel is larger than its neighbour by the constant factor
    ``1 + lambda_step / lambda_min``.

    :param lambda_min:
        Smallest wavelength in the raster.
    :param lambda_max:
        Largest wavelength in the raster (exclusive endpoint).
    :param lambda_step:
        The approximate pixel size at the start of the raster.
    :return:
        A numpy array containing a wavelength raster with fixed logarithmic stride.
    """
    log_start = np.log(lambda_min)
    log_stop = np.log(lambda_max)
    log_stride = np.log(1 + lambda_step / lambda_min)
    return np.exp(np.arange(log_start, log_stop, log_stride))
def resample_templates(args, logger):
    """
    Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in
    turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect
    the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster
    with fixed logarithmic stride.

    :param args:
        Object containing arguments supplied by the user, for example the name of the spectrum libraries we use for
        input and output. The required fields are defined by the user interface above.

    :param logger:
        A python logging object.

    :return:
        None.
    """
    # Set path to workspace where we expect to find libraries of spectra
    # NOTE(review): when --workspace is not supplied this reads args.our_path,
    # which command_line_interface() above never defines -- confirm callers
    # populate it, otherwise this line raises AttributeError.
    workspace = args.workspace if args.workspace else os_path.join(args.our_path, "../../../workspace")
    # Open input template spectra
    spectra = SpectrumLibrarySqlite.open_and_search(
        library_spec=args.templates_in,
        workspace=workspace,
        extra_constraints={"continuum_normalised": 0}
    )
    templates_library, templates_library_items, templates_spectra_constraints = \
        [spectra[i] for i in ("library", "items", "constraints")]
    # Create new SpectrumLibrary to hold the resampled output templates
    library_path = os_path.join(workspace, args.templates_out)
    output_library = SpectrumLibrarySqlite(path=library_path, create=True)
    # Instantiate 4FS wrapper (single SNR of 250, measured per pixel)
    etc_wrapper = FourFS(
        path_to_4fs=os_path.join(args.binary_path, "OpSys/ETC"),
        snr_list=[250.],
        magnitude=13,
        snr_per_pixel=True
    )
    for input_spectrum_id in templates_library_items:
        logger.info("Working on <{}>".format(input_spectrum_id['filename']))
        # Open Spectrum data from disk
        input_spectrum_array = templates_library.open(ids=input_spectrum_id['specId'])
        # Load template spectrum (flux normalised)
        template_flux_normalised = input_spectrum_array.extract_item(0)
        # Look up the unique ID of the star we've just loaded
        # Newer spectrum libraries have a uid field which is guaranteed unique; for older spectrum libraries use
        # Starname instead.
        # Work out which field we're using (uid or Starname)
        spectrum_matching_field = 'uid' if 'uid' in template_flux_normalised.metadata else 'Starname'
        # Look up the unique ID of this object
        object_name = template_flux_normalised.metadata[spectrum_matching_field]
        # Search for the continuum-normalised version of this same object (which will share the same uid / name)
        search_criteria = {
            spectrum_matching_field: object_name,
            'continuum_normalised': 1
        }
        continuum_normalised_spectrum_id = templates_library.search(**search_criteria)
        # Check that continuum-normalised spectrum exists and is unique
        assert len(continuum_normalised_spectrum_id) == 1, "Could not find continuum-normalised spectrum."
        # Load the continuum-normalised version
        template_continuum_normalised_arr = templates_library.open(
            ids=continuum_normalised_spectrum_id[0]['specId']
        )
        # Turn the SpectrumArray we got back into a single Spectrum
        template_continuum_normalised = template_continuum_normalised_arr.extract_item(0)
        # Now create a mock observation of this template using 4FS
        logger.info("Passing template through 4FS")
        mock_observed_template = etc_wrapper.process_spectra(
            spectra_list=((template_flux_normalised, template_continuum_normalised),)
        )
        # Loop over LRS and HRS
        for mode in mock_observed_template:
            # Loop over the spectra we simulated (there was only one!)
            for index in mock_observed_template[mode]:
                # Loop over the various SNRs we simulated (there was only one!)
                for snr in mock_observed_template[mode][index]:
                    # Create a unique ID for this arm's data
                    unique_id = hashlib.md5(os.urandom(32)).hexdigest()[:16]
                    # Import the flux- and continuum-normalised spectra separately, but give them the same ID
                    for spectrum_type in mock_observed_template[mode][index][snr]:
                        # Extract continuum-normalised mock observation
                        logger.info("Resampling {} spectrum".format(mode))
                        mock_observed = mock_observed_template[mode][index][snr][spectrum_type]
                        # Replace errors which are nans with a large value
                        mock_observed.value_errors[np.isnan(mock_observed.value_errors)] = 1000.
                        # Check for NaN values in spectrum itself; replace them with
                        # flux 1 and a large error so they carry no weight downstream
                        if not np.all(np.isfinite(mock_observed.values)):
                            print("Warning: NaN values in template <{}>".format(template_flux_normalised.metadata['Starname']))
                            mock_observed.value_errors[np.isnan(mock_observed.values)] = 1000.
                            mock_observed.values[np.isnan(mock_observed.values)] = 1.
                        # Resample template onto a logarithmic raster of fixed step
                        resampler = SpectrumResampler(mock_observed)
                        # Construct the raster for each wavelength arm
                        wavelength_arms = SpectrumProperties(mock_observed.wavelengths).wavelength_arms()
                        # Resample 4FS output for each arm onto a fixed logarithmic stride
                        for arm_count, arm in enumerate(wavelength_arms["wavelength_arms"]):
                            arm_raster, mean_pixel_width = arm
                            name = "{}_{}".format(mode, arm_count)
                            arm_info = {
                                "lambda_min": arm_raster[0],
                                "lambda_max": arm_raster[-1],
                                "lambda_step": mean_pixel_width
                            }
                            arm_raster = logarithmic_raster(lambda_min=arm_info['lambda_min'],
                                                            lambda_max=arm_info['lambda_max'],
                                                            lambda_step=arm_info['lambda_step']
                                                            )
                            # Resample 4FS output onto a fixed logarithmic step
                            mock_observed_arm = resampler.onto_raster(arm_raster)
                            # Save it into output spectrum library
                            output_library.insert(spectra=mock_observed_arm,
                                                  filenames=input_spectrum_id['filename'],
                                                  metadata_list={
                                                      "uid": unique_id,
                                                      "template_id": object_name,
                                                      "mode": mode,
                                                      "arm_name": "{}_{}".format(mode,arm_count),
                                                      "lambda_min": arm_raster[0],
                                                      "lambda_max": arm_raster[-1],
                                                      "lambda_step": mean_pixel_width
                                                  })
| src/pythonModules/fourgp_rv/fourgp_rv/templates_resample.py | 10,666 | A simple command-line interface for running a tool to resample a library of template spectra onto fixed
logarithmic rasters representing each of the 4MOST arms.
We use the python argparse module to build the interface, and return the inputs supplied by the user.
:param root_path:
The root path of this 4GP installation; the directory where we can find 4FS.
:return:
An object containing the arguments supplied by the user.
Create a logarithmic raster with a fixed logarithmic stride, based on a starting wavelength, finishing wavelength,
and a mean wavelength step.
:param lambda_min:
Smallest wavelength in raster.
:param lambda_max:
Largest wavelength in raster.
:param lambda_step:
The approximate pixel size in the raster.
:return:
A numpy array containing a wavelength raster with fixed logarithmic stride.
Resample a spectrum library of templates onto a fixed logarithmic stride, representing each of the 4MOST arms in
turn. We use 4FS to down-sample the templates to the resolution of 4MOST observations, and automatically detect
the list of arms contained within each 4FS mock observation. We then resample the 4FS output onto a new raster
with fixed logarithmic stride.
:param args:
Object containing arguments supplied by the used, for example the name of the spectrum libraries we use for
input and output. The required fields are defined by the user interface above.
:param logger:
A python logging object.
:return:
None.
Code to take template spectra, used for RV fitting, and pass them through 4FS to resample them to 4MOST's resolution.
It then further resamples each arm onto a fixed logarithmic stride.
-*- coding: utf-8 -*- Read input parameters Set up logger Set path to workspace where we expect to find libraries of spectra Open input template spectra Create new SpectrumLibrary to hold the resampled output templates Instantiate 4FS wrapper Open Spectrum data from disk Load template spectrum (flux normalised) Look up the unique ID of the star we've just loaded Newer spectrum libraries have a uid field which is guaranteed unique; for older spectrum libraries use Starname instead. Work out which field we're using (uid or Starname) Look up the unique ID of this object Search for the continuum-normalised version of this same object (which will share the same uid / name) Check that continuum-normalised spectrum exists and is unique Load the continuum-normalised version Turn the SpectrumArray we got back into a single Spectrum Now create a mock observation of this template using 4FS Loop over LRS and HRS Loop over the spectra we simulated (there was only one!) Loop over the various SNRs we simulated (there was only one!) Create a unique ID for this arm's data Import the flux- and continuum-normalised spectra separately, but give them the same ID Extract continuum-normalised mock observation Replace errors which are nans with a large value Check for NaN values in spectrum itself Resample template onto a logarithmic raster of fixed step Construct the raster for each wavelength arm Resample 4FS output for each arm onto a fixed logarithmic stride Resample 4FS output onto a fixed logarithmic step Save it into output spectrum library | 3,217 | en | 0.807205 |
#!/usr/bin/env python
import sys
import os
sys.path.append(os.path.normpath(os.path.join(os.path.dirname(__file__), '../lib')))
import init
import config
import misc
from kzcashd import KZCashDaemon
from models import Superblock, Proposal, GovernanceObject, Watchdog
from models import VoteSignals, VoteOutcomes, Transient
import socket
from misc import printdbg
import time
from bitcoinrpc.authproxy import JSONRPCException
import signal
import atexit
import random
from scheduler import Scheduler
import argparse
# sync kzcashd gobject list with our local relational DB backend
def perform_kzcashd_object_sync(kzcashd):
    # Pull the governance objects known to kzcashd into the local database.
    GovernanceObject.sync(kzcashd)
# delete old watchdog objects, create new when necessary
def watchdog_check(kzcashd):
    """Maintain the network watchdog objects for this masternode.

    Votes to delete expired watchdogs, then either submits a fresh watchdog
    (when none are active) or votes the highest-hash active watchdog VALID
    and votes the remaining ones off.
    """
    printdbg("in watchdog_check")
    # Expired watchdogs are voted off the network.
    for stale in Watchdog.expired(kzcashd):
        printdbg("\tFound expired watchdog [%s], voting to delete" % stale.object_hash)
        stale.vote(kzcashd, VoteSignals.delete, VoteOutcomes.yes)
    active = Watchdog.active(kzcashd)
    if active.count() == 0:
        # Nothing alive on the network -- submit a brand new watchdog.
        printdbg("\tNo watchdogs exist... submitting new one.")
        Watchdog(created_at=int(time.time())).submit(kzcashd)
    else:
        # Deterministic winner: the active watchdog with the highest hash.
        candidates = sorted(active, key=lambda w: w.object_hash)
        winner = candidates.pop()
        printdbg("\tFound winning watchdog [%s], voting VALID" % winner.object_hash)
        winner.vote(kzcashd, VoteSignals.valid, VoteOutcomes.yes)
        # Everything that lost the race gets voted off.
        for loser in candidates:
            printdbg("\tFound losing watchdog [%s], voting DELETE" % loser.object_hash)
            loser.vote(kzcashd, VoteSignals.delete, VoteOutcomes.yes)
    printdbg("leaving watchdog_check")
def attempt_superblock_creation(kzcashd):
    """Create and/or vote a superblock for the next superblock cycle.

    Only masternodes participate. If we already cast a FUNDING vote at this
    event block height, newly-seen superblocks are voted down and we return.
    Otherwise a deterministic superblock is assembled from the approved,
    ranked proposals; we vote FUNDING=YES on the matching on-network object
    if it exists, or submit the superblock ourselves when we are the elected
    winner.
    """
    import kzcashlib
    if not kzcashd.is_masternode():
        print("We are not a Masternode... can't submit superblocks!")
        return
    # query votes for this specific ebh... if we have voted for this specific
    # ebh, then it's voted on. since we track votes this is all done using joins
    # against the votes table
    #
    # has this masternode voted on *any* superblocks at the given event_block_height?
    # have we voted FUNDING=YES for a superblock for this specific event_block_height?
    event_block_height = kzcashd.next_superblock_height()
    if Superblock.is_voted_funding(event_block_height):
        # printdbg("ALREADY VOTED! 'til next time!")
        # vote down any new SBs because we've already chosen a winner
        for sb in Superblock.at_height(event_block_height):
            if not sb.voted_on(signal=VoteSignals.funding):
                sb.vote(kzcashd, VoteSignals.funding, VoteOutcomes.no)
        # now return, we're done
        return
    if not kzcashd.is_govobj_maturity_phase():
        printdbg("Not in maturity phase yet -- will not attempt Superblock")
        return
    proposals = Proposal.approved_and_ranked(proposal_quorum=kzcashd.governance_quorum(), next_superblock_max_budget=kzcashd.next_superblock_max_budget())
    budget_max = kzcashd.get_superblock_budget_allocation(event_block_height)
    sb_epoch_time = kzcashd.block_height_to_epoch(event_block_height)
    sb = kzcashlib.create_superblock(proposals, event_block_height, budget_max, sb_epoch_time)
    if not sb:
        printdbg("No superblock created, sorry. Returning.")
        return
    # find the deterministic SB w/highest object_hash in the DB
    dbrec = Superblock.find_highest_deterministic(sb.hex_hash())
    if dbrec:
        dbrec.vote(kzcashd, VoteSignals.funding, VoteOutcomes.yes)
        # any other blocks which match the sb_hash are duplicates, delete them
        for sb in Superblock.select().where(Superblock.sb_hash == sb.hex_hash()):
            if not sb.voted_on(signal=VoteSignals.funding):
                sb.vote(kzcashd, VoteSignals.delete, VoteOutcomes.yes)
        printdbg("VOTED FUNDING FOR SB! We're done here 'til next superblock cycle.")
        return
    else:
        printdbg("The correct superblock wasn't found on the network...")
    # if we are the elected masternode...
    if (kzcashd.we_are_the_winner()):
        printdbg("we are the winner! Submit SB to network")
        sb.submit(kzcashd)
def check_object_validity(kzcashd):
    """Cast validity votes on every locally-known proposal and superblock."""
    for gov_class in (Proposal, Superblock):
        for gov_obj in gov_class.select():
            gov_obj.vote_validity(kzcashd)
def is_kzcashd_port_open(kzcashd):
    """Return True when the kzcashd JSONRPC port answers a governance query.

    On connection failure the underlying error is printed (instructive for
    masternode operators) and False is returned.
    """
    try:
        kzcashd.rpc_command('getgovernanceinfo')
    except (socket.error, JSONRPCException) as e:
        print("%s" % e)
        return False
    return True
def main():
    """Run one sentinel cycle: gate on connectivity/sync/masternode status and
    the scheduler, then sync governance objects, maintain watchdogs and
    attempt superblock creation."""
    kzcashd = KZCashDaemon.from_kzcash_conf(config.kzcash_conf)
    options = process_args()
    # check kzcashd connectivity
    if not is_kzcashd_port_open(kzcashd):
        print("Cannot connect to kzcashd. Please ensure kzcashd is running and the JSONRPC port is open to Sentinel.")
        return
    # check kzcashd sync
    if not kzcashd.is_synced():
        print("kzcashd not synced with network! Awaiting full sync before running Sentinel.")
        return
    # ensure valid masternode
    if not kzcashd.is_masternode():
        print("Invalid Masternode Status, cannot continue.")
        return
    # register a peewee SQL debug logger if SENTINEL_DEBUG is set
    if os.environ.get('SENTINEL_DEBUG', None):
        import logging
        logger = logging.getLogger('peewee')
        logger.setLevel(logging.DEBUG)
        logger.addHandler(logging.StreamHandler())
    if options.bypass:
        # bypassing scheduler, remove the scheduled event
        printdbg("--bypass-schedule option used, clearing schedule")
        Scheduler.clear_schedule()
    if not Scheduler.is_run_time():
        printdbg("Not yet time for an object sync/vote, moving on.")
        return
    if not options.bypass:
        # delay to account for cron minute sync
        Scheduler.delay()
    # running now, so remove the scheduled event
    Scheduler.clear_schedule()
    # ========================================================================
    # general flow:
    # ========================================================================
    #
    # load "gobject list" rpc command data, sync objects into internal database
    perform_kzcashd_object_sync(kzcashd)
    # delete old watchdog objects, create a new if necessary
    watchdog_check(kzcashd)
    # auto vote network objects as valid/invalid
    # check_object_validity(kzcashd)
    # create a Superblock if necessary
    attempt_superblock_creation(kzcashd)
    # schedule the next run
    Scheduler.schedule_next_run()
def signal_handler(signum, frame):
    # Exit on SIGINT: release the run mutex so future runs are not blocked.
    print("Got a signal [%d], cleaning up..." % (signum))
    Transient.delete('SENTINEL_RUNNING')
    sys.exit(1)
def cleanup():
    # atexit hook: release the run mutex (mutex_key is set in the main guard).
    Transient.delete(mutex_key)
def process_args():
    """Parse sentinel's command-line flags.

    :return: argparse namespace with a single boolean ``bypass`` attribute.
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-b', '--bypass-scheduler',
                            dest='bypass',
                            action='store_true',
                            help='Bypass scheduler and sync/vote immediately')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Install cleanup hooks before taking the mutex.
    atexit.register(cleanup)
    signal.signal(signal.SIGINT, signal_handler)
    # ensure another instance of Sentinel is not currently running
    mutex_key = 'SENTINEL_RUNNING'
    # assume that all processes expire after 'timeout_seconds' seconds
    timeout_seconds = 90
    is_running = Transient.get(mutex_key)
    if is_running:
        printdbg("An instance of Sentinel is already running -- aborting.")
        sys.exit(1)
    else:
        Transient.set(mutex_key, misc.now(), timeout_seconds)
    # locked to this instance -- perform main logic here
    main()
    Transient.delete(mutex_key)
| bin/sentinel.py | 8,161 | !/usr/bin/env python sync kzcashd gobject list with our local relational DB backend delete old watchdog objects, create new when necessary delete expired watchdogs now, get all the active ones... none exist, submit a new one to the network create/submit one highest hash wins if remaining Watchdogs exist in the list, vote delete query votes for this specific ebh... if we have voted for this specific ebh, then it's voted on. since we track votes this is all done using joins against the votes table has this masternode voted on *any* superblocks at the given event_block_height? have we voted FUNDING=YES for a superblock for this specific event_block_height? printdbg("ALREADY VOTED! 'til next time!") vote down any new SBs because we've already chosen a winner now return, we're done find the deterministic SB w/highest object_hash in the DB any other blocks which match the sb_hash are duplicates, delete them if we are the elected masternode... vote (in)valid objects test socket open before beginning, display instructive message to MN operators if it's not check kzcashd connectivity check kzcashd sync ensure valid masternode register a handler if SENTINEL_DEBUG is set bypassing scheduler, remove the scheduled event delay to account for cron minute sync running now, so remove the scheduled event ======================================================================== general flow: ======================================================================== load "gobject list" rpc command data, sync objects into internal database delete old watchdog objects, create a new if necessary auto vote network objects as valid/invalid check_object_validity(kzcashd) create a Superblock if necessary schedule the next run ensure another instance of Sentinel is not currently running assume that all processes expire after 'timeout_seconds' seconds locked to this instance -- perform main logic here | 1,902 | en | 0.836922 |
"""
"""
import numpy as np
from ..stellar_ages import _get_lg_age_bin_edges, _get_lgt_birth, T_BIRTH_MIN
from ..stellar_ages import _get_sfh_tables, _get_age_weights_from_tables
from ..sfh_model import DEFAULT_MAH_PARAMS, DEFAULT_MS_PARAMS, DEFAULT_Q_PARAMS
from ..utils import _jax_get_dt_array
FSPS_LG_AGES = np.arange(5.5, 10.2, 0.05) # log10 ages in years
def linear_sfr(t_gyr):
    """Toy star-formation history used by the tests: SFR grows linearly with
    time, returning ``t_gyr * 1e9``."""
    return 1e9 * t_gyr
def linear_smh(t0, t_gyr):
    """Cumulative mass corresponding to :func:`linear_sfr`, integrated from
    ``t0`` to ``t_gyr``: ``0.5e9 * (t_gyr**2 - t0**2)``."""
    delta_t_squared = t_gyr ** 2 - t0 ** 2
    return 1e9 * 0.5 * delta_t_squared
def test_age_bin_edges_have_correct_array_shape():
    """Bin edges must outnumber bin centres by exactly one."""
    age_grid = np.linspace(5.5, 10.5, 50)
    bin_edges = _get_lg_age_bin_edges(age_grid)
    assert bin_edges.size == age_grid.size + 1
def test_age_weights_are_mathematically_sensible():
    """Age weights computed from the default SFH must form a normalised
    distribution with one weight per age bin."""
    t_obs = 11.0
    mah_params = np.array(list(DEFAULT_MAH_PARAMS.values()))
    ms_params = np.array(list(DEFAULT_MS_PARAMS.values()))
    q_params = np.array(list(DEFAULT_Q_PARAMS.values()))
    res = _get_sfh_tables(mah_params, ms_params, q_params)
    t_table, lgt_table, dt_table, sfh_table, logsm_table = res
    # Age grid in log10(Gyr); the table values are log10(yr), hence the -9.
    lgt_ages = np.linspace(5.5, 10.5, 50) - 9.0
    lgt_age_bin_edges = _get_lg_age_bin_edges(lgt_ages)
    lgt_birth_bin_edges = _get_lgt_birth(t_obs, lgt_age_bin_edges)
    age_weights = _get_age_weights_from_tables(
        lgt_birth_bin_edges, lgt_table, logsm_table
    )
    assert age_weights.shape == lgt_ages.shape
    assert np.allclose(age_weights.sum(), 1.0)
def test_age_weights_agree_with_analytical_calculation_of_constant_sfr_weights():
    """For a constant SFR the age weights are proportional to the bin widths;
    the DSPS table-based calculation must reproduce that analytic result."""
    constant_sfr = 1.0 * 1e9  # Msun/Gyr
    # Analytically calculate age distributions for constant SFR (independent of t_obs)
    log_ages_gyr = FSPS_LG_AGES - 9
    ages_gyr = 10 ** log_ages_gyr
    dt_ages = _jax_get_dt_array(ages_gyr)
    mstar_age_bins = dt_ages * constant_sfr
    correct_weights = mstar_age_bins / mstar_age_bins.sum()
    # Calculate age distributions with DSPS
    t_obs = 16.0
    t_table = np.linspace(T_BIRTH_MIN, t_obs, 50_000)
    lgt_table = np.log10(t_table)
    mstar_table = constant_sfr * t_table
    logsm_table = np.log10(mstar_table)
    lgt_age_bin_edges = _get_lg_age_bin_edges(log_ages_gyr)
    lgt_birth_bin_edges = _get_lgt_birth(t_obs, lgt_age_bin_edges)
    dsps_age_weights = _get_age_weights_from_tables(
        lgt_birth_bin_edges, lgt_table, logsm_table
    )
    assert np.allclose(dsps_age_weights, correct_weights, atol=0.01)
def test_age_weights_agree_with_analytical_calculation_of_linear_sfr_weights():
    """For SFR(t) = t the mass formed per age bin follows the analytic
    integral linear_smh; the DSPS table-based weights must agree."""
    t_obs = 16.0
    # Analytically calculate age distributions for SFR(t) = t
    log_ages_gyr = FSPS_LG_AGES - 9
    lgt_age_bin_edges = _get_lg_age_bin_edges(log_ages_gyr)
    t_age_bin_edges_gyr = 10 ** lgt_age_bin_edges
    t_births_bin_edges = t_obs - t_age_bin_edges_gyr
    mstar_at_age_bins = linear_smh(T_BIRTH_MIN, t_births_bin_edges)
    # Minus sign: older bins correspond to earlier birth times (smaller mass).
    dmstar_ages = -np.diff(mstar_at_age_bins)
    correct_weights = dmstar_ages / dmstar_ages.sum()
    # Calculate age distributions with DSPS
    t_table = np.linspace(T_BIRTH_MIN, t_obs, 50_000)
    lgt_table = np.log10(t_table)
    # Skip the first entry, where the cumulative mass is zero (log10 of 0).
    logsm_table = np.log10(linear_smh(T_BIRTH_MIN, t_table[1:]))
    lgt_birth_bin_edges = _get_lgt_birth(t_obs, lgt_age_bin_edges)
    dsps_age_weights = _get_age_weights_from_tables(
        lgt_birth_bin_edges, lgt_table[1:], logsm_table
    )
    assert np.allclose(dsps_age_weights, correct_weights, atol=0.001)
| dsps/tests/test_stellar_ages.py | 3,374 | log10 ages in years Msun/Gyr Analytically calculate age distributions for constant SFR (independent of t_obs) Calculate age distributions with DSPS Analytically calculate age distributions for SFR(t) = t Calculate age distributions with DSPS | 241 | en | 0.848496 |
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 09 21:40:30 2014
@author: yyu

Weekly JBC web publishing job: converts the sermon DOC to PDF, uploads the
PDF and MP3 to the JBC web site over FTP, and backs up the podcast XML.

NOTE(review): Python 2 script (print statements, raw_input).
"""
import JBCWebAutomation as jbc
today=jbc.GetToday()
# to override the date: today='140305' in 'yymmdd' format
###################################################################
print 'Start job for date ' + today +'...'
jbc.ConvertSermonDOC2PDF(today, 1) # 0 for English; 1 presumably selects Chinese (filename suggests) -- confirm
ftp = jbc.JBCFTPConnection()
jbc.uploadJBCWeb(ftp, today, 1, 'pdf')
# MP3 uploads use a '20yy_mmdd' date string rather than 'yymmdd'
mp3today='20'+today[0:2]+'_'+today[2:6]
jbc.uploadJBCWeb(ftp, mp3today, 1, 'mp3')
jbc.backup_podcast_xml(ftp, today)
print 'Finshed job.' # NOTE(review): typo "Finshed" in this user-facing message
ftp.close()
raw_input("Press Enter to continue...")
| JBCMain-Chinese.py | 636 | -*- coding: utf-8 -*- to override the date: today='140305' in 'yymmdd' format 0 for English | 91 | en | 0.734573 |
import numpy as np
from skmultiflow.drift_detection.base_drift_detector import BaseDriftDetector
class ADWIN(BaseDriftDetector):
""" Adaptive Windowing method for concept drift detection.
Parameters
----------
delta : float (default=0.002)
The delta parameter for the ADWIN algorithm.
Notes
-----
ADWIN [1]_ (ADaptive WINdowing) is an adaptive sliding window algorithm
for detecting change, and keeping updated statistics about a data stream.
ADWIN allows algorithms not adapted for drifting data, to be resistant
to this phenomenon.
The general idea is to keep statistics from a window of variable size while
detecting concept drift.
The algorithm will decide the size of the window by cutting the statistics'
window at different points and analysing the average of some statistic over
these two windows. If the absolute value of the difference between the two
averages surpasses a pre-defined threshold, change is detected at that point
and all data before that time is discarded.
References
----------
.. [1] Bifet, Albert, and Ricard Gavalda. "Learning from time-changing data with adaptive
windowing."
In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448.
Society for Industrial and Applied Mathematics, 2007.
Examples
--------
>>> # Imports
>>> import numpy as np
>>> from skmultiflow.drift_detection.adwin import ADWIN
>>> adwin = ADWIN()
>>> # Simulating a data stream as a normal distribution of 1's and 0's
>>> data_stream = np.random.randint(2, size=2000)
>>> # Changing the data concept from index 999 to 2000
>>> for i in range(999, 2000):
... data_stream[i] = np.random.randint(4, high=8)
>>> # Adding stream elements to ADWIN and verifying if drift occurred
>>> for i in range(2000):
... adwin.add_element(data_stream[i])
... if adwin.detected_change():
... print('Change detected in data: ' + str(data_stream[i]) + ' - at index: ' + str(i))
"""
MAX_BUCKETS = 5
def __init__(self, delta=.002):
super().__init__()
# default values affected by init_bucket()
self.delta = delta
self.last_bucket_row = 0
self.list_row_bucket = None
self._total = 0
self._variance = 0
self._width = 0
self.bucket_number = 0
self.__init_buckets()
# other default values
self.mint_min_window_longitude = 10
self.mdbl_delta = .002
self.mint_time = 0
self.mdbl_width = 0
self.detect = 0
self._n_detections = 0
self.detect_twice = 0
self.mint_clock = 32
self.bln_bucket_deleted = False
self.bucket_num_max = 0
self.mint_min_window_length = 5
super().reset()
    def reset(self):
        """ Reset the detector.

        Re-runs ``__init__`` with the current ``delta``, discarding all
        statistics and the whole adaptive window. Returns ``None`` (the
        previous docstring incorrectly claimed it returned ``self``).
        """
        self.__init__(delta=self.delta)
    def get_change(self):
        """ Get drift

        Returns
        -------
        bool
            Whether or not a drift occurred
        """
        # NOTE(review): detected_change() only sets a *local* variable named
        # bln_bucket_deleted (marked noqa: F841 there), so this attribute is
        # never flipped to True by the detector itself — verify intent.
        return self.bln_bucket_deleted
    def reset_change(self):
        # Clear the flag reported by get_change().
        self.bln_bucket_deleted = False
    def set_clock(self, clock):
        # Number of samples to wait between successive cut-point checks.
        self.mint_clock = clock
    def detected_warning_zone(self):
        # ADWIN reports no warning zone; always False.
        return False
    @property
    def _bucket_used_bucket(self):
        """Largest number of buckets ever in use at the same time."""
        return self.bucket_num_max
    @property
    def width(self):
        """Current number of samples inside the adaptive window."""
        return self._width
    @property
    def n_detections(self):
        """How many changes have been detected so far."""
        return self._n_detections
    @property
    def total(self):
        """Sum of the values currently inside the window."""
        return self._total
    @property
    def variance(self):
        # NOTE(review): raises ZeroDivisionError when the window is empty,
        # unlike `estimation`, which guards against width == 0.
        return self._variance / self._width
    @property
    def estimation(self):
        """Mean of the window (0 when the window is empty)."""
        if self._width == 0:
            return 0
        return self._total / self._width
    @estimation.setter
    def estimation(self, value):
        # Intentionally a no-op: the estimate is fully derived from the window.
        pass
    @property
    def width_t(self):
        """Accumulated sum of window widths across detected_change() calls."""
        return self.mdbl_width
    def __init_buckets(self):
        """ Initialize the bucket's List and statistics
        Set all statistics to 0 and create a new bucket List.
        """
        self.list_row_bucket = List()  # List() starts with one empty Item row
        self.last_bucket_row = 0
        self._total = 0
        self._variance = 0
        self._width = 0
        self.bucket_number = 0
    def add_element(self, value):
        """ Add a new element to the sample window.
        Apart from adding the element value to the window, by inserting it in
        the correct bucket, it will also update the relevant statistics, in
        this case the total sum of all values, the window width and the total
        variance.
        Parameters
        ----------
        value: int or float (a numeric value)
        Notes
        -----
        The value parameter can be any numeric value relevant to the analysis
        of concept change. For the learners in this framework we are using
        either 0's or 1's, that are interpreted as follows:
        0: Means the learners prediction was wrong
        1: Means the learners prediction was correct
        This function should be used at every new sample analysed.
        """
        # After a detected change the whole window is dropped before adding.
        if self.in_concept_change:
            self.reset()
        self._width += 1
        # Each new sample enters as a size-1 bucket (variance 0) in the first row.
        self.__insert_element_bucket(0, value, self.list_row_bucket.first)
        incremental_variance = 0
        if self._width > 1:
            # Incremental (Welford-style) update of the unnormalised variance.
            incremental_variance = (self._width - 1) * \
                (value - self._total / (self._width - 1)) * \
                (value - self._total / (self._width - 1)) / self._width
        self._variance += incremental_variance
        self._total += value
        # Merge overflowing rows (buckets summarise 2**row samples each).
        self.__compress_buckets()
def __insert_element_bucket(self, variance, value, node):
node.insert_bucket(value, variance)
self.bucket_number += 1
if self.bucket_number > self.bucket_num_max:
self.bucket_num_max = self.bucket_number
    @staticmethod
    def bucket_size(row):
        """Number of samples summarised by one bucket on row *row* (2**row)."""
        return np.power(2, row)
    def delete_element(self):
        """ Delete an Item from the bucket list.
        Deletes the last Item and updates relevant statistics kept by ADWIN.
        Returns
        -------
        int
            The bucket size from the updated bucket
        """
        # The oldest bucket lives in the deepest (last) row.
        node = self.list_row_bucket.last
        n1 = self.bucket_size(self.last_bucket_row)
        # Remove its contribution from the window statistics. Statement order
        # matters: _total must already exclude the bucket when the variance
        # correction below reads self._total / self._width.
        self._width -= n1
        self._total -= node.get_total(0)
        u1 = node.get_total(0) / n1
        incremental_variance = node.get_variance(0) + n1 * self._width * (
            u1 - self._total / self._width) * (u1 - self._total / self._width) / (
            n1 + self._width)
        self._variance -= incremental_variance
        node.remove_bucket()
        self.bucket_number -= 1
        # Drop the row entirely once it holds no buckets.
        if node.bucket_size_row == 0:
            self.list_row_bucket.remove_from_tail()
            self.last_bucket_row -= 1
        return n1
    def __compress_buckets(self):
        """Merge the two oldest buckets of any overflowing row into the next row.

        Rows are scanned from newest (first) to oldest; a row overflows when
        it holds MAX_BUCKETS + 1 buckets. The merge cascade stops at the
        first row that does not overflow after receiving a merged bucket.
        """
        cursor = self.list_row_bucket.first
        i = 0  # row index; buckets on row i summarise 2**i samples
        while cursor is not None:
            k = cursor.bucket_size_row
            if k == self.MAX_BUCKETS + 1:
                next_node = cursor.get_next_item()
                if next_node is None:
                    # Need a deeper row to receive the merged bucket.
                    self.list_row_bucket.add_to_tail()
                    next_node = cursor.get_next_item()
                    self.last_bucket_row += 1
                # Both merged buckets sit on the same row, hence equal sizes.
                n1 = self.bucket_size(i)
                n2 = self.bucket_size(i)
                u1 = cursor.get_total(0) / n1
                u2 = cursor.get_total(1) / n2
                incremental_variance = n1 * n2 * ((u1 - u2) * (u1 - u2)) / (n1 + n2)
                # NOTE(review): only get_variance(1) is carried over here —
                # get_variance(0) appears to be dropped. This matches upstream
                # skmultiflow, but verify against the ADWIN paper.
                next_node.insert_bucket(
                    cursor.get_total(0) + cursor.get_total(1),
                    cursor.get_variance(1) + incremental_variance)
                self.bucket_number += 1
                cursor.compress_bucket_row(2)
                if next_node.bucket_size_row <= self.MAX_BUCKETS:
                    break
            else:
                break
            cursor = cursor.get_next_item()
            i += 1
    def detected_change(self):
        """ Detects concept change in a drifting data stream.
        The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from
        Time-Changing Data with Adaptive Windowing'. The general idea is to keep
        statistics from a window of variable size while detecting concept drift.
        This function is responsible for analysing different cutting points in
        the sliding window, to verify if there is a significant change in concept.
        Returns
        -------
        bln_change : bool
            Whether change was detected or not
        Notes
        -----
        If change was detected, one should verify the new window size, by reading
        the width property.
        """
        bln_change = False
        bln_exit = False
        bln_bucket_deleted = False
        self.mint_time += 1
        n0 = 0
        # Cut tests are only attempted every mint_clock samples and once the
        # window is wide enough.
        if (self.mint_time % self.mint_clock == 0) and (
                self.width > self.mint_min_window_longitude):
            bln_reduce_width = True
            # Keep re-testing after each deletion until no cut point fires.
            while bln_reduce_width:
                bln_reduce_width = not bln_reduce_width
                bln_exit = False
                n0 = 0
                n1 = self._width
                u0 = 0
                u1 = self.total
                v0 = 0
                v1 = self._variance
                n2 = 0
                u2 = 0
                # Walk buckets from oldest (deepest row) to newest, moving one
                # bucket at a time from sub-window W1 (n1,u1,v1) to W0 (n0,u0,v0).
                cursor = self.list_row_bucket.last
                i = self.last_bucket_row
                while (not bln_exit) and (cursor is not None):
                    for k in range(cursor.bucket_size_row):
                        n2 = self.bucket_size(i)
                        u2 = cursor.get_total(k)
                        if n0 > 0:
                            v0 += cursor.get_variance(k) + 1. * n0 * n2 * \
                                (u0 / n0 - u2 / n2) * (u0 / n0 - u2 / n2) / (n0 + n2)
                        if n1 > 0:
                            v1 -= cursor.get_variance(k) + 1. * n1 * n2 * \
                                (u1 / n1 - u2 / n2) * (u1 / n1 - u2 / n2) / (n1 + n2)
                        n0 += self.bucket_size(i)
                        n1 -= self.bucket_size(i)
                        u0 += cursor.get_total(k)
                        u1 -= cursor.get_total(k)
                        if (i == 0) and (k == cursor.bucket_size_row - 1):
                            # Last bucket reached: no cut point left to test.
                            bln_exit = True
                            break
                        abs_value = 1. * ((u0 / n0) - (u1 / n1))
                        # Cut condition: both sub-windows large enough and the
                        # mean difference exceeds the epsilon bound.
                        if (n1 >= self.mint_min_window_length) \
                                and (n0 >= self.mint_min_window_length) \
                                and (
                                self.__bln_cut_expression(n0, n1, u0, u1, v0, v1, abs_value,
                                                          self.delta)):
                            # NOTE(review): this local shadows the attribute
                            # self.bln_bucket_deleted, which is never updated.
                            bln_bucket_deleted = True  # noqa: F841
                            self.detect = self.mint_time
                            # NOTE(review): self.detect was just assigned a
                            # non-zero value, so this `== 0` branch is dead.
                            if self.detect == 0:
                                self.detect = self.mint_time
                            elif self.detect_twice == 0:
                                self.detect_twice = self.mint_time
                            bln_reduce_width = True
                            bln_change = True
                            if self.width > 0:
                                # Shrink the window by dropping the oldest bucket.
                                n0 -= self.delete_element()
                                bln_exit = True
                                break
                    cursor = cursor.get_previous()
                    i -= 1
        self.mdbl_width += self.width
        if bln_change:
            self._n_detections += 1
        self.in_concept_change = bln_change
        return bln_change
    def __bln_cut_expression(self, n0, n1, u0, u1, v0, v1, abs_value, delta):
        """Cut condition of the ADWIN paper (Bifet & Gavaldà, 2007).

        Returns True when the absolute difference between the two sub-window
        means (``abs_value``) exceeds the threshold ``epsilon`` derived from
        the sub-window sizes, the window variance and ``delta``.
        """
        n = self.width
        dd = np.log(2 * np.log(n) / delta)  # log term of the bound
        v = self.variance
        # Harmonic-mean-style term over the (capped) sub-window sizes.
        m = (1. / (n0 - self.mint_min_window_length + 1)) + \
            (1. / (n1 - self.mint_min_window_length + 1))
        epsilon = np.sqrt(2 * m * v * dd) + 1. * 2 / 3 * dd * m
        return np.absolute(abs_value) > epsilon
class List(object):
    """ A doubly linked list of bucket rows for the ADWIN algorithm.

    Used for storing ADWIN's bucket list. Is composed of Item objects.
    Acts as a linked list, where each element points to its predecessor
    and successor. A new List starts with one empty Item.
    """
    def __init__(self):
        super().__init__()
        self._count = None
        self._first = None
        self._last = None
        self.reset()
        self.add_to_head()

    def reset(self):
        """Empty the list and zero the element count."""
        self._count = 0
        self._first = None
        self._last = None

    def add_to_head(self):
        """Insert a new Item at the head of the list."""
        self._first = Item(self._first, None)
        if self._last is None:
            self._last = self._first
        # BUG FIX: the count was previously only maintained by add_to_tail(),
        # so `size` under-reported the list length after head insertions.
        self._count += 1

    def remove_from_head(self):
        """Drop the head Item and relink the new head."""
        self._first = self._first.get_next_item()
        if self._first is not None:
            self._first.set_previous(None)
        else:
            self._last = None
        self._count -= 1

    def add_to_tail(self):
        """Append a new Item at the tail of the list."""
        self._last = Item(None, self._last)
        if self._first is None:
            self._first = self._last
        self._count += 1

    def remove_from_tail(self):
        """Drop the tail Item and relink the new tail."""
        self._last = self._last.get_previous()
        if self._last is not None:
            self._last.set_next_item(None)
        else:
            self._first = None
        self._count -= 1

    @property
    def first(self):
        return self._first

    @property
    def last(self):
        return self._last

    @property
    def size(self):
        return self._count
class Item(object):
    """ A node of the List structure used by ADWIN.

    Each Item is one "row" of buckets: two parallel arrays holding the
    total and the variance of every bucket on the row, plus links to the
    neighbouring rows.

    Parameters
    ----------
    next_item: Item object
        Reference to the next Item in the List
    previous_item: Item object
        Reference to the previous Item in the List
    """
    def __init__(self, next_item=None, previous_item=None):
        super().__init__()
        self.next = next_item
        self.previous = previous_item
        if next_item is not None:
            next_item.set_previous(self)
        if previous_item is not None:
            previous_item.set_next_item(self)
        self.bucket_size_row = None
        self.max_buckets = ADWIN.MAX_BUCKETS
        self.bucket_total = np.zeros(self.max_buckets + 1, dtype=float)
        self.bucket_variance = np.zeros(self.max_buckets + 1, dtype=float)
        self.reset()

    def reset(self):
        """ Clear every bucket of this row.

        Returns
        -------
        Item
            self
        """
        self.bucket_size_row = 0
        for slot in range(ADWIN.MAX_BUCKETS + 1):
            self.__clear_buckets(slot)
        return self

    def __clear_buckets(self, index):
        self.set_total(0, index)
        self.set_variance(0, index)

    def insert_bucket(self, value, variance):
        """Append one bucket (total=value, variance=variance) to the row."""
        slot = self.bucket_size_row
        self.bucket_size_row = slot + 1
        self.set_total(value, slot)
        self.set_variance(variance, slot)

    def remove_bucket(self):
        """Drop the oldest bucket of the row."""
        self.compress_bucket_row(1)

    def compress_bucket_row(self, num_deleted=1):
        """Shift the bucket arrays left by *num_deleted* slots, clearing the tail."""
        for dst in range(num_deleted, ADWIN.MAX_BUCKETS + 1):
            self.bucket_total[dst - num_deleted] = self.bucket_total[dst]
            self.bucket_variance[dst - num_deleted] = self.bucket_variance[dst]
        for offset in range(1, num_deleted + 1):
            self.__clear_buckets(ADWIN.MAX_BUCKETS - offset + 1)
        self.bucket_size_row -= num_deleted

    # --- link and bucket-array accessors -------------------------------
    def get_next_item(self):
        return self.next

    def set_next_item(self, next_item):
        self.next = next_item

    def get_previous(self):
        return self.previous

    def set_previous(self, previous):
        self.previous = previous

    def get_total(self, index):
        return self.bucket_total[index]

    def get_variance(self, index):
        return self.bucket_variance[index]

    def set_total(self, value, index):
        self.bucket_total[index] = value

    def set_variance(self, value, index):
        self.bucket_variance[index] = value
| src/skmultiflow/drift_detection/adwin.py | 16,563 | Adaptive Windowing method for concept drift detection.
Parameters
----------
delta : float (default=0.002)
The delta parameter for the ADWIN algorithm.
Notes
-----
ADWIN [1]_ (ADaptive WINdowing) is an adaptive sliding window algorithm
for detecting change, and keeping updated statistics about a data stream.
ADWIN allows algorithms not adapted for drifting data, to be resistant
to this phenomenon.
The general idea is to keep statistics from a window of variable size while
detecting concept drift.
The algorithm will decide the size of the window by cutting the statistics'
window at different points and analysing the average of some statistic over
these two windows. If the absolute value of the difference between the two
averages surpasses a pre-defined threshold, change is detected at that point
and all data before that time is discarded.
References
----------
.. [1] Bifet, Albert, and Ricard Gavalda. "Learning from time-changing data with adaptive
windowing."
In Proceedings of the 2007 SIAM international conference on data mining, pp. 443-448.
Society for Industrial and Applied Mathematics, 2007.
Examples
--------
>>> # Imports
>>> import numpy as np
>>> from skmultiflow.drift_detection.adwin import ADWIN
>>> adwin = ADWIN()
>>> # Simulating a data stream as a normal distribution of 1's and 0's
>>> data_stream = np.random.randint(2, size=2000)
>>> # Changing the data concept from index 999 to 2000
>>> for i in range(999, 2000):
... data_stream[i] = np.random.randint(4, high=8)
>>> # Adding stream elements to ADWIN and verifying if drift occurred
>>> for i in range(2000):
... adwin.add_element(data_stream[i])
... if adwin.detected_change():
... print('Change detected in data: ' + str(data_stream[i]) + ' - at index: ' + str(i))
Item to be used by the List object.
The Item object, alongside the List object, are the two main data
structures used for storing the relevant statistics for the ADWIN
algorithm for change detection.
Parameters
----------
next_item: Item object
Reference to the next Item in the List
previous_item: Item object
Reference to the previous Item in the List
A linked list object for ADWIN algorithm.
Used for storing ADWIN's bucket list. Is composed of Item objects.
Acts as a linked list, where each element points to its predecessor
and successor.
Initialize the bucket's List and statistics
Set all statistics to 0 and create a new bucket List.
Add a new element to the sample window.
Apart from adding the element value to the window, by inserting it in
the correct bucket, it will also update the relevant statistics, in
this case the total sum of all values, the window width and the total
variance.
Parameters
----------
value: int or float (a numeric value)
Notes
-----
The value parameter can be any numeric value relevant to the analysis
of concept change. For the learners in this framework we are using
either 0's or 1's, that are interpreted as follows:
0: Means the learners prediction was wrong
1: Means the learners prediction was correct
This function should be used at every new sample analysed.
Delete an Item from the bucket list.
Deletes the last Item and updates relevant statistics kept by ADWIN.
Returns
-------
int
The bucket size from the updated bucket
Detects concept change in a drifting data stream.
The ADWIN algorithm is described in Bifet and Gavaldà's 'Learning from
Time-Changing Data with Adaptive Windowing'. The general idea is to keep
statistics from a window of variable size while detecting concept drift.
This function is responsible for analysing different cutting points in
the sliding window, to verify if there is a significant change in concept.
Returns
-------
bln_change : bool
Whether change was detected or not
Notes
-----
If change was detected, one should verify the new window size, by reading
the width property.
Get drift
Returns
-------
bool
Whether or not a drift occurred
Reset detectors
Resets statistics and adwin's window.
Returns
-------
ADWIN
self
Reset the algorithm's statistics and window
Returns
-------
ADWIN
self
default values affected by init_bucket() other default values noqa: F841 | 4,195 | en | 0.817724 |
from django.contrib import admin
from django.db import transaction
from zinc.models import Policy, PolicyMember
class PolicyMemberInline(admin.TabularInline):
    """Tabular inline for editing a Policy's members on the policy page."""
    model = PolicyMember
    readonly_fields = ('ip_enabled',)
    extra = 1
    verbose_name = 'member'
    verbose_name_plural = 'members'

    def ip_enabled(self, obj):
        # Surface the enabled flag of the member's IP as a boolean column.
        return obj.ip.enabled
    ip_enabled.boolean = True
@admin.register(Policy)
class PolicyAdmin(admin.ModelAdmin):
    """Admin for Policy with inline member editing and configuration warnings."""
    fields = ('name', 'routing', 'ttl')
    readonly_fields = ()
    list_display = ('__str__', 'routing', 'regions', 'status')
    list_filter = ('routing', 'members__region')
    inlines = (PolicyMemberInline,)
    exclude = ('members',)

    def get_queryset(self, request):
        qs = super().get_queryset(request)
        # Prefetch members so regions()/status() don't issue one query per row.
        qs = qs.prefetch_related('members')
        return qs

    def regions(self, obj):
        # get_queryset prefetches related policy members so iterating over
        # objects is ok because we are iterating over already fetched data
        return ', '.join(sorted({m.region for m in obj.members.all()}))

    @transaction.atomic
    def save_model(self, request, obj, form, change):
        """Save the policy and notify it about which fields changed."""
        rv = super().save_model(request, obj, form, change)
        obj.change_trigger(form.changed_data)
        return rv

    def status(self, obj):
        """Render configuration warnings for the changelist (HTML string)."""
        warnings = []
        if obj.routing == 'latency':
            members_by_region = {}
            for member in obj.members.all():
                members_by_region.setdefault(member.region, []).append(member)
            if len(members_by_region) <= 1:
                warnings.append('✖ Latency routed policy should span multiple regions!')
            for region, members in members_by_region.items():
                if len([m for m in members if m.weight > 0]) == 0:
                    warnings.append(
                        '✖ All members of region {} have weight zero!'.format(region))
        elif obj.routing == 'weighted':
            active_members = [m for m in obj.members.all() if m.weight > 0]
            if len(active_members) == 0:
                warnings.append('✖ All members have weight zero!')
        if warnings:
            # BUG FIX: the closing tag was '</red>', which is not a valid tag
            # and left the opening <span> unclosed in the rendered changelist.
            return '<span style="color: red">{}</span>'.format("<br>".join(warnings))
        else:
            return "✔ ok"
    status.allow_tags = True  # NOTE: allow_tags was removed in Django 2.0; use format_html() there.
    status.short_description = 'Status'
| zinc/admin/policy.py | 2,411 | get_queryset prefetches related policy members so iterating over objects is ok because we are iterating over already fetched data | 129 | en | 0.893911 |
from django.http import HttpResponse
from django.shortcuts import render

from rango.models import Category, Page
def index(request):
    """Home page: show the five most-liked categories."""
    top_categories = Category.objects.order_by('-likes')[:5]
    context_dict = {
        'boldmessage': 'Crunchy, creamy, cookie, candy, cupcake!',
        'categories': top_categories,
    }
    # Render the response and send it back!
    return render(request, 'rango/index.html', context=context_dict)
def about(request):
    """About page with a hard-coded attribution message."""
    message = 'This tutorial has been put together by Decklin Johnston'
    return render(request, 'rango/about.html', context={'boldmessage': message})
def show_category(request, category_name_slug):
    """Show one category and its pages.

    Looks the category up by slug; when it does not exist, the template
    receives ``None`` for both 'category' and 'pages'.
    """
    context_dict = {}
    try:
        category = Category.objects.get(slug=category_name_slug)
        # BUG FIX: `Page` was referenced here but never imported, so this
        # line raised NameError instead of listing the category's pages
        # (the import is added to the module's import block).
        pages = Page.objects.filter(category=category)
        context_dict['pages'] = pages
        context_dict['category'] = category
    except Category.DoesNotExist:
        context_dict['pages'] = None
        context_dict['category'] = None
    return render(request, 'rango/category.html', context=context_dict)
import numpy as np
from math import cos, sin, atan2
from errors_exceptions import OpenRAVEException
from openravepy import quatFromAxisAngle, matrixFromPose, poseFromMatrix, \
axisAngleFromRotationMatrix, KinBody, GeometryType, RaveCreateRobot, \
RaveCreateKinBody, TriMesh, Environment, DOFAffine, IkParameterization, IkParameterizationType, \
IkFilterOptions, matrixFromAxisAngle, quatFromRotationMatrix
from core.util_classes.robots import Robot, PR2, Baxter, Washer
from core.util_classes.items import Item, Box, Can, BlueCan, RedCan, Circle, BlueCircle, RedCircle, GreenCircle, Obstacle, Wall, Table, Basket
WALL_THICKNESS = 1  # thickness parameter for wall segments built in create_wall()/get_wall_dims()
class OpenRAVEBody(object):
    def __init__(self, env, name, geom):
        """Wrap (or create) the OpenRAVE body called *name* in *env*.

        If no KinBody/Robot with this name exists yet, one is built from
        *geom* (a Robot subclass or an Item subclass); otherwise the
        existing body is wrapped.
        """
        assert env is not None
        self.name = name
        self._env = env
        self._geom = geom
        if env.GetKinBody(name) == None and env.GetRobot(name) == None:
            if isinstance(geom, Robot):
                self._add_robot(geom)
            elif isinstance(geom, Item):
                self._add_item(geom)
            else:
                raise OpenRAVEException("Geometry not supported for %s for OpenRAVEBody"%geom)
        elif env.GetKinBody(name) != None:
            self.env_body = env.GetKinBody(name)
        else:
            self.env_body = env.GetRobot(name)
        self.set_transparency(0.5)  # default to a semi-transparent rendering
    def delete(self):
        """Remove the wrapped body from the environment."""
        self._env.Remove(self.env_body)
    def set_transparency(self, transparency):
        """Apply *transparency* to every geometry of every link of the body."""
        for link in self.env_body.GetLinks():
            for geom in link.GetGeometries():
                geom.SetTransparency(transparency)
    def _add_robot(self, geom):
        """Load the robot model from geom.shape (XML file) and register it."""
        self.env_body = self._env.ReadRobotXMLFile(geom.shape)
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
        geom.setup(self.env_body)  # let the geometry object configure the loaded robot
def _add_item(self, geom):
try:
fun_name = "self._add_{}".format(geom._type)
eval(fun_name)(geom)
except:
self._add_obj(geom)
def _add_circle(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.env_body = OpenRAVEBody.create_cylinder(self._env, self.name, np.eye(4),
[geom.radius, 2], color)
self._env.AddKinBody(self.env_body)
def _add_can(self, geom):
color = [1,0,0]
if hasattr(geom, "color") and geom.color == 'blue':
color = [0, 0, 1]
elif hasattr(geom, "color") and geom.color == 'green':
color = [0, 1, 0]
elif hasattr(geom, "color") and geom.color == 'red':
color = [1, 0, 0]
self.env_body = OpenRAVEBody.create_cylinder(self._env, self.name, np.eye(4),
[geom.radius, geom.height], color)
self._env.AddKinBody(self.env_body)
def _add_obstacle(self, geom):
obstacles = np.matrix('-0.576036866359447, 0.918128654970760, 1;\
-0.806451612903226,-1.07017543859649, 1;\
1.01843317972350,-0.988304093567252, 1;\
0.640552995391705,0.906432748538011, 1;\
-0.576036866359447, 0.918128654970760, -1;\
-0.806451612903226,-1.07017543859649, -1;\
1.01843317972350,-0.988304093567252, -1;\
0.640552995391705,0.906432748538011, -1')
body = RaveCreateKinBody(self._env, '')
vertices = np.array(obstacles)
indices = np.array([[0, 1, 2], [2, 3, 0], [4, 5, 6], [6, 7, 4], [0, 4, 5],
[0, 1, 5], [1, 2, 5], [5, 6, 2], [2, 3, 6], [6, 7, 3],
[0, 3, 7], [0, 4, 7]])
body.InitFromTrimesh(trimesh=TriMesh(vertices, indices), draw=True)
body.SetName(self.name)
for link in body.GetLinks():
for geom in link.GetGeometries():
geom.SetDiffuseColor((.9, .9, .9))
self.env_body = body
self._env.AddKinBody(body)
    def _add_box(self, geom):
        """Create a box KinBody from geom.dim."""
        infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, geom.dim, [0.5, 0.2, 0.1])
        self.env_body = RaveCreateKinBody(self._env,'')
        self.env_body.InitFromGeometries([infobox])
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
    def _add_sphere(self, geom):
        """Create a sphere KinBody of radius geom.radius."""
        infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Sphere, [geom.radius], [0, 0, 1])
        self.env_body = RaveCreateKinBody(self._env,'')
        self.env_body.InitFromGeometries([infobox])
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
    def _add_wall(self, geom):
        """Create a wall body for geom.wall_type via create_wall()."""
        self.env_body = OpenRAVEBody.create_wall(self._env, geom.wall_type)
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
    def _add_obj(self, geom):
        """Generic fallback: load the body from the XML file at geom.shape."""
        self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
    def _add_table(self, geom):
        """Create a table body from the Table geometry via create_table()."""
        self.env_body = OpenRAVEBody.create_table(self._env, geom)
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
    def _add_basket(self, geom):
        """Load the basket mesh from geom.shape (same mechanism as _add_obj)."""
        self.env_body = self._env.ReadKinBodyXMLFile(geom.shape)
        self.env_body.SetName(self.name)
        self._env.Add(self.env_body)
    def set_pose(self, base_pose, rotation = [0, 0, 0]):
        """Set the body's world transform from a pose (and optional rotation).

        Interpretation depends on the geometry: robots (except Washer) use
        (x, y, yaw); 2-vectors are (x, y); otherwise (x, y, z) with
        *rotation* as (yaw, pitch, roll). NaNs in either argument make
        this a no-op.
        """
        trans = None
        if np.any(np.isnan(base_pose)) or np.any(np.isnan(rotation)):
            return
        if isinstance(self._geom, Robot) and not isinstance(self._geom, Washer):
            trans = OpenRAVEBody.base_pose_to_mat(base_pose)
        elif len(base_pose) == 2:
            trans = OpenRAVEBody.base_pose_2D_to_mat(base_pose)
        else:
            trans = OpenRAVEBody.transform_from_obj_pose(base_pose, rotation)
        self.env_body.SetTransform(trans)
def set_dof(self, dof_value_map):
"""
dof_value_map: A dict that maps robot attribute name to a list of corresponding values
"""
# make sure only sets dof for robot
# assert isinstance(self._geom, Robot)
if not isinstance(self._geom, Robot): return
# Get current dof value for each joint
dof_val = self.env_body.GetActiveDOFValues()
for k, v in dof_value_map.items():
if k not in self._geom.dof_map or np.any(np.isnan(v)): continue
inds = self._geom.dof_map[k]
try:
dof_val[inds] = v
except IndexError:
print(('\n\n\nBad index in set dof:', inds, k, v, self._geom, '\n\n\n'))
# Set new DOF value to the robot
self.env_body.SetActiveDOFValues(dof_val)
def _set_active_dof_inds(self, inds = None):
"""
Set active dof index to the one we are interested
This function is implemented to simplify jacobian calculation in the CollisionPredicate
inds: Optional list of index specifying dof index we are interested in
"""
robot = self.env_body
if inds == None:
dof_inds = np.ndarray(0, dtype=np.int)
if robot.GetJoint("torso_lift_joint") != None:
dof_inds = np.r_[dof_inds, robot.GetJoint("torso_lift_joint").GetDOFIndex()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetArmIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("leftarm").GetGripperIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetArmIndices()]
dof_inds = np.r_[dof_inds, robot.GetManipulator("rightarm").GetGripperIndices()]
robot.SetActiveDOFs(
dof_inds,
DOFAffine.X + DOFAffine.Y + DOFAffine.RotationAxis,
[0, 0, 1])
else:
robot.SetActiveDOFs(inds)
@staticmethod
def create_cylinder(env, body_name, t, dims, color=[0, 1, 1]):
infocylinder = OpenRAVEBody.create_body_info(GeometryType.Cylinder, dims, color)
if type(env) != Environment:
# import ipdb; ipdb.set_trace()
print("Environment object is not valid")
cylinder = RaveCreateKinBody(env, '')
cylinder.InitFromGeometries([infocylinder])
cylinder.SetName(body_name)
cylinder.SetTransform(t)
return cylinder
    @staticmethod
    def create_box(env, name, transform, dims, color=[0,0,1]):
        """Build a box KinBody with the given transform; not added to *env*.

        *dims* follows OpenRAVE's Box geometry convention (callers in this
        file pass half-extents, e.g. create_table's dim/2 values).
        """
        infobox = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, dims, color, 0, True)
        box = RaveCreateKinBody(env,'')
        box.InitFromGeometries([infobox])
        box.SetName(name)
        box.SetTransform(transform)
        return box
    @staticmethod
    def create_sphere(env, name, transform, dims, color=[0,0,1]):
        """Build a sphere KinBody (*dims* is [radius]); not added to *env*."""
        infobox = OpenRAVEBody.create_body_info(GeometryType.Sphere, dims, color)
        sphere = RaveCreateKinBody(env,'')
        sphere.InitFromGeometries([infobox])
        sphere.SetName(name)
        sphere.SetTransform(transform)
        return sphere
@staticmethod
def create_body_info(body_type, dims, color, transparency = 0.8, visible = True):
infobox = KinBody.Link.GeometryInfo()
infobox._type = body_type
infobox._vGeomData = dims
infobox._bVisible = True
infobox._fTransparency = transparency
infobox._vDiffuseColor = color
return infobox
@staticmethod
def create_wall(env, wall_type):
component_type = KinBody.Link.GeomType.Box
wall_color = [0.5, 0.2, 0.1]
box_infos = []
if wall_type == 'closet':
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
else:
raise NotImplemented
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
raise NotImplemented('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims = [dim_x, dim_y, 1]
box_info = OpenRAVEBody.create_body_info(component_type, dims, wall_color)
box_info._t = transform
box_infos.append(box_info)
wall = RaveCreateKinBody(env, '')
wall.InitFromGeometries(box_infos)
return wall
@staticmethod
def get_wall_dims(wall_type='closet'):
wall_endpoints = [[-6.0,-8.0],[-6.0,4.0],[1.9,4.0],[1.9,8.0],[5.0,8.0],[5.0,4.0],[13.0,4.0],[13.0,-8.0],[-6.0,-8.0]]
dims = []
for i, (start, end) in enumerate(zip(wall_endpoints[0:-1], wall_endpoints[1:])):
dim_x, dim_y = 0, 0
thickness = WALL_THICKNESS
if start[0] == end[0]:
ind_same, ind_diff = 0, 1
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = thickness, length/2 + thickness
elif start[1] == end[1]:
ind_same, ind_diff = 1, 0
length = abs(start[ind_diff] - end[ind_diff])
dim_x, dim_y = length/2 + thickness, thickness
else:
raise NotImplemented('Can only create axis-aligned walls')
transform = np.eye(4)
transform[ind_same, 3] = start[ind_same]
if start[ind_diff] < end[ind_diff]:
transform[ind_diff, 3] = start[ind_diff] + length/2
else:
transform[ind_diff, 3] = end[ind_diff] + length/2
dims.append(([dim_x, dim_y, 1], transform))
return dims
    @staticmethod
    def create_basket_col(env):
        """Build a five-panel collision approximation of a basket.

        Two long side panels, two short side panels and a bottom plate are
        combined into a single robot body (not added to *env*).
        """
        long_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
        long_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.15,.015], [0, 0.75, 1])
        short_info1 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
        short_info2 = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.015,.15,.2], [0, 0.75, 1])
        bottom_info = OpenRAVEBody.create_body_info(KinBody.Link.GeomType.Box, [.3,.015,.2], [0, 0.75, 1])
        # Pose each panel; the small rotation angles tilt the side panels.
        long_info1._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,0.208],[0,0,0.055])
        long_info2._t = OpenRAVEBody.transform_from_obj_pose([0,-0.118,-0.208],[0,0,-0.055])
        short_info1._t = OpenRAVEBody.transform_from_obj_pose([0.309,-0.118,0],[-0.055,0,0])
        short_info2._t = OpenRAVEBody.transform_from_obj_pose([-0.309,-0.118,0],[0.055,0,0])
        bottom_info._t = OpenRAVEBody.transform_from_obj_pose([0,-0.25,0],[0,0,0])
        basket = RaveCreateRobot(env, '')
        basket.InitFromGeometries([long_info1, long_info2, short_info1, short_info2, bottom_info])
        return basket
    @staticmethod
    def create_table(env, geom):
        """Build a table body (tabletop + 4 legs, optional back plate).

        Dimensions come from the Table geometry object; all boxes use
        half-extent dims (hence the /2 everywhere). The body is created
        but not added to *env*.
        """
        thickness = geom.thickness
        leg_height = geom.leg_height
        back = geom.back
        dim1, dim2 = geom.table_dim
        legdim1, legdim2 = geom.leg_dim
        table_color = [0.5, 0.2, 0.1]
        component_type = KinBody.Link.GeomType.Box
        tabletop = OpenRAVEBody.create_body_info(component_type, [dim1/2, dim2/2, thickness/2], table_color)
        # Four legs, one per corner, hanging below the tabletop.
        leg1 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
        leg1._t[0, 3] = dim1/2 - legdim1/2
        leg1._t[1, 3] = dim2/2 - legdim2/2
        leg1._t[2, 3] = -leg_height/2 - thickness/2
        leg2 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
        leg2._t[0, 3] = dim1/2 - legdim1/2
        leg2._t[1, 3] = -dim2/2 + legdim2/2
        leg2._t[2, 3] = -leg_height/2 - thickness/2
        leg3 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
        leg3._t[0, 3] = -dim1/2 + legdim1/2
        leg3._t[1, 3] = dim2/2 - legdim2/2
        leg3._t[2, 3] = -leg_height/2 - thickness/2
        leg4 = OpenRAVEBody.create_body_info(component_type, [legdim1/2, legdim2/2, leg_height/2], table_color)
        leg4._t[0, 3] = -dim1/2 + legdim1/2
        leg4._t[1, 3] = -dim2/2 + legdim2/2
        leg4._t[2, 3] = -leg_height/2 - thickness/2
        if back:
            # Thin plate closing off one side, spanning the table width.
            back_plate = OpenRAVEBody.create_body_info(component_type, [legdim1/10, dim2/2, leg_height-thickness/2], table_color)
            back_plate._t[0, 3] = dim1/2 - legdim1/10
            back_plate._t[1, 3] = 0
            back_plate._t[2, 3] = -leg_height/2 - thickness/4
        table = RaveCreateRobot(env, '')
        if not back:
            table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4])
        else:
            table.InitFromGeometries([tabletop, leg1, leg2, leg3, leg4, back_plate])
        return table
@staticmethod
def base_pose_2D_to_mat(pose):
# x, y = pose
assert len(pose) == 2
x = pose[0]
y = pose[1]
rot = 0
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, 0]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def base_pose_3D_to_mat(pose):
# x, y, z = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
z = pose[2]
rot = 0
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, z]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def mat_to_base_pose_2D(mat):
pose = poseFromMatrix(mat)
x = pose[4]
y = pose[5]
return np.array([x,y])
@staticmethod
def base_pose_to_mat(pose):
# x, y, rot = pose
assert len(pose) == 3
x = pose[0]
y = pose[1]
rot = pose[2]
q = quatFromAxisAngle((0, 0, rot)).tolist()
pos = [x, y, 0]
# pos = np.vstack((x,y,np.zeros(1)))
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def angle_pose_to_mat(pose):
assert len(pose) == 1
q = quatFromAxisAngle((0, 0, pose)).tolist()
matrix = matrixFromPose(q + pos)
return matrix
@staticmethod
def mat_to_base_pose(mat):
pose = poseFromMatrix(mat)
x = pose[4]
y = pose[5]
rot = axisAngleFromRotationMatrix(mat)[2]
return np.array([x,y,rot])
@staticmethod
def obj_pose_from_transform(transform):
trans = transform[:3,3]
rot_matrix = transform[:3,:3]
yaw, pitch, roll = OpenRAVEBody._ypr_from_rot_matrix(rot_matrix)
# ipdb.set_trace()
return np.array((trans[0], trans[1], trans[2], yaw, pitch, roll))
@staticmethod
def transform_from_obj_pose(pose, rotation = np.array([0,0,0])):
x, y, z = pose
alpha, beta, gamma = rotation
Rz, Ry, Rx = OpenRAVEBody._axis_rot_matrices(pose, rotation)
rot_mat = np.dot(Rz, np.dot(Ry, Rx))
matrix = np.eye(4)
matrix[:3,:3] = rot_mat
matrix[:3,3] = [x,y,z]
return matrix
@staticmethod
def _axis_rot_matrices(pose, rotation):
x, y, z = pose
alpha, beta, gamma = rotation
Rz_2d = np.array([[cos(alpha), -sin(alpha)], [sin(alpha), cos(alpha)]])
Ry_2d = np.array([[cos(beta), sin(beta)], [-sin(beta), cos(beta)]])
Rx_2d = np.array([[cos(gamma), -sin(gamma)], [sin(gamma), cos(gamma)]])
I = np.eye(3)
Rz = I.copy()
Rz[:2,:2] = Rz_2d
Ry = I.copy()
Ry[[[0],[2]],[0,2]] = Ry_2d
Rx = I.copy()
Rx[1:3,1:3] = Rx_2d
# ipdb.set_trace()
return Rz, Ry, Rx
@staticmethod
def _ypr_from_rot_matrix(r):
# alpha
yaw = atan2(r[1,0], r[0,0])
# beta
pitch = atan2(-r[2,0],np.sqrt(r[2,1]**2+r[2,2]**2))
# gamma
roll = atan2(r[2,1], r[2,2])
# ipdb.set_trace()
return (yaw, pitch, roll)
@staticmethod
def get_ik_transform(pos, rot, right_arm = True):
trans = OpenRAVEBody.transform_from_obj_pose(pos, rot)
# Openravepy flip the rotation axis by 90 degree, thus we need to change it back
if right_arm:
rot_mat = matrixFromAxisAngle([0, np.pi/2, 0])
else:
rot_mat = matrixFromAxisAngle([0, -np.pi/2, 0])
trans_mat = trans[:3, :3].dot(rot_mat[:3, :3])
trans[:3, :3] = trans_mat
return trans
def get_ik_arm_pose(self, pos, rot):
# assert isinstance(self._geom, PR2)
solutions = self.get_ik_from_pose(pos, rot, 'rightarm_torso')
return solutions
def get_ik_from_pose(self, pos, rot, manip_name, use6d=True):
trans = OpenRAVEBody.get_ik_transform(pos, rot)
solutions = self.get_ik_solutions(manip_name, trans, use6d)
return solutions
def get_ik_solutions(self, manip_name, trans, use6d=True):
manip = self.env_body.GetManipulator(manip_name)
if use6d:
iktype = IkParameterizationType.Transform6D
else:
iktype = IkParameterizationType.Translation3D
solutions = manip.FindIKSolutions(IkParameterization(trans, iktype),IkFilterOptions.CheckEnvCollisions)
return solutions
def get_close_ik_solution(self, manip_name, trans, dof_map=None):
if dof_map is not None:
self.set_dof(dof_map)
manip = self.env_body.GetManipulator(manip_name)
iktype = IkParameterizationType.Transform6D
ik_param = IkParameterization(trans, iktype)
solution = manip.FindIKSolution(ik_param, IkFilterOptions.IgnoreSelfCollisions)
return solution
def fwd_kinematics(self, manip_name, dof_map=None, mat_result=False):
if dof_map is not None:
self.set_dof(dof_map)
trans = self.env_body.GetLink(manip_name).GetTransform()
if mat_result:
return trans
pos = trans[:3, 3]
quat = quatFromRotationMatrix(trans[:3, :3])
return {'pos': pos, 'quat': quat}
    def param_fwd_kinematics(self, param, manip_names, t, mat_result=False):
        """Set the robot's DOFs from `param`'s attribute values at timestep `t`,
        then run fwd_kinematics for every manipulator in `manip_names`.

        Returns a dict mapping manipulator name -> fwd_kinematics result, or
        None when this body's geometry is not a Robot.
        """
        if not isinstance(self._geom, Robot): return
        attrs = list(param._attr_types.keys())
        dof_val = self.env_body.GetActiveDOFValues()
        for attr in attrs:
            # Only attributes that map onto robot DOF indices contribute.
            if attr not in self._geom.dof_map: continue
            val = getattr(param, attr)[:, t]
            # Skip attributes whose value at this timestep is undetermined (NaN).
            if np.any(np.isnan(val)): continue
            inds = self._geom.dof_map[attr]
            dof_val[inds] = val
        self.env_body.SetActiveDOFValues(dof_val)
        result = {}
        for manip_name in manip_names:
            result[manip_name] = self.fwd_kinematics(manip_name, mat_result=mat_result)
        return result
| opentamp/src/core/util_classes/no_openrave_body.py | 21,682 | Set active dof index to the one we are interested
This function is implemented to simplify jacobian calculation in the CollisionPredicate
inds: Optional list of index specifying dof index we are interested in
dof_value_map: A dict that maps robot attribute name to a list of corresponding values
make sure only sets dof for robot assert isinstance(self._geom, Robot) Get current dof value for each joint Set new DOF value to the robot import ipdb; ipdb.set_trace() x, y = pose pos = np.vstack((x,y,np.zeros(1))) x, y, z = pose pos = np.vstack((x,y,np.zeros(1))) x, y, rot = pose pos = np.vstack((x,y,np.zeros(1))) ipdb.set_trace() ipdb.set_trace() alpha beta gamma ipdb.set_trace() Openravepy flip the rotation axis by 90 degree, thus we need to change it back assert isinstance(self._geom, PR2) | 797 | en | 0.621923 |
# coding: utf-8
"""
Account API
The <b>Account API</b> gives sellers the ability to configure their eBay seller accounts, including the seller's policies (the Fulfillment Policy, Payment Policy, and Return Policy), opt in and out of eBay seller programs, configure sales tax tables, and get account information. <br><br>For details on the availability of the methods in this API, see <a href=\"/api-docs/sell/account/overview.html#requirements\">Account API requirements and restrictions</a>. # noqa: E501
OpenAPI spec version: v1.6.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RateTableResponse(object):
    """Response model holding seller-defined shipping rate tables.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Attributes:
        swagger_types (dict): attribute name -> attribute type.
        attribute_map (dict): attribute name -> JSON key in the API definition.
    """
    swagger_types = {
        'rate_tables': 'list[RateTable]'
    }

    attribute_map = {
        'rate_tables': 'rateTables'
    }

    def __init__(self, rate_tables=None):  # noqa: E501
        """RateTableResponse - a model defined in Swagger"""  # noqa: E501
        self._rate_tables = None
        self.discriminator = None
        if rate_tables is not None:
            self.rate_tables = rate_tables

    @property
    def rate_tables(self):
        """The seller-defined shipping rate tables.  # noqa: E501

        :return: list[RateTable] describing each rate table, or None.
        :rtype: list[RateTable]
        """
        return self._rate_tables

    @rate_tables.setter
    def rate_tables(self, rate_tables):
        """Set the seller-defined shipping rate tables.

        :param rate_tables: list[RateTable]
        :type: list[RateTable]
        """
        self._rate_tables = rate_tables

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [item.to_dict() if hasattr(item, "to_dict") else item
                                for item in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, "to_dict") else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        if issubclass(RateTableResponse, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """True when `other` is a RateTableResponse with identical state."""
        if not isinstance(other, RateTableResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """True when the two objects differ."""
        return not self == other
| src/ebay_rest/api/sell_account/models/rate_table_response.py | 3,812 | NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Returns true if both objects are equal
RateTableResponse - a model defined in Swagger
Returns true if both objects are not equal
For `print` and `pprint`
Gets the rate_tables of this RateTableResponse. # noqa: E501
A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501
:return: The rate_tables of this RateTableResponse. # noqa: E501
:rtype: list[RateTable]
Sets the rate_tables of this RateTableResponse.
A list of elements that provide information on the seller-defined shipping rate tables. # noqa: E501
:param rate_tables: The rate_tables of this RateTableResponse. # noqa: E501
:type: list[RateTable]
Returns the model properties as a dict
Returns the string representation of the model
Account API
The <b>Account API</b> gives sellers the ability to configure their eBay seller accounts, including the seller's policies (the Fulfillment Policy, Payment Policy, and Return Policy), opt in and out of eBay seller programs, configure sales tax tables, and get account information. <br><br>For details on the availability of the methods in this API, see <a href="/api-docs/sell/account/overview.html#requirements">Account API requirements and restrictions</a>. # noqa: E501
OpenAPI spec version: v1.6.3
Generated by: https://github.com/swagger-api/swagger-codegen.git
coding: utf-8 noqa: F401 noqa: E501 noqa: E501 | 1,486 | en | 0.66982 |
import numpy as np
import sys, os, re, gzip, struct
import random
import h5py
import copy
from keras import backend as K
from keras.utils import Sequence
import keras.utils
import tensorflow as tf
import multi_utils
import mat_utils
class NbestFixedDataGenerator(Sequence):
    """Keras Sequence that yields block-split utterance features from an HDF5
    file and, in 'train' mode, the 1-best/n-best label tensors with their LM
    scores.

    Bug fixes vs. the original:
      * __len__ applied np.ceil before dividing, so the batch count was wrong;
      * every key was appended twice to self.keys;
      * the `mod` constructor argument was ignored (hard-coded to 1);
      * typos (np.zros, muti_utils, self.extra1/extra2, denom_lmlscores,
        number_blocked_labels vs numer_blocked_labels, number_labels) made
        train mode raise at runtime;
      * the numer_labels/denom_labels arrays were clobbered by the raw label
        sequences before being filled;
      * the second pass did not pad the feature matrix like the first pass,
        so block counts could disagree.
    """

    def __init__(self, file, key_file, batch_size=64, feat_dim=40, n_labels=1024,
                 procs=10, extras1=10, extras2=10, num_extras1=1, nbest=100,
                 mode='train', shuffle=False, mod=1):
        self.file = file
        self.batch_size = batch_size
        self.feat_dim = feat_dim
        self.n_labels = n_labels
        self.procs = procs
        self.extras1 = extras1
        self.extras2 = extras2
        self.num_extras1 = num_extras1
        self.nbest = nbest
        self.shuffle = shuffle
        self.keys = []
        self.sorted_keys = []
        self.mode = mode
        self.mod = mod  # bug fix: was hard-coded to 1, ignoring the argument
        self.h5fd = h5py.File(self.file, 'r')
        self.n_samples = len(self.h5fd.keys())
        if key_file is not None:
            # An external key file fixes the iteration order.
            with open(key_file, 'r') as f:
                for line in f:
                    self.sorted_keys.append(line.strip())
        for key in self.h5fd.keys():
            self.keys.append(key)  # bug fix: keys were appended twice before
        if len(self.sorted_keys) > 0:
            self.keys = self.sorted_keys

    def __len__(self):
        # bug fix: ceil must wrap the whole division, not just n_samples
        return int(np.ceil(self.n_samples / self.batch_size))

    def __getitem__(self, index, return_keys=False):
        batch_keys = [self.keys[k] for k in range(index * self.batch_size,
                                                  min((index + 1) * self.batch_size,
                                                      len(self.keys)))]
        if self.mode == 'train':
            x, mask, y = self.__data_generation(batch_keys)
            if return_keys:
                return x, mask, y, batch_keys
            return x, mask, y
        x, mask = self.__data_generation(batch_keys)
        if return_keys:
            return x, mask, batch_keys
        return x, mask

    def on_epoch_end(self):
        if self.shuffle:
            random.shuffle(self.keys)

    def __data_generation(self, batch_keys):
        # First pass: find the largest block/frame counts in the batch so all
        # utterances can be padded to a common shape.
        max_num_blocks = 0
        max_num_frames = 0
        for key in batch_keys:
            mat = self.h5fd[key + '/data'][()]
            mat = mat_utils.pad_mat(mat, self.mod)
            ex_blocks, ex_frames = multi_utils.expected_num_blocks(
                mat, self.procs, self.extras1, self.extras2, self.num_extras1)
            max_num_blocks = max(max_num_blocks, ex_blocks)
            max_num_frames = max(max_num_frames, ex_frames)

        block_len = self.procs + max(self.extras1, self.extras2)
        input_mat = np.zeros((len(batch_keys), max_num_blocks, block_len, self.feat_dim))
        input_mask = np.zeros((len(batch_keys), max_num_blocks, block_len, 1))
        if self.mode == 'train':
            numer_labels = np.zeros((len(batch_keys), max_num_blocks,
                                     block_len, self.n_labels + 1))
            numer_lmscores = np.zeros((len(batch_keys), 1))  # bug fix: np.zros
            denom_labels = np.zeros((len(batch_keys), self.nbest, max_num_blocks,
                                     block_len, self.n_labels + 1))
            denom_lmscores = np.zeros((len(batch_keys), self.nbest, 1))  # bug fix: name

        for i, key in enumerate(batch_keys):
            mat = self.h5fd[key + '/data'][()]
            # bug fix: pad exactly as in the first pass so block counts agree
            mat = mat_utils.pad_mat(mat, self.mod)
            ex_blocks, ex_frames = multi_utils.expected_num_blocks(
                mat, self.procs, self.extras1, self.extras2, self.num_extras1)
            blocked_mat, mask, _ = multi_utils.split_utt(
                mat, self.procs, self.extras1, self.extras2, self.num_extras1,
                ex_blocks, self.feat_dim, max_num_blocks)
            input_mat[i, :, :, :] = np.expand_dims(blocked_mat, axis=0)
            input_mask[i, :, :, :] = np.expand_dims(mask, axis=0)

            if self.mode == 'train':
                # 1-best (numerator) labels: a sequence of 0-based label strings.
                numer_seq = multi_utils.str2dict(self.h5fd[key + '/1best'][()])
                numer_lmscores[i, 0] = self.h5fd[key + '/1best_scores'][()]
                # n-best (denominator) label sequences.
                denom_seq = multi_utils.str2nbest(self.h5fd[key + '/nbest'][()])
                denom_lmscores[i, :, 0] = self.h5fd[key + '/nbest_scores'][()]
                # Split label sequences into (blocks, frames, feats) tensors;
                # bug fix: do not overwrite the batch arrays with the sequences.
                numer_blocked = multi_utils.split_post_label(
                    numer_seq, self.procs, self.extras1, self.extras2,
                    self.num_extras1, ex_blocks, self.n_labels + 1, max_num_blocks)
                numer_labels[i, :, :, :] = np.expand_dims(numer_blocked, axis=0)
                # (nbest, blocks, time, feats)
                denom_blocked = multi_utils.split_nbest_label(
                    denom_seq, self.procs, self.extras1, self.extras2,
                    self.num_extras1, ex_blocks, self.n_labels + 1, max_num_blocks)
                denom_labels[i, :, :, :, :] = np.expand_dims(denom_blocked, axis=0)

        # Move the block axis first so training can loop over blocks.
        input_mat = input_mat.transpose((1, 0, 2, 3))
        input_mask = input_mask.transpose((1, 0, 2, 3))
        if self.mode == 'train':
            # (batch, blocks, time, feats) -> (blocks, batch, time, feats)
            numer_labels = numer_labels.transpose((1, 0, 2, 3))
            # (batch, nbest, blocks, time, feats) -> (blocks, nbest, batch, time, feats)
            # NOTE(review): the original comment claimed (nbest, blocks, batch, ...)
            # but transpose((2, 1, 0, 3, 4)) puts the block axis first -- confirm intent.
            denom_labels = denom_labels.transpose((2, 1, 0, 3, 4))
            return input_mat, input_mask, [numer_labels, numer_lmscores,
                                           denom_labels, denom_lmscores]
        return input_mat, input_mask
| nbest_multi_fixed_generator.py | 7,333 | [input_sequences, label_sequences, inputs_lengths, labels_length] label is a list of string starting from 0 w/o padding for convenience splitting labels (blocks, frames, feats) expand dimensions along with batch dim. (nbest, blocks, time, feats) transpose batch and block axes for outer loop in training transpose batch dim. <-> block dim. (batch,, blocks, time, feats) -> (blocks, batch, time, feats) (batch, nbest, blocks, time, feats)->(nbest, blocks, batch, time, feats) | 474 | en | 0.857291 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import glob
import re
import sys
import urllib
import tarfile
import zipfile
import os.path as osp
from scipy.io import loadmat
import numpy as np
import h5py
from torchreid.utils.iotools import mkdir_if_missing, write_json, read_json
from .bases import BaseVideoDataset
class DukeMTMCVidReID(BaseVideoDataset):
    """
    DukeMTMCVidReID

    Reference:
    Wu et al. Exploit the Unknown Gradually: One-Shot Video-Based Person
    Re-Identification by Stepwise Learning. CVPR 2018.

    URL: https://github.com/Yu-Wu/DukeMTMC-VideoReID
    
    Dataset statistics:
    # identities: 702 (train) + 702 (test)
    # tracklets: 2196 (train) + 2636 (test)
    """
    dataset_dir = 'dukemtmc-vidreid'

    def __init__(self, root='data', min_seq_len=0, verbose=True, **kwargs):
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.dataset_url = 'http://vision.cs.duke.edu/DukeMTMC/data/misc/DukeMTMC-VideoReID.zip'
        self.train_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/train')
        self.query_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/query')
        self.gallery_dir = osp.join(self.dataset_dir, 'DukeMTMC-VideoReID/gallery')
        # Split files are cached as json so the slow directory walk runs only once.
        self.split_train_json_path = osp.join(self.dataset_dir, 'split_train.json')
        self.split_query_json_path = osp.join(self.dataset_dir, 'split_query.json')
        self.split_gallery_json_path = osp.join(self.dataset_dir, 'split_gallery.json')
        self.min_seq_len = min_seq_len
        self._download_data()
        self._check_before_run()
        print("Note: if root path is changed, the previously generated json files need to be re-generated (so delete them first)")

        train = self._process_dir(self.train_dir, self.split_train_json_path, relabel=True)
        query = self._process_dir(self.query_dir, self.split_query_json_path, relabel=False)
        gallery = self._process_dir(self.gallery_dir, self.split_gallery_json_path, relabel=False)

        if verbose:
            print("=> DukeMTMC-VideoReID loaded")
            self.print_dataset_statistics(train, query, gallery)

        self.train = train
        self.query = query
        self.gallery = gallery

        self.num_train_pids, _, self.num_train_cams = self.get_videodata_info(self.train)
        self.num_query_pids, _, self.num_query_cams = self.get_videodata_info(self.query)
        self.num_gallery_pids, _, self.num_gallery_cams = self.get_videodata_info(self.gallery)

    def _download_data(self):
        """Download and extract the dataset archive unless it is already present."""
        if osp.exists(self.dataset_dir):
            print("This dataset has been downloaded.")
            return

        print("Creating directory {}".format(self.dataset_dir))
        mkdir_if_missing(self.dataset_dir)
        fpath = osp.join(self.dataset_dir, osp.basename(self.dataset_url))

        print("Downloading DukeMTMC-VideoReID dataset")
        # Bug fix: `urllib.urlretrieve` only exists on Python 2; resolve the
        # correct function for whichever interpreter is running.
        try:
            from urllib.request import urlretrieve  # Python 3
        except ImportError:
            from urllib import urlretrieve  # Python 2
        urlretrieve(self.dataset_url, fpath)

        print("Extracting files")
        # Context manager guarantees the archive handle is closed even on error.
        with zipfile.ZipFile(fpath, 'r') as zip_ref:
            zip_ref.extractall(self.dataset_dir)

    def _check_before_run(self):
        """Check if all files are available before going deeper"""
        if not osp.exists(self.dataset_dir):
            raise RuntimeError("'{}' is not available".format(self.dataset_dir))
        if not osp.exists(self.train_dir):
            raise RuntimeError("'{}' is not available".format(self.train_dir))
        if not osp.exists(self.query_dir):
            raise RuntimeError("'{}' is not available".format(self.query_dir))
        if not osp.exists(self.gallery_dir):
            raise RuntimeError("'{}' is not available".format(self.gallery_dir))

    def _process_dir(self, dir_path, json_path, relabel):
        """Walk `dir_path` collecting (img_paths, pid, camid) tracklets.

        The result is cached at `json_path`; when the cache exists it is
        returned directly. With `relabel`, person ids are remapped to dense
        0-based labels for training.
        """
        if osp.exists(json_path):
            print("=> {} generated before, awesome!".format(json_path))
            split = read_json(json_path)
            return split['tracklets']

        print("=> Automatically generating split (might take a while for the first time, have a coffe)")
        pdirs = glob.glob(osp.join(dir_path, '*'))  # avoid .DS_Store
        print("Processing '{}' with {} person identities".format(dir_path, len(pdirs)))

        # Each person has one directory named by their id.
        pid_container = set()
        for pdir in pdirs:
            pid = int(osp.basename(pdir))
            pid_container.add(pid)
        pid2label = {pid: label for label, pid in enumerate(pid_container)}

        tracklets = []
        for pdir in pdirs:
            pid = int(osp.basename(pdir))
            if relabel: pid = pid2label[pid]
            tdirs = glob.glob(osp.join(pdir, '*'))
            for tdir in tdirs:
                raw_img_paths = glob.glob(osp.join(tdir, '*.jpg'))
                num_imgs = len(raw_img_paths)

                if num_imgs < self.min_seq_len:
                    continue

                img_paths = []
                for img_idx in range(num_imgs):
                    # some tracklet starts from 0002 instead of 0001
                    img_idx_name = 'F' + str(img_idx + 1).zfill(4)
                    res = glob.glob(osp.join(tdir, '*' + img_idx_name + '*.jpg'))
                    if len(res) == 0:
                        print("Warn: index name {} in {} is missing, jump to next".format(img_idx_name, tdir))
                        continue
                    img_paths.append(res[0])
                # Bug fix: if every frame index was missing, the original
                # crashed on img_paths[0]; skip such tracklets instead.
                if len(img_paths) == 0:
                    continue
                img_name = osp.basename(img_paths[0])
                if img_name.find('_') == -1:
                    # old naming format: 0001C6F0099X30823.jpg
                    camid = int(img_name[5]) - 1
                else:
                    # new naming format: 0001_C6_F0099_X30823.jpg
                    camid = int(img_name[6]) - 1
                img_paths = tuple(img_paths)
                tracklets.append((img_paths, pid, camid))

        print("Saving split to {}".format(json_path))
        split_dict = {
            'tracklets': tracklets,
        }
        write_json(split_dict, json_path)

        return tracklets
Reference:
Wu et al. Exploit the Unknown Gradually: One-Shot Video-Based Person
Re-Identification by Stepwise Learning. CVPR 2018.
URL: https://github.com/Yu-Wu/DukeMTMC-VideoReID
Dataset statistics:
# identities: 702 (train) + 702 (test)
# tracklets: 2196 (train) + 2636 (test)
Check if all files are available before going deeper
avoid .DS_Store some tracklet starts from 0002 instead of 0001 old naming format: 0001C6F0099X30823.jpg new naming format: 0001_C6_F0099_X30823.jpg | 500 | en | 0.699843 |
# Copyright (c) 2019 Eric Steinberger
import numpy as np
from PokerRL.rl.base_cls.workers.WorkerBase import WorkerBase
class EvaluatorMasterBase(WorkerBase):
    """
    Baseclass to all Evaluators. An Evaluator is an algorithm to evaluate an agent's performance in a certain metric.
    """

    def __init__(self, t_prof, eval_env_bldr, chief_handle, evaluator_name, log_conf_interval=False):
        """
        Args:
            t_prof (TrainingProfile)
            eval_env_bldr: builder used to construct evaluation environments
            chief_handle (class instance or ray ActorHandle)
            evaluator_name (str): Name of the evaluator
            log_conf_interval (bool): if True, also create experiments for the
                lower/upper bounds of the 95% confidence interval
        """
        super().__init__(t_prof=t_prof)

        self._eval_env_bldr = eval_env_bldr
        self._chief_handle = chief_handle

        # More than one starting stack size enables the extra multi-stack
        # aggregate experiments below.
        self._is_multi_stack = len(self._t_prof.eval_stack_sizes) > 1
        self._log_conf_interval = log_conf_interval

        self._evaluator_name = evaluator_name

        self._exp_name_total, self._exp_names_conf = self._create_experiments(self_name=evaluator_name)

        if self._is_multi_stack:
            # One experiment per eval mode logging the average over all stacks.
            self._exp_name_multi_stack = {
                eval_mode:
                    self._ray.get(
                        self._ray.remote(self._chief_handle.create_experiment,
                                         self._t_prof.name
                                         + " " + eval_mode
                                         + "Multi_Stack"
                                         + ": " + evaluator_name
                                         + " Averaged Total"))
                for eval_mode in self._t_prof.eval_modes_of_algo
            }
            if self._log_conf_interval:
                # Two extra experiments per eval mode: lower95 / upper95 bounds.
                self._exp_names_multi_stack_conf = {
                    eval_mode:
                        self._ray.get(
                            [
                                self._ray.remote(self._chief_handle.create_experiment,
                                                 self._t_prof.name
                                                 + " " + eval_mode
                                                 + ": " + evaluator_name
                                                 + " Conf_" + bound_end)
                                for bound_end in ["lower95", "upper95"]
                            ]
                        )
                    for eval_mode in self._t_prof.eval_modes_of_algo
                }

    @property
    def is_multi_stack(self):
        """
        Whether the agent is evaluated in games that start with different stack sizes each time.
        """
        return self._is_multi_stack

    def evaluate(self, iter_nr):
        """ Evaluate an agent and send the results as logs to the Chief. """
        raise NotImplementedError

    def update_weights(self):
        """ Update the local weights on the master, for instance by calling .pull_current_strat_from_chief() """
        raise NotImplementedError

    def pull_current_strat_from_chief(self):
        """
        Pulls and Returns weights or any other changing algorithm info of any format from the Chief.
        """
        return self._ray.get(self._ray.remote(self._chief_handle.pull_current_eval_strategy,
                                              self._evaluator_name
                                              ))

    def _create_experiments(self, self_name, ):
        """
        Registers a new experiment either for each player and their average or just for their average.

        Returns:
            (exp_name_total, exp_names_conf): per eval mode, a list with one
            "Total" experiment per stack size, and (when confidence logging is
            enabled) a [lower95, upper95] experiment pair per stack size.
        """
        if self._log_conf_interval:
            exp_names_conf = {
                eval_mode:
                    [
                        self._ray.get(
                            [
                                self._ray.remote(self._chief_handle.create_experiment,
                                                 self._t_prof.name
                                                 + " " + eval_mode
                                                 + "_stack_" + str(stack_size[0])
                                                 + ": " + self_name
                                                 + " Conf_" + bound_end)
                                for bound_end in ["lower95", "upper95"]
                            ]
                        )
                        for stack_size in self._t_prof.eval_stack_sizes
                    ]
                for eval_mode in self._t_prof.eval_modes_of_algo
            }
        else:
            exp_names_conf = None

        exp_name_total = {
            eval_mode:
                [
                    self._ray.get(
                        self._ray.remote(self._chief_handle.create_experiment,
                                         self._t_prof.name
                                         + " " + eval_mode
                                         + "_stack_" + str(stack_size[0])
                                         + ": " + self_name
                                         + " Total"))
                    for stack_size in self._t_prof.eval_stack_sizes
                ]
            for eval_mode in self._t_prof.eval_modes_of_algo
        }

        return exp_name_total, exp_names_conf

    def _get_95confidence(self, scores):
        # Normal-approximation confidence half-width: 1.96 * std / sqrt(N).
        mean = np.mean(scores).item()
        std = np.std(scores).item()

        _d = 1.96 * std / np.sqrt(scores.shape[0])
        return float(mean), float(_d)

    def _log_results(self, agent_mode, stack_size_idx, iter_nr, score, upper_conf95=None, lower_conf95=None):
        """
        Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.

        Args:
            agent_mode:             Evaluation mode of the agent whose performance is logged
            stack_size_idx:         If evaluating multiple starting stack sizes, this is an index describing which one
                                    this data is from.
            iter_nr:                Algorithm Iteration of this data
            score:                  Score in this evaluation (e.g. exploitability)
        """
        graph_name = "Evaluation/" + self._eval_env_bldr.env_cls.WIN_METRIC

        self._ray.remote(self._chief_handle.add_scalar,
                         self._exp_name_total[agent_mode][stack_size_idx], graph_name, iter_nr, score)

        if self._log_conf_interval:
            assert upper_conf95 is not None
            assert lower_conf95 is not None
            # Index 0 is the lower95 experiment, index 1 the upper95 one.
            self._ray.remote(self._chief_handle.add_scalar,
                             self._exp_names_conf[agent_mode][stack_size_idx][0], graph_name, iter_nr, lower_conf95)
            self._ray.remote(self._chief_handle.add_scalar,
                             self._exp_names_conf[agent_mode][stack_size_idx][1], graph_name, iter_nr, upper_conf95)

    def _log_multi_stack(self, agent_mode, iter_nr, score_total, upper_conf95=None, lower_conf95=None):
        """
        Additional logging for multistack evaluations
        """
        graph_name = "Evaluation/" + self._eval_env_bldr.env_cls.WIN_METRIC
        self._ray.remote(self._chief_handle.add_scalar,
                         self._exp_name_multi_stack[agent_mode], graph_name, iter_nr, score_total)

        if self._log_conf_interval:
            assert upper_conf95 is not None
            assert lower_conf95 is not None
            # Index 0 is the lower95 experiment, index 1 the upper95 one.
            self._ray.remote(self._chief_handle.add_scalar,
                             self._exp_names_multi_stack_conf[agent_mode][0], graph_name, iter_nr, lower_conf95)
            self._ray.remote(self._chief_handle.add_scalar,
                             self._exp_names_multi_stack_conf[agent_mode][1], graph_name, iter_nr, upper_conf95)
| PokerRL/eval/_/EvaluatorMasterBase.py | 7,634 | Baseclass to all Evaluators. An Evaluator is an algorithm to evaluate an agent's performance in a certain metric.
Args:
t_prof (TrainingProfile)
chief_handle (class instance or ray ActorHandle)
evaluator_name (str): Name of the evaluator
Registers a new experiment either for each player and their average or just for their average.
Additional logging for multistack evaluations
Log evaluation results by sending these results to the Chief, who will later send them to the Crayon log server.
Args:
agent_mode: Evaluation mode of the agent whose performance is logged
stack_size_idx: If evaluating multiple starting stack sizes, this is an index describing which one
this data is from.
iter_nr: Algorithm Iteration of this data
score: Score in this evaluation (e.g. exploitability)
Evaluate an agent and send the results as logs to the Chief.
Whether the agent is evaluated in games that start with different stack sizes each time.
Pulls and Returns weights or any other changing algorithm info of any format from the Chief.
Update the local weights on the master, for instance by calling .pull_current_strat_from_chief()
Copyright (c) 2019 Eric Steinberger | 1,285 | en | 0.863197 |
# setup.py file
from setuptools import find_packages, setup

# Use the README as the long description shown on PyPI.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name="my-lambdata-dspt5",  # the name that you will install via pip
    version="1.0",
    author="Devvin Kraatz",
    # Bug fix: trailing space removed from the address.
    # NOTE(review): "gmai.com" looks like a typo for "gmail.com" -- confirm with the author.
    author_email="devvnet97@gmai.com",
    description="Made as an example while taking Lambda School's Data Science Course, come join it's highly recommended!",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # required if using a md file for long desc
    # license="MIT",
    url="https://github.com/YOUR_USERNAME/YOUR_REPO_NAME",  # TODO: placeholder -- point at the real repository
    # keywords="",
    packages=find_packages()  # ["my_lambdata"]
)
| module2-oop-code-style-and-reviews/lambdata_dspt5/setup.py | 709 | setup.py file the name that you will install via pip required if using a md file for long desc license="MIT", keywords="", ["my_lambdata"] | 138 | en | 0.628739 |
#! /usr/local/bin/python
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# Modernized from Python 2 `print` statements to Python 3 print() calls
# (Python 2 is end-of-life); the emitted text is byte-identical.

# Map proj4 unit codes to a display name and conversion factor
# (the factors are taken from the original if/elif chain).
_UNITS = {
    "m": ("Meter", 1),
    "ft": ("International Foot", 0.3048),
    "us-ft": ("US Survey Foot", 0.3048006096012192),
    "grad": ("Gradian", 0.01470796326794897),
    "degree": ("Degree", 0.0174532925199433),
}

with open("table.txt", "r") as f:
    print("projections = (")
    lines = f.readlines()
    units_list = []
    for line in lines:
        # Rows are pipe-separated: EPSG code in field 0, WKT in field 3.
        lineparts = line.rstrip().split("|")
        epsg = lineparts[0]
        wkt = lineparts[3]
        name = wkt.split("\"")[1]
        location = line.find("+units=")
        if location == -1:
            # No proj4 "+units=" token: fall back to the first UNIT["..."]
            # entry found in the row.
            unit_index = line.find("UNIT[")
            unit_code = line[unit_index:].rstrip().split("\"")[1]
        else:
            unit_code = line[location:].rstrip().split(" ")[0].split("=")[1]
        units_list.append(unit_code)
        unit, unit_factor = _UNITS.get(unit_code, ("Unknown", 0))
        print("{")
        print("wkt = \"" + wkt.replace("\"", "\\\"") + "\";")
        print("name = \"" + name + "\";")
        print("unit = \"" + unit + "\";")
        print("unit_factor = " + str(unit_factor) + ";")
        print("epsg = " + str(epsg) + ";")
        print("},")
    print(")")
| projection-scraping/parseSRS.py | 1,592 | ! /usr/local/bin/python pylint: disable=invalid-name pylint: disable=missing-docstring | 86 | en | 0.48927 |
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
# -----------------------------------------------------------------------------
# `basestring` only exists on Python 2; on Python 3 the lookup raises
# NameError and we alias it to `str` so isinstance checks work everywhere.
try:
    basestring  # exists on Python 2
except NameError:
    basestring = str  # Python 3 fallback
import math
from ccxt.base.errors import ExchangeError
class coingi(Exchange):
    def describe(self):
        """Return the exchange descriptor: ids, capabilities, URLs, REST
        endpoint routes, and the trading/funding fee schedule."""
        return self.deep_extend(super(coingi, self).describe(), {
            'id': 'coingi',
            'name': 'Coingi',
            'rateLimit': 1000,
            'countries': ['PA', 'BG', 'CN', 'US'],  # Panama, Bulgaria, China, US
            'has': {
                'CORS': False,
                'fetchTickers': True,
            },
            'urls': {
                'referral': 'https://www.coingi.com/?r=XTPPMC',
                'logo': 'https://user-images.githubusercontent.com/1294454/28619707-5c9232a8-7212-11e7-86d6-98fe5d15cc6e.jpg',
                'api': {
                    'www': 'https://coingi.com',
                    'current': 'https://api.coingi.com',
                    'user': 'https://api.coingi.com',
                },
                'www': 'https://coingi.com',
                'doc': 'https://coingi.docs.apiary.io',
            },
            'api': {
                'www': {
                    'get': [
                        '',
                    ],
                },
                'current': {
                    'get': [
                        'order-book/{pair}/{askCount}/{bidCount}/{depth}',
                        'transactions/{pair}/{maxCount}',
                        '24hour-rolling-aggregation',
                    ],
                },
                'user': {
                    'post': [
                        'balance',
                        'add-order',
                        'cancel-order',
                        'orders',
                        'transactions',
                        'create-crypto-withdrawal',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'tierBased': False,
                    'percentage': True,
                    'taker': 0.2 / 100,
                    'maker': 0.2 / 100,
                },
                'funding': {
                    'tierBased': False,
                    'percentage': False,
                    'withdraw': {
                        'BTC': 0.001,
                        'LTC': 0.01,
                        'DOGE': 2,
                        'PPC': 0.02,
                        'VTC': 0.2,
                        'NMC': 2,
                        'DASH': 0.002,
                        'USD': 10,
                        'EUR': 10,
                    },
                    'deposit': {
                        'BTC': 0,
                        'LTC': 0,
                        'DOGE': 0,
                        'PPC': 0,
                        'VTC': 0,
                        'NMC': 0,
                        'DASH': 0,
                        'USD': 5,
                        'EUR': 1,
                    },
                },
            },
        })
def fetch_markets(self, params={}):
response = self.wwwGet(params)
parts = response.split('do=currencyPairSelector-selectCurrencyPair" class="active">')
currencyParts = parts[1].split('<div class="currency-pair-label">')
result = []
for i in range(1, len(currencyParts)):
currencyPart = currencyParts[i]
idParts = currencyPart.split('</div>')
id = idParts[0]
id = id.replace('/', '-')
id = id.lower()
baseId, quoteId = id.split('-')
base = baseId.upper()
quote = quoteId.upper()
base = self.safe_currency_code(base)
quote = self.safe_currency_code(quote)
symbol = base + '/' + quote
precision = {
'amount': 8,
'price': 8,
}
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': id,
'active': True,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': math.pow(10, precision['amount']),
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': 0,
'max': None,
},
},
})
return result
def fetch_balance(self, params={}):
self.load_markets()
lowercaseCurrencies = []
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
currency = currencies[i]
lowercaseCurrencies.append(currency.lower())
request = {
'currencies': ','.join(lowercaseCurrencies),
}
response = self.userPostBalance(self.extend(request, params))
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance['currency'], 'name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'available')
blocked = self.safe_float(balance, 'blocked')
inOrders = self.safe_float(balance, 'inOrders')
withdrawing = self.safe_float(balance, 'withdrawing')
account['used'] = self.sum(blocked, inOrders, withdrawing)
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=512, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'depth': 32, # maximum number of depth range steps 1-32
'askCount': limit, # maximum returned number of asks 1-512
'bidCount': limit, # maximum returned number of bids 1-512
}
orderbook = self.currentGetOrderBookPairAskCountBidCountDepth(self.extend(request, params))
return self.parse_order_book(orderbook, None, 'bids', 'asks', 'price', 'baseAmount')
def parse_ticker(self, ticker, market=None):
timestamp = self.milliseconds()
symbol = None
if market is not None:
symbol = market['symbol']
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'highestBid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'lowestAsk'),
'askVolume': None,
'vwap': None,
'open': None,
'close': None,
'last': None,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'baseVolume'),
'quoteVolume': self.safe_float(ticker, 'counterVolume'),
'info': ticker,
}
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.currentGet24hourRollingAggregation(params)
result = {}
for t in range(0, len(response)):
ticker = response[t]
base = ticker['currencyPair']['base'].upper()
quote = ticker['currencyPair']['counter'].upper()
symbol = base + '/' + quote
market = None
if symbol in self.markets:
market = self.markets[symbol]
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_ticker(self, symbol, params={}):
self.load_markets()
tickers = self.fetch_tickers(None, params)
if symbol in tickers:
return tickers[symbol]
raise ExchangeError(self.id + ' return did not contain ' + symbol)
def parse_trade(self, trade, market=None):
price = self.safe_float(trade, 'price')
amount = self.safe_float(trade, 'amount')
cost = None
if price is not None:
if amount is not None:
cost = price * amount
timestamp = self.safe_integer(trade, 'timestamp')
id = self.safe_string(trade, 'id')
marketId = self.safe_string(trade, 'currencyPair')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = None
if market is not None:
symbol = market['symbol']
return {
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': None, # type
'order': None,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'pair': market['id'],
'maxCount': 128,
}
response = self.currentGetTransactionsPairMaxCount(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'currencyPair': self.market_id(symbol),
'volume': amount,
'price': price,
'orderType': 0 if (side == 'buy') else 1,
}
response = self.userPostAddOrder(self.extend(request, params))
return {
'info': response,
'id': response['result'],
}
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'orderId': id,
}
return self.userPostCancelOrder(self.extend(request, params))
def sign(self, path, api='current', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
if api != 'www':
url += '/' + api + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'current':
if query:
url += '?' + self.urlencode(query)
elif api == 'user':
self.check_required_credentials()
nonce = self.nonce()
request = self.extend({
'token': self.apiKey,
'nonce': nonce,
}, query)
auth = str(nonce) + '$' + self.apiKey
request['signature'] = self.hmac(self.encode(auth), self.encode(self.secret))
body = self.json(request)
headers = {
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='current', method='GET', params={}, headers=None, body=None):
response = self.fetch2(path, api, method, params, headers, body)
if not isinstance(response, basestring):
if 'errors' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
| python/ccxt/coingi.py | 12,014 | -*- coding: utf-8 -*- PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.mdhow-to-contribute-code ----------------------------------------------------------------------------- Python 3 Python 2 Panama, Bulgaria, China, US maximum number of depth range steps 1-32 maximum returned number of asks 1-512 maximum returned number of bids 1-512 type | 417 | en | 0.59014 |
# -*- coding: utf-8 -*-
import json
import copy
import types
import inspect
import re
import traceback
import datetime
import markdown2
import semver
import functools
import platform
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Constants
# Version of this protocol/library
VERSION = "1.0.3"
# Versions listed here introduced breaking changes
BREAKINGVERSIONS = ["0.2.9-beta"]
# Host platform flags
WIN = platform.system() == "Windows"
MAC = platform.system() == "Darwin"
LINUX = platform.system() == "Linux"
# Response types (success, error, ...) — keywords returned by API commands;
# human-readable explanations live in RESPONSES below
SUCCESS = "success"
ERROR = "error"
UNKNOWNFONT = "unknownFont"
INSUFFICIENTPERMISSION = "insufficientPermission"
SEATALLOWANCEREACHED = "seatAllowanceReached"
EXPIRED = "fontExpired"
UNKNOWNINSTALLATION = "unknownInstallation"
NOFONTSAVAILABLE = "noFontsAvailable"
TEMPORARILYUNAVAILABLE = "temporarilyUnavailable"
VALIDTYPEWORLDUSERACCOUNTREQUIRED = "validTypeWorldUserAccountRequired"
REVEALEDUSERIDENTITYREQUIRED = "revealedUserIdentityRequired"
LOGINREQUIRED = "loginRequired"
# Known subscription URL protocols
PROTOCOLS = ["typeworld"]
# Human-readable explanation for each response keyword; used to build
# documentation (see ResponsesDocu). These are runtime strings — existing
# typos ("There request", "reponses") are preserved verbatim.
RESPONSES = {
    SUCCESS: "The request has been processed successfully.",
    ERROR: "There request produced an error. You may add a custom error message in the `errorMessage` field.",
    UNKNOWNFONT: "No font could be identified for the given `fontID`.",
    EXPIRED: "This font installation has expired.",
    INSUFFICIENTPERMISSION: (
        "The Type.World user account credentials "
        "couldn’t be confirmed by the publisher (which are checked with the "
        "central server) and therefore access to the subscription is denied."
    ),
    SEATALLOWANCEREACHED: (
        "The user has exhausted their seat allowances for "
        "this font. The app may take them to the publisher’s website as "
        "defined in ::LicenseUsage.upgradeURL:: to upgrade their font license."
    ),
    UNKNOWNINSTALLATION: (
        "This font installation (combination of app instance and user "
        "credentials) is unknown. The response with this error message is "
        "crucial to remote de-authorization of app instances. When a user "
        "de-authorizes an entire app instance’s worth of font installations, "
        "such as when a computer got bricked and re-installed or is lost, the "
        "success of the remote de-authorization process is judged by either "
        "`success` responses (app actually had this font installed and its "
        "deletion has been recorded) or `unknownInstallation` responses "
        "(app didn’t have this font installed). All other reponses count as "
        "errors in the remote de-authorization process."
    ),
    NOFONTSAVAILABLE: "This subscription exists but carries no fonts at the moment.",
    TEMPORARILYUNAVAILABLE: "The service is temporarily unavailable but should work again later on.",
    VALIDTYPEWORLDUSERACCOUNTREQUIRED: (
        "The access to this subscription requires a valid Type.World user account connected to an app."
    ),
    REVEALEDUSERIDENTITYREQUIRED: (
        "The access to this subscription requires a valid Type.World user "
        "account and that the user agrees to having their identity "
        "(name and email address) submitted to the publisher upon font "
        "installation (closed workgroups only)."
    ),
    LOGINREQUIRED: (
        "The access to this subscription requires that the user logs into "
        "the publisher’s website again to authenticate themselves. "
        "Normally, this happens after a subscription’s secret key has been "
        "invalidated. The user will be taken to the publisher’s website "
        "defined at ::EndpointResponse.loginURL::. After successful login, "
        "a button should be presented to the user to reconnect to the same "
        "subscription that they are trying to access. To identify the "
        "subscription, the link that the user will be taken to will carry a "
        "`subscriptionID` parameter with the subscriptionID as defined in "
        "the subscription’s URL."
    ),
}
# Commands
# Each dict describes one protocol command: its URL keyword, the protocol
# version it speaks, the response types it may legally return, and the MIME
# types acceptable for its response.
ENDPOINTCOMMAND = {
    "keyword": "endpoint",
    "currentVersion": VERSION,
    "responseTypes": [SUCCESS, ERROR],
    "acceptableMimeTypes": ["application/json"],
}
INSTALLABLEFONTSCOMMAND = {
    "keyword": "installableFonts",
    "currentVersion": VERSION,
    "responseTypes": [
        SUCCESS,
        ERROR,
        NOFONTSAVAILABLE,
        INSUFFICIENTPERMISSION,
        TEMPORARILYUNAVAILABLE,
        VALIDTYPEWORLDUSERACCOUNTREQUIRED,
    ],
    "acceptableMimeTypes": ["application/json"],
}
INSTALLFONTSCOMMAND = {
    "keyword": "installFonts",
    "currentVersion": VERSION,
    "responseTypes": [
        SUCCESS,
        ERROR,
        INSUFFICIENTPERMISSION,
        TEMPORARILYUNAVAILABLE,
        VALIDTYPEWORLDUSERACCOUNTREQUIRED,
        LOGINREQUIRED,
        REVEALEDUSERIDENTITYREQUIRED,
    ],
    "acceptableMimeTypes": ["application/json"],
}
UNINSTALLFONTSCOMMAND = {
    "keyword": "uninstallFonts",
    "currentVersion": VERSION,
    "responseTypes": [
        SUCCESS,
        ERROR,
        INSUFFICIENTPERMISSION,
        TEMPORARILYUNAVAILABLE,
        VALIDTYPEWORLDUSERACCOUNTREQUIRED,
        LOGINREQUIRED,
    ],
    "acceptableMimeTypes": ["application/json"],
}
# Per-font-asset commands carry only response types (no keyword/MIME entries)
INSTALLFONTASSETCOMMAND = {
    "responseTypes": [
        SUCCESS,
        ERROR,
        UNKNOWNFONT,
        INSUFFICIENTPERMISSION,
        TEMPORARILYUNAVAILABLE,
        VALIDTYPEWORLDUSERACCOUNTREQUIRED,
        LOGINREQUIRED,
        REVEALEDUSERIDENTITYREQUIRED,
        SEATALLOWANCEREACHED,
        EXPIRED,
    ],
}
UNINSTALLFONTASSETCOMMAND = {
    "responseTypes": [
        SUCCESS,
        ERROR,
        UNKNOWNFONT,
        INSUFFICIENTPERMISSION,
        TEMPORARILYUNAVAILABLE,
        VALIDTYPEWORLDUSERACCOUNTREQUIRED,
        LOGINREQUIRED,
        UNKNOWNINSTALLATION,
    ],
}
# The commands that form the public endpoint API
COMMANDS = [
    ENDPOINTCOMMAND,
    INSTALLABLEFONTSCOMMAND,
    INSTALLFONTSCOMMAND,
    UNINSTALLFONTSCOMMAND,
]
# Acceptable download MIME types per font purpose
FONTPURPOSES = {
    "desktop": {
        "acceptableMimeTypes": [
            "font/collection",
            "font/otf",
            "font/sfnt",
            "font/ttf",
        ],
    },
    "web": {"acceptableMimeTypes": ["application/zip"]},
    "app": {"acceptableMimeTypes": ["application/zip"]},
}
# https://tools.ietf.org/html/rfc8081
# Maps font MIME types to the file extensions they may carry.
MIMETYPES = {
    "font/sfnt": {"fileExtensions": ["otf", "ttf"]},
    "font/ttf": {"fileExtensions": ["ttf"]},
    "font/otf": {"fileExtensions": ["otf"]},
    "font/collection": {"fileExtensions": ["ttc"]},
    "font/woff": {"fileExtensions": ["woff"]},
    "font/woff2": {"fileExtensions": ["woff2"]},
}
# Compile list of known file extensions, deduplicated.
# (Previously rebuilt a set and a list on every loop iteration and leaked the
# loop variable at module scope; the order was also nondeterministic because
# it came straight out of a set. A sorted set comprehension fixes all three.)
FILEEXTENSIONS = sorted({extension for entry in MIMETYPES.values() for extension in entry["fileExtensions"]})
# Human-readable names per file extension
FILEEXTENSIONNAMES = {
    "otf": "OpenType",
    "ttf": "TrueType",
    "ttc": "TrueType collection",
    "woff": "WOFF",
    "woff2": "WOFF2",
}
# Reverse lookup: file extension -> canonical MIME type
MIMETYPEFORFONTTYPE = {
    "otf": "font/otf",
    "ttf": "font/ttf",
    "ttc": "font/collection",
    "woff": "font/woff",
    "woff2": "font/woff2",
}
# Supported binary-to-text encodings for font data
FONTENCODINGS = ["base64"]
OPENSOURCELICENSES = [
"0BSD",
"AAL",
"Abstyles",
"Adobe-2006",
"Adobe-Glyph",
"ADSL",
"AFL-1.1",
"AFL-1.2",
"AFL-2.0",
"AFL-2.1",
"AFL-3.0",
"Afmparse",
"AGPL-1.0",
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"Aladdin",
"AMDPLPA",
"AML",
"AMPAS",
"ANTLR-PD",
"Apache-1.0",
"Apache-1.1",
"Apache-2.0",
"APAFML",
"APL-1.0",
"APSL-1.0",
"APSL-1.1",
"APSL-1.2",
"APSL-2.0",
"Artistic-1.0-cl8",
"Artistic-1.0-Perl",
"Artistic-1.0",
"Artistic-2.0",
"Bahyph",
"Barr",
"Beerware",
"BitTorrent-1.0",
"BitTorrent-1.1",
"Borceux",
"BSD-1-Clause",
"BSD-2-Clause-FreeBSD",
"BSD-2-Clause-NetBSD",
"BSD-2-Clause-Patent",
"BSD-2-Clause",
"BSD-3-Clause-Attribution",
"BSD-3-Clause-Clear",
"BSD-3-Clause-LBNL",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause-No-Nuclear-License",
"BSD-3-Clause-No-Nuclear-Warranty",
"BSD-3-Clause",
"BSD-4-Clause-UC",
"BSD-4-Clause",
"BSD-Protection",
"BSD-Source-Code",
"BSL-1.0",
"bzip2-1.0.5",
"bzip2-1.0.6",
"Caldera",
"CATOSL-1.1",
"CC-BY-1.0",
"CC-BY-2.0",
"CC-BY-2.5",
"CC-BY-3.0",
"CC-BY-4.0",
"CC-BY-NC-1.0",
"CC-BY-NC-2.0",
"CC-BY-NC-2.5",
"CC-BY-NC-3.0",
"CC-BY-NC-4.0",
"CC-BY-NC-ND-1.0",
"CC-BY-NC-ND-2.0",
"CC-BY-NC-ND-2.5",
"CC-BY-NC-ND-3.0",
"CC-BY-NC-ND-4.0",
"CC-BY-NC-SA-1.0",
"CC-BY-NC-SA-2.0",
"CC-BY-NC-SA-2.5",
"CC-BY-NC-SA-3.0",
"CC-BY-NC-SA-4.0",
"CC-BY-ND-1.0",
"CC-BY-ND-2.0",
"CC-BY-ND-2.5",
"CC-BY-ND-3.0",
"CC-BY-ND-4.0",
"CC-BY-SA-1.0",
"CC-BY-SA-2.0",
"CC-BY-SA-2.5",
"CC-BY-SA-3.0",
"CC-BY-SA-4.0",
"CC0-1.0",
"CDDL-1.0",
"CDDL-1.1",
"CDLA-Permissive-1.0",
"CDLA-Sharing-1.0",
"CECILL-1.0",
"CECILL-1.1",
"CECILL-2.0",
"CECILL-2.1",
"CECILL-B",
"CECILL-C",
"ClArtistic",
"CNRI-Jython",
"CNRI-Python-GPL-Compatible",
"CNRI-Python",
"Condor-1.1",
"CPAL-1.0",
"CPL-1.0",
"CPOL-1.02",
"Crossword",
"CrystalStacker",
"CUA-OPL-1.0",
"Cube",
"curl",
"D-FSL-1.0",
"diffmark",
"DOC",
"Dotseqn",
"DSDP",
"dvipdfm",
"ECL-1.0",
"ECL-2.0",
"EFL-1.0",
"EFL-2.0",
"eGenix",
"Entessa",
"EPL-1.0",
"EPL-2.0",
"ErlPL-1.1",
"EUDatagrid",
"EUPL-1.0",
"EUPL-1.1",
"EUPL-1.2",
"Eurosym",
"Fair",
"Frameworx-1.0",
"FreeImage",
"FSFAP",
"FSFUL",
"FSFULLR",
"FTL",
"GFDL-1.1-only",
"GFDL-1.1-or-later",
"GFDL-1.2-only",
"GFDL-1.2-or-later",
"GFDL-1.3-only",
"GFDL-1.3-or-later",
"Giftware",
"GL2PS",
"Glide",
"Glulxe",
"gnuplot",
"GPL-1.0-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"GPL-2.0-or-later",
"GPL-3.0-only",
"GPL-3.0-or-later",
"gSOAP-1.3b",
"HaskellReport",
"HPND",
"IBM-pibs",
"ICU",
"IJG",
"ImageMagick",
"iMatix",
"Imlib2",
"Info-ZIP",
"Intel-ACPI",
"Intel",
"Interbase-1.0",
"IPA",
"IPL-1.0",
"ISC",
"JasPer-2.0",
"JSON",
"LAL-1.2",
"LAL-1.3",
"Latex2e",
"Leptonica",
"LGPL-2.0-only",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"LGPL-2.1-or-later",
"LGPL-3.0-only",
"LGPL-3.0-or-later",
"LGPLLR",
"Libpng",
"libtiff",
"LiLiQ-P-1.1",
"LiLiQ-R-1.1",
"LiLiQ-Rplus-1.1",
"LPL-1.0",
"LPL-1.02",
"LPPL-1.0",
"LPPL-1.1",
"LPPL-1.2",
"LPPL-1.3a",
"LPPL-1.3c",
"MakeIndex",
"MirOS",
"MIT-advertising",
"MIT-CMU",
"MIT-enna",
"MIT-feh",
"MIT",
"MITNFA",
"Motosoto",
"mpich2",
"MPL-1.0",
"MPL-1.1",
"MPL-2.0-no-copyleft-exception",
"MPL-2.0",
"MS-PL",
"MS-RL",
"MTLL",
"Multics",
"Mup",
"NASA-1.3",
"Naumen",
"NBPL-1.0",
"NCSA",
"Net-SNMP",
"NetCDF",
"Newsletr",
"NGPL",
"NLOD-1.0",
"NLPL",
"Nokia",
"NOSL",
"Noweb",
"NPL-1.0",
"NPL-1.1",
"NPOSL-3.0",
"NRL",
"NTP",
"OCCT-PL",
"OCLC-2.0",
"ODbL-1.0",
"OFL-1.0",
"OFL-1.1",
"OGTSL",
"OLDAP-1.1",
"OLDAP-1.2",
"OLDAP-1.3",
"OLDAP-1.4",
"OLDAP-2.0.1",
"OLDAP-2.0",
"OLDAP-2.1",
"OLDAP-2.2.1",
"OLDAP-2.2.2",
"OLDAP-2.2",
"OLDAP-2.3",
"OLDAP-2.4",
"OLDAP-2.5",
"OLDAP-2.6",
"OLDAP-2.7",
"OLDAP-2.8",
"OML",
"OpenSSL",
"OPL-1.0",
"OSET-PL-2.1",
"OSL-1.0",
"OSL-1.1",
"OSL-2.0",
"OSL-2.1",
"OSL-3.0",
"PDDL-1.0",
"PHP-3.0",
"PHP-3.01",
"Plexus",
"PostgreSQL",
"psfrag",
"psutils",
"Python-2.0",
"Qhull",
"QPL-1.0",
"Rdisc",
"RHeCos-1.1",
"RPL-1.1",
"RPL-1.5",
"RPSL-1.0",
"RSA-MD",
"RSCPL",
"Ruby",
"SAX-PD",
"Saxpath",
"SCEA",
"Sendmail",
"SGI-B-1.0",
"SGI-B-1.1",
"SGI-B-2.0",
"SimPL-2.0",
"SISSL-1.2",
"SISSL",
"Sleepycat",
"SMLNJ",
"SMPPL",
"SNIA",
"Spencer-86",
"Spencer-94",
"Spencer-99",
"SPL-1.0",
"SugarCRM-1.1.3",
"SWL",
"TCL",
"TCP-wrappers",
"TMate",
"TORQUE-1.1",
"TOSL",
"Unicode-DFS-2015",
"Unicode-DFS-2016",
"Unicode-TOU",
"Unlicense",
"UPL-1.0",
"Vim",
"VOSTROM",
"VSL-1.0",
"W3C-19980720",
"W3C-20150513",
"W3C",
"Watcom-1.0",
"Wsuipa",
"WTFPL",
"X11",
"Xerox",
"XFree86-1.1",
"xinetd",
"Xnet",
"xpp",
"XSkat",
"YPL-1.0",
"YPL-1.1",
"Zed",
"Zend-2.0",
"Zimbra-1.3",
"Zimbra-1.4",
"zlib-acknowledgement",
"Zlib",
"ZPL-1.1",
"ZPL-2.0",
"ZPL-2.1",
]
# Closed sets of allowed string values
FONTSTATUSES = ["prerelease", "trial", "stable"]
PUBLISHERTYPES = ["free", "retail", "custom", "undefined"]
PUBLICPUBLISHERTYPES = ["free", "retail", "custom"]
PUBLISHERSIDEAPPANDUSERCREDENTIALSTATUSES = ["active", "deleted", "revoked"]
# Sentinel string for default values
DEFAULT = "__default__"
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Helper methods
def makeSemVer(version):
    """Turn simple float number (0.1) into semver-compatible number
    for comparison by adding .0(s): (0.1.0)"""

    # Make string
    text = str(version)
    dots = text.count(".")

    if dots < 2:
        # Strip leading zeros from each numeric part, then pad with ".0"
        # until three parts (major.minor.patch) are present
        parts = [str(int(part)) for part in text.split(".")]
        text = ".".join(parts) + ".0" * (2 - dots)

    return text
def ResponsesDocu(responses):
    """Return Markdown text documenting each given response keyword,
    using the explanations in RESPONSES."""
    chunks = ["`%s`: %s\n\n" % (keyword, RESPONSES[keyword]) for keyword in responses]
    return "\n\n" + "".join(chunks)
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Basic Data Types
class DataType(object):
    """Abstract wrapper holding a single typed value in ``self.value``.

    Subclasses set ``dataType`` (the expected Python type) and may override
    ``valid()``, ``shapeValue()``, ``formatHint()`` and ``exampleData()``.
    """

    # Default content copied into self.value on instantiation
    initialData = None
    # Expected Python type of self.value; set by subclasses
    dataType = None

    def __init__(self):
        self.value = copy.copy(self.initialData)
        # Multi-language text types wrap a nested object rather than a plain
        # value (MultiLanguageText* are defined elsewhere in this module)
        if issubclass(self.__class__, (MultiLanguageText, MultiLanguageTextProxy)):
            self.value = self.dataType()

    def __repr__(self):
        if issubclass(self.__class__, Proxy):
            return "<%s>" % (self.dataType.__name__)
        else:
            return "<%s '%s'>" % (self.__class__.__name__, self.get())

    def valid(self):
        """Return True when the value is valid, otherwise an error string."""
        if not self.value:
            return True
        if type(self.value) == self.dataType:
            return True
        else:
            return "Wrong data type. Is %s, should be: %s." % (
                type(self.value),
                self.dataType,
            )

    def get(self):
        """Return the wrapped value."""
        return self.value

    def put(self, value):
        """Shape, store and validate ``value``; raise ValueError if invalid."""
        self.value = self.shapeValue(value)
        # Link structured values back to their container for traversal
        if issubclass(self.value.__class__, (DictBasedObject, ListProxy, Proxy, DataType)):
            object.__setattr__(self.value, "_parent", self)
        valid = self.valid()
        if valid is not True and valid is not None:
            raise ValueError(valid)

    def shapeValue(self, value):
        """Coerce ``value`` into the expected type; identity by default."""
        return value

    def isEmpty(self):
        """Return True when no meaningful value is set."""
        return self.value is None or self.value == [] or self.value == ""

    def isSet(self):
        return not self.isEmpty()

    def formatHint(self):
        """Optional human-readable format hint for documentation."""
        return None

    def exampleData(self):
        """Optional example data for documentation."""
        return None
class BooleanDataType(DataType):
    """Data type wrapping a plain ``bool``."""

    dataType = bool
class IntegerDataType(DataType):
    """Data type wrapping an ``int``; input is coerced with ``int()``."""

    dataType = int

    def shapeValue(self, value):
        return int(value)
class FloatDataType(DataType):
    """Data type wrapping a ``float``; input is coerced with ``float()``."""

    dataType = float

    def shapeValue(self, value):
        return float(value)
class StringDataType(DataType):
    """Data type wrapping a ``str``; input is coerced with ``str()``."""

    dataType = str

    def shapeValue(self, value):
        return str(value)
class DictionaryDataType(DataType):
    """Data type wrapping a ``dict``; ``dict(value)`` makes a shallow copy."""

    dataType = dict

    def shapeValue(self, value):
        return dict(value)
class FontDataType(StringDataType):
    """String-based data type for font data; adds no extra validation."""

    pass
class FontEncodingDataType(StringDataType):
    """String naming the encoding of font data; must be listed in FONTENCODINGS."""

    def valid(self):
        # Empty values pass; only set values are checked.
        if not self.value:
            return True
        if self.value in FONTENCODINGS:
            return True
        return "Encoding '%s' is unknown. Known are: %s" % (
            self.value,
            FONTENCODINGS,
        )
class VersionDataType(StringDataType):
    """String holding a version number, normalized to semver for parsing."""

    dataType = str

    def valid(self):
        """Return True for a valid version, otherwise an error message string.

        Previously this returned a bare ``False`` when makeSemVer() raised
        ValueError (e.g. non-numeric parts like "a.b"), which was inconsistent
        with every other validator and made DataType.put() raise
        ``ValueError(False)`` with a useless message. Return a descriptive
        string instead — callers only check for ``is not True``.
        """
        if not self.value:
            return True
        # Append .0 for semver comparison
        try:
            value = makeSemVer(self.value)
        except ValueError:
            return "Not a valid version string: %s" % self.value
        try:
            semver.VersionInfo.parse(value)
        except ValueError as e:
            return str(e)
        return True

    def formatHint(self):
        return (
            "Simple float number (1 or 1.01) or semantic versioning "
            "(2.0.0-rc.1) as per [semver.org](https://semver.org)"
        )
class TimestampDataType(IntegerDataType):
    """Integer-based data type for timestamps; adds no extra validation."""

    pass
class DateDataType(StringDataType):
    """String holding a calendar date in ISO ``YYYY-MM-DD`` form."""

    def valid(self):
        if not self.value:
            return True
        try:
            datetime.datetime.strptime(self.value, "%Y-%m-%d")
        except ValueError:
            # Surface the last traceback line as the validation error message
            return traceback.format_exc().splitlines()[-1]
        else:
            return True

    def formatHint(self):
        return "YYYY-MM-DD"
class WebURLDataType(StringDataType):
    """String holding a web URL; restricted to the http(s) schemes."""

    def valid(self):
        if not self.value:
            return True
        # str.startswith accepts a tuple of allowed prefixes
        if self.value.startswith(("http://", "https://")):
            return True
        return "Needs to start with http:// or https://"
# # TODO: This is a stump. Expand.
# class TypeWorldURLDataType(StringDataType):
# def valid(self):
# if not self.value:
# return True
# if not self.value.startswith("http://") and not self.value.startswith(
# "https://"
# ):
# return "Needs to start with http:// or https://"
# else:
# return True
# def formatHint(self):
# return (
# "Type.World Subscription URL as per "
# "[Developer Docs](https://type.world/developer#the-subscription-url)"
# )
class TelephoneDataType(StringDataType):
    """String holding an international telephone number: a leading ``+``
    followed only by digits, e.g. ``+1234567890``."""

    def valid(self):
        """Return True for a valid number, otherwise an error message string.

        The previous implementation removed the first regex match with
        ``str.replace()``, which erases *all* occurrences of the matched text
        and therefore wrongly accepted values like ``"+12+12"``. Matching the
        whole value with ``re.fullmatch`` closes that hole.
        """
        if not self.value:
            return True
        if re.fullmatch(r"\+[0-9]+", self.value):
            return True
        return "Needs to start with + and contain only numbers 0-9"

    def formatHint(self):
        return "+1234567890"
class WebResourceURLDataType(WebURLDataType):
    """Web URL pointing to a downloadable resource that clients may cache."""

    def formatHint(self):
        return (
            "This resource may get downloaded and cached on the client "
            "computer. To ensure up-to-date resources, append a unique ID "
            "to the URL such as a timestamp of when the resources changed on your "
            "server, e.g. "
            "https://awesomefonts.com/xyz/regular/specimen.pdf?t=1548239062. "
            "Don’t use the current time for a timestamp, as this will mean constant "
            "reloading the resource when it actually hasn’t changed. Instead use "
            "the resource’s server-side change timestamp."
        )
class EmailDataType(StringDataType):
    """String holding an email address; lightweight plausibility check only."""

    def valid(self):
        if not self.value:
            return True
        value = self.value
        atPosition = value.find("@")
        plausible = (
            "@" in value
            and "." in value
            and value.find(".", atPosition) > 0
            and value.count("@") == 1
            and ".." not in value
        )
        if plausible:
            return True
        return "Not a valid email format: %s" % value
class HexColorDataType(StringDataType):
    """String holding a hex RGB color: three or six hex digits, no leading #."""

    def valid(self):
        if not self.value:
            return True
        correctLength = len(self.value) in (3, 6)
        hexDigitsOnly = re.match("^[A-Fa-f0-9]*$", self.value)
        if correctLength and hexDigitsOnly:
            return True
        return "Not a valid hex color of format RRGGBB (like FF0000 for red): %s" % self.value

    def formatHint(self):
        return "Hex RRGGBB (without leading #)"
class ListProxy(DataType):
    """List-like container whose members are wrapped in ``self.dataType``
    instances.

    Item access and iteration expose the plain values; the wrappers are
    stored internally in ``self.value``.
    """

    initialData = []
    # If True, an empty list still counts as "set" (isEmpty() returns False)
    includeEmpty = False

    # Data type of each list member
    # Here commented out to enforce explicit setting of data type
    # for each Proxy
    # dataType = str

    def __repr__(self):
        if self.value:
            return "%s" % ([x.get() for x in self.value])
        else:
            return "[]"

    def __getitem__(self, i):
        return self.value[i].get()

    def __setitem__(self, i, value):
        # Link structured values to this container before and after wrapping
        if issubclass(value.__class__, (DictBasedObject, Proxy, ListProxy, DataType)):
            object.__setattr__(value, "_parent", self)
        self.value[i].put(value)
        object.__setattr__(self.value[i], "_parent", self)

    def __delitem__(self, i):
        del self.value[i]

    def __iter__(self):
        # Yield unwrapped values
        for element in self.value:
            yield element.get()

    def __len__(self):
        return len(self.value)

    def index(self, item):
        # Search by unwrapped value
        return [x.get() for x in self.value].index(item)

    def get(self):
        return self

    def put(self, values):
        # Replace the entire content; accepts list or tuple
        if not type(values) in (list, tuple):
            raise ValueError("Wrong data type. Is %s, should be: %s." % (type(values), list))
        self.value = []
        for value in values:
            self.append(value)

    def append(self, value):
        # Wrap the value in a fresh dataType instance and link it back
        newData = self.dataType()
        newData.put(value)
        self.value.append(newData)
        if issubclass(newData.__class__, (DictBasedObject, Proxy, ListProxy, DataType)):
            object.__setattr__(newData, "_parent", self)

    def extend(self, values):
        for value in values:
            self.append(value)

    def remove(self, removeValue):
        # Compare unwrapped values; deletes matches in place
        for i, value in enumerate(self.value):
            if self[i] == removeValue:
                del self[i]

    def isEmpty(self):
        if self.includeEmpty:
            return False
        else:
            return not bool(self.value)

    # def valid(self):

    # 	if self.value:
    # 		for data in self.value:
    # 			valid = data.valid()
    # 			return valid

    # 	return True
class DictBasedObject(object):
_structure = {}
_deprecatedKeys = []
_possible_keys = []
_dataType_for_possible_keys = None
def __copy__(self):
cls = self.__class__
result = cls.__new__(cls)
result.__dict__.update(self.__dict__)
return result
def __deepcopy__(self, memo):
cls = self.__class__
obj = cls()
obj.loadJSON(self.dumpJSON())
return obj
def sameContent(self, other):
# return self.difference(other) == {}
return json.dumps(self.dumpDict(validate=False), sort_keys=True) == json.dumps(
other.dumpDict(validate=False), sort_keys=True
)
# def difference(self, other):
# d1 = self.dumpDict(validate=False)
# d2 = other.dumpDict(validate=False)
# from deepdiff import DeepDiff
# r2 = DeepDiff(d1, d2, ignore_order=True)
# return r2
def nonListProxyBasedKeys(self):
_list = []
for keyword in self._structure.keys():
if ListProxy not in inspect.getmro(self._structure[keyword][0]):
_list.append(keyword)
_list.extend(self._deprecatedKeys)
return _list
def linkDocuText(self, text):
def my_replace(match):
match = match.group()
match = match[2:-2]
if "." in match:
className, attributeName = match.split(".")
if "()" in attributeName:
attributeName = attributeName[:-2]
match = "[%s.%s()](#user-content-class-%s-method-%s)" % (
className,
attributeName,
className.lower(),
attributeName.lower(),
)
else:
match = "[%s.%s](#user-content-class-%s-attribute-%s)" % (
className,
attributeName,
className.lower(),
attributeName.lower(),
)
else:
className = match
match = "[%s](#user-content-class-%s)" % (
className,
className.lower(),
)
return match
try:
text = re.sub(r"::.+?::", my_replace, text)
except Exception:
pass
return text or ""
def typeDescription(self, class_):
if issubclass(class_, ListProxy):
return "List of %s objects" % self.typeDescription(class_.dataType)
elif class_.dataType in (
dict,
list,
tuple,
str,
bytes,
set,
frozenset,
bool,
int,
float,
):
return class_.dataType.__name__.capitalize()
elif "" in ("%s" % class_.dataType):
return self.linkDocuText("::%s::" % class_.dataType.__name__)
# Seems unused
# elif 'typeworld.api.' in ("%s" % class_.dataType):
# return self.linkDocuText('::%s::' % class_.dataType.__name__)
# else:
# return class_.dataType.__name__.title()
def additionalDocu(self):
doc = ""
if hasattr(self, "sample"):
doc += f"""*Example JSON data:*
```json
{self.sample().dumpJSON(strict = False)}
```
"""
return doc
def docu(self):
classes = []
# Define string
docstring = ""
head = ""
attributes = ""
methods = ""
attributesList = []
methodsList = []
head += '<div id="class-%s"></div>\n\n' % self.__class__.__name__.lower()
head += "# _class_ %s()\n\n" % self.__class__.__name__
head += self.linkDocuText(inspect.getdoc(self))
head += "\n\n"
additionalDocu = self.additionalDocu()
if additionalDocu:
head += additionalDocu + "\n\n"
# attributes
attributes += "## Attributes\n\n"
for key in sorted(self._structure.keys()):
attributesList.append(key)
attributes += '<div id="class-%s-attribute-%s"></div>\n\n' % (
self.__class__.__name__.lower(),
key,
)
attributes += "### %s\n\n" % key
# Description
if self._structure[key][3]:
attributes += self.linkDocuText(self._structure[key][3]) + "\n\n"
attributes += "__Required:__ %s" % self._structure[key][1] + "<br />\n"
attributes += "__Type:__ %s" % self.typeDescription(self._structure[key][0]) + "<br />\n"
# Format Hint
hint = self._structure[key][0]().formatHint()
if hint:
attributes += "__Format:__ %s" % hint + "<br />\n"
if self._structure[key][2] is not None:
attributes += "__Default value:__ %s" % self._structure[key][2] + "\n\n"
# Example Data
example = self._structure[key][0]().exampleData()
if example:
attributes += "Example:\n"
attributes += "```json\n"
attributes += json.dumps(example, indent=4)
attributes += "\n```\n"
method_list = [
func
for func in dir(self)
if callable(getattr(self, func)) and not func.startswith("__") and inspect.getdoc(getattr(self, func))
]
if method_list:
methods += "## Methods\n\n"
for methodName in method_list:
methodsList.append(methodName)
methods += '<div id="class-%s-method-%s"></div>\n\n' % (
self.__class__.__name__.lower(),
methodName.lower(),
)
args = inspect.getfullargspec(getattr(self, methodName))
if args.args != ["self"]:
argList = []
if args.args and args.defaults:
startPoint = len(args.args) - len(args.defaults)
for i, defaultValue in enumerate(args.defaults):
argList.append("%s = %s" % (args.args[i + startPoint], defaultValue))
methods += "#### %s(%s)\n\n" % (
methodName,
", ".join(argList),
)
else:
methods += "#### %s()\n\n" % methodName
methods += self.linkDocuText(inspect.getdoc(getattr(self, methodName))) + "\n\n"
# Compile
docstring += head
# TOC
if attributesList:
docstring += "### Attributes\n\n"
for attribute in attributesList:
docstring += "[%s](#class-%s-attribute-%s)<br />" % (
attribute,
self.__class__.__name__.lower(),
attribute.lower(),
)
docstring += "\n\n"
if methodsList:
docstring += "### Methods\n\n"
for methodName in methodsList:
docstring += "[%s()](#class-%s-method-%s)<br />" % (
methodName,
self.__class__.__name__.lower(),
methodName.lower(),
)
docstring += "\n\n"
if attributesList:
docstring += attributes
docstring += "\n\n"
if methodsList:
docstring += methods
docstring += "\n\n"
# Add data
classes.append([self.__class__.__name__, docstring])
# Recurse
for key in list(self._structure.keys()):
if issubclass(self._structure[key][0], Proxy):
o = self._structure[key][0].dataType()
classes.extend(o.docu())
if issubclass(self._structure[key][0], ListProxy):
o = self._structure[key][0].dataType.dataType()
if hasattr(o, "docu"):
classes.extend(o.docu())
return classes
def __init__(self, json=None, dict=None):
super(DictBasedObject, self).__init__()
object.__setattr__(self, "_content", {})
object.__setattr__(
self,
"_allowedKeys",
set(self._structure.keys()) | set(self._possible_keys),
)
# Fill default values
for key in self._structure:
# Set default values
if self._structure[key][2] is not None:
setattr(self, key, self._structure[key][2])
if json:
self.loadJSON(json)
elif dict:
self.loadDict(dict)
def initAttr(self, key):
if key not in self._content:
if key in list(object.__getattribute__(self, "_structure").keys()):
self._content[key] = object.__getattribute__(self, "_structure")[key][0]()
elif key in self._possible_keys:
self._content[key] = self._dataType_for_possible_keys()
self._content[key]._parent = self
def __getattr__(self, key):
if key in self._allowedKeys:
self.initAttr(key)
return self._content[key].get()
else:
return object.__getattribute__(self, key)
def __setattr__(self, key, value):
if key in self._allowedKeys:
self.initAttr(key)
if issubclass(value.__class__, (DictBasedObject, ListProxy, Proxy, DataType)):
object.__setattr__(value, "_parent", self)
self.__dict__["_content"][key].put(value)
else:
object.__setattr__(self, key, value)
def set(self, key, value):
self.__setattr__(key, value)
def get(self, key):
return self.__getattr__(key)
def validate(self, strict=True):
information = []
warnings = []
critical = []
def extendWithKey(values, key=None, sourceObject=None):
# Remove duplicates
seen = set()
seen_add = seen.add
values = [x for x in values if not (x in seen or seen_add(x))]
# values = list(set(values))
_list = []
for value in values:
if sourceObject and key:
_list.append("%s.%s --> %s --> %s" % (self, key, sourceObject, value))
elif key:
_list.append("%s.%s --> %s" % (self, key, value))
else:
_list.append("%s --> %s" % (self, value))
return _list
# Check if required fields are filled
for key in list(self._structure.keys()):
self.initAttr(key)
if self.discardThisKey(key) is False:
if strict and self._structure[key][1] and self._content[key].isEmpty():
critical.append("%s.%s is a required attribute, but empty" % (self, key))
else:
# recurse
if issubclass(self._content[key].__class__, (Proxy)):
if self._content[key].isEmpty() is False:
(newInformation, newWarnings, newCritical,) = self._content[
key
].value.validate(strict=strict)
information.extend(extendWithKey(newInformation, key))
warnings.extend(extendWithKey(newWarnings, key))
critical.extend(extendWithKey(newCritical, key))
# Check custom messages:
if hasattr(self._content[key].value, "customValidation") and isinstance(
self._content[key].value.customValidation,
types.MethodType,
):
(
newInformation,
newWarnings,
newCritical,
) = self._content[key].value.customValidation()
information.extend(extendWithKey(newInformation, key, self._content[key]))
warnings.extend(extendWithKey(newWarnings, key, self._content[key]))
critical.extend(extendWithKey(newCritical, key, self._content[key]))
# recurse
if issubclass(self._content[key].__class__, (ListProxy)):
if self._content[key].isEmpty() is False:
for item in self._content[key]:
if hasattr(item, "validate") and isinstance(item.validate, types.MethodType):
(
newInformation,
newWarnings,
newCritical,
) = item.validate(strict=strict)
information.extend(extendWithKey(newInformation, key))
warnings.extend(extendWithKey(newWarnings, key))
critical.extend(extendWithKey(newCritical, key))
# Check custom messages:
if hasattr(item, "customValidation") and isinstance(
item.customValidation, types.MethodType
):
(
newInformation,
newWarnings,
newCritical,
) = item.customValidation()
information.extend(extendWithKey(newInformation, key, item))
warnings.extend(extendWithKey(newWarnings, key, item))
critical.extend(extendWithKey(newCritical, key, item))
# Check custom messages:
if (
issubclass(self.__class__, BaseResponse)
and hasattr(self, "customValidation")
and isinstance(self.customValidation, types.MethodType)
):
newInformation, newWarnings, newCritical = self.customValidation()
information.extend(extendWithKey(newInformation))
warnings.extend(extendWithKey(newWarnings))
critical.extend(extendWithKey(newCritical))
return information, warnings, critical
def discardThisKey(self, key):
    """Subclass hook: return True to omit *key* from dumpDict() output.

    The base implementation keeps every key.
    """
    discard = False
    return discard
def dumpDict(self, strict=True, validate=True):
    """Serialize this object into a plain ``dict``, recursing into nested objects.

    When ``validate`` is True, runs ``self.validate()`` first and raises
    ``ValueError`` with the first critical finding.
    Keys are skipped when ``discardThisKey()`` says so, and emitted only when
    they are required per ``_structure``, truthy, or explicitly set.
    """
    d = {}
    # Auto-validate
    if validate:
        information, warnings, critical = self.validate(strict=strict)
        if critical:
            raise ValueError(critical[0])
    for key in list(self._content.keys()):
        if self.discardThisKey(key) is False:
            attr = getattr(self, key)
            if (
                # required
                (key in self._structure and self._structure[key][1])
                # don't know
                or attr
                # is set
                or (hasattr(attr, "isSet") and attr.isSet())
            ):
                # Nested single objects serialize themselves recursively.
                if hasattr(attr, "dumpDict"):
                    d[key] = attr.dumpDict(strict=strict, validate=validate)
                # Lists: materialize, then recurse into items if they support it.
                elif issubclass(attr.__class__, (ListProxy)):
                    d[key] = list(attr)
                    if len(d[key]) > 0 and hasattr(d[key][0], "dumpDict"):
                        d[key] = [x.dumpDict(strict=strict, validate=validate) for x in d[key]]
                # Plain values are copied as-is.
                else:
                    d[key] = attr
    return d
def loadDict(self, d):
    """Populate this object from a plain ``dict`` (inverse of ``dumpDict()``).

    Keys not listed in ``_allowedKeys``/``_structure`` are silently ignored.
    Nested objects (``Proxy`` fields) and lists (``ListProxy`` fields) are
    recursed into; plain values go through ``self.set()``.

    Rewritten to use ``setattr``/``getattr`` instead of string-built
    ``exec()`` calls — same behavior, no dynamic code execution.
    """
    for key in d:
        if key not in self._allowedKeys:
            continue
        if key not in self._structure:
            continue
        fieldType = self._structure[key][0]
        if issubclass(fieldType, (Proxy,)):
            # Instantiate the wrapped data type. Prefer the canonical class
            # published on typeworld.api (this mirrors the original
            # exec()-based lookup), falling back to the locally declared
            # data type when that lookup fails.
            try:
                newValue = getattr(typeworld.api, fieldType.dataType.__name__)()
            except Exception:
                newValue = fieldType.dataType()
            setattr(self, key, newValue)
            getattr(self, key).loadDict(d[key])
        elif issubclass(fieldType, (ListProxy,)):
            _list = self.__getattr__(key)
            _list.value = []
            # allow empty
            # if self._structure[key][0].includeEmpty:
            #     _list.value = []
            for item in d[key]:
                o = fieldType.dataType.dataType()
                if hasattr(o, "loadDict"):
                    o.loadDict(item)
                    _list.append(o)
                else:
                    _list.append(item)
            self._content[key] = _list
        else:
            self.set(key, d[key])
def dumpJSON(self, strict=True, validate=False):
    """Serialize this object to a pretty-printed, key-sorted JSON string."""
    asDict = self.dumpDict(strict=strict, validate=validate)
    return json.dumps(asDict, indent=4, sort_keys=True)
def loadJSON(self, j):
    """Populate this object from a JSON string (see ``loadDict()``)."""
    parsed = json.loads(j)
    self.loadDict(parsed)
class Proxy(DataType):
    """Marker base for data types whose value is a single nested object.

    Subclasses declare the wrapped class via their ``dataType`` attribute.
    """

    pass
class ResponseCommandDataType(StringDataType):
    """String data type for API response commands."""

    def formatHint(self):
        """Return the documentation hint describing the required response flow."""
        hint = (
            "To ensure the proper function of the entire Type.World protocol, "
            "your API endpoint *must* return the proper responses as per "
            "[this flow chart](https://type.world/documentation/Type.World%20"
            "Request%20Flow%20Chart.pdf). "
            "In addition to ensure functionality, this enables the response "
            "messages displayed to the user to be translated into all the "
            "possible languages on our side."
        )
        return hint
class MultiLanguageText(DictBasedObject):
    """\
Multi-language text. Attributes are language keys as per
[https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes]

The GUI app will then calculate the language data to be displayed using
::MultiLanguageText.getText():: with a prioritized list of languages that
the user can understand. They may be pulled from the operating system’s
language preferences.

These classes are already initiated wherever they are used, and can be
addressed instantly with the language attributes:

```python
api.name.en = u'Font Publisher XYZ'
api.name.de = u'Schriftenhaus XYZ'
```

If you are loading language information from an external source, you may use
the `.set()` method to enter data:

```python
# Simulating external data source
for languageCode, text in (
    ('en', u'Font Publisher XYZ'),
    ('de', u'Schriftenhaus XYZ'),
):
    api.name.set(languageCode, text)
```

Neither HTML nor Markdown code is permitted in `MultiLanguageText`.
"""

    # ISO 639-1 two-letter language codes.
    _possible_keys = [
        "ab", "aa", "af", "ak", "sq", "am", "ar", "an", "hy", "as", "av", "ae",
        "ay", "az", "bm", "ba", "eu", "be", "bn", "bh", "bi", "bs", "br", "bg",
        "my", "ca", "ch", "ce", "ny", "zh", "cv", "kw", "co", "cr", "hr", "cs",
        "da", "dv", "nl", "dz", "en", "eo", "et", "ee", "fo", "fj", "fi", "fr",
        "ff", "gl", "ka", "de", "el", "gn", "gu", "ht", "ha", "he", "hz", "hi",
        "ho", "hu", "ia", "id", "ie", "ga", "ig", "ik", "io", "is", "it", "iu",
        "ja", "jv", "kl", "kn", "kr", "ks", "kk", "km", "ki", "rw", "ky", "kv",
        "kg", "ko", "ku", "kj", "la", "lb", "lg", "li", "ln", "lo", "lt", "lu",
        "lv", "gv", "mk", "mg", "ms", "ml", "mt", "mi", "mr", "mh", "mn", "na",
        "nv", "nd", "ne", "ng", "nb", "nn", "no", "ii", "nr", "oc", "oj", "cu",
        "om", "or", "os", "pa", "pi", "fa", "pl", "ps", "pt", "qu", "rm", "rn",
        "ro", "ru", "sa", "sc", "sd", "se", "sm", "sg", "sr", "gd", "sn", "si",
        "sk", "sl", "so", "st", "es", "su", "sw", "ss", "sv", "ta", "te", "tg",
        "th", "ti", "bo", "tk", "tl", "tn", "to", "tr", "ts", "tt", "tw", "ty",
        "ug", "uk", "ur", "uz", "ve", "vi", "vo", "wa", "cy", "wo", "fy", "xh",
        "yi", "yo", "za", "zu",
    ]
    _dataType_for_possible_keys = StringDataType
    # Maximum allowed characters per language entry.
    _length = 100
    # Plain text only; see MultiLanguageLongText for Markdown-enabled text.
    _markdownAllowed = False

    def __str__(self):
        return str(self.getText())

    def __bool__(self):
        return self.isSet()

    def sample(self):
        """Return an example object populated with sample data."""
        o = self.__class__()
        o.en = "Text in English"
        o.de = "Text auf Deutsch"
        return o

    def getTextAndLocale(self, locale=("en",)):
        """Like getText(), but additionally returns the language of whatever
        text was found first.

        ``locale`` may be a single language code or a prioritized list/tuple
        of codes. (Mutable default ``["en"]`` replaced with an immutable
        tuple; behavior is unchanged.)
        """
        if isinstance(locale, str):
            if self.get(locale):
                return self.get(locale), locale
        elif isinstance(locale, (list, tuple)):
            for key in locale:
                if self.get(key):
                    return self.get(key), key

        # try english
        if self.get("en"):
            return self.get("en"), "en"

        # try anything
        for key in self._possible_keys:
            if self.get(key):
                return self.get(key), key

        return None, None

    def getText(self, locale=("en",)):
        """Returns the text in the first language found from the specified
        list of languages. If that language can’t be found, we’ll try English
        as a standard. If that can’t be found either, return the first language
        you can find."""
        text, locale = self.getTextAndLocale(locale)
        return text

    def customValidation(self):
        """Validate presence, length, and absence of HTML/Markdown."""
        information, warnings, critical = [], [], []

        if self.isEmpty():
            critical.append("Needs to contain at least one language field")

        # Check for text length
        for langId in self._possible_keys:
            if self.get(langId):
                string = self.get(langId)

                if len(string) > self._length:
                    critical.append(
                        "Language entry '%s' is too long. Allowed are %s characters." % (langId, self._length)
                    )

                if re.findall(r"(<.+?>)", string):
                    if self._markdownAllowed:
                        critical.append(
                            (
                                "String contains HTML code, which is not "
                                "allowed. You may use Markdown for text "
                                "formatting. String: " + string
                            )
                        )
                    else:
                        critical.append("String contains HTML code, which is not allowed. String: " + string)

                # markdown2 wraps plain text in exactly one paragraph; anything
                # else means Markdown markup was used.
                if not self._markdownAllowed and string and "<p>" + string + "</p>\n" != markdown2.markdown(string):
                    critical.append("String contains Markdown code, which is not allowed.")

        return information, warnings, critical

    def isSet(self):
        # True as soon as any language key holds a non-empty value.
        for langId in self._possible_keys:
            if langId in self._content and self.get(langId) not in (None, ""):
                return True
        return False

    def isEmpty(self):
        return not self.isSet()

    def loadDict(self, d):
        # Language dicts are flat; no nested objects to recurse into.
        for key in d:
            self.set(key, d[key])
def MultiLanguageText_Parent(self):
    """Return the object two `_parent` hops up the ownership chain, or None."""
    node = self
    for _ in range(2):
        if not hasattr(node, "_parent"):
            return None
        node = node._parent
    return node
MultiLanguageText.parent = property(lambda self: MultiLanguageText_Parent(self))
class MultiLanguageTextProxy(Proxy):
    """Proxy wrapping a single ::MultiLanguageText:: value."""

    dataType = MultiLanguageText

    def isEmpty(self):
        """True if the wrapped text holds no language entries."""
        return self.value.isEmpty()

    def formatHint(self):
        """Return the human-readable format hint for the documentation."""
        text = "Maximum allowed characters: %s." % self.dataType._length
        if self.dataType._markdownAllowed:
            # Fixed typo in user-facing string: "Mardown" -> "Markdown".
            text += " Markdown code is permitted for text formatting."
        return text
class MultiLanguageTextListProxy(ListProxy):
    # List of MultiLanguageTextProxy items.
    dataType = MultiLanguageTextProxy
###############################################################################
class MultiLanguageLongText(MultiLanguageText):
    """\
Multi-language text. Attributes are language keys as per
[https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes]

The GUI app will then calculate the language data to be displayed using
::MultiLanguageText.getText():: with a prioritized list of languages that
the user can understand. They may be pulled from the operating system’s
language preferences.

These classes are already initiated wherever they are used, and can be
addressed instantly with the language attributes:

```python
api.name.en = u'Font Publisher XYZ'
api.name.de = u'Schriftenhaus XYZ'
```

If you are loading language information from an external source, you may use
the `.set()` method to enter data:

```python
# Simulating external data source
for languageCode, text in (
    ('en', u'Font Publisher XYZ'),
    ('de', u'Schriftenhaus XYZ'),
):
    api.name.set(languageCode, text)
```

Markdown is permitted in `MultiLanguageLongText`.
Line breaks need to be escaped as `\n` characters.
"""

    # Longer limit than MultiLanguageText (100), and Markdown is allowed.
    _length = 3000
    _markdownAllowed = True
class MultiLanguageLongTextProxy(MultiLanguageTextProxy):
    # Same proxy behavior, but wrapping the Markdown-enabled long text.
    dataType = MultiLanguageLongText
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# Top-Level Data Types
# class LanguageSupportDataType(DictionaryDataType):
# def valid(self):
# if not self.value:
# return True
# for script in self.value:
# if not len(script) == 4 or not script.islower():
# return "Script tag '%s' needs to be a four-letter lowercase tag." % (
# script
# )
# for language in self.value[script]:
# if not len(language) == 3 or not language.isupper():
# return (
# "Language tag '%s' needs to be a " "three-letter uppercase"
# ) % (language)
# return True
class OpenTypeFeatureDataType(StringDataType):
    """String data type for a four-letter lowercase OpenType feature tag."""

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        tag = self.value
        if not tag:
            return True
        if len(tag) == 4 and tag.islower():
            return True
        return "OpenType feature tag '%s' needs to be a four-letter lowercase tag." % (tag)
class OpenTypeFeatureListProxy(ListProxy):
    # List of OpenType feature tags.
    dataType = OpenTypeFeatureDataType
class OpenSourceLicenseIdentifierDataType(StringDataType):
    """String data type holding an SPDX open-source license identifier."""

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        identifier = self.value
        if not identifier or identifier in OPENSOURCELICENSES:
            return True
        return "Unknown license identifier: '%s'. See https://spdx.org/licenses/" % (identifier)
class SupportedAPICommandsDataType(StringDataType):
    """String data type restricted to the known API command keywords."""

    # Keywords of all commands declared in COMMANDS.
    commands = [command["keyword"] for command in COMMANDS]

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        keyword = self.value
        if not keyword:
            return True
        if keyword in self.commands:
            return True
        return "Unknown API command: '%s'. Possible: %s" % (
            keyword,
            self.commands,
        )
class SupportedAPICommandsListProxy(ListProxy):
    # List of supported API command keywords.
    dataType = SupportedAPICommandsDataType
class SupportedPublisherTypeDataType(StringDataType):
    """String data type restricted to the known publisher types."""

    # Allowed publisher types (see PUBLISHERTYPES).
    types = PUBLISHERTYPES

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        publisherType = self.value
        if not publisherType:
            return True
        if publisherType in self.types:
            return True
        return "Unknown publisher type: '%s'. Possible: %s" % (
            publisherType,
            self.types,
        )
class SupportedPublisherTypeListProxy(ListProxy):
    # List of supported publisher types.
    dataType = SupportedPublisherTypeDataType
class FontPurposeDataType(StringDataType):
    """String data type for a font's technical purpose (see FONTPURPOSES)."""

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        purpose = self.value
        if not purpose:
            return True
        knownPurposes = list(FONTPURPOSES.keys())
        if purpose in knownPurposes:
            return True
        return "Unknown font type: '%s'. Possible: %s" % (
            purpose,
            knownPurposes,
        )
class FontMimeType(StringDataType):
    """String data type for a font file's MIME type."""

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        mimeType = self.value
        if not mimeType:
            return True
        acceptable = list(FONTPURPOSES["desktop"]["acceptableMimeTypes"])
        if mimeType in acceptable:
            return True
        return "Unknown font MIME Type: '%s'. Possible: %s" % (
            mimeType,
            acceptable,
        )
class FontStatusDataType(StringDataType):
    """String data type for a font's status keyword."""

    # Allowed status keywords (see FONTSTATUSES).
    statuses = FONTSTATUSES

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        status = self.value
        if not status:
            return True
        if status in self.statuses:
            return True
        return "Unknown Font Status: '%s'. Possible: %s" % (
            status,
            self.statuses,
        )
class FontExtensionDataType(StringDataType):
    """String data type for a font file extension."""

    def valid(self):
        """Return True when valid, otherwise an error message string."""
        extension = self.value
        if not extension:
            return True
        # Valid when any known MIME type lists this file extension.
        if any(extension in MIMETYPES[mimeType]["fileExtensions"] for mimeType in MIMETYPES):
            return True
        return "Unknown font extension: '%s'. Possible: %s" % (
            extension,
            FILEEXTENSIONS,
        )
###############################################################################
# LicenseDefinition
class LicenseDefinition(DictBasedObject):
    """A font license, referenced from the individual fonts by its keyword."""

    # key: [data type, required, default value, description]
    _structure = {
        "keyword": [
            StringDataType,
            True,
            None,
            "Machine-readable keyword under which the license will be referenced from the individual fonts.",
        ],
        "name": [
            MultiLanguageTextProxy,
            True,
            None,
            "Human-readable name of font license",
        ],
        "URL": [
            WebURLDataType,
            True,
            None,
            "URL where the font license text can be viewed online",
        ],
    }

    def __repr__(self):
        # Bug fix: parenthesize the fallback chain. Previously the `or`
        # applied to the already-formatted (always truthy) string, so the
        # keyword/"undefined" fallbacks were unreachable.
        return "<LicenseDefinition '%s'>" % (self.name or self.keyword or "undefined")

    def sample(self):
        """Return an example object populated with sample data."""
        o = self.__class__()
        o.keyword = "awesomefontsEULA"
        o.name.en = "Awesome Fonts End User License Agreement"
        o.name.de = "Awesome Fonts Endnutzerlizenzvereinbarung"
        o.URL = "https://awesomefonts.com/eula.html"
        return o
def LicenseDefinition_Parent(self):
    """Return the object three `_parent` hops up the ownership chain, or None."""
    node = self
    for _ in range(3):
        if not hasattr(node, "_parent"):
            return None
        node = node._parent
    return node
LicenseDefinition.parent = property(lambda self: LicenseDefinition_Parent(self))
class LicenseDefinitionProxy(Proxy):
    # Wraps a single LicenseDefinition value.
    dataType = LicenseDefinition


class LicenseDefinitionListProxy(ListProxy):
    # List of LicenseDefinitionProxy items.
    dataType = LicenseDefinitionProxy
###############################################################################
# FontPackage
class FontPackage(DictBasedObject):
    """\
`FontPackages` are groups of fonts that serve a certain purpose
to the user.
They can be defined at ::InstallableFontsResponse.packages::,
::Foundry.packages::, ::Family.packages::
and are referenced by their keywords in ::Font.packageKeywords::.

On a font family level, defined at ::Family.packages::, a typical example
for defining a `FontPackage` would be the so called **Office Fonts**.
While they are technically identical to other OpenType fonts, they normally
have a slightly different set of glyphs and OpenType features.
Linking them to a `FontPackage` allows the UI to display them clearly as a
separate set of fonts that serve a different purpose than the
regular fonts.

On a subscription-wide level, defined at
::InstallableFontsResponse.packages::, a `FontPackage` could represent a
curated collection of fonts of various foundries and families, for example
**Script Fonts** or **Brush Fonts** or **Corporate Fonts**.

Each font may be part of several `FontPackages`.

For the time being, only family-level FontPackages are supported in the UI.
"""

    # key: [data type, required, default value, description]
    _structure = {
        "keyword": [
            StringDataType,
            True,
            None,
            "Keyword of font packages. This keyword must be referenced in "
            "::Font.packageKeywords:: and must be unique to this subscription.",
        ],
        "name": [MultiLanguageTextProxy, True, None, "Name of package"],
        "description": [MultiLanguageLongTextProxy, False, None, "Description"],
    }

    def __repr__(self):
        # Bug fix: parenthesize the fallback. Previously `or "undefined"`
        # applied to the always-truthy formatted string, never to the keyword.
        return "<FontPackage '%s'>" % (self.keyword or "undefined")

    def sample(self):
        """Return an example object populated with sample data."""
        o = self.__class__()
        o.keyword = "officefonts"
        o.name.en = "Office Fonts"
        o.name.de = "Office-Schriften"
        o.description.en = "These fonts are produced specifically to be used in Office applications."
        o.description.de = "Diese Schriftdateien sind für die Benutzung in Office-Applikationen vorgesehen."
        return o

    def getFonts(self, filterByFontFormat=None, variableFont=None):
        """
        Calculate list of fonts of this package by applying filters for
        font.format and font.variableFont (possibly more in the future)

        ``filterByFontFormat`` may be a list of format strings; an empty or
        missing filter matches all formats. (Mutable default ``[]`` replaced
        with a ``None`` sentinel; behavior is unchanged.)
        """
        formats = filterByFontFormat or []

        def passedFilter(font):
            # font.format filter
            passed1 = not formats or font.format in formats
            # font.variableFont filter
            passed2 = variableFont is None or font.variableFont == variableFont
            return passed1 and passed2

        return [x for x in self.fonts if passedFilter(x)]

    def getFormats(self):
        """Return the distinct font formats present in this package."""
        formats = []
        if hasattr(self, "fonts"):
            for font in self.fonts:
                if font.format not in formats:
                    formats.append(font.format)
        return formats
class FontPackageProxy(Proxy):
    # Wraps a single FontPackage value.
    dataType = FontPackage


class FontPackageListProxy(ListProxy):
    # List of FontPackageProxy items.
    dataType = FontPackageProxy


class FontPackageReferencesListProxy(ListProxy):
    # List of FontPackage keywords (plain strings, see Font.packageKeywords).
    dataType = StringDataType
###############################################################################
# LicenseUsage
class LicenseUsage(DictBasedObject):
    """One way in which the user has access to a font (see ::Font.usedLicenses::)."""

    # key: [data type, required, default value, description]
    _structure = {
        "keyword": [
            StringDataType,
            True,
            None,
            "Keyword reference of font’s license. This license must be specified in ::Foundry.licenses::",
        ],
        "seatsAllowed": [
            IntegerDataType,
            False,
            None,
            "In case of desktop font (see ::Font.purpose::), number of installations permitted by the user’s license.",
        ],
        "seatsInstalled": [
            IntegerDataType,
            False,
            None,
            "In case of desktop font (see ::Font.purpose::), number of "
            "installations recorded by the API endpoint. This value will "
            "need to be supplied dynamically by the API endpoint through "
            "tracking all font installations through the `anonymousAppID` "
            "parameter of the '%s' and '%s' command. Please note that the "
            "Type.World client app is currently not designed to reject "
            "installations of the fonts when the limits are exceeded. "
            "Instead it is in the responsibility of the API endpoint to "
            "reject font installations though the '%s' command when the "
            "limits are exceeded. In that case the user will be presented "
            "with one or more license upgrade links."
            % (
                INSTALLFONTSCOMMAND["keyword"],
                UNINSTALLFONTSCOMMAND["keyword"],
                INSTALLFONTSCOMMAND["keyword"],
            ),
        ],
        "allowanceDescription": [
            MultiLanguageTextProxy,
            False,
            None,
            "In case of non-desktop font (see ::Font.purpose::), custom "
            "string for web fonts or app fonts reminding the user of the "
            "license’s limits, e.g. '100.000 page views/month'",
        ],
        "upgradeURL": [
            WebURLDataType,
            False,
            None,
            "URL the user can be sent to to upgrade the license of the "
            "font, for instance at the foundry’s online shop. If "
            "possible, this link should be user-specific and guide "
            "him/her as far into the upgrade process as possible.",
        ],
        "dateAddedForUser": [
            DateDataType,
            False,
            None,
            "Date that the user has purchased this font or the font has "
            "become available to the user otherwise (like a new font "
            "within a foundry’s beta font repository). Will be used in "
            "the UI to signal which fonts have become newly available "
            "in addition to previously available fonts. This is not to "
            "be confused with the ::Version.releaseDate::, although they "
            "could be identical.",
        ],
    }

    def sample(self):
        """Return an example object populated with sample data."""
        o = self.__class__()
        o.keyword = "awesomefontsEULA"
        o.seatsAllowed = 5
        o.seatsInstalled = 2
        o.upgradeURL = "https://awesomefonts.com/shop/upgradelicense/083487263904356"
        return o

    def __repr__(self):
        # Bug fix: parenthesize the fallback. Previously `or "undefined"`
        # applied to the always-truthy formatted string, never to the keyword.
        return "<LicenseUsage '%s'>" % (self.keyword or "undefined")

    def customValidation(self):
        """Check that the referenced license keyword actually exists."""
        information, warnings, critical = [], [], []

        # Checking for existing license
        if self.keyword and not self.getLicense():
            critical.append(
                "Has license '%s', but %s has no matching license." % (self.keyword, self.parent.parent.parent)
            )

        return information, warnings, critical

    def getLicense(self):
        """\
        Returns the ::License:: object that this font references.
        """
        return self.parent.parent.parent.getLicenseByKeyword(self.keyword)
def LicenseUsage_Parent(self):
    """Return the object three `_parent` hops up the ownership chain, or None."""
    node = self
    for _ in range(3):
        if not hasattr(node, "_parent"):
            return None
        node = node._parent
    return node
LicenseUsage.parent = property(lambda self: LicenseUsage_Parent(self))
class LicenseUsageProxy(Proxy):
    # Wraps a single LicenseUsage value.
    dataType = LicenseUsage


class LicenseUsageListProxy(ListProxy):
    # List of LicenseUsageProxy items.
    dataType = LicenseUsageProxy
#######################################################################################
# Designer
class Designer(DictBasedObject):
    """A type designer, referenced from fonts or families by keyword."""

    # key: [data type, required, default value, description]
    _structure = {
        "keyword": [
            StringDataType,
            True,
            None,
            "Machine-readable keyword under which the designer will be referenced "
            "from the individual fonts or font families",
        ],
        "name": [
            MultiLanguageTextProxy,
            True,
            None,
            "Human-readable name of designer",
        ],
        "websiteURL": [WebURLDataType, False, None, "Designer’s web site"],
        "description": [
            MultiLanguageLongTextProxy,
            False,
            None,
            "Description of designer",
        ],
    }

    def sample(self):
        """Return an example object populated with sample data."""
        o = self.__class__()
        o.keyword = "johndoe"
        o.name.en = "John Doe"
        o.websiteURL = "https://johndoe.com"
        return o

    def __repr__(self):
        # Bug fix: parenthesize the fallback chain. Previously the `or`
        # applied to the already-formatted (always truthy) string, so the
        # keyword/"undefined" fallbacks were unreachable.
        return "<Designer '%s'>" % (self.name.getText() or self.keyword or "undefined")
def Designer_Parent(self):
    """Return the object three `_parent` hops up the ownership chain, or None."""
    node = self
    for _ in range(3):
        if not hasattr(node, "_parent"):
            return None
        node = node._parent
    return node
Designer.parent = property(lambda self: Designer_Parent(self))
class DesignerProxy(Proxy):
    # Wraps a single Designer value.
    dataType = Designer


class DesignersListProxy(ListProxy):
    # List of DesignerProxy items.
    dataType = DesignerProxy


class DesignersReferencesListProxy(ListProxy):
    # List of designer keywords (plain strings, see Font.designerKeywords).
    dataType = StringDataType
########################################################################################
# Font Family Version
class Version(DictBasedObject):
    # Version information, attachable either to a whole family
    # (Family.versions) or to an individual font (Font.versions).

    # key: [data type, required, default value, description]
    _structure = {
        "number": [
            VersionDataType,
            True,
            None,
            "Font version number. This can be a simple float number (1.002) or a "
            "semver version string (see https://semver.org). For comparison, "
            "single-dot version numbers (or even integers) are appended with "
            "another .0 (1.0 to 1.0.0), then compared using the Python `semver` "
            "module.",
        ],
        "description": [
            MultiLanguageLongTextProxy,
            False,
            None,
            "Description of font version",
        ],
        "releaseDate": [DateDataType, False, None, "Font version’s release date."],
    }

    def sample(self):
        # Return an example object populated with sample data.
        o = self.__class__()
        o.number = "1.2"
        o.description.en = "Added capital SZ and Turkish Lira sign"
        o.description.de = "Versal-SZ und türkisches Lira-Zeichen hinzugefügt"
        o.releaseDate = "2020-05-21"
        return o

    def __repr__(self):
        return "<Version %s (%s)>" % (
            self.number if self.number else "None",
            "font-specific" if self.isFontSpecific() else "family-specific",
        )

    def isFontSpecific(self):
        """\
        Returns True if this version is defined at the font level.
        Returns False if this version is defined at the family level.
        """
        return issubclass(self.parent.__class__, Font)
def Version_Parent(self):
    """Return the object three `_parent` hops up the ownership chain, or None."""
    node = self
    for _ in range(3):
        if not hasattr(node, "_parent"):
            return None
        node = node._parent
    return node
Version.parent = property(lambda self: Version_Parent(self))
class VersionProxy(Proxy):
    # Wraps a single Version value.
    dataType = Version


class VersionListProxy(ListProxy):
    # List of VersionProxy items.
    dataType = VersionProxy
########################################################################################
# Fonts
class BillboardListProxy(ListProxy):
    # List of image URLs (see Font.billboardURLs / Family.billboardURLs).
    dataType = WebResourceURLDataType
class Font(DictBasedObject):
# key: [data type, required, default value, description]
_structure = {
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of font. This may include any additions that you "
"find useful to communicate to your users.",
],
"uniqueID": [
StringDataType,
True,
None,
"A machine-readable string that uniquely identifies this font within "
"the publisher. It will be used to ask for un/installation of the "
"font from the server in the `installFonts` and `uninstallFonts` "
"commands. Also, it will be used for the file name of the font on "
"disk, together with the version string and the file extension. "
"Together, they must not be longer than 220 characters and must "
"not contain the following characters: / ? < > \\ : * | ^ \n**Note:** This "
"ID **must not** include the font's version number, as then it would "
"be treated as a different font. Please also read the section on "
"[versioning](#versioning) above.\n"
"If you offer different font formats of the same font (TTF and OTF), "
"this should be reflected in the *uniqueID* as well.\n"
"Example:\n"
"`MyFoundry_MyFamily_MyFont-Regular_TTF`\n"
"`MyFoundry_MyFamily_MyFont-Regular_OTF`\n"
"`MyFoundry_MyFamily_MyFont-Regular_TTFVAR`\n",
],
"postScriptName": [
StringDataType,
True,
None,
"Complete PostScript name of font",
],
"packageKeywords": [
FontPackageReferencesListProxy,
False,
None,
"List of references to ::FontPackage:: objects by their keyword",
],
"versions": [
VersionListProxy,
False,
None,
"List of ::Version:: objects. These are font-specific versions; they "
"may exist only for this font. You may define additional versions at "
"the family object under ::Family.versions::, which are then expected "
"to be available for the entire family. However, either the fonts or "
"the font family *must* carry version information and the validator "
"will complain when they don't.\n\nPlease also read the section on "
"[versioning](#versioning) above.",
],
"designerKeywords": [
DesignersReferencesListProxy,
False,
None,
"List of keywords referencing designers. These are defined at "
"::InstallableFontsResponse.designers::. This attribute overrides the "
"designer definitions at the family level at ::Family.designers::.",
],
"free": [BooleanDataType, False, None, "Font is freeware. For UI signaling"],
"billboardURLs": [
BillboardListProxy,
False,
None,
"List of URLs pointing at images to show for this typeface. "
"We suggest to use square dimensions and uncompressed SVG "
"images because they scale to all sizes smoothly, "
"but ultimately any size or HTML-compatible image type "
"is possible.",
],
"status": [
FontStatusDataType,
True,
"stable",
"Font status. For UI signaling. Possible values are: %s" % FONTSTATUSES,
],
"variableFont": [
BooleanDataType,
False,
False,
"Font is an OpenType Variable Font. For UI signaling",
],
"purpose": [
FontPurposeDataType,
True,
None,
"Technical purpose of font. This influences how the app handles the "
"font. For instance, it will only install desktop fonts on the system, "
"and make other font types available though folders. Possible: %s" % (list(FONTPURPOSES.keys())),
],
"format": [
FontExtensionDataType,
False,
None,
"Font file format. Required value in case of `desktop` font (see ::Font.purpose::. Possible: %s"
% FILEEXTENSIONS,
],
"protected": [
BooleanDataType,
False,
False,
"Indication that font is (most likely) commercial and requires "
"a certain amount of special treatment over a free font: "
"1) The API Endpoint requires a valid subscriptionID to be used "
"for authentication. 2) The API Endpoint may limit the downloads "
"of fonts. "
"3) Most importantly, "
"the `uninstallFonts` command needs to be called on the "
"API Endpoint when the font gets uninstalled."
"This may also be used for fonts that are free to download, but their "
"installations want to be monitored or limited anyway. ",
],
"dateFirstPublished": [
DateDataType,
False,
None,
"Human readable date of the initial release of the font. May also be "
"defined family-wide at ::Family.dateFirstPublished::.",
],
"usedLicenses": [
LicenseUsageListProxy,
True,
None,
"List of ::LicenseUsage:: objects. These licenses represent the "
"different ways in which a user has access to this font. At least one "
"used license must be defined here, because a user needs to know under "
"which legal circumstances he/she is using the font. Several used "
"licenses may be defined for a single font in case a customer owns "
"several licenses that cover the same font. For instance, a customer "
"could have purchased a font license standalone, but also as part of "
"the foundry’s entire catalogue. It’s important to keep these separate "
"in order to provide the user with separate upgrade links where he/she "
"needs to choose which of several owned licenses needs to be upgraded. "
"Therefore, in case of a commercial retail foundry, used licenses "
"correlate to a user’s purchase history.",
],
"pdfURL": [
WebResourceURLDataType,
False,
None,
"URL of PDF file with type specimen and/or instructions for this "
"particular font. (See also: ::Family.pdf::",
],
"expiry": [
TimestampDataType,
False,
None,
"Unix timestamp of font’s expiry. The font will be deleted on that "
"moment. This could be set either upon initial installation of a trial "
"font, or also before initial installation as a general expiry moment.",
],
"expiryDuration": [
IntegerDataType,
False,
None,
"Minutes for which the user will be able to use the font after initial "
"installation. This attribute is used only as a visual hint in the UI "
"and should be set for trial fonts that expire a certain period after "
"initial installation, such as 60 minutes. If the font is a trial font "
"limited to a certain usage period after initial installation, it must "
"also be marked as ::Font.protected::, with no ::Font.expiry:: "
"timestamp set at first (because the expiry depends on the moment of "
"initial installation). On initial font installation by the user, the "
"publisher’s server needs to record that moment’s time, and from there "
"onwards serve the subscription with ::Font.expiry:: attribute set in "
"the future. Because the font is marked as ::Font.protected::, the app "
"will update the subscription directly after font installation, upon "
"when it will learn of the newly added ::Font.expiry:: attribute. "
"Please note that you *have* to set ::Font.expiry:: after initial "
"installation yourself. The Type.World app will not follow up on its "
"own on installed fonts just with the ::Font.expiryDuration:: "
"attribute, which is used only for display.",
],
"features": [
OpenTypeFeatureListProxy,
False,
None,
"List of supported OpenType features as per "
"https://docs.microsoft.com/en-us/typography/opentype/spec/featuretags",
],
# "languageSupport": [
# LanguageSupportDataType,
# False,
# None,
# "Dictionary of suppported languages as script/language combinations",
# ],
}
def __repr__(self):
return "<Font '%s'>" % (self.postScriptName or self.name.getText() or "undefined")
def sample(self):
o = self.__class__()
o.name.en = "Bold"
o.name.de = "Fette"
o.uniqueID = "AwesomeFonts-AwesomeFamily-Bold"
o.postScriptName = "AwesomeFamily-Bold"
o.purpose = "desktop"
return o
def filename(self, version):
"""\
Returns the recommended font file name to be used to store the font on disk.
It is composed of the font’s uniqueID, its version string and the file
extension. Together, they must not exceed 220 characters.
"""
if not type(version) in (str, int, float):
raise ValueError("Supplied version must be str or int or float")
if self.format:
return "%s_%s.%s" % (self.uniqueID, version, self.format)
else:
return "%s_%s" % (self.uniqueID, version)
def hasVersionInformation(self):
return self.versions or self.parent.versions
def customValidation(self):
information, warnings, critical = [], [], []
# Checking font type/extension
if self.purpose == "desktop" and not self.format:
critical.append("Is a desktop font (see .purpose), but has no .format value.")
# Checking version information
if not self.hasVersionInformation():
critical.append(
"Has no version information, and neither has its family %s. "
"Either one needs to carry version information." % (self.parent)
)
# Checking for designers
for designerKeyword in self.designerKeywords:
if not self.parent.parent.parent.getDesignerByKeyword(designerKeyword):
critical.append(
"Has designer '%s', but %s.designers has no matching designer."
% (designerKeyword, self.parent.parent.parent)
)
# Checking uniqueID for file name contradictions:
forbidden = "/?<>\\:*|^,;"
for char in forbidden:
if self.uniqueID.count(char) > 0:
critical.append(
".uniqueID must not contain the character '%s' because it will "
"be used for the font’s file name on disk." % char
)
for version in self.getVersions():
filename = self.filename(version.number)
if len(filename) > 220:
critical.append("The suggested file name is longer than 220 characters: %s" % filename)
return information, warnings, critical
def getBillboardURLs(self):
"""\
Returns list billboardURLs compiled from ::Font.billboardURLs::
and ::Family.billboardURLs::, giving the font-level definitions priority
over family-level definitions.
"""
return self.billboardURLs or self.parent.billboardURLs
def getVersions(self):
    """\
    Returns list of ::Version:: objects.
    This is the final list based on the version information in this font object as
    well as in its parent ::Family:: object. Please read the section about
    [versioning](#versioning) above.

    Raises ValueError when neither the font nor its family carries
    version information.
    """
    if not self.hasVersionInformation():
        raise ValueError(
            "%s has no version information, and neither has its family %s. "
            "Either one needs to carry version information." % (self, self.parent)
        )

    def compare(a, b):
        # Semver-wise comparison; makeSemVer() normalizes incomplete
        # numbers (e.g. "1.0" -> "1.0.0") before parsing.
        return semver.VersionInfo.parse(makeSemVer(a.number)).compare(makeSemVer(b.number))

    versions = []
    haveVersionNumbers = []
    # Font-level versions take priority over family-level ones.
    for version in self.versions:
        versions.append(version)
        haveVersionNumbers.append(makeSemVer(version.number))
    # Family-level versions are added unless the same version number is
    # already present.
    # Bug fix: normalize before the membership test. Previously the raw
    # number was compared against normalized entries, so a family version
    # "1.0" was not recognized as a duplicate of a font version "1.0"
    # (stored as "1.0.0") and appeared twice in the result.
    for version in self.parent.versions:
        normalized = makeSemVer(version.number)
        if normalized not in haveVersionNumbers:
            versions.append(version)
            haveVersionNumbers.append(normalized)
    versions = sorted(versions, key=functools.cmp_to_key(compare))
    return versions
def getDesigners(self):
    """\
    Returns a list of ::Designer:: objects that this font references.
    These are the combination of family-level designers and font-level designers.
    The same logic as for versioning applies.
    Please read the section about [versioning](#versioning) above.
    """
    if not hasattr(self, "_designers"):
        # Resolve once and cache on the instance; the root response object
        # (three levels up) holds the designer definitions.
        collected = []
        root = self.parent.parent.parent
        # Family-level designers first, then font-level designers.
        for keywords in (self.parent.designerKeywords, self.designerKeywords):
            if keywords:
                for designerKeyword in keywords:
                    collected.append(root.getDesignerByKeyword(designerKeyword))
        self._designers = collected
    return self._designers
def getPackageKeywords(self):
    """Return this font's package keywords, deduplicated.

    Falls back to the single DEFAULT keyword when the font declares none.
    Note: deduplication via set() does not preserve declaration order.
    """
    keywords = self.packageKeywords
    if not keywords:
        return [DEFAULT]
    return list(set(keywords))
def Font_Parent(self):
    """Return the object three ``_parent`` hops up from a ::Font::
    (its root response), or None when the chain is incomplete."""
    parent = getattr(self, "_parent", None)
    grandparent = getattr(parent, "_parent", None)
    if hasattr(grandparent, "_parent"):
        return grandparent._parent


# Expose the chain walk as a read-only property on Font.
Font.parent = property(Font_Parent)
class FontProxy(Proxy):
    """Proxy holding a single ::Font:: object."""

    dataType = Font
class FontListProxy(ListProxy):
    """List proxy whose items are ::FontProxy:: objects (a list of fonts)."""

    dataType = FontProxy
# Font Family
class Family(DictBasedObject):
    """A font family: holds its fonts, shared version information,
    designer references, and font packages."""

    # key: [data type, required, default value, description]
    _structure = {
        "uniqueID": [
            StringDataType,
            True,
            None,
            "An string that uniquely identifies this family within the publisher.",
        ],
        "name": [
            MultiLanguageTextProxy,
            True,
            None,
            "Human-readable name of font family. This may include any additions "
            "that you find useful to communicate to your users.",
        ],
        "description": [
            MultiLanguageLongTextProxy,
            False,
            None,
            "Description of font family",
        ],
        "billboardURLs": [
            BillboardListProxy,
            False,
            None,
            "List of URLs pointing at images to show for this typeface. "
            "We suggest to use square dimensions and uncompressed SVG "
            "images because they scale to all sizes smoothly, "
            "but ultimately any size or HTML-compatible image type "
            "is possible.",
        ],
        "designerKeywords": [
            DesignersReferencesListProxy,
            False,
            None,
            "List of keywords referencing designers. These are defined at "
            "::InstallableFontsResponse.designers::. In case designers differ "
            "between fonts within the same family, they can also be defined at the "
            "font level at ::Font.designers::. The font-level references take "
            "precedence over the family-level references.",
        ],
        "packages": [
            FontPackageListProxy,
            False,
            None,
            "Family-wide list of ::FontPackage:: objects. These will be "
            "referenced by their keyword in ::Font.packageKeywords::",
        ],
        "sourceURL": [
            WebURLDataType,
            False,
            None,
            "URL pointing to the source of a font project, such as a GitHub repository",
        ],
        "issueTrackerURL": [
            WebURLDataType,
            False,
            None,
            "URL pointing to an issue tracker system, where users can debate "
            "about a typeface’s design or technicalities",
        ],
        "galleryURL": [
            WebURLDataType,
            False,
            None,
            "URL pointing to a web site that shows real world examples of the "
            "fonts in use or other types of galleries.",
        ],
        "versions": [
            VersionListProxy,
            False,
            None,
            "List of ::Version:: objects. Versions specified here are expected to "
            "be available for all fonts in the family, which is probably most "
            "common and efficient. You may define additional font-specific "
            "versions at the ::Font:: object. You may also rely entirely on "
            "font-specific versions and leave this field here empty. However, "
            "either the fonts or the font family *must* carry version information "
            "and the validator will complain when they don’t.\n\nPlease also read "
            "the section on [versioning](#versioning) above.",
        ],
        "fonts": [
            FontListProxy,
            True,
            [],
            "List of ::Font:: objects. The order will be displayed unchanged in "
            "the UI, so it’s in your responsibility to order them correctly.",
        ],
        "dateFirstPublished": [
            DateDataType,
            False,
            None,
            "Human readable date of the initial release of the family. May be "
            "overriden on font level at ::Font.dateFirstPublished::.",
        ],
        "pdfURL": [
            WebResourceURLDataType,
            False,
            None,
            "URL of PDF file with type specimen and/or instructions for entire "
            "family. May be overriden on font level at ::Font.pdfURL::.",
        ],
    }

    def sample(self):
        """Return a small example Family, used for documentation purposes."""
        o = self.__class__()
        o.name.en = "Awesome Family"
        o.description.en = "Nice big fat face with smooth corners"
        o.description.de = "Fette Groteske mit runden Ecken"
        o.uniqueID = "AwesomeFonts-AwesomeFamily"
        return o

    def __repr__(self):
        # Bug fix: parenthesize the fallback. Previously this parsed as
        # ("<Family '%s'>" % self.name.getText()) or "undefined", and since the
        # formatted string is always truthy, "undefined" was unreachable and a
        # missing name rendered as "<Family 'None'>".
        return "<Family '%s'>" % (self.name.getText() or "undefined")

    def customValidation(self):
        """Family-level validation.

        Returns a tuple of three lists of message strings:
        (information, warnings, critical).
        """
        information, warnings, critical = [], [], []
        # Checking for designers; self.parent.parent is the root response
        # object holding the publisher-wide designer definitions.
        for designerKeyword in self.designerKeywords:
            if not self.parent.parent.getDesignerByKeyword(designerKeyword):
                critical.append(
                    "Has designer '%s', but %s.designers has no matching designer."
                    % (designerKeyword, self.parent.parent)
                )
        return information, warnings, critical

    def getDesigners(self):
        """Return (and cache) the ::Designer:: objects referenced at the family level."""
        if not hasattr(self, "_designers"):
            self._designers = []
            for designerKeyword in self.designerKeywords:
                self._designers.append(self.parent.parent.getDesignerByKeyword(designerKeyword))
        return self._designers

    def getAllDesigners(self):
        """\
        Returns a list of ::Designer:: objects that represent all of the designers
        referenced both at the family level as well as with all the family’s fonts,
        in case the fonts carry specific designers. This could be used to give a
        one-glance overview of all designers involved.
        """
        if not hasattr(self, "_allDesigners"):
            self._allDesigners = []
            self._allDesignersKeywords = []
            for designerKeyword in self.designerKeywords:
                self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword))
                self._allDesignersKeywords.append(designerKeyword)
            for font in self.fonts:
                for designerKeyword in font.designerKeywords:
                    if designerKeyword not in self._allDesignersKeywords:
                        self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword))
                        self._allDesignersKeywords.append(designerKeyword)
        return self._allDesigners

    def getPackages(self, filterByFontPurpose=None):
        """Return the list of ::FontPackage:: objects referenced by this family's
        fonts, each with a ``fonts`` attribute attached listing its fonts.

        filterByFontPurpose: optional list of ::Font.purpose:: values; when
        given, only fonts with a matching purpose are considered.
        (Bug fix: default changed from a shared mutable ``[]`` to None;
        behavior is unchanged because the value is only ever read.)
        """
        if filterByFontPurpose is None:
            filterByFontPurpose = []
        packageKeywords = []
        packages = []
        packageByKeyword = {}

        def passedFilter(font):
            # Apply font.purpose filter
            return not filterByFontPurpose or font.purpose in filterByFontPurpose

        # Collect list of unique package keyword references in family's fonts
        for font in self.fonts:
            if passedFilter(font):
                for keyword in font.getPackageKeywords():
                    if keyword not in packageKeywords:
                        packageKeywords.append(keyword)
        # Prepend a DEFAULT package
        if DEFAULT in packageKeywords:
            defaultPackage = FontPackage()
            defaultPackage.keyword = DEFAULT
            defaultPackage.name.en = DEFAULT
            packages.append(defaultPackage)
            packageByKeyword[DEFAULT] = defaultPackage
        # Build list of FontPackage objects
        for package in self.packages:
            if package.keyword in packageKeywords:
                packages.append(package)
                packageByKeyword[package.keyword] = package
        # Attach fonts attribute to each package
        for package in packages:
            package.fonts = []
        # Attach fonts to packages
        # NOTE(review): if a font references a package keyword that is not
        # defined in self.packages (and is not DEFAULT), the lookup below
        # raises KeyError — confirm whether validation elsewhere guarantees
        # that all referenced keywords are defined.
        for font in self.fonts:
            if passedFilter(font):
                for keyword in font.getPackageKeywords():
                    packageByKeyword[keyword].fonts.append(font)
        return packages
def Family_Parent(self):
    """Return the object three ``_parent`` hops up from a ::Family::
    (its root response), or None when the chain is incomplete."""
    parent = getattr(self, "_parent", None)
    grandparent = getattr(parent, "_parent", None)
    if hasattr(grandparent, "_parent"):
        return grandparent._parent


# Expose the chain walk as a read-only property on Family.
Family.parent = property(Family_Parent)
class FamilyProxy(Proxy):
    """Proxy holding a single ::Family:: object."""

    dataType = Family
class FamiliesListProxy(ListProxy):
    """List proxy whose items are ::FamilyProxy:: objects (a list of families)."""

    dataType = FamilyProxy
########################################################################################
# Web Links
class WebURLListProxy(ListProxy):
    """List proxy whose items are web URLs (::WebURLDataType::)."""

    dataType = WebURLDataType
########################################################################################
# Font Foundry
class StylingDataType(DictionaryDataType):
    """Dictionary data type for ::Foundry.styling::, holding per-theme
    ('light'/'dark') color values and a logo URL."""

    def exampleData(self):
        """Return an example styling dictionary (used for documentation)."""
        return {
            "light": {
                "headerColor": "219BD3",
                "headerTextColor": "000000",
                "headerLinkColor": "145F7F",
                "backgroundColor": "FFFFFF",
                "textColor": "000000",
                "linkColor": "F7AD22",
                "selectionColor": "F7AD22",
                "selectionTextColor": "000000",
                "buttonColor": "197AA3",
                "buttonTextColor": "FFFFFF",
                "informationViewBackgroundColor": "F2F2F2",
                "informationViewTextColor": "000000",
                "informationViewLinkColor": "1D89B8",
                "informationViewButtonColor": "197AA3",
                "informationViewButtonTextColor": "FFFFFF",
                "logoURL": "https://awesomefoundry.com/logo-lighttheme.svg",
            },
            "dark": {
                "headerColor": "156486",
                "headerTextColor": "000000",
                "headerLinkColor": "53B9E4",
                "backgroundColor": "262626",
                "textColor": "999999",
                "linkColor": "C07F07",
                "selectionColor": "9A6606",
                "selectionTextColor": "000000",
                "buttonColor": "22A4DC",
                "buttonTextColor": "000000",
                "informationViewBackgroundColor": "1A1A1A",
                "informationViewTextColor": "999999",
                "informationViewLinkColor": "53B9E4",
                "informationViewButtonColor": "22A4DC",
                "informationViewButtonTextColor": "000000",
                "logoURL": "https://awesomefoundry.com/logo-darktheme.svg",
            },
        }
class Foundry(DictBasedObject):
    """A font foundry: holds families, license definitions, contact
    information, and UI styling."""

    # key: [data type, required, default value, description]
    _structure = {
        "uniqueID": [
            StringDataType,
            True,
            None,
            "An string that uniquely identifies this foundry within the publisher.",
        ],
        "name": [MultiLanguageTextProxy, True, None, "Name of foundry"],
        "description": [
            MultiLanguageLongTextProxy,
            False,
            None,
            "Description of foundry",
        ],
        "styling": [
            StylingDataType,
            False,
            {"light": {}, "dark": {}},
            "Dictionary of styling values, for light and dark theme. See example "
            "below. If you want to style your foundry here, please start with the "
            "light theme. You may omit the dark theme.",
        ],
        "email": [
            EmailDataType,
            False,
            None,
            "General email address for this foundry.",
        ],
        "websiteURL": [WebURLDataType, False, None, "Website for this foundry"],
        "telephone": [
            TelephoneDataType,
            False,
            None,
            "Telephone number for this foundry",
        ],
        "socialURLs": [
            WebURLListProxy,
            False,
            None,
            "List of web URLs pointing to social media channels",
        ],
        "supportEmail": [
            EmailDataType,
            False,
            None,
            "Support email address for this foundry.",
        ],
        "supportURL": [
            WebURLDataType,
            False,
            None,
            "Support website for this foundry, such as a chat room, forum, online service desk.",
        ],
        "supportTelephone": [
            TelephoneDataType,
            False,
            None,
            "Support telephone number for this foundry.",
        ],
        # data
        "licenses": [
            LicenseDefinitionListProxy,
            True,
            [],
            "List of ::LicenseDefinition:: objects under which the fonts in this "
            "response are issued. For space efficiency, these licenses are defined "
            "at the foundry object and will be referenced in each font by their "
            "keyword. Keywords need to be unique for this foundry and may repeat "
            "across foundries.",
        ],
        "families": [FamiliesListProxy, True, [], "List of ::Family:: objects."],
        "packages": [
            FontPackageListProxy,
            False,
            None,
            "Foundry-wide list of ::FontPackage:: objects. These will be "
            "referenced by their keyword in ::Font.packageKeywords::",
        ],
    }

    # Styling keys that must hold valid hex colors (see customValidation()).
    _stylingColorAttributes = (
        "headerColor",
        "headerTextColor",
        "headerLinkColor",
        "backgroundColor",
        "textColor",
        "linkColor",
        "selectionColor",
        "selectionTextColor",
        "buttonColor",
        "buttonTextColor",
        "informationViewBackgroundColor",
        "informationViewTextColor",
        "informationViewLinkColor",
        "informationViewButtonColor",
        "informationViewButtonTextColor",
    )

    def sample(self):
        """Return a small example Foundry, used for documentation purposes."""
        o = self.__class__()
        o.name.en = "Awesome Fonts"
        o.name.de = "Geile Schriften"
        o.websiteURL = "https://awesomefonts.com"
        o.uniqueID = "AwesomeFonts"
        return o

    def __repr__(self):
        # Bug fix: parenthesize the fallback. Previously this parsed as
        # ("<Foundry '%s'>" % self.name.getText()) or "undefined", and since
        # the formatted string is always truthy, "undefined" was unreachable
        # and a missing name rendered as "<Foundry 'None'>".
        return "<Foundry '%s'>" % (self.name.getText() or "undefined")

    def getLicenseByKeyword(self, keyword):
        """Return the ::LicenseDefinition:: with the given keyword, or None.

        Builds and caches a keyword->license lookup on first use.
        """
        if not hasattr(self, "_licensesDict"):
            self._licensesDict = {}
            for license in self.licenses:
                self._licensesDict[license.keyword] = license
        if keyword in self._licensesDict:
            return self._licensesDict[keyword]

    def customValidation(self):
        """Foundry-level validation of the .styling dictionary.

        Returns a tuple of three lists of message strings:
        (information, warnings, critical).
        """
        information, warnings, critical = [], [], []
        themes = ["light", "dark"]
        if self.styling:
            for theme in self.styling:
                if theme not in themes:
                    critical.append("Styling keyword '%s' is unknown. Known are %s." % (theme, themes))
                # Validate each color value through HexColorDataType.
                for colorKey in self._stylingColorAttributes:
                    if colorKey in self.styling[theme]:
                        c = HexColorDataType()
                        c.value = self.styling[theme][colorKey]
                        valid = c.valid()
                        if valid is not True:
                            critical.append(".styling color attribute '%s': %s" % (colorKey, valid))
                # Validate the logo URL through WebURLDataType.
                if "logoURL" in self.styling[theme]:
                    logo = WebURLDataType()
                    logo.value = self.styling[theme]["logoURL"]
                    valid = logo.valid()
                    if valid is not True:
                        critical.append(".styling 'logoURL' attribute: %s" % (valid))
        return information, warnings, critical
def Foundry_Parent(self):
    """Return the object three ``_parent`` hops up from a ::Foundry::
    (its root response), or None when the chain is incomplete."""
    parent = getattr(self, "_parent", None)
    grandparent = getattr(parent, "_parent", None)
    if hasattr(grandparent, "_parent"):
        return grandparent._parent


# Expose the chain walk as a read-only property on Foundry.
Foundry.parent = property(Foundry_Parent)
class FoundryProxy(Proxy):
    """Proxy holding a single ::Foundry:: object."""

    dataType = Foundry
class FoundryListProxy(ListProxy):
    """List proxy whose items are ::FoundryProxy:: objects (a list of foundries)."""

    dataType = FoundryProxy
class CommercialAppsAllowedProxy(Proxy):
    """Proxy holding a single app-identifier string."""

    dataType = str
class CommercialAppsAllowedListProxy(ListProxy):
    """List proxy of app-identifier strings for ::EndpointResponse.allowedCommercialApps::."""

    dataType = CommercialAppsAllowedProxy
    # presumably keeps empty lists in serialized output, so an explicit
    # `allowedCommercialApps = []` (deny all) survives a round trip — TODO confirm
    includeEmpty = True
########################################################################################
# Base Response
class BaseResponse(DictBasedObject):
    """Common base for all response objects; provides repr and shared validation."""

    def __repr__(self):
        return "<%s>" % type(self).__name__

    def customValidation(self):
        """Shared validation: an 'error' response must carry an .errorMessage.

        Returns (information, warnings, critical) lists of message strings.
        """
        information, warnings, critical = [], [], []
        isErrorResponse = hasattr(self, "response") and self.response == ERROR
        if isErrorResponse and self.errorMessage.isEmpty():
            critical.append(f".response is '{ERROR}', but .errorMessage is missing.")
        return information, warnings, critical
########################################################################################
# Available Fonts
class InstallableFontsResponseType(ResponseCommandDataType):
    """Data type for the .response field of an ::InstallableFontsResponse::."""

    def valid(self):
        """Return True for an empty or known response type, else an error string."""
        value = self.value
        if not value:
            return True
        known = INSTALLABLEFONTSCOMMAND["responseTypes"]
        if value in known:
            return True
        return "Unknown response type: '%s'. Possible: %s" % (value, known)
class InstallableFontsResponse(BaseResponse):
    """\
    This is the response expected to be returned when the API is invoked using the
    `?commands=installableFonts` parameter, and contains metadata about which fonts
    are available to install for a user.
    """

    _command = INSTALLABLEFONTSCOMMAND

    # key: [data type, required, default value, description]
    _structure = {
        # Root
        "response": [
            InstallableFontsResponseType,
            True,
            None,
            "Type of response: %s" % (ResponsesDocu(INSTALLABLEFONTSCOMMAND["responseTypes"])),
        ],
        "errorMessage": [
            MultiLanguageTextProxy,
            False,
            None,
            "Description of error in case of ::InstallableFontsResponse.response:: being 'custom'.",
        ],
        # Response-specific
        "designers": [
            DesignersListProxy,
            False,
            None,
            "List of ::Designer:: objects, referenced in the fonts or font "
            "families by the keyword. These are defined at the root of the "
            "response for space efficiency, as one designer can be involved in "
            "the design of several typefaces across several foundries.",
        ],
        "foundries": [
            FoundryListProxy,
            True,
            [],
            "List of ::Foundry:: objects; foundries that this distributor "
            "supports. In most cases this will be only one, as many foundries "
            "are their own distributors.",
        ],
        "packages": [
            FontPackageListProxy,
            False,
            None,
            "Publisher-wide list of ::FontPackage:: objects. These will be "
            "referenced by their keyword in ::Font.packageKeywords::",
        ],
        "name": [
            MultiLanguageTextProxy,
            False,
            None,
            # Typo fixes in this description: "the use has" -> "the user has",
            # "decription" -> "description".
            "A name of this response and its contents. This is needed to manage "
            "subscriptions in the UI. For instance 'Free Fonts' for all free and "
            "non-restricted fonts, or 'Commercial Fonts' for all those fonts that "
            "the user has commercially licensed, so their access is restricted. "
            "In case of a free font website that offers individual subscriptions "
            "for each typeface, this description could be the name of the typeface.",
        ],
        "userName": [
            MultiLanguageTextProxy,
            False,
            None,
            "The name of the user who these fonts are licensed to.",
        ],
        "userEmail": [
            EmailDataType,
            False,
            None,
            "The email address of the user who these fonts are licensed to.",
        ],
        "userIsVerified": [
            BooleanDataType,
            False,
            False,
            "This user is known to the publisher. The initial implication for this is to not display the Terms of"
            " Service and Privacy banner to this user, as they have already agreed to the terms on the publisher’s"
            " website. Only new users (invitees) will be presented with the banner.",
        ],
        "prefersRevealedUserIdentity": [
            BooleanDataType,
            True,
            False,
            "Indicates that the publisher prefers to have the user reveal his/her "
            "identity to the publisher when installing fonts. In the app, the user "
            "will be asked via a dialog to turn the setting on, but is not "
            "required to do so.",
        ],
    }

    def getFontByUniqueID(self, ID):
        """Return the ::Font:: with the given uniqueID, or None if not found."""
        for foundry in self.foundries:
            for family in foundry.families:
                for font in family.fonts:
                    if font.uniqueID == ID:
                        return font

    def getContentChanges(self, other, calculateOverallChanges=True):
        """Compare this response (old) against ``other`` (new).

        Returns a dict that may contain 'addedFonts', 'removedFonts',
        'fontsWithAddedVersions' counts and an 'overallChanges' flag.
        Set calculateOverallChanges=False to skip the (potentially
        expensive) full-content comparison via sameContent().
        """
        comparison = {}
        oldFonts = []
        newFonts = []
        newVersions = 0
        # Accumulate old and new fonts
        for foundry in self.foundries:
            for family in foundry.families:
                for font in family.fonts:
                    oldFonts.append(font.uniqueID)
        for foundry in other.foundries:
            for family in foundry.families:
                for font in family.fonts:
                    newFonts.append(font.uniqueID)
                    # Versions: count fonts whose version list grew
                    oldFont = self.getFontByUniqueID(font.uniqueID)
                    if oldFont and len(font.getVersions()) > len(oldFont.getVersions()):
                        newVersions += 1
        # Added or removed fonts
        addedFonts = set(newFonts) - set(oldFonts)
        if addedFonts:
            comparison["addedFonts"] = len(addedFonts)
            comparison["overallChanges"] = True
        removedFonts = set(oldFonts) - set(newFonts)
        if removedFonts:
            comparison["removedFonts"] = len(removedFonts)
            comparison["overallChanges"] = True
        if newVersions:
            comparison["fontsWithAddedVersions"] = newVersions
            comparison["overallChanges"] = True
        # Other content changes (including the above ones)
        if calculateOverallChanges:
            identical = self.sameContent(other)
            if not identical:
                comparison["overallChanges"] = True
        return comparison

    def sample(self):
        """Return a minimal example response, used for documentation purposes."""
        o = self.__class__()
        o.response = "success"
        return o

    def getDesignerByKeyword(self, keyword):
        """Return the ::Designer:: with the given keyword, or None.

        Builds and caches a keyword->designer lookup on first use.
        """
        if not hasattr(self, "_designersDict"):
            self._designersDict = {}
            for designer in self.designers:
                self._designersDict[designer.keyword] = designer
        if keyword in self._designersDict:
            return self._designersDict[keyword]

    def discardThisKey(self, key):
        """Omit content keys from serialization for non-success responses."""
        if key in ["foundries", "designers", "licenseIdentifier"] and self.response != "success":
            return True
        return False

    def customValidation(self):
        """Response-level validation.

        Returns a tuple of three lists of message strings:
        (information, warnings, critical).
        """
        information, warnings, critical = [], [], []
        # Bug fix: the ERROR/.errorMessage check is inherited from
        # BaseResponse.customValidation() and merged in below; a second local
        # copy here previously appended the same critical message twice.
        if self.response == "success" and not self.name.getText():
            warnings.append(
                "The response has no .name value. It is not required, but highly "
                "recommended, to describe the purpose of this subscription to the "
                "user (such as 'Commercial Fonts', 'Free Fonts', etc. This is "
                "especially useful if you offer several different subscriptions "
                "to the same user."
            )
        # Check all uniqueIDs for duplicity
        foundryIDs = []
        familyIDs = []
        fontIDs = []
        for foundry in self.foundries:
            foundryIDs.append(foundry.uniqueID)
            for family in foundry.families:
                familyIDs.append(family.uniqueID)
                for font in family.fonts:
                    fontIDs.append(font.uniqueID)

        import collections

        duplicateFoundryIDs = [item for item, count in list(collections.Counter(foundryIDs).items()) if count > 1]
        if duplicateFoundryIDs:
            critical.append("Duplicate unique foundry IDs: %s" % duplicateFoundryIDs)
        duplicateFamilyIDs = [item for item, count in list(collections.Counter(familyIDs).items()) if count > 1]
        if duplicateFamilyIDs:
            critical.append("Duplicate unique family IDs: %s" % duplicateFamilyIDs)
        duplicateFontIDs = [item for item, count in list(collections.Counter(fontIDs).items()) if count > 1]
        if duplicateFontIDs:
            # Bug fix: this message previously read "family IDs" although it
            # reports duplicate *font* IDs.
            critical.append("Duplicate unique font IDs: %s" % duplicateFontIDs)
        newInformation, newWarnings, newCritical = super().customValidation()
        if newInformation:
            information.extend(newInformation)
        if newWarnings:
            warnings.extend(newWarnings)
        if newCritical:
            critical.extend(newCritical)
        return information, warnings, critical
########################################################################################
# InstallFonts
class InstallFontAssetResponseType(ResponseCommandDataType):
    """Data type for the .response field of an ::InstallFontAsset::."""

    def valid(self):
        """Return True for an empty or known response type, else an error string."""
        value = self.value
        if not value:
            return True
        known = INSTALLFONTASSETCOMMAND["responseTypes"]
        if value in known:
            return True
        return "Unknown response type: '%s'. Possible: %s" % (value, known)
class InstallFontAsset(BaseResponse):
    """\
    This is the response expected to be returned when the API is invoked using the
    `?commands=installFonts` parameter.
    """

    # key: [data type, required, default value, description]
    _structure = {
        # Root
        "response": [
            InstallFontAssetResponseType,
            True,
            None,
            "Type of response: %s" % (ResponsesDocu(INSTALLFONTASSETCOMMAND["responseTypes"])),
        ],
        "errorMessage": [
            MultiLanguageTextProxy,
            False,
            None,
            "Description of error in case of custom response type",
        ],
        "uniqueID": [
            StringDataType,
            True,
            None,
            "A machine-readable string that uniquely identifies this font within "
            "the subscription. Must match the requested fonts.",
        ],
        "version": [
            VersionDataType,
            True,
            None,
            "Font version. Must match the requested fonts.",
        ],
        "mimeType": [
            FontMimeType,
            False,
            None,
            "MIME Type of data. For desktop fonts, these are %s." % FONTPURPOSES["desktop"]["acceptableMimeTypes"],
        ],
        "dataURL": [
            WebURLDataType,
            False,
            None,
            "HTTP link of font file resource. ::InstallFontAsset.data:: and "
            "::InstallFontAsset.dataURL:: are mutually exclusive; only one can be "
            "specified. The HTTP resource must be served under the correct "
            "MIME type specified in ::InstallFontAsset.mimeType:: and is expected "
            "to be in raw binary encoding; ::InstallFontAsset.encoding:: "
            "is not regarded.",
        ],
        "data": [
            FontDataType,
            False,
            None,
            "Binary data as a string encoded as one of the following supported "
            "encodings: ::InstallFontResponse.encoding::. "
            "::InstallFontAsset.data:: and ::InstallFontAsset.dataURL:: are "
            "mutually exclusive; only one can be specified.",
        ],
        "encoding": [
            FontEncodingDataType,
            False,
            None,
            "Encoding type for font data in ::InstallFontResponse.data::. Currently supported: %s" % (FONTENCODINGS),
        ],
    }

    def sample(self):
        """Return a small example asset, used for documentation purposes."""
        o = self.__class__()
        o.response = "success"
        o.uniqueID = "AwesomeFonts-AwesomeFamily-Bold"
        o.mimeType = "font/otf"
        o.data = "emplNXpqdGpoNXdqdHp3enRq..."
        o.encoding = "base64"
        o.version = "1.1"
        return o

    def customValidation(self):
        """Asset-level validation of the data/dataURL/encoding/mimeType fields.

        Returns a tuple of three lists of message strings:
        (information, warnings, critical).
        """
        information, warnings, critical = [], [], []
        if self.response == "success" and (not self.data and not self.dataURL):
            critical.append(".response is set to success, but neither .data nor .dataURL are set.")
        if self.data and not self.encoding:
            critical.append(".data is set, but .encoding is missing")
        if self.data and not self.mimeType:
            critical.append(".data is set, but .mimeType is missing")
        if self.dataURL and not self.mimeType:
            critical.append(".dataURL is set, but .mimeType is missing")
        if self.dataURL and self.data:
            critical.append("Either .dataURL or .data can be defined, not both")
        # Bug fix: the ERROR/.errorMessage check is inherited from
        # BaseResponse.customValidation() and merged in below; a second local
        # copy here previously appended the same critical message twice.
        newInformation, newWarnings, newCritical = super().customValidation()
        if newInformation:
            information.extend(newInformation)
        if newWarnings:
            warnings.extend(newWarnings)
        if newCritical:
            critical.extend(newCritical)
        return information, warnings, critical
class InstallFontResponseType(ResponseCommandDataType):
    """Data type for the .response field of an ::InstallFontsResponse::."""

    def valid(self):
        """Return True for an empty or known response type, else an error string."""
        value = self.value
        if not value:
            return True
        known = INSTALLFONTSCOMMAND["responseTypes"]
        if value in known:
            return True
        return "Unknown response type: '%s'. Possible: %s" % (value, known)
class InstallFontAssetProxy(Proxy):
    """Proxy holding a single ::InstallFontAsset:: object."""

    dataType = InstallFontAsset
class InstallFontAssetListProxy(ListProxy):
    """List proxy whose items are ::InstallFontAssetProxy:: objects."""

    dataType = InstallFontAssetProxy
class InstallFontsResponse(BaseResponse):
    """\
    This is the response expected to be returned when the API is invoked using the
    `?commands=installFonts` parameter, and contains the requested binary fonts
    attached as ::InstallFontAsset:: objects.
    """

    _command = INSTALLFONTSCOMMAND

    # key: [data type, required, default value, description]
    _structure = {
        # Root
        "response": [
            InstallFontResponseType,
            True,
            None,
            # Bug fix: this description previously listed the response types of
            # UNINSTALLFONTSCOMMAND, but this field is validated against
            # INSTALLFONTSCOMMAND (see InstallFontResponseType.valid()).
            "Type of response: %s" % (ResponsesDocu(INSTALLFONTSCOMMAND["responseTypes"])),
        ],
        "errorMessage": [
            MultiLanguageTextProxy,
            False,
            None,
            "Description of error in case of custom response type",
        ],
        "assets": [
            InstallFontAssetListProxy,
            False,
            None,
            "List of ::InstallFontAsset:: objects.",
        ],
    }

    def sample(self):
        """Return a minimal example response, used for documentation purposes."""
        o = self.__class__()
        o.response = "success"
        o.assets = [InstallFontAsset().sample()]
        return o
########################################################################################
# Uninstall Fonts
# NOTE: class name misspells "Asset" as "Assed"; kept unchanged because the
# name is referenced elsewhere.
class UninstallFontAssedResponseType(ResponseCommandDataType):
    """Data type for the .response field of an ::UninstallFontAsset::."""

    def valid(self):
        """Return True for an empty or known response type, else an error string."""
        value = self.value
        if not value:
            return True
        known = UNINSTALLFONTASSETCOMMAND["responseTypes"]
        if value in known:
            return True
        return "Unknown response type: '%s'. Possible: %s" % (value, known)
class UninstallFontAsset(BaseResponse):
    """\
    This is the response expected to be returned when the API is invoked using the
    `?commands=uninstallFonts` parameter.
    """

    # key: [data type, required, default value, description]
    _structure = {
        # Root
        "response": [
            UninstallFontAssedResponseType,
            True,
            None,
            "Type of response: %s" % (ResponsesDocu(UNINSTALLFONTASSETCOMMAND["responseTypes"])),
        ],
        "errorMessage": [
            MultiLanguageTextProxy,
            False,
            None,
            "Description of error in case of custom response type",
        ],
        "uniqueID": [
            StringDataType,
            True,
            None,
            "A machine-readable string that uniquely identifies this font within "
            "the subscription. Must match the requested fonts.",
        ],
        # Response-specific
    }

    def sample(self):
        """Return a small example asset, used for documentation purposes."""
        o = self.__class__()
        o.response = "success"
        o.uniqueID = "AwesomeFonts-AwesomeFamily-Bold"
        return o
class UninstallFontResponseType(ResponseCommandDataType):
    """Data type for the .response field of an ::UninstallFontsResponse::."""

    def valid(self):
        """Return True for an empty or known response type, else an error string."""
        value = self.value
        if not value:
            return True
        known = UNINSTALLFONTSCOMMAND["responseTypes"]
        if value in known:
            return True
        return "Unknown response type: '%s'. Possible: %s" % (value, known)
class UninstallFontAssetProxy(Proxy):
    """Proxy holding a single ::UninstallFontAsset:: object."""

    dataType = UninstallFontAsset
class UninstallFontAssetListProxy(ListProxy):
    """List proxy whose items are ::UninstallFontAssetProxy:: objects."""

    dataType = UninstallFontAssetProxy
class UninstallFontsResponse(BaseResponse):
    """\
    This is the response expected to be returned when the API is invoked using the
    `?commands=uninstallFonts` parameter, and contains empty responses as
    ::UninstallFontAsset:: objects.
    While empty of data, these asset objects are still necessary because each font
    uninstallation request may return a different response, to which the GUI app needs
    to respond to accordingly.
    """

    _command = UNINSTALLFONTSCOMMAND

    # key: [data type, required, default value, description]
    _structure = {
        # Root
        "response": [
            UninstallFontResponseType,
            True,
            None,
            "Type of response: %s" % (ResponsesDocu(UNINSTALLFONTSCOMMAND["responseTypes"])),
        ],
        "errorMessage": [
            MultiLanguageTextProxy,
            False,
            None,
            "Description of error in case of custom response type",
        ],
        "assets": [
            UninstallFontAssetListProxy,
            False,
            None,
            "List of ::UninstallFontAsset:: objects.",
        ],
    }

    def sample(self):
        """Return a minimal example response, used for documentation purposes."""
        o = self.__class__()
        o.response = "success"
        o.assets = [UninstallFontAsset().sample()]
        return o
########################################################################################
class EndpointResponse(BaseResponse):
"""\
This is the response expected to be returned when the API is invoked using the
`?commands=endpoint` parameter.
This response contains some mandatory information about the API endpoint such as its
name and admin email, the copyright license under which the API endpoint issues its
data, and whether or not this endpoint can be publicized about.
"""
_command = ENDPOINTCOMMAND
# key: [data type, required, default value, description]
_structure = {
"canonicalURL": [
WebURLDataType,
True,
None,
(
"Same as the API Endpoint URL, bare of IDs and other parameters. "
"Used for grouping of subscriptions. It is expected that this URL "
"will not change. When it does, it will be treated as a different "
"publisher.<br />"
"The *API Endpoint URL* must begin with the *Canonical URL* "
"(if you indeed choose the two to be different) or otherwise "
"subscriptions could impersonate another publisher by displaying "
"their name and using their Canonical URL. In other words, "
"both must be located on the same server."
# TODO: Actually implement the above security feature
),
],
"adminEmail": [
EmailDataType,
True,
None,
"API endpoint Administrator. This email needs to be reachable for "
"various information around the Type.World protocol as well as "
"technical problems.",
],
"licenseIdentifier": [
OpenSourceLicenseIdentifierDataType,
True,
"CC-BY-NC-ND-4.0",
"Machine-readable identifier of license under which the API Endpoint "
"publishes its (metda)data, "
"as per [https://spdx.org/licenses/](). This license will not "
"be presented to the user. Instead, the software client that accesses "
"your API Endpoint needs to be aware of "
"the license and proceed only if allowed, otherwise decline the usage "
"of this API endpoint. In other words, the non-commercial "
"`CC-BY-NC-ND-4.0` license that is the default here forbids commercial "
"software from accessing your API Endpoint unless they have a separate "
"legal agreememt with you.",
],
"publisherTypes": [
SupportedPublisherTypeListProxy,
True,
[],
f"List of publisher business types: {PUBLISHERTYPES}. "
"In case ::EndpointResponse.public:: is set to `True`, only the "
f"following types are allowed: {PUBLICPUBLISHERTYPES}",
],
"supportedCommands": [
SupportedAPICommandsListProxy,
True,
None,
"List of commands this API endpoint supports: %s" % [x["keyword"] for x in COMMANDS],
],
"name": [
MultiLanguageTextProxy,
True,
None,
"Human-readable name of API endpoint",
],
"public": [
BooleanDataType,
True,
False,
"API endpoint is meant to be publicly visible and its existence may be publicized within the project",
],
"sendsLiveNotifications": [
BooleanDataType,
True,
False,
"API endpoint is sending live notifications through the central server,"
" namely through the `updateSubscription` command. "
"The app won’t start listening to live notifications unless a "
"subscription holds this setting. ",
],
"allowedCommercialApps": [
CommercialAppsAllowedListProxy,
True,
["world.type.app"],
"Machine-readable list of commercial apps that are allowed to "
"access this API Endpoint in case "
"::EndpointResponse.licenseIdentifier:: carries a non-commercial "
"copyright license such as the default `CC-BY-NC-ND-4.0`. "
"A reverse-domain notation for the app ID is recommended "
"but not required. "
"Note: As the originator of the entire technology, the Type.World App "
"is on this list by default, even though it is a commercial app. "
"This is for backwards-compatibility for endpoints that don’t "
"carry this attribute yet but are expected to allow access by "
"Type.World. If you don’t want the Type.World to access "
"your API Endpoint, you may explicitly unset this attribute to an "
"empty list: `endpoint.allowedCommercialApps = []`",
],
"logoURL": [
WebResourceURLDataType,
False,
None,
"URL of logo of API endpoint, for publication. Specifications to follow.",
],
"backgroundColor": [
HexColorDataType,
False,
None,
"Publisher’s preferred background color. This is meant to go as a "
"background color to the logo at ::APIRoot.logoURL::",
],
"websiteURL": [
WebURLDataType,
False,
None,
"URL of human-visitable website of API endpoint, for publication",
],
"privacyPolicyURL": [
WebURLDataType,
True,
"https://type.world/legal/default/PrivacyPolicy.html",
"URL of human-readable Privacy Policy of API endpoint. This will be "
"displayed to the user for consent when adding a subscription. "
"The default URL points to a document edited by Type.World that you "
"can use (at your own risk) instead of having to write your own.",
],
"termsOfServiceURL": [
WebURLDataType,
True,
"https://type.world/legal/default/TermsOfService.html",
"URL of human-readable Terms of Service Agreement of API endpoint. "
"This will be displayed to the user for consent when adding a "
"subscription. The default URL points to a document edited by "
"Type.World that you can use (at your own risk) instead of having to "
"write your own.",
],
"loginURL": [
WebURLDataType,
False,
None,
"URL for user to log in to publisher’s account in case a validation "
"is required. This normally work in combination with the "
"`loginRequired` response.",
],
}
def sample(self):
    """Return a fully populated example response, used for docs and tests."""
    example = self.__class__()
    example.canonicalURL = "https://awesomefonts.com/api/"
    example.adminEmail = "admin@awesomefonts.com"
    example.supportedCommands = [
        "endpoint",
        "installableFonts",
        "installFonts",
        "uninstallFonts",
    ]
    example.name.en = "Awesome Fonts"
    example.name.de = "Geile Schriften"
    example.privacyPolicyURL = "https://awesomefonts.com/privacypolicy.html"
    example.termsOfServiceURL = "https://awesomefonts.com/termsofservice.html"
    example.public = True
    return example
def customValidation(self):
    """Endpoint-specific checks; returns (information, warnings, critical) lists."""
    information = []
    warnings = []
    critical = []

    # Warn (not fail) when the canonical URL is plain HTTP.
    if self.canonicalURL and not self.canonicalURL.startswith("https://"):
        warnings.append(
            ".canonicalURL is not using SSL (https://). "
            "Consider using SSL to protect your data."
        )

    # Public endpoints may only declare publisher types from the allowed set.
    if self.public:
        for publisherType in self.publisherTypes:
            if publisherType in PUBLICPUBLISHERTYPES:
                continue
            critical.append(
                "When EndpointResponse.public is set to True, then only a "
                "restricted set of types is allowed for "
                f"EndpointResponse.publisherTypes: {PUBLICPUBLISHERTYPES}. "
                f"You have '{publisherType}'"
            )

    return information, warnings, critical
########################################################################################
# Root Response
class EndpointResponseProxy(Proxy):
    # Proxy whose wrapped data type is EndpointResponse; referenced by
    # RootResponse._structure below.
    dataType = EndpointResponse
class InstallableFontsResponseProxy(Proxy):
    # Proxy whose wrapped data type is InstallableFontsResponse; referenced by
    # RootResponse._structure below.
    dataType = InstallableFontsResponse
class InstallFontsResponseProxy(Proxy):
    # Proxy whose wrapped data type is InstallFontsResponse; referenced by
    # RootResponse._structure below.
    dataType = InstallFontsResponse
class UninstallFontsResponseProxy(Proxy):
    # Proxy whose wrapped data type is UninstallFontsResponse; referenced by
    # RootResponse._structure below.
    dataType = UninstallFontsResponse
class RootResponse(BaseResponse):
    """\
    This is the root object for each response, and contains one or more individual
    response objects as requested in the `commands` parameter of API endpoint calls.
    This exists to speed up processes by reducing server calls. For instance,
    installing a protected font and afterwards asking for a refreshed
    `installableFonts` command requires two separate calls to the publisher’s API
    endpoint, which in turn needs to verify the requester’s identity with the central
    type.world server. By requesting `installFonts,installableFonts` commands in one go,
    a lot of time is saved.
    """

    # key: [data type, required, default value, description]
    # Each sub-response is optional; only the commands the caller asked for
    # will be present.
    _structure = {
        # Root
        "endpoint": [
            EndpointResponseProxy,
            False,
            None,
            "::EndpointResponse:: object.",
        ],
        "installableFonts": [
            InstallableFontsResponseProxy,
            False,
            None,
            "::InstallableFontsResponse:: object.",
        ],
        "installFonts": [
            InstallFontsResponseProxy,
            False,
            None,
            "::InstallFontsResponse:: object.",
        ],
        "uninstallFonts": [
            UninstallFontsResponseProxy,
            False,
            None,
            "::UninstallFontsResponse:: object.",
        ],
        "version": [
            VersionDataType,
            True,
            # NOTE(review): the root response's version defaults to the
            # 'installFonts' command's version — confirm this is intentional.
            INSTALLFONTSCOMMAND["currentVersion"],
            "Version of '%s' response" % INSTALLFONTSCOMMAND["keyword"],
        ],
    }

    def sample(self):
        """Return a small example RootResponse, for docs and tests."""
        o = self.__class__()
        o.endpoint = EndpointResponse().sample()
        o.installableFonts = InstallableFontsResponse().sample()
        return o
| Lib/typeworld/api/__init__.py | 128,690 | This is the response expected to be returned when the API is invoked using the
`?commands=endpoint` parameter.
This response contains some mandatory information about the API endpoint such as its
name and admin email, the copyright license under which the API endpoint issues its
data, and whether or not this endpoint can be publicized about.
`FontPackages` are groups of fonts that serve a certain purpose
to the user.
They can be defined at ::InstallableFontsReponse.packages::,
::Foundry.packages::, ::Family.packages::
and are referenced by their keywords in ::Font.packageKeywords::.
On a font family level, defined at ::Family.packages::, a typical example
for defining a `FontPackage` would be the so called **Office Fonts**.
While they are technically identical to other OpenType fonts, they normally
have a slightly different set of glyphs and OpenType features.
Linking them to a `FontPackage` allows the UI to display them clearly as a
separate set of fonts that serve a different purpose than the
regular fonts.
On a subscription-wide level, defined at
::InstallableFontsReponse.packages::, a `FontPackage` could represent a
curated collection of fonts of various foundries and families, for example
**Script Fonts** or **Brush Fonts** or **Corporate Fonts**.
Each font may be part of several `FontPackages`.
For the time being, only family-level FontPackages are supported in the UI.
This is the response expected to be returned when the API is invoked using the
`?commands=installFonts` parameter.
This is the response expected to be returned when the API is invoked using the
`?commands=installFonts` parameter, and contains the requested binary fonts
attached as ::InstallFontAsset:: objects.
This is the response expected to be returned when the API is invoked using the
`?commands=installableFonts` parameter, and contains metadata about which fonts
are available to install for a user.
Multi-language text. Attributes are language keys as per
[https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes]
The GUI app will then calculate the language data to be displayed using
::MultiLanguageText.getText():: with a prioritized list of languages that
the user can understand. They may be pulled from the operating system’s
language preferences.
These classes are already initiated wherever they are used, and can be
addressed instantly with the language attributes:
```python
api.name.en = u'Font Publisher XYZ'
api.name.de = u'Schriftenhaus XYZ'
```
If you are loading language information from an external source, you may use
the `.set()` method to enter data:
```python
# Simulating external data source
for languageCode, text in (
('en', u'Font Publisher XYZ'),
('de', u'Schriftenhaus XYZ'),
):
api.name.set(languageCode, text)
```
Markdown is permitted in `MultiLanguageLongText`.
Line breaks need to be escaped as `\n` characters.
Multi-language text. Attributes are language keys as per
[https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes]
The GUI app will then calculate the language data to be displayed using
::MultiLanguageText.getText():: with a prioritized list of languages that
the user can understand. They may be pulled from the operating system’s
language preferences.
These classes are already initiated wherever they are used, and can be
addressed instantly with the language attributes:
```python
api.name.en = u'Font Publisher XYZ'
api.name.de = u'Schriftenhaus XYZ'
```
If you are loading language information from an external source, you may use
the `.set()` method to enter data:
```python
# Simulating external data source
for languageCode, text in (
('en', u'Font Publisher XYZ'),
('de', u'Schriftenhaus XYZ'),
):
api.name.set(languageCode, text)
```
Neither HTML nor Markdown code is permitted in `MultiLanguageText`.
This is the root object for each response, and contains one or more individual
response objects as requested in the `commands` parameter of API endpoint calls.
This exists to speed up processes by reducing server calls. For instance,
installing a protected font and afterwards asking for a refreshed
`installableFonts` command requires two separate calls to the publisher’s API
endpoint, which in turn needs to verify the requester’s identity with the central
type.world server. By requesting `installFonts,installableFonts` commands in one go,
a lot of time is saved.
This is the response expected to be returned when the API is invoked using the
`?commands=uninstallFonts` parameter.
This is the response expected to be returned when the API is invoked using the
`?commands=uninstallFonts` parameter, and contains empty responses as
::UninstallFontAsset:: objects.
While empty of data, these asset objects are still necessary because each font
uninstallation request may return a different response, to which the GUI app needs
to respond to accordingly.
Returns the recommended font file name to be used to store the font on disk.
It is composed of the font’s uniqueID, its version string and the file
extension. Together, they must not exceed 220 characters.
Returns a list of ::Designer:: objects that represent all of the designers
referenced both at the family level as well as with all the family’s fonts,
in case the fonts carry specific designers. This could be used to give a
one-glance overview of all designers involved.
Returns list billboardURLs compiled from ::Font.billboardURLs::
and ::Family.billboardURLs::, giving the font-level definitions priority
over family-level definitions.
Returns a list of ::Designer:: objects that this font references.
These are the combination of family-level designers and font-level designers.
The same logic as for versioning applies.
Please read the section about [versioning](#versioning) above.
Calculate list of fonts of this package by applying filters for
font.format and font.variableFont (possibly more in the future)
Returns the ::License:: object that this font references.
Returns the text in the first language found from the specified
list of languages. If that language can’t be found, we’ll try English
as a standard. If that can’t be found either, return the first language
you can find.
Like getText(), but additionally returns the language of whatever
text was found first.
Returns list of ::Version:: objects.
This is the final list based on the version information in this font object as
well as in its parent ::Family:: object. Please read the section about
[versioning](#versioning) above.
Returns True if this version is defined at the font level.
Returns False if this version is defined at the family level.
Turn simple float number (0.1) into semver-compatible number
for comparison by adding .0(s): (0.1.0)
-*- coding: utf-8 -*- Constants Response types (success, error, ...) Commands https://tools.ietf.org/html/rfc8081 Compile list of file extensions Helper methods Make string Strip leading zeros Add .0(s) Basic Data Types Append .0 for semver comparison TODO: This is a stump. Expand. class TypeWorldURLDataType(StringDataType): def valid(self): if not self.value: return True if not self.value.startswith("http://") and not self.value.startswith( "https://" ): return "Needs to start with http:// or https://" else: return True def formatHint(self): return ( "Type.World Subscription URL as per " "[Developer Docs](https://type.world/developerthe-subscription-url)" ) Data type of each list member Here commented out to enforce explicit setting of data type for each Proxy dataType = str def valid(self): if self.value: for data in self.value: valid = data.valid() return valid return True return self.difference(other) == {} def difference(self, other): d1 = self.dumpDict(validate=False) d2 = other.dumpDict(validate=False) from deepdiff import DeepDiff r2 = DeepDiff(d1, d2, ignore_order=True) return r2 Seems unused elif 'typeworld.api.' 
in ("%s" % class_.dataType): return self.linkDocuText('::%s::' % class_.dataType.__name__) else: return class_.dataType.__name__.title() Define string attributes Description Format Hint Example Data Compile TOC Add data Recurse Fill default values Set default values Remove duplicates values = list(set(values)) Check if required fields are filled recurse Check custom messages: recurse Check custom messages: Check custom messages: Auto-validate required don't know is set allow empty if self._structure[key][0].includeEmpty: _list.value = [] def __repr__(self): return '<MultiLanguageText>' try english try anything Check for text length Top-Level Data Types class LanguageSupportDataType(DictionaryDataType): def valid(self): if not self.value: return True for script in self.value: if not len(script) == 4 or not script.islower(): return "Script tag '%s' needs to be a four-letter lowercase tag." % ( script ) for language in self.value[script]: if not len(language) == 3 or not language.isupper(): return ( "Language tag '%s' needs to be a " "three-letter uppercase" ) % (language) return True LicenseDefinition key: [data type, required, default value, description] FontPackage key: [data type, required, default value, description] font.format filter font.variableFont filter LicenseUsage key: [data type, required, default value, description] Checking for existing license Designer key: [data type, required, default value, description] Font Family Version key: [data type, required, default value, description] Fonts key: [data type, required, default value, description] "languageSupport": [ LanguageSupportDataType, False, None, "Dictionary of suppported languages as script/language combinations", ], Checking font type/extension Checking version information Checking for designers Checking uniqueID for file name contradictions: return semver.compare(makeSemVer(a.number), makeSemVer(b.number)) Family level designers Font level designers Font Family key: [data type, required, default 
value, description] Checking for designers Apply font.purpose filter Collect list of unique package keyword references in family's fonts Prepend a DEFAULT package Build list of FontPackage objects Attach fonts attribute to each package Attach fonts to packages Web Links Font Foundry key: [data type, required, default value, description] data Base Response Available Fonts key: [data type, required, default value, description] Root Response-specific Accumulate old and new fonts Versions Added or removed fonts Other content changes (including the above ones) Check all uniqueIDs for duplicity InstallFonts key: [data type, required, default value, description] Root key: [data type, required, default value, description] Root Uninstall Fonts key: [data type, required, default value, description] Root Response-specific key: [data type, required, default value, description] Root key: [data type, required, default value, description] TODO: Actually implement the above security feature Root Response key: [data type, required, default value, description] Root | 11,648 | en | 0.72831 |
import json
import datetime
import traceback
import re
from base64 import b64encode
from ast import literal_eval
from flask import Blueprint, render_template, render_template_string, make_response, url_for, current_app, request, redirect, jsonify, abort, flash, session
from flask_login import login_required, current_user
from ..decorators import operator_role_required, admin_role_required, history_access_required
from ..models.user import User
from ..models.account import Account
from ..models.account_user import AccountUser
from ..models.role import Role
from ..models.server import Server
from ..models.setting import Setting
from ..models.history import History
from ..models.domain import Domain
from ..models.domain_user import DomainUser
from ..models.record import Record
from ..models.domain_template import DomainTemplate
from ..models.domain_template_record import DomainTemplateRecord
from ..models.api_key import ApiKey
from ..models.base import db
from ..lib.schema import ApiPlainKeySchema
# Serializer that exposes the plain-text API key (shown once, after creation).
apikey_plain_schema = ApiPlainKeySchema(many=True)

# All admin views in this module are registered under the /admin URL prefix.
admin_bp = Blueprint('admin',
                     __name__,
                     template_folder='templates',
                     url_prefix='/admin')
"""
changeSet is a list of tuples, in the following format
(old_state, new_state, change_type)
old_state: dictionary with "disabled" and "content" keys. {"disabled" : False, "content" : "1.1.1.1" }
new_state: similarly
change_type: "addition" or "deletion" or "status" for status change or "unchanged" for no change
Note: A change in "content", is considered a deletion and recreation of the same record,
holding the new content value.
"""
def get_record_changes(del_rrest, add_rrest):
    """Diff the record lists of a removed and an added RRset.

    Returns a list of (old_state, new_state, change_type) tuples where each
    state is {"disabled": ..., "content": ...} or None, and change_type is
    "deletion", "addition", "status" (disabled flag flipped) or "unchanged".
    A content change shows up as a deletion plus an addition.
    """
    removed = del_rrest.get('records', [])
    added = add_rrest.get('records', [])
    changes = []

    # Pass 1 (driven by the old state): status flips and deletions.
    for old in removed:
        match = next(
            (new for new in added if new['content'] == old['content']), None)
        if match is None:
            changes.append((
                {"disabled": old['disabled'], "content": old['content']},
                None,
                "deletion"))
        elif old['disabled'] != match['disabled']:
            changes.append((
                {"disabled": old['disabled'], "content": old['content']},
                {"disabled": match['disabled'], "content": match['content']},
                "status"))

    # Pass 2 (driven by the new state): content present only after the change.
    for new in added:
        if not any(old['content'] == new['content'] for old in removed):
            changes.append((
                None,
                {"disabled": new['disabled'], "content": new['content']},
                "addition"))

    # Pass 3: anything in the new state not already accounted for is unchanged.
    for new in added:
        already_seen = any(
            c[1] is not None and c[1]["content"] == new['content']
            for c in changes)
        if not already_seen:
            state = {"disabled": new['disabled'], "content": new['content']}
            changes.append((state, dict(state), "unchanged"))

    return changes
# out_changes is a dict mapping change_num -> list of HistoryRecordEntry objects
# a HistoryRecordEntry represents a pair of add_rrest and del_rrest
def extract_changelogs_from_a_history_entry(out_changes, history_entry, change_num, record_name=None, record_type=None):
    """Parse one History row and append its record changes to out_changes.

    out_changes: dict mapping change_num -> list of HistoryRecordEntry (mutated
        in place).
    history_entry: History model row; its .detail may hold JSON with
        'add_rrests' / 'del_rrests' RRset lists.
    change_num: key under which this entry's changes are collected.
    record_name / record_type: when both are given, keep only entries matching
        that (name, type) pair — used for the per-record changelog view.
    """
    if history_entry.detail is None:
        return

    # Cheap substring probe before paying for json.loads; non-record history
    # rows (user edits, account changes, ...) are skipped entirely.
    if "add_rrests" not in history_entry.detail:
        return
    detail_dict = json.loads(history_entry.detail)

    add_rrests = detail_dict['add_rrests']
    del_rrests = detail_dict['del_rrests']

    # RRsets present after the change: edits ("*") if the (name, type) pair
    # existed before, otherwise brand-new records ("+").
    for add_rrest in add_rrests:
        exists = False
        for del_rrest in del_rrests:
            if del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']:
                exists = True
                out_changes.setdefault(change_num, []).append(
                    HistoryRecordEntry(history_entry, del_rrest, add_rrest, "*"))
                break
        if not exists:  # this is a new record
            out_changes.setdefault(change_num, []).append(
                HistoryRecordEntry(history_entry, [], add_rrest, "+"))

    # RRsets present only before the change are deletions ("-").
    for del_rrest in del_rrests:
        exists = any(
            del_rrest['name'] == add_rrest['name'] and del_rrest['type'] == add_rrest['type']
            for add_rrest in add_rrests)
        if not exists:  # this is a deletion
            out_changes.setdefault(change_num, []).append(
                HistoryRecordEntry(history_entry, del_rrest, [], "-"))

    # Per-record changelog: keep only entries for (record_name, record_type).
    if record_name is not None and record_type is not None:
        if change_num not in out_changes:
            return
        # BUGFIX: the original removed items from the list it was iterating,
        # which silently skipped the entry following every removal. Build the
        # filtered list instead.
        kept = []
        for hre in out_changes[change_num]:
            if ('type' in hre.add_rrest and hre.add_rrest['name'] == record_name
                    and hre.add_rrest['type'] == record_type):
                kept.append(hre)
            elif ('type' in hre.del_rrest and hre.del_rrest['name'] == record_name
                    and hre.del_rrest['type'] == record_type):
                kept.append(hre)
        out_changes[change_num] = kept
# Records sharing the same (name, type) pair form a single HistoryRecordEntry.
#   history_entry: History row — source of created_by / created_on
#   add_rrest: "replace" RRset dictionary (state after the change)
#   del_rrest: "remove" RRset dictionary (state before the change)
class HistoryRecordEntry:
    def __init__(self, history_entry, del_rrest, add_rrest, change_type):
        """Build a changelog entry for one (name, type) RRset pair.

        change_type: "+" new (name, type) pair, "-" deleted pair,
        "*" edited-or-unchanged pair.
        """
        self.history_entry = history_entry
        self.add_rrest = add_rrest
        self.del_rrest = del_rrest
        self.change_type = change_type
        # Subset of ["name", "type", "ttl"] that differ between the states.
        self.changed_fields = []
        # Per-record diff for this add/del pair.
        self.changeSet = []

        if change_type in ("+", "-"):
            # The whole RRset appeared or disappeared: all fields changed.
            self.changed_fields.extend(["name", "type", "ttl"])
            self.changeSet = get_record_changes(del_rrest, add_rrest)
        elif change_type == "*":
            if add_rrest['ttl'] != del_rrest['ttl']:
                self.changed_fields.append("ttl")
            self.changeSet = get_record_changes(del_rrest, add_rrest)

    def toDict(self):
        """Serialize the entry for the changelog templates."""
        return {
            "add_rrest": self.add_rrest,
            "del_rrest": self.del_rrest,
            "changed_fields": self.changed_fields,
            "created_on": self.history_entry.created_on,
            "created_by": self.history_entry.created_by,
            "change_type": self.change_type,
            "changeSet": self.changeSet,
        }

    def __eq__(self, other):
        # Equality by serialized content; used when filtering entries in lists.
        return other.toDict() == self.toDict()
@admin_bp.before_request
def before_request():
    """Refresh the session expiry on every admin request (sliding timeout)."""
    session.permanent = True
    current_app.permanent_session_lifetime = datetime.timedelta(
        minutes=int(Setting().get('session_timeout')))
    # Mark the session dirty so the refreshed lifetime is persisted.
    session.modified = True
@admin_bp.route('/pdns', methods=['GET'])
@login_required
@operator_role_required
def pdns_stats():
    """Show the PowerDNS server statistics dashboard.

    Redirects to the PDNS settings page when the API connection has not been
    configured yet.
    """
    if not Setting().get('pdns_api_url') or not Setting().get(
            'pdns_api_key') or not Setting().get('pdns_version'):
        return redirect(url_for('admin.setting_pdns'))

    domains = Domain.query.all()
    users = User.query.all()

    server = Server(server_id='localhost')
    configs = server.get_config()
    statistics = server.get_statistic()
    history_number = History.query.count()

    if statistics:
        # Fall back to 0 instead of raising IndexError when the server does
        # not report an 'uptime' statistic.
        uptime = next(
            (item['value'] for item in statistics if item['name'] == 'uptime'),
            0)
    else:
        uptime = 0

    return render_template('admin_pdns_stats.html',
                           domains=domains,
                           users=users,
                           configs=configs,
                           statistics=statistics,
                           uptime=uptime,
                           history_number=history_number)
@admin_bp.route('/user/edit/<user_username>', methods=['GET', 'POST'])
@admin_bp.route('/user/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_user(user_username=None):
    """Create a new user (no username in URL) or edit an existing one."""
    create = user_username is None
    user = None

    if not create:
        user = User.query.filter(User.username == user_username).first()
        if not user:
            return render_template('errors/404.html'), 404
        # Only administrators may modify administrator accounts.
        if user.role.name == 'Administrator' and current_user.role.name != 'Administrator':
            return render_template('errors/401.html'), 401

    if request.method == 'GET':
        return render_template('admin_edit_user.html',
                               user=user,
                               create=create)

    # POST: apply the submitted form.
    fdata = request.form
    if create:
        user_username = fdata.get('username', '').strip()
        user = User(username=user_username,
                    plain_text_password=fdata.get('password', ''),
                    firstname=fdata.get('firstname', '').strip(),
                    lastname=fdata.get('lastname', '').strip(),
                    email=fdata.get('email', '').strip(),
                    reload_info=False)
        # A brand-new user must come with a password.
        if not fdata.get('password', ''):
            return render_template('admin_edit_user.html',
                                   user=user,
                                   create=create,
                                   blank_password=True)
        result = user.create_local_user()
        history = History(msg='Created user {0}'.format(user.username),
                          created_by=current_user.username)
    else:
        result = user.update_local_user()
        history = History(msg='Updated user {0}'.format(user.username),
                          created_by=current_user.username)

    if result['status']:
        history.add()
        return redirect(url_for('admin.manage_user'))

    return render_template('admin_edit_user.html',
                           user=user,
                           create=create,
                           error=result['msg'])
@admin_bp.route('/key/edit/<key_id>', methods=['GET', 'POST'])
@admin_bp.route('/key/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_key(key_id=None):
    """Create a new API key (no key_id) or edit an existing one.

    On creation the plain-text key is exposed exactly once, base64-encoded,
    via the `plain_key` template variable.
    """
    domains = Domain.query.all()
    accounts = Account.query.all()
    roles = Role.query.all()
    apikey = None
    create = True
    plain_key = None

    if key_id:
        apikey = ApiKey.query.filter(ApiKey.id == key_id).first()
        create = False
        if not apikey:
            return render_template('errors/404.html'), 404

    if request.method == 'GET':
        return render_template('admin_edit_key.html',
                               key=apikey,
                               domains=domains,
                               accounts=accounts,
                               roles=roles,
                               create=create)

    if request.method == 'POST':
        fdata = request.form
        description = fdata['description']
        role = fdata.getlist('key_role')[0]
        domain_list = fdata.getlist('key_multi_domain')
        account_list = fdata.getlist('key_multi_account')

        # Create new apikey
        if create:
            # Domain/account restrictions only apply to 'User' role keys.
            if role == "User":
                domain_obj_list = Domain.query.filter(Domain.name.in_(domain_list)).all()
                account_obj_list = Account.query.filter(Account.name.in_(account_list)).all()
            else:
                account_obj_list, domain_obj_list = [], []

            apikey = ApiKey(desc=description,
                            role_name=role,
                            domains=domain_obj_list,
                            accounts=account_obj_list)
            try:
                apikey.create()
            except Exception as e:
                current_app.logger.error('Error: {0}'.format(e))
                # NOTE(review): ApiKeyCreateFail is not imported in this
                # module's visible import block — confirm it is in scope.
                raise ApiKeyCreateFail(message='Api key create failed')

            # The plain key is only available right after creation.
            plain_key = apikey_plain_schema.dump([apikey])[0]["plain_key"]
            plain_key = b64encode(plain_key.encode('utf-8')).decode('utf-8')
            history_message = "Created API key {0}".format(apikey.id)

        # Update existing apikey
        else:
            try:
                if role != "User":
                    domain_list, account_list = [], []
                apikey.update(role, description, domain_list, account_list)
                history_message = "Updated API key {0}".format(apikey.id)
            except Exception as e:
                current_app.logger.error('Error: {0}'.format(e))
                # BUGFIX: previously execution fell through here with
                # 'history_message' unbound and crashed with NameError below.
                # Fail explicitly instead.
                abort(500)

        history = History(msg=history_message,
                          detail=json.dumps({
                              'key': apikey.id,
                              'role': apikey.role.name,
                              'description': apikey.description,
                              'domains': [domain.name for domain in apikey.domains],
                              'accounts': [a.name for a in apikey.accounts]
                          }),
                          created_by=current_user.username)
        history.add()

        return render_template('admin_edit_key.html',
                               key=apikey,
                               domains=domains,
                               accounts=accounts,
                               roles=roles,
                               create=create,
                               plain_key=plain_key)
@admin_bp.route('/manage-keys', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_keys():
    """List API keys (GET) or delete one (POST).

    POST payload format: {'action': 'delete_key', 'data': <key id>}.
    """
    if request.method == 'GET':
        try:
            apikeys = ApiKey.query.all()
        except Exception as e:
            current_app.logger.error('Error: {0}'.format(e))
            abort(500)
        return render_template('admin_manage_keys.html',
                               keys=apikeys)

    elif request.method == 'POST':
        jdata = request.json
        if jdata['action'] == 'delete_key':
            apikey = ApiKey.query.get(jdata['data'])
            # BUGFIX: guard against unknown key ids (previously AttributeError).
            if not apikey:
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Key not found.'
                    }), 404)
            # Capture details before deletion, for the audit log.
            history_apikey_id = apikey.id
            history_apikey_role = apikey.role.name
            history_apikey_description = apikey.description
            history_apikey_domains = [domain.name for domain in apikey.domains]
            try:
                apikey.delete()
            except Exception as e:
                # BUGFIX: previously the failure was logged but a success
                # response was still returned. Report the failure instead.
                current_app.logger.error('Error: {0}'.format(e))
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Cannot remove key.'
                    }), 500)
            current_app.logger.info('Delete API key {0}'.format(history_apikey_id))
            history = History(msg='Delete API key {0}'.format(history_apikey_id),
                              detail=json.dumps({
                                  'key': history_apikey_id,
                                  'role': history_apikey_role,
                                  'description': history_apikey_description,
                                  'domains': history_apikey_domains
                              }),
                              created_by=current_user.username)
            history.add()
            return make_response(
                jsonify({
                    'status': 'ok',
                    'msg': 'Key has been removed.'
                }), 200)

        # BUGFIX: unknown actions previously fell through returning None
        # (an HTTP 500); answer with an explicit 400 instead.
        return make_response(
            jsonify({
                'status': 'error',
                'msg': 'Action not supported.'
            }), 400)
@admin_bp.route('/manage-user', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_user():
    """List users (GET) or apply a user-management action (POST).

    POST payload: {'action': <action>, 'data': <payload>} where action is one
    of 'user_otp_disable', 'delete_user', 'revoke_user_privileges' or
    'update_user_role'. Responses are JSON with 'status' and 'msg' keys.
    """
    if request.method == 'GET':
        roles = Role.query.all()
        users = User.query.order_by(User.username).all()
        return render_template('admin_manage_user.html',
                               users=users,
                               roles=roles)

    if request.method == 'POST':
        #
        # post data should in format
        # {'action': 'delete_user', 'data': 'username'}
        #
        try:
            jdata = request.json
            data = jdata['data']

            if jdata['action'] == 'user_otp_disable':
                # Force-disable two-factor authentication for the user.
                # NOTE(review): User(username=data) builds a fresh model
                # object; presumably update_profile() resolves the DB row
                # internally — confirm against the User model.
                user = User(username=data)
                result = user.update_profile(enable_otp=False)
                if result:
                    history = History(
                        msg='Two factor authentication disabled for user {0}'.
                        format(data),
                        created_by=current_user.username)
                    history.add()
                    return make_response(
                        jsonify({
                            'status':
                            'ok',
                            'msg':
                            'Two factor authentication has been disabled for user.'
                        }), 200)
                else:
                    return make_response(
                        jsonify({
                            'status':
                            'error',
                            'msg':
                            'Cannot disable two factor authentication for user.'
                        }), 500)

            elif jdata['action'] == 'delete_user':
                user = User(username=data)
                # Users may not delete their own account from this view.
                if user.username == current_user.username:
                    return make_response(
                        jsonify({
                            'status': 'error',
                            'msg': 'You cannot delete yourself.'
                        }), 400)

                # Remove account associations first
                # NOTE(review): relies on user.id being populated by the
                # User(username=...) constructor — confirm.
                user_accounts = Account.query.join(AccountUser).join(
                    User).filter(AccountUser.user_id == user.id,
                                 AccountUser.account_id == Account.id).all()
                for uc in user_accounts:
                    uc.revoke_privileges_by_id(user.id)

                # Then delete the user
                result = user.delete()
                if result:
                    history = History(msg='Delete user {0}'.format(data),
                                      created_by=current_user.username)
                    history.add()
                    return make_response(
                        jsonify({
                            'status': 'ok',
                            'msg': 'User has been removed.'
                        }), 200)
                else:
                    return make_response(
                        jsonify({
                            'status': 'error',
                            'msg': 'Cannot remove user.'
                        }), 500)

            elif jdata['action'] == 'revoke_user_privileges':
                # Strip the user of all domain/account privileges.
                user = User(username=data)
                result = user.revoke_privilege()
                if result:
                    history = History(
                        msg='Revoke {0} user privileges'.format(data),
                        created_by=current_user.username)
                    history.add()
                    return make_response(
                        jsonify({
                            'status': 'ok',
                            'msg': 'Revoked user privileges.'
                        }), 200)
                else:
                    return make_response(
                        jsonify({
                            'status': 'error',
                            'msg': 'Cannot revoke user privilege.'
                        }), 500)

            elif jdata['action'] == 'update_user_role':
                # Here 'data' is a dict, not a plain username string.
                username = data['username']
                role_name = data['role_name']

                if username == current_user.username:
                    return make_response(
                        jsonify({
                            'status': 'error',
                            'msg': 'You cannot change you own roles.'
                        }), 400)

                user = User.query.filter(User.username == username).first()
                if not user:
                    return make_response(
                        jsonify({
                            'status': 'error',
                            'msg': 'User does not exist.'
                        }), 404)

                # Only administrators may demote or promote administrators.
                if user.role.name == 'Administrator' and current_user.role.name != 'Administrator':
                    return make_response(
                        jsonify({
                            'status':
                            'error',
                            'msg':
                            'You do not have permission to change Administrator users role.'
                        }), 400)

                if role_name == 'Administrator' and current_user.role.name != 'Administrator':
                    return make_response(
                        jsonify({
                            'status':
                            'error',
                            'msg':
                            'You do not have permission to promote a user to Administrator role.'
                        }), 400)

                user = User(username=username)
                result = user.set_role(role_name)
                if result['status']:
                    history = History(
                        msg='Change user role of {0} to {1}'.format(
                            username, role_name),
                        created_by=current_user.username)
                    history.add()
                    return make_response(
                        jsonify({
                            'status': 'ok',
                            'msg': 'Changed user role successfully.'
                        }), 200)
                else:
                    return make_response(
                        jsonify({
                            'status':
                            'error',
                            'msg':
                            'Cannot change user role. {0}'.format(
                                result['msg'])
                        }), 500)
            else:
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Action not supported.'
                    }), 400)
        except Exception as e:
            # Catch-all: malformed payloads and DB errors end up here.
            current_app.logger.error(
                'Cannot update user. Error: {0}'.format(e))
            current_app.logger.debug(traceback.format_exc())
            return make_response(
                jsonify({
                    'status':
                    'error',
                    'msg':
                    'There is something wrong, please contact Administrator.'
                }), 400)
@admin_bp.route('/account/edit/<account_name>', methods=['GET', 'POST'])
@admin_bp.route('/account/edit', methods=['GET', 'POST'])
@login_required
@operator_role_required
def edit_account(account_name=None):
    """Create a new account (no name in URL) or edit an existing one."""
    users = User.query.all()

    if request.method == 'GET':
        if account_name is None:
            return render_template('admin_edit_account.html',
                                   account_user_ids=[],
                                   users=users,
                                   create=1)
        else:
            account = Account.query.filter(
                Account.name == account_name).first()
            # BUGFIX: a nonexistent account name previously crashed with
            # AttributeError (HTTP 500); return 404 like edit_user does.
            if not account:
                return render_template('errors/404.html'), 404
            account_user_ids = account.get_user()
            return render_template('admin_edit_account.html',
                                   account=account,
                                   account_user_ids=account_user_ids,
                                   users=users,
                                   create=0)

    if request.method == 'POST':
        fdata = request.form
        new_user_list = request.form.getlist('account_multi_user')

        # on POST, synthesize account and account_user_ids from form data
        if not account_name:
            account_name = fdata['accountname']

        account = Account(name=account_name,
                          description=fdata['accountdescription'],
                          contact=fdata['accountcontact'],
                          mail=fdata['accountmail'])
        account_user_ids = []
        for username in new_user_list:
            userid = User(username=username).get_user_info_by_username().id
            account_user_ids.append(userid)

        create = int(fdata['create'])
        if create:
            # account __init__ sanitizes and lowercases the name, so to manage expectations
            # we let the user reenter the name until it's not empty and it's valid (ignoring the case)
            if account.name == "" or account.name != account_name.lower():
                return render_template('admin_edit_account.html',
                                       account=account,
                                       account_user_ids=account_user_ids,
                                       users=users,
                                       create=create,
                                       invalid_accountname=True)

            if Account.query.filter(Account.name == account.name).first():
                return render_template('admin_edit_account.html',
                                       account=account,
                                       account_user_ids=account_user_ids,
                                       users=users,
                                       create=create,
                                       duplicate_accountname=True)

            result = account.create_account()
            history = History(msg='Create account {0}'.format(account.name),
                              created_by=current_user.username)
        else:
            result = account.update_account()
            history = History(msg='Update account {0}'.format(account.name),
                              created_by=current_user.username)

        if result['status']:
            account.grant_privileges(new_user_list)
            history.add()
            return redirect(url_for('admin.manage_account'))

        return render_template('admin_edit_account.html',
                               account=account,
                               account_user_ids=account_user_ids,
                               users=users,
                               create=create,
                               error=result['msg'])
@admin_bp.route('/manage-account', methods=['GET', 'POST'])
@login_required
@operator_role_required
def manage_account():
    """List all accounts (GET) or process an account-deletion request (POST)."""
    if request.method == 'GET':
        account_list = Account.query.order_by(Account.name).all()
        # Attach the number of associated users for display in the table.
        for acc in account_list:
            acc.user_num = AccountUser.query.filter(
                AccountUser.account_id == acc.id).count()
        return render_template('admin_manage_account.html',
                               accounts=account_list)

    if request.method == 'POST':
        # Expected payload: {'action': 'delete_account', 'data': 'accountname'}
        try:
            payload = request.json
            target_name = payload['data']
            if payload['action'] != 'delete_account':
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Action not supported.'
                    }), 400)

            account = Account.query.filter(
                Account.name == target_name).first()
            if not account:
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Account not found.'
                    }), 404)

            # Detach every domain from the account before removing it.
            for domain in account.domains:
                Domain(name=domain.name).assoc_account(None)

            if not account.delete_account():
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': 'Cannot remove account.'
                    }), 500)

            History(msg='Delete account {0}'.format(target_name),
                    created_by=current_user.username).add()
            return make_response(
                jsonify({
                    'status': 'ok',
                    'msg': 'Account has been removed.'
                }), 200)
        except Exception as e:
            current_app.logger.error(
                'Cannot update account. Error: {0}'.format(e))
            current_app.logger.debug(traceback.format_exc())
            return make_response(
                jsonify({
                    'status':
                    'error',
                    'msg':
                    'There is something wrong, please contact Administrator.'
                }), 400)
class DetailedHistory():
    """Wraps a History row with a pre-rendered HTML fragment
    (``detailed_msg``) describing the logged event, plus an optional
    record-change set for domain record changes."""

    def __init__(self, history, change_set):
        """Render ``history.detail`` (a JSON blob) into ``self.detailed_msg``.

        The keys present in the JSON decide which kind of event was
        logged; each branch below renders the matching HTML table.
        ``change_set`` is kept as-is for record-change entries.
        """
        self.history = history
        self.detailed_msg = ""
        self.change_set = change_set

        # Entries without a detail payload have nothing to render.
        if not history.detail:
            self.detailed_msg = ""
            return

        detail_dict = json.loads(history.detail)

        if 'domain_type' in detail_dict and 'account_id' in detail_dict: # this is a domain creation
            self.detailed_msg = render_template_string("""
                    <table class="table table-bordered table-striped">
                        <tr><td>Domain type:</td><td>{{ domaintype }}</td></tr>
                        <tr><td>Account:</td><td>{{ account }}</td></tr>
                    </table>
                """,
                                                       domaintype=detail_dict['domain_type'],
                                                       # NOTE(review): calls an instance method with self=None —
                                                       # only works because get_name_by_id never touches self; confirm.
                                                       account=Account.get_name_by_id(self=None, account_id=detail_dict['account_id']) if detail_dict['account_id'] != "0" else "None")

        elif 'authenticator' in detail_dict: # this is a user authentication
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped" style="width:565px;">
                    <thead>
                        <tr>
                            <th colspan="3" style="background: rgba({{ background_rgba }});">
                                <p style="color:white;">User {{ username }} authentication {{ auth_result }}</p>
                            </th>
                        </tr>
                    </thead>
                    <tbody>
                        <tr>
                            <td>Authenticator Type:</td>
                            <td colspan="2">{{ authenticator }}</td>
                        </tr>
                        <tr>
                            <td>IP Address</td>
                            <td colspan="2">{{ ip_address }}</td>
                        </tr>
                    </tbody>
                </table>
                """,
                                                       background_rgba="68,157,68" if detail_dict['success'] == 1 else "201,48,44",
                                                       username=detail_dict['username'],
                                                       auth_result="success" if detail_dict['success'] == 1 else "failure",
                                                       authenticator=detail_dict['authenticator'],
                                                       ip_address=detail_dict['ip_address'])

        elif 'add_rrests' in detail_dict: # this is a domain record change
            # Record changes are displayed through change_set, not detailed_msg.
            # changes_set = []
            self.detailed_msg = ""
            # extract_changelogs_from_a_history_entry(changes_set, history, 0)

        elif 'name' in detail_dict and 'template' in history.msg: # template creation / deletion
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped">
                    <tr><td>Template name:</td><td>{{ template_name }}</td></tr>
                    <tr><td>Description:</td><td>{{ description }}</td></tr>
                </table>
                """,
                                                       template_name=DetailedHistory.get_key_val(detail_dict, "name"),
                                                       description=DetailedHistory.get_key_val(detail_dict, "description"))

        elif 'Change domain' in history.msg and 'access control' in history.msg: # added or removed a user from a domain
            users_with_access = DetailedHistory.get_key_val(detail_dict, "user_has_access")
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped">
                    <tr><td>Users with access to this domain</td><td>{{ users_with_access }}</td></tr>
                    <tr><td>Number of users:</td><td>{{ users_with_access | length }}</td><tr>
                </table>
                """,
                                                       users_with_access=users_with_access)

        elif 'Created API key' in history.msg or 'Updated API key' in history.msg:
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped">
                    <tr><td>Key: </td><td>{{ keyname }}</td></tr>
                    <tr><td>Role:</td><td>{{ rolename }}</td></tr>
                    <tr><td>Description:</td><td>{{ description }}</td></tr>
                    <tr><td>Accessible domains with this API key:</td><td>{{ linked_domains }}</td></tr>
                    <tr><td>Accessible accounts with this API key:</td><td>{{ linked_accounts }}</td></tr>
                </table>
                """,
                                                       keyname=DetailedHistory.get_key_val(detail_dict, "key"),
                                                       rolename=DetailedHistory.get_key_val(detail_dict, "role"),
                                                       description=DetailedHistory.get_key_val(detail_dict, "description"),
                                                       linked_domains=DetailedHistory.get_key_val(detail_dict, "domains" if "domains" in detail_dict else "domain_acl"),
                                                       linked_accounts=DetailedHistory.get_key_val(detail_dict, "accounts"))

        elif 'Delete API key' in history.msg:
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped">
                    <tr><td>Key: </td><td>{{ keyname }}</td></tr>
                    <tr><td>Role:</td><td>{{ rolename }}</td></tr>
                    <tr><td>Description:</td><td>{{ description }}</td></tr>
                    <tr><td>Accessible domains with this API key:</td><td>{{ linked_domains }}</td></tr>
                </table>
                """,
                                                       keyname=DetailedHistory.get_key_val(detail_dict, "key"),
                                                       rolename=DetailedHistory.get_key_val(detail_dict, "role"),
                                                       description=DetailedHistory.get_key_val(detail_dict, "description"),
                                                       linked_domains=DetailedHistory.get_key_val(detail_dict, "domains"))

        elif 'Update type for domain' in history.msg:
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped">
                    <tr><td>Domain: </td><td>{{ domain }}</td></tr>
                    <tr><td>Domain type:</td><td>{{ domain_type }}</td></tr>
                    <tr><td>Masters:</td><td>{{ masters }}</td></tr>
                </table>
                """,
                                                       domain=DetailedHistory.get_key_val(detail_dict, "domain"),
                                                       domain_type=DetailedHistory.get_key_val(detail_dict, "type"),
                                                       masters=DetailedHistory.get_key_val(detail_dict, "masters"))

        elif 'reverse' in history.msg:
            self.detailed_msg = render_template_string("""
                <table class="table table-bordered table-striped">
                    <tr><td>Domain Type: </td><td>{{ domain_type }}</td></tr>
                    <tr><td>Domain Master IPs:</td><td>{{ domain_master_ips }}</td></tr>
                </table>
                """,
                                                       domain_type=DetailedHistory.get_key_val(detail_dict, "domain_type"),
                                                       domain_master_ips=DetailedHistory.get_key_val(detail_dict, "domain_master_ips"))

        elif DetailedHistory.get_key_val(detail_dict, 'msg') and DetailedHistory.get_key_val(detail_dict, 'status'):
            # Generic fallback: render whatever status/message pair was stored.
            self.detailed_msg = render_template_string('''
                <table class="table table-bordered table-striped">
                    <tr><td>Status: </td><td>{{ history_status }}</td></tr>
                    <tr><td>Message:</td><td>{{ history_msg }}</td></tr>
                </table>
                ''',
                                                       history_status=DetailedHistory.get_key_val(detail_dict, 'status'),
                                                       history_msg=DetailedHistory.get_key_val(detail_dict, 'msg'))

    # check for lower key as well for old databases
    @staticmethod
    def get_key_val(_dict, key):
        """Look up *key*, falling back to its title-cased variant (old DB rows);
        always returns a string ('' when neither key exists)."""
        return str(_dict.get(key, _dict.get(key.title(), '')))
# convert a list of History objects into DetailedHistory objects
def convert_histories(histories):
    """Convert History rows into DetailedHistory objects.

    Entries whose detail payload contains record changes
    ('add_rrests'/'del_rrests') get their change set extracted via
    extract_changelogs_from_a_history_entry and attached; all other
    entries carry no change set.

    :param histories: iterable of History rows
    :return: list of DetailedHistory, in the same order
    """
    changes_set = dict()
    detailed_histories = []
    change_idx = 0
    for entry in histories:
        if entry.detail and ('add_rrests' in entry.detail
                             or 'del_rrests' in entry.detail):
            extract_changelogs_from_a_history_entry(changes_set, entry,
                                                    change_idx)
            # The extractor only populates changes_set[change_idx] when it
            # actually found record changes; .get() yields None otherwise.
            detailed_histories.append(
                DetailedHistory(entry, changes_set.get(change_idx)))
            change_idx += 1
        else:
            detailed_histories.append(DetailedHistory(entry, None))
    return detailed_histories
@admin_bp.route('/history', methods=['GET', 'POST'])
@login_required
@history_access_required
def history():
    """Render the history page (GET) or delete all history (POST).

    POST is restricted to administrators.  GET builds space-separated
    autocomplete strings of the domain, account and user names visible
    to the current user.
    """
    if request.method == 'POST':
        if current_user.role.name != 'Administrator':
            return make_response(
                jsonify({
                    'status': 'error',
                    'msg': 'You do not have permission to remove history.'
                }), 401)
        h = History()
        result = h.remove_all()
        if result:
            history = History(msg='Remove all histories',
                              created_by=current_user.username)
            history.add()
            # BUGFIX: message used to say 'Changed user role successfully.'
            # (copy-paste from the user-role endpoint).
            return make_response(
                jsonify({
                    'status': 'ok',
                    'msg': 'Removed all histories.'
                }), 200)
        else:
            return make_response(
                jsonify({
                    'status': 'error',
                    'msg': 'Can not remove histories.'
                }), 500)
    if request.method == 'GET':
        if current_user.role.name in ['Administrator', 'Operator']:
            all_domain_names = Domain.query.all()
            all_account_names = Account.query.all()
            all_user_names = User.query.all()
        else:
            # Regular users only see domains/accounts they have access to
            # (allow_user_view_history must be enabled to reach this view).
            all_domain_names = db.session.query(Domain) \
                .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
                .outerjoin(Account, Domain.account_id == Account.id) \
                .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
                .filter(
                    db.or_(
                        DomainUser.user_id == current_user.id,
                        AccountUser.user_id == current_user.id
                    )).all()
            all_account_names = db.session.query(Account) \
                .outerjoin(Domain, Domain.account_id == Account.id) \
                .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
                .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
                .filter(
                    db.or_(
                        DomainUser.user_id == current_user.id,
                        AccountUser.user_id == current_user.id
                    )).all()
            all_user_names = []
            for acc in all_account_names:
                members = db.session.query(User) \
                    .join(AccountUser, AccountUser.user_id == User.id) \
                    .outerjoin(Account, Account.id == AccountUser.account_id) \
                    .filter(
                        db.or_(
                            Account.id == acc.id,
                            AccountUser.account_id == acc.id
                        )
                    ) \
                    .all()
                for member in members:
                    if member not in all_user_names:
                        all_user_names.append(member)
        # Space-separated name lists consumed by the autocomplete widgets;
        # each entry keeps a trailing space, as the template expects.
        doms = "".join(d.name + " " for d in all_domain_names)
        accounts = "".join(a.name + " " for a in all_account_names)
        users = "".join(u.username + " " for u in all_user_names)
        return render_template('admin_history.html', all_domain_names=doms, all_account_names=accounts, all_usernames=users)
# local_offset is the offset of the utc to the local time
# offset must be int
# return the date converted and simplified
def from_utc_to_local(local_offset, timeframe):
    """Convert a UTC timestamp to local time.

    :param local_offset: offset from UTC to local time, in minutes, as
        reported by JavaScript's ``Date.getTimezoneOffset()`` (positive
        west of UTC — hence the negation below).
    :param timeframe: a datetime or its string representation;
        fractional seconds are discarded.
    :return: naive ``datetime`` shifted into the local timezone.
    """
    date_split = str(timeframe).split(".")[0]
    date_converted = datetime.datetime.strptime(date_split,
                                                '%Y-%m-%d %H:%M:%S')
    # The original took a pointless int -> str -> int detour for the
    # negated offset; a plain negation is equivalent.
    return date_converted + datetime.timedelta(minutes=-int(local_offset))
@admin_bp.route('/history_table', methods=['GET', 'POST'])
@login_required
@history_access_required
def history_table():  # ajax call data
    """Serve the history table fragment (AJAX).

    POST (admins only) wipes all history.  GET applies the optional
    query-arg filters (domain, account, authenticated user, changed-by,
    date range, auth method, changelog-only) and renders at most
    ``max_history_records`` entries.
    """
    if request.method == 'POST':
        if current_user.role.name != 'Administrator':
            return make_response(
                jsonify({
                    'status': 'error',
                    'msg': 'You do not have permission to remove history.'
                }), 401)
        h = History()
        result = h.remove_all()
        if result:
            history = History(msg='Remove all histories',
                              created_by=current_user.username)
            history.add()
            # BUGFIX: message used to say 'Changed user role successfully.'
            # (copy-paste from the user-role endpoint).
            return make_response(
                jsonify({
                    'status': 'ok',
                    'msg': 'Removed all histories.'
                }), 200)
        else:
            return make_response(
                jsonify({
                    'status': 'error',
                    'msg': 'Can not remove histories.'
                }), 500)
    detailedHistories = []
    lim = int(Setting().get('max_history_records'))  # max num of records
    if request.method == 'GET':
        if current_user.role.name in ['Administrator', 'Operator']:
            base_query = History.query
        else:
            # if the user isn't an administrator or operator,
            # allow_user_view_history must be enabled to get here,
            # so include history for the domains for the user
            base_query = db.session.query(History) \
                .join(Domain, History.domain_id == Domain.id) \
                .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
                .outerjoin(Account, Domain.account_id == Account.id) \
                .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
                .filter(
                    db.or_(
                        DomainUser.user_id == current_user.id,
                        AccountUser.user_id == current_user.id
                    ))
        # Filters: each is None when absent or empty.
        domain_name = request.args.get('domain_name_filter') if request.args.get('domain_name_filter') != None \
            and len(request.args.get('domain_name_filter')) != 0 else None
        account_name = request.args.get('account_name_filter') if request.args.get('account_name_filter') != None \
            and len(request.args.get('account_name_filter')) != 0 else None
        user_name = request.args.get('auth_name_filter') if request.args.get('auth_name_filter') != None \
            and len(request.args.get('auth_name_filter')) != 0 else None
        min_date = request.args.get('min') if request.args.get('min') != None and len(request.args.get('min')) != 0 else None
        if min_date != None:  # get 1 day earlier, to check for timezone errors
            min_date = str(datetime.datetime.strptime(min_date, '%Y-%m-%d') - datetime.timedelta(days=1))
        max_date = request.args.get('max') if request.args.get('max') != None and len(request.args.get('max')) != 0 else None
        if max_date != None:  # get 1 day later, to check for timezone errors
            max_date = str(datetime.datetime.strptime(max_date, '%Y-%m-%d') + datetime.timedelta(days=1))
        tzoffset = request.args.get('tzoffset') if request.args.get('tzoffset') != None and len(request.args.get('tzoffset')) != 0 else None
        changed_by = request.args.get('user_name_filter') if request.args.get('user_name_filter') != None \
            and len(request.args.get('user_name_filter')) != 0 else None
        # Auth methods: LOCAL, Github OAuth, Azure OAuth, SAML, OIDC OAuth, Google OAuth
        # (a dead branch that re-assigned auth_methods = [] when no checkbox
        # was set has been removed — it was a no-op).
        auth_methods = []
        if request.args.get('auth_all_checkbox') == "on":
            auth_methods.append("")
        if request.args.get('auth_local_only_checkbox') == "on":
            auth_methods.append("LOCAL")
        if request.args.get('auth_oauth_only_checkbox') == "on":
            auth_methods.append("OAuth")
        if request.args.get('auth_saml_only_checkbox') == "on":
            auth_methods.append("SAML")
        # Equivalent to the original if/else: True only when the checkbox is "on".
        changelog_only = request.args.get('domain_changelog_only_checkbox') == "on"
        # users cannot search for authentication
        if user_name != None and current_user.role.name not in ['Administrator', 'Operator']:
            histories = []
        elif domain_name != None:
            if not changelog_only:
                histories = base_query \
                    .filter(
                        db.and_(
                            db.or_(
                                History.msg.like("%domain " + domain_name) if domain_name != "*" else History.msg.like("%domain%"),
                                History.msg.like("%domain " + domain_name + " access control") if domain_name != "*" else History.msg.like("%domain%access control")
                            ),
                            History.created_on <= max_date if max_date != None else True,
                            History.created_on >= min_date if min_date != None else True,
                            History.created_by == changed_by if changed_by != None else True
                        )
                    ).order_by(History.created_on.desc()).limit(lim).all()
            else:
                # search for records changes only
                histories = base_query \
                    .filter(
                        db.and_(
                            History.msg.like("Apply record changes to domain " + domain_name) if domain_name != "*" \
                            else History.msg.like("Apply record changes to domain%"),
                            History.created_on <= max_date if max_date != None else True,
                            History.created_on >= min_date if min_date != None else True,
                            History.created_by == changed_by if changed_by != None else True
                        )
                    ).order_by(History.created_on.desc()) \
                    .limit(lim).all()
        elif account_name != None:
            if current_user.role.name in ['Administrator', 'Operator']:
                histories = base_query \
                    .join(Domain, History.domain_id == Domain.id) \
                    .outerjoin(Account, Domain.account_id == Account.id) \
                    .filter(
                        db.and_(
                            Account.id == Domain.account_id,
                            account_name == Account.name if account_name != "*" else True,
                            History.created_on <= max_date if max_date != None else True,
                            History.created_on >= min_date if min_date != None else True,
                            History.created_by == changed_by if changed_by != None else True
                        )
                    ).order_by(History.created_on.desc()) \
                    .limit(lim).all()
            else:
                # non-admin base_query already joins Domain/Account
                histories = base_query \
                    .filter(
                        db.and_(
                            Account.id == Domain.account_id,
                            account_name == Account.name if account_name != "*" else True,
                            History.created_on <= max_date if max_date != None else True,
                            History.created_on >= min_date if min_date != None else True,
                            History.created_by == changed_by if changed_by != None else True
                        )
                    ).order_by(History.created_on.desc()) \
                    .limit(lim).all()
        elif user_name != None and current_user.role.name in ['Administrator', 'Operator']:  # only admins can see the user login-logouts
            histories = History.query \
                .filter(
                    db.and_(
                        db.or_(
                            History.msg.like("User " + user_name + " authentication%") if user_name != "*" and user_name != None else History.msg.like("%authentication%"),
                            History.msg.like("User " + user_name + " was not authorized%") if user_name != "*" and user_name != None else History.msg.like("User%was not authorized%")
                        ),
                        History.created_on <= max_date if max_date != None else True,
                        History.created_on >= min_date if min_date != None else True,
                        History.created_by == changed_by if changed_by != None else True
                    )
                ) \
                .order_by(History.created_on.desc()).limit(lim).all()
            # Keep only entries matching one of the requested auth methods.
            temp = []
            for h in histories:
                for method in auth_methods:
                    if method in h.detail:
                        temp.append(h)
                        break
            histories = temp
        elif (changed_by != None or max_date != None) and current_user.role.name in ['Administrator', 'Operator']:  # select changed by and date filters only
            histories = History.query \
                .filter(
                    db.and_(
                        History.created_on <= max_date if max_date != None else True,
                        History.created_on >= min_date if min_date != None else True,
                        History.created_by == changed_by if changed_by != None else True
                    )
                ) \
                .order_by(History.created_on.desc()).limit(lim).all()
        elif (changed_by != None or max_date != None):  # special filtering for user because one user does not have access to log-ins logs
            histories = base_query \
                .filter(
                    db.and_(
                        History.created_on <= max_date if max_date != None else True,
                        History.created_on >= min_date if min_date != None else True,
                        History.created_by == changed_by if changed_by != None else True
                    )
                ) \
                .order_by(History.created_on.desc()).limit(lim).all()
        elif max_date != None:  # if changed by == null and only date is applied
            # NOTE(review): unreachable — the branch two above already matches
            # every max_date != None case; kept for fidelity.
            histories = base_query.filter(
                db.and_(
                    History.created_on <= max_date if max_date != None else True,
                    History.created_on >= min_date if min_date != None else True,
                )
            ).order_by(History.created_on.desc()).limit(lim).all()
        else:  # default view
            if current_user.role.name in ['Administrator', 'Operator']:
                histories = History.query.order_by(History.created_on.desc()).limit(lim).all()
            else:
                histories = db.session.query(History) \
                    .join(Domain, History.domain_id == Domain.id) \
                    .outerjoin(DomainUser, Domain.id == DomainUser.domain_id) \
                    .outerjoin(Account, Domain.account_id == Account.id) \
                    .outerjoin(AccountUser, Account.id == AccountUser.account_id) \
                    .order_by(History.created_on.desc()) \
                    .filter(
                        db.or_(
                            DomainUser.user_id == current_user.id,
                            AccountUser.user_id == current_user.id
                        )).limit(lim).all()
        detailedHistories = convert_histories(histories)
        # Remove dates from previous or next day that were brought over
        # by the +/- 1 day widening applied to min_date/max_date above.
        if tzoffset != None:
            if min_date != None:
                min_date_split = min_date.split()[0]
            if max_date != None:
                max_date_split = max_date.split()[0]
            for i, history_rec in enumerate(detailedHistories):
                local_date = str(from_utc_to_local(int(tzoffset), history_rec.history.created_on).date())
                if (min_date != None and local_date == min_date_split) or (max_date != None and local_date == max_date_split):
                    detailedHistories[i] = None
        # Remove elements previously flagged as None
        detailedHistories = [h for h in detailedHistories if h is not None]
    return render_template('admin_history_table.html', histories=detailedHistories, len_histories=len(detailedHistories), lim=lim)
@admin_bp.route('/setting/basic', methods=['GET'])
@login_required
@operator_role_required
def setting_basic():
    """Render the basic-settings page with the list of editable setting names."""
    if request.method == 'GET':
        basic_settings = [
            'maintenance', 'fullscreen_layout', 'record_helper',
            'login_ldap_first', 'default_record_table_size',
            'default_domain_table_size', 'auto_ptr', 'record_quick_edit',
            'pretty_ipv6_ptr', 'dnssec_admins_only',
            'allow_user_create_domain', 'allow_user_remove_domain',
            'allow_user_view_history', 'bg_domain_updates', 'site_name',
            'session_timeout', 'warn_session_timeout', 'ttl_options',
            'pdns_api_timeout', 'verify_ssl_connections', 'verify_user_email',
            'delete_sso_accounts', 'otp_field_enabled', 'custom_css',
            'enable_api_rr_history', 'max_history_records', 'otp_force',
        ]
        return render_template('admin_setting_basic.html',
                               settings=basic_settings)
@admin_bp.route('/setting/basic/<path:setting>/edit', methods=['POST'])
@login_required
@operator_role_required
def setting_basic_edit(setting):
    """Persist a new value for the given basic setting (AJAX endpoint)."""
    new_value = request.json['value']
    if Setting().set(setting, new_value):
        return make_response(
            jsonify({
                'status': 'ok',
                'msg': 'Toggled setting successfully.'
            }), 200)
    return make_response(
        jsonify({
            'status': 'error',
            'msg': 'Unable to toggle setting.'
        }), 500)
@admin_bp.route('/setting/basic/<path:setting>/toggle', methods=['POST'])
@login_required
@operator_role_required
def setting_basic_toggle(setting):
    """Flip a boolean basic setting (AJAX endpoint)."""
    if Setting().toggle(setting):
        return make_response(
            jsonify({
                'status': 'ok',
                'msg': 'Toggled setting successfully.'
            }), 200)
    return make_response(
        jsonify({
            'status': 'error',
            'msg': 'Unable to toggle setting.'
        }), 500)
@admin_bp.route('/setting/pdns', methods=['GET', 'POST'])
@login_required
@admin_role_required
def setting_pdns():
    """Show (GET) or update (POST) the PowerDNS API connection settings."""
    if request.method == 'POST':
        # Persist the submitted values, then render them back.
        pdns_api_url = request.form.get('pdns_api_url')
        pdns_api_key = request.form.get('pdns_api_key')
        pdns_version = request.form.get('pdns_version')
        Setting().set('pdns_api_url', pdns_api_url)
        Setting().set('pdns_api_key', pdns_api_key)
        Setting().set('pdns_version', pdns_version)
    else:
        pdns_api_url = Setting().get('pdns_api_url')
        pdns_api_key = Setting().get('pdns_api_key')
        pdns_version = Setting().get('pdns_version')
    return render_template('admin_setting_pdns.html',
                           pdns_api_url=pdns_api_url,
                           pdns_api_key=pdns_api_key,
                           pdns_version=pdns_version)
@admin_bp.route('/setting/dns-records', methods=['GET', 'POST'])
@login_required
@operator_role_required
def setting_records():
    """Show (GET) or save (POST) which record types users may edit."""
    if request.method == 'GET':
        forward = Setting().get('forward_records_allow_edit')
        reverse = Setting().get('reverse_records_allow_edit')
        # Older database rows store the dicts as their string repr.
        if isinstance(forward, str):
            forward = literal_eval(forward)
        if isinstance(reverse, str):
            reverse = literal_eval(reverse)
        return render_template('admin_setting_records.html',
                               f_records=forward,
                               r_records=reverse)
    elif request.method == 'POST':
        forward = {}
        reverse = {}
        for rtype in Setting().defaults['forward_records_allow_edit']:
            key = rtype.lower()
            forward[rtype] = bool(request.form.get('fr_{0}'.format(key)))
            reverse[rtype] = bool(request.form.get('rr_{0}'.format(key)))
        Setting().set('forward_records_allow_edit', str(forward))
        Setting().set('reverse_records_allow_edit', str(reverse))
        return redirect(url_for('admin.setting_records'))
def has_an_auth_method(local_db_enabled=None,
                       ldap_enabled=None,
                       google_oauth_enabled=None,
                       github_oauth_enabled=None,
                       oidc_oauth_enabled=None,
                       azure_oauth_enabled=None):
    """Return a truthy value when at least one authentication method is on.

    Any argument left as None is resolved from the stored settings, so a
    caller only needs to pass the value it is about to change.
    """
    def resolve(value, setting_name):
        # Fall back to the persisted setting when no override was given.
        return Setting().get(setting_name) if value is None else value

    local_db_enabled = resolve(local_db_enabled, 'local_db_enabled')
    ldap_enabled = resolve(ldap_enabled, 'ldap_enabled')
    google_oauth_enabled = resolve(google_oauth_enabled,
                                   'google_oauth_enabled')
    github_oauth_enabled = resolve(github_oauth_enabled,
                                   'github_oauth_enabled')
    oidc_oauth_enabled = resolve(oidc_oauth_enabled, 'oidc_oauth_enabled')
    azure_oauth_enabled = resolve(azure_oauth_enabled, 'azure_oauth_enabled')
    return (local_db_enabled or ldap_enabled or google_oauth_enabled
            or github_oauth_enabled or oidc_oauth_enabled
            or azure_oauth_enabled)
@admin_bp.route('/setting/authentication', methods=['GET', 'POST'])
@login_required
@admin_role_required
def setting_authentication():
    """Render (GET) or save (POST) the authentication settings.

    POST handles one settings tab at a time (selected by the
    ``config_tab`` form field: general / ldap / google / github /
    azure / oidc).  Every tab first verifies via has_an_auth_method()
    that at least one authentication method remains enabled before
    persisting anything.
    """
    if request.method == 'GET':
        return render_template('admin_setting_authentication.html')
    elif request.method == 'POST':
        conf_type = request.form.get('config_tab')
        result = None
        if conf_type == 'general':
            # General tab: local database login and open sign-up.
            local_db_enabled = True if request.form.get(
                'local_db_enabled') else False
            signup_enabled = True if request.form.get(
                'signup_enabled', ) else False
            if not has_an_auth_method(local_db_enabled=local_db_enabled):
                result = {
                    'status':
                    False,
                    'msg':
                    'Must have at least one authentication method enabled.'
                }
            else:
                Setting().set('local_db_enabled', local_db_enabled)
                Setting().set('signup_enabled', signup_enabled)
                result = {'status': True, 'msg': 'Saved successfully'}
        elif conf_type == 'ldap':
            # LDAP tab: connection, filters, group mappings and
            # auto-provisioning options.
            ldap_enabled = True if request.form.get('ldap_enabled') else False
            if not has_an_auth_method(ldap_enabled=ldap_enabled):
                result = {
                    'status':
                    False,
                    'msg':
                    'Must have at least one authentication method enabled.'
                }
            else:
                Setting().set('ldap_enabled', ldap_enabled)
                Setting().set('ldap_type', request.form.get('ldap_type'))
                Setting().set('ldap_uri', request.form.get('ldap_uri'))
                Setting().set('ldap_base_dn', request.form.get('ldap_base_dn'))
                Setting().set('ldap_admin_username',
                              request.form.get('ldap_admin_username'))
                Setting().set('ldap_admin_password',
                              request.form.get('ldap_admin_password'))
                Setting().set('ldap_filter_basic',
                              request.form.get('ldap_filter_basic'))
                Setting().set('ldap_filter_group',
                              request.form.get('ldap_filter_group'))
                Setting().set('ldap_filter_username',
                              request.form.get('ldap_filter_username'))
                Setting().set('ldap_filter_groupname',
                              request.form.get('ldap_filter_groupname'))
                Setting().set(
                    'ldap_sg_enabled', True
                    if request.form.get('ldap_sg_enabled') == 'ON' else False)
                Setting().set('ldap_admin_group',
                              request.form.get('ldap_admin_group'))
                Setting().set('ldap_operator_group',
                              request.form.get('ldap_operator_group'))
                Setting().set('ldap_user_group',
                              request.form.get('ldap_user_group'))
                Setting().set('ldap_domain', request.form.get('ldap_domain'))
                Setting().set(
                    'autoprovisioning', True
                    if request.form.get('autoprovisioning') == 'ON' else False)
                Setting().set('autoprovisioning_attribute',
                              request.form.get('autoprovisioning_attribute'))
                # With auto-provisioning on, the URN must validate; an
                # invalid one aborts the save with an error page.
                if request.form.get('autoprovisioning')=='ON':
                    if validateURN(request.form.get('urn_value')):
                        Setting().set('urn_value',
                                      request.form.get('urn_value'))
                    else:
                        return render_template('admin_setting_authentication.html',
                                               error="Invalid urn")
                else:
                    Setting().set('urn_value',
                                  request.form.get('urn_value'))
                Setting().set('purge', True
                              if request.form.get('purge') == 'ON' else False)
                result = {'status': True, 'msg': 'Saved successfully'}
        elif conf_type == 'google':
            # Google OAuth tab.
            google_oauth_enabled = True if request.form.get(
                'google_oauth_enabled') else False
            if not has_an_auth_method(google_oauth_enabled=google_oauth_enabled):
                result = {
                    'status':
                    False,
                    'msg':
                    'Must have at least one authentication method enabled.'
                }
            else:
                Setting().set('google_oauth_enabled', google_oauth_enabled)
                Setting().set('google_oauth_client_id',
                              request.form.get('google_oauth_client_id'))
                Setting().set('google_oauth_client_secret',
                              request.form.get('google_oauth_client_secret'))
                Setting().set('google_token_url',
                              request.form.get('google_token_url'))
                Setting().set('google_oauth_scope',
                              request.form.get('google_oauth_scope'))
                Setting().set('google_authorize_url',
                              request.form.get('google_authorize_url'))
                Setting().set('google_base_url',
                              request.form.get('google_base_url'))
                result = {
                    'status': True,
                    'msg':
                    'Saved successfully. Please reload PDA to take effect.'
                }
        elif conf_type == 'github':
            # GitHub OAuth tab.
            github_oauth_enabled = True if request.form.get(
                'github_oauth_enabled') else False
            if not has_an_auth_method(github_oauth_enabled=github_oauth_enabled):
                result = {
                    'status':
                    False,
                    'msg':
                    'Must have at least one authentication method enabled.'
                }
            else:
                Setting().set('github_oauth_enabled', github_oauth_enabled)
                Setting().set('github_oauth_key',
                              request.form.get('github_oauth_key'))
                Setting().set('github_oauth_secret',
                              request.form.get('github_oauth_secret'))
                Setting().set('github_oauth_scope',
                              request.form.get('github_oauth_scope'))
                Setting().set('github_oauth_api_url',
                              request.form.get('github_oauth_api_url'))
                Setting().set('github_oauth_token_url',
                              request.form.get('github_oauth_token_url'))
                Setting().set('github_oauth_authorize_url',
                              request.form.get('github_oauth_authorize_url'))
                result = {
                    'status': True,
                    'msg':
                    'Saved successfully. Please reload PDA to take effect.'
                }
        elif conf_type == 'azure':
            # Azure OAuth tab, including group-to-role and
            # group-to-account mappings.
            azure_oauth_enabled = True if request.form.get(
                'azure_oauth_enabled') else False
            if not has_an_auth_method(azure_oauth_enabled=azure_oauth_enabled):
                result = {
                    'status':
                    False,
                    'msg':
                    'Must have at least one authentication method enabled.'
                }
            else:
                Setting().set('azure_oauth_enabled', azure_oauth_enabled)
                Setting().set('azure_oauth_key',
                              request.form.get('azure_oauth_key'))
                Setting().set('azure_oauth_secret',
                              request.form.get('azure_oauth_secret'))
                Setting().set('azure_oauth_scope',
                              request.form.get('azure_oauth_scope'))
                Setting().set('azure_oauth_api_url',
                              request.form.get('azure_oauth_api_url'))
                Setting().set('azure_oauth_token_url',
                              request.form.get('azure_oauth_token_url'))
                Setting().set('azure_oauth_authorize_url',
                              request.form.get('azure_oauth_authorize_url'))
                Setting().set(
                    'azure_sg_enabled', True
                    if request.form.get('azure_sg_enabled') == 'ON' else False)
                Setting().set('azure_admin_group',
                              request.form.get('azure_admin_group'))
                Setting().set('azure_operator_group',
                              request.form.get('azure_operator_group'))
                Setting().set('azure_user_group',
                              request.form.get('azure_user_group'))
                Setting().set(
                    'azure_group_accounts_enabled', True
                    if request.form.get('azure_group_accounts_enabled') == 'ON' else False)
                Setting().set('azure_group_accounts_name',
                              request.form.get('azure_group_accounts_name'))
                Setting().set('azure_group_accounts_name_re',
                              request.form.get('azure_group_accounts_name_re'))
                Setting().set('azure_group_accounts_description',
                              request.form.get('azure_group_accounts_description'))
                Setting().set('azure_group_accounts_description_re',
                              request.form.get('azure_group_accounts_description_re'))
                result = {
                    'status': True,
                    'msg':
                    'Saved successfully. Please reload PDA to take effect.'
                }
        elif conf_type == 'oidc':
            # OpenID Connect tab.
            oidc_oauth_enabled = True if request.form.get(
                'oidc_oauth_enabled') else False
            if not has_an_auth_method(oidc_oauth_enabled=oidc_oauth_enabled):
                result = {
                    'status':
                    False,
                    'msg':
                    'Must have at least one authentication method enabled.'
                }
            else:
                Setting().set(
                    'oidc_oauth_enabled',
                    True if request.form.get('oidc_oauth_enabled') else False)
                Setting().set('oidc_oauth_key',
                              request.form.get('oidc_oauth_key'))
                Setting().set('oidc_oauth_secret',
                              request.form.get('oidc_oauth_secret'))
                Setting().set('oidc_oauth_scope',
                              request.form.get('oidc_oauth_scope'))
                Setting().set('oidc_oauth_api_url',
                              request.form.get('oidc_oauth_api_url'))
                Setting().set('oidc_oauth_token_url',
                              request.form.get('oidc_oauth_token_url'))
                Setting().set('oidc_oauth_authorize_url',
                              request.form.get('oidc_oauth_authorize_url'))
                Setting().set('oidc_oauth_logout_url',
                              request.form.get('oidc_oauth_logout_url'))
                Setting().set('oidc_oauth_username',
                              request.form.get('oidc_oauth_username'))
                Setting().set('oidc_oauth_firstname',
                              request.form.get('oidc_oauth_firstname'))
                Setting().set('oidc_oauth_last_name',
                              request.form.get('oidc_oauth_last_name'))
                Setting().set('oidc_oauth_email',
                              request.form.get('oidc_oauth_email'))
                Setting().set('oidc_oauth_account_name_property',
                              request.form.get('oidc_oauth_account_name_property'))
                Setting().set('oidc_oauth_account_description_property',
                              request.form.get('oidc_oauth_account_description_property'))
                result = {
                    'status': True,
                    'msg':
                    'Saved successfully. Please reload PDA to take effect.'
                }
        else:
            # Unknown tab name in the form data.
            return abort(400)
        return render_template('admin_setting_authentication.html',
                               result=result)
@admin_bp.route('/templates', methods=['GET', 'POST'])
@admin_bp.route('/templates/list', methods=['GET', 'POST'])
@login_required
@operator_role_required
def templates():
    """Render the listing page for every domain template."""
    all_templates = DomainTemplate.query.all()
    return render_template('template.html', templates=all_templates)
@admin_bp.route('/template/create', methods=['GET', 'POST'])
@login_required
@operator_role_required
def create_template():
    """Show the template-creation form (GET) or create a domain template (POST).

    On POST: validates the submitted name, rejects duplicates, persists the
    template and writes an entry to the history log. Redirects back to the
    form on validation failure, or to the template list on success.
    """
    if request.method == 'GET':
        return render_template('template_add.html')
    if request.method == 'POST':
        try:
            name = request.form.getlist('name')[0]
            description = request.form.getlist('description')[0]
            # Reject empty names and names containing spaces.
            # (The previous check also included "not type", which referenced
            # the *builtin* type — always truthy — so it was dead code.)
            if ' ' in name or not name:
                flash("Please correct your input", 'error')
                return redirect(url_for('admin.create_template'))
            if DomainTemplate.query.filter(
                    DomainTemplate.name == name).first():
                flash(
                    "A template with the name {0} already exists!".format(
                        name), 'error')
                return redirect(url_for('admin.create_template'))
            t = DomainTemplate(name=name, description=description)
            result = t.create()
            if result['status'] == 'ok':
                history = History(msg='Add domain template {0}'.format(name),
                                  detail=json.dumps({
                                      'name': name,
                                      'description': description
                                  }),
                                  created_by=current_user.username)
                history.add()
                return redirect(url_for('admin.templates'))
            else:
                flash(result['msg'], 'error')
                return redirect(url_for('admin.create_template'))
        except Exception as e:
            current_app.logger.error(
                'Cannot create domain template. Error: {0}'.format(e))
            current_app.logger.debug(traceback.format_exc())
            abort(500)
@admin_bp.route('/template/create-from-zone', methods=['POST'])
@login_required
@operator_role_required
def create_template_from_zone():
    """Create a domain template pre-populated with an existing zone's records.

    Expects a JSON body with ``name``, ``description`` and ``domain``.
    Returns a JSON status with 200 on success, 400 for a bad name, 409 for a
    duplicate template name and 500 on internal failure.
    """
    try:
        jdata = request.json
        name = jdata['name']
        description = jdata['description']
        domain_name = jdata['domain']
        # Reject empty names and names containing spaces.
        # (The previous check also included "not type", which referenced the
        # *builtin* type — always truthy — so it was dead code.)
        if ' ' in name or not name:
            return make_response(
                jsonify({
                    'status': 'error',
                    'msg': 'Please correct template name'
                }), 400)
        if DomainTemplate.query.filter(DomainTemplate.name == name).first():
            return make_response(
                jsonify({
                    'status':
                    'error',
                    'msg':
                    'A template with the name {0} already exists!'.format(name)
                }), 409)
        t = DomainTemplate(name=name, description=description)
        result = t.create()
        if result['status'] == 'ok':
            history = History(msg='Add domain template {0}'.format(name),
                              detail=json.dumps({
                                  'name': name,
                                  'description': description
                              }),
                              created_by=current_user.username)
            history.add()
            # After creating the domain in Domain Template in the,
            # local DB. We add records into it Record Template.
            records = []
            domain = Domain.query.filter(Domain.name == domain_name).first()
            if domain:
                # Query zone's rrsets from PowerDNS API
                rrsets = Record().get_rrsets(domain.name)
                if rrsets:
                    for r in rrsets:
                        # '@' denotes the zone apex; otherwise strip the
                        # zone suffix from the fully-qualified record name.
                        name = '@' if r['name'] == domain_name + '.' else r[
                            'name'].replace('.{}.'.format(domain_name), '')
                        for record in r['records']:
                            t_record = DomainTemplateRecord(
                                name=name,
                                type=r['type'],
                                status=False if record['disabled'] else True,
                                ttl=r['ttl'],
                                data=record['content'])
                            records.append(t_record)
            result = t.replace_records(records)
            if result['status'] == 'ok':
                return make_response(
                    jsonify({
                        'status': 'ok',
                        'msg': result['msg']
                    }), 200)
            else:
                # Revert the domain template (remove it)
                # if we cannot add records.
                t.delete_template()
                return make_response(
                    jsonify({
                        'status': 'error',
                        'msg': result['msg']
                    }), 500)
        else:
            return make_response(
                jsonify({
                    'status': 'error',
                    'msg': result['msg']
                }), 500)
    except Exception as e:
        current_app.logger.error(
            'Cannot create template from zone. Error: {0}'.format(e))
        current_app.logger.debug(traceback.format_exc())
        return make_response(
            jsonify({
                'status': 'error',
                'msg': 'Error when applying new changes'
            }), 500)
@admin_bp.route('/template/<path:template>/edit', methods=['GET'])
@login_required
@operator_role_required
def edit_template(template):
    """Render the record-editing page for one domain template.

    Falls back to redirecting to the template list when the template does
    not exist.
    """
    try:
        tmpl = DomainTemplate.query.filter(
            DomainTemplate.name == template).first()
        editable_types = Setting().get_records_allow_to_edit()
        quick_edit = Setting().get('record_quick_edit')
        ttl_options = Setting().get_ttl_options()
        if tmpl is not None:
            # Only expose record types the instance allows to be edited.
            records = [
                DomainTemplateRecord(
                    name=rec.name,
                    type=rec.type,
                    status='Active' if rec.status else 'Disabled',
                    ttl=rec.ttl,
                    data=rec.data,
                    comment=rec.comment if rec.comment else '')
                for rec in tmpl.records if rec.type in editable_types
            ]
            return render_template('template_edit.html',
                                   template=tmpl.name,
                                   records=records,
                                   editable_records=editable_types,
                                   quick_edit=quick_edit,
                                   ttl_options=ttl_options)
    except Exception as e:
        current_app.logger.error(
            'Cannot open domain template page. DETAIL: {0}'.format(e))
        current_app.logger.debug(traceback.format_exc())
        abort(500)
    return redirect(url_for('admin.templates'))
@admin_bp.route('/template/<path:template>/apply',
                methods=['POST'],
                strict_slashes=False)
@login_required
def apply_records(template):
    """Replace all records of a domain template with the submitted JSON set."""
    try:
        payload = request.json
        new_records = []
        for entry in payload['records']:
            # '@' (or an empty name) denotes the zone apex.
            record_name = entry['record_name']
            if record_name in ['@', '']:
                record_name = '@'
            new_records.append(
                DomainTemplateRecord(
                    name=record_name,
                    type=entry['record_type'],
                    data=entry['record_data'],
                    comment=entry['record_comment'],
                    status=0 if entry['record_status'] == 'Disabled' else 1,
                    ttl=int(entry['record_ttl']) if entry['record_ttl'] else 3600))
        tmpl = DomainTemplate.query.filter(
            DomainTemplate.name == template).first()
        outcome = tmpl.replace_records(new_records)
        if outcome['status'] == 'ok':
            # Don't store the CSRF token in the history.
            payload.pop('_csrf_token', None)
            history = History(
                msg='Apply domain template record changes to domain template {0}'
                .format(template),
                detail=json.dumps(payload),
                created_by=current_user.username)
            history.add()
            return make_response(jsonify(outcome), 200)
        return make_response(jsonify(outcome), 400)
    except Exception as e:
        current_app.logger.error(
            'Cannot apply record changes to the template. Error: {0}'.format(
                e))
        current_app.logger.debug(traceback.format_exc())
        return make_response(
            jsonify({
                'status': 'error',
                'msg': 'Error when applying new changes'
            }), 500)
@admin_bp.route('/template/<path:template>/delete', methods=['POST'])
@login_required
@operator_role_required
def delete_template(template):
    """Delete the named domain template and record the action in history."""
    try:
        tmpl = DomainTemplate.query.filter(
            DomainTemplate.name == template).first()
        if tmpl is not None:
            outcome = tmpl.delete_template()
            if outcome['status'] == 'ok':
                entry = History(
                    msg='Deleted domain template {0}'.format(template),
                    detail=json.dumps({'name': template}),
                    created_by=current_user.username)
                entry.add()
                return redirect(url_for('admin.templates'))
            flash(outcome['msg'], 'error')
            return redirect(url_for('admin.templates'))
    except Exception as e:
        current_app.logger.error(
            'Cannot delete template. Error: {0}'.format(e))
        current_app.logger.debug(traceback.format_exc())
        abort(500)
    return redirect(url_for('admin.templates'))
@admin_bp.route('/global-search', methods=['GET'])
@login_required
@operator_role_required
def global_search():
    """Search zones, records and comments across the local PowerDNS server."""
    if request.method == 'GET':
        domains = []
        records = []
        comments = []
        query = request.args.get('q')
        if query:
            server = Server(server_id='localhost')
            hits = server.global_search(object_type='all', query=query)
            # Normalise each hit (strip the trailing dot PowerDNS appends to
            # names) and sort it into a per-type bucket.
            for hit in hits:
                kind = hit['object_type']
                if kind == 'zone':
                    hit['name'] = hit['name'][:-1]
                    domains.append(hit)
                elif kind == 'record':
                    hit['name'] = hit['name'][:-1]
                    hit['zone_id'] = hit['zone_id'][:-1]
                    records.append(hit)
                elif kind == 'comment':
                    # Keep only the record part of the name; '@' marks the
                    # zone apex.
                    hit['name'] = hit['name'].replace(hit['zone_id'], '')
                    hit['name'] = hit['name'][:-1] if hit['name'] else '@'
                    hit['zone_id'] = hit['zone_id'][:-1]
                    comments.append(hit)
        return render_template('admin_global_search.html', domains=domains, records=records, comments=comments)
def validateURN(value):
    """Return True when *value* is a syntactically valid URN (``urn:<nid>:<nss>``).

    The namespace identifier (NID) and namespace-specific string (NSS) are
    checked against RFC 8141-style character classes. Logs a warning and
    returns False for any malformed part.
    """
    NID_PATTERN = re.compile(r'^[0-9a-z][0-9a-z-]{1,31}$', flags=re.IGNORECASE)
    NSS_PCHAR = '[a-z0-9-._~]|%[a-f0-9]{2}|[!$&\'()*+,;=]|:|@'
    NSS_PATTERN = re.compile(fr'^({NSS_PCHAR})({NSS_PCHAR}|/|\?)*$', re.IGNORECASE)
    # Split into at most three parts so the NSS may itself contain ':'.
    # (The previous implementation rebuilt the NSS with str.replace(), which
    # removed *every* occurrence of the "urn:<nid>:" prefix and therefore
    # corrupted any NSS that happened to repeat it.)
    parts = value.split(':', 2)
    if len(parts) < 3:
        current_app.logger.warning("Too small urn prefix")
        return False
    scheme, nid, nss = parts
    if scheme.lower() != "urn":
        current_app.logger.warning(scheme + ' contains invalid characters ')
        return False
    if not re.match(NID_PATTERN, nid.lower()):
        current_app.logger.warning(nid + ' contains invalid characters ')
        return False
    if not re.match(NSS_PATTERN, nss):
        current_app.logger.warning(nss + ' contains invalid characters ')
        return False
    return True
| powerdnsadmin/routes/admin.py | 82,532 | get the deletions and status changes deletion get the additions already checked for status change get the unchanged out_changes is a list of HistoryRecordEntry objects in which we will append the new changes a HistoryRecordEntry represents a pair of add_rrest and del_rrest not a record entry this is a new record (add_rrest, del_rrest, change_type) no need to add in the out_changes set this is a deletion only used for changelog per record then get only the records with the specific (record_name, record_type) tuple for each history record entry in changes_i records with same (name,type) are considered as a single HistoryRecordEntry history_entry is of type History - used to extract created_by and created_on add_rrest is a dictionary of replace del_rrest is a dictionary of remove search the add_rrest index into the add_rrest set for the key (name, type) "*": edit or unchanged, "+" new tuple(name,type), "-" deleted (name,type) tuple contains a subset of : [ttl, name, type] all changes for the records of this add_rrest-del_rrest pair addition removal edit of unchanged used for removal of objects from a list Manage session timeout current_app.permanent_session_lifetime = datetime.timedelta( minutes=int(Setting().get('session_timeout'))) Create new apikey Update existing apikey post data should in format {'action': 'delete_user', 'data': 'username'} Remove account associations first Then delete the user on POST, synthesize account and account_user_ids from form data account __init__ sanitizes and lowercases the name, so to manage expectations we let the user reenter the name until it's not empty and it's valid (ignoring the case) post data should in format {'action': 'delete_account', 'data': 'accountname'} Remove account association from domains first Then delete the account this is a domain creation this is a user authentication this is a domain record change changes_set = [] extract_changelogs_from_a_history_entry(changes_set, 
history, 0) template creation / deletion added or removed a user from a domain check for lower key as well for old databases convert a list of History objects into DetailedHistory objects no changes were found special autocomplete for users local_offset is the offset of the utc to the local time offset must be int return the date converted and simplified ajax call data max num of records if the user isn't an administrator or operator, allow_user_view_history must be enabled to get here, so include history for the domains for the user get 1 day earlier, to check for timezone errors get 1 day later, to check for timezone errors users cannot search for authentication search for records changes only only admins can see the user login-logouts select changed by and date filters only special filtering for user because one user does not have access to log-ins logs if changed by == null and only date is applied default view Remove dates from previous or next day that were brought over Remove elements previously flagged as None After creating the domain in Domain Template in the, local DB. We add records into it Record Template. Query zone's rrsets from PowerDNS API Revert the domain template (remove it) ff we cannot add records. don't store csrf token in the history. Format the search result Remove the dot at the end of string Remove the dot at the end of string Get the actual record name, exclude the domain part Remove the dot at the end of string | 3,425 | en | 0.815857 |
import torch
import numpy as np
from torch import nn
import torch.nn.functional as F
class Actor(nn.Module):
    """Discrete-action policy head: ``preprocess_net`` features -> softmax probs.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(self, preprocess_net, action_shape, hidden_layer_size=128):
        super().__init__()
        self.preprocess = preprocess_net
        # Single linear layer mapping hidden features to the flattened
        # action space.
        self.last = nn.Linear(hidden_layer_size, np.prod(action_shape))

    def forward(self, s, state=None, info=None):
        r"""s -> Q(s, \*)

        Returns ``(probs, hidden_state)`` where ``probs`` are the
        softmax-normalised action probabilities from the last layer.
        ``info`` is accepted for API compatibility but unused; its default
        was changed from the shared mutable ``{}`` to ``None``.
        """
        logits, h = self.preprocess(s, state)
        logits = F.softmax(self.last(logits), dim=-1)
        return logits, h
class Critic(nn.Module):
    """State-value head producing a scalar V(s) from preprocessed features.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(self, preprocess_net, hidden_layer_size=128):
        super().__init__()
        self.preprocess = preprocess_net
        self.last = nn.Linear(hidden_layer_size, 1)

    def forward(self, s, **kwargs):
        """s -> V(s)"""
        # The preprocess network also returns a hidden state, which the
        # critic does not need.
        features, _ = self.preprocess(s, state=kwargs.get('state', None))
        return self.last(features)
class DQN(nn.Module):
    """Convolutional Q-network over stacked-frame image observations.

    Input is expected in NHWC layout with 4 channels (frame stack);
    it is permuted to NCHW before the convolutions.

    For advanced usage (how to customize the network), please refer to
    :ref:`build_the_network`.
    """

    def __init__(self, h, w, action_shape, device='cpu'):
        # Plain super() for consistency with Actor/Critic (was
        # super(DQN, self).__init__()).
        super().__init__()
        self.device = device
        self.conv1 = nn.Conv2d(4, 16, kernel_size=5, stride=2)
        self.bn1 = nn.BatchNorm2d(16)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=5, stride=2)
        self.bn2 = nn.BatchNorm2d(32)
        self.conv3 = nn.Conv2d(32, 32, kernel_size=5, stride=2)
        self.bn3 = nn.BatchNorm2d(32)

        def conv2d_size_out(size, kernel_size=5, stride=2):
            # Spatial extent after one conv layer (no padding/dilation).
            return (size - (kernel_size - 1) - 1) // stride + 1

        convw = conv2d_size_out(conv2d_size_out(conv2d_size_out(w)))
        convh = conv2d_size_out(conv2d_size_out(conv2d_size_out(h)))
        linear_input_size = convw * convh * 32
        self.fc = nn.Linear(linear_input_size, 512)
        self.head = nn.Linear(512, action_shape)

    def forward(self, x, state=None, info=None):
        r"""x -> Q(x, \*)

        Accepts a tensor or array-like ``x`` of shape (batch, h, w, 4);
        returns ``(q_values, state)``. ``info`` is accepted for API
        compatibility but unused; its default was changed from the shared
        mutable ``{}`` to ``None``.
        """
        if not isinstance(x, torch.Tensor):
            x = torch.tensor(x, device=self.device, dtype=torch.float32)
        x = x.permute(0, 3, 1, 2)
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = self.fc(x.reshape(x.size(0), -1))
        return self.head(x), state
| tianshou/utils/net/discrete.py | 2,575 | For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
For advanced usage (how to customize the network), please refer to
:ref:`build_the_network`.
s -> Q(s, \*)
s -> V(s)
x -> Q(x, \*) | 316 | en | 0.882755 |
"""Fixer that changes raw_input(...) into input(...)."""
# Author: Andre Roberge
# Local imports
from .. import fixer_base
from ..fixer_util import Name
class FixRawInput(fixer_base.BaseFix):
    """Rewrite Python 2 ``raw_input(...)`` calls to Python 3 ``input(...)``."""

    # Eligible for the bottom-matcher optimisation in lib2to3's refactoring
    # engine.
    BM_compatible = True
    PATTERN = """
              power< name='raw_input' trailer< '(' [any] ')' > any* >
              """

    def transform(self, node, results):
        # ``results['name']`` is the Name leaf matched as 'raw_input'; swap it
        # for 'input', preserving its prefix (leading whitespace/comments).
        name = results["name"]
        name.replace(Name(u"input", prefix=name.prefix))
| utils/py27/Lib/lib2to3/fixes/fix_raw_input.py | 472 | Fixer that changes raw_input(...) into input(...).
Author: Andre Roberge Local imports | 88 | en | 0.435662 |
"""
This module is the commandline interface of bowl.
Created on 14 March 2014
@author: Charlie Lewis
"""
import argparse
from bowl.cli_opts import add
from bowl.cli_opts import connect
from bowl.cli_opts import delete
from bowl.cli_opts import disconnect
from bowl.cli_opts import grant
from bowl.cli_opts import hosts
from bowl.cli_opts import image_import
from bowl.cli_opts import images
from bowl.cli_opts import info
from bowl.cli_opts import kill
from bowl.cli_opts import link
from bowl.cli_opts import list
from bowl.cli_opts import login
from bowl.cli_opts import logout
from bowl.cli_opts import logs
from bowl.cli_opts import new
from bowl.cli_opts import remove
from bowl.cli_opts import repositories
from bowl.cli_opts import revoke
from bowl.cli_opts import services
from bowl.cli_opts import snapshot
from bowl.cli_opts import snapshots
from bowl.cli_opts import start
from bowl.cli_opts import stop
from bowl.cli_opts import subtract
from bowl.cli_opts import test
from bowl.cli_opts import unlink
from bowl.cli_opts import update
from bowl.cli_opts import version
class cli(object):
    """
    This class is responsible for all commandline operations.
    """
    def parse_args(self):
        """Build the argument parser for every bowl sub-command and dispatch.

        Each sub-command registers its handler via ``set_defaults(func=...)``;
        after parsing, the chosen handler is invoked with the parsed args.
        When no sub-command is given, the help text is printed (argparse
        sub-parsers are optional in Python 3, so ``args`` may lack ``func``).
        """
        default_metadata_path = "~/.bowl"
        default_path = "/bowl"
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers(title='bowl commands')
        # add
        parse_add = subparsers.add_parser('add',
                                          help='add a service')
        parse_add.add_argument('OS',
                               help='specify operating system for service')
        parse_add.add_argument('VERSION',
                               help='specify version of operating system')
        parse_add.add_argument('TYPE',
                               help='specify type of service (databases, environment, services, tools)')
        parse_add.add_argument('NAME',
                               help='specify name of service')
        parse_add.add_argument('JSON',
                               help='JSON object or path to JSON object that contains associated metadata')
        parse_add.add_argument('PATH',
                               help='path that contains the Dockerfile')
        parse_add.add_argument('--metadata_path', '-m',
                               default=default_metadata_path,
                               help='metadata path, default '+default_metadata_path)
        # !! TODO use default, and restructure if/else in add
        parse_add.add_argument('--repository', '-r',
                               help='specify repository to add service to, use localhost by default')
        parse_add.set_defaults(func=add.add.main)
        # connect
        parse_connect = subparsers.add_parser('connect',
                                              help='connect to a docker host')
        parse_connect.add_argument('DOCKER_HOST',
                                   help='specify docker host to connect to (hostname or IP)')
        parse_connect.add_argument('-e',
                                   action='store_true',
                                   help='use environment variables to establish connection details')
        parse_connect.add_argument('--port', '-p',
                                   default="2375",
                                   help='specify docker host port to connect to')
        parse_connect.add_argument('--sock', '-s',
                                   default=None,
                                   help='specify docker host socket to connect to, i.e. unix://var/run/docker.sock')
        parse_connect.add_argument('--tlsverify',
                                   action='store_true',
                                   help='verify the server certificate for TLS')
        parse_connect.add_argument('--tlscert',
                                   default=None,
                                   help='/path/to/client-cert.pem for TLS')
        parse_connect.add_argument('--tlskey',
                                   default=None,
                                   help='/path/to/client-key.pem for TLS')
        parse_connect.add_argument('--tlscacert',
                                   default=None,
                                   help='/path/to/ca.pem for TLS')
        parse_connect.add_argument('--metadata_path', '-m',
                                   default=default_metadata_path,
                                   help='metadata path, default '+default_metadata_path)
        parse_connect.set_defaults(func=connect.connect.main)
        # delete
        parse_delete = subparsers.add_parser('delete',
                                             help='delete an image')
        parse_delete.add_argument('IMAGE_NAME',
                                  help='specify name of image to delete')
        parse_delete.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        parse_delete.set_defaults(func=delete.delete.main)
        # disconnect
        parse_disconnect = subparsers.add_parser('disconnect',
                                                 help='disconnect from a docker host')
        parse_disconnect.add_argument('DOCKER_HOST',
                                      help='specify docker host to disconnect from')
        parse_disconnect.add_argument('--metadata_path', '-m',
                                      default=default_metadata_path,
                                      help='metadata path, default '+default_metadata_path)
        parse_disconnect.set_defaults(func=disconnect.disconnect.main)
        # grant
        parse_grant = subparsers.add_parser('grant',
                                            help='grant access to container for a user')
        parse_grant.add_argument('USER',
                                 help='specify user to grant access')
        parse_grant.add_argument('--container', '-c',
                                 default="all",
                                 help='specify container to add access to for the specified user, default all')
        parse_grant.add_argument('--metadata_path', '-m',
                                 default=default_metadata_path,
                                 help='metadata path, default '+default_metadata_path)
        parse_grant.add_argument('-z',
                                 action='store_true',
                                 help='do not print any output')
        parse_grant.set_defaults(func=grant.grant.main)
        # hosts
        parse_hosts = subparsers.add_parser('hosts',
                                            help='list hosts that are registered')
        parse_hosts.add_argument('--metadata_path', '-m',
                                 default=default_metadata_path,
                                 help='metadata path, default '+default_metadata_path)
        parse_hosts.add_argument('-z',
                                 action='store_true',
                                 help='do not print any output')
        parse_hosts.set_defaults(func=hosts.hosts.main)
        # images
        parse_images = subparsers.add_parser('images',
                                             help='list images')
        parse_images.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        parse_images.add_argument('-z',
                                  action='store_true',
                                  help='do not print any output')
        parse_images.set_defaults(func=images.images.main)
        # import
        parse_import = subparsers.add_parser('import',
                                             help='import an image')
        parse_import.add_argument('IMAGE_NAME',
                                  help='specify name of image to import')
        parse_import.add_argument('DOCKER_HOST',
                                  help='specify Docker host of image to import')
        parse_import.add_argument('-d', '--description',
                                  help='description of image to import')
        parse_import.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        parse_import.add_argument('-u', '--uuid',
                                  help='uuid of image to import')
        # use non-standard naming scheme to not conflict with python's import
        parse_import.set_defaults(func=image_import.image_import.main)
        # info
        parse_info = subparsers.add_parser('info',
                                           help='display system-wide information')
        parse_info.add_argument('--metadata_path', '-m',
                                default=default_metadata_path,
                                help='metadata path, default '+default_metadata_path)
        parse_info.add_argument('-z',
                                action='store_true',
                                help='do not print any output')
        parse_info.set_defaults(func=info.info.main)
        # kill
        parse_kill = subparsers.add_parser('kill',
                                           help='kill running container')
        parse_kill.add_argument('CONTAINER',
                                help='specify container to kill')
        parse_kill.add_argument('--metadata_path', '-m',
                                default=default_metadata_path,
                                help='metadata path, default '+default_metadata_path)
        parse_kill.add_argument('-z',
                                action='store_true',
                                help='do not print any output')
        parse_kill.set_defaults(func=kill.kill.main)
        # link
        parse_link = subparsers.add_parser('link',
                                           help='link to a service repository host')
        parse_link.add_argument('SERVICE_HOST',
                                help='specify service repository host to connect to')
        parse_link.add_argument('NAME',
                                help='specify a name for the repository')
        parse_link.add_argument('--path',
                                default=default_metadata_path,
                                help='specify path where services live, default '+default_metadata_path)
        parse_link.add_argument('--port', '-p',
                                default='8080',
                                help='specify port that service host is running on, default=8080')
        parse_link.add_argument('--metadata_path', '-m',
                                default=default_metadata_path,
                                help='metadata path, default '+default_metadata_path)
        parse_link.add_argument('-z',
                                action='store_true',
                                help='do not print any output')
        parse_link.set_defaults(func=link.link.main)
        # list
        parse_list = subparsers.add_parser('list',
                                           help='list containers running')
        parse_list.add_argument('--metadata_path', '-m',
                                default=default_metadata_path,
                                help='metadata path, default '+default_metadata_path)
        parse_list.add_argument('-z',
                                action='store_true',
                                help='do not print any output')
        parse_list.set_defaults(func=list.list.main)
        # login
        parse_login = subparsers.add_parser('login',
                                            help='login with credentials')
        parse_login.add_argument('-e', '--email',
                                 help='email address')
        parse_login.add_argument('-u', '--username',
                                 help='username')
        parse_login.add_argument('PASSWORD',
                                 help='password')
        parse_login.set_defaults(func=login.login.main)
        # logout
        parse_logout = subparsers.add_parser('logout',
                                             help='logout')
        parse_logout.set_defaults(func=logout.logout.main)
        # logs
        parse_logs = subparsers.add_parser('logs',
                                           help='container logs')
        parse_logs.add_argument('CONTAINER',
                                help='specify container to get logs from')
        parse_logs.add_argument('--metadata_path', '-m',
                                default=default_metadata_path,
                                help='metadata path, default '+default_metadata_path)
        parse_logs.add_argument('-z',
                                action='store_true',
                                help='do not print any output')
        parse_logs.set_defaults(func=logs.logs.main)
        # new
        parse_new = subparsers.add_parser('new',
                                          help='new container')
        parse_new.add_argument('--metadata_path', '-m',
                               default=default_metadata_path,
                               help='metadata path, default '+default_metadata_path)
        parse_new.add_argument('--toggle_default',
                               action='store_true',
                               help='toggle using default services, uses them by default')
        parse_new.add_argument('--no_curses', '-n',
                               action='store_true',
                               help='do not use curses')
        parse_new.add_argument('--service', '-s',
                               action='append',
                               help='add a service to the container, can be used more than once, only used with no_curses')
        parse_new.add_argument('--image', '-i',
                               help='specify an image, only used with no_curses')
        parse_new.add_argument('--host',
                               action='append',
                               help='add a host to run the container one, can be used more than once, only used with no_curses')
        parse_new.add_argument('--command', '-c',
                               action='store_true',
                               help='override command at runtime of container, only used with no_curses')
        parse_new.add_argument('--entrypoint', '-e',
                               action='store_true',
                               help='override entrypoint at runtime of container, only used with no_curses')
        parse_new.add_argument('--volume',
                               action='store_true',
                               help='add volumes at runtime of container, only used with no_curses')
        parse_new.add_argument('--volume_from',
                               action='store_true',
                               help='add volumes from other containers at runtime of container, only used with no_curses')
        parse_new.add_argument('--port', '-p',
                               action='store_true',
                               help='set ports at runtime of container, only used with no_curses')
        parse_new.add_argument('--link', '-l',
                               action='store_true',
                               help='add links to containers at runtime of container, only used with no_curses')
        parse_new.add_argument('--name',
                               action='store_true',
                               help='set ports at runtime of container, only used with no_curses')
        parse_new.add_argument('--unique', '-u',
                               action='store_true',
                               help='set different runtime parameters for each container, only used with no_curses')
        parse_new.add_argument('--user',
                               action='store_true',
                               help='add a user at runtime of container, only used with no_curses')
        parse_new.set_defaults(func=new.new.main)
        # remove
        parse_remove = subparsers.add_parser('rm',
                                             help='remove a container')
        parse_remove.add_argument('CONTAINER',
                                  help='specify container to remove')
        parse_remove.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        parse_remove.add_argument('-z',
                                  action='store_true',
                                  help='do not print any output')
        parse_remove.set_defaults(func=remove.remove.main)
        # repositories
        parse_repositories = subparsers.add_parser('repositories',
                                                   help='list repositories that are registered')
        parse_repositories.add_argument('--metadata_path', '-m',
                                        default=default_metadata_path,
                                        help='metadata path, default '+default_metadata_path)
        parse_repositories.add_argument('-z',
                                        action='store_true',
                                        help='do not print any output')
        parse_repositories.set_defaults(func=repositories.repositories.main)
        # revoke
        parse_revoke = subparsers.add_parser('revoke',
                                             help='revoke access to container for a user')
        parse_revoke.add_argument('USER',
                                  help='specify user to revoke access')
        parse_revoke.add_argument('--container', '-c',
                                  default="all",
                                  help='specify container to remove access to for the specified user, default all')
        parse_revoke.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        parse_revoke.add_argument('-z',
                                  action='store_true',
                                  help='do not print any output')
        parse_revoke.set_defaults(func=revoke.revoke.main)
        # services
        parse_services = subparsers.add_parser('services',
                                               help='list services')
        parse_services.add_argument('-j', '--json',
                                    action='store_true',
                                    help='print complete JSON object for each service')
        parse_services.add_argument('--metadata_path', '-m',
                                    default=default_metadata_path,
                                    help='metadata path, default '+default_metadata_path)
        parse_services.add_argument('-q', '--quiet',
                                    action='store_true',
                                    help='print only the name, will ignore -j if also supplied')
        parse_services.add_argument('-z',
                                    action='store_true',
                                    help='do not print any output')
        parse_services.set_defaults(func=services.services.main)
        # snapshot
        parse_snapshot = subparsers.add_parser('snapshot',
                                               help='snapshot running container')
        parse_snapshot.add_argument('CONTAINER',
                                    help='specify container to snapshot')
        parse_snapshot.add_argument('--metadata_path', '-m',
                                    default=default_metadata_path,
                                    help='metadata path, default '+default_metadata_path)
        parse_snapshot.add_argument('-z',
                                    action='store_true',
                                    help='do not print any output')
        parse_snapshot.set_defaults(func=snapshot.snapshot.main)
        # snapshots
        parse_snapshots = subparsers.add_parser('snapshots',
                                                help='list snapshots')
        parse_snapshots.add_argument('--metadata_path', '-m',
                                     default=default_metadata_path,
                                     help='metadata path, default '+default_metadata_path)
        parse_snapshots.add_argument('-z',
                                     action='store_true',
                                     help='do not print any output')
        parse_snapshots.set_defaults(func=snapshots.snapshots.main)
        # start
        parse_start = subparsers.add_parser('start',
                                            help='start the api/repository service server')
        parse_start.add_argument('--metadata_path', '-m',
                                 default=default_metadata_path,
                                 help='metadata path, default '+default_metadata_path)
        parse_start.add_argument('-z',
                                 action='store_true',
                                 help='do not print any output')
        parse_start.set_defaults(func=start.start.main)
        # stop
        parse_stop = subparsers.add_parser('stop',
                                           help='stop the api/repository service server')
        parse_stop.add_argument('--metadata_path', '-m',
                                default=default_metadata_path,
                                help='metadata path, default '+default_metadata_path)
        parse_stop.add_argument('-z',
                                action='store_true',
                                help='do not print any output')
        parse_stop.set_defaults(func=stop.stop.main)
        # subtract
        parse_subtract = subparsers.add_parser('subtract',
                                               help='subtract a service')
        parse_subtract.add_argument('OS',
                                    help='specify operating system for service')
        parse_subtract.add_argument('VERSION',
                                    help='specify version of operating system')
        parse_subtract.add_argument('TYPE',
                                    help='specify type of service (database, environment, service, tool)')
        parse_subtract.add_argument('NAME',
                                    help='specify name of service')
        parse_subtract.add_argument('--metadata_path', '-m',
                                    default=default_metadata_path,
                                    help='metadata path, default '+default_metadata_path)
        # !! TODO use default, and restructure if/else in subtract
        parse_subtract.add_argument('--repository', '-r',
                                    help='specify repository to subtract service from, use localhost by default')
        parse_subtract.set_defaults(func=subtract.subtract.main)
        # test
        parse_test = subparsers.add_parser('test',
                                           help='run tests')
        parse_test.add_argument('-c',
                                action='store_true',
                                help='send to coveralls')
        parse_test.add_argument('-f',
                                action='store_true',
                                help='do not run tests')
        parse_test.add_argument('--path', '-p',
                                default=default_path,
                                help='path to test, default '+default_path)
        parse_test.set_defaults(func=test.test.main)
        # unlink
        parse_unlink = subparsers.add_parser('unlink',
                                             help='unlink a service repository')
        parse_unlink.add_argument('NAME',
                                  help='specify name of service repository to disconnect from')
        parse_unlink.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        parse_unlink.set_defaults(func=unlink.unlink.main)
        # update
        parse_update = subparsers.add_parser('update',
                                             help='update service repository hosts')
        parse_update.add_argument('--metadata_path', '-m',
                                  default=default_metadata_path,
                                  help='metadata path, default '+default_metadata_path)
        # FIX: was "'-r' '--repository'" (missing comma), which concatenated
        # the two strings into a single bogus option named '-r--repository'.
        parse_update.add_argument('-r', '--repository',
                                  help='specify service repository host to get updates from')
        parse_update.add_argument('-z',
                                  action='store_true',
                                  help='do not print any output')
        parse_update.set_defaults(func=update.update.main)
        # version
        parse_version = subparsers.add_parser('version',
                                              help='show version')
        parse_version.add_argument('-z',
                                   action='store_true',
                                   help='do not print any output')
        parse_version.set_defaults(func=version.version.main)
        args = parser.parse_args()
        # ``func`` only exists when a sub-command was chosen; without this
        # guard, running bare ``bowl`` raised AttributeError (sub-parsers are
        # optional in Python 3). Print help instead.
        func = getattr(args, 'func', None)
        if func:
            func(args)
        else:
            parser.print_help()
def main():
    # Console-script entry point.
    # NOTE(review): ``cli()`` above appears to call ``parser.parse_args()``
    # itself (see ``args = parser.parse_args()``) and does not obviously
    # return a parser, so chaining ``.parse_args()`` here looks like it
    # would raise AttributeError — confirm what ``cli`` returns.
    cli().parse_args()
if __name__ == "__main__": # pragma: no cover
    main()
| bowl/cli.py | 25,765 | This class is responsible for all commandline operations.
This module is the commandline interface of bowl.
Created on 14 March 2014
@author: Charlie Lewis
add !! TODO use default, and restructure if/else in add connect delete disconnect grant hosts images import use non-standard naming scheme to not conflict with python's import info kill link list login logout logs new remove repositories revoke services snapshot snapshots start stop subtract !! TODO use default, and restructure if/else in subtract test unlink update version pragma: no cover | 552 | en | 0.681088 |
from flask import render_template, Blueprint, request, current_app
from flask_sqlalchemy import SQLAlchemy
# from QA_api import get_traintest_images
import QA_api
from QA_config import config
from QA_db import Image, Project, Job, Roi, get_latest_modelid, get_imagetable
html = Blueprint("html", __name__, static_folder="static", template_folder="templates")
db = SQLAlchemy()
@html.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the blueprint's static folder."""
    return html.send_static_file("favicon.ico")
@html.route('/')
def index():
    """Landing page: list every project with ROI/image/object statistics."""
    # Outer joins keep projects that have no images or ROIs yet;
    # ifnull() makes the aggregate sums default to 0 in that case.
    projects = db.session.query(Project.name, Project.date, Project.iteration, Project.description, Project.id,
                                Project.images,
                                db.func.count(Roi.id).label('nROIs'),
                                (db.func.count(Roi.id) - db.func.ifnull(db.func.sum(Roi.testingROI), 0))
                                .label('nTrainingROIs'), db.func.count(db.func.distinct(Image.id)).label('nImages'),
                                db.func.ifnull(db.func.sum(db.func.distinct(Image.nobjects)), 0).label('nObjects')) \
        .outerjoin(Image, Image.projId == Project.id) \
        .outerjoin(Roi, Roi.imageId == Image.id).group_by(Project.id).all()
    return render_template("index.html", projects=projects)
@html.route('/<project_name>', methods=['GET'])
@html.route('/<project_name>/images', methods=['GET'])
def get_imagelist(project_name):
    """Render the image table for *project_name*, or the error page."""
    project = Project.query.filter_by(name=project_name).first()
    if project is None:
        return render_template("error.html")
    return render_template("images.html", project=project,
                           images=get_imagetable(project))
@html.route('/<project_name>/images/images-main', methods=['GET'])
def images_main(project_name):
    """Serve the images-main.js template for *project_name*."""
    project = Project.query.filter_by(name=project_name).first()
    # Guard clause instead of if/else: unknown project names get the error page.
    if project is None:
        return render_template("error.html")
    return render_template("images-main.js", project=project)
@html.route('/<project_name>/dataset/<type>', methods=['GET'])
def display_sample_images(project_name, type):
    """Render the requested train/test sample images for a project."""
    if Project.query.filter_by(name=project_name).first() is None:
        return render_template("error.html")
    sample_list = QA_api.get_traintest_images(project_name, type)
    return render_template("sampleimages.html", project_name=project_name,
                           imglist=sample_list)
@html.route("/<project_name>/embed", methods=['GET'])
def plotembed(project_name):
current_app.logger.info('Plotting patch embedding:')
project = Project.query.filter_by(name=project_name).first()
if not project:
current_app.logger.error('No project found.')
return render_template("error.html")
latest_modelid = get_latest_modelid(project_name)
selected_modelid = request.args.get('modelid', default=latest_modelid, type=int)
if selected_modelid > latest_modelid or selected_modelid < 0:
error_message = f"Your selected View Embed Model ID is {selected_modelid}. A valid Model ID ranges from 0 to {latest_modelid}."
current_app.logger.error(error_message)
return render_template("embed.html", project_name=project_name, data="None",
project_iteration=project.iteration, current_modelId=selected_modelid,
error_message=error_message)
return render_template("embed.html", project_name=project_name, project_iteration=project.iteration,
current_modelId=selected_modelid)
@html.route("/<project_name>/embed/embed-main.js", methods=['GET']) # --- should not need this function
def embed_main(project_name):
# Get the image list for the project
project = Project.query.filter_by(name=project_name).first()
if not project:
return render_template("error.html")
return render_template("embed-main.js", project_name=project_name)
@html.route('/<project_name>/<image_name>/annotation', methods=['GET'])
def annotation(project_name, image_name):
    """Annotation page for one image, with ROI counts and optional start point."""
    project = Project.query.filter_by(name=project_name).first()
    if not project:
        return render_template("error.html")
    # Method 1 (kept for reference: two extra queries per request)
    # image = Image.query.filter_by(projId=project.id, name=image_name).first()
    # image.nROIs = Roi.query.filter_by(imageId=image.id).count()
    # image.nTrainingROIs = Roi.query.filter_by(imageId=image.id, testingROI=0).count()
    # Method 2 (corresponding sql code)
    # SELECT image.id, count(roi.id)
    # FROM image
    # JOIN roi
    # ON roi.imageId = image.id
    # WHERE image.id = 1
    # GROUP BY image.id
    image = db.session.query(Image.id, Image.projId, Image.name, Image.path, Image.height, Image.width, Image.date,
                             Image.rois, Image.make_patches_time, Image.nobjects,
                             db.func.count(Roi.id).label('nROIs'),
                             (db.func.count(Roi.id) - db.func.ifnull(db.func.sum(Roi.testingROI), 0))
                             .label('nTrainingROIs')). \
        outerjoin(Roi, Roi.imageId == Image.id). \
        filter(Image.name == image_name).filter(Image.projId == project.id).group_by(Image.id).first()
    # "#" appears to act as a "no start coordinate" sentinel consumed by the
    # template — TODO confirm against annotation.html.
    x = request.args.get('startX', "#")
    y = request.args.get('startY', "#")
    defaultCropSize = config.getint('common', 'patchsize', fallback=256)
    return render_template("annotation.html", project=project, image=image, startX=x, startY=y,
                           defaultCropSize=defaultCropSize)
# For templates which just use the project and image name:
def rendered_project_image(template_name, project_name, image_name):
    """Render *template_name* with the project/image context and default crop size.

    Robustness/consistency fix: the other view functions in this module guard
    against unknown project names with the error page; previously an unknown
    project raised AttributeError (``project.id`` on None) and an unknown
    image passed ``image=None`` into templates that dereference it.
    """
    project = Project.query.filter_by(name=project_name).first()
    if not project:
        return render_template("error.html")
    image = Image.query.filter_by(projId=project.id, name=image_name).first()
    if not image:
        return render_template("error.html")
    defaultCropSize = config.getint('common', 'patchsize', fallback=256)
    return render_template(template_name, project=project, image=image,
                           defaultCropSize=defaultCropSize)
@html.route('/<project_name>/<image_name>/annotation-main.js', methods=['GET'])
def annotation_main(project_name, image_name):
    """Serve the annotation-main.js template for this project/image pair."""
    return rendered_project_image('annotation-main.js', project_name, image_name)
@html.route('/<project_name>/<image_name>/annotation-tool.js', methods=['GET'])
def annotation_tool(project_name, image_name):
    """Serve the annotation-tool.js template for this project/image pair."""
    return rendered_project_image('annotation-tool.js', project_name, image_name)
@html.route('/<project_name>/<image_name>/annotation-utils.js', methods=['GET'])
def annotation_utils(project_name, image_name):
    """Serve the annotation-utils.js template for this project/image pair."""
    return rendered_project_image('annotation-utils.js', project_name, image_name)
@html.route("/jobs", methods=['GET'])
@html.route("/<project_name>/jobs", methods=['GET'])
def renderprojectjob(project_name=None):
if (project_name):
proj = Project.query.filter_by(name=project_name).first()
if not proj:
return render_template("error.html")
jobs = proj.jobs
else:
jobs = Job.query.all()
return render_template('jobs.html', jobs=jobs)
| QA_html.py | 7,023 | from QA_api import get_traintest_images Get the image list for the project Get the image list for the project --- should not need this function Get the image list for the project Method 1 image = Image.query.filter_by(projId=project.id, name=image_name).first() image.nROIs = Roi.query.filter_by(imageId=image.id).count() image.nTrainingROIs = Roi.query.filter_by(imageId=image.id, testingROI=0).count() Method 2 (corresponding sql code) SELECT image.id, count(roi.id) FROM image JOIN roi ON roi.imageId = image.id WHERE image.id = 1 GROUP BY image.id For templates which just use the project and image name: | 608 | en | 0.560787 |
# -*- coding: utf-8 -*-
from django.core.cache import get_cache
from django.utils.functional import cached_property
from jinja2 import BytecodeCache as _BytecodeCache
class BytecodeCache(_BytecodeCache):
    """
    A bytecode cache for Jinja2 that uses Django's caching framework.
    """

    def __init__(self, cache_name):
        # Name of the Django cache alias that will hold compiled bytecode.
        self._cache_name = cache_name

    @cached_property
    def backend(self):
        """Resolve (once, lazily) the Django cache backend used for storage.

        ``django.core.cache.get_cache`` was deprecated in Django 1.7 and
        removed in 1.9; prefer the ``caches`` registry and fall back to the
        legacy helper on very old Django versions.
        NOTE(review): the module-level ``from django.core.cache import
        get_cache`` import will itself fail on Django >= 1.9 and needs the
        same try/except guard.
        """
        try:
            from django.core.cache import caches  # Django >= 1.7
            return caches[self._cache_name]
        except ImportError:
            return get_cache(self._cache_name)

    def load_bytecode(self, bucket):
        """Fill ``bucket`` from the cache if bytecode exists for its key."""
        key = 'jinja2_%s' % str(bucket.key)
        bytecode = self.backend.get(key)
        if bytecode:
            bucket.bytecode_from_string(bytecode)

    def dump_bytecode(self, bucket):
        """Store the bucket's compiled bytecode under its namespaced key."""
        key = 'jinja2_%s' % str(bucket.key)
        self.backend.set(key, bucket.bytecode_to_string())
| django_jinja/cache.py | 791 | A bytecode cache for Jinja2 that uses Django's caching framework.
-*- coding: utf-8 -*- | 89 | en | 0.618019 |
from django.contrib import admin
from .transitions import TransactionLog, TransitionLog, EvaluationLog
# Register your models here.
# Expose the log models in the Django admin with default ModelAdmin options.
admin.site.register(TransactionLog)
admin.site.register(TransitionLog)
admin.site.register(EvaluationLog)
from unittest import TestCase
from pyVHDLParser.Blocks.List import PortList
from pyVHDLParser.Blocks.List.PortList import PortListInterfaceSignalBlock
from pyVHDLParser.Token import WordToken, StartOfDocumentToken, SpaceToken, CharacterToken, EndOfDocumentToken
from pyVHDLParser.Blocks import StartOfDocumentBlock, EndOfDocumentBlock
from pyVHDLParser.Blocks.Common import WhitespaceBlock
from pyVHDLParser.Blocks.Structural import Entity
from tests.unit.Common import Initializer, ExpectedDataMixin, LinkingTests, TokenLinking, TokenSequence, BlockSequence, ExpectedTokenStream, ExpectedBlockStream
if __name__ == "__main__":
print("ERROR: you called a testcase declaration file as an executable module.")
print("Use: 'python -m unitest <testcase module>'")
exit(1)
def setUpModule():
	"""Run the shared test Initializer once before this module's tests."""
	Initializer()
class SimplePortList_OneLine_SinglePort(TestCase, ExpectedDataMixin, LinkingTests, TokenSequence, BlockSequence):
	"""Entity with a single-signal port list on one source line."""
	code = "entity e is port (port1 : bit); end;"
	tokenStream = ExpectedTokenStream([
		(StartOfDocumentToken, None),
		(WordToken, "entity"),
		(SpaceToken, " "),
		(WordToken, "e"),
		(SpaceToken, " "),
		(WordToken, "is"),
		(SpaceToken, " "),
		(WordToken, "port"),
		(SpaceToken, " "),
		(CharacterToken, "("),
		(WordToken, "port1"),
		(SpaceToken, " "),
		(CharacterToken, ":"),
		(SpaceToken, " "),
		(WordToken, "bit"),
		(CharacterToken, ")"),
		(CharacterToken, ";"),
		(SpaceToken, " "),
		(WordToken, "end"),
		(CharacterToken, ";"),
		(EndOfDocumentToken, None),
	])
	blockStream = ExpectedBlockStream([
		(StartOfDocumentBlock, None),
		(Entity.NameBlock, "entity e is"),
		(WhitespaceBlock, " "),
		(PortList.OpenBlock, "port ("),
		(PortListInterfaceSignalBlock, "port1 : bit"),
		(PortList.CloseBlock, ");"),
		(WhitespaceBlock, " "),
		(Entity.EndBlock, "end;"),
		(EndOfDocumentBlock, None),
	])
class SimplePortList_OneLine_DoublePort(TestCase, ExpectedDataMixin, TokenLinking, TokenSequence, BlockSequence):
	"""Entity with a two-signal port list (';'-delimited) on one source line."""
	code = "entity e is port (port1 : bit; port2 : boolean ); end;"
	tokenStream = ExpectedTokenStream([
		(StartOfDocumentToken, None),
		(WordToken, "entity"),
		(SpaceToken, " "),
		(WordToken, "e"),
		(SpaceToken, " "),
		(WordToken, "is"),
		(SpaceToken, " "),
		(WordToken, "port"),
		(SpaceToken, " "),
		(CharacterToken, "("),
		(WordToken, "port1"),
		(SpaceToken, " "),
		(CharacterToken, ":"),
		(SpaceToken, " "),
		(WordToken, "bit"),
		(CharacterToken, ";"),
		(SpaceToken, " "),
		(WordToken, "port2"),
		(SpaceToken, " "),
		(CharacterToken, ":"),
		(SpaceToken, " "),
		(WordToken, "boolean"),
		(SpaceToken, " "),
		(CharacterToken, ")"),
		(CharacterToken, ";"),
		(SpaceToken, " "),
		(WordToken, "end"),
		(CharacterToken, ";"),
		(EndOfDocumentToken, None),
	])
	blockStream = ExpectedBlockStream([
		(StartOfDocumentBlock, None),
		(Entity.NameBlock, "entity e is"),
		(WhitespaceBlock, " "),
		(PortList.OpenBlock, "port ("),
		(PortListInterfaceSignalBlock, "port1 : bit"),
		(PortList.DelimiterBlock, ";"),
		# Note: the trailing space in the source is part of this block's text.
		(PortListInterfaceSignalBlock, "port2 : boolean "),
		(PortList.CloseBlock, ");"),
		(WhitespaceBlock, " "),
		(Entity.EndBlock, "end;"),
		(EndOfDocumentBlock, None),
	])
| tests/unit/SimpleBlockSequences/PortList.py | 4,296 | entity e is port ( port1 : bit ); end; entity e is port ( port1 : bit ; port2 : boolean ); end; | 95 | en | 0.585833 |
# Future
from __future__ import division, print_function, unicode_literals
# Standard Library
import time
# Third Party
import pytest
import ratelimit
# DocumentCloud
from documentcloud.constants import RATE_LIMIT
from documentcloud.exceptions import APIError, CredentialsFailedError
# pylint: disable=protected-access
def test_set_tokens_credentials(client):
    """Test setting the tokens using credentials"""
    # Clear any previous auth state so _set_tokens must fall back to
    # username/password credentials.
    client.refresh_token = None
    del client.session.headers["Authorization"]
    client._set_tokens()
    assert client.refresh_token
    assert "Authorization" in client.session.headers
def test_set_tokens_refresh(client):
    """Test setting the tokens using refresh token"""
    # The first _set_tokens call stores a refresh token; the second uses it.
    client.refresh_token = None
    del client.session.headers["Authorization"]
    client._set_tokens()
    client._set_tokens()
    assert client.refresh_token
    assert "Authorization" in client.session.headers
def test_set_tokens_none(public_client):
    """Test setting the tokens with no credentials"""
    # An anonymous client must stay unauthenticated after _set_tokens.
    public_client._set_tokens()
    assert public_client.refresh_token is None
    assert "Authorization" not in public_client.session.headers
def test_get_tokens(client):
    """Test getting access and refresh tokens using valid credentials"""
    access_token, refresh_token = client._get_tokens(client.username, client.password)
    assert access_token
    assert refresh_token
def test_get_tokens_bad_credentials(client):
    """Test getting access and refresh tokens using invalid credentials"""
    # A wrong password must raise rather than return unusable tokens.
    with pytest.raises(CredentialsFailedError):
        client._get_tokens(client.username, "foo")
def test_refresh_tokens(client):
    """Test refreshing the tokens"""
    new_access, new_refresh = client._refresh_tokens(client.refresh_token)
    assert new_access
    assert new_refresh
def test_user_id(client):
    """An authenticated client can resolve its own user id."""
    assert client.user_id
def test_user_id_public(public_client):
    """An anonymous client has no user id; the API answers 404."""
    # pylint: disable=pointless-statement
    with pytest.raises(APIError, match=r"404"):
        public_client.user_id
def test_bad_attr(client):
    """Unknown client attributes raise AttributeError instead of returning None."""
    with pytest.raises(AttributeError):
        assert client.foo
def test_rate_limit(rate_client):
    """Exceeding the client-side rate limit raises RateLimitException."""
    attempts = 2 * RATE_LIMIT
    with pytest.raises(ratelimit.RateLimitException):
        for _ in range(attempts):
            rate_client.users.get("me")
@pytest.mark.short
@pytest.mark.vcr(cassette_library_dir="tests/cassettes/short_fixtures")
def test_expired_access_token(short_client, record_mode):
    """A request transparently refreshes an expired access token."""
    # get fresh tokens
    short_client._set_tokens()
    old_refresh_token = short_client.refresh_token
    # wait for the access token to expire (only when re-recording cassettes)
    if record_mode == "all":
        time.sleep(3)
    # make a request
    assert short_client.users.get("me")
    # check the refresh token was updated
    assert old_refresh_token != short_client.refresh_token
@pytest.mark.short
@pytest.mark.vcr(cassette_library_dir="tests/cassettes/short_fixtures")
def test_expired_refresh_token(short_client, record_mode):
    """A request re-authenticates when both tokens have expired."""
    # get fresh tokens
    short_client._set_tokens()
    old_refresh_token = short_client.refresh_token
    # wait for the access and refresh tokens to expire (only when re-recording)
    if record_mode == "all":
        time.sleep(6)
    # make a request
    assert short_client.users.get("me")
    # check the refresh token was updated
    assert old_refresh_token != short_client.refresh_token
| tests/test_client.py | 3,349 | Test getting access and refresh tokens using valid credentials
Test getting access and refresh tokens using invalid credentials
Test refreshing the tokens
Test setting the tokens using credentials
Test setting the tokens with no credentials
Test setting the tokens using refresh token
Future Standard Library Third Party DocumentCloud pylint: disable=protected-access first set tokens sets, refresh token, second one uses it pylint: disable=pointless-statement get fresh tokens wait for the access token to expire make a request check the refresh token was updated get fresh tokens wait for the access and refresh tokens to expire make a request check the refresh token was updated | 683 | en | 0.731561 |
# -*- coding: utf-8 -*-
"""
Esendex sms gateway backend. (http://www.esendex.es/)
Configuration example.
~~~~~~~~~~~~~~~~~~~~~~
Modify your settings.py::
ESENDEX_USERNAME = 'yourusername'
ESENDEX_PASSWORD = 'mysecretpassword'
ESENDEX_ACCOUNT = 'account-key-provided-by-esendex'
ESENDEX_SANDBOX = False # True if yo like test first
INSTALLED_APPS += ['sendsms']
Usage::
from sendsms.message import SmsMessage
message = SmsMessage(
body = 'my 160 chars sms',
from_phone = '111111111',
to = ['222222222']
)
message.send()
"""
from django.conf import settings
import requests
from .base import BaseSmsBackend
ESENDEX_API_URL = "https://www.esendex.com/secure/messenger/formpost/SendSMS.aspx"
ESENDEX_USERNAME = getattr(settings, "ESENDEX_USERNAME", "")
ESENDEX_PASSWORD = getattr(settings, "ESENDEX_PASSWORD", "")
ESENDEX_ACCOUNT = getattr(settings, "ESENDEX_ACCOUNT", "")
ESENDEX_SANDBOX = getattr(settings, "ESENDEX_SANDBOX", False)
class SmsBackend(BaseSmsBackend):
    """
    SMS backend for the esendex.es provider.

    The ``get_*`` accessors exist to ease subclassing: when a project's
    credentials are dynamic (e.g. stored in a database), a child class
    overrides them to return those values instead of the settings.
    """

    def get_username(self):
        """Return the Esendex account username."""
        return ESENDEX_USERNAME

    def get_password(self):
        """Return the Esendex account password."""
        return ESENDEX_PASSWORD

    def get_account(self):
        """Return the Esendex account key."""
        return ESENDEX_ACCOUNT

    def _parse_response(self, response):
        """
        Parse a raw ``key=value`` response body into a dict.

        :param str response: http response body (one ``key=value`` per line)
        :returns: response dict
        :rtype: dict
        """
        response_dict = {}
        for line in response.splitlines():
            # Bug fix: split each *line*, not the whole response body.
            # Splitting ``response`` mis-parsed multi-line replies (every
            # key was mapped to the remainder of the entire body).
            key, value = line.split("=", 1)
            response_dict[key] = value
        return response_dict

    def _send(self, message):
        """
        Private method to send one message.

        :param SmsMessage message: SmsMessage class instance.
        :returns: True if message is sent else False
        :rtype: bool
        """
        params = {
            "EsendexUsername": self.get_username(),
            "EsendexPassword": self.get_password(),
            "EsendexAccount": self.get_account(),
            "EsendexOriginator": message.from_phone,
            "EsendexRecipient": ",".join(message.to),
            "EsendexBody": message.body,
            "EsendexPlainText": "1",
        }
        if ESENDEX_SANDBOX:
            # Test mode: Esendex validates the request but delivers nothing.
            params["EsendexTest"] = "1"

        response = requests.post(ESENDEX_API_URL, params)
        if response.status_code != 200:
            if not self.fail_silently:
                raise Exception("Bad status code")
            else:
                return False

        # A well-formed reply always starts with the "Result" key.
        if not response.content.startswith(b"Result"):
            if not self.fail_silently:
                raise Exception("Bad result")
            else:
                return False

        response = self._parse_response(response.content.decode("utf8"))

        # In sandbox mode a successful call answers Result=Test.
        if ESENDEX_SANDBOX and response["Result"] == "Test":
            return True
        else:
            if response["Result"].startswith("OK"):
                return True

            if not self.fail_silently:
                raise Exception("Bad result")
            return False

    def send_messages(self, messages):
        """
        Send messages.

        :param list messages: List of SmsMessage instances.
        :returns: number of messages sent successfully.
        :rtype: int
        """
        counter = 0
        for message in messages:
            if self._send(message):
                counter += 1
        return counter
| sendsms/backends/esendex.py | 3,824 | SMS Backend for esendex.es provider.
The methods "get_xxxxxx" serve to facilitate the inheritance. Thus if a private
project in the access data are dynamic, and are stored in the database. A child
class overrides the method "get_xxxx" to return data stored in the database.
Parse http raw respone into python
dictionary object.
:param str response: http response
:returns: response dict
:rtype: dict
Private method to send one message.
:param SmsMessage message: SmsMessage class instance.
:returns: True if message is sent else False
:rtype: bool
Send messages.
:param list messages: List of SmsMessage instances.
:returns: number of messages sended successful.
:rtype: int
Esendex sms gateway backend. (http://www.esendex.es/)
Configuration example.
~~~~~~~~~~~~~~~~~~~~~~
Modify your settings.py::
ESENDEX_USERNAME = 'yourusername'
ESENDEX_PASSWORD = 'mysecretpassword'
ESENDEX_ACCOUNT = 'account-key-provided-by-esendex'
ESENDEX_SANDBOX = False # True if yo like test first
INSTALLED_APPS += ['sendsms']
Usage::
from sendsms.message import SmsMessage
message = SmsMessage(
body = 'my 160 chars sms',
from_phone = '111111111',
to = ['222222222']
)
message.send()
-*- coding: utf-8 -*- | 1,266 | en | 0.56139 |
import torch
class FGM():
    """Fast Gradient Method adversarial-training helper.

    ``attack`` perturbs the matching embedding parameters of ``model`` along
    their gradient direction; ``restore`` puts the original values back.
    """

    def __init__(self, model):
        self.model = model
        self.backup = {}  # parameter name -> saved clean tensor data

    def attack(self, epsilon=1e-6, emb_name='embed'):
        # ``emb_name`` must be (a substring of) the embedding parameter name
        # used by your model.
        for param_name, param in self.model.named_parameters():
            if not param.requires_grad or emb_name not in param_name:
                continue
            self.backup[param_name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            if grad_norm == 0 or torch.isnan(grad_norm):
                continue  # zero/NaN gradient: nothing sensible to perturb with
            param.data.add_(epsilon * param.grad / grad_norm)

    def restore(self, emb_name='embed'):
        # ``emb_name`` must match the name used in ``attack``.
        for param_name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in param_name:
                assert param_name in self.backup
                param.data = self.backup[param_name]
        self.backup = {}
class PGD():
    """Projected Gradient Descent adversarial-training helper.

    ``attack`` repeatedly perturbs the matching embedding parameters along
    their gradients, projecting each step back onto an epsilon-ball around
    the clean weights; ``restore`` puts the originals back.  ``backup_grad``
    / ``restore_grad`` snapshot and reinstate all parameter gradients.
    """

    def __init__(self, model):
        self.model = model
        self.emb_backup = {}   # parameter name -> clean embedding data
        self.grad_backup = {}  # parameter name -> saved gradients

    def attack(self, epsilon=1e-6, alpha=0.3, emb_name='embed', is_first_attack=False):
        # ``emb_name`` must be (a substring of) the embedding parameter name
        # used by your model.
        for param_name, param in self.model.named_parameters():
            if not param.requires_grad or emb_name not in param_name:
                continue
            if is_first_attack:
                # Only snapshot the clean weights on the first attack step.
                self.emb_backup[param_name] = param.data.clone()
            grad_norm = torch.norm(param.grad)
            if grad_norm != 0 and not torch.isnan(grad_norm):
                param.data.add_(alpha * param.grad / grad_norm)
                param.data = self.project(param_name, param.data, epsilon)

    def restore(self, emb_name='embed'):
        # ``emb_name`` must match the name used in ``attack``.
        for param_name, param in self.model.named_parameters():
            if param.requires_grad and emb_name in param_name:
                assert param_name in self.emb_backup
                param.data = self.emb_backup[param_name]
        self.emb_backup = {}

    def project(self, param_name, param_data, epsilon):
        """Clip ``param_data`` back onto the epsilon-ball around the backup."""
        offset = param_data - self.emb_backup[param_name]
        if torch.norm(offset) > epsilon:
            offset = epsilon * offset / torch.norm(offset)
        return self.emb_backup[param_name] + offset

    def backup_grad(self):
        for param_name, param in self.model.named_parameters():
            if param.requires_grad:
                self.grad_backup[param_name] = param.grad.clone()

    def restore_grad(self):
        for param_name, param in self.model.named_parameters():
            if param.requires_grad:
                param.grad = self.grad_backup[param_name]
| modules.py | 2,716 | emb_name这个参数要换成你模型中embedding的参数名 emb_name这个参数要换成你模型中embedding的参数名 emb_name这个参数要换成你模型中embedding的参数名 print (name) emb_name这个参数要换成你模型中embedding的参数名 | 144 | zh | 0.749797 |
import unittest
import numpy as np
import numpy.testing as npt
import wisdem.drivetrainse.gearbox as gb
class TestGearbox(unittest.TestCase):
    """Unit tests for wisdem.drivetrainse.gearbox.Gearbox sizing outputs."""

    def setUp(self):
        self.inputs = {}
        self.outputs = {}
        self.discrete_inputs = {}
        self.discrete_outputs = {}

        # 5MW reference inputs
        self.discrete_inputs["gear_configuration"] = "eep"
        self.discrete_inputs["shaft_factor"] = "normal"
        self.discrete_inputs["planet_numbers"] = [3, 3, 0]
        self.inputs["gear_ratio"] = 97.0
        self.inputs["rotor_diameter"] = 126.0
        self.inputs["rated_torque"] = 3946e3
        self.inputs["machine_rating"] = 5e3
        self.myobj = gb.Gearbox(direct_drive=False)

    def _compute(self):
        # Run the component on the current input/output dictionaries.
        self.myobj.compute(self.inputs, self.outputs, self.discrete_inputs, self.discrete_outputs)

    def _check_common_outputs(self, rotor_diameter):
        # Assertions shared by every geared configuration (DRY: these were
        # previously copy-pasted into each test method).
        self.assertAlmostEqual(np.prod(self.outputs["stage_ratios"]), self.inputs["gear_ratio"], 1)
        mass = self.outputs["gearbox_mass"]
        D = self.outputs["D_gearbox"]
        L = self.outputs["L_gearbox"]
        # Axial inertia of a solid cylinder, then the two transverse terms.
        npt.assert_equal(self.outputs["gearbox_I"][0], 0.5 * mass * 0.25 * D ** 2)
        npt.assert_almost_equal(
            self.outputs["gearbox_I"][1:],
            mass * (0.75 * D ** 2 + L ** 2) / 12.0,
        )
        # Length and diameter scale linearly with rotor diameter.
        self.assertEqual(L, 0.012 * rotor_diameter)
        self.assertEqual(D, 0.75 * 0.015 * rotor_diameter)

    def testDirectDrive(self):
        """Direct-drive layout has no gearbox: every output must be zero."""
        self.myobj = gb.Gearbox(direct_drive=True)
        self._compute()
        npt.assert_equal(self.outputs["stage_ratios"], 0.0)
        self.assertEqual(self.outputs["gearbox_mass"], 0.0)
        npt.assert_equal(self.outputs["gearbox_I"], 0.0)
        self.assertEqual(self.outputs["L_gearbox"], 0.0)
        self.assertEqual(self.outputs["D_gearbox"], 0.0)

    def testEEP(self):
        self._compute()
        print("eep", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self._check_common_outputs(126.0)

    def testEEP3(self):
        """eep_3 pins the final stage ratio to exactly 3."""
        self.discrete_inputs["gear_configuration"] = "eep_3"
        self._compute()
        print("eep3", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self.assertEqual(self.outputs["stage_ratios"][-1], 3.0)
        self._check_common_outputs(126.0)

    def testEEP2(self):
        """eep_2 pins the final stage ratio to exactly 2."""
        self.discrete_inputs["gear_configuration"] = "eep_2"
        self._compute()
        print("eep2", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self.assertEqual(self.outputs["stage_ratios"][-1], 2.0)
        self._check_common_outputs(126.0)

    def testEEP_planet4_1(self):
        """Four planets in the first stage."""
        self.discrete_inputs["gear_configuration"] = "eep"
        self.discrete_inputs["planet_numbers"] = [4, 3, 0]
        self._compute()
        print("eep_4-1", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self._check_common_outputs(126.0)

    def testEEP_planet4_2(self):
        """Four planets in the second stage."""
        self.discrete_inputs["gear_configuration"] = "eep"
        self.discrete_inputs["planet_numbers"] = [3, 4, 0]
        self._compute()
        print("eep_4-2", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self._check_common_outputs(126.0)

    def testEPP(self):
        self.discrete_inputs["gear_configuration"] = "epp"
        self._compute()
        print("epp", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self._check_common_outputs(126.0)

    def testLargeMachine(self):
        self.inputs["gear_ratio"] = 200.0
        self.inputs["rotor_diameter"] = 200.0
        # NOTE(review): setUp uses the key "rated_torque"; this "rotor_torque"
        # key looks like it was meant to update that input — confirm.
        self.inputs["rotor_torque"] = 10e3
        self._compute()
        print("large", self.outputs["stage_ratios"], self.outputs["gearbox_mass"])
        self._check_common_outputs(200.0)
def suite():
    """Build the unittest suite for this module."""
    suite = unittest.TestSuite()
    # unittest.makeSuite was deprecated in Python 3.11 and removed in 3.13;
    # the TestLoader API is the supported, behaviorally equivalent replacement.
    suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(TestGearbox))
    return suite


if __name__ == "__main__":
    result = unittest.TextTestRunner().run(suite())

    if result.wasSuccessful():
        exit(0)
    else:
        exit(1)
| WISDEM/wisdem/test/test_drivetrainse/test_gearbox.py | 8,440 | 5MW inputs self.assertEqual(self.outputs['gearbox_mass'], 0.0) self.assertEqual(self.outputs['gearbox_mass'], 0.0) self.assertEqual(self.outputs['gearbox_mass'], 0.0) self.assertEqual(self.outputs['gearbox_mass'], 0.0) self.assertEqual(self.outputs['gearbox_mass'], 0.0) self.assertEqual(self.outputs['gearbox_mass'], 0.0) self.assertEqual(self.outputs['gearbox_mass'], 0.0) | 374 | en | 0.202779 |
# client-pypeln-pl.task.py
# Benchmark client: fire N HTTP GETs at a local server with bounded
# concurrency using pypeln's asyncio task stage.
from aiohttp import ClientSession, TCPConnector
import asyncio
import sys
import pypeln as pl
# Upper bound on concurrently running workers (in-flight requests).
limit = 1000
# One URL per request id; the total request count comes from argv[1].
urls = ("http://localhost:8080/{}".format(i) for i in range(int(sys.argv[1])))
async def fetch(url, session):
    # GET the URL and read the whole body so the connection can be reused.
    async with session.get(url) as response:
        return await response.read()
pl.task.each(
    fetch,
    urls,
    workers=limit,
    # Share one aiohttp session (with no per-host connection cap) across
    # all workers; close it when the stage finishes.
    on_start=lambda: dict(session=ClientSession(connector=TCPConnector(limit=None))),
    on_done=lambda session: session.close(),
    run=True,
)
| benchmarks/100_million_downloads/client-pypeln-io.py | 534 | client-pypeln-pl.task.py | 24 | ar | 0.121497 |
from __future__ import annotations
import time
import disnake
from disnake.ext import commands
from PIL import Image
from main import tracked_templates
from utils.arguments_parser import MyParser
from utils.pxls.template_manager import (
Combo,
layer,
)
from utils.setup import stats
from utils.discord_utils import image_to_file, CreateTemplateView
class Layer(commands.Cog):
    """Commands for layering several pxls templates into one image."""

    def __init__(self, bot: commands.Bot) -> None:
        self.bot: commands.Bot = bot

    @commands.slash_command(name="layer")
    async def _layer(
        self,
        inter: disnake.AppCmdInter,
        templates: str,
    ):
        """Layer several templates.
        Parameters
        ----------
        templates: List of templates (URL or name) separated by a space (last goes above).
        """
        await inter.response.defer()
        # The slash option arrives as one string; split it into individual
        # template names/URLs.
        template_uris = templates.split(" ")
        await self.layer(inter, template_uris)

    @commands.command(
        name="layer",
        description="Layer several templates.",
        usage="<templates>",
        help="""
        - `<templates>`: List of templates (URL or name) separated by a space (last goes above).
        """,
    )
    async def p_layer(self, ctx, *args):
        # Prefix-command variant of /layer: parse the free-form arguments.
        parser = MyParser(add_help=False)
        parser.add_argument("templates", nargs="+")

        try:
            parsed_args, _ = parser.parse_known_args(args)
            template_uris = parsed_args.templates
        except ValueError as e:
            return await ctx.send(f"❌ {e}")
        async with ctx.typing():
            await self.layer(ctx, template_uris)

    @staticmethod
    async def layer(ctx, template_uris):
        """Fetch the templates, stack them (last on top) and send the result."""
        try:
            templates = await tracked_templates.get_templates(template_uris)
        except ValueError as e:
            return await ctx.send(f"❌ {e}")
        start = time.time()
        # Inside this function, ``layer`` resolves to the module-level helper
        # imported from template_manager, not to this static method.
        ox, oy, palettized_array = layer(templates)
        if palettized_array.size == 0:
            return await ctx.send("❌ No placeable pixels in the layered template.")
        img = Image.fromarray(stats.palettize_array(palettized_array))
        end = time.time()
        embed = disnake.Embed(color=0x66C5CC, title="Layered")
        embed.set_footer(text=f"Layered in {round((end-start),3)}s")
        file = await image_to_file(img, "layered.png", embed)
        # Use the combo object here because it doesn't generate a placeable mask
        template = Combo(None, palettized_array, ox, oy, None, None, None)
        view = CreateTemplateView(ctx, template)
        m = await ctx.send(file=file, embed=embed, view=view)
        # save the URL of the image sent to use it to generate templates later
        if isinstance(ctx, disnake.AppCmdInter):
            m = await ctx.original_message()
        view.template_image_url = m.embeds[0].image.url
        view.message = m
def setup(bot: commands.Bot):
    """disnake extension entry point: register the Layer cog on the bot."""
    bot.add_cog(Layer(bot))
| src/cogs/pxls_template/layer.py | 2,968 | Remove unused entries, equal to None Use the combo object here because it doesn't generate a placeable mask save the URL of the image sent to use it to generate templates later | 176 | en | 0.82395 |
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2010
"cuda"
import os
from waflib import Task
from waflib.TaskGen import extension
from waflib.Tools import ccroot, c_preproc
from waflib.Configure import conf
class cuda(Task.Task):
    """Waf task that compiles a single .cu/.cuda source file with nvcc."""
    # Command template; ${...} variables are expanded from the build env.
    run_str = '${NVCC} ${CUDAFLAGS} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT}'
    color = 'GREEN'
    # Headers are consumed as inputs; dependencies found via the C preprocessor scanner.
    ext_in = ['.h']
    vars = ['CCDEPS']
    scan = c_preproc.scan
    shell = False
@extension('.cu', '.cuda')
def c_hook(self, node):
    # Map CUDA sources onto the 'cuda' task class (mirrors waf's C/C++ hooks).
    return self.create_compiled_task('cuda', node)
def configure(conf):
    # Locate nvcc, then probe for the CUDA headers and libraries.
    conf.find_program('nvcc', var='NVCC')
    conf.find_cuda_libs()
@conf
def find_cuda_libs(self):
    """
    Find the CUDA include and library folders.

    use ctx.program(source='main.c', target='app', use='CUDA CUDART')
    """
    if not self.env.NVCC:
        self.fatal('check for nvcc first')
    # Assume the layout <cuda-root>/bin/nvcc, so two parents up is the root.
    d = self.root.find_node(self.env.NVCC[0]).parent.parent
    node = d.find_node('include')
    _includes = node.abspath() if node else ''
    _libpath = []
    # Collect whichever lib directories exist.  find_node() returns None for
    # a missing path; the old code relied on the resulting AttributeError and
    # swallowed it with a bare `except:`, which also hid real errors (and
    # KeyboardInterrupt) -- test explicitly instead.
    for x in ('lib64', 'lib'):
        libdir = d.find_node(x)
        if libdir:
            _libpath.append(libdir.abspath())
    self.check_cxx(header='cuda.h', lib='cuda', libpath=_libpath, includes=_includes)
    self.check_cxx(header='cuda.h', lib='cudart', libpath=_libpath, includes=_includes)
| Firmware/ardupilot/modules/waf/playground/cuda/cuda.py | 1,357 | find the cuda include and library folders
use ctx.program(source='main.c', target='app', use='CUDA CUDART')
cuda
!/usr/bin/env python encoding: utf-8 Thomas Nagy, 2010 this should not raise any error | 201 | en | 0.483151 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django import template
from floreal import models as m
register = template.Library()
@register.filter
def price(f):
    """Render *f* as a euro amount with two decimal places."""
    return u"{0:.2f}€".format(f)
@register.filter
def price_nocurrency(f):
    """Render *f* with two decimal places and no currency sign."""
    return u"{0:.2f}".format(f)
@register.filter
def weight(w):
    """Render a weight: kilograms when >= 1 kg, otherwise grams."""
    if w < 1:
        return u"{0:d}g".format(int(w * 1000))
    return u"{0:.2g}kg".format(w)
@register.filter
def email(u):
    """Format a user object as a '"First Last" <address>' e-mail string."""
    return '"{0} {1}" <{2}>'.format(u.first_name, u.last_name, u.email)
@register.filter
def unit_multiple(unit):
    """Prefix a unit with '×' when it starts with a digit, else with a space."""
    prefix = u"×" if unit[0].isdigit() else u" "
    return prefix + unit
@register.filter
def subgroup_state(sg, dv):
    # Look up the per-subgroup state recorded for this delivery; fall back to
    # the model's default when no row exists yet.
    x = dv.subgroupstatefordelivery_set.filter(delivery=dv, subgroup=sg)
    return x[0].state if x else m.SubgroupStateForDelivery.DEFAULT
@register.filter
def subgroup_has_purchases(sg, dv):
    # True when any member of subgroup `sg` bought something in delivery `dv`.
    return m.Purchase.objects.filter(product__delivery_id=dv,
                                     user__in=m.Subgroup.objects.get(pk=sg).users.all()).exists()
@register.filter
def order(dv, u):
    # Build an in-memory Order summarizing user `u`'s purchases in delivery `dv`.
    return m.Order(u, dv)
| floreal/templatetags/floreal_filters.py | 1,059 | !/usr/bin/python -*- coding: utf-8 -*- | 38 | en | 0.437977 |
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import hashlib
import os
import os.path
import shutil
import tempfile
import jinja2
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import netutils
import six
from ironic.common import exception
from ironic.common import utils
from ironic.tests import base
CONF = cfg.CONF
class BareMetalUtilsTestCase(base.TestCase):
    """Tests for the symlink helper in ironic.common.utils."""

    def test_create_link(self):
        with mock.patch.object(os, "symlink", autospec=True) as symlink_mock:
            symlink_mock.return_value = None
            utils.create_link_without_raise("/fake/source", "/fake/link")
            symlink_mock.assert_called_once_with("/fake/source", "/fake/link")

    def test_create_link_EEXIST(self):
        # EEXIST (link already present) must be swallowed by the helper.
        with mock.patch.object(os, "symlink", autospec=True) as symlink_mock:
            symlink_mock.side_effect = OSError(errno.EEXIST)
            utils.create_link_without_raise("/fake/source", "/fake/link")
            symlink_mock.assert_called_once_with("/fake/source", "/fake/link")
class ExecuteTestCase(base.TestCase):
    """Tests for utils.execute's locale and root-helper plumbing around
    oslo's processutils.execute."""

    @mock.patch.object(processutils, 'execute', autospec=True)
    @mock.patch.object(os.environ, 'copy', return_value={}, autospec=True)
    def test_execute_use_standard_locale_no_env_variables(self, env_mock,
                                                          execute_mock):
        utils.execute('foo', use_standard_locale=True)
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_use_standard_locale_with_env_variables(self,
                                                            execute_mock):
        # LC_ALL=C must be merged into caller-supplied env variables.
        utils.execute('foo', use_standard_locale=True,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'LC_ALL': 'C',
                                                            'foo': 'bar'})

    @mock.patch.object(processutils, 'execute', autospec=True)
    def test_execute_not_use_standard_locale(self, execute_mock):
        utils.execute('foo', use_standard_locale=False,
                      env_variables={'foo': 'bar'})
        execute_mock.assert_called_once_with('foo',
                                             env_variables={'foo': 'bar'})

    def test_execute_get_root_helper(self):
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            helper = utils._get_root_helper()
            utils.execute('foo', run_as_root=True)
            execute_mock.assert_called_once_with('foo', run_as_root=True,
                                                 root_helper=helper)

    def test_execute_without_root_helper(self):
        with mock.patch.object(
                processutils, 'execute', autospec=True) as execute_mock:
            utils.execute('foo', run_as_root=False)
            execute_mock.assert_called_once_with('foo', run_as_root=False)
class GenericUtilsTestCase(base.TestCase):
    """Tests for the generic helpers in ironic.common.utils: hashing,
    hostname/MAC/datapath-id validation and small string utilities."""

    @mock.patch.object(utils, 'hashlib', autospec=True)
    def test__get_hash_object(self, hashlib_mock):
        algorithms_available = ('md5', 'sha1', 'sha224',
                                'sha256', 'sha384', 'sha512')
        hashlib_mock.algorithms_guaranteed = algorithms_available
        hashlib_mock.algorithms = algorithms_available
        # | WHEN |
        utils._get_hash_object('md5')
        utils._get_hash_object('sha1')
        utils._get_hash_object('sha224')
        utils._get_hash_object('sha256')
        utils._get_hash_object('sha384')
        utils._get_hash_object('sha512')
        # | THEN |
        calls = [mock.call.md5(), mock.call.sha1(), mock.call.sha224(),
                 mock.call.sha256(), mock.call.sha384(), mock.call.sha512()]
        hashlib_mock.assert_has_calls(calls)

    def test__get_hash_object_throws_for_invalid_or_unsupported_hash_name(
            self):
        # | WHEN | & | THEN |
        self.assertRaises(exception.InvalidParameterValue,
                          utils._get_hash_object,
                          'hickory-dickory-dock')

    def test_hash_file_for_md5(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.md5(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object)  # using default, 'md5'
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_md5_not_binary(self):
        # Text-mode input must be encoded as UTF-8 before hashing.
        # | GIVEN |
        data = u'Mary had a little lamb, its fleece as white as sno\u0449'
        file_like_object = six.StringIO(data)
        expected = hashlib.md5(data.encode('utf-8')).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object)  # using default, 'md5'
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_sha1(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.sha1(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object, 'sha1')
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_for_sha512(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        expected = hashlib.sha512(data).hexdigest()
        # | WHEN |
        actual = utils.hash_file(file_like_object, 'sha512')
        # | THEN |
        self.assertEqual(expected, actual)

    def test_hash_file_throws_for_invalid_or_unsupported_hash(self):
        # | GIVEN |
        data = b'Mary had a little lamb, its fleece as white as snow'
        file_like_object = six.BytesIO(data)
        # | WHEN | & | THEN |
        self.assertRaises(exception.InvalidParameterValue, utils.hash_file,
                          file_like_object, 'hickory-dickory-dock')

    def test_file_has_content_equal(self):
        data = b'Mary had a little lamb, its fleece as white as snow'
        ref = data
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertTrue(utils.file_has_content('foo', ref))
            mopen.assert_called_once_with('foo', 'rb')

    def test_file_has_content_equal_not_binary(self):
        data = u'Mary had a little lamb, its fleece as white as sno\u0449'
        ref = data
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertTrue(utils.file_has_content('foo', ref))
            mopen.assert_called_once_with('foo', 'rb')

    def test_file_has_content_differ(self):
        data = b'Mary had a little lamb, its fleece as white as snow'
        ref = data + b'!'
        with mock.patch('ironic.common.utils.open',
                        mock.mock_open(read_data=data)) as mopen:
            self.assertFalse(utils.file_has_content('foo', ref))
            mopen.assert_called_once_with('foo', 'rb')

    def test_is_valid_datapath_id(self):
        # A datapath id is exactly 16 lowercase-or-uppercase hex digits.
        self.assertTrue(utils.is_valid_datapath_id("525400cf2d319fdf"))
        self.assertTrue(utils.is_valid_datapath_id("525400CF2D319FDF"))
        self.assertFalse(utils.is_valid_datapath_id("52"))
        self.assertFalse(utils.is_valid_datapath_id("52:54:00:cf:2d:31"))
        self.assertFalse(utils.is_valid_datapath_id("notadatapathid00"))
        self.assertFalse(utils.is_valid_datapath_id("5525400CF2D319FDF"))

    def test_is_hostname_safe(self):
        self.assertTrue(utils.is_hostname_safe('spam'))
        self.assertFalse(utils.is_hostname_safe('spAm'))
        self.assertFalse(utils.is_hostname_safe('SPAM'))
        self.assertFalse(utils.is_hostname_safe('-spam'))
        self.assertFalse(utils.is_hostname_safe('spam-'))
        self.assertTrue(utils.is_hostname_safe('spam-eggs'))
        self.assertFalse(utils.is_hostname_safe('spam_eggs'))
        self.assertFalse(utils.is_hostname_safe('spam eggs'))
        self.assertTrue(utils.is_hostname_safe('spam.eggs'))
        self.assertTrue(utils.is_hostname_safe('9spam'))
        self.assertTrue(utils.is_hostname_safe('spam7'))
        self.assertTrue(utils.is_hostname_safe('br34kf4st'))
        self.assertFalse(utils.is_hostname_safe('$pam'))
        self.assertFalse(utils.is_hostname_safe('egg$'))
        self.assertFalse(utils.is_hostname_safe('spam#eggs'))
        self.assertFalse(utils.is_hostname_safe(' eggs'))
        self.assertFalse(utils.is_hostname_safe('spam '))
        self.assertTrue(utils.is_hostname_safe('s'))
        self.assertTrue(utils.is_hostname_safe('s' * 63))
        self.assertFalse(utils.is_hostname_safe('s' * 64))
        self.assertFalse(utils.is_hostname_safe(''))
        self.assertFalse(utils.is_hostname_safe(None))
        # Need to ensure a binary response for success or fail
        self.assertIsNotNone(utils.is_hostname_safe('spam'))
        self.assertIsNotNone(utils.is_hostname_safe('-spam'))
        self.assertTrue(utils.is_hostname_safe('www.rackspace.com'))
        self.assertTrue(utils.is_hostname_safe('www.rackspace.com.'))
        self.assertTrue(utils.is_hostname_safe('http._sctp.www.example.com'))
        self.assertTrue(utils.is_hostname_safe('mail.pets_r_us.net'))
        self.assertTrue(utils.is_hostname_safe('mail-server-15.my_host.org'))
        self.assertFalse(utils.is_hostname_safe('www.nothere.com_'))
        self.assertFalse(utils.is_hostname_safe('www.nothere_.com'))
        self.assertFalse(utils.is_hostname_safe('www..nothere.com'))
        long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
        self.assertTrue(utils.is_hostname_safe(long_str))
        self.assertFalse(utils.is_hostname_safe(long_str + '.'))
        self.assertFalse(utils.is_hostname_safe('a' * 255))

    def test_is_valid_logical_name(self):
        valid = (
            'spam', 'spAm', 'SPAM', 'spam-eggs', 'spam.eggs', 'spam_eggs',
            'spam~eggs', '9spam', 'spam7', '~spam', '.spam', '.~-_', '~',
            'br34kf4st', 's', 's' * 63, 's' * 255)
        invalid = (
            ' ', 'spam eggs', '$pam', 'egg$', 'spam#eggs',
            ' eggs', 'spam ', '', None, 'spam%20')
        for hostname in valid:
            result = utils.is_valid_logical_name(hostname)
            # Need to ensure a binary response for success. assertTrue
            # is too generous, and would pass this test if, for
            # instance, a regex Match object were returned.
            self.assertIs(result, True,
                          "%s is unexpectedly invalid" % hostname)
        for hostname in invalid:
            result = utils.is_valid_logical_name(hostname)
            # Need to ensure a binary response for
            # success. assertFalse is too generous and would pass this
            # test if None were returned.
            self.assertIs(result, False,
                          "%s is unexpectedly valid" % hostname)

    def test_validate_and_normalize_mac(self):
        mac = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(netutils, 'is_valid_mac',
                               autospec=True) as m_mock:
            m_mock.return_value = True
            self.assertEqual(mac.lower(),
                             utils.validate_and_normalize_mac(mac))

    def test_validate_and_normalize_datapath_id(self):
        datapath_id = 'AA:BB:CC:DD:EE:FF'
        with mock.patch.object(utils, 'is_valid_datapath_id',
                               autospec=True) as m_mock:
            m_mock.return_value = True
            self.assertEqual(datapath_id.lower(),
                             utils.validate_and_normalize_datapath_id(
                                 datapath_id))

    def test_validate_and_normalize_mac_invalid_format(self):
        with mock.patch.object(netutils, 'is_valid_mac',
                               autospec=True) as m_mock:
            m_mock.return_value = False
            self.assertRaises(exception.InvalidMAC,
                              utils.validate_and_normalize_mac, 'invalid-mac')

    def test_safe_rstrip(self):
        value = '/test/'
        rstripped_value = '/test'
        not_rstripped = '/'
        self.assertEqual(rstripped_value, utils.safe_rstrip(value, '/'))
        self.assertEqual(not_rstripped, utils.safe_rstrip(not_rstripped, '/'))

    def test_safe_rstrip_not_raises_exceptions(self):
        # Supplying an integer should normally raise an exception because it
        # does not have the rstrip() method.
        value = 10
        # In the case of raising an exception safe_rstrip() should return the
        # original value.
        self.assertEqual(value, utils.safe_rstrip(value))

    @mock.patch.object(os.path, 'getmtime', return_value=1439465889.4964755,
                       autospec=True)
    def test_unix_file_modification_datetime(self, mtime_mock):
        expected = datetime.datetime(2015, 8, 13, 11, 38, 9, 496475)
        self.assertEqual(expected,
                         utils.unix_file_modification_datetime('foo'))
        mtime_mock.assert_called_once_with('foo')

    def test_is_valid_no_proxy(self):
        # Valid values for 'no_proxy'
        valid_no_proxy = [
            ('a' * 63 + '.' + '0' * 63 + '.c.' + 'd' * 61 + '.' + 'e' * 61),
            ('A' * 63 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.' + 'E' * 61),
            ('.' + 'a' * 62 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
             + 'e' * 61),
            ',,example.com:3128,',
            '192.168.1.1',  # IP should be valid
        ]
        # Test each one individually, so if failure easier to determine which
        # one failed.
        for no_proxy in valid_no_proxy:
            self.assertTrue(
                utils.is_valid_no_proxy(no_proxy),
                msg="'no_proxy' value should be valid: {}".format(no_proxy))
        # Test valid when joined together
        self.assertTrue(utils.is_valid_no_proxy(','.join(valid_no_proxy)))
        # Test valid when joined together with whitespace
        self.assertTrue(utils.is_valid_no_proxy(' , '.join(valid_no_proxy)))
        # empty string should also be valid
        self.assertTrue(utils.is_valid_no_proxy(''))
        # Invalid values for 'no_proxy'
        invalid_no_proxy = [
            ('A' * 64 + '.' + '0' * 63 + '.C.' + 'D' * 61 + '.'
             + 'E' * 61),  # too long (> 253)
            ('a' * 100),
            'a..com',
            ('.' + 'a' * 63 + '.' + '0' * 62 + '.c.' + 'd' * 61 + '.'
             + 'e' * 61),  # too long (> 251 after deleting .)
            ('*.' + 'a' * 60 + '.' + '0' * 60 + '.c.' + 'd' * 61 + '.'
             + 'e' * 61),  # starts with *.
            'c.-a.com',
            'c.a-.com',
        ]
        for no_proxy in invalid_no_proxy:
            self.assertFalse(
                utils.is_valid_no_proxy(no_proxy),
                msg="'no_proxy' value should be invalid: {}".format(no_proxy))

    @mock.patch.object(utils, 'LOG', autospec=True)
    def test_warn_about_deprecated_extra_vif_port_id(self, mock_log):
        # Set variable to default value
        utils.warn_deprecated_extra_vif_port_id = False
        utils.warn_about_deprecated_extra_vif_port_id()
        # Calling twice must still log only once (warn-once semantics).
        utils.warn_about_deprecated_extra_vif_port_id()
        self.assertEqual(1, mock_log.warning.call_count)
        self.assertIn("extra['vif_port_id'] is deprecated and will not",
                      mock_log.warning.call_args[0][0])
class TempFilesTestCase(base.TestCase):
    """Tests for the tempdir context manager and directory sanity checks
    (existence, writability, free space) in ironic.common.utils."""

    def test_tempdir(self):
        dirname = None
        with utils.tempdir() as tempdir:
            self.assertTrue(os.path.isdir(tempdir))
            dirname = tempdir
        # The directory must be removed when the context exits.
        self.assertFalse(os.path.exists(dirname))

    @mock.patch.object(shutil, 'rmtree', autospec=True)
    @mock.patch.object(tempfile, 'mkdtemp', autospec=True)
    def test_tempdir_mocked(self, mkdtemp_mock, rmtree_mock):
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        kwargs = {'dir': 'b'}
        with utils.tempdir(**kwargs) as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir
        mkdtemp_mock.assert_called_once_with(**kwargs)
        rmtree_mock.assert_called_once_with(tempdir_created)

    @mock.patch.object(utils, 'LOG', autospec=True)
    @mock.patch.object(shutil, 'rmtree', autospec=True)
    @mock.patch.object(tempfile, 'mkdtemp', autospec=True)
    def test_tempdir_mocked_error_on_rmtree(self, mkdtemp_mock, rmtree_mock,
                                            log_mock):
        # Cleanup failures must be logged, not raised.
        self.config(tempdir='abc')
        mkdtemp_mock.return_value = 'temp-dir'
        rmtree_mock.side_effect = OSError
        with utils.tempdir() as tempdir:
            self.assertEqual('temp-dir', tempdir)
            tempdir_created = tempdir
        rmtree_mock.assert_called_once_with(tempdir_created)
        self.assertTrue(log_mock.error.called)

    @mock.patch.object(os.path, 'exists', autospec=True)
    @mock.patch.object(utils, '_check_dir_writable', autospec=True)
    @mock.patch.object(utils, '_check_dir_free_space', autospec=True)
    def test_check_dir_with_pass_in(self, mock_free_space, mock_dir_writable,
                                    mock_exists):
        mock_exists.return_value = True
        # test passing in a directory and size
        utils.check_dir(directory_to_check='/fake/path', required_space=5)
        mock_exists.assert_called_once_with('/fake/path')
        mock_dir_writable.assert_called_once_with('/fake/path')
        mock_free_space.assert_called_once_with('/fake/path', 5)

    @mock.patch.object(utils, '_check_dir_writable', autospec=True)
    @mock.patch.object(utils, '_check_dir_free_space', autospec=True)
    def test_check_dir_no_dir(self, mock_free_space, mock_dir_writable):
        self.config(tempdir='/fake/path')
        # NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock
        # on the method level.
        with mock.patch.object(os.path, 'exists',
                               autospec=True) as mock_exists:
            mock_exists.return_value = False
            self.assertRaises(exception.PathNotFound, utils.check_dir)
            mock_exists.assert_called_once_with(CONF.tempdir)
            self.assertFalse(mock_free_space.called)
            self.assertFalse(mock_dir_writable.called)

    @mock.patch.object(utils, '_check_dir_writable', autospec=True)
    @mock.patch.object(utils, '_check_dir_free_space', autospec=True)
    def test_check_dir_ok(self, mock_free_space, mock_dir_writable):
        self.config(tempdir='/fake/path')
        # NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock
        # on the method level.
        with mock.patch.object(os.path, 'exists',
                               autospec=True) as mock_exists:
            mock_exists.return_value = True
            utils.check_dir()
            mock_exists.assert_called_once_with(CONF.tempdir)
            mock_dir_writable.assert_called_once_with(CONF.tempdir)
            mock_free_space.assert_called_once_with(CONF.tempdir, 1)

    @mock.patch.object(os, 'access', autospec=True)
    def test__check_dir_writable_ok(self, mock_access):
        mock_access.return_value = True
        self.assertIsNone(utils._check_dir_writable("/fake/path"))
        mock_access.assert_called_once_with("/fake/path", os.W_OK)

    @mock.patch.object(os, 'access', autospec=True)
    def test__check_dir_writable_not_writable(self, mock_access):
        mock_access.return_value = False
        self.assertRaises(exception.DirectoryNotWritable,
                          utils._check_dir_writable, "/fake/path")
        mock_access.assert_called_once_with("/fake/path", os.W_OK)

    @mock.patch.object(os, 'statvfs', autospec=True)
    def test__check_dir_free_space_ok(self, mock_stat):
        # f_bsize * f_bavail = 5 MiB available -- enough for the default 1 MiB.
        statvfs_mock_return = mock.MagicMock()
        statvfs_mock_return.f_bsize = 5
        statvfs_mock_return.f_frsize = 0
        statvfs_mock_return.f_blocks = 0
        statvfs_mock_return.f_bfree = 0
        statvfs_mock_return.f_bavail = 1024 * 1024
        statvfs_mock_return.f_files = 0
        statvfs_mock_return.f_ffree = 0
        statvfs_mock_return.f_favail = 0
        statvfs_mock_return.f_flag = 0
        statvfs_mock_return.f_namemax = 0
        mock_stat.return_value = statvfs_mock_return
        utils._check_dir_free_space("/fake/path")
        mock_stat.assert_called_once_with("/fake/path")

    @mock.patch.object(os, 'statvfs', autospec=True)
    def test_check_dir_free_space_raises(self, mock_stat):
        # Only 1 KiB available -- below the required minimum.
        statvfs_mock_return = mock.MagicMock()
        statvfs_mock_return.f_bsize = 1
        statvfs_mock_return.f_frsize = 0
        statvfs_mock_return.f_blocks = 0
        statvfs_mock_return.f_bfree = 0
        statvfs_mock_return.f_bavail = 1024
        statvfs_mock_return.f_files = 0
        statvfs_mock_return.f_ffree = 0
        statvfs_mock_return.f_favail = 0
        statvfs_mock_return.f_flag = 0
        statvfs_mock_return.f_namemax = 0
        mock_stat.return_value = statvfs_mock_return
        self.assertRaises(exception.InsufficientDiskSpace,
                          utils._check_dir_free_space, "/fake/path")
        mock_stat.assert_called_once_with("/fake/path")
class GetUpdatedCapabilitiesTestCase(base.TestCase):
    """Tests for capability-string merging and network-port validation."""

    def test_get_updated_capabilities(self):
        capabilities = {'ilo_firmware_version': 'xyz'}
        cap_string = 'ilo_firmware_version:xyz'
        cap_returned = utils.get_updated_capabilities(None, capabilities)
        self.assertEqual(cap_string, cap_returned)
        self.assertIsInstance(cap_returned, str)

    def test_get_updated_capabilities_multiple_keys(self):
        capabilities = {'ilo_firmware_version': 'xyz',
                        'foo': 'bar', 'somekey': 'value'}
        cap_string = 'ilo_firmware_version:xyz,foo:bar,somekey:value'
        cap_returned = utils.get_updated_capabilities(None, capabilities)
        # Dict ordering is not guaranteed -- compare as sets of k:v pairs.
        set1 = set(cap_string.split(','))
        set2 = set(cap_returned.split(','))
        self.assertEqual(set1, set2)
        self.assertIsInstance(cap_returned, str)

    def test_get_updated_capabilities_invalid_capabilities(self):
        capabilities = 'ilo_firmware_version'
        self.assertRaises(ValueError,
                          utils.get_updated_capabilities,
                          capabilities, {})

    def test_get_updated_capabilities_capabilities_not_dict(self):
        capabilities = ['ilo_firmware_version:xyz', 'foo:bar']
        self.assertRaises(ValueError,
                          utils.get_updated_capabilities,
                          None, capabilities)

    def test_get_updated_capabilities_add_to_existing_capabilities(self):
        new_capabilities = {'BootMode': 'uefi'}
        expected_capabilities = 'BootMode:uefi,foo:bar'
        cap_returned = utils.get_updated_capabilities('foo:bar',
                                                      new_capabilities)
        set1 = set(expected_capabilities.split(','))
        set2 = set(cap_returned.split(','))
        self.assertEqual(set1, set2)
        self.assertIsInstance(cap_returned, str)

    def test_get_updated_capabilities_replace_to_existing_capabilities(self):
        new_capabilities = {'BootMode': 'bios'}
        expected_capabilities = 'BootMode:bios'
        cap_returned = utils.get_updated_capabilities('BootMode:uefi',
                                                      new_capabilities)
        set1 = set(expected_capabilities.split(','))
        set2 = set(cap_returned.split(','))
        self.assertEqual(set1, set2)
        self.assertIsInstance(cap_returned, str)

    # NOTE(review): the port-validation tests below are unrelated to
    # capabilities but live in this class in the original -- kept in place.
    def test_validate_network_port(self):
        port = utils.validate_network_port('0', 'message')
        self.assertEqual(0, port)
        port = utils.validate_network_port('65535')
        self.assertEqual(65535, port)

    def test_validate_network_port_fail(self):
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'Port "65536" is not a valid port.',
                               utils.validate_network_port,
                               '65536')
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'fake_port "-1" is not a valid port.',
                               utils.validate_network_port,
                               '-1',
                               'fake_port')
        self.assertRaisesRegex(exception.InvalidParameterValue,
                               'Port "invalid" is not a valid port.',
                               utils.validate_network_port,
                               'invalid')
class JinjaTemplatingTestCase(base.TestCase):
    """Tests for utils.render_template with both string and file templates."""

    def setUp(self):
        super(JinjaTemplatingTestCase, self).setUp()
        self.template = '{{ foo }} {{ bar }}'
        self.params = {'foo': 'spam', 'bar': 'ham'}
        self.expected = 'spam ham'

    def test_render_string(self):
        self.assertEqual(self.expected,
                         utils.render_template(self.template,
                                               self.params,
                                               is_file=False))

    @mock.patch('ironic.common.utils.jinja2.FileSystemLoader', autospec=True)
    def test_render_file(self, jinja_fsl_mock):
        # Swap the filesystem loader for a dict loader so no real file is read.
        path = '/path/to/template.j2'
        jinja_fsl_mock.return_value = jinja2.DictLoader(
            {'template.j2': self.template})
        self.assertEqual(self.expected,
                         utils.render_template(path,
                                               self.params))
        jinja_fsl_mock.assert_called_once_with('/path/to')
| ironic/tests/unit/common/test_utils.py | 26,630 | Copyright 2011 Justin Santa Barbara Copyright 2012 Hewlett-Packard Development Company, L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | WHEN | | THEN | | WHEN | & | THEN | | GIVEN | | WHEN | using default, 'md5' | THEN | | GIVEN | | WHEN | using default, 'md5' | THEN | | GIVEN | | WHEN | | THEN | | GIVEN | | WHEN | | THEN | | GIVEN | | WHEN | & | THEN | Need to ensure a binary response for success or fail Need to ensure a binary response for success. assertTrue is too generous, and would pass this test if, for instance, a regex Match object were returned. Need to ensure a binary response for success. assertFalse is too generous and would pass this test if None were returned. Supplying an integer should normally raise an exception because it does not save the rstrip() method. In the case of raising an exception safe_rstrip() should return the original value. Valid values for 'no_proxy' IP should be valid Test each one individually, so if failure easier to determine which one failed. Test valid when joined together Test valid when joined together with whitespace empty string should also be valid Invalid values for 'no_proxy' too long (> 253) too long (> 251 after deleting .) starts with *. Set variable to default value test passing in a directory and size NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock on the method level. NOTE(dtantsur): self.config uses os.path.exists, so we cannot mock on the method level. 
| 1,958 | en | 0.788418 |
# Copyright (c) 2019 Bernd Wiesner. bernduwiesner@yahoo.co.uk
# All rights reserved.
#
""" Display the command line options in a window
"""
from argparse import Namespace
from typing import Tuple, Union
import PySimpleGUI as sg
import constants as C
from gui_utility import popup_window
ArgumentsResults = Tuple[Union[str, None], Namespace]
def arguments_window(args: Namespace) -> ArgumentsResults:
    """Window interface

    Presents the command-line options in a PySimpleGUI window and lets the
    user adjust them before the program proceeds.

    :param args: the arguments passed from the command line
    :return: Tuple[Union[str, None], Namespace] - The final event ("OK",
        "Cancel", or None if the window was closed) and the updated arguments
    """
    # Pre-compute the save-file path for the currently selected lottery type.
    filename: str = C.SAVE_FILE_DIR + args.lottery_type + C.SAVE_FILE_TYPE
    # Row 1: lottery type selector + line-count input.
    # Row 2: radio group choosing what to do with the saved file.
    # Row 3: the file name affected by the chosen action.
    # Row 4: OK / Cancel buttons.
    layout = [
        [
            sg.Text(text="Lottery type:"),
            sg.InputCombo(
                values=tuple(C.LOTTERY_TYPES),
                default_value=args.lottery_type,
                readonly=True,
                enable_events=True,
                size=(10, 1),
                tooltip="Choose a lottery type",
                key=C.ELEMENT_NAMES["LOTTO"],
            ),
            sg.Frame(
                layout=[
                    [
                        sg.Text(text="Number of lines"),
                        sg.InputText(
                            default_text=args.number_of_lines,
                            enable_events=True,
                            size=(3, 1),
                            justification="right",
                            key=C.ELEMENT_NAMES["COUNT"],
                        ),
                    ]
                ],
                title="",
                tooltip="Choose the number of lines to generate",
                relief=sg.RELIEF_FLAT,
                key=C.ELEMENT_NAMES["LINES"],
            ),
        ],
        [
            sg.Frame(
                layout=[
                    [
                        sg.Radio(
                            text="Save",
                            group_id="R",
                            default=not args.no_save,
                            tooltip="Save the generated numbers",
                            enable_events=True,
                            key=C.ELEMENT_NAMES["SAVE"],
                        ),
                        sg.Radio(
                            text="Do NOT save",
                            group_id="R",
                            default=args.no_save,
                            tooltip="Do not save the generated numbers",
                            enable_events=True,
                            key=C.ELEMENT_NAMES["NOSAVE"],
                        ),
                        sg.Radio(
                            text="Delete",
                            group_id="R",
                            default=args.delete,
                            enable_events=True,
                            tooltip="Delete a saved file",
                            key=C.ELEMENT_NAMES["DELETE"],
                        ),
                        sg.Radio(
                            text="Show",
                            group_id="R",
                            default=args.print,
                            tooltip="Display a previously saved file",
                            enable_events=True,
                            key=C.ELEMENT_NAMES["SHOW"],
                        ),
                    ]
                ],
                title="Saved file options",
                relief=sg.RELIEF_SOLID,
                size=(0, 40),
            )
        ],
        [
            sg.Text(
                text="File name: " + filename,
                key=C.ELEMENT_NAMES["FILENAME"],
                size=(50, 2),
                tooltip="The name of the file to save or to display",
                justification="left",
            )
        ],
        [
            sg.OK(key="OK", focus=True),
            sg.Quit(key="Cancel", tooltip="Do nothing and quit"),
        ],
    ]
    window = sg.Window(
        title="Lottery number Generator Arguments",
        layout=layout,
        text_justification=C.GUI_JUSTIFY,
        font=(C.GUI_FONT_NAME, C.GUI_FONT_SIZE),
    )
    # Event loop: react to widget events until OK, Cancel or window close.
    while True:
        event, values = window.Read()
        if event == C.ELEMENT_NAMES["DELETE"]:
            # Delete mode: the line count is irrelevant, so hide it.
            window.Element(key="OK").Update("Delete Saved File")
            window.Element(key=C.ELEMENT_NAMES["LINES"]).Update(visible=False)
            window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
                "File to delete: " + filename
            )
        elif event == C.ELEMENT_NAMES["SHOW"]:
            window.Element(key="OK").Update("Show Saved File")
            window.Element(key=C.ELEMENT_NAMES["LINES"]).Update(visible=False)
            window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
                "File to display: " + filename
            )
        elif event in (C.ELEMENT_NAMES["NOSAVE"], C.ELEMENT_NAMES["SAVE"]):
            window.Element(key="OK").Update("Generate Numbers")
            window.Element(key=C.ELEMENT_NAMES["LINES"]).Update(visible=True)
            if event == C.ELEMENT_NAMES["NOSAVE"]:
                window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
                    "File will not be saved"
                )
            elif event == C.ELEMENT_NAMES["SAVE"]:
                window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
                    "Will be saved as: " + filename
                )
        if event == C.ELEMENT_NAMES["LOTTO"]:
            # Lottery type changed: recompute and show the matching file name.
            filename = (
                C.SAVE_FILE_DIR + values[C.ELEMENT_NAMES["LOTTO"]] + C.SAVE_FILE_TYPE
            )
            window.Element(key=C.ELEMENT_NAMES["FILENAME"]).Update(
                "File name: " + filename
            )
        elif event == C.ELEMENT_NAMES["COUNT"]:
            # Validate the line count as the user types; reset to the default
            # and warn when it is non-numeric or out of range.
            if values[C.ELEMENT_NAMES["COUNT"]].isnumeric():
                temp = int(values[C.ELEMENT_NAMES["COUNT"]])
            else:
                # NOTE(review): False compares as 0 below, which forces the
                # out-of-range branch -- works, but an explicit 0 would be
                # clearer.
                temp = False
            if temp < C.MIN_LINES or temp > C.MAX_LINES:
                elem = window.Element(key=C.ELEMENT_NAMES["COUNT"])
                elem.Update(C.DEFAULT_LINES)
                msg = "number of lines must be in the range 1-100"
                popup_window(text=msg)
        elif event == "OK" or event == "Cancel" or event is None:
            break
    if event != "Cancel" and event is not None:
        # Copy the (validated) widget values back onto the Namespace.
        args.lottery_type = values[C.ELEMENT_NAMES["LOTTO"]]  # str
        args.number_of_lines = int(values[C.ELEMENT_NAMES["COUNT"]])  # int
        args.delete = values[C.ELEMENT_NAMES["DELETE"]]  # bool
        args.print = values[C.ELEMENT_NAMES["SHOW"]]  # bool
        args.no_save = values[C.ELEMENT_NAMES["NOSAVE"]]  # bool
    window.Close()
    return event, args
| gui_arguments.py | 6,700 | Window interface
:param args: the arguments passed from the command line
:return: Tuple[Union[str, None], Namespace] - The new arguments
Display the command line options in a window
Copyright (c) 2019 Bernd Wiesner. bernduwiesner@yahoo.co.uk All rights reseArgumentS_Resultsed str int bool bool bool | 306 | en | 0.348044 |
import itertools as it
import re
import string
import warnings
from xml.dom import minidom
from manimlib.constants import *
from manimlib.mobject.geometry import Circle
from manimlib.mobject.geometry import Rectangle
from manimlib.mobject.geometry import RoundedRectangle
from manimlib.mobject.types.vectorized_mobject import VGroup
from manimlib.mobject.types.vectorized_mobject import VMobject
from manimlib.utils.color import *
from manimlib.utils.config_ops import digest_config
from manimlib.utils.config_ops import digest_locals
def string_to_numbers(num_string):
num_string = num_string.replace("-", ",-")
num_string = num_string.replace("e,-", "e-")
return [float(s) for s in re.split("[ ,]", num_string) if s != ""]
class SVGMobject(VMobject):
CONFIG = {
"should_center": True,
"height": 2,
"width": None,
# Must be filled in in a subclass, or when called
"file_name": None,
"unpack_groups": True, # if False, creates a hierarchy of VGroups
"stroke_width": DEFAULT_STROKE_WIDTH,
"fill_opacity": 1.0,
# "fill_color" : LIGHT_GREY,
}
def __init__(self, file_name=None, **kwargs):
digest_config(self, kwargs)
self.file_name = file_name or self.file_name
self.ensure_valid_file()
VMobject.__init__(self, **kwargs)
self.move_into_position()
def ensure_valid_file(self):
if self.file_name is None:
raise Exception("Must specify file for SVGMobject")
possible_paths = [
os.path.join(os.path.join("assets", "svg_images"), self.file_name),
os.path.join(os.path.join("assets", "svg_images"),
self.file_name + ".svg"),
os.path.join(os.path.join("assets", "svg_images"),
self.file_name + ".xdv"),
self.file_name,
]
for path in possible_paths:
if os.path.exists(path):
self.file_path = path
return
raise IOError("No file matching %s in image directory" %
self.file_name)
def generate_points(self):
doc = minidom.parse(self.file_path)
self.ref_to_element = {}
for svg in doc.getElementsByTagName("svg"):
mobjects = self.get_mobjects_from(svg)
if self.unpack_groups:
self.add(*mobjects)
else:
self.add(*mobjects[0].submobjects)
doc.unlink()
def get_mobjects_from(self, element):
result = []
if not isinstance(element, minidom.Element):
return result
if element.tagName == 'defs':
self.update_ref_to_element(element)
elif element.tagName == 'style':
pass # TODO, handle style
elif element.tagName in ['g', 'svg', 'symbol']:
result += it.chain(*[
self.get_mobjects_from(child) for child in element.childNodes
])
elif element.tagName == 'path':
result.append(
self.path_string_to_mobject(element.getAttribute('d')))
elif element.tagName == 'use':
result += self.use_to_mobjects(element)
elif element.tagName == 'rect':
result.append(self.rect_to_mobject(element))
elif element.tagName == 'circle':
result.append(self.circle_to_mobject(element))
elif element.tagName == 'ellipse':
result.append(self.ellipse_to_mobject(element))
elif element.tagName in ['polygon', 'polyline']:
result.append(self.polygon_to_mobject(element))
else:
pass # TODO
# warnings.warn("Unknown element type: " + element.tagName)
result = [m for m in result if m is not None]
self.handle_transforms(element, VGroup(*result))
if len(result) > 1 and not self.unpack_groups:
result = [VGroup(*result)]
return result
def g_to_mobjects(self, g_element):
mob = VGroup(*self.get_mobjects_from(g_element))
self.handle_transforms(g_element, mob)
return mob.submobjects
def path_string_to_mobject(self, path_string):
return VMobjectFromSVGPathstring(path_string)
def use_to_mobjects(self, use_element):
# Remove initial "#" character
ref = use_element.getAttribute("xlink:href")[1:]
if ref not in self.ref_to_element:
warnings.warn("%s not recognized" % ref)
return VGroup()
return self.get_mobjects_from(self.ref_to_element[ref])
def attribute_to_float(self, attr):
stripped_attr = "".join(
[char for char in attr if char in string.digits + "." + "-"])
return float(stripped_attr)
def polygon_to_mobject(self, polygon_element):
# TODO, This seems hacky...
path_string = polygon_element.getAttribute("points")
for digit in string.digits:
path_string = path_string.replace(" " + digit, " L" + digit)
path_string = "M" + path_string
return self.path_string_to_mobject(path_string)
# <circle class="st1" cx="143.8" cy="268" r="22.6"/>
def circle_to_mobject(self, circle_element):
x, y, r = [
self.attribute_to_float(circle_element.getAttribute(key))
if circle_element.hasAttribute(key) else 0.0
for key in ("cx", "cy", "r")
]
return Circle(radius=r).shift(x * RIGHT + y * DOWN)
def ellipse_to_mobject(self, circle_element):
x, y, rx, ry = [
self.attribute_to_float(circle_element.getAttribute(key))
if circle_element.hasAttribute(key) else 0.0
for key in ("cx", "cy", "rx", "ry")
]
return Circle().scale(rx * RIGHT + ry * UP).shift(x * RIGHT + y * DOWN)
def rect_to_mobject(self, rect_element):
fill_color = rect_element.getAttribute("fill")
stroke_color = rect_element.getAttribute("stroke")
stroke_width = rect_element.getAttribute("stroke-width")
corner_radius = rect_element.getAttribute("rx")
# input preprocessing
if fill_color in ["", "none", "#FFF", "#FFFFFF"
] or Color(fill_color) == Color(WHITE):
opacity = 0
fill_color = BLACK # shdn't be necessary but avoids error msgs
if fill_color in ["#000", "#000000"]:
fill_color = WHITE
if stroke_color in ["", "none", "#FFF", "#FFFFFF"
] or Color(stroke_color) == Color(WHITE):
stroke_width = 0
stroke_color = BLACK
if stroke_color in ["#000", "#000000"]:
stroke_color = WHITE
if stroke_width in ["", "none", "0"]:
stroke_width = 0
if corner_radius in ["", "0", "none"]:
corner_radius = 0
corner_radius = float(corner_radius)
if corner_radius == 0:
mob = Rectangle(width=self.attribute_to_float(
rect_element.getAttribute("width")),
height=self.attribute_to_float(
rect_element.getAttribute("height")),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity)
else:
mob = RoundedRectangle(width=self.attribute_to_float(
rect_element.getAttribute("width")),
height=self.attribute_to_float(
rect_element.getAttribute("height")),
stroke_width=stroke_width,
stroke_color=stroke_color,
fill_color=fill_color,
fill_opacity=opacity,
corner_radius=corner_radius)
mob.shift(mob.get_center() - mob.get_corner(UP + LEFT))
return mob
def handle_transforms(self, element, mobject):
x, y = 0, 0
try:
x = self.attribute_to_float(element.getAttribute('x'))
# Flip y
y = -self.attribute_to_float(element.getAttribute('y'))
mobject.shift(x * RIGHT + y * UP)
except:
pass
transform = element.getAttribute('transform')
try: # transform matrix
prefix = "matrix("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(
suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
transform = string_to_numbers(transform)
transform = np.array(transform).reshape([3, 2])
x = transform[2][0]
y = -transform[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
mob.points = np.dot(mob.points, matrix)
mobject.shift(x * RIGHT + y * UP)
except:
pass
try: # transform scale
prefix = "scale("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(
suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
scale_values = string_to_numbers(transform)
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]),
about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
except:
pass
try: # transform translate
prefix = "translate("
suffix = ")"
if not transform.startswith(prefix) or not transform.endswith(
suffix):
raise Exception()
transform = transform[len(prefix):-len(suffix)]
x, y = string_to_numbers(transform)
mobject.shift(x * RIGHT + y * DOWN)
except:
pass
# TODO, ...
def flatten(self, input_list):
output_list = []
for i in input_list:
if isinstance(i, list):
output_list.extend(self.flatten(i))
else:
output_list.append(i)
return output_list
def get_all_childNodes_have_id(self, element):
all_childNodes_have_id = []
if not isinstance(element, minidom.Element):
return
if element.hasAttribute('id'):
return [element]
for e in element.childNodes:
all_childNodes_have_id.append(self.get_all_childNodes_have_id(e))
return self.flatten([e for e in all_childNodes_have_id if e])
def update_ref_to_element(self, defs):
new_refs = dict([(e.getAttribute('id'), e)
for e in self.get_all_childNodes_have_id(defs)])
self.ref_to_element.update(new_refs)
def move_into_position(self):
if self.should_center:
self.center()
if self.height is not None:
self.set_height(self.height)
if self.width is not None:
self.set_width(self.width)
class VMobjectFromSVGPathstring(VMobject):
def __init__(self, path_string, **kwargs):
digest_locals(self)
VMobject.__init__(self, **kwargs)
def get_path_commands(self):
result = [
"M", # moveto
"L", # lineto
"H", # horizontal lineto
"V", # vertical lineto
"C", # curveto
"S", # smooth curveto
"Q", # quadratic Bezier curve
"T", # smooth quadratic Bezier curveto
"A", # elliptical Arc
"Z", # closepath
]
result += [s.lower() for s in result]
return result
def generate_points(self):
pattern = "[%s]" % ("".join(self.get_path_commands()))
pairs = list(
zip(re.findall(pattern, self.path_string),
re.split(pattern, self.path_string)[1:]))
# Which mobject should new points be added to
self = self
for command, coord_string in pairs:
self.handle_command(command, coord_string)
# people treat y-coordinate differently
self.rotate(np.pi, RIGHT, about_point=ORIGIN)
def handle_command(self, command, coord_string):
isLower = command.islower()
command = command.upper()
# new_points are the points that will be added to the curr_points
# list. This variable may get modified in the conditionals below.
points = self.points
new_points = self.string_to_points(coord_string)
if isLower and len(points) > 0:
new_points += points[-1]
if command == "M": # moveto
self.start_new_path(new_points[0])
if len(new_points) <= 1:
return
# Draw relative line-to values.
points = self.points
new_points = new_points[1:]
command = "L"
for p in new_points:
if isLower:
# Treat everything as relative line-to until empty
p[0] += self.points[-1, 0]
p[1] += self.points[-1, 1]
self.add_line_to(p)
return
elif command in ["L", "H", "V"]: # lineto
if command == "H":
new_points[0, 1] = points[-1, 1]
elif command == "V":
if isLower:
new_points[0, 0] -= points[-1, 0]
new_points[0, 0] += points[-1, 1]
new_points[0, 1] = new_points[0, 0]
new_points[0, 0] = points[-1, 0]
self.add_line_to(new_points[0])
return
if command == "C": # curveto
pass # Yay! No action required
elif command in ["S", "T"]: # smooth curveto
self.add_smooth_curve_to(*new_points)
# handle1 = points[-1] + (points[-1] - points[-2])
# new_points = np.append([handle1], new_points, axis=0)
return
elif command == "Q": # quadratic Bezier curve
# TODO, this is a suboptimal approximation
new_points = np.append([new_points[0]], new_points, axis=0)
elif command == "A": # elliptical Arc
raise Exception("Not implemented")
elif command == "Z": # closepath
return
# Add first three points
self.add_cubic_bezier_curve_to(*new_points[0:3])
# Handle situations where there's multiple relative control points
if len(new_points) > 3:
# Add subsequent offset points relatively.
for i in range(3, len(new_points), 3):
if isLower:
new_points[i:i + 3] -= points[-1]
new_points[i:i + 3] += new_points[i - 1]
self.add_cubic_bezier_curve_to(*new_points[i:i + 3])
def string_to_points(self, coord_string):
numbers = string_to_numbers(coord_string)
if len(numbers) % 2 == 1:
numbers.append(0)
num_points = len(numbers) // 2
result = np.zeros((num_points, self.dim))
result[:, :2] = np.array(numbers).reshape((num_points, 2))
return result
def get_original_path_string(self):
return self.path_string
| manimlib/mobject/svg/svg_mobject.py | 15,787 | Must be filled in in a subclass, or when called if False, creates a hierarchy of VGroups "fill_color" : LIGHT_GREY, TODO, handle style TODO warnings.warn("Unknown element type: " + element.tagName) Remove initial "" character TODO, This seems hacky... <circle class="st1" cx="143.8" cy="268" r="22.6"/> input preprocessing shdn't be necessary but avoids error msgs Flip y transform matrix transform scale transform translate TODO, ... moveto lineto horizontal lineto vertical lineto curveto smooth curveto quadratic Bezier curve smooth quadratic Bezier curveto elliptical Arc closepath Which mobject should new points be added to people treat y-coordinate differently new_points are the points that will be added to the curr_points list. This variable may get modified in the conditionals below. moveto Draw relative line-to values. Treat everything as relative line-to until empty lineto curveto Yay! No action required smooth curveto handle1 = points[-1] + (points[-1] - points[-2]) new_points = np.append([handle1], new_points, axis=0) quadratic Bezier curve TODO, this is a suboptimal approximation elliptical Arc closepath Add first three points Handle situations where there's multiple relative control points Add subsequent offset points relatively. | 1,256 | en | 0.712923 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Zimeng Qiu <zimengq@andrew.cmu.edu>
"""
F19 11-411/611 NLP Assignment 3 Task 1
N-gram Language Model Implementation Script
Zimeng Qiu Sep 2019
This is a simple implementation of N-gram language model
Write your own implementation in this file!
"""
import argparse
from utils import *
import numpy as np
class LanguageModel(object):
"""
Base class for all language models
"""
def __init__(self, corpus, ngram, min_freq, uniform=False):
"""
Initialize language model
:param corpus: input text corpus to build LM on
:param ngram: number of n-gram, e.g. 1, 2, 3, ...
:param min_freq: minimum frequency threshold to set a word to UNK placeholder
set to 1 to not use this threshold
:param uniform: boolean flag, set to True to indicate this model is a simple uniform LM
otherwise will be an N-gram model
"""
# write your initialize code below
self.corpus = corpus
self.ngram = ngram
self.min_freq = min_freq
self.uniform = uniform
self.uniform_table = None
self.unigram_table = None
self.bigram_table = None
self.trigram_table = None
self.infrequent_words = find_infrequent_words(self.corpus,self.min_freq)
replace_infrequent_words(self.corpus,self.infrequent_words)
self.corpus_1gram,self.vocabulary,self.V,self.N = get_vocabulary(self.corpus)
self.word_to_idx,self.idx_to_word = get_word_mappings(self.vocabulary)
self.counter_1gram = get_counter(self.corpus_1gram)
self.build()
def build(self):
"""
Build LM from text corpus
"""
# Write your own implementation here
# uniform
if self.uniform:
self.uniform_table = get_uniform_tables(self.V)
else:
# unigram
if self.ngram == 1:
self.unigram_table = get_unigram_tables(self.V,self.N,self.counter_1gram,self.word_to_idx)
# bigram
elif self.ngram == 2:
self.corpus_2gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1]) for i in range(len(self.corpus_1gram)-1)]
self.counter_2gram = get_counter(self.corpus_2gram)
self.bigram_table = get_bigram_tables(self.V,self.counter_1gram,self.counter_2gram,self.word_to_idx,self.idx_to_word)
# trigram
elif self.ngram == 3:
self.corpus_2gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1]) for i in range(len(self.corpus_1gram)-1)]
self.counter_2gram = get_counter(self.corpus_2gram)
self.corpus_3gram = [(self.corpus_1gram[i],self.corpus_1gram[i+1],self.corpus_1gram[i+2]) for i in range(len(self.corpus_1gram)-2)]
self.counter_3gram = get_counter(self.corpus_3gram)
self.trigram_table = get_trigram_tables(self.V,self.counter_2gram,self.counter_3gram,self.word_to_idx)
def most_common_words(self, k):
"""
Return the top-k most frequent n-grams and their frequencies in sorted order.
For uniform models, the frequency should be "1" for each token.
Your return should be sorted in descending order of frequency.
Sort according to ascending alphabet order when multiple words have same frequency.
:return: list[tuple(token, freq)] of top k most common tokens
"""
# Write your own implementation here
if self.uniform:
return [(word,1) for word in sorted(self.vocabulary)[0:k]]
else:
if self.ngram == 1:
return sorted(self.counter_1gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]
elif self.ngram == 2:
return [(token[0]+' '+token[1],num) for token, num in sorted(self.counter_2gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]]
elif self.ngram == 3:
return [(token[0]+' '+token[1]+' '+token[2],num) for token,num in sorted(self.counter_3gram.most_common(),key=lambda x:(-x[1],x[0]))[0:k]]
return
def calculate_perplexity(models, coefs, data):
"""
Calculate perplexity with given model
:param models: language models
:param coefs: coefficients
:param data: test data
:return: perplexity
"""
# Write your own implementation here
pp = 0
uniform_prob = []
unigram_prob = []
bigram_prob = []
trigram_prob = []
prob_table_unifrom = None
prob_table_1gram = None
prob_table_2gram = None
prob_table_3gram = None
min_freq = models[0].min_freq
train_vocabulary = models[0].vocabulary
word_to_idx,idx_to_word = models[0].word_to_idx,models[0].idx_to_word
test_infrequent_words = find_infrequent_words(data,min_freq)
replace_infrequent_words(data,test_infrequent_words)
for i in range(len(data)):
for j in range(len(data[i])):
if data[i][j] not in train_vocabulary:
data[i][j] = 'UNK'
corpus_1gram,vocabulary,V,N = get_vocabulary(data)
corpus_2gram = [(corpus_1gram[i],corpus_1gram[i+1]) for i in range(len(corpus_1gram)-1)]
corpus_3gram = [(corpus_1gram[i],corpus_1gram[i+1],corpus_1gram[i+2]) for i in range(len(corpus_1gram)-2)]
for i in range(len(models)):
model = models[i]
if model.uniform:
prob_table_unifrom = model.uniform_table
for word in corpus_1gram:
uniform_prob.append(prob_table_unifrom[0][word_to_idx[word]]*coefs[0])
else:
if model.ngram == 1:
prob_table_1gram = model.unigram_table
for word in corpus_1gram:
unigram_prob.append(prob_table_1gram[0][word_to_idx[word]]*coefs[1])
elif model.ngram == 2:
prob_table_2gram = model.bigram_table
bigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_2gram[0][0]]])
for words in corpus_2gram:
word1 = words[0]
word2 = words[1]
prob_1gram = prob_table_1gram[0][word_to_idx[word2]]
prob_2gram = prob_table_2gram[word_to_idx[word1]][word_to_idx[word2]]
if prob_2gram != 0:
bigram_prob.append(prob_2gram*coefs[2])
else:
bigram_prob.append(prob_1gram*coefs[2])
elif model.ngram == 3:
prob_table_3gram = model.trigram_table
train_corpus_3gram = set(model.corpus_3gram)
trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][0]]])
trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][1]]])
for words in corpus_3gram:
word1 = words[0]
word2 = words[1]
word3 = words[2]
if words in train_corpus_3gram:
prob_3gram = prob_table_3gram[(word1,word2,word3)]
trigram_prob.append(prob_3gram*coefs[3])
else:
prob_1gram = prob_table_1gram[0][word_to_idx[word3]]
prob_2gram = prob_table_2gram[word_to_idx[word2]][word_to_idx[word3]]
if prob_2gram != 0:
trigram_prob.append(prob_2gram*coefs[3])
else:
trigram_prob.append(prob_1gram*coefs[3])
prob = np.zeros((N,),dtype=np.float64)
for i in range(len(prob)):
prob[i] += uniform_prob[i]
prob[i] += unigram_prob[i]
prob[i] += bigram_prob[i]
prob[i] += trigram_prob[i]
for p in prob:
pp += np.log2(p)
pp /= -N
pp = np.power(2,pp)
return pp
# Do not modify this function!
def parse_args():
"""
Parse input positional arguments from command line
:return: args - parsed arguments
"""
parser = argparse.ArgumentParser('N-gram Language Model')
parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float)
parser.add_argument('coef_uni', help='coefficient for the unigram model.', type=float)
parser.add_argument('coef_bi', help='coefficient for the bigram model.', type=float)
parser.add_argument('coef_tri', help='coefficient for the trigram model.', type=float)
parser.add_argument('min_freq', type=int,
help='minimum frequency threshold for substitute '
'with UNK token, set to 1 for not use this threshold')
parser.add_argument('testfile', help='test text file.')
parser.add_argument('trainfile', help='training text file.', nargs='+')
return parser.parse_args()
# Main executable script provided for your convenience
# Not executed on autograder, so do what you want
if __name__ == '__main__':
# parse arguments
args = parse_args()
# load and preprocess train and test data
train = preprocess(load_dataset(args.trainfile))
test = preprocess(read_file(args.testfile))
# build language models
uniform = LanguageModel(train, ngram=1, min_freq=args.min_freq, uniform=True)
unigram = LanguageModel(train, ngram=1, min_freq=args.min_freq)
# print('Unique 1-gram types:',len(unigram.counter_1gram.most_common()))
# print('top 15 unigram:',unigram.counter_1gram.most_common()[:15])
bigram = LanguageModel(train, ngram=2, min_freq=args.min_freq)
# print('Unique 2-gram types:',len(bigram.counter_2gram.most_common()))
# print('top 15 bigram:',bigram.counter_2gram.most_common()[:15])
trigram = LanguageModel(train, ngram=3, min_freq=args.min_freq)
# print('Unique 3-gram types:',len(trigram.counter_3gram.most_common()))
# print('top 15 trigram:',trigram.counter_3gram.most_common()[:50])
# calculate perplexity on test file
ppl = calculate_perplexity(
models=[uniform, unigram, bigram, trigram],
coefs=[args.coef_unif, args.coef_uni, args.coef_bi, args.coef_tri],
data=test)
print("Perplexity: {}".format(ppl))
| lm.py | 10,230 | Base class for all language models
Initialize language model
:param corpus: input text corpus to build LM on
:param ngram: number of n-gram, e.g. 1, 2, 3, ...
:param min_freq: minimum frequency threshold to set a word to UNK placeholder
set to 1 to not use this threshold
:param uniform: boolean flag, set to True to indicate this model is a simple uniform LM
otherwise will be an N-gram model
Build LM from text corpus
Calculate perplexity with given model
:param models: language models
:param coefs: coefficients
:param data: test data
:return: perplexity
Return the top-k most frequent n-grams and their frequencies in sorted order.
For uniform models, the frequency should be "1" for each token.
Your return should be sorted in descending order of frequency.
Sort according to ascending alphabet order when multiple words have same frequency.
:return: list[tuple(token, freq)] of top k most common tokens
Parse input positional arguments from command line
:return: args - parsed arguments
F19 11-411/611 NLP Assignment 3 Task 1
N-gram Language Model Implementation Script
Zimeng Qiu Sep 2019
This is a simple implementation of N-gram language model
Write your own implementation in this file!
!/usr/bin/env python -*- coding: utf-8 -*- Copyright (C) 2019 Zimeng Qiu <zimengq@andrew.cmu.edu> write your initialize code below Write your own implementation here uniform unigram bigram trigram Write your own implementation here Write your own implementation here Do not modify this function! Main executable script provided for your convenience Not executed on autograder, so do what you want parse arguments load and preprocess train and test data build language models print('Unique 1-gram types:',len(unigram.counter_1gram.most_common())) print('top 15 unigram:',unigram.counter_1gram.most_common()[:15]) print('Unique 2-gram types:',len(bigram.counter_2gram.most_common())) print('top 15 bigram:',bigram.counter_2gram.most_common()[:15]) print('Unique 3-gram types:',len(trigram.counter_3gram.most_common())) print('top 15 trigram:',trigram.counter_3gram.most_common()[:50]) calculate perplexity on test file | 2,151 | en | 0.675535 |
"""
---
title: Compressive Transformer Experiment
summary: This experiment trains a compressive transformer model on tiny Shakespeare dataset.
---
# Compressive Transformer Experiment
This is an annotated PyTorch experiment to train a compressive transformer model.
"""
from typing import List, Tuple, NamedTuple
import torch
import torch.nn as nn
from labml import experiment, tracker, monit, logger
from labml.configs import option
from labml.logger import Text
from labml_helpers.metrics.simple_state import SimpleStateModule
from labml_helpers.module import Module
from labml_helpers.train_valid import BatchIndex, hook_model_outputs
from labml_nn.experiments.nlp_autoregression import NLPAutoRegressionConfigs
from labml_nn.transformers.compressive import CompressiveTransformer, AttentionReconstructionLoss, \
CompressiveTransformerLayer, Conv1dCompression
class CompressedMemory(NamedTuple):
mem: List[torch.Tensor]
c_mem: List[torch.Tensor]
class AutoregressiveModel(Module):
"""
## Auto regressive model
"""
def __init__(self, n_vocab: int, d_model: int, transformer: CompressiveTransformer):
super().__init__()
# Token embedding module
self.src_embed = nn.Embedding(n_vocab, d_model)
# Transformer
self.transformer = transformer
# Final layer
self.generator = nn.Linear(d_model, n_vocab)
# Masks
self.mask_x = None
self.mask_mem = None
def forward(self, x: torch.Tensor, mem: CompressedMemory):
# Get memory and compressed memory
if mem is not None:
mem, c_mem = mem.mem, mem.c_mem
else:
mem = []
c_mem = []
# Total length of the memory and compressed memory (for masks)
m_len = len(mem[0]) if mem else 0
if c_mem:
m_len += len(c_mem[0])
# Create a subsequent mask for tokens
if self.mask_x is None or self.mask_x.shape[0] < len(x):
from labml_nn.transformers.utils import subsequent_mask
self.mask_x = subsequent_mask(len(x)).to(x.device)
# Create an all ones (full visibility) mask for memory
if self.mask_mem is None or self.mask_mem.shape[1] < m_len or self.mask_mem.shape[0] < len(x):
self.mask_mem = self.mask_x.new_ones(len(x), m_len, 1)
# Concatenate the masks if there is memory
if m_len:
mask = torch.cat((self.mask_mem[:len(x), :m_len], self.mask_x[:len(x), :len(x)]), dim=1)
# Use only the subsequent mask otherwise
else:
mask = self.mask_x[:len(x), :len(x)]
# Token embeddings
x = self.src_embed(x)
# Run it through the transformer
res, mem = self.transformer(x, mem, c_mem, mask)
# Generate logits of the next token
res = self.generator(res)
#
return res, mem
class Configs(NLPAutoRegressionConfigs):
"""
## Configurations
The default configurations can and will be overridden when we start the experiment.
"""
model: AutoregressiveModel
# Token embedding size
d_model: int = 128
# Number of attention heads
heads: int = 4
# Dropout probability
dropout: float = 0.0
# Number of features in FFN hidden layer
d_ff: int = 256
# Number of transformer layers
n_layers: int = 6
# Number of memories to keep
mem_len: int = 8
# State module to maintain memories when switching between training and validation
memory = SimpleStateModule()
# Attention Reconstruction Loss
attention_reconstruction_loss: AttentionReconstructionLoss
# Compression rate
compression_rate: int = 4
# Compressed memory length
c_mem_len: int = 128
def init(self):
# Set tracker configurations
tracker.set_scalar("accuracy.*", True)
tracker.set_scalar("loss.*", True)
# Do not print the attention reconstruction loss in the terminal
tracker.set_scalar("ar_loss.*", False)
# Add a hook to log module outputs
hook_model_outputs(self.mode, self.model, 'model')
# This will keep the accuracy metric stats and memories separate for training and validation.
self.state_modules = [self.accuracy, self.memory]
@torch.no_grad()
def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) \
-> Tuple[CompressedMemory, List[torch.Tensor]]:
"""
Concatenate new memories and compress the oldest memories.
"""
# If the configurations specify not to use memory
if self.mem_len == 0 and self.c_mem_len == 0:
return CompressedMemory([], []), []
# Get memory and compressed memory
if mem is not None:
mem, c_mem = mem.mem, mem.c_mem
else:
mem, c_mem = [], []
# Concatenate new memories with old memory
if mem:
mem = [torch.cat((m, x), dim=0) for m, x in zip(mem, new_mem)]
else:
mem = new_mem
# Compress the oldest memories if there are more memories than `mem_len`
if len(mem[0]) > self.mem_len:
# Calculate the number of compressed memories to make $n_{cm} = \bigg\lceil\frac{n'_m - N_m}{c}\bigg\rceil$,
# where $n'_m$ is the number of memories we have
# and $N_m$ is the maximum number of memories we maintain (`mem_len`).
n_c_mem = (len(mem[0]) - self.mem_len + self.compression_rate - 1) // self.compression_rate
# Number of memories to compress $c n_{cm}$
n_old = n_c_mem * self.compression_rate
# A list to keep memories that need to be compressed for each layer.
mem_to_compress = []
# A list to keep the memories that do not get compressed for each layer.
uncompressed_mem = []
# Iterate through memories of each layer.
for m in mem:
# Split the memories at $c n_{cm}$
cm, m = torch.split(m, [n_old, len(m) - n_old])
# Collect memories to compress
mem_to_compress.append(cm)
# Collect remaining memories
uncompressed_mem.append(m)
# Update the memories
mem = uncompressed_mem
# Compress the memories
new_c_mem = []
for i, layer in enumerate(self.model.transformer.layers):
new_c_mem.append(layer.compress(mem_to_compress[i]))
# Concatenate newly compressed memories with old compressed memories
if c_mem:
c_mem = [torch.cat((m, nm), dim=0) for m, nm in zip(c_mem, new_c_mem)]
# If there are no old compressed memories
else:
c_mem = new_c_mem
# Truncate old memories
if len(c_mem[0]) > self.c_mem_len:
c_mem = [m[-self.c_mem_len:] for m in c_mem]
# No memories are compressed if the number of memories is less than `mem_len`
else:
mem_to_compress = []
# Return memories and the memories that were compressed.
# Memories that were compressed are needed for the reconstruction loss computation.
return CompressedMemory(mem, c_mem), mem_to_compress
def step(self, batch: any, batch_idx: BatchIndex):
"""
### Training/validation step
"""
# Move data to the device
data, target = batch[0].to(self.device), batch[1].to(self.device)
# Update global step (number of tokens processed) when in training mode
if self.mode.is_train:
tracker.add_global_step(data.shape[0] * data.shape[1])
# Whether to capture model outputs
with self.mode.update(is_log_activations=batch_idx.is_last):
# Get memories
mem = self.memory.get()
# Run the model
output, new_mem = self.model(data, mem)
# Merge and compress memory
mem, mem_to_compress = self.merge_compress_memory(mem, new_mem)
# Update memories
self.memory.set(mem)
# Calculate and log cross entropy loss
loss = self.loss_func(output, target)
tracker.add("loss.", loss)
# Calculate attention reconstruction loss if memories were compressed in this step
if mem_to_compress:
# Get attention reconstruction loss
ar_loss = self.attention_reconstruction_loss(new_mem, mem_to_compress)
# Track attention reconstruction loss
tracker.add("ar_loss.", ar_loss)
# Add attention reconstruction loss to loss
loss = loss + ar_loss
# Calculate and log accuracy
self.accuracy(output, target)
self.accuracy.track()
# Train the model
if self.mode.is_train:
# Calculate gradients
loss.backward()
# Clip gradients
torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip)
# Take optimizer step
self.optimizer.step()
# Log the model parameters and gradients on last batch of every epoch
if batch_idx.is_last:
tracker.add('model', self.model)
# Clear the gradients
self.optimizer.zero_grad()
# Save the tracked metrics
tracker.save()
def sample(self):
"""
### Sampling function to generate samples periodically while training
"""
# Starting prompt
prompt = self.prompt
# Collect output for printing
log = [(prompt, Text.subtle)]
# memory
mem = CompressedMemory([], [])
# Sample 25 tokens
for i in monit.iterate('Sample', 25):
# Tokenize the prompt
data = self.text.text_to_i(prompt).unsqueeze(-1)
# Move to device
data = data.to(self.device)
# Get the model output
output, new_mem = self.model(data, mem)
# Get the model prediction (greedy)
output = output.argmax(dim=-1).squeeze(1)
# Add the prediction to prompt
prompt += self.prompt_separator + self.text.itos[output[-1]]
# Only feed the last character to model in next iteration, rest will go in as memories
prompt = prompt[-1:]
# Add the prediction for logging
log += [(self.prompt_separator + self.text.itos[output[-1]], Text.value)]
# Update and compress memory
mem, _ = self.merge_compress_memory(mem, new_mem)
# Print the sampled output
logger.log(log)
@option(Configs.model)
def autoregressive_model(c: Configs):
    """
    ### Initialize the auto-regressive model
    """
    from labml_nn.transformers.xl import RelativeMultiHeadAttention
    from labml_nn.transformers.feed_forward import FeedForward
    # A single transformer layer; the transformer clones it `n_layers` times
    layer = CompressiveTransformerLayer(
        d_model=c.d_model,
        self_attn=RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout),
        feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout),
        dropout_prob=c.dropout,
        compress=Conv1dCompression(c.compression_rate, c.d_model))
    # Stack the layers into the compressive transformer
    transformer = CompressiveTransformer(layer, c.n_layers)
    # Wrap with embeddings and the final generator, then move to the device
    model = AutoregressiveModel(c.n_tokens, c.d_model, transformer)
    return model.to(c.device)
@option(Configs.attention_reconstruction_loss)
def attention_reconstruction_loss(c: Configs):
    """
    ### Initialize the attention reconstruction loss
    """
    # The loss needs each layer in order to replay its attention computation
    # on the compressed memories
    layers = c.model.transformer.layers
    return AttentionReconstructionLoss(layers)
def main():
    """
    ### Run the experiment
    """
    # Create the experiment
    experiment.create(name="compressive_transformer", comment='')
    # Default configurations
    conf = Configs()
    # Configuration overrides for this run
    overrides = {
        'tokenizer': 'character',
        'text': 'tiny_shakespeare',
        'optimizer.learning_rate': 2.5e-4,
        'optimizer.optimizer': 'AdamW',
        'prompt': 'It is',
        'prompt_separator': '',
        'train_loader': 'sequential_train_loader',
        'valid_loader': 'sequential_valid_loader',
        'seq_len': 8,
        'mem_len': 8,
        'epochs': 128,
        'batch_size': 32,
        'inner_iterations': 25,
        'compression_rate': 2,
    }
    # Load configurations with the overrides applied
    experiment.configs(conf, overrides)
    # Register the model for saving and loading
    experiment.add_pytorch_models({'model': conf.model})
    # Start the experiment and run training/validation
    with experiment.start():
        # `TrainValidConfigs.run`
        conf.run()


if __name__ == '__main__':
    main()
| labml_nn/transformers/compressive/experiment.py | 13,020 | ## Auto regressive model
## Configurations
The default configurations can and will be overridden when we start the experiment.
### Initialize the attention reconstruction loss
### Initialize the auto-regressive model
### Run the experiment
Concatenate new memories and compress the oldest memories.
### Sampling function to generate samples periodically while training
### Training/validation step
---
title: Compressive Transformer Experiment
summary: This experiment trains a compressive transformer model on tiny Shakespeare dataset.
---
# Compressive Transformer Experiment
This is an annotated PyTorch experiment to train a compressive transformer model.
Token embedding module Transformer Final layer Masks Get memory and compressed memory Total length of the memory and compressed memory (for masks) Create a subsequent mask for tokens Create an all ones (full visibility) mask for memory Concatenate the masks if there is memory Use only the subsequent mask otherwise Token embeddings Run it through the transformer Generate logits of the next token Token embedding size Number of attention heads Dropout probability Number of features in FFN hidden layer Number of transformer layers Number of memories to keep State module to maintain memories when switching between training and validation Attention Reconstruction Loss Compression rate Compressed memory length Set tracker configurations Do not print the attention reconstruction loss in the terminal Add a hook to log module outputs This will keep the accuracy metric stats and memories separate for training and validation. If the configurations specify not to use memory Get memory and compressed memory Concatenate new memories with old memory Compress the oldest memories if there are more memories than `mem_len` Calculate the number of compressed memories to make $n_{cm} = \bigg\lceil\frac{n'_m - N_m}{c}\bigg\rceil$, where $n'_m$ is the number of memories we have and $N_m$ is the maximum number of memories we maintain (`mem_len`). Number of memories to compress $c n_{cm}$ A list to keep memories that need to be compressed for each layer. A list to keep the memories that do not get compressed for each layer. Iterate through memories of each layer. Split the memories at $c n_{cm}$ Collect memories to compress Collect remaining memories Update the memories Compress the memories Concatenate newly compressed memories with old compressed memories If there are no old compressed memories Truncate old memories No memories are compressed if the number of memories is less than `mem_len` Return memories and the memories that were compressed. 
Memories that were compressed are needed for the reconstruction loss computation. Move data to the device Update global step (number of tokens processed) when in training mode Whether to capture model outputs Get memories Run the model Merge and compress memory Update memories Calculate and log cross entropy loss Calculate attention reconstruction loss if memories were compressed in this step Get attention reconstruction loss Track attention reconstruction loss Add attention reconstruction loss to loss Calculate and log accuracy Train the model Calculate gradients Clip gradients Take optimizer step Log the model parameters and gradients on last batch of every epoch Clear the gradients Save the tracked metrics Starting prompt Collect output for printing memory Sample 25 tokens Tokenize the prompt Move to device Get the model output Get the model prediction (greedy) Add the prediction to prompt Only feed the last character to model in next iteration, rest will go in as memories Add the prediction for logging Update and compress memory Print the sampled output Create experiment Create configs Load configurations A dictionary of configurations to override Set models for saving and loading Start the experiment `TrainValidConfigs.run` | 3,868 | en | 0.788086 |
# coding:utf8
from flask import request
routes = dict()
class ApiServiceBase(type):
    """Metaclass that registers every concrete ApiService subclass in the
    module-level `routes` table, keyed by '/<app>/<resource>/'."""

    def __new__(cls, name, base, attrs):
        # super(type, obj) require isinstance(obj, type)
        return super(ApiServiceBase, cls).__new__(cls, name, base, attrs)

    def __init__(self, name, base, attrs):
        # The abstract base `ApiService` itself is not routable; only
        # concrete subclasses (which define `app` and `resource`) register.
        if name != 'ApiService':
            route = '/' + self.app + '/' + self.resource
            # Non-empty resources get a trailing slash
            if self.resource:
                route += '/'
            routes[route] = {'cls': self}
class ApiService(object):
    """Base class for API resources; dispatches to a method named after the
    lowercase HTTP verb (`get`, `post`, ...)."""
    # Python 2 style metaclass hook (has no effect under Python 3)
    __metaclass__ = ApiServiceBase

    def handle(self):
        # Keep a reference to the current flask request on the instance
        self.request = request
        # Look up e.g. `self.get` / `self.post` from the request method
        handler = getattr(self, self.request.method.lower(), None)
        # NOTE(review): `handler` is None for unsupported verbs, so this
        # raises TypeError rather than returning a 405 — confirm intent.
        return handler()
| flask/api_service.py | 650 | coding:utf8 super(type, obj) require isinstance(obj, type) | 58 | en | 0.53124 |
"""
# EXCEL SHEET COLUMN TITLE
Given a positive integer, return its corresponding column title as appear in an Excel sheet.
For example:
1 -> A
2 -> B
3 -> C
...
26 -> Z
27 -> AA
28 -> AB
...
Example 1:
Input: 1
Output: "A"
Example 2:
Input: 28
Output: "AB"
Example 3:
Input: 701
Output: "ZY"
"""
class Solution:
    def convertToTitle(self, n: int) -> str:
        """Convert a positive integer to its Excel column title
        (1 -> 'A', 26 -> 'Z', 27 -> 'AA', 701 -> 'ZY')."""
        # Collect letters least-significant first, then reverse at the end.
        letters = []
        while n > 0:
            # Shift to a 0-based digit so 26 maps cleanly onto 'Z'.
            n, offset = divmod(n - 1, 26)
            letters.append(chr(ord('A') + offset))
        return ''.join(reversed(letters))
Given a positive integer, return its corresponding column title as appear in an Excel sheet.
For example:
1 -> A
2 -> B
3 -> C
...
26 -> Z
27 -> AA
28 -> AB
...
Example 1:
Input: 1
Output: "A"
Example 2:
Input: 28
Output: "AB"
Example 3:
Input: 701
Output: "ZY" | 329 | en | 0.601338 |
from __future__ import print_function
import random
import struct
import sys
import time
import os
import zmq
from msgpack import ExtType, packb, unpackb
class AlignClient(object):
    """A synchronous Python 2 alignment client talking to a server over ZeroMQ.

    Requests are sent as multipart messages [header, name, args, kwargs]
    where the header packs the client identity (pid + random tag), a
    per-client counter (the request id), and a timestamp.
    """

    # Header layouts: request prefix (pid, random tag), request suffix
    # (counter, timestamp), and the response header
    # (pid, random tag, request id, timestamp, error flag).
    REQ_PREFIX = struct.Struct('=HH')
    REQ_SUFFIX = struct.Struct('=Ld')
    RESP = struct.Struct('=HHLd?')

    def __init__(self, port):
        """Connect a DEALER socket to the aligner on localhost:`port`."""
        self.port = port
        self.ctx = zmq.Context()
        self.sock = self.ctx.socket(zmq.DEALER)
        self.sock.connect('tcp://127.0.0.1:{}'.format(self.port))
        # Per-client request counter; wrapped to 32 bits in _new_id
        self.counter = 0
        # Identity prefix shared by all requests from this client
        self.prefix = self.REQ_PREFIX.pack(os.getpid() % 0x10000,
                                           random.randrange(0x10000))

    def packb(self, data):
        """Serialize `data` with msgpack (UTF-8 strings, binary-safe)."""
        return packb(data, encoding='utf-8', use_bin_type=True)

    def unpackb(self, packed):
        """Deserialize msgpack bytes produced by the server."""
        return unpackb(packed, use_list=False, encoding='utf-8')

    def _new_id(self):
        """Return (header bytes, request id) for a fresh request."""
        self.counter += 1
        # Wrap so the counter always fits the unsigned 32-bit 'L' field
        if self.counter > 0xffffffff:
            self.counter = 0
        return (self.prefix + self.REQ_SUFFIX.pack(self.counter, time.time()),
                self.counter)

    def align(self, sequence):
        """Send `sequence` to the aligner and block until its answer arrives.

        :param sequence: bytes of the sequence to align.
        :returns: the unpacked server answer.
        :raises ValueError: if the response carries a different request id.
        """
        header, req_id = self._new_id()
        bname = 'align'.encode('utf-8')
        bargs = self.packb([sequence.decode()])
        bkwargs = self.packb(dict())
        msg = [header, bname, bargs, bkwargs]
        self.sock.send_multipart(msg)
        while True:
            try:
                data = self.sock.recv_multipart(zmq.NOBLOCK)
            except zmq.ZMQError:
                # Nothing available yet; poll again shortly
                time.sleep(0.1)
            else:
                header, banswer = data
                pid, rnd, res_req_id, timestamp, is_error = self.RESP.unpack(header)
                if res_req_id != req_id:
                    # BUG FIX: the original referenced the undefined name
                    # `res_res_id` here, raising NameError instead of the
                    # intended ValueError on an id mismatch.
                    raise ValueError(
                        'Received response for request {}, but send {}.'.format(
                            res_req_id, req_id))
                answer = self.unpackb(banswer)
                return answer
if __name__ == '__main__':
    # Usage: py2client.py <port> <sequence>
    port, seq = sys.argv[1:3]
    alignments = AlignClient(port).align(seq)
    print(alignments)
| pomoxis/align/py2client.py | 2,106 | A synchronous Python2 alignment client | 38 | en | 0.720643 |
##########################################################################
#
# Copyright (c) 2017, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferUITest
import GafferScene
import GafferSceneUI
import GafferDelight
import GafferDelightUI
class DocumentationTest( GafferUITest.TestCase ) :

	def test( self ) :

		# Show full diffs when documentation assertions fail
		self.maxDiff = None

		# ScenePlug children don't require individual documentation
		terminalPlugTypes = ( GafferScene.ScenePlug, )

		self.assertNodesAreDocumented(
			GafferDelight,
			additionalTerminalPlugTypes = terminalPlugTypes
		)
if __name__ == "__main__":
unittest.main()
| python/GafferDelightUITest/DocumentationTest.py | 2,151 | Copyright (c) 2017, John Haddon. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of John Haddon nor the names of any other contributors to this software may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 1,572 | en | 0.888161 |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import numpy as np
from ._base import Ordination, OrdinationResults
from ._utils import svd_rank
class CA(Ordination):
    r"""Compute correspondence analysis, a multivariate statistical
    technique for ordination.

    In general, rows in the data table will correspond to sites and
    columns to species, but the method is symmetric. In order to
    measure the correspondence between rows and columns, the
    :math:`\chi^2` distance is used, and those distances are preserved
    in the transformed space. The :math:`\chi^2` distance doesn't take
    double zeros into account, and so it is expected to produce better
    ordination than PCA when the data has lots of zero values.

    It is related to Principal Component Analysis (PCA) but it should
    be preferred in the case of steep or long gradients, that is, when
    there are many zeros in the input data matrix.

    Parameters
    ----------
    X : array_like
        Contingency table. It can be applied to different kinds of
        data tables but data must be non-negative and dimensionally
        homogeneous (quantitative or binary).
    row_ids : iterable
        Identifiers for the rows (sites) of `X`.
    column_ids : iterable
        Identifiers for the columns (species) of `X`.

    Notes
    -----
    The algorithm is based on [1]_, \S 9.4.1., and is expected to give
    the same results as ``cca(X)`` in R's package vegan.

    See Also
    --------
    CCA

    References
    ----------
    .. [1] Legendre P. and Legendre L. 1998. Numerical
       Ecology. Elsevier, Amsterdam.
    """
    short_method_name = 'CA'
    # NOTE(review): CA stands for "Correspondence Analysis"; 'Canonical
    # Analysis' looks like a misnomer, but the string may be relied upon
    # elsewhere, so it is left unchanged here — confirm upstream.
    long_method_name = 'Canonical Analysis'

    def __init__(self, X, row_ids, column_ids):
        # Work on a float64 copy so the decomposition below is numerically
        # well-defined regardless of the input dtype.
        self.X = np.asarray(X, dtype=np.float64)
        # Perform the decomposition eagerly; scores() only rescales results.
        self._ca()
        self.row_ids = row_ids
        self.column_ids = column_ids

    def _ca(self):
        """Perform the CA decomposition (Legendre & Legendre 1998, 9.4.1)."""
        X = self.X
        r, c = X.shape

        # The chi-square framework requires non-negative frequencies.
        if X.min() < 0:
            raise ValueError("Input matrix elements must be non-negative.")

        # Step 1 (similar to Pearson chi-square statistic): convert the
        # contingency table into relative frequencies.
        grand_total = X.sum()
        Q = X / grand_total

        column_marginals = Q.sum(axis=0)
        row_marginals = Q.sum(axis=1)
        # Let's store them since they're needed to compute scores
        self.column_marginals = column_marginals
        self.row_marginals = row_marginals

        # Formula 9.32 in Legendre & Legendre (1998). Notice that it's
        # a scaled version of the contribution of each cell towards the
        # Pearson chi-square statistic.
        expected = np.outer(row_marginals, column_marginals)
        Q_bar = (Q - expected) / np.sqrt(expected)  # Eq. 9.32

        # Step 2 (Singular Value Decomposition)
        U_hat, W, Ut = np.linalg.svd(Q_bar, full_matrices=False)
        # Due to the centering, there are at most min(r, c) - 1 non-zero
        # eigenvalues (which are all positive)
        rank = svd_rank(Q_bar.shape, W)
        assert rank <= min(r, c) - 1
        # Keep only the significant axes. W holds the singular values;
        # the eigenvalues reported by scores() are W**2.
        self.U_hat = U_hat[:, :rank]
        self.W = W[:rank]
        self.U = Ut[:rank].T

    def scores(self, scaling):
        r"""Compute site and species scores for different scalings.

        Parameters
        ----------
        scaling : int
            For a more detailed explanation of the interpretation, check
            Legendre & Legendre 1998, section 9.4.3. The notes that
            follow are quick recommendations.

            Scaling type 1 maintains :math:`\chi^2` distances between
            rows (sites): in the transformed space, the euclidean
            distances between rows are equal to the :math:`\chi^2`
            distances between rows in the original space. It should be
            used when studying the ordination of sites. Rows (sites)
            that are near a column (species) have high contributions
            from it.

            Scaling type 2 preserves :math:`\chi^2` distances between
            columns (species), so euclidean distance between columns
            after transformation is equal to :math:`\chi^2` distance
            between columns in the original space. It is best used
            when we are interested in the ordination of species. A
            column (species) that is next to a row (site) means that
            it is more abundant there.

            Other types of scalings are currently not implemented, as
            they're less used by ecologists (Legendre & Legendre 1998,
            p. 456).

            In general, species appearing far from the center of the
            biplot and far from its edges will probably exhibit better
            relationships than species either in the center (may be
            multimodal species, not related to the shown ordination
            axes...) or the edges (sparse species...).

        Returns
        -------
        OrdinationResults
            Object that stores the computed eigenvalues, the
            proportion explained by each of them (per unit),
            transformed coordinates, etc.

        Raises
        ------
        NotImplementedError
            If `scaling` is not 1 or 2.

        See Also
        --------
        OrdinationResults
        """
        if scaling not in {1, 2}:
            raise NotImplementedError(
                "Scaling {0} not implemented.".format(scaling))
        # Both scalings are a bit intertwined, so we'll compute both and
        # then choose
        V = self.column_marginals[:, None]**-0.5 * self.U
        V_hat = self.row_marginals[:, None]**-0.5 * self.U_hat
        F = V_hat * self.W
        # According to Formula 9.43, this should hold
        # assert np.allclose(F, (row_marginals**-1)[:, None] * Q.dot(V))
        # but it doesn't (notice that W**2==Lambda):
        # (9.43a) F = V_hat W = D(p_i+)^{-1/2} U_hat W
        #           = D(p_i+)^{-1/2} Q_bar U W^{-1} W  (substituting 9.38)
        #           = D(p_i+)^{-1/2} Q_bar U
        # (9.43b) F = D(p_i+)^{-1} Q V
        #           = D(p_i+)^{-1} Q D(p_+j)^{-1/2} U  (substituting 9.41)
        #           = D(p_i+)^{-1/2} D(p_i+)^{-1/2} Q D(p_+j)^{-1/2} U
        #           = D(p_i+)^{-1/2} Q_tilde U         (using 9.40)
        # It holds if we replace Q in 9.43b with Q after centering, ie
        # assert np.allclose(
        #     F,
        #     (row_marginals**-1)[:, None] * (Q - expected).dot(V))
        # Comparing results with vegan and the examples in the book, 9.43a
        # is the right one. The same issue happens in 9.44, where also
        # 9.44a is the one that matches vegan's output.
        # (9.44a) F_hat = V W = D(p_+j)^{-1/2} U W
        #               = D(p_+j)^{-1/2} Q_bar' U_hat W^{-1} W (using 9.39)
        #               = D(p_+j)^{-1/2} Q_bar' U_hat
        # (9.44b) F_hat = D(p_+j)^{-1} Q' V_hat
        #               = D(p_+j)^{-1/2} Q_tilde' U_hat (using 9.40 and 9.42)
        F_hat = V * self.W

        # Eigenvalues
        eigvals = self.W**2
        # Species scores
        species_scores = [V, F_hat][scaling - 1]
        # Site scores (weighted averages of species scores)
        site_scores = [F, V_hat][scaling - 1]
        return OrdinationResults(eigvals=eigvals, species=species_scores,
                                 site=site_scores, site_ids=self.row_ids,
                                 species_ids=self.column_ids)
| skbio/stats/ordination/_correspondence_analysis.py | 7,512 | Compute correspondence analysis, a multivariate statistical
technique for ordination.
In general, rows in the data table will correspond to sites and
columns to species, but the method is symmetric. In order to
measure the correspondence between rows and columns, the
:math:`\chi^2` distance is used, and those distances are preserved
in the transformed space. The :math:`\chi^2` distance doesn't take
double zeros into account, and so it is expected to produce better
ordination that PCA when the data has lots of zero values.
It is related to Principal Component Analysis (PCA) but it should
be preferred in the case of steep or long gradients, that is, when
there are many zeros in the input data matrix.
Parameters
----------
X : array_like
Contingency table. It can be applied to different kinds of
data tables but data must be non-negative and dimensionally
homogeneous (quantitative or binary).
Notes
-----
The algorithm is based on [1]_, \S 9.4.1., and is expected to give
the same results as ``cca(X)`` in R's package vegan.
See Also
--------
CCA
References
----------
.. [1] Legendre P. and Legendre L. 1998. Numerical
Ecology. Elsevier, Amsterdam.
Compute site and species scores for different scalings.
Parameters
----------
scaling : int
For a more detailed explanation of the interpretation, check
Legendre & Legendre 1998, section 9.4.3. The notes that
follow are quick recommendations.
Scaling type 1 maintains :math:`\chi^2` distances between
rows (sites): in the transformed space, the euclidean
distances between rows are equal to the :math:`\chi^2`
distances between rows in the original space. It should be
used when studying the ordination of sites. Rows (sites)
that are near a column (species) have high contributions
from it.
Scaling type 2 preserves :math:`\chi^2` distances between
columns (species), so euclidean distance between columns
after transformation is equal to :math:`\chi^2` distance
between columns in the original space. It is best used
when we are interested in the ordination of species. A
column (species) that is next to a row (site) means that
it is more abundant there.
Other types of scalings are currently not implemented, as
they're less used by ecologists (Legendre & Legendre 1998,
p. 456).
In general, species appearing far from the center of the
biplot and far from its edges will probably exhibit better
relationships than species either in the center (may be
multimodal species, not related to the shown ordination
axes...) or the edges (sparse species...).
Returns
-------
OrdinationResults
Object that stores the computed eigenvalues, the
proportion explained by each of them (per unit),
transformed coordinates, etc.
See Also
--------
OrdinationResults
---------------------------------------------------------------------------- Copyright (c) 2013--, scikit-bio development team. Distributed under the terms of the Modified BSD License. The full license is in the file COPYING.txt, distributed with this software. ---------------------------------------------------------------------------- Step 1 (similar to Pearson chi-square statistic) Let's store them since they're needed to compute scores Formula 9.32 in Lagrange & Lagrange (1998). Notice that it's an scaled version of the contribution of each cell towards Pearson chi-square statistic. Eq. 9.32 Step 2 (Singular Value Decomposition) Due to the centering, there are at most min(r, c) - 1 non-zero eigenvalues (which are all positive) Both scalings are a bit intertwined, so we'll compute both and then choose According to Formula 9.43, this should hold assert np.allclose(F, (row_marginals**-1)[:, None] * Q.dot(V)) but it doesn't (notice that W**2==Lambda): (9.43a) F = V_hat W = D(p_i+)^{-1/2} U_hat W = D(p_i+)^{-1/2} Q_bar U W^{-1} W (substituting 9.38) = D(p_i+)^{-1/2} Q_bar U (9.43b) F = D(p_i+)^{-1} Q V = D(p_i+)^{-1} Q D(p_+j)^{-1/2} U (substituting 9.41) = D(p_i+)^{-1/2} D(p_i+)^{-1/2} Q D(p_+j)^{-1/2} U = D(p_i+)^{-1/2} Q_tilde U (using 9.40) It holds if we replace Q in 9.43b with Q after centering, ie assert np.allclose( F, (row_marginals**-1)[:, None] * (Q - expected).dot(V)) Comparing results with vegan and the examples in the book, 9.43a is the right one. The same issue happens in 9.44, where also 9.44a is the one that matches vegan's output. (9.44a) F_hat = V W = D(p_+j)^{-1/2} U W = D(p_+j)^{-1/2} Q_bar' U_hat W^{-1} W (using 9.39) = D(p_+j)^{-1/2} Q_bar' U_hat (9.44b) F_hat = D(p_+j)^{-1} Q' V_hat = D(p_+j)^{-1/2} Q_tilde' U_hat (using 9.40 and 9.42) Eigenvalues Species scores Site scores (weighted averages of species scores) | 4,828 | en | 0.817447 |
"""
Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (version 1.1.1),
which is included in SZpack.v1.1.1 and will be automatically installed.
Website for the SZpack library: http://www.chluba.de/SZpack/
For details on the computations involved please refer to the following references:
Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206
Many thanks to John ZuHone, who wrote the yt part of this model.
"""
import numpy as np
from pymsz.SZpacklib import SZpack
# I0 = (2 * (kboltz * Tcmb)**3 / ((hcgs * clight)**2) / units.sr).in_units("MJy/steradian")
class SZpack_model(object):
    r"""Theoretical calculation of SZ spectral distortion maps using SZpack.

    model = SZpack_model(simudata, freqs, npixel, axis)

    Parameters
    ----------
    simudata : the simulation data from load_data
    freqs : The frequencies (in GHz) at which to compute the SZ spectral
            distortion. array_like
    npixel : number of pixels for your image, int.
             Assume that x-y have the same number of pixels
    axis : can be 'x', 'y', 'z', or a list of degrees [alpha, beta, gamma],
           which will rotate the data points by $\alpha$ around the x-axis,
           $\beta$ around the y-axis, and $\gamma$ around the z-axis
    neighbours : this parameter only works with simulation data (not yt data).
                 If this is set, it will force the SPH particles smoothed into
                 nearby N neighbours, HSML from the simulation will be ignored.
                 If no HSML provided in the simulation, neighbours = 27
    AR : angular resolution in arcsec.
         Default : None, which gives npixel = 2 * cluster radius
         and ignores the cluster's redshift.
         Otherwise, cluster's redshift with AR decides how large the
         cluster looks.
    redshift : The redshift where the cluster is at.
               Default : None, we will look it from simulation data.
               If redshift = 0, it will be automatically put into 0.02,
               unless AR is set to None.

    Returns
    -------
    Theoretical projected y-map in a given direction. 2D mesh data right now.

    See also
    --------
    SZ_models for the mock SZ signal at different frequencies.

    Examples
    --------
    >>> freqs = [90., 180., 240.]
    >>> szprj = SZpack_model(simudata, freqs)
    """

    def __init__(self, simudata, freqs, npixel=500, neighbours=None, axis='z', AR=None,
                 redshift=None):
        # Image size in pixels (square image assumed)
        self.npl = npixel
        # Forced SPH neighbour count; None keeps the simulation's HSML
        self.ngb = neighbours
        # Projection axis ('x'/'y'/'z') or rotation angles
        self.ax = axis
        # Angular resolution in arcsec (None -> ignore the redshift)
        self.ar = AR
        self.red = redshift
        # Pixel size; filled in by the calculation routines
        self.pxs = 0
        # Projected map data
        self.ydata = np.array([])
        self.freqs = np.asarray(freqs)

        # Dispatch on the kind of data produced by load_data.
        # NOTE(review): _cal_ss/_cal_yt are commented out below, so these
        # calls raise AttributeError until they are restored — confirm.
        if simudata.data_type == "snapshot":
            self._cal_ss(simudata)
        elif simudata.data_type == "yt_data":
            self._cal_yt(simudata)
        else:
            # BUG FIX: the two adjacent string literals used to concatenate
            # without any separator ("...%sPlease try...").
            raise ValueError("Do not accept this data type %s. "
                             "Please try to use load_data to get the data"
                             % simudata.data_type)
# def _cal_ss(self, simd):
# Kpc = 3.0856775809623245e+21 # cm
# simd.prep_ss_SZ()
#
# def _cal_yt(self, simd):
# from yt.config import ytcfg
# from yt.utilities.physical_constants import sigma_thompson, clight, mh
# # kboltz, Tcmb, hcgs,
# from yt.funcs import fix_axis, get_pbar
# from yt.visualization.volume_rendering.off_axis_projection import \
# off_axis_projection
# from yt.utilities.parallel_tools.parallel_analysis_interface import \
# communication_system, parallel_root_only
# # from yt import units
# from yt.utilities.on_demand_imports import _astropy
#
# def generate_beta_par(L):
# def _beta_par(field, data):
# vpar = data["density"] * (data["velocity_x"] * L[0] +
# data["velocity_y"] * L[1] +
# data["velocity_z"] * L[2])
# return vpar / clight
# return _beta_par
# Ptype = simd.prep_yt_SZ()
#
# # self.ds = ds
# # self.num_freqs = len(freqs)
# # self.high_order = high_order
# # self.freqs = ds.arr(freqs, "GHz")
# # self.mueinv = 1. / mue
# # self.xinit = hcgs * self.freqs.in_units("Hz") / (kboltz * Tcmb)
# # self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs]
# # self.data = {}
# #
# # self.display_names = {}
# # self.display_names["TeSZ"] = r"$\mathrm{T_e}$"
# # self.display_names["Tau"] = r"$\mathrm{\tau}$"
# #
# # for f, field in zip(self.freqs, self.freq_fields):
# # self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % int(f)
# #
# # def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None):
# # r""" Make an on-axis projection of the SZ signal.
# #
# # Parameters
# # ----------
# # axis : integer or string
# # The axis of the simulation domain along which to make the SZprojection.
# # center : A sequence of floats, a string, or a tuple.
# # The coordinate of the center of the image. If set to 'c', 'center' or
# # left blank, the plot is centered on the middle of the domain. If set to
# # 'max' or 'm', the center will be located at the maximum of the
# # ('gas', 'density') field. Centering on the max or min of a specific
# # field is supported by providing a tuple such as ("min","temperature") or
# # ("max","dark_matter_density"). Units can be specified by passing in *center*
# # as a tuple containing a coordinate and string unit name or by passing
# # in a YTArray. If a list or unitless array is supplied, code units are
# # assumed.
# # width : tuple or a float.
# # Width can have four different formats to support windows with variable
# # x and y widths. They are:
# #
# # ================================== =======================
# # format example
# # ================================== =======================
# # (float, string) (10,'kpc')
# # ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
# # float 0.2
# # (float, float) (0.2, 0.3)
# # ================================== =======================
# #
# # For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
# # wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
# # window that is 10 kiloparsecs wide along the x axis and 15
# # kiloparsecs wide along the y axis. In the other two examples, code
# # units are assumed, for example (0.2, 0.3) requests a plot that has an
# # x width of 0.2 and a y width of 0.3 in code units. If units are
# # provided the resulting plot axis labels will use the supplied units.
# # nx : integer, optional
# # The dimensions on a side of the projection image.
# # source : yt.data_objects.data_containers.YTSelectionContainer, optional
# # If specified, this will be the data source used for selecting regions to project.
# #
# # Examples
# # --------
# # >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere)
# # """
#
# axis = fix_axis(axis, self.ds)
# ctr, dctr = self.ds.coordinates.sanitize_center(center, axis)
# width = self.ds.coordinates.sanitize_width(axis, width, None)
#
# L = np.zeros(3)
# L[axis] = 1.0
#
# beta_par = generate_beta_par(L)
# self.ds.add_field(("gas", "beta_par"), function=beta_par, units="g/cm**3")
# setup_sunyaev_zeldovich_fields(self.ds)
# proj = self.ds.proj("density", axis, center=ctr, data_source=source)
# frb = proj.to_frb(width[0], nx, height=width[1])
# dens = frb["density"]
# Te = frb["t_sz"] / dens
# bpar = frb["beta_par"] / dens
# omega1 = frb["t_squared"] / dens / (Te * Te) - 1.
# bperp2 = np.zeros((nx, nx))
# sigma1 = np.zeros((nx, nx))
# kappa1 = np.zeros((nx, nx))
# if self.high_order:
# bperp2 = frb["beta_perp_squared"] / dens
# sigma1 = frb["t_beta_par"] / dens / Te - bpar
# kappa1 = frb["beta_par_squared"] / dens - bpar * bpar
# tau = sigma_thompson * dens * self.mueinv / mh
#
# nx, ny = frb.buff_size
# self.bounds = frb.bounds
# self.dx = (frb.bounds[1] - frb.bounds[0]) / nx
# self.dy = (frb.bounds[3] - frb.bounds[2]) / ny
# self.nx = nx
#
# self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
# np.array(omega1), np.array(sigma1),
# np.array(kappa1), np.array(bperp2))
#
# self.ds.field_info.pop(("gas", "beta_par"))
#
# def off_axis(self, L, center="c", width=(1.0, "unitary"), depth=(1.0, "unitary"),
# nx=800, nz=800, north_vector=None, no_ghost=False, source=None):
# r""" Make an off-axis projection of the SZ signal.
#
# Parameters
# ----------
# L : array_like
# The normal vector of the projection.
# center : A sequence of floats, a string, or a tuple.
# The coordinate of the center of the image. If set to 'c', 'center' or
# left blank, the plot is centered on the middle of the domain. If set to
# 'max' or 'm', the center will be located at the maximum of the
# ('gas', 'density') field. Centering on the max or min of a specific
# field is supported by providing a tuple such as ("min","temperature") or
# ("max","dark_matter_density"). Units can be specified by passing in *center*
# as a tuple containing a coordinate and string unit name or by passing
# in a YTArray. If a list or unitless array is supplied, code units are
# assumed.
# width : tuple or a float.
# Width can have four different formats to support windows with variable
# x and y widths. They are:
#
# ================================== =======================
# format example
# ================================== =======================
# (float, string) (10,'kpc')
# ((float, string), (float, string)) ((10,'kpc'),(15,'kpc'))
# float 0.2
# (float, float) (0.2, 0.3)
# ================================== =======================
#
# For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs
# wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a
# window that is 10 kiloparsecs wide along the x axis and 15
# kiloparsecs wide along the y axis. In the other two examples, code
# units are assumed, for example (0.2, 0.3) requests a plot that has an
# x width of 0.2 and a y width of 0.3 in code units. If units are
# provided the resulting plot axis labels will use the supplied units.
# depth : A tuple or a float
# A tuple containing the depth to project through and the string
# key of the unit: (width, 'unit'). If set to a float, code units
# are assumed
# nx : integer, optional
# The dimensions on a side of the projection image.
# nz : integer, optional
# Deprecated, this is still in the function signature for API
# compatibility
# north_vector : a sequence of floats
# A vector defining the 'up' direction in the plot. This
# option sets the orientation of the slicing plane. If not
# set, an arbitrary grid-aligned north-vector is chosen.
# no_ghost: bool, optional
# Optimization option for off-axis cases. If True, homogenized bricks will
# extrapolate out from grid instead of interpolating from
# ghost zones that have to first be calculated. This can
# lead to large speed improvements, but at a loss of
# accuracy/smoothness in resulting image. The effects are
# less notable when the transfer function is smooth and
# broad. Default: True
# source : yt.data_objects.data_containers.YTSelectionContainer, optional
# If specified, this will be the data source used for selecting regions
# to project.
#
# Examples
# --------
# >>> L = np.array([0.5, 1.0, 0.75])
# >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc"))
# """
# wd = self.ds.coordinates.sanitize_width(L, width, depth)
# w = tuple(el.in_units('code_length').v for el in wd)
# ctr, dctr = self.ds.coordinates.sanitize_center(center, L)
# res = (nx, nx)
#
# if source is None:
# source = self.ds
#
# beta_par = generate_beta_par(L)
# self.ds.add_field(("gas", "beta_par"), function=beta_par, units="g/cm**3")
# setup_sunyaev_zeldovich_fields(self.ds)
#
# dens = off_axis_projection(source, ctr, L, w, res, "density",
# north_vector=north_vector, no_ghost=no_ghost)
# Te = off_axis_projection(source, ctr, L, w, res, "t_sz",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# bpar = off_axis_projection(source, ctr, L, w, res, "beta_par",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# omega1 = off_axis_projection(source, ctr, L, w, res, "t_squared",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# omega1 = omega1 / (Te * Te) - 1.
# if self.high_order:
# bperp2 = off_axis_projection(source, ctr, L, w, res, "beta_perp_squared",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# sigma1 = off_axis_projection(source, ctr, L, w, res, "t_beta_par",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# sigma1 = sigma1 / Te - bpar
# kappa1 = off_axis_projection(source, ctr, L, w, res, "beta_par_squared",
# north_vector=north_vector, no_ghost=no_ghost) / dens
# kappa1 -= bpar
# else:
# bperp2 = np.zeros((nx, nx))
# sigma1 = np.zeros((nx, nx))
# kappa1 = np.zeros((nx, nx))
# tau = sigma_thompson * dens * self.mueinv / mh
#
# self.bounds = (-0.5 * wd[0], 0.5 * wd[0], -0.5 * wd[1], 0.5 * wd[1])
# self.dx = wd[0] / nx
# self.dy = wd[1] / nx
# self.nx = nx
#
# self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar),
# np.array(omega1), np.array(sigma1),
# np.array(kappa1), np.array(bperp2))
#
# self.ds.field_info.pop(("gas", "beta_par"))
#
# def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2):
#
# # Bad hack, but we get NaNs if we don't do something like this
# small_beta = np.abs(bpar) < 1.0e-20
# bpar[small_beta] = 1.0e-20
#
# comm = communication_system.communicators[-1]
#
# nx, ny = self.nx, self.nx
# signal = np.zeros((self.num_freqs, nx, ny))
# xo = np.zeros(self.num_freqs)
#
# k = int(0)
#
# start_i = comm.rank * nx // comm.size
# end_i = (comm.rank + 1) * nx // comm.size
#
# pbar = get_pbar("Computing SZ signal.", nx * nx)
#
# for i in range(start_i, end_i):
# for j in range(ny):
# xo[:] = self.xinit[:]
# SZpack.compute_combo_means(xo, tau[i, j], Te[i, j],
# bpar[i, j], omega1[i, j],
# sigma1[i, j], kappa1[i, j], bperp2[i, j])
# signal[:, i, j] = xo[:]
# pbar.update(k)
# k += 1
#
# signal = comm.mpi_allreduce(signal)
#
# pbar.finish()
#
# for i, field in enumerate(self.freq_fields):
# self.data[field] = I0 * self.xinit[i]**3 * signal[i, :, :]
# self.data["Tau"] = self.ds.arr(tau, "dimensionless")
# self.data["TeSZ"] = self.ds.arr(Te, "keV")
#
# def write_fits(self, filename, sky_scale=None, sky_center=None, clobber=True):
# r""" Export images to a FITS file. Writes the SZ distortion in all
# specified frequencies as well as the mass-weighted temperature and the
# optical depth. Distance units are in kpc, unless *sky_center*
# and *scale* are specified.
#
# Parameters
# ----------
# filename : string
# The name of the FITS file to be written.
# sky_scale : tuple
# Conversion between an angle unit and a length unit, if sky
# coordinates are desired, e.g. (1.0, "arcsec/kpc")
# sky_center : tuple, optional
# The (RA, Dec) coordinate in degrees of the central pixel. Must
# be specified with *sky_scale*.
# clobber : boolean, optional
# If the file already exists, do we overwrite?
#
# Examples
# --------
# >>> # This example just writes out a FITS file with kpc coords
# >>> szprj.write_fits("SZbullet.fits", clobber=False)
# >>> # This example uses sky coords
# >>> sky_scale = (1., "arcsec/kpc") # One arcsec per kpc
# >>> sky_center = (30., 45., "deg")
# >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale)
# """
# from yt.visualization.fits_image import FITSImageData
#
# dx = self.dx.in_units("kpc")
# dy = dx
#
# w = _astropy.pywcs.WCS(naxis=2)
# w.wcs.crpix = [0.5 * (self.nx + 1)] * 2
# w.wcs.cdelt = [dx.v, dy.v]
# w.wcs.crval = [0.0, 0.0]
# w.wcs.cunit = ["kpc"] * 2
# w.wcs.ctype = ["LINEAR"] * 2
#
# fib = FITSImageData(self.data, fields=self.data.keys(), wcs=w)
# if sky_scale is not None and sky_center is not None:
# fib.create_sky_wcs(sky_center, sky_scale)
# fib.writeto(filename, clobber=clobber)
#
# @parallel_root_only
# def write_png(self, filename_prefix, cmap_name=None,
# axes_units="kpc", log_fields=None):
# r""" Export images to PNG files. Writes the SZ distortion in all
# specified frequencies as well as the mass-weighted temperature and the
# optical depth. Distance units are in kpc.
#
# Parameters
# ----------
# filename_prefix : string
# The prefix of the image filenames.
#
# Examples
# --------
# >>> szprj.write_png("SZsloshing")
# """
# if cmap_name is None:
# cmap_name = ytcfg.get("yt", "default_colormap")
#
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
# if log_fields is None:
# log_fields = {}
# ticks_font = matplotlib.font_manager.FontProperties(family='serif', size=16)
# extent = tuple([bound.in_units(axes_units).value for bound in self.bounds])
# for field, image in self.items():
# data = image.copy()
# vmin, vmax = image.min(), image.max()
# negative = False
# crossover = False
# if vmin < 0 and vmax < 0:
# data *= -1
# negative = True
# if field in log_fields:
# log_field = log_fields[field]
# else:
# log_field = True
# if log_field:
# formatter = matplotlib.ticker.LogFormatterMathtext()
# norm = matplotlib.colors.LogNorm()
# if vmin < 0 and vmax > 0:
# crossover = True
# linthresh = min(vmax, -vmin) / 100.
# norm = matplotlib.colors.SymLogNorm(linthresh,
# vmin=vmin, vmax=vmax)
# else:
# norm = None
# formatter = None
# filename = filename_prefix + "_" + field + ".png"
# cbar_label = self.display_names[field]
# units = self.data[field].units.latex_representation()
# if units is not None and units != "":
# cbar_label += r'$\ \ (' + units + r')$'
# fig = plt.figure(figsize=(10.0, 8.0))
# ax = fig.add_subplot(111)
# cax = ax.imshow(data.d, norm=norm, extent=extent, cmap=cmap_name, origin="lower")
# for label in ax.get_xticklabels():
# label.set_fontproperties(ticks_font)
# for label in ax.get_yticklabels():
# label.set_fontproperties(ticks_font)
# ax.set_xlabel(r"$\mathrm{x\ (%s)}$" % axes_units, fontsize=16)
# ax.set_ylabel(r"$\mathrm{y\ (%s)}$" % axes_units, fontsize=16)
# cbar = fig.colorbar(cax, format=formatter)
# cbar.ax.set_ylabel(cbar_label, fontsize=16)
# if negative:
# cbar.ax.set_yticklabels(["-" + label.get_text()
# for label in cbar.ax.get_yticklabels()])
# if crossover:
# yticks = list(-10**np.arange(np.floor(np.log10(-vmin)),
# np.rint(np.log10(linthresh)) - 1, -1)) + [0] + \
# list(10**np.arange(np.rint(np.log10(linthresh)),
# np.ceil(np.log10(vmax)) + 1))
# cbar.set_ticks(yticks)
# for label in cbar.ax.get_yticklabels():
# label.set_fontproperties(ticks_font)
# fig.tight_layout()
# plt.savefig(filename)
#
# @parallel_root_only
# def write_hdf5(self, filename):
# r"""Export the set of S-Z fields to a set of HDF5 datasets.
#
# Parameters
# ----------
# filename : string
# This file will be opened in "write" mode.
#
# Examples
# --------
# >>> szprj.write_hdf5("SZsloshing.h5")
# """
# for field, data in self.items():
# data.write_hdf5(filename, dataset_name=field)
#
# def keys(self):
# return self.data.keys()
#
# def items(self):
# return self.data.items()
#
# def values(self):
# return self.data.values()
#
# def has_key(self, key):
# return key in self.data.keys()
#
# def __getitem__(self, key):
# return self.data[key]
#
# @property
# def shape(self):
# return (self.nx, self.nx)
| pymsz/SZpack_models.py | 24,244 | Theoretical calculation of y and T_sz -map for the thermal SZ effect.
model = TH_model(model_file, npixel, axis)
Parameters
----------
simudata : the simulation data from load_data
freqs : The frequencies (in GHz) at which to compute the SZ spectral distortion. array_like
npixel : number of pixels for your image, int.
Assume that x-y have the same number of pixels
axis : can be 'x', 'y', 'z', or a list of degrees [alpha, beta, gamma],
which will rotate the data points by $\alpha$ around the x-axis,
$\beta$ around the y-axis, and $\gamma$ around the z-axis
neighbours: this parameter only works with simulation data (not yt data).
If this is set, it will force the SPH particles smoothed into nearby N
neighbours, HSML from the simulation will be ignored.
If no HSML provided in the simulation, neighbours = 27
AR : angular resolution in arcsec.
Default : None, which gives npixel = 2 * cluster radius
and ignores the cluster's redshift.
Otherwise, cluster's redshift with AR decides how large the cluster looks.
redshift : The redshift where the cluster is at.
              Default : None, we will look it up from the simulation data.
If redshift = 0, it will be automatically put into 0.02,
unless AR is set to None.
high_order : boolean, optional
Should we calculate high-order moments of velocity and temperature?
Returns
-------
Theoretical projected y-map in a given direction. 2D mesh data right now.
See also
--------
SZ_models for the mock SZ signal at different frequencies.
Notes
-----
Examples
--------
>>> freqs = [90., 180., 240.]
>>> szprj = SZProjection(ds, freqs, high_order=True)
Projection class for the Sunyaev-Zeldovich effect. Requires SZpack (version 1.1.1),
which is included in SZpack.v1.1.1 and will be automatically installed.
Website for the SZpack library: http://www.chluba.de/SZpack/
For details on the computations involved please refer to the following references:
Chluba, Nagai, Sazonov, Nelson, MNRAS, 2012, arXiv:1205.5778
Chluba, Switzer, Nagai, Nelson, MNRAS, 2012, arXiv:1211.3206
Many thanks to John ZuHone, who wrote the yt part of this model.
I0 = (2 * (kboltz * Tcmb)**3 / ((hcgs * clight)**2) / units.sr).in_units("MJy/steradian") def _cal_ss(self, simd): Kpc = 3.0856775809623245e+21 cm simd.prep_ss_SZ() def _cal_yt(self, simd): from yt.config import ytcfg from yt.utilities.physical_constants import sigma_thompson, clight, mh kboltz, Tcmb, hcgs, from yt.funcs import fix_axis, get_pbar from yt.visualization.volume_rendering.off_axis_projection import \ off_axis_projection from yt.utilities.parallel_tools.parallel_analysis_interface import \ communication_system, parallel_root_only from yt import units from yt.utilities.on_demand_imports import _astropy def generate_beta_par(L): def _beta_par(field, data): vpar = data["density"] * (data["velocity_x"] * L[0] + data["velocity_y"] * L[1] + data["velocity_z"] * L[2]) return vpar / clight return _beta_par Ptype = simd.prep_yt_SZ() self.ds = ds self.num_freqs = len(freqs) self.high_order = high_order self.freqs = ds.arr(freqs, "GHz") self.mueinv = 1. / mue self.xinit = hcgs * self.freqs.in_units("Hz") / (kboltz * Tcmb) self.freq_fields = ["%d_GHz" % (int(freq)) for freq in freqs] self.data = {} self.display_names = {} self.display_names["TeSZ"] = r"$\mathrm{T_e}$" self.display_names["Tau"] = r"$\mathrm{\tau}$" for f, field in zip(self.freqs, self.freq_fields): self.display_names[field] = r"$\mathrm{\Delta{I}_{%d\ GHz}}$" % int(f) def on_axis(self, axis, center="c", width=(1, "unitary"), nx=800, source=None): r""" Make an on-axis projection of the SZ signal. Parameters ---------- axis : integer or string The axis of the simulation domain along which to make the SZprojection. center : A sequence of floats, a string, or a tuple. The coordinate of the center of the image. If set to 'c', 'center' or left blank, the plot is centered on the middle of the domain. If set to 'max' or 'm', the center will be located at the maximum of the ('gas', 'density') field. 
Centering on the max or min of a specific field is supported by providing a tuple such as ("min","temperature") or ("max","dark_matter_density"). Units can be specified by passing in *center* as a tuple containing a coordinate and string unit name or by passing in a YTArray. If a list or unitless array is supplied, code units are assumed. width : tuple or a float. Width can have four different formats to support windows with variable x and y widths. They are: ================================== ======================= format example ================================== ======================= (float, string) (10,'kpc') ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) float 0.2 (float, float) (0.2, 0.3) ================================== ======================= For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along the y axis. In the other two examples, code units are assumed, for example (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 in code units. If units are provided the resulting plot axis labels will use the supplied units. nx : integer, optional The dimensions on a side of the projection image. source : yt.data_objects.data_containers.YTSelectionContainer, optional If specified, this will be the data source used for selecting regions to project. 
Examples -------- >>> szprj.on_axis("y", center="max", width=(1.0, "Mpc"), source=my_sphere) """ axis = fix_axis(axis, self.ds) ctr, dctr = self.ds.coordinates.sanitize_center(center, axis) width = self.ds.coordinates.sanitize_width(axis, width, None) L = np.zeros(3) L[axis] = 1.0 beta_par = generate_beta_par(L) self.ds.add_field(("gas", "beta_par"), function=beta_par, units="g/cm**3") setup_sunyaev_zeldovich_fields(self.ds) proj = self.ds.proj("density", axis, center=ctr, data_source=source) frb = proj.to_frb(width[0], nx, height=width[1]) dens = frb["density"] Te = frb["t_sz"] / dens bpar = frb["beta_par"] / dens omega1 = frb["t_squared"] / dens / (Te * Te) - 1. bperp2 = np.zeros((nx, nx)) sigma1 = np.zeros((nx, nx)) kappa1 = np.zeros((nx, nx)) if self.high_order: bperp2 = frb["beta_perp_squared"] / dens sigma1 = frb["t_beta_par"] / dens / Te - bpar kappa1 = frb["beta_par_squared"] / dens - bpar * bpar tau = sigma_thompson * dens * self.mueinv / mh nx, ny = frb.buff_size self.bounds = frb.bounds self.dx = (frb.bounds[1] - frb.bounds[0]) / nx self.dy = (frb.bounds[3] - frb.bounds[2]) / ny self.nx = nx self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar), np.array(omega1), np.array(sigma1), np.array(kappa1), np.array(bperp2)) self.ds.field_info.pop(("gas", "beta_par")) def off_axis(self, L, center="c", width=(1.0, "unitary"), depth=(1.0, "unitary"), nx=800, nz=800, north_vector=None, no_ghost=False, source=None): r""" Make an off-axis projection of the SZ signal. Parameters ---------- L : array_like The normal vector of the projection. center : A sequence of floats, a string, or a tuple. The coordinate of the center of the image. If set to 'c', 'center' or left blank, the plot is centered on the middle of the domain. If set to 'max' or 'm', the center will be located at the maximum of the ('gas', 'density') field. 
Centering on the max or min of a specific field is supported by providing a tuple such as ("min","temperature") or ("max","dark_matter_density"). Units can be specified by passing in *center* as a tuple containing a coordinate and string unit name or by passing in a YTArray. If a list or unitless array is supplied, code units are assumed. width : tuple or a float. Width can have four different formats to support windows with variable x and y widths. They are: ================================== ======================= format example ================================== ======================= (float, string) (10,'kpc') ((float, string), (float, string)) ((10,'kpc'),(15,'kpc')) float 0.2 (float, float) (0.2, 0.3) ================================== ======================= For example, (10, 'kpc') requests a plot window that is 10 kiloparsecs wide in the x and y directions, ((10,'kpc'),(15,'kpc')) requests a window that is 10 kiloparsecs wide along the x axis and 15 kiloparsecs wide along the y axis. In the other two examples, code units are assumed, for example (0.2, 0.3) requests a plot that has an x width of 0.2 and a y width of 0.3 in code units. If units are provided the resulting plot axis labels will use the supplied units. depth : A tuple or a float A tuple containing the depth to project through and the string key of the unit: (width, 'unit'). If set to a float, code units are assumed nx : integer, optional The dimensions on a side of the projection image. nz : integer, optional Deprecated, this is still in the function signature for API compatibility north_vector : a sequence of floats A vector defining the 'up' direction in the plot. This option sets the orientation of the slicing plane. If not set, an arbitrary grid-aligned north-vector is chosen. no_ghost: bool, optional Optimization option for off-axis cases. If True, homogenized bricks will extrapolate out from grid instead of interpolating from ghost zones that have to first be calculated. 
This can lead to large speed improvements, but at a loss of accuracy/smoothness in resulting image. The effects are less notable when the transfer function is smooth and broad. Default: True source : yt.data_objects.data_containers.YTSelectionContainer, optional If specified, this will be the data source used for selecting regions to project. Examples -------- >>> L = np.array([0.5, 1.0, 0.75]) >>> szprj.off_axis(L, center="c", width=(2.0, "Mpc")) """ wd = self.ds.coordinates.sanitize_width(L, width, depth) w = tuple(el.in_units('code_length').v for el in wd) ctr, dctr = self.ds.coordinates.sanitize_center(center, L) res = (nx, nx) if source is None: source = self.ds beta_par = generate_beta_par(L) self.ds.add_field(("gas", "beta_par"), function=beta_par, units="g/cm**3") setup_sunyaev_zeldovich_fields(self.ds) dens = off_axis_projection(source, ctr, L, w, res, "density", north_vector=north_vector, no_ghost=no_ghost) Te = off_axis_projection(source, ctr, L, w, res, "t_sz", north_vector=north_vector, no_ghost=no_ghost) / dens bpar = off_axis_projection(source, ctr, L, w, res, "beta_par", north_vector=north_vector, no_ghost=no_ghost) / dens omega1 = off_axis_projection(source, ctr, L, w, res, "t_squared", north_vector=north_vector, no_ghost=no_ghost) / dens omega1 = omega1 / (Te * Te) - 1. 
if self.high_order: bperp2 = off_axis_projection(source, ctr, L, w, res, "beta_perp_squared", north_vector=north_vector, no_ghost=no_ghost) / dens sigma1 = off_axis_projection(source, ctr, L, w, res, "t_beta_par", north_vector=north_vector, no_ghost=no_ghost) / dens sigma1 = sigma1 / Te - bpar kappa1 = off_axis_projection(source, ctr, L, w, res, "beta_par_squared", north_vector=north_vector, no_ghost=no_ghost) / dens kappa1 -= bpar else: bperp2 = np.zeros((nx, nx)) sigma1 = np.zeros((nx, nx)) kappa1 = np.zeros((nx, nx)) tau = sigma_thompson * dens * self.mueinv / mh self.bounds = (-0.5 * wd[0], 0.5 * wd[0], -0.5 * wd[1], 0.5 * wd[1]) self.dx = wd[0] / nx self.dy = wd[1] / nx self.nx = nx self._compute_intensity(np.array(tau), np.array(Te), np.array(bpar), np.array(omega1), np.array(sigma1), np.array(kappa1), np.array(bperp2)) self.ds.field_info.pop(("gas", "beta_par")) def _compute_intensity(self, tau, Te, bpar, omega1, sigma1, kappa1, bperp2): Bad hack, but we get NaNs if we don't do something like this small_beta = np.abs(bpar) < 1.0e-20 bpar[small_beta] = 1.0e-20 comm = communication_system.communicators[-1] nx, ny = self.nx, self.nx signal = np.zeros((self.num_freqs, nx, ny)) xo = np.zeros(self.num_freqs) k = int(0) start_i = comm.rank * nx // comm.size end_i = (comm.rank + 1) * nx // comm.size pbar = get_pbar("Computing SZ signal.", nx * nx) for i in range(start_i, end_i): for j in range(ny): xo[:] = self.xinit[:] SZpack.compute_combo_means(xo, tau[i, j], Te[i, j], bpar[i, j], omega1[i, j], sigma1[i, j], kappa1[i, j], bperp2[i, j]) signal[:, i, j] = xo[:] pbar.update(k) k += 1 signal = comm.mpi_allreduce(signal) pbar.finish() for i, field in enumerate(self.freq_fields): self.data[field] = I0 * self.xinit[i]**3 * signal[i, :, :] self.data["Tau"] = self.ds.arr(tau, "dimensionless") self.data["TeSZ"] = self.ds.arr(Te, "keV") def write_fits(self, filename, sky_scale=None, sky_center=None, clobber=True): r""" Export images to a FITS file. 
Writes the SZ distortion in all specified frequencies as well as the mass-weighted temperature and the optical depth. Distance units are in kpc, unless *sky_center* and *scale* are specified. Parameters ---------- filename : string The name of the FITS file to be written. sky_scale : tuple Conversion between an angle unit and a length unit, if sky coordinates are desired, e.g. (1.0, "arcsec/kpc") sky_center : tuple, optional The (RA, Dec) coordinate in degrees of the central pixel. Must be specified with *sky_scale*. clobber : boolean, optional If the file already exists, do we overwrite? Examples -------- >>> This example just writes out a FITS file with kpc coords >>> szprj.write_fits("SZbullet.fits", clobber=False) >>> This example uses sky coords >>> sky_scale = (1., "arcsec/kpc") One arcsec per kpc >>> sky_center = (30., 45., "deg") >>> szprj.write_fits("SZbullet.fits", sky_center=sky_center, sky_scale=sky_scale) """ from yt.visualization.fits_image import FITSImageData dx = self.dx.in_units("kpc") dy = dx w = _astropy.pywcs.WCS(naxis=2) w.wcs.crpix = [0.5 * (self.nx + 1)] * 2 w.wcs.cdelt = [dx.v, dy.v] w.wcs.crval = [0.0, 0.0] w.wcs.cunit = ["kpc"] * 2 w.wcs.ctype = ["LINEAR"] * 2 fib = FITSImageData(self.data, fields=self.data.keys(), wcs=w) if sky_scale is not None and sky_center is not None: fib.create_sky_wcs(sky_center, sky_scale) fib.writeto(filename, clobber=clobber) @parallel_root_only def write_png(self, filename_prefix, cmap_name=None, axes_units="kpc", log_fields=None): r""" Export images to PNG files. Writes the SZ distortion in all specified frequencies as well as the mass-weighted temperature and the optical depth. Distance units are in kpc. Parameters ---------- filename_prefix : string The prefix of the image filenames. 
Examples -------- >>> szprj.write_png("SZsloshing") """ if cmap_name is None: cmap_name = ytcfg.get("yt", "default_colormap") import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt if log_fields is None: log_fields = {} ticks_font = matplotlib.font_manager.FontProperties(family='serif', size=16) extent = tuple([bound.in_units(axes_units).value for bound in self.bounds]) for field, image in self.items(): data = image.copy() vmin, vmax = image.min(), image.max() negative = False crossover = False if vmin < 0 and vmax < 0: data *= -1 negative = True if field in log_fields: log_field = log_fields[field] else: log_field = True if log_field: formatter = matplotlib.ticker.LogFormatterMathtext() norm = matplotlib.colors.LogNorm() if vmin < 0 and vmax > 0: crossover = True linthresh = min(vmax, -vmin) / 100. norm = matplotlib.colors.SymLogNorm(linthresh, vmin=vmin, vmax=vmax) else: norm = None formatter = None filename = filename_prefix + "_" + field + ".png" cbar_label = self.display_names[field] units = self.data[field].units.latex_representation() if units is not None and units != "": cbar_label += r'$\ \ (' + units + r')$' fig = plt.figure(figsize=(10.0, 8.0)) ax = fig.add_subplot(111) cax = ax.imshow(data.d, norm=norm, extent=extent, cmap=cmap_name, origin="lower") for label in ax.get_xticklabels(): label.set_fontproperties(ticks_font) for label in ax.get_yticklabels(): label.set_fontproperties(ticks_font) ax.set_xlabel(r"$\mathrm{x\ (%s)}$" % axes_units, fontsize=16) ax.set_ylabel(r"$\mathrm{y\ (%s)}$" % axes_units, fontsize=16) cbar = fig.colorbar(cax, format=formatter) cbar.ax.set_ylabel(cbar_label, fontsize=16) if negative: cbar.ax.set_yticklabels(["-" + label.get_text() for label in cbar.ax.get_yticklabels()]) if crossover: yticks = list(-10**np.arange(np.floor(np.log10(-vmin)), np.rint(np.log10(linthresh)) - 1, -1)) + [0] + \ list(10**np.arange(np.rint(np.log10(linthresh)), np.ceil(np.log10(vmax)) + 1)) cbar.set_ticks(yticks) for label in 
cbar.ax.get_yticklabels(): label.set_fontproperties(ticks_font) fig.tight_layout() plt.savefig(filename) @parallel_root_only def write_hdf5(self, filename): r"""Export the set of S-Z fields to a set of HDF5 datasets. Parameters ---------- filename : string This file will be opened in "write" mode. Examples -------- >>> szprj.write_hdf5("SZsloshing.h5") """ for field, data in self.items(): data.write_hdf5(filename, dataset_name=field) def keys(self): return self.data.keys() def items(self): return self.data.items() def values(self): return self.data.values() def has_key(self, key): return key in self.data.keys() def __getitem__(self, key): return self.data[key] @property def shape(self): return (self.nx, self.nx) | 20,623 | en | 0.525232 |
"""
Example file on how to display a networkx graph on a browser
"""
import json
import networkx as nx
from networkx.readwrite import json_graph
import http_server
import random
# https://www.alanzucconi.com/2015/11/03/recreational-maths-python/
# Converts a number in the list of its digits
# https://www.alanzucconi.com/2015/11/03/recreational-maths-python/
# Converts a number in the list of its digits
def int_to_list(n):
    """Return the decimal digits of ``n`` as a list of ints.

    ``str(n)`` gives the decimal representation; each character is converted
    back to an integer.  A real list is returned (rather than the lazy
    ``map`` object the old code produced) so callers can iterate or index
    the result more than once.
    """
    return [int(digit) for digit in str(n)]
# https://www.alanzucconi.com/2015/11/01/interactive-graphs-in-the-browser/
# https://www.alanzucconi.com/2015/11/01/interactive-graphs-in-the-browser/
def toy_graph():
    """Build a random digit-chain digraph, dump it as JSON and show it.

    Side effects: writes ``graph/graph.json`` and opens ``graph/graph.html``
    in the browser via the local ``http_server`` helper.
    """
    G = nx.DiGraph()
    for i in range(1, 1000):
        # unique digits of a random number in [1, i] (set order is arbitrary)
        tree = list(set(list(int_to_list(random.randint(1, i)))))
        # Add the entire sequence to the tree
        for j in range(0, len(tree) - 1):
            G.add_edge(tree[j], tree[j + 1])
    for n in G:
        # G.node was removed in networkx 2.4; G.nodes works on 2.x and 3.x
        G.nodes[n]['name'] = n
    d = json_graph.node_link_data(G)
    # use a context manager so the JSON file is flushed and closed
    # (the old code leaked the handle from json.dump(d, open(...)))
    with open('graph/graph.json', 'w') as fh:
        json.dump(d, fh)
    # The http_server is just a short piece of code that used to be in the
    # examples directory of the networkx library.
    http_server.load_url('graph/graph.html')
# Run the demo only when executed as a script, not on import.
if __name__ == '__main__':
    toy_graph()
| program.py | 1,169 | Example file on how to display a networkx graph on a browser
https://www.alanzucconi.com/2015/11/03/recreational-maths-python/ Converts a number in the list of its digits The number is firstly converted into a string using str(n) map -> converts each character of the string into an integer https://www.alanzucconi.com/2015/11/01/interactive-graphs-in-the-browser/ Add the entire sequence to the tree The http_server is just a short piece of code that used to be in the examples directory of the networkx library. | 515 | en | 0.819458 |
# TODO: not very happy with this state of affairs of having unencrypted passwords (use keyring ?)
# Password for the Alyx database account.
ALYX_PWD = 'alyxpassword'
HTTP_DATA_SERVER_PWD = 'httpserverpass'  # password for flat iron server for IBL
| python/oneibl/params_secret.py | 208 | TODO: not very happy with this state of affairs of having unencrypted passwords (use keyring ?) password for flat iron server for IBLail | 136 | en | 0.888545 |
from typing import Optional, List
from pydantic import BaseModel
class LogSetting(BaseModel):
    """Logging configuration."""
    # verbosity level; defaults to DEBUG when not supplied
    LOG_LEVEL: Optional[str] = 'DEBUG'
    # destination path for log output (required, no default)
    LOG_PATH: str
class ServiceSetting(BaseModel):
    """Service-level feature switches."""
    # openapi swagger: expose routes in the generated schema
    INCLUDE_IN_SCHEMA: Optional[bool] = True
    # socket.io on/off switch
    SOCKET_IO_ON: Optional[bool] = False
class SocketIOSetting(BaseModel):
    """Socket.IO server configuration."""
    # namespaces served by the socket.io server; root namespace only by default
    SOCKET_IO_NAMESPACES: Optional[List[str]] = ['/']
    # URL prefix where the socket.io app is mounted
    SOCKET_IO_MOUNT: Optional[str] = '/'
| conf/settings.py | 448 | openapi swagger socket.io on | 28 | en | 0.515969 |
# --------------
# Importing header files
import numpy as np
# print(path)
# Path of the file has been stored in variable called 'path'
# NOTE(review): `path` is not defined in this file — presumably injected by
# the exercise platform before this script runs; verify before standalone use.
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Code starts here
# NOTE: this empty array is immediately overwritten by the concatenate below.
census = np.array([])
data=np.genfromtxt(path, delimiter=",", skip_header=1)
census = np.concatenate((data , new_record))
# print(census)
# --------------
#Code starts here
# Column 0 holds age; compute its basic statistics.
age = census[:,0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
# --------------
# Code starts here
# race_0 = np.array([])
# race_1 = np.array([])
# race_2 = np.array([])
# race_3 = np.array([])
# race_4 = np.array([])
# for i in range(0,census.shape[0]):
# if int(census[i,2]) == 0:
# race_0 = np.concatenate(race_0 , np.array([census[i , :]]))
# elif int(census[i,2]) == 1:
# race_1 = np.concatenate(race_1 , np.array([census[i , :]]))
# elif int(census[i,2]) == 2:
# race_2 = np.concatenate(race_2 , np.array([census[i , :]]))
# elif int(census[i,2]) == 3:
# race_3 = np.concatenate(race_3 , np.array([census[i , :]]))
# else:
# race_4 = np.concatenate(race_4 , np.array([census[i , :]]))
# print('r0 \n' , race_0)
# print(census[0 , :])
# len_0 , len_1 , len_2 , len_3 , len_4 = len(race_0) , len(race_1) , len(race_2) , len(race_3) , len(race_4)
# minority_race = np.min(np.array([len_0 , len_1 , len_2 , len_3 , len_4]))
# race_0 = np.array([])
# for i in range(0,census.shape[0]):
# if int(census[i,2]) == 0:
# race_0 = np.append(race_0 , np.array([census[i , :]]))
# Column 2 holds the race code (0-4); boolean masks split the rows per race.
race_0=census[census[:,2]==0]
race_1=census[census[:,2]==1]
race_2=census[census[:,2]==2]
race_3=census[census[:,2]==3]
race_4=census[census[:,2]==4]
len_0=len(race_0)
len_1=len(race_1)
len_2=len(race_2)
len_3=len(race_3)
len_4=len(race_4)
Race_list=[len_0,len_1,len_2,len_3,len_4]
# index of the smallest group = race code of the minority
minority_race=Race_list.index(min(Race_list))
print(minority_race)
# --------------
#Code starts here
# Column 6 holds hours-per-week; average it over the 60+ age group.
senior_citizens = census[census[:,0]>60]
working_hours_sum = senior_citizens.sum(axis=0)[6]
senior_citizens_len = len(senior_citizens)
avg_working_hours = (working_hours_sum)/(senior_citizens_len)
print(avg_working_hours)
# --------------
#Code starts here
# Column 1 holds education level, column 7 the income indicator.
high = census[census[:,1]>10]
low = census[census[:,1]<=10]
avg_pay_high = high.mean(axis=0)[7]
avg_pay_low = low.mean(axis=0)[7]
# NOTE(review): the expression below is evaluated and discarded (no
# assignment or print) — presumably leftover from notebook-style use.
avg_pay_high,avg_pay_low.mean()
"""
Generate and operate on geometric elements and meshes.
"""
from __future__ import absolute_import
from .geometry import Polygon, Square, Prism, Tesseroid, Sphere
from .geometry import PolygonalPrism
from .mesh import SquareMesh, PointGrid, PrismRelief, PrismMesh, TesseroidMesh
| fatiando/mesher/__init__.py | 283 | Generate and operate on geometric elements and meshes. | 54 | en | 0.811383 |
import subprocess
import gdspy
import shutil
from utils import *
from codegen.caravel_codegen import generate_openlane_files
from urllib.parse import urlparse
import os, json
# Top-level sections that every project's info.yaml must define.
REQUIRED_KEYS_SINGLE = ["project", "caravel_test", "module_test", "wrapper_proof", "openlane", "gds"]
class Project(object):
    """One user project in a multi-project caravel submission.

    Wraps the project's on-disk git checkout plus its parsed ``info.yaml``
    configuration, and exposes each verification stage (module testbench,
    formal wrapper proof, caravel co-simulation, GDS/LVS checks, port
    validation) as a method.  Most stages shell out to external tools and
    call ``exit(1)`` on failure rather than raising.
    """

    def __init__(self, args, repo, commit, required_interfaces, system_config):
        """Locate (optionally cloning) the repo and parse its info.yaml.

        :param args: parsed CLI namespace; flags such as ``clone_repos`` and
            ``force_delete`` are read from it
        :param repo: git URL of the project
        :param commit: commit-ish to check out (a branch name also works)
        :param required_interfaces: interface names every project must expose
        :param system_config: global configuration dictionary
        """
        self.args = args
        self.system_config = system_config
        self.repo = repo  # the repo on github
        self.commit = commit  # not strictly a commit, could be a branch
        project_dir = self.system_config['configuration']['project_directory']
        # the project's directory is made by joining project dir to last part of the repo url
        parsed = urlparse(repo)
        self.directory = os.path.join(project_dir, parsed.path.rpartition('/')[-1])
        if args.clone_repos:
            self.clone_repo()
        self.gitsha = get_git_sha(self.directory)
        yaml_file = os.path.join(self.directory, 'info.yaml')
        self.config = parse_config(yaml_file, REQUIRED_KEYS_SINGLE)
        self.id = int(self.config['caravel_test']['id'])
        self.module_name = self.config['caravel_test']['module_name']
        self.interfaces = required_interfaces + self.config['interfaces']
        self.gds_filename = os.path.join(self.config['gds']['directory'], self.config['gds']['gds_filename'])
        self.lef_filename = os.path.join(self.config['gds']['directory'], self.config['gds']['lef_filename'])
        self.lvs_filename = os.path.join(self.config['gds']['directory'], self.config['gds']['lvs_filename'])
        self.title = self.config['project']['title']
        self.author = self.config['project']['author']

    def __str__(self):
        """Human-readable one-liner: id, title and checkout directory."""
        return "%2d %-30s : %s" % (self.id, self.title, self.directory)

    def run_tests(self):
        """Run the test stages selected by the command-line flags."""
        # print out info about the project
        if self.args.dump_hash:
            logging.info("%-30s %-20s %s %s" % (self.author, self.title, self.gitsha, self.repo))
        else:
            logging.info(self)
        if self.args.test_all or self.args.test_module:
            self.test_module()
        if self.args.test_all or self.args.prove_wrapper:
            self.prove_wrapper()
        if self.args.test_all or self.args.test_caravel:
            self.test_caravel()
        # don't run this as part of test-all
        if self.args.test_caravel_gl:
            self.test_caravel(gl=True)
        if self.args.test_all or self.args.test_gds:
            self.test_gds()
        # currently broken, waiting on testing a new netgen
        if self.args.test_all or self.args.test_lvs:
            self.test_lvs()
        if self.args.test_all or self.args.test_ports:
            self.validate_ports()
        if self.args.test_all or self.args.test_tristate_z:
            self.test_tristate_z()
        if self.args.test_all or self.args.test_git:
            self.test_git_match()

    def clone_repo(self):
        """Clone ``self.repo`` at ``self.commit`` into ``self.directory``."""
        clone_repo(self.repo, self.commit, self.directory, self.args.force_delete)

    # hack - better to add this to the info.yaml but for now we do it by searching all the source files. not all are called wrapper.v
    def get_top_module(self):
        """Return the (relative) source path that defines the top module."""
        paths = self.get_module_source_paths(absolute=False)
        top_instance = 'module %s' % self.config['caravel_test']['module_name']
        # now check each source for the top_name
        for path in paths:
            abs_path = os.path.abspath(os.path.join(self.directory, path))
            with open(abs_path) as fh:
                if top_instance in fh.read():
                    return path
        else:
            # for/else: only reached when no source file matched
            logging.error("couldn't find top module for %s" % self)
            exit(1)

    def get_module_source_paths(self, absolute=True):
        """Return the project's source file paths, absolute or as listed."""
        paths = []
        for path in self.config['source']:
            if absolute:
                paths.append(os.path.abspath(os.path.join(self.directory, path)))
            else:
                paths.append(path)
        return paths

    def test_module(self):
        """Run the project's own module-level testbench via its Makefile."""
        conf = self.config["module_test"]
        cwd = os.path.join(self.directory, conf["directory"])
        cmd = ["make", "-f", conf["makefile"], conf["recipe"]]
        logging.info("attempting to run %s in %s" % (cmd, cwd))
        try:
            subprocess.run(cmd, cwd=cwd, check=True)
        except subprocess.CalledProcessError as e:
            logging.error(e)
            exit(1)
        logging.info("test pass")

    def test_git_match(self):
        """Check the checked-out git sha matches the configured commit."""
        self.gitsha = get_git_sha(self.directory)
        if self.gitsha != self.commit:
            logging.error("gitsha on disk doesn't match config")
            exit(1)
        else:
            logging.info("git pass")

    def prove_wrapper(self):
        """Run the formal wrapper proof with SymbiYosys (sby)."""
        # TODO need to also check properties.sby - could have a few things to cksum and make wrapper_cksum able to check a few files
        conf = self.config["wrapper_proof"]
        cwd = os.path.join(self.directory, conf["directory"])
        cmd = ["sby", "-f", conf["sby"]]
        logging.info("attempting to run %s in %s" % (cmd, cwd))
        try:
            subprocess.run(cmd, cwd=cwd, check=True)
        except subprocess.CalledProcessError as e:
            logging.error(e)
            exit(1)
        logging.info("proof pass")

    def copy_project_to_caravel_rtl(self):
        """Copy the whole project checkout into caravel's RTL directory."""
        src = self.directory
        dst = os.path.join(self.system_config['caravel']['rtl_dir'], os.path.basename(self.directory))
        try_copy_tree(src, dst, self.args.force_delete)

    def copy_gl(self):
        """Copy the gate-level (LVS) netlist into caravel's gl directory."""
        dst = os.path.join(self.system_config['caravel']['gl_dir'], self.config['gds']['lvs_filename'])
        src = os.path.join(self.directory, self.config['gds']['directory'], self.config['gds']['lvs_filename'])
        shutil.copyfile(src, dst)

    def test_caravel(self, gl=False):
        """Run the project's caravel co-simulation (gate-level when *gl*)."""
        conf = self.config["caravel_test"]
        # copy src into caravel verilog dir
        self.copy_project_to_caravel_rtl()
        # generate includes & instantiate inside user project wrapper
        # could this be removed and just do it in collect.py ?
        user_project_wrapper_path = os.path.join(self.system_config['caravel']['rtl_dir'], "user_project_wrapper.v")
        caravel_includes_path = os.path.join(self.system_config['caravel']['rtl_dir'], "uprj_netlists.v")
        user_project_includes_path = os.path.join(self.system_config['caravel']['rtl_dir'], "user_project_includes.v")
        interface_definitions = {
            **self.system_config['interfaces']['required'],
            **self.system_config['interfaces']['optional']
        }
        generate_openlane_files(
            [self],
            interface_definitions,
            user_project_wrapper_path,
            user_project_includes_path,
            caravel_includes_path,
            self.args.openram
        )
        # copy test inside caravel
        src = os.path.join(self.directory, conf["directory"])
        dst = os.path.join(self.system_config['caravel']['test_dir'], conf["directory"])
        try_copy_tree(src, dst, self.args.force_delete)
        # set up env
        test_env = os.environ.copy()
        test_env["GCC_PATH"] = self.system_config['env']['GCC_PATH']
        test_env["GCC_PREFIX"] = self.system_config['env']['GCC_PREFIX']
        test_env["PDK_PATH"] = self.system_config['env']['PDK_PATH']
        test_env["CARAVEL_ROOT"] = os.path.join(self.system_config['caravel']['root'], 'caravel')
        cwd = os.path.join(self.system_config['caravel']['test_dir'], conf["directory"])
        cmd = ["make", conf["recipe"]]
        # if gl, use the gl_recipe
        if gl:
            cmd = ["make", conf["gl_recipe"]]
        logging.info("attempting to run %s in %s" % (cmd, cwd))
        # run makefile
        try:
            subprocess.run(cmd, cwd=cwd, env=test_env, check=True)
        except subprocess.CalledProcessError as e:
            logging.error(e)
            exit(1)
        logging.info("caravel test pass")

    def get_gds_size(self):
        """Return the top cell's upper-right bounding-box corner (x, y)."""
        conf = self.config["gds"]
        gds_file = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["gds_filename"]))
        gdsii = gdspy.GdsLibrary(infile=gds_file)
        toplevel = gdsii.top_level()[0]
        return toplevel.get_bounding_box()[1]

    def test_gds(self):
        """Check the GDS uses no forbidden layers (metal 5 is reserved)."""
        if 'waive_gds' in self.config['project']:
            logging.info("skipping GDS in this test due to %s" % self.config['project']['waive_gds'])
            return
        conf = self.config["gds"]
        gds_file = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["gds_filename"]))
        gdsii = gdspy.GdsLibrary(infile=gds_file)
        toplevel = gdsii.top_level()[0]
        # nothing on metal 5
        if self.system_config["tests"]["gds"]["metal5_id"] in toplevel.get_layers():
            logging.error("%s has layers on metal5" % gds_file)
            exit(1)
        logging.info("GDS pass")

    def test_lvs(self):
        """Run layout-vs-schematic: extract with magic, compare with netgen."""
        if 'waive_lvs' in self.config['project']:
            logging.info("skipping LVS in this test due to %s" % self.config['project']['waive_lvs'])
            return
        module_name = self.config['caravel_test']['module_name']
        conf = self.config["gds"]
        # given
        lvs_test_dir = 'lvstest'
        try_mkdir(lvs_test_dir, self.args.force_delete)
        # copy the gds and verilog to local directory
        gds_file = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["gds_filename"]))
        powered_verilog = os.path.abspath(os.path.join(self.directory, conf["directory"], conf["lvs_filename"]))
        shutil.copyfile(gds_file, os.path.join(lvs_test_dir, conf["gds_filename"]))
        shutil.copyfile(powered_verilog, os.path.join(lvs_test_dir, conf["lvs_filename"]))
        gds_file = conf["gds_filename"]
        powered_verilog = conf["lvs_filename"]
        # generated files
        ext_file = module_name + ".ext"
        log_file = module_name + ".log"
        spice_file = module_name + '.spice'
        netgen_log_file = module_name + '.netgen_log'
        netgen_json = module_name + '.json'
        extract_tcl = 'extract.tcl'
        # config files
        pdk_path = self.system_config['lvs']['PDK_PATH']
        openlane_root = self.system_config['lvs']['OPENLANE']
        logging.info("using PDK %s and OpenLANE %s" % (pdk_path, openlane_root))
        # env
        test_env = os.environ.copy()
        test_env["MAGIC_EXT_USE_GDS"] = "1"
        test_env["PDKPATH"] = pdk_path
        netgen_setup_file = os.path.join(pdk_path, 'libs.tech', 'netgen', 'sky130A_setup.tcl')
        cwd = lvs_test_dir
        # create tcl script for magic
        tcl_contents = """
        gds read %s;
        load %s -dereference
        extract do local;
        extract no capacitance;
        extract no coupling;
        extract no resistance;
        extract no adjust;
        extract unique;
        extract;
        ext2spice lvs;
        ext2spice %s;
        feedback save %s;
        exit;
        """ % (gds_file, module_name, ext_file, log_file)
        with open(os.path.join(lvs_test_dir, extract_tcl), 'w') as tcl:
            tcl.write(tcl_contents)
        magic_rcfile = os.path.join(pdk_path, 'libs.tech', 'magic', 'sky130A.magicrc')
        cmd = ['magic', '-rcfile', magic_rcfile, '-noc', '-dnull', extract_tcl]
        logging.info(' '.join(cmd))
        subprocess.run(cmd, cwd=cwd, env=test_env, check=True)
        left_side = '%s %s' % (spice_file, module_name)
        right_side = '%s %s' % (powered_verilog, module_name)
        # only way to get this quoted stuff to work was to use shell=True in the subprocess call
        cmd = 'netgen -batch lvs "%s" "%s" %s %s -json' % (left_side, right_side, netgen_setup_file, netgen_log_file)
        logging.info(cmd)
        subprocess.run(cmd, env=test_env, cwd=cwd, check=True, shell=True)
        lvs_count_cmd = os.path.join(openlane_root, 'scripts', 'count_lvs.py')
        cmd = [lvs_count_cmd, '--file', netgen_json]
        logging.info(cmd)
        # lvs count command doesn't return valid exit codes
        try:
            result = subprocess.run(cmd, cwd=cwd, capture_output=True)
        except subprocess.CalledProcessError as e:
            logging.error(e)
            exit(1)
        # so search for string in output
        if 'Total errors = 0' in str(result.stdout):
            logging.info("LVS passed")
        elif 'Total errors = 6' in str(result.stdout) and 'unmatched pins = 6' in str(result.stdout):
            logging.info("LVS passed (waived 6 unconnected power pins)")
        else:
            logging.error(result.stdout)
            exit(1)

    def test_tristate_z(self):
        """Check the powered netlist tri-states its outputs (buffertest)."""
        # env
        test_env = os.environ.copy()
        test_env["POWERED_VERILOG"] = powered_verilog = os.path.abspath(os.path.join(self.directory, self.config["gds"]["directory"], self.config["gds"]["lvs_filename"]))
        test_env["TOPLEVEL"] = self.config["caravel_test"]["module_name"]
        test_env["PDK_ROOT"] = self.system_config["lvs"]["PDK_ROOT"]
        cmd = ["make", "clean", "test"]
        cwd = "buffertest"
        logging.info("attempting to run %s in %s" % (cmd, cwd))
        # run makefile
        try:
            subprocess.run(cmd, cwd=cwd, env=test_env, check=True)
        except subprocess.CalledProcessError as e:
            logging.error(e)
            exit(1)
        logging.info("tristate z test pass")

    def validate_ports(self):
        """Use yosys to dump the module's ports and check them against config.

        NOTE(review): the os.system call below builds a shell string from
        file paths — paths containing spaces or shell metacharacters would
        break it; confirm inputs are controlled.
        """
        # assume first source is top, bad idea
        sources = ""
        for source_file in self.config['source']:
            sources += os.path.join(self.directory, source_file)
            sources += " "
        top = self.config['caravel_test']['module_name']
        # use yosys to parse the verilog and dump a list of ports
        json_file = '/tmp/ports.json'
        os.system("yosys -qp 'read_verilog -sv %s; hierarchy -top %s ; proc; json -o %s x:*' -DUSE_POWER_PINS=1 -DMPRJ_IO_PADS=38" % (sources, top, json_file))
        with open(json_file) as fh:
            ports = json.load(fh)
        module_ports = ports['modules'][self.config['caravel_test']['module_name']]['ports']
        # check required ports
        for port_type, port_def in self.system_config['interfaces']['required'].items():
            for port_name, bits in port_def.items():
                # assert port is there
                if port_name not in module_ports:
                    logging.error("required port %s not in interface" % port_name)
                    exit(1)
                # and it's the correct length
                if len(module_ports[port_name]['bits']) != bits:
                    logging.error("required port %s is wrong size" % port_name)
                    exit(1)
                # delete it
                del module_ports[port_name]
        # for all the optional ports defined in the projects yaml
        for optional_port in self.config['interfaces']:
            # look up its definition
            for port_name, bits in self.system_config['interfaces']['optional'][optional_port].items():
                # assert port is there
                if port_name not in module_ports:
                    logging.error("optional port %s was set but %s is not in interface" % (optional_port, port_name))
                    exit(1)
                # and it's the correct length
                if len(module_ports[port_name]['bits']) != bits:
                    logging.error("optional port %s is wrong size" % (port_name))
                    exit(1)
                # delete it
                del module_ports[port_name]
        # module def should now be empty
        if len(module_ports) != 0:
            logging.error("additional interfaces found in module")
            logging.error(module_ports)
            exit(1)
        logging.info("test ports pass")
| project.py | 16,201 | the repo on github not strictly a commit, could be a branch the project's directory is made by joining project dir to last part of the repo url print out info about the project don't run this as part of test-all currently broken, waiting on testing a new netgen hack - better to add this to the info.yaml but for now we do it by searching all the source files. not all are called wrapper.v now check each source for the top_name TODO need to also check properties.sby - could have a few things to cksum and make wrapper_cksum able to check a few files copy src into caravel verilog dir generate includes & instantiate inside user project wrapper could this be removed and just do it in collect.py ? copy test inside caravel set up env if gl, use the gl_recipe run makefile nothing on metal 5 given copy the gds and verilog to local directory generated files config files env create tcl script for magic only way to get this quoted stuff to work was to use shell=True in the subprocess call lvs count command doesn't return valid exit codes so search for string in output env run makefile assume first source is top, bad idea use yosys to parse the verilog and dump a list of ports check required ports assert port is there and it's the correct length delete it for all the optional ports defined in the projects yaml look up its definition assert port is there and it's the correct length delete it module def should now be empty | 1,430 | en | 0.892664 |
"""
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import mx_platform_python
from mx_platform_python.model.statement_response import StatementResponse
class TestStatementResponse(unittest.TestCase):
    """StatementResponse unit test stubs"""

    def setUp(self):
        # no fixtures required yet
        pass

    def tearDown(self):
        # nothing to clean up
        pass

    def testStatementResponse(self):
        """Test StatementResponse"""
        # FIXME: construct object with mandatory attributes with example values
        # model = StatementResponse() # noqa: E501
        pass
# Run the test stubs via unittest's CLI runner when executed directly.
if __name__ == '__main__':
    unittest.main()
| test/test_statement_response.py | 935 | StatementResponse unit test stubs
Test StatementResponse
MX Platform API
The MX Platform API is a powerful, fully-featured API designed to make aggregating and enhancing financial data easy and reliable. It can seamlessly connect your app or website to tens of thousands of financial institutions. # noqa: E501
The version of the OpenAPI document: 0.1.0
Generated by: https://openapi-generator.tech
FIXME: construct object with mandatory attributes with example values model = StatementResponse() noqa: E501 | 514 | en | 0.795463 |
"""Criteria for parametric methods.
.. topic:: This module provides criteria to automatically select order in
parametric PSD estimate or pseudo spectrum estimates (e.g, music).
Some criteria such as the AIC criterion helps to chose the order of PSD
models such as the ARMA model. Nevertheless, it is difficult to estimate
correctly the order of an ARMA model even by using these criteria. The
reason being that even the Akaike criteria (AIC) does not provide the
proper order with a probability of 1 with infinite samples.
The order choice is related to an expertise of the signal. There is no
exact criteria. However, they may provide useful information.
AIC, AICc, KIC and AKICc are based on information theory. They attempt
to balance the complexity (or length) of the model against how well the
model fits the data. AIC and KIC are biased estimates of the asymmetric
and the symmetric Kullback-Leibler divergence respectively. AICc and
AKICc attempt to correct the bias.
There are also criteria related to eigen analysis, which takes as input
the eigen values of any PSD estimate method.
.. rubric:: Example
.. plot::
:width: 80%
:include-source:
from spectrum import aryule, AIC, marple_data
from pylab import plot, arange
order = arange(1, 25)
rho = [aryule(marple_data, i, norm='biased')[1] for i in order]
plot(order, AIC(len(marple_data), rho, order), label='AIC')
:References: bd-Krim Seghouane and Maiza Bekara
"A small sample model selection criterion based on Kullback's symmetric
divergence", IEEE Transactions on Signal Processing,
Vol. 52(12), pp 3314-3323, Dec. 2004
"""
class Criteria(object):
    """Criteria class for an automatic selection of ARMA order.

    Available criteria are

    ======= =====================
    ======= =====================
    AIC     see :func:`AIC`
    AICc    see :func:`AICc`
    KIC     see :func:`KIC`
    AKICc   see :func:`AKICc`
    FPE     see :func:`FPE`
    MDL     see :func:`MDL`
    CAT     see :func:`_CAT`
    ======= =====================

    """
    # whitelist of criterion function names the instance may dispatch to
    valid_criteria_names = ['AIC', 'AICc', 'KIC', 'FPE', 'AKICc', 'MDL']
    error_incorrect_name = 'Invalid name provided. Correct names are %s ' \
        % valid_criteria_names
    error_no_criteria_found = 'No names match the valid criteria names (%s)' \
        % valid_criteria_names

    def __init__(self, name, N):
        """Create a criteria object

        :param name: a string or list of strings containing valid criteria
            method's name
        :param int N: size of the data sample.
        """
        # valid attributes
        self.__name = None
        self.name = name          # routed through the validating setter
        self.__N = N
        self.__rho = 0
        self.__k = None
        self.__old_data = None    # previous criterion value, for the stop test
        self.__data = None        # latest criterion value
        self.__norm = True

    def _getName(self):
        return self.__name

    def _setName(self, name):
        # only whitelisted criterion names are accepted
        assert isinstance(name, str), 'name must be a string'
        if name in self.valid_criteria_names:
            self.__name = name
        else:
            raise ValueError(self.error_no_criteria_found)
    name = property(fget=_getName, fset=_setName, doc="Getter/Setter for the criteria name")

    def _getData(self):
        return self.__data

    def _setData(self, data):
        # save the data value in old_data is there is something to save
        if self.data is None:
            # first assignment: seed old_data with 2*data so the first
            # comparison in __call__ never triggers the stop condition
            self.__data = data
            self.__old_data = 2. * data
        else:
            self.__old_data = self.data
            self.__data = data
    data = property(fget=_getData, fset=_setData, doc="Getter/Setter for the criteria output")

    def _getOldData(self):
        return self.__old_data
    old_data = property(fget=_getOldData, doc="Getter/Setter for the previous value")

    def _getK(self):
        return self.__k
    k = property(fget=_getK, doc="Getter for k the order of evaluation")

    def _getN(self):
        return self.__N

    def _setN(self, N):
        assert N > 0, 'N must be positive'
        self.__N = N
    N = property(fget=_getN, fset=_setN, doc="Getter/Setter for N")

    def _getRho(self):
        return self.__rho

    def _setRho(self, rho):
        self.__rho = rho
    rho = property(fget=_getRho, fset=_setRho, doc="Getter/Setter for rho")

    def __call__(self, rho=None, k=None, N=None, norm=True):
        """Call the criteria function corresponding to :attr:`name`.

        Returns False when the new criterion value is larger than the
        previous one (i.e. the caller should stop iterating), True otherwise.
        """
        self.__norm = norm
        if N is not None:
            self.N = N
        # we update rho only if it is needed (input different from self.rho)
        # if such case, we also update k
        if rho is not None:
            self.rho = rho
        if k is not None:
            self.__k = k
        self.__norm = norm
        # Dispatch by explicit module-scope lookup instead of eval(); the
        # name was already validated against valid_criteria_names by the
        # setter, so this is both safer and equivalent.
        f = globals()[self.name]
        self.data = f(self.N, self.rho, self.k)
        # compare the new data with the previous one and return
        # False if the new value is larger so as to stop the iteration
        if self.old_data is not None and self.data is not None:
            if self.data > self.old_data:
                return False
            else:
                return True
        return True
def AIC(N, rho, k):
    r"""Akaike Information Criterion

    :param rho: rho at order k
    :param N: sample size
    :param k: AR order.

    If k is the AR order and N the size of the sample, then Akaike criterion is

    .. math:: AIC(k) = \log(\rho_k) + 2\frac{k+1}{N}

    ::

        AIC(64, [0.5,0.3,0.2], [1,2,3])

    :validation: double checked versus octave.
    """
    from numpy import array, log
    # k+1 free parameters per model (convention agrees with octave)
    goodness_of_fit = N * log(array(rho))
    complexity_penalty = 2. * (array(k) + 1)
    return goodness_of_fit + complexity_penalty
def AICc(N, rho, k, norm=True):
    r"""corrected Akaike information criterion

    .. math:: AICc(k) = log(\rho_k) + 2 \frac{k+1}{N-k-2}

    ``norm`` is accepted for interface compatibility but not used.

    :validation: double checked versus octave.
    """
    from numpy import log
    order = k  # convention agrees with octave
    bias_corrected_penalty = 2. * (order + 1) / (N - order - 2)
    return log(rho) + bias_corrected_penalty
def KIC(N, rho, k):
    r"""Kullback information criterion

    .. math:: KIC(k) = log(\rho_k) + 3 \frac{k+1}{N}

    :validation: double checked versus octave.
    """
    from numpy import log
    penalty = 3. * (k + 1.) / float(N)
    return log(rho) + penalty
def AKICc(N, rho, k):
    r"""approximate corrected Kullback information

    .. math:: AKICc(k) = log(rho_k) + \frac{p}{N*(N-k)} + (3-\frac{k+2}{N})*\frac{k+1}{N-k-2}

    """
    from numpy import log
    p = k
    small_sample_bias = p / N / (N - p)
    corrected_penalty = (3. - (p + 2.) / N) * (p + 1.) / (N - p - 2.)
    return log(rho) + small_sample_bias + corrected_penalty
def FPE(N,rho, k=None):
    r"""Final prediction error criterion

    .. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k

    .. note:: despite the ``None`` default, *k* is used unconditionally and
        must be supplied by the caller.

    :validation: double checked versus octave.
    """
    # convention for k agrees with octave
    inflation = (N + k + 1.) / (N - k - 1)
    return rho * inflation
def MDL(N, rho, k):
    r"""Minimum Description Length

    .. math:: MDL(k) = N log \rho_k + p \log N

    :validation: results
    """
    from numpy import log
    fit_term = N * log(rho)
    description_penalty = k * log(N)
    return fit_term + description_penalty
def CAT(N, rho, k):
    r"""Criterion Autoregressive Transfer Function :

    .. math:: CAT(k) = \frac{1}{N} \sum_{i=1}^k \frac{1}{\rho_i} - \frac{\rho_i}{\rho_k}

    ``k`` is accepted for interface symmetry with the other criteria; the
    orders evaluated are 1..len(rho).

    .. todo:: validation
    """
    from numpy import zeros, arange
    n_orders = len(rho)
    values = zeros(n_orders)
    for p in arange(1, n_orders + 1):
        # unbiased estimate of rho at order p
        unbiased_p = float(N) / (N - p) * rho[p - 1]
        running = 0
        for j in range(1, p + 1):
            unbiased_j = float(N) / (N - j) * rho[j - 1]
            running = running + 1. / unbiased_j
        values[p - 1] = running / float(N) - 1. / unbiased_p
    return values
def aic_eigen(s, N):
    r"""AIC order-selection using eigen values

    :param s: a list of `p` sorted eigen values
    :param N: the size of the input data. To be defined precisely.
    :return:
        * an array containing the AIC values

    Given :math:`n` sorted eigen values :math:`\lambda_i` with
    :math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)
    is:

    .. math:: AIC(k) = -2(n-k)N \ln \frac{g(k)}{a(k)} + 2k(2n-k)

    where :math:`a(k)` is the arithmetic mean and :math:`g(k)` the geometric
    mean of the trailing eigen values.  The number of relevant sinusoids in
    the signal subspace is the argmin of `AIC`.

    .. seealso:: :func:`~spectrum.eigenfreq.eigen`
    .. todo:: define precisely the input parameter N. Should be the input
        data length but when using correlation matrix (SVD), I suspect it
        should be the length of the correlation matrix rather than the
        original data.

    :References:
        * [Marple]_ Chap 13,
        * [Wax]_
    """
    import numpy as np
    n = len(s)
    values = []
    for k in range(0, n - 1):
        tail = s[k + 1:]
        arithmetic_mean = 1. / (n - k) * np.sum(tail)
        geometric_mean = np.prod(tail ** (1. / (n - k)))
        values.append(-2. * (n - k) * N * np.log(geometric_mean / arithmetic_mean) + 2. * k * (2. * n - k))
    return values
def mdl_eigen(s, N):
    r"""MDL order-selection using eigen values

    :param s: a list of `p` sorted eigen values
    :param N: the size of the input data. To be defined precisely.
    :return:
        * an array containing the MDL values

    .. math:: MDL(k) = (n-k)N \ln \frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)

    .. seealso:: :func:`aic_eigen` for details

    :References:
        * [Marple]_ Chap 13,
        * [Wax]_
    """
    import numpy as np
    n = len(s)
    values = []
    for k in range(0, n - 1):
        tail = s[k + 1:]
        arithmetic_mean = 1. / (n - k) * np.sum(tail)
        geometric_mean = np.prod(tail ** (1. / (n - k)))
        values.append(-(n - k) * N * np.log(geometric_mean / arithmetic_mean) + 0.5 * k * (2. * n - k) * np.log(N))
    return values
| src/spectrum/criteria.py | 9,878 | Criteria class for an automatic selection of ARMA order.
Available criteria are
======= =====================
======= =====================
AIC see :func:`AIC`
AICc see :func:`AICc`
KIC see :func:`KIC`
AKICc see :func:`AKICc`
FPE see :func:`FPE`
MDL see :func:`MDL`
CAT see :func:`_CAT`
======= =====================
Akaike Information Criterion
:param rho: rho at order k
:param N: sample size
:param k: AR order.
If k is the AR order and N the size of the sample, then Akaike criterion is
.. math:: AIC(k) = \log(\rho_k) + 2\frac{k+1}{N}
::
AIC(64, [0.5,0.3,0.2], [1,2,3])
:validation: double checked versus octave.
corrected Akaike information criterion
.. math:: AICc(k) = log(\rho_k) + 2 \frac{k+1}{N-k-2}
:validation: double checked versus octave.
approximate corrected Kullback information
.. math:: AKICc(k) = log(rho_k) + \frac{p}{N*(N-k)} + (3-\frac{k+2}{N})*\frac{k+1}{N-k-2}
Criterion Autoregressive Transfer Function :
.. math:: CAT(k) = \frac{1}{N} \sum_{i=1}^k \frac{1}{\rho_i} - \frac{\rho_i}{\rho_k}
.. todo:: validation
Final prediction error criterion
.. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k
:validation: double checked versus octave.
Kullback information criterion
.. math:: KIC(k) = log(\rho_k) + 3 \frac{k+1}{N}
:validation: double checked versus octave.
Minimum Description Length
.. math:: MDL(k) = N log \rho_k + p \log N
:validation: results
Call the criteria function corresponding to :attr:`name`.
Create a criteria object
:param name: a string or list of strings containing valid criteria
method's name
:param int N: size of the data sample.
AIC order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
Given :math:`n` sorted eigen values :math:`\lambda_i` with
:math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)
is:
.. math:: AIC(k) = -2(n-k)N \ln \frac{g(k)}{a(k)} + 2k(2n-k)
where the arithmetic sum :math:`a(k)` is:
.. math:: a(k) = \sum_{i=k+1}^{n}\lambda_i
and the geometric sum :math:`g(k)` is:
.. math:: g(k) = \prod_{i=k+1}^{n} \lambda_i^{-(n-k)}
The number of relevant sinusoids in the signal subspace is determined by
selecting the minimum of `AIC`.
.. seealso:: :func:`~spectrum.eigenfreq.eigen`
.. todo:: define precisely the input parameter N. Should be the input
data length but when using correlation matrix (SVD), I suspect it
should be the length of the correlation matrix rather than the
original data.
:References:
* [Marple]_ Chap 13,
* [Wax]_
MDL order-selection using eigen values
:param s: a list of `p` sorted eigen values
:param N: the size of the input data. To be defined precisely.
:return:
* an array containing the AIC values
.. math:: MDL(k) = (n-k)N \ln \frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)
.. seealso:: :func:`aic_eigen` for details
:References:
* [Marple]_ Chap 13,
* [Wax]_
Criteria for parametric methods.
.. topic:: This module provides criteria to automatically select order in
parametric PSD estimate or pseudo spectrum estimates (e.g, music).
Some criteria such as the AIC criterion helps to chose the order of PSD
models such as the ARMA model. Nevertheless, it is difficult to estimate
correctly the order of an ARMA model even by using these criteria. The
reason being that even the Akaike criteria (AIC) does not provide the
proper order with a probability of 1 with infinite samples.
The order choice is related to an expertise of the signal. There is no
exact criteria. However, they may provide useful information.
AIC, AICc, KIC and AKICc are based on information theory. They attempt
to balance the complexity (or length) of the model against how well the
model fits the data. AIC and KIC are biased estimates of the asymmetric
and the symmetric Kullback-Leibler divergence respectively. AICc and
AKICc attempt to correct the bias.
There are also criteria related to eigen analysis, which takes as input
the eigen values of any PSD estimate method.
.. rubric:: Example
.. plot::
:width: 80%
:include-source:
from spectrum import aryule, AIC, marple_data
from pylab import plot, arange
order = arange(1, 25)
rho = [aryule(marple_data, i, norm='biased')[1] for i in order]
plot(order, AIC(len(marple_data), rho, order), label='AIC')
:References: bd-Krim Seghouane and Maiza Bekara
"A small sample model selection criterion based on Kullback's symmetric
divergence", IEEE Transactions on Signal Processing,
Vol. 52(12), pp 3314-3323, Dec. 2004
valid attributes save the data value in old_data is there is something to save we update rho only if it is needed (input different from self.rho) if such case, we also update kused to check if the criteria is reached or not compare the new data with the previous one and return False if the new value is larger so as to stop the iterationk+1 todo check convention. agrees with octavetodo check convention. agrees with octavek todo check convention. agrees with octavep = arange(1, len(rho)+1)print(s, s/float(N), 1./rho_p) | 5,225 | en | 0.65429 |
__author__ = "Laurence Elliott - 16600748"
import os, math
import numpy as np
# sampleLens = []
# count = 0
# for file in os.listdir("corpus"):
# sample = np.load("corpus/" + file)
# zeroArr = [0]
# try:
# zerosInSample = np.isin(sample, zeroArr)
# zerosIndexes = np.where(zerosInSample)
# zerosStart = zerosIndexes[0][0]
# sample = sample[:zerosStart]
# sampleLen = len(sample)
# print(count, sampleLen)
# sampleLens.append(len(sample))
# except:
# sampleLen = len(sample)
# print(count, sampleLen)
# sampleLens.append(len(sample))
# count += 1
# # sample = np.concatenate((sample[0:200], sample[::-1][0:200]))
#
# minSampleLen = np.min(sampleLens)
# print(minSampleLen)
# Min sample length is 18 bytes D:
maxSequenceLen = 10000
lenSqrt = int(math.sqrt(maxSequenceLen))
print(lenSqrt)
count = 0
for file in os.listdir("corpus"):
sample = np.load("corpus/" + file)[:maxSequenceLen]
sample = np.rint(((sample - np.min(sample)) /
(np.max(sample) - np.min(sample))) * 255)\
.astype('int').reshape(lenSqrt, lenSqrt, 1)
np.save("corpusTrunc/" + file, sample)
print(count)
count += 1 | bin-utf8-vec/truncateCorpus.py | 1,226 | sampleLens = [] count = 0 for file in os.listdir("corpus"): sample = np.load("corpus/" + file) zeroArr = [0] try: zerosInSample = np.isin(sample, zeroArr) zerosIndexes = np.where(zerosInSample) zerosStart = zerosIndexes[0][0] sample = sample[:zerosStart] sampleLen = len(sample) print(count, sampleLen) sampleLens.append(len(sample)) except: sampleLen = len(sample) print(count, sampleLen) sampleLens.append(len(sample)) count += 1 sample = np.concatenate((sample[0:200], sample[::-1][0:200])) minSampleLen = np.min(sampleLens) print(minSampleLen) Min sample length is 18 bytes D: | 685 | en | 0.351961 |
# This file is part of the MapProxy project.
# Copyright (C) 2010, 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import sys
from collections import deque
from contextlib import contextmanager
import time
try:
import Queue
except ImportError:
import queue as Queue
from mapproxy.config import base_config
from mapproxy.grid import MetaGrid
from mapproxy.source import SourceError
from mapproxy.config import local_base_config
from mapproxy.compat.itertools import izip_longest
from mapproxy.util.lock import LockTimeout
from mapproxy.seed.util import format_seed_task, timestamp
from mapproxy.seed.cachelock import DummyCacheLocker, CacheLockedError
from mapproxy.seed.util import (exp_backoff, limit_sub_bbox,
status_symbol, BackoffError)
import logging
log = logging.getLogger(__name__)
NONE = 0
CONTAINS = -1
INTERSECTS = 1
# do not use multiprocessing on windows, it blows
# no lambdas, no anonymous functions/classes, no base_config(), etc.
if sys.platform == 'win32':
import threading
proc_class = threading.Thread
queue_class = Queue.Queue
else:
import multiprocessing
proc_class = multiprocessing.Process
queue_class = multiprocessing.Queue
class TileWorkerPool(object):
"""
Manages multiple TileWorker.
"""
def __init__(self, task, worker_class, size=2, dry_run=False, progress_logger=None):
self.tiles_queue = queue_class(size)
self.task = task
self.dry_run = dry_run
self.procs = []
self.progress_logger = progress_logger
conf = base_config()
for _ in range(size):
worker = worker_class(self.task, self.tiles_queue, conf)
worker.start()
self.procs.append(worker)
def process(self, tiles, progress):
if not self.dry_run:
while True:
try:
self.tiles_queue.put(tiles, timeout=5)
except Queue.Full:
alive = False
for proc in self.procs:
if proc.is_alive():
alive = True
break
if not alive:
log.warn('no workers left, stopping')
raise SeedInterrupted
continue
else:
break
if self.progress_logger:
self.progress_logger.log_step(progress)
def stop(self, force=False):
"""
Stop seed workers by sending None-sentinel and joining the workers.
:param force: Skip sending None-sentinel and join with a timeout.
For use when workers might be shutdown already by KeyboardInterrupt.
"""
if not force:
alives = 0
for proc in self.procs:
if proc.is_alive():
alives += 1
while alives:
# put None-sentinels to queue as long as we have workers alive
try:
self.tiles_queue.put(None, timeout=1)
alives -= 1
except Queue.Full:
alives = 0
for proc in self.procs:
if proc.is_alive():
alives += 1
if force:
timeout = 1.0
else:
timeout = None
for proc in self.procs:
proc.join(timeout)
class TileWorker(proc_class):
def __init__(self, task, tiles_queue, conf):
proc_class.__init__(self)
proc_class.daemon = True
self.task = task
self.tile_mgr = task.tile_manager
self.tiles_queue = tiles_queue
self.conf = conf
def run(self):
with local_base_config(self.conf):
try:
self.work_loop()
except KeyboardInterrupt:
return
except BackoffError:
return
class TileSeedWorker(TileWorker):
def work_loop(self):
while True:
tiles = self.tiles_queue.get()
if tiles is None:
return
with self.tile_mgr.session():
exp_backoff(self.tile_mgr.load_tile_coords, args=(tiles,),
max_repeat=100, max_backoff=600,
exceptions=(SourceError, IOError), ignore_exceptions=(LockTimeout, ))
class TileCleanupWorker(TileWorker):
def work_loop(self):
while True:
tiles = self.tiles_queue.get()
if tiles is None:
return
with self.tile_mgr.session():
self.tile_mgr.remove_tile_coords(tiles)
class SeedProgress(object):
def __init__(self, old_progress_identifier=None):
self.progress = 0.0
self.level_progress_percentages = [1.0]
self.level_progresses = None
self.level_progresses_level = 0
self.progress_str_parts = []
self.old_level_progresses = old_progress_identifier
def step_forward(self, subtiles=1):
self.progress += self.level_progress_percentages[-1] / subtiles
@property
def progress_str(self):
return ''.join(self.progress_str_parts)
@contextmanager
def step_down(self, i, subtiles):
if self.level_progresses is None:
self.level_progresses = []
self.level_progresses = self.level_progresses[:self.level_progresses_level]
self.level_progresses.append((i, subtiles))
self.level_progresses_level += 1
self.progress_str_parts.append(status_symbol(i, subtiles))
self.level_progress_percentages.append(self.level_progress_percentages[-1] / subtiles)
yield
self.level_progress_percentages.pop()
self.progress_str_parts.pop()
self.level_progresses_level -= 1
if self.level_progresses_level == 0:
self.level_progresses = []
def already_processed(self):
return self.can_skip(self.old_level_progresses, self.level_progresses)
def current_progress_identifier(self):
if self.already_processed() or self.level_progresses is None:
return self.old_level_progresses
return self.level_progresses[:]
@staticmethod
def can_skip(old_progress, current_progress):
"""
Return True if the `current_progress` is behind the `old_progress` -
when it isn't as far as the old progress.
>>> SeedProgress.can_skip(None, [(0, 4)])
False
>>> SeedProgress.can_skip([], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], None)
False
>>> SeedProgress.can_skip([(0, 4)], [(0, 4)])
False
>>> SeedProgress.can_skip([(1, 4)], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (1, 4)])
True
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (2, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (3, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4), (0, 4)])
False
"""
if current_progress is None:
return False
if old_progress is None:
return False
if old_progress == []:
return True
for old, current in izip_longest(old_progress, current_progress, fillvalue=None):
if old is None:
return False
if current is None:
return False
if old < current:
return False
if old > current:
return True
return False
def running(self):
return True
class StopProcess(Exception):
pass
class SeedInterrupted(Exception):
pass
class TileWalker(object):
"""
TileWalker traverses through all tiles in a tile grid and calls worker_pool.process
for each (meta) tile. It traverses the tile grid (pyramid) depth-first.
Intersection with coverages are checked before handling subtiles in the next level,
allowing to determine if all subtiles should be seeded or skipped.
"""
def __init__(self, task, worker_pool, handle_stale=False, handle_uncached=False,
work_on_metatiles=True, skip_geoms_for_last_levels=0, progress_logger=None,
seed_progress=None):
self.tile_mgr = task.tile_manager
self.task = task
self.worker_pool = worker_pool
self.handle_stale = handle_stale
self.handle_uncached = handle_uncached
self.work_on_metatiles = work_on_metatiles
self.skip_geoms_for_last_levels = skip_geoms_for_last_levels
self.progress_logger = progress_logger
num_seed_levels = len(task.levels)
if num_seed_levels >= 4:
self.report_till_level = task.levels[num_seed_levels-2]
else:
self.report_till_level = task.levels[num_seed_levels-1]
meta_size = self.tile_mgr.meta_grid.meta_size if self.tile_mgr.meta_grid else (1, 1)
self.tiles_per_metatile = meta_size[0] * meta_size[1]
self.grid = MetaGrid(self.tile_mgr.grid, meta_size=meta_size, meta_buffer=0)
self.count = 0
self.seed_progress = seed_progress or SeedProgress()
# It is possible that we 'walk' through the same tile multiple times
# when seeding irregular tile grids[0]. limit_sub_bbox prevents that we
# recurse into the same area multiple times, but it is still possible
# that a tile is processed multiple times. Locking prevents that a tile
# is seeded multiple times, but it is possible that we count the same tile
# multiple times (in dry-mode, or while the tile is in the process queue).
# Tile counts can be off by 280% with sqrt2 grids.
# We keep a small cache of already processed tiles to skip most duplicates.
# A simple cache of 64 tile coordinates for each level already brings the
# difference down to ~8%, which is good enough and faster than a more
# sophisticated FIFO cache with O(1) lookup, or even caching all tiles.
# [0] irregular tile grids: where one tile does not have exactly 4 subtiles
# Typically when you use res_factor, or a custom res list.
self.seeded_tiles = {l: deque(maxlen=64) for l in task.levels}
def walk(self):
assert self.handle_stale or self.handle_uncached
bbox = self.task.coverage.extent.bbox_for(self.tile_mgr.grid.srs)
if self.seed_progress.already_processed():
# nothing to seed
self.seed_progress.step_forward()
else:
try:
self._walk(bbox, self.task.levels)
except StopProcess:
pass
self.report_progress(self.task.levels[0], self.task.coverage.bbox)
def _walk(self, cur_bbox, levels, current_level=0, all_subtiles=False):
"""
:param cur_bbox: the bbox to seed in this call
:param levels: list of levels to seed
:param all_subtiles: seed all subtiles and do not check for
intersections with bbox/geom
"""
bbox_, tiles, subtiles = self.grid.get_affected_level_tiles(cur_bbox, current_level)
total_subtiles = tiles[0] * tiles[1]
if len(levels) < self.skip_geoms_for_last_levels:
# do not filter in last levels
all_subtiles = True
subtiles = self._filter_subtiles(subtiles, all_subtiles)
if current_level in levels and current_level <= self.report_till_level:
self.report_progress(current_level, cur_bbox)
if not self.seed_progress.running():
if current_level in levels:
self.report_progress(current_level, cur_bbox)
self.tile_mgr.cleanup()
raise StopProcess()
process = False;
if current_level in levels:
levels = levels[1:]
process = True
for i, (subtile, sub_bbox, intersection) in enumerate(subtiles):
if subtile is None: # no intersection
self.seed_progress.step_forward(total_subtiles)
continue
if levels: # recurse to next level
sub_bbox = limit_sub_bbox(cur_bbox, sub_bbox)
if intersection == CONTAINS:
all_subtiles = True
else:
all_subtiles = False
with self.seed_progress.step_down(i, total_subtiles):
if self.seed_progress.already_processed():
self.seed_progress.step_forward()
else:
self._walk(sub_bbox, levels, current_level=current_level+1,
all_subtiles=all_subtiles)
if not process:
continue
# check if subtile was already processed. see comment in __init__
if subtile in self.seeded_tiles[current_level]:
if not levels:
self.seed_progress.step_forward(total_subtiles)
continue
self.seeded_tiles[current_level].appendleft(subtile)
if not self.work_on_metatiles:
# collect actual tiles
handle_tiles = self.grid.tile_list(subtile)
else:
handle_tiles = [subtile]
if self.handle_uncached:
handle_tiles = [t for t in handle_tiles if
t is not None and
not self.tile_mgr.is_cached(t)]
elif self.handle_stale:
handle_tiles = [t for t in handle_tiles if
t is not None and
self.tile_mgr.is_stale(t)]
if handle_tiles:
self.count += 1
self.worker_pool.process(handle_tiles, self.seed_progress)
if not levels:
self.seed_progress.step_forward(total_subtiles)
if len(levels) >= 4:
# call cleanup to close open caches
# for connection based caches
self.tile_mgr.cleanup()
def report_progress(self, level, bbox):
if self.progress_logger:
self.progress_logger.log_progress(self.seed_progress, level, bbox,
self.count * self.tiles_per_metatile)
def _filter_subtiles(self, subtiles, all_subtiles):
"""
Return an iterator with all sub tiles.
Yields (None, None, None) for non-intersecting tiles,
otherwise (subtile, subtile_bbox, intersection).
"""
for subtile in subtiles:
if subtile is None:
yield None, None, None
else:
sub_bbox = self.grid.meta_tile(subtile).bbox
if all_subtiles:
intersection = CONTAINS
else:
intersection = self.task.intersects(sub_bbox)
if intersection:
yield subtile, sub_bbox, intersection
else:
yield None, None, None
class SeedTask(object):
def __init__(self, md, tile_manager, levels, refresh_timestamp, coverage):
self.md = md
self.tile_manager = tile_manager
self.grid = tile_manager.grid
self.levels = levels
self.refresh_timestamp = refresh_timestamp
self.coverage = coverage
@property
def id(self):
return self.md['name'], self.md['cache_name'], self.md['grid_name'], tuple(self.levels)
def intersects(self, bbox):
if self.coverage.contains(bbox, self.grid.srs): return CONTAINS
if self.coverage.intersects(bbox, self.grid.srs): return INTERSECTS
return NONE
class CleanupTask(object):
"""
:param coverage: area for the cleanup
:param complete_extent: ``True`` if `coverage` equals the extent of the grid
"""
def __init__(self, md, tile_manager, levels, remove_timestamp, coverage, complete_extent=False):
self.md = md
self.tile_manager = tile_manager
self.grid = tile_manager.grid
self.levels = levels
self.remove_timestamp = remove_timestamp
self.coverage = coverage
self.complete_extent = complete_extent
@property
def id(self):
return 'cleanup', self.md['name'], self.md['cache_name'], self.md['grid_name']
def intersects(self, bbox):
if self.coverage.contains(bbox, self.grid.srs): return CONTAINS
if self.coverage.intersects(bbox, self.grid.srs): return INTERSECTS
return NONE
def seed(tasks, concurrency=2, dry_run=False, skip_geoms_for_last_levels=0,
progress_logger=None, cache_locker=None):
if cache_locker is None:
cache_locker = DummyCacheLocker()
active_tasks = tasks[::-1]
while active_tasks:
task = active_tasks[-1]
print(format_seed_task(task))
wait = len(active_tasks) == 1
try:
with cache_locker.lock(task.md['cache_name'], no_block=not wait):
if progress_logger and progress_logger.progress_store:
progress_logger.current_task_id = task.id
start_progress = progress_logger.progress_store.get(task.id)
else:
start_progress = None
seed_progress = SeedProgress(old_progress_identifier=start_progress)
seed_task(task, concurrency, dry_run, skip_geoms_for_last_levels, progress_logger,
seed_progress=seed_progress)
except CacheLockedError:
print(' ...cache is locked, skipping')
active_tasks = [task] + active_tasks[:-1]
else:
active_tasks.pop()
def seed_task(task, concurrency=2, dry_run=False, skip_geoms_for_last_levels=0,
progress_logger=None, seed_progress=None):
if task.coverage is False:
return
if task.refresh_timestamp is not None:
task.tile_manager._expire_timestamp = task.refresh_timestamp
task.tile_manager.minimize_meta_requests = False
work_on_metatiles = True
if task.tile_manager.rescale_tiles:
work_on_metatiles = False
tile_worker_pool = TileWorkerPool(task, TileSeedWorker, dry_run=dry_run,
size=concurrency, progress_logger=progress_logger)
tile_walker = TileWalker(task, tile_worker_pool, handle_uncached=True,
skip_geoms_for_last_levels=skip_geoms_for_last_levels, progress_logger=progress_logger,
seed_progress=seed_progress,
work_on_metatiles=work_on_metatiles,
)
try:
tile_walker.walk()
except KeyboardInterrupt:
tile_worker_pool.stop(force=True)
raise
finally:
tile_worker_pool.stop()
| mapproxy/seed/seeder.py | 19,719 | :param coverage: area for the cleanup
:param complete_extent: ``True`` if `coverage` equals the extent of the grid
TileWalker traverses through all tiles in a tile grid and calls worker_pool.process
for each (meta) tile. It traverses the tile grid (pyramid) depth-first.
Intersection with coverages are checked before handling subtiles in the next level,
allowing to determine if all subtiles should be seeded or skipped.
Manages multiple TileWorker.
Return an iterator with all sub tiles.
Yields (None, None, None) for non-intersecting tiles,
otherwise (subtile, subtile_bbox, intersection).
:param cur_bbox: the bbox to seed in this call
:param levels: list of levels to seed
:param all_subtiles: seed all subtiles and do not check for
intersections with bbox/geom
Return True if the `current_progress` is behind the `old_progress` -
when it isn't as far as the old progress.
>>> SeedProgress.can_skip(None, [(0, 4)])
False
>>> SeedProgress.can_skip([], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], None)
False
>>> SeedProgress.can_skip([(0, 4)], [(0, 4)])
False
>>> SeedProgress.can_skip([(1, 4)], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (1, 4)])
True
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (2, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (3, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4), (0, 4)])
False
Stop seed workers by sending None-sentinel and joining the workers.
:param force: Skip sending None-sentinel and join with a timeout.
For use when workers might be shutdown already by KeyboardInterrupt.
This file is part of the MapProxy project. Copyright (C) 2010, 2011 Omniscale <http://omniscale.de> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. do not use multiprocessing on windows, it blows no lambdas, no anonymous functions/classes, no base_config(), etc. put None-sentinels to queue as long as we have workers alive It is possible that we 'walk' through the same tile multiple times when seeding irregular tile grids[0]. limit_sub_bbox prevents that we recurse into the same area multiple times, but it is still possible that a tile is processed multiple times. Locking prevents that a tile is seeded multiple times, but it is possible that we count the same tile multiple times (in dry-mode, or while the tile is in the process queue). Tile counts can be off by 280% with sqrt2 grids. We keep a small cache of already processed tiles to skip most duplicates. A simple cache of 64 tile coordinates for each level already brings the difference down to ~8%, which is good enough and faster than a more sophisticated FIFO cache with O(1) lookup, or even caching all tiles. [0] irregular tile grids: where one tile does not have exactly 4 subtiles Typically when you use res_factor, or a custom res list. nothing to seed do not filter in last levels no intersection recurse to next level check if subtile was already processed. see comment in __init__ collect actual tiles call cleanup to close open caches for connection based caches | 3,823 | en | 0.832963 |
import minimalmodbus
import serial.tools.list_ports
import argparse
import time
#Creates a new instance of a minimal modbus connection
#Change portname to whatever you're using (/dev/USB0, COM4, etc)
#Or just change it when you create the new serial object
#247 is the default address for Renogy devices
class RenogySmartBattery(minimalmodbus.Instrument):
def __init__(self, portname="/dev/USB0", slaveaddress=247, baudrate=9600, timeout=0.5):
minimalmodbus.Instrument.__init__(self, portname, slaveaddress)
self.serial.baudrate = baudrate
self.serial.timeout = timeout
self.address = slaveaddress
self.amps = 0
self.unitVolts = 0
self.cellVolts = []
self.numCells = 4
self.capacity = 0
self.maxCapacity = 0
self.percentage = 0
self.state = "Error"
self.heater = False
self.cellTemp = []
self.cycles = 0
self.batSerial = ""
#Reads number of Cells
try:
self.numCells = self.read_register(5000)
except Exception as e:
print("Error getting number of cells")
#Reads the Serial Number
try:
self.batSerial = self.read_registers(5110,6)
except Exception as e:
print("Error reading the serial number")
def update(self):
#Gets unit current flow in A (0), unit voltage (1), capacity in AH (2,3), max capacity (4,5), cycle nums (6)
try:
battInfo = self.read_registers(5042,7)
self.amps = battInfo[0] / 100 if battInfo[0] < 61440 else (battInfo[0] - 65535) / 100
self.unitVolts = battInfo[1] / 10
self.capacity = ( battInfo[2] << 15 | (battInfo[3] >> 1) ) * 0.002
self.Maxcapacity = ( battInfo[4] << 15 | (battInfo[5] >> 1) ) * 0.002
self.cycles = battInfo[6]
except Exception as e:
print("Error getting Unit info" + e)
#Gets heater status
try:
heaterInfo = self.read_register(5013)
self.heater = (heaterInfo / 255) * 100
except Exception as e:
print("Error getting heater info" + e)
#Get individual cell info
try:
self.cellTemp = self.read_registers(5018, self.numCells)
self.cellVolts = self.read_registers(5001, self.numCells)
except Exception as e:
print("Error getting individual cell info")
def getNumCells(self):
return self.numCells
#sets the address of the battery
def setAddress(self, address):
self.address = address
#Gets the amperage flow of the battery
def getAmps(self):
return self.amps
#Returns a list of the cell voltages
def getCellVolts(self):
return [x / 19 for x in self.cellVolts]
#Returns number of cycles on the battery
def getCycles(self):
return self.cycles
#Returns the serial number
def getSerial(self):
return ''.join(self.batSerial)
#Gets the voltage of the battery
def getUnitVolts(self):
return self.unitVolts
#Gets the current AH of the battery
def getCapacity(self):
return self.capacity
#Gets the max capacity of the battery
def getMax_capacity(self):
return self.maxCapacity
#Gets the percentage full of the battery
def getPercentage(self):
return self.capacity / self.maxCapacity
#Gets the state of the battery (Charging, Discharging, or Error)
def getState(self):
if self.amps < 0: return "DISCHARGING"
elif self.amps > 0: return "CHARGING"
return "IDLE"
#For the self-heating batteries, gets if the battery is on and how much (0-100)
def getHeater(self):
return self.heater
#Gets the overall temperature of the battery by getting the average temperature of the cells
def getBatteryTemp(self):
return sum(self.cellTemp) / len(self.cellTemp)
#Reads a specific register
def readRegister(self, register):
try:
return self.read_register(register)
except Exception as e:
print(e)
def readRegisters(self, startRegister, numRegisters):
try:
return self.read_registers(startRegister, numRegisters)
except Exception as e:
print(e)
#Writes a specific register
def writeRegister(self, register, value):
try:
return self.write_register(register, value)
except Exception as e:
print(e)
#Utilizes the write register to change the slave address of the battery
def changeAddress(self, value, address):
try:
return self.writeRegister(5223,value, address)
except Exception as e:
print(e)
#Main Method for demonstration
def main():
#main two arguments are the identifier of the USB connection and the address to connect to.
renogy = RenogySmartBattery("/dev/USB0", 50)
print(renogy.volts(51))
print(renogy.amps(51))
if __name__ == "__main__":
main() | Renogy.py | 5,034 | Creates a new instance of a minimal modbus connectionChange portname to whatever you're using (/dev/USB0, COM4, etc)Or just change it when you create the new serial object247 is the default address for Renogy devicesReads number of CellsReads the Serial NumberGets unit current flow in A (0), unit voltage (1), capacity in AH (2,3), max capacity (4,5), cycle nums (6)Gets heater statusGet individual cell infosets the address of the batteryGets the amperage flow of the batteryReturns a list of the cell voltagesReturns number of cycles on the batteryReturns the serial numberGets the voltage of the batteryGets the current AH of the batteryGets the max capacity of the batteryGets the percentage full of the batteryGets the state of the battery (Charging, Discharging, or Error)For the self-heating batteries, gets if the battery is on and how much (0-100)Gets the overall temperature of the battery by getting the average temperature of the cellsReads a specific registerWrites a specific registerUtilizes the write register to change the slave address of the batteryMain Method for demonstrationmain two arguments are the identifier of the USB connection and the address to connect to. | 1,188 | en | 0.779607 |
import pygame
import random
import helpers
from ItemManager import ItemManager
import scorer
class BlockManager:
    """Owns the play-field grid of falling blocks: spawning, gravity,
    match-clearing and score-based difficulty scaling."""
    def __init__(self,main):
        # main: the game object; provides .objects, .explosionGraphics and .lose().
        self.main = main
        self.blockSize = 75  # cell size in pixels
        self.gridWidth = 12
        self.gridHeight = 12
        # grid[x][y] holds a block type index (0..numTypes-1) or None when empty.
        self.grid = []
        for x in range(0,self.gridWidth):
            newColumn = []
            for y in range(0,self.gridHeight):
                newColumn.append(None)
            self.grid.append(newColumn)
        self.numTypes = 8
        # One image per block type, loaded from TGA files named "0".."7".
        self.images = []
        for x in range(0,self.numTypes):
            self.images.append(helpers.loadTGA(str(x))[0])
        self.maxTimeTillNew = 40  # frames between block spawns (shrinks with score)
        self.timeTillNew = self.maxTimeTillNew  # countdown until the next spawn
        self.moveTime = 0  # frame counter driving gravity ticks
        self.moveFrec = 10  # gravity is applied every moveFrec frames
    def compute(self):
        """Per-frame update: speed scaling, gravity, game-over check,
        spawning, and adjacency clearing."""
        self.calculateSpeed()
        self.moveTime += 1
        if self.moveTime % self.moveFrec == 0:
            self.moveBlocksDown()
        #Check for game over.
        # doneChecking flips to 1 as soon as any empty cell is found anywhere;
        # a completely full grid therefore means the player has lost.
        doneChecking = 0
        y = 0
        while y < self.gridHeight and not doneChecking:
            x = 0
            while x < self.gridWidth and not doneChecking:
                if self.grid[x][y] is None:
                    doneChecking = 1
                x += 1
            y += 1
        if not doneChecking: #If none in the top row were None:
            self.main.lose()
        self.timeTillNew -= 1
        if self.timeTillNew == 0:
            self.getNewBlock()
            self.timeTillNew = self.maxTimeTillNew
        self.checkAdj()
    def checkAdj(self):
        """Clear every group of 3+ same-type adjacent blocks and notify the
        Scorer and ItemManager objects about the destroyed blocks."""
        #Check grid for triple adjacency.
        for x in range(0,self.gridWidth):
            for y in range(0,self.gridHeight):
                if self.grid[x][y] is not None:
                    adjacents = helpers.getAdjacents(x,y,self.grid)
                    if len(adjacents) >= 3:
                        for point in adjacents:
                            self.grid[point[0]][point[1]] = None
                            self.main.explosionGraphics.getPoint(point[0]*self.blockSize+self.blockSize/2,point[1]*self.blockSize+self.blockSize/2)
                            #+self.blockSize/2 so it's in the center.
                        for anObject in self.main.objects:
                            if isinstance(anObject,scorer.Scorer):
                                anObject.getDestroyedBlocks(len(adjacents))
                            if isinstance(anObject, ItemManager):
                                anObject.getDestroyedBlocks(adjacents)
    def getNewBlock(self):
        """Spawn a random-type block in a random empty cell of the top row.

        NOTE(review): loops forever if the whole top row is full; compute()'s
        game-over check is presumably expected to fire before that happens.
        """
        pos = random.randint(0,self.gridWidth - 1)
        while self.grid[pos][0] is not None:
            pos = random.randint(0,self.gridWidth - 1)
        col = random.randint(0,self.numTypes - 1)
        self.grid[pos][0] = col
    def moveBlocksDown(self):
        """Apply one gravity step: each block with an empty cell below drops one row."""
        #Move all blocks down.
        for x in range(0,self.gridWidth):
            for y in range(self.gridHeight-2,-1,-1): #From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move down no matter what.
                if self.grid[x][y] is not None and self.grid[x][y + 1] is None:
                    self.grid[x][y + 1] = self.grid[x][y]
                    self.grid[x][y] = None
    def draw(self,surface):
        """Blit every occupied cell's image onto *surface* at its grid position."""
        for y in range(0,self.gridHeight):
            for x in range(0,self.gridWidth):
                if self.grid[x][y] is not None:
                    surface.blit(self.images[self.grid[x][y]],(x*self.blockSize,y*self.blockSize))
    def getDown(self):
        """Player input: force an immediate gravity step (spawning early if a
        spawn would otherwise land inside the skipped interval)."""
        self.moveBlocksDown()
        self.moveTime = 0
        if self.timeTillNew <= self.moveFrec:
            self.getNewBlock()
            self.timeTillNew = self.maxTimeTillNew
        else:
            self.timeTillNew -= self.moveFrec
    def getRight(self):
        """Player input: shift every free-falling block one cell to the right."""
        #Remember: Blocks will not move right if there is a block directly below them.
        for y in range(self.gridHeight-2,-1,-1): #From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move right no matter what.
            for x in range(self.gridWidth-2,-1,-1): #From gridWidth-2 to 0. Blocks on the right (x=gridWidth - 1) won't move right no matter what.
                if self.grid[x][y] is not None and self.grid[x + 1][y] is None and self.grid[x][y + 1] is None:
                    self.grid[x + 1][y] = self.grid[x][y]
                    self.grid[x][y] = None
    def getLeft(self):
        """Player input: shift every free-falling block one cell to the left."""
        #Remember: Blocks will not move right if there is a block directly below them.
        for y in range(self.gridHeight-2,-1,-1): #From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move left no matter what.
            for x in range(1,self.gridWidth): #From 1 to gridWidth-1. Blocks on the left (x=0) won't move left no matter what.
                if self.grid[x][y] is not None and self.grid[x - 1][y] is None and self.grid[x][y + 1] is None:
                    self.grid[x - 1][y] = self.grid[x][y]
                    self.grid[x][y] = None
    def calculateSpeed(self):
        """Scale spawn rate, gravity frequency and item availability with the
        current score taken from the Scorer object in main.objects."""
        for anObject in self.main.objects:
            if isinstance(anObject,scorer.Scorer):
                score = anObject.score
            if isinstance(anObject, ItemManager):
                itemManager = anObject
        # NOTE(review): score/itemManager are unbound if main.objects lacks a
        # Scorer or ItemManager — assumed to always be present; confirm.
        # k is the difficulty level derived from the score.
        k = 0
        if score > 10: k = 1
        if score > 20: k = 2
        if score > 50: k = 3
        if score > 100: k = 4
        if score > 200: k = 5
        if score > 400: k = 6
        if score > 600: k = 7
        if score > 800: k = 8
        if score > 1000: k = 9
        if score > 2000: k = 10
        if score > 3000: k = 11
        if score > 4000: k = 12
        if score > 5000: k = 13
        if score == 9999: k = 14
        # Frames between spawns per difficulty level.
        self.maxTimeTillNew = {
            0: 100,
            1: 80,
            2: 60,
            3: 50,
            4: 40,
            5: 36,
            6: 34,
            7: 30,
            8: 28,
            9: 26,
            10: 24,
            11: 22,
            12: 20,
            13: 19,
            14: 18
        }[k]
        if k <= 2:
            self.moveFrec = 10
        else:
            self.moveFrec = self.maxTimeTillNew / 3
        scorer.comboLastTime = self.maxTimeTillNew * 3
        if k > 0:
            itemManager.itemFrec = max(int(self.maxTimeTillNew * 2.5), 30 * 2.5) #128
            itemManager.itemLastTime = itemManager.itemFrec * 8
            itemManager.itemsAvailable = min(k, 8)
| src/BlockManager.py | 5,318 | Check for game over.If none in the top row were None:Check grid for triple adjacency.+self.blockSize/2 so it's in the center.Move all blocks down.From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move down no matter what.Remember: Blocks will not move right if there is a block directly below them.From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move right no matter what.From gridWidth-2 to 0. Blocks on the right (x=gridWidth - 1) won't move right no matter what.Remember: Blocks will not move right if there is a block directly below them.From gridHeight-2 to 0. Blocks on the bottom (y=gridHeight - 1) won't move left no matter what.From 1 to gridWidth-1. Blocks on the left (x=0) won't move left no matter what.128 | 761 | en | 0.883823 |
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.vnic_eth_adapter_policy_list import VnicEthAdapterPolicyList # noqa: E501
from intersight.rest import ApiException
class TestVnicEthAdapterPolicyList(unittest.TestCase):
    """Unit test stubs for the VnicEthAdapterPolicyList model."""

    def setUp(self):
        """No fixtures are required for these stubs."""

    def tearDown(self):
        """Nothing to clean up."""

    def testVnicEthAdapterPolicyList(self):
        """Test VnicEthAdapterPolicyList.

        FIXME: construct the object with mandatory attributes using example
        values, e.g.
        intersight.models.vnic_eth_adapter_policy_list.VnicEthAdapterPolicyList()  # noqa: E501
        """


if __name__ == '__main__':
    unittest.main()
| test/test_vnic_eth_adapter_policy_list.py | 1,967 | VnicEthAdapterPolicyList unit test stubs
Test VnicEthAdapterPolicyList
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = intersight.models.vnic_eth_adapter_policy_list.VnicEthAdapterPolicyList() noqa: E501 | 1,453 | en | 0.888637 |
import logging
from pyrogram.errors import InputUserDeactivated, UserNotParticipant, FloodWait, UserIsBlocked, PeerIdInvalid
from info import AUTH_CHANNEL, LONG_IMDB_DESCRIPTION, MAX_LIST_ELM
from imdb import IMDb
import asyncio
from pyrogram.types import Message
from typing import Union
import re
import os
from datetime import datetime
from typing import List
from pyrogram.types import InlineKeyboardButton
from database.users_chats_db import db
from bs4 import BeautifulSoup
import requests
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Matches inline button markup: [label](buttonurl:<url>) or
# [label](buttonalert:<text>), with an optional ':same' suffix that keeps
# the button on the same row as the previous one.
BTN_URL_REGEX = re.compile(
    r"(\[([^\[]+?)\]\((buttonurl|buttonalert):(?:/{0,2})(.+?)(:same)?\))"
)
# Shared IMDb client used by get_poster().
imdb = IMDb()
BANNED = {}
# Smart ("curly") quotes, accepted as opening/closing quote characters by split_quotes().
SMART_OPEN = '“'
SMART_CLOSE = '”'
START_CHAR = ('\'', '"', SMART_OPEN)
# temp db for banned
class temp(object):
    # In-memory (non-persistent) runtime state shared across the bot's handlers.
    BANNED_USERS = []  # banned user ids
    BANNED_CHATS = []  # banned chat ids
    ME = None  # presumably the bot's own id, filled at startup — confirm against caller
    CURRENT=int(os.environ.get("SKIP", 2))  # start index, from the SKIP env var
    CANCEL = False  # set True to cancel an in-progress long-running job
    MELCOW = {}  # per-chat welcome-message bookkeeping — TODO confirm usage
    U_NAME = None  # presumably the bot username, filled at startup
    B_NAME = None  # presumably the bot display name, filled at startup
async def is_subscribed(bot, query):
    """Return True if the query's sender is an active member of AUTH_CHANNEL.

    Users who never joined (UserNotParticipant) and any lookup failure fall
    through to False; members whose status is 'kicked' are also rejected.
    """
    try:
        user = await bot.get_chat_member(AUTH_CHANNEL, query.from_user.id)
    except UserNotParticipant:
        # Not a member at all — treated the same as any other failure below.
        pass
    except Exception as e:
        logger.exception(e)
    else:
        if user.status != 'kicked':
            return True
    return False
async def get_poster(query, bulk=False, id=False, file=None):
    """Look up a movie/series on IMDb and return a dict of its details.

    query: title text (optionally ending in a year) or, when id=True, an
    IMDb movie id.  bulk=True returns the raw list of search matches
    instead of the first match's details.  file, when given, is a file
    name searched for a fallback year.  Returns None when nothing matches.
    """
    if not id:
        # https://t.me/GetTGLink/4183
        query = (query.strip()).lower()
        title = query
        # A trailing 4-digit year (1000-2999) narrows the search by year.
        year = re.findall(r'[1-2]\d{3}$', query, re.IGNORECASE)
        if year:
            year = list_to_str(year[:1])
            title = (query.replace(year, "")).strip()
        elif file is not None:
            # No year in the query: fall back to one found in the file name.
            year = re.findall(r'[1-2]\d{3}', file, re.IGNORECASE)
            if year:
                year = list_to_str(year[:1])
        else:
            year = None
        movieid = imdb.search_movie(title.lower(), results=10)
        if not movieid:
            return None
        if year:
            # Prefer exact year matches, but keep all results if none match.
            filtered=list(filter(lambda k: str(k.get('year')) == str(year), movieid))
            if not filtered:
                filtered = movieid
        else:
            filtered = movieid
        # Prefer proper movies / TV series over other result kinds.
        movieid=list(filter(lambda k: k.get('kind') in ['movie', 'tv series'], filtered))
        if not movieid:
            movieid = filtered
        if bulk:
            return movieid
        movieid = movieid[0].movieID
    else:
        movieid = int(query)
    movie = imdb.get_movie(movieid)
    # Release date preference: air date > year > "N/A".
    if movie.get("original air date"):
        date = movie["original air date"]
    elif movie.get("year"):
        date = movie.get("year")
    else:
        date = "N/A"
    plot = ""
    if not LONG_IMDB_DESCRIPTION:
        plot = movie.get('plot')
        if plot and len(plot) > 0:
            plot = plot[0]
    else:
        plot = movie.get('plot outline')
    # Trim over-long plots so the resulting caption stays short.
    if plot and len(plot) > 800:
        plot = plot[0:800] + "..."
    return {
        'title': movie.get('title'),
        'votes': movie.get('votes'),
        "aka": list_to_str(movie.get("akas")),
        "seasons": movie.get("number of seasons"),
        "box_office": movie.get('box office'),
        'localized_title': movie.get('localized title'),
        'kind': movie.get("kind"),
        "imdb_id": f"tt{movie.get('imdbID')}",
        "cast": list_to_str(movie.get("cast")),
        "runtime": list_to_str(movie.get("runtimes")),
        "countries": list_to_str(movie.get("countries")),
        "certificates": list_to_str(movie.get("certificates")),
        "languages": list_to_str(movie.get("languages")),
        "director": list_to_str(movie.get("director")),
        "writer":list_to_str(movie.get("writer")),
        "producer":list_to_str(movie.get("producer")),
        "composer":list_to_str(movie.get("composer")) ,
        "cinematographer":list_to_str(movie.get("cinematographer")),
        "music_team": list_to_str(movie.get("music department")),
        "distributors": list_to_str(movie.get("distributors")),
        'release_date': date,
        'year': movie.get('year'),
        'genres': list_to_str(movie.get("genres")),
        'poster': movie.get('full-size cover url'),
        'plot': plot,
        'rating': str(movie.get("rating")),
        'url':f'https://www.imdb.com/title/tt{movieid}'
    }
# https://github.com/odysseusmax/animated-lamp/blob/2ef4730eb2b5f0596ed6d03e7b05243d93e3415b/bot/utils/broadcast.py#L37
async def broadcast_messages(user_id, message):
    """Copy *message* to *user_id* and return a (success, status) tuple.

    Retries after a FloodWait; removes users with deleted accounts or
    invalid peer ids from the database so they are skipped next time.
    """
    try:
        await message.copy(chat_id=user_id)
        return True, "Succes"
    except FloodWait as e:
        # Respect Telegram's flood-wait interval, then retry the same user.
        await asyncio.sleep(e.x)
        return await broadcast_messages(user_id, message)
    except InputUserDeactivated:
        await db.delete_user(int(user_id))
        logging.info(f"{user_id}-Removed from Database, since deleted account.")
        return False, "Deleted"
    except UserIsBlocked:
        logging.info(f"{user_id} -Blocked the bot.")
        return False, "Blocked"
    except PeerIdInvalid:
        await db.delete_user(int(user_id))
        logging.info(f"{user_id} - PeerIdInvalid")
        return False, "Error"
    except Exception as e:
        return False, "Error"
async def search_gagala(text):
    """Scrape Google search result titles for *text* and return them as a list.

    NOTE(review): the blocking requests.get call inside an async def blocks
    the event loop; consider run_in_executor or an async HTTP client.
    """
    usr_agent = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/61.0.3163.100 Safari/537.36'
    }
    text = text.replace(" ", '+')
    url = f'https://www.google.com/search?q={text}'
    response = requests.get(url, headers=usr_agent)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    # Result titles are rendered as <h3> elements on the results page.
    titles = soup.find_all( 'h3' )
    return [title.getText() for title in titles]
def get_size(size):
    """Return *size* (in bytes) as a human readable string, e.g. '1.50 MB'.

    Values beyond the exabyte range are expressed in the largest unit (EB)
    instead of overflowing the unit list.
    """
    units = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB"]
    size = float(size)
    i = 0
    # Fix: the original allowed i to reach len(units) and then indexed
    # units[i] out of range; stop at the last unit instead.
    while size >= 1024.0 and i < len(units) - 1:
        i += 1
        size /= 1024.0
    return "%.2f %s" % (size, units[i])
def split_list(l, n):
    """Yield successive chunks of *l*, each at most *n* items long."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def get_file_id(msg: Message):
    """Return the first media object attached to *msg*, tagged with a
    .message_type attribute naming which kind it is; implicitly returns
    None when the message carries no media."""
    if msg.media:
        for message_type in (
            "photo",
            "animation",
            "audio",
            "document",
            "video",
            "video_note",
            "voice",
            "sticker"
        ):
            obj = getattr(msg, message_type)
            if obj:
                # Record which attribute matched so callers can branch on it.
                setattr(obj, "message_type", message_type)
                return obj
def extract_user(message: Message) -> Union[int, str]:
    """extracts the user from a message"""
    # https://github.com/SpEcHiDe/PyroGramBot/blob/f30e2cca12002121bad1982f68cd0ff9814ce027/pyrobot/helper_functions/extract_user.py#L7
    # Resolution order: replied-to user > text_mention entity > command
    # argument (numeric id or username string) > the sender themselves.
    # NOTE(review): the function actually returns a (user_id, first_name)
    # tuple, not the Union[int, str] the annotation claims.
    user_id = None
    user_first_name = None
    if message.reply_to_message:
        user_id = message.reply_to_message.from_user.id
        user_first_name = message.reply_to_message.from_user.first_name
    elif len(message.command) > 1:
        if (
            len(message.entities) > 1 and
            message.entities[1].type == "text_mention"
        ):
            required_entity = message.entities[1]
            user_id = required_entity.user.id
            user_first_name = required_entity.user.first_name
        else:
            user_id = message.command[1]
            # don't want to make a request -_-
            user_first_name = user_id
            try:
                # Numeric argument -> treat as an id; otherwise keep the string.
                user_id = int(user_id)
            except ValueError:
                pass
    else:
        user_id = message.from_user.id
        user_first_name = message.from_user.first_name
    return (user_id, user_first_name)
def list_to_str(k):
    """Render a list as a comma-separated display string ('N/A' if empty)."""
    if not k:
        return "N/A"
    if len(k) == 1:
        return str(k[0])
    if MAX_LIST_ELM:
        # Show at most MAX_LIST_ELM entries when the cap is configured.
        k = k[:int(MAX_LIST_ELM)]
    return ' '.join(f'{elem}, ' for elem in k)
def last_online(from_user):
    """Describe when *from_user* was last seen, as a human readable string."""
    if from_user.is_bot:
        return "🤖 Bot :("
    status_text = {
        'recently': "Recently",
        'within_week': "Within the last week",
        'within_month': "Within the last month",
        'long_time_ago': "A long time ago :(",
        'online': "Currently Online",
    }
    if from_user.status in status_text:
        return status_text[from_user.status]
    if from_user.status == 'offline':
        # Exact last-seen timestamp is only available for 'offline' users.
        return datetime.fromtimestamp(from_user.last_online_date).strftime("%a, %d %b %Y, %H:%M:%S")
    return ""
def split_quotes(text: str) -> List:
    """Split *text* into [first_token, rest], honouring a quoted first token.

    If the text starts with ' " or a smart quote, the first token is the
    quoted span (unescaped); an unterminated quote falls back to a plain
    whitespace split.
    """
    if not any(text.startswith(char) for char in START_CHAR):
        return text.split(None, 1)
    counter = 1  # ignore first char -> is some kind of quote
    while counter < len(text):
        if text[counter] == "\\":
            # Skip the escaped character so an escaped quote doesn't close.
            counter += 1
        elif text[counter] == text[0] or (text[0] == SMART_OPEN and text[counter] == SMART_CLOSE):
            break
        counter += 1
    else:
        # No closing quote found: behave like an unquoted split.
        return text.split(None, 1)
    # 1 to avoid starting quote, and counter is exclusive so avoids ending
    key = remove_escapes(text[1:counter].strip())
    # index will be in range, or `else` would have been executed and returned
    rest = text[counter + 1:].strip()
    if not key:
        # Empty quoted token: return the literal pair of quote characters.
        key = text[0] + text[0]
    return list(filter(None, [key, rest]))
def parser(text, keyword):
    """Parse inline-button markup out of *text*.

    Returns (note_data, buttons, alerts): the text with button markup
    removed, the InlineKeyboardButton rows, and the alert texts for
    'buttonalert' buttons (whose callback data is "alertmessage:<i>:<keyword>").
    Regex groups: 2=label, 3=kind, 4=target, 5=optional ':same' row flag.
    """
    if "buttonalert" in text:
        text = (text.replace("\n", "\\n").replace("\t", "\\t"))
    buttons = []
    note_data = ""
    prev = 0
    i = 0
    alerts = []
    for match in BTN_URL_REGEX.finditer(text):
        # Check if btnurl is escaped
        n_escapes = 0
        to_check = match.start(1) - 1
        while to_check > 0 and text[to_check] == "\\":
            n_escapes += 1
            to_check -= 1
        # if even, not escaped -> create button
        if n_escapes % 2 == 0:
            note_data += text[prev:match.start(1)]
            prev = match.end(1)
            if match.group(3) == "buttonalert":
                # create a thruple with button label, url, and newline status
                if bool(match.group(5)) and buttons:
                    buttons[-1].append(InlineKeyboardButton(
                        text=match.group(2),
                        callback_data=f"alertmessage:{i}:{keyword}"
                    ))
                else:
                    buttons.append([InlineKeyboardButton(
                        text=match.group(2),
                        callback_data=f"alertmessage:{i}:{keyword}"
                    )])
                i += 1
                alerts.append(match.group(4))
            elif bool(match.group(5)) and buttons:
                buttons[-1].append(InlineKeyboardButton(
                    text=match.group(2),
                    url=match.group(4).replace(" ", "")
                ))
            else:
                buttons.append([InlineKeyboardButton(
                    text=match.group(2),
                    url=match.group(4).replace(" ", "")
                )])
        else:
            # Escaped markup: keep it literally (dropping one escape char).
            note_data += text[prev:to_check]
            prev = match.start(1) - 1
    else:
        # for/else: runs after the loop (always — there is no break above).
        note_data += text[prev:]
    try:
        return note_data, buttons, alerts
    except:
        # Defensive only; alerts is always bound at this point.
        return note_data, buttons, None
def remove_escapes(text: str) -> str:
    """Drop backslash escape characters, keeping the characters they escaped.

    A trailing lone backslash is simply discarded.
    """
    out = []
    escaped = False
    for ch in text:
        if escaped:
            out.append(ch)
            escaped = False
        elif ch == "\\":
            escaped = True
        else:
            out.append(ch)
    return "".join(out)
def humanbytes(size):
    """Return *size* (in bytes) as a human readable string, e.g. '2.0 KiB'.

    Returns '' for falsy sizes (0 or None).  Values beyond the TiB range
    stay expressed in TiB rather than overrunning the unit table.
    """
    if not size:
        return ""
    power = 2**10
    n = 0
    Dic_powerN = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
    # Fix: the original line was the syntax error 'n +=' — advance one unit
    # per division, and cap at the largest unit in the table.
    while size > power and n < 4:
        size /= power
        n += 1
    return str(round(size, 2)) + " " + Dic_powerN[n] + 'B'
| untils.py | 11,861 | temp db for banned https://t.me/GetTGLink/4183 https://github.com/odysseusmax/animated-lamp/blob/2ef4730eb2b5f0596ed6d03e7b05243d93e3415b/bot/utils/broadcast.pyL37 https://github.com/SpEcHiDe/PyroGramBot/blob/f30e2cca12002121bad1982f68cd0ff9814ce027/pyrobot/helper_functions/extract_user.pyL7 don't want to make a request -_- ignore first char -> is some kind of quote 1 to avoid starting quote, and counter is exclusive so avoids ending index will be in range, or `else` would have been executed and returned Check if btnurl is escaped if even, not escaped -> create button create a thruple with button label, url, and newline status | 635 | en | 0.683476 |
"""
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 128
args_lr = 0.001
args_model = 'vgg16'
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_max_param/' + job_name + '*'
total_epochs = 5
starting_epoch = 0
# first step is to update the PID
pid_dict = {}
with open('pid_lock.json', 'r') as fp:
pid_dict = json.load(fp)
pid_dict[job_name] = os.getpid()
json_file = json.dumps(pid_dict)
with open('pid_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('pid_lock.json', 'pid.json')
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
model = keras.models.load_model(save_file)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
    """SIGTERM handler: checkpoint the model, flag the job in checkpoint.json,
    and exit the process."""
    print('checkpointing the model triggered by kill -15 signal')
    # delete whatever checkpoint that already exists
    for f in glob.glob(save_files):
        os.remove(f)
    # Save under the current epoch so training can resume from here.
    model.save('/scratch/li.baol/checkpoint_max_param/' + job_name + '_' + str(current_epoch) + '.h5')
    print ('(SIGTERM) terminating the process')
    # Mark this job as checkpointed in the shared JSON bookkeeping file.
    checkpoint_dict = {}
    with open('checkpoint.json', 'r') as fp:
        checkpoint_dict = json.load(fp)
    checkpoint_dict[job_name] = 1
    json_file3 = json.dumps(checkpoint_dict)
    with open('checkpoint.json', 'w') as fp:
        fp.write(json_file3)
    sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
class PrintEpoch(keras.callbacks.Callback):
    """Keras callback that tracks the current epoch (read by the SIGTERM
    checkpoint handler) and touches a per-job marker file each epoch."""
    def on_epoch_begin(self, epoch, logs=None):
        global current_epoch
        #remaining_epochs = epochs - epoch
        current_epoch = epoch
        print('current epoch ' + str(current_epoch))
    def on_epoch_end(self, epoch, logs=None):
        # Touch a marker file so an external scheduler can observe progress.
        open('epoch/' + job_name + '.txt', 'a').close()
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
if not args.resume:
trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))
param_dict = {}
modify = False
with open('param_lock.json', 'r') as fp:
param_dict = json.load(fp)
if job_name not in param_dict:
param_dict[job_name] = trainable_count
modify = True
elif param_dict[job_name] != trainable_count:
param_dict[job_name] = trainable_count
modify = True
if modify:
json_file = json.dumps(param_dict)
with open('param_lock.json', 'w') as fp:
fp.write(json_file)
os.rename('param_lock.json', 'param.json')
ckpt_qual_dict = {}
while True:
if os.path.exists('ckpt_qual.json'):
os.rename('ckpt_qual.json', 'ckpt_qual_lock.json')
break
else:
time.sleep(1)
with open('ckpt_qual_lock.json', 'r') as fp:
ckpt_qual_dict = json.load(fp)
ckpt_qual_dict[job_name] = 1
json_file2 = json.dumps(ckpt_qual_dict)
with open('ckpt_qual_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('ckpt_qual_lock.json', 'ckpt_qual.json')
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
finish_dict = {}
while True:
if os.path.exists('finish.json'):
os.rename('finish.json', 'finish_lock.json')
break
else:
time.sleep(1)
with open('finish_lock.json', 'r') as fp:
finish_dict = json.load(fp)
finish_dict[job_name] = 1
json_file2 = json.dumps(finish_dict)
with open('finish_lock.json', 'w') as fp:
fp.write(json_file2)
os.rename('finish_lock.json', 'finish.json')
| examples/pwr_run/checkpointing/timed/max_par/job2.py | 7,697 | #Trains a ResNet on the CIFAR10 dataset.
Training parameters first step is to update the PID epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0]) Subtracting pixel mean improves accuracy Model name, depth and version'P100_resnet50_he_256_1' Load the CIFAR10 data. Normalize data. If subtract pixel mean is enabled Convert class vectors to binary class matrices.base_model.summary()pdb.set_trace(), kernel_initializer='he_uniform'))model.add(layers.Dropout(0.2)), kernel_initializer='he_uniform'))model.add(layers.Dropout(0.2)), kernel_initializer='he_uniform'))model.summary()pdb.set_trace() connects interrupt signal to the process delete whatever checkpoint that already exists, update_freq='batch')remaining_epochs = epochs - epoch[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback] Run training Score trained model. | 847 | en | 0.542521 |
# vmimages.py - azurerm functions for Microsoft.Compute RP publishers and images
from .restfns import do_get
from .settings import azure_rm_endpoint, COMP_API
def list_offers(access_token, subscription_id, location, publisher):
    """List available VM image offers from a publisher in a location."""
    endpoint = (f'{azure_rm_endpoint}'
                f'/subscriptions/{subscription_id}'
                f'/providers/Microsoft.Compute/'
                f'locations/{location}'
                f'/publishers/{publisher}'
                f'/artifacttypes/vmimage/offers?api-version={COMP_API}')
    return do_get(endpoint, access_token)
def list_publishers(access_token, subscription_id, location):
    """List available VM image publishers for a location."""
    endpoint = (f'{azure_rm_endpoint}'
                f'/subscriptions/{subscription_id}'
                f'/providers/Microsoft.Compute/'
                f'locations/{location}'
                f'/publishers?api-version={COMP_API}')
    return do_get(endpoint, access_token)
def list_skus(access_token, subscription_id, location, publisher, offer):
    """List available VM image skus for a publisher's offer."""
    endpoint = (f'{azure_rm_endpoint}'
                f'/subscriptions/{subscription_id}'
                f'/providers/Microsoft.Compute/'
                f'locations/{location}'
                f'/publishers/{publisher}'
                f'/artifacttypes/vmimage/offers/{offer}'
                f'/skus?api-version={COMP_API}')
    return do_get(endpoint, access_token)
def list_sku_versions(access_token, subscription_id, location, publisher, offer, sku):
    """List available versions for a given publisher's sku."""
    endpoint = (f'{azure_rm_endpoint}'
                f'/subscriptions/{subscription_id}'
                f'/providers/Microsoft.Compute/'
                f'locations/{location}'
                f'/publishers/{publisher}'
                f'/artifacttypes/vmimage/offers/{offer}'
                f'/skus/{sku}'
                f'/versions?api-version={COMP_API}')
    return do_get(endpoint, access_token)
| mcazurerm/vmimages.py | 2,530 | vmimages.py - azurerm functions for Microsoft.Compute RP publishers and images list_offers(access_token, subscription_id, location, publisher) list available VM image offers from a publisher list_publishers(access_token, subscription_id, location) list available image publishers for a location list_skus(access_token, subscription_id, location, publisher, offer) list available VM image skus for a publisher offer list_sku_versions(access_token, subscription_id, location, publisher, offer, sku) list available versions for a given publisher's sku | 548 | en | 0.667398 |
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Timing settings for all of pywinauto
This module has one object that should be used for all timing adjustments
timings.Timings
There are a couple of predefined settings
timings.Timings.Fast()
timings.Timings.Defaults()
timings.Timings.Slow()
The Following are the individual timing settings that can be adjusted:
* window_find_timeout (default 5)
* window_find_retry (default .09)
* app_start_timeout (default 10)
* app_start_retry (default .90)
* app_connect_timeout (default 5.)
* app_connect_retry (default .1)
* cpu_usage_interval (default .5)
* cpu_usage_wait_timeout (default 20)
* exists_timeout (default .5)
* exists_retry (default .3)
* after_click_wait (default .09)
* after_clickinput_wait (default .09)
* after_menu_wait (default .1)
* after_sendkeys_key_wait (default .01)
* after_button_click_wait (default 0)
* before_closeclick_wait (default .1)
* closeclick_retry (default .05)
* closeclick_dialog_close_wait (default 2)
* after_closeclick_wait (default .2)
* after_windowclose_timeout (default 2)
* after_windowclose_retry (default .5)
* after_setfocus_wait (default .06)
* setfocus_timeout (default 2)
* setfocus_retry (default .1)
* after_setcursorpos_wait (default .01)
* sendmessagetimeout_timeout (default .01)
* after_tabselect_wait (default .05)
* after_listviewselect_wait (default .01)
* after_listviewcheck_wait default(.001)
* listviewitemcontrol_timeout default(1.5)
* after_treeviewselect_wait default(.1)
* after_toobarpressbutton_wait default(.01)
* after_updownchange_wait default(.1)
* after_movewindow_wait default(0)
* after_buttoncheck_wait default(0)
* after_comboboxselect_wait default(.001)
* after_listboxselect_wait default(0)
* after_listboxfocuschange_wait default(0)
* after_editsetedittext_wait default(0)
* after_editselect_wait default(.02)
* drag_n_drop_move_mouse_wait default(.1)
* before_drag_wait default(.2)
* before_drop_wait default(.1)
* after_drag_n_drop_wait default(.1)
* scroll_step_wait default(.1)
"""
import six
import time
import operator
from functools import wraps
from . import deprecated
#=========================================================================
class TimeConfig(object):
    """Central storage and manipulation of timing values.

    All reads/writes of individual timings go through __getattribute__ /
    __setattr__, which dispatch to the shared class-level ``_timings`` dict.
    ``Defaults()`` restores the baseline values; ``Fast()`` and ``Slow()``
    derive speeds from the current / default values respectively.
    """
    # Baseline values (seconds). Defaults() restores these; Fast()/Slow()
    # derive adjusted values from them.
    __default_timing = {
        'window_find_timeout': 5.,
        'window_find_retry': .09,
        'app_start_timeout': 10.,
        'app_start_retry': .90,
        'app_connect_timeout': 5.,
        'app_connect_retry': .1,
        'cpu_usage_interval': .5,
        'cpu_usage_wait_timeout': 20.,
        'exists_timeout': .5,
        'exists_retry': .3,
        'after_click_wait': .09,
        'after_clickinput_wait': .09,
        'after_menu_wait': .1,
        'after_sendkeys_key_wait': .01,
        'after_button_click_wait': 0,
        'before_closeclick_wait': .1,
        'closeclick_retry': .05,
        'closeclick_dialog_close_wait': 2.,
        'after_closeclick_wait': .2,
        'after_windowclose_timeout': 2,
        'after_windowclose_retry': .5,
        'after_setfocus_wait': .06,
        'setfocus_timeout': 2,
        'setfocus_retry': .1,
        'after_setcursorpos_wait': .01,
        'sendmessagetimeout_timeout': .01,
        'after_tabselect_wait': .05,
        'after_listviewselect_wait': .01,
        'after_listviewcheck_wait': .001,
        'listviewitemcontrol_timeout': 1.5,
        'after_treeviewselect_wait': .1,
        'after_toobarpressbutton_wait': .01,
        'after_updownchange_wait': .1,
        'after_movewindow_wait': 0,
        'after_buttoncheck_wait': 0,
        'after_comboboxselect_wait': 0.001,
        'after_listboxselect_wait': 0,
        'after_listboxfocuschange_wait': 0,
        'after_editsetedittext_wait': 0,
        'after_editselect_wait': 0.02,
        'drag_n_drop_move_mouse_wait': 0.1,
        'before_drag_wait': 0.2,
        'before_drop_wait': 0.1,
        'after_drag_n_drop_wait': 0.1,
        'scroll_step_wait': 0.1,
        'app_exit_timeout': 10.,
        'app_exit_retry': .1,
    }
    # Sanity check: the window-find timeout must allow at least two retries.
    assert(__default_timing['window_find_timeout'] >=
        __default_timing['window_find_retry'] * 2)
    # Live values (shared at class level) and a speed marker.
    _timings = __default_timing.copy()
    _cur_speed = 1
    def __getattribute__(self, attr):
        """Get the value for a particular timing"""
        # Pass genuine introspection attributes straight through.
        if attr in ['__dict__', '__members__', '__methods__', '__class__']:
            return object.__getattribute__(self, attr)
        # Methods and class attributes (Fast, Slow, _timings, ...) resolve normally.
        if attr in dir(TimeConfig):
            return object.__getattribute__(self, attr)
        # Any known timing name is looked up in the live _timings dict.
        if attr in self.__default_timing:
            return self._timings[attr]
        else:
            raise AttributeError("Unknown timing setting: {0}".format(attr))
    def __setattr__(self, attr, value):
        """Set a particular timing"""
        if attr == '_timings':
            # Allow Defaults() to replace the whole dict.
            object.__setattr__(self, attr, value)
        elif attr in self.__default_timing:
            self._timings[attr] = value
        else:
            raise AttributeError("Unknown timing setting: {0}".format(attr))
    def Fast(self):
        """Set fast timing values

        Currently this changes the timing in the following ways:
        timeouts are clamped to at most 1 second
        waits are halved (each call to Fast() halves them again)
        retries = .001 seconds (minimum!)

        Note: a name containing both "_wait" and "_timeout"
        (cpu_usage_wait_timeout) is both clamped and halved, because the
        "_wait" test below is an independent ``if``, not an ``elif``.
        """
        for setting in self.__default_timing:
            # set timeouts to the min of the current speed or 1 second
            if "_timeout" in setting:
                self._timings[setting] = \
                    min(1, self._timings[setting])
            if "_wait" in setting:
                self._timings[setting] = self._timings[setting] / 2
            elif setting.endswith("_retry"):
                self._timings[setting] = 0.001
            #self._timings['app_start_timeout'] = .5
    def Slow(self):
        """Set slow timing values

        Currently this changes the timing in the following ways:
        timeouts = default timeouts * 10
        waits = default waits * 3
        retries = default retries * 3 (with a floor of .2 seconds)
        (if existing times are slower then keep existing times)
        """
        for setting in self.__default_timing:
            if "_timeout" in setting:
                self._timings[setting] = max(
                    self.__default_timing[setting] * 10,
                    self._timings[setting])
            if "_wait" in setting:
                self._timings[setting] = max(
                    self.__default_timing[setting] * 3,
                    self._timings[setting])
            elif setting.endswith("_retry"):
                self._timings[setting] = max(
                    self.__default_timing[setting] * 3,
                    self._timings[setting])
                if self._timings[setting] < .2:
                    self._timings[setting] = .2
    def Defaults(self):
        """Set all timings to the default time"""
        self._timings = self.__default_timing.copy()
# Module-level singleton through which all timing values are read and tuned.
Timings = TimeConfig()
#=========================================================================
class TimeoutError(RuntimeError):
    """Raised by wait_until/wait_until_passes when the timeout expires.

    NOTE(review): this shadows the Python 3.3+ builtin ``TimeoutError``;
    callers should catch it via this module's name.
    """
    pass
#=========================================================================
if six.PY3:
    # time.clock was removed in Python 3.8; perf_counter is its replacement.
    _clock_func = time.perf_counter
else:
    _clock_func = time.clock
def timestamp():
    """Return the current high-resolution clock reading (float seconds)."""
    now = _clock_func()
    return now
#=========================================================================
def always_wait_until(timeout,
                      retry_interval,
                      value=True,
                      op=operator.eq):
    """Decorator that routes every call of the target through wait_until().

    The decorated callable is retried for up to *timeout* seconds, every
    *retry_interval* seconds, until ``op(result, value)`` is true.
    """
    def decorator(func):
        """Wrap *func* so it is retried via wait_until on every call."""
        @wraps(func)
        def retrying_call(*args, **kwargs):
            """Forward the call through wait_until with the captured settings."""
            return wait_until(timeout, retry_interval,
                              func, value, op, *args, **kwargs)
        return retrying_call
    return decorator
#=========================================================================
def wait_until(timeout,
               retry_interval,
               func,
               value=True,
               op=operator.eq,
               *args, **kwargs):
    r"""
    Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires

    * **timeout** how long (in seconds) to keep retrying the function
    * **retry_interval** how long to wait between retries
    * **func** the function that will be executed
    * **value** the value to be compared against (defaults to True)
    * **op** the comparison function (defaults to equality)
    * **args** optional arguments to be passed to func when called
    * **kwargs** optional keyword arguments to be passed to func when called

    Returns the return value of the function.

    If the operation times out then the return value of the function
    is in the 'function_value' attribute of the raised exception.

    e.g. ::

        try:
            # wait a maximum of 10.5 seconds for the
            # the objects item_count() method to return 10
            # in increments of .5 of a second
            wait_until(10.5, .5, self.item_count, 10)
        except TimeoutError as e:
            print("timed out")
    """
    started = timestamp()
    # Always call the function at least once before checking the clock.
    result = func(*args, **kwargs)
    while True:
        if op(result, value):
            # The condition is satisfied - hand the latest result back.
            return result
        remaining = timeout - (timestamp() - started)
        if remaining <= 0:
            # Out of time: report the failure but keep the last result
            # available to the caller on the exception object.
            timed_out = TimeoutError("timed out")
            timed_out.function_value = result
            raise timed_out
        # Sleep for the retry interval, but never past the deadline.
        time.sleep(min(retry_interval, remaining))
        result = func(*args, **kwargs)
# Non PEP-8 alias
# Kept for backward compatibility with the pre-PEP8 pywinauto API; `deprecated`
# wraps the function (presumably emitting a warning -- see pywinauto.deprecated).
WaitUntil = deprecated(wait_until)
#=========================================================================
def always_wait_until_passes(timeout,
                             retry_interval,
                             exceptions=(Exception)):
    """Decorator that routes every call of the target through wait_until_passes().

    The decorated callable is retried for up to *timeout* seconds, every
    *retry_interval* seconds, until it stops raising one of *exceptions*.
    """
    def decorator(func):
        """Wrap *func* so it is retried via wait_until_passes on every call."""
        @wraps(func)
        def retrying_call(*args, **kwargs):
            """Forward the call through wait_until_passes with the captured settings."""
            return wait_until_passes(timeout, retry_interval,
                                     func, exceptions, *args, **kwargs)
        return retrying_call
    return decorator
#=========================================================================
def wait_until_passes(timeout,
                      retry_interval,
                      func,
                      exceptions=(Exception,),
                      *args, **kwargs):
    """
    Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions

    * **timeout** how long (in seconds) to keep retrying the function
    * **retry_interval** how long to wait between retries
    * **func** the function that will be executed
    * **exceptions** exception class or tuple of classes to test against
      (default: ``(Exception,)``)
    * **args** optional arguments to be passed to func when called
    * **kwargs** optional keyword arguments to be passed to func when called

    Returns the return value of the function.

    If the operation times out then the original exception raised is in
    the 'original_exception' attribute of the raised exception.

    e.g. ::

        try:
            # wait a maximum of 10.5 seconds for the
            # window to be found in increments of .5 of a second.
            # Print a message and re-raise the original exception if never found.
            wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError,))
        except TimeoutError as e:
            print("timed out")
            raise e
    """
    # Fix: the previous default, ``(Exception)``, was just Exception with
    # redundant parentheses, not a 1-tuple. ``except`` accepts both forms,
    # so behavior is unchanged, but the intent is now explicit.
    start = timestamp()
    # keep trying until the timeout is passed
    while True:
        try:
            # Call the function with any arguments
            func_val = func(*args, **kwargs)
            # if no exception is raised then we are finished
            break
        # An exception was raised - so wait and try again
        except exceptions as e:
            # find out how much of the time is left
            time_left = timeout - (timestamp() - start)
            # if we have to wait some more
            if time_left > 0:
                # wait either the retry_interval or else the amount of
                # time until the timeout expires (whichever is less)
                time.sleep(min(retry_interval, time_left))
            else:
                # Raise a TimeoutError - and put the original exception
                # inside it so callers can inspect the root cause.
                err = TimeoutError()
                err.original_exception = e
                raise err
    # return the function value
    return func_val
# Non PEP-8 alias
# Backward-compatible alias for the pre-PEP8 pywinauto API, wrapped by
# `deprecated` (presumably warning callers -- see pywinauto.deprecated).
WaitUntilPasses = deprecated(wait_until_passes)
| auto1/venv/Lib/site-packages/pywinauto/timings.py | 15,079 | Central storage and manipulation of timing values
Set all timings to the default time
Set fast timing values
Currently this changes the timing in the following ways:
timeouts = 1 second
waits = 0 seconds
retries = .001 seconds (minimum!)
(if existing times are faster then keep existing times)
Set slow timing values
Currently this changes the timing in the following ways:
timeouts = default timeouts * 10
waits = default waits * 3
retries = default retries * 3
(if existing times are slower then keep existing times)
Get the value for a particular timing
Set a particular timing
Decorator to call wait_until(...) every time for a decorated function/method
Decorator to call wait_until_passes(...) every time for a decorated function/method
Get a precise timestamp
Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **value** the value to be compared against (defaults to True)
* **op** the comparison function (defaults to equality)\
* **args** optional arguments to be passed to func when called
* **kwargs** optional keyword arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the return value of the function
is in the 'function_value' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# the objects item_count() method to return 10
# in increments of .5 of a second
wait_until(10.5, .5, self.item_count, 10)
except TimeoutError as e:
print("timed out")
Callable object that must be returned by the @always_wait_until decorator
Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions
* **timeout** how long the function will try the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **exceptions** list of exceptions to test against (default: Exception)
* **args** optional arguments to be passed to func when called
* **kwargs** optional keyword arguments to be passed to func when called
Returns the return value of the function
If the operation times out then the original exception raised is in
the 'original_exception' attribute of the raised exception.
e.g. ::
try:
# wait a maximum of 10.5 seconds for the
# window to be found in increments of .5 of a second.
# Print a message and re-raise the original exception if never found.
wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError))
except TimeoutError as e:
print("timed out")
raise e.
Callable object that must be returned by the @always_wait_until_passes decorator
pre-callback, target function call and post-callback
pre-callback, target function call and post-callback
Timing settings for all of pywinauto
This module has one object that should be used for all timing adjustments
timings.Timings
There are a couple of predefined settings
timings.Timings.Fast()
timings.Timings.Defaults()
timings.Timings.Slow()
The Following are the individual timing settings that can be adjusted:
* window_find_timeout (default 5)
* window_find_retry (default .09)
* app_start_timeout (default 10)
* app_start_retry (default .90)
* app_connect_timeout (default 5.)
* app_connect_retry (default .1)
* cpu_usage_interval (default .5)
* cpu_usage_wait_timeout (default 20)
* exists_timeout (default .5)
* exists_retry (default .3)
* after_click_wait (default .09)
* after_clickinput_wait (default .09)
* after_menu_wait (default .1)
* after_sendkeys_key_wait (default .01)
* after_button_click_wait (default 0)
* before_closeclick_wait (default .1)
* closeclick_retry (default .05)
* closeclick_dialog_close_wait (default 2)
* after_closeclick_wait (default .2)
* after_windowclose_timeout (default 2)
* after_windowclose_retry (default .5)
* after_setfocus_wait (default .06)
* setfocus_timeout (default 2)
* setfocus_retry (default .1)
* after_setcursorpos_wait (default .01)
* sendmessagetimeout_timeout (default .01)
* after_tabselect_wait (default .05)
* after_listviewselect_wait (default .01)
* after_listviewcheck_wait default(.001)
* listviewitemcontrol_timeout default(1.5)
* after_treeviewselect_wait default(.1)
* after_toobarpressbutton_wait default(.01)
* after_updownchange_wait default(.1)
* after_movewindow_wait default(0)
* after_buttoncheck_wait default(0)
* after_comboboxselect_wait default(.001)
* after_listboxselect_wait default(0)
* after_listboxfocuschange_wait default(0)
* after_editsetedittext_wait default(0)
* after_editselect_wait default(.02)
* drag_n_drop_move_mouse_wait default(.1)
* before_drag_wait default(.2)
* before_drop_wait default(.1)
* after_drag_n_drop_wait default(.1)
* scroll_step_wait default(.1)
GUI Application automation and testing library Copyright (C) 2006-2018 Mark Mc Mahon and Contributors https://github.com/pywinauto/pywinauto/graphs/contributors http://pywinauto.readthedocs.io/en/latest/credits.html All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of pywinauto nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.========================================================================= set timeouts to the min of the current speed or 1 secondself._timings['app_start_timeout'] = .5==================================================================================================================================================================================================================================================================================================== while the function hasn't returned what we are waiting for find out how much of the time is left if we have to wait some more wait either the retry_interval or else the amount of time until the timeout expires (whichever is less) Non PEP-8 alias================================================================================================================================================== keep trying until the timeout is passed Call the function with any arguments if no exception is raised then we are finished An exception was raised - so wait and try again find out how much of the time is left if we have to wait some more wait either the retry_interval or else the amount of time until the timeout expires (whichever is less) Raise a TimeoutError - and put the original exception inside it return the function value Non PEP-8 alias | 7,946 | en | 0.661094 |
from corvus.structures import Handler, Exchange, Loop, Update
import corvutils.pyparsing as pp
import os, sys, subprocess, shutil #, resource
import re
from scipy.interpolate import CubicSpline
from scipy.integrate import quad
from scipy.signal import convolve
import numpy as np
# Debug: FDV
import pprint
pp_debug = pprint.PrettyPrinter(indent=4)
# Define dictionary of implemented calculations
# Registry mapping output-token keys to the Exchange description that
# produces them; mbconv.canProduce/cost/sequenceFor all consult this dict.
implemented = {}
# Canonical key for a list of output tokens: sorted, comma-joined.
strlistkey = lambda L:','.join(sorted(L))
# All non-empty subsets of a list, via bit-mask enumeration.
subs = lambda L:[{L[j] for j in range(len(L)) if 1<<j&k} for k in range(1,1<<len(L))]
#for s in subs(['cell_vectors', 'cell_struct_xyz_red', 'cell_scaling_iso', 'cell_scaling_abc', 'number_density']):
#    key = strlistkey(s)
#    autodesc = 'Get ' + ', '.join(s) + ' using cif2cell'
#    cost = 10
#    implemented[key] = {'type':'Exchange','out':list(s),'req':['cif_input'],
#                        'desc':autodesc,'cost':cost}
# The only calculation this handler provides: many-body XANES from the
# configurationally averaged XANES plus a spectral function.
implemented['mbxanes'] = {'type':'Exchange','out':['mbxanes'],'cost':0,
                        'req':['xanes_cfavg','spectralFunction'],'desc':'Calculate many-body xanes from xanes and spectral function.'}
#'req':['xanes','spectal_function'],'desc':'Calculate supercell from cif input.'}
class mbconv(Handler):
    """Corvus Handler producing a many-body XANES spectrum.

    Convolves the configurationally averaged single-particle XANES
    ('xanes_cfavg') with a many-body spectral function ('spectralFunction')
    to produce the 'mbxanes' output.
    """

    def __str__(self):
        return 'mbconv Handler'

    @staticmethod
    def canProduce(output):
        """Return True if *output* (a token or list of tokens) is implemented."""
        if isinstance(output, list) and output and isinstance(output[0], str):
            return strlistkey(output) in implemented
        elif isinstance(output, str):
            return output in implemented
        else:
            raise TypeError('Output should be token or list of tokens')

    @staticmethod
    def requiredInputFor(output):
        """Return the input tokens required to produce *output*.

        Tokens this handler cannot produce are passed through unresolved.
        """
        if isinstance(output, list) and output and isinstance(output[0], str):
            unresolved = {o for o in output if not mbconv.canProduce(o)}
            canProduce = (o for o in output if mbconv.canProduce(o))
            additionalInput = (set(implemented[o]['req']) for o in canProduce)
            return list(set.union(unresolved, *additionalInput))
        elif isinstance(output, str):
            if output in implemented:
                return implemented[output]['req']
            else:
                return [output]
        else:
            raise TypeError('Output should be token or list of tokens')

    @staticmethod
    def cost(output):
        """Return the declared cost of producing *output*."""
        if isinstance(output, list) and output and isinstance(output[0], str):
            key = strlistkey(output)
        elif isinstance(output, str):
            key = output
        else:
            raise TypeError('Output should be token or list of tokens')
        if key not in implemented:
            raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
        return implemented[key]['cost']

    @staticmethod
    def sequenceFor(output, inp=None):
        """Return the Exchange describing how to produce *output*."""
        if isinstance(output, list) and output and isinstance(output[0], str):
            key = strlistkey(output)
        elif isinstance(output, str):
            key = output
        else:
            raise TypeError('Output should be token or list of tokens')
        if key not in implemented:
            raise LookupError('Corvus cannot currently produce ' + key + ' using FEFF')
        f = lambda subkey: implemented[key][subkey]
        required = f('req')
        # Guard against the default inp=None before inspecting its keys
        # (the original crashed with AttributeError on None.keys()).
        if inp is not None and 'mbconv' in inp:
            # TODO(JJK): add the requirements of the internal workflow here.
            # The original called required.extend() with no argument, which
            # raises TypeError as soon as 'mbconv' appears in the input.
            pass
        # Bug fix: compare strings with '==', not 'is'. Identity comparison
        # with a literal is implementation-dependent (SyntaxWarning on
        # CPython 3.8+) and may silently return False.
        if f('type') == 'Exchange':
            return Exchange(mbconv, f('req'), f('out'), cost=f('cost'), desc=f('desc'))

    @staticmethod
    def prep(config):
        """Create (if needed) and register the working directory for this step."""
        subdir = config['pathprefix'] + str(config['xcIndex']) + '_MBXANES'
        xcDir = os.path.join(config['cwd'], subdir)
        # Make new output directory if it doesn't exist
        if not os.path.exists(xcDir):
            os.mkdir(xcDir)
        # Store current Exchange directory in configuration
        config['xcDir'] = xcDir

    #@staticmethod
    #def setDefaults(input,target):

    @staticmethod
    def run(config, input, output):
        """Compute the requested targets; currently only 'mbxanes'.

        Interpolates the XANES and the spectral function onto a common even
        grid, convolves them, optionally applies extra Lorentzian broadening,
        and stores [energies, many-body mu] in output['mbxanes'].
        """
        if 'mbxanes' in output:
            # In future use file_reader handler to read in XANES and spectral
            # function if already calculated.
            w = np.array(input.get('xanes_cfavg')[0])
            mu0 = np.array(input.get('xanes_cfavg')[1])
            # The spectral function is stored on a reversed, negated energy axis.
            wsf = np.flip(-1.0 * np.array(input.get('spectralFunction')[0]))
            sf = np.flip(np.array(input.get('spectralFunction')[1]))
            # Interpolate both XANES and spectral function onto an even grid,
            # spaced by the finest step found in either input grid.
            min_diff = np.amin(np.ediff1d(w))
            min_diff = min(min_diff, np.amin(np.ediff1d(wsf)))
            mu0_cs = CubicSpline(w, mu0)
            spfcn_cs = CubicSpline(wsf, sf)
            # Use larger of two ranges to specify range
            w_terp = np.arange(w[0], w[-1], min_diff)
            wsf_terp = np.arange(wsf[0], wsf[-1], min_diff)
            mu0_terp = mu0_cs(w_terp)
            spfcn_terp = spfcn_cs(wsf_terp)
            # Discrete convolution approximating the continuous one: scale by dw.
            mu_mb = convolve(mu0_terp, spfcn_terp, mode='full') * min_diff
            # If extra broadening is requested, convolve with a Lorentzian too.
            if 'mbconv.extra_broadening' in input:
                gam = input['mbconv.extra_broadening'][0][0]
                A_br = gam / np.pi * 1.0 / (wsf_terp**2 + gam**2)
                mu_mb = np.convolve(mu_mb, A_br, mode='same') * min_diff
            # Rebuild the energy axis matching the 'full' convolution output.
            scale = w_terp[-1] - w_terp[0] + wsf_terp[-1] - wsf_terp[0]
            first = w_terp[0] + wsf_terp[0]
            w_terp = np.linspace(0.0, scale, mu_mb.size)
            w_terp = w_terp + first
            mu0_terp = mu0_cs(w_terp)
            output['mbxanes'] = [w_terp, mu_mb]
            # NOTE(review): written to the current working directory, not
            # config['xcDir'] -- confirm this is intentional.
            np.savetxt('mbxanes.dat', np.array([w_terp, mu_mb, mu0_terp]).transpose())

    @staticmethod
    def cleanup(config):
        """No temporary files to remove for this handler."""
        pass
| corvus/mbconv.py | 5,995 | , resource Debug: FDV Define dictionary of implemented calculationsfor s in subs(['cell_vectors', 'cell_struct_xyz_red', 'cell_scaling_iso', 'cell_scaling_abc', 'number_density']): key = strlistkey(s) autodesc = 'Get ' + ', '.join(s) + ' using cif2cell' cost = 10 implemented[key] = {'type':'Exchange','out':list(s),'req':['cif_input'], 'desc':autodesc,'cost':cost}'req':['xanes','spectal_function'],'desc':'Calculate supercell from cif input.'} JJK - Need to add requirements of internal workflow here. Make new output directory if if doesn't exist Store current Exchange directory in configuration@staticmethoddef setDefaults(input,target): Loop over targets in output. In future use file_reader handler to read in XANES and spectral function if already calculated. Interpolate both XANES and spectral function onto an even gridw, mu0 = np.loadtxt('xanes.dat',usecols = (0,1)).Twsf,sf = np.loadtxt('spfcn.dat',usecols = (0,1)).T Use larger of two ranges to specify range If extra broadening is requested, perform a convolution of that as well. | 1,080 | en | 0.510909 |
# https://adventofcode.com/2020/day/14
import itertools
import re
SAMPLE_PATH = "../../input/2020-14-sample.txt"
INPUT_PATH = "../../input/2020-14-input.txt"
def get_data(filename):
    """Read a puzzle input file, splitting it into blank-line-separated blocks.

    Each block becomes a list of its lines; a file with a single block is
    returned as that one list of lines directly.
    """
    with open(filename) as file:
        blocks = [chunk.splitlines() for chunk in file.read().split("\n\n")]
    return blocks[0] if len(blocks) == 1 else blocks
def part_1(program):
    """Run the docking program, applying the bitmask to each stored *value*."""
    mem = {}
    or_mask, and_mask = 0, 0
    for instruction in program:
        if instruction.startswith("mask"):
            bits = instruction[7:]
            # "1" positions force ones (OR); "0" positions force zeros (AND);
            # "X" positions leave the value's bit unchanged.
            or_mask = int(bits.replace("X", "0"), 2)
            and_mask = int(bits.replace("X", "1"), 2)
        else:
            addr, val = map(int, re.findall(r"(\d+)", instruction))
            mem[addr] = (val | or_mask) & and_mask
    return sum(mem.values())
def part_2(program):
    """Run the version-2 decoder: masks float the *addresses*, not the values."""
    mem = {}
    ones = 0
    floating = 0
    float_template = ""
    for instruction in program:
        if instruction.startswith("mask"):
            bits = instruction[7:]
            # "1" positions force ones; "X" positions are floating.
            ones = int(bits.replace("X", "0"), 2)
            floating = bits.count("X")
            # Template with "{}" at every floating bit, "0" elsewhere, so each
            # 0/1 combination yields an XOR mask toggling just those bits.
            float_template = "".join("{}" if b == "X" else "0" for b in bits)
        else:
            addr, val = map(int, re.findall(r"(\d+)", instruction))
            base = addr | ones
            for combo in itertools.product("01", repeat=floating):
                mem[base ^ int(float_template.format(*combo), 2)] = val
    return sum(mem.values())
if __name__ == "__main__":
    # Verify both parts against the worked examples from the puzzle text
    # before running on the real input.
    sample_data = get_data(SAMPLE_PATH)
    assert part_1(sample_data[0]) == 165
    assert part_2(sample_data[1]) == 208
    challenge_data = get_data(INPUT_PATH)
    print(part_1(challenge_data)) # 4297467072083
    print(part_2(challenge_data)) # 5030603328768
| python/2020/day14.py | 1,885 | https://adventofcode.com/2020/day/14 4297467072083 5030603328768 | 64 | en | 0.462645 |
import copy
from unittest import mock
import matplotlib
import pytest
from matplotlib import pyplot as plt
from matplotlib._pylab_helpers import Gcf
@pytest.fixture(autouse=True)
def mpl_test_settings(qt_module, mpl_test_settings):
    """
    Ensure qt_module fixture is *first* fixture.

    We override the `mpl_test_settings` fixture and depend on the `qt_module`
    fixture first. It is very important that it is first, because it skips
    tests when Qt is not available, and if not, then the main
    `mpl_test_settings` fixture will try to switch backends before the skip can
    be triggered.
    """
    # Intentionally empty: only the fixture dependency ordering matters.
    pass
@pytest.fixture
def qt_module(request):
    """Import a Qt binding for the backend named by the test's marker.

    Skips the test if no suitable binding can be imported. Returns a tuple
    ``(QtCore, mods, keys)`` where *mods* maps names like 'AltModifier' to Qt
    modifier flags and *keys* maps names like 'AltKey' to Qt key constants.
    """
    backend, = request.node.get_closest_marker('backend').args
    if backend == 'Qt4Agg':
        try:
            import PyQt4
            # RuntimeError if PyQt5 already imported.
        except (ImportError, RuntimeError):
            try:
                import PySide
            except ImportError:
                pytest.skip("Failed to import a Qt4 binding.")
    elif backend == 'Qt5Agg':
        try:
            import PyQt5
            # RuntimeError if PyQt4 already imported.
        except (ImportError, RuntimeError):
            try:
                import PySide2
            except ImportError:
                pytest.skip("Failed to import a Qt5 binding.")
    else:
        raise ValueError('Backend marker has unknown value: ' + backend)
    qt_compat = pytest.importorskip('matplotlib.backends.qt_compat')
    QtCore = qt_compat.QtCore
    if backend == 'Qt4Agg':
        # PyQt4 exposes PYQT_VERSION_STR; PySide exposes __version_info__.
        try:
            py_qt_ver = int(QtCore.PYQT_VERSION_STR.split('.')[0])
        except AttributeError:
            py_qt_ver = QtCore.__version_info__[0]
        if py_qt_ver != 4:
            pytest.skip(reason='Qt4 is not available')
        from matplotlib.backends.backend_qt4 import (
            MODIFIER_KEYS, SUPER, ALT, CTRL, SHIFT)
    elif backend == 'Qt5Agg':
        from matplotlib.backends.backend_qt5 import (
            MODIFIER_KEYS, SUPER, ALT, CTRL, SHIFT)
    # Build name -> Qt constant lookup tables for the parametrized fixtures.
    mods = {}
    keys = {}
    for name, index in zip(['Alt', 'Control', 'Shift', 'Super'],
                           [ALT, CTRL, SHIFT, SUPER]):
        _, mod, key = MODIFIER_KEYS[index]
        mods[name + 'Modifier'] = mod
        keys[name + 'Key'] = key
    return QtCore, mods, keys
@pytest.fixture
def qt_key(request):
    """Resolve the parametrized key name to the actual Qt key constant."""
    QtCore, _, keys = request.getfixturevalue('qt_module')
    name = request.param
    if not name.startswith('Key'):
        # Modifier-as-key names ('AltKey', ...) come from the fixture's table.
        return keys[name]
    return getattr(QtCore.Qt, name)
@pytest.fixture
def qt_mods(request):
    """Combine the parametrized modifier names into a single Qt modifier flag."""
    QtCore, mods, _ = request.getfixturevalue('qt_module')
    combined = QtCore.Qt.NoModifier
    for name in request.param:
        combined = combined | mods[name]
    return combined
@pytest.mark.parametrize('backend', [
    # Note: the value is irrelevant; the important part is the marker.
    pytest.param('Qt4Agg', marks=pytest.mark.backend('Qt4Agg')),
    pytest.param('Qt5Agg', marks=pytest.mark.backend('Qt5Agg')),
])
def test_fig_close(backend):
    """Closing the Qt window must deregister the figure from Gcf."""
    # Snapshot the figure registry before creating anything.
    figs_before = copy.copy(Gcf.figs)
    fig = plt.figure()
    # Simulate the user pressing the window's close button by reaching in
    # and closing the underlying Qt window object directly.
    fig.canvas.manager.window.close()
    # The FigureManager added by plt.figure() must have been removed again.
    assert Gcf.figs == figs_before
@pytest.mark.backend('Qt5Agg')
def test_fig_signals(qt_module):
    """mainloop() must install SIG_DFL for SIGINT while the Qt event loop
    runs and restore the previously installed handler when it exits."""
    # Create a figure
    fig = plt.figure()
    # Access QtCore
    QtCore = qt_module[0]
    # Access signals
    import signal
    event_loop_signal = None
    # Callback to fire during event loop: save SIGINT handler, then exit
    def fire_signal_and_quit():
        # Save event loop signal
        nonlocal event_loop_signal
        event_loop_signal = signal.getsignal(signal.SIGINT)
        # Request event loop exit
        QtCore.QCoreApplication.exit()
    # Timer to exit event loop
    QtCore.QTimer.singleShot(0, fire_signal_and_quit)
    # Save original SIGINT handler
    original_signal = signal.getsignal(signal.SIGINT)
    # Use our own SIGINT handler to be 100% sure this is working
    def CustomHandler(signum, frame):
        pass
    signal.signal(signal.SIGINT, CustomHandler)
    # mainloop() sets SIGINT, starts Qt event loop (which triggers timer and
    # exits) and then mainloop() resets SIGINT
    matplotlib.backends.backend_qt5._BackendQT5.mainloop()
    # Assert: signal handler during loop execution is signal.SIG_DFL
    assert event_loop_signal == signal.SIG_DFL
    # Assert: current signal handler is the same as the one we set before
    assert CustomHandler == signal.getsignal(signal.SIGINT)
    # Reset SIGINT handler to what it was before the test
    signal.signal(signal.SIGINT, original_signal)
@pytest.mark.parametrize(
    'qt_key, qt_mods, answer',
    [
        ('Key_A', ['ShiftModifier'], 'A'),
        ('Key_A', [], 'a'),
        ('Key_A', ['ControlModifier'], 'ctrl+a'),
        ('Key_Aacute', ['ShiftModifier'],
         '\N{LATIN CAPITAL LETTER A WITH ACUTE}'),
        ('Key_Aacute', [],
         '\N{LATIN SMALL LETTER A WITH ACUTE}'),
        ('ControlKey', ['AltModifier'], 'alt+control'),
        ('AltKey', ['ControlModifier'], 'ctrl+alt'),
        ('Key_Aacute', ['ControlModifier', 'AltModifier', 'SuperModifier'],
         'ctrl+alt+super+\N{LATIN SMALL LETTER A WITH ACUTE}'),
        ('Key_Backspace', [], 'backspace'),
        ('Key_Backspace', ['ControlModifier'], 'ctrl+backspace'),
        ('Key_Play', [], None),
    ],
    indirect=['qt_key', 'qt_mods'],
    ids=[
        'shift',
        'lower',
        'control',
        'unicode_upper',
        'unicode_lower',
        'alt_control',
        'control_alt',
        'modifier_order',
        'backspace',
        'backspace_mod',
        'non_unicode_key',
    ]
)
@pytest.mark.parametrize('backend', [
    # Note: the value is irrelevant; the important part is the marker.
    pytest.param('Qt4Agg', marks=pytest.mark.backend('Qt4Agg')),
    pytest.param('Qt5Agg', marks=pytest.mark.backend('Qt5Agg')),
])
def test_correct_key(backend, qt_key, qt_mods, answer):
    """
    Create a figure, feed a synthetic Qt key event into the canvas (via the
    non-public, qtX-backend-specific keyPressEvent), catch the resulting
    matplotlib key_press_event, and check the translated key string.
    """
    canvas = plt.figure().canvas
    # Build a stand-in for the Qt key event with the parametrized key/mods.
    fake_event = mock.Mock()
    fake_event.isAutoRepeat.return_value = False
    fake_event.key.return_value = qt_key
    fake_event.modifiers.return_value = qt_mods
    def on_key(mpl_event):
        assert mpl_event.key == answer
    canvas.mpl_connect('key_press_event', on_key)
    canvas.keyPressEvent(fake_event)
@pytest.mark.backend('Qt5Agg')
def test_dpi_ratio_change():
    """
    Make sure that if _dpi_ratio changes, the figure dpi changes but the
    widget remains the same physical size.
    """
    # Patch the _dpi_ratio property so we can force specific HiDPI ratios.
    prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio'
    with mock.patch(prop, new_callable=mock.PropertyMock) as p:
        p.return_value = 3
        fig = plt.figure(figsize=(5, 2), dpi=120)
        qt_canvas = fig.canvas
        qt_canvas.show()
        from matplotlib.backends.backend_qt5 import qApp
        # Make sure the mocking worked
        assert qt_canvas._dpi_ratio == 3
        size = qt_canvas.size()
        qt_canvas.manager.show()
        qt_canvas.draw()
        qApp.processEvents()
        # The DPI and the renderer width/height change
        assert fig.dpi == 360
        assert qt_canvas.renderer.width == 1800
        assert qt_canvas.renderer.height == 720
        # The actual widget size and figure physical size don't change
        assert size.width() == 600
        assert size.height() == 240
        # NOTE(review): the commented-out assertions below were already
        # disabled in this file; left untouched.
        # assert qt_canvas.get_width_height() == (600, 240)
        # assert (fig.get_size_inches() == (5, 2)).all()
        p.return_value = 2
        assert qt_canvas._dpi_ratio == 2
        qt_canvas.draw()
        qApp.processEvents()
        # this second processEvents is required to fully run the draw.
        # On `update` we notice the DPI has changed and trigger a
        # resize event to refresh, the second processEvents is
        # required to process that and fully update the window sizes.
        qApp.processEvents()
        # The DPI and the renderer width/height change
        # assert fig.dpi == 240
        # assert qt_canvas.renderer.width == 1200
        # assert qt_canvas.renderer.height == 480
        # The actual widget size and figure physical size don't change
        assert size.width() == 600
        assert size.height() == 240
        # assert qt_canvas.get_width_height() == (600, 240)
        # assert (fig.get_size_inches() == (5, 2)).all()
@pytest.mark.backend('Qt5Agg')
def test_subplottool():
    """Smoke-test the subplot-configuration dialog without blocking on it."""
    fig, ax = plt.subplots()
    # Stub out the modal exec_ call so the dialog never actually opens.
    patched = mock.patch(
        "matplotlib.backends.backend_qt5.SubplotToolQt.exec_",
        lambda self: None)
    with patched:
        fig.canvas.manager.toolbar.configure_subplots()
@pytest.mark.backend('Qt5Agg')
def test_figureoptions():
    """Exercise the "edit parameters" dialog on a figure with varied artists."""
    # A line, an image and a mappable scatter cover the different editor
    # sections; the modal ``exec_`` is stubbed so nothing blocks.
    figure, axes = plt.subplots()
    axes.plot([1, 2])
    axes.imshow([[1]])
    axes.scatter(range(3), range(3), c=range(3))
    noop_exec = mock.patch(
        "matplotlib.backends.qt_editor._formlayout.FormDialog.exec_",
        lambda self: None)
    with noop_exec:
        figure.canvas.manager.toolbar.edit_parameters()
| tests/test_backend_qt.py | 9,414 | Ensure qt_module fixture is *first* fixture.
We override the `mpl_test_settings` fixture and depend on the `qt_module`
fixture first. It is very important that it is first, because it skips
tests when Qt is not available, and if not, then the main
`mpl_test_settings` fixture will try to switch backends before the skip can
be triggered.
Make a figure
Send a key_press_event event (using non-public, qtX backend specific api)
Catch the event
Assert sent and caught keys are the same
Make sure that if _dpi_ratio changes, the figure dpi changes but the
widget remains the same physical size.
RuntimeError if PyQt5 already imported. RuntimeError if PyQt4 already imported. Note: the value is irrelevant; the important part is the marker. save the state of Gcf.figs make a figure using pyplot interface simulate user clicking the close button by reaching in and calling close on the underlying Qt object assert that we have removed the reference to the FigureManager that got added by plt.figure() Create a figure Access QtCore Access signals Callback to fire during event loop: save SIGINT handler, then exit Save event loop signal Request event loop exit Timer to exit event loop Save original SIGINT handler Use our own SIGINT handler to be 100% sure this is working mainloop() sets SIGINT, starts Qt event loop (which triggers timer and exits) and then mainloop() resets SIGINT Assert: signal handler during loop execution is signal.SIG_DFL Assert: current signal handler is the same as the one we set before Reset SIGINT handler to what it was before the test Note: the value is irrelevant; the important part is the marker. Make sure the mocking worked The DPI and the renderer width/height change The actual widget size and figure physical size don't change assert qt_canvas.get_width_height() == (600, 240) assert (fig.get_size_inches() == (5, 2)).all() this second processEvents is required to fully run the draw. On `update` we notice the DPI has changed and trigger a resize event to refresh, the second processEvents is required to process that and fully update the window sizes. The DPI and the renderer width/height change assert fig.dpi == 240 assert qt_canvas.renderer.width == 1200 assert qt_canvas.renderer.height == 480 The actual widget size and figure physical size don't change assert qt_canvas.get_width_height() == (600, 240) assert (fig.get_size_inches() == (5, 2)).all() | 2,396 | en | 0.836091 |
import os
from indexing.pathanalyzer import PathAnalyzer
from indexing.pathanalyzerstore import PathAnalyzerStore
class Indexer:
    """
    Traverses the given directory using the DFS algorithm. Allows registering different rules for handling different
    file types and calls the associated PathAnalyzers and Collectors indirectly for each type.
    """
    ####################################################################################################################
    # Constructor.
    ####################################################################################################################
    def __init__(self, max_depth=10):
        """
        Initializes attributes and checks the maximum depth provided.

        Parameters
        ----------
        max_depth : int
            The maximum depth to look in. Must be at least 1.

        Raises
        ------
        ValueError
            If max_depth is less than 1.
        """
        ### Validate parameters.
        if max_depth < 1:
            # ValueError is the conventional exception for an invalid argument
            # value. It subclasses Exception, so callers catching the previous
            # generic Exception keep working.
            raise ValueError('max_depth must be greater than or equal to 1.')
        ### Attributes from outside.
        self._max_depth = max_depth
        ### Private attributes.
        # A collection of analyzers which handle different file types.
        self._analyzers = []
        # The depth we are currently in.
        self._current_depth = 0
        # Maps each registered directory to its PathAnalyzerStore.
        self._rules = {}
    ####################################################################################################################
    # Public methods.
    ####################################################################################################################
    def add_rule(self, directory, policy):
        """
        Registers a new directory to index. Does nothing if the given directory is already added.

        Parameters
        ----------
        directory : str
            The directory to be indexed.
        policy : IndexerPolicy
            A policy that applies to this directory.
        """
        analyzer = self._create_analyzer(policy)
        analyzer_store = self._create_analyzerstore(directory)
        analyzer_store.add_analyzer(policy.extensions, analyzer)
    def index(self):
        """
        Initializes filters, initiates indexing and after the indexing process has finished, cleans filters.

        Registered directories that do not exist on disk are silently skipped.
        """
        for analyzer in self._analyzers:
            analyzer.init_filters()
        for directory, analyzer_store in self._rules.items():
            if os.path.exists(directory):
                self._scan_directory(directory, analyzer_store)
        for analyzer in self._analyzers:
            analyzer.clean_filters()
    ####################################################################################################################
    # Auxiliary methods.
    ####################################################################################################################
    def _analyze_file(self, current_path, analyzer_store):
        """Dispatches a single file to the analyzer registered for its extension, if any."""
        current_path_without_extension, current_extension = os.path.splitext(current_path)
        analyzer = analyzer_store.find_analyzer(current_extension)
        if analyzer is not None:
            analyzer.analyze(current_path_without_extension, current_extension)
    def _create_analyzer(self, policy):
        """Creates a PathAnalyzer for the given policy and tracks it for filter init/cleanup."""
        analyzer = PathAnalyzer(policy)
        self._analyzers.append(analyzer)
        return analyzer
    def _create_analyzerstore(self, directory):
        """Returns the PathAnalyzerStore for the directory, creating it on first use."""
        if directory not in self._rules:
            self._rules[directory] = PathAnalyzerStore()
        return self._rules[directory]
    def _enter(self, directory):
        """
        Indicates for the analyzers that we entered into the given directory.

        Parameters
        ----------
        directory : str
            The directory we entered.
        """
        for analyzer in self._analyzers:
            analyzer.enter(directory)
        self._current_depth += 1
    def _leave(self):
        """
        Indicates for the analyzers that we are leaving the last directory.
        """
        for analyzer in self._analyzers:
            analyzer.leave()
        self._current_depth -= 1
    def _scan_directory(self, path, analyzer_store):
        """
        Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to
        analyze and store the data.

        Parameters
        ----------
        path : str
            The path to enumerate.
        analyzer_store : PathAnalyzerStore
            The PathAnalyzerStore to use.
        """
        # The depth cannot change between iterations of the loop below (every
        # recursive descent restores it via _enter/_leave), so the depth limit
        # is checked once up front. This also avoids listing directories whose
        # contents would be discarded anyway.
        if self._current_depth >= self._max_depth:
            return
        for current_file in os.listdir(path):
            current_path = os.path.join(path, current_file)
            if os.path.isdir(current_path):
                self._enter(current_file)
                self._scan_directory(current_path, analyzer_store)
                self._leave()
            else:
                self._analyze_file(current_path, analyzer_store)
| src/indexing/indexer.py | 5,037 | Traverses the given directory using the DFS algorithm. Allows registering different rules for handling different
file types and calls the associated PathAnalyzers and Collectors indirectly for each type.
Initializes attributes and checks the maximum depth provided.
Parameters
----------
max_depth : int
The maximum depth to look in.
Indicates for the analyzers that we entered into the given directory.
Parameters
----------
directory : str
The directory we entered.
Indicates for the analyzers that we are leaving the last directory.
Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to
analyze and store the data.
Parameters
----------
path : str
The path to enumerate.
analyzer_store : PathAnalyzerStore
The PathAnalyzerStore to use.
Registers a new directory to index. Does nothing if the given directory is already added.
Parameters
----------
directory : str
The directory to be indexed.
policy : IndexerPolicy
A policy that applies to this directory.
Initializes filters, initiates indexing and after the indexing process has finished, cleans filters.
Constructor. Validate parameters. Attributes from outside. Private attributes. A collection of analyzers which handle different file types. The depth we are currently in. The list of directories to index. Public methods. Auxiliary methods. | 1,377 | en | 0.770215 |
from __future__ import annotations
import logging
import os
import time
from functools import partial
from typing import Callable, Optional, Sequence, Union
import torch
from hivemind.averaging.control import AveragingStage, StepControl
from hivemind.compression import CompressionBase, NoCompression
from hivemind.dht import DHT
from hivemind.optim.grad_averager import GradientAverager
from hivemind.optim.grad_scaler import GradScaler
from hivemind.optim.progress_tracker import LocalTrainingProgress, ProgressTracker
from hivemind.optim.state_averager import (
LRSchedulerBase,
OptimizerFactory,
Parameters,
ParamGroups,
SchedulerFactory,
TorchOptimizer,
TrainingStateAverager,
)
from hivemind.utils import PerformanceEMA, get_dht_time, get_logger
# Module-level logger named after this module.
logger = get_logger(__name__)
class Optimizer(torch.optim.Optimizer):
    """
    hivemind.Optimizer wraps your regular PyTorch Optimizer for training collaboratively with peers.
    By default, Optimizer is configured to be exactly **equivalent to synchronous training** with target_batch_size.
    There are advanced options that make training semi-asynchronous (delay_optimizer_step and delay_grad_averaging)
    or even fully asynchronous (use_local_updates=True).
    :example: The Optimizer can be used as a drop-in replacement for a regular PyTorch Optimizer:
    >>> model = transformers.AutoModel("albert-xxlarge-v2")
    >>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, start=True)
    >>> opt = hivemind.Optimizer(dht=dht, run_id="run_42", batch_size_per_step=4, target_batch_size=4096,
    >>>                          params=model.parameters(), optimizer=lambda params: torch.optim.Adam(params))
    >>> while True:
    >>>     loss = compute_loss_on_batch(model, batch_size=4)
    >>>     opt.zero_grad()
    >>>     loss.backward()
    >>>     opt.step()  # <-- train collaboratively with any peers that use the same prefix (run_42)
    By default, peers will perform the following steps:
    * accumulate a minibatch of gradients towards the (global) target batch size, without updating parameters yet;
    * after peers collectively accumulate target_batch_size, average gradients with peers and perform optimizer step;
    * if your peer lags behind the rest of the swarm, it will download parameters and optimizer state from others;
    Unlike regular training, your device may join midway through training, when other peers already made some progress.
    For this reason, any learning rate schedulers, curriculum and other **time-dependent features should be based on**
    ``optimizer.local_epoch`` (and not the number of calls to opt.step). Otherwise, peers that joined training late
    may end up having different learning rates. To do so automatically, specify ``scheduler=...`` parameter below.
    :What is an epoch?: Optimizer uses the term ``epoch`` to describe intervals between synchronizations. One epoch
    corresponds to processing certain number of training samples (``target_batch_size``) in total across all peers.
    Like in PyTorch LR Scheduler, **epoch does not necessarily correspond to a full pass over the training data.**
    At the end of epoch, peers perform synchronous actions such as averaging gradients for a global optimizer update,
    updating the learning rate scheduler or simply averaging parameters (if using local updates).
    The purpose of this is to ensure that changing the number of peers does not require changing hyperparameters.
    For instance, if the number of peers doubles, they will run all-reduce more frequently to adjust for faster training.
    :Configuration guide: This guide will help you set up your first collaborative training run. It covers the most
    important basic options, but ignores features that require significant changes to the training code.
    >>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=IF_BEHIND_FIREWALL_OR_VERY_UNRELIABLE, start=True)
    >>> opt = hivemind.Optimizer(
    >>>    dht=dht, run_id="a_unique_name_that_every_participant_will_see_when_training",
    >>>    batch_size_per_step=ACTUAL_BATCH_SIZE_OF_THIS_PEER, target_batch_size=LARGE_GLOBAL_BATCH,
    >>>    # ^--- Each global optimizer step will use gradients from 1x-1.1x of target_batch_size (due to latency);
    >>>    # It is recommended to train with very large batch sizes to reduce the % of time spent on communication.
    >>>
    >>>    params=params, optimizer=lambda params: AnyPyTorchOptimizer(params, **hyperparams_for_target_batch_size),
    >>>    # tune learning rate for your target_batch_size. Here's a good reference: https://arxiv.org/abs/1904.00962
    >>>    scheduler=lambda opt: AnyPyTorchScheduler(opt, **hyperparams_for_target_batch_size),
    >>>    # scheduler.step will be called automatically each time when peers collectively accumulate target_batch_size
    >>>
    >>>    offload_optimizer=True,  # saves GPU memory, but increases RAM usage; Generally a good practice to use this.
    >>>    delay_grad_averaging=OPTIONAL, delay_optimizer_step=OPTIONAL, # train faster, but with 1 round of staleness;
    >>>    # setting both to True is equivalent to Delayed Parameter Updates (see https://arxiv.org/abs/2101.06840)
    >>>
    >>>    grad_compression=hivemind.Float16Compression(),  state_averaging_compression=hivemind.Float16Compression(),
    >>>    # ^-- it is usually fine to use pure 16-bit or even lower precision during communication with no precaution;
    >>>    # See hivemind/examples/albert for a working example of mixed 8/16-bit compression.
    >>>
    >>>    matchmaking_time=15.0, # 3-5s for small local runs, 10-15s for training over the internet or with many peers
    >>>    averaging_timeout=60.0,  # around 2x the actual time it takes to run all-reduce
    >>>    verbose=True  # periodically report the training progress to the console (e.g. "Averaged with N peers")
    >>> )  # and you're done!
    :param dht: a running hivemind.DHT instance connected to other peers.
    :param run_id: a unique identifier of this training run, used as a common prefix for all DHT keys.
      **Note:** peers with the same run_id should *generally* train the same model and use compatible configurations.
      Some options can be safely changed by individual peers: ``batch_size_per_step``, ``client_mode``, ``auxiliary``,
      ``reuse_grad_buffers``, ``offload_optimizer``, and ``verbose``. In some cases, other options may also be tuned
      individually by each peer, but they should be changed with caution to avoid deadlocks or convergence issues.
    :param target_batch_size: global batch size that must be accumulated before the swarm transitions to the next epoch.
      The actual batch may be *slightly* larger due to asynchrony (e.g. peers submit more gradients in the last second).
    :param batch_size_per_step: you should accumulate gradients over this many samples between calls to optimizer.step.
    :param params: parameters or param groups for the optimizer; required if optimizer is a callable(params).
    :param optimizer: a callable(parameters) -> pytorch.optim.Optimizer or a pre-initialized PyTorch optimizer.
      **Note:** some advanced options like offload_optimizer, delay_optimizer_step, or delay_grad_averaging require
      the callable form and will not work if hivemind.Optimizer is created with a pre-existing PyTorch Optimizer.
    :param scheduler: callable(optimizer) -> PyTorch LRScheduler or a pre-initialized PyTorch scheduler.
      The learning rate scheduler will adjust learning rate based on global epoch, not the number of
      local calls to optimizer.step; this is required to keep different peers synchronized.
    :param matchmaking_time: when looking for group, wait for peers to join for up to this many seconds.
      Increase if you see "averaged gradients with N peers" where N is below 0.9x the real size on >=25% of epochs.
      When training with low-latency network, decreasing matchmaking_time allows training with smaller batch sizes.
    :param averaging_timeout: if an averaging step hangs for this long, it will be cancelled automatically.
      Increase averaging_timeout if you see "Proceeding with local gradients" at least 25% of the time.
      Do not set this timeout too high, as it may cause your optimizer to hang after some types of network errors.
    :param allreduce_timeout: timeout for a single attempt to run all-reduce, default: equal to averaging_timeout.
    :param load_state_timeout: wait for at most this many seconds before giving up on load_state_from_peers.
    :param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.
      This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all
    :param offload_optimizer: offload the optimizer to host memory, saving GPU memory for parameters and gradients
    :param delay_optimizer_step: run optimizer in background, apply results in future .step; requires offload_optimizer
    :param delay_grad_averaging: average gradients in background; requires offload_optimizer and delay_optimizer_step
    :param delay_state_averaging: if enabled (default), average parameters and extra tensors in a background thread;
      if set to False, average parameters synchronously within the corresponding hivemind.Optimizer.step call.
    :param average_state_every: average state (parameters, chosen opt tensors) with peers every this many **epochs**.
      This reduces the communication overhead, but can cause parameters to diverge if too large.
      The maximal average_state_every=num_epochs depends on how often peers diverge from each other. If peers
      hardly ever skip averaging rounds, they can average state less frequently. In turn, network failures, lossy
      gradient compression and local_updates cause parameters to diverge faster and requires more frequent averaging.
    :param use_local_updates: if enabled, peers will update parameters on each .step using local gradients;
      if not enabled (default), accumulate gradients to target_batch_size, and then call .step with averaged gradients.
      Even if use_local_updates=True, learning rate scheduler will still be called once per target_batch_size.
    :param client_mode: if True, this peer will not accept incoming connections (firewall-compatible mode)
    :param auxiliary: if True, optimizer.step will only assist other peers in averaging (for cpu-only workers)
    :param grad_compression: compression strategy used for averaging gradients, default = no compression
    :param state_averaging_compression: compression for averaging params and state tensors, default = no compression
    :param load_state_compression: compression strategy for loading state from peers, default = no compression
    :param average_opt_statistics: names of optimizer statistics from state dict that should be averaged with peers
    :param extra_tensors: if specified, these extra tensors will also be averaged and shared in load_state_from_peers.
    :param averager_opts: additional keyword arguments forwarded to both GradientAverager and TrainingStateAverager
    :param tracker_opts: additional keyword arguments forwarded to ProgressTracker
    :param performance_ema_alpha: moving average alpha in ProgressTracker, TrainingStateAverager and Optimizer
    :param verbose: if True, report internal events such as accumulating gradients and running background tasks
    :note: in a large-scale training, peers will inevitably fail and you will see error messages. hivemind.Optimizer
      is designed to recover from such failures, but will sometimes need a minute or two to re-adjust.
    """
def __init__(
self,
*,
dht: DHT,
run_id: str,
target_batch_size: int,
batch_size_per_step: Optional[int] = None,
optimizer: Union[TorchOptimizer, OptimizerFactory],
params: Optional[Union[Parameters, ParamGroups]] = None,
scheduler: Optional[Union[LRSchedulerBase, SchedulerFactory]] = None,
matchmaking_time: Optional[float] = 15.0,
averaging_timeout: Optional[float] = 60.0,
allreduce_timeout: Optional[float] = None,
next_chunk_timeout: Optional[float] = None,
load_state_timeout: float = 600.0,
reuse_grad_buffers: bool = False,
offload_optimizer: Optional[bool] = None,
delay_optimizer_step: Optional[bool] = None,
delay_grad_averaging: bool = False,
delay_state_averaging: bool = True,
average_state_every: int = 1,
use_local_updates: bool = False,
client_mode: bool = None,
auxiliary: bool = False,
grad_compression: CompressionBase = NoCompression(),
state_averaging_compression: CompressionBase = NoCompression(),
load_state_compression: CompressionBase = NoCompression(),
average_opt_statistics: Sequence[str] = (),
extra_tensors: Sequence[torch.Tensor] = (),
averager_opts: Optional[dict] = None,
tracker_opts: Optional[dict] = None,
performance_ema_alpha: float = 0.1,
shutdown_timeout: float = 5,
verbose: bool = False,
):
self._parent_pid = os.getpid()
client_mode = client_mode if client_mode is None else dht.client_mode
delay_optimizer_step = delay_optimizer_step if delay_optimizer_step is not None else delay_grad_averaging
offload_optimizer = offload_optimizer if offload_optimizer is not None else (params is not None)
allreduce_timeout = allreduce_timeout if allreduce_timeout is not None else averaging_timeout
next_chunk_timeout = next_chunk_timeout if next_chunk_timeout is not None else matchmaking_time
assert not delay_grad_averaging or delay_optimizer_step, "delay_grad_averaging requires delay_optimizer_step"
assert not (client_mode and auxiliary), "Client-mode peers cannot serve as auxiliaries"
assert not auxiliary or batch_size_per_step is None, "Auxiliary peers should not accumulate batches"
if callable(optimizer) and params is not None:
if scheduler is not None and (not callable(scheduler) or isinstance(scheduler, LRSchedulerBase)):
raise ValueError("For this mode, please provide scheduler factory: callable(optimizer) -> scheduler")
elif all(hasattr(optimizer, attr) for attr in ("param_groups", "step", "zero_grad")):
if offload_optimizer or delay_optimizer_step or delay_grad_averaging:
raise ValueError(
"To enable offload_optimizer or delayed updates, please initialize Optimizer as "
"hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)"
)
else:
raise ValueError(
"Please initialize the optimizer in one of the following two ways:\n"
"(A) hivemind.Optimizer(..., params=params, optimizer=lambda params: create_opt(params)\n"
"(B) hivemind.Optimizer(..., optimizer=pre_initialize_optimizer)"
)
if use_local_updates:
assert not reuse_grad_buffers, "if local_updates is True, gradients will not be accumulated"
assert not delay_grad_averaging, "if local_updates is True, gradients will not be averaged"
self.dht, self.run_id, self.client_mode, self.auxiliary = dht, run_id, client_mode, auxiliary
self.batch_size_per_step, self.target_batch_size = batch_size_per_step, target_batch_size
self.delay_state_averaging, self.average_state_every = delay_state_averaging, average_state_every
self.matchmaking_time, self.offload_optimizer = matchmaking_time, offload_optimizer
self.delay_grad_averaging, self.delay_optimizer_step = delay_grad_averaging, delay_optimizer_step
self.averaging_timeout, self.allreduce_timeout = averaging_timeout, allreduce_timeout
self.load_state_timeout, self.shutdown_timeout = load_state_timeout, shutdown_timeout
self.next_chunk_timeout = next_chunk_timeout
self.status_loglevel = logging.INFO if verbose else logging.DEBUG
self.scheduled_grads: Optional[StepControl] = None
self.scheduled_state: Optional[StepControl] = None
self.tracker = self._make_progress_tracker(
target_batch_size, performance_ema_alpha=performance_ema_alpha, **tracker_opts or {}
)
self.state_averager = self._make_state_averager(
optimizer=optimizer,
params=params,
scheduler=scheduler,
delta_rule_averaging=use_local_updates and self.delay_state_averaging,
compression=state_averaging_compression,
state_compression=load_state_compression,
average_opt_statistics=average_opt_statistics,
performance_ema_alpha=performance_ema_alpha,
extra_tensors=extra_tensors,
**averager_opts or {},
)
if not use_local_updates:
self.grad_averager = self._make_gradient_averager(
reuse_grad_buffers=reuse_grad_buffers, compression=grad_compression, **averager_opts or {}
)
else:
self.grad_averager = None
self._should_check_synchronization_on_update = True # used in self.should_load_state_from_peers
self._schema_hash = self._compute_schema_hash()
self.delay_before_state_averaging = PerformanceEMA(alpha=performance_ema_alpha)
# measures the average time from the beginning of self._update_global_epoch to the call to state_averager
# used for pre-scheduling the averaging round in state_averager
self._step_supports_amp_scaling = reuse_grad_buffers
# note: the line above is used by pytorch AMP GradScaler to enable custom behavior needed when reusing gradient
# buffers over multiple steps (to avoid repeated unscaling). Without reuse_grad_buffers, this is not needed.
def _make_state_averager(self, **kwargs) -> TrainingStateAverager:
return TrainingStateAverager(
dht=self.dht,
prefix=f"{self.run_id}_state_averager",
min_matchmaking_time=self.matchmaking_time,
allreduce_timeout=self.allreduce_timeout,
shutdown_timeout=self.shutdown_timeout,
offload_optimizer=self.offload_optimizer,
custom_gradients=self.offload_optimizer,
status_loglevel=self.status_loglevel,
next_chunk_timeout=self.next_chunk_timeout,
client_mode=self.client_mode,
auxiliary=self.auxiliary,
start=True,
**kwargs,
)
    def _make_gradient_averager(self, **kwargs) -> GradientAverager:
        """
        Create and start the GradientAverager used for swarm-wide gradient all-reduce.

        When the optimizer is offloaded, each offloaded parameter's ``.grad`` is aliased to the
        corresponding averaged-gradient buffer, so the optimizer reads averaged gradients in place.
        """
        # state_averager must exist first: its main_parameters define what we average over.
        assert hasattr(self, "state_averager"), "must initialize state averager first"
        grad_averager = GradientAverager(
            dht=self.dht,
            prefix=f"{self.run_id}_grad_averager",
            parameters=self.state_averager.main_parameters,
            min_matchmaking_time=self.matchmaking_time,
            allreduce_timeout=self.allreduce_timeout,
            shutdown_timeout=self.shutdown_timeout,
            next_chunk_timeout=self.next_chunk_timeout,
            client_mode=self.client_mode,
            auxiliary=self.auxiliary,
            start=True,
            **kwargs,
        )
        if self.offload_optimizer:
            # Alias each offloaded parameter's .grad to the shared averaged-gradient tensor.
            # _compute_schema_hash records these tensor ids so the aliasing can later be
            # verified to still be intact.
            optimized_param_groups = self.state_averager.optimizer.param_groups
            optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
            with grad_averager.get_tensors() as averaged_gradients:
                assert len(averaged_gradients) == len(optimized_parameters)
                for opt_param, averaged_grad in zip(optimized_parameters, averaged_gradients):
                    opt_param.grad = averaged_grad
        return grad_averager
def _make_progress_tracker(self, target_batch_size: int, **kwargs) -> ProgressTracker:
return ProgressTracker(
dht=self.dht,
prefix=self.run_id,
target_batch_size=target_batch_size,
client_mode=self.client_mode,
status_loglevel=self.status_loglevel,
start=True,
**kwargs,
)
def _compute_schema_hash(self) -> int:
optimized_param_groups = self.state_averager.optimizer.param_groups
optimized_parameters = [param for group in optimized_param_groups for param in group["params"]]
param_shapes = tuple(tuple(param.shape) for param in optimized_parameters)
# offloaded optimizer requires that gradient tensors are reused between iterations
grad_ids = tuple(id(param.grad) for param in optimized_parameters) if self.offload_optimizer else None
return hash((grad_ids, param_shapes))
    def is_alive(self) -> bool:
        """Return True while the background state-averager process is still running."""
        return self.state_averager.is_alive()
    @property
    def local_epoch(self) -> int:
        """
        This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will
        automatically re-synchronize by downloading state from another peer.
        An epoch corresponds to accumulating target_batch_size across all active devices.
        """
        # delegated to the state averager, which owns the epoch counter
        return self.state_averager.local_epoch
    @property
    def local_progress(self) -> LocalTrainingProgress:
        """This peer's locally reported training progress, as maintained by the ProgressTracker."""
        return self.tracker.local_progress
    @property
    def use_local_updates(self) -> bool:
        """True when running in local-updates mode (no gradient averager was created)."""
        return self.grad_averager is None
    @property
    def use_gradient_averaging(self) -> bool:
        """True when gradients are accumulated and averaged with peers (the default mode)."""
        return self.grad_averager is not None
    def step(
        self,
        closure: Optional[Callable[[], torch.Tensor]] = None,
        batch_size: Optional[int] = None,
        grad_scaler: Optional[GradScaler] = None,
    ):
        """
        Update training progress after accumulating another local batch size. Depending on the configuration, this will
        report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.
        :param closure: A closure that reevaluates the model and returns the loss.
        :param batch_size: optional override for batch_size_per_step from init.
        :param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.
        :returns: the loss returned by ``closure`` if one was given, otherwise None.
        :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
        """
        if grad_scaler is not None and not isinstance(grad_scaler, GradScaler):
            raise ValueError("hivemind.Optimizer requires a hivemind-aware gradient scaler (hivemind.GradScaler)")
        if self.batch_size_per_step is None and batch_size is None and not self.auxiliary:
            raise ValueError("Please either set batch_size_per_step parameter at init or when calling .step")
        if self.auxiliary and (closure is not None or batch_size is not None or grad_scaler is not None):
            raise ValueError("Auxiliary peers should not have batch size, run closures, or use grad_scaler")
        batch_size = batch_size if batch_size is not None else self.batch_size_per_step
        # if delayed updates finished before step, apply these updates; otherwise do nothing
        self.state_averager.step(apply_delayed_updates=True)
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()
        # a lagging peer downloads fresh state instead of contributing stale gradients
        if not self.auxiliary and self._should_load_state_from_peers():
            logger.log(self.status_loglevel, "Peer is out of sync")
            self.load_state_from_peers()
            return loss  # local gradients were computed with out-of-sync parameters, must start over
        if self.use_gradient_averaging:
            # accumulate gradients toward target batch size, then aggregate with peers and run optimizer
            if not self.auxiliary:
                grads_are_valid = self._check_and_accumulate_gradients(batch_size, grad_scaler)
                if not grads_are_valid:
                    return loss  # local gradients were reset due to overflow, must start over
            self._maybe_schedule_gradient_averaging()
            self._maybe_schedule_state_averaging()
        else:
            # use_local_updates=True: update parameters on every step independently of other peers
            if not self.auxiliary:
                if grad_scaler is not None:
                    with grad_scaler.running_global_step():
                        # NOTE(review): relies on unscale_ returning a truthy value on success;
                        # this check disappears under `python -O` -- confirm that is intended
                        assert grad_scaler.unscale_(self)
                new_samples_accumulated = self.tracker.local_progress.samples_accumulated + batch_size
                self.tracker.report_local_progress(self.local_epoch, new_samples_accumulated)
                self._maybe_schedule_state_averaging()
                self.state_averager.step(
                    increment_epoch=False,
                    optimizer_step=True,
                    delay_optimizer_step=self.delay_optimizer_step,
                    grad_scaler=grad_scaler,
                )
        # once the swarm has collectively accumulated target_batch_size, run the epoch transition
        if self.tracker.ready_to_update_epoch:
            self._update_global_epoch(grad_scaler)
        return loss
def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None:
    """Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step.

    :param grad_scaler: if amp is enabled, this must be a hivemind-aware gradient scaler; otherwise None
    """
    assert self._schema_hash == self._compute_schema_hash(), "parameters or gradients changed during iteration"
    _epoch_start_time = time.perf_counter()

    with self.tracker.pause_updates():
        wait_for_trigger = None

        if self.use_gradient_averaging:
            logger.log(self.status_loglevel, f"Beginning optimizer step #{self.local_epoch}")
            if self.delay_optimizer_step:
                # wait for the previous (delayed) optimizer step to finish before starting a new one
                self.state_averager.step(wait_for_delayed_updates=True)

            began_averaging_gradients = self._begin_averaging_gradients(grad_scaler)
            if not began_averaging_gradients:
                # failed to start gradient averaging due to an internal error; fall back to local gradients
                self.grad_averager.load_accumulators_into_averager_()
            elif self.delay_grad_averaging:
                # if using delayed grad averaging, send this to state_averager as a pre-condition for optimizer step
                wait_for_trigger = partial(self._average_gradients_and_load_into_optimizer, self.scheduled_grads)
            else:
                # delay_grad_averaging=False, average gradients immediately
                self._average_gradients_and_load_into_optimizer(self.scheduled_grads)

        next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)
        swarm_not_empty = self.tracker.global_progress.num_peers > 1
        should_perform_optimizer_step = not self.auxiliary and not self.use_local_updates
        should_average_state = (
            swarm_not_empty
            and next_epoch % self.average_state_every == 0
            and not self.state_averager.averaging_in_progress
        )

        if should_average_state and self.scheduled_state is not None:
            if self.scheduled_state.triggered or self.scheduled_state.done():
                # bug fix: the two f-string fragments previously concatenated without a space ("itwas")
                logger.log(
                    self.status_loglevel,
                    f"Not using pre-scheduled group for state averaging because it "
                    f"was already used elsewhere: {self.scheduled_state}",
                )
                self.scheduled_state = None
            self.delay_before_state_averaging.update(task_size=1, interval=time.perf_counter() - _epoch_start_time)

        self.state_averager.step(
            increment_epoch=True,
            wait_for_trigger=wait_for_trigger,
            optimizer_step=should_perform_optimizer_step,
            delay_optimizer_step=self.delay_optimizer_step and should_perform_optimizer_step,
            grad_scaler=grad_scaler,
            averaging_round=should_average_state,
            delay_averaging=self.delay_state_averaging and not self.auxiliary,
            averaging_control=self.scheduled_state if should_average_state else None,
            averaging_opts=dict(timeout=self.averaging_timeout) if should_average_state else None,
        )

        if not should_average_state and self.scheduled_state is not None and not self.scheduled_state.done():
            # a state averaging round was pre-scheduled but is no longer needed this epoch
            self.scheduled_state.cancel()
            self.scheduled_state = None

        self.tracker.update_epoch(new_epoch=self.state_averager.local_epoch)
        self._should_check_synchronization_on_update = True
        # the above line ensures that peers check for *strict* synchronization once per epoch

        if not self.client_mode:
            self.state_averager.state_sharing_priority = self.local_epoch

        if self.use_gradient_averaging and not self.auxiliary:
            self.grad_averager.reset_accumulated_grads_()
            if not self.client_mode:
                self.grad_averager.state_sharing_priority = self.local_epoch

        logger.log(self.status_loglevel, f"Transitioning to epoch {self.local_epoch}")
def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool:
    """Begin an all-reduce round to average gradients; return True if succeeded, False if failed"""
    if grad_scaler is not None:
        with grad_scaler.running_global_step():
            assert grad_scaler.unscale_(self)

    began_averaging_gradients = False
    if self.scheduled_grads is not None and (self.scheduled_grads.triggered or self.scheduled_grads.done()):
        # bug fix: this log message previously said "state averaging" and interpolated self.scheduled_state
        # (a copy-paste from the state-averaging branch); it also lacked a space between the two fragments
        logger.log(
            self.status_loglevel,
            f"Not using pre-scheduled group for gradient averaging because it "
            f"was already used elsewhere: {self.scheduled_grads}",
        )
        self.scheduled_grads = None
    elif self.tracker.global_progress.num_peers > 1:
        try:
            self.scheduled_grads = self.grad_averager.step(
                control=self.scheduled_grads, reset_accumulators=True, wait=False
            )
            began_averaging_gradients = True
        except BaseException as e:
            # averaging must not crash the training loop; the caller falls back to local gradients
            logger.exception(e)

    if not began_averaging_gradients and self.scheduled_grads is not None and not self.scheduled_grads.done():
        if self.tracker.global_progress.num_peers > 1:
            logger.log(self.status_loglevel, f"Tagging along for a pre-scheduled gradient averaging round")
            self._tag_along_with_zero_weight(self.scheduled_grads)
        else:
            logger.log(self.status_loglevel, f"Skipping pre-scheduled averaging round: there are no other peers")
            self._load_local_gradients_into_optimizer()
            self.scheduled_grads.cancel()
        self.scheduled_grads = None
    return began_averaging_gradients
def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool:
    """Check if gradients are valid, accumulate and return True; otherwise, reset and return False"""
    assert not self.use_local_updates and not self.auxiliary

    fp16_overflow = grad_scaler is not None and not grad_scaler.are_grads_finite(self)
    if fp16_overflow:
        # discard this round of gradients entirely and report zero progress to peers
        logger.log(self.status_loglevel, "Encountered incorrect value in fp16 grads, resetting local gradients")
        self.tracker.report_local_progress(self.local_epoch, samples_accumulated=0)
        self.grad_averager.reset_accumulated_grads_()
        return False

    self.grad_averager.accumulate_grads_(batch_size)
    self.tracker.report_local_progress(self.local_epoch, self.grad_averager.local_samples_accumulated)
    return True
def _maybe_schedule_gradient_averaging(self) -> None:
    """If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch"""
    assert self.use_gradient_averaging
    if self.tracker.estimated_next_update_time - get_dht_time() > self.matchmaking_time:
        return  # the end of the current epoch is not imminent yet
    if self.scheduled_grads is not None and not (self.scheduled_grads.triggered or self.scheduled_grads.done()):
        return  # a previously scheduled round is still pending and usable

    eta_seconds = self.tracker.estimated_next_update_time - get_dht_time()
    eta_seconds = max(eta_seconds, self.grad_averager.matchmaking_kwargs["min_matchmaking_time"])
    logger.log(self.status_loglevel, f"Pre-scheduling gradient averaging round in {eta_seconds:.2f} sec")
    self.scheduled_grads = self.grad_averager.schedule_step(timeout=self.averaging_timeout)
def _maybe_schedule_state_averaging(self) -> None:
    """If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start"""
    next_epoch = max(self.local_epoch + 1, self.tracker.global_epoch)
    if next_epoch % self.average_state_every != 0:
        return  # averaging is not performed at this epoch
    if self.state_averager.averaging_in_progress:
        return  # previous run is still in progress
    if self.delay_before_state_averaging.num_updates == 0:
        return  # not enough data to accurately pre-schedule

    # estimate when the actual averaging will begin: next epoch, plus the measured pre-averaging delays
    estimated_time = (
        self.tracker.estimated_next_update_time
        + self.delay_before_state_averaging.ema_seconds_per_sample
        + self.state_averager.delay_before_averaging.ema_seconds_per_sample
    )
    eta_seconds_to_averaging = estimated_time - get_dht_time()
    if eta_seconds_to_averaging > self.matchmaking_time:
        return  # too early to pre-schedule
    if self.scheduled_state is not None and not (self.scheduled_state.triggered or self.scheduled_state.done()):
        return  # an earlier pre-scheduled round is still usable

    min_matchmaking_time = self.state_averager.matchmaking_kwargs["min_matchmaking_time"]
    actual_seconds = max(eta_seconds_to_averaging, min_matchmaking_time)
    logger.log(self.status_loglevel, f"Pre-scheduling state averaging round in {actual_seconds:.2f} sec")
    self.scheduled_state = self.state_averager.schedule_step(
        gather=next_epoch, timeout=self.averaging_timeout
    )
def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]):
    """Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients"""
    # bug fix: parentheses added — the original `A and B or C` parsed as `(A and B) or C`, so with
    # use_gradient_averaging=False and maybe_step_control=None it raised AttributeError on None.triggered
    # instead of failing the assertion
    assert self.use_gradient_averaging and (maybe_step_control is None or maybe_step_control.triggered)
    averaged_gradients = False

    try:
        if maybe_step_control is not None:
            group_info = maybe_step_control.result(self.averaging_timeout)
            logger.log(self.status_loglevel, f"Averaged gradients with {len(group_info)} peers")
            self._load_averaged_gradients_into_optimizer_()
            averaged_gradients = True
        else:
            logger.log(self.status_loglevel, f"Skipped averaging: there are no other peers")
    except BaseException as e:
        # averaging failures are recoverable: fall back to this peer's local gradients below
        logger.log(self.status_loglevel, f"Averaging gradients failed with {repr(e)}")

    if not averaged_gradients:
        self._load_local_gradients_into_optimizer()
def _load_averaged_gradients_into_optimizer_(self):
    """If required, load averaged gradients into optimizer; otherwise simply notify grad averager"""
    assert self.use_gradient_averaging
    if not self.offload_optimizer:
        # copy averaged gradients into the .grad buffers of the optimizer's parameters
        optimizer_groups = self.state_averager.optimizer.param_groups
        flat_parameters = [param for group in optimizer_groups for param in group["params"]]
        with torch.no_grad(), self.grad_averager.get_tensors() as averaged_gradients:
            assert len(averaged_gradients) == len(flat_parameters)
            for opt_param, averaged_grad in zip(flat_parameters, averaged_gradients):
                opt_param.grad.copy_(averaged_grad, non_blocking=True)
    # with offload_optimizer, averaged gradients are already baked into optimizer, see _make_gradient_averager
    self.grad_averager.notify_used_averaged_gradients()
def _load_local_gradients_into_optimizer(self):
    """Fallback to using local gradients in the optimizer (instead of averaged gradients)"""
    logger.log(self.status_loglevel, f"Proceeding with local gradients")
    # gradients still pass through grad_averager even with a single peer, for two reasons:
    # - with offload_optimizer, gradients must be loaded onto the CPU buffers used by the optimizer
    # - without offload_optimizer, gradients must be un-scaled (divided by the number of accumulation steps)
    self.grad_averager.load_accumulators_into_averager_()
    self._load_averaged_gradients_into_optimizer_()
def zero_grad(self, set_to_none: bool = False):
    """Reset gradients from model. If reuse_grad_buffers=True, this will raise an error."""
    if self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers:
        raise ValueError(
            f"When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never "
            f"call zero_grad manually. Gradients will be refreshed internally"
        )
    every_param = (param for group in self.param_groups for param in group["params"])
    for param in every_param:
        if param.grad is None:
            continue  # nothing to reset
        if set_to_none:
            param.grad = None
        else:
            param.grad.zero_()
def _should_load_state_from_peers(self) -> bool:
    """
    If true, peer will discard local progress and attempt to download state from peers.
    This method allows peer to continue training in two cases:
       - peer is on the same epoch as other collaborators - keep training normally
       - peer was on the same epoch and accumulated some grads, but some collaborators
           have just transitioned to the next epoch - this peer should also transition.

    :note: The latter case occurs due to the lack of network synchrony: the first peer that
    detects enough samples will transition to the next step and start counting samples anew.
    Some other peers may take time before they check with DHT and observe that
      - the global epoch is technically one epoch ahead of the current one and
      - the remaining (non-transitioned) peers no longer have target_batch_size between them
    If this is the case, peer should transition to the next epoch and does *not* need to re-load state.
    """
    strict_check_pending = (
        self._should_check_synchronization_on_update
        and self.tracker.fetched_global_progress_this_epoch.is_set()
    )
    if strict_check_pending:
        self._should_check_synchronization_on_update = False
        return self.local_epoch != self.tracker.global_epoch  # require exact synchronization once per step
    return self.local_epoch < self.tracker.global_epoch - 1  # catch up if a peer just switched to next epoch
def is_synchronized_with_peers(self) -> bool:
    """Checks whether the current peer is up-to-date with others in terms of the epoch (step) number."""
    epochs_behind = self.tracker.global_epoch - self.local_epoch
    return epochs_behind <= 1
def load_state_from_peers(self, **kwargs):
    """
    Attempt to load the newest collaboration state from other peers within the same run_id.

    If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.

    :param kwargs: forwarded to TrainingStateAverager.load_state_from_peers
    """
    # note: we tag along for the next all-reduce because the run may have already started and cancelling it
    # will cause peers to restart matchmaking and may stall the entire collaboration for a few seconds.
    if self.scheduled_grads is not None and not self.scheduled_grads.done():
        self._tag_along_with_zero_weight(self.scheduled_grads)
        self.scheduled_grads = None
    # apply any pending background updates before overwriting the state
    self.state_averager.step(wait_for_delayed_updates=True)

    with self.tracker.pause_updates():
        # retry indefinitely: a peer that cannot download state cannot usefully continue training
        while True:
            try:
                self.state_averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs)
                break
            except KeyboardInterrupt:
                raise  # let the user abort the retry loop
            except BaseException as e:
                logger.exception(f"Failed to load state from peers: {e}, retrying ...")
                continue

        if self.tracker.global_epoch - 1 <= self.local_epoch < self.tracker.global_epoch:
            # downloaded state is exactly one epoch behind the collaboration: fast-forward the epoch counter
            logger.log(self.status_loglevel, f"Catching up with collaboration step {self.tracker.global_epoch}")
            self.state_averager.local_epoch = self.tracker.global_epoch

        # freshly loaded state has no accumulated samples yet
        self.tracker.report_local_progress(local_epoch=self.local_epoch, samples_accumulated=0)

        if not self.client_mode:
            self.state_averager.state_sharing_priority = self.local_epoch

        if self.use_gradient_averaging:
            # discard locally accumulated gradients: they were computed against out-of-date parameters
            self.grad_averager.reset_accumulated_grads_()
            if not self.client_mode:
                self.grad_averager.state_sharing_priority = self.local_epoch
def state_dict(self) -> dict:
    """Return the wrapped optimizer's state dict, with the peer's local epoch stored under state["local_epoch"]."""
    full_state = self.state_averager.optimizer.state_dict()
    full_state["state"]["local_epoch"] = self.local_epoch
    return full_state
def load_state_dict(self, state_dict: dict):
    """Load a state dict produced by Optimizer.state_dict, restoring the local epoch if it was stored."""
    _missing = object()  # sentinel: preserves behavior even if a stored local_epoch is None
    saved_epoch = state_dict["state"].pop("local_epoch", _missing)
    if saved_epoch is not _missing:
        self.state_averager.local_epoch = saved_epoch
    return self.state_averager.optimizer.load_state_dict(state_dict)
@property
def state(self):
    """A copy of the wrapped optimizer's state, augmented with the peer's local epoch."""
    combined_state = dict(self.state_averager.optimizer.state)
    combined_state["local_epoch"] = self.local_epoch
    return combined_state
@property
def opt(self) -> TorchOptimizer:
    """The wrapped PyTorch optimizer maintained by the state averager."""
    return self.state_averager.optimizer
@property
def param_groups(self) -> ParamGroups:
    """Optimizer param groups with "params" re-pointed at the main parameters held by the state averager."""
    rebuilt_groups = []
    offset = 0
    for inner_group in self.state_averager.optimizer.param_groups:
        group_copy = dict(inner_group)
        group_size = len(group_copy["params"])
        group_copy["params"] = self.state_averager.main_parameters[offset : offset + group_size]
        rebuilt_groups.append(group_copy)
        offset += group_size
    assert offset == len(self.state_averager.main_parameters)
    return tuple(rebuilt_groups)
def add_param_group(self, param_group: dict) -> None:
    """Unsupported: all parameter groups must be supplied when the optimizer is created."""
    message = (
        f"{self.__class__.__name__} does not support calling add_param_group after creation. "
        f"Please provide all parameter groups at init"
    )
    raise ValueError(message)
def __repr__(self):
    """Short description: class name, run id and current local epoch."""
    return "{}(prefix={}, epoch={})".format(self.__class__.__name__, self.run_id, self.local_epoch)
def _tag_along_with_zero_weight(self, control: StepControl):
    """Wait for a running averaging round to finish with zero weight.

    Used instead of cancelling outright when other peers may already be matched with this round:
    participating with weight 0 lets the round complete without contributing this peer's data.
    """
    if not control.triggered:
        # the round has not started yet: join it, but contribute nothing
        control.weight = 0
        control.allow_allreduce()
    if not control.done():
        try:
            control.result(self.averaging_timeout)
        except BaseException as e:
            logger.exception(e)
            # the round failed or timed out; cancel it if it is still pending
            if not control.done():
                control.cancel()
def shutdown(self):
    """Gracefully terminate the optimizer: notify peers, resolve scheduled rounds, stop background averagers."""
    logger.log(self.status_loglevel, "Sending goodbye to peers...")
    self.tracker.shutdown(self.shutdown_timeout)
    # flush any delayed background updates before tearing down
    self.state_averager.step(wait_for_delayed_updates=True)
    for scheduled_round in self.scheduled_grads, self.scheduled_state:
        if scheduled_round is not None:
            if scheduled_round.stage == AveragingStage.LOOKING_FOR_GROUP:
                # no peers are matched with this round yet: safe to cancel outright
                scheduled_round.cancel()
            else:
                # peers may already depend on this round: finish it with zero weight instead of cancelling
                self._tag_along_with_zero_weight(scheduled_round)

    logger.log(self.status_loglevel, "Shutting down averagers...")
    self.state_averager.shutdown()
    if self.use_gradient_averaging:
        self.grad_averager.shutdown()
    logger.log(self.status_loglevel, f"{self.__class__.__name__} is shut down")
def __del__(self):
    """On garbage collection in the parent process, shut down background workers if still running."""
    if os.getpid() == self._parent_pid and self.is_alive():
        self.shutdown()
| hivemind/optim/optimizer.py | 44,615 | hivemind.Optimizer wraps your regular PyTorch Optimizer for training collaboratively with peers.
By default, Optimizer is configured to be exactly **equivalent to synchronous training** with target_batch_size.
There are advanced options make training semi-asynchronous (delay_optimizer_step and delay_gradient_averaging)
or even fully asynchronous (use_local_updates=True).
:example: The Optimizer can be used as a drop-in replacement for a regular PyTorch Optimizer:
>>> model = transformers.AutoModel("albert-xxlarge-v2")
>>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, start=True)
>>> opt = hivemind.Optimizer(dht=dht, run_id="run_42", batch_size_per_step=4, target_batch_size=4096,
>>> params=model.parameters(), optimizer=lambda params: torch.optim.Adam(params))
>>> while True:
>>> loss = compute_loss_on_batch(model, batch_size=4)
>>> opt.zero_grad()
>>> loss.backward()
>>> opt.step() # <-- train collaboratively with any peers that use the same prefix (run_42)
By default, peers will perform the following steps:
* accumulate a minibatch of gradients towards the (global) target batch size, without updating parameters yet;
* after peers collectively accumulate target_batch_size, average gradients with peers and perform optimizer step;
* if your peer lags behind the rest of the swarm, it will download parameters and optimizer state from others;
Unlike regular training, your device may join midway through training, when other peers already made some progress.
For this reason, any learning rate schedulers, curriculum and other **time-dependent features should be based on**
``optimizer.local_epoch`` (and not the number of calls to opt.step). Otherwise, peers that joined training late
may end up having different learning rates. To do so automatically, specify ``scheduler=...`` parameter below.
:What is an epoch?: Optimizer uses the term ``epoch`` to describe intervals between synchronizations. One epoch
corresponds to processing a certain number of training samples (``target_batch_size``) in total across all peers.
Like in PyTorch LR Scheduler, **epoch does not necessarily correspond to a full pass over the training data.**
At the end of epoch, peers perform synchronous actions such as averaging gradients for a global optimizer update,
updating the learning rate scheduler or simply averaging parameters (if using local updates).
The purpose of this is to ensure that changing the number of peers does not require changing hyperparameters.
For instance, if the number of peers doubles, they will run all-reduce more frequently to adjust for faster training.
:Configuration guide: This guide will help you set up your first collaborative training run. It covers the most
important basic options, but ignores features that require significant changes to the training code.
>>> dht = hivemind.DHT(initial_peers=INITIAL_PEERS, client_mode=IF_BEHIND_FIREWALL_OR_VERY_UNRELIABLE, start=True)
>>> opt = hivemind.Optimizer(
>>> dht=dht, run_id="a_unique_name_that_every_participant_will_see_when_training",
>>> batch_size_per_step=ACTUAL_BATCH_SIZE_OF_THIS_PEER, target_batch_size=LARGE_GLOBAL_BATCH,
>>> # ^--- Each global optimizer step will use gradients from 1x-1.1x of target_batch_size (due to latency);
>>> # It is recommended to train with very large batch sizes to reduce the % of time spent on communication.
>>>
>>> params=params, optimizer=lambda params: AnyPyTorchOptimizer(params, **hyperparams_for_target_batch_size),
>>> # tune learning rate for your target_batch_size. Here's a good reference: https://arxiv.org/abs/1904.00962
>>> scheduler=lambda opt: AnyPyTorchScheduler(opt, **hyperparams_for_target_batch_size),
>>> # scheduler.step will be called automatically each time when peers collectively accumulate target_batch_size
>>>
>>> offload_optimizer=True, # saves GPU memory, but increases RAM usage; Generally a good practice to use this.
>>> delay_grad_averaging=OPTIONAL, delay_optimizer_step=OPTIONAL, # train faster, but with 1 round of staleness;
>>> # setting both to True is equivalent to Delayed Parameter Updates (see https://arxiv.org/abs/2101.06840)
>>>
>>> grad_compression=hivemind.Float16Compression(), state_averaging_compression=hivemind.Float16Compression(),
>>> # ^-- it is usually fine to use pure 16-bit or even lower precision during communication with no precaution;
>>> # See hivemind/examples/albert for a working example of mixed 8/16-bit compression.
>>>
>>> matchmaking_time=15.0, # 3-5s for small local runs, 10-15s for training over the internet or with many peers
>>> averaging_timeout=60.0, # around of 2x the actual time it takes to run all-reduce
>>> verbose=True # periodically report the training progress to the console (e.g. "Averaged with N peers")
>>> ) # and you're done!
:param dht: a running hivemind.DHT instance connected to other peers.
:param run_id: a unique identifier of this training run, used as a common prefix for all DHT keys.
**Note:** peers with the same run_id should *generally* train the same model and use compatible configurations.
Some options can be safely changed by individual peers: ``batch_size_per_step``, ``client_mode``, ``auxiliary``,
``reuse_grad_buffers``, ``offload_optimizer``, and ``verbose``. In some cases, other options may also be tuned
individually by each peer, but they should be changed with caution to avoid deadlocks or convergence issues.
:param target_batch_size: global batch size that must be accumulated before the swarm transitions to the next epoch.
The actual batch may be *slightly* larger due to asynchrony (e.g. peers submit more gradients in the last second).
:param batch_size_per_step: you should accumulate gradients over this many samples between calls to optimizer.step.
:param params: parameters or param groups for the optimizer; required if optimizer is a callable(params).
:param optimizer: a callable(parameters) -> pytorch.optim.Optimizer or a pre-initialized PyTorch optimizer.
**Note:** some advanced options like offload_optimizer, delay_optimizer_step, or delay_grad_averaging require
the callable form and will not work if hivemind.Optimizer is created with a pre-existing PyTorch Optimizer.
:param scheduler: callable(optimizer) -> PyTorch LRScheduler or a pre-initialized PyTorch scheduler.
The learning rate scheduler will adjust learning rate based on global epoch, not the number of
local calls to optimizer.step; this is required to keep different peers synchronized.
:param matchmaking_time: when looking for group, wait for peers to join for up to this many seconds.
Increase if you see "averaged gradients with N peers" where N is below 0.9x the real size on >=25% of epochs.
When training with low-latency network, decreasing matchmaking_time allows training with smaller batch sizes.
:param averaging_timeout: if an averaging step hangs for this long, it will be cancelled automatically.
Increase averaging_timeout if you see "Proceeding with local gradients" at least 25% of the time.
Do not set this timeout too high, as it may cause your optimizer to hang after some types of network errors.
:param allreduce_timeout: timeout for a single attempt to run all-reduce, default: equal to averaging_timeout.
:param load_state_timeout: wait for at most this many seconds before giving up on load_state_from_peers.
:param reuse_grad_buffers: if True, use model's .grad buffers for gradient accumulation.
This is more memory efficient, but it requires that the user does *NOT* call model/opt zero_grad at all
:param offload_optimizer: offload the optimizer to host memory, saving GPU memory for parameters and gradients
:param delay_optimizer_step: run optimizer in background, apply results in future .step; requires offload_optimizer
:param delay_grad_averaging: average gradients in background; requires offload_optimizer and delay_optimizer_step
:param delay_state_averaging: if enabled (default), average parameters and extra tensors in a background thread;
if set to False, average parameters synchronously within the corresponding hivemind.Optimizer.step call.
:param average_state_every: average state (parameters, chosen opt tensors) with peers every this many **epochs**.
This reduces the communication overhead, but can cause parameters to diverge if too large.
The maximal average_state_every=num_epochs depends on how often peers diverge from each other. If peers
hardly ever skip averaging rounds, they can average state less frequently. In turn, network failures, lossy
gradient compression and local_updates cause parameters to diverge faster and requires more frequent averaging.
:param use_local_updates: if enabled, peers will update parameters on each .step using local gradients;
if not enabled (default), accumulate gradients to target_batch_size, and then call .step with averaged gradients.
Even if use_local_updates=True, learning rate scheduler will still be called once per target_batch_size.
:param client_mode: if True, this peer will not accept incoming connections (firewall-compatible mode)
:param auxiliary: if True, optimizer.step will only assist other peers in averaging (for cpu-only workers)
:param grad_compression: compression strategy used for averaging gradients, default = no compression
:param state_averaging_compression: compression for averaging params and state tensors, default = no compression
:param load_state_compression: compression strategy for loading state from peers, default = no compression
:param average_opt_statistics: names of optimizer statistics from state dict that should be averaged with peers
:param extra_tensors: if specified, these extra tensors will also be averaged and shared in load_state_from_peers.
:param averager_opts: additional keyword arguments forwarded to both GradientAverager and TrainingStateAverager
:param tracker_opts: additional keyword arguments forwarded to ProgressTracker
:param performance_ema_alpha: moving average alpha in ProgressTracker, TrainingStateAverager and Optimizer
:param verbose: if True, report internal events such as accumulating gradients and running background tasks
:note: in a large-scale training, peers will inevitably fail and you will see error messages. hivemind.Optimizer
is designed to recover from such failures, but will sometimes need a minute or two to re-adjust.
Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients
Begin an all-reduce round to average gradients; return True if succeeded, False if failed
Check if gradients are valid, accumulate and return True; otherwise, reset and return False
If required, load averaged gradients into optimizer; otherwise simply notify grad averager
Fallback to using local gradients in the optimizer (instead of averaged gradients)
If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch
If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start
If true, peer will discard local progress and attempt to download state from peers.
This method allows peer to continue training in two cases:
- peer is on the same epoch as other collaborators - keep training normally
- peer was on the same epoch and accumulated some grads, but some collaborators
have just transitioned to the next epoch - this peer should also transition.
:note: The latter case occurs due to the lack of network synchrony: the first peer that
detects enough samples will transition to the next step and start counting samples anew.
Some other peers may take time before they check with DHT and observe that
- the global epoch is technically one epoch ahead of the current one and
- the remaining (non-transitioned) peers no longer have target_batch_size between them
If this is the case, peer should transition to the next epoch and does *not* need to re-load state.
Wait for a running averaging round to finish with zero weight.
Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step
Checks whether the current peer is up-to-date with others in terms of the epoch (step) number.
Attempt to load the newest collaboration state from other peers within the same run_id.
If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.
This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will
automatically re-synchronize by downloading state from another peer.
An epoch corresponds to accumulating target_batch_size across all active devices.
Update training progress after accumulating another local batch size. Depending on the configuration, this will
report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.
:param closure: A closure that reevaluates the model and returns the loss.
:param batch_size: optional override for batch_size_per_step from init.
:param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.
:note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
Reset gradients from model. If reuse_grad_buffers=True, this will raise an error.
used in self.should_load_state_from_peers measures the average time from the beginning of self._update_global_epoch to the call to state_averager used for pre-scheduling the averaging round in state_averager note: the line above is used by pytorch AMP GradScaler to enable custom behavior needed when reusing gradient buffers over multiple steps (to avoid repeated unscaling). Without reuse_grad_buffers, this is not needed. offloaded optimizer requires that gradient tensors are reused between iterations if delayed updates finished before step, apply these updates; otherwise do nothing local gradients were computed with out-of-sync parameters, must start over accumulate gradients toward target batch size, then aggregate with peers and run optimizer local gradients were reset due to overflow, must start over use_local_updates=True: update parameters on every step independently of other peers failed to start gradient averaging due to an internal error if using delayed grad averaing, send this to state_averager as a pre-condition for optimizer step delay_grad_averaging=False, average gradients immediately the above line ensures that peers check for *strict* synchronization once per epoch averaging is not performed at this epoch previous run is still in progress not enough data to accurately pre-schedule averaged gradients are already baked into optimizer, see _make_gradient_averager copy averaged gradients into optimizer .grad buffers note: we load gradients into grad_averager even though there is only one peer because of two reasons: - if offload_optimizer, then we must load gradients onto the CPU gradient buffers used by the optimizer - if not offload_optimizer, we must un-scale gradients (divide them by the number of accumulation steps) require exact synchronization once per step catch up if a peer just switched to next epoch note: we tag along for the next all-reduce because the run may have already started and cancelling it will cause peers to restart matchmaking and 
may stall the entire collaboration for a few seconds. | 15,444 | en | 0.802908 |
'''A temporarily outdated visualization module.'''
import graphviz as gv
from .model import Model
def visualize(model: Model, structural_part=True, measurement_part=False,
              view=True, filename=None, title=''):
    """Visualization of SEM model via graphviz library.

    Keyword arguments:
    model            -- A SEM model.
    structural_part  -- Should structural part be visualised?
    measurement_part -- Should measurement part be visualised?
    view             -- Should graph be displayed?
    filename         -- Filename/path.
    title            -- Title.
    """
    graph = gv.Digraph(format='jpg', graph_attr={'label': title})
    if structural_part:
        graph.node_attr.update(color='red', shape='box')
        for row, col in model.parameters['Beta']:
            row_name = model.beta_names[0][row]
            col_name = model.beta_names[0][col]
            graph.edge(col_name, row_name)
    if measurement_part:
        graph.node_attr.update(color='black', shape='circle')
        for row, col in model.parameters['Lambda']:
            row_name = model.lambda_names[0][row]
            col_name = model.lambda_names[0][col]
            # NOTE(review): edge direction here is opposite to the structural part — presumably
            # intentional (loading vs regression direction); confirm against semopy conventions
            graph.edge(row_name, col_name)
    graph.render(filename, view=view)
| semopy/visualization.py | 1,157 | Visualization of SEM model via graphviz library.
Keyword arguments:
model -- A SEM model.
structural_part -- Should structural part be visualised?
measurement_part -- Should measurement part be visualised?
view -- Should graph be displayed?
filename -- Filename/path.
title -- Title.
A temporarily outdated visualization module. | 372 | en | 0.632921 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .project_task_properties import ProjectTaskProperties
class MigrateSqlServerSqlDbTaskProperties(ProjectTaskProperties):
    """Task properties for migrating on-premises SQL Server databases to
    Azure SQL Database.

    Server-populated variables (``errors``, ``state``, ``commands``,
    ``output``) are ignored when sent in a request. All required
    parameters must be populated in order to send to Azure.

    :ivar errors: Array of errors. This is ignored if submitted.
    :vartype errors: list[~azure.mgmt.datamigration.models.ODataError]
    :ivar state: The state of the task. This is ignored if submitted.
     Possible values include: 'Unknown', 'Queued', 'Running', 'Canceled',
     'Succeeded', 'Failed', 'FailedInputValidation', 'Faulted'
    :vartype state: str or ~azure.mgmt.datamigration.models.TaskState
    :ivar commands: Array of command properties.
    :vartype commands:
     list[~azure.mgmt.datamigration.models.CommandProperties]
    :param client_data: Key value pairs of client data to attach meta data
     information to task
    :type client_data: dict[str, str]
    :param task_type: Required. Constant filled by server.
    :type task_type: str
    :param input: Task input
    :type input:
     ~azure.mgmt.datamigration.models.MigrateSqlServerSqlDbTaskInput
    :ivar output: Task output. This is ignored if submitted.
    :vartype output:
     list[~azure.mgmt.datamigration.models.MigrateSqlServerSqlDbTaskOutput]
    """

    # Server-managed fields are read-only; task_type is the required
    # discriminator of the polymorphic ProjectTaskProperties hierarchy.
    _validation = {
        'errors': {'readonly': True},
        'state': {'readonly': True},
        'commands': {'readonly': True},
        'task_type': {'required': True},
        'output': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and types.
    _attribute_map = {
        'errors': {'key': 'errors', 'type': '[ODataError]'},
        'state': {'key': 'state', 'type': 'str'},
        'commands': {'key': 'commands', 'type': '[CommandProperties]'},
        'client_data': {'key': 'clientData', 'type': '{str}'},
        'task_type': {'key': 'taskType', 'type': 'str'},
        'input': {'key': 'input', 'type': 'MigrateSqlServerSqlDbTaskInput'},
        'output': {'key': 'output', 'type': '[MigrateSqlServerSqlDbTaskOutput]'},
    }

    def __init__(self, **kwargs):
        super(MigrateSqlServerSqlDbTaskProperties, self).__init__(**kwargs)
        # Discriminator value identifying this task flavor to the service.
        self.task_type = 'Migrate.SqlServer.SqlDb'
        self.input = kwargs.get('input')
        self.output = None
| sdk/datamigration/azure-mgmt-datamigration/azure/mgmt/datamigration/models/migrate_sql_server_sql_db_task_properties.py | 2,873 | Properties for the task that migrates on-prem SQL Server databases to Azure
SQL Database.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar errors: Array of errors. This is ignored if submitted.
:vartype errors: list[~azure.mgmt.datamigration.models.ODataError]
:ivar state: The state of the task. This is ignored if submitted. Possible
values include: 'Unknown', 'Queued', 'Running', 'Canceled', 'Succeeded',
'Failed', 'FailedInputValidation', 'Faulted'
:vartype state: str or ~azure.mgmt.datamigration.models.TaskState
:ivar commands: Array of command properties.
:vartype commands:
list[~azure.mgmt.datamigration.models.CommandProperties]
:param client_data: Key value pairs of client data to attach meta data
information to task
:type client_data: dict[str, str]
:param task_type: Required. Constant filled by server.
:type task_type: str
:param input: Task input
:type input:
~azure.mgmt.datamigration.models.MigrateSqlServerSqlDbTaskInput
:ivar output: Task output. This is ignored if submitted.
:vartype output:
list[~azure.mgmt.datamigration.models.MigrateSqlServerSqlDbTaskOutput]
coding=utf-8 -------------------------------------------------------------------------- Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. See License.txt in the project root for license information. Code generated by Microsoft (R) AutoRest Code Generator. Changes may cause incorrect behavior and will be lost if the code is regenerated. -------------------------------------------------------------------------- | 1,666 | en | 0.51092 |
from setuptools import find_packages, setup
# Read the long description up front with a context manager so the file
# handle is closed promptly. The previous inline ``open(...).read()``
# leaked the handle until garbage collection (ResourceWarning under
# CPython's dev mode / -W error).
with open("README.md", "r", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    # Application info
    name="pytorch_common",
    version="1.5.3",
    author="Mihir Rana",
    author_email="ranamihir@gmail.com",
    description="Repo for common PyTorch code",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    test_suite="tests",
    # Packages that this package requires
    install_requires=[
        "numpy>=1.21.2",
        "pandas>=1.3.4",
        "matplotlib>=3.4.3",
        "dask[dataframe]>=2021.11.1",
        "toolz==0.10.0",
        "scikit-learn>=1.0.1",
        "dill>=0.3.4",
        "munch>=2.5.0",
        "locket==0.2.0",
    ],
    # Optional dependencies
    extras_require={"nlp": ["transformers>=4.15.0"]},  # for NLP related projects
    # Add config and sql files to the package
    # https://python-packaging.readthedocs.io/en/latest/non-code-files.html
    include_package_data=True,
)
| setup.py | 993 | Application info Packages that this package requires Optional dependencies for NLP related projects Add config and sql files to the package https://python-packaging.readthedocs.io/en/latest/non-code-files.html | 209 | en | 0.638735 |
import math

import torch.nn.functional as F
from torch import distributions as pyd
from torch import nn
class TanhTransform(pyd.transforms.Transform):
    """Bijective tanh transform mapping the real line onto (-1, 1).

    Used to squash an unbounded base distribution (e.g. a Normal) into a
    bounded range while keeping log-probabilities computable.

    Fix: ``log_abs_det_jacobian`` uses ``math.log``, but the module never
    imported ``math``, so calling it raised ``NameError``; the module-level
    import is now in place.
    """

    domain = pyd.constraints.real
    codomain = pyd.constraints.interval(-1.0, 1.0)
    bijective = True
    sign = +1

    def __init__(self, cache_size=1):
        # cache_size=1 caches forward values so the inverse can reuse them,
        # avoiding atanh on values numerically clamped to +/-1.
        super().__init__(cache_size=cache_size)

    @staticmethod
    def atanh(x):
        """Inverse hyperbolic tangent via log1p for numerical stability."""
        return 0.5 * (x.log1p() - (-x).log1p())

    def __eq__(self, other):
        # All TanhTransform instances are interchangeable.
        return isinstance(other, TanhTransform)

    def _call(self, x):
        return x.tanh()

    def _inverse(self, y):
        # We do not clamp to the boundary here as it may degrade the
        # performance of certain algorithms; one should use `cache_size=1`
        # instead.
        return self.atanh(y)

    def log_abs_det_jacobian(self, x, y):
        # log|d tanh(x)/dx| = log(1 - tanh(x)^2), computed in a numerically
        # stable form; see details in the following link:
        # https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963f#diff-e120f70e92e6741bca649f04fcd907b7
        return 2. * (math.log(2.) - x - F.softplus(-2. * x))
class SquashedNormal(pyd.transformed_distribution.TransformedDistribution):
    """A Normal distribution squashed through tanh into the interval (-1, 1)."""

    def __init__(self, loc, scale):
        self.loc = loc
        self.scale = scale
        self.base_dist = pyd.Normal(loc, scale)
        super().__init__(self.base_dist, [TanhTransform()])

    @property
    def mean(self):
        # Push the base-distribution mean through every transform in order.
        squashed = self.loc
        for transform in self.transforms:
            squashed = transform(squashed)
        return squashed
| distributions/squashed_normal.py | 1,544 | We do not clamp to the boundary here as it may degrade the performance of certain algorithms. one should use `cache_size=1` instead We use a formula that is more numerically stable, see details in the following link https://github.com/tensorflow/probability/commit/ef6bb176e0ebd1cf6e25c6b5cecdd2428c22963fdiff-e120f70e92e6741bca649f04fcd907b7 | 342 | en | 0.818183 |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
import pandas as pd
import pytest
import tempfile
import uuid
from unittest.mock import patch
import altair as alt
import matplotlib.pyplot as plt
from bokeh.plotting import figure
from plotly import figure_factory
from polyaxon import settings
from polyaxon.constants.globals import DEFAULT, PLATFORM_DIST_CE
from polyaxon.containers.contexts import (
CONTEXT_ARTIFACTS_FORMAT,
CONTEXT_MOUNT_ARTIFACTS_FORMAT,
CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT,
CONTEXT_OFFLINE_FORMAT,
CONTEXTS_OUTPUTS_SUBPATH_FORMAT,
)
from polyaxon.env_vars import getters
from polyaxon.env_vars.getters import get_run_info
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_COLLECT_ARTIFACTS,
POLYAXON_KEYS_COLLECT_RESOURCES,
POLYAXON_KEYS_LOG_LEVEL,
POLYAXON_KEYS_RUN_INSTANCE,
)
from polyaxon.exceptions import PolyaxonClientException
from polyaxon.polyboard.artifacts import V1ArtifactKind
from polyaxon.polyboard.events import V1Events, get_asset_path, get_event_path
from polyaxon.polyboard.processors.writer import EventFileWriter, ResourceFileWriter
from polyaxon.tracking.run import Run
from polyaxon.utils.path_utils import create_path
from tests.utils import TestEnvVarsCase, tensor_np
@pytest.mark.tracking_mark
class TestRunTracking(TestEnvVarsCase):
    """Tests for env-var getters, run-info parsing, Run() construction and
    event/resource logger wiring.

    NOTE(review): these tests mutate process-global state
    (settings.CLIENT_CONFIG, settings.CLI_CONFIG, os.environ) in a precise
    order; statement order within each test is load-bearing.
    """
    def setUp(self):
        """Start every test in managed + offline client configuration."""
        super().setUp()
        settings.CLIENT_CONFIG.is_managed = True
        settings.CLIENT_CONFIG.is_offline = True
    def test_get_collect_artifacts_return_false_out_cluster(self):
        """collect_artifacts is False outside the cluster regardless of env."""
        settings.CLIENT_CONFIG.is_managed = False
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
        assert getters.get_collect_artifacts() is False
    def test_empty_collect_artifacts_path(self):
        """collect_artifacts defaults to False when the env var is unset."""
        settings.CLIENT_CONFIG.is_managed = True
        assert getters.get_collect_artifacts() is False
    def test_valid_artifacts_path(self):
        """collect_artifacts parses "true" to True when managed."""
        settings.CLIENT_CONFIG.is_managed = True
        self.check_valid_value(
            POLYAXON_KEYS_COLLECT_ARTIFACTS, getters.get_collect_artifacts, "true", True
        )
    def test_get_collect_resources_return_false_out_cluster(self):
        """collect_resources is False outside the cluster regardless of env."""
        settings.CLIENT_CONFIG.is_managed = False
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
        assert getters.get_collect_resources() is False
    def test_empty_collect_resources_path(self):
        """collect_resources defaults to False when the env var is unset."""
        settings.CLIENT_CONFIG.is_managed = True
        assert getters.get_collect_resources() is False
    def test_valid_resources_path(self):
        """collect_resources parses "true" to True when managed."""
        settings.CLIENT_CONFIG.is_managed = True
        self.check_valid_value(
            POLYAXON_KEYS_COLLECT_RESOURCES, getters.get_collect_resources, "true", True
        )
    def test_get_log_level_out_cluster(self):
        """Log level is empty outside the cluster."""
        settings.CLIENT_CONFIG.is_managed = False
        self.check_empty_value(POLYAXON_KEYS_LOG_LEVEL, getters.get_log_level)
    def test_empty_log_level(self):
        """Log level is empty when the env var is unset, even when managed."""
        settings.CLIENT_CONFIG.is_managed = True
        self.check_empty_value(POLYAXON_KEYS_LOG_LEVEL, getters.get_log_level)
    def test_run_info_checks_is_managed(self):
        """get_run_info raises when the client is not managed."""
        settings.CLIENT_CONFIG.is_managed = False
        with self.assertRaises(PolyaxonClientException):
            get_run_info()
    def test_empty_run_info(self):
        """A missing run-instance env var raises PolyaxonClientException."""
        self.check_raise_for_invalid_value(
            POLYAXON_KEYS_RUN_INSTANCE, get_run_info, None, PolyaxonClientException
        )
    def test_non_valid_run_info(self):
        """Malformed run-instance strings raise PolyaxonClientException."""
        self.check_raise_for_invalid_value(
            POLYAXON_KEYS_RUN_INSTANCE,
            get_run_info,
            "something random",
            PolyaxonClientException,
        )
        self.check_raise_for_invalid_value(
            POLYAXON_KEYS_RUN_INSTANCE,
            get_run_info,
            "foo.bar",
            PolyaxonClientException,
        )
    def test_dict_run_info(self):
        """A valid `owner.project.runs.uid` string parses into its parts."""
        uid = uuid.uuid4().hex
        run_info = "user.project_bar.runs.{}".format(uid)
        self.check_valid_value(
            POLYAXON_KEYS_RUN_INSTANCE,
            get_run_info,
            run_info,
            ("user", "project_bar", uid),
        )
    @patch("polyaxon.managers.base.os.path.expanduser")
    def test_run_init(self, expanduser):
        """Run() owner/project resolution across managed/offline/CE combos."""
        expanduser.return_value = tempfile.mkdtemp()
        settings.CLIENT_CONFIG.is_managed = False
        settings.CLIENT_CONFIG.is_offline = False
        with self.assertRaises(PolyaxonClientException):
            Run()
        # Uses default as owner in non CE
        with self.assertRaises(PolyaxonClientException):
            Run(project="test")
        # Uses default as owner in CE
        settings.CLIENT_CONFIG.is_offline = True
        settings.CLI_CONFIG.installation = {"dist": PLATFORM_DIST_CE}
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            run = Run(project="test", track_code=False, track_env=False)
        assert exit_mock.call_count == 1
        assert run.owner == DEFAULT
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            run = Run(
                owner="owner-test", project="test", track_code=False, track_env=False
            )
        assert exit_mock.call_count == 1
        assert run.owner == "owner-test"
        assert run.project == "test"
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            run = Run(project="owner-test.test")
        assert exit_mock.call_count == 1
        assert run.owner == "owner-test"
        assert run.project == "test"
        settings.CLIENT_CONFIG.is_managed = True
        settings.CLIENT_CONFIG.is_offline = False
        with self.assertRaises(PolyaxonClientException):
            Run()
        settings.CLI_CONFIG.installation = None
        # Uses default as owner in non CE
        with self.assertRaises(PolyaxonClientException):
            Run(project="test")
        # Uses default as owner in CE
        settings.CLIENT_CONFIG.is_offline = True
        settings.CLI_CONFIG.installation = {"dist": PLATFORM_DIST_CE}
        run = Run(project="test")
        assert run.owner == DEFAULT
        # FQN non CE
        settings.CLI_CONFIG.installation = None
        os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
        run = Run()
        assert run.owner == "user"
        assert run.project == "project_bar"
        assert run.run_uuid == "uid"
        # FQN CE
        settings.CLI_CONFIG.installation = {"dist": PLATFORM_DIST_CE}
        os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
        run = Run()
        assert run.owner == "user"
        assert run.project == "project_bar"
        assert run.run_uuid == "uid"
    def test_event_logger_from_non_managed_run(self):
        """Artifact paths and lazy logger setup for a non-managed Run."""
        settings.CLIENT_CONFIG.is_managed = False
        settings.CLIENT_CONFIG.is_offline = False
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            run = Run(
                project="owner-test.test",
                track_code=False,
                track_env=False,
                collect_artifacts=False,
                auto_create=False,
            )
        assert exit_mock.call_count == 1
        artifacts_context = CONTEXT_ARTIFACTS_FORMAT.format(run.run_uuid)
        assert run.get_artifacts_path() == artifacts_context
        assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(
            artifacts_context
        )
        assert run._event_logger is None
        # Add run id
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            run = Run(
                project="owner-test.test",
                run_uuid="uuid",
                track_code=False,
                track_env=False,
                collect_artifacts=False,
            )
        assert exit_mock.call_count == 1
        artifacts_context = CONTEXT_ARTIFACTS_FORMAT.format(run.run_uuid)
        assert run.get_artifacts_path() == artifacts_context
        assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(
            artifacts_context
        )
        assert run._event_logger is None
        run.set_artifacts_path()
        assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("uuid")
        assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("uuid")
        with patch("polyaxon.tracking.run.EventFileWriter") as mock_call:
            run.set_run_event_logger()
        assert mock_call.call_count == 1
        with patch("polyaxon.tracking.run.ResourceFileWriter") as mock_call:
            run.set_run_resource_logger()
        assert mock_call.call_count == 1
        # Set collect flag
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "true"
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "true"
        settings.CLIENT_CONFIG.is_managed = True
        with patch("polyaxon.tracking.run.EventFileWriter") as event_call:
            with patch("polyaxon.tracking.run.ResourceFileWriter") as resource_call:
                with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
                    with patch(
                        "polyaxon.tracking.run.Run._set_exit_handler"
                    ) as exit_call:
                        run = Run(project="owner-test.test", run_uuid="uuid")
        assert refresh_call.call_count == 1
        assert event_call.call_count == 1
        assert resource_call.call_count == 1
        assert exit_call.call_count == 1
        assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("uuid")
        assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("uuid")
    def test_event_logger_from_a_managed_run(self):
        """Logger setup is driven by the collect env flags for a managed Run."""
        # Set managed flag
        settings.CLIENT_CONFIG.is_managed = True
        settings.CLIENT_CONFIG.is_offline = False
        os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
        with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
            run = Run()
        assert refresh_call.call_count == 1
        assert run.get_artifacts_path() == CONTEXT_MOUNT_ARTIFACTS_FORMAT.format("uid")
        assert run.get_outputs_path() == CONTEXT_MOUNT_RUN_OUTPUTS_FORMAT.format("uid")
        assert run._event_logger is None
        # Set collect flag
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "true"
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "true"
        # Add run id
        with patch("polyaxon.tracking.run.Run.set_run_event_logger") as event_call:
            with patch(
                "polyaxon.tracking.run.Run.set_run_resource_logger"
            ) as resource_call:
                with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
                    with patch(
                        "polyaxon.tracking.run.Run._set_exit_handler"
                    ) as exit_call:
                        Run(project="test.test", run_uuid="uuid")
        assert event_call.call_count == 1
        assert resource_call.call_count == 1
        assert refresh_call.call_count == 1
        assert exit_call.call_count == 1
        # Set run info
        os.environ[POLYAXON_KEYS_RUN_INSTANCE] = "user.project_bar.runs.uid"
        # Add run id
        with patch("polyaxon.tracking.run.Run.set_run_event_logger") as event_call:
            with patch(
                "polyaxon.tracking.run.Run.set_run_resource_logger"
            ) as resource_call:
                with patch("polyaxon.tracking.run.Run.refresh_data") as refresh_call:
                    Run()
        assert event_call.call_count == 1
        assert resource_call.call_count == 1
        assert refresh_call.call_count == 1
    def test_event_logger_from_an_offline_run(self):
        """Offline runs use the offline context path and collect-flag wiring."""
        # Set managed flag
        settings.CLIENT_CONFIG.is_managed = False
        settings.CLIENT_CONFIG.is_offline = True
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            run = Run(project="test.test", run_uuid="uid")
        assert exit_mock.call_count == 1
        artifacts_path = CONTEXT_OFFLINE_FORMAT.format("uid")
        assert run.get_artifacts_path() == artifacts_path
        assert run.get_outputs_path() == CONTEXTS_OUTPUTS_SUBPATH_FORMAT.format(
            artifacts_path
        )
        assert run._event_logger is None
        # Set collect flag
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "true"
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "true"
        # Add run id
        with patch("polyaxon.tracking.run.Run.set_run_event_logger") as event_call:
            with patch(
                "polyaxon.tracking.run.Run.set_run_resource_logger"
            ) as resource_call:
                with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
                    Run(project="test.test", run_uuid="uid")
        assert exit_mock.call_count == 1
        assert event_call.call_count == 1
        assert resource_call.call_count == 1
@pytest.mark.tracking_mark
class TestRunLogging(TestEnvVarsCase):
    def setUp(self):
        """Create an offline Run wired to temp event/resource file writers."""
        super().setUp()
        self.run_path = tempfile.mkdtemp()
        self.run_outputs_path = tempfile.mkdtemp()
        settings.CLIENT_CONFIG.is_managed = False
        os.environ[POLYAXON_KEYS_COLLECT_ARTIFACTS] = "false"
        os.environ[POLYAXON_KEYS_COLLECT_RESOURCES] = "false"
        with patch("polyaxon.tracking.run.Run._set_exit_handler") as exit_mock:
            self.run = Run(
                project="owner.project",
                track_env=False,
                track_code=False,
                auto_create=False,
            )
        assert exit_mock.call_count == 1
        # Wire the run to file writers rooted at the temp run_path.
        self.event_logger = EventFileWriter(run_path=self.run_path)
        self.resource_logger = ResourceFileWriter(run_path=self.run_path)
        self.run._artifacts_path = self.run_path
        self.run._outputs_path = self.run_outputs_path
        self.run._event_logger = self.event_logger
        self.run._resource_logger = self.resource_logger
        assert os.path.exists(get_event_path(self.run_path)) is True
        assert os.path.exists(get_asset_path(self.run_path)) is True
@staticmethod
def touch(path):
with open(path, "w") as f:
f.write("test")
    def test_log_empty_metric(self):
        """log_metrics() with no values creates no metric events or assets."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
            self.run.log_metrics()
        assert log_metrics.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
    def test_log_single_metric(self):
        """A single metric value produces one event row and no asset file."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
            self.run.log_metrics(step=1, metric1=1.1)
        assert log_metrics.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.METRIC, name="metric1"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="metric", name="metric1", data=events_file)
        assert len(results.df.values) == 1
    def test_log_multiple_metrics(self):
        """Each metric gets its own events file; rows accumulate per step."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
            self.run.log_metrics(step=1, metric1=1.1, metric2=21.1)
        assert log_metrics.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.METRIC, name="metric1"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="metric", name="metric1", data=events_file)
        assert len(results.df.values) == 1
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.METRIC, name="metric2"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="metric", name="metric2", data=events_file)
        assert len(results.df.values) == 1
        # Second step: metric1/metric2 gain a row; metric3 starts fresh.
        with patch("polyaxon.tracking.run.Run._log_has_metrics") as log_metrics:
            self.run.log_metrics(step=2, metric1=1.1, metric2=21.1, metric3=12.1)
        assert log_metrics.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is False
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.METRIC))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.METRIC, name="metric1"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="metric", name="metric1", data=events_file)
        assert len(results.df.values) == 2
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.METRIC, name="metric2"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="metric", name="metric2", data=events_file)
        assert len(results.df.values) == 2
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.METRIC, name="metric3"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="metric", name="metric3", data=events_file)
        assert len(results.df.values) == 1
    def test_log_image_from_path(self):
        """Logging an image by file path writes one event and one png asset."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        image_file = tempfile.mkdtemp() + "/file.png"
        self.touch(image_file)
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_image:
            self.run.log_image(name="my_image", data=image_file)
        assert log_image.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="my_image", data=events_file)
        assert len(results.df.values) == 1
        asset_file = get_asset_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image", ext="png"
        )
        assert os.path.exists(asset_file) is True
    def test_log_image_from_path_with_step(self):
        """Passing step=1 includes the step in the stored asset path."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        image_file = tempfile.mkdtemp() + "/file.png"
        self.touch(image_file)
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_image:
            self.run.log_image(name="my_image", data=image_file, step=1)
        assert log_image.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="my_image", data=events_file)
        assert len(results.df.values) == 1
        asset_file = get_asset_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image", step=1, ext="png"
        )
        assert os.path.exists(asset_file) is True
    def test_log_data_image(self):
        """Logging a raw CHW array renders and stores a png asset."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_image:
            self.run.log_image(
                name="my_image", data=tensor_np(shape=(1, 8, 8)), dataformats="CHW"
            )
        assert log_image.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="my_image", data=events_file)
        assert len(results.df.values) == 1
        asset_file = get_asset_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image", ext="png"
        )
        assert os.path.exists(asset_file) is True
    def test_log_image_with_boxes(self):
        """Logging an image tensor plus bounding boxes stores event + asset."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        image_file = tempfile.mkdtemp() + "/file.png"
        self.touch(image_file)
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_image_with_boxes:
            self.run.log_image_with_boxes(
                name="my_image",
                tensor_image=tensor_np(shape=(3, 32, 32)),
                tensor_boxes=np.array([[10, 10, 40, 40]]),
            )
        assert log_image_with_boxes.call_count == 1
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="my_image", data=events_file)
        assert len(results.df.values) == 1
        asset_file = get_asset_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="my_image"
        )
        assert os.path.exists(asset_file) is True
    def test_log_mpl_image(self):
        """Matplotlib figures are logged per step; close=False keeps them open."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is False
        )
        figure, axes = plt.figure(), plt.gca()
        circle1 = plt.Circle((0.2, 0.5), 0.2, color="r")
        circle2 = plt.Circle((0.8, 0.5), 0.2, color="g")
        axes.add_patch(circle1)
        axes.add_patch(circle2)
        plt.axis("scaled")
        plt.tight_layout()
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_image:
            self.run.log_mpl_image(name="figure", data=figure, step=1, close=False)
        assert log_mpl_image.call_count == 1
        # close=False: the figure must still be alive after logging.
        assert plt.fignum_exists(figure.number) is True
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="figure", data=events_file)
        assert len(results.df.values) == 1
        asset_file = get_asset_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="figure", step=1, ext="png"
        )
        assert os.path.exists(asset_file) is True
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_dashboard:
            self.run.log_mpl_image(name="figure", data=figure, step=2)
        assert log_dashboard.call_count == 1
        # Default close behavior: the figure is closed after logging.
        assert plt.fignum_exists(figure.number) is False
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="figure", data=events_file)
        assert len(results.df.values) == 2
        asset_file = get_asset_path(
            self.run_path, kind=V1ArtifactKind.IMAGE, name="figure", step=1, ext="png"
        )
        assert os.path.exists(asset_file) is True
@pytest.mark.filterwarnings("ignore::FutureWarning")
def test_log_mpl_images(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is False
)
figures = []
for i in range(5):
figure = plt.figure()
plt.plot([i * 1, i * 2, i * 3], label="Plot " + str(i))
plt.xlabel("X")
plt.xlabel("Y")
plt.legend()
plt.tight_layout()
figures.append(figure)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_image:
self.run.log_mpl_image(name="figure", data=figures, step=1, close=False)
assert log_mpl_image.call_count == 1
assert all([plt.fignum_exists(figure.number) is True for figure in figures])
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 1
with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_image:
self.run.log_mpl_image(name="figure", data=figures, step=2)
assert log_mpl_image.call_count == 1
assert all([plt.fignum_exists(figure.number) is False for figure in figures])
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.IMAGE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="image", name="figure", data=events_file)
assert len(results.df.values) == 2
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.IMAGE, name="figure", step=1, ext="png"
)
assert os.path.exists(asset_file) is True
    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    def test_log_mpl_plotly(self):
        """Matplotlib figures logged via plotly produce chart events only
        (no asset files)."""
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
            is False
        )
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
            is False
        )
        figure, axes = plt.figure(), plt.gca()
        circle1 = plt.Circle((0.2, 0.5), 0.2, color="r")
        circle2 = plt.Circle((0.8, 0.5), 0.2, color="g")
        axes.add_patch(circle1)
        axes.add_patch(circle2)
        plt.axis("scaled")
        plt.tight_layout()
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_plotly_chart:
            self.run.log_mpl_plotly_chart(name="figure", figure=figure, step=1)
        assert log_mpl_plotly_chart.call_count == 1
        self.event_logger.flush()
        # Charts are serialized into the event stream; no asset is written.
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
            is False
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.CHART, name="figure"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="figure", data=events_file)
        assert len(results.df.values) == 1
        with patch("polyaxon.tracking.run.Run._log_has_events") as log_mpl_plotly_chart:
            self.run.log_mpl_plotly_chart(name="figure", figure=figure, step=2)
        assert log_mpl_plotly_chart.call_count == 1
        assert plt.fignum_exists(figure.number) is False
        self.event_logger.flush()
        assert (
            os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
            is False
        )
        assert (
            os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
            is True
        )
        events_file = get_event_path(
            self.run_path, kind=V1ArtifactKind.CHART, name="figure"
        )
        assert os.path.exists(events_file) is True
        results = V1Events.read(kind="image", name="figure", data=events_file)
        assert len(results.df.values) == 2
def test_log_video_from_path(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
video_file = tempfile.mkdtemp() + "/video.gif"
self.touch(video_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_video:
self.run.log_video(name="my_video", data=video_file)
assert log_video.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="video", name="my_video", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video", ext="gif"
)
assert os.path.exists(asset_file) is True
def test_log_data_video(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_dashboard:
self.run.log_video(name="my_video", data=tensor_np(shape=(4, 3, 1, 8, 8)))
assert log_dashboard.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.VIDEO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="video", name="my_video", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.VIDEO, name="my_video", ext="gif"
)
assert os.path.exists(asset_file) is True
def test_log_audio_from_path(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
audio_file = tempfile.mkdtemp() + "/audio.wav"
self.touch(audio_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_audio:
self.run.log_audio(name="my_audio", data=audio_file)
assert log_audio.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="audio", name="my_audio", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio", ext="wav"
)
assert os.path.exists(asset_file) is True
def test_log_data_audio(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_audio:
self.run.log_audio(name="my_audio", data=tensor_np(shape=(42,)))
assert log_audio.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.AUDIO))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="audio", name="my_audio", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.AUDIO, name="my_audio", ext="wav"
)
assert os.path.exists(asset_file) is True
def test_log_text(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TEXT))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TEXT))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_text:
self.run.log_text(name="my_text", text="some text", step=1)
assert log_text.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TEXT))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TEXT))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TEXT, name="my_text"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="text", name="my_text", data=events_file)
assert len(results.df.values) == 1
def test_log_html(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HTML))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HTML))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_html:
self.run.log_html(name="my_div", html="<div>test<div/>", step=1)
assert log_html.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HTML))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HTML))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.HTML, name="my_div"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="html", name="my_div", data=events_file)
assert len(results.df.values) == 1
def test_log_histogram(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_histogram:
self.run.log_histogram(
name="histo", values=tensor_np(shape=(1024,)), bins="auto", step=1
)
self.run.log_histogram(
name="histo", values=tensor_np(shape=(1024,)), bins="fd", step=1
)
self.run.log_histogram(
name="histo", values=tensor_np(shape=(1024,)), bins="doane", step=1
)
assert log_histogram.call_count == 3
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.HISTOGRAM, name="histo"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="histogram", name="histo", data=events_file)
assert len(results.df.values) == 3
def test_log_np_histogram(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
values, counts = np.histogram(np.random.randint(255, size=(1000,)))
with patch("polyaxon.tracking.run.Run._log_has_events") as log_np_histogram:
self.run.log_np_histogram(
name="histo", values=values, counts=counts, step=1
)
assert log_np_histogram.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.HISTOGRAM))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.HISTOGRAM, name="histo"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="histogram", name="histo", data=events_file)
assert len(results.df.values) == 1
def test_log_model_file(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False
model_file = tempfile.mkdtemp() + "/model.pkl"
self.touch(model_file)
with patch("polyaxon.tracking.run.Run.log_model_ref") as log_model:
self.run.log_model(
name="my_model", path=model_file, framework="scikit", versioned=False
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False
model_file = self.run.get_outputs_path("model.pkl")
assert os.path.exists(model_file) is True
def test_log_model_dir(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is False
model_dir = tempfile.mkdtemp() + "/model"
create_path(model_dir)
model_file = model_dir + "/model.pkl"
self.touch(model_file)
weights_file = model_dir + "/weights"
self.touch(weights_file)
configs_file = model_dir + "/configs"
self.touch(configs_file)
with patch("polyaxon.tracking.run.Run.log_model_ref") as log_model:
self.run.log_model(
name="my_model", path=model_dir, framework="tensorflow", versioned=False
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert os.path.exists(self.run.get_outputs_path(V1ArtifactKind.MODEL)) is True
model_file = self.run.get_outputs_path(
"{}/{}".format(V1ArtifactKind.MODEL, "model.pkl")
)
assert os.path.exists(model_file) is True
def test_log_versioned_model_file(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
model_file = tempfile.mkdtemp() + "/model.pkl"
self.touch(model_file)
with patch("polyaxon.tracking.run.Run._log_has_model") as log_model:
self.run.log_model(
name="my_model", path=model_file, framework="scikit", step=1
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="model", name="my_model", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model_1", ext="pkl"
)
assert os.path.exists(asset_file) is True
def test_log_versioned_model_dir(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is False
)
model_dir = tempfile.mkdtemp() + "/model"
create_path(model_dir)
model_file = model_dir + "/model.pkl"
self.touch(model_file)
weights_file = model_dir + "/weights"
self.touch(weights_file)
configs_file = model_dir + "/configs"
self.touch(configs_file)
with patch("polyaxon.tracking.run.Run._log_has_model") as log_model:
self.run.log_model(
name="my_model", path=model_dir, framework="tensorflow", step=1
)
assert log_model.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.MODEL))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="model", name="my_model", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.MODEL, name="my_model_1"
)
assert os.path.exists(asset_file) is True
def test_log_dataframe_ref(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
model_file = tempfile.mkdtemp() + "/df.pkl"
self.touch(model_file)
with patch("polyaxon.tracking.run.Run.log_artifact_ref") as log_artifact_ref:
self.run.log_artifact(
name="dataframe",
path=model_file,
kind=V1ArtifactKind.DATAFRAME,
versioned=False,
)
assert log_artifact_ref.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
asset_file = self.run.get_outputs_path(rel_path="df.pkl")
assert os.path.exists(asset_file) is True
def test_log_dataframe(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
df = pd.DataFrame(data=[])
with patch("polyaxon.tracking.run.Run._log_has_events") as log_dataframe:
self.run.log_dataframe(df=df, name="dataframe", content_type="csv")
assert log_dataframe.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="dataframe"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind="dataframe", name="dataframe", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="dataframe", ext="csv"
)
assert os.path.exists(asset_file) is True
def test_log_artifact(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run.log_artifact_ref") as log_artifact:
self.run.log_artifact(
name="file",
path=tsv_file,
kind=V1ArtifactKind.TSV,
versioned=False,
)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert os.path.exists(self.run.get_outputs_path("file.tsv")) is True
def test_versioned_log_artifact(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(
name="file", path=tsv_file, kind=V1ArtifactKind.TSV, step=1
)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file_1", ext="tsv"
)
assert os.path.exists(asset_file) is True
def test_log_artifact_without_name(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(path=tsv_file, kind=V1ArtifactKind.TSV, step=1)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file_1", ext="tsv"
)
assert os.path.exists(asset_file) is True
def test_log_artifact_without_name_and_filename_with_several_dots(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.FILE))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.FILE))
is False
)
tar_file = tempfile.mkdtemp() + "/file.tar.gz"
self.touch(tar_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(path=tar_file, kind=V1ArtifactKind.FILE, step=1)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.FILE))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.FILE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.FILE, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.FILE, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.FILE, name="file_1", ext="tar.gz"
)
assert os.path.exists(asset_file) is True
def test_log_versioned_artifacts(self):
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is False
)
tsv_file = tempfile.mkdtemp() + "/file.tsv"
self.touch(tsv_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(
name="file", path=tsv_file, kind=V1ArtifactKind.TSV, step=1
)
assert log_artifact.call_count == 1
pd_file = tempfile.mkdtemp() + "/dataframe"
self.touch(pd_file)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_artifact:
self.run.log_artifact(
name="file2", path=pd_file, kind=V1ArtifactKind.DATAFRAME, step=1
)
assert log_artifact.call_count == 1
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.TSV))
is True
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.DATAFRAME))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="file2"
)
assert os.path.exists(events_file) is True
results = V1Events.read(kind=V1ArtifactKind.TSV, name="file", data=events_file)
assert len(results.df.values) == 1
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.TSV, name="file_1", ext="tsv"
)
assert os.path.exists(asset_file) is True
asset_file = get_asset_path(
self.run_path, kind=V1ArtifactKind.DATAFRAME, name="file2_1"
)
assert os.path.exists(asset_file) is True
def test_log_charts(self):
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
bokeh_test = figure(
title="simple line example", x_axis_label="x", y_axis_label="y"
)
bokeh_test.line(x, y, line_width=2)
x1 = np.random.randn(200) - 2
x2 = np.random.randn(200)
x3 = np.random.randn(200) + 2
hist_data = [x1, x2, x3]
group_labels = ["Group 1", "Group 2", "Group 3"]
plotly_test = figure_factory.create_distplot(
hist_data, group_labels, bin_size=[0.1, 0.25, 0.5]
)
df1 = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
alt_test = alt.Chart(df1).mark_bar().encode(x="a", y="b")
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
with patch("polyaxon.tracking.run.Run._log_has_events") as log_charts:
self.run.log_bokeh_chart(name="bokeh_test", figure=bokeh_test, step=1)
self.run.log_plotly_chart(name="plotly_test", figure=plotly_test, step=1)
self.run.log_altair_chart(name="alt_test", figure=alt_test, step=1)
assert log_charts.call_count == 3
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CHART))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CHART))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="bokeh_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="bokeh_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="plotly_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="plotly_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CHART, name="alt_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="alt_test", data=events_file
)
assert len(results.df.values) == 1
def test_log_curves(self):
x = [1, 2, 3, 4, 5]
y = [6, 7, 2, 4, 5]
with patch("polyaxon.tracking.run.Run._log_has_events") as log_curves:
self.run.log_roc_auc_curve(name="roc_test", fpr=x, tpr=y, auc=0.6, step=1)
self.run.log_pr_curve(
name="pr_test", precision=x, recall=y, average_precision=0.6, step=1
)
self.run.log_curve(name="curve_test", x=x, y=y, annotation=0.6, step=1)
assert log_curves.call_count == 3
self.event_logger.flush()
assert (
os.path.exists(get_asset_path(self.run_path, kind=V1ArtifactKind.CURVE))
is False
)
assert (
os.path.exists(get_event_path(self.run_path, kind=V1ArtifactKind.CURVE))
is True
)
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CURVE, name="roc_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CURVE, name="roc_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CURVE, name="pr_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="pr_test", data=events_file
)
assert len(results.df.values) == 1
events_file = get_event_path(
self.run_path, kind=V1ArtifactKind.CURVE, name="curve_test"
)
assert os.path.exists(events_file) is True
results = V1Events.read(
kind=V1ArtifactKind.CHART, name="curve_test", data=events_file
)
assert len(results.df.values) == 1
| src/tests/test_tracking/test_run_tracking.py | 63,652 | !/usr/bin/python Copyright 2018-2021 Polyaxon, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Uses default as owner in non CE Uses default as owner in CE Uses default as owner in non CE Uses default as owner in CE FQN non CE FQN CE Add run id Set collect flag Set managed flag Set collect flag Add run id Set run info Add run id Set managed flag Set collect flag Add run id | 854 | en | 0.834254 |
# Copyright 2014 Joe Cora.
# Revisions copyright 2017 Peter Cock.
# All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Objects to represent NEXUS standard data type matrix coding."""
from __future__ import print_function
import sys
from Bio._py3k import basestring
class NexusError(Exception):
pass
class StandardData(object):
"""Create a StandardData iterable object.
Each coding specifies t [type] => (std [standard], multi [multistate] or
uncer [uncertain]) and d [data]
"""
def __init__(self, data):
self._data = []
self._current_pos = 0
# Enforce string data requirement
if not isinstance(data, basestring):
raise NexusError("The coding data given to a StandardData object should be a string")
# Transfer each coding to a position within a sequence
multi_coding = False
uncertain_coding = False
coding_list = {'t': 'std', 'd': []}
for pos, coding in enumerate(data):
# Check if in a multiple coded or uncertain character
if multi_coding:
# End multicoding if close parenthesis
if coding == ')':
multi_coding = False
else:
# Add current coding to list and advance to next coding
coding_list['d'].append(coding)
continue
elif uncertain_coding:
# End multicoding if close parenthesis
if coding == '}':
uncertain_coding = False
else:
# Add current coding to list and advance to next coding
coding_list['d'].append(coding)
continue
else:
# Check if a multiple coded or uncertain character is starting
if coding == '(':
multi_coding = True
coding_list['t'] = 'multi'
continue
elif coding == '{':
uncertain_coding = True
coding_list['t'] = 'uncer'
continue
elif coding in [')', '}']:
raise NexusError('Improper character "' + coding +
'" at position ' + pos +
' of a coding sequence.')
else:
coding_list['d'].append(coding)
# Add character coding to data
self._data.append(coding_list.copy())
coding_list = {'t': 'std', 'd': []}
def __len__(self):
"""Returns the length of the coding, use len(my_coding)."""
return len(self._data)
def __getitem__(self, arg):
return self._data[arg]
def __iter__(self):
return self
def __next__(self):
try:
return_coding = self._data[self._current_pos]
except IndexError:
self._current_pos = 0
raise StopIteration
else:
self._current_pos += 1
return return_coding
if sys.version_info[0] < 3:
def next(self):
"""Deprecated Python 2 style alias for Python 3 style __next__ method."""
return self.__next__()
def raw(self):
"""Returns the full coding as a python list."""
return self._data
def __str__(self):
"""Returns the full coding as a python string, use str(my_coding)."""
str_return = ''
for coding in self._data:
if coding['t'] == 'multi':
str_return += '(' + ''.join(coding['d']) + ')'
elif coding['t'] == 'uncer':
str_return += '{' + ''.join(coding['d']) + '}'
else:
str_return += coding['d'][0]
return str_return
| Bio/Nexus/StandardData.py | 3,980 | Create a StandardData iterable object.
Each coding specifies t [type] => (std [standard], multi [multistate] or
uncer [uncertain]) and d [data]
Returns the length of the coding, use len(my_coding).
Returns the full coding as a python string, use str(my_coding).
Deprecated Python 2 style alias for Python 3 style __next__ method.
Returns the full coding as a python list.
Objects to represent NEXUS standard data type matrix coding.
Copyright 2014 Joe Cora. Revisions copyright 2017 Peter Cock. All rights reserved. This code is part of the Biopython distribution and governed by its license. Please see the LICENSE file that should have been included as part of this package. Enforce string data requirement Transfer each coding to a position within a sequence Check if in a multiple coded or uncertain character End multicoding if close parenthesis Add current coding to list and advance to next coding End multicoding if close parenthesis Add current coding to list and advance to next coding Check if a multiple coded or uncertain character is starting Add character coding to data | 1,089 | en | 0.773303 |
"""
Words analyses by Amardjia Amine.
Free software utility which allows you to find the most frequent phrases
and frequencies of words. French and English language texts are supported.
It also counts number of words, characters, the lexical density,
sentences...etc.
https://github.com/Layto888/Words-Analysis
Usage in command line: python words.py -f [filename.txt] -d [True/False]
-h : for help.
Usage example: python words.py -f test.txt -d True
"""
import argparse
import re
import sys
import time
import platform
import operator
from collections import Counter
from contextlib import redirect_stdout
# Number of top-frequency words to display in the report.
MAX_DIPLAY = 10
# Word-list file used for the lexical-density computation.
FILE_LEXI = "lexi.wd"
FRENCH_LIST_LENGTH = 78

# Codec used when opening input text files.
# NOTE(review): the original handled only Linux and Windows, leaving
# DEFAULT_CODEC undefined on any other platform (e.g. macOS), which made
# Words.__init__ crash with a NameError. Fall back to the platform
# default codec (None) everywhere that is not Linux.
if platform.system() == 'Linux':
    DEFAULT_CODEC = "ISO-8859-1"
else:
    DEFAULT_CODEC = None
class Words:
    """Compute word- and sentence-level statistics over a text file.

    Loads the whole file into ``normal_text`` and keeps a lowercased,
    space-collapsed copy in ``normalized_text`` for the counting methods.
    """

    def __init__(self, filename):
        """
        Input : text file name
        Do some operations to a text and return results.
        """
        with open(filename, "r", encoding=DEFAULT_CODEC) as fp:
            self.normal_text = fp.read().strip()
            self.normalized_text = self.normalize_text(self.normal_text)

    def all_characters_without_spaces(self, text):
        """Return the total number of characters, excluding space chars."""
        return len(text) - self.number_spaces(text)

    def differents_words_list(self, text):
        """Return the set of distinct words found in *text*."""
        return set(self.words_list(text))

    def average_sentence_length(self, text):
        """Return the average sentence length: words / sentences.

        Returns 0 when *text* contains no words or no sentences. (The
        previous implementation raised ZeroDivisionError on texts such
        as "a" that contain a word but no sentence longer than one
        character.)
        """
        words = self.words_list(text)
        sentences = self.sentence_split(text)
        if not words or not sentences:
            return 0
        return len(words) / len(sentences)

    def max_sentence_length(self, text):
        """Return the character length of the longest sentence, or 0
        when the text has no sentences."""
        # max(..., default=0) replaces the former broad try/except that
        # printed the ValueError raised on an empty sentence list.
        return max(map(len, self.sentence_split(text)), default=0)

    def min_sentence_length(self, text):
        """Return the character length of the shortest sentence, or 0
        when the text has no sentences."""
        return min(map(len, self.sentence_split(text)), default=0)

    @staticmethod
    def normalize_text(normal_text):
        """Collapse runs of spaces to a single space and lowercase the
        text, to normalize the input text."""
        return re.sub(' +', ' ', normal_text).lower()

    @staticmethod
    def number_spaces(text):
        """Return the number of space characters (' ') in the text."""
        return text.count(' ')

    @staticmethod
    def words_list(text):
        """Return the list of words in *text*.

        A word is a maximal run of ASCII letters ([a-zA-Z]+); the old
        docstring incorrectly claimed underscores were included.
        """
        return re.findall("[a-zA-Z]+", text)

    @staticmethod
    def sentence_split(text):
        """Split *text* into a list of sentences on '.', '!' or '?'.

        Fragments of one character or less (pre-strip) are discarded;
        the kept sentences are stripped of surrounding whitespace.
        Use len(sentence_split(text)) to get the number of sentences.
        """
        fragments = re.split('[.!?]', text)
        return [fragment.strip() for fragment in fragments if len(fragment) > 1]
# run the program
def run(filename, write_it):
wa = Words(filename)
# display text basic infos
if write_it:
with open("Output.txt", "w") as fp:
""" if argument value -w is equal to True redirect the output to
a text file if argument value -w is equal to False or not specified
the output will be redirected to the console """
with redirect_stdout(fp):
display(wa)
# display the top 'X' occurrences words
display_top_words(wa, MAX_DIPLAY)
display(wa)
display_top_words(wa, MAX_DIPLAY)
def display(wa):
"""Display all the stuffs on the screen"""
print('Total word count: {}'
.format(len(wa.words_list(wa.normalized_text))))
print('Number of different words: {}'
.format(len(wa.differents_words_list(wa.normalized_text))))
print('Total number of characters: {}'.format(len(wa.normal_text)))
print('Number of characters without spaces: {}'
.format(wa.all_characters_without_spaces(wa.normal_text)))
print('Number of spaces: {}'
.format(wa.number_spaces(wa.normal_text)))
print('Sentence count: {}'
.format(len(wa.sentence_split(wa.normalized_text))))
print('Average sentence length (Words): {0:.2f}'
.format(wa.average_sentence_length(wa.normalized_text)))
print('Max sentence length (Characters): {}'
.format(wa.max_sentence_length(wa.normalized_text)))
print('Min sentence length (Characters): {}'
.format(wa.min_sentence_length(wa.normalized_text)))
print('Lexical density: {0:.2f} %'
.format(lexical_density(wa.words_list(wa.normalized_text), FILE_LEXI)))
print('Language: {} \n'
.format(deduce_language(wa.words_list(wa.normalized_text), FILE_LEXI)))
def display_top_words(wa, max_display):
cp = 0
counts = Counter(wa.words_list(wa.normalized_text))
sorted_occurences = sorted(
counts.items(), key=operator.itemgetter(1), reverse=True)
print('Top 10 recurring words:\n')
print('{0:<30}{1:<30}{2:<30}'.format('# Ref', 'Occurrence', 'Perc'))
for occurence in sorted_occurences:
cp += 1
if cp <= max_display:
print('{0:<30}{1:<30}{2:.2f} %'.format
(
occurence[0],
occurence[1],
(occurence[1] * 100) / len(wa.words_list(wa.normalized_text)))
)
else:
break
def lexical_density(words_list, lexi_file_name):
""" calculates the lexical density.
L_d = (N_lex / N) * 100
Where:
L_d = the analyzed text's lexical density
N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)
in the analyzed text.
N = the number of all tokens (total number of words) in the analyzed text.
"""
l_d = 0
n_lex = 0
n = 0
with open(lexi_file_name, "r", encoding=DEFAULT_CODEC) as fp:
lexical_words = fp.read()
lexical_words = lexical_words.split(',')
for word in lexical_words:
counter = words_list.count(word)
n_lex += counter
counter = 0
n = len(words_list)
l_d = (n_lex / n) * 100
return l_d
def deduce_language(words_list, lexi_file_name):
"""
This function will deduce language between French and English.
Using the lexical words found on the text.
"""
with open(lexi_file_name, "r", encoding=DEFAULT_CODEC) as fp:
lexical_words = fp.read()
lexical_words = lexical_words.split(',')
for word in words_list:
if word in lexical_words:
if lexical_words.index(word) <= FRENCH_LIST_LENGTH:
return 'French'
else:
return 'English'
return 'Not found'
def show_process_time(t1_start, t1_stop, t2_start, t2_stop):
"""
function to show elapsed time.
"""
print('\n')
print('Elapsed time: {0:.4f} [sec]'.format(t1_stop - t1_start))
print('CPU process time: {0:.4f} [sec]'.format(t2_stop - t2_start))
print('Done.')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--file_name', default='test.txt', required=True,
help='The text file to analyze.', type=str)
parser.add_argument('-d', '--display', default=False,
help='display the output into a text file'
' use True/False to specify', type=bool)
args = parser.parse_args()
# compute time perf and time process
t1_start = time.perf_counter()
t2_start = time.process_time()
run(args.file_name, args.display)
t1_stop = time.perf_counter()
t2_stop = time.process_time()
show_process_time(t1_start, t1_stop, t2_start, t2_stop)
return 0
# main
if __name__ == '__main__':
sys.exit(main())
| words.py | 8,602 | Input : text file name
Do some operations to a text and return results.
count the total of characters without any space char
count the average length of sentences
avg = words / sentences
This function will deduce language between French and English.
Using the lexical words found on the text.
select only the total of different words,
it's a set, return the set.
Display all the stuffs on the screen
calculates the lexical density.
L_d = (N_lex / N) * 100
Where:
L_d = the analyzed text's lexical density
N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)
in the analyzed text.
N = the number of all tokens (total number of words) in the analyzed text.
count and return the maximum length
of sentences list
count and return the minimum length
of sentences list
remove extra spaces if any in the text
and put it in lowercase, to normalize the input text.
count the number of spaces in the text
split sentences into list of sentences.
return len(sentence_split(text)) to get the number of sentences.
function to show elapsed time.
get all words in a list
return the list of words [a-zA-Z_].
Words analyses by Amardjia Amine.
Free software utility which allows you to find the most frequent phrases
and frequencies of words. French and English language texts are supported.
It also counts number of words, characters, the lexical density,
sentences...etc.
https://github.com/Layto888/Words-Analysis
Usage in command line: python words.py -f [filename.txt] -d [True/False]
-h : for help.
Usage example: python words.py -f test.txt -d True
strip space from the sides. run the program display text basic infos display the top 'X' occurrences words compute time perf and time process main | 1,718 | en | 0.810108 |
import logging as log
import time
from . import gitlab
from .approvals import Approvals
GET, POST, PUT, DELETE = gitlab.GET, gitlab.POST, gitlab.PUT, gitlab.DELETE
class MergeRequest(gitlab.Resource):
@classmethod
def create(cls, api, project_id, params):
merge_request_info = api.call(POST(
'/projects/{project_id}/merge_requests'.format(project_id=project_id),
params,
))
merge_request = cls(api, merge_request_info)
return merge_request
@classmethod
def search(cls, api, project_id, params):
merge_requests = api.collect_all_pages(GET(
'/projects/{project_id}/merge_requests'.format(project_id=project_id),
params,
))
return [cls(api, merge_request) for merge_request in merge_requests]
@classmethod
def fetch_by_iid(cls, project_id, merge_request_iid, api):
merge_request = cls(api, {'iid': merge_request_iid, 'project_id': project_id})
merge_request.refetch_info()
return merge_request
@classmethod
def fetch_all_open_for_user(cls, project_id, user_id, api, merge_order):
all_merge_request_infos = api.collect_all_pages(GET(
'/projects/{project_id}/merge_requests'.format(project_id=project_id),
{'state': 'opened', 'order_by': merge_order, 'sort': 'asc'},
))
my_merge_request_infos = [
mri for mri in all_merge_request_infos
if ((mri.get('assignee', {}) or {}).get('id') == user_id) or
(user_id in [assignee.get('id') for assignee in (mri.get('assignees', []) or [])])
]
return [cls(api, merge_request_info) for merge_request_info in my_merge_request_infos]
@property
def project_id(self):
return self.info['project_id']
@property
def iid(self):
return self.info['iid']
@property
def title(self):
return self.info['title']
@property
def state(self):
return self.info['state']
@property
def merge_status(self):
return self.info['merge_status']
@property
def rebase_in_progress(self):
return self.info.get('rebase_in_progress', False)
@property
def merge_error(self):
return self.info.get('merge_error')
@property
def assignee_ids(self):
if 'assignees' in self.info:
return [assignee.get('id') for assignee in (self.info['assignees'] or [])]
return [(self.info.get('assignee', {}) or {}).get('id')]
@property
def author_id(self):
return self.info['author'].get('id')
@property
def source_branch(self):
return self.info['source_branch']
@property
def target_branch(self):
return self.info['target_branch']
@property
def sha(self):
return self.info['sha']
@property
def squash(self):
return self.info.get('squash', False) # missing means auto-squash not supported
@property
def source_project_id(self):
return self.info['source_project_id']
@property
def target_project_id(self):
return self.info['target_project_id']
@property
def work_in_progress(self):
return self.info['work_in_progress']
@property
def approved_by(self):
return self.info['approved_by']
@property
def web_url(self):
return self.info['web_url']
@property
def force_remove_source_branch(self):
return self.info['force_remove_source_branch']
def refetch_info(self):
self._info = self._api.call(GET('/projects/{0.project_id}/merge_requests/{0.iid}'.format(self)))
def comment(self, message):
if self._api.version().release >= (9, 2, 2):
notes_url = '/projects/{0.project_id}/merge_requests/{0.iid}/notes'.format(self)
else:
# GitLab botched the v4 api before 9.2.2
notes_url = '/projects/{0.project_id}/merge_requests/{0.id}/notes'.format(self)
return self._api.call(POST(notes_url, {'body': message}))
def rebase(self):
self.refetch_info()
if not self.rebase_in_progress:
self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}/rebase'.format(self),
))
else:
# We wanted to rebase and someone just happened to press the button for us!
log.info('A rebase was already in progress on the merge request!')
max_attempts = 30
wait_between_attempts_in_secs = 1
for _ in range(max_attempts):
self.refetch_info()
if not self.rebase_in_progress:
if self.merge_error:
raise MergeRequestRebaseFailed(self.merge_error)
return
time.sleep(wait_between_attempts_in_secs)
raise TimeoutError('Waiting for merge request to be rebased by GitLab')
def accept(self, remove_branch=False, sha=None, merge_when_pipeline_succeeds=True):
return self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}/merge'.format(self),
dict(
should_remove_source_branch=remove_branch,
merge_when_pipeline_succeeds=merge_when_pipeline_succeeds,
sha=sha or self.sha, # if provided, ensures what is merged is what we want (or fails)
),
))
def close(self):
return self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}'.format(self),
{'state_event': 'close'},
))
def assign_to(self, user_id):
return self._api.call(PUT(
'/projects/{0.project_id}/merge_requests/{0.iid}'.format(self),
{'assignee_id': user_id},
))
def unassign(self):
return self.assign_to(0)
def fetch_approvals(self):
# 'id' needed for for GitLab 9.2.2 hack (see Approvals.refetch_info())
info = {'id': self.id, 'iid': self.iid, 'project_id': self.project_id}
approvals = Approvals(self.api, info)
approvals.refetch_info()
return approvals
def fetch_commits(self):
return self._api.call(GET('/projects/{0.project_id}/merge_requests/{0.iid}/commits'.format(self)))
class MergeRequestRebaseFailed(Exception):
pass
| marge/merge_request.py | 6,331 | missing means auto-squash not supported GitLab botched the v4 api before 9.2.2 We wanted to rebase and someone just happened to press the button for us! if provided, ensures what is merged is what we want (or fails) 'id' needed for for GitLab 9.2.2 hack (see Approvals.refetch_info()) | 284 | en | 0.898337 |
import gdax
import os
import json
API_KEY = os.environ['GDAX_API_KEY']
API_SECRET = os.environ['GDAX_API_SECRET']
API_PASS = os.environ['GDAX_API_PASS']
def main():
'''
Cancels all bitcoin orders.
'''
client = gdax.AuthenticatedClient(API_KEY, API_SECRET, API_PASS)
r = client.cancel_all(product='LTC-USD')
print(json.dumps(r))
if __name__ == '__main__':
main()
| cancel.py | 395 | Cancels all bitcoin orders. | 27 | en | 0.535647 |
import pyupbit
import time
from datetime import datetime
# 초기화 준비
def init_prepairing(investable_coins_map, all_market_codes, all_market_names, order_money):
# 이전 투자 시 코인 별 전날 대비 상승률
prev_coins_map = pyupbit.get_prev_dict(investable_coins_map, all_market_codes, all_market_names)
# 투자할 만한 코인 목록 가져오기
investable_coins_map = get_investable_coin_map(all_market_codes, all_market_names)
slack_message = f"""
현재코인들 수익률 ::: {investable_coins_map}
이전코인들 수익률 ::: {prev_coins_map}
"""
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
# 투자 할 코인 1개 가져오기
best_coin = get_best_coin_name(investable_coins_map, prev_coins_map)
# 매수
init(best_coin, order_money)
# 계좌에 보유한 코인이 없는 상태로 만들고 -> 매수 시작!
def init(best_coin='', order_money=0):
init_counter = 0
print(f"이번시간에 투자할 코인은? {best_coin}")
# 가장 살만할 것 같은 코인 매수
response = pyupbit.order_best_coin(best_coin, order_money)
print(f'주문 결과 ::: {response} / uuid ::: {pyupbit.get_order_bid_uuid(response.json())}')
# 주문 성공 시 매수 완료 될 때 까지 대기
if 200 <= response.status_code <= 299:
# 매수 신청 후 매수 될 때까지 대기
while pyupbit.get_my_coin_info() is None:
# 1초에 한번 매수 되었는지 확인
time.sleep(1)
init_counter = init_counter + 1
print('매수 체결 대기 중...')
if init_counter >= 30:
print(f'아직 사지지 않았습니다. 30초 후 다시 초기화 작업 시작합니다..')
# 너무 오래 걸리면 주문 취소, 30초 후 다시 매수 시도
pyupbit.cancel_order(pyupbit.get_order_bid_uuid(response.json()))
time.sleep(30)
init(best_coin, order_money)
# 주문 실패 시 재 주문 시도(10초 후)
else:
print(f'재 주문 시도(10초 후 다시 초기화 작업 시작합니다.)...{response.status_code} / {response.json()}')
time.sleep(10)
init(best_coin, order_money)
# 투자해도 될 것 같은 코인 목록 조회
def get_investable_coin_map(market_codes=[], market_names=[]):
investable_coins_map = {}
i = 0
for code in market_codes:
# coin = { 코인 코드 : 현재가와 1차 저항선 간 차이% }
coin = pyupbit.get_investable_coins(code, market_names[i])
if coin is not None:
investable_coins_map.update(coin)
time.sleep(0.3)
i = i + 1
return investable_coins_map
# 투자해도 될 것 같은 코인 중 가장 좋을 것 같은 코인 조회
def get_best_coin_name(investable_coins_map={}, prev_coins_map={}):
while True:
if dict(investable_coins_map):
print(f'original_map ::: {investable_coins_map}')
if dict(prev_coins_map):
print(f'prev_coins_map ::: {prev_coins_map}')
# 코인 맵에서 이전 상승률 보다 현재 상승률이 낮은 코인 제거
filtered_map = pyupbit.map_filtering(prev_coins_map, investable_coins_map)
print(f'original_map :: {investable_coins_map} / filtered_map :: {filtered_map}')
investable_coins_map = filtered_map
if dict(investable_coins_map):
# investable_coins_map = { 코인 코드 : 현재가와 1차 저항선 간 차이% }
# 투자 대상 코인을 현재가와 1차 저항선 간 차이 기준으로 정렬(asc)
coins_map = sorted(investable_coins_map.items(), reverse=True, key=lambda item: item[1])
# 현재가와 1차 저항선 간 차이가 가장 작은 코인
best_coin = list(coins_map[0])[0]
# 현재가와 1차 저항선 간 차이
coin_dynamic_rate = list(coins_map[0])[1]
slack_message = f"best_coin ::: {best_coin} / change_rate(현재가 - 1차 저항선) ::: {coin_dynamic_rate}%"
print(slack_message)
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
return best_coin
else:
slack_message = f':meow_code: 살만한 코인이 없습니다.. 10분 후 다시 초기화 작업 시작합니다..'
print(slack_message)
time.sleep(600)
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
return recursive_get_investable_coin_map(prev_coins_map)
# 살만한 코인이 없는 경우 코인 목록 재 조회
def recursive_get_investable_coin_map(prev_coins_map={}):
# 전체 코인 코드
all_market_codes = pyupbit.all_market_names.view_market_codes()
# 전체 코인 이름
all_market_names = pyupbit.all_market_names.view_market_names()
investable_coins_map = get_investable_coin_map(all_market_codes, all_market_names)
return get_best_coin_name(investable_coins_map, prev_coins_map)
# 빡침 스코어 기록기(안씀)
def calc_profit_score(rage_score=0, prev_profit_rate=0, current_profit_rate=0):
"""
매도 할 타이밍은 스코어가 5점 이상인 경우로 한다.
1. 절대 수익률이 100% 보다 높은 경우
- 직전 수익률 보다 떨어졌을 때(+)
rage_score = rage_score + minus_change_rate * 2
- 직전 수익률 보다 올라갔을 때(-)
rage_score = rage_score + minus_change_rate / 2
2. 절대 수익률이 100% 보다 낮은 경우는 그냥 97% 미만일 때 매도 처리(빡침 스코어는 계산)
- 직전 수익률 보다 떨어졌을 때(+)
rage_score = rage_score + minus_change_rate * 2
- 직전 수익률 보다 올라갔을 때(-)
rage_score = rage_score + minus_change_rate * 1.5
3. 빡침 스코어가 마이너스인 경우 0으로 처리
"""
# 마이너스 변동폭(마이너스 / 플러스 반대)
minus_change_rate = prev_profit_rate - current_profit_rate
# 빡침 스코어 계산 하기!
# 수익률 100% 이상
if current_profit_rate >= 100:
# 하락중... (그냥 팔까...)
if minus_change_rate >= 0:
rage_score = rage_score + minus_change_rate * 3
# 상승중! (가즈아!!)
else:
rage_score = rage_score + minus_change_rate / 2
# 수익률 100% 미만
else:
# 하락중... (아..)
if minus_change_rate >= 0:
rage_score = rage_score + minus_change_rate * 2
# 상승중! (제발!!)
else:
rage_score = rage_score + minus_change_rate * 2
slack_message = f'현재 점수는 ::: {round(rage_score, 2)} / 변동폭은 ::: {round(-minus_change_rate, 2)}% / 직전 수익률은 ::: {prev_profit_rate}% / 현재 수익률은 ::: {current_profit_rate}%'
print(slack_message)
if rage_score >= 6.5:
pyupbit.send_message(pyupbit.get_slack_channel(), slack_message)
elif rage_score < 0:
rage_score = 0
return rage_score
# 매도 / 매수 메인 로직(안씀)
def working(market='', my_investment={}, prev_profit_rate=100, score=0, has_minus_exp=False):
# 해당 코인의 현재 상태(분 캔들) 조회
coin_candle = pyupbit.view_candle_min(market)
# 내가 매수 한 코인 단가
buy_unit_price = pyupbit.get_my_coin_unit_price(my_investment)
# 내 계좌에 남은 현금
#krw_balance = pyupbit.get_my_krw_balance(my_investment)
# 내 계좌에 남은 코인 수
#my_coin_balance = pyupbit.get_my_coin_total_amount(my_investment)
# 현재 코인 단가
current_unit_price = pyupbit.get_current_coin_price(coin_candle)
# 수익률(100%가 매수 시점 단가)
profit_rate = pyupbit.get_profit_rate(current_unit_price, buy_unit_price)
# 스코어(매도시점용)
score = calc_profit_score(score, prev_profit_rate, profit_rate)
slack_message1 = f"코인명 ::: {market}(현재빡침점수 : {round(score, 2)}), 매수단가 ::: {buy_unit_price}, 현재단가 ::: {current_unit_price}, 수익률 ::: {str(profit_rate)}%"
print(slack_message1)
if profit_rate < 100:
has_minus_exp = True
# 수익률 한번이라도 100% 미만인 경우 수익률 기준으로 매도 결정
if has_minus_exp and profit_rate >= 100:
pyupbit.sell_all()
pyupbit.send_message(pyupbit.get_slack_channel(), f'[구사일생으로 팔았음.-{str(datetime.today())}]' + slack_message1)
print('sell!!')
else:
# 매수할 만 하고 코인 단가가 내가 샀을때 보다 살짝 떨어져 있을 때 추가 매수 -> 일단 막기!!
# if target_price >= current_unit_price and 99 >= profit_rate >= 97:
# if krw_balance >= 10000:
# 추가 매수 기능 막음
# available_coin_amount = pyupbit.get_possible_order_volume(coin_candle, 10000)
# pyupbit.order_10000(market, available_coin_amount, 'bid')
# pyupbit.send_message('#myinvestment', f'[Buying!!-{str(datetime.today())}]' + slack_message1)
# print('buy!!')
# 매도 매수 시점 판단 빡침 스코어 기준으로 변경!
if score > 5:
pyupbit.sell_all()
pyupbit.send_message(pyupbit.get_slack_channel(), f'[빡쳐서 팔았음!!-{str(datetime.today())}]' + slack_message1)
print('sell!!')
# 수익률이 너무 떨어질 것 같을때 매도
elif profit_rate < 99:
pyupbit.sell_all()
pyupbit.send_message(pyupbit.get_slack_channel(), f'[하락해서 팔았음... -{str(datetime.today())}]' + slack_message1)
print('sell...')
# 그 외 상태일 경우
else:
print('thinking...')
# 수익률, 스코어 반환
return [profit_rate, score, has_minus_exp]
# 잘 될 것 같은 코인 계산(안씀)
def get_rocketboosting_coins(candle_data, market_name):
d = candle_data
# 코인 코드
market = pyupbit.get_market(d)
# 목표 코인 단가( 오늘 시작가 + (어제 고가 - 어제 저가) * 0.5 )
target_price = pyupbit.get_target_price_to_buy(market)
# 코인 현재 단가
current_price = pyupbit.get_current_coin_price(d)
# 전날 대비 변동 률
change_rate = pyupbit.get_change_rate(d)
coin_info = pyupbit.get_coin_info_with_candle(d, market_name)
# 현재 코인 단가가 목표가 보다 높고 단가가 1원 이상인 코인만 필터
if current_price >= target_price and pyupbit.get_today_opening_price(d) > 1:
print(f'대상 : {coin_info}')
pyupbit.send_message(pyupbit.get_slack_channel(), coin_info)
return {market: change_rate}
else:
#print(f'비대상 ::: {coin_info}')
return None
# 코인 변동률 맵 조회(전체)(안씀)
def get_coin_rate_map(market_codes=[]):
result_map = {}
for market in market_codes:
d = pyupbit.get_candle_data(market)
# 전날 대비 변동 률
change_rate = pyupbit.get_change_rate(d)
result_map.update({market: change_rate})
time.sleep(0.2)
return result_map
# 일 캔들 데이터로 코인 정보 조회
def get_coin_info_with_candle(d, market_name):
# 코인 코드
market = pyupbit.get_market(d)
# 목표 코인 단가( 오늘 시작가 + (어제 고가 - 어제 저가) * 0.5 )
target_price = pyupbit.get_target_price_to_buy(market)
# 코인 현재 단가
current_price = pyupbit.get_current_coin_price(d)
# 오늘 시가
today_open_price = pyupbit.get_today_opening_price(d)
# 어제 고가
prev_high_price = pyupbit.get_yesterday_high_price(d)
# 어제 저가
prev_low_price = pyupbit.get_yesterday_low_price(d)
# 기준선
standard_price = pyupbit.calc_standard_line(prev_high_price, prev_low_price, today_open_price)
# 1차 지지선
first_low_price = pyupbit.first_lower_line(standard_price, prev_high_price)
# 2차 지지선
second_low_price = pyupbit.second_lower_line(standard_price, prev_high_price, prev_low_price)
# 1차 저항선
first_high_price = pyupbit.first_higher_line(standard_price, prev_low_price)
# 2차 저항선
second_high_price = pyupbit.second_higher_line(standard_price, prev_high_price, prev_low_price)
coin_info = f"""
현재시간 : {datetime.today()}
코인명: {market} ({market_name}:{str(pyupbit.get_change_rate(d))}%)
opening_p:{str(pyupbit.get_today_opening_price(d))}
high_p(오늘[어제]):{str(pyupbit.get_today_high_price(d))}[{str(pyupbit.get_yesterday_high_price(d))}]
low_p(오늘[어제]):{str(pyupbit.get_today_low_price(d))}[{str(pyupbit.get_yesterday_low_price(d))}]
prev_p:{str(pyupbit.get_yesterday_close_price(d))}
change_p:{str(pyupbit.get_change_price(d))}
기준선 : {standard_price}
1차 지지선 : {first_low_price}
2차 지지선 : {second_low_price}
1차 저항선 : {first_high_price}
2차 저항선 : {second_high_price}
목표가 : {first_high_price}
현재가 : {current_price}
"""
return coin_info
# 목표 코인 단가 계산(안씀)
def get_target_price_to_buy(market="KRW-BTC"):
d = pyupbit.get_candle_data(market)
return d[0]['opening_price'] + (d[1]['high_price'] - d[1]['low_price']) * 0.5
"""
맵 객체 값으로 나쁜 코인 필터링(수익률 필터링)
직전 수익률과 현재 수익률 기준으로
투자 하지 말아야 할 코인들 필터링(직전 보다 현재 가격이 같거나 높은 코인들.. old_value <= new_value)
"""
# 나쁜 코인 필터링
def map_filtering(original_map, new_map):
bad_arr = []
for old_key, old_value in original_map.items():
if old_key in new_map:
new_value = new_map[old_key]
# 요 부등호가 중요함!
if old_value >= new_value:
bad_arr.append(old_key)
print(f'나쁜코인목록 ::: {bad_arr}')
for old_key in bad_arr:
new_map.pop(old_key, None)
return new_map
| pyupbit/strategy.py | 14,197 | 매도 할 타이밍은 스코어가 5점 이상인 경우로 한다.
1. 절대 수익률이 100% 보다 높은 경우
- 직전 수익률 보다 떨어졌을 때(+)
rage_score = rage_score + minus_change_rate * 2
- 직전 수익률 보다 올라갔을 때(-)
rage_score = rage_score + minus_change_rate / 2
2. 절대 수익률이 100% 보다 낮은 경우는 그냥 97% 미만일 때 매도 처리(빡침 스코어는 계산)
- 직전 수익률 보다 떨어졌을 때(+)
rage_score = rage_score + minus_change_rate * 2
- 직전 수익률 보다 올라갔을 때(-)
rage_score = rage_score + minus_change_rate * 1.5
3. 빡침 스코어가 마이너스인 경우 0으로 처리
초기화 준비 이전 투자 시 코인 별 전날 대비 상승률 투자할 만한 코인 목록 가져오기 투자 할 코인 1개 가져오기 매수 계좌에 보유한 코인이 없는 상태로 만들고 -> 매수 시작! 가장 살만할 것 같은 코인 매수 주문 성공 시 매수 완료 될 때 까지 대기 매수 신청 후 매수 될 때까지 대기 1초에 한번 매수 되었는지 확인 너무 오래 걸리면 주문 취소, 30초 후 다시 매수 시도 주문 실패 시 재 주문 시도(10초 후) 투자해도 될 것 같은 코인 목록 조회 coin = { 코인 코드 : 현재가와 1차 저항선 간 차이% } 투자해도 될 것 같은 코인 중 가장 좋을 것 같은 코인 조회 코인 맵에서 이전 상승률 보다 현재 상승률이 낮은 코인 제거 investable_coins_map = { 코인 코드 : 현재가와 1차 저항선 간 차이% } 투자 대상 코인을 현재가와 1차 저항선 간 차이 기준으로 정렬(asc) 현재가와 1차 저항선 간 차이가 가장 작은 코인 현재가와 1차 저항선 간 차이 살만한 코인이 없는 경우 코인 목록 재 조회 전체 코인 코드 전체 코인 이름 빡침 스코어 기록기(안씀) 마이너스 변동폭(마이너스 / 플러스 반대) 빡침 스코어 계산 하기! 수익률 100% 이상 하락중... (그냥 팔까...) 상승중! (가즈아!!) 수익률 100% 미만 하락중... (아..) 상승중! (제발!!) 매도 / 매수 메인 로직(안씀) 해당 코인의 현재 상태(분 캔들) 조회 내가 매수 한 코인 단가 내 계좌에 남은 현금krw_balance = pyupbit.get_my_krw_balance(my_investment) 내 계좌에 남은 코인 수my_coin_balance = pyupbit.get_my_coin_total_amount(my_investment) 현재 코인 단가 수익률(100%가 매수 시점 단가) 스코어(매도시점용) 수익률 한번이라도 100% 미만인 경우 수익률 기준으로 매도 결정 매수할 만 하고 코인 단가가 내가 샀을때 보다 살짝 떨어져 있을 때 추가 매수 -> 일단 막기!! if target_price >= current_unit_price and 99 >= profit_rate >= 97: if krw_balance >= 10000: 추가 매수 기능 막음 available_coin_amount = pyupbit.get_possible_order_volume(coin_candle, 10000) pyupbit.order_10000(market, available_coin_amount, 'bid') pyupbit.send_message('myinvestment', f'[Buying!!-{str(datetime.today())}]' + slack_message1) print('buy!!') 매도 매수 시점 판단 빡침 스코어 기준으로 변경! 수익률이 너무 떨어질 것 같을때 매도 그 외 상태일 경우 수익률, 스코어 반환 잘 될 것 같은 코인 계산(안씀) 코인 코드 목표 코인 단가( 오늘 시작가 + (어제 고가 - 어제 저가) * 0.5 ) 코인 현재 단가 전날 대비 변동 률 현재 코인 단가가 목표가 보다 높고 단가가 1원 이상인 코인만 필터print(f'비대상 ::: {coin_info}') 코인 변동률 맵 조회(전체)(안씀) 전날 대비 변동 률 일 캔들 데이터로 코인 정보 조회 코인 코드 목표 코인 단가( 오늘 시작가 + (어제 고가 - 어제 저가) * 0.5 ) 코인 현재 단가 오늘 시가 어제 고가 어제 저가 기준선 1차 지지선 2차 지지선 1차 저항선 2차 저항선 목표 코인 단가 계산(안씀) 나쁜 코인 필터링 요 부등호가 중요함! | 2,226 | ko | 0.999985 |
import itertools
import multiprocessing as mp
import os
import pickle
import random
import string
import tempfile
from concurrent.futures import ProcessPoolExecutor
from copy import copy
from functools import partial
from unittest import mock
import numpy as np
import pandas as pd
import pytest
import dask
import dask.dataframe as dd
from dask import delayed
from dask.base import compute_as_if_collection
from dask.dataframe._compat import PANDAS_GT_120, assert_categorical_equal, tm
from dask.dataframe.shuffle import (
_noop,
maybe_buffered_partd,
partitioning_index,
rearrange_by_column,
rearrange_by_divisions,
remove_nans,
shuffle,
)
from dask.dataframe.utils import assert_eq, make_meta
from dask.optimization import cull
# Three hand-built pandas partitions keyed like a dask graph: keys ("x", i)
# with increasing index values (the last partition repeats index 9, matching
# the duplicate final division below).
dsk = {
    ("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [1, 4, 7]}, index=[0, 1, 3]),
    ("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [2, 5, 8]}, index=[5, 6, 8]),
    ("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [3, 6, 9]}, index=[9, 9, 9]),
}
# Empty metadata frame declaring the dtypes: int64 columns "a"/"b", int64 index.
meta = make_meta(
    {"a": "i8", "b": "i8"}, index=pd.Index([], "i8"), parent_meta=pd.DataFrame()
)
# Shared dask DataFrame under test, with divisions [0, 4, 9, 9].
d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
# Fully materialized pandas equivalent of `d`, for comparisons.
full = d.compute()
# Extra kwargs forwarded to equality checks: pandas >= 1.1 compares index
# freq, which shuffling does not preserve, so it is disabled there.
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
    CHECK_FREQ["check_freq"] = False
# Alias so tests can still call the shuffle function where a `shuffle=`
# keyword argument would shadow the name.
shuffle_func = shuffle  # conflicts with keyword argument
def test_shuffle(shuffle_method):
    """Shuffling on a column sends each key value to exactly one partition."""
    shuffled = shuffle_func(d, d.b, shuffle=shuffle_method)
    assert isinstance(shuffled, dd.DataFrame)
    assert shuffled.npartitions == d.npartitions

    # Materialize two output partitions; no b-value may appear in both.
    part0 = dask.get(shuffled.dask, (shuffled._name, 0))
    part1 = dask.get(shuffled.dask, (shuffled._name, 1))
    assert set(part0.b).isdisjoint(set(part1.b))

    # The shuffled graph extends (rather than replaces) the input graph.
    assert set(shuffled.dask) >= set(d.dask)

    # Token names are deterministic for identical inputs.
    assert shuffle_func(d, d.b)._name == shuffle_func(d, d.b)._name
def test_default_partitions():
    """Without an explicit npartitions, shuffle keeps the input's count."""
    shuffled = shuffle(d, d.b)
    assert shuffled.npartitions == d.npartitions
def test_shuffle_npartitions(shuffle_method):
    """Repartitioning upward (10 -> 17) preserves rows, columns, and the graph."""
    pdf = pd.DataFrame({"x": np.random.random(100)})
    ddf = dd.from_pandas(pdf, npartitions=10)
    shuffled = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=17, max_branch=4)
    result = shuffled.compute()

    assert shuffled.npartitions == 17
    assert set(shuffled.dask) >= set(ddf.dask)
    assert len(result) == len(pdf)
    assert list(shuffled.columns) == list(pdf.columns)
    # Same set of rows, just redistributed across partitions.
    assert {tuple(row) for row in result.values.tolist()} == {
        tuple(row) for row in pdf.values.tolist()
    }
def test_shuffle_npartitions_lt_input_partitions(shuffle_method):
    """Repartitioning downward (20 -> 5) with a narrow tree also preserves data."""
    pdf = pd.DataFrame({"x": np.random.random(100)})
    ddf = dd.from_pandas(pdf, npartitions=20)
    shuffled = shuffle(ddf, ddf.x, shuffle=shuffle_method, npartitions=5, max_branch=2)
    result = shuffled.compute()

    assert shuffled.npartitions == 5
    assert set(shuffled.dask) >= set(ddf.dask)
    assert len(result) == len(pdf)
    assert list(shuffled.columns) == list(pdf.columns)
    # Same set of rows, just redistributed across partitions.
    assert {tuple(row) for row in result.values.tolist()} == {
        tuple(row) for row in pdf.values.tolist()
    }
def test_index_with_non_series(shuffle_method):
    """Passing a column name shuffles the same as passing the Series itself."""
    from dask.dataframe.tests.test_multi import list_eq

    by_series = shuffle(d, d.b, shuffle=shuffle_method)
    by_name = shuffle(d, "b", shuffle=shuffle_method)
    list_eq(by_series, by_name)
def test_index_with_dataframe(shuffle_method):
    """A one-column frame, a list, and a bare name all shuffle identically."""
    results = [
        shuffle(d, key, shuffle=shuffle_method).compute()
        for key in (d[["b"]], ["b"], "b")
    ]
    baseline = sorted(results[0].values.tolist())
    assert baseline == sorted(results[1].values.tolist())
    assert baseline == sorted(results[2].values.tolist())
def test_shuffle_from_one_partition_to_one_other(shuffle_method):
    """Shuffling a single-partition frame to 1 or 2 partitions loses no rows."""
    pdf = pd.DataFrame({"x": [1, 2, 3]})
    source = dd.from_pandas(pdf, 1)
    for target_npartitions in (1, 2):
        shuffled = shuffle(
            source, "x", npartitions=target_npartitions, shuffle=shuffle_method
        )
        n_before = len(source.compute(scheduler="sync"))
        n_after = len(shuffled.compute(scheduler="sync"))
        assert n_before == n_after
def test_shuffle_empty_partitions(shuffle_method):
    """Partitions left empty by the shuffle still carry the right columns."""
    pdf = pd.DataFrame({"x": [1, 2, 3] * 10})
    ddf = dd.from_pandas(pdf, npartitions=3)
    shuffled = shuffle(ddf, ddf.x, npartitions=6, shuffle=shuffle_method)
    parts = compute_as_if_collection(
        dd.DataFrame, shuffled.dask, shuffled.__dask_keys__()
    )
    for part in parts:
        assert shuffled.columns == part.columns
# A frame with one column from every dtype family that partitioning_index must
# be able to hash: int32, float32 (with NaN), categorical, object strings,
# booleans, naive and tz-aware datetimes, and timedeltas (9 rows each).
df2 = pd.DataFrame(
    {
        "i32": np.array([1, 2, 3] * 3, dtype="int32"),
        "f32": np.array([None, 2.5, 3.5] * 3, dtype="float32"),
        "cat": pd.Series(["a", "b", "c"] * 3).astype("category"),
        "obj": pd.Series(["d", "e", "f"] * 3),
        "bool": np.array([True, False, True] * 3),
        "dt": pd.Series(pd.date_range("20130101", periods=9)),
        "dt_tz": pd.Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),
        "td": pd.Series(pd.timedelta_range("2000", periods=9)),
    }
)
def test_partitioning_index():
    """partitioning_index maps values into [0, npartitions) deterministically."""
    res = partitioning_index(df2.i32, 3)
    assert ((0 <= res) & (res < 3)).all()
    assert len(np.unique(res)) > 1
    # Hashing is stable: identical input always yields identical partition ids.
    assert (partitioning_index(df2.i32, 3) == partitioning_index(df2.i32, 3)).all()

    # A one-column frame behaves like the bare Series.
    res = partitioning_index(df2[["i32"]], 3)
    assert ((0 <= res) & (res < 3)).all()
    assert len(np.unique(res)) > 1

    # Mixed dtypes (categorical, bool, float) hash within range too.
    res = partitioning_index(df2[["cat", "bool", "f32"]], 2)
    assert ((0 <= res) & (res < 2)).all()

    # The index itself can be used as the partitioning key.
    res = partitioning_index(df2.index, 4)
    assert ((0 <= res) & (res < 4)).all()
    assert len(np.unique(res)) > 1
def test_partitioning_index_categorical_on_values():
    """Partitioning hashes categorical *values*, not their category codes."""
    frame = pd.DataFrame({"a": list(string.ascii_letters), "b": [1, 2, 3, 4] * 13})
    frame.a = frame.a.astype("category")
    reordered = frame.copy()
    # Reversing the category order changes the codes but not the values.
    reordered.a = reordered.a.cat.set_categories(
        list(reversed(reordered.a.cat.categories))
    )

    # Series form and whole-frame form must both be code-order independent.
    assert (partitioning_index(frame.a, 5) == partitioning_index(reordered.a, 5)).all()
    assert (partitioning_index(frame, 5) == partitioning_index(reordered, 5)).all()
@pytest.mark.parametrize(
    "npartitions", [1, 4, 7, pytest.param(23, marks=pytest.mark.slow)]
)
def test_set_index_general(npartitions, shuffle_method):
    """set_index agrees with pandas for columns, Series, expressions, and the index."""
    pdf = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(pdf, npartitions=npartitions)

    # Column-name and Series forms.
    assert_eq(pdf.set_index("x"), ddf.set_index("x", shuffle=shuffle_method))
    assert_eq(pdf.set_index("y"), ddf.set_index("y", shuffle=shuffle_method))
    assert_eq(pdf.set_index(pdf.x), ddf.set_index(ddf.x, shuffle=shuffle_method))

    # Derived-expression forms.
    assert_eq(
        pdf.set_index(pdf.x + pdf.y),
        ddf.set_index(ddf.x + ddf.y, shuffle=shuffle_method),
    )
    assert_eq(
        pdf.set_index(pdf.x + 1), ddf.set_index(ddf.x + 1, shuffle=shuffle_method)
    )

    # Re-using the existing index.
    assert_eq(
        pdf.set_index(pdf.index), ddf.set_index(ddf.index, shuffle=shuffle_method)
    )
def test_set_index_self_index(shuffle_method):
    """set_index on the frame's own index short-circuits to the same object."""
    pdf = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(pdf, npartitions=4)
    result = ddf.set_index(ddf.index, shuffle=shuffle_method)
    # Identity, not merely equality.
    assert result is ddf
    assert_eq(result, pdf.set_index(pdf.index))
def test_set_index_names(shuffle_method):
    """Graph keys are deterministic and change when set_index arguments change."""
    if shuffle_method == "disk":
        pytest.xfail("dsk names in disk shuffle are not deterministic")
    df = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(df, npartitions=4)
    # Identical calls must produce identical graph key sets...
    assert set(ddf.set_index("x", shuffle=shuffle_method).dask) == set(
        ddf.set_index("x", shuffle=shuffle_method).dask
    )
    # ...while a different column, max_branch, or drop yields different keys.
    assert set(ddf.set_index("x", shuffle=shuffle_method).dask) != set(
        ddf.set_index("y", shuffle=shuffle_method).dask
    )
    assert set(ddf.set_index("x", max_branch=4, shuffle=shuffle_method).dask) != set(
        ddf.set_index("x", max_branch=3, shuffle=shuffle_method).dask
    )
    assert set(ddf.set_index("x", drop=True, shuffle=shuffle_method).dask) != set(
        ddf.set_index("x", drop=False, shuffle=shuffle_method).dask
    )
def test_set_index_2(shuffle_method):
    """set_index on a string column of a synthetic timeseries computes cleanly."""
    df = dd.demo.make_timeseries(
        "2000",
        "2004",
        {"value": float, "name": str, "id": int},
        freq="2H",
        partition_freq="1M",
        seed=1,
    )
    df2 = df.set_index("name", shuffle=shuffle_method)
    # Smoke test: a reduction over the re-indexed frame must compute.
    df2.value.sum().compute(scheduler="sync")
def test_set_index_3(shuffle_method):
    """set_index with an explicit max_branch keeps the requested partition count."""
    pdf = pd.DataFrame(np.random.random((10, 2)), columns=["x", "y"])
    ddf = dd.from_pandas(pdf, npartitions=5)
    result = ddf.set_index(
        "x", shuffle=shuffle_method, max_branch=2, npartitions=ddf.npartitions
    )
    assert_eq(pdf.set_index("x"), result)
    assert result.npartitions == ddf.npartitions
def test_shuffle_sort(shuffle_method):
    """A shuffled set_index supports label-based slicing like sorted pandas."""
    pdf = pd.DataFrame({"x": [1, 2, 3, 2, 1], "y": [9, 8, 7, 1, 5]})
    expected = pdf.set_index("x").sort_index()
    shuffled = dd.from_pandas(pdf, npartitions=3).set_index("x", shuffle=shuffle_method)
    assert_eq(shuffled.loc[2:3], expected.loc[2:3])
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_rearrange(shuffle_method, scheduler):
    """rearrange_by_column keeps the partition count and routes each
    ``_partitions`` value into exactly one output partition."""
    df = pd.DataFrame({"x": np.random.random(10)})
    ddf = dd.from_pandas(df, npartitions=4)
    ddf2 = ddf.assign(_partitions=ddf.x % 4)
    result = rearrange_by_column(
        ddf2, "_partitions", max_branch=32, shuffle=shuffle_method
    )
    assert result.npartitions == ddf.npartitions
    # The input graph must be reused, not rebuilt.
    assert set(ddf.dask).issubset(result.dask)
    # Every value in exactly one partition
    a = result.compute(scheduler=scheduler)
    get = dask.base.get_scheduler(scheduler=scheduler)
    # Materialize each output partition separately to inspect routing.
    parts = get(result.dask, result.__dask_keys__())
    for i in a._partitions.drop_duplicates():
        assert sum(i in set(part._partitions) for part in parts) == 1
def test_rearrange_cleanup():
    """Disk-based rearrange must delete its temporary partd files.

    Points dask's ``temporary_directory`` at a fresh tmpdir so the on-disk
    shuffle writes there; after compute the directory must be empty again.
    """
    df = pd.DataFrame({"x": np.random.random(10)})
    ddf = dd.from_pandas(df, npartitions=4)
    ddf2 = ddf.assign(_partitions=ddf.x % 4)
    tmpdir = tempfile.mkdtemp()
    # Fix: the config key was misspelled "temporay_directory", which set an
    # unused entry — the shuffle then used the default tempdir and the final
    # assertion was vacuously true.
    with dask.config.set(temporary_directory=str(tmpdir)):
        result = rearrange_by_column(ddf2, "_partitions", max_branch=32, shuffle="disk")
        result.compute(scheduler="processes")
    assert len(os.listdir(tmpdir)) == 0
def mock_shuffle_group_3(df, col, npartitions, p):
    """Stand-in for ``shuffle_group_3`` that always fails, to exercise cleanup paths."""
    message = "Mock exception!"
    raise ValueError(message)
def test_rearrange_disk_cleanup_with_exception():
    """Temporary partd files are removed even when the shuffle itself fails."""
    # ensure temporary files are cleaned up when there's an internal exception.
    with mock.patch("dask.dataframe.shuffle.shuffle_group_3", new=mock_shuffle_group_3):
        df = pd.DataFrame({"x": np.random.random(10)})
        ddf = dd.from_pandas(df, npartitions=4)
        ddf2 = ddf.assign(_partitions=ddf.x % 4)
        tmpdir = tempfile.mkdtemp()
        # Fix: the key was misspelled "temporay_directory", so the disk shuffle
        # never actually targeted tmpdir and the cleanup assertion was vacuous.
        with dask.config.set(temporary_directory=str(tmpdir)):
            with pytest.raises(ValueError, match="Mock exception!"):
                result = rearrange_by_column(
                    ddf2, "_partitions", max_branch=32, shuffle="disk"
                )
                result.compute(scheduler="processes")
    assert len(os.listdir(tmpdir)) == 0
def test_rearrange_by_column_with_narrow_divisions():
    """rearrange_by_divisions round-trips all rows across narrow divisions."""
    from dask.dataframe.tests.test_multi import list_eq

    pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": [1, 1, 2, 2, 3, 4]})
    narrow = dd.repartition(pdf, [0, 4, 5])
    rearranged = rearrange_by_divisions(narrow, "x", (0, 2, 5))
    list_eq(rearranged, narrow)
def test_maybe_buffered_partd(tmp_path):
    """maybe_buffered_partd returns a Buffer-backed factory that degrades to a
    plain File after pickling, and honors an explicit tempdir."""
    import partd

    f = maybe_buffered_partd()
    p1 = f()
    assert isinstance(p1.partd, partd.Buffer)
    # Pickling the factory drops the in-memory buffer...
    f2 = pickle.loads(pickle.dumps(f))
    assert not f2.buffer
    # ...so the rebuilt factory produces an unbuffered File store.
    p2 = f2()
    assert isinstance(p2.partd, partd.File)
    f3 = maybe_buffered_partd(tempdir=tmp_path)
    p3 = f3()
    assert isinstance(p3.partd, partd.Buffer)
    # The on-disk store must live inside the requested tempdir.
    contents = list(tmp_path.iterdir())
    assert len(contents) == 1
    assert contents[0].suffix == ".partd"
    assert contents[0].parent == tmp_path
    # The tempdir choice survives a pickle round-trip, the buffer does not.
    f4 = pickle.loads(pickle.dumps(f3))
    assert not f4.buffer
    assert f4.tempdir == tmp_path
def test_set_index_with_explicit_divisions():
    """Explicit divisions are adopted without triggering any compute."""
    df = pd.DataFrame({"x": [4, 1, 2, 5]}, index=[10, 20, 30, 40])
    ddf = dd.from_pandas(df, npartitions=2)

    def throw(*args, **kwargs):
        # Any scheduler call inside the config block is a test failure.
        raise Exception()

    with dask.config.set(get=throw):
        ddf2 = ddf.set_index("x", divisions=[1, 3, 5])
    assert ddf2.divisions == (1, 3, 5)
    df2 = df.set_index("x")
    assert_eq(ddf2, df2)
    # Divisions must be sorted
    with pytest.raises(ValueError):
        ddf.set_index("x", divisions=[3, 1, 5])
def test_set_index_divisions_2():
    """Explicit string divisions are honored and the result is index-sorted."""
    pdf = pd.DataFrame({"x": [1, 2, 3, 4, 5, 6], "y": list("abdabd")})
    result = dd.from_pandas(pdf, 2).set_index("y", divisions=["a", "c", "d"])
    assert result.divisions == ("a", "c", "d")
    tail = result.compute(scheduler="sync").index[-2:]
    assert list(tail) == ["d", "d"]
def test_set_index_divisions_compute():
    """compute=True pre-computes the shuffle, shrinking the graph but not the
    result.

    NOTE(review): relies on module-level fixtures ``d`` (dask frame) and
    ``full`` (its pandas counterpart) defined elsewhere in this file.
    """
    d2 = d.set_index("b", divisions=[0, 2, 9], compute=False)
    d3 = d.set_index("b", divisions=[0, 2, 9], compute=True)
    assert_eq(d2, d3)
    assert_eq(d2, full.set_index("b"))
    assert_eq(d3, full.set_index("b"))
    # The eagerly-computed variant carries a smaller task graph.
    assert len(d2.dask) > len(d3.dask)
    d4 = d.set_index(d.b, divisions=[0, 2, 9], compute=False)
    d5 = d.set_index(d.b, divisions=[0, 2, 9], compute=True)
    exp = full.copy()
    exp.index = exp.b
    assert_eq(d4, d5)
    assert_eq(d4, exp)
    assert_eq(d5, exp)
    assert len(d4.dask) > len(d5.dask)
def test_set_index_divisions_sorted():
    """sorted=True adopts the given divisions without computing, and validates
    the division count and ordering."""
    p1 = pd.DataFrame({"x": [10, 11, 12], "y": ["a", "a", "a"]})
    p2 = pd.DataFrame({"x": [13, 14, 15], "y": ["b", "b", "c"]})
    p3 = pd.DataFrame({"x": [16, 17, 18], "y": ["d", "e", "e"]})
    ddf = dd.DataFrame(
        {("x", 0): p1, ("x", 1): p2, ("x", 2): p3}, "x", p1, [None, None, None, None]
    )
    df = ddf.compute()

    def throw(*args, **kwargs):
        # Fails the test if set_index triggers any scheduler work.
        raise Exception("Shouldn't have computed")

    with dask.config.set(get=throw):
        res = ddf.set_index("x", divisions=[10, 13, 16, 18], sorted=True)
    assert_eq(res, df.set_index("x"))
    with dask.config.set(get=throw):
        res = ddf.set_index("y", divisions=["a", "b", "d", "e"], sorted=True)
    assert_eq(res, df.set_index("y"))
    # with sorted=True, divisions must be same length as df.divisions
    with pytest.raises(ValueError):
        ddf.set_index("y", divisions=["a", "b", "c", "d", "e"], sorted=True)
    # Divisions must be sorted
    with pytest.raises(ValueError):
        ddf.set_index("y", divisions=["a", "b", "d", "c"], sorted=True)
@pytest.mark.slow
def test_set_index_consistent_divisions():
    """set_index must compute identical divisions across processes."""
    # See https://github.com/dask/dask/issues/3867
    df = pd.DataFrame(
        {"x": np.random.random(100), "y": np.random.random(100) // 0.2},
        index=np.random.random(100),
    )
    ddf = dd.from_pandas(df, npartitions=4)
    ddf = ddf.clear_divisions()
    # Run the same set_index 100 times in 8 spawned workers; every run must
    # land on the same divisions tuple.
    ctx = mp.get_context("spawn")
    with ProcessPoolExecutor(8, ctx) as pool:
        func = partial(_set_index, df=ddf, idx="x")
        divisions_set = set(pool.map(func, range(100)))
    assert len(divisions_set) == 1
def _set_index(i, df, idx):
    # Helper for test_set_index_consistent_divisions: ``i`` is a throwaway
    # argument from pool.map; returns the divisions produced by set_index.
    return df.set_index(idx).divisions
def test_set_index_reduces_partitions_small(shuffle_method):
    """npartitions="auto" collapses many tiny partitions into a handful."""
    pdf = pd.DataFrame({"x": np.random.random(100)})
    ddf = dd.from_pandas(pdf, npartitions=50)
    result = ddf.set_index("x", shuffle=shuffle_method, npartitions="auto")
    assert result.npartitions < 10
def make_part(n):
    """Build an ``n``-row DataFrame of random float columns x and y."""
    data = {column: np.random.random(n) for column in ("x", "y")}
    return pd.DataFrame(data)
def test_set_index_reduces_partitions_large(shuffle_method):
    """partition_size caps output partitions well below the 50 inputs."""
    nbytes = 1e6
    nparts = 50
    # Rows per partition so that total data is roughly nbytes.
    n = int(nbytes / (nparts * 8))
    # Hand-built graph: 50 lazily-created random partitions, unknown divisions.
    ddf = dd.DataFrame(
        {("x", i): (make_part, n) for i in range(nparts)},
        "x",
        make_part(1),
        [None] * (nparts + 1),
    )
    ddf2 = ddf.set_index(
        "x", shuffle=shuffle_method, npartitions="auto", partition_size=nbytes
    )
    assert 1 < ddf2.npartitions < 20
def test_set_index_doesnt_increase_partitions(shuffle_method):
    """npartitions="auto" never produces more partitions than the input had."""
    nparts = 2
    nbytes = 1e6
    # Rows per partition so that total data is roughly nbytes.
    n = int(nbytes / (nparts * 8))
    # Hand-built graph: 2 lazily-created random partitions, unknown divisions.
    ddf = dd.DataFrame(
        {("x", i): (make_part, n) for i in range(nparts)},
        "x",
        make_part(1),
        [None] * (nparts + 1),
    )
    ddf2 = ddf.set_index(
        "x", shuffle=shuffle_method, npartitions="auto", partition_size=nbytes
    )
    assert ddf2.npartitions <= ddf.npartitions
def test_set_index_detects_sorted_data(shuffle_method):
    """Indexing an already-sorted column should yield a small graph."""
    pdf = pd.DataFrame({"x": range(100), "y": range(100)})
    ddf = dd.from_pandas(pdf, npartitions=10, name="x", sort=False)
    indexed = ddf.set_index("x", shuffle=shuffle_method)
    # A full shuffle would add several tasks per partition; sorted data
    # should stay well under 4 tasks per input partition.
    assert len(indexed.dask) < ddf.npartitions * 4
def test_set_index_sorts():
    """set_index on duplicated, unordered timestamps yields a monotonic index."""
    # https://github.com/dask/dask/issues/2288
    # Nanosecond epoch timestamps with many duplicates and local disorder.
    vals = np.array(
        [
            1348550149000000000,
            1348550149000000000,
            1348558142000000000,
            1348558142000000000,
            1348585928000000000,
            1348585928000000000,
            1348600739000000000,
            1348601706000000000,
            1348600739000000000,
            1348601706000000000,
            1348614789000000000,
            1348614789000000000,
            1348621037000000000,
            1348621038000000000,
            1348621040000000000,
            1348621037000000000,
            1348621038000000000,
            1348621040000000000,
            1348637628000000000,
            1348638159000000000,
            1348638160000000000,
            1348638159000000000,
            1348638160000000000,
            1348637628000000000,
            1348646354000000000,
            1348646354000000000,
            1348659107000000000,
            1348657111000000000,
            1348659107000000000,
            1348657111000000000,
            1348672876000000000,
            1348672876000000000,
            1348682787000000000,
            1348681985000000000,
            1348682787000000000,
            1348681985000000000,
            1348728167000000000,
            1348728167000000000,
            1348730745000000000,
            1348730745000000000,
            1348750198000000000,
            1348750198000000000,
            1348750198000000000,
            1348753539000000000,
            1348753539000000000,
            1348753539000000000,
            1348754449000000000,
            1348754449000000000,
            1348761333000000000,
            1348761554000000000,
            1348761610000000000,
            1348761333000000000,
            1348761554000000000,
            1348761610000000000,
            1348782624000000000,
            1348782624000000000,
            1348782624000000000,
            1348782624000000000,
        ]
    )
    vals = pd.to_datetime(vals, unit="ns")
    # Split the values into three unequal chunks to form three partitions.
    breaks = [10, 36, 58]
    dfs = []
    for i in range(len(breaks)):
        lo = sum(breaks[:i])
        hi = sum(breaks[i : i + 1])
        dfs.append(pd.DataFrame({"timestamp": vals[lo:hi]}, index=range(lo, hi)))
    ddf = dd.concat(dfs).clear_divisions()
    assert ddf.set_index("timestamp").index.compute().is_monotonic_increasing is True
@pytest.mark.parametrize(
    "engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index(engine):
    """set_index by name, Series, and single-element list matches pandas."""
    if engine == "cudf":
        # NOTE: engine == "cudf" requires cudf/dask_cudf,
        # will be skipped by non-GPU CI.
        dask_cudf = pytest.importorskip("dask_cudf")
    dsk = {
        ("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 2, 6]}, index=[0, 1, 3]),
        ("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 5, 8]}, index=[5, 6, 8]),
        ("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [9, 1, 8]}, index=[9, 9, 9]),
    }
    # ``meta`` is a module-level fixture defined elsewhere in this file.
    d = dd.DataFrame(dsk, "x", meta, [0, 4, 9, 9])
    if engine == "cudf":
        d = dask_cudf.from_dask_dataframe(d)
    full = d.compute()
    d2 = d.set_index("b", npartitions=3)
    assert d2.npartitions == 3
    assert d2.index.name == "b"
    assert_eq(d2, full.set_index("b"))
    d3 = d.set_index(d.b, npartitions=3)
    assert d3.npartitions == 3
    assert d3.index.name == "b"
    assert_eq(d3, full.set_index(full.b))
    d4 = d.set_index("b")
    assert d4.index.name == "b"
    assert_eq(d4, full.set_index("b"))
    # A one-element list behaves like the bare column name.
    d5 = d.set_index(["b"])
    assert d5.index.name == "b"
    assert_eq(d5, full.set_index(["b"]))
@pytest.mark.parametrize(
    "engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate(engine):
    """Requesting more partitions interpolates divisions between quantiles."""
    if engine == "cudf":
        # NOTE: engine == "cudf" requires cudf/dask_cudf,
        # will be skipped by non-GPU CI.
        cudf = pytest.importorskip("cudf")
        dask_cudf = pytest.importorskip("dask_cudf")
    df = pd.DataFrame({"x": [4, 1, 1, 3, 3], "y": [1.0, 1, 1, 1, 2]})
    if engine == "cudf":
        gdf = cudf.from_pandas(df)
        d = dask_cudf.from_cudf(gdf, npartitions=3)
    else:
        d = dd.from_pandas(df, 2)
    d1 = d.set_index("x", npartitions=3)
    assert d1.npartitions == 3
    assert set(d1.divisions) == {1, 2, 4}
    d2 = d.set_index("y", npartitions=3)
    # Interior divisions are interpolated strictly between the data min/max.
    assert d2.divisions[0] == 1.0
    assert 1.0 < d2.divisions[1] < d2.divisions[2] < 2.0
    assert d2.divisions[3] == 2.0
@pytest.mark.parametrize(
    "engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate_int(engine):
    """Interpolated divisions on an integer column stay integers."""
    if engine == "cudf":
        # NOTE: engine == "cudf" requires cudf/dask_cudf,
        # will be skipped by non-GPU CI.
        cudf = pytest.importorskip("cudf")
        dask_cudf = pytest.importorskip("dask_cudf")
    L = sorted(list(range(0, 200, 10)) * 2)
    df = pd.DataFrame({"x": 2 * L})
    if engine == "cudf":
        gdf = cudf.from_pandas(df)
        d = dask_cudf.from_cudf(gdf, npartitions=2)
    else:
        d = dd.from_pandas(df, 2)
    d1 = d.set_index("x", npartitions=10)
    assert all(np.issubdtype(type(x), np.integer) for x in d1.divisions)
@pytest.mark.parametrize(
    "engine", ["pandas", pytest.param("cudf", marks=pytest.mark.gpu)]
)
def test_set_index_interpolate_large_uint(engine):
    """Huge uint64 index values survive division computation exactly."""
    if engine == "cudf":
        # NOTE: engine == "cudf" requires cudf/dask_cudf,
        # will be skipped by non-GPU CI.
        cudf = pytest.importorskip("cudf")
        dask_cudf = pytest.importorskip("dask_cudf")
    """This test is for #7304"""
    # Values near 2**63: float round-tripping would corrupt them.
    df = pd.DataFrame(
        {"x": np.array([612509347682975743, 616762138058293247], dtype=np.uint64)}
    )
    if engine == "cudf":
        gdf = cudf.from_pandas(df)
        d = dask_cudf.from_cudf(gdf, npartitions=2)
    else:
        d = dd.from_pandas(df, 1)
    d1 = d.set_index("x", npartitions=1)
    assert d1.npartitions == 1
    assert set(d1.divisions) == {612509347682975743, 616762138058293247}
def test_set_index_timezone():
    """Divisions preserve tz-awareness for both naive and aware datetimes."""
    s_naive = pd.Series(pd.date_range("20130101", periods=3))
    s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
    df = pd.DataFrame({"tz": s_aware, "notz": s_naive})
    d = dd.from_pandas(df, 2)
    d1 = d.set_index("notz", npartitions=1)
    s1 = pd.DatetimeIndex(s_naive.values, dtype=s_naive.dtype)
    assert d1.divisions[0] == s_naive[0] == s1[0]
    assert d1.divisions[-1] == s_naive[2] == s1[2]
    # We currently lose "freq". Converting data with pandas-defined dtypes
    # to numpy or pure Python can be lossy like this.
    d2 = d.set_index("tz", npartitions=1)
    s2 = pd.DatetimeIndex(s_aware, dtype=s_aware.dtype)
    assert d2.divisions[0] == s2[0]
    assert d2.divisions[-1] == s2[2]
    # The timezone must survive into the divisions.
    assert d2.divisions[0].tz == s2[0].tz
    assert d2.divisions[0].tz is not None
    # Building from .values drops the tz, producing a naive index.
    s2badtype = pd.DatetimeIndex(s_aware.values, dtype=s_naive.dtype)
    if PANDAS_GT_120:
        # starting with pandas 1.2.0, comparing equality of timestamps with different
        # timezones returns False instead of raising an error
        assert not d2.divisions[0] == s2badtype[0]
    else:
        with pytest.raises(TypeError):
            d2.divisions[0] == s2badtype[0]
def test_set_index_npartitions():
    """Regression test for dask#6974: set_index honors npartitions=1 on skewed keys."""
    pdf = pd.DataFrame(index=pd.Index(["A"] * 9 + ["B"] * 3 + ["C"]))
    ddf = dd.from_pandas(pdf, npartitions=2)
    result = ddf.reset_index().set_index("index", npartitions=1)
    assert result.npartitions == 1
@pytest.mark.parametrize("unit", ["ns", "us"])
def test_set_index_datetime_precision(unit):
    """Regression test for dask#6864: datetime index round-trips at ns and us."""
    raw = [
        [1567703791155681, 1],
        [1567703792155681, 2],
        [1567703790155681, 0],
        [1567703793155681, 3],
    ]
    pdf = pd.DataFrame(raw, columns=["ts", "rank"])
    pdf.ts = pd.to_datetime(pdf.ts, unit=unit)
    result = dd.from_pandas(pdf, npartitions=2).set_index("ts")
    assert_eq(result, pdf.set_index("ts"))
@pytest.mark.parametrize("drop", [True, False])
def test_set_index_drop(drop):
    """The ``drop`` flag matches pandas for string and integer column labels."""
    pdf = pd.DataFrame(
        {
            "A": list("ABAABBABAA"),
            "B": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            "C": [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
        }
    )
    ddf = dd.from_pandas(pdf, 3)
    assert_eq(ddf.set_index("A", drop=drop), pdf.set_index("A", drop=drop))
    assert_eq(ddf.set_index("B", drop=drop), pdf.set_index("B", drop=drop))
    assert_eq(ddf.set_index("C", drop=drop), pdf.set_index("C", drop=drop))
    assert_eq(ddf.set_index(ddf.A, drop=drop), pdf.set_index(pdf.A, drop=drop))
    assert_eq(ddf.set_index(ddf.B, drop=drop), pdf.set_index(pdf.B, drop=drop))
    assert_eq(ddf.set_index(ddf.C, drop=drop), pdf.set_index(pdf.C, drop=drop))
    # numeric columns
    pdf = pd.DataFrame(
        {
            0: list("ABAABBABAA"),
            1: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
            2: [1, 2, 3, 2, 1, 3, 2, 4, 2, 3],
        }
    )
    ddf = dd.from_pandas(pdf, 3)
    assert_eq(ddf.set_index(0, drop=drop), pdf.set_index(0, drop=drop))
    assert_eq(ddf.set_index(2, drop=drop), pdf.set_index(2, drop=drop))
def test_set_index_raises_error_on_bad_input():
    """Multi-column and nested-list index keys raise NotImplementedError."""
    pdf = pd.DataFrame({"a": [1, 2, 3, 4, 5, 6, 7], "b": [7, 6, 5, 4, 3, 2, 1]})
    ddf = dd.from_pandas(pdf, 2)
    msg = r"Dask dataframe does not yet support multi-indexes"
    # Each of these spellings would imply a MultiIndex, which dask rejects.
    for bad_key in (["a", "b"], [["a", "b"]], [["a"]]):
        with pytest.raises(NotImplementedError) as err:
            ddf.set_index(bad_key)
        assert msg in str(err.value)
def test_set_index_sorted_true():
    """sorted=True skips the shuffle, reuses the input graph, and rejects
    unsorted columns."""
    df = pd.DataFrame({"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]})
    a = dd.from_pandas(df, 2, sort=False)
    assert not a.known_divisions
    b = a.set_index("x", sorted=True)
    assert b.known_divisions
    # No shuffle: the original tasks are a subset of the new graph.
    assert set(a.dask).issubset(set(b.dask))
    for drop in [True, False]:
        assert_eq(a.set_index("x", drop=drop), df.set_index("x", drop=drop))
        assert_eq(
            a.set_index(a.x, sorted=True, drop=drop), df.set_index(df.x, drop=drop)
        )
        assert_eq(
            a.set_index(a.x + 1, sorted=True, drop=drop),
            df.set_index(df.x + 1, drop=drop),
        )
    # z is descending, so claiming it is sorted must fail.
    with pytest.raises(ValueError):
        a.set_index(a.z, sorted=True)
def test_set_index_sorted_single_partition():
    """sorted=True works when all data lives in a single partition."""
    pdf = pd.DataFrame({"x": [1, 2, 3, 4], "y": [1, 0, 1, 0]})
    single = dd.from_pandas(pdf, npartitions=1)
    assert_eq(single.set_index("x", sorted=True), pdf.set_index("x"))
def test_set_index_sorted_min_max_same():
    """Divisions are computed even when each partition's min equals its max."""
    part_a = pd.DataFrame({"x": [1, 2, 3], "y": [0, 0, 0]})
    part_b = pd.DataFrame({"x": [1, 2, 3], "y": [1, 1, 1]})
    ddf = dd.from_delayed([delayed(part_a), delayed(part_b)], meta=part_a)
    assert not ddf.known_divisions
    indexed = ddf.set_index("y", sorted=True)
    assert indexed.divisions == (0, 1, 1)
def test_set_index_empty_partition():
    """set_index copes with an entirely empty trailing partition for several
    index dtypes (int, float, str, datetime)."""
    test_vals = [1, 2, 3]
    converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
    for conv in converters:
        df = pd.DataFrame(
            [{"x": conv(i), "y": i} for i in test_vals], columns=["x", "y"]
        )
        # Second concatenated frame is empty (filter removes every row).
        ddf = dd.concat(
            [
                dd.from_pandas(df, npartitions=1),
                dd.from_pandas(df[df.y > df.y.max()], npartitions=1),
            ]
        )
        assert any(ddf.get_partition(p).compute().empty for p in range(ddf.npartitions))
        assert assert_eq(ddf.set_index("x"), df.set_index("x"))
def test_set_index_on_empty():
    """set_index on a fully-filtered (empty) frame collapses to 1 partition."""
    test_vals = [1, 2, 3, 4]
    converters = [int, float, str, lambda x: pd.to_datetime(x, unit="ns")]
    for converter in converters:
        df = pd.DataFrame([{"x": converter(x), "y": x} for x in test_vals])
        ddf = dd.from_pandas(df, npartitions=4)
        assert ddf.npartitions > 1
        # The filter removes every row, so set_index runs on empty data.
        ddf = ddf[ddf.y > df.y.max()].set_index("x")
        expected_df = df[df.y > df.y.max()].set_index("x")
        assert assert_eq(ddf, expected_df, **CHECK_FREQ)
        assert ddf.npartitions == 1
def test_set_index_categorical():
    """Ordered-categorical index: divisions follow the categorical order."""
    # https://github.com/dask/dask/issues/5671
    order = list(reversed(string.ascii_letters))
    values = list(string.ascii_letters)
    random.shuffle(values)
    dtype = pd.api.types.CategoricalDtype(order, ordered=True)
    df = pd.DataFrame({"A": pd.Categorical(values, dtype=dtype), "B": 1})
    result = dd.from_pandas(df, npartitions=2).set_index("A")
    assert len(result) == len(df)
    # sorted with the metric defined by the Categorical
    divisions = pd.Categorical(result.divisions, dtype=dtype)
    assert_categorical_equal(divisions, divisions.sort_values())
def test_compute_divisions():
    """compute_and_set_divisions fills in divisions without altering the data."""
    from dask.dataframe.shuffle import compute_and_set_divisions

    pdf = pd.DataFrame(
        {"x": [1, 2, 3, 4], "y": [10, 20, 20, 40], "z": [4, 3, 2, 1]},
        index=[1, 3, 10, 20],
    )
    unknown = dd.from_pandas(pdf, 2, sort=False)
    assert not unknown.known_divisions
    known = compute_and_set_divisions(copy(unknown))
    assert_eq(unknown, known, check_divisions=False)
    assert known.known_divisions
def test_empty_partitions():
    """Chained set_index/repartition works when some partitions end up empty."""
    # See https://github.com/dask/dask/issues/2408
    df = pd.DataFrame({"a": list(range(10))})
    df["b"] = df["a"] % 3
    df["c"] = df["b"].astype(str)
    ddf = dd.from_pandas(df, npartitions=3)
    ddf = ddf.set_index("b")
    ddf = ddf.repartition(npartitions=3)
    # Materializing a single partition must not fail.
    ddf.get_partition(0).compute()
    assert_eq(ddf, df.set_index("b"))
    ddf = ddf.set_index("c")
    assert_eq(ddf, df.set_index("b").set_index("c"))
def test_remove_nans():
    """remove_nans fills missing division entries from their neighbors."""
    cases = [
        ((1, 1, 2), (1, 1, 2)),
        ((None, 1, 2), (1, 1, 2)),
        ((1, None, 2), (1, 2, 2)),
        ((1, 2, None), (1, 2, 2)),
        ((1, 2, None, None), (1, 2, 2, 2)),
        ((None, None, 1, 2), (1, 1, 1, 2)),
        ((1, None, None, 2), (1, 2, 2, 2)),
        ((None, 1, None, 2, None, 3, None), (1, 1, 2, 2, 3, 3, 3)),
    ]
    converters = [
        (int, np.nan),
        (float, np.nan),
        (str, np.nan),
        (lambda x: pd.to_datetime(x, unit="ns"), np.datetime64("NaT")),
    ]
    # Exercise every dtype/missing-value pairing against every fill pattern.
    for convert, missing in converters:
        for raw, expected in cases:
            values = [missing if v is None else convert(v) for v in raw]
            assert remove_nans(values) == [convert(v) for v in expected]
@pytest.mark.slow
def test_gh_2730():
    """Task-based inner merge with very unbalanced partition counts matches pandas."""
    large = pd.DataFrame({"KEY": np.arange(0, 50000)})
    small = pd.DataFrame({"KEY": np.arange(25, 500)})
    dd_left = dd.from_pandas(small, npartitions=3)
    dd_right = dd.from_pandas(large, npartitions=257)
    with dask.config.set(shuffle="tasks", scheduler="sync"):
        dd_merged = dd_left.merge(dd_right, how="inner", on="KEY")
        result = dd_merged.compute()
    expected = large.merge(small, how="inner", on="KEY")
    # Row order is not guaranteed after a shuffle merge; sort before comparing.
    tm.assert_frame_equal(result.sort_values("KEY").reset_index(drop=True), expected)
@pytest.mark.parametrize("npartitions", [None, "auto"])
def test_set_index_does_not_repeat_work_due_to_optimizations(npartitions):
    """Each input partition is materialized exactly once during set_index."""
    # Atomic counter
    count = itertools.count()

    def increment():
        # Side effect recorded once per executed ("inc", i) task.
        next(count)

    def make_part(dummy, n):
        return pd.DataFrame({"x": np.random.random(n), "y": np.random.random(n)})

    nbytes = 1e6
    nparts = 50
    n = int(nbytes / (nparts * 8))
    # Graph where each data partition depends on a counting task.
    dsk = {("inc", i): (increment,) for i in range(nparts)}
    dsk.update({("x", i): (make_part, ("inc", i), n) for i in range(nparts)})
    ddf = dd.DataFrame(dsk, "x", make_part(None, 1), [None] * (nparts + 1))
    ddf.set_index("x", npartitions=npartitions)
    ntimes = next(count)
    assert ntimes == nparts
def test_set_index_errors_with_inplace_kwarg():
    """inplace=True is not supported by dask's set_index."""
    pdf = pd.DataFrame({"a": [9, 8, 7], "b": [6, 5, 4], "c": [3, 2, 1]})
    ddf = dd.from_pandas(pdf, npartitions=1)
    # The default (copying) form works...
    ddf.set_index("a")
    # ...but requesting an in-place update must raise.
    with pytest.raises(NotImplementedError):
        ddf.set_index("a", inplace=True)
def test_set_index_timestamp():
    """tz-aware divisions keep both wall time and timezone through set_index."""
    df = pd.DataFrame({"A": pd.date_range("2000", periods=12, tz="US/Central"), "B": 1})
    ddf = dd.from_pandas(df, 2)
    divisions = (
        pd.Timestamp("2000-01-01 00:00:00-0600", tz="US/Central"),
        pd.Timestamp("2000-01-12 00:00:00-0600", tz="US/Central"),
    )
    # Note: `freq` is lost during round trip
    df2 = df.set_index("A")
    ddf_new_div = ddf.set_index("A", divisions=divisions)
    for (ts1, ts2) in zip(divisions, ddf_new_div.divisions):
        assert ts1.value == ts2.value
        assert ts1.tz == ts2.tz
    assert_eq(df2, ddf_new_div, **CHECK_FREQ)
    assert_eq(df2, ddf.set_index("A"), **CHECK_FREQ)
@pytest.mark.parametrize("compression", [None, "ZLib"])
def test_disk_shuffle_with_compression_option(compression):
    # test if dataframe shuffle works both with and without compression
    with dask.config.set({"dataframe.shuffle-compression": compression}):
        # Re-run the module-level disk-shuffle test under this config.
        test_shuffle("disk")
@pytest.mark.parametrize("compression", ["UNKOWN_COMPRESSION_ALGO"])
def test_disk_shuffle_with_unknown_compression(compression):
    # test if dask raises an error in case of fault config string
    with dask.config.set({"dataframe.shuffle-compression": compression}):
        # The error message must name the bad algorithm verbatim.
        with pytest.raises(
            ImportError,
            match=(
                "Not able to import and load {} as compression algorithm."
                "Please check if the library is installed and supported by Partd.".format(
                    compression
                )
            ),
        ):
            test_shuffle("disk")
def test_disk_shuffle_check_actual_compression():
    # test if the compression switch is really respected by testing the size of the actual partd-data on disk
    def generate_raw_partd_file(compression):
        # generate and write a dummy dataframe to disk and return the raw data bytes
        df1 = pd.DataFrame({"a": list(range(10000))})
        df1["b"] = (df1["a"] * 123).astype(str)
        with dask.config.set({"dataframe.shuffle-compression": compression}):
            p1 = maybe_buffered_partd(buffer=False, tempdir=None)()
            p1.append({"x": df1})
        # get underlying filename from partd - depending on nested structure of partd object
        filename = (
            p1.partd.partd.filename("x") if compression else p1.partd.filename("x")
        )
        with open(filename, "rb") as f:
            return f.read()

    # get compressed and uncompressed raw data
    uncompressed_data = generate_raw_partd_file(compression=None)
    compressed_data = generate_raw_partd_file(compression="BZ2")
    # Compressed on-disk payload must be strictly smaller.
    assert len(uncompressed_data) > len(compressed_data)
@pytest.mark.parametrize("ignore_index", [None, True, False])
@pytest.mark.parametrize(
    "on", ["id", "name", ["id", "name"], pd.Series(["id", "name"])]
)
@pytest.mark.parametrize("max_branch", [None, 4])
def test_dataframe_shuffle_on_arg(on, ignore_index, max_branch, shuffle_method):
    # Make sure DataFrame.shuffle API returns the same result
    # whether the ``on`` argument is a list of column names,
    # or a separate DataFrame with equivalent values...
    df_in = dask.datasets.timeseries(
        "2000",
        "2001",
        types={"value": float, "name": str, "id": int},
        freq="2H",
        partition_freq="1M",
        seed=1,
    )
    # Build an equivalent DataFrame/Series form of the ``on`` argument.
    if isinstance(on, str):
        ext_on = df_in[[on]].copy()
    else:
        ext_on = df_in[on].copy()
    df_out_1 = df_in.shuffle(
        on, shuffle=shuffle_method, ignore_index=ignore_index, max_branch=max_branch
    )
    df_out_2 = df_in.shuffle(ext_on, shuffle=shuffle_method, ignore_index=ignore_index)
    assert_eq(df_out_1, df_out_2, check_index=(not ignore_index))
    # disk shuffling doesn't support ignore_index
    if ignore_index and shuffle_method == "tasks":
        assert df_out_1.index.dtype != df_in.index.dtype
    else:
        assert df_out_1.index.dtype == df_in.index.dtype
def test_set_index_overlap():
    """Repartitioning on identical divisions round-trips duplicated keys."""
    pdf = pd.DataFrame({"key": [1, 2, 3, 4, 4, 5, 6, 7], "value": list("abcd" * 2)})
    indexed = dd.from_pandas(pdf, npartitions=2).set_index("key", sorted=True)
    repartitioned = indexed.repartition(divisions=indexed.divisions)
    assert_eq(indexed, repartitioned)
def test_set_index_overlap_2():
    """Re-setting a sorted index after repartition(8) keeps all 8 partitions."""
    pdf = pd.DataFrame(
        index=pd.Index(["A"] * 9 + ["B"] * 3 + ["C"], name="index")
    )
    ddf1 = dd.from_pandas(pdf, npartitions=2)
    ddf2 = ddf1.reset_index().repartition(8).set_index("index", sorted=True)
    assert_eq(ddf1, ddf2)
    assert ddf2.npartitions == 8
def test_shuffle_hlg_layer():
    # This test checks that the `ShuffleLayer` HLG Layer
    # is used (as expected) for a multi-stage shuffle.
    ddf = dd.from_pandas(
        pd.DataFrame({"a": np.random.randint(0, 10, 100)}), npartitions=10
    )
    # Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks
    ddf_shuffled = ddf.shuffle("a", max_branch=3, shuffle="tasks")
    keys = [(ddf_shuffled._name, i) for i in range(ddf_shuffled.npartitions)]
    # Cull the HLG
    dsk = ddf_shuffled.__dask_graph__()
    dsk_culled = dsk.cull(set(keys))
    assert isinstance(dsk_culled, dask.highlevelgraph.HighLevelGraph)
    # Ensure we have ShuffleLayers
    assert any(
        isinstance(layer, dd.shuffle.ShuffleLayer) for layer in dsk.layers.values()
    )
    # Check that the ShuffleLayers are non-materialized
    for layer in dsk.layers.values():
        if isinstance(layer, dd.shuffle.ShuffleLayer):
            assert not hasattr(layer, "_cached_dict")
    # Make sure HLG culling reduces the graph size
    assert len(dsk_culled) < len(dsk)
    # Check ShuffleLayer names
    for name, layer in dsk.layers.items():
        if isinstance(layer, dd.shuffle.ShuffleLayer):
            assert name.startswith("shuffle-")
    # Since we already culled the HLG,
    # culling the dictionary should not change the graph
    dsk_dict = dict(dsk_culled)
    dsk_dict_culled, _ = cull(dsk_dict, keys)
    assert dsk_dict_culled == dsk_dict
@pytest.mark.parametrize(
    "npartitions",
    [
        10,  # ShuffleLayer
        1,  # SimpleShuffleLayer
    ],
)
def test_shuffle_hlg_layer_serialize(npartitions):
    """Shuffle layers pickle round-trip without materializing their graphs."""
    ddf = dd.from_pandas(
        pd.DataFrame({"a": np.random.randint(0, 10, 100)}), npartitions=npartitions
    )
    # Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks
    ddf_shuffled = ddf.shuffle("a", max_branch=3, shuffle="tasks")
    # Ensure shuffle layers can be serialized and don't result in
    # the underlying low-level graph being materialized
    dsk = ddf_shuffled.__dask_graph__()
    for layer in dsk.layers.values():
        if not isinstance(layer, dd.shuffle.SimpleShuffleLayer):
            continue
        assert not hasattr(layer, "_cached_dict")
        layer_roundtrip = pickle.loads(pickle.dumps(layer))
        assert type(layer_roundtrip) == type(layer)
        # Serialization must not trigger materialization...
        assert not hasattr(layer_roundtrip, "_cached_dict")
        # ...yet the round-tripped layer exposes the same keys.
        assert layer_roundtrip.keys() == layer.keys()
def test_set_index_nan_partition():
    """set_index tolerates empty (null) partitions produced by filtering.

    NOTE(review): relies on the module-level dask frame ``d`` defined
    elsewhere in this file.
    """
    d[d.a > 3].set_index("a")  # Set index with 1 null partition
    d[d.a > 1].set_index("a", sorted=True)  # Set sorted index with 0 null partitions
    a = d[d.a > 3].set_index("a", sorted=True)  # Set sorted index with 1 null partition
    assert_eq(a, a)
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nelem", [10, 500])
def test_sort_values(nelem, by, ascending):
    """Multi-partition sort_values matches pandas for either sort column."""
    np.random.seed(0)
    df = pd.DataFrame()
    # "a" descends while "b" ascends, so the two keys sort differently.
    df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
    df["b"] = np.arange(100, nelem + 100)
    ddf = dd.from_pandas(df, npartitions=10)
    # run on single-threaded scheduler for debugging purposes
    with dask.config.set(scheduler="single-threaded"):
        got = ddf.sort_values(by=by, ascending=ascending)
    expect = df.sort_values(by=by, ascending=ascending)
    dd.assert_eq(got, expect, check_index=False)
@pytest.mark.parametrize("ascending", [True, False, [False, True], [True, False]])
@pytest.mark.parametrize("by", [["a", "b"], ["b", "a"]])
@pytest.mark.parametrize("nelem", [10, 500])
def test_sort_values_single_partition(nelem, by, ascending):
    """Single-partition sort_values supports list-valued by/ascending."""
    np.random.seed(0)
    df = pd.DataFrame()
    # "a" descends while "b" ascends, so the two keys sort differently.
    df["a"] = np.ascontiguousarray(np.arange(nelem)[::-1])
    df["b"] = np.arange(100, nelem + 100)
    ddf = dd.from_pandas(df, npartitions=1)
    # run on single-threaded scheduler for debugging purposes
    with dask.config.set(scheduler="single-threaded"):
        got = ddf.sort_values(by=by, ascending=ascending)
    expect = df.sort_values(by=by, ascending=ascending)
    dd.assert_eq(got, expect, check_index=False)
@pytest.mark.parametrize("na_position", ["first", "last"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("by", ["a", "b"])
@pytest.mark.parametrize("nparts", [1, 5])
@pytest.mark.parametrize(
    "data",
    [
        {
            "a": list(range(50)) + [None] * 50 + list(range(50, 100)),  # type: ignore
            "b": [None] * 100 + list(range(100, 150)),  # type: ignore
        },
        {
            "a": list(range(15)) + [None] * 5,  # type: ignore
            "b": list(reversed(range(20))),
        },
    ],
)
def test_sort_values_with_nulls(data, nparts, by, ascending, na_position):
    """sort_values handles null-heavy columns like pandas for every
    na_position/ascending combination."""
    df = pd.DataFrame(data)
    ddf = dd.from_pandas(df, npartitions=nparts)
    # run on single-threaded scheduler for debugging purposes
    with dask.config.set(scheduler="single-threaded"):
        got = ddf.sort_values(by=by, ascending=ascending, na_position=na_position)
    expect = df.sort_values(by=by, ascending=ascending, na_position=na_position)
    dd.assert_eq(got, expect, check_index=False)
def test_shuffle_values_raises():
    """sort_values rejects an invalid na_position argument."""
    ddf = dd.from_pandas(pd.DataFrame({"a": [1, 3, 2]}), npartitions=3)
    expected_msg = "na_position must be either 'first' or 'last'"
    with pytest.raises(ValueError, match=expected_msg):
        ddf.sort_values(by="a", na_position="invalid")
def test_shuffle_by_as_list():
    """sort_values accepts ``by`` given as a one-element list."""
    ddf = dd.from_pandas(pd.DataFrame({"a": [1, 3, 2]}), npartitions=3)
    with dask.config.set(scheduler="single-threaded"):
        result = ddf.sort_values(by=["a"], npartitions="auto", ascending=True)
    expected = pd.DataFrame({"a": [1, 2, 3]})
    dd.assert_eq(result, expected, check_index=False)
def test_noop():
    """_noop returns its first argument unchanged, ignoring the second."""
    for value in (1, "test"):
        assert _noop(value, None) == value
@pytest.mark.parametrize("by", [["a", "b"], ["b", "a"]])
@pytest.mark.parametrize("nparts", [1, 10])
def test_sort_values_custom_function(by, nparts):
    """A user-supplied sort_function with kwargs overrides the ``by`` column."""
    df = pd.DataFrame({"a": [1, 2, 3] * 20, "b": [4, 5, 6, 7] * 15})
    ddf = dd.from_pandas(df, npartitions=nparts)

    def f(partition, by_columns, ascending, na_position, **kwargs):
        # Per-partition sort using the injected by_columns rather than ``by``.
        return partition.sort_values(
            by_columns, ascending=ascending, na_position=na_position
        )

    # run on single-threaded scheduler for debugging purposes
    with dask.config.set(scheduler="single-threaded"):
        got = ddf.sort_values(
            by=by[0], sort_function=f, sort_function_kwargs={"by_columns": by}
        )
    expect = df.sort_values(by=by)
    dd.assert_eq(got, expect, check_index=False)
def test_sort_values_bool_ascending():
df = pd.DataFrame({"a": [1, 2, 3] * 20, "b": [4, 5, 6, 7] * 15})
ddf = dd.from_pandas(df, npartitions=10)
# attempt to sort with list of ascending booleans
with pytest.raises(NotImplementedError):
ddf.sort_values(by="a", ascending=[True, False])
| dask/dataframe/tests/test_shuffle.py | 44,361 | conflicts with keyword argument disjoint Every value in exactly one partition ensure temporary files are cleaned up when there's an internal exception. Divisions must be sorted with sorted=True, divisions must be same length as df.divisions Divisions must be sorted See https://github.com/dask/dask/issues/3867 https://github.com/dask/dask/issues/2288 NOTE: engine == "cudf" requires cudf/dask_cudf, will be skipped by non-GPU CI. NOTE: engine == "cudf" requires cudf/dask_cudf, will be skipped by non-GPU CI. NOTE: engine == "cudf" requires cudf/dask_cudf, will be skipped by non-GPU CI. NOTE: engine == "cudf" requires cudf/dask_cudf, will be skipped by non-GPU CI. We currently lose "freq". Converting data with pandas-defined dtypes to numpy or pure Python can be lossy like this. starting with pandas 1.2.0, comparing equality of timestamps with different timezones returns False instead of raising an error https://github.com/dask/dask/issues/6974 https://github.com/dask/dask/issues/6864 numeric columns https://github.com/dask/dask/issues/5671 sorted with the metric defined by the Categorical See https://github.com/dask/dask/issues/2408 Atomic counter Note: `freq` is lost during round trip test if dataframe shuffle works both with and without compression test if dask raises an error in case of fault config string test if the compression switch is really respected by testing the size of the actual partd-data on disk generate and write a dummy dataframe to disk and return the raw data bytes get underlying filename from partd - depending on nested structure of partd object get compressed and uncompressed raw data Make sure DataFrame.shuffle API returns the same result whether the ``on`` argument is a list of column names, or a separate DataFrame with equivalent values... disk shuffling doesn't support ignore_index This test checks that the `ShuffleLayer` HLG Layer is used (as expected) for a multi-stage shuffle. 
Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks Cull the HLG Ensure we have ShuffleLayers Check that the ShuffleLayers are non-materialized Make sure HLG culling reduces the graph size Check ShuffleLayer names Since we already culled the HLG, culling the dictionary should not change the graph ShuffleLayer SimpleShuffleLayer Disk-based shuffle doesn't use HLG layers at the moment, so we only test tasks Ensure shuffle layers can be serialized and don't result in the underlying low-level graph being materialized Set index with 1 null partition Set sorted index with 0 null partitions Set sorted index with 1 null partition run on single-threaded scheduler for debugging purposes run on single-threaded scheduler for debugging purposes type: ignore type: ignore type: ignore run on single-threaded scheduler for debugging purposes run on single-threaded scheduler for debugging purposes attempt to sort with list of ascending booleans | 2,903 | en | 0.838349 |
import os
from dataclasses import dataclass
from tequila import TequilaException, BitString, TequilaWarning
from tequila.hamiltonian import QubitHamiltonian
from tequila.wavefunction import QubitWaveFunction
from tequila.hamiltonian.paulis import Sp, Sm, Qp, Qm
from tequila.circuit import QCircuit, gates, _gates_impl
from tequila.objective.objective import Variable, Variables, ExpectationValue
from tequila.simulators.simulator_api import simulate
from tequila.utils import to_float
from tequila.objective import assign_variable
from .encodings import known_encodings
import typing, numpy, numbers, copy
from itertools import product
# if you are experiencing import errors you need to update openfermion
# required is version >= 1.0
# otherwise replace with from openfermion.hamiltonians import MolecularData
import openfermion
from openfermion.chem import MolecularData
import warnings
@dataclass
class ActiveSpaceData:
    """Definition of an active space in spatial-orbital (c1) notation."""
    active_orbitals: list  # active orbitals (spatial, c1)
    reference_orbitals: list  # reference orbitals (spatial, c1)

    def __str__(self):
        # bug fix: the original printed non-existent frozen_docc/frozen_uocc
        # attributes and raised AttributeError; report the derived orbital
        # sets this class actually defines
        result = "Active Space Data:\n"
        result += "{key:15} : {value:15} \n".format(key="active_orbitals", value=str(self.active_orbitals))
        result += "{key:15} : {value:15} \n".format(key="reference_orbitals",
                                                    value=str(self.reference_orbitals))
        result += "{key:15} : {value:15} \n".format(key="frozen_reference", value=str(self.frozen_reference_orbitals))
        result += "{key:15} : {value:15} \n".format(key="active_reference", value=str(self.active_reference_orbitals))
        return result

    @property
    def frozen_reference_orbitals(self):
        """Reference orbitals that are not part of the active space (kept frozen)."""
        return [i for i in self.reference_orbitals if i not in self.active_orbitals]

    @property
    def active_reference_orbitals(self):
        """Reference orbitals that are also part of the active space."""
        return [i for i in self.reference_orbitals if i in self.active_orbitals]
class FermionicGateImpl(gates.QubitExcitationImpl):
    """Fermionic excitation gate; thin wrapper used to keep circuit drawings compact."""

    def __init__(self, generator, p0, transformation, *args, **kwargs):
        # the target qubits are exactly the qubits the generator acts on
        super().__init__(generator=generator, target=generator.qubits, p0=p0, *args, **kwargs)
        self.transformation = transformation
        self._name = "FermionicExcitation"

    def compile(self):
        """Decompose into a single-step trotterized evolution of the generator."""
        trotter_kwargs = dict(generator=self.generator, control=self.control,
                              angle=self.parameter, steps=1)
        return gates.Trotterized(**trotter_kwargs)
def prepare_product_state(state: BitString) -> QCircuit:
    """Small convenience function

    Parameters
    ----------
    state: BitString :
        product state encoded into a bitstring

    Returns
    -------
    QCircuit
        unitary circuit which prepares the product state (X gates on all set bits)
    """
    circuit = QCircuit()
    # flip every qubit whose bit is set in the given bitstring
    occupied = (qubit for qubit, bit in enumerate(state.array) if bit == 1)
    for qubit in occupied:
        circuit += gates.X(target=qubit)
    return circuit
@dataclass
class ParametersQC:
    """Specialization of ParametersHamiltonian: holds all structural data of a molecule."""
    basis_set: str = None  # Quantum chemistry basis set
    geometry: str = None  # geometry of the underlying molecule (units: Angstrom!),
    # this can be a filename leading to an .xyz file or the geometry given as a string
    description: str = ""
    multiplicity: int = 1
    charge: int = 0
    name: str = None

    @property
    def n_electrons(self) -> int:
        """Number of electrons: total nuclear charge minus the molecular charge."""
        return self.get_nuc_charge() - self.charge

    def get_nuc_charge(self):
        """Sum of the atomic numbers of all atoms in the geometry."""
        return sum(self.get_atom_number(name=atom) for atom in self.get_atoms())

    def get_atom_number(self, name):
        """Return the atomic number of an element symbol (case-insensitive).

        Elements beyond Ar are resolved through the optional periodictable package.

        Raises
        ------
        TequilaException
            if the symbol is unknown and periodictable is not available.
        """
        # bug fix: phosphorus has symbol "p" (the original table used "ph")
        atom_numbers = {"h": 1, "he": 2, "li": 3, "be": 4, "b": 5, "c": 6, "n": 7, "o": 8, "f": 9,
                        "ne": 10, "na": 11, "mg": 12, "al": 13, "si": 14, "p": 15, "s": 16,
                        "cl": 17, "ar": 18}
        if name.lower() in atom_numbers:
            return atom_numbers[name.lower()]
        try:
            import periodictable as pt
            # bug fix: str is immutable, so build the capitalized symbol instead of
            # assigning to atom[0]; periodictable exposes .number as an attribute,
            # not a method
            element = pt.elements.symbol(name.lower().capitalize())
            return element.number
        except Exception:
            raise TequilaException(
                "can not assign atomic number to element {}\npip install periodictable will fix it".format(name))

    def get_atoms(self):
        """List of element symbols occurring in the geometry."""
        return [x[0] for x in self.get_geometry()]

    def __post_init__(self, *args, **kwargs):
        """Derive missing name/geometry information after dataclass initialization.

        Raises
        ------
        TequilaException
            if neither name nor geometry was given, or a referenced xyz file is missing.
        """
        if self.name is None and self.geometry is None:
            raise TequilaException(
                "no geometry or name given to molecule\nprovide geometry=filename.xyz or geometry=`h 0.0 0.0 0.0\\n...`\nor name=whatever with file whatever.xyz being present")

        # auto naming
        if self.name is None:
            if ".xyz" in self.geometry:
                # take the name from the xyz filename
                self.name = self.geometry.split(".xyz")[0]
                if self.description is None:
                    # bug fix: read_xyz_from_file requires the filename argument
                    coord, description = self.read_xyz_from_file(self.geometry)
                    self.description = description
            else:
                # build a sum-formula name such as "oh2", heaviest element first
                atoms = self.get_atoms()
                atom_names = sorted(list(set(atoms)), key=lambda x: self.get_atom_number(x), reverse=True)
                drop_ones = lambda x: "" if x == 1 else x
                self.name = "".join(["{}{}".format(x, drop_ones(atoms.count(x))) for x in atom_names])
            self.name = self.name.lower()

        if self.geometry is None:
            self.geometry = self.name + ".xyz"

        if ".xyz" in self.geometry and not os.path.isfile(self.geometry):
            raise TequilaException("could not find file for molecular coordinates {}".format(self.geometry))

    @property
    def filename(self):
        """Base filename used for molecule data on disk."""
        return "{}_{}".format(self.name, self.basis_set)

    @property
    def molecular_data_param(self) -> dict:
        """:return: Give back all parameters for the MolecularData format from openfermion as dictionary"""
        return {'basis': self.basis_set, 'geometry': self.get_geometry(), 'description': self.description,
                'charge': self.charge, 'multiplicity': self.multiplicity, 'filename': self.filename
                }

    @staticmethod
    def format_element_name(string):
        """OpenFermion uses case sensitive hash tables for chemical elements,
        i.e. Lithium must be named 'Li' -- 'li' or 'LI' will not work.

        :return: first letter converted to upper case, rest to lower case
        """
        assert (len(string) > 0)
        assert (isinstance(string, str))
        fstring = string[0].upper() + string[1:].lower()
        return fstring

    @staticmethod
    def convert_to_list(geometry):
        """Convert a molecular structure given as a string into a list suitable for openfermion

        Parameters
        ----------
        geometry :
            a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0\n h 0.0 0.0 1.0"

        Returns
        -------
        type
            A list with the correct format for openfermion E.g return [ ['h',[0.0,0.0,0.0], [..]]
        """
        result = []
        # Remove blank lines
        lines = [l for l in geometry.split("\n") if l]
        for line in lines:
            words = line.split()
            # Pad coordinates
            if len(words) < 4:
                words += [0.0] * (4 - len(words))
            try:
                tmp = (ParametersQC.format_element_name(words[0]),
                       (float(words[1]), float(words[2]), float(words[3])))
                result.append(tmp)
            except ValueError:
                print("get_geometry list unknown line:\n ", line, "\n proceed with caution!")

        return result

    def get_geometry_string(self) -> str:
        """Return the geometry as a string; reads the xyz file if geometry is a filename.

        :return: geometry string
        """
        if self.geometry.split('.')[-1] == 'xyz':
            geomstring, comment = self.read_xyz_from_file(self.geometry)
            if comment is not None:
                self.description = comment
            return geomstring
        else:
            return self.geometry

    def get_geometry(self):
        """Returns the geometry

        If an xyz filename was given the file is read out;
        otherwise the geometry is assumed to be given as a string
        which is then reformatted as a list usable as input for openfermion.

        :return: geometry as list
        e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
        Units: Angstrom!
        """
        if self.geometry.split('.')[-1] == 'xyz':
            geomstring, comment = self.read_xyz_from_file(self.geometry)
            if self.description == '':
                self.description = comment
            return self.convert_to_list(geomstring)
        elif self.geometry is not None:
            return self.convert_to_list(self.geometry)
        else:
            raise Exception("Parameters.qc.geometry is None")

    @staticmethod
    def read_xyz_from_file(filename):
        """Read the XYZ filetype for molecular structures.

        https://en.wikipedia.org/wiki/XYZ_file_format
        Units: Angstrom!

        Returns
        -------
        (coordinate block, comment line)
        """
        with open(filename, 'r') as file:
            content = file.readlines()
        natoms = int(content[0])
        comment = str(content[1]).strip('\n')
        coord = ''
        for i in range(natoms):
            coord += content[2 + i]
        return coord, comment
@dataclass
class ClosedShellAmplitudes:
    """Closed-shell coupled-cluster amplitudes in spatial-orbital notation."""
    tIjAb: numpy.ndarray = None  # doubles amplitudes, shape (nocc, nocc, nvirt, nvirt)
    tIA: numpy.ndarray = None  # singles amplitudes, shape (nocc, nvirt)

    def make_parameter_dictionary(self, threshold=1.e-8):
        """Collect all non-negligible amplitudes into a variable dictionary.

        Keys are spatial-orbital tuples (a, i, b, j) for doubles and (a, i)
        for singles; virtual indices are shifted by nocc.

        Parameters
        ----------
        threshold :
            (Default value = 1.e-8) amplitudes closer to zero are dropped

        Returns
        -------
        dict ordered by decreasing amplitude magnitude
        """
        variables = {}
        if self.tIjAb is not None:
            nocc, _, nvirt, _ = self.tIjAb.shape
            assert self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt
            for (I, J, A, B), amplitude in numpy.ndenumerate(self.tIjAb):
                if not numpy.isclose(amplitude, 0.0, atol=threshold):
                    variables[(nocc + A, I, nocc + B, J)] = amplitude
        if self.tIA is not None:
            nocc = self.tIA.shape[0]
            for (I, A), amplitude in numpy.ndenumerate(self.tIA):
                if not numpy.isclose(amplitude, 0.0, atol=threshold):
                    variables[(A + nocc, I)] = amplitude
        # largest amplitudes first
        ranked = sorted(variables.items(), key=lambda item: numpy.abs(item[1]), reverse=True)
        return dict(ranked)
@dataclass
class Amplitudes:
    """Coupled-Cluster Amplitudes in spin-orbital notation.

    We adopt the Psi4 notation for consistency:
    I,A label alpha spin-orbitals, i,a label beta spin-orbitals.
    Alpha spin-orbitals map to even qubit indices (2*p), beta to odd (2*p + 1).
    """

    tIjAb: numpy.ndarray = None  # mixed-spin doubles (alpha occupied first)
    tIA: numpy.ndarray = None  # alpha singles
    tiJaB: numpy.ndarray = None  # mixed-spin doubles (beta occupied first)
    tijab: numpy.ndarray = None  # all-beta doubles
    tIJAB: numpy.ndarray = None  # all-alpha doubles
    tia: numpy.ndarray = None  # beta singles

    @classmethod
    def from_closed_shell(cls, cs: "ClosedShellAmplitudes"):
        """
        Initialize from a closed-shell amplitude structure.

        Parameters
        ----------
        cs: ClosedShellAmplitudes :

        Returns
        -------
        Amplitudes with all spin blocks filled from the closed-shell data
        """
        # same-spin doubles are the antisymmetrized closed-shell doubles
        tijab = cs.tIjAb - numpy.einsum("ijab -> ijba", cs.tIjAb, optimize='greedy')
        return cls(tIjAb=cs.tIjAb, tIA=cs.tIA, tiJaB=cs.tIjAb, tia=cs.tIA, tijab=tijab, tIJAB=tijab)

    def make_parameter_dictionary(self, threshold=1.e-8):
        """
        Collect all amplitudes above `threshold` into a variable dictionary.

        Keys are spin-orbital tuples in the style (a, i, b, j) / (a, i),
        with alpha indices even (2*p) and beta indices odd (2*p + 1).

        Parameters
        ----------
        threshold :
            (Default value = 1.e-8)
            Neglect amplitudes below the threshold

        Returns
        -------
        Dictionary of tequila variables (hash is in the style of (a,i,b,j))
        """
        variables = {}
        if self.tIjAb is not None:
            nocc = self.tIjAb.shape[0]
            nvirt = self.tIjAb.shape[2]
            assert (self.tIjAb.shape[1] == nocc and self.tIjAb.shape[3] == nvirt)
            for (I, j, A, b), value in numpy.ndenumerate(self.tIjAb):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    # bug fix: beta occupied spin-orbital index is 2*j + 1 (was j + 1)
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + b) + 1, 2 * j + 1)] = value
        if self.tiJaB is not None:
            nocc = self.tiJaB.shape[0]
            for (i, J, a, B), value in numpy.ndenumerate(self.tiJaB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    # bug fix: alpha occupied spin-orbital index is 2*J (was J)
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + B), 2 * J)] = value
        if self.tijab is not None:
            nocc = self.tijab.shape[0]
            for (i, j, a, b), value in numpy.ndenumerate(self.tijab):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + a) + 1, 2 * i + 1, 2 * (nocc + b) + 1, 2 * j + 1)] = value
        if self.tIJAB is not None:
            nocc = self.tIJAB.shape[0]
            # bug fix: iterate the all-alpha tensor (the original re-used tijab)
            for (I, J, A, B), value in numpy.ndenumerate(self.tIJAB):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (nocc + A), 2 * I, 2 * (nocc + B), 2 * J)] = value
        if self.tIA is not None:
            # bug fix: take nocc from tIA itself (the original read tIjAb and
            # crashed when only singles amplitudes were provided)
            nocc = self.tIA.shape[0]
            for (I, A), value in numpy.ndenumerate(self.tIA):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (A + nocc), 2 * I)] = value
        if self.tia is not None:
            nocc = self.tia.shape[0]
            # bug fix: beta singles come from tia (the original iterated tIA twice)
            for (i, a), value in numpy.ndenumerate(self.tia):
                if not numpy.isclose(value, 0.0, atol=threshold):
                    variables[(2 * (a + nocc) + 1, 2 * i + 1)] = value
        return variables
class NBodyTensor:
    """ Convenience class for handling N-body tensors """

    class Ordering:
        """Normalized representation of a two-body integral ordering scheme."""

        def __init__(self, scheme):
            # accept another Ordering (or any ordering-carrying object) as input
            if hasattr(scheme, "_scheme"):
                scheme = scheme._scheme
            elif hasattr(scheme, "scheme"):
                scheme = scheme.scheme
            self._scheme = self.assign_scheme(scheme)

        def __eq__(self, other):
            # bug fix: without __eq__ two equal orderings compared by object
            # identity, so NBodyTensor.reorder could never short-circuit
            if isinstance(other, NBodyTensor.Ordering):
                return self._scheme == other._scheme
            return NotImplemented

        def __hash__(self):
            return hash(self._scheme)

        def assign_scheme(self, scheme):
            """Map the accepted scheme aliases onto 'chem', 'phys' or 'of'."""
            if scheme is None:
                return "chem"
            else:
                scheme = str(scheme)

            if scheme.lower() in ["mulliken", "chem", "c", "1122"]:
                return "chem"
            elif scheme.lower() in ["dirac", "phys", "p", "1212"]:
                return "phys"
            elif scheme.lower() in ["openfermion", "of", "o", "1221"]:
                return "of"
            else:
                raise TequilaException(
                    "Unknown two-body tensor scheme {}. Supported are dirac, mulliken, and openfermion".format(scheme))

        def is_phys(self):
            return self._scheme == "phys"

        def is_chem(self):
            return self._scheme == "chem"

        def is_of(self):
            return self._scheme == "of"

    def __init__(self, elems: numpy.ndarray = None, active_indices: list = None, ordering: str = None,
                 size_full: int = None):
        """
        Parameters
        ----------
        elems: Tensor data as numpy array
        active_indices: List of active indices in total ordering
        ordering: Ordering scheme for two body tensors
            "dirac" or "phys": <12|g|12>
                .. math::
                    g_{pqrs} = \\int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
            "mulliken" or "chem": (11|g|22)
                .. math::
                    g_{pqrs} = \\int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
            "openfermion":
                .. math:: [12|g|21]
                    g_{gqprs} = \\int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
        size_full
            dimension of the 'full' space (defaults to the tensor dimension)
        """
        # Set elements
        self.elems = elems
        # Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible
        # representations.
        # bug fix: always define the attribute; previously it was only set when
        # not None, so sub_str/set_index_lists raised AttributeError instead of
        # the intended error message
        self.active_indices = active_indices
        self._passive_indices = None
        self._full_indices = None
        self._indices_set: bool = False

        # Determine order of tensor.
        # Assume that the tensor is entered in its proper shape, not as a flat array.
        self.order = len(self.elems.shape)

        # Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well
        if size_full is None:
            self._size_full = self.elems.shape[0]
        else:
            self._size_full = size_full

        # 2-body tensors (<=> order 4) currently allow reordering
        if self.order == 4:
            self.ordering = self.Ordering(ordering)
        else:
            if ordering is not None:
                raise Exception("Ordering only implemented for tensors of order 4 / 2-body tensors.")
            self.ordering = None

    def sub_lists(self, idx_lists: list = None) -> numpy.ndarray:
        """
        Get subspace of tensor by a set of index lists
        according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]

        This essentially is an implementation of a non-contiguous slicing using numpy.take

        Parameters
        ----------
        idx_lists :
            List of lists, each defining the desired subspace per axis
            Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N

        Returns
        -------
        out :
            Sliced tensor as numpy.ndarray
        """
        # robustness: give a clear error instead of len(None) raising TypeError
        if idx_lists is None:
            raise Exception("Need to pass a list of index lists.")
        # Check if index list has correct size
        if len(idx_lists) != self.order:
            raise Exception("Need to pass an index list for each dimension!" +
                            " Length of idx_lists needs to match order of tensor.")

        # Perform slicing via numpy.take
        out = self.elems
        for ax in range(self.order):
            if idx_lists[ax] is not None:  # None means we want the full space in this direction
                out = numpy.take(out, idx_lists[ax], axis=ax)
        return out

    def set_index_lists(self):
        """ Set passive and full index lists based on class inputs """
        tmp_size = self._size_full
        if self._size_full is None:
            tmp_size = self.elems.shape[0]

        self._passive_indices = [i for i in range(tmp_size)
                                 if i not in self.active_indices]
        self._full_indices = [i for i in range(tmp_size)]

    def sub_str(self, name: str) -> numpy.ndarray:
        """
        Get subspace of tensor by a string

        Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.
        Full space in this context may also be smaller than actual tensor dimension.

        The specification of active space in this context only allows to pick a set from a list of orbitals, and
        is not able to resolve an active space from irreducible representations.

        Example for one-body tensor:
        hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]

        Parameters
        ----------
        name :
            String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)

        Returns
        -------
        out :
            Sliced tensor as numpy.ndarray
        """
        if not self._indices_set:
            self.set_index_lists()
            self._indices_set = True

        if name is None:
            raise Exception("No name specified.")
        if len(name) != self.order:
            raise Exception("Name does not match order of the tensor.")
        if self.active_indices is None:
            raise Exception("Need to set an active space in order to call this function.")

        idx_lists = []
        # Parse name as string of space indices
        for char in name:
            if char.lower() == 'a':
                idx_lists.append(self.active_indices)
            elif char.lower() == 'p':
                idx_lists.append(self._passive_indices)
            elif char.lower() == 'f':
                if self._size_full is None:
                    idx_lists.append(None)
                else:
                    idx_lists.append(self._full_indices)
            else:
                raise Exception("Need to specify a valid letter (a,p,f).")

        out = self.sub_lists(idx_lists)
        return out

    def reorder(self, to: str = 'of'):
        """
        Function to reorder tensors according to some convention.

        Parameters
        ----------
        to :
            Ordering scheme of choice.
            'openfermion', 'of' (default) :
                openfermion - ordering, corresponds to integrals of the type
                h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1) (O(1,2)
                with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
                currently needed for dependencies on openfermion-library
            'chem', 'c' :
                quantum chemistry ordering, collect particle terms,
                more convenient for real-space methods
                h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
                This is output by psi4
            'phys', 'p' :
                typical physics ordering, integrals of type
                h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
                with operators a^pq_rs = a^p a^q a_s a_r

        Returns
        -------
        self, with elems reordered in place
        """
        if self.order != 4:
            raise Exception('Reordering currently only implemented for two-body tensors.')

        to = self.Ordering(to)
        if self.ordering == to:
            return self
        elif self.ordering.is_chem():
            if to.is_of():
                self.elems = numpy.einsum("psqr -> pqrs", self.elems, optimize='greedy')
            elif to.is_phys():
                self.elems = numpy.einsum("prqs -> pqrs", self.elems, optimize='greedy')
        elif self.ordering.is_of():
            if to.is_chem():
                self.elems = numpy.einsum("pqrs -> psqr", self.elems, optimize='greedy')
            elif to.is_phys():
                self.elems = numpy.einsum("pqrs -> pqsr", self.elems, optimize='greedy')
        elif self.ordering.is_phys():
            if to.is_chem():
                self.elems = numpy.einsum("pqrs -> prqs", self.elems, optimize='greedy')
            elif to.is_of():
                self.elems = numpy.einsum("pqsr -> pqrs", self.elems, optimize='greedy')

        # bug fix: record the new ordering; the original left self.ordering
        # unchanged, so a second reorder() call mis-transformed the tensor
        self.ordering = to
        return self
class QuantumChemistryBase:
def __init__(self, parameters: ParametersQC,
transformation: typing.Union[str, typing.Callable] = None,
active_orbitals: list = None,
*args,
**kwargs):
self.parameters = parameters
if "molecule" in kwargs:
self.molecule = kwargs["molecule"]
else:
self.molecule = self.make_molecule(*args, **kwargs)
assert (parameters.basis_set.lower() == self.molecule.basis.lower())
assert (parameters.multiplicity == self.molecule.multiplicity)
assert (parameters.charge == self.molecule.charge)
self.active_space = None
if active_orbitals is not None:
self.active_space = self._make_active_space_data(active_orbitals=active_orbitals)
self.transformation = self._initialize_transformation(transformation=transformation, *args, **kwargs)
self._rdm1 = None
self._rdm2 = None
def _initialize_transformation(self, transformation=None, *args, **kwargs):
if transformation is None:
transformation = "JordanWigner"
# filter out arguments to the transformation
trafo_args = {k.split("__")[1]: v for k, v in kwargs.items() if
(hasattr(k, "lower") and "transformation__" in k.lower())}
trafo_args["n_electrons"] = self.n_electrons
trafo_args["n_orbitals"] = self.n_orbitals
if hasattr(transformation, "upper"):
# format to conventions
transformation = transformation.replace("_", "").replace("-", "").upper()
encodings = known_encodings()
if transformation in encodings:
transformation = encodings[transformation](**trafo_args)
else:
raise TequilaException(
"Unkown Fermion-to-Qubit encoding {}. Try something like: {}".format(transformation,
list(encodings.keys())))
return transformation
def _make_active_space_data(self, active_orbitals, reference=None):
"""
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
"""
if active_orbitals is None:
return None
if reference is None:
# auto assignment only for closed-shell
assert (self.n_electrons % 2 == 0)
reference = sorted([i for i in range(self.n_electrons // 2)])
return ActiveSpaceData(active_orbitals=sorted(active_orbitals),
reference_orbitals=sorted(reference))
@classmethod
def from_openfermion(cls, molecule: openfermion.MolecularData,
transformation: typing.Union[str, typing.Callable] = None,
*args,
**kwargs):
"""
Initialize direclty from openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
"""
parameters = ParametersQC(basis_set=molecule.basis, geometry=molecule.geometry,
description=molecule.description, multiplicity=molecule.multiplicity,
charge=molecule.charge)
return cls(parameters=parameters, transformation=transformation, molecule=molecule, *args, **kwargs)
    def make_excitation_generator(self,
                                  indices: typing.Iterable[typing.Tuple[int, int]],
                                  form: str = None,
                                  remove_constant_term: bool = True) -> QubitHamiltonian:
        """
        Notes
        ----------
        Creates the transformed hermitian generator of UCC type unitaries:
              M(a^\\dagger_{a_0} a_{i_0} a^\\dagger{a_1}a_{i_1} ... - h.c.)
              where the qubit map M depends on self.transformation

        Parameters
        ----------
        indices : typing.Iterable[typing.Tuple[int, int]] :
            List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha odd numbers, beta even numbers)
            can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
        form : str : (Default value None):
            Manipulate the generator to involution or projector
            set form='involution' or 'projector'
            the default is no manipulation which gives the standard fermionic excitation operator back
        remove_constant_term: bool: (Default value True):
            by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
            if the unitary is controlled this might not be true!

        Returns
        -------
        type
            1j*Transformed qubit excitation operator, depends on self.transformation
        """
        # BKSF maps edges, not single fermionic modes; general operators are unsupported
        if type(self.transformation).__name__ == "BravyiKitaevFast":
            raise TequilaException(
                "The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet")

        # check indices and convert to list of tuples if necessary
        if len(indices) == 0:
            raise TequilaException("make_excitation_operator: no indices given")
        elif not isinstance(indices[0], typing.Iterable):
            # flat form [a_0, i_0, a_1, i_1, ...] must pair up evenly
            if len(indices) % 2 != 0:
                raise TequilaException("make_excitation_generator: unexpected input format of indices\n"
                                       "use list of tuples as [(a_0, i_0),(a_1, i_1) ...]\n"
                                       "or list as [a_0, i_0, a_1, i_1, ... ]\n"
                                       "you gave: {}".format(indices))
            converted = [(indices[2 * i], indices[2 * i + 1]) for i in range(len(indices) // 2)]
        else:
            converted = indices

        # convert everything to native python int
        # otherwise openfermion will complain
        converted = [(int(pair[0]), int(pair[1])) for pair in converted]

        # convert to openfermion input format: (index, 1) creates, (index, 0) annihilates
        ofi = []
        dag = []
        for pair in converted:
            assert (len(pair) == 2)
            ofi += [(int(pair[0]), 1),
                    (int(pair[1]), 0)]  # openfermion does not take other types of integers like numpy.int64
            dag += [(int(pair[0]), 0), (int(pair[1]), 1)]

        # generator = i * (T - T^dagger), which is hermitian
        op = openfermion.FermionOperator(tuple(ofi), 1.j)  # 1j makes it hermitian
        op += openfermion.FermionOperator(tuple(reversed(dag)), -1.j)

        if isinstance(form, str) and form.lower() != 'fermionic':
            # indices for all the Na operators
            Na = [x for pair in converted for x in [(pair[0], 1), (pair[0], 0)]]
            # indices for all the Ma operators (Ma = 1 - Na)
            Ma = [x for pair in converted for x in [(pair[0], 0), (pair[0], 1)]]
            # indices for all the Ni operators
            Ni = [x for pair in converted for x in [(pair[1], 1), (pair[1], 0)]]
            # indices for all the Mi operators
            Mi = [x for pair in converted for x in [(pair[1], 0), (pair[1], 1)]]

            # can gaussianize as projector or as involution (last is default)
            if form.lower() == "p+":
                op *= 0.5
                op += openfermion.FermionOperator(Na + Mi, 0.5)
                op += openfermion.FermionOperator(Ni + Ma, 0.5)
            elif form.lower() == "p-":
                op *= 0.5
                op += openfermion.FermionOperator(Na + Mi, -0.5)
                op += openfermion.FermionOperator(Ni + Ma, -0.5)
            elif form.lower() == "g+":
                op += openfermion.FermionOperator([], 1.0)  # Just for clarity will be subtracted anyway
                op += openfermion.FermionOperator(Na + Mi, -1.0)
                op += openfermion.FermionOperator(Ni + Ma, -1.0)
            elif form.lower() == "g-":
                op += openfermion.FermionOperator([], -1.0)  # Just for clarity will be subtracted anyway
                op += openfermion.FermionOperator(Na + Mi, 1.0)
                op += openfermion.FermionOperator(Ni + Ma, 1.0)
            elif form.lower() == "p0":
                # P0: we only construct P0 and don't keep the original generator
                op = openfermion.FermionOperator([], 1.0)  # Just for clarity will be subtracted anyway
                op += openfermion.FermionOperator(Na + Mi, -1.0)
                op += openfermion.FermionOperator(Ni + Ma, -1.0)
            else:
                raise TequilaException(
                    "Unknown generator form {}, supported are G, P+, P-, G+, G- and P0".format(form))

        # map the fermionic generator to a qubit operator
        qop = self.transformation(op)

        # remove constant terms
        # they have no effect in the unitary (if not controlled)
        if remove_constant_term:
            qop.qubit_operator.terms[tuple()] = 0.0

        # check if the operator is hermitian and cast coefficients to floats
        # in order to avoid trouble with the simulation backends
        assert qop.is_hermitian()
        for k, v in qop.qubit_operator.terms.items():
            qop.qubit_operator.terms[k] = to_float(v)

        qop = qop.simplify()

        # e.g. number operators under some encodings can transform to a constant
        if len(qop) == 0:
            warnings.warn("Excitation generator is a unit operator.\n"
                          "Non-standard transformations might not work with general fermionic operators\n"
                          "indices = " + str(indices), category=TequilaWarning)
        return qop
def make_hardcore_boson_excitation_gate(self, indices, angle, control=None, assume_real=True, compile_options="optimize"):
target = []
for pair in indices:
assert len(pair) == 2
target += [pair[0], pair[1]]
consistency = [x < self.n_orbitals for x in target]
if not all(consistency):
raise TequilaException(
"make_hardcore_boson_excitation_gate: Inconsistencies in indices={}. Should be indexed from 0 ... n_orbitals={}".format(
indices, self.n_orbitals))
return gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, control=control, compile_options=compile_options)
def make_excitation_gate(self, indices, angle, control=None, assume_real=True, **kwargs):
"""
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\\prod_{k} a_{p_k}^\\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
"""
generator = self.make_excitation_generator(indices=indices, remove_constant_term=control is None)
p0 = self.make_excitation_generator(indices=indices, form="P0", remove_constant_term=control is None)
return QCircuit.wrap_gate(
FermionicGateImpl(angle=angle, generator=generator, p0=p0, transformation=type(self.transformation).__name__.lower(), assume_real=assume_real, control=control, **kwargs))
def make_molecule(self, *args, **kwargs) -> MolecularData:
"""Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
"""
molecule = MolecularData(**self.parameters.molecular_data_param)
# try to load
do_compute = True
try:
import os
if os.path.exists(self.parameters.filename):
molecule.load()
do_compute = False
except OSError:
do_compute = True
if do_compute:
molecule = self.do_make_molecule(*args, **kwargs)
molecule.save()
return molecule
def do_make_molecule(self, *args, **kwargs):
"""
Parameters
----------
args
kwargs
Returns
-------
"""
# integrals need to be passed in base class
assert ("one_body_integrals" in kwargs)
assert ("two_body_integrals" in kwargs)
one_body_integrals = kwargs["one_body_integrals"]
two_body_integrals = kwargs["two_body_integrals"]
# tequila assumes "openfermion" ordering, integrals can however be passed
# down in other orderings, but it needs to be indicated by keyword
if "ordering" in kwargs:
two_body_integrals = NBodyTensor(two_body_integrals, ordering=kwargs["ordering"])
two_body_integrals.reorder(to="openfermion")
two_body_integrals = two_body_integrals.elems
if "nuclear_repulsion" in kwargs:
nuclear_repulsion = kwargs["nuclear_repulsion"]
else:
nuclear_repulsion = 0.0
warnings.warn("No nuclear_repulsion given for custom molecule, setting to zero", category=TequilaWarning)
if ("n_orbitals" in kwargs):
n_orbitals = kwargs["n_orbitals"]
else:
n_orbitals = one_body_integrals.shape[0]
for i in [0, 1, 2, 3]:
assert n_orbitals == two_body_integrals.shape[i]
molecule = MolecularData(**self.parameters.molecular_data_param)
molecule.one_body_integrals = one_body_integrals
molecule.two_body_integrals = two_body_integrals
molecule.nuclear_repulsion = nuclear_repulsion
molecule.n_orbitals = n_orbitals
if "n_electrons" in kwargs:
molecule.n_electrons = kwargs["n_electrons"]
molecule.save()
return molecule
@property
def n_orbitals(self) -> int:
    """Number of spatial orbitals; restricted to the active orbitals if an active space is set."""
    active = self.active_space
    if active is not None:
        return len(active.active_orbitals)
    return self.molecule.n_orbitals
@property
def n_electrons(self) -> int:
    """Number of electrons; 2x the active reference orbitals if an active space is set."""
    active = self.active_space
    if active is not None:
        return 2 * len(active.active_reference_orbitals)
    return self.molecule.n_electrons
def make_hamiltonian(self, occupied_indices=None, active_indices=None, threshold=1.e-8) -> QubitHamiltonian:
    """
    Construct the qubit Hamiltonian of this molecule in the chosen qubit encoding.

    Parameters
    ----------
    occupied_indices :
        frozen (doubly occupied) orbitals to integrate out; defaults to the
        frozen reference orbitals of the active space, if one is set
    active_indices :
        orbitals kept in the Hamiltonian; defaults to the active orbitals
        of the active space, if one is set
    threshold :
        NOTE(review): currently unused in this method — confirm intent

    Returns
    -------
    QubitHamiltonian
        qubit-encoded molecular Hamiltonian
    """
    if occupied_indices is None and self.active_space is not None:
        occupied_indices = self.active_space.frozen_reference_orbitals
    if active_indices is None and self.active_space is not None:
        active_indices = self.active_space.active_orbitals

    fop = openfermion.transforms.get_fermion_operator(
        self.molecule.get_molecular_hamiltonian(occupied_indices, active_indices))
    try:
        qop = self.transformation(fop)
    except TypeError:
        # some encodings only accept an InteractionOperator instead of a FermionOperator
        qop = self.transformation(openfermion.transforms.get_interaction_operator(fop))
    # NOTE(review): the boolean result of is_hermitian() is discarded here —
    # presumably meant as a sanity check; confirm whether it should be asserted
    qop.is_hermitian()
    return qop
def make_hardcore_boson_hamiltonian(self):
    """
    Construct the hardcore-boson (paired-electron) Hamiltonian of this molecule.

    Each spatial orbital pair is represented by a single qubit; the Hamiltonian
    is assembled from the spin-free integrals using Sm/Sp operators on the
    first n_orbitals qubits.

    Returns
    -------
    QubitHamiltonian acting on the first n_orbitals qubits
    """
    if not self.transformation.up_then_down:
        warnings.warn(
            "Hardcore-Boson Hamiltonian without reordering will result in non-consecutive Hamiltonians that are eventually not be combinable with other features of tequila. Try transformation=\'ReorderedJordanWigner\' or similar for more consistency",
            TequilaWarning)

    # integrate with QubitEncoding at some point
    n_orbitals = self.n_orbitals
    c, obt, tbt = self.get_integrals()
    # h: pair-hopping coefficients, g: pair-pair interaction coefficients
    h = numpy.zeros(shape=[n_orbitals] * 2)
    g = numpy.zeros(shape=[n_orbitals] * 2)
    for p in range(n_orbitals):
        h[p, p] += 2 * obt[p, p]
        for q in range(n_orbitals):
            h[p, q] += + tbt[p, p, q, q]
            if p != q:
                g[p, q] += 2 * tbt[p, q, q, p] - tbt[p, q, p, q]

    # assemble the operator: constant + hopping + interaction terms
    H = c
    for p in range(n_orbitals):
        for q in range(n_orbitals):
            up = p
            uq = q
            H += h[p, q] * Sm(up) * Sp(uq) + g[p, q] * Sm(up) * Sp(up) * Sm(uq) * Sp(uq)
    return H
def make_molecular_hamiltonian(self):
    """Return the openfermion molecular Hamiltonian, restricted to the active space if one is set."""
    if not self.active_space:
        return self.molecule.get_molecular_hamiltonian()
    return self.molecule.get_molecular_hamiltonian(
        occupied_indices=self.active_space.frozen_reference_orbitals,
        active_indices=self.active_space.active_orbitals)
def get_integrals(self, two_body_ordering="openfermion"):
    """
    Return the molecular integrals of this molecule.

    Parameters
    ----------
    two_body_ordering :
        index convention for the returned two-body integrals
        (they are stored in "openfermion" ordering and reordered on request)

    Returns
    -------
    Tuple with:
    constant part (nuclear_repulsion + possible integrated parts from active-spaces)
    one_body_integrals
    two_body_integrals
    """
    if self.active_space is not None and len(self.active_space.frozen_reference_orbitals) > 0:
        # frozen-core contributions are folded into the constant part c
        c, h1, h2 = self.molecule.get_active_space_integrals(active_indices=self.active_space.active_orbitals,
                                                            occupied_indices=self.active_space.frozen_reference_orbitals)
    else:
        c = 0.0
        h1 = self.molecule.one_body_integrals
        h2 = self.molecule.two_body_integrals

    c += self.molecule.nuclear_repulsion
    h2 = NBodyTensor(h2, ordering="openfermion")
    h2 = h2.reorder(to=two_body_ordering).elems

    return c, h1, h2
def compute_one_body_integrals(self):
    """Convenience wrapper: one-body integrals from get_integrals()."""
    _, h1, _ = self.get_integrals()
    return h1
def compute_two_body_integrals(self, two_body_ordering="openfermion"):
    """Convenience wrapper: two-body integrals from get_integrals() in the requested ordering."""
    _, _, h2 = self.get_integrals(two_body_ordering=two_body_ordering)
    return h2
def compute_constant_part(self):
    """Constant part of the Hamiltonian (nuclear repulsion plus frozen-core contributions)."""
    constant, _, _ = self.get_integrals()
    return constant
def compute_ccsd_amplitudes(self) -> ClosedShellAmplitudes:
    """
    Compute closed-shell CCSD amplitudes.

    Raises
    ------
    TequilaException
        always, in this base class: chemistry backends must override this method
    """
    # consistency: sibling stub compute_amplitudes raises TequilaException as well
    # (previously a bare Exception("BaseClass Method"); TequilaException subclasses
    # Exception, so existing handlers keep working)
    raise TequilaException("compute_ccsd_amplitudes: Needs to be overwritten by backend")
def prepare_reference(self, state=None, *args, **kwargs):
    """
    Prepare the reference state (closed-shell Hartree-Fock by default).

    Parameters
    ----------
    state :
        occupation-number vector of length 2*n_orbitals; if None the first
        n_electrons spin-orbitals are filled (requires an even electron count)

    Returns
    -------
    A tequila circuit object which prepares the reference of this molecule in the chosen transformation
    """
    if state is None:
        assert self.n_electrons %2 == 0
        state = [0]*(self.n_orbitals*2)
        # fill the first n_electrons spin-orbitals
        for i in range(self.n_electrons):
            state[i]=1
    # map the occupation vector through the qubit encoding before preparation
    reference_state = BitString.from_array(self.transformation.map_state(state=state))
    U = prepare_product_state(reference_state)
    # prevent trace out in direct wfn simulation
    U.n_qubits = self.n_orbitals*2 # adapt when tapered transformations work
    return U
def prepare_hardcore_boson_reference(self):
    """
    Prepare the HF reference in the hardcore-boson representation.

    Returns
    -------
    QCircuit flipping one qubit per occupied orbital pair (first n_electrons/2 qubits)
    """
    # HF state in the HCB representation (paired electrons)
    U = gates.X(target=[i for i in range(self.n_electrons // 2)])
    # fix the qubit count to n_orbitals (cf. prepare_reference: prevents trace-out)
    U.n_qubits = self.n_orbitals
    return U
def hcb_to_me(self, U=None):
    """
    Transform a circuit in the hardcore-boson encoding (HCB)
    to the encoding of this molecule
    HCB is supposed to be encoded on the first n_orbitals qubits
    Parameters
    ----------
    U: HCB circuit (using the alpha qubits)
    Returns
    -------
    the circuit mapped onto the alpha qubits of this encoding, followed by the
    encoding-specific HCB-to-fermion transfer circuit

    Raises
    ------
    TequilaException
        if the active transformation provides no hcb_to_me circuit
    """
    if U is None:
        U = QCircuit()

    # consistency check: an HCB circuit may only act on the first n_orbitals qubits
    consistency = [x < self.n_orbitals for x in U.qubits]
    if not all(consistency):
        warnings.warn(
            "hcb_to_me: given circuit is not defined on the first {} qubits. Is this a HCB circuit?".format(
                self.n_orbitals))

    # map to alpha qubits
    alpha_map = {k: self.transformation.up(k) for k in range(self.n_orbitals)}
    alpha_U = U.map_qubits(qubit_map=alpha_map)

    UX = self.transformation.hcb_to_me()
    if UX is None:
        raise TequilaException(
            "transformation={} has no hcb_to_me function implemented".format(self.transformation))
    return alpha_U + UX
def get_pair_specific_indices(self,
                              pair_info: str = None,
                              include_singles: bool = True,
                              general_excitations: bool = True) -> list:
    """
    Assuming a pair-specific model, create a pair-specific index list
    to be used in make_upccgsd_ansatz(indices = ... )
    Excite from a set of references (i) to any pair coming from (i),
    i.e. any (i,j)/(j,i). If general excitations are allowed, also
    allow excitations from pairs to appendant pairs and reference.

    Parameters
    ----------
    pair_info
        file or list including information about pair structure
        references single number, pair double
        example: as file: "0,1,11,11,00,10" (hand over file name)
        in file, skip first row assuming some text with information
        as list: ['0','1','11','11','00','10']
        ~> two reference orbitals 0 and 1,
        then two orbitals from pair 11, one from 00, one mixed 10
    include_singles
        include single excitations
    general_excitations
        allow general excitations

    Returns
    -------
    list of indices with pair-specific ansatz
    """
    if pair_info is None:
        raise TequilaException("Need to provide some pair information.")
    # If pair-information given on file, load (layout see above)
    if isinstance(pair_info, str):
        pairs = numpy.loadtxt(pair_info, dtype=str, delimiter=",", skiprows=1)
    elif isinstance(pair_info, list):
        pairs = pair_info
    else:
        raise TequilaException("Pair information needs to be contained in a list or filename.")

    # independent sub-lists; the previous `[[]] * len(pairs)` shared one list object
    # between all entries, which is fragile even though every entry was reassigned
    connect = [[] for _ in pairs]
    # determine "connectivity"
    for idx, p in enumerate(pairs):
        if len(p) == 1:
            # reference orbital: connect to every pair containing it
            connect[idx] = [i for i in range(len(pairs))
                            if ((len(pairs[i]) == 2) and (str(idx) in pairs[i]))]
        elif (len(p) == 2) and general_excitations:
            # pair orbital: connect to appendant pairs and references (excluding itself)
            connect[idx] = [i for i in range(len(pairs))
                            if (((p[0] in pairs[i]) or (p[1] in pairs[i]) or str(i) in p)
                                and not (i == idx))]
        elif len(p) > 2:
            raise TequilaException("Invalid reference of pair id.")

    # create generating indices from connectivity
    indices = []
    for i, to in enumerate(connect):
        for a in to:
            # paired double excitation (alpha and beta together)
            indices.append(((2 * i, 2 * a), (2 * i + 1, 2 * a + 1)))
            if include_singles:
                # single excitations for alpha and beta separately
                indices.append((2 * i, 2 * a))
                indices.append((2 * i + 1, 2 * a + 1))
    return indices
def format_excitation_indices(self, idx):
    """
    Bring excitation indices into canonical form.

    idx = [(p0,q0),(p1,q1),...,(pn,qn)]
    sorted as: p0<p1<pn and pi<qi

    :param idx: list of index tuples describing a single(!) fermionic excitation
    :return: tuple-list of index tuples
    """
    ordered_pairs = [tuple(sorted(pair)) for pair in idx]
    ordered_pairs.sort(key=lambda pair: pair[0])
    return tuple(ordered_pairs)
def make_upccgsd_indices(self, key, reference_orbitals=None, *args, **kwargs):
    """Build the pair-excitation index lists for UpCCGSD-type ansaetze from a recipe string."""
    if reference_orbitals is None:
        reference_orbitals = list(range(self.n_electrons // 2))

    if not hasattr(key, "lower"):
        raise TequilaException("Unknown recipe: {}".format(key))

    recipe = key.lower()
    n = self.n_orbitals
    if recipe == "ladder":
        # ladder structure of the pair excitations: nearest neighbours only,
        # ensures local connectivity
        raw = [[(p, p + 1)] for p in range(n - 1)]
    elif "g" in recipe:
        # generalized: all orbital pairs
        raw = [[(p, q)] for p in range(n) for q in range(n) if p < q]
    else:
        # non-generalized: reference (occupied) -> virtual only
        raw = [[(p, q)] for p in reference_orbitals for q in range(n)
               if p < q and q not in reference_orbitals]

    return [self.format_excitation_indices(pair) for pair in raw]
def make_hardcore_boson_upccgd_layer(self,
                                     indices: list = "UpCCGD",
                                     label: str = None,
                                     assume_real: bool = True,
                                     *args, **kwargs):
    """
    One layer of paired double excitations in the hardcore-boson representation.

    Parameters
    ----------
    indices :
        list of index tuples, or a recipe string understood by make_upccgsd_indices
    label :
        optional extra label for the variable names
    assume_real :
        assume a real wavefunction (reduces gradient cost)

    Returns
    -------
    QCircuit with one hardcore-boson excitation gate per index
    """
    if hasattr(indices, "lower"):
        # a string is interpreted as a recipe name
        indices = self.make_upccgsd_indices(key=indices.lower())

    UD = QCircuit()
    for idx in indices:
        UD += self.make_hardcore_boson_excitation_gate(indices=idx, angle=(idx, "D", label),
                                                       assume_real=assume_real)
    return UD
def make_ansatz(self, name: str, *args, **kwargs):
    """Dispatch ansatz construction by name; '+'-separated names are concatenated circuits."""
    name = name.lower()

    if name.strip() == "":
        return QCircuit()

    if "+" in name:
        parts = name.split("+")
        # first part keeps the caller's reference/hcb settings
        circuit = self.make_ansatz(name=parts[0], *args, **kwargs)
        # subsequent parts must not re-prepare the reference
        kwargs.pop("include_reference", None)
        kwargs.pop("hcb_optimization", None)
        for part in parts[1:]:
            circuit += self.make_ansatz(name=part, *args, include_reference=False,
                                        hcb_optimization=False, **kwargs)
        return circuit

    if name == "uccsd":
        return self.make_uccsd_ansatz(*args, **kwargs)
    if "d" in name or "s" in name:
        return self.make_upccgsd_ansatz(name=name, *args, **kwargs)
    raise TequilaException("unknown ansatz with name={}".format(name))
def make_upccgsd_ansatz(self,
                        include_reference: bool = True,
                        name: str = "UpCCGSD",
                        label: str = None,
                        order: int = None,
                        assume_real: bool = True,
                        hcb_optimization: bool = None,
                        spin_adapt_singles: bool = True,
                        neglect_z=False,
                        *args, **kwargs):
    """
    UpGCCSD Ansatz similar as described by Lee et. al.

    Parameters
    ----------
    include_reference
        include the HF reference state as initial state
    name
        recipe string, e.g. "UpCCGSD", "UpCCGD", "HCB-UpCCGD", "2-UpCCGSD";
        an integer prefix "k-" sets the order, "S" adds singles, "G" makes
        the doubles generalized, "HCB" keeps the hardcore-boson encoding
    label
        An additional label that is set with the variables
        default is None and no label will be set: variables names will be
        (x, (p,q)) for x in range(order)
        with a label the variables will be named
        (label, (x, (p,q)))
    order
        Order of the ansatz (default is 1)
        determines how often the ordering gets repeated
        parameters of repeating layers are independent
    assume_real
        assume a real wavefunction (that is always the case if the reference state is real)
        reduces potential gradient costs from 4 to 2
    hcb_optimization
        build the pair doubles in the hardcore-boson representation first and
        map back; auto-enabled if the encoding supports it or "HCB" is in name
    spin_adapt_singles
        use one shared angle per spatial single excitation
    neglect_z
        drop Z-strings in the singles (JW-type encodings only)

    Returns
    -------
    UpGCCSD ansatz
    """
    name = name.upper()

    # NOTE(review): neglect_z defaults to False (not None), so this condition can
    # never be True and the else-branch overwrites any user-passed neglect_z=True.
    # Confirm whether the default was meant to be None.
    if ("A" in name) and neglect_z is None:
        neglect_z = True
    else:
        neglect_z = False

    if order is None:
        try:
            # allow an integer prefix like "2-UpCCGSD" to set the order
            if "-" in name:
                order = int(name.split("-")[0])
            else:
                order = 1
        except:
            order = 1

    indices = self.make_upccgsd_indices(key=name)

    # check if the used qubit encoding has a hcb transformation
    have_hcb_trafo = self.transformation.hcb_to_me() is not None

    # consistency checks for optimization
    if have_hcb_trafo and hcb_optimization is None:
        hcb_optimization = True
    if "HCB" in name:
        hcb_optimization = True
    if hcb_optimization and not have_hcb_trafo and "HCB" not in name:
        raise TequilaException(
            "use_hcb={} but transformation={} has no \'hcb_to_me\' function. Try transformation=\'ReorderedJordanWigner\'".format(
                hcb_optimization, self.transformation))
    if "S" in name and "HCB" in name:
        if "HCB" in name and "S" in name:
            raise Exception(
                "name={}, Singles can't be realized without mapping back to the standard encoding leave S or HCB out of the name".format(
                    name))

    # first layer
    if not hcb_optimization:
        # plain construction in the molecule's own encoding
        U = QCircuit()
        if include_reference:
            U = self.prepare_reference()
        U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, assume_real=assume_real,
                                     label=(label, 0), spin_adapt_singles=spin_adapt_singles, *args, **kwargs)
    else:
        # build pair doubles in the cheaper HCB representation, then map back
        U = QCircuit()
        if include_reference:
            U = self.prepare_hardcore_boson_reference()
        U += self.make_hardcore_boson_upccgd_layer(indices=indices, assume_real=assume_real, label=(label, 0),
                                                   *args, **kwargs)
        if "HCB" not in name:
            U = self.hcb_to_me(U=U)
        if "S" in name:
            U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=(label, 0),
                                           spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z, *args,
                                           **kwargs)

    # remaining layers carry independent parameters via the (label, k) tag
    for k in range(1, order):
        U += self.make_upccgsd_layer(include_singles="S" in name, indices=indices, label=(label, k),
                                     spin_adapt_singles=spin_adapt_singles, neglect_z=neglect_z)

    return U
def make_upccgsd_layer(self, indices, include_singles=True, include_doubles=True, assume_real=True, label=None,
                       spin_adapt_singles: bool = True, angle_transform=None, mix_sd=False, neglect_z=False,
                       *args, **kwargs):
    """
    One UpCCGSD layer: a paired double excitation per index, plus optional singles.

    Parameters
    ----------
    indices :
        list of formatted index tuples [((p,q),), ...] (cf. make_upccgsd_indices)
    include_singles, include_doubles :
        toggle the singles / doubles part of the layer
    mix_sd :
        if True, interleave each index's singles directly after its double;
        otherwise all singles follow as one block after the doubles
    neglect_z, spin_adapt_singles, angle_transform :
        forwarded to make_upccgsd_singles

    Returns
    -------
    QCircuit for the layer
    """
    U = QCircuit()
    for idx in indices:
        assert len(idx) == 1
        idx = idx[0]
        angle = (tuple([idx]), "D", label)
        if include_doubles:
            if "jordanwigner" in self.transformation.name.lower() and not self.transformation.up_then_down:
                # we can optimize with qubit excitations for the JW representation
                target = [self.transformation.up(idx[0]), self.transformation.up(idx[1]),
                          self.transformation.down(idx[0]), self.transformation.down(idx[1])]
                U += gates.QubitExcitation(angle=angle, target=target, assume_real=assume_real, **kwargs)
            else:
                U += self.make_excitation_gate(angle=angle,
                                               indices=((2 * idx[0], 2 * idx[1]), (2 * idx[0] + 1, 2 * idx[1] + 1)),
                                               assume_real=assume_real, **kwargs)
        if include_singles and mix_sd:
            # NOTE(review): indices=[idx] hands the bare (p,q) tuple to
            # make_upccgsd_singles, whose `assert len(idx) == 1` expects the
            # nested [((p,q),)] form — confirm the mix_sd=True path is exercised
            U += self.make_upccgsd_singles(indices=[idx], assume_real=assume_real, label=label,
                                           spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform,
                                           neglect_z=neglect_z)

    if include_singles and not mix_sd:
        # all singles as one block after the doubles
        U += self.make_upccgsd_singles(indices=indices, assume_real=assume_real, label=label,
                                       spin_adapt_singles=spin_adapt_singles, angle_transform=angle_transform,
                                       neglect_z=neglect_z)
    return U
def make_upccgsd_singles(self, indices="UpCCGSD", spin_adapt_singles=True, label=None, angle_transform=None,
                         assume_real=True, neglect_z=False, *args, **kwargs):
    """
    Single-excitation part of a UpCCGSD-type ansatz.

    Parameters
    ----------
    indices :
        list of formatted index tuples [((p,q),), ...], or a recipe string
        for make_upccgsd_indices
    spin_adapt_singles :
        if True use one shared angle ("S") per spatial excitation; otherwise
        independent angles for alpha ("SU") and beta ("SD")
    label :
        optional extra label for the variable names
    angle_transform :
        optional callable applied to each angle/variable before use
    assume_real :
        assume a real wavefunction (reduces gradient cost)
    neglect_z :
        drop the Z-strings of the fermionic excitation and use plain qubit
        excitations; only valid for (Reordered)JordanWigner encodings

    Returns
    -------
    QCircuit with the single excitations
    """
    if neglect_z and "jordanwigner" not in self.transformation.name.lower():
        raise TequilaException(
            "neglegt-z approximation in UpCCGSD singles needs the (Reversed)JordanWigner representation")
    if hasattr(indices, "lower"):
        indices = self.make_upccgsd_indices(key=indices)

    U = QCircuit()
    for idx in indices:
        assert len(idx) == 1
        idx = idx[0]
        if spin_adapt_singles:
            angle = (idx, "S", label)
            if angle_transform is not None:
                angle = angle_transform(angle)
            if neglect_z:
                targeta = [self.transformation.up(idx[0]), self.transformation.up(idx[1])]
                targetb = [self.transformation.down(idx[0]), self.transformation.down(idx[1])]
                U += gates.QubitExcitation(angle=angle, target=targeta, assume_real=assume_real, **kwargs)
                U += gates.QubitExcitation(angle=angle, target=targetb, assume_real=assume_real, **kwargs)
            else:
                U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0], 2 * idx[1])],
                                               assume_real=assume_real, **kwargs)
                U += self.make_excitation_gate(angle=angle, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
                                               assume_real=assume_real, **kwargs)
        else:
            angle1 = (idx, "SU", label)
            angle2 = (idx, "SD", label)
            if angle_transform is not None:
                angle1 = angle_transform(angle1)
                angle2 = angle_transform(angle2)
            if neglect_z:
                targeta = [self.transformation.up(idx[0]), self.transformation.up(idx[1])]
                targetb = [self.transformation.down(idx[0]), self.transformation.down(idx[1])]
                # bugfix: these two calls previously used *kwargs (positional unpack
                # of the dict keys) instead of **kwargs
                U += gates.QubitExcitation(angle=angle1, target=targeta, assume_real=assume_real, **kwargs)
                U += gates.QubitExcitation(angle=angle2, target=targetb, assume_real=assume_real, **kwargs)
            else:
                U += self.make_excitation_gate(angle=angle1, indices=[(2 * idx[0], 2 * idx[1])],
                                               assume_real=assume_real, **kwargs)
                U += self.make_excitation_gate(angle=angle2, indices=[(2 * idx[0] + 1, 2 * idx[1] + 1)],
                                               assume_real=assume_real, **kwargs)
    return U
def make_uccsd_ansatz(self, trotter_steps: int = 1,
                      initial_amplitudes: typing.Union[str, Amplitudes, ClosedShellAmplitudes] = "mp2",
                      include_reference_ansatz=True,
                      parametrized=True,
                      threshold=1.e-8,
                      add_singles=None,
                      *args, **kwargs) -> QCircuit:
    """
    Assemble a (trotterized) UCCSD circuit for a closed-shell reference.

    Parameters
    ----------
    initial_amplitudes :
        initial amplitudes given as ManyBodyAmplitudes structure or as string
        where 'mp2', 'cc2' or 'ccsd' are possible initializations
        (Default value = "mp2")
    include_reference_ansatz :
        Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
    parametrized :
        Initialize with variables, otherwise with static numbers (Default value = True)
    trotter_steps: int :
        number of Trotter steps; each amplitude is divided evenly over the steps
    threshold :
        amplitudes with absolute value at or below this threshold are dropped
    add_singles :
        add single excitations; defaults to True for 'mp2' initialization
        (MP2 itself has no singles) and when no amplitudes are given

    Returns
    -------
    type
        Parametrized QCircuit
    """
    if hasattr(initial_amplitudes, "lower"):
        if initial_amplitudes.lower() == "mp2" and add_singles is None:
            add_singles = True
    elif initial_amplitudes is not None and add_singles is not None:
        warnings.warn("make_uccsd_anstatz: add_singles has no effect when explicit amplitudes are passed down",
                      TequilaWarning)
    elif add_singles is None:
        add_singles = True

    if self.n_electrons % 2 != 0:
        raise TequilaException("make_uccsd_ansatz currently only for closed shell systems")

    nocc = self.n_electrons // 2
    nvirt = self.n_orbitals - nocc

    Uref = QCircuit()
    if include_reference_ansatz:
        Uref = self.prepare_reference()

    # resolve string initializations to computed amplitudes
    amplitudes = initial_amplitudes
    if hasattr(initial_amplitudes, "lower"):
        if initial_amplitudes.lower() == "mp2":
            amplitudes = self.compute_mp2_amplitudes()
        elif initial_amplitudes.lower() == "ccsd":
            amplitudes = self.compute_ccsd_amplitudes()
        else:
            try:
                amplitudes = self.compute_amplitudes(method=initial_amplitudes.lower())
            except Exception as exc:
                raise TequilaException(
                    "{}\nDon't know how to initialize \'{}\' amplitudes".format(exc, initial_amplitudes))

    if amplitudes is None:
        # no amplitudes at all: start from zero doubles (and singles if requested)
        tia = None
        if add_singles: tia = numpy.zeros(shape=[nocc, nvirt])
        amplitudes = ClosedShellAmplitudes(
            tIjAb=numpy.zeros(shape=[nocc, nocc, nvirt, nvirt]),
            tIA=tia)

    closed_shell = isinstance(amplitudes, ClosedShellAmplitudes)
    indices = {}

    if not isinstance(amplitudes, dict):
        amplitudes = amplitudes.make_parameter_dictionary(threshold=threshold)
        # largest amplitudes first
        amplitudes = dict(sorted(amplitudes.items(), key=lambda x: numpy.fabs(x[1]), reverse=True))
    for key, t in amplitudes.items():
        assert (len(key) % 2 == 0)
        if not numpy.isclose(t, 0.0, atol=threshold):
            if closed_shell:
                if len(key) == 2 and add_singles:
                    # singles: shared angle for the alpha and beta excitation
                    angle = 2.0 * t
                    if parametrized:
                        angle = 2.0 * Variable(name=key)
                    idx_a = (2 * key[0], 2 * key[1])
                    idx_b = (2 * key[0] + 1, 2 * key[1] + 1)
                    indices[idx_a] = angle
                    indices[idx_b] = angle
                else:
                    # doubles: abab always; aaaa/bbbb only for genuine double
                    # excitations (spin-adapted combination with the exchange partner)
                    assert len(key) == 4
                    angle = 2.0 * t
                    if parametrized:
                        angle = 2.0 * Variable(name=key)
                    idx_abab = (2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2], 2 * key[3])
                    indices[idx_abab] = angle
                    if key[0] != key[2] and key[1] != key[3]:
                        idx_aaaa = (2 * key[0], 2 * key[1], 2 * key[2], 2 * key[3])
                        idx_bbbb = (2 * key[0] + 1, 2 * key[1] + 1, 2 * key[2] + 1, 2 * key[3] + 1)
                        partner = tuple([key[2], key[1], key[0], key[3]])
                        anglex = 2.0 * (t - amplitudes[partner])
                        if parametrized:
                            anglex = 2.0 * (Variable(name=key) - Variable(partner))
                        indices[idx_aaaa] = anglex
                        indices[idx_bbbb] = anglex
            else:
                raise Exception("only closed-shell supported, please assemble yourself .... sorry :-)")

    UCCSD = QCircuit()
    factor = 1.0 / trotter_steps
    for step in range(trotter_steps):
        for idx, angle in indices.items():
            UCCSD += self.make_excitation_gate(indices=idx, angle=factor * angle)
    if hasattr(initial_amplitudes, "lower") and initial_amplitudes.lower() == "mp2" and parametrized and add_singles:
        # mp2 has no singles, need to initialize them here (if not parametrized initializling as 0.0 makes no sense though)
        UCCSD += self.make_upccgsd_layer(indices="upccsd", include_singles=True, include_doubles=False)

    return Uref + UCCSD
def compute_amplitudes(self, method: str, *args, **kwargs):
    """
    Compute closed-shell coupled-cluster amplitudes.

    This base implementation always raises: concrete chemistry backends
    override it. Success may depend on the backend; supported method names
    are e.g. cc2, ccsd, cc3, ccsd(t). MP2 has its own dedicated method
    (compute_mp2_amplitudes).

    Parameters
    ----------
    method : str
        name of the coupled-cluster method
    *args :
    **kwargs :

    Raises
    ------
    TequilaException
        always, in this base class
    """
    raise TequilaException("compute amplitudes: Needs to be overwritten by backend")
def compute_mp2_amplitudes(self) -> ClosedShellAmplitudes:
    """
    Compute closed-shell mp2 amplitudes

    .. math::
        t(a,i,b,j) = g(a,i,b,j)/(e(i) + e(j) - e(a) - e(b))

    As a side effect the MP2 total energy is stored on self.molecule.mp2_energy.

    Returns
    -------
    ClosedShellAmplitudes carrying the doubles amplitudes tIjAb
    """
    g = self.molecule.two_body_integrals
    fij = self.molecule.orbital_energies
    nocc = self.molecule.n_electrons // 2  # this is never the active space
    ei = fij[:nocc]    # occupied orbital energies
    ai = fij[nocc:]    # virtual orbital energies
    abgij = g[nocc:, nocc:, :nocc, :nocc]
    # denominator e(i) + e(j) - e(a) - e(b) built by broadcasting
    amplitudes = abgij * 1.0 / (
            ei.reshape(1, 1, -1, 1) + ei.reshape(1, 1, 1, -1) - ai.reshape(-1, 1, 1, 1) - ai.reshape(1, -1, 1, 1))
    # MP2 correlation energy: 2*sum(t*g) - exchange contraction
    E = 2.0 * numpy.einsum('abij,abij->', amplitudes, abgij) - numpy.einsum('abji,abij', amplitudes, abgij,
                                                                            optimize='greedy')

    self.molecule.mp2_energy = E + self.molecule.hf_energy
    return ClosedShellAmplitudes(tIjAb=numpy.einsum('abij -> ijab', amplitudes, optimize='greedy'))
def compute_cis_amplitudes(self):
    """
    Compute the CIS amplitudes of the molecule

    Returns
    -------
    ResultCIS with excitation energies (omegas) and the corresponding
    singles amplitudes, indexable and iterable as (omega, amplitudes) pairs
    """

    @dataclass
    class ResultCIS:
        """ """
        omegas: typing.List[numbers.Real]  # excitation energies [omega0, ...]
        amplitudes: typing.List[ClosedShellAmplitudes]  # corresponding amplitudes [x_{ai}_0, ...]

        def __getitem__(self, item):
            return (self.omegas[item], self.amplitudes[item])

        def __len__(self):
            return len(self.omegas)

    g = self.molecule.two_body_integrals
    fij = self.molecule.orbital_energies

    nocc = self.n_alpha_electrons
    nvirt = self.n_orbitals - nocc

    # enumerate all occupied->virtual pairs (a, i)
    pairs = []
    for i in range(nocc):
        for a in range(nocc, nocc + nvirt):
            pairs.append((a, i))
    M = numpy.ndarray(shape=[len(pairs), len(pairs)])

    # CIS singles matrix: (e_a - e_i) on the diagonal plus 2(ai|bj) - (ai|jb)
    for xx, x in enumerate(pairs):
        eia = fij[x[0]] - fij[x[1]]
        a, i = x
        for yy, y in enumerate(pairs):
            b, j = y
            delta = float(y == x)
            gpart = 2.0 * g[a, i, b, j] - g[a, i, j, b]
            M[xx, yy] = eia * delta + gpart

    omega, xvecs = numpy.linalg.eigh(M)

    # convert amplitudes to ndarray sorted by excitation energy
    # NOTE(review): numpy.linalg.eigh returns eigenvectors as COLUMNS;
    # xvecs[ex] selects a row — confirm whether xvecs[:, ex] was intended
    nex = len(omega)
    amplitudes = []
    for ex in range(nex):
        t = numpy.ndarray(shape=[nvirt, nocc])
        exvec = xvecs[ex]
        for xx, x in enumerate(pairs):
            a, i = x
            t[a - nocc, i] = exvec[xx]
        amplitudes.append(ClosedShellAmplitudes(tIA=t))

    return ResultCIS(omegas=list(omega), amplitudes=amplitudes)
@property
def rdm1(self):
    """One-particle reduced density matrix, cached by compute_rdms (None if not yet computed)."""
    rdm = self._rdm1
    if rdm is None:
        print("1-RDM has not been computed. Return None for 1-RDM.")
    return rdm
@property
def rdm2(self):
    """
    Two-particle reduced density matrix, cached by compute_rdms (None if not yet computed).
    This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
    """
    rdm = self._rdm2
    if rdm is None:
        print("2-RDM has not been computed. Return None for 2-RDM.")
    return rdm
def compute_rdms(self, U: QCircuit = None, variables: Variables = None, spin_free: bool = True,
                 get_rdm1: bool = True, get_rdm2: bool = True, ordering="dirac"):
    """
    Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
    a unitary U. This method uses the standard ordering in physics as denoted below.
    Note, that the representation of the density matrices depends on the qubit transformation
    used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
    matrices in the occupation picture.

    We only consider real orbitals and thus real-valued RDMs.
    The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.

    .. math :
        \\text{rdm1: } \\gamma^p_q = \\langle \\psi | a^p a_q | \\psi \\rangle
                                   = \\langle U 0 | a^p a_q | U 0 \\rangle
        \\text{rdm2: } \\gamma^{pq}_{rs} = \\langle \\psi | a^p a^q a_s a_r | \\psi \\rangle
                                         = \\langle U 0 | a^p a^q a_s a_r | U 0 \\rangle

    Parameters
    ----------
    U :
        Quantum Circuit to achieve the desired state \\psi = U |0\\rangle, non-optional
    variables :
        If U is parametrized, then need to hand over a set of fixed variables
    spin_free :
        Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
    get_rdm1, get_rdm2 :
        Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
        it is recommended to compute them at once.
    ordering :
        index ordering of the returned 2-RDM; computed in "dirac" (physics)
        notation and reordered via NBodyTensor if a different convention is requested

    Returns
    -------
    """
    # Check whether unitary circuit is not 0
    if U is None:
        raise TequilaException('Need to specify a Quantum Circuit.')
    # Check whether transformation is BKSF.
    # Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct
    # transformation, because it computes the number of qubits incorrectly in this case.
    # A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now
    if type(self.transformation).__name__ == "BravyiKitaevFast":
        raise TequilaException(
            "The Bravyi-Kitaev-Superfast transformation does not support general FermionOperators yet.")

    # Set up number of spin-orbitals and molecular orbitals respectively
    n_SOs = 2 * self.n_orbitals
    n_MOs = self.n_orbitals

    # Check whether unitary circuit is not 0
    # NOTE(review): duplicate of the identical check at the top of this method
    if U is None:
        raise TequilaException('Need to specify a Quantum Circuit.')

    def _get_of_op(operator_tuple):
        """ Returns operator given by a operator tuple as OpenFermion - Fermion operator """
        op = openfermion.FermionOperator(operator_tuple)
        return op

    def _get_qop_hermitian(of_operator) -> QubitHamiltonian:
        """ Returns Hermitian part of Fermion operator as QubitHamiltonian """
        qop = self.transformation(of_operator)
        #qop = QubitHamiltonian(self.transformation(of_operator))
        real, imag = qop.split(hermitian=True)
        if real:
            return real
        elif not real:
            raise TequilaException(
                "Qubit Hamiltonian does not have a Hermitian part. Operator ={}".format(of_operator))

    def _build_1bdy_operators_spinful() -> list:
        """ Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians """
        # Exploit symmetry pq = qp
        ops = []
        for p in range(n_SOs):
            for q in range(p + 1):
                op_tuple = ((p, 1), (q, 0))
                op = _get_of_op(op_tuple)
                ops += [op]
        return ops

    def _build_2bdy_operators_spinful() -> list:
        """ Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians """
        # Exploit symmetries pqrs = -pqsr = -qprs = qpsr
        #            and      = rspq
        ops = []
        for p in range(n_SOs):
            for q in range(p):
                for r in range(n_SOs):
                    for s in range(r):
                        if p * n_SOs + q >= r * n_SOs + s:
                            op_tuple = ((p, 1), (q, 1), (s, 0), (r, 0))
                            op = _get_of_op(op_tuple)
                            ops += [op]
        return ops

    def _build_1bdy_operators_spinfree() -> list:
        """ Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians """
        # Exploit symmetry pq = qp (not changed by spin-summation)
        ops = []
        for p in range(n_MOs):
            for q in range(p + 1):
                # Spin aa
                op_tuple = ((2 * p, 1), (2 * q, 0))
                op = _get_of_op(op_tuple)
                # Spin bb
                op_tuple = ((2 * p + 1, 1), (2 * q + 1, 0))
                op += _get_of_op(op_tuple)
                ops += [op]
        return ops

    def _build_2bdy_operators_spinfree() -> list:
        """ Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians """
        # Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out)
        #            and      = rspq
        ops = []
        for p, q, r, s in product(range(n_MOs), repeat=4):
            if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
                # Spin aaaa ('0.0 []' yields the zero operator for Pauli-forbidden terms)
                op_tuple = ((2 * p, 1), (2 * q, 1), (2 * s, 0), (2 * r, 0)) if (p != q and r != s) else '0.0 []'
                op = _get_of_op(op_tuple)
                # Spin abab
                op_tuple = ((2 * p, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r, 0)) if (
                        2 * p != 2 * q + 1 and 2 * r != 2 * s + 1) else '0.0 []'
                op += _get_of_op(op_tuple)
                # Spin baba
                op_tuple = ((2 * p + 1, 1), (2 * q, 1), (2 * s, 0), (2 * r + 1, 0)) if (
                        2 * p + 1 != 2 * q and 2 * r + 1 != 2 * s) else '0.0 []'
                op += _get_of_op(op_tuple)
                # Spin bbbb
                op_tuple = ((2 * p + 1, 1), (2 * q + 1, 1), (2 * s + 1, 0), (2 * r + 1, 0)) if (
                        p != q and r != s) else '0.0 []'
                op += _get_of_op(op_tuple)
                ops += [op]
        return ops

    def _assemble_rdm1(evals) -> numpy.ndarray:
        """
        Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
        Same symmetry with or without spin, so we can use the same function
        """
        N = n_MOs if spin_free else n_SOs
        rdm1 = numpy.zeros([N, N])
        ctr: int = 0
        for p in range(N):
            for q in range(p + 1):
                rdm1[p, q] = evals[ctr]
                # Symmetry pq = qp
                rdm1[q, p] = rdm1[p, q]
                ctr += 1
        return rdm1

    def _assemble_rdm2_spinful(evals) -> numpy.ndarray:
        """ Returns spin-ful two-particle RDM built by symmetry conditions """
        ctr: int = 0
        rdm2 = numpy.zeros([n_SOs, n_SOs, n_SOs, n_SOs])
        for p in range(n_SOs):
            for q in range(p):
                for r in range(n_SOs):
                    for s in range(r):
                        if p * n_SOs + q >= r * n_SOs + s:
                            rdm2[p, q, r, s] = evals[ctr]
                            # Symmetry pqrs = rspq
                            rdm2[r, s, p, q] = rdm2[p, q, r, s]
                            ctr += 1

        # Further permutational symmetries due to anticommutation relations
        for p in range(n_SOs):
            for q in range(p):
                for r in range(n_SOs):
                    for s in range(r):
                        rdm2[p, q, s, r] = -1 * rdm2[p, q, r, s]  # pqrs = -pqsr
                        rdm2[q, p, r, s] = -1 * rdm2[p, q, r, s]  # pqrs = -qprs
                        rdm2[q, p, s, r] = rdm2[p, q, r, s]  # pqrs = qpsr
        return rdm2

    def _assemble_rdm2_spinfree(evals) -> numpy.ndarray:
        """ Returns spin-free two-particle RDM built by symmetry conditions """
        ctr: int = 0
        rdm2 = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
        for p, q, r, s in product(range(n_MOs), repeat=4):
            if p * n_MOs + q >= r * n_MOs + s and (p >= q or r >= s):
                rdm2[p, q, r, s] = evals[ctr]
                # Symmetry pqrs = rspq
                rdm2[r, s, p, q] = rdm2[p, q, r, s]
                ctr += 1

        # Further permutational symmetry: pqrs = qpsr
        for p, q, r, s in product(range(n_MOs), repeat=4):
            if p >= q or r >= s:
                rdm2[q, p, s, r] = rdm2[p, q, r, s]
        return rdm2

    # Build operator lists
    qops = []
    if spin_free:
        qops += _build_1bdy_operators_spinfree() if get_rdm1 else []
        qops += _build_2bdy_operators_spinfree() if get_rdm2 else []
    else:
        qops += _build_1bdy_operators_spinful() if get_rdm1 else []
        qops += _build_2bdy_operators_spinful() if get_rdm2 else []

    # Transform operator lists to QubitHamiltonians
    qops = [_get_qop_hermitian(op) for op in qops]

    # Compute expected values
    evals = simulate(ExpectationValue(H=qops, U=U, shape=[len(qops)]), variables=variables)

    # Assemble density matrices
    # If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type
    def _reset_rdm(rdm):
        if rdm is not None:
            if spin_free and rdm.shape[0] != n_MOs:
                return None
            if not spin_free and rdm.shape[0] != n_SOs:
                return None
        return rdm

    self._rdm1 = _reset_rdm(self._rdm1)
    self._rdm2 = _reset_rdm(self._rdm2)
    # Split expectation values in 1- and 2-particle expectation values
    if get_rdm1:
        len_1 = n_MOs * (n_MOs + 1) // 2 if spin_free else n_SOs * (n_SOs + 1) // 2
    else:
        len_1 = 0
    evals_1, evals_2 = evals[:len_1], evals[len_1:]
    # Build matrices using the expectation values
    self._rdm1 = _assemble_rdm1(evals_1) if get_rdm1 else self._rdm1
    if spin_free:
        self._rdm2 = _assemble_rdm2_spinfree(evals_2) if get_rdm2 else self._rdm2
    else:
        self._rdm2 = _assemble_rdm2_spinful(evals_2) if get_rdm2 else self._rdm2

    if get_rdm2:
        # reorder the 2-RDM from the internal "dirac" convention on request
        rdm2 = NBodyTensor(elems=self.rdm2, ordering="dirac")
        rdm2.reorder(to=ordering)
        rdm2 = rdm2.elems
        self._rdm2 = rdm2

    if get_rdm1:
        if get_rdm2:
            return self.rdm1, self.rdm2
        else:
            return self.rdm1
    elif get_rdm2:
        return self.rdm2
    else:
        warnings.warn("compute_rdms called with instruction to not compute?", TequilaWarning)
def rdm_spinsum(self, sum_rdm1: bool = True, sum_rdm2: bool = True) -> tuple:
    """
    Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.

    Parameters
    ----------
    sum_rdm1, sum_rdm2 :
        If set to true, perform spin summation on rdm1, rdm2

    Returns
    -------
    rdm1_spinsum, rdm2_spinsum :
        The desired spin-free matrices (None for a part that was not requested)
    """
    n_MOs = self.n_orbitals
    rdm1_spinsum = None
    rdm2_spinsum = None

    # Spin summation on rdm1
    if sum_rdm1:
        # Check whether spin-rdm1 exists
        if self._rdm1 is None:
            raise TequilaException("The spin-RDM for the 1-RDM does not exist!")
        # Check whether existing rdm1 is in spin-orbital basis
        if self._rdm1.shape[0] != 2 * n_MOs:
            raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
        rdm1_spinsum = numpy.zeros([n_MOs, n_MOs])
        # sum the alpha-alpha and beta-beta blocks into the lower triangle ...
        for p in range(n_MOs):
            for q in range(p + 1):
                rdm1_spinsum[p, q] = self._rdm1[2 * p, 2 * q] + self._rdm1[2 * p + 1, 2 * q + 1]
        # ... and mirror onto the upper triangle
        for p in range(n_MOs):
            for q in range(p):
                rdm1_spinsum[q, p] = rdm1_spinsum[p, q]

    # Spin summation on rdm2
    if sum_rdm2:
        # Check whether spin-rdm2 exists
        if self._rdm2 is None:
            raise TequilaException("The spin-RDM for the 2-RDM does not exist!")
        # Check whether existing rdm2 is in spin-orbital basis
        if self._rdm2.shape[0] != 2 * n_MOs:
            raise TequilaException("The existing RDM needs to be in spin-orbital basis, it is already spin-free!")
        rdm2_spinsum = numpy.zeros([n_MOs, n_MOs, n_MOs, n_MOs])
        # sum the four same-spin-pattern blocks (aa|aa, ba|ba, ab|ab, bb|bb)
        for p, q, r, s in product(range(n_MOs), repeat=4):
            rdm2_spinsum[p, q, r, s] = (self._rdm2[2 * p, 2 * q, 2 * r, 2 * s]
                                        + self._rdm2[2 * p + 1, 2 * q, 2 * r + 1, 2 * s]
                                        + self._rdm2[2 * p, 2 * q + 1, 2 * r, 2 * s + 1]
                                        + self._rdm2[2 * p + 1, 2 * q + 1, 2 * r + 1, 2 * s + 1])

    return rdm1_spinsum, rdm2_spinsum
def perturbative_f12_correction(self, rdm1: numpy.ndarray = None, rdm2: numpy.ndarray = None,
gamma: float = 1.4, n_ri: int = None,
external_info: dict = None, **kwargs) -> float:
"""
Computes the spin-free [2]_R12 correction, needing only the 1- and 2-RDM of a reference method
Requires either 1-RDM, 2-RDM or information to compute them in kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
dimensionality of RI-basis; specify only, if want to truncate available RI-basis
if None, then the maximum available via tensors / basis-set is used
must not be larger than size of available RI-basis, and not smaller than size of OBS
for n_ri==dim(OBS), the correction returns zero
external_info :
for usage in qc_base, need to provide information where to find one-body tensor f12-tensor <rs|f_12|pq>;
pass dictionary with {"f12_filename": where to find f12-tensor, "scheme": ordering scheme of tensor}
kwargs :
e.g. RDM-information via {"U": QCircuit, "variables": optimal angles}, needs to be passed if rdm1,rdm2 not
yet computed
Returns
-------
the f12 correction for the energy
"""
from .f12_corrections._f12_correction_base import ExplicitCorrelationCorrection
correction = ExplicitCorrelationCorrection(mol=self, rdm1=rdm1, rdm2=rdm2, gamma=gamma,
n_ri=n_ri, external_info=external_info, **kwargs)
return correction.compute()
def __str__(self) -> str:
result = str(type(self)) + "\n"
result += "Qubit Encoding\n"
result += str(self.transformation) + "\n\n"
result += "Parameters\n"
for k, v in self.parameters.__dict__.items():
result += "{key:15} : {value:15} \n".format(key=str(k), value=str(v))
result += "\n"
return result
| src/tequila/quantumchemistry/qc_base.py | 84,601 | Coupled-Cluster Amplitudes
We adopt the Psi4 notation for consistency
I,A for alpha
i,a for beta
Parameters
----------
Returns
-------
Convenience class for handling N-body tensors
Specialization of ParametersHamiltonian
Parameters
----------
elems: Tensor data as numpy array
active_indices: List of active indices in total ordering
ordering: Ordering scheme for two body tensors
"dirac" or "phys": <12|g|12>
.. math::
g_{pqrs} = \int d1 d2 p(1)q(2) g(1,2) r(1)s(2)
"mulliken" or "chem": (11|g|22)
.. math::
g_{pqrs} = \int d1 d2 p(1)r(2) g(1,2) q(1)s(2)
"openfermion":
.. math:: [12|g|21]
g_{gqprs} = \int d1 d2 p(1)q(2) g(1,2) s(1)r(2)
size_full
Returns spin-ful or spin-free one-particle RDM built by symmetry conditions
Same symmetry with or without spin, so we can use the same function
Returns spin-free two-particle RDM built by symmetry conditions
Returns spin-ful two-particle RDM built by symmetry conditions
Returns spinfree one-body operators as a symmetry-reduced list of QubitHamiltonians
Returns spinful one-body operators as a symmetry-reduced list of QubitHamiltonians
Returns spinfree two-body operators as a symmetry-reduced list of QubitHamiltonians
Returns spinful two-body operators as a symmetry-reduced list of QubitHamiltonians
Returns operator given by a operator tuple as OpenFermion - Fermion operator
Returns Hermitian part of Fermion operator as QubitHamiltonian
Small helper function
Internal use only
Parameters
----------
active_orbitals: dictionary :
list: Give a list of spatial orbital indices
i.e. occ = [0,1,3] means that spatial orbital 0, 1 and 3 are used
reference: (Default value=None)
List of orbitals which form the reference
Can be given in the same format as active_orbitals
If given as None then the first N_electron/2 orbitals are taken
for closed-shell systems.
Returns
-------
Dataclass with active indices and reference indices (in spatial notation)
Compute closed-shell CC amplitudes
Parameters
----------
method :
coupled-cluster methods like cc2, ccsd, cc3, ccsd(t)
Success might depend on backend
got an extra function for MP2
*args :
**kwargs :
Returns
-------
Compute the CIS amplitudes of the molecule
Compute closed-shell mp2 amplitudes
.. math::
t(a,i,b,j) = 0.25 * g(a,i,b,j)/(e(i) + e(j) -a(i) - b(j) )
:return:
Parameters
----------
Returns
-------
convenience function
Computes the one- and two-particle reduced density matrices (rdm1 and rdm2) given
a unitary U. This method uses the standard ordering in physics as denoted below.
Note, that the representation of the density matrices depends on the qubit transformation
used. The Jordan-Wigner encoding corresponds to 'classical' second quantized density
matrices in the occupation picture.
We only consider real orbitals and thus real-valued RDMs.
The matrices are set as private members _rdm1, _rdm2 and can be accessed via the properties rdm1, rdm2.
.. math :
\text{rdm1: } \gamma^p_q = \langle \psi | a^p a_q | \psi \rangle
= \langle U 0 | a^p a_q | U 0 \rangle
\text{rdm2: } \gamma^{pq}_{rs} = \langle \psi | a^p a^q a_s a_r | \psi \rangle
= \langle U 0 | a^p a^q a_s a_r | U 0 \rangle
Parameters
----------
U :
Quantum Circuit to achieve the desired state \psi = U |0\rangle, non-optional
variables :
If U is parametrized, then need to hand over a set of fixed variables
spin_free :
Set whether matrices should be spin-free (summation over spin) or defined by spin-orbitals
get_rdm1, get_rdm2 :
Set whether either one or both rdm1, rdm2 should be computed. If both are needed at some point,
it is recommended to compute them at once.
Returns
-------
Convert a molecular structure given as a string into a list suitable for openfermion
Parameters
----------
geometry :
a string specifying a mol. structure. E.g. geometry="h 0.0 0.0 0.0
h 0.0 0.0 1.0"
Returns
-------
type
A list with the correct format for openfermion E.g return [ ['h',[0.0,0.0,0.0], [..]]
Parameters
----------
args
kwargs
Returns
-------
OpenFermion uses case sensitive hash tables for chemical elements
I.e. you need to name Lithium: 'Li' and 'li' or 'LI' will not work
this convenience function does the naming
:return: first letter converted to upper rest to lower
Parameters
----------
string :
Returns
-------
Consistent formatting of excitation indices
idx = [(p0,q0),(p1,q1),...,(pn,qn)]
sorted as: p0<p1<pn and pi<qi
:param idx: list of index tuples describing a single(!) fermionic excitation
:return: tuple-list of index tuples
Initialize from closed-shell Amplitude structure
Parameters
----------
cs: ClosedShellAmplitudes :
Returns
-------
Initialize direclty from openfermion MolecularData object
Parameters
----------
molecule
The openfermion molecule
Returns
-------
The Tequila molecule
Returns the geometry
If a xyz filename was given the file is read out
otherwise it is assumed that the geometry was given as string
which is then reformatted as a list usable as input for openfermion
:return: geometry as list
e.g. [(h,(0.0,0.0,0.35)),(h,(0.0,0.0,-0.35))]
Units: Angstrom!
Parameters
----------
Returns
-------
returns the geometry as a string
:return: geometry string
Parameters
----------
Returns
-------
Returns
-------
Tuple with:
constant part (nuclear_repulsion + possible integrated parts from active-spaces)
one_body_integrals
two_body_integrals
Assuming a pair-specific model, create a pair-specific index list
to be used in make_upccgsd_ansatz(indices = ... )
Excite from a set of references (i) to any pair coming from (i),
i.e. any (i,j)/(j,i). If general excitations are allowed, also
allow excitations from pairs to appendant pairs and reference.
Parameters
----------
pair_info
file or list including information about pair structure
references single number, pair double
example: as file: "0,1,11,11,00,10" (hand over file name)
in file, skip first row assuming some text with information
as list:['0','1`','11','11','00','10']
~> two reference orbitals 0 and 1,
then two orbitals from pair 11, one from 00, one mixed 10
include_singles
include single excitations
general_excitations
allow general excitations
Returns
-------
list of indices with pair-specific ansatz
Transform a circuit in the hardcore-boson encoding (HCB)
to the encoding of this molecule
HCB is supposed to be encoded on the first n_orbitals qubits
Parameters
----------
U: HCB circuit (using the alpha qubits)
Returns
-------
Initialize a fermionic excitation gate defined as
.. math::
e^{-i\frac{a}{2} G}
with generator defines by the indices [(p0,q0),(p1,q1),...]
.. math::
G = i(\prod_{k} a_{p_k}^\dagger a_{q_k} - h.c.)
Parameters
----------
indices:
List of tuples that define the generator
angle:
Numeric or hashable type or tequila objective
control:
List of possible control qubits
assume_real:
Assume that the wavefunction will always stay real.
Will reduce potential gradient costs by a factor of 2
Notes
----------
Creates the transformed hermitian generator of UCC type unitaries:
M(a^\dagger_{a_0} a_{i_0} a^\dagger{a_1}a_{i_1} ... - h.c.)
where the qubit map M depends is self.transformation
Parameters
----------
indices : typing.Iterable[typing.Tuple[int, int]] :
List of tuples [(a_0, i_0), (a_1, i_1), ... ] - recommended format, in spin-orbital notation (alpha odd numbers, beta even numbers)
can also be given as one big list: [a_0, i_0, a_1, i_1 ...]
form : str : (Default value None):
Manipulate the generator to involution or projector
set form='involution' or 'projector'
the default is no manipulation which gives the standard fermionic excitation operator back
remove_constant_term: bool: (Default value True):
by default the constant term in the qubit operator is removed since it has no effect on the unitary it generates
if the unitary is controlled this might not be true!
Returns
-------
type
1j*Transformed qubit excitation operator, depends on self.transformation
Creates a molecule in openfermion format by running psi4 and extracting the data
Will check for previous outputfiles before running
Will not recompute if a file was found
Parameters
----------
parameters :
An instance of ParametersQC, which also holds an instance of ParametersPsi4 via parameters.psi4
The molecule will be saved in parameters.filename, if this file exists before the call the molecule will be imported from the file
Returns
-------
type
the molecule in openfermion.MolecularData format
Parameters
----------
threshold :
(Default value = 1.e-8)
Returns
-------
Parameters
----------
threshold :
(Default value = 1.e-8)
Neglect amplitudes below the threshold
Returns
-------
Dictionary of tequila variables (hash is in the style of (a,i,b,j))
Parameters
----------
initial_amplitudes :
initial amplitudes given as ManyBodyAmplitudes structure or as string
where 'mp2', 'cc2' or 'ccsd' are possible initializations
include_reference_ansatz :
Also do the reference ansatz (prepare closed-shell Hartree-Fock) (Default value = True)
parametrized :
Initialize with variables, otherwise with static numbers (Default value = True)
trotter_steps: int :
initial_amplitudes: typing.Union[str :
Amplitudes :
ClosedShellAmplitudes] :
(Default value = "cc2")
Returns
-------
type
Parametrized QCircuit
UpGCCSD Ansatz similar as described by Lee et. al.
Parameters
----------
include_singles
include singles excitations. Is overwritten if indices are a string (i.e. indices=UpCCGSD will always include singles, UpCCGD will not)
include_reference
include the HF reference state as initial state
indices
pass custom defined set of indices from which the ansatz will be created
List of tuples of tuples spin-indices e.g. [((2*p,2*q),(2*p+1,2*q+1)), ...]
label
An additional label that is set with the variables
default is None and no label will be set: variables names will be
(x, (p,q)) for x in range(order)
with a label the variables will be named
(label, (x, (p,q)))
order
Order of the ansatz (default is 1)
determines how often the ordering gets repeated
parameters of repeating layers are independent
assume_real
assume a real wavefunction (that is always the case if the reference state is real)
reduces potential gradient costs from 4 to 2
Returns
-------
UpGCCSD ansatz
:return: Give back all parameters for the MolecularData format from openfermion as dictionary
Computes the spin-free [2]_R12 correction, needing only the 1- and 2-RDM of a reference method
Requires either 1-RDM, 2-RDM or information to compute them in kwargs
Parameters
----------
rdm1 :
1-electron reduced density matrix
rdm2 :
2-electron reduced density matrix
gamma :
f12-exponent, for a correlation factor f_12 = -1/gamma * exp[-gamma*r_12]
n_ri :
dimensionality of RI-basis; specify only, if want to truncate available RI-basis
if None, then the maximum available via tensors / basis-set is used
must not be larger than size of available RI-basis, and not smaller than size of OBS
for n_ri==dim(OBS), the correction returns zero
external_info :
for usage in qc_base, need to provide information where to find one-body tensor f12-tensor <rs|f_12|pq>;
pass dictionary with {"f12_filename": where to find f12-tensor, "scheme": ordering scheme of tensor}
kwargs :
e.g. RDM-information via {"U": QCircuit, "variables": optimal angles}, needs to be passed if rdm1,rdm2 not
yet computed
Returns
-------
the f12 correction for the energy
Small convenience function
Parameters
----------
state :
product state encoded into a bitstring
state: BitString :
Returns
-------
type
unitary circuit which prepares the product state
Returns
-------
A tequila circuit object which prepares the reference of this molecule in the chosen transformation
Returns RMD1 if computed with compute_rdms function before
Returns RMD2 if computed with compute_rdms function before
This is returned in Dirac (physics) notation by default (can be changed in compute_rdms with keyword)!
Given the spin-ful 1- and 2-particle reduced density matrices, compute the spin-free RDMs by spin summation.
Parameters
----------
sum_rdm1, sum_rdm2 :
If set to true, perform spin summation on rdm1, rdm2
Returns
-------
rdm1_spinsum, rdm2_spinsum :
The desired spin-free matrices
Read XYZ filetype for molecular structures
https://en.wikipedia.org/wiki/XYZ_file_format
Units: Angstrom!
Parameters
----------
filename :
return:
Returns
-------
Function to reorder tensors according to some convention.
Parameters
----------
to :
Ordering scheme of choice.
'openfermion', 'of' (default) :
openfermion - ordering, corresponds to integrals of the type
h^pq_rs = int p(1)* q(2)* O(1,2) r(2) s(1) (O(1,2)
with operators a^pq_rs = a^p a^q a_r a_s (a^p == a^dagger_p)
currently needed for dependencies on openfermion-library
'chem', 'c' :
quantum chemistry ordering, collect particle terms,
more convenient for real-space methods
h^pq_rs = int p(1) q(1) O(1,2) r(2) s(2)
This is output by psi4
'phys', 'p' :
typical physics ordering, integrals of type
h^pq_rs = int p(1)* q(2)* O(1,2) r(1) s(2)
with operators a^pq_rs = a^p a^q a_s a_r
Returns
-------
Set passive and full index lists based on class inputs
Get subspace of tensor by a set of index lists
according to hPQ.sub_lists(idx_lists=[p, q]) = [hPQ for P in p and Q in q]
This essentially is an implementation of a non-contiguous slicing using numpy.take
Parameters
----------
idx_lists :
List of lists, each defining the desired subspace per axis
Size needs to match order of tensor, and lists successively correspond to axis=0,1,2,...,N
Returns
-------
out :
Sliced tensor as numpy.ndarray
Get subspace of tensor by a string
Currently is able to resolve an active space, named 'a', full space 'f', and the complement 'p' = 'f' - 'a'.
Full space in this context may also be smaller than actual tensor dimension.
The specification of active space in this context only allows to pick a set from a list of orbitals, and
is not able to resolve an active space from irreducible representations.
Example for one-body tensor:
hPQ.sub_lists(name='ap') = [hPQ for P in active_indices and Q in _passive_indices]
Parameters
----------
name :
String specifying the desired subspace, elements need to be a (active), f (full), p (full - active)
Returns
-------
out :
Sliced tensor as numpy.ndarray
if you are experiencing import errors you need to update openfermion required is version >= 1.0 otherwise replace with from openfermion.hamiltonians import MolecularData active orbitals (spatial, c1) reference orbitals (spatial, c1) keep the overview in circuits Quantum chemistry basis set geometry of the underlying molecule (units: Angstrom!), this can be a filename leading to an .xyz file or the geometry given as a string auto naming Remove blank lines Pad coordinates Set elements Active indices only as list of indices (e.g. spatial orbital indices), not as a dictionary of irreducible representations Determine order of tensor Assume, that tensor is entered in desired shape, not as flat array. Can use size_full < self.elems.shape[0] -> 'full' space is to be considered a subspace as well 2-body tensors (<=> order 4) currently allow reordering Check if index list has correct size Perform slicing via numpy.take None means, we want the full space in this direction Parse name as string of space indices filter out arguments to the transformation format to conventions auto assignment only for closed-shell check indices and convert to list of tuples if necessary convert everything to native python int otherwise openfermion will complain convert to openfermion input format openfermion does not take other types of integers like numpy.int64 1j makes it hermitian indices for all the Na operators indices for all the Ma operators (Ma = 1 - Na) indices for all the Ni operators indices for all the Mi operators can gaussianize as projector or as involution (last is default) Just for clarity will be subtracted anyway Just for clarity will be subtracted anyway P0: we only construct P0 and don't keep the original generator Just for clarity will be subtracted anyway remove constant terms they have no effect in the unitary (if not controlled) check if the operator is hermitian and cast coefficients to floats in order to avoid trouble with the simulation backends try to load integrals 
need to be passed in base class tequila assumes "openfermion" ordering, integrals can however be passed down in other orderings, but it needs to be indicated by keyword integrate with QubitEncoding at some point prevent trace out in direct wfn simulation adapt when tapered transformations work HF state in the HCB representation (paired electrons) consistency map to alpha qubits If pair-information given on file, load (layout see above) determine "connectivity" create generating indices from connectivity add doubles in hcb encoding ladder structure of the pair excitations ensures local connectivity check if the used qubit encoding has a hcb transformation consistency checks for optimization first layer we can optimize with qubit excitations for the JW representation singles mp2 has no singles, need to initialize them here (if not parametrized initializling as 0.0 makes no sense though) this is never the active space excitation energies [omega0, ...] corresponding amplitudes [x_{ai}_0, ...] convert amplitudes to ndarray sorted by excitation energy Check whether unitary circuit is not 0 Check whether transformation is BKSF. Issue here: when a single operator acts only on a subset of qubits, BKSF might not yield the correct transformation, because it computes the number of qubits incorrectly in this case. 
A hotfix such as for symmetry_conserving_bravyi_kitaev would require deeper changes, thus omitted for now Set up number of spin-orbitals and molecular orbitals respectively Check whether unitary circuit is not 0qop = QubitHamiltonian(self.transformation(of_operator)) Exploit symmetry pq = qp Exploit symmetries pqrs = -pqsr = -qprs = qpsr and = rspq Exploit symmetry pq = qp (not changed by spin-summation) Spin aa Spin bb Exploit symmetries pqrs = qpsr (due to spin summation, '-pqsr = -qprs' drops out) and = rspq Spin aaaa Spin abab Spin baba Spin bbbb Symmetry pq = qp Symmetry pqrs = rspq Further permutational symmetries due to anticommutation relations pqrs = -pqsr pqrs = -qprs pqrs = qpsr Symmetry pqrs = rspq Further permutational symmetry: pqrs = qpsr Build operator lists Transform operator lists to QubitHamiltonians Compute expected values Assemble density matrices If self._rdm1, self._rdm2 exist, reset them if they are of the other spin-type Split expectation values in 1- and 2-particle expectation values Build matrices using the expectation values Spin summation on rdm1 Check whether spin-rdm2 exists Check whether existing rdm1 is in spin-orbital basis Do summation Spin summation on rdm2 Check whether spin-rdm2 exists Check whether existing rdm2 is in spin-orbital basis Do summation | 19,631 | en | 0.708281 |
#!/usr/bin/env python
#
# svnadmin_tests.py: testing the 'svnadmin' tool.
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import os
import logging
import re
import shutil
import sys
import threading
import time
import gzip
logger = logging.getLogger()
# Our testing module
import svntest
from svntest.verify import SVNExpectedStdout, SVNExpectedStderr
from svntest.verify import SVNUnexpectedStderr
from svntest.verify import UnorderedOutput
from svntest.main import SVN_PROP_MERGEINFO
# (abbreviations)
# Shorthand aliases for the test-case decorators and the working-copy
# state-item constructor used throughout this file.
Skip = svntest.testcase.Skip_deco
SkipUnless = svntest.testcase.SkipUnless_deco
XFail = svntest.testcase.XFail_deco
Issues = svntest.testcase.Issues_deco
Issue = svntest.testcase.Issue_deco
Wimp = svntest.testcase.Wimp_deco
SkipDumpLoadCrossCheck = svntest.testcase.SkipDumpLoadCrossCheck_deco
Item = svntest.wc.StateItem
def read_rep_cache(repo_dir):
  """Return the rep-cache contents as a dict {hash: (rev, index, ...)}.

  REPO_DIR is the root of an FSFS repository.  Raises svntest.Failure
  when the rep-cache schema is too new for the built-in SQLite to read.
  """
  db_path = os.path.join(repo_dir, 'db', 'rep-cache.db')
  db = svntest.sqlite3.connect(db_path)
  try:
    schema = db.execute("pragma user_version").fetchone()[0]
    # Can't test newer rep-cache schemas with an old built-in SQLite; see the
    # documentation of STMT_CREATE_SCHEMA_V2 in ../../libsvn_fs_fs/rep-cache-db.sql
    if schema >= 2 and svntest.sqlite3.sqlite_version_info < (3, 8, 2):
      raise svntest.Failure("Can't read rep-cache schema %d using old "
                            "Python-SQLite version %s < (3,8,2)" %
                              (schema,
                               svntest.sqlite3.sqlite_version_info))

    return { row[0]: row[1:] for row in
             db.execute("select * from rep_cache") }
  finally:
    # Always release the connection; the original code leaked it.
    db.close()
def check_hotcopy_bdb(src, dst):
  "Verify that the SRC BDB repository has been correctly copied to DST."
  ### TODO: This function should be extended to verify all hotcopied files,
  ### not just compare the output of 'svnadmin dump'. See check_hotcopy_fsfs().
  exit_code, src_out, src_err = svntest.main.run_svnadmin("dump", src,
                                                          '--quiet')
  exit_code, dst_out, dst_err = svntest.main.run_svnadmin("dump", dst,
                                                          '--quiet')
  # Both dumps must succeed and produce identical output.
  if src_err or dst_err:
    raise svntest.Failure
  if src_out != dst_out:
    raise svntest.Failure
def check_hotcopy_fsfs_fsx(src, dst):
  """Walk the FSFS/FSX repository SRC and verify that it was correctly
  copied to the hotcopy destination DST.  Raises svntest.Failure on the
  first difference found."""
  # Walk the source and compare all files to the destination
  for src_dirpath, src_dirs, src_files in os.walk(src):
    # Verify that the current directory exists in the destination
    dst_dirpath = src_dirpath.replace(src, dst)
    if not os.path.isdir(dst_dirpath):
      raise svntest.Failure("%s does not exist in hotcopy "
                            "destination" % dst_dirpath)
    # Verify that all dirents in the current directory also exist in source
    for dst_dirent in os.listdir(dst_dirpath):
      # Ignore auto-created empty lock files as they may or may not
      # be present and are neither required by nor do they harm to
      # the destination repository.
      if dst_dirent == 'pack-lock':
        continue
      if dst_dirent == 'write-lock':
        continue

      # Ignore auto-created rep-cache.db-journal file
      if dst_dirent == 'rep-cache.db-journal':
        continue

      src_dirent = os.path.join(src_dirpath, dst_dirent)
      if not os.path.exists(src_dirent):
        raise svntest.Failure("%s does not exist in hotcopy "
                              "source" % src_dirent)
    # Compare all files in this directory
    for src_file in src_files:
      # Ignore auto-created empty lock files as they may or may not
      # be present and are neither required by nor do they harm to
      # the destination repository.
      if src_file == 'pack-lock':
        continue
      if src_file == 'write-lock':
        continue

      # Ignore auto-created rep-cache.db-journal file
      if src_file == 'rep-cache.db-journal':
        continue

      src_path = os.path.join(src_dirpath, src_file)
      dst_path = os.path.join(dst_dirpath, src_file)
      if not os.path.isfile(dst_path):
        raise svntest.Failure("%s does not exist in hotcopy "
                              "destination" % dst_path)

      # Special case for db/uuid: Only the UUID in the first line needs
      # to match. Source and target must have the same number of lines
      # (due to having the same format).
      if src_path == os.path.join(src, 'db', 'uuid'):
        with open(src_path, 'rb') as f1:
          lines1 = f1.read().split(b"\n")
        with open(dst_path, 'rb') as f2:
          lines2 = f2.read().split(b"\n")
        if len(lines1) != len(lines2):
          raise svntest.Failure("%s differs in number of lines"
                                % dst_path)
        if lines1[0] != lines2[0]:
          raise svntest.Failure("%s contains different uuid: '%s' vs. '%s'"
                                 % (dst_path, lines1[0], lines2[0]))
        continue

      # Special case for rep-cache: It will always differ in a byte-by-byte
      # comparison, so compare db tables instead.
      if src_file == 'rep-cache.db':
        db1 = svntest.sqlite3.connect(src_path)
        db2 = svntest.sqlite3.connect(dst_path)
        try:
          schema1 = db1.execute("pragma user_version").fetchone()[0]
          schema2 = db2.execute("pragma user_version").fetchone()[0]
          if schema1 != schema2:
            raise svntest.Failure("rep-cache schema differs: '%s' vs. '%s'"
                                  % (schema1, schema2))
          # Can't test newer rep-cache schemas with an old built-in SQLite.
          if schema1 >= 2 and svntest.sqlite3.sqlite_version_info < (3, 8, 2):
            continue

          rows1 = db1.execute(
                    "select * from rep_cache order by hash").fetchall()
          rows2 = db2.execute(
                    "select * from rep_cache order by hash").fetchall()
          if len(rows1) != len(rows2):
            raise svntest.Failure("number of rows in rep-cache differs")
          for i in range(len(rows1)):
            if rows1[i] != rows2[i]:
              raise svntest.Failure("rep-cache row %i differs: '%s' vs. '%s'"
                                    % (i, rows1[i], rows2[i]))
        finally:
          # Close the connections even when a comparison fails.
          db1.close()
          db2.close()
        continue

      # Special case for revprop-generation: It will always be zero in
      # the hotcopy destination (i.e. a fresh cache generation)
      if src_file == 'revprop-generation':
        with open(dst_path, 'r') as f2:
          revprop_gen = int(f2.read().strip())
        if revprop_gen != 0:
          raise svntest.Failure("Hotcopy destination has non-zero " +
                                "revprop generation")
        continue

      # Generic case: compare byte by byte, in chunks.  OFFSET tracks the
      # absolute file position so failure messages report the true offset
      # of the first difference (previously it was reset per chunk).
      with open(src_path, 'rb') as f1, open(dst_path, 'rb') as f2:
        offset = 0
        BUFSIZE = 1024
        while True:
          buf1 = f1.read(BUFSIZE)
          buf2 = f2.read(BUFSIZE)
          if not buf1 or not buf2:
            if not buf1 and not buf2:
              # both at EOF
              break
            # One file ended before the other; the difference starts at
            # the current absolute offset.
            raise svntest.Failure("%s differs at offset %i" %
                                  (dst_path, offset))
          if len(buf1) != len(buf2):
            raise svntest.Failure("%s differs in length" % dst_path)
          for i in range(len(buf1)):
            if buf1[i] != buf2[i]:
              raise svntest.Failure("%s differs at offset %i"
                                    % (dst_path, offset + i))
          offset += len(buf1)
def check_hotcopy_fsfs(src, dst):
  "Verify that the SRC FSFS repository has been correctly copied to DST."
  # FSFS and FSX share the same on-disk comparison logic.
  check_hotcopy_fsfs_fsx(src, dst)
def check_hotcopy_fsx(src, dst):
  "Verify that the SRC FSX repository has been correctly copied to DST."
  # FSFS and FSX share the same on-disk comparison logic.
  check_hotcopy_fsfs_fsx(src, dst)
#----------------------------------------------------------------------
# How we currently test 'svnadmin' --
#
# 'svnadmin create': Create an empty repository, test that the
# root node has a proper created-revision,
# because there was once a bug where it
# didn't.
#
# Note also that "svnadmin create" is tested
# implicitly every time we run a python test
# script. (An empty repository is always
# created and then imported into; if this
# subcommand failed catastrophically, every
# test would fail and we would know instantly.)
#
# 'svnadmin createtxn'
# 'svnadmin rmtxn': See below.
#
# 'svnadmin lstxns': We don't care about the contents of transactions;
# we only care that they exist or not.
# Therefore, we can simply parse transaction headers.
#
# 'svnadmin dump': A couple regression tests that ensure dump doesn't
# error out, and one to check that the --quiet option
# really does what it's meant to do. The actual
# contents of the dump aren't verified at all.
#
# ### TODO: someday maybe we could parse the contents of trees too.
#
######################################################################
# Helper routines
def get_txns(repo_dir):
  "Get the txn names using 'svnadmin lstxns'."

  exit_code, output_lines, error_lines = svntest.main.run_svnadmin('lstxns',
                                                                   repo_dir)

  # OUTPUT_LINES is a list of lines; strip the trailing newline from each
  # entry and return the transaction names in sorted order.
  txns = sorted([x.strip() for x in output_lines])

  return txns
def patch_format(repo_dir, shard_size):
  """Rewrite the format of the FSFS or FSX repository REPO_DIR so
  that it would use sharding with SHARDS revisions per shard."""

  format_path = os.path.join(repo_dir, "db", "format")
  with open(format_path, 'rb') as f:
    contents = f.read()

  # Replace any existing "layout ..." line with the sharded layout;
  # keep all other lines untouched.
  processed_lines = []
  for line in contents.split(b"\n"):
    if line.startswith(b"layout "):
      processed_lines.append(("layout sharded %d" % shard_size).encode())
    else:
      processed_lines.append(line)
  new_contents = b"\n".join(processed_lines)

  # The format file is read-only by default; make it writable first.
  os.chmod(format_path, svntest.main.S_ALL_RW)
  with open(format_path, 'wb') as f:
    f.write(new_contents)
def is_sharded(repo_dir):
  """Return whether the FSFS repository REPO_DIR is sharded."""
  format_path = os.path.join(repo_dir, "db", "format")
  # The format file is tiny (a version number plus a few option lines),
  # so read it whole; close the handle deterministically.
  with open(format_path, 'rb') as f:
    contents = f.read()

  return any(line.startswith(b"layout sharded")
             for line in contents.split(b"\n"))
def load_and_verify_dumpstream(sbox, expected_stdout, expected_stderr,
                               revs, check_props, dump, *varargs):
  """Load the array of lines passed in DUMP into the current tests'
  repository and verify the repository content using the array of
  wc.States passed in REVS. If CHECK_PROPS is True, check properties
  of each rev's items. VARARGS are optional arguments passed to the
  'load' command."""
  dump = svntest.main.ensure_list(dump)

  exit_code, stdout_lines, stderr_lines = svntest.main.run_command_stdin(
    svntest.main.svnadmin_binary, expected_stderr, 0, True, dump,
    'load', '--quiet', sbox.repo_dir, *varargs)

  if expected_stdout:
    if expected_stdout is svntest.verify.AnyOutput:
      if not stdout_lines:
        raise SVNExpectedStdout
    else:
      svntest.verify.compare_and_display_lines(
        "Standard output", "STDOUT:", expected_stdout, stdout_lines)

  if expected_stderr:
    if expected_stderr is svntest.verify.AnyOutput:
      if not stderr_lines:
        raise SVNExpectedStderr
    else:
      svntest.verify.compare_and_display_lines(
        "Standard error output", "STDERR:", expected_stderr, stderr_lines)
    # The expected error occurred, so don't try to verify the result
    return

  if not revs:
    return

  # verify revs as wc states
  for i, rev_tree in enumerate(revs):
    svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
                                       "update", "-r%s" % (i + 1),
                                       sbox.wc_dir)
    svntest.actions.verify_disk(sbox.wc_dir, rev_tree, check_props)
def load_dumpstream(sbox, dump, *varargs):
  "Load dump text without verification."
  # Delegate with all verification parameters disabled.
  return load_and_verify_dumpstream(sbox, None, None, None, False,
                                    dump, *varargs)
class FSFS_Index:
  """Manages indexes of a rev file in a FSFS format 7 repository.
  The interface returns P2L information and allows for item offsets
  and lengths to be modified.

  Internally, self.by_item maps an item number to the list of column
  strings produced by 'svnfsfs dump-index':
  [offset(hex), length(hex), type, checksum, item-number].
  """

  def __init__(self, sbox, revision):
    self.by_item = { }
    self.revision = revision
    self.repo_dir = sbox.repo_dir
    self._read()

  def _read(self):
    """ Read P2L index using svnfsfs. """
    exit_code, output, errput = svntest.main.run_svnfsfs('dump-index',
                                                  '-r' + str(self.revision),
                                                  self.repo_dir)
    svntest.verify.verify_outputs("Error while dumping index",
                                  [], errput, [], [])
    svntest.verify.verify_exit_code(None, exit_code, 0)

    self.by_item.clear()
    for line in output:
      values = line.split()
      # Skip the header line (starts with 'Start') and malformed lines.
      if len(values) >= 4 and values[0] != 'Start':
        item = int(values[4])
        self.by_item[item] = values

  def _write(self):
    """ Rewrite indexes using svnfsfs. """
    # 'svnfsfs load-index' expects the entries ordered by rev-file offset.
    by_offset = { int(values[0], 16): values
                  for values in self.by_item.values() }

    lines = []
    for offset, values in sorted(by_offset.items()):
      line = ' '.join(values[:5]) + '\n'
      lines.append(line.encode())

    exit_code, output, errput = svntest.main.run_command_stdin(
      svntest.main.svnfsfs_binary, 0, 0, False, lines,
      'load-index', self.repo_dir)

    svntest.verify.verify_outputs("Error while rewriting index",
                                  output, errput, [], [])
    svntest.verify.verify_exit_code(None, exit_code, 0)

  def get_item(self, item):
    """ Return offset, length and type of ITEM. """
    values = self.by_item[item]

    offset = int(values[0], 16)
    length = int(values[1], 16)
    item_type = values[2]

    return (offset, length, item_type)

  def modify_item(self, item, offset, len):
    """ Modify offset and length of ITEM. """
    values = self.by_item[item]

    values[0] = '%x' % offset
    values[1] = '%x' % len

    self._write()
def repo_format(sbox):
  """ Return the repository format number for SBOX.

  Reads the first line of SBOX's db/format file.  Parsing the whole first
  line (rather than only its first character) keeps this correct for
  multi-digit format numbers (>= 10)."""
  with open(os.path.join(sbox.repo_dir, "db", "format")) as format_file:
    return int(format_file.readline().strip())
def set_changed_path_list(sbox, revision, changes):
  """ Replace the changed paths list in the revision file REVISION in SBOX
      with the text CHANGES."""
  idx = None

  # read full file
  fp = open(fsfs_file(sbox.repo_dir, 'revs', str(revision)), 'r+b')
  contents = fp.read()
  length = len(contents)

  if repo_format(sbox) < 7:
    # replace the changed paths list
    # Pre-format-7 rev files end with a trailer line "<root-offset>
    # <changes-offset>"; find its last newline within the final 64 bytes.
    header = contents[contents.rfind(b'\n', length - 64, length - 1):]
    body_len = int(header.split(b' ')[1])
  else:
    # read & parse revision file footer
    # Format 7+ rev files end with a footer
    # "L2P_OFFSET L2P_CHECKSUM P2L_OFFSET P2L_CHECKSUM" followed by a
    # single byte holding the footer's length.
    footer_length = contents[length-1];
    if isinstance(footer_length, str):
      # Python 2: indexing bytes yields a 1-char str, not an int.
      footer_length = ord(footer_length)

    footer = contents[length - footer_length - 1:length-1]
    l2p_offset = int(footer.split(b' ')[0])
    l2p_checksum = footer.split(b' ')[1]
    p2l_offset = int(footer.split(b' ')[2])
    p2l_checksum = footer.split(b' ')[3]

    # Item 1's offset marks where the revision body ends (item 1 is
    # presumably the changed-paths list — see modify_item below).
    idx = FSFS_Index(sbox, revision)
    (offset, item_len, item_type) = idx.get_item(1)

    # split file contents
    body_len = offset
    indexes = contents[l2p_offset:length - footer_length - 1]

    # construct new footer, include indexes as are
    file_len = body_len + len(changes) + 1
    p2l_offset += file_len - l2p_offset

    header = str(file_len).encode() + b' ' + l2p_checksum + b' ' \
             + str(p2l_offset).encode() + b' ' + p2l_checksum
    header += bytes([len(header)])
    header = b'\n' + indexes + header

  contents = contents[:body_len] + changes + header

  # set new contents
  fp.seek(0)
  fp.write(contents)
  fp.truncate()
  fp.close()

  if repo_format(sbox) >= 7:
    # Keep the P2L index in sync with the resized changed-paths item.
    idx.modify_item(1, offset, len(changes) + 1)
######################################################################
# Tests
#----------------------------------------------------------------------
# dump stream tests need a dump file
def clean_dumpfile():
  """Return a pristine copy of the canned dump stream used by these tests:
  r0 (empty) plus r1 adding a single keyword-expanded file 'A'.

  A fresh list is built on every call so callers may mutate it freely."""
  header = [ b"SVN-fs-dump-format-version: 2\n\n",
             b"UUID: 668cc64a-31ed-0310-8ccb-b75d75bb44e3\n\n" ]
  rev0 = [ b"Revision-number: 0\n",
           b"Prop-content-length: 56\n",
           b"Content-length: 56\n\n",
           b"K 8\nsvn:date\nV 27\n2005-01-08T21:48:13.838745Z\nPROPS-END\n\n\n" ]
  rev1 = [ b"Revision-number: 1\n",
           b"Prop-content-length: 98\n",
           b"Content-length: 98\n\n",
           b"K 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 4\nerik\n",
           b"K 8\nsvn:date\nV 27\n2005-01-08T21:51:16.313791Z\nPROPS-END\n\n\n" ]
  node_a = [ b"Node-path: A\n",
             b"Node-kind: file\n",
             b"Node-action: add\n",
             b"Prop-content-length: 35\n",
             b"Text-content-length: 5\n",
             b"Text-content-md5: e1cbb0c3879af8347246f12c559a86b5\n",
             b"Content-length: 40\n\n",
             b"K 12\nsvn:keywords\nV 2\nId\nPROPS-END\ntext\n\n\n" ]
  return header + rev0 + rev1 + node_a
# Expected working-copy state per revision of clean_dumpfile():
# r1 adds a single file 'A' with contents "text\n".
dumpfile_revisions = \
  [ svntest.wc.State('', { 'A' : svntest.wc.StateItem(contents="text\n") }) ]
#----------------------------------------------------------------------
def extra_headers(sbox):
  "loading of dumpstream with extra headers"

  sbox.build(empty=True)

  # Splice an unknown header into the r0 header block; the loader must
  # ignore it and still load the stream successfully.
  dump = clean_dumpfile()
  dump.insert(3,
              b"X-Comment-Header: Ignored header normally not in dump stream\n")

  load_and_verify_dumpstream(sbox, [], [], dumpfile_revisions, False, dump,
                             '--ignore-uuid')
#----------------------------------------------------------------------
# Ensure loading continues after skipping a bit of unknown extra content.
def extra_blockcontent(sbox):
  "load success on oversized Content-length"

  sbox.build(empty=True)

  dump = clean_dumpfile()
  # Advertise ten more bytes of content than the property block holds...
  dump[8] = b"Extra-content-length: 10\n"
  dump.insert(9, b"Content-length: 108\n\n")
  # ...and supply those bytes right after "PROPS-END\n".  Loading must
  # skip the unknown extra content and still succeed.
  dump[11] = dump[11][:-2] + b"extra text\n\n\n"

  load_and_verify_dumpstream(sbox, [], [], dumpfile_revisions, False, dump,
                             '--ignore-uuid')
#----------------------------------------------------------------------
def inconsistent_headers(sbox):
  "load failure on undersized Content-length"

  sbox.build(empty=True)

  # Understate the node's Content-length; the loader must reject the
  # stream instead of loading it.
  broken_dump = clean_dumpfile()
  broken_dump[-2] = b"Content-length: 30\n\n"

  load_and_verify_dumpstream(sbox, [], svntest.verify.AnyOutput,
                             dumpfile_revisions, False, broken_dump)
#----------------------------------------------------------------------
# Test for issue #2729: Datestamp-less revisions in dump streams do
# not remain so after load
@Issue(2729)
def empty_date(sbox):
  "preserve date-less revisions in load"

  sbox.build(empty=True)

  # Shrink r1's property block so it carries no svn:date revprop at all.
  dump = clean_dumpfile()
  dump[7:11] = \
    [ b"Prop-content-length: 52\n",
      b"Content-length: 52\n\n",
      b"K 7\nsvn:log\nV 0\n\nK 10\nsvn:author\nV 4\nerik\nPROPS-END\n\n\n"
    ]

  load_and_verify_dumpstream(sbox, [], [], dumpfile_revisions, False, dump,
                             '--ignore-uuid')

  # The loaded revision must still lack the svn:date property.
  svntest.actions.run_and_verify_svn([], '.*(E195011|E200017).*svn:date',
                                     "propget", "--revprop", "-r1", "svn:date",
                                     sbox.wc_dir)
#----------------------------------------------------------------------
def dump_copied_dir(sbox):
  "'svnadmin dump' on copied directory"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_dir = sbox.repo_dir

  # Copy A/C to A/B/C and commit it as r2.
  svntest.main.run_svn(None, 'cp',
                       os.path.join(wc_dir, 'A', 'C'),
                       os.path.join(wc_dir, 'A', 'B', 'C'))
  sbox.simple_commit(message='log msg')

  # A full dump must report progress for r0 through r2 on stderr.
  exit_code, output, errput = svntest.main.run_svnadmin("dump", repo_dir)
  if svntest.verify.compare_and_display_lines(
    "Output of 'svnadmin dump' is unexpected.",
    'STDERR', ["* Dumped revision 0.\n",
               "* Dumped revision 1.\n",
               "* Dumped revision 2.\n"], errput):
    raise svntest.Failure
#----------------------------------------------------------------------
def dump_move_dir_modify_child(sbox):
  "'svnadmin dump' on modified child of copied dir"

  sbox.build()
  wc_dir = sbox.wc_dir
  repo_dir = sbox.repo_dir

  # Copy A/B to A/Q, modify a file inside the copy, and commit as r2.
  B_path = os.path.join(wc_dir, 'A', 'B')
  Q_path = os.path.join(wc_dir, 'A', 'Q')
  svntest.main.run_svn(None, 'cp', B_path, Q_path)
  svntest.main.file_append(os.path.join(Q_path, 'lambda'), 'hello')
  sbox.simple_commit(message='log msg')

  # Both a plain dump and an explicit -r 0:HEAD dump must report progress
  # for all three revisions on stderr.
  expected_progress = ["* Dumped revision 0.\n",
                       "* Dumped revision 1.\n",
                       "* Dumped revision 2.\n"]

  exit_code, output, errput = svntest.main.run_svnadmin("dump", repo_dir)
  svntest.verify.compare_and_display_lines(
    "Output of 'svnadmin dump' is unexpected.",
    'STDERR', expected_progress, errput)

  exit_code, output, errput = svntest.main.run_svnadmin("dump", "-r",
                                                        "0:HEAD", repo_dir)
  svntest.verify.compare_and_display_lines(
    "Output of 'svnadmin dump' is unexpected.",
    'STDERR', expected_progress, errput)
#----------------------------------------------------------------------
def dump_quiet(sbox):
  "'svnadmin dump --quiet'"

  sbox.build(create_wc = False)

  # With --quiet there must be no per-revision progress lines on stderr.
  exit_code, dump_output, errput = svntest.main.run_svnadmin(
    "dump", sbox.repo_dir, '--quiet')
  svntest.verify.compare_and_display_lines(
    "Output of 'svnadmin dump --quiet' is unexpected.",
    'STDERR', [], errput)
#----------------------------------------------------------------------
def hotcopy_dot(sbox):
  "'svnadmin hotcopy PATH .'"

  sbox.build()

  backup_dir, backup_url = sbox.add_repo_path('backup')
  os.mkdir(backup_dir)
  cwd = os.getcwd()

  os.chdir(backup_dir)
  try:
    # Hotcopy into '.' (the backup dir) using an absolute source path.
    svntest.actions.run_and_verify_svnadmin(
      None, [],
      "hotcopy", os.path.join(cwd, sbox.repo_dir), '.')
  finally:
    # Restore the original working directory even when verification
    # raises, so a failure here cannot poison subsequent tests.
    os.chdir(cwd)

  # Compare original and backup with the backend-appropriate checker.
  if svntest.main.is_fs_type_fsfs():
    check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
  if svntest.main.is_fs_type_bdb():
    check_hotcopy_bdb(sbox.repo_dir, backup_dir)
  if svntest.main.is_fs_type_fsx():
    check_hotcopy_fsx(sbox.repo_dir, backup_dir)
#----------------------------------------------------------------------
# This test is redundant for FSFS. The hotcopy_dot and hotcopy_incremental
# tests cover this check for FSFS already.
@SkipUnless(svntest.main.is_fs_type_bdb)
def hotcopy_format(sbox):
  "'svnadmin hotcopy' checking db/format file"

  sbox.build()

  backup_dir, backup_url = sbox.add_repo_path('backup')
  exit_code, output, errput = svntest.main.run_svnadmin("hotcopy",
                                                        sbox.repo_dir,
                                                        backup_dir)
  if errput:
    logger.warn("Error: hotcopy failed")
    raise svntest.Failure

  # The hotcopy must reproduce db/format verbatim.
  with open(os.path.join(sbox.repo_dir, "db", "format")) as src:
    contents1 = src.read()
  with open(os.path.join(backup_dir, "db", "format")) as dst:
    contents2 = dst.read()

  if contents1 != contents2:
    logger.warn("Error: db/format file contents do not match after hotcopy")
    raise svntest.Failure
#----------------------------------------------------------------------
def setrevprop(sbox):
  "setlog, setrevprop, delrevprop; bypass hooks"
  sbox.build()

  log_source = os.path.join(sbox.wc_dir, "iota")
  other_source = sbox.ospath('A/mu')

  # A log-message change must succeed when hooks are bypassed.
  svntest.actions.run_and_verify_svnadmin([], [],
                                          "setlog", sbox.repo_dir, "-r0",
                                          "--bypass-hooks",
                                          log_source)

  # Without --bypass-hooks the same operation must fail, since
  # svntest.actions.enable_revprop_changes() was never called.  Use a
  # different log value so a (buggy) success would be detectable below.
  svntest.actions.run_and_verify_svnadmin([], svntest.verify.AnyOutput,
                                          "setlog", sbox.repo_dir, "-r0",
                                          other_source)

  # The client must see the value set by the first, successful call.
  svntest.actions.run_and_verify_svn([ "This is the file 'iota'.\n", "\n" ],
                                     [], "propget", "--revprop", "-r0",
                                     "svn:log", sbox.wc_dir)

  # Now modify the author revprop from a scratch file.
  author_source = os.path.join(sbox.wc_dir, "foo")
  svntest.main.file_write(author_source, "foo")
  exit_code, output, errput = svntest.main.run_svnadmin("setrevprop",
                                                        sbox.repo_dir,
                                                        "-r0", "svn:author",
                                                        author_source)
  if errput:
    logger.warn("Error: 'setrevprop' failed")
    raise svntest.Failure

  # Again, the client must observe the new value.
  svntest.actions.run_and_verify_svn([ "foo\n" ], [], "propget",
                                     "--revprop", "-r0", "svn:author",
                                     sbox.wc_dir)

  # Deleting the property must make subsequent lookups fail.
  svntest.actions.run_and_verify_svnadmin([], [],
                                          "delrevprop", "-r0", sbox.repo_dir,
                                          "svn:author")
  svntest.actions.run_and_verify_svnlook([], ".*E200017.*svn:author.*",
                                         "propget", "--revprop", "-r0",
                                         sbox.repo_dir, "svn:author")
def verify_windows_paths_in_repos(sbox):
  "verify a repository containing paths like 'c:hi'"

  # Set up a repository containing a directory named 'c:hi'.
  sbox.build(create_wc = False)
  svntest.actions.run_and_verify_svn(None, [],
                                     'mkdir', '-m', 'log_msg',
                                     sbox.repo_url + '/c:hi')

  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir)
  if errput:
    raise SVNUnexpectedStderr(errput)

  # Some backends need to do more checks than others, which yields
  # different progress output; assemble the expectation accordingly.
  verified = ["* Verified revision 0.\n",
              "* Verified revision 1.\n",
              "* Verified revision 2.\n"]
  if svntest.main.is_fs_log_addressing():
    expected = ["* Verifying metadata at revision 0 ...\n",
                "* Verifying repository metadata ...\n"] + verified
  elif svntest.main.fs_has_rep_sharing() and not svntest.main.is_fs_type_bdb():
    expected = ["* Verifying repository metadata ...\n"] + verified
  else:
    expected = verified

  svntest.verify.compare_and_display_lines(
    "Error while running 'svnadmin verify'.",
    'STDOUT', expected, output)
#----------------------------------------------------------------------
# Returns the filename of the rev or revprop file (according to KIND)
# numbered REV in REPO_DIR, which must be in the first shard if we're
# using a sharded repository.
def fsfs_file(repo_dir, kind, rev):
  """Return the path of the file holding revision REV's data of KIND
  ('revs' or 'revprops') inside REPO_DIR, honoring the configured
  sharding/packing options (REV must be in the first shard when
  sharding applies)."""
  # Pre-1.5 layouts are never sharded: everything lives in db/KIND.
  if svntest.main.options.server_minor_version < 5:
    return os.path.join(repo_dir, 'db', kind, rev)

  # FSX prefixes revision filenames with 'r'.
  if svntest.main.is_fs_type_fsx():
    filename = 'r' + rev
  else:
    filename = rev

  sharding = svntest.main.options.fsfs_sharding
  if sharding is None:
    return os.path.join(repo_dir, 'db', kind, '0', filename)

  shard = int(rev) // sharding
  path = os.path.join(repo_dir, 'db', kind, str(shard), filename)
  if svntest.main.options.fsfs_packing is None or kind == 'revprops':
    # we don't pack revprops
    return path
  if os.path.exists(path):
    # rev exists outside a pack file.
    return path
  # didn't find the plain file; assume it's in a pack file
  return os.path.join(repo_dir, 'db', kind, ('%d.pack' % shard), 'pack')
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_incremental_fsfs(sbox):
  """svnadmin verify detects corruption dump can't"""

  # NOTE(review): the hand-crafted revision data below presumably only
  # matches FSFS formats 4 and 6 — confirm before extending this list.
  if svntest.main.options.fsfs_version is not None and \
     svntest.main.options.fsfs_version not in [4, 6]:
    raise svntest.Skip("Unsupported prepackaged repository version")

  # setup a repo with a directory 'c:hi'
  # use physical addressing as this is hard to provoke with logical addressing
  sbox.build(create_wc = False,
             minor_version = min(svntest.main.options.server_minor_version,8))
  repo_url = sbox.repo_url
  E_url = sbox.repo_url + '/A/B/E'

  # Create A/B/E/bravo in r2.
  svntest.actions.run_and_verify_svn(None, [],
                                     'mkdir', '-m', 'log_msg',
                                     E_url + '/bravo')
  # Corrupt r2's reference to A/C by replacing "dir 7-1.0.r1/1568" with
  # "dir 7-1.0.r1/1569" (increment offset) and updating the checksum for
  # this directory listing to "c9b5a2d26473a4e28088673dda9df804" so that
  # the listing itself is valid.
  r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
  if r2.endswith('pack'):
    raise svntest.Skip("Test doesn't handle packed revisions")

  # Overwrite the whole r2 revision file with a hand-crafted replacement
  # containing the corrupted back-reference described above.
  fp = open(r2, 'wb')
  fp.write(b"""id: 0-2.0.r2/0
type: dir
count: 0
cpath: /A/B/E/bravo
copyroot: 0 /
PLAIN
K 5
alpha
V 17
file 3-1.0.r1/719
K 4
beta
V 17
file 4-1.0.r1/840
K 5
bravo
V 14
dir 0-2.0.r2/0
END
ENDREP
id: 2-1.0.r2/181
type: dir
pred: 2-1.0.r1/1043
count: 1
text: 2 69 99 99 f63001f7fddd1842d8891474d0982111
cpath: /A/B/E
copyroot: 0 /
PLAIN
K 1
E
V 16
dir 2-1.0.r2/181
K 1
F
V 17
dir 5-1.0.r1/1160
K 6
lambda
V 17
file 6-1.0.r1/597
END
ENDREP
id: 1-1.0.r2/424
type: dir
pred: 1-1.0.r1/1335
count: 1
text: 2 316 95 95 bccb66379b4f825dac12b50d80211bae
cpath: /A/B
copyroot: 0 /
PLAIN
K 1
B
V 16
dir 1-1.0.r2/424
K 1
C
V 17
dir 7-1.0.r1/1569
K 1
D
V 17
dir 8-1.0.r1/3061
K 2
mu
V 18
file i-1.0.r1/1451
END
ENDREP
id: 0-1.0.r2/692
type: dir
pred: 0-1.0.r1/3312
count: 1
text: 2 558 121 121 c9b5a2d26473a4e28088673dda9df804
cpath: /A
copyroot: 0 /
PLAIN
K 1
A
V 16
dir 0-1.0.r2/692
K 4
iota
V 18
file j-1.0.r1/3428
END
ENDREP
id: 0.0.r2/904
type: dir
pred: 0.0.r1/3624
count: 2
text: 2 826 65 65 e44e4151d0d124533338619f082c8c9a
cpath: /
copyroot: 0 /
_0.0.t1-1 add false false /A/B/E/bravo
904 1031
""")
  fp.close()

  # 'svnadmin verify' must detect the corruption; the regex alternation
  # accepts either of the two error messages observed here.
  exit_code, output, errput = svntest.main.run_svnadmin("verify", "-r2",
                                                        sbox.repo_dir)
  svntest.verify.verify_outputs(
    message=None, actual_stdout=output, actual_stderr=errput,
    expected_stdout=None,
    expected_stderr=".*Found malformed header '[^']*' in revision file"
                    "|.*Missing id field in node-rev.*")
#----------------------------------------------------------------------
# Helper for two test functions.
def corrupt_and_recover_db_current(sbox, minor_version=None):
  """Build up a MINOR_VERSION sandbox and test different recovery scenarios
  with missing, out-of-date or even corrupt db/current files.  Recovery should
  behave the same way with all values of MINOR_VERSION, hence this helper
  containing the common code that allows us to check it."""

  sbox.build(minor_version=minor_version)
  current_path = os.path.join(sbox.repo_dir, 'db', 'current')

  # Commit up to r3, so we can test various recovery scenarios.
  svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newer line\n')
  sbox.simple_commit(message='log msg')

  svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newest line\n')
  sbox.simple_commit(message='log msg')

  # Remember the contents of the db/current file.
  with open(current_path) as f:
    expected_current_contents = f.read()

  def recover_and_check():
    """Run 'svnadmin recover' and verify db/current was restored to the
    remembered contents."""
    exit_code, output, errput = svntest.main.run_svnadmin("recover",
                                                          sbox.repo_dir)
    if errput:
      raise SVNUnexpectedStderr(errput)
    with open(current_path) as f:
      actual_current_contents = f.read()
    svntest.verify.compare_and_display_lines(
      "Contents of db/current is unexpected.",
      'db/current', expected_current_contents, actual_current_contents)

  # Scenario 1: db/current is missing entirely.
  os.rename(os.path.join(sbox.repo_dir, 'db', 'current'),
            os.path.join(sbox.repo_dir, 'db', 'was_current'))
  recover_and_check()

  # Scenario 2: db/current is one rev lower than it should be.
  svntest.main.file_write(current_path, '2\n')
  recover_and_check()

  # Scenario 3: db/current is *two* revs lower than it should be.
  svntest.main.file_write(current_path, '1\n')
  recover_and_check()

  # Scenario 4: db/current is fish revs lower than it should be.
  #
  # Note: I'm not actually sure it's wise to recover from this, but
  # detecting it would require rewriting fs_fs.c:get_youngest() to
  # check the actual contents of its buffer, since atol() will happily
  # convert "fish" to 0.
  svntest.main.file_write(current_path, 'fish\n')
  recover_and_check()
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_db_current(sbox):
  "fsfs recover db/current"
  # Exercise db/current recovery on the suite's default repository format.
  corrupt_and_recover_db_current(sbox)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_old_db_current(sbox):
  "fsfs recover db/current --compatible-version=1.3"

  # Around trunk@1573728, 'svnadmin recover' wrongly errored out
  # for the --compatible-version=1.3 repositories with missing or
  # invalid db/current file:
  #   svnadmin: E160006: No such revision 1
  # Regression coverage: run the same scenarios against a 1.3 repository.
  corrupt_and_recover_db_current(sbox, minor_version=3)
#----------------------------------------------------------------------
@Issue(2983)
def load_with_parent_dir(sbox):
  "'svnadmin load --parent-dir' reparents mergeinfo"

  ## See https://issues.apache.org/jira/browse/SVN-2983. ##
  sbox.build(empty=True)

  dumpfile = svntest.actions.load_dumpfile(
    os.path.join(os.path.dirname(sys.argv[0]),
                 'svnadmin_tests_data',
                 'mergeinfo_included.dump'))

  def check_mergeinfo(url, expected_lines):
    # Assert the recursive svn:mergeinfo listing rooted at URL.
    svntest.actions.run_and_verify_svn(expected_lines, [],
                                       'propget', 'svn:mergeinfo', '-R',
                                       url)

  # Create 'sample' dir in sbox.repo_url, and load the dump stream there.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 1.\n'],
                                     [], "mkdir", sbox.repo_url + "/sample",
                                     "-m", "Create sample dir")
  load_dumpstream(sbox, dumpfile, '--parent-dir', '/sample')

  # The loaded mergeinfo must have been reparented under /sample.
  check_mergeinfo(sbox.repo_url + '/sample/branch',
                  [sbox.repo_url +
                   "/sample/branch - /sample/trunk:5-7\n"])
  check_mergeinfo(sbox.repo_url + '/sample/branch1',
                  [sbox.repo_url +
                   "/sample/branch1 - " +
                   "/sample/branch:6-9\n"])

  # Create 'sample-2' dir in sbox.repo_url, and load the dump stream again.
  # This time, don't include a leading slash on the --parent-dir argument.
  # See issue #3547.
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 11.\n'],
                                     [], "mkdir", sbox.repo_url + "/sample-2",
                                     "-m", "Create sample-2 dir")
  load_dumpstream(sbox, dumpfile, '--parent-dir', 'sample-2')

  # Again the mergeinfo must be reparented, now under /sample-2 and with
  # revision numbers offset by the ten revisions already present.
  check_mergeinfo(sbox.repo_url + '/sample-2/branch',
                  [sbox.repo_url +
                   "/sample-2/branch - " +
                   "/sample-2/trunk:15-17\n"])
  check_mergeinfo(sbox.repo_url + '/sample-2/branch1',
                  [sbox.repo_url +
                   "/sample-2/branch1 - " +
                   "/sample-2/branch:16-19\n"])
#----------------------------------------------------------------------
def set_uuid(sbox):
  "test 'svnadmin setuuid'"

  sbox.build(create_wc=False)

  def current_uuid():
    # Fetch the repository's UUID via svnlook.
    exit_code, output, errput = svntest.main.run_svnlook('uuid',
                                                         sbox.repo_dir)
    if errput:
      raise SVNUnexpectedStderr(errput)
    return output[0].rstrip()

  # Squirrel away the original repository UUID.
  orig_uuid = current_uuid()

  # A malformed UUID must be rejected.
  svntest.actions.run_and_verify_svnadmin(None, '^.*Malformed UUID.*$',
                                          'setuuid', sbox.repo_dir, 'abcdef')

  # Generating a brand new UUID must actually change it.
  svntest.actions.run_and_verify_svnadmin([], None,
                                          'setuuid', sbox.repo_dir)
  if current_uuid() == orig_uuid:
    logger.warn("Error: new UUID matches the original one")
    raise svntest.Failure

  # Setting an explicit value must restore the original UUID.
  svntest.actions.run_and_verify_svnadmin([], None,
                                          'setuuid', sbox.repo_dir, orig_uuid)
  if current_uuid() != orig_uuid:
    logger.warn("Error: new UUID doesn't match the original one")
    raise svntest.Failure
#----------------------------------------------------------------------
@Issue(3020)
def reflect_dropped_renumbered_revs(sbox):
  "reflect dropped renumbered revs in svn:mergeinfo"

  ## See https://issues.apache.org/jira/browse/SVN-3020. ##
  sbox.build(empty=True)

  dump_path = os.path.join(os.path.dirname(sys.argv[0]),
                           'svndumpfilter_tests_data',
                           'with_merges.dump')
  dumpfile = svntest.actions.load_dumpfile(dump_path)

  # Create 'toplevel' dir in sbox.repo_url
  svntest.actions.run_and_verify_svn(['Committing transaction...\n',
                                      'Committed revision 1.\n'],
                                     [], "mkdir", sbox.repo_url + "/toplevel",
                                     "-m", "Create toplevel dir")

  # Load the same dump once at the root and once under /toplevel; the
  # mergeinfo of the second copy must be offset by the revisions already
  # present in the repository.
  load_dumpstream(sbox, dumpfile)
  load_dumpstream(sbox, dumpfile, '--parent-dir', '/toplevel')

  # Verify the svn:mergeinfo properties
  url = sbox.repo_url
  expected_output = svntest.verify.UnorderedOutput([
    url + "/trunk - /branch1:5-9\n",
    url + "/toplevel/trunk - /toplevel/branch1:14-18\n",
    ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)
#----------------------------------------------------------------------
@SkipUnless(svntest.main.is_fs_type_fsfs)
@Issue(2992)
def fsfs_recover_handle_missing_revs_or_revprops_file(sbox):
  """fsfs recovery checks missing revs / revprops files"""

  # Three scenarios are checked below for the youngest revision (r3):
  # a missing revs file, a missing revprops file, and a revprops path
  # occupied by a directory.  'svnadmin recover' must fail cleanly and
  # distinctly in each case.

  # Set up a repository containing the greek tree.
  sbox.build()

  # Commit up to r3, so we can test various recovery scenarios.
  svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newer line\n')
  sbox.simple_commit(message='log msg')

  svntest.main.file_append(os.path.join(sbox.wc_dir, 'iota'), 'newest line\n')
  sbox.simple_commit(message='log msg')

  rev_3 = fsfs_file(sbox.repo_dir, 'revs', '3')
  rev_was_3 = rev_3 + '.was'

  # Move aside the revs file for r3.
  os.rename(rev_3, rev_was_3)

  # Verify 'svnadmin recover' fails when youngest has a revprops
  # file but no revs file.
  exit_code, output, errput = svntest.main.run_svnadmin("recover",
                                                        sbox.repo_dir)

  if svntest.verify.verify_outputs(
    "Output of 'svnadmin recover' is unexpected.", None, errput, None,
    ".*Expected current rev to be <= %s but found 3"
    # For example, if svntest.main.fsfs_sharding == 2, then rev_3 would
    # be the pack file for r2:r3, and the error message would report "<= 1".
    % (rev_3.endswith('pack') and '[012]' or '2')):
    raise svntest.Failure

  # Restore the r3 revs file, thus repairing the repository.
  os.rename(rev_was_3, rev_3)

  revprop_3 = fsfs_file(sbox.repo_dir, 'revprops', '3')
  revprop_was_3 = revprop_3 + '.was'

  # Move aside the revprops file for r3.
  os.rename(revprop_3, revprop_was_3)

  # Verify 'svnadmin recover' fails when youngest has a revs file
  # but no revprops file (issue #2992).
  exit_code, output, errput = svntest.main.run_svnadmin("recover",
                                                        sbox.repo_dir)

  if svntest.verify.verify_outputs(
    "Output of 'svnadmin recover' is unexpected.", None, errput, None,
    ".*Revision 3 has a revs file but no revprops file"):
    raise svntest.Failure

  # Restore the r3 revprops file, thus repairing the repository.
  os.rename(revprop_was_3, revprop_3)

  # Change revprops file to a directory for revision 3
  os.rename(revprop_3, revprop_was_3)
  os.mkdir(revprop_3)

  # Verify 'svnadmin recover' fails when youngest has a revs file
  # but revprops file is not a file (another aspect of issue #2992).
  exit_code, output, errput = svntest.main.run_svnadmin("recover",
                                                        sbox.repo_dir)

  if svntest.verify.verify_outputs(
    "Output of 'svnadmin recover' is unexpected.", None, errput, None,
    ".*Revision 3 has a non-file where its revprops file should be.*"):
    raise svntest.Failure

  # Restore the r3 revprops file, thus repairing the repository.
  os.rmdir(revprop_3)
  os.rename(revprop_was_3, revprop_3)
#----------------------------------------------------------------------
@Skip(svntest.main.tests_use_prepackaged_repository)
def create_in_repo_subdir(sbox):
  "'svnadmin create /path/to/repo/subdir'"

  sbox.build(create_wc=False, empty=True)
  repo_dir = sbox.repo_dir

  # Creating a repository inside an existing repository must fail.
  try:
    svntest.main.create_repos(os.path.join(repo_dir, 'Z'))
  except svntest.main.SVNRepositoryCreateFailure:
    pass
  else:
    raise svntest.Failure

  # The same must hold when the target is given relative to a working
  # directory inside the repository.  Restore the original cwd even if an
  # unexpected exception escapes, so later tests aren't affected.
  cwd = os.getcwd()
  try:
    os.chdir(os.path.join(repo_dir, 'conf'))
    try:
      svntest.main.create_repos('Z')
    except svntest.main.SVNRepositoryCreateFailure:
      pass
    else:
      raise svntest.Failure
  finally:
    os.chdir(cwd)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipDumpLoadCrossCheck()
def verify_with_invalid_revprops(sbox):
  "svnadmin verify detects invalid revprops file"

  sbox.build(create_wc=False, empty=True)

  # Sanity check: a pristine repository must verify cleanly.
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir)
  if errput:
    raise SVNUnexpectedStderr(errput)
  if svntest.verify.verify_outputs(
    "Output of 'svnadmin verify' is unexpected.", None, output, None,
    ".*Verified revision 0*"):
    raise svntest.Failure

  # Truncate the r0 revprops file to zero bytes; verify must now report
  # a malformed-file error.
  with open(os.path.join(sbox.repo_dir, 'db', 'revprops', '0', '0'),
            'w') as rp_file:
    rp_file.write('')

  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir)
  if svntest.verify.verify_outputs(
    "Output of 'svnadmin verify' is unexpected.", None, errput, None,
    ".*svnadmin: E200002:.*"):
    raise svntest.Failure
#----------------------------------------------------------------------
# Even *more* testing for issue #3020 'Reflect dropped/renumbered
# revisions in svn:mergeinfo data during svnadmin load'
#
# Full or incremental dump-load cycles should result in the same
# mergeinfo in the loaded repository.
#
# Given a repository 'SOURCE-REPOS' with mergeinfo, and a repository
# 'TARGET-REPOS' (which may or may not be empty), either of the following
# methods to move 'SOURCE-REPOS' to 'TARGET-REPOS' should result in
# the same mergeinfo on 'TARGET-REPOS':
#
# 1) Dump -r1:HEAD from 'SOURCE-REPOS' and load it in one shot to
# 'TARGET-REPOS'.
#
# 2) Dump 'SOURCE-REPOS' in a series of incremental dumps and load
# each of them to 'TARGET-REPOS'.
#
# See https://issues.apache.org/jira/browse/SVN-3020#desc13
@Issue(3020)
def dont_drop_valid_mergeinfo_during_incremental_loads(sbox):
  "don't filter mergeinfo revs from incremental dump"

  # Create an empty repos.
  sbox.build(empty=True)

  # PART 1: Load a full dump to an empty repository.
  #
  # The test repository used here, 'mergeinfo_included_full.dump', is
  # this repos:
  #                       __________________________________________
  #                      |                                         |
  #                      |             ____________________________|_____
  #                      |            |                            |    |
  # trunk---r2---r3-----r5---r6-------r8---r9--------------->      |    |
  #   r1             |        |     |       |                      |    |
  # initial          |        |     |       |______                |    |
  # import         copy       |   copy             |            merge   merge
  #                  |        |     |            merge           (r5)   (r8)
  #                  |        |     |            (r9)              |    |
  #                  |        |     |              |               |    |
  #                  |        |     V              V               |    |
  #                  |        | branches/B2-------r11---r12---->   |    |
  #                  |        |     r7              |____|         |    |
  #                  |        |                        |           |    |
  #                  |      merge                      |___        |    |
  #                  |      (r6)                           |       |    |
  #                  |        |_________________           |       |    |
  #                  |                          |       merge      |    |
  #                  |                          |      (r11-12)    |    |
  #                  |                          |          |       |    |
  #                  V                          V          V       |    |
  #              branches/B1-------------------r10--------r13-->   |    |
  #                  r4                                            |    |
  #                   |                                            V    V
  #                  branches/B1/B/E------------------------------r14---r15->
  #
  #
  # The mergeinfo on this repos@15 is:
  #
  #   Properties on 'branches/B1':
  #     svn:mergeinfo
  #       /branches/B2:11-12
  #       /trunk:6,9
  #   Properties on 'branches/B1/B/E':
  #     svn:mergeinfo
  #       /branches/B2/B/E:11-12
  #       /trunk/B/E:5-6,8-9
  #   Properties on 'branches/B2':
  #     svn:mergeinfo
  #       /trunk:9
  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svnadmin_tests_data',
                                   'mergeinfo_included_full.dump')
  dumpfile_full = svntest.actions.load_dumpfile(dumpfile_location)
  load_dumpstream(sbox, dumpfile_full, '--ignore-uuid')

  # Check that the mergeinfo is as expected.
  url = sbox.repo_url + '/branches/'
  expected_output = svntest.verify.UnorderedOutput([
    url + "B1 - /branches/B2:11-12\n",
    "/trunk:6,9\n",
    url + "B2 - /trunk:9\n",
    url + "B1/B/E - /branches/B2/B/E:11-12\n",
    "/trunk/B/E:5-6,8-9\n"])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)

  # PART 2: Load a series of incremental dumps to an empty repository.
  #
  # Incrementally dump the repository into three dump files.  Each dump
  # is written inside a 'with' block so the file is reliably closed
  # (and flushed) before it is loaded again below.
  dump_file_r1_10 = sbox.get_tempname("r1-10-dump")
  exit_code, output, errput = svntest.main.run_svnadmin(
    'dump', sbox.repo_dir, '-r1:10')
  with open(dump_file_r1_10, 'wb') as dump_fp:
    dump_fp.writelines(output)

  dump_file_r11_13 = sbox.get_tempname("r11-13-dump")
  exit_code, output, errput = svntest.main.run_svnadmin(
    'dump', sbox.repo_dir, '--incremental', '-r11:13')
  with open(dump_file_r11_13, 'wb') as dump_fp:
    dump_fp.writelines(output)

  dump_file_r14_15 = sbox.get_tempname("r14-15-dump")
  exit_code, output, errput = svntest.main.run_svnadmin(
    'dump', sbox.repo_dir, '--incremental', '-r14:15')
  with open(dump_file_r14_15, 'wb') as dump_fp:
    dump_fp.writelines(output)

  # Blow away the current repos and create an empty one in its place.
  sbox.build(empty=True)

  # Load the three incremental dump files in sequence.
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r1_10),
                  '--ignore-uuid')
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r11_13),
                  '--ignore-uuid')
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r14_15),
                  '--ignore-uuid')

  # Check the mergeinfo, we use the same expected output as before,
  # as it (duh!) should be exactly the same as when we loaded the
  # repos in one shot.
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)

  # Now repeat the above two scenarios, but with an initially non-empty target
  # repository.  First, try the full dump-load in one shot.
  #
  # PART 3: Load a full dump to a non-empty repository.
  #
  # Reset our sandbox.
  sbox.build(empty=True)

  # Load this skeleton repos into the empty target:
  #
  #   Projects/       (Added r1)
  #     README        (Added r2)
  #     Project-X     (Added r3)
  #     Project-Y     (Added r4)
  #     Project-Z     (Added r5)
  #     docs/         (Added r6)
  #       README      (Added r6)
  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svnadmin_tests_data',
                                   'skeleton_repos.dump')
  dumpfile_skeleton = svntest.actions.load_dumpfile(dumpfile_location)
  load_dumpstream(sbox, dumpfile_skeleton, '--ignore-uuid')

  # Load 'svnadmin_tests_data/mergeinfo_included_full.dump' in one shot:
  load_dumpstream(sbox, dumpfile_full, '--parent-dir', 'Projects/Project-X',
                  '--ignore-uuid')

  # Check that the mergeinfo is as expected.  This is exactly the
  # same expected mergeinfo we previously checked, except that the
  # revisions are all offset +6 to reflect the revisions already in
  # the skeleton target before we began loading and the leading source
  # paths are adjusted by the --parent-dir:
  #
  #   Properties on 'branches/B1':
  #     svn:mergeinfo
  #       /Projects/Project-X/branches/B2:17-18
  #       /Projects/Project-X/trunk:12,15
  #   Properties on 'branches/B1/B/E':
  #     svn:mergeinfo
  #       /Projects/Project-X/branches/B2/B/E:17-18
  #       /Projects/Project-X/trunk/B/E:11-12,14-15
  #   Properties on 'branches/B2':
  #     svn:mergeinfo
  #       /Projects/Project-X/trunk:15
  url = sbox.repo_url + '/Projects/Project-X/branches/'
  expected_output = svntest.verify.UnorderedOutput([
    url + "B1 - /Projects/Project-X/branches/B2:17-18\n",
    "/Projects/Project-X/trunk:12,15\n",
    url + "B2 - /Projects/Project-X/trunk:15\n",
    url + "B1/B/E - /Projects/Project-X/branches/B2/B/E:17-18\n",
    "/Projects/Project-X/trunk/B/E:11-12,14-15\n"])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)

  # PART 4: Load a series of incremental dumps to a non-empty repository.
  #
  # Reset our sandbox.
  sbox.build(empty=True)

  # Load this skeleton repos into the empty target:
  load_dumpstream(sbox, dumpfile_skeleton, '--ignore-uuid')

  # Load the three incremental dump files in sequence.
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r1_10),
                  '--parent-dir', 'Projects/Project-X', '--ignore-uuid')
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r11_13),
                  '--parent-dir', 'Projects/Project-X', '--ignore-uuid')
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dump_file_r14_15),
                  '--parent-dir', 'Projects/Project-X', '--ignore-uuid')

  # Check the resulting mergeinfo.  We expect the exact same results
  # as Part 3.
  # See https://issues.apache.org/jira/browse/SVN-3020#desc16.
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'propget', 'svn:mergeinfo', '-R',
                                     sbox.repo_url)
@SkipUnless(svntest.main.is_posix_os)
@Issue(2591)
def hotcopy_symlink(sbox):
  "'svnadmin hotcopy' replicates symlink"

  ## See https://issues.apache.org/jira/browse/SVN-2591. ##

  # Create a repository.
  sbox.build(create_wc=False, empty=True)
  original_repo = sbox.repo_dir

  hotcopy_repo, hotcopy_url = sbox.add_repo_path('hotcopy')

  # Create a file, a dir and a missing path outside the repository.
  svntest.main.safe_rmtree(sbox.wc_dir, 1)
  os.mkdir(sbox.wc_dir)
  external_file_path = os.path.join(sbox.wc_dir, "file")
  svntest.main.file_write(external_file_path, "An existing file")
  external_dir_path = os.path.join(sbox.wc_dir, "dir")
  os.mkdir(external_dir_path)
  external_missing_path = os.path.join(sbox.wc_dir, "missing")

  # Symlink definitions: base name -> target relpath.
  # Check both existing and nonexistent targets.
  # Check targets both within and outside the source repository.
  symlinks = [
    ('in_repos_file', 'format'),
    ('in_repos_dir', 'conf'),
    ('in_repos_missing', 'missing'),
    ('external_file', os.path.join('..', '..', '..', external_file_path)),
    ('external_dir', os.path.join('..', '..', '..', external_dir_path)),
    ('external_missing', os.path.join('..', '..', '..', external_missing_path)),
  ]

  # Create symlinks within the repository directory.
  for name, target_relpath in symlinks:
    target_path = os.path.join(original_repo, target_relpath)
    target_abspath = os.path.abspath(target_path)

    # Create two symlinks to each target - one relative, one absolute.
    symlink_path = os.path.join(original_repo, name)
    os.symlink(target_relpath, symlink_path + '_rel')
    os.symlink(target_abspath, symlink_path + '_abs')

  svntest.actions.run_and_verify_svnadmin(
    None, [],
    "hotcopy", original_repo, hotcopy_repo)

  # Check if the symlinks were copied correctly: hotcopy must replicate
  # the link itself (same target string), not dereference it.
  for name, target_relpath in symlinks:
    target_path = os.path.join(original_repo, target_relpath)
    target_abspath = os.path.abspath(target_path)

    # Check two symlinks to each target - one relative, one absolute.
    symlink_path = os.path.join(hotcopy_repo, name)
    if os.readlink(symlink_path + '_rel') != target_relpath:
      raise svntest.Failure
    if os.readlink(symlink_path + '_abs') != target_abspath:
      raise svntest.Failure
def load_bad_props(sbox):
  "svnadmin load with invalid svn: props"

  # A format-2 dump whose r1 svn:log value is "\n\r\n" -- mixed EOLs,
  # which is invalid for svn: properties.  The literal below is the raw
  # dump stream; do not edit its bytes.
  dump_str = b"""SVN-fs-dump-format-version: 2

UUID: dc40867b-38f6-0310-9f5f-f81aa277e06f

Revision-number: 0
Prop-content-length: 56
Content-length: 56

K 8
svn:date
V 27
2005-05-03T19:09:41.129900Z
PROPS-END

Revision-number: 1
Prop-content-length: 99
Content-length: 99

K 7
svn:log
V 3
\n\r\n
K 10
svn:author
V 2
pl
K 8
svn:date
V 27
2005-05-03T19:10:19.975578Z
PROPS-END

Node-path: file
Node-kind: file
Node-action: add
Prop-content-length: 10
Text-content-length: 5
Text-content-md5: e1cbb0c3879af8347246f12c559a86b5
Content-length: 15

PROPS-END
text

"""
  sbox.build(empty=True)

  # Try to load the dumpstream, expecting a failure (because of mixed EOLs).
  exp_err = svntest.verify.RegexListOutput(['svnadmin: E125005:.*',
                                            'svnadmin: E125005:.*',
                                            'svnadmin: E125017:.*'],
                                           match_all=False)
  load_and_verify_dumpstream(sbox, [], exp_err, dumpfile_revisions,
                             False, dump_str, '--ignore-uuid')

  # Now try it again bypassing prop validation.  (This interface takes
  # care of the removal and recreation of the original repository.)
  svntest.actions.load_repo(sbox, dump_str=dump_str,
                            bypass_prop_validation=True)
  # Getting the property should fail.
  svntest.actions.run_and_verify_svn(None, 'svn: E135000: ',
                                     'pg', 'svn:log', '--revprop', '-r1',
                                     sbox.repo_url)

  # Now try it again with prop normalization.
  svntest.actions.load_repo(sbox, dump_str=dump_str,
                            bypass_prop_validation=False,
                            normalize_props=True)
  # We should get the expected (normalized, LF-only) property value.
  exit_code, output, _ = svntest.main.run_svn(None, 'pg', 'svn:log',
                                              '--revprop', '-r1',
                                              '--no-newline',
                                              sbox.repo_url)
  svntest.verify.verify_exit_code(None, exit_code, 0)
  if output != ['\n', '\n']:
    raise svntest.Failure("Unexpected property value %s" % output)
# This test intentionally corrupts a revision and assumes an FSFS
# repository. If you can make it work with BDB please do so.
# However, the verification triggered by this test is in the repos layer
# so it will trigger with either backend anyway.
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.server_enforces_UTF8_fspaths_in_verify)
def verify_non_utf8_paths(sbox):
  "svnadmin verify with non-UTF-8 paths"

  if svntest.main.options.fsfs_version is not None and \
     svntest.main.options.fsfs_version not in [4, 6]:
    raise svntest.Skip("Unsupported prepackaged repository version")

  dumpfile = clean_dumpfile()

  # Corruption only possible in physically addressed revisions created
  # with pre-1.6 servers.
  sbox.build(empty=True,
             minor_version=min(svntest.main.options.server_minor_version,8))

  # Load the dumpstream
  load_and_verify_dumpstream(sbox, [], [], dumpfile_revisions, False,
                             dumpfile, '--ignore-uuid')

  # Replace the path 'A' in revision 1 with a non-UTF-8 sequence.
  # This has been observed in repositories in the wild, though Subversion
  # 1.6 and greater should prevent such filenames from entering the repository.
  path1 = os.path.join(sbox.repo_dir, "db", "revs", "0", "1")
  path_new = os.path.join(sbox.repo_dir, "db", "revs", "0", "1.new")
  # Rewrite the rev file line by line; the 'with' block closes both
  # handles even if an error interrupts the copy.
  with open(path1, 'rb') as fp1, open(path_new, 'wb') as fp_new:
    for line in fp1:
      if line == b"A\n":
        # replace 'A' with a latin1 character -- the new path is not valid UTF-8
        fp_new.write(b"\xE6\n")
      elif line == b"text: 1 340 32 32 a6be7b4cf075fd39e6a99eb69a31232b\n":
        # phys, PLAIN directories: fix up the representation checksum
        fp_new.write(b"text: 1 340 32 32 f2e93e73272cac0f18fccf16f224eb93\n")
      elif line == b"text: 1 340 44 32 a6be7b4cf075fd39e6a99eb69a31232b\n":
        # phys, deltified directories: fix up the representation checksum
        fp_new.write(b"text: 1 340 44 32 f2e93e73272cac0f18fccf16f224eb93\n")
      elif line == b"cpath: /A\n":
        # also fix up the 'created path' field
        fp_new.write(b"cpath: /\xE6\n")
      elif line == b"_0.0.t0-0 add-file true true /A\n":
        # and another occurrence
        fp_new.write(b"_0.0.t0-0 add-file true true /\xE6\n")
      else:
        fp_new.write(line)
  os.remove(path1)
  os.rename(path_new, path1)

  # Verify the repository, expecting failure
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir)
  svntest.verify.verify_outputs(
    "Unexpected error while running 'svnadmin verify'.",
    [], errput, None, ".*Path '.*' is not in UTF-8.*")

  # Make sure the repository can still be dumped so that the
  # encoding problem can be fixed in a dump/edit/load cycle.
  expected_stderr = [
    "* Dumped revision 0.\n",
    "WARNING 0x0002: E160005: "
    "While validating fspath '?\\E6': "
    "Path '?\\E6' is not in UTF-8"
    "\n",
    "* Dumped revision 1.\n",
  ]
  exit_code, output, errput = svntest.main.run_svnadmin("dump", sbox.repo_dir)
  if svntest.verify.compare_and_display_lines(
    "Output of 'svnadmin dump' is unexpected.",
    'STDERR', expected_stderr, errput):
    raise svntest.Failure
def test_lslocks_and_rmlocks(sbox):
  "test 'svnadmin lslocks' and 'svnadmin rmlocks'"

  sbox.build(create_wc=False)
  iota_url = sbox.repo_url + '/iota'
  lambda_url = sbox.repo_url + '/A/B/lambda'

  # No locks yet: 'lslocks' must succeed with empty output.
  exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
                                                        sbox.repo_dir)

  if exit_code or errput or output:
    raise svntest.Failure("Error: 'lslocks' failed")

  expected_output = svntest.verify.UnorderedRegexListOutput(
    ["'.*lambda' locked by user 'jrandom'.\n",
     "'.*iota' locked by user 'jrandom'.\n"])

  # Lock iota and A/B/lambda using svn client
  svntest.actions.run_and_verify_svn(expected_output,
                                     [], "lock", "-m", "Locking files",
                                     iota_url, lambda_url)

  def expected_output_list(path):
    # Expected 'lslocks' record for PATH.  The parenthesis regex is a
    # raw string: "\(" is an invalid escape in a plain string literal
    # and raises SyntaxWarning on Python >= 3.12.
    return [
      "Path: " + path,
      "UUID Token: opaquelocktoken:.*",
      "Owner: jrandom",
      "Created:.*",
      "Expires:.*",
      r"Comment \(1 line\):",
      "Locking files",
      "\n", # empty line
      ]

  # List all locks
  exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
                                                        sbox.repo_dir)
  if errput:
    raise SVNUnexpectedStderr(errput)
  svntest.verify.verify_exit_code(None, exit_code, 0)

  expected_output = svntest.verify.UnorderedRegexListOutput(
    expected_output_list('/A/B/lambda') +
    expected_output_list('/iota'))
  svntest.verify.compare_and_display_lines('lslocks output mismatch',
                                           'output',
                                           expected_output, output)

  # List lock in path /A
  exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
                                                        sbox.repo_dir,
                                                        "A")
  if errput:
    raise SVNUnexpectedStderr(errput)

  expected_output = svntest.verify.RegexListOutput(
    expected_output_list('/A/B/lambda'))
  svntest.verify.compare_and_display_lines('lslocks output mismatch',
                                           'output',
                                           expected_output, output)
  svntest.verify.verify_exit_code(None, exit_code, 0)

  # Remove locks
  exit_code, output, errput = svntest.main.run_svnadmin("rmlocks",
                                                        sbox.repo_dir,
                                                        "iota",
                                                        "A/B/lambda")
  expected_output = UnorderedOutput(["Removed lock on '/iota'.\n",
                                     "Removed lock on '/A/B/lambda'.\n"])

  svntest.verify.verify_outputs(
    "Unexpected output while running 'svnadmin rmlocks'.",
    output, [], expected_output, None)
#----------------------------------------------------------------------
@Issue(3734)
def load_ranges(sbox):
  "'svnadmin load --revision X:Y'"

  ## See https://issues.apache.org/jira/browse/SVN-3734. ##
  sbox.build(empty=True)

  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svnadmin_tests_data',
                                   'skeleton_repos.dump')
  dumplines = svntest.actions.load_dumpfile(dumpfile_location)

  # Load our dumpfile, 2 revisions at a time, verifying that we have
  # the correct youngest revision after each load.
  load_dumpstream(sbox, dumplines, '-r0:2')
  svntest.actions.run_and_verify_svnlook(['2\n'],
                                         None, 'youngest', sbox.repo_dir)

  load_dumpstream(sbox, dumplines, '-r3:4')
  svntest.actions.run_and_verify_svnlook(['4\n'],
                                         None, 'youngest', sbox.repo_dir)

  load_dumpstream(sbox, dumplines, '-r5:6')
  svntest.actions.run_and_verify_svnlook(['6\n'],
                                         None, 'youngest', sbox.repo_dir)

  # There are ordering differences in the property blocks.
  if (svntest.main.options.server_minor_version < 6):
    # NOTE(review): presumably pre-1.6 dumps lack Text-content-sha1
    # headers, so those lines are filtered out of the expectation.
    temp = []

    for line in dumplines:
      if not "Text-content-sha1:" in line:
        temp.append(line)

    expected_dump = UnorderedOutput(temp)
  else:
    expected_dump = UnorderedOutput(dumplines)

  # Dumping the incrementally-loaded repository must round-trip to the
  # original dump data.
  new_dumpdata = svntest.actions.run_and_verify_dump(sbox.repo_dir)
  svntest.verify.compare_and_display_lines("Dump files", "DUMP",
                                           expected_dump, new_dumpdata)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def hotcopy_incremental(sbox):
  "'svnadmin hotcopy --incremental PATH .'"

  # Run an incremental hotcopy into the same backup directory three
  # times, verifying the copy and growing the source between rounds.
  sbox.build()

  backup_dir, backup_url = sbox.add_repo_path('backup')
  os.mkdir(backup_dir)
  saved_cwd = os.getcwd()
  src_repo = os.path.join(saved_cwd, sbox.repo_dir)

  for round_no in range(1, 4):
    # Invoke svnadmin from inside the backup dir so '.' is the target.
    os.chdir(backup_dir)
    svntest.actions.run_and_verify_svnadmin(
      None, [],
      "hotcopy", "--incremental", src_repo, '.')
    os.chdir(saved_cwd)

    check_hotcopy_fsfs(sbox.repo_dir, backup_dir)

    # Add another commit so the next incremental pass has fresh data.
    if round_no < 3:
      sbox.simple_mkdir("newdir-%i" % round_no)
      sbox.simple_commit()
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def hotcopy_incremental_packed(sbox):
  "'svnadmin hotcopy --incremental' with packing"

  # Configure two files per shard to trigger packing.
  sbox.build()
  patch_format(sbox.repo_dir, shard_size=2)

  backup_dir, backup_url = sbox.add_repo_path('backup')
  os.mkdir(backup_dir)
  cwd = os.getcwd()

  # Pack revisions 0 and 1 if not already packed.
  if not (svntest.main.is_fs_type_fsfs and svntest.main.options.fsfs_packing
          and svntest.main.options.fsfs_sharding == 2):
    svntest.actions.run_and_verify_svnadmin(
      ['Packing revisions in shard 0...done.\n'], [], "pack",
      os.path.join(cwd, sbox.repo_dir))

  # Commit 5 more revs, hotcopy and pack after each commit.
  for i in [1, 2, 3, 4, 5]:
    os.chdir(backup_dir)
    svntest.actions.run_and_verify_svnadmin(
      None, [],
      "hotcopy", "--incremental", os.path.join(cwd, sbox.repo_dir), '.')
    os.chdir(cwd)

    check_hotcopy_fsfs(sbox.repo_dir, backup_dir)

    if i < 5:
      sbox.simple_mkdir("newdir-%i" % i)
      sbox.simple_commit()
      if (svntest.main.is_fs_type_fsfs and not svntest.main.options.fsfs_packing
          and not i % 2):
        # i is even here; use floor division so the shard number is an
        # int (under Python 3, 'i / 2' yields a float).
        expected_output = ['Packing revisions in shard %d...done.\n' % (i//2)]
      else:
        expected_output = []
      svntest.actions.run_and_verify_svnadmin(
        expected_output, [], "pack", os.path.join(cwd, sbox.repo_dir))
def locking(sbox):
  "svnadmin lock tests"

  # Exercises 'svnadmin lock', 'unlock' and 'rmlocks': comment
  # validation, --bypass-hooks, explicit lock tokens, and pre-lock /
  # pre-unlock hook enforcement.
  sbox.build(create_wc=False)

  comment_path = os.path.join(svntest.main.temp_dir, "comment")
  svntest.main.file_write(comment_path, "dummy comment")

  invalid_comment_path = os.path.join(svntest.main.temp_dir, "invalid_comment")
  svntest.main.file_write(invalid_comment_path, "character is invalid")

  # Test illegal character in comment file.
  expected_error = ".*svnadmin: E130004:.*"
  svntest.actions.run_and_verify_svnadmin(None,
                                          expected_error, "lock",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          invalid_comment_path)

  # Test locking path with --bypass-hooks
  expected_output = "'/iota' locked by user 'jrandom'."
  svntest.actions.run_and_verify_svnadmin(expected_output,
                                          None, "lock",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          comment_path,
                                          "--bypass-hooks")

  # Remove lock
  svntest.actions.run_and_verify_svnadmin(None,
                                          None, "rmlocks",
                                          sbox.repo_dir, "iota")

  # Test locking path without --bypass-hooks
  expected_output = "'/iota' locked by user 'jrandom'."
  svntest.actions.run_and_verify_svnadmin(expected_output,
                                          None, "lock",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          comment_path)

  # Test locking already locked path.
  expected_error = ".*svnadmin: E160035:.*"
  svntest.actions.run_and_verify_svnadmin(None,
                                          expected_error, "lock",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          comment_path)

  # Test locking non-existent path.
  expected_error = ".*svnadmin: E160013:.*"
  svntest.actions.run_and_verify_svnadmin(None,
                                          expected_error, "lock",
                                          sbox.repo_dir,
                                          "non-existent", "jrandom",
                                          comment_path)

  # Test locking a path while specifying a lock token.
  expected_output = "'/A/D/G/rho' locked by user 'jrandom'."
  lock_token = "opaquelocktoken:01234567-89ab-cdef-89ab-cdef01234567"
  svntest.actions.run_and_verify_svnadmin(expected_output,
                                          None, "lock",
                                          sbox.repo_dir,
                                          "A/D/G/rho", "jrandom",
                                          comment_path, lock_token)

  # Test unlocking a path, but provide the wrong lock token.
  expected_error = ".*svnadmin: E160040:.*"
  wrong_lock_token = "opaquelocktoken:12345670-9ab8-defc-9ab8-def01234567c"
  svntest.actions.run_and_verify_svnadmin(None,
                                          expected_error, "unlock",
                                          sbox.repo_dir,
                                          "A/D/G/rho", "jrandom",
                                          wrong_lock_token)

  # Test unlocking the path again, but this time provide the correct
  # lock token.
  expected_output = "'/A/D/G/rho' unlocked by user 'jrandom'."
  svntest.actions.run_and_verify_svnadmin(expected_output,
                                          None, "unlock",
                                          sbox.repo_dir,
                                          "A/D/G/rho", "jrandom",
                                          lock_token)

  # Install lock/unlock prevention hooks: both hooks simply exit(1) to
  # reject every lock/unlock attempt that consults them.
  hook_path = svntest.main.get_pre_lock_hook_path(sbox.repo_dir)
  svntest.main.create_python_hook_script(hook_path, 'import sys; sys.exit(1)')
  hook_path = svntest.main.get_pre_unlock_hook_path(sbox.repo_dir)
  svntest.main.create_python_hook_script(hook_path, 'import sys; sys.exit(1)')

  # Test locking a path.  Don't use --bypass-hooks, though, as we wish
  # to verify that hook script is really getting executed.
  expected_error = ".*svnadmin: E165001:.*"
  svntest.actions.run_and_verify_svnadmin(None,
                                          expected_error, "lock",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          comment_path)

  # Fetch the lock token for our remaining locked path.  (We didn't
  # explicitly set it, so it will vary from test run to test run.)
  exit_code, output, errput = svntest.main.run_svnadmin("lslocks",
                                                        sbox.repo_dir,
                                                        "iota")
  iota_token = None
  for line in output:
    if line.startswith("UUID Token: opaquelocktoken:"):
      # Strip the "UUID Token: " prefix and the trailing newline.
      iota_token = line[12:].rstrip()
      break
  if iota_token is None:
    raise svntest.Failure("Unable to lookup lock token for 'iota'")

  # Try to unlock a path while providing the correct lock token but
  # with a preventative hook in place.
  expected_error = ".*svnadmin: E165001:.*"
  svntest.actions.run_and_verify_svnadmin(None,
                                          expected_error, "unlock",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          iota_token)

  # Finally, use --bypass-hooks to unlock the path (again using the
  # correct lock token).
  expected_output = "'/iota' unlocked by user 'jrandom'."
  svntest.actions.run_and_verify_svnadmin(expected_output,
                                          None, "unlock",
                                          "--bypass-hooks",
                                          sbox.repo_dir,
                                          "iota", "jrandom",
                                          iota_token)
@SkipUnless(svntest.main.is_threaded_python)
@Issue(4129)
def mergeinfo_race(sbox):
  "concurrent mergeinfo commits invalidate pred-count"
  sbox.build()

  # This test exercises two commit-time race condition bugs:
  #
  # (a) metadata corruption when concurrent commits change svn:mergeinfo (issue #4129)
  # (b) false positive SVN_ERR_FS_CONFLICT error with httpv1 commits
  #     https://mail-archives.apache.org/mod_mbox/subversion-dev/201507.mbox/%3C20150731234536.GA5395@tarsus.local2%3E
  #
  # Both bugs are timing-dependent and might not reproduce 100% of the time.

  wc_dir = sbox.wc_dir
  wc2_dir = sbox.add_wc_path('2')

  ## Create wc2.
  svntest.main.run_svn(None, 'checkout', '-q', sbox.repo_url, wc2_dir)

  ## Some random edits.
  svntest.main.run_svn(None, 'mkdir', sbox.ospath('d1', wc_dir))
  svntest.main.run_svn(None, 'mkdir', sbox.ospath('d2', wc2_dir))

  ## Set random mergeinfo properties.
  svntest.main.run_svn(None, 'ps', 'svn:mergeinfo', '/P:42', sbox.ospath('A', wc_dir))
  svntest.main.run_svn(None, 'ps', 'svn:mergeinfo', '/Q:42', sbox.ospath('iota', wc2_dir))

  # Closure factory: each thread commits its own working copy.
  def makethread(some_wc_dir):
    def worker():
      svntest.main.run_svn(None, 'commit', '-mm', some_wc_dir)
    return worker

  t1 = threading.Thread(None, makethread(wc_dir))
  t2 = threading.Thread(None, makethread(wc2_dir))

  # t2 will trigger the issue #4129 sanity check in fs_fs.c
  t1.start(); t2.start()

  t1.join(); t2.join()

  # Crude attempt to make sure everything worked.
  # TODO: better way to catch exceptions in the thread
  if svntest.actions.run_and_parse_info(sbox.repo_url)[0]['Revision'] != '3':
    raise svntest.Failure("one or both commits failed")
@Issue(4213)
@Skip(svntest.main.is_fs_type_fsx)
def recover_old_empty(sbox):
  "recover empty --compatible-version=1.3"
  # Regression test for issue #4213: 'svnadmin recover' must succeed on
  # an empty repository created with 1.3 compatibility.
  sbox.build(create_wc=False, empty=True, minor_version=3)
  svntest.actions.run_and_verify_svnadmin(None, [],
                                          "recover", sbox.repo_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going(sbox):
  "svnadmin verify --keep-going test"

  # No support for modifying pack files
  if svntest.main.options.fsfs_packing:
    raise svntest.Skip('fsfs packing set')

  sbox.build(create_wc = False)
  repo_url = sbox.repo_url
  B_url = sbox.repo_url + '/B'
  C_url = sbox.repo_url + '/C'

  # Create A/B/E/bravo in r2.
  svntest.actions.run_and_verify_svn(None, [],
                                     'mkdir', '-m', 'log_msg',
                                     B_url)

  svntest.actions.run_and_verify_svn(None, [],
                                     'mkdir', '-m', 'log_msg',
                                     C_url)

  # Corrupt r2 by overwriting the start of its rev file.
  r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
  fp = open(r2, 'r+b')
  fp.write(b"inserting junk to corrupt the rev")
  fp.close()

  # With --keep-going, verification continues past the broken revision
  # and finishes with a summary of all failures.
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        "--keep-going",
                                                        sbox.repo_dir)

  exp_out = svntest.verify.RegexListOutput([".*Verified revision 0.",
                                            ".*Verified revision 1.",
                                            ".*",
                                            ".*Summary.*",
                                            ".*r2: E160004:.*",
                                            ".*r2: E160004:.*",
                                            ".*r3: E160004:.*",
                                            ".*r3: E160004:.*"])
  if (svntest.main.fs_has_rep_sharing()):
    exp_out.insert(0, ".*Verifying.*metadata.*")

  exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                            "svnadmin: E160004:.*",
                                            "svnadmin: E160004:.*",
                                            ".*Error verifying revision 3.",
                                            "svnadmin: E160004:.*",
                                            "svnadmin: E160004:.*",
                                            "svnadmin: E205012:.*"], False)

  if (svntest.main.is_fs_log_addressing()):
    exp_err.insert(0, ".*Error verifying repository metadata.")
    exp_err.insert(1, "svnadmin: E160004:.*")

  if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
                                   output, errput, exp_out, exp_err):
    raise svntest.Failure

  # Without --keep-going, verification stops at the first broken revision.
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir)

  if (svntest.main.is_fs_log_addressing()):
    exp_out = svntest.verify.RegexListOutput([".*Verifying metadata at revision 0.*"])
  else:
    exp_out = svntest.verify.RegexListOutput([".*Verified revision 0.",
                                              ".*Verified revision 1."])
    if (svntest.main.fs_has_rep_sharing()):
      exp_out.insert(0, ".*Verifying repository metadata.*")

  if (svntest.main.is_fs_log_addressing()):
    exp_err = svntest.verify.RegexListOutput([
                                     ".*Error verifying repository metadata.",
                                     "svnadmin: E160004:.*"], False)
  else:
    exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                              "svnadmin: E160004:.*",
                                              "svnadmin: E160004:.*"], False)

  if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
                                   output, errput, exp_out, exp_err):
    raise svntest.Failure

  # --quiet suppresses progress output but still reports errors on stderr.
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        "--quiet",
                                                        sbox.repo_dir)

  if (svntest.main.is_fs_log_addressing()):
    exp_err = svntest.verify.RegexListOutput([
                                      ".*Error verifying repository metadata.",
                                      "svnadmin: E160004:.*"], False)
  else:
    exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                              "svnadmin: E160004:.*",
                                              "svnadmin: E160004:.*"], False)

  if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.",
                                   None, errput, None, exp_err):
    raise svntest.Failure

  # Don't leave a corrupt repository
  svntest.main.safe_rmtree(sbox.repo_dir, True)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_keep_going_quiet(sbox):
  "svnadmin verify --keep-going --quiet test"

  # No support for modifying pack files
  if svntest.main.options.fsfs_packing:
    raise svntest.Skip('fsfs packing set')

  sbox.build(create_wc = False)
  repo_url = sbox.repo_url
  B_url = sbox.repo_url + '/B'
  C_url = sbox.repo_url + '/C'

  # Create A/B/E/bravo in r2.
  svntest.actions.run_and_verify_svn(None, [],
                                     'mkdir', '-m', 'log_msg',
                                     B_url)

  svntest.actions.run_and_verify_svn(None, [],
                                     'mkdir', '-m', 'log_msg',
                                     C_url)

  # Corrupt r2 by overwriting the start of its rev file.
  r2 = fsfs_file(sbox.repo_dir, 'revs', '2')
  fp = open(r2, 'r+b')
  fp.write(b"inserting junk to corrupt the rev")
  fp.close()

  # With both --keep-going and --quiet, all errors (and nothing else)
  # must still appear on stderr.
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        "--keep-going",
                                                        "--quiet",
                                                        sbox.repo_dir)

  exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                            "svnadmin: E160004:.*",
                                            "svnadmin: E160004:.*",
                                            ".*Error verifying revision 3.",
                                            "svnadmin: E160004:.*",
                                            "svnadmin: E160004:.*",
                                            "svnadmin: E205012:.*"], False)

  # Insert another expected error from checksum verification
  if (svntest.main.is_fs_log_addressing()):
    exp_err.insert(0, ".*Error verifying repository metadata.")
    exp_err.insert(1, "svnadmin: E160004:.*")

  if svntest.verify.verify_outputs(
          "Unexpected error while running 'svnadmin verify'.",
          output, errput, None, exp_err):
    raise svntest.Failure

  # Don't leave a corrupt repository
  svntest.main.safe_rmtree(sbox.repo_dir, True)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def verify_invalid_path_changes(sbox):
  "detect invalid changed path list entries"

  # No support for modifying pack files
  if svntest.main.options.fsfs_packing:
    raise svntest.Skip('fsfs packing set')

  sbox.build(create_wc = False)
  repo_url = sbox.repo_url

  # Create a number of revisions each adding a single path
  for r in range(2,20):
    svntest.actions.run_and_verify_svn(None, [],
                                       'mkdir', '-m', 'log_msg',
                                       sbox.repo_url + '/B' + str(r))

  # modify every other revision to make sure that errors are not simply
  # "carried over" but that all corruptions get detected independently

  # add existing node
  set_changed_path_list(sbox, 2,
                        b"_0.0.t1-1 add-dir false false /A\n\n")
  # add into non-existent parent
  set_changed_path_list(sbox, 4,
                        b"_0.0.t3-2 add-dir false false /C/X\n\n")
  # del non-existent node
  set_changed_path_list(sbox, 6,
                        b"_0.0.t5-2 delete-dir false false /C\n\n")
  # del existent node of the wrong kind
  #
  # THIS WILL NOT BE DETECTED
  # since dump mechanism and file don't care about the types of deleted nodes
  set_changed_path_list(sbox, 8,
                        b"_0.0.t7-2 delete-file false false /B3\n\n")
  # copy from non-existent node
  set_changed_path_list(sbox, 10,
                        b"_0.0.t9-2 add-dir false false /B10\n6 /B8\n")
  # copy from existing node of the wrong kind
  set_changed_path_list(sbox, 12,
                        b"_0.0.t11-2 add-file false false /B12\n9 /B8\n")
  # modify non-existent node
  set_changed_path_list(sbox, 14,
                        b"_0.0.t13-2 modify-file false false /A/D/H/foo\n\n")
  # modify existent node of the wrong kind
  set_changed_path_list(sbox, 16,
                        b"_0.0.t15-2 modify-file false false /B12\n\n")
  # replace non-existent node
  set_changed_path_list(sbox, 18,
                        b"_0.0.t17-2 replace-file false false /A/D/H/foo\n\n")

  # find corruptions
  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        "--keep-going",
                                                        sbox.repo_dir)

  # Errors generated by FSFS when CHANGED_PATHS is not forced into emulation
  exp_out1 = svntest.verify.RegexListOutput([".*Verified revision 0.",
                                             ".*Verified revision 1.",
                                             ".*Verified revision 3.",
                                             ".*Verified revision 5.",
                                             ".*Verified revision 7.",
                                             ".*Verified revision 8.",
                                             ".*Verified revision 9.",
                                             ".*Verified revision 11.",
                                             ".*Verified revision 13.",
                                             ".*Verified revision 15.",
                                             ".*Verified revision 17.",
                                             ".*Verified revision 19.",
                                             ".*",
                                             ".*Summary.*",
                                             ".*r2: E160020:.*",
                                             ".*r2: E160020:.*",
                                             ".*r4: E160013:.*",
                                             ".*r6: E160013:.*",
                                             ".*r6: E160013:.*",
                                             ".*r10: E160013:.*",
                                             ".*r10: E160013:.*",
                                             ".*r12: E145001:.*",
                                             ".*r12: E145001:.*",
                                             ".*r14: E160013:.*",
                                             ".*r14: E160013:.*",
                                             ".*r16: E145001:.*",
                                             ".*r16: E145001:.*",
                                             ".*r18: E160013:.*",
                                             ".*r18: E160013:.*"])

  exp_err1 = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                             "svnadmin: E160020:.*",
                                             "svnadmin: E160020:.*",
                                             ".*Error verifying revision 4.",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 6.",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 10.",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 12.",
                                             "svnadmin: E145001:.*",
                                             "svnadmin: E145001:.*",
                                             ".*Error verifying revision 14.",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 16.",
                                             "svnadmin: E145001:.*",
                                             "svnadmin: E145001:.*",
                                             ".*Error verifying revision 18.",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E205012:.*"], False)

  # If CHANGED_PATHS is emulated, FSFS fails earlier, generating fewer
  # of the same messages per revision.
  exp_out2 = svntest.verify.RegexListOutput([".*Verified revision 0.",
                                             ".*Verified revision 1.",
                                             ".*Verified revision 3.",
                                             ".*Verified revision 5.",
                                             ".*Verified revision 7.",
                                             ".*Verified revision 8.",
                                             ".*Verified revision 9.",
                                             ".*Verified revision 11.",
                                             ".*Verified revision 13.",
                                             ".*Verified revision 15.",
                                             ".*Verified revision 17.",
                                             ".*Verified revision 19.",
                                             ".*",
                                             ".*Summary.*",
                                             ".*r2: E160020:.*",
                                             ".*r2: E160020:.*",
                                             ".*r4: E160013:.*",
                                             ".*r6: E160013:.*",
                                             ".*r10: E160013:.*",
                                             ".*r10: E160013:.*",
                                             ".*r12: E145001:.*",
                                             ".*r12: E145001:.*",
                                             ".*r14: E160013:.*",
                                             ".*r16: E145001:.*",
                                             ".*r16: E145001:.*",
                                             ".*r18: E160013:.*"])

  exp_err2 = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                             "svnadmin: E160020:.*",
                                             "svnadmin: E160020:.*",
                                             ".*Error verifying revision 4.",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 6.",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 10.",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 12.",
                                             "svnadmin: E145001:.*",
                                             "svnadmin: E145001:.*",
                                             ".*Error verifying revision 14.",
                                             "svnadmin: E160013:.*",
                                             ".*Error verifying revision 16.",
                                             "svnadmin: E145001:.*",
                                             "svnadmin: E145001:.*",
                                             ".*Error verifying revision 18.",
                                             "svnadmin: E160013:.*",
                                             "svnadmin: E205012:.*"], False)

  # Determine which pattern to use by counting the errors reported for
  # r6 (one error = emulated CHANGED_PATHS, two = native).
  # Note that index() will throw an exception if the string can't be found.
  try:
    rev6_line = errput.index('* Error verifying revision 6.\n')
    rev10_line = errput.index('* Error verifying revision 10.\n')

    error_count = 0
    for line in errput[rev6_line+1:rev10_line]:
      if "svnadmin: E" in line:
        error_count = error_count + 1

    if error_count == 1:
      exp_out = exp_out2
      exp_err = exp_err2
    else:
      exp_out = exp_out1
      exp_err = exp_err1
  except ValueError:
    exp_out = exp_out1
    exp_err = exp_err1

  if (svntest.main.fs_has_rep_sharing()):
    exp_out.insert(0, ".*Verifying.*metadata.*")
  if svntest.main.options.fsfs_sharding is not None:
    # Use floor division: '19 / sharding' is a float under Python 3 and
    # range() rejects non-integer arguments.
    for x in range(0, 19 // svntest.main.options.fsfs_sharding):
      exp_out.insert(0, ".*Verifying.*metadata.*")
  if svntest.main.is_fs_log_addressing():
    exp_out.insert(0, ".*Verifying.*metadata.*")

  if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
                                   output, errput, exp_out, exp_err):
    raise svntest.Failure

  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir)

  exp_out = svntest.verify.RegexListOutput([".*Verified revision 0.",
                                            ".*Verified revision 1."])
  exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                            "svnadmin: E160020:.*",
                                            "svnadmin: E160020:.*"], False)

  if (svntest.main.fs_has_rep_sharing()):
    exp_out.insert(0, ".*Verifying.*metadata.*")
  if svntest.main.options.fsfs_sharding is not None:
    # Floor division again -- see above.
    for x in range(0, 19 // svntest.main.options.fsfs_sharding):
      exp_out.insert(0, ".*Verifying.*metadata.*")
  if svntest.main.is_fs_log_addressing():
    exp_out.insert(0, ".*Verifying.*metadata.*")

  if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
                                   output, errput, exp_out, exp_err):
    raise svntest.Failure

  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        "--quiet",
                                                        sbox.repo_dir)

  exp_out = []
  exp_err = svntest.verify.RegexListOutput([".*Error verifying revision 2.",
                                            "svnadmin: E160020:.*",
                                            "svnadmin: E160020:.*"], False)

  if svntest.verify.verify_outputs("Output of 'svnadmin verify' is unexpected.",
                                   output, errput, exp_out, exp_err):
    raise svntest.Failure

  # Don't leave a corrupt repository
  svntest.main.safe_rmtree(sbox.repo_dir, True)
def verify_denormalized_names(sbox):
  "detect denormalized names and name collisions"

  # Load a dump that contains Unicode-denormalized path names, then check
  # that 'svnadmin verify --check-normalization' warns about the resulting
  # duplicate representations.
  sbox.build(create_wc=False, empty=True)

  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svnadmin_tests_data',
                                   'normalization_check.dump')
  load_dumpstream(sbox, svntest.actions.load_dumpfile(dumpfile_location))

  exit_code, output, errput = svntest.main.run_svnadmin(
    "verify", "--check-normalization", sbox.repo_dir)

  expected_output_regex_list = [
    ".*Verified revision 0.",
    ".*Verified revision 1.",
    ".*Verified revision 2.",
    ".*Verified revision 3.",
    # A/{Eacute}/{aring}lpha
    "WARNING 0x0003: Duplicate representation of path 'A/.*/.*lpha'",
    ".*Verified revision 4.",
    ".*Verified revision 5.",
    # Q/{aring}lpha
    "WARNING 0x0004: Duplicate representation of path '/Q/.*lpha'"
    # A/{Eacute}
    " in svn:mergeinfo property of 'A/.*'",
    ".*Verified revision 6.",
    ".*Verified revision 7."]

  # The BDB backend doesn't do global metadata verification.
  if (svntest.main.fs_has_rep_sharing() and not svntest.main.is_fs_type_bdb()):
    expected_output_regex_list.insert(0, ".*Verifying repository metadata.*")

  if svntest.main.options.fsfs_sharding is not None:
    # Use floor division: on Python 3, '7 / shard_size' yields a float,
    # which range() rejects with a TypeError.
    for x in range(0, 7 // svntest.main.options.fsfs_sharding):
      expected_output_regex_list.insert(0, ".*Verifying.*metadata.*")

  if svntest.main.is_fs_log_addressing():
    expected_output_regex_list.insert(0, ".* Verifying metadata at revision 0.*")

  exp_out = svntest.verify.RegexListOutput(expected_output_regex_list)
  exp_err = svntest.verify.ExpectedOutput([])
  svntest.verify.verify_outputs(
    "Unexpected error while running 'svnadmin verify'.",
    output, errput, exp_out, exp_err)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_recover_old_non_empty(sbox):
  "fsfs recover non-empty --compatible-version=1.3"

  # Regression test: around trunk@1560210, 'svnadmin recover' wrongly
  # errored out for the --compatible-version=1.3 Greek tree repository:
  #   svnadmin: E200002: Serialized hash missing terminator
  #
  # Build a 1.3-format repository and expect 'recover' to finish with
  # no stderr output.
  sbox.build(create_wc=False, minor_version=3)
  svntest.actions.run_and_verify_svnadmin(None, [], "recover",
                                          sbox.repo_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_non_empty(sbox):
  "fsfs hotcopy non-empty --compatible-version=1.3"

  # Regression check: near trunk@1560210, hotcopying a 1.3-format
  # Greek-tree repository failed with:
  #   svnadmin: E160006: No such revision 1
  sbox.build(create_wc=False, minor_version=3)

  # Hotcopy into a fresh destination; the URL half of the tuple is unused.
  dst_dir, _ = sbox.add_repo_path('backup')
  svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
                                          sbox.repo_dir, dst_dir)

  # The destination must be equivalent to the source repository.
  check_hotcopy_fsfs(sbox.repo_dir, dst_dir)
def load_ignore_dates(sbox):
  "svnadmin load --ignore-dates"

  # All revisions in the loaded repository should come after this time.
  start_time = time.localtime()
  time.sleep(1)  # ensure a measurable gap between 'now' and the load

  sbox.build(create_wc=False, empty=True)

  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svnadmin_tests_data',
                                   'skeleton_repos.dump')
  dumpfile_skeleton = svntest.actions.load_dumpfile(dumpfile_location)

  load_dumpstream(sbox, dumpfile_skeleton, '--ignore-dates')
  svntest.actions.run_and_verify_svnlook(['6\n'],
                                         None, 'youngest', sbox.repo_dir)
  # With --ignore-dates, each revision's svn:date must be the time of the
  # load, not the (older) date recorded in the dump file.
  for rev in range(1, 6):
    exit_code, output, errput = svntest.main.run_svnlook('date', '-r', rev,
                                                         sbox.repo_dir)
    if errput:
      raise SVNUnexpectedStderr(errput)
    # Parse only the 'YYYY-MM-DD HH:MM:SS' prefix of svnlook's date output.
    rev_time = time.strptime(output[0].rstrip()[:19], '%Y-%m-%d %H:%M:%S')
    if rev_time < start_time:
      raise svntest.Failure("Revision time for r%d older than load start time\n"
                            " rev_time: %s\n"
                            " start_time: %s"
                            % (rev, str(rev_time), str(start_time)))
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_old_with_id_changes(sbox):
  "fsfs hotcopy old with node-id and copy-id changes"

  # Around trunk@1573728, running 'svnadmin hotcopy' for the
  # --compatible-version=1.3 repository with certain node-id and copy-id
  # changes ended with mismatching db/current in source and destination:
  #
  #   source: "2 l 1"  destination: "2 k 1",
  #           "3 l 2"               "3 4 2"
  #   (and so on...)
  #
  # We test this case by creating a --compatible-version=1.3 repository
  # and committing things that result in node-id and copy-id changes.
  # After every commit, we hotcopy the repository to a new destination
  # and check whether the source of the backup and the backup itself are
  # identical.  We also maintain a separate --incremental backup, which
  # is updated and checked after every commit.
  sbox.build(create_wc=True, minor_version=3)
  inc_backup_dir, inc_backup_url = sbox.add_repo_path('incremental-backup')

  def _backup_and_check(name):
    # Take a full hotcopy into a fresh NAME directory, update the shared
    # incremental backup, and verify both against the source repository.
    backup_dir, backup_url = sbox.add_repo_path(name)
    svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
                                            sbox.repo_dir, backup_dir)
    svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
                                            "--incremental",
                                            sbox.repo_dir, inc_backup_dir)
    check_hotcopy_fsfs(sbox.repo_dir, backup_dir)
    check_hotcopy_fsfs(sbox.repo_dir, inc_backup_dir)

  # r1 = Initial greek tree sandbox.
  _backup_and_check('backup-after-r1')

  # r2 = Add a new property.
  sbox.simple_propset('foo', 'bar', 'A/mu')
  sbox.simple_commit(message='r2')
  _backup_and_check('backup-after-r2')

  # r3 = Copy a file.
  sbox.simple_copy('A/B/E', 'A/B/E1')
  sbox.simple_commit(message='r3')
  _backup_and_check('backup-after-r3')

  # r4 = Remove an existing file ...
  sbox.simple_rm('A/D/gamma')
  sbox.simple_commit(message='r4')
  _backup_and_check('backup-after-r4')

  # r5 = ...and replace it with a new file here.
  sbox.simple_add_text("This is the replaced file.\n", 'A/D/gamma')
  sbox.simple_commit(message='r5')
  _backup_and_check('backup-after-r5')

  # r6 = Add an entirely new file.
  sbox.simple_add_text('This is an entirely new file.\n', 'A/C/mu1')
  sbox.simple_commit(message='r6')
  _backup_and_check('backup-after-r6')

  # r7 = Change the content of the existing file (this changeset does
  #      not bump the next-id and copy-id counters in the repository).
  sbox.simple_append('A/mu', 'This is change in the existing file.\n')
  sbox.simple_commit(message='r7')
  _backup_and_check('backup-after-r7')
@SkipUnless(svntest.main.fs_has_pack)
def verify_packed(sbox):
  "verify packed with small shards"

  # Configure two files per shard to trigger packing.
  sbox.build()
  patch_format(sbox.repo_dir, shard_size=2)

  # Play with our greek tree.  These changesets fall into two
  # separate shards with r2 and r3 being in shard 1 ...
  sbox.simple_append('iota', "Line.\n")
  sbox.simple_append('A/D/gamma', "Another line.\n")
  sbox.simple_commit(message='r2')
  sbox.simple_propset('foo', 'bar', 'iota')
  sbox.simple_propset('foo', 'baz', 'A/mu')
  sbox.simple_commit(message='r3')

  # ...and r4 and r5 being in shard 2.
  sbox.simple_rm('A/C')
  sbox.simple_copy('A/B/E', 'A/B/E1')
  sbox.simple_move('A/mu', 'A/B/mu')
  sbox.simple_commit(message='r4')
  sbox.simple_propdel('foo', 'A/B/mu')
  sbox.simple_commit(message='r5')

  # BUGFIX: is_fs_type_fsfs must be *called* -- the bare function object is
  # always truthy, which made the condition degenerate to 'fsfs_packing'.
  if svntest.main.is_fs_type_fsfs() and svntest.main.options.fsfs_packing:
    # With --fsfs-packing, everything is already packed and we
    # can skip this part.
    pass
  else:
    expected_output = ["Packing revisions in shard 0...done.\n",
                       "Packing revisions in shard 1...done.\n",
                       "Packing revisions in shard 2...done.\n"]
    svntest.actions.run_and_verify_svnadmin(expected_output, [],
                                            "pack", sbox.repo_dir)

  # Log-addressing backends additionally report per-block metadata checks.
  if svntest.main.is_fs_log_addressing():
    expected_output = ["* Verifying metadata at revision 0 ...\n",
                       "* Verifying metadata at revision 2 ...\n",
                       "* Verifying metadata at revision 4 ...\n",
                       "* Verifying repository metadata ...\n",
                       "* Verified revision 0.\n",
                       "* Verified revision 1.\n",
                       "* Verified revision 2.\n",
                       "* Verified revision 3.\n",
                       "* Verified revision 4.\n",
                       "* Verified revision 5.\n"]
  else:
    expected_output = ["* Verifying repository metadata ...\n",
                       "* Verified revision 0.\n",
                       "* Verified revision 1.\n",
                       "* Verified revision 2.\n",
                       "* Verified revision 3.\n",
                       "* Verified revision 4.\n",
                       "* Verified revision 5.\n"]

  svntest.actions.run_and_verify_svnadmin(expected_output, [],
                                          "verify", sbox.repo_dir)
# Test that 'svnadmin freeze' is nestable.  (For example, this ensures it
# won't take system-global locks, only repository-scoped ones.)
#
# This could be useful to easily freeze a small number of repositories at once.
#
# ### We don't actually test that freeze takes a write lock anywhere (not even
# ### in C tests.)
def freeze_freeze(sbox):
  "svnadmin freeze svnadmin freeze (some-cmd)"

  sbox.build(create_wc=False, read_only=True)
  second_repo_dir, _ = sbox.add_repo_path('backup')
  svntest.actions.run_and_verify_svnadmin(None, [], "hotcopy",
                                          sbox.repo_dir, second_repo_dir)

  if svntest.main.is_fs_type_fsx() or \
     (svntest.main.is_fs_type_fsfs() and \
      svntest.main.options.server_minor_version < 9):
    # FSFS repositories created with --compatible-version=1.8 and less
    # erroneously share the filesystem data (locks, shared transaction
    # data, ...) between hotcopy source and destination.  This is fixed
    # for new FS formats, but in order to avoid a deadlock for old formats,
    # we have to manually assign a new UUID for the hotcopy destination.
    # As of trunk@1618024, the same applies to FSX repositories.
    svntest.actions.run_and_verify_svnadmin([], None,
                                            'setuuid', second_repo_dir)

  # Freeze the first repository and, while frozen, freeze the second one
  # and run a trivial command ('python -c True') under both locks.
  svntest.actions.run_and_verify_svnadmin(None, [],
                 'freeze', '--', sbox.repo_dir,
                 svntest.main.svnadmin_binary, 'freeze', '--', second_repo_dir,
                 sys.executable, '-c', 'True')

  # The same nesting, but with the repository list supplied via '-F FILE'.
  arg_file = sbox.get_tempname()
  svntest.main.file_write(arg_file,
                          "%s\n%s\n" % (sbox.repo_dir, second_repo_dir))

  svntest.actions.run_and_verify_svnadmin(None, [],
                                          'freeze', '-F', arg_file, '--',
                                          sys.executable, '-c', 'True')
def verify_metadata_only(sbox):
  "verify metadata only"

  sbox.build(create_wc = False)

  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir,
                                                        "--metadata-only")
  if errput:
    raise SVNUnexpectedStderr(errput)

  # Unfortunately, older formats won't be checked as thoroughly as newer
  # ones, resulting in different progress output.  BDB will do a full check
  # but not produce any output.
  if svntest.main.is_fs_log_addressing():
    svntest.verify.compare_and_display_lines(
      "Unexpected error while running 'svnadmin verify'.",
      'STDOUT', ["* Verifying metadata at revision 0 ...\n",
                 "* Verifying repository metadata ...\n"], output)
  elif svntest.main.fs_has_rep_sharing() \
       and not svntest.main.is_fs_type_bdb():
    svntest.verify.compare_and_display_lines(
      "Unexpected error while running 'svnadmin verify'.",
      'STDOUT', ["* Verifying repository metadata ...\n"], output)
  else:
    # Old formats / BDB: no progress output expected at all.
    svntest.verify.compare_and_display_lines(
      "Unexpected error while running 'svnadmin verify'.",
      'STDOUT', [], output)
@Skip(svntest.main.is_fs_type_bdb)
def verify_quickly(sbox):
  "verify quickly using metadata"

  sbox.build(create_wc = False)

  # Corrupt r1's rev file: overwrite one byte near its start so that a
  # metadata scan can notice the damage.
  rev_file = open(fsfs_file(sbox.repo_dir, 'revs', '1'), 'r+b')
  # set new contents
  rev_file.seek(8)
  rev_file.write(b'#')
  rev_file.close()

  exit_code, output, errput = svntest.main.run_svnadmin("verify",
                                                        sbox.repo_dir,
                                                        "--metadata-only")

  # unfortunately, some backends need to do more checks than others,
  # resulting in different progress output
  if svntest.main.is_fs_log_addressing():
    # Log-addressing formats detect the corruption during the metadata scan.
    exp_out = svntest.verify.RegexListOutput([])
    exp_err = svntest.verify.RegexListOutput(["svnadmin: E160004:.*"], False)
  else:
    exp_out = svntest.verify.RegexListOutput([])
    exp_err = svntest.verify.RegexListOutput([])
    if (svntest.main.fs_has_rep_sharing()):
      exp_out.insert(0, ".*Verifying.*metadata.*")

  if svntest.verify.verify_outputs("Unexpected error while running 'svnadmin verify'.",
                                   output, errput, exp_out, exp_err):
    raise svntest.Failure

  # Don't leave a corrupt repository
  svntest.main.safe_rmtree(sbox.repo_dir, True)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_hotcopy_progress(sbox):
  "hotcopy progress reporting"

  # Check how 'svnadmin hotcopy' reports progress for non-incremental
  # and incremental scenarios.  The progress output can be affected by
  # the --fsfs-packing option, so skip the test if that is the case.
  if svntest.main.options.fsfs_packing:
    raise svntest.Skip('fsfs packing set')

  # Create an empty repository, configure three files per shard.
  sbox.build(create_wc=False, empty=True)
  patch_format(sbox.repo_dir, shard_size=3)

  inc_backup_dir, inc_backup_url = sbox.add_repo_path('incremental-backup')

  # Nothing really exciting for the empty repository.
  expected_full = [
    "* Copied revision 0.\n"
    ]
  expected_incremental = [
    "* Copied revision 0.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup-0')
  svntest.actions.run_and_verify_svnadmin(expected_full, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)
  svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, inc_backup_dir)

  # Commit three revisions.  After this step we have a full shard
  # (r0, r1, r2) and the second shard (r3) with a single revision.
  for i in range(3):
    svntest.actions.run_and_verify_svn(None, [], 'mkdir',
                                       '-m', svntest.main.make_log_msg(),
                                       sbox.repo_url + '/dir-%i' % i)
  expected_full = [
    "* Copied revision 0.\n",
    "* Copied revision 1.\n",
    "* Copied revision 2.\n",
    "* Copied revision 3.\n",
    ]
  expected_incremental = [
    "* Copied revision 1.\n",
    "* Copied revision 2.\n",
    "* Copied revision 3.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup-1')
  svntest.actions.run_and_verify_svnadmin(expected_full, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)
  svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, inc_backup_dir)

  # Pack everything (r3 is still unpacked) and hotcopy again.  In this case,
  # the --incremental output should track the incoming (r0, r1, r2) pack and
  # should not mention r3, because it is already a part of the destination
  # and is *not* a part of the incoming pack.
  svntest.actions.run_and_verify_svnadmin(None, [], 'pack',
                                          sbox.repo_dir)
  expected_full = [
    "* Copied revisions from 0 to 2.\n",
    "* Copied revision 3.\n",
    ]
  expected_incremental = [
    "* Copied revisions from 0 to 2.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup-2')
  svntest.actions.run_and_verify_svnadmin(expected_full, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)
  svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, inc_backup_dir)

  # Fill the second shard, pack again, commit several unpacked revisions
  # on top of it.  Rerun the hotcopy and check the progress output.
  for i in range(4, 6):
    svntest.actions.run_and_verify_svn(None, [], 'mkdir',
                                       '-m', svntest.main.make_log_msg(),
                                       sbox.repo_url + '/dir-%i' % i)
  svntest.actions.run_and_verify_svnadmin(None, [], 'pack',
                                          sbox.repo_dir)
  for i in range(6, 8):
    svntest.actions.run_and_verify_svn(None, [], 'mkdir',
                                       '-m', svntest.main.make_log_msg(),
                                       sbox.repo_url + '/dir-%i' % i)

  # Packed ranges are reported as one line each; loose revisions one by one.
  expected_full = [
    "* Copied revisions from 0 to 2.\n",
    "* Copied revisions from 3 to 5.\n",
    "* Copied revision 6.\n",
    "* Copied revision 7.\n",
    ]
  expected_incremental = [
    "* Copied revisions from 3 to 5.\n",
    "* Copied revision 6.\n",
    "* Copied revision 7.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup-3')
  svntest.actions.run_and_verify_svnadmin(expected_full, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)
  svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, inc_backup_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_with_revprop_changes(sbox):
  "incremental hotcopy progress with changed revprops"

  # The progress output can be affected by the --fsfs-packing
  # option, so skip the test if that is the case.
  if svntest.main.options.fsfs_packing:
    raise svntest.Skip('fsfs packing set')

  # Create an empty repository, commit several revisions and hotcopy it.
  sbox.build(create_wc=False, empty=True)

  for i in range(6):
    svntest.actions.run_and_verify_svn(None, [], 'mkdir',
                                       '-m', svntest.main.make_log_msg(),
                                       sbox.repo_url + '/dir-%i' % i)
  expected_output = [
    "* Copied revision 0.\n",
    "* Copied revision 1.\n",
    "* Copied revision 2.\n",
    "* Copied revision 3.\n",
    "* Copied revision 4.\n",
    "* Copied revision 5.\n",
    "* Copied revision 6.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup')
  svntest.actions.run_and_verify_svnadmin(expected_output, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)

  # Amend a few log messages in the source, run the --incremental hotcopy.
  # The progress output should only mention the corresponding revisions.
  revprop_file = sbox.get_tempname()
  svntest.main.file_write(revprop_file, "Modified log message.")

  for i in [1, 3, 6]:
    svntest.actions.run_and_verify_svnadmin(None, [],
                                            'setrevprop',
                                            sbox.repo_dir, '-r', i,
                                            'svn:log', revprop_file)
  expected_output = [
    "* Copied revision 1.\n",
    "* Copied revision 3.\n",
    "* Copied revision 6.\n",
    ]
  svntest.actions.run_and_verify_svnadmin(expected_output, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, backup_dir)
@SkipUnless(svntest.main.is_fs_type_fsfs)
def fsfs_hotcopy_progress_old(sbox):
  "hotcopy --compatible-version=1.3 progress"

  # Same progress checks as fsfs_hotcopy_progress, but for the old
  # (unsharded, 1.3-compatible) repository format.
  sbox.build(create_wc=False, empty=True, minor_version=3)

  inc_backup_dir, inc_backup_url = sbox.add_repo_path('incremental-backup')

  # Nothing really exciting for the empty repository.
  expected_full = [
    "* Copied revision 0.\n"
    ]
  expected_incremental = [
    "* Copied revision 0.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup-0')
  svntest.actions.run_and_verify_svnadmin(expected_full, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)
  svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, inc_backup_dir)

  # Commit three revisions, hotcopy and check the progress output.
  for i in range(3):
    svntest.actions.run_and_verify_svn(None, [], 'mkdir',
                                       '-m', svntest.main.make_log_msg(),
                                       sbox.repo_url + '/dir-%i' % i)

  expected_full = [
    "* Copied revision 0.\n",
    "* Copied revision 1.\n",
    "* Copied revision 2.\n",
    "* Copied revision 3.\n",
    ]
  expected_incremental = [
    "* Copied revision 1.\n",
    "* Copied revision 2.\n",
    "* Copied revision 3.\n",
    ]

  backup_dir, backup_url = sbox.add_repo_path('backup-1')
  svntest.actions.run_and_verify_svnadmin(expected_full, [],
                                          'hotcopy',
                                          sbox.repo_dir, backup_dir)
  svntest.actions.run_and_verify_svnadmin(expected_incremental, [],
                                          'hotcopy', '--incremental',
                                          sbox.repo_dir, inc_backup_dir)
@SkipUnless(svntest.main.fs_has_unique_freeze)
def freeze_same_uuid(sbox):
  "freeze multiple repositories with same UUID"

  sbox.build(create_wc=False)

  first_repo_dir, _ = sbox.add_repo_path('first')
  second_repo_dir, _ = sbox.add_repo_path('second')

  # Test that 'svnadmin freeze A (svnadmin freeze B)' does not deadlock for
  # new FSFS formats, even if 'A' and 'B' share the same UUID.  Create two
  # repositories by loading the same dump file, ...
  svntest.main.create_repos(first_repo_dir)
  svntest.main.create_repos(second_repo_dir)

  dump_path = os.path.join(os.path.dirname(sys.argv[0]),
                           'svnadmin_tests_data',
                           'skeleton_repos.dump')
  dump_contents = open(dump_path, 'rb').readlines()
  svntest.actions.run_and_verify_load(first_repo_dir, dump_contents)
  svntest.actions.run_and_verify_load(second_repo_dir, dump_contents)

  # ...and execute the 'svnadmin freeze -F' command.
  arg_file = sbox.get_tempname()
  svntest.main.file_write(arg_file,
                          "%s\n%s\n" % (first_repo_dir, second_repo_dir))

  # NOTE(review): both expected stdout and stderr are passed as None here,
  # i.e. apparently unchecked -- confirm against run_and_verify_svnadmin
  # whether that is intended (other tests in this file pass [] for stderr).
  svntest.actions.run_and_verify_svnadmin(None, None,
                                          'freeze', '-F', arg_file, '--',
                                          sys.executable, '-c', 'True')
@Skip(svntest.main.is_fs_type_fsx)
def upgrade(sbox):
  "upgrade --compatible-version=1.3"

  # Start from an old (1.3-compatible) format repository and upgrade it
  # in place.
  sbox.build(create_wc=False, minor_version=3)
  svntest.actions.run_and_verify_svnadmin(None, [], "upgrade", sbox.repo_dir)

  # Does the repository work after upgrade?  A plain commit must succeed.
  expected_stdout = ['Committing transaction...\n',
                     'Committed revision 2.\n']
  svntest.actions.run_and_verify_svn(expected_stdout, [], 'mkdir',
                                     '-m', svntest.main.make_log_msg(),
                                     sbox.repo_url + '/dir')
def load_txdelta(sbox):
  "exercising svn_txdelta_target on BDB"

  sbox.build(empty=True)

  # This dumpfile produced a BDB repository that generated checksum
  # mismatches on read caused by the improper handling of
  # svn_txdelta_target ops.  The bug was fixed by r1640832.
  dumpfile_location = os.path.join(os.path.dirname(sys.argv[0]),
                                   'svnadmin_tests_data',
                                   'load_txdelta.dump.gz')
  # Close the gzip handle deterministically instead of leaking it.
  with gzip.open(dumpfile_location, "rb") as dump_fp:
    dumpfile = dump_fp.readlines()

  load_dumpstream(sbox, dumpfile)

  # Verify would fail with a checksum mismatch:
  # * Error verifying revision 14.
  # svnadmin: E200014: MD5 checksum mismatch on representation 'r':
  #    expected:  5182e8876ed894dc7fe28f6ff5b2fee6
  #      actual:  5121f82875508863ad70daa8244e6947
  exit_code, output, errput = svntest.main.run_svnadmin("verify", sbox.repo_dir)
  if errput:
    raise SVNUnexpectedStderr(errput)
  if svntest.verify.verify_outputs(
    "Output of 'svnadmin verify' is unexpected.", None, output, None,
    ".*Verified revision *"):
    raise svntest.Failure
@Issues(4563)
def load_no_svndate_r0(sbox):
  "load without svn:date on r0"

  sbox.build(create_wc=False, empty=True)

  # svn:date exists on r0 of a freshly created repository
  svntest.actions.run_and_verify_svnlook([' svn:date\n'], [],
                                         'proplist', '--revprop', '-r0',
                                         sbox.repo_dir)

  # Minimal dump stream whose r0 carries an empty property block
  # (PROPS-END only, i.e. no svn:date).
  dump_old = [b"SVN-fs-dump-format-version: 2\n", b"\n",
              b"UUID: bf52886d-358d-4493-a414-944a6e5ad4f5\n", b"\n",
              b"Revision-number: 0\n",
              b"Prop-content-length: 10\n",
              b"Content-length: 10\n", b"\n",
              b"PROPS-END\n", b"\n"]

  svntest.actions.run_and_verify_load(sbox.repo_dir, dump_old)

  # svn:date should have been removed
  svntest.actions.run_and_verify_svnlook([], [],
                                         'proplist', '--revprop', '-r0',
                                         sbox.repo_dir)
# This is only supported for FSFS
# The port to FSX is still pending, BDB won't support it.
@SkipUnless(svntest.main.is_fs_type_fsfs)
def hotcopy_read_only(sbox):
  "'svnadmin hotcopy' a read-only source repository"
  sbox.build()
  # Drop all write permission bits on the source repository tree.
  svntest.main.chmod_tree(sbox.repo_dir, 0, svntest.main.S_ALL_WRITE)

  backup_dir, backup_url = sbox.add_repo_path('backup')
  exit_code, output, errput = svntest.main.run_svnadmin("hotcopy",
                                                        sbox.repo_dir,
                                                        backup_dir)

  # r/o repos are hard to clean up. Make it writable again.
  # (Restore permissions *before* checking errput, so cleanup happens
  # even when the hotcopy failed.)
  svntest.main.chmod_tree(sbox.repo_dir, svntest.main.S_ALL_WRITE,
                          svntest.main.S_ALL_WRITE)
  if errput:
    logger.warn("Error: hotcopy failed")
    raise SVNUnexpectedStderr(errput)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.fs_has_pack)
def fsfs_pack_non_sharded(sbox):
  "'svnadmin pack' on a non-sharded repository"

  # Create a repository in the oldest supported (non-sharded) format.
  sbox.build(create_wc = False,
             minor_version = min(svntest.main.options.server_minor_version,3))

  # Skip for pre-cooked sharded repositories
  if is_sharded(sbox.repo_dir):
    raise svntest.Skip('sharded pre-cooked repository')

  svntest.actions.run_and_verify_svnadmin(
      None, [], "upgrade", sbox.repo_dir)
  # Packing an unsharded repository must be a no-op that emits a warning.
  svntest.actions.run_and_verify_svnadmin(
      ['svnadmin: Warning - this repository is not sharded. Packing has no effect.\n'],
      [], "pack", sbox.repo_dir)
def load_revprops(sbox):
  "svnadmin load-revprops"

  sbox.build(create_wc=False, empty=True)

  dump_path = os.path.join(os.path.dirname(sys.argv[0]),
                           'svnadmin_tests_data',
                           'skeleton_repos.dump')
  dump_contents = open(dump_path, 'rb').readlines()
  load_and_verify_dumpstream(sbox, None, [], None, False, dump_contents)

  svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'],
                                         [], 'log', '-r1', sbox.repo_dir)

  # After loading the dump, amend one of the log messages in the repository.
  input_file = sbox.get_tempname()
  svntest.main.file_write(input_file, 'Modified log message...\n')

  svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks',
                                          '-r1', sbox.repo_dir, input_file)
  svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'],
                                         [], 'log', '-r1', sbox.repo_dir)

  # Load the same dump, but with 'svnadmin load-revprops'.  Doing so should
  # restore the log message to its original state.
  svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0,
                                 True, dump_contents, 'load-revprops',
                                 sbox.repo_dir)

  svntest.actions.run_and_verify_svnlook(['Initial setup...\n', '\n'],
                                         [], 'log', '-r1', sbox.repo_dir)
def dump_revprops(sbox):
  "svnadmin dump-revprops"

  sbox.build(create_wc=False)

  # Dump revprops only.
  exit_code, dump_contents, errput = \
    svntest.actions.run_and_verify_svnadmin(None, [], "dump-revprops", "-q",
                                            sbox.repo_dir)

  # We expect the dump to contain no path changes
  for line in dump_contents:
    if line.find(b"Node-path: ") > -1:
      logger.warn("Error: path change found in revprops-only dump.")
      raise svntest.Failure

  # Remember the current log message for r1
  exit_code, log_msg, errput = \
    svntest.actions.run_and_verify_svnlook(None, [], 'log', '-r1',
                                           sbox.repo_dir)

  # Now, change the log message in the repository.
  input_file = sbox.get_tempname()
  svntest.main.file_write(input_file, 'Modified log message...\n')

  svntest.actions.run_and_verify_svnadmin([], [], 'setlog', '--bypass-hooks',
                                          '-r1', sbox.repo_dir, input_file)
  svntest.actions.run_and_verify_svnlook(['Modified log message...\n', '\n'],
                                         [], 'log', '-r1', sbox.repo_dir)

  # Load the same dump with 'svnadmin load-revprops'.  Doing so should
  # restore the log message to its original state.
  svntest.main.run_command_stdin(svntest.main.svnadmin_binary, None, 0,
                                 True, dump_contents, 'load-revprops',
                                 sbox.repo_dir)

  svntest.actions.run_and_verify_svnlook(log_msg, [], 'log', '-r1',
                                         sbox.repo_dir)
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4598)
def dump_no_op_change(sbox):
  "svnadmin dump with no-op changes"

  sbox.build(create_wc=False, empty=True)
  empty_file = sbox.get_tempname()
  svntest.main.file_write(empty_file, '')

  svntest.actions.run_and_verify_svnmucc(None, [],
                                         '-U', sbox.repo_url,
                                         '-m', svntest.main.make_log_msg(),
                                         'put', empty_file, 'bar')
  # Commit a no-op change.
  svntest.actions.run_and_verify_svnmucc(None, [],
                                         '-U', sbox.repo_url,
                                         '-m', svntest.main.make_log_msg(),
                                         'put', empty_file, 'bar')
  # Dump and load the repository.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       sbox.repo_dir)
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
  # We expect svn log -v to yield identical results for both original and
  # reconstructed repositories.  This used to fail as described in the
  # Issue 4598 (https://issues.apache.org/jira/browse/SVN-4598), at least
  # around r1706415.
  #
  # Test svn log -v for r2:
  _, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
                                                      '-r2', sbox.repo_url)
  # The no-op change must still appear as a modification of /bar.
  found = [True for line in expected if line.find('M /bar\n') != -1]
  if not found:
    raise svntest.Failure
  svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
                                     '-r2', sbox2.repo_url)
  # Test svn log -v for /bar:
  _, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
                                                      sbox.repo_url + '/bar')
  found = [True for line in expected if line.find('M /bar\n') != -1]
  if not found:
    raise svntest.Failure
  svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
                                     sbox2.repo_url + '/bar')
@XFail(svntest.main.is_fs_type_bdb)
@XFail(svntest.main.is_fs_type_fsx)
@Issue(4623)
def dump_no_op_prop_change(sbox):
  "svnadmin dump with no-op property change"

  sbox.build(create_wc=False, empty=True)
  empty_file = sbox.get_tempname()
  svntest.main.file_write(empty_file, '')

  svntest.actions.run_and_verify_svnmucc(None, [],
                                         '-U', sbox.repo_url,
                                         '-m', svntest.main.make_log_msg(),
                                         'put', empty_file, 'bar',
                                         'propset', 'pname', 'pval', 'bar')
  # Commit a no-op property change.
  svntest.actions.run_and_verify_svnmucc(None, [],
                                         '-U', sbox.repo_url,
                                         '-m', svntest.main.make_log_msg(),
                                         'propset', 'pname', 'pval', 'bar')
  # Dump and load the repository.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       sbox.repo_dir)
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
  # As in dump_no_op_change: the reconstructed repository must report the
  # same history, including the no-op property modification of /bar.
  # Test svn log -v for r2:
  _, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
                                                      '-r2', sbox.repo_url)
  found = [True for line in expected if line.find('M /bar\n') != -1]
  if not found:
    raise svntest.Failure
  svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
                                     '-r2', sbox2.repo_url)
  # Test svn log -v for /bar:
  _, expected, _ = svntest.actions.run_and_verify_svn(None, [], 'log', '-v',
                                                      sbox.repo_url + '/bar')
  found = [True for line in expected if line.find('M /bar\n') != -1]
  if not found:
    raise svntest.Failure
  svntest.actions.run_and_verify_svn(expected, [], 'log', '-v',
                                     sbox2.repo_url + '/bar')
def load_no_flush_to_disk(sbox):
  "svnadmin load --no-flush-to-disk"

  sbox.build(empty=True)

  # Can't test the "not flushing to disk part", but loading the
  # dump should work.
  dump = clean_dumpfile()
  # Expected working-copy state after the load: a single file 'A'.
  expected = [
    svntest.wc.State('', {
      'A' : svntest.wc.StateItem(contents="text\n",
                                 props={'svn:keywords': 'Id'})
      })
    ]
  load_and_verify_dumpstream(sbox, [], [], expected, True, dump,
                             '--no-flush-to-disk', '--ignore-uuid')
def dump_to_file(sbox):
  "svnadmin dump --file ARG"

  # Reference: dump the repository through the normal stdout code path.
  sbox.build(create_wc=False, empty=False)
  expected_dump = svntest.actions.run_and_verify_dump(sbox.repo_dir)

  def _dump_to(target):
    # Run 'svnadmin dump --file TARGET' (progress goes to stderr, stdout
    # stays empty) and compare the written file against the reference dump.
    svntest.actions.run_and_verify_svnadmin2([],
                                             ["* Dumped revision 0.\n",
                                              "* Dumped revision 1.\n"],
                                             0, 'dump', '--file', target,
                                             sbox.repo_dir)
    # Close the handle deterministically instead of leaking it.
    with open(target, 'rb') as dump_fp:
      actual_dump = dump_fp.readlines()
    svntest.verify.compare_dump_files(None, None, expected_dump, actual_dump)

  _dump_to(sbox.get_tempname())

  # Test that svnadmin dump --file overwrites existing files.
  file = sbox.get_tempname()
  svntest.main.file_write(file, '')
  _dump_to(file)
def load_from_file(sbox):
  "svnadmin load --file ARG"

  sbox.build(empty=True)

  # Write a clean dump stream to a temp file ...
  file = sbox.get_tempname()
  with open(file, 'wb') as f:
    f.writelines(clean_dumpfile())

  # ... and load it via '--file' rather than stdin.
  svntest.actions.run_and_verify_svnadmin2(None, [],
                                           0, 'load', '--file', file,
                                           '--ignore-uuid', sbox.repo_dir)
  expected_tree = \
    svntest.wc.State('', {
      'A' : svntest.wc.StateItem(contents="text\n",
                                 props={'svn:keywords': 'Id'})
      })
  # Check the loaded content through a working-copy update.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
                                     'update', sbox.wc_dir)
  svntest.actions.verify_disk(sbox.wc_dir, expected_tree, check_props=True)
def dump_exclude(sbox):
  "svnadmin dump with excluded paths"

  sbox.build(create_wc=False)

  # Dump repository with /A/D/H and /A/B/E paths excluded.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--exclude', '/A/D/H',
                                                       '--exclude', '/A/B/E',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  The revision-header pattern is a raw string: '\ ' is an
  # invalid escape sequence in an ordinary string literal and raises
  # SyntaxWarning on modern Python.
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r1\ .*\n',
    # '/A/D/H' and '/A/B/E' is not added.
    re.escape('Changed paths:\n'),
    re.escape('   A /A\n'),
    re.escape('   A /A/B\n'),
    re.escape('   A /A/B/F\n'),
    re.escape('   A /A/B/lambda\n'),
    re.escape('   A /A/C\n'),
    re.escape('   A /A/D\n'),
    re.escape('   A /A/D/G\n'),
    re.escape('   A /A/D/G/pi\n'),
    re.escape('   A /A/D/G/rho\n'),
    re.escape('   A /A/D/G/tau\n'),
    re.escape('   A /A/D/gamma\n'),
    re.escape('   A /A/mu\n'),
    re.escape('   A /iota\n'),
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def dump_exclude_copysource(sbox):
  "svnadmin dump with excluded copysource"

  sbox.build(create_wc=False, empty=True)

  # Create default repository structure.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/trunk',
                                     sbox.repo_url + '/branches',
                                     sbox.repo_url + '/tags',
                                     "-m", "Create repository structure.")

  # Create a branch.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "copy",
                                     sbox.repo_url + '/trunk',
                                     sbox.repo_url + '/branches/branch1',
                                     "-m", "Create branch.")

  # Dump repository with /trunk excluded.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--exclude', '/trunk',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  Raw strings for the revision-header patterns: '\ ' is an
  # invalid escape sequence in an ordinary string literal (SyntaxWarning
  # on modern Python).
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r2\ .*\n',
    re.escape('Changed paths:\n'),
    # Simple add, not copy.
    re.escape('   A /branches/branch1\n'),
    '-+\\n',
    r'r1\ .*\n',
    # '/trunk' is not added.
    re.escape('Changed paths:\n'),
    re.escape('   A /branches\n'),
    re.escape('   A /tags\n'),
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def dump_include(sbox):
  "svnadmin dump with included paths"

  sbox.build(create_wc=False, empty=True)

  # Create a couple of directories.
  # Note that we can't use greek tree as it contains only two top-level
  # nodes. Including non top-level nodes (e.g. '--include /A/B/E') will
  # produce unloadable dump for now.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/A',
                                     sbox.repo_url + '/B',
                                     sbox.repo_url + '/C',
                                     "-m", "Create folder.")

  # Dump repository with /A and /C paths included.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--include', '/A',
                                                       '--include', '/C',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  Raw string for the revision-header pattern: '\ ' is an
  # invalid escape sequence in an ordinary string literal (SyntaxWarning
  # on modern Python).
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r1\ .*\n',
    # '/B' is not added.
    re.escape('Changed paths:\n'),
    re.escape('   A /A\n'),
    re.escape('   A /C\n'),
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def dump_not_include_copysource(sbox):
  "svnadmin dump with not included copysource"

  sbox.build(create_wc=False, empty=True)

  # Create default repository structure.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/trunk',
                                     sbox.repo_url + '/branches',
                                     sbox.repo_url + '/tags',
                                     "-m", "Create repository structure.")

  # Create a branch.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "copy",
                                     sbox.repo_url + '/trunk',
                                     sbox.repo_url + '/branches/branch1',
                                     "-m", "Create branch.")

  # Dump repository with only /branches included.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--include', '/branches',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  Raw strings for the revision-header patterns: '\ ' is an
  # invalid escape sequence in an ordinary string literal (SyntaxWarning
  # on modern Python).
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r2\ .*\n',
    re.escape('Changed paths:\n'),
    # Simple add, not copy.
    re.escape('   A /branches/branch1\n'),
    '-+\\n',
    r'r1\ .*\n',
    # Only '/branches' is added in r1.
    re.escape('Changed paths:\n'),
    re.escape('   A /branches\n'),
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def dump_exclude_by_pattern(sbox):
  "svnadmin dump with paths excluded by pattern"

  sbox.build(create_wc=False, empty=True)

  # Create a couple of directories.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/aaa',
                                     sbox.repo_url + '/aab',
                                     sbox.repo_url + '/aac',
                                     sbox.repo_url + '/bbc',
                                     "-m", "Create repository structure.")

  # Dump with paths excluded by pattern.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--exclude', '/aa?',
                                                       '--pattern',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  Raw string for the revision-header pattern: '\ ' is an
  # invalid escape sequence in an ordinary string literal (SyntaxWarning
  # on modern Python).
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r1\ .*\n',
    re.escape('Changed paths:\n'),
    # Only '/bbc' is added in r1.
    re.escape('   A /bbc\n'),
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def dump_include_by_pattern(sbox):
  "svnadmin dump with paths included by pattern"

  sbox.build(create_wc=False, empty=True)

  # Create a couple of directories.
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/aaa',
                                     sbox.repo_url + '/aab',
                                     sbox.repo_url + '/aac',
                                     sbox.repo_url + '/bbc',
                                     "-m", "Create repository structure.")

  # Dump with paths included by pattern.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--include', '/aa?',
                                                       '--pattern',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  Raw string for the revision-header pattern: '\ ' is an
  # invalid escape sequence in an ordinary string literal (SyntaxWarning
  # on modern Python).
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r1\ .*\n',
    # '/bbc' is not added.
    re.escape('Changed paths:\n'),
    re.escape('   A /aaa\n'),
    re.escape('   A /aab\n'),
    re.escape('   A /aac\n'),
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def dump_exclude_all_rev_changes(sbox):
  "svnadmin dump with all revision changes excluded"
  sbox.build(create_wc=False, empty=True)
  # Create a couple of directories (r1).
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/r1a',
                                     sbox.repo_url + '/r1b',
                                     sbox.repo_url + '/r1c',
                                     "-m", "Revision 1.")
  # Create a couple of directories (r2).
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/r2a',
                                     sbox.repo_url + '/r2b',
                                     sbox.repo_url + '/r2c',
                                     "-m", "Revision 2.")
  # Create a couple of directories (r3).
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "mkdir",
                                     sbox.repo_url + '/r3a',
                                     sbox.repo_url + '/r3b',
                                     sbox.repo_url + '/r3c',
                                     "-m", "Revision 3.")
  # Dump with paths excluded by pattern.
  # Every path changed in r2 matches '/r2?', so r2 ends up in the dump
  # with no changed paths at all.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--exclude', '/r2?',
                                                       '--pattern',
                                                       sbox.repo_dir)
  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)
  # Check log. Revision properties ('svn:log' etc.) should be empty for r2.
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    'r3 | jrandom | .* | 1 line\\n',
    re.escape('Changed paths:'),
    re.escape('   A /r3a'),
    re.escape('   A /r3b'),
    re.escape('   A /r3c'),
    '',
    re.escape('Revision 3.'),
    '-+\\n',
    # r2 survives as an empty revision: no author, no date, no paths.
    re.escape('r2 | (no author) | (no date) | 1 line'),
    '',
    '',
    '-+\\n',
    'r1 | jrandom | .* | 1 line\\n',
    re.escape('Changed paths:'),
    re.escape('   A /r1a'),
    re.escape('   A /r1b'),
    re.escape('   A /r1c'),
    '',
    re.escape('Revision 1.'),
    '-+\\n',
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', sbox2.repo_url)
def dump_invalid_filtering_option(sbox):
  "dump with --include and --exclude simultaneously"

  sbox.build(create_wc=False, empty=False)

  # Combining '--include' and '--exclude' on one command line must be
  # rejected with an error.
  expected_error = (".*: '--exclude' and '--include' options cannot be used "
                    "simultaneously")
  svntest.actions.run_and_verify_svnadmin(None, expected_error,
                                          'dump', '-q',
                                          '--exclude', '/A/D/H',
                                          '--include', '/A/B/E',
                                          sbox.repo_dir)
@Issue(4725)
def load_issue4725(sbox):
  """load that triggers issue 4725"""

  sbox.build(empty=True)

  # Build a small directory tree over three commits.
  sbox.simple_mkdir('subversion')
  sbox.simple_commit()
  sbox.simple_mkdir('subversion/trunk')
  sbox.simple_mkdir('subversion/branches')
  sbox.simple_commit()
  sbox.simple_mkdir('subversion/trunk/src')
  sbox.simple_commit()

  # Dump the repository ...
  _, dump_contents, _ = \
    svntest.actions.run_and_verify_svnadmin(None, [], 'dump', '-q',
                                            sbox.repo_dir)

  # ... and load it into a fresh repository with a small cache (-M100).
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump_contents,
                             '-M100')
@Issue(4767)
def dump_no_canonicalize_svndate(sbox):
  "svnadmin dump shouldn't canonicalize svn:date"

  sbox.build(create_wc=False, empty=True)
  svntest.actions.enable_revprop_changes(sbox.repo_dir)

  # Store svn:date on r0 in a non-canonical form (not six decimal
  # places in the fractional seconds).
  noncanonical_date = "2015-01-01T00:00:00.0Z"
  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [],
                                     "propset", "--revprop", "-r0",
                                     "svn:date", noncanonical_date,
                                     sbox.repo_url)

  # The dump must contain the value exactly as stored.
  dump_lines = svntest.actions.run_and_verify_dump(sbox.repo_dir)
  assert noncanonical_date.encode() + b'\n' in dump_lines
def check_recover_prunes_rep_cache(sbox, enable_rep_sharing):
  """Check 'recover' prunes the rep-cache while enable-rep-sharing is
  true/false.

  SBOX must hold an already-built repository with a working copy.
  ENABLE_REP_SHARING selects the [rep-sharing] value written to
  fsfs.conf before recovery; pruning is expected either way.
  """
  # Remember the initial rep cache content.
  rep_cache_r1 = read_rep_cache(sbox.repo_dir)
  #print '\n'.join([h + ": " + repr(ref) for h, ref in rep_cache_r1.items()])
  # Commit one new rep and check the rep-cache is extended.
  sbox.simple_append('iota', 'New line.\n')
  sbox.simple_commit()
  rep_cache_r2 = read_rep_cache(sbox.repo_dir)
  # The new file text must have added exactly one cache entry.
  if not (len(rep_cache_r2) == len(rep_cache_r1) + 1):
    raise svntest.Failure
  fsfs_conf = svntest.main.get_fsfs_conf_file_path(sbox.repo_dir)
  svntest.main.file_append(fsfs_conf,
                           # Add a newline in case the existing file doesn't
                           # end with one.
                           "\n"
                           "[rep-sharing]\n"
                           "enable-rep-sharing = %s\n"
                           % (('true' if enable_rep_sharing else 'false'),))
  # Break r2 in such a way that 'recover' will discard it
  head_rev_path = fsfs_file(sbox.repo_dir, 'revs', '2')
  os.remove(head_rev_path)
  current_path = os.path.join(sbox.repo_dir, 'db', 'current')
  svntest.main.file_write(current_path, '1\n')
  # Recover back to r1.
  svntest.actions.run_and_verify_svnadmin(None, [],
                                          "recover", sbox.repo_dir)
  svntest.actions.run_and_verify_svnlook(['1\n'], [], 'youngest',
                                         sbox.repo_dir)
  # Check the rep-cache is pruned.
  # After recovery to r1 the cache must equal its pre-commit snapshot,
  # i.e. the entry added by the discarded r2 commit is gone.
  rep_cache_recovered = read_rep_cache(sbox.repo_dir)
  if not (rep_cache_recovered == rep_cache_r1):
    raise svntest.Failure
@Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_enabled(sbox):
  "recover prunes rep cache when enabled"

  sbox.build()
  # Delegate to the shared helper with rep-sharing switched on.
  check_recover_prunes_rep_cache(sbox, True)
@Issue(4077)
@SkipUnless(svntest.main.is_fs_type_fsfs)
@SkipUnless(svntest.main.python_sqlite_can_read_without_rowid)
def recover_prunes_rep_cache_when_disabled(sbox):
  "recover prunes rep cache when disabled"

  sbox.build()
  # Delegate to the shared helper with rep-sharing switched off.
  check_recover_prunes_rep_cache(sbox, False)
@Issue(4760)
def dump_include_copied_directory(sbox):
  "include copied directory with nested nodes"

  sbox.build(create_wc=False)

  svntest.actions.run_and_verify_svn(svntest.verify.AnyOutput, [], "copy",
                                     sbox.repo_url + '/A/D',
                                     sbox.repo_url + '/COPY',
                                     "-m", "Create branch.")

  # Dump repository with only /COPY path included.
  _, dump, _ = svntest.actions.run_and_verify_svnadmin(None, [],
                                                       'dump', '-q',
                                                       '--include', '/COPY',
                                                       sbox.repo_dir)

  # Load repository from dump.
  sbox2 = sbox.clone_dependent()
  sbox2.build(create_wc=False, empty=True)
  load_and_verify_dumpstream(sbox2, None, [], None, False, dump)

  # Check log.  Raw strings for the revision-header patterns: '\ ' is an
  # invalid escape sequence in an ordinary string literal (SyntaxWarning
  # on modern Python).
  expected_output = svntest.verify.RegexListOutput([
    '-+\\n',
    r'r2\ .*\n',
    # Only '/COPY' is added
    re.escape('Changed paths:\n'),
    re.escape('   A /COPY'),
    re.escape('   A /COPY/G'),
    re.escape('   A /COPY/G/pi'),
    re.escape('   A /COPY/G/rho'),
    re.escape('   A /COPY/G/tau'),
    re.escape('   A /COPY/H'),
    re.escape('   A /COPY/H/chi'),
    re.escape('   A /COPY/H/omega'),
    re.escape('   A /COPY/H/psi'),
    re.escape('   A /COPY/gamma'),
    '-+\\n',
    r'r1\ .*\n',
    '-+\\n'
  ])
  svntest.actions.run_and_verify_svn(expected_output, [],
                                     'log', '-v', '-q', sbox2.repo_url)
def load_normalize_node_props(sbox):
  "svnadmin load --normalize node props"
  # Dump stream whose svn:ignore node property value mixes EOL styles
  # ('\n' followed by '\r\n'); such a value fails property validation
  # unless normalization is requested.
  dump_str = b"""SVN-fs-dump-format-version: 2
UUID: dc40867b-38f6-0310-9f5f-f81aa277e06f
Revision-number: 0
Prop-content-length: 56
Content-length: 56
K 8
svn:date
V 27
2005-05-03T19:09:41.129900Z
PROPS-END
Revision-number: 1
Prop-content-length: 99
Content-length: 99
K 7
svn:log
V 0
K 10
svn:author
V 2
pl
K 8
svn:date
V 27
2005-05-03T19:10:19.975578Z
PROPS-END
Node-path:
Node-kind: dir
Node-action: change
Prop-content-length: 32
Content-length: 32
K 10
svn:ignore
V 3
\n\r\n
PROPS-END
"""
  sbox.build(empty=True)
  # Try to load the dumpstream, expecting a failure (because of mixed
  # EOLs in the svn:ignore property value).
  exp_err = svntest.verify.RegexListOutput(['svnadmin: E125005:.*',
                                            'svnadmin: E125017:.*'],
                                           match_all=False)
  load_and_verify_dumpstream(sbox, [], exp_err, dumpfile_revisions,
                             False, dump_str, '--ignore-uuid')
  # Now try it again with prop normalization.
  svntest.actions.load_repo(sbox, dump_str=dump_str,
                            bypass_prop_validation=False,
                            normalize_props=True)
  # We should get the normalized property value.
  # With --normalize the mixed EOLs become two LFs.
  exit_code, output, _ = svntest.main.run_svn(None, 'pg', 'svn:ignore',
                                              '--no-newline',
                                              sbox.repo_url)
  svntest.verify.verify_exit_code(None, exit_code, 0)
  if output != ['\n', '\n']:
    raise svntest.Failure("Unexpected property value %s" % output)
########################################################################
# Run the tests
# list all tests here, starting with None:
# (the leading None placeholder means the harness addresses tests
# starting from number 1)
test_list = [ None,
              extra_headers,
              extra_blockcontent,
              inconsistent_headers,
              empty_date,
              dump_copied_dir,
              dump_move_dir_modify_child,
              dump_quiet,
              hotcopy_dot,
              hotcopy_format,
              setrevprop,
              verify_windows_paths_in_repos,
              verify_incremental_fsfs,
              fsfs_recover_db_current,
              fsfs_recover_old_db_current,
              load_with_parent_dir,
              set_uuid,
              reflect_dropped_renumbered_revs,
              fsfs_recover_handle_missing_revs_or_revprops_file,
              create_in_repo_subdir,
              verify_with_invalid_revprops,
              dont_drop_valid_mergeinfo_during_incremental_loads,
              hotcopy_symlink,
              load_bad_props,
              verify_non_utf8_paths,
              test_lslocks_and_rmlocks,
              load_ranges,
              hotcopy_incremental,
              hotcopy_incremental_packed,
              locking,
              mergeinfo_race,
              recover_old_empty,
              verify_keep_going,
              verify_keep_going_quiet,
              verify_invalid_path_changes,
              verify_denormalized_names,
              fsfs_recover_old_non_empty,
              fsfs_hotcopy_old_non_empty,
              load_ignore_dates,
              fsfs_hotcopy_old_with_id_changes,
              verify_packed,
              freeze_freeze,
              verify_metadata_only,
              verify_quickly,
              fsfs_hotcopy_progress,
              fsfs_hotcopy_progress_with_revprop_changes,
              fsfs_hotcopy_progress_old,
              freeze_same_uuid,
              upgrade,
              load_txdelta,
              load_no_svndate_r0,
              hotcopy_read_only,
              fsfs_pack_non_sharded,
              load_revprops,
              dump_revprops,
              dump_no_op_change,
              dump_no_op_prop_change,
              load_no_flush_to_disk,
              dump_to_file,
              load_from_file,
              dump_exclude,
              dump_exclude_copysource,
              dump_include,
              dump_not_include_copysource,
              dump_exclude_by_pattern,
              dump_include_by_pattern,
              dump_exclude_all_rev_changes,
              dump_invalid_filtering_option,
              load_issue4725,
              dump_no_canonicalize_svndate,
              recover_prunes_rep_cache_when_enabled,
              recover_prunes_rep_cache_when_disabled,
              dump_include_copied_directory,
              load_normalize_node_props,
             ]
if __name__ == '__main__':
  # run_tests() terminates the process, so control never returns here.
  svntest.main.run_tests(test_list)
  # NOTREACHED
### End of file.
| subversion/tests/cmdline/svnadmin_tests.py | 162,337 | Manages indexes of a rev file in a FSFS format 7 repository.
The interface returns P2L information and allows for item offsets
and lengths to be modified.
Read P2L index using svnfsfs.
Rewrite indexes using svnfsfs.
Verify that the SRC BDB repository has been correctly copied to DST.
Verify that the SRC FSFS repository has been correctly copied to DST.
Verify that the SRC FSX repository has been correctly copied to DST.
Check 'recover' prunes the rep-cache while enable-rep-sharing is
true/false.
Build up a MINOR_VERSION sandbox and test different recovery scenarios
with missing, out-of-date or even corrupt db/current files. Recovery should
behave the same way with all values of MINOR_VERSION, hence this helper
containing the common code that allows us to check it.
'svnadmin create /path/to/repo/subdir'
don't filter mergeinfo revs from incremental dump
'svnadmin dump' on copied directory
svnadmin dump with excluded paths
svnadmin dump with all revision changes excluded
svnadmin dump with paths excluded by pattern
svnadmin dump with excluded copysource
svnadmin dump with included paths
svnadmin dump with paths included by pattern
include copied directory with nested nodes
dump with --include and --exclude simultaneously
'svnadmin dump' on modified child of copied dir
svnadmin dump shouldn't canonicalize svn:date
svnadmin dump with no-op changes
svnadmin dump with no-op property change
svnadmin dump with not included copysource
'svnadmin dump --quiet'
svnadmin dump-revprops
svnadmin dump --file ARG
preserve date-less revisions in load
load success on oversized Content-length
loading of dumpstream with extra headers
svnadmin freeze svnadmin freeze (some-cmd)
freeze multiple repositories with same UUID
fsfs hotcopy non-empty --compatible-version=1.3
fsfs hotcopy old with node-id and copy-id changes
hotcopy progress reporting
hotcopy --compatible-version=1.3 progress
incremental hotcopy progress with changed revprops
'svnadmin pack' on a non-sharded repository
fsfs recover db/current
fsfs recovery checks missing revs / revprops files
fsfs recover db/current --compatible-version=1.3
fsfs recover non-empty --compatible-version=1.3
Return offset, length and type of ITEM.
Get the txn names using 'svnadmin lstxns'.
'svnadmin hotcopy PATH .'
'svnadmin hotcopy' checking db/format file
'svnadmin hotcopy --incremental PATH .'
'svnadmin hotcopy --incremental' with packing
'svnadmin hotcopy' a read-only source repository
'svnadmin hotcopy' replicates symlink
load failure on undersized Content-length
Return whether the FSFS repository REPO_DIR is sharded.
Load the array of lines passed in DUMP into the current tests'
repository and verify the repository content using the array of
wc.States passed in REVS. If CHECK_PROPS is True, check properties
of each rev's items. VARARGS are optional arguments passed to the
'load' command.
svnadmin load with invalid svn: props
Load dump text without verification.
svnadmin load --file ARG
svnadmin load --ignore-dates
load that triggers issue 4725
svnadmin load --no-flush-to-disk
load without svn:date on r0
svnadmin load --normalize node props
'svnadmin load --revision X:Y'
svnadmin load-revprops
exercising svn_txdelta_target on BDB
'svnadmin load --parent-dir' reparents mergeinfo
svnadmin lock tests
concurrent mergeinfo commits invalidate pred-count
Modify offset and length of ITEM.
Rewrite the format of the FSFS or FSX repository REPO_DIR so
that it would use sharding with SHARDS revisions per shard.
Return the rep-cache contents as a dict {hash: (rev, index, ...)}.
recover empty --compatible-version=1.3
recover prunes rep cache when disabled
recover prunes rep cache when enabled
reflect dropped renumbered revs in svn:mergeinfo
Return the repository format number for SBOX.
Replace the changed paths list in the revision file REVISION in SBOX
with the text CHANGES.
test 'svnadmin setuuid'
setlog, setrevprop, delrevprop; bypass hooks
test 'svnadmin lslocks' and 'svnadmin rmlocks'
upgrade --compatible-version=1.3
detect denormalized names and name collisions
svnadmin verify detects corruption dump can't
detect invalid changed path list entries
svnadmin verify --keep-going test
svnadmin verify --keep-going --quiet test
verify metadata only
svnadmin verify with non-UTF-8 paths
verify packed with small shards
verify quickly using metadata
verify a repository containing paths like 'c:hi'
svnadmin verify detects invalid revprops file
!/usr/bin/env python svnadmin_tests.py: testing the 'svnadmin' tool. Subversion is a tool for revision control. See http://subversion.apache.org for more information. ==================================================================== Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. General modules Our testing module (abbreviation) Can't test newer rep-cache schemas with an old built-in SQLite; see the documentation of STMT_CREATE_SCHEMA_V2 in ../../libsvn_fs_fs/rep-cache-db.sql TODO: This function should be extended to verify all hotcopied files, not just compare the output of 'svnadmin dump'. See check_hotcopy_fsfs(). Walk the source and compare all files to the destination Verify that the current directory exists in the destination Verify that all dirents in the current directory also exist in source Ignore auto-created empty lock files as they may or may not be present and are neither required by nor do they harm to the destination repository. Ignore auto-created rep-cache.db-journal file Compare all files in this directory Ignore auto-created empty lock files as they may or may not be present and are neither required by nor do they harm to the destination repository. 
Ignore auto-created rep-cache.db-journal file Special case for db/uuid: Only the UUID in the first line needs to match. Source and target must have the same number of lines (due to having the same format). Special case for rep-cache: It will always differ in a byte-by-byte comparison, so compare db tables instead. Can't test newer rep-cache schemas with an old built-in SQLite. Special case for revprop-generation: It will always be zero in the hotcopy destination (i.e. a fresh cache generation) both at EOF---------------------------------------------------------------------- How we currently test 'svnadmin' -- 'svnadmin create': Create an empty repository, test that the root node has a proper created-revision, because there was once a bug where it didn't. Note also that "svnadmin create" is tested implicitly every time we run a python test script. (An empty repository is always created and then imported into; if this subcommand failed catastrophically, every test would fail and we would know instantly.) 'svnadmin createtxn' 'svnadmin rmtxn': See below. 'svnadmin lstxns': We don't care about the contents of transactions; we only care that they exist or not. Therefore, we can simply parse transaction headers. 'svnadmin dump': A couple regression tests that ensure dump doesn't error out, and one to check that the --quiet option really does what it's meant to do. The actual contents of the dump aren't verified at all. TODO: someday maybe we could parse the contents of trees too. 
Helper routines The expected error occurred, so don't try to verify the result verify revs as wc states read full file replace the changed paths list read & parse revision file footer split file contents construct new footer, include indexes as are set new contents Tests---------------------------------------------------------------------- dump stream tests need a dump file-------------------------------------------------------------------------------------------------------------------------------------------- Ensure loading continues after skipping a bit of unknown extra content. Replace "Content-length" line with two lines Insert the extra content after "PROPS-END\n"-------------------------------------------------------------------------------------------------------------------------------------------- Test for issue 2729: Datestamp-less revisions in dump streams do not remain so after load Replace portions of the revision data to drop the svn:date revprop. Verify that the revision still lacks the svn:date property.-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- This test is redundant for FSFS. The hotcopy_dot and hotcopy_incremental tests cover this check for FSFS already. verify that the db/format files are the same---------------------------------------------------------------------- Try a simple log property modification. Make sure it fails without --bypass-hooks. (We haven't called svntest.actions.enable_revprop_changes().) Note that we attempt to set the log message to a different value than the successful call. Verify that the revprop value matches what we set when retrieved through the client. Try an author property modification. 
Verify that the revprop value matches what we set when retrieved through the client. Delete the property. setup a repo with a directory 'c:hi' unfortunately, some backends needs to do more checks than other resulting in different progress output---------------------------------------------------------------------- Returns the filename of the rev or revprop file (according to KIND) numbered REV in REPO_DIR, which must be in the first shard if we're using a sharded repository. we don't pack revprops rev exists outside a pack file. didn't find the plain file; assume it's in a pack file setup a repo with a directory 'c:hi' use physical addressing as this is hard to provoke with logical addressing Create A/B/E/bravo in r2. Corrupt r2's reference to A/C by replacing "dir 7-1.0.r1/1568" with "dir 7-1.0.r1/1569" (increment offset) and updating the checksum for this directory listing to "c9b5a2d26473a4e28088673dda9df804" so that the listing itself is valid.---------------------------------------------------------------------- Helper for two test functions. Commit up to r3, so we can test various recovery scenarios. Remember the contents of the db/current file. Move aside the current file for r3. Run 'svnadmin recover' and check that the current file is recreated. Now try writing db/current to be one rev lower than it should be. Run 'svnadmin recover' and check that the current file is fixed. Now try writing db/current to be *two* revs lower than it should be. Run 'svnadmin recover' and check that the current file is fixed. Now try writing db/current to be fish revs lower than it should be. Note: I'm not actually sure it's wise to recover from this, but detecting it would require rewriting fs_fs.c:get_youngest() to check the actual contents of its buffer, since atol() will happily convert "fish" to 0. Run 'svnadmin recover' and check that the current file is fixed. 
Around trunk@1573728, 'svnadmin recover' wrongly errored out for the --compatible-version=1.3 repositories with missing or invalid db/current file: svnadmin: E160006: No such revision 1---------------------------------------------------------------------- See https://issues.apache.org/jira/browse/SVN-2983. Create 'sample' dir in sbox.repo_url, and load the dump stream there. Verify the svn:mergeinfo properties for '--parent-dir' Create 'sample-2' dir in sbox.repo_url, and load the dump stream again. This time, don't include a leading slash on the --parent-dir argument. See issue 3547. Verify the svn:mergeinfo properties for '--parent-dir'.---------------------------------------------------------------------- Squirrel away the original repository UUID. Try setting a new, bogus UUID. Try generating a brand new UUID. Now, try setting the UUID back to the original value.---------------------------------------------------------------------- See https://issues.apache.org/jira/browse/SVN-3020. Create 'toplevel' dir in sbox.repo_url Load the dump stream in sbox.repo_url Load the dump stream in toplevel dir Verify the svn:mergeinfo properties---------------------------------------------------------------------- Set up a repository containing the greek tree. Commit up to r3, so we can test various recovery scenarios. Move aside the revs file for r3. Verify 'svnadmin recover' fails when youngest has a revprops file but no revs file. For example, if svntest.main.fsfs_sharding == 2, then rev_3 would be the pack file for r2:r3, and the error message would report "<= 1". Restore the r3 revs file, thus repairing the repository. Move aside the revprops file for r3. Verify 'svnadmin recover' fails when youngest has a revs file but no revprops file (issue 2992). Restore the r3 revprops file, thus repairing the repository. 
Change revprops file to a directory for revision 3 Verify 'svnadmin recover' fails when youngest has a revs file but revprops file is not a file (another aspect of issue 2992). Restore the r3 revprops file, thus repairing the repository.---------------------------------------------------------------------- This should fail This should fail, too Run a test verify Empty the revprops file---------------------------------------------------------------------- Even *more* testing for issue 3020 'Reflect dropped/renumbered revisions in svn:mergeinfo data during svnadmin load' Full or incremental dump-load cycles should result in the same mergeinfo in the loaded repository. Given a repository 'SOURCE-REPOS' with mergeinfo, and a repository 'TARGET-REPOS' (which may or may not be empty), either of the following methods to move 'SOURCE-REPOS' to 'TARGET-REPOS' should result in the same mergeinfo on 'TARGET-REPOS': 1) Dump -r1:HEAD from 'SOURCE-REPOS' and load it in one shot to 'TARGET-REPOS'. 2) Dump 'SOURCE-REPOS' in a series of incremental dumps and load each of them to 'TARGET-REPOS'. See https://issues.apache.org/jira/browse/SVN-3020desc13 Create an empty repos. PART 1: Load a full dump to an empty repository. 
The test repository used here, 'mergeinfo_included_full.dump', is this repos: __________________________________________ | | | ____________________________|_____ | | | | trunk---r2---r3-----r5---r6-------r8---r9---------------> | | r1 | | | | | | initial | | | |______ | | import copy | copy | merge merge | | | merge (r5) (r8) | | | (r9) | | | | | | | | | | V V | | | | branches/B2-------r11---r12----> | | | | r7 |____| | | | | | | | | merge |___ | | | (r6) | | | | |_________________ | | | | | merge | | | | (r11-12) | | | | | | | V V V | | branches/B1-------------------r10--------r13--> | | r4 | | | V V branches/B1/B/E------------------------------r14---r15-> The mergeinfo on this repos@15 is: Properties on 'branches/B1': svn:mergeinfo /branches/B2:11-12 /trunk:6,9 Properties on 'branches/B1/B/E': svn:mergeinfo /branches/B2/B/E:11-12 /trunk/B/E:5-6,8-9 Properties on 'branches/B2': svn:mergeinfo /trunk:9 Check that the mergeinfo is as expected. PART 2: Load a a series of incremental dumps to an empty repository. Incrementally dump the repository into three dump files: Blow away the current repos and create an empty one in its place. Load the three incremental dump files in sequence. Check the mergeinfo, we use the same expected output as before, as it (duh!) should be exactly the same as when we loaded the repos in one shot. Now repeat the above two scenarios, but with an initially non-empty target repository. First, try the full dump-load in one shot. PART 3: Load a full dump to an non-empty repository. Reset our sandbox. Load this skeleton repos into the empty target: Projects/ (Added r1) README (Added r2) Project-X (Added r3) Project-Y (Added r4) Project-Z (Added r5) docs/ (Added r6) README (Added r6) Load 'svnadmin_tests_data/mergeinfo_included_full.dump' in one shot: Check that the mergeinfo is as expected. 
This is exactly the same expected mergeinfo we previously checked, except that the revisions are all offset +6 to reflect the revions already in the skeleton target before we began loading and the leading source paths are adjusted by the --parent-dir: Properties on 'branches/B1': svn:mergeinfo /Projects/Project-X/branches/B2:17-18 /Projects/Project-X/trunk:12,15 Properties on 'branches/B1/B/E': svn:mergeinfo /Projects/Project-X/branches/B2/B/E:17-18 /Projects/Project-X/trunk/B/E:11-12,14-15 Properties on 'branches/B2': svn:mergeinfo /Projects/Project-X/trunk:15 PART 4: Load a a series of incremental dumps to an non-empty repository. Reset our sandbox. Load this skeleton repos into the empty target: Load the three incremental dump files in sequence. Check the resulting mergeinfo. We expect the exact same results as Part 3. See https://issues.apache.org/jira/browse/SVN-3020desc16. See https://issues.apache.org/jira/browse/SVN-2591. Create a repository. Create a file, a dir and a missing path outside the repoitory. Symlink definitions: base name -> target relpath. Check both existing and nonexistent targets. Check targets both within and outside the source repository. Create symlinks within the repository directory. Create two symlinks to each target - one relative, one absolute. Check if the symlinks were copied correctly. Check two symlinks to each target - one relative, one absolute. Try to load the dumpstream, expecting a failure (because of mixed EOLs). Now try it again bypassing prop validation. (This interface takes care of the removal and recreation of the original repository.) Getting the property should fail. Now try it again with prop normalization. We should get the expected property value. This test intentionally corrupts a revision and assumes an FSFS repository. If you can make it work with BDB please do so. However, the verification triggered by this test is in the repos layer so it will trigger with either backend anyway. 
Corruption only possible in physically addressed revisions created with pre-1.6 servers. Load the dumpstream Replace the path 'A' in revision 1 with a non-UTF-8 sequence. This has been observed in repositories in the wild, though Subversion 1.6 and greater should prevent such filenames from entering the repository. replace 'A' with a latin1 character -- the new path is not valid UTF-8 phys, PLAIN directories: fix up the representation checksum phys, deltified directories: fix up the representation checksum also fix up the 'created path' field and another occurrance Verify the repository, expecting failure Make sure the repository can still be dumped so that the encoding problem can be fixed in a dump/edit/load cycle. Lock iota and A/B/lambda using svn client empty line List all locks List lock in path /A Remove locks---------------------------------------------------------------------- See https://issues.apache.org/jira/browse/SVN-3734. Load our dumpfile, 2 revisions at a time, verifying that we have the correct youngest revision after each load. There are ordering differences in the property blocks. Configure two files per shard to trigger packing. Pack revisions 0 and 1 if not already packed. Commit 5 more revs, hotcopy and pack after each commit. Test illegal character in comment file. Test locking path with --bypass-hooks Remove lock Test locking path without --bypass-hooks Test locking already locked path. Test locking non-existent path. Test locking a path while specifying a lock token. Test unlocking a path, but provide the wrong lock token. Test unlocking the path again, but this time provide the correct lock token. Install lock/unlock prevention hooks. Test locking a path. Don't use --bypass-hooks, though, as we wish to verify that hook script is really getting executed. Fetch the lock token for our remaining locked path. (We didn't explicitly set it, so it will vary from test run to test run.) 
Try to unlock a path while providing the correct lock token but with a preventative hook in place. Finally, use --bypass-hooks to unlock the path (again using the correct lock token). This test exercises two commit-time race condition bugs: (a) metadata corruption when concurrent commits change svn:mergeinfo (issue 4129) (b) false positive SVN_ERR_FS_CONFLICT error with httpv1 commits https://mail-archives.apache.org/mod_mbox/subversion-dev/201507.mbox/%3C20150731234536.GA5395@tarsus.local2%3E Both bugs are timing-dependent and might not reproduce 100% of the time. Create wc2. Some random edits. Set random mergeinfo properties. t2 will trigger the issue 4129 sanity check in fs_fs.c Crude attempt to make sure everything worked. TODO: better way to catch exceptions in the thread No support for modifying pack files Create A/B/E/bravo in r2. Don't leave a corrupt repository No support for modifying pack files Create A/B/E/bravo in r2. Insert another expected error from checksum verification Don't leave a corrupt repository No support for modifying pack files Create a number of revisions each adding a single path modify every other revision to make sure that errors are not simply "carried over" but that all corrupts we get detected independently add existing node add into non-existent parent del non-existent node del existent node of the wrong kind THIS WILL NOT BE DETECTED since dump mechanism and file don't care about the types of deleted nodes copy from non-existent node copy from existing node of the wrong kind modify non-existent node modify existent node of the wrong kind replace non-existent node find corruptions Errors generated by FSFS when CHANGED_PATHS is not forced into emulation If CHANGED_PATHS is emulated, FSFS fails earlier, generating fewer of the same messages per revision. Determine which pattern to use. Note that index() will throw an exception if the string can't be found. 
Don't leave a corrupt repository A/{Eacute}/{aring}lpha Q/{aring}lpha A/{Eacute} The BDB backend doesn't do global metadata verification. Around trunk@1560210, 'svnadmin recover' wrongly errored out for the --compatible-version=1.3 Greek tree repository: svnadmin: E200002: Serialized hash missing terminator Around trunk@1560210, 'svnadmin hotcopy' wrongly errored out for the --compatible-version=1.3 Greek tree repository: svnadmin: E160006: No such revision 1 All revisions in the loaded repository should come after this time. Around trunk@1573728, running 'svnadmin hotcopy' for the --compatible-version=1.3 repository with certain node-id and copy-id changes ended with mismatching db/current in source and destination: source: "2 l 1" destination: "2 k 1", "3 l 2" "3 4 2" (and so on...) We test this case by creating a --compatible-version=1.3 repository and committing things that result in node-id and copy-id changes. After every commit, we hotcopy the repository to a new destination and check whether the source of the backup and the backup itself are identical. We also maintain a separate --incremental backup, which is updated and checked after every commit. r1 = Initial greek tree sandbox. r2 = Add a new property. r3 = Copy a file. r4 = Remove an existing file ... r5 = ...and replace it with a new file here. r6 = Add an entirely new file. r7 = Change the content of the existing file (this changeset does not bump the next-id and copy-id counters in the repository). Configure two files per shard to trigger packing. Play with our greek tree. These changesets fall into two separate shards with r2 and r3 being in shard 1 ... ...and r4 and r5 being in shard 2. With --fsfs-packing, everything is already packed and we can skip this part. Test that 'svnadmin freeze' is nestable. (For example, this ensures it won't take system-global locks, only repository-scoped ones.) This could be useful to easily freeze a small number of repositories at once. 
We don't actually test that freeze takes a write lock anywhere (not even in C tests.) FSFS repositories created with --compatible-version=1.8 and less erroneously share the filesystem data (locks, shared transaction data, ...) between hotcopy source and destination. This is fixed for new FS formats, but in order to avoid a deadlock for old formats, we have to manually assign a new UUID for the hotcopy destination. As of trunk@1618024, the same applies to FSX repositories. Unfortunately, older formats won't test as thoroughly than newer ones resulting in different progress output. BDB will do a full check but not produce any output. set new contents unfortunately, some backends needs to do more checks than other resulting in different progress output Don't leave a corrupt repository Check how 'svnadmin hotcopy' reports progress for non-incremental and incremental scenarios. The progress output can be affected by the --fsfs-packing option, so skip the test if that is the case. Create an empty repository, configure three files per shard. Nothing really exciting for the empty repository. Commit three revisions. After this step we have a full shard (r0, r1, r2) and the second shard (r3) with a single revision. Pack everything (r3 is still unpacked) and hotcopy again. In this case, the --incremental output should track the incoming (r0, r1, r2) pack and should not mention r3, because it is already a part of the destination and is *not* a part of the incoming pack. Fill the second shard, pack again, commit several unpacked revisions on top of it. Rerun the hotcopy and check the progress output. The progress output can be affected by the --fsfs-packing option, so skip the test if that is the case. Create an empty repository, commit several revisions and hotcopy it. Amend a few log messages in the source, run the --incremental hotcopy. The progress output should only mention the corresponding revisions. Nothing really exciting for the empty repository. 
Commit three revisions, hotcopy and check the progress output. Test that 'svnadmin freeze A (svnadmin freeze B)' does not deadlock for new FSFS formats, even if 'A' and 'B' share the same UUID. Create two repositories by loading the same dump file, ... ...and execute the 'svnadmin freeze -F' command. Does the repository work after upgrade? This dumpfile produced a BDB repository that generated cheksum mismatches on read caused by the improper handling of svn_txdelta_target ops. The bug was fixed by r1640832. Verify would fail with a checksum mismatch: * Error verifying revision 14. svnadmin: E200014: MD5 checksum mismatch on representation 'r': expected: 5182e8876ed894dc7fe28f6ff5b2fee6 actual: 5121f82875508863ad70daa8244e6947 svn:date exits svn:date should have been removed This is only supported for FSFS The port to FSX is still pending, BDB won't support it. r/o repos are hard to clean up. Make it writable again. Configure two files per shard to trigger packing. Skip for pre-cooked sharded repositories After loading the dump, amend one of the log message in the repository. Load the same dump, but with 'svnadmin load-revprops'. Doing so should restore the log message to its original state. Dump revprops only. We expect the dump to contain no path changes Remember the current log message for r1 Now, change the log message in the repository. Load the same dump with 'svnadmin load-revprops'. Doing so should restore the log message to its original state. Commit a no-op change. Dump and load the repository. We expect svn log -v to yield identical results for both original and reconstructed repositories. This used to fail as described in the Issue 4598 (https://issues.apache.org/jira/browse/SVN-4598), at least around r1706415. Test svn log -v for r2: Test svn log -v for /bar: Commit a no-op property change. Dump and load the repository. Test svn log -v for r2: Test svn log -v for /bar: Can't test the "not flushing to disk part", but loading the dump should work. 
Test that svnadmin dump --file overwrites existing files. Dump repository with /A/D/H and /A/B/E paths excluded. Load repository from dump. Check log. '/A/D/H' and '/A/B/E' is not added. Create default repository structure. Create a branch. Dump repository with /trunk excluded. Load repository from dump. Check log. Simple add, not copy. '/trunk' is not added. Create a couple of directories. Note that we can't use greek tree as it contains only two top-level nodes. Including non top-level nodes (e.g. '--include /A/B/E') will produce unloadable dump for now. Dump repository with /A and /C paths included. Load repository from dump. Check log. '/B' is not added. Create default repository structure. Create a branch. Dump repository with only /branches included. Load repository from dump. Check log. Simple add, not copy. Only '/branches' is added in r1. Create a couple of directories. Dump with paths excluded by pattern. Load repository from dump. Check log. Only '/bbc' is added in r1. Create a couple of directories. Dump with paths included by pattern. Load repository from dump. Check log. '/bbc' is not added. Create a couple of directories (r1). Create a couple of directories (r2). Create a couple of directories (r3). Dump with paths excluded by pattern. Load repository from dump. Check log. Revision properties ('svn:log' etc.) should be empty for r2. Attempt to dump repository with '--include' and '--exclude' options specified simultaneously. set svn:date in a non-canonical format (not six decimal places) Remember the initial rep cache content.print '\n'.join([h + ": " + repr(ref) for h, ref in rep_cache_r1.items()]) Commit one new rep and check the rep-cache is extended. Add a newline in case the existing file doesn't end with one. Break r2 in such a way that 'recover' will discard it Recover back to r1. Check the rep-cache is pruned. Dump repository with only /COPY path included. Load repository from dump. Check log. 
Only '/COPY' is added Try to load the dumpstream, expecting a failure (because of mixed EOLs in the svn:ignore property value). Now try it again with prop normalization. We should get the normalized property value. Run the tests list all tests here, starting with None: NOTREACHED End of file. | 32,514 | en | 0.784129 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.