hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aced70aa5c8be45c529f8723fdf6795e9af3db35 | 1,107 | py | Python | python/max_pairwise_product.py | Hash-Studios/algoking | 0653e9c52b6d0bd1ec90da83b0e5aa5449b37caa | [
"BSD-3-Clause"
] | 3 | 2020-10-02T09:01:55.000Z | 2022-01-23T17:45:49.000Z | python/max_pairwise_product.py | codenameakshay/algoking | 0653e9c52b6d0bd1ec90da83b0e5aa5449b37caa | [
"BSD-3-Clause"
] | 6 | 2020-05-24T17:37:35.000Z | 2020-10-03T09:52:46.000Z | python/max_pairwise_product.py | codenameakshay/algoking | 0653e9c52b6d0bd1ec90da83b0e5aa5449b37caa | [
"BSD-3-Clause"
] | 2 | 2020-10-02T14:07:39.000Z | 2020-10-03T09:34:33.000Z | from random import randint
def max_pairwise_product_fast(n, numbers):
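    # Sorting in descending order puts the two largest values first; for the
    # non-negative inputs this exercise uses, their product is the maximum.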
numbers.sort(reverse=True)
product = numbers[0]*numbers[1]
return product
def max_pairwise_product_slow(n, numbers):
product = 0
for j in range(len(numbers)):
for k in range(len(numbers)):
if j != k and numbers[j]*numbers[k] > product:
product = numbers[j]*numbers[k]
return product
if __name__ == "__main__":
# Stress Tester
# while True:
# n = randint(2, 100)
# print(n)
# numbers = []
# for i in range(n):
# numbers.append(randint(0, 100000))
# print(numbers)
# product_f = max_pairwise_product_fast(n, numbers)
#       product_s = max_pairwise_product_slow(n, numbers)
# print(product_f, product_s)
# if product_f != product_s:
# break
# Comment the following lines if you are running stress tester
n = int(input())
numbers = input()
numbers = list(map(int, numbers.split()))
product_f = max_pairwise_product_fast(n, numbers)
print(product_f)
| 27 | 66 | 0.607949 |
aced7148c6cd04677dc557d7eb430080a7f95ce5 | 3,781 | py | Python | p0sx/pos/models/user.py | bluesnail95m/nuxis | 7539404c65972efb988e5fd2eca216f4fc59d9ab | [
"MIT"
] | 3 | 2016-04-28T10:38:43.000Z | 2020-10-05T17:46:09.000Z | p0sx/pos/models/user.py | bluesnail95m/nuxis | 7539404c65972efb988e5fd2eca216f4fc59d9ab | [
"MIT"
] | 12 | 2016-04-20T11:11:17.000Z | 2021-08-22T09:28:02.000Z | p0sx/pos/models/user.py | bluesnail95m/nuxis | 7539404c65972efb988e5fd2eca216f4fc59d9ab | [
"MIT"
] | 6 | 2016-04-28T09:47:30.000Z | 2021-02-19T15:47:36.000Z | from django.contrib.auth.models import User as DjangoUser
from django.db import models
from .stock import Order
class CreditUpdate(models.Model):
user = models.ForeignKey('User', related_name='user', on_delete=models.CASCADE)
updated_by_user = models.ForeignKey('User', related_name='updated_by_user', on_delete=models.CASCADE, blank=True, null=True)
amount = models.IntegerField()
geekevents_id = models.IntegerField(null=True, blank=True, default=None)
timestamp = models.DateTimeField(auto_now_add=True)
@classmethod
def create(cls, user, updated_by_user, amount, geekevents_id=None):
update = cls(user=user, updated_by_user=updated_by_user, amount=amount, geekevents_id=geekevents_id)
return update
@classmethod
def sumup_create(cls, user, amount, updated_by_user=None, geekevents_id=None):
update = cls(user=user, amount=amount, updated_by_user=updated_by_user, geekevents_id=geekevents_id)
return update
def __str__(self):
extra = f' from GeekEvents item {self.geekevents_id}' if self.geekevents_id is not None else ''
return f'{self.updated_by_user} added {self.amount} kr to {self.user}{extra}'
class User(models.Model):
card = models.CharField(max_length=255, unique=True, blank=False)
credit = models.IntegerField(default=0)
first_name = models.CharField(max_length=255)
last_name = models.CharField(max_length=255)
phone = models.CharField(max_length=12, blank=True)
crew = models.CharField(max_length=255, blank=True)
role = models.CharField(max_length=255, blank=True)
email = models.EmailField(blank=True)
is_cashier = models.BooleanField(default=False)
is_crew = models.BooleanField(default=False)
@property
def used(self):
orders = Order.objects.filter(user_id=self.id)
return sum([order.sum for order in orders])
@property
def left(self):
return self.credit - self.used
@classmethod
def create(cls, card, credit, first_name, last_name, phone, email):
user = cls(card=card,
credit=credit,
first_name=first_name,
last_name=last_name,
phone=phone,
email=email)
return user
def __str__(self):
if hasattr(self, 'geekeventstoken'):
return '{} {} via Geekevents SSO'.format(self.first_name, self.last_name)
return '{} {}'.format(self.first_name, self.last_name)
class Meta:
permissions = (
("update_credit", "Can update the credit limit on a user"),
("import_credit", "Can import credit from GeekEvents"),
)
class GeekeventsToken(models.Model):
token = models.CharField(max_length=255)
ge_user_id = models.CharField(max_length=255)
timestamp = models.CharField(max_length=255)
user = models.OneToOneField(
User,
on_delete=models.CASCADE,
primary_key=True,
)
@classmethod
def create(cls, user_id, timestamp, token, user):
user_token = cls(ge_user_id=user_id, timestamp=timestamp, token=token, user=user)
return user_token
def delete(self, *args, **kwargs):
self.user.delete()
super(GeekeventsToken, self).delete(*args, **kwargs)
def __str__(self):
return self.token
class Meta:
constraints = [
models.UniqueConstraint(fields=['ge_user_id'], name='GeekEvents user ID must be unique')
]
class UserSession(models.Model):
start = models.DateTimeField(auto_now_add=True)
end = models.DateTimeField()
user = models.ForeignKey(User, on_delete=models.CASCADE)
django_user = models.ForeignKey(DjangoUser, blank=True, on_delete=models.CASCADE)
| 35.009259 | 128 | 0.677334 |
aced721a38bc93e8b2c9ccfd84336fde7f48e172 | 3,741 | py | Python | household_contact_tracing/behaviours/infection/contact_rate_reduction.py | TTI-modelling/TestingContactModel | 76e0fd9ae2b2e95c330ed5e5515f6cd1ba22be55 | [
"MIT"
] | null | null | null | household_contact_tracing/behaviours/infection/contact_rate_reduction.py | TTI-modelling/TestingContactModel | 76e0fd9ae2b2e95c330ed5e5515f6cd1ba22be55 | [
"MIT"
] | null | null | null | household_contact_tracing/behaviours/infection/contact_rate_reduction.py | TTI-modelling/TestingContactModel | 76e0fd9ae2b2e95c330ed5e5515f6cd1ba22be55 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from household_contact_tracing.network import Node
from household_contact_tracing.parameterised import Parameterised
class ContactRateReduction(ABC, Parameterised):
"""
An abstract base class used to represent the highest level 'Contact Rate Reduction' behaviour.
Note: This class forms part of a 'Strategy' pattern. All child classes implement a family of possible
behaviours or strategies (ways of obtaining a contact rate reduction).
Add further child classes to add new behaviour types (strategies) that can be selected and updated at
design or run-time.
Attributes
----------
    reduce_contacts_by
        Baseline proportion of contacts stopped under normal social distancing.
    global_contact_reduction_imperfect_quarantine
        Proportion of contacts stopped by an isolated node whose isolation is
        imperfect.
    global_contact_reduction_risky_behaviour
        Proportion of contacts stopped by a node engaging in risky behaviour
        while being lateral flow tested.
Methods
-------
    get_contact_rate_reduction(self, node) -> int
        Returns a contact rate reduction, depending upon a node's current
        status and various intervention parameters.
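    Example
    -------
    A minimal sketch of adding a further strategy (hypothetical)::
        class NoContactRateReduction(ContactRateReduction):
            def get_contact_rate_reduction(self, node: Node) -> int:
                return 0  # stop no contacts at all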
"""
def __init__(self, params: dict):
self.reduce_contacts_by = 0
self.global_contact_reduction_imperfect_quarantine = 0
self.global_contact_reduction_risky_behaviour = 0
self.update_params(params)
@abstractmethod
def get_contact_rate_reduction(self, node: Node) -> int:
"""
        Returns a contact rate reduction, depending upon a node's current status and various
        intervention parameters.
        Parameters
        ----------
        node : Node
            The node that is having its contact rate reduction calculated.
        """
class ContactRateReductionHouseholdLevelTracing(ContactRateReduction):
def get_contact_rate_reduction(self, node: Node) -> int:
"""Returns a contact rate reduction, depending upon a nodes current status and various
intervention parameters
"""
if node.isolated and node.propensity_imperfect_isolation:
return self.global_contact_reduction_imperfect_quarantine
elif node.isolated and not node.propensity_imperfect_isolation:
# return 1 means 100% of contacts are stopped
return 1
else:
return self.reduce_contacts_by
class ContactRateReductionIndividualTracingDaily(ContactRateReduction):
def get_contact_rate_reduction(self, node: Node) -> int:
"""This method overrides the default behaviour. Previously the override behaviour allowed
he global contact reduction to vary by household size.
We override this behaviour, so that we can vary the global contact reduction by whether a
node is isolating or being lfa tested or whether they engage in risky behaviour while they
are being lfa tested.
Remember that a contact rate reduction of 1 implies that 100% of contacts are stopped.
"""
# the isolated status should never apply to an individual who will not uptake intervention
if node.isolated and not node.propensity_imperfect_isolation:
# perfect intervention
return 1
elif node.isolated and node.propensity_imperfect_isolation:
# imperfect intervention
return self.global_contact_reduction_imperfect_quarantine
elif node.being_lateral_flow_tested and node.propensity_risky_behaviour_lfa_testing:
# engaging in risky behaviour while testing negative
return self.global_contact_reduction_risky_behaviour
else:
# normal levels of social distancing
return self.reduce_contacts_by
| 37.787879 | 117 | 0.689922 |
aced7240a5c33946ec182f5d9a255318c90ba4e6 | 512 | py | Python | octosignblockchain/operations/meta.py | octosign/desktop-blockchain | 2915b78b0d25821f5d14871b128eef67bbdaf678 | [
"MIT"
] | null | null | null | octosignblockchain/operations/meta.py | octosign/desktop-blockchain | 2915b78b0d25821f5d14871b128eef67bbdaf678 | [
"MIT"
] | null | null | null | octosignblockchain/operations/meta.py | octosign/desktop-blockchain | 2915b78b0d25821f5d14871b128eef67bbdaf678 | [
"MIT"
] | null | null | null | from web3 import Web3
from ..config import NETWORK_URL
from ..results import with_meta_result
@with_meta_result
def meta():
"""Checks whether we have correctly working web3 client"""
try:
w3 = Web3(Web3.WebsocketProvider(NETWORK_URL))
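        # fetching the current gas price doubles as a connectivity check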
        w3.eth.gasPrice
except Exception as err:
return {
            'status': str(err),
'supports': ['application/pdf'],
}
return {
'status': True,
'supports': ['application/pdf'],
}
| 22.26087 | 62 | 0.609375 |
aced7324319f21cf64168afad01f79066d96478f | 980 | py | Python | setup.py | tonyseek/python-envcfg | bf95998681cb4215fb97810d476f1a2c5a0ed2e5 | [
"MIT"
] | 29 | 2015-01-14T06:19:47.000Z | 2021-04-15T18:22:26.000Z | setup.py | tonyseek/python-envcfg | bf95998681cb4215fb97810d476f1a2c5a0ed2e5 | [
"MIT"
] | 2 | 2016-04-25T08:15:19.000Z | 2016-07-07T09:04:44.000Z | setup.py | tonyseek/python-envcfg | bf95998681cb4215fb97810d476f1a2c5a0ed2e5 | [
"MIT"
] | 3 | 2016-07-07T08:19:31.000Z | 2017-10-26T08:16:23.000Z | from setuptools import setup, find_packages
with open('README.rst') as readme:
next(readme) # skip the first line
long_description = ''.join(readme).strip()
setup(
name='python-envcfg',
version='0.2.0',
author='Jiangge Zhang',
author_email='tonyseek@gmail.com',
description='Accessing environment variables with a magic module.',
long_description=long_description,
platforms=['Any'],
url='https://github.com/tonyseek/python-envcfg',
license='MIT',
packages=find_packages(),
keywords=['env', 'config', '12-factor'],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries',
]
)
| 30.625 | 71 | 0.631633 |
aced7356a2acd2c145b848712f4a1a727194316f | 3,592 | py | Python | example/ipythonnb/gui_setup.py | linnarsson-lab/tecancavro | 8ec7286fdde84676f04ca0c92f26d516503e363f | [
"MIT"
] | 11 | 2017-02-23T09:28:11.000Z | 2021-08-11T02:30:50.000Z | example/ipythonnb/gui_setup.py | linnarsson-lab/tecancavro | 8ec7286fdde84676f04ca0c92f26d516503e363f | [
"MIT"
] | 3 | 2015-10-29T15:15:15.000Z | 2015-11-18T13:56:21.000Z | example/ipythonnb/gui_setup.py | linnarsson-lab/tecancavro | 8ec7286fdde84676f04ca0c92f26d516503e363f | [
"MIT"
] | 12 | 2015-02-27T16:35:06.000Z | 2021-10-08T20:59:10.000Z | from __future__ import print_function
from warnings import filterwarnings
filterwarnings('ignore', module='IPython.html.widgets')
from IPython.html import widgets
from IPython.display import display, clear_output, HTML
try:
from tecancavro.models import XCaliburD
from tecancavro.transport import TecanAPISerial, TecanAPINode
except ImportError: # Support direct import from package
import sys
import os
dirn = os.path.dirname
LOCAL_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dirn(dirn(LOCAL_DIR)))
from tecancavro.models import XCaliburD
from tecancavro.transport import TecanAPISerial, TecanAPINode
def findSerialPumps():
return TecanAPISerial.findSerialPumps()
def getSerialPumps():
''' Assumes that the pumps are XCaliburD pumps and returns a list of
(<serial port>, <instantiated XCaliburD>) tuples
'''
pump_list = findSerialPumps()
return [(ser_port, XCaliburD(com_link=TecanAPISerial(0,
ser_port, 9600))) for ser_port, _, _ in pump_list]
devices = getSerialPumps()
# devices = [('/dev/tty0', '')]
device_dict = dict(devices)
valve_control = widgets.Dropdown(options=[str(x) for x in range(1,10)])
port_control = widgets.Dropdown(options=[x[0] for x in devices])
pull_volume_control = widgets.BoundedIntText(min=0, max=1000, value=0)
push_volume_control = widgets.BoundedIntText(min=0, max=1000, value=0)
notification_area = widgets.HTML("")
def update_notification(val):
notification_area.value = val
def call_button(button, f):
    button.disabled = True
    f()
    button.disabled = False
pull_button = widgets.Button(description="Extract")
def extract(arg):
global device_dict
    pull_button.disabled = True
serial_port = port_control.value
valve = int(valve_control.value)
volume = pull_volume_control.value
update_notification("Received extract for: %d μl from port %d on serial port %s" % (volume,
valve, serial_port))
    if len(devices) > 0:
device_dict[serial_port].extract(valve, volume)
pull_button.disabled = False
pull_button.on_click(extract)
pull_button.disabled = False
push_button = widgets.Button(description="Dispense")
def dispense(arg):
global device_dict
push_button.disabled = True
serial_port = port_control.value
valve = int(valve_control.value)
volume = push_volume_control.value
update_notification("Received dispense for: %d μl from port %d on serial port %s" % (volume,
valve, serial_port))
    if len(devices) > 0:
device_dict[serial_port].dispense(valve, volume)
push_button.disabled = False
push_button.on_click(dispense)
push_button.disabled = False
hbox0 = widgets.HBox()
sp_label = widgets.HTML("Serial Port: ")
sp_label.width = 100
hbox0.children = [sp_label, port_control]
hbox1 = widgets.HBox()
valve_label = widgets.HTML("Valve: ")
valve_label.width = 100
hbox1.children = [valve_label, valve_control]
hbox2 = widgets.HBox()
pull_button.width = 100
hbox2.children = [pull_button, pull_volume_control]
hbox3 = widgets.HBox()
push_button.width = 100
hbox3.children = [push_button, push_volume_control]
hbox4 = widgets.HBox()
notification_label = widgets.HTML("Notifications: ")
notification_label.width = 100
hbox4.children = [notification_label, notification_area]
notification_area.width = 600
vbox = widgets.VBox()
vbox.width = 600
vbox.children = [hbox0, hbox1, hbox2, hbox3, hbox4]
display(vbox)
# checkout
# https://github.com/ipython/ipython/blob/master/IPython/html/widgets/interaction.py
# if you want to create your own interaction situation | 31.234783 | 96 | 0.74471 |
aced740c96a52c7e8f1e8875bb44690e1ff82d91 | 376 | py | Python | chapters/functions_iterators_generators/tests/test_power.py | Srikrishna31/functional_python | 7ba37a8185234ba7d58185781b623d97671fc1d5 | [
"CC0-1.0"
] | null | null | null | chapters/functions_iterators_generators/tests/test_power.py | Srikrishna31/functional_python | 7ba37a8185234ba7d58185781b623d97671fc1d5 | [
"CC0-1.0"
] | null | null | null | chapters/functions_iterators_generators/tests/test_power.py | Srikrishna31/functional_python | 7ba37a8185234ba7d58185781b623d97671fc1d5 | [
"CC0-1.0"
] | null | null | null | import pytest
import power
def test_power_shifty():
assert power.shift(4) == 15
assert power.shift(8) == 255
def test_power_multy():
assert power.mult(4) == 15
assert power.mult(8) == 255
def test_power_faster():
assert power.fast(4) == 15
assert power.fast(8) == 255
if __name__=="__main__":
raise SystemExit(pytest.main([__file__, "-v"]))
| 17.904762 | 51 | 0.656915 |
aced7430bdd02d4d8170197841caf5b74dab4d08 | 675 | py | Python | setup.py | jvuori/ruuvitag-mqtt-publisher | 104f8f158d7e83bf048087872348f56e1f761d35 | [
"MIT"
] | null | null | null | setup.py | jvuori/ruuvitag-mqtt-publisher | 104f8f158d7e83bf048087872348f56e1f761d35 | [
"MIT"
] | null | null | null | setup.py | jvuori/ruuvitag-mqtt-publisher | 104f8f158d7e83bf048087872348f56e1f761d35 | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="ruuvitag-mqtt-publisher",
version="0.0.2",
author="Jaakko Vuori",
author_email="jaakko.vuori@gmail.com",
description="RuuviTag MQTT Publisher",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/jvuori/ruuvitag-mqtt",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
],
python_requires=">=3.5",
)
| 29.347826 | 52 | 0.662222 |
aced74571a1f13addc69807590abe4290df80712 | 4,164 | py | Python | dense_fusion/models/resnet.py | iory/dense-fusion | f08b9fc5257212a4f264d12845354f99ced57592 | [
"MIT"
] | 6 | 2020-02-27T11:25:33.000Z | 2021-06-19T05:10:47.000Z | dense_fusion/models/resnet.py | iory/dense-fusion | f08b9fc5257212a4f264d12845354f99ced57592 | [
"MIT"
] | null | null | null | dense_fusion/models/resnet.py | iory/dense-fusion | f08b9fc5257212a4f264d12845354f99ced57592 | [
"MIT"
] | 1 | 2020-12-02T11:07:40.000Z | 2020-12-02T11:07:40.000Z | import math
import torch.nn as nn
def conv3x3(in_planes, out_planes, stride=1, dilation=1):
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=False,
)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=1):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride=stride,
dilation=dilation)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes, stride=1, dilation=dilation)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None,
dilation=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(
planes,
planes,
kernel_size=3,
stride=stride,
dilation=dilation,
padding=dilation,
bias=False,
)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.relu(out)
out = self.conv2(out)
out = self.relu(out)
out = self.conv3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers=(3, 4, 23, 3)):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=1, dilation=2)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=1, dilation=4)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2.0 / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
)
)
layers = [block(self.inplanes, planes, stride, downsample)]
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, dilation=dilation))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x_3 = self.layer3(x)
x = self.layer4(x_3)
return x, x_3
def resnet18():
model = ResNet(BasicBlock, [2, 2, 2, 2])
return model
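# A minimal usage sketch (the input size is an arbitrary assumption; requires
# ``import torch``):
#
#     model = resnet18()
#     features, layer3_features = model(torch.rand(1, 3, 128, 128))
#
# ``forward`` returns the layer4 output together with the layer3 output.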
| 27.576159 | 77 | 0.54755 |
aced7577db5d5f1911dbe6be28e1a7ca71d43d96 | 5,163 | py | Python | esmvaltool/total_column/loadbc.py | mnichol3/emip | fcd5c9fff7f6fdeee5aed3115b8ac63656c9d91d | [
"MIT"
] | null | null | null | esmvaltool/total_column/loadbc.py | mnichol3/emip | fcd5c9fff7f6fdeee5aed3115b8ac63656c9d91d | [
"MIT"
] | null | null | null | esmvaltool/total_column/loadbc.py | mnichol3/emip | fcd5c9fff7f6fdeee5aed3115b8ac63656c9d91d | [
"MIT"
] | 1 | 2021-12-15T15:57:44.000Z | 2021-12-15T15:57:44.000Z | """Derivation of variable ``loadbc``."""
import warnings
import cf_units
import iris
from scipy import constants
from .._regrid import extract_levels, regrid
from ._baseclass import DerivedVariableBase
from ._shared import pressure_level_widths
# Constants
STANDARD_GRAVITY = constants.value('standard acceleration of gravity')
STANDARD_GRAVITY_UNIT = constants.unit('standard acceleration of gravity')
def ensure_correct_lon(mmrbc_cube, ps_cube=None):
"""Ensure that ``mmrbc`` cube contains ``longitude`` and adapt ``ps`` cube."""
if mmrbc_cube.coords('longitude'):
return (mmrbc_cube, ps_cube)
# Get zonal mean ps if necessary
if ps_cube is not None:
ps_cube = ps_cube.collapsed('longitude', iris.analysis.MEAN)
ps_cube.remove_coord('longitude')
# Add longitude dimension to mmrbc (and ps if necessary) with length 1
cubes = (mmrbc_cube, ps_cube)
new_cubes = []
lon_coord = iris.coords.DimCoord([180.0], bounds=[[0.0, 360.0]],
var_name='lon',
standard_name='longitude',
long_name='longitude',
units='degrees_east')
for cube in cubes:
if cube is None:
new_cubes.append(None)
continue
new_dim_coords = [(c, cube.coord_dims(c)) for c in cube.dim_coords]
new_dim_coords.append((lon_coord, cube.ndim))
new_aux_coords = [(c, cube.coord_dims(c)) for c in cube.aux_coords]
new_cube = iris.cube.Cube(cube.core_data()[..., None],
dim_coords_and_dims=new_dim_coords,
aux_coords_and_dims=new_aux_coords)
new_cube.metadata = cube.metadata
new_cubes.append(new_cube)
return tuple(new_cubes)
def interpolate_hybrid_plevs(cube):
"""Interpolate hybrid pressure levels."""
# Use CMIP6's plev19 target levels (in Pa)
target_levels = [
100000.0,
92500.0,
85000.0,
70000.0,
60000.0,
50000.0,
40000.0,
30000.0,
25000.0,
20000.0,
15000.0,
10000.0,
7000.0,
5000.0,
3000.0,
2000.0,
1000.0,
500.0,
100.0,
]
cube.coord('air_pressure').convert_units('Pa')
cube = extract_levels(cube, target_levels, 'linear',
coordinate='air_pressure')
return cube
class DerivedVariable(DerivedVariableBase):
"""Derivation of variable ``loadbc``."""
@staticmethod
def required(project):
"""Declare the variables needed for derivation."""
# TODO: make get_required _derive/__init__.py use variables as argument
# and make this dependent on mip
if project == 'CMIP6':
required = [
{'short_name': 'mmrbc', 'mip': 'AERmon'},
{'short_name': 'ps', 'mip': 'Amon'},
]
else:
required = [
{'short_name': 'mmrbc'},
{'short_name': 'ps'},
]
return required
@staticmethod
def calculate(cubes):
"""Compute total column black carbon.
Note
----
The surface pressure is used as a lower integration bound. A fixed
upper integration bound of 0 Pa is used.
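        In formula form (matching the code below)::
            loadbc = (1 / g) * sum_over_levels(mmrbc * dp)
        where ``dp`` are the pressure level widths and ``g`` is the standard
        acceleration of gravity.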
"""
mmrbc_cube = cubes.extract_strict(
iris.Constraint(name='mass_fraction_of_elemental_carbon_dry_aerosol_particles_in_air'))
ps_cube = cubes.extract_strict(
iris.Constraint(name='surface_air_pressure'))
# If mmrbc is given on hybrid pressure levels (e.g., from Table AERmon),
# interpolate it to regular pressure levels
if len(mmrbc_cube.coord_dims('air_pressure')) > 1:
mmrbc_cube = interpolate_hybrid_plevs(mmrbc_cube)
# To support zonal mean mmrbc (e.g., from Table AERmon), add longitude
# coordinate if necessary and ensure that ps has correct shape
(mmrbc_cube, ps_cube) = ensure_correct_lon(mmrbc_cube, ps_cube=ps_cube)
# If the horizontal dimensions of ps and mmrbc differ, regrid ps
# Note: regrid() checks if the regridding is really necessary before
# running the actual interpolation
ps_cube = regrid(ps_cube, mmrbc_cube, 'linear')
# Actual derivation of loadbc using black carbon mass fraction and pressure level
# widths
p_layer_widths = pressure_level_widths(mmrbc_cube,
ps_cube,
top_limit=0.0)
loadbc_cube = (mmrbc_cube * p_layer_widths / STANDARD_GRAVITY)
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', category=UserWarning,
message='Collapsing a non-contiguous coordinate')
loadbc_cube = loadbc_cube.collapsed('air_pressure', iris.analysis.SUM)
loadbc_cube.units = (mmrbc_cube.units * p_layer_widths.units /
STANDARD_GRAVITY_UNIT)
return loadbc_cube
| 35.363014 | 99 | 0.597133 |
aced75ead90d4184bfbf4c398ba03367bdd32443 | 5,287 | py | Python | libraries/layouts/keyboard_layout_win_tr.py | Fescron/Circuitpython_Keyboard_Layouts | 3b4003ea71b4a7b2997652baf9c1d66528888d5c | [
"MIT"
] | null | null | null | libraries/layouts/keyboard_layout_win_tr.py | Fescron/Circuitpython_Keyboard_Layouts | 3b4003ea71b4a7b2997652baf9c1d66528888d5c | [
"MIT"
] | null | null | null | libraries/layouts/keyboard_layout_win_tr.py | Fescron/Circuitpython_Keyboard_Layouts | 3b4003ea71b4a7b2997652baf9c1d66528888d5c | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Neradoc NeraOnGit@ri1.fr
#
# SPDX-License-Identifier: MIT
"""
This file was automatically generated using Circuitpython_Keyboard_Layouts
"""
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/Neradoc/Circuitpython_Keyboard_Layouts.git"
from adafruit_hid.keyboard_layout_base import KeyboardLayoutBase
class KeyboardLayout(KeyboardLayoutBase):
ASCII_TO_KEYCODE = (
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x2a' # BACKSPACE
b'\x2b' # '\t'
b'\x28' # '\n'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x29' # ESC
b'\x00'
b'\x00'
b'\x00'
b'\x00'
b'\x2c' # ' '
b'\x9e' # '!'
b'\x35' # '"'
b'\x20' # '#'
b'\x21' # '$'
b'\xa2' # '%'
b'\xa3' # '&'
b'\x9f' # "'"
b'\xa5' # '('
b'\xa6' # ')'
b'\x2d' # '*'
b'\xa1' # '+'
b'\x31' # ','
b'\x2e' # '-'
b'\x38' # '.'
b'\xa4' # '/'
b'\x27' # '0'
b'\x1e' # '1'
b'\x1f' # '2'
b'\x20' # '3'
b'\x21' # '4'
b'\x22' # '5'
b'\x23' # '6'
b'\x24' # '7'
b'\x25' # '8'
b'\x26' # '9'
b'\xb8' # ':'
b'\xb1' # ';'
b'\x35' # '<'
b'\xa7' # '='
b'\x1e' # '>'
b'\xad' # '?'
b'\x14' # '@'
b'\x84' # 'A'
b'\x85' # 'B'
b'\x86' # 'C'
b'\x87' # 'D'
b'\x88' # 'E'
b'\x89' # 'F'
b'\x8a' # 'G'
b'\x8b' # 'H'
b'\x8c' # 'I'
b'\x8d' # 'J'
b'\x8e' # 'K'
b'\x8f' # 'L'
b'\x90' # 'M'
b'\x91' # 'N'
b'\x92' # 'O'
b'\x93' # 'P'
b'\x94' # 'Q'
b'\x95' # 'R'
b'\x96' # 'S'
b'\x97' # 'T'
b'\x98' # 'U'
b'\x99' # 'V'
b'\x9a' # 'W'
b'\x9b' # 'X'
b'\x9c' # 'Y'
b'\x9d' # 'Z'
b'\x25' # '['
b'\x2d' # '\\'
b'\x26' # ']'
b'\x00'
b'\xae' # '_'
b'\x00' # '`' (Dead key)
b'\x04' # 'a'
b'\x05' # 'b'
b'\x06' # 'c'
b'\x07' # 'd'
b'\x08' # 'e'
b'\x09' # 'f'
b'\x0a' # 'g'
b'\x0b' # 'h'
b'\x0c' # 'i'
b'\x0d' # 'j'
b'\x0e' # 'k'
b'\x0f' # 'l'
b'\x10' # 'm'
b'\x11' # 'n'
b'\x12' # 'o'
b'\x13' # 'p'
b'\x14' # 'q'
b'\x15' # 'r'
b'\x16' # 's'
b'\x17' # 't'
b'\x18' # 'u'
b'\x19' # 'v'
b'\x1a' # 'w'
b'\x1b' # 'x'
b'\x1c' # 'y'
b'\x1d' # 'z'
b'\x24' # '{'
b'\x2e' # '|'
b'\x27' # '}'
b'\x00' # '~' (Dead key)
b'\x00'
)
NEED_ALTGR = '#$<>@[\\]i{|}£½ßæ€₺'
HIGHER_ASCII = {
0xa3: 0x1f, # '£'
0xbd: 0x22, # '½'
0x20ac: 0x08, # '€'
0x20ba: 0x17, # '₺'
0x131: 0x0c, # 'ı'
0x11f: 0x2f, # 'ğ'
0x11e: 0xaf, # 'Ğ'
0xfc: 0x30, # 'ü'
0xdc: 0xb0, # 'Ü'
0xe6: 0x04, # 'æ'
0xdf: 0x16, # 'ß'
0x15f: 0x33, # 'ş'
0x15e: 0xb3, # 'Ş'
0x130: 0xb4, # 'İ'
0xe9: 0xb5, # 'é'
0xf6: 0x36, # 'ö'
0xd6: 0xb6, # 'Ö'
0xe7: 0x37, # 'ç'
0xc7: 0xb7, # 'Ç'
}
COMBINED_KEYS = {
0xe2: 0xa061, # 'â'
0xea: 0xa065, # 'ê'
0xee: 0xa0131, # 'î'
0xf4: 0xa06f, # 'ô'
0xfb: 0xa075, # 'û'
0xc2: 0xa041, # 'Â'
0xca: 0xa045, # 'Ê'
0xce: 0xa0130, # 'Î'
0xd4: 0xa04f, # 'Ô'
0xdb: 0xa055, # 'Û'
0x5e: 0xa020, # '^'
0xe4: 0x2fe1, # 'ä'
0xeb: 0x2fe5, # 'ë'
0xef: 0x2f1b1, # 'ï'
0xf6: 0x2fef, # 'ö'
0xfc: 0x2ff5, # 'ü'
0xc4: 0x2fc1, # 'Ä'
0xcb: 0x2fc5, # 'Ë'
0xcf: 0x2f1b0, # 'Ï'
0xd6: 0x2fcf, # 'Ö'
0xdc: 0x2fd5, # 'Ü'
0xa8: 0x2fa0, # '¨'
0xe3: 0x30e1, # 'ã'
0xf5: 0x30ef, # 'õ'
0xf1: 0x30ee, # 'ñ'
0xc3: 0x30c1, # 'Ã'
0xd5: 0x30cf, # 'Õ'
0xd1: 0x30ce, # 'Ñ'
0x7e: 0x30a0, # '~'
0xe1: 0x33e1, # 'á'
0xe9: 0x33e5, # 'é'
0xed: 0x331b1, # 'í'
0xf3: 0x33ef, # 'ó'
0xfa: 0x33f5, # 'ú'
0xc1: 0x33c1, # 'Á'
0xc9: 0x33c5, # 'É'
0xcd: 0x331b0, # 'Í'
0xd3: 0x33cf, # 'Ó'
0xda: 0x33d5, # 'Ú'
0xb4: 0x33a0, # '´'
0xe0: 0x31e1, # 'à'
0xe8: 0x31e5, # 'è'
0xec: 0x311b1, # 'ì'
0xf2: 0x31ef, # 'ò'
0xf9: 0x31f5, # 'ù'
0xc0: 0x31c1, # 'À'
0xc8: 0x31c5, # 'È'
0xcc: 0x311b0, # 'Ì'
0xd2: 0x31cf, # 'Ò'
0xd9: 0x31d5, # 'Ù'
0x60: 0x31a0, # '`'
}
| 24.031818 | 74 | 0.32457 |
aced7604b505641cfd261f5b47527641b0238439 | 768 | py | Python | cornac/models/ncf/__init__.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/ncf/__init__.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | null | null | null | cornac/models/ncf/__init__.py | redhat6/cornac | 856cf0f546a0dc6b46f407128d89ef2534994c60 | [
"Apache-2.0"
] | 1 | 2020-03-19T13:58:33.000Z | 2020-03-19T13:58:33.000Z | # Copyright 2018 The Cornac Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from .recom_gmf import GMF
from .recom_mlp import MLP
from .recom_neumf import NeuMF | 42.666667 | 78 | 0.692708 |
aced760fb132bc0d3345e967e8e6f83b40984725 | 1,816 | py | Python | aoc2021/day13.py | aod/advent-of-python | 391f1f7317951c59900c9c7a79127a3c8b4a43cf | [
"MIT"
] | null | null | null | aoc2021/day13.py | aod/advent-of-python | 391f1f7317951c59900c9c7a79127a3c8b4a43cf | [
"MIT"
] | null | null | null | aoc2021/day13.py | aod/advent-of-python | 391f1f7317951c59900c9c7a79127a3c8b4a43cf | [
"MIT"
] | null | null | null | from typing import List, Set, Tuple, cast
from dataclasses import dataclass
from aocd import data
XY = Tuple[int, int]
@dataclass
class Paper():
dots: Set[XY]
max_x: int
max_y: int
def fold(self, axis: int, n: int) -> 'Paper':
dots: Set[XY] = set()
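        # Mirroring uses max - coord, which relies on the fold line sitting
        # exactly in the middle (2*n == current max along the folded axis),
        # as the puzzle input guarantees.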
for dot in self.dots:
if dot[axis] < n:
dots.add(dot)
else:
x, y = dot
if axis == 1:
dots.add((x, self.max_y - y))
else:
dots.add((self.max_x - x, y))
max_x = self.max_x - n-1 if axis == 0 else self.max_x
max_y = self.max_y - n-1 if axis == 1 else self.max_y
return Paper(dots, max_x, max_y)
def __str__(self):
s = ""
for y in range(self.max_y+1):
for x in range(self.max_x+1):
s += "█" if (x, y) in self.dots else "."
s += "\n"
return s
def parse(input: str = data) -> Tuple[Paper, List[Tuple[int, int]]]:
split = input.split("\n\n")
dots: Set[XY] = set()
for line in split[0].splitlines():
dots.add(cast(XY, tuple(int(x) for x in line.split(","))))
folds: List[Tuple[int, int]] = []
for fold in split[1].splitlines():
axis, n = fold.split(" ")[2].split("=")
folds.append((0 if axis == "x" else 1, int(n)))
return (Paper(dots, max(x for x, _ in dots), max(y for _, y in dots)), folds)
def part1(input: str = data):
paper, folds = parse(input)
paper = paper.fold(*folds[0])
return len(paper.dots)
def part2(input: str = data):
paper, folds = parse(input)
for axis, n in folds:
paper = paper.fold(axis, n)
return str(paper)
if __name__ == "__main__":
print(f"Part 1: {part1()}")
print(f"Part 2:\n{part2()}")
| 24.876712 | 81 | 0.520925 |
aced76d95aa17bcd99539dcc6be03a4f94628be5 | 2,439 | py | Python | aries_cloudagent/messaging/connections/models/connection_target.py | DibbsZA/aries-cloudagent-python | a094dd7697023721ac2a2fd4e58b04d4b37d1f44 | [
"Apache-2.0"
] | 7 | 2020-07-07T15:44:41.000Z | 2022-03-26T21:20:41.000Z | aries_cloudagent/messaging/connections/models/connection_target.py | totemprotocol/aries-fl | dd78dcebc771971abfee301b80cdd5d246c14840 | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/messaging/connections/models/connection_target.py | totemprotocol/aries-fl | dd78dcebc771971abfee301b80cdd5d246c14840 | [
"Apache-2.0"
] | 2 | 2019-12-02T18:59:07.000Z | 2020-06-03T18:58:20.000Z | """Record used to handle routing of messages to another agent."""
from typing import Sequence
from marshmallow import fields
from ...models.base import BaseModel, BaseModelSchema
from ...valid import INDY_DID, INDY_RAW_PUBLIC_KEY
class ConnectionTarget(BaseModel):
"""Record used to handle routing of messages to another agent."""
class Meta:
"""ConnectionTarget metadata."""
schema_class = "ConnectionTargetSchema"
def __init__(
self,
*,
did: str = None,
endpoint: str = None,
label: str = None,
recipient_keys: Sequence[str] = None,
routing_keys: Sequence[str] = None,
sender_key: str = None,
):
"""
Initialize a ConnectionTarget instance.
Args:
            did: A DID for the connection
            endpoint: An endpoint for the connection
            label: A label for the connection
            recipient_keys: A list of recipient keys
            routing_keys: A list of routing keys
            sender_key: A sender public key
"""
self.did = did
self.endpoint = endpoint
self.label = label
self.recipient_keys = list(recipient_keys) if recipient_keys else []
self.routing_keys = list(routing_keys) if routing_keys else []
self.sender_key = sender_key
class ConnectionTargetSchema(BaseModelSchema):
"""ConnectionTarget schema."""
class Meta:
"""ConnectionTargetSchema metadata."""
model_class = ConnectionTarget
did = fields.Str(
required=False,
description="",
**INDY_DID
)
endpoint = fields.Str(
required=False,
description="Connection endpoint",
example="http://192.168.56.102:8020",
)
label = fields.Str(
required=False,
description="Connection label",
example="Bob"
)
recipient_keys = fields.List(
fields.Str(
description="Recipient public key",
**INDY_RAW_PUBLIC_KEY
),
required=False,
description="List of recipient keys"
)
routing_keys = fields.List(
fields.Str(
description="Routing key",
**INDY_RAW_PUBLIC_KEY
),
data_key="routingKeys",
required=False,
description="List of routing keys",
)
sender_key = fields.Str(
required=False,
description="Sender public key",
**INDY_RAW_PUBLIC_KEY
)
| 26.51087 | 76 | 0.603116 |
aced77c350002e6c8bcfed366448fbc34bfd0680 | 139 | py | Python | FadingMemory/Frontend/fmflask/test.py | avishaym/FadingMemoriesCamera | c8af71990519b5715b86f12d527fe767cfa30664 | [
"MIT"
] | 1 | 2021-08-02T14:04:34.000Z | 2021-08-02T14:04:34.000Z | FadingMemory/Frontend/fmflask/test.py | avishaym/FadingMemoriesCamera | c8af71990519b5715b86f12d527fe767cfa30664 | [
"MIT"
] | null | null | null | FadingMemory/Frontend/fmflask/test.py | avishaym/FadingMemoriesCamera | c8af71990519b5715b86f12d527fe767cfa30664 | [
"MIT"
] | null | null | null | from memories import generate_memory
approved = "1"
idx, imagefile = generate_memory(approved)
print "result is", idx, "image", imagefile
| 23.166667 | 42 | 0.769784 |
aced780f7dfaaba616840cab111b4351cad08f5c | 477 | py | Python | tests/compat/test_passive.py | psumesh/nmigen | 7d611b8fc1d9e58853ff268ec38ff8f4131a9774 | [
"BSD-2-Clause"
] | 528 | 2020-01-28T18:21:00.000Z | 2021-12-09T06:27:51.000Z | tests/compat/test_passive.py | psumesh/nmigen | 7d611b8fc1d9e58853ff268ec38ff8f4131a9774 | [
"BSD-2-Clause"
] | 360 | 2020-01-28T18:34:30.000Z | 2021-12-10T08:03:32.000Z | tests/compat/test_passive.py | psumesh/nmigen | 7d611b8fc1d9e58853ff268ec38ff8f4131a9774 | [
"BSD-2-Clause"
] | 100 | 2020-02-06T21:55:46.000Z | 2021-11-25T19:20:44.000Z | import unittest
from amaranth.compat import *
class PassiveCase(unittest.TestCase):
def test_terminates_correctly(self):
n = 5
count = 0
@passive
def counter():
nonlocal count
while True:
yield
count += 1
def terminator():
for i in range(n):
yield
run_simulation(Module(), [counter(), terminator()])
self.assertEqual(count, n)
| 19.875 | 59 | 0.519916 |
aced78461aff446fbdc84f006bc6fdd0319cfb74 | 643 | py | Python | spacy/tests/lang/ml/test_text.py | snosrap/spaCy | 3f68bbcfec44ef55d101e6db742d353b72652129 | [
"MIT"
] | 22,040 | 2016-10-03T11:58:15.000Z | 2022-03-31T21:08:19.000Z | spacy/tests/lang/ml/test_text.py | snosrap/spaCy | 3f68bbcfec44ef55d101e6db742d353b72652129 | [
"MIT"
] | 6,927 | 2016-10-03T13:11:11.000Z | 2022-03-31T17:01:25.000Z | spacy/tests/lang/ml/test_text.py | snosrap/spaCy | 3f68bbcfec44ef55d101e6db742d353b72652129 | [
"MIT"
] | 4,403 | 2016-10-04T03:36:33.000Z | 2022-03-31T14:12:34.000Z | import pytest
def test_ml_tokenizer_handles_long_text(ml_tokenizer):
text = """അനാവശ്യമായി കണ്ണിലും മൂക്കിലും വായിലും സ്പർശിക്കാതിരിക്കുക"""
tokens = ml_tokenizer(text)
assert len(tokens) == 5
@pytest.mark.parametrize(
"text,length",
[
(
"എന്നാൽ അച്ചടിയുടെ ആവിർഭാവം ലിപിയിൽ കാര്യമായ മാറ്റങ്ങൾ വരുത്തിയത് കൂട്ടക്ഷരങ്ങളെ അണുഅക്ഷരങ്ങളായി പിരിച്ചുകൊണ്ടായിരുന്നു",
10,
),
("പരമ്പരാഗതമായി മലയാളം ഇടത്തുനിന്ന് വലത്തോട്ടാണ് എഴുതുന്നത്", 5),
],
)
def test_ml_tokenizer_handles_cnts(ml_tokenizer, text, length):
tokens = ml_tokenizer(text)
assert len(tokens) == length
| 27.956522 | 133 | 0.566096 |
aced7928ed77a6d1447cbbc35de4dff5f80c4ae4 | 136 | py | Python | if_else.py | Sukhbir-4259/pythonABC | 273aea37506b71301c0782c6ff27db8da5e1829c | [
"MIT"
] | null | null | null | if_else.py | Sukhbir-4259/pythonABC | 273aea37506b71301c0782c6ff27db8da5e1829c | [
"MIT"
] | null | null | null | if_else.py | Sukhbir-4259/pythonABC | 273aea37506b71301c0782c6ff27db8da5e1829c | [
"MIT"
] | null | null | null | password = 'python'
if password == 'python': # condition
print('Access granted.') # True
else:
print('Wrong password') # False
| 19.428571 | 36 | 0.647059 |
aced79cf9a8ad1d8b888a51882d8e3edf4fc902d | 9,215 | py | Python | parsers/US_NEISO.py | NoraLuisa/electricitymap-contrib | 061b6e8ea69222a21a03e8aa144ebad9964bba79 | [
"MIT"
] | null | null | null | parsers/US_NEISO.py | NoraLuisa/electricitymap-contrib | 061b6e8ea69222a21a03e8aa144ebad9964bba79 | [
"MIT"
] | null | null | null | parsers/US_NEISO.py | NoraLuisa/electricitymap-contrib | 061b6e8ea69222a21a03e8aa144ebad9964bba79 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""Real time parser for the New England ISO (NEISO) area."""
import arrow
from collections import defaultdict
import logging
import requests
import time
url = 'https://www.iso-ne.com/ws/wsclient'
generation_mapping = {
'Coal': 'coal',
'NaturalGas': 'gas',
'Wind': 'wind',
'Hydro': 'hydro',
'Nuclear': 'nuclear',
'Wood': 'biomass',
'Oil': 'oil',
'Refuse': 'biomass',
'LandfillGas': 'biomass',
'Solar': 'solar'
}
def timestring_converter(time_string):
"""Converts ISO-8601 time strings in neiso data into aware datetime objects."""
dt_naive = arrow.get(time_string)
dt_aware = dt_naive.replace(tzinfo='America/New_York').datetime
return dt_aware
def get_json_data(target_datetime, params, session=None):
"""Fetches json data for requested params and target_datetime using a post request."""
epoch_time = str(int(time.time()))
# when target_datetime is None, arrow.get(None) will return current time
target_datetime = arrow.get(target_datetime)
target_ne = target_datetime.to('America/New_York')
target_ne_day = target_ne.format('MM/DD/YYYY')
postdata = {
'_nstmp_formDate': epoch_time,
'_nstmp_startDate': target_ne_day,
'_nstmp_endDate': target_ne_day,
'_nstmp_twodays': 'false',
'_nstmp_showtwodays': 'false'
}
postdata.update(params)
s = session or requests.Session()
req = s.post(url, data=postdata)
json_data = req.json()
raw_data = json_data[0]['data']
return raw_data
def production_data_processer(raw_data, logger):
"""
Takes raw json data and removes unnecessary keys.
Separates datetime key and converts to a datetime object.
Maps generation to type and returns a list of tuples.
"""
other_keys = {'BeginDateMs', 'Renewables', 'BeginDate', 'Other'}
known_keys = generation_mapping.keys() | other_keys
unmapped = set()
clean_data = []
counter = 0
for datapoint in raw_data:
current_keys = datapoint.keys() | set()
unknown_keys = current_keys - known_keys
unmapped = unmapped | unknown_keys
keys_to_remove = {'BeginDateMs', 'Renewables'} | unknown_keys
for k in keys_to_remove:
datapoint.pop(k, None)
time_string = datapoint.pop('BeginDate', None)
if time_string:
dt = timestring_converter(time_string)
else:
            # datapoint is missing a timestamp, so count it and skip it
counter += 1
logger.warning('Skipping US-NEISO datapoint missing timestamp.', extra={'key': 'US-NEISO'})
continue
# neiso storage flow signs are opposite to EM
battery_storage = -1*datapoint.pop('Other', 0.0)
production = defaultdict(lambda: 0.0)
for k, v in datapoint.items():
# Need to avoid duplicate keys overwriting.
production[generation_mapping[k]] += v
# move small negative values to 0
for k, v in production.items():
if -5 < v < 0:
production[k] = 0
clean_data.append((dt, dict(production), battery_storage))
for key in unmapped:
logger.warning('Key \'{}\' in US-NEISO is not mapped to type.'.format(key), extra={'key': 'US-NEISO'})
if counter > 0:
logger.warning('Skipped {} US-NEISO datapoints that were missing timestamps.'.format(counter), extra={'key': 'US-NEISO'})
return sorted(clean_data)
def fetch_production(zone_key='US-NEISO', session=None, target_datetime=None, logger=logging.getLogger(__name__)):
"""
Requests the last known production mix (in MW) of a given country
Arguments:
zone_key: specifies which zone to get
session: request session passed in order to re-use an existing session
target_datetime: the datetime for which we want production data. If not provided, we should
default it to now. The provided target_datetime is timezone-aware in UTC.
logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically
Return:
A list of dictionaries in the form:
{
'zoneKey': 'FR',
'datetime': '2017-01-01T00:00:00Z',
'production': {
'biomass': 0.0,
'coal': 0.0,
'gas': 0.0,
'hydro': 0.0,
'nuclear': null,
'oil': 0.0,
'solar': 0.0,
'wind': 0.0,
'geothermal': 0.0,
'unknown': 0.0
},
'storage': {
'hydro': -10.0,
},
'source': 'mysource.com'
}
"""
postdata = {
'_nstmp_chartTitle': 'Fuel+Mix+Graph',
'_nstmp_requestType': 'genfuelmix',
'_nstmp_fuelType': 'all',
'_nstmp_height': '250'
}
production_json = get_json_data(target_datetime, postdata, session)
points = production_data_processer(production_json, logger)
# Hydro pumped storage is included within the general hydro category.
production_mix = []
for item in points:
data = {
'zoneKey': zone_key,
'datetime': item[0],
'production': item[1],
'storage': {
'hydro': None,
'battery': item[2]
},
'source': 'iso-ne.com'
}
production_mix.append(data)
return production_mix
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):
"""Requests the last known power exchange (in MW) between two zones
Arguments:
zone_key1, zone_key2: specifies which exchange to get
session (optional): request session passed in order to re-use an existing session
target_datetime: the datetime for which we want production data. If not provided, we should
default it to now. The provided target_datetime is timezone-aware in UTC.
logger: an instance of a `logging.Logger`; all raised exceptions are also logged automatically
Return:
A list of dictionaries in the form:
[{
'sortedZoneKeys': 'CA-QC->US-NEISO',
'datetime': '2017-01-01T00:00:00Z',
'netFlow': 0.0,
'source': 'mysource.com'
}]
"""
sorted_zone_keys = '->'.join(sorted([zone_key1, zone_key2]))
# For directions, note that ISO-NE always reports its import as negative
if sorted_zone_keys == 'CA-NB->US-NEISO':
# CA-NB->US-NEISO means import to NEISO should be positive
multiplier = -1
postdata = {
'_nstmp_zone0': '4010' # ".I.SALBRYNB345 1"
}
elif sorted_zone_keys == "CA-QC->US-NEISO":
# CA-QC->US-NEISO means import to NEISO should be positive
multiplier = -1
postdata = {
'_nstmp_zone0': '4012', # ".I.HQ_P1_P2345 5"
'_nstmp_zone1': '4013' # ".I.HQHIGATE120 2"
}
elif sorted_zone_keys == 'US-NEISO->US-NY':
# US-NEISO->US-NY means import to NEISO should be negative
multiplier = 1
postdata = {
'_nstmp_zone0': '4014', # ".I.SHOREHAM138 99"
'_nstmp_zone1': '4017', # ".I.NRTHPORT138 5"
'_nstmp_zone2': '4011' # ".I.ROSETON 345 1"
}
else:
raise Exception('Exchange pair not supported: {}'.format(sorted_zone_keys))
postdata['_nstmp_requestType'] = 'externalflow'
exchange_data = get_json_data(target_datetime, postdata, session)
summed_exchanges = defaultdict(int)
for exchange_key, exchange_values in exchange_data.items():
# sum up values from separate "exchanges" for the same date.
# this works because the timestamp of exchanges is always reported
# in exact 5-minute intervals by the API,
# e.g. "2018-03-18T00:05:00.000-04:00"
for datapoint in exchange_values:
dt = timestring_converter(datapoint['BeginDate'])
summed_exchanges[dt] += datapoint['Actual']
result = [
{
'datetime': timestamp,
'sortedZoneKeys': sorted_zone_keys,
'netFlow': value * multiplier,
'source': 'iso-ne.com'
}
for timestamp, value in summed_exchanges.items()
]
return result
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
from pprint import pprint
print('fetch_production() ->')
pprint(fetch_production())
print('fetch_production(target_datetime=arrow.get("2017-12-31T12:00Z") ->')
pprint(fetch_production(target_datetime=arrow.get('2017-12-31T12:00Z')))
print('fetch_production(target_datetime=arrow.get("2007-03-13T12:00Z") ->')
pprint(fetch_production(target_datetime=arrow.get('2007-03-13T12:00Z')))
print('fetch_exchange("US-NEISO", "CA-QC") ->')
pprint(fetch_exchange("US-NEISO", "CA-QC"))
print('fetch_exchange("US-NEISO", "CA-QC", target_datetime=arrow.get("2017-12-31T12:00Z")) ->')
pprint(fetch_exchange("US-NEISO", "CA-QC", target_datetime=arrow.get("2017-12-31T12:00Z")))
print('fetch_exchange("US-NEISO", "CA-QC", target_datetime=arrow.get("2007-03-13T12:00Z")) ->')
pprint(fetch_exchange("US-NEISO", "CA-QC", target_datetime=arrow.get("2007-03-13T12:00Z")))
| 32.677305 | 129 | 0.628649 |
aced79d6295860154a7fa812038f27b5956e8be5 | 515 | py | Python | tests/runtime/exponent.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | 7 | 2021-08-28T18:20:45.000Z | 2022-02-01T07:35:59.000Z | tests/runtime/exponent.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | null | null | null | tests/runtime/exponent.py | yangdanny97/chocopy-python-compiler | 588cba0cb330bd63f00e06420a32ba47c25c4468 | [
"MIT"
] | 2 | 2022-02-05T06:16:16.000Z | 2022-02-24T11:07:09.000Z | # Compute x**y
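# The helper f multiplies the nonlocal accumulator a by x once per recursive
# call, so after y calls a equals x**y (this exercises nested functions and
# nonlocal in ChocoPy).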
def exp(x: int, y: int) -> int:
a: int = 0
def f(i: int) -> int:
nonlocal a
def geta() -> int:
return a
if i <= 0:
return geta()
else:
a = a * x
return f(i-1)
a = 1
return f(y)
# Input parameter
# n:int = 42
# # Run [0, n]
# i:int = 0
# # Crunch
# while i <= n:
# print(exp(2, i % 31))
# i = i + 1
# __assert__(exp(2,3) == 8)
# __assert__(exp(3,3) == 27)
# __assert__(exp(3,4) == 81)
# __assert__(exp(4,4) == 256)
# __assert__(exp(5,1) == 5)
# __assert__(exp(1,99) == 1) | 16.09375 | 31 | 0.518447 |
aced7a3519ed4cecc8c5898fffeb3754f48a40f0 | 310 | py | Python | Level 3/prepare-the-bunnies-escape/bomb-baby/solution.py | shubhamnag14/Google-foobar-2020 | cee8539cab09fad0cf257f2d4a40ec839d3df4a8 | [
"Apache-2.0"
] | null | null | null | Level 3/prepare-the-bunnies-escape/bomb-baby/solution.py | shubhamnag14/Google-foobar-2020 | cee8539cab09fad0cf257f2d4a40ec839d3df4a8 | [
"Apache-2.0"
] | null | null | null | Level 3/prepare-the-bunnies-escape/bomb-baby/solution.py | shubhamnag14/Google-foobar-2020 | cee8539cab09fad0cf257f2d4a40ec839d3df4a8 | [
"Apache-2.0"
] | null | null | null | def solution(M, F):
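    # Work backwards from (M, F) to (1, 1): the larger count can only have
    # come from repeated additions of the smaller one, so batch those steps
    # with integer division (Euclidean-algorithm style). long() keeps the
    # arithmetic exact in this Python 2 solution.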
m, f = long(M), long(F)
total = 0
while not (m == 1 and f == 1):
if f <= 0 or m <= 0:
return "impossible"
if f == 1:
return str(total + m - 1)
else:
total += long(m/f)
m, f = f, m % f
return str(total)
| 23.846154 | 37 | 0.4 |
aced7ac8a17db78343c5aa9e5eeaea0365e21edf | 18,188 | py | Python | mantra_mixer/mixer.py | bossauh/mantra-mixer | 6116bc0d620c2e98656a4c349a2fcc6dbed6c955 | [
"MIT"
] | null | null | null | mantra_mixer/mixer.py | bossauh/mantra-mixer | 6116bc0d620c2e98656a4c349a2fcc6dbed6c955 | [
"MIT"
] | null | null | null | mantra_mixer/mixer.py | bossauh/mantra-mixer | 6116bc0d620c2e98656a4c349a2fcc6dbed6c955 | [
"MIT"
] | null | null | null | import os
import threading
import time
import math
from pathlib import Path
from queue import Empty, Queue
from typing import List, Union
import ffmpy
import numpy as np
import asyncio
import sounddevice as sd
import soundfile as sf
from .exceptions import *
RATE = 44100
class InputTrack:
"""
    Initializes an InputTrack.
    An InputTrack is responsible for taking audio data from an input device (e.g., a microphone)
    and storing it in a variable for later use.
    You can "cast" this InputTrack onto an OutputTrack so that whatever the input hears, you'll hear.
Parameters
----------
`name` : str
Name of the InputTrack.
`samplerate` : int
Sample rate to use. Defaults to RATE.
`blocksize` : int
        Defaults to None, which is calculated by sounddevice from the samplerate.
`channels` : int
Number of channels to use. Defaults to 2.
`device` : int
Device index to use. Defaults to the default device configured by the sound settings on the operating system.
run `python -m sounddevice` to see all devices and their indexes.
`dtype` : str
Data type to be passed as `dtype` into sd.InputStream. Defaults to "float32"
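    Example
    -------
    A minimal sketch (assumes a working default input device); stop it later
    with ``await track.stop()``::
        mic = InputTrack("mic")
        # ``mic.data`` now always holds the most recent input block as an ndarray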
"""
def __init__(self, name: str, **kwargs) -> None:
self.name = name
self.stopped = True
self.samplerate = kwargs.get("samplerate", RATE)
self.blocksize = kwargs.get("blocksize")
self.channels = kwargs.get("channels", 2)
self.device = kwargs.get("device")
self.dtype = kwargs.get("dtype", "float32")
self._stop_signal = False
self.data = None
# Start and wait
self.start()
while self.stopped:
time.sleep(0.01)
async def stop(self) -> None:
"""Stop the InputTrack"""
self._stop_signal = True
while not self.stopped:
await asyncio.sleep(0.01)
def __callback(self, indata, frames, time, status) -> None:
self.stopped = False
self.data = indata
def start(self) -> None:
"""Start the InputTrack"""
threading.Thread(target=self.__start, daemon=True).start()
def __start(self) -> None:
with sd.InputStream(samplerate=self.samplerate, blocksize=self.blocksize, device=self.device, channels=self.channels, callback=self.__callback, dtype=self.dtype):
while not self._stop_signal:
try:
time.sleep(0.001)
except KeyboardInterrupt:
self.stop()
self.stopped = True
self._stop_signal = False
class OutputTrack:
"""
    Initialize an OutputTrack.
    A track is responsible for keeping a constant sd.OutputStream open in the background.
    We can then play audio by putting ndarray data into this track's Queue.
Notes
-----
- The shape of the ndarray must be the same as self.shape
Parameters
----------
`name` : str
The name of this track.
`callback` : Callable
        A user supplied function that takes two arguments: the track itself and the ndarray being played. If the track is paused, or there's nothing to play, the callback is not called (because there's no ndarray to supply it with). The callback should return the same or a modified ndarray; the returned ndarray is what ends up being played.
`queue_size` : int
The maximum number of frames to put in the queue. Defaults to 20. You normally don't have to touch this.
`samplerate` : int
The initial samplerate of this OutputTrack. Defaults to RATE. Note that samplerate will be changed once update_samplerate is called. Mentioned method is also always called when attempting to play files with a mismatched samplerate.
`blocksize` : int
        Blocksize. This parameter is ignored if an InputTrack (see below) is provided. Defaults to None, which is a blocksize calculated from the samplerate.
`dtype` : str
The dtype parameter to be passed onto sd.OutputStream. Defaults to float32.
    `input_` : InputTrack
        If an InputTrack is provided here, its output will automatically be fed into this OutputTrack.
Audio Parameters
----------------
`vol` : int
The initial volume of the track. Defaults to 1 which is 100%. You can go higher than 1 but it starts to sound shit.
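    Example
    -------
    A minimal sketch; ``half_volume`` is a hypothetical user callback::
        def half_volume(track, data):
            return data * 0.5
        out = OutputTrack("main", callback=half_volume)
        out.queue.put(np.zeros(out.shape, dtype="float32"))  # queue one silent block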
"""
def __init__(self, name: str, **kwargs) -> None:
self.name = name
self.occupied = False
self.shape = None
self.samplerate = kwargs.get("samplerate", RATE)
self.blocksize = kwargs.get("blocksize")
self.dtype = kwargs.get("dtype", "float32")
self.callback = kwargs.get("callback")
self.input = kwargs.get("input_")
if self.input is not None:
self.samplerate = self.input.samplerate
self.blocksize = self.input.blocksize
self.queue = Queue(kwargs.get("queue_size", 20))
self.start()
self.stopped = False
self._stop_signal = False
self.paused = False
# Audio related attributes
self.vol = kwargs.get("vol", 1)
self.__previous_vol = self.vol
# Wait for the track to start
while self.shape is None:
time.sleep(0.01)
async def cast_input(self, input_: InputTrack) -> None:
"""
Directs the output of the provided InputTrack into this OutputTrack.
It is recommended to call this function rather than setting self.input manually so that samplerate differences are handled.
"""
rate = input_.samplerate
self.blocksize = input_.blocksize
self.dtype = input_.dtype
await self.update_samplerate(rate)
self.input = input_
async def pause(self, smooth: bool = True) -> None:
"""Pause the track"""
if smooth:
await self.set_volume(0)
self.paused = True
async def resume(self, smooth: bool = True) -> None:
"""Resume the current track"""
self.paused = False
if smooth:
await self.set_volume(self.__previous_vol)
async def set_volume(self, vol: float, smoothness: float = 0.005) -> None:
"""
Change the volume of the track.
You can also just use track.vol = x but if you want a smoother volume change you can use this.
Parameters
----------
`vol` : float
New volume.
`smoothness` : float
The higher this value is, the smoother the change will be. Defaults to 0.005 which is pretty smooth.
"""
self.__previous_vol = self.vol
inc = 0.01
while abs(self.vol - vol) > 0.01:
if vol > self.vol:
self.vol += inc
elif vol < self.vol:
self.vol -= inc
await asyncio.sleep(smoothness)
async def update_samplerate(self, rate: int) -> None:
"""
Update the samplerate of this track.
It does this by stopping the stream and changing the samplerate attribute and starting the stream again.
Parameters
----------
`rate` : int
The samplerate.
"""
if rate == self.samplerate:
return
await self.stop()
self.samplerate = rate
self.shape = None
self.stopped = False
self._stop_signal = False
self.occupied = False
with self.queue.mutex:
self.queue.queue.clear()
self.start()
while self.shape is None:
await asyncio.sleep(0.01)
async def stop(self) -> None:
"""Stop this track's OutputStream"""
self._stop_signal = True
self.input = None
while not self.stopped:
await asyncio.sleep(0.01)
with self.queue.mutex:
self.queue.queue.clear()
def start(self) -> None:
"""Start this track's OutputStream"""
threading.Thread(target=self.__start, daemon=True).start()
def _apply_fx(self, data) -> np.ndarray:
# First apply the volume
data = np.multiply(data, pow(
2, (math.sqrt(math.sqrt(math.sqrt(self.vol))) * 192 - 192) / 6), casting="unsafe")
return data
def __callback(self, outdata, frames, time, status) -> None:
self.shape = outdata.shape
if not self.paused:
try:
if not self.input:
data = self.queue.get(block=False)
else:
data = self.input.data
self.occupied = True
except Empty:
self.occupied = False
data = None
if self.occupied:
if self.callback is not None:
data = self.callback(self, data)
if data is not None:
outdata[:] = self._apply_fx(data)
else:
outdata[:] = 0
def __start(self) -> None:
with sd.OutputStream(samplerate=self.samplerate, blocksize=self.blocksize, channels=2, callback=self.__callback, dtype=self.dtype):
while not self._stop_signal:
try:
time.sleep(0.001)
except KeyboardInterrupt:
self.stop()
self.stopped = True
self._stop_signal = False
class Mixer:
"""
Create a Mixer. This mixer will be responsible for handling all the OutputTracks and playing audio.
Parameters
----------
`tracks` : Union[List[Union[OutputTrack, InputTrack]], int]
        Your list of OutputTrack (or InputTrack) objects, or an `int` number of OutputTrack to pre-generate on init. If an integer is provided, that many tracks will be pre-generated and the name of each track is going to be `str(id)`. This class can only pre-generate a list of OutputTrack; if you want an InputTrack, you'd have to pass it yourself in this `tracks` parameter or append it to `self.tracks` after pre-generating a list of OutputTrack
    `conversion_path` : str
        Sometimes loading a file will fail because its format is invalid. To get around this, we convert it to a .wav file; this is where those converted files are stored. Defaults to None, in which case no conversion is attempted and loading an unsupported format simply fails.
`tracks_params` : dict
You'll use this if you provided an int to the tracks parameter. This is a dictionary that takes in parameters each OutputTrack object needs.
Ex:
        if you want all generated tracks to have parameters like these:
OutputTrack(vol=0.5, callback=some_func)
You would set this parameter to:
tracks_params={"vol": 0.5, "callback": some_func}
"""
def __init__(self, tracks: Union[List[Union[OutputTrack, InputTrack]], int], conversion_path: str = None, **kwargs) -> None:
self.tracks = tracks
self.conversion_path = conversion_path
self.loop = asyncio.get_event_loop()
self.playing_files = {}
if isinstance(self.tracks, int):
self.tracks = self.generate_tracks(
self.tracks, kwargs.get("tracks_params"))
    def generate_tracks(self, count: int, params: dict = None) -> List[OutputTrack]:
if params is None:
params = {}
return [OutputTrack(str(i), **params) for i in range(count)]
def get_track(self, name: str = None, id_: int = None, require_unoccupied: bool = False, types_: List[str] = None) -> Union[None, OutputTrack]:
"""
Retrieve a specific track either by its name or id.
Parameters
----------
`name` : str
The name of the track.
`id_` : int
The id of the track (it's basically the index of the track in self.tracks)
`require_unoccupied` : bool
            Whether the track must be unoccupied. If this is True and we found a track but it's occupied, None will be returned instead; otherwise the track is returned. Defaults to False.
`types_` : List[str]
Types of tracks to get. Defaults to ["output"]. List of available types are as follows. ["output", "input"]
Returns
        -------
Either the OutputTrack or InputTrack if it was found or None.
Raises
------
`ValueError` :
            Raised when neither name nor id_ is provided.
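        Example
        -------
        A minimal sketch (the track name is illustrative)::
            track = mixer.get_track(name="music", require_unoccupied=True)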
"""
track = None
types_ = types_ if types_ else ["output"]
instances = []
if "output" in types_:
instances.append(OutputTrack)
if "input" in types_:
instances.append(InputTrack)
instances = tuple(instances)
if name:
track = [x for x in self.tracks if x.name == name and isinstance(x, instances)]
if not track:
return
track = track[0]
        elif id_ is not None:
try:
track = self.tracks[id_]
if not isinstance(track, instances):
track = None
except IndexError:
return
else:
raise ValueError(
"one of the following parameters has to exist: name, id_")
if require_unoccupied:
if track.occupied:
return
return track
def get_unoccupied_tracks(self) -> List[OutputTrack]:
"""Get a list of unoccupied tracks. Could be an empty list of no unoccupied tracks were found."""
return [x for x in self.tracks if not x.occupied]
def stop_file(self, track: str) -> None:
playing_data = self.playing_files.get(track)
if playing_data:
if playing_data["playing"]:
self.playing_files[track]["stop"] = True
async def stop_all(self) -> None:
"""Stops all tracks"""
for track in self.tracks:
await track.stop()
async def play_file(self, fp: str, **kwargs) -> OutputTrack:
"""
Play the provided file. The file will be split into chunks and is then put in the track's audio queue.
Notes
-----
- If you try to play over a track that is already playing, that track will be stopped and played again with the new provided file.
        - If the provided file's format is not supported, it will be converted into a .wav file and that .wav file is stored in self.conversion_path. If the conversion path is None then we will not try to convert it and will just continue to raise an `UnsupportedFormat` error.
Parameters
----------
`fp`: str
Path to the file.
`track` : OutputTrack
            The track to use. If not provided, an unoccupied track will be used.
`blocking` : bool
Whether to use a thread when putting the items in the queue or not. Defaults to False.
`load_in_memory` : bool
Load the entire audio file in memory. Defaults to False.
Raises
------
        `NoUnoccupiedTrack` :
Raised when there's no unoccupied track. Will not be raised if `track` is provided.
`UnsupportedFormat` :
            Raised when the provided file format is not supported or when it has been converted to a .wav but loading still fails.
Returns
-------
`OutputTrack` :
            The track the file is being played on.
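        Example
        -------
        A minimal sketch, to be run inside an async function (the file path is illustrative)::
            track = await mixer.play_file("intro.wav", blocking=False)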
"""
track = kwargs.get("track")
if isinstance(track, str):
track = self.get_track(name=track)
if track is None:
track = self.get_unoccupied_tracks()
if not track:
raise NoUnoccupiedTrack
track = track[0]
try:
_, rate = sf.read(fp, frames=1)
except RuntimeError as e:
if self.conversion_path is None:
raise UnsupportedFormat(e)
# Create if it doesn't exist
Path(self.conversion_path).mkdir(parents=True, exist_ok=True)
            # Use the basename so an absolute input path can't override conversion_path in os.path.join
            out = os.path.splitext(os.path.basename(fp))[0] + ".wav"
            out = os.path.join(self.conversion_path, out)
ff = ffmpy.FFmpeg(
inputs={fp: None},
outputs={out: None},
global_options=["-loglevel", "quiet", "-y"]
)
try:
ff.run()
return await self.play_file(out, **kwargs)
except ffmpy.FFRuntimeError as e:
raise UnsupportedFormat(e)
# Check if the current track is being fed audio data and stop if it is
self.stop_file(track.name)
        while True:
            playing_data = self.playing_files.get(track.name)
            if not playing_data:
                break
            if not playing_data["playing"]:
                break
            # Yield to the event loop so this wait does not spin at full CPU
            await asyncio.sleep(0.01)
await track.update_samplerate(rate)
blocksize = track.shape[0]
nds = sf.blocks(
fp,
blocksize=blocksize,
always_2d=True,
fill_value=np.array([0]),
dtype=np.float32
)
load_in_memory = kwargs.get("load_in_memory", False)
def t():
self.playing_files[track.name] = {
"playing": True,
"stop": False
}
def check_signal() -> None:
sig = self.playing_files.get(track.name)
if sig:
if sig["stop"]:
return True
return False
if load_in_memory:
                chunks = list(nds)
else:
chunks = nds
for nd in chunks:
sig = check_signal()
if sig:
break
track.queue.put(nd)
self.playing_files[track.name] = {
"playing": False,
"stop": False
}
if not kwargs.get("blocking", False):
threading.Thread(target=t, daemon=True).start()
await asyncio.sleep(0.2)
else:
t()
return track
| 34.709924 | 442 | 0.586925 |
aced7b44a8568a1238a72b2fe9fadc0c9e74e130 | 12,453 | py | Python | src/python/turicreate/toolkits/distances/_distances.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | ["BSD-3-Clause"] | 11,356 | 2017-12-08T19:42:32.000Z | 2022-03-31T16:55:25.000Z | src/python/turicreate/toolkits/distances/_distances.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | ["BSD-3-Clause"] | 2,402 | 2017-12-08T22:31:01.000Z | 2022-03-28T19:25:52.000Z | src/python/turicreate/toolkits/distances/_distances.py | Bpowers4/turicreate | 73dad213cc1c4f74337b905baea2b3a1e5a0266c | ["BSD-3-Clause"] | 1,343 | 2017-12-08T19:47:19.000Z | 2022-03-26T11:31:36.000Z |
# -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
"""
Distance functions and utilities.
"""
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
import turicreate as _tc
### --------------------------- ###
### Standard distance functions ###
### --------------------------- ###
def euclidean(x, y):
"""
Compute the Euclidean distance between two dictionaries or two lists
of equal length. Suppose `x` and `y` each contain :math:`d`
variables:
.. math:: D(x, y) = \\sqrt{\\sum_i^d (x_i - y_i)^2}
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
out : float
Euclidean distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
References
----------
- `Wikipedia - Euclidean distance
<http://en.wikipedia.org/wiki/Euclidean_distance>`_
Examples
--------
>>> tc.distances.euclidean([1, 2, 3], [4, 5, 6])
5.196152422706632
...
>>> tc.distances.euclidean({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
8.774964387392123
"""
return _tc.extensions._distances.euclidean(x, y)
def gaussian_kernel(x, y):
"""
Compute a Gaussian-type distance between two dictionaries or two lists
of equal length. Suppose `x` and `y` each contain :math:`d`
variables:
    .. math:: D(x, y) = 1 - \\exp\\{-\\sum_i^d (x_i - y_i)^2\\}
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
out : float
Gaussian distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
References
----------
- `Wikipedia - Euclidean distance
<http://en.wikipedia.org/wiki/Euclidean_distance>`_
Examples
--------
    >>> tc.distances.gaussian_kernel([.1, .2, .3], [.4, .5, .6])
    0.2366205057
    ...
    >>> tc.distances.gaussian_kernel({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
    1.0
"""
return _tc.extensions._distances.gaussian_kernel(x, y)
def squared_euclidean(x, y):
"""
Compute the squared Euclidean distance between two dictionaries or
two lists of equal length. Suppose `x` and `y` each contain
:math:`d` variables:
.. math:: D(x, y) = \\sum_i^d (x_i - y_i)^2
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
out : float
Squared Euclidean distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
- Squared Euclidean distance does not satisfy the triangle
inequality, so it is not a metric. This means the ball tree cannot
be used to compute nearest neighbors based on this distance.
References
----------
- `Wikipedia - Euclidean distance
<http://en.wikipedia.org/wiki/Euclidean_distance>`_
Examples
--------
>>> tc.distances.squared_euclidean([1, 2, 3], [4, 5, 6])
27.0
...
>>> tc.distances.squared_euclidean({'a': 2, 'c': 4},
... {'b': 3, 'c': 12})
77.0
"""
return _tc.extensions._distances.squared_euclidean(x, y)
def manhattan(x, y):
"""
Compute the Manhattan distance between between two dictionaries or
two lists of equal length. Suppose `x` and `y` each contain
:math:`d` variables:
.. math:: D(x, y) = \\sum_i^d |x_i - y_i|
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
out : float
Manhattan distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
- Manhattan distance is also known as "city block" or "taxi cab"
distance.
References
----------
- `Wikipedia - taxicab geometry
<http://en.wikipedia.org/wiki/Taxicab_geometry>`_
Examples
--------
>>> tc.distances.manhattan([1, 2, 3], [4, 5, 6])
9.0
...
>>> tc.distances.manhattan({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
13.0
"""
return _tc.extensions._distances.manhattan(x, y)
def cosine(x, y):
"""
Compute the cosine distance between between two dictionaries or two
lists of equal length. Suppose `x` and `y` each contain
:math:`d` variables:
.. math::
D(x, y) = 1 - \\frac{\\sum_i^d x_i y_i}
{\\sqrt{\\sum_i^d x_i^2}\\sqrt{\\sum_i^d y_i^2}}
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
out : float
Cosine distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
- Cosine distance is not a metric. This means the ball tree cannot
be used to compute nearest neighbors based on this distance.
References
----------
- `Wikipedia - cosine similarity
<http://en.wikipedia.org/wiki/Cosine_similarity>`_
Examples
--------
>>> tc.distances.cosine([1, 2, 3], [4, 5, 6])
0.025368153802923787
...
>>> tc.distances.cosine({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
0.13227816872537534
"""
return _tc.extensions._distances.cosine(x, y)
def levenshtein(x, y):
"""
Compute the Levenshtein distance between between strings. The
distance is the number of insertion, deletion, and substitution edits
needed to transform string `x` into string `y`. The mathematical
definition of Levenshtein is recursive:
.. math::
D(x, y) = d(|x|, |y|)
d(i, j) = \\max(i, j), \\quad \\mathrm{if } \\min(i, j) = 0
d(i, j) = \\min \\Big \\{d(i-1, j) + 1, \\ d(i, j-1) + 1, \\ d(i-1, j-1) + I(x_i \\neq y_i) \\Big \\}, \\quad \\mathrm{else}
Parameters
----------
x : string
First input string.
y : string
Second input string.
Returns
-------
out : float
Levenshtein distance between `x` and `y`.
References
----------
- `Wikipedia - Levenshtein distance
<http://en.wikipedia.org/wiki/Levenshtein_distance>`_
Examples
--------
>>> tc.distances.levenshtein("fossa", "fossil")
2.0
"""
return _tc.extensions._distances.levenshtein(x, y)
def dot_product(x, y):
"""
Compute the dot_product between two dictionaries or two lists of
equal length. Suppose `x` and `y` each contain :math:`d` variables:
.. math:: D(x, y) = \\frac{1}{\\sum_i^d x_i y_i}
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change; it
is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
    out : float
        Dot product distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
- Dot product distance is not a metric. This means the ball tree
cannot be used to compute nearest neighbors based on this distance.
Examples
--------
>>> tc.distances.dot_product([1, 2, 3], [4, 5, 6])
0.03125
...
>>> tc.distances.dot_product({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
0.020833333333333332
"""
return _tc.extensions._distances.dot_product(x, y)
def transformed_dot_product(x, y):
"""
Compute the "transformed_dot_product" distance between two dictionaries or
two lists of equal length. This is a way to transform the dot product of the
two inputs---a similarity measure---into a distance measure. Suppose `x` and
`y` each contain :math:`d` variables:
.. math:: D(x, y) = \\log\\{1 + \\exp\\{-\\sum_i^d x_i y_i\\}\\}
.. warning::
The 'dot_product' distance is deprecated and will be removed in future
versions of Turi Create. Please use 'transformed_dot_product'
distance instead, although note that this is more than a name change; it
is a *different* transformation of the dot product of two vectors.
Please see the distances module documentation for more details.
Parameters
----------
x : dict or list
First input vector.
y : dict or list
Second input vector.
Returns
-------
    out : float
        Transformed dot product distance between `x` and `y`.
Notes
-----
- If the input vectors are in dictionary form, keys missing in one
of the two dictionaries are assumed to have value 0.
- Transformed dot product distance is not a metric because the distance from
a point to itself is not 0. This means the ball tree cannot be used to
compute nearest neighbors based on this distance.
Examples
--------
>>> tc.distances.transformed_dot_product([1, 2, 3], [4, 5, 6])
    1.2664165549e-14
...
>>> tc.distances.transformed_dot_product({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
    1.4251641e-21
"""
return _tc.extensions._distances.transformed_dot_product(x, y)
def jaccard(x, y):
"""
Compute the Jaccard distance between between two dictionaries.
Suppose :math:`K_x` and :math:`K_y` are the sets of keys from the
two input dictionaries.
.. math:: D(x, y) = 1 - \\frac{|K_x \\cap K_y|}{|K_x \\cup K_y|}
Parameters
----------
x : dict
First input dictionary.
y : dict
Second input dictionary.
Returns
-------
out : float
Jaccard distance between `x` and `y`.
Notes
-----
- Jaccard distance treats the keys in the input dictionaries as
sets, and ignores the values in the input dictionaries.
References
----------
- `Wikipedia - Jaccard distance
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> tc.distances.jaccard({'a': 2, 'c': 4}, {'b': 3, 'c': 12})
0.6666666666666667
"""
return _tc.extensions._distances.jaccard(x, y)
def weighted_jaccard(x, y):
"""
Compute the weighted Jaccard distance between between two
dictionaries. Suppose :math:`K_x` and :math:`K_y` are the sets of
keys from the two input dictionaries, while :math:`x_k` and
:math:`y_k` are the values associated with key :math:`k` in the
respective dictionaries. Typically these values are counts, i.e. of
words or n-grams.
.. math::
D(x, y) = 1 - \\frac{\\sum_{k \\in K_x \\cup K_y} \\min\\{x_k, y_k\\}}
{\\sum_{k \\in K_x \\cup K_y} \\max\\{x_k, y_k\\}}
Parameters
----------
x : dict
First input dictionary.
y : dict
Second input dictionary.
Returns
-------
out : float
Weighted jaccard distance between `x` and `y`.
Notes
-----
- If a key is missing in one of the two dictionaries, it is assumed
to have value 0.
References
----------
- Weighted Jaccard distance: Chierichetti, F., et al. (2010)
`Finding the Jaccard Median
<http://theory.stanford.edu/~sergei/papers/soda10-jaccard.pdf>`_.
Proceedings of the Twenty-First Annual ACM-SIAM Symposium on
Discrete Algorithms. Society for Industrial and Applied
Mathematics.
Examples
--------
>>> tc.distances.weighted_jaccard({'a': 2, 'c': 4},
... {'b': 3, 'c': 12})
0.7647058823529411
"""
return _tc.extensions._distances.weighted_jaccard(x, y)
| 26.552239 | 132 | 0.591825 |
aced7bdb6d53aa30e9dd85ac21beed031f709977 | 2,715 | py | Python | apps/projects/tests.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | ["MIT"] | null | null | null | apps/projects/tests.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | ["MIT"] | null | null | null | apps/projects/tests.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | ["MIT"] | null | null | null |
from django.test import TestCase
from django.utils import timezone
from django.urls import reverse
from vulnman.tests.mixins import VulnmanTestMixin
from apps.projects import models
class ProjectTests(TestCase, VulnmanTestMixin):
def setUp(self):
self.init_mixin()
def test_listview(self):
response = self.client.get(reverse('projects:project-list'))
self.assertEqual(response.status_code, 302)
project1 = self._create_project(creator=self.user1)
self.assign_perm("projects.view_project", self.pentester, project1)
self._create_project(creator=self.user2)
self.client.force_login(self.pentester)
response = self.client.get(reverse('projects:project-list'))
self.assertEqual(len(response.context['projects']), 1)
self.assertEqual(response.context['projects'].get(), project1)
def test_detailview(self):
project = self._create_project(creator=self.manager)
# test without permission
self.client.force_login(self.user2)
response = self.client.get(reverse('projects:project-detail', kwargs={'pk': project.pk}))
self.assertEqual(response.status_code, 404)
# test with permission
self.client.force_login(self.pentester)
self.assign_perm("projects.view_project", self.pentester, project)
response = self.client.get(reverse('projects:project-detail', kwargs={'pk': project.pk}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['project'], project)
def test_createview(self):
url = self.get_url("projects:project-create")
client = self._create_instance(models.Client)
payload = {"client": str(client.pk), "start_date": timezone.now().date(),
"end_date": timezone.now().date() + timezone.timedelta(days=1),
"pentesters": "%s" % str(self.pentester.pk),
"scope_set-TOTAL_FORMS": "1", "scope_set-0-uuid": "",
"scope_set-0-project": "",
"scope_set-0-name": "Test Scope", "scope_set-0-DELETE": "",
"scope_set-MAX_NUM_FORMS": "4",
"scope_set-INITIAL_FORMS": "0", "scope_set-MIN_NUM_FORMS": "0"
}
# test pentester not allowed to create projects
self.client.force_login(self.pentester)
response = self.client.post(url, payload)
self.assertEqual(response.status_code, 403)
# test as manager
self.client.force_login(self.manager)
response = self.client.post(url, payload)
self.assertEqual(response.status_code, 302)
self.assertEqual(models.Project.objects.count(), 1)
| 47.631579 | 97 | 0.656354 |
aced7c7ae39197b6fa5385249e515bde0fe15cdc | 21,602 | py | Python | tests/nodeos_forked_chain_test.py | mtpbuilder/mtp | 9dbc99e91b7d0900b75ad4a461a1da9da2c80450 | ["MIT"] | 27 | 2020-08-04T08:50:59.000Z | 2020-08-07T07:51:52.000Z | tests/nodeos_forked_chain_test.py | zhllljm/mtp | 9dbc99e91b7d0900b75ad4a461a1da9da2c80450 | ["MIT"] | null | null | null | tests/nodeos_forked_chain_test.py | zhllljm/mtp | 9dbc99e91b7d0900b75ad4a461a1da9da2c80450 | ["MIT"] | 25 | 2020-08-04T08:59:34.000Z | 2020-08-07T07:51:54.000Z |
#!/usr/bin/env python3
from testUtils import Utils
import testUtils
import time
from Cluster import Cluster
from WalletMgr import WalletMgr
from Node import BlockType
from Node import Node
from TestHelper import TestHelper
import decimal
import math
import re
import signal
###############################################################
# nodmtp_forked_chain_test
#
# This test sets up 2 producing nodes and one "bridge" node using test_control_api_plugin.
# One producing node has 11 of the elected producers and the other has 10 of the elected producers.
# All the producers are named in alphabetical order, so that the 11 producers, in the one production node, are
# scheduled first, followed by the 10 producers in the other producer node. Each producing node is only connected
# to the other producing node via the "bridge" node.
# The bridge node has the test_control_api_plugin, which exposes a restful interface that the test script uses to kill
# the "bridge" node at a specific producer in the production cycle. This is used to fork the producer network
# precisely when the 11 producer node has finished producing and the other producing node is about to produce.
# The fork in the producer network results in one fork of the block chain that advances with 10 producers with a LIB
# that has advanced, since all of the previous blocks were confirmed and the producer that was scheduled for that
# slot produced it, and one with 11 producers with a LIB that has not advanced. This situation is validated by
# the test script.
# After both chains are allowed to produce, the "bridge" node is turned back on.
# Time is allowed to progress so that the "bridge" node can catchup and both producer nodes to come to consensus
# The block log is then checked for both producer nodes to verify that the 10 producer fork is selected and that
# both nodes are in agreement on the block log.
#
###############################################################
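# Topology sketch (producer-to-node assignment follows the alphabetical split described above;
# the drawing itself is illustrative):
#
#   node_00 (defproducera .. defproducerk, 11 producers)
#          \
#           bridge node (test_control_api_plugin)
#          /
#   node_01 (defproducerl .. defproduceru, 10 producers)
#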
Print=Utils.Print
from core_symbol import CORE_SYMBOL
def analyzeBPs(bps0, bps1, expectDivergence):
start=0
index=None
length=len(bps0)
firstDivergence=None
errorInDivergence=False
analysysPass=0
bpsStr=None
bpsStr0=None
bpsStr1=None
while start < length:
analysysPass+=1
bpsStr=None
for i in range(start,length):
bp0=bps0[i]
bp1=bps1[i]
if bpsStr is None:
bpsStr=""
else:
bpsStr+=", "
blockNum0=bp0["blockNum"]
prod0=bp0["prod"]
blockNum1=bp1["blockNum"]
prod1=bp1["prod"]
numDiff=True if blockNum0!=blockNum1 else False
prodDiff=True if prod0!=prod1 else False
if numDiff or prodDiff:
index=i
if firstDivergence is None:
firstDivergence=min(blockNum0, blockNum1)
if not expectDivergence:
errorInDivergence=True
break
bpsStr+=str(blockNum0)+"->"+prod0
if index is None:
if expectDivergence:
errorInDivergence=True
break
return None
bpsStr0=None
        bpsStr1=None
start=length
for i in range(index,length):
if bpsStr0 is None:
bpsStr0=""
bpsStr1=""
else:
bpsStr0+=", "
bpsStr1+=", "
bp0=bps0[i]
bp1=bps1[i]
blockNum0=bp0["blockNum"]
prod0=bp0["prod"]
blockNum1=bp1["blockNum"]
prod1=bp1["prod"]
numDiff="*" if blockNum0!=blockNum1 else ""
prodDiff="*" if prod0!=prod1 else ""
if not numDiff and not prodDiff:
start=i
index=None
if expectDivergence:
errorInDivergence=True
break
bpsStr0+=str(blockNum0)+numDiff+"->"+prod0+prodDiff
bpsStr1+=str(blockNum1)+numDiff+"->"+prod1+prodDiff
if errorInDivergence:
break
if errorInDivergence:
msg="Failed analyzing block producers - "
if expectDivergence:
msg+="nodes do not indicate different block producers for the same blocks, but they are expected to diverge at some point."
else:
msg+="did not expect nodes to indicate different block producers for the same blocks."
msg+="\n Matching Blocks= %s \n Diverging branch node0= %s \n Diverging branch node1= %s" % (bpsStr,bpsStr0,bpsStr1)
Utils.errorExit(msg)
return firstDivergence
def getMinHeadAndLib(prodNodes):
info0=prodNodes[0].getInfo(exitOnError=True)
info1=prodNodes[1].getInfo(exitOnError=True)
headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"]))
libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"]))
return (headBlockNum, libNum)
args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
"--wallet-port"})
Utils.Debug=args.v
totalProducerNodes=2
totalNonProducerNodes=1
totalNodes=totalProducerNodes+totalNonProducerNodes
maxActiveProducers=21
totalProducers=maxActiveProducers
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
walletPort=args.wallet_port
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killmtpInstances=not dontKill
killWallet=not dontKill
WalletdName=Utils.mtpWalletName
ClientName="clmtp"
try:
TestHelper.printSystemInfo("BEGIN")
cluster.setWalletMgr(walletMgr)
cluster.killall(allInstances=killAll)
cluster.cleanup()
Print("Stand up cluster")
specificExtraNodmtpArgs={}
# producer nodes will be mapped to 0 through totalProducerNodes-1, so the number totalProducerNodes will be the non-producing node
specificExtraNodmtpArgs[totalProducerNodes]="--plugin mtpio::test_control_api_plugin"
# *** setup topogrophy ***
# "bridge" shape connects defprocera through defproducerk (in node0) to each other and defproducerl through defproduceru (in node01)
# and the only connection between those 2 groups is through the bridge node
if cluster.launch(prodCount=prodCount, topo="bridge", pnodes=totalProducerNodes,
totalNodes=totalNodes, totalProducers=totalProducers,
useBiosBootFile=False, specificExtraNodmtpArgs=specificExtraNodmtpArgs) is False:
Utils.cmdError("launcher")
Utils.errorExit("Failed to stand up mtp cluster.")
Print("Validating system accounts after bootstrap")
cluster.validateAccounts(None)
# *** create accounts to vote in desired producers ***
accounts=cluster.createAccountKeys(5)
if accounts is None:
Utils.errorExit("FAILURE - create keys")
accounts[0].name="tester111111"
accounts[1].name="tester222222"
accounts[2].name="tester333333"
accounts[3].name="tester444444"
accounts[4].name="tester555555"
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName, [cluster.mtpioAccount,accounts[0],accounts[1],accounts[2],accounts[3],accounts[4]])
for _, account in cluster.defProducerAccounts.items():
walletMgr.importKey(account, testWallet, ignoreDupKeyWarning=True)
Print("Wallet \"%s\" password=%s." % (testWalletName, testWallet.password.encode("utf-8")))
# *** identify each node (producers and non-producing node) ***
nonProdNode=None
prodNodes=[]
producers=[]
for i in range(0, totalNodes):
node=cluster.getNode(i)
node.producers=Cluster.parseProducers(i)
numProducers=len(node.producers)
Print("node has producers=%s" % (node.producers))
if numProducers==0:
if nonProdNode is None:
nonProdNode=node
nonProdNode.nodeNum=i
else:
Utils.errorExit("More than one non-producing nodes")
else:
for prod in node.producers:
trans=node.regproducer(cluster.defProducerAccounts[prod], "http::/mysite.com", 0, waitForTransBlock=False, exitOnError=True)
prodNodes.append(node)
producers.extend(node.producers)
# *** delegate bandwidth to accounts ***
node=prodNodes[0]
# create accounts via mtpio as otherwise a bid is needed
for account in accounts:
Print("Create new account %s via %s" % (account.name, cluster.mtpioAccount.name))
trans=node.createInitializeAccount(account, cluster.mtpioAccount, stakedDeposit=0, waitForTransBlock=True, stakeNet=1000, stakeCPU=1000, buyRAM=1000, exitOnError=True)
transferAmount="100000000.0000 {0}".format(CORE_SYMBOL)
Print("Transfer funds %s from account %s to %s" % (transferAmount, cluster.mtpioAccount.name, account.name))
node.transferFunds(cluster.mtpioAccount, account, transferAmount, "test transfer", waitForTransBlock=True)
trans=node.delegatebw(account, 20000000.0000, 20000000.0000, waitForTransBlock=True, exitOnError=True)
# *** vote using accounts ***
#verify nodes are in sync and advancing
cluster.waitOnClusterSync(blockAdvancing=5)
index=0
for account in accounts:
Print("Vote for producers=%s" % (producers))
trans=prodNodes[index % len(prodNodes)].vote(account, producers, waitForTransBlock=True)
index+=1
# *** Identify a block where production is stable ***
#verify nodes are in sync and advancing
cluster.waitOnClusterSync(blockAdvancing=5)
blockNum=node.getNextCleanProductionCycle(trans)
blockProducer=node.getBlockProducerByNum(blockNum)
Print("Validating blockNum=%s, producer=%s" % (blockNum, blockProducer))
cluster.biosNode.kill(signal.SIGTERM)
#advance to the next block of 12
lastBlockProducer=blockProducer
while blockProducer==lastBlockProducer:
blockNum+=1
blockProducer=node.getBlockProducerByNum(blockNum)
# *** Identify what the production cycle is ***
productionCycle=[]
producerToSlot={}
slot=-1
inRowCountPerProducer=12
while True:
if blockProducer not in producers:
Utils.errorExit("Producer %s was not one of the voted on producers" % blockProducer)
productionCycle.append(blockProducer)
slot+=1
if blockProducer in producerToSlot:
Utils.errorExit("Producer %s was first seen in slot %d, but is repeated in slot %d" % (blockProducer, producerToSlot[blockProducer], slot))
producerToSlot[blockProducer]={"slot":slot, "count":0}
lastBlockProducer=blockProducer
while blockProducer==lastBlockProducer:
producerToSlot[blockProducer]["count"]+=1
blockNum+=1
blockProducer=node.getBlockProducerByNum(blockNum)
if producerToSlot[lastBlockProducer]["count"]!=inRowCountPerProducer:
Utils.errorExit("Producer %s, in slot %d, expected to produce %d blocks but produced %d blocks. At block number %d." %
(lastBlockProducer, slot, inRowCountPerProducer, producerToSlot[lastBlockProducer]["count"], blockNum-1))
if blockProducer==productionCycle[0]:
break
output=None
for blockProducer in productionCycle:
if output is None:
output=""
else:
output+=", "
output+=blockProducer+":"+str(producerToSlot[blockProducer]["count"])
Print("ProductionCycle ->> {\n%s\n}" % output)
#retrieve the info for all the nodes to report the status for each
for node in cluster.getNodes():
node.getInfo()
cluster.reportStatus()
# *** Killing the "bridge" node ***
Print("Sending command to kill \"bridge\" node to separate the 2 producer groups.")
# block number to start expecting node killed after
preKillBlockNum=nonProdNode.getBlockNum()
preKillBlockProducer=nonProdNode.getBlockProducerByNum(preKillBlockNum)
# kill at last block before defproducerl, since the block it is killed on will get propagated
killAtProducer="defproducerk"
nonProdNode.killNodeOnProducer(producer=killAtProducer, whereInSequence=(inRowCountPerProducer-1))
# *** Identify a highest block number to check while we are trying to identify where the divergence will occur ***
# will search full cycle after the current block, since we don't know how many blocks were produced since retrieving
# block number and issuing kill command
postKillBlockNum=prodNodes[1].getBlockNum()
blockProducers0=[]
blockProducers1=[]
libs0=[]
libs1=[]
lastBlockNum=max([preKillBlockNum,postKillBlockNum])+2*maxActiveProducers*inRowCountPerProducer
actualLastBlockNum=None
prodChanged=False
nextProdChange=False
    #identify the earliest LIB to start identifying the earliest block to check if divergent branches eventually reach consensus
(headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes)
Print("Tracking block producers from %d till divergence or %d. Head block is %d and lowest LIB is %d" % (preKillBlockNum, lastBlockNum, headBlockNum, libNumAroundDivergence))
transitionCount=0
missedTransitionBlock=None
for blockNum in range(preKillBlockNum,lastBlockNum):
#avoiding getting LIB until my current block passes the head from the last time I checked
if blockNum>headBlockNum:
(headBlockNum, libNumAroundDivergence)=getMinHeadAndLib(prodNodes)
# track the block number and producer from each producing node
blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
#in the case that the preKillBlockNum was also produced by killAtProducer, ensure that we have
#at least one producer transition before checking for killAtProducer
if not prodChanged:
if preKillBlockProducer!=blockProducer0:
prodChanged=True
#since it is killing for the last block of killAtProducer, we look for the next producer change
if not nextProdChange and prodChanged and blockProducer1==killAtProducer:
nextProdChange=True
elif nextProdChange and blockProducer1!=killAtProducer:
nextProdChange=False
if blockProducer0!=blockProducer1:
Print("Divergence identified at block %s, node_00 producer: %s, node_01 producer: %s" % (blockNum, blockProducer0, blockProducer1))
actualLastBlockNum=blockNum
break
else:
missedTransitionBlock=blockNum
transitionCount+=1
# allow this to transition twice, in case the script was identifying an earlier transition than the bridge node received the kill command
if transitionCount>1:
Print("At block %d and have passed producer: %s %d times and we have not diverged, stopping looking and letting errors report" % (blockNum, killAtProducer, transitionCount))
actualLastBlockNum=blockNum
break
#if we diverge before identifying the actualLastBlockNum, then there is an ERROR
if blockProducer0!=blockProducer1:
extra="" if transitionCount==0 else " Diverged after expected killAtProducer transition at block %d." % (missedTransitionBlock)
Utils.errorExit("Groups reported different block producers for block number %d.%s %s != %s." % (blockNum,extra,blockProducer0,blockProducer1))
#verify that the non producing node is not alive (and populate the producer nodes with current getInfo data to report if
#an error occurs)
if nonProdNode.verifyAlive():
Utils.errorExit("Expected the non-producing node to have shutdown.")
Print("Analyzing the producers leading up to the block after killing the non-producing node, expecting divergence at %d" % (blockNum))
firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True)
# Nodes should not have diverged till the last block
if firstDivergence!=blockNum:
Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, blockNum))
blockProducers0=[]
blockProducers1=[]
for prodNode in prodNodes:
info=prodNode.getInfo()
Print("node info: %s" % (info))
killBlockNum=blockNum
lastBlockNum=killBlockNum+(maxActiveProducers - 1)*inRowCountPerProducer+1 # allow 1st testnet group to produce just 1 more block than the 2nd
Print("Tracking the blocks from the divergence till there are 10*12 blocks on one chain and 10*12+1 on the other, from block %d to %d" % (killBlockNum, lastBlockNum))
for blockNum in range(killBlockNum,lastBlockNum):
blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
Print("Analyzing the producers from the divergence to the lastBlockNum and verify they stay diverged, expecting divergence at block %d" % (killBlockNum))
firstDivergence=analyzeBPs(blockProducers0, blockProducers1, expectDivergence=True)
if firstDivergence!=killBlockNum:
Utils.errorExit("Expected to diverge at %s, but diverged at %s." % (firstDivergence, killBlockNum))
blockProducers0=[]
blockProducers1=[]
for prodNode in prodNodes:
info=prodNode.getInfo()
Print("node info: %s" % (info))
Print("Relaunching the non-producing bridge node to connect the producing nodes again")
if not nonProdNode.relaunch(nonProdNode.nodeNum, None):
errorExit("Failure - (non-production) node %d should have restarted" % (nonProdNode.nodeNum))
Print("Waiting to allow forks to resolve")
for prodNode in prodNodes:
info=prodNode.getInfo()
Print("node info: %s" % (info))
    #ensure that the nodes have enough time to get in consensus, so wait for 3 producers to produce their complete round
time.sleep(inRowCountPerProducer * 3 / 2)
remainingChecks=20
match=False
checkHead=False
while remainingChecks>0:
checkMatchBlock=killBlockNum if not checkHead else prodNodes[0].getBlockNum()
blockProducer0=prodNodes[0].getBlockProducerByNum(checkMatchBlock)
blockProducer1=prodNodes[1].getBlockProducerByNum(checkMatchBlock)
match=blockProducer0==blockProducer1
if match:
if checkHead:
break
else:
checkHead=True
continue
Print("Fork has not resolved yet, wait a little more. Block %s has producer %s for node_00 and %s for node_01. Original divergence was at block %s. Wait time remaining: %d" % (checkMatchBlock, blockProducer0, blockProducer1, killBlockNum, remainingChecks))
time.sleep(1)
remainingChecks-=1
for prodNode in prodNodes:
info=prodNode.getInfo()
Print("node info: %s" % (info))
# ensure all blocks from the lib before divergence till the current head are now in consensus
endBlockNum=max(prodNodes[0].getBlockNum(), prodNodes[1].getBlockNum())
Print("Identifying the producers from the saved LIB to the current highest head, from block %d to %d" % (libNumAroundDivergence, endBlockNum))
for blockNum in range(libNumAroundDivergence,endBlockNum):
blockProducer0=prodNodes[0].getBlockProducerByNum(blockNum)
blockProducer1=prodNodes[1].getBlockProducerByNum(blockNum)
blockProducers0.append({"blockNum":blockNum, "prod":blockProducer0})
blockProducers1.append({"blockNum":blockNum, "prod":blockProducer1})
Print("Analyzing the producers from the saved LIB to the current highest head and verify they match now")
analyzeBPs(blockProducers0, blockProducers1, expectDivergence=False)
resolvedKillBlockProducer=None
for prod in blockProducers0:
if prod["blockNum"]==killBlockNum:
resolvedKillBlockProducer = prod["prod"]
if resolvedKillBlockProducer is None:
Utils.errorExit("Did not find find block %s (the original divergent block) in blockProducers0, test setup is wrong. blockProducers0: %s" % (killBlockNum, ", ".join(blockProducers)))
Print("Fork resolved and determined producer %s for block %s" % (resolvedKillBlockProducer, killBlockNum))
blockProducers0=[]
blockProducers1=[]
testSuccessful=True
finally:
TestHelper.shutdown(cluster, walletMgr, testSuccessful=testSuccessful, killmtpInstances=killmtpInstances, killWallet=killWallet, keepLogs=keepLogs, cleanRun=killAll, dumpErrorDetails=dumpErrorDetails)
if not testSuccessful:
Print(Utils.FileDivider)
Print("Compare Blocklog")
cluster.compareBlockLogs()
Print(Utils.FileDivider)
Print("Compare Blocklog")
cluster.printBlockLog()
Print(Utils.FileDivider)
exit(0)
| 43.117764 | 265 | 0.688084 |
aced7c91eec392224ff875201f2df5e50d5c949f | 5,505 | py | Python | tests/test_fooditem.py | codacy-badger/FASTFOODFAST-API | 3ddb2715dd2b19bf0eae823b5a17c3a01e963a53 | ["MIT"] | null | null | null | tests/test_fooditem.py | codacy-badger/FASTFOODFAST-API | 3ddb2715dd2b19bf0eae823b5a17c3a01e963a53 | ["MIT"] | null | null | null | tests/test_fooditem.py | codacy-badger/FASTFOODFAST-API | 3ddb2715dd2b19bf0eae823b5a17c3a01e963a53 | ["MIT"] | null | null | null |
import unittest
import json
from .base_test import BaseTest
class TestFoodItem(BaseTest):
def test_get_token(self):
""" Test get token """
self.signup()
response = self.login()
self.assertEqual(response.status_code, 200)
self.assertIn("token", json.loads(response.data))
def test_create_food_item(self):
""" Test create food item """
token = self.get_token_as_admin()
response = self.post_food_item()
self.assertEqual(response.status_code, 201)
self.assertEqual(json.loads(response.data)[
"message"], "Food item created successfully")
def test_invalid_food_name(self):
""" Test food name """
token = self.get_token_as_admin()
response = self.client.post(
"api/v2/menu",
data=json.dumps(self.invalid_food_name),
headers={'content-type': 'application/json',
"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.data)[
"message"], "foodname must contain alphanumeric"
" characters only")
def test_update_food_item(self):
""" test to update a specific food item """
token = self.get_token_as_admin()
self.post_food_item()
response = self.client.put(
"api/v2/menu/1",
data=json.dumps(self.update_data),
headers={'content-type': 'application/json',
"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 200)
def test_update_non_existing_food_item(self):
""" test to update non existing food item """
token = self.get_token_as_admin()
response = self.client.put(
"api/v2/menu/1",
data=json.dumps(self.update_data),
headers={'content-type': 'application/json',
"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 404)
def test_invalid_food_description(self):
""" Test food description """
token = self.get_token_as_admin()
response = self.client.post(
"api/v2/menu",
data=json.dumps(self.invalid_description_data),
headers={'content-type': 'application/json',
"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 400)
self.assertEqual(json.loads(response.data)[
"message"], "description must contain alphanumeric"
" characters only")
def test_get_all_fooditems(self):
""" Test all food items """
self.post_food_item()
response = self.client.get(
"api/v2/menu"
)
self.assertEqual(response.status_code, 200)
def test_get_all_fooditems_as_admin(self):
""" Test all food items """
token = self.get_token_as_admin()
self.post_food_item()
response = self.client.get(
"api/v2/menu",
headers={"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 200)
def test_customer_post_order(self):
""" Test for a customer to place an order """
token = self.get_token_as_user()
data = {
"destination": "juja"
}
self.post_food_item()
res = self.client.post(
"/api/v2/users/1/orders",
data=json.dumps(data),
headers={'content-type': 'application/json',
'Authorization': f'Bearer {token}'}
)
self.assertEqual(res.status_code, 201)
def test_get_specific_orders(self):
""" Get a specific food order"""
token = self.get_token_as_user()
self.post_food_item()
res = self.client.post(
"/api/v2/users/1/orders",
data=json.dumps(self.post_order_data),
headers={'content-type': 'application/json',
'Authorization': f'Bearer {token}'}
)
response = self.client.get(
"api/v2/orders/1",
headers={'content-type': 'application/json',
'Authorization': f'Bearer {token}'}
)
self.assertEqual(response.status_code, 200)
def test_delete_food_item_as_admin(self):
""" Test to delete a specific food item """
token = self.get_token_as_admin()
self.post_food_item()
response = self.client.delete(
"api/v2/menu/1",
headers={'content-type': 'application/json',
"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.data)[
"message"], "item deleted sucessfully")
def test_delete_non_existing_food_item_as_admin(self):
""" Test to delete non existing food item """
token = self.get_token_as_admin()
response = self.client.delete(
"api/v2/menu/1",
headers={'content-type': 'application/json',
"Authorization": f'Bearer {token}'}
)
self.assertEqual(response.status_code, 404)
self.assertEqual(json.loads(response.data)[
"message"], "food item does not exist")
| 29.281915 | 76 | 0.568211 |
aced7db6bb80f512b07189708090708fbdbbef85 | 4,056 | py | Python | redis_cache/test/test_rediscache.py | fjsj/redis-simple-cache | 95e2db870bb26121476c2a3dc2714b1ebad12152 | ["BSD-3-Clause"] | null | null | null | redis_cache/test/test_rediscache.py | fjsj/redis-simple-cache | 95e2db870bb26121476c2a3dc2714b1ebad12152 | ["BSD-3-Clause"] | 1 | 2016-02-22T07:39:12.000Z | 2016-02-22T07:45:54.000Z | redis_cache/test/test_rediscache.py | fjsj/redis-simple-cache | 95e2db870bb26121476c2a3dc2714b1ebad12152 | ["BSD-3-Clause"] | null | null | null |
#SimpleCache Tests
#~~~~~~~~~~~~~~~~~~~
from redis_cache import SimpleCache, cache_it, cache_it_json, CacheMissException, ExpiredKeyException
from unittest import TestCase, main
class ComplexNumber(object): # used in pickle test
def __init__(self, real, imag):
self.real = real
self.imag = imag
def __eq__(self, other):
return self.real == other.real and self.imag == other.imag
class SimpleCacheTest(TestCase):
def setUp(self):
self.c = SimpleCache(10) # Cache that has a maximum limit of 10 keys
self.assertIsNotNone(self.c.connection)
def test_expire(self):
import time
quick_c = SimpleCache()
quick_c.store("foo", "bar", expire=1)
time.sleep(1.1)
self.assertRaises(ExpiredKeyException, quick_c.get, "foo")
quick_c.flush()
def test_miss(self):
self.assertRaises(CacheMissException, self.c.get, "blablabla")
def test_store_retrieve(self):
self.c.store("foo", "bar")
foo = self.c.get("foo")
self.assertEqual(foo, "bar")
def test_json(self):
payload = {"example": "data"}
self.c.store_json("json", payload)
self.assertEqual(self.c.get_json("json"), payload)
def test_pickle(self):
payload = ComplexNumber(3,4)
self.c.store_pickle("pickle", payload)
self.assertEqual(self.c.get_pickle("pickle"), payload)
def test_decorator(self):
mutable = []
@cache_it(cache=self.c)
def append(n):
mutable.append(n)
return mutable
append(1)
len_before = len(mutable)
mutable_cached = append(1)
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_json(self):
import random
mutable = {}
@cache_it_json(cache=self.c)
def set_key(n):
mutable[str(random.random())] = n
return mutable
set_key('a')
len_before = len(mutable)
mutable_cached = set_key('a')
len_after = len(mutable)
self.assertEqual(len_before, len_after)
self.assertNotEqual(id(mutable), id(mutable_cached))
self.assertEqual(mutable, mutable_cached)
def test_decorator_complex_type(self):
import math
@cache_it(cache=self.c)
def add(x, y):
return ComplexNumber(x.real + y.real, x.imag + y.imag)
result = add(ComplexNumber(3,4), ComplexNumber(4,5))
result_cached = add(ComplexNumber(3,4), ComplexNumber(4,5))
self.assertNotEqual(id(result), id(result_cached))
self.assertEqual(result, result_cached)
self.assertEqual(result, complex(3,4) + complex(4,5))
def test_cache_limit(self):
for i in range(100):
self.c.store("foo%d" % i, "foobar")
self.failUnless(len(self.c) <= 10)
self.failUnless(len(self.c.keys()) <= 10)
def test_flush(self):
connection = self.c.connection
connection.set("will_not_be_deleted", '42')
self.c.store("will_be_deleted", '10')
len_before = len(self.c)
len_keys_before = len(connection.keys(self.c.make_key("*")))
self.c.flush()
len_after = len(self.c)
len_keys_after = len(connection.keys(self.c.make_key("*")))
self.assertEqual(len_before, 1)
self.assertEqual(len_after, 0)
self.assertEqual(len_keys_before, 1)
self.assertEqual(len_keys_after, 0)
self.assertEqual(connection.get("will_not_be_deleted"), '42')
connection.delete("will_not_be_deleted")
def test_flush_multiple(self):
c1 = SimpleCache(10)
c2 = SimpleCache(10)
c1.store("foo", "bar")
c2.store("foo", "bar")
c1.flush()
self.assertEqual(len(c1), 0)
self.assertEqual(len(c2), 1)
c2.flush()
def tearDown(self):
self.c.flush()
if __name__ == "__main__":
    main()
| 31.937008 | 101 | 0.612673 |
aced80518d0d3f802fb1d6af5479954b3cd52b7c | 2,009 | py | Python | tests/st/ops/ascend/vector/test_ceil_001.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | ["Apache-2.0"] | 286 | 2020-06-23T06:40:44.000Z | 2022-03-30T01:27:49.000Z | tests/st/ops/ascend/vector/test_ceil_001.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | ["Apache-2.0"] | 10 | 2020-07-31T03:26:59.000Z | 2021-12-27T15:00:54.000Z | tests/st/ops/ascend/vector/test_ceil_001.py | tianjiashuo/akg | a9cbf642063fb1086a93e8bc6be6feb145689817 | ["Apache-2.0"] | 30 | 2020-07-17T01:04:14.000Z | 2021-12-27T14:05:19.000Z |
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.ascend.ceil_run import ceil_run
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_ceil_001"
case_path = os.getcwd()
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("========================{0} Setup case=================".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, dimArgs
("ceil_8", ceil_run, [(8,), "float16"]),
("ceil_8_16", ceil_run, [(8, 16), "float16"]),
("ceil_8_16_16", ceil_run, [(8, 16, 16), "float16"]),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def teardown(self):
"""
clean environment
:return:
"""
self._log.info("========================{0} Teardown case=================".format(self.casename))
if __name__ == "__main__":
t = TestCase()
t.setup()
t.test_run()
t.teardown()
| 29.544118 | 107 | 0.600796 |
aced80642e6e60684b9b8ff7b0c672e6622da35b | 12,809 | py | Python | tests/components/lcn/test_device_trigger.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 22,481 | 2020-03-02T13:09:59.000Z | 2022-03-31T23:34:28.000Z | tests/components/lcn/test_device_trigger.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 31,101 | 2020-03-02T13:00:16.000Z | 2022-03-31T23:57:36.000Z | tests/components/lcn/test_device_trigger.py | MrDelik/core | 93a66cc357b226389967668441000498a10453bb | ["Apache-2.0"] | 11,411 | 2020-03-02T14:19:20.000Z | 2022-03-31T22:46:07.000Z |
"""Tests for LCN device triggers."""
from pypck.inputs import ModSendKeysHost, ModStatusAccessControl
from pypck.lcn_addr import LcnAddr
from pypck.lcn_defs import AccessControlPeriphery, KeyAction, SendKeyCommand
import voluptuous_serialize
from homeassistant.components import automation
from homeassistant.components.device_automation import DeviceAutomationType
from homeassistant.components.lcn import device_trigger
from homeassistant.components.lcn.const import DOMAIN, KEY_ACTIONS, SENDKEYS
from homeassistant.const import CONF_DEVICE_ID, CONF_DOMAIN, CONF_PLATFORM, CONF_TYPE
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.setup import async_setup_component
from .conftest import get_device
from tests.common import assert_lists_same, async_get_device_automations
async def test_get_triggers_module_device(hass, entry, lcn_connection):
"""Test we get the expected triggers from a LCN module device."""
device = get_device(hass, entry, (0, 7, False))
expected_triggers = [
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "transmitter",
CONF_DEVICE_ID: device.id,
},
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "transponder",
CONF_DEVICE_ID: device.id,
},
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "fingerprint",
CONF_DEVICE_ID: device.id,
},
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "send_keys",
CONF_DEVICE_ID: device.id,
},
]
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
assert_lists_same(triggers, expected_triggers)
async def test_get_triggers_non_module_device(hass, entry, lcn_connection):
"""Test we get the expected triggers from a LCN non-module device."""
not_included_types = ("transmitter", "transponder", "fingerprint", "send_keys")
device_registry = dr.async_get(hass)
host_device = device_registry.async_get_device({(DOMAIN, entry.entry_id)})
group_device = get_device(hass, entry, (0, 5, True))
resource_device = device_registry.async_get_device(
{(DOMAIN, f"{entry.entry_id}-m000007-output1")}
)
for device in (host_device, group_device, resource_device):
triggers = await async_get_device_automations(
hass, DeviceAutomationType.TRIGGER, device.id
)
for trigger in triggers:
assert trigger[CONF_TYPE] not in not_included_types
async def test_if_fires_on_transponder_event(hass, calls, entry, lcn_connection):
"""Test for transponder event triggers firing."""
address = (0, 7, False)
device = get_device(hass, entry, address)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device.id,
CONF_TYPE: "transponder",
},
"action": {
"service": "test.automation",
"data_template": {
"test": "test_trigger_transponder",
"code": "{{ trigger.event.data.code }}",
},
},
},
]
},
)
inp = ModStatusAccessControl(
LcnAddr(*address),
periphery=AccessControlPeriphery.TRANSPONDER,
code="aabbcc",
)
await lcn_connection.async_process_input(inp)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == {
"test": "test_trigger_transponder",
"code": "aabbcc",
}
async def test_if_fires_on_fingerprint_event(hass, calls, entry, lcn_connection):
"""Test for fingerprint event triggers firing."""
address = (0, 7, False)
device = get_device(hass, entry, address)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device.id,
CONF_TYPE: "fingerprint",
},
"action": {
"service": "test.automation",
"data_template": {
"test": "test_trigger_fingerprint",
"code": "{{ trigger.event.data.code }}",
},
},
},
]
},
)
inp = ModStatusAccessControl(
LcnAddr(*address),
periphery=AccessControlPeriphery.FINGERPRINT,
code="aabbcc",
)
await lcn_connection.async_process_input(inp)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == {
"test": "test_trigger_fingerprint",
"code": "aabbcc",
}
async def test_if_fires_on_transmitter_event(hass, calls, entry, lcn_connection):
"""Test for transmitter event triggers firing."""
address = (0, 7, False)
device = get_device(hass, entry, address)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device.id,
CONF_TYPE: "transmitter",
},
"action": {
"service": "test.automation",
"data_template": {
"test": "test_trigger_transmitter",
"code": "{{ trigger.event.data.code }}",
"level": "{{ trigger.event.data.level }}",
"key": "{{ trigger.event.data.key }}",
"action": "{{ trigger.event.data.action }}",
},
},
},
]
},
)
inp = ModStatusAccessControl(
LcnAddr(*address),
periphery=AccessControlPeriphery.TRANSMITTER,
code="aabbcc",
level=0,
key=0,
action=KeyAction.HIT,
)
await lcn_connection.async_process_input(inp)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == {
"test": "test_trigger_transmitter",
"code": "aabbcc",
"level": 0,
"key": 0,
"action": "hit",
}
async def test_if_fires_on_send_keys_event(hass, calls, entry, lcn_connection):
"""Test for send_keys event triggers firing."""
address = (0, 7, False)
device = get_device(hass, entry, address)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_DEVICE_ID: device.id,
CONF_TYPE: "send_keys",
},
"action": {
"service": "test.automation",
"data_template": {
"test": "test_trigger_send_keys",
"key": "{{ trigger.event.data.key }}",
"action": "{{ trigger.event.data.action }}",
},
},
},
]
},
)
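    # Simulate the module sending key A1 with a "hit" action; the
    # automation above should fire exactly once.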
inp = ModSendKeysHost(
LcnAddr(*address),
actions=[SendKeyCommand.HIT, SendKeyCommand.DONTSEND, SendKeyCommand.DONTSEND],
keys=[True, False, False, False, False, False, False, False],
)
await lcn_connection.async_process_input(inp)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data == {
"test": "test_trigger_send_keys",
"key": "a1",
"action": "hit",
}
async def test_get_transponder_trigger_capabilities(hass, entry, lcn_connection):
"""Test we get the expected capabilities from a transponder device trigger."""
address = (0, 7, False)
device = get_device(hass, entry, address)
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "transponder",
CONF_DEVICE_ID: device.id,
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "code", "optional": True, "type": "string", "lower": True}]
async def test_get_fingerprint_trigger_capabilities(hass, entry, lcn_connection):
"""Test we get the expected capabilities from a fingerprint device trigger."""
address = (0, 7, False)
device = get_device(hass, entry, address)
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "fingerprint",
CONF_DEVICE_ID: device.id,
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "code", "optional": True, "type": "string", "lower": True}]
async def test_get_transmitter_trigger_capabilities(hass, entry, lcn_connection):
"""Test we get the expected capabilities from a transmitter device trigger."""
address = (0, 7, False)
device = get_device(hass, entry, address)
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "transmitter",
CONF_DEVICE_ID: device.id,
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{"name": "code", "type": "string", "optional": True, "lower": True},
{"name": "level", "type": "integer", "optional": True, "valueMin": 0},
{"name": "key", "type": "integer", "optional": True, "valueMin": 0},
{
"name": "action",
"type": "select",
"optional": True,
"options": [("hit", "hit"), ("make", "make"), ("break", "break")],
},
]
async def test_get_send_keys_trigger_capabilities(hass, entry, lcn_connection):
"""Test we get the expected capabilities from a send_keys device trigger."""
address = (0, 7, False)
device = get_device(hass, entry, address)
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "send_keys",
CONF_DEVICE_ID: device.id,
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"name": "key",
"type": "select",
"optional": True,
"options": [(send_key.lower(), send_key.lower()) for send_key in SENDKEYS],
},
{
"name": "action",
"type": "select",
"options": [
(key_action.lower(), key_action.lower()) for key_action in KEY_ACTIONS
],
"optional": True,
},
]
async def test_unknown_trigger_capabilities(hass, entry, lcn_connection):
"""Test we get empty capabilities if trigger is unknown."""
address = (0, 7, False)
device = get_device(hass, entry, address)
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
CONF_PLATFORM: "device",
CONF_DOMAIN: DOMAIN,
CONF_TYPE: "dummy",
CONF_DEVICE_ID: device.id,
},
)
assert capabilities == {}
| 32.759591 | 87 | 0.561636 |
aced82762c72867ce32f8866a8f2c4fd42e18a57 | 4,755 | py | Python | airflow/www/extensions/init_views.py | joshowen/airflow | d0cf232919839d0e338dcc38a5c7a1841077eaae | [
"Apache-2.0"
] | 1 | 2020-08-25T05:55:54.000Z | 2020-08-25T05:55:54.000Z | airflow/www/extensions/init_views.py | joshowen/airflow | d0cf232919839d0e338dcc38a5c7a1841077eaae | [
"Apache-2.0"
] | null | null | null | airflow/www/extensions/init_views.py | joshowen/airflow | d0cf232919839d0e338dcc38a5c7a1841077eaae | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
from os import path
import connexion
from connexion import ProblemException
from flask import Flask
log = logging.getLogger(__name__)
# airflow/www/extensions/init_views.py => airflow/
ROOT_APP_DIR = path.abspath(path.join(path.dirname(__file__), path.pardir, path.pardir))
def init_flash_views(app):
"""Init main app view - redirect to FAB"""
from airflow.www.blueprints import routes
app.register_blueprint(routes)
def init_appbuilder_views(app):
"""Initialize Web UI views"""
appbuilder = app.appbuilder
from airflow.www import views
# Remove the session from scoped_session registry to avoid
# reusing a session with a disconnected connection
appbuilder.session.remove()
appbuilder.add_view_no_menu(views.Airflow())
appbuilder.add_view_no_menu(views.DagModelView())
appbuilder.add_view(views.DagRunModelView, "DAG Runs", category="Browse", category_icon="fa-globe")
appbuilder.add_view(views.JobModelView, "Jobs", category="Browse")
appbuilder.add_view(views.LogModelView, "Logs", category="Browse")
appbuilder.add_view(views.SlaMissModelView, "SLA Misses", category="Browse")
appbuilder.add_view(views.TaskInstanceModelView, "Task Instances", category="Browse")
appbuilder.add_view(views.ConfigurationView, "Configurations", category="Admin", category_icon="fa-user")
appbuilder.add_view(views.ConnectionModelView, "Connections", category="Admin")
appbuilder.add_view(views.PoolModelView, "Pools", category="Admin")
appbuilder.add_view(views.VariableModelView, "Variables", category="Admin")
appbuilder.add_view(views.XComModelView, "XComs", category="Admin")
appbuilder.add_view(views.VersionView, 'Version', category='About', category_icon='fa-th')
def init_plugins(app):
"""Integrate Flask and FAB with plugins"""
from airflow import plugins_manager
plugins_manager.initialize_web_ui_plugins()
appbuilder = app.appbuilder
for view in plugins_manager.flask_appbuilder_views:
log.debug("Adding view %s", view["name"])
appbuilder.add_view(view["view"], view["name"], category=view["category"])
for menu_link in sorted(plugins_manager.flask_appbuilder_menu_links, key=lambda x: x["name"]):
log.debug("Adding menu link %s", menu_link["name"])
appbuilder.add_link(
menu_link["name"],
href=menu_link["href"],
category=menu_link["category"],
category_icon=menu_link["category_icon"],
)
for blue_print in plugins_manager.flask_blueprints:
log.debug("Adding blueprint %s:%s", blue_print["name"], blue_print["blueprint"].import_name)
app.register_blueprint(blue_print["blueprint"])
def init_error_handlers(app: Flask):
"""Add custom errors handlers"""
from airflow.www import views
app.register_error_handler(500, views.show_traceback)
app.register_error_handler(404, views.circles)
def init_api_connexion(app: Flask) -> None:
"""Initialize Stable API"""
spec_dir = path.join(ROOT_APP_DIR, 'api_connexion', 'openapi')
connexion_app = connexion.App(__name__, specification_dir=spec_dir, skip_error_handlers=True)
connexion_app.app = app
api_bp = connexion_app.add_api(
specification='v1.yaml', base_path='/api/v1', validate_responses=True, strict_validation=True
).blueprint
app.register_error_handler(ProblemException, connexion_app.common_error_handler)
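    # REST clients do not send CSRF tokens, so exempt the API blueprint from
    # CSRF protection.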
app.extensions['csrf'].exempt(api_bp)
def init_api_experimental(app):
"""Initialize Experimental API"""
from airflow.www.api.experimental import endpoints
    # Required for testing purposes; otherwise the module retains
    # a link to the default_auth.
if app.config['TESTING']:
import importlib
importlib.reload(endpoints)
app.register_blueprint(endpoints.api_experimental, url_prefix='/api/experimental')
app.extensions['csrf'].exempt(endpoints.api_experimental)
| 39.625 | 109 | 0.741535 |
aced8296bebc80c1e3dabc0a20f6cecfd01f0a5d | 100,215 | py | Python | pyvista/core/pointset.py | sthagen/pyvista | ffba268b285925eb6103c8ff5072fcf1c0212c53 | [
"MIT"
] | 1 | 2022-03-03T05:58:02.000Z | 2022-03-03T05:58:02.000Z | pyvista/core/pointset.py | sthagen/pyvista | ffba268b285925eb6103c8ff5072fcf1c0212c53 | [
"MIT"
] | null | null | null | pyvista/core/pointset.py | sthagen/pyvista | ffba268b285925eb6103c8ff5072fcf1c0212c53 | [
"MIT"
] | null | null | null | """Sub-classes and wrappers for vtk.vtkPointSet."""
from collections.abc import Iterable
from functools import wraps
import logging
import numbers
import os
import pathlib
from textwrap import dedent
from typing import Sequence, Tuple, Union
import warnings
import numpy as np
import pyvista
from pyvista import _vtk
from pyvista.utilities import PyvistaDeprecationWarning, abstract_class
from pyvista.utilities.cells import (
CellArray,
create_mixed_cells,
generate_cell_offsets,
get_mixed_cells,
numpy_to_idarr,
)
from ..utilities.fileio import get_ext
from .dataset import DataSet
from .errors import DeprecationError, VTKVersionError
from .filters import PolyDataFilters, StructuredGridFilters, UnstructuredGridFilters, _get_output
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
DEFAULT_INPLACE_WARNING = (
'You did not specify a value for `inplace` and the default value will '
'be changing to `False` in future versions for point-based meshes (e.g., '
'`PolyData`). Please make sure you are not assuming this to be an inplace '
'operation.'
)
class _PointSet(DataSet):
"""PyVista's equivalent of vtk.vtkPointSet.
This holds methods common to PolyData and UnstructuredGrid.
"""
_WRITERS = {".xyz": _vtk.vtkSimplePointsWriter}
def center_of_mass(self, scalars_weight=False):
"""Return the coordinates for the center of mass of the mesh.
Parameters
----------
scalars_weight : bool, optional
Flag for using the mesh scalars as weights. Defaults to ``False``.
Returns
-------
numpy.ndarray
Coordinates for the center of mass.
Examples
--------
>>> import pyvista
>>> mesh = pyvista.Sphere(center=(1, 1, 1))
>>> mesh.center_of_mass()
array([1., 1., 1.])
"""
alg = _vtk.vtkCenterOfMass()
alg.SetInputDataObject(self)
alg.SetUseScalarsAsWeights(scalars_weight)
alg.Update()
return np.array(alg.GetCenter())
def shallow_copy(self, to_copy):
"""Create a shallow copy from a different dataset into this one.
This method mutates this dataset and returns ``None``.
Parameters
----------
to_copy : pyvista.DataSet
Data object to perform the shallow copy from.
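        Examples
        --------
        A minimal sketch, assuming both meshes are :class:`pyvista.PolyData`:
        >>> import pyvista
        >>> source = pyvista.Sphere()
        >>> target = pyvista.PolyData()
        >>> target.shallow_copy(source)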
"""
# Set default points if needed
if not to_copy.GetPoints():
to_copy.SetPoints(_vtk.vtkPoints())
DataSet.shallow_copy(self, to_copy)
def remove_cells(self, ind, inplace=False):
"""Remove cells.
Parameters
----------
ind : sequence
Cell indices to be removed. The array can also be a
boolean array of the same size as the number of cells.
inplace : bool, optional
Whether to update the mesh in-place.
Returns
-------
pyvista.DataSet
Same type as the input, but with the specified cells
removed.
Examples
--------
Remove 20 cells from an unstructured grid.
>>> from pyvista import examples
>>> import pyvista
>>> hex_mesh = pyvista.read(examples.hexbeamfile)
>>> removed = hex_mesh.remove_cells(range(10, 20))
>>> removed.plot(color='tan', show_edges=True, line_width=3)
"""
if isinstance(ind, np.ndarray):
if ind.dtype == np.bool_ and ind.size != self.n_cells:
raise ValueError(
f'Boolean array size must match the number of cells ({self.n_cells})'
)
ghost_cells = np.zeros(self.n_cells, np.uint8)
ghost_cells[ind] = _vtk.vtkDataSetAttributes.DUPLICATECELL
if inplace:
target = self
else:
target = self.copy()
target.cell_data[_vtk.vtkDataSetAttributes.GhostArrayName()] = ghost_cells
target.RemoveGhostCells()
return target
def points_to_double(self):
"""Convert the points datatype to double precision.
Returns
-------
pyvista.PointSet
Pointset with points in double precision.
Notes
-----
This operates in place.
Examples
--------
Create a mesh that has points of the type ``float32`` and
convert the points to ``float64``.
>>> import pyvista
>>> mesh = pyvista.Sphere()
>>> mesh.points.dtype
dtype('float32')
>>> _ = mesh.points_to_double()
>>> mesh.points.dtype
dtype('float64')
"""
if self.points.dtype != np.double:
self.points = self.points.astype(np.double)
return self
# todo: `transform_all_input_vectors` is not handled when modifying inplace
def translate(
self, xyz: Union[list, tuple, np.ndarray], transform_all_input_vectors=False, inplace=None
):
"""Translate the mesh.
Parameters
----------
xyz : list or tuple or numpy.ndarray
Cartesian values to displace with. Length 3 list, tuple or array.
transform_all_input_vectors : bool, optional
When ``True``, all input vectors are transformed. Otherwise, only
the points, normals and active vectors are transformed. This is
only valid when not updating in place.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
pyvista.PointSet
Translated pointset.
Examples
--------
Create a sphere and translate it by ``(2, 1, 2)``.
>>> import pyvista
>>> mesh = pyvista.Sphere()
>>> mesh.center
[0.0, 0.0, 0.0]
>>> trans = mesh.translate((2, 1, 2), inplace=True)
>>> trans.center
[2.0, 1.0, 2.0]
"""
if inplace is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
inplace = True
if inplace:
self.points += np.asarray(xyz) # type: ignore
return self
return super().translate(
xyz, transform_all_input_vectors=transform_all_input_vectors, inplace=inplace
)
def scale(
self, xyz: Union[list, tuple, np.ndarray], transform_all_input_vectors=False, inplace=None
):
"""Scale the mesh.
Parameters
----------
xyz : list or tuple or numpy.ndarray
Scale factor in x, y, and z directions. Length 3 list, tuple or
array.
transform_all_input_vectors : bool, optional
When ``True``, all input vectors are transformed. Otherwise, only
the points, normals and active vectors are transformed. This is
only valid when not updating in place.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
pyvista.PointSet
Scaled pointset.
Notes
-----
``transform_all_input_vectors`` is not handled when modifying inplace.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.subplot(0, 0)
>>> pl.show_axes()
>>> _ = pl.show_grid()
>>> mesh1 = examples.download_teapot()
>>> _ = pl.add_mesh(mesh1)
>>> pl.subplot(0, 1)
>>> pl.show_axes()
>>> _ = pl.show_grid()
>>> mesh2 = mesh1.scale([10.0, 10.0, 10.0], inplace=False)
>>> _ = pl.add_mesh(mesh2)
>>> pl.show(cpos="xy")
"""
if inplace is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
inplace = True
return super().scale(
xyz, transform_all_input_vectors=transform_all_input_vectors, inplace=inplace
)
@wraps(DataSet.flip_x)
def flip_x(self, *args, **kwargs):
"""Wrap ``DataSet.flip_x``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().flip_x(*args, **kwargs)
@wraps(DataSet.flip_y)
def flip_y(self, *args, **kwargs):
"""Wrap ``DataSet.flip_y``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().flip_y(*args, **kwargs)
@wraps(DataSet.flip_z)
def flip_z(self, *args, **kwargs):
"""Wrap ``DataSet.flip_z``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().flip_z(*args, **kwargs)
@wraps(DataSet.flip_normal)
def flip_normal(self, *args, **kwargs):
"""Wrap ``DataSet.flip_normal``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().flip_normal(*args, **kwargs)
@wraps(DataSet.rotate_x)
def rotate_x(self, *args, **kwargs):
"""Wrap ``DataSet.rotate_x``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().rotate_x(*args, **kwargs)
@wraps(DataSet.rotate_y)
def rotate_y(self, *args, **kwargs):
"""Wrap ``DataSet.rotate_y``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().rotate_y(*args, **kwargs)
@wraps(DataSet.rotate_z)
def rotate_z(self, *args, **kwargs):
"""Wrap ``DataSet.rotate_z``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().rotate_z(*args, **kwargs)
@wraps(DataSet.rotate_vector)
def rotate_vector(self, *args, **kwargs):
"""Wrap ``DataSet.rotate_vector``."""
if kwargs.get('inplace') is None:
# Deprecated on v0.32.0, estimated removal on v0.35.0
warnings.warn(DEFAULT_INPLACE_WARNING, PyvistaDeprecationWarning)
kwargs['inplace'] = True
return super().rotate_vector(*args, **kwargs)
class PointSet(_vtk.vtkPointSet, _PointSet):
"""Concrete class for storing a set of points.
This is a concrete class representing a set of points that specifies the
interface for datasets that explicitly use "point" arrays to represent
geometry. This class is useful for improving the performance of filters on
    point clouds, but not for plotting.
For further details see `VTK: vtkPointSet Details
<https://vtk.org/doc/nightly/html/classvtkPointSet.html#details>`_.
Parameters
----------
points : Sequence, optional
List, numpy array, or sequence containing point locations. Must be an
``(N, 3)`` array of points.
deep : bool, optional
Whether to copy the input ``points``, or to create a PointSet from them
without copying them. Setting ``deep=True`` ensures that the original
arrays can be modified outside the mesh without affecting the
mesh. Default is ``False``.
force_float : bool, optional
Casts the datatype to ``float32`` if points datatype is non-float.
Default ``True``. Set this to ``False`` to allow non-float types,
though this may lead to truncation of intermediate floats when
transforming datasets.
Notes
-----
This class requires ``vtk>=9.1.0``. This is an abstract class in
``vtk<9.1.0`` and cannot be instantiated.
Examples
--------
Create a simple point cloud of 10 points from a numpy array.
>>> import numpy as np
>>> import pyvista
>>> rng = np.random.default_rng()
>>> points = rng.random((10, 3))
>>> pset = pyvista.PointSet(points)
Plot the pointset. Note: this casts to a :class:`pyvista.PolyData`
internally when plotting.
>>> pset.plot(point_size=10)
"""
def __new__(cls, *args, **kwargs):
"""Construct a new PointSet object.
Wrapping this is necessary for us to show an informative error
message when the VTK version is too old, causing PointSet to be
an abstract class. Since we inherit the ``__new__()`` method of
``vtk.vtkPointSet``, we would otherwise see a generic error about
the class being abstract.
"""
if pyvista.vtk_version_info < (9, 1, 0):
raise VTKVersionError("pyvista.PointSet requires VTK >= 9.1.0")
return super().__new__(cls, *args, **kwargs)
def __init__(self, points=None, deep=False, force_float=True):
"""Initialize the pointset."""
super().__init__()
if points is not None:
self.SetPoints(pyvista.vtk_points(points, deep=deep, force_float=force_float))
def __repr__(self):
"""Return the standard representation."""
return DataSet.__repr__(self)
def __str__(self):
"""Return the standard str representation."""
return DataSet.__str__(self)
def cast_to_polydata(self, deep=True):
"""Cast this dataset to polydata.
Parameters
----------
deep : bool, optional
Whether to copy the pointset points, or to create a PolyData
without copying them. Setting ``deep=True`` ensures that the
original arrays can be modified outside the PolyData without
affecting the PolyData. Default is ``True``.
Returns
-------
pyvista.PolyData
PointSet cast to a ``pyvista.PolyData``.
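        Examples
        --------
        A minimal sketch, assuming a small random point cloud:
        >>> import numpy as np
        >>> import pyvista
        >>> pset = pyvista.PointSet(np.random.random((10, 3)))
        >>> pdata = pset.cast_to_polydata()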
"""
pdata = PolyData(self.points, deep=deep)
if deep:
pdata.point_data.update(self.point_data) # update performs deep copy
else:
for key, value in self.point_data.items():
pdata.point_data[key] = value
return pdata
@wraps(DataSet.plot) # type: ignore
def plot(self, *args, **kwargs):
"""Cast to PolyData and plot."""
pdata = self.cast_to_polydata(deep=False)
kwargs.setdefault('style', 'points')
return pdata.plot(*args, **kwargs)
class PolyData(_vtk.vtkPolyData, _PointSet, PolyDataFilters):
"""Dataset consisting of surface geometry (e.g. vertices, lines, and polygons).
Can be initialized in several ways:
- Create an empty mesh
- Initialize from a vtk.vtkPolyData
- Using vertices
- Using vertices and faces
- From a file
Parameters
----------
var_inp : vtk.vtkPolyData, str, sequence, optional
Flexible input type. Can be a ``vtk.vtkPolyData``, in which case
this PolyData object will be copied if ``deep=True`` and will
be a shallow copy if ``deep=False``.
Also accepts a path, which may be local path as in
``'my_mesh.stl'`` or global path like ``'/tmp/my_mesh.ply'``
or ``'C:/Users/user/my_mesh.ply'``.
Otherwise, this must be a points array or list containing one
or more points. Each point must have 3 dimensions.
faces : sequence, optional
Face connectivity array. Faces must contain padding
indicating the number of points in the face. For example, the
two faces ``[10, 11, 12]`` and ``[20, 21, 22, 23]`` will be
represented as ``[3, 10, 11, 12, 4, 20, 21, 22, 23]``. This
lets you have an arbitrary number of points per face.
When not including the face connectivity array, each point
will be assigned to a single vertex. This is used for point
clouds that have no connectivity.
n_faces : int, optional
Number of faces in the ``faces`` connectivity array. While
optional, setting this speeds up the creation of the
``PolyData``.
lines : sequence, optional
The line connectivity array. Like ``faces``, this array
requires padding indicating the number of points in a line
segment. For example, the two line segments ``[0, 1]`` and
``[1, 2, 3, 4]`` will be represented as
``[2, 0, 1, 4, 1, 2, 3, 4]``.
n_lines : int, optional
Number of lines in the ``lines`` connectivity array. While
optional, setting this speeds up the creation of the
``PolyData``.
deep : bool, optional
Whether to copy the inputs, or to create a mesh from them
without copying them. Setting ``deep=True`` ensures that the
original arrays can be modified outside the mesh without
affecting the mesh. Default is ``False``.
force_ext : str, optional
If initializing from a file, force the reader to treat the
file as if it had this extension as opposed to the one in the
file.
force_float : bool, optional
Casts the datatype to ``float32`` if points datatype is
non-float. Default ``True``. Set this to ``False`` to allow
non-float types, though this may lead to truncation of
intermediate floats when transforming datasets.
Examples
--------
>>> import vtk
>>> import numpy as np
>>> from pyvista import examples
>>> import pyvista
Create an empty mesh.
>>> mesh = pyvista.PolyData()
Initialize from a ``vtk.vtkPolyData`` object.
>>> vtkobj = vtk.vtkPolyData()
>>> mesh = pyvista.PolyData(vtkobj)
Initialize from just vertices.
>>> vertices = np.array([[0, 0, 0], [1, 0, 0], [1, 0.5, 0], [0, 0.5, 0]])
>>> mesh = pyvista.PolyData(vertices)
Initialize from vertices and faces.
>>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2]])
>>> mesh = pyvista.PolyData(vertices, faces)
Initialize from vertices and lines.
>>> lines = np.hstack([[2, 0, 1], [2, 1, 2]])
>>> mesh = pyvista.PolyData(vertices, lines=lines)
Initialize from a filename.
>>> mesh = pyvista.PolyData(examples.antfile)
See :ref:`ref_create_poly` for more examples.
"""
_WRITERS = {
'.ply': _vtk.vtkPLYWriter,
'.vtp': _vtk.vtkXMLPolyDataWriter,
'.stl': _vtk.vtkSTLWriter,
'.vtk': _vtk.vtkPolyDataWriter,
}
def __init__(
self,
var_inp=None,
faces=None,
n_faces=None,
lines=None,
n_lines=None,
deep=False,
force_ext=None,
force_float=True,
) -> None:
"""Initialize the polydata."""
local_parms = locals()
super().__init__()
# allow empty input
if var_inp is None:
return
# filename
opt_kwarg = ['faces', 'n_faces', 'lines', 'n_lines']
if isinstance(var_inp, (str, pathlib.Path)):
for kwarg in opt_kwarg:
if local_parms[kwarg]:
raise ValueError(
'No other arguments should be set when first parameter is a string'
)
self._from_file(var_inp, force_ext=force_ext) # is filename
return
# PolyData-like
if isinstance(var_inp, _vtk.vtkPolyData):
for kwarg in opt_kwarg:
if local_parms[kwarg]:
raise ValueError(
'No other arguments should be set when first parameter is a PolyData'
)
if deep:
self.deep_copy(var_inp)
else:
self.shallow_copy(var_inp)
return
# First parameter is points
if isinstance(var_inp, (np.ndarray, list, _vtk.vtkDataArray)):
self.SetPoints(pyvista.vtk_points(var_inp, deep=deep, force_float=force_float))
else:
msg = f"""
Invalid Input type:
Expected first argument to be either a:
- vtk.PolyData
- pyvista.PolyData
- numeric numpy.ndarray (1 or 2 dimensions)
- List (flat or nested with 3 points per vertex)
- vtk.vtkDataArray
Instead got: {type(var_inp)}"""
raise TypeError(dedent(msg.strip('\n')))
# At this point, points have been setup, add faces and/or lines
if faces is None and lines is None:
# one cell per point (point cloud case)
verts = self._make_vertex_cells(self.n_points)
self.verts = CellArray(verts, self.n_points, deep)
elif faces is not None:
# here we use CellArray since we must specify deep and n_faces
self.faces = CellArray(faces, n_faces, deep)
# can always set lines
if lines is not None:
# here we use CellArray since we must specify deep and n_lines
self.lines = CellArray(lines, n_lines, deep)
def _post_file_load_processing(self):
"""Execute after loading a PolyData from file."""
# When loading files with just point arrays, create and
# set the polydata vertices
if self.n_points > 0 and self.n_cells == 0:
verts = self._make_vertex_cells(self.n_points)
self.verts = CellArray(verts, self.n_points, deep=False)
def __repr__(self):
"""Return the standard representation."""
return DataSet.__repr__(self)
def __str__(self):
"""Return the standard str representation."""
return DataSet.__str__(self)
@staticmethod
    def _make_vertex_cells(npoints):
        """Build a vertex cell array referencing each of ``npoints`` points."""
cells = np.empty((npoints, 2), dtype=pyvista.ID_TYPE)
cells[:, 0] = 1
cells[:, 1] = np.arange(npoints, dtype=pyvista.ID_TYPE)
return cells
@property
def verts(self) -> np.ndarray:
"""Get the vertex cells.
Returns
-------
numpy.ndarray
Array of vertex cell indices.
Examples
--------
Create a point cloud polydata and return the vertex cells.
>>> import pyvista
>>> import numpy as np
>>> points = np.random.random((5, 3))
>>> pdata = pyvista.PolyData(points)
>>> pdata.verts
array([1, 0, 1, 1, 1, 2, 1, 3, 1, 4])
Set vertex cells. Note how the mesh plots both the surface
mesh and the additional vertices in a single plot.
>>> mesh = pyvista.Plane(i_resolution=3, j_resolution=3)
>>> mesh.verts = np.vstack((np.ones(mesh.n_points, dtype=np.int64),
... np.arange(mesh.n_points))).T
>>> mesh.plot(color='tan', render_points_as_spheres=True, point_size=60)
"""
return _vtk.vtk_to_numpy(self.GetVerts().GetData())
@verts.setter
def verts(self, verts):
"""Set the vertex cells."""
if isinstance(verts, CellArray):
self.SetVerts(verts)
else:
self.SetVerts(CellArray(verts))
@property
def lines(self) -> np.ndarray:
"""Return a pointer to the lines as a numpy array.
Examples
--------
Return the lines from a spline.
>>> import pyvista
>>> import numpy as np
>>> points = np.random.random((3, 3))
>>> spline = pyvista.Spline(points, 10)
>>> spline.lines
array([10, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
return _vtk.vtk_to_numpy(self.GetLines().GetData()).ravel()
@lines.setter
def lines(self, lines):
"""Set the lines of the polydata."""
if isinstance(lines, CellArray):
self.SetLines(lines)
else:
self.SetLines(CellArray(lines))
@property
def faces(self) -> np.ndarray:
"""Return a pointer to the faces as a numpy array.
Returns
-------
numpy.ndarray
Array of face indices.
Examples
--------
>>> import pyvista as pv
>>> plane = pv.Plane(i_resolution=2, j_resolution=2)
>>> plane.faces
array([4, 0, 1, 4, 3, 4, 1, 2, 5, 4, 4, 3, 4, 7, 6, 4, 4, 5, 8, 7])
Note how the faces contain a "padding" indicating the number
of points per face:
>>> plane.faces.reshape(-1, 5)
array([[4, 0, 1, 4, 3],
[4, 1, 2, 5, 4],
[4, 3, 4, 7, 6],
[4, 4, 5, 8, 7]])
"""
return _vtk.vtk_to_numpy(self.GetPolys().GetData())
@faces.setter
def faces(self, faces):
"""Set the face cells."""
if isinstance(faces, CellArray):
self.SetPolys(faces)
else:
# TODO: faster to mutate in-place if array is same size?
self.SetPolys(CellArray(faces))
@property
def is_all_triangles(self):
"""Return if all the faces of the :class:`pyvista.PolyData` are triangles.
.. versionchanged:: 0.32.0
``is_all_triangles`` is now a property. Calling this value
will warn the user that this should not be called.
            Additionally, the ``is`` operator will not work with the return
            value of this property since it is not a ``bool``.
Returns
-------
CallableBool
``True`` if all the faces of the :class:`pyvista.PolyData`
are triangles and does not contain any vertices or lines.
Notes
-----
The return value is not a ``bool`` for compatibility
reasons, though this behavior will change in a future
release. Future versions will simply return a ``bool``.
Examples
--------
Show a mesh from :func:`pyvista.Plane` is not composed of all
triangles.
>>> import pyvista
>>> plane = pyvista.Plane()
>>> plane.is_all_triangles
False <CallableBool>
Show that the mesh from :func:`pyvista.Sphere` contains only
triangles.
>>> sphere = pyvista.Sphere()
>>> sphere.is_all_triangles
True <CallableBool>
"""
class CallableBool(int): # pragma: no cover
"""Boolean that can be called.
Programmer note: We must subclass int and not bool
https://stackoverflow.com/questions/2172189/why-i-cant-extend-bool-in-python
Implemented for backwards compatibility as
``is_all_triangles`` was changed to be a property in
``0.32.0``.
"""
def __new__(cls, value):
"""Use new instead of __init__.
See:
https://jfine-python-classes.readthedocs.io/en/latest/subclass-int.html#emulating-bool-using-new
"""
return int.__new__(cls, bool(value))
def __call__(self):
"""Return a ``bool`` of self."""
warnings.warn(
'``is_all_triangles`` is now property as of 0.32.0 and does not need ()',
DeprecationWarning,
)
return bool(self)
def __repr__(self):
"""Return the string of bool."""
return f'{bool(self)} <CallableBool>'
# Need to make sure there are only face cells and no lines/verts
if not self.n_faces or self.n_lines or self.n_verts:
return CallableBool(False)
# in VTK9, they use connectivity and offset rather than cell
# data. Use the new API as this is faster
if _vtk.VTK9:
# early return if not all triangular
if self._connectivity_array.size % 3:
return CallableBool(False)
# next, check if there are three points per face
return CallableBool((np.diff(self._offset_array) == 3).all())
else: # pragma: no cover
# All we have are faces, check if all faces are indeed triangles
faces = self.faces # grab once as this takes time to build
if faces.size % 4 == 0:
return CallableBool((faces[::4] == 3).all())
return CallableBool(False)
def __sub__(self, cutting_mesh):
"""Compute boolean difference of two meshes."""
return self.boolean_difference(cutting_mesh)
@property
def _offset_array(self):
"""Return the array used to store cell offsets."""
try:
return _vtk.vtk_to_numpy(self.GetPolys().GetOffsetsArray())
except AttributeError: # pragma: no cover
raise VTKVersionError('Offset array implemented in VTK 9 or newer.')
@property
def _connectivity_array(self):
"""Return the array with the point ids that define the cells connectivity."""
try:
return _vtk.vtk_to_numpy(self.GetPolys().GetConnectivityArray())
except AttributeError: # pragma: no cover
raise VTKVersionError('Connectivity array implemented in VTK 9 or newer.')
@property
def n_lines(self) -> int:
"""Return the number of lines.
Examples
--------
>>> import pyvista
>>> mesh = pyvista.Line()
>>> mesh.n_lines
1
"""
return self.GetNumberOfLines()
@property
def n_verts(self) -> int:
"""Return the number of vertices.
Examples
--------
Create a simple mesh containing just two points and return the
number of vertices.
>>> import pyvista
>>> mesh = pyvista.PolyData([[1.0, 0.0, 0.0], [1.0, 1.0, 1.0]])
>>> mesh.n_verts
2
"""
return self.GetNumberOfVerts()
@property
def n_faces(self) -> int:
"""Return the number of cells.
Alias for ``n_cells``.
Examples
--------
>>> import pyvista
>>> plane = pyvista.Plane(i_resolution=2, j_resolution=2)
>>> plane.n_faces
4
"""
return self.n_cells
@property
def number_of_faces(self): # pragma: no cover
"""Return the number of cells."""
raise DeprecationError('``number_of_faces`` has been deprecated. Please use ``n_faces``')
def save(self, filename, binary=True, texture=None):
"""Write a surface mesh to disk.
Written file may be an ASCII or binary ply, stl, or vtk mesh
file. If ply or stl format is chosen, the face normals are
computed in place to ensure the mesh is properly saved.
Parameters
----------
filename : str
Filename of mesh to be written. File type is inferred from
the extension of the filename unless overridden with
            ftype. Can be one of the following supported types:
            ``'.ply'``, ``'.stl'``, or ``'.vtk'``.
binary : bool, optional
Writes the file as binary when ``True`` and ASCII when ``False``.
texture : str, numpy.ndarray, optional
Write a single texture array to file when using a PLY
file. Texture array must be a 3 or 4 component array with
the datatype ``np.uint8``. Array may be a cell array or a
point array, and may also be a string if the array already
exists in the PolyData.
If a string is provided, the texture array will be saved
to disk as that name. If an array is provided, the
texture array will be saved as ``'RGBA'`` if the array
contains an alpha channel (i.e. 4 component array), or
as ``'RGB'`` if the array is just a 3 component array.
.. note::
This feature is only available when saving PLY files.
Notes
-----
Binary files write much faster than ASCII and have a smaller
file size.
Examples
--------
Save a mesh as a STL.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.save('my_mesh.stl') # doctest:+SKIP
Save a mesh as a PLY.
>>> sphere = pyvista.Sphere()
>>> sphere.save('my_mesh.ply') # doctest:+SKIP
Save a mesh as a PLY with a texture array. Here we also
create a simple RGB array representing the texture.
>>> import numpy as np
>>> sphere = pyvista.Sphere()
>>> texture = np.zeros((sphere.n_points, 3), np.uint8)
>>> texture[:, 1] = np.arange(sphere.n_points)[::-1] # just blue channel
>>> sphere.point_data['my_texture'] = texture
>>> sphere.save('my_mesh.ply', texture='my_texture') # doctest:+SKIP
Alternatively, provide just the texture array. This will be
written to the file as ``'RGB'`` since it does not contain an
alpha channel.
>>> sphere.save('my_mesh.ply', texture=texture) # doctest:+SKIP
Save a mesh as a VTK file.
>>> sphere = pyvista.Sphere()
>>> sphere.save('my_mesh.vtk') # doctest:+SKIP
"""
filename = os.path.abspath(os.path.expanduser(str(filename)))
ftype = get_ext(filename)
        # Recompute normals prior to save. Corrects a bug where some
# triangular meshes are not saved correctly
if ftype in ['.stl', '.ply']:
self.compute_normals(inplace=True)
# validate texture
if ftype == '.ply' and texture is not None:
if isinstance(texture, str):
if self[texture].dtype != np.uint8:
raise ValueError(
f'Invalid datatype {self[texture].dtype} of texture array "{texture}"'
)
elif isinstance(texture, np.ndarray):
if texture.dtype != np.uint8:
raise ValueError(f'Invalid datatype {texture.dtype} of texture array')
else:
raise TypeError(
f'Invalid type {type(texture)} for texture. '
'Should be either a string representing a point or '
'cell array, or a numpy array.'
)
super().save(filename, binary, texture=texture)
@property
def area(self) -> float:
"""Return the mesh surface area.
Returns
-------
float
Total area of the mesh.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.area
3.126
"""
areas = self.compute_cell_sizes(
length=False,
area=True,
volume=False,
)["Area"]
return np.sum(areas)
@property
def volume(self) -> float:
"""Return the volume of the dataset.
This will throw a VTK error/warning if not a closed surface.
Returns
-------
float
Total volume of the mesh.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.volume
0.5183
"""
mprop = _vtk.vtkMassProperties()
mprop.SetInputData(self.triangulate())
return mprop.GetVolume()
@property
def point_normals(self) -> 'pyvista.pyvista_ndarray':
"""Return the point normals.
If the point data already contains an array named ``'Normals'``, this array will be returned. Otherwise, the
normals will be computed using the default options of :func:`PolyData.compute_normals()` and returned.
Returns
-------
pyvista.pyvista_ndarray
Array of point normals.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.point_normals # doctest:+SKIP
pyvista_ndarray([[-2.48721432e-10, -1.08815623e-09, -1.00000000e+00],
[-2.48721432e-10, -1.08815623e-09, 1.00000000e+00],
[-1.18888125e-01, 3.40539310e-03, -9.92901802e-01],
...,
[-3.11940581e-01, -6.81432486e-02, 9.47654784e-01],
[-2.09880397e-01, -4.65070531e-02, 9.76620376e-01],
[-1.15582108e-01, -2.80492082e-02, 9.92901802e-01]],
dtype=float32)
"""
if 'Normals' in self.point_data:
normals = self.point_data['Normals']
else:
normals = self.compute_normals(cell_normals=False, inplace=False).point_data['Normals']
return normals
@property
def cell_normals(self) -> 'pyvista.pyvista_ndarray':
"""Return the cell normals.
If the cell data already contains an array named ``'Normals'``, this array will be returned. Otherwise, the
normals will be computed using the default options of :func:`PolyData.compute_normals()` and returned.
Returns
-------
pyvista.pyvista_ndarray
Array of cell normals.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.cell_normals # doctest:+SKIP
pyvista_ndarray([[-0.05413816, 0.00569015, -0.9985172 ],
[-0.05177207, 0.01682176, -0.9985172 ],
[-0.04714328, 0.02721819, -0.9985172 ],
...,
[-0.26742265, -0.02810723, 0.96316934],
[-0.1617585 , -0.01700151, 0.9866839 ],
[-0.1617585 , -0.01700151, 0.9866839 ]], dtype=float32)
"""
if 'Normals' in self.cell_data:
normals = self.cell_data['Normals']
else:
normals = self.compute_normals(point_normals=False, inplace=False).cell_data['Normals']
return normals
@property
def face_normals(self) -> 'pyvista.pyvista_ndarray':
"""Return the cell normals.
Alias to :func:`PolyData.cell_normals`.
Returns
-------
pyvista.pyvista_ndarray
Array of face normals.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.face_normals # doctest:+SKIP
pyvista_ndarray([[-0.05413816, 0.00569015, -0.9985172 ],
[-0.05177207, 0.01682176, -0.9985172 ],
[-0.04714328, 0.02721819, -0.9985172 ],
...,
[-0.26742265, -0.02810723, 0.96316934],
[-0.1617585 , -0.01700151, 0.9866839 ],
[-0.1617585 , -0.01700151, 0.9866839 ]], dtype=float32)
"""
return self.cell_normals
@property
def obbTree(self):
"""Return the obbTree of the polydata.
An obbTree is an object to generate oriented bounding box (OBB)
trees. An oriented bounding box is a bounding box that does not
necessarily line up along coordinate axes. The OBB tree is a
hierarchical tree structure of such boxes, where deeper levels of OBB
confine smaller regions of space.
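        Examples
        --------
        A minimal sketch; the returned ``vtk.vtkOBBTree`` exposes the usual
        VTK API (e.g. ``IntersectWithLine``) directly:
        >>> import pyvista
        >>> sphere = pyvista.Sphere()
        >>> tree = sphere.obbTree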
"""
if not hasattr(self, '_obbTree'):
self._obbTree = _vtk.vtkOBBTree()
self._obbTree.SetDataSet(self)
self._obbTree.BuildLocator()
return self._obbTree
@property
def n_open_edges(self) -> int:
"""Return the number of open edges on this mesh.
Examples
--------
Return the number of open edges on a sphere.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere.n_open_edges
0
Return the number of open edges on a plane.
>>> plane = pyvista.Plane(i_resolution=1, j_resolution=1)
>>> plane.n_open_edges
4
"""
alg = _vtk.vtkFeatureEdges()
alg.FeatureEdgesOff()
alg.BoundaryEdgesOn()
alg.NonManifoldEdgesOn()
alg.SetInputDataObject(self)
alg.Update()
return alg.GetOutput().GetNumberOfCells()
@property
def is_manifold(self) -> bool:
"""Return if the mesh is manifold (no open edges).
Examples
--------
Show a sphere is manifold.
>>> import pyvista
>>> pyvista.Sphere().is_manifold
True
Show a plane is not manifold.
>>> pyvista.Plane().is_manifold
False
"""
return self.n_open_edges == 0
def __del__(self):
"""Delete the object."""
if hasattr(self, '_obbTree'):
del self._obbTree
@abstract_class
class PointGrid(_PointSet):
"""Class in common with structured and unstructured grids."""
def __init__(self, *args, **kwargs) -> None:
"""Initialize the point grid."""
super().__init__()
def plot_curvature(self, curv_type='mean', **kwargs):
"""Plot the curvature of the external surface of the grid.
Parameters
----------
curv_type : str, optional
One of the following strings indicating curvature types.
- ``'mean'``
- ``'gaussian'``
- ``'maximum'``
- ``'minimum'``
**kwargs : dict, optional
Optional keyword arguments. See :func:`pyvista.plot`.
Returns
-------
list
Camera position, focal point, and view up. Returned when
``return_cpos`` is ``True``.
"""
trisurf = self.extract_surface().triangulate()
return trisurf.plot_curvature(curv_type, **kwargs)
@property
def volume(self) -> float:
"""Compute the volume of the point grid.
This extracts the external surface and computes the interior
volume.
"""
surf = self.extract_surface().triangulate()
return surf.volume
class UnstructuredGrid(_vtk.vtkUnstructuredGrid, PointGrid, UnstructuredGridFilters):
"""Dataset used for arbitrary combinations of all possible cell types.
Can be initialized by the following:
- Creating an empty grid
- From a ``vtk.vtkPolyData`` or ``vtk.vtkStructuredGrid`` object
- From cell, offset, and node arrays
- From a file
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> import vtk
Create an empty grid
>>> grid = pyvista.UnstructuredGrid()
Copy a vtkUnstructuredGrid
>>> vtkgrid = vtk.vtkUnstructuredGrid()
>>> grid = pyvista.UnstructuredGrid(vtkgrid) # Initialize from a vtkUnstructuredGrid
>>> # from arrays (vtk9)
>>> #grid = pyvista.UnstructuredGrid(cells, celltypes, points)
>>> # from arrays (vtk<9)
>>> #grid = pyvista.UnstructuredGrid(offset, cells, celltypes, points)
From a string filename
>>> grid = pyvista.UnstructuredGrid(examples.hexbeamfile)
"""
_WRITERS = {'.vtu': _vtk.vtkXMLUnstructuredGridWriter, '.vtk': _vtk.vtkUnstructuredGridWriter}
def __init__(self, *args, **kwargs) -> None:
"""Initialize the unstructured grid."""
super().__init__()
deep = kwargs.pop('deep', False)
if not len(args):
return
if len(args) == 1:
if isinstance(args[0], _vtk.vtkUnstructuredGrid):
if deep:
self.deep_copy(args[0])
else:
self.shallow_copy(args[0])
elif isinstance(args[0], (str, pathlib.Path)):
self._from_file(args[0], **kwargs)
elif isinstance(args[0], (_vtk.vtkStructuredGrid, _vtk.vtkPolyData)):
vtkappend = _vtk.vtkAppendFilter()
vtkappend.AddInputData(args[0])
vtkappend.Update()
self.shallow_copy(vtkappend.GetOutput())
else:
itype = type(args[0])
raise TypeError(f'Cannot work with input type {itype}')
# Cell dictionary creation
elif len(args) == 2 and isinstance(args[0], dict) and isinstance(args[1], np.ndarray):
self._from_cells_dict(args[0], args[1], deep)
self._check_for_consistency()
elif len(args) == 3: # and VTK9:
arg0_is_arr = isinstance(args[0], np.ndarray)
arg1_is_arr = isinstance(args[1], np.ndarray)
arg2_is_arr = isinstance(args[2], np.ndarray)
if all([arg0_is_arr, arg1_is_arr, arg2_is_arr]):
self._from_arrays(None, args[0], args[1], args[2], deep, **kwargs)
self._check_for_consistency()
else:
raise TypeError('All input types must be np.ndarray')
elif len(args) == 4:
arg0_is_arr = isinstance(args[0], np.ndarray)
arg1_is_arr = isinstance(args[1], np.ndarray)
arg2_is_arr = isinstance(args[2], np.ndarray)
arg3_is_arr = isinstance(args[3], np.ndarray)
if all([arg0_is_arr, arg1_is_arr, arg2_is_arr, arg3_is_arr]):
self._from_arrays(args[0], args[1], args[2], args[3], deep)
self._check_for_consistency()
else:
raise TypeError('All input types must be np.ndarray')
else:
err_msg = (
'Invalid parameters. Initialization with arrays '
+ 'requires the following arrays:\n'
)
if _vtk.VTK9:
raise TypeError(err_msg + '`cells`, `cell_type`, `points`')
else:
raise TypeError(err_msg + '(`offset` optional), `cells`, `cell_type`, `points`')
def __repr__(self):
"""Return the standard representation."""
return DataSet.__repr__(self)
def __str__(self):
"""Return the standard str representation."""
return DataSet.__str__(self)
def _from_cells_dict(self, cells_dict, points, deep=True):
if points.ndim != 2 or points.shape[-1] != 3:
raise ValueError("Points array must be a [M, 3] array")
nr_points = points.shape[0]
if _vtk.VTK9:
cell_types, cells = create_mixed_cells(cells_dict, nr_points)
self._from_arrays(None, cells, cell_types, points, deep=deep)
else:
cell_types, cells, offset = create_mixed_cells(cells_dict, nr_points)
self._from_arrays(offset, cells, cell_types, points, deep=deep)
def _from_arrays(
self,
offset,
cells,
cell_type,
points,
deep=True,
force_float=True,
):
"""Create VTK unstructured grid from numpy arrays.
Parameters
----------
offset : numpy.ndarray dtype=np.int64
Array indicating the start location of each cell in the cells
array. Set to ``None`` when using VTK 9+.
cells : numpy.ndarray dtype=np.int64
Array of cells. Each cell contains the number of points in the
cell and the node numbers of the cell.
cell_type : np.uint8
Cell types of each cell. Each cell type numbers can be found from
vtk documentation. See example below.
points : numpy.ndarray
Numpy array containing point locations.
deep : bool, optional
            When ``True``, makes a copy of the points array. Default
            ``True``. Cells and cell types are always copied.
force_float : bool, optional
Casts the datatype to ``float32`` if points datatype is
non-float. Default ``True``. Set this to ``False`` to allow
non-float types, though this may lead to truncation of
intermediate floats when transforming datasets.
Examples
--------
>>> import numpy as np
>>> import vtk
>>> import pyvista
>>> offset = np.array([0, 9])
>>> cells = np.array([8, 0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15])
>>> cell_type = np.array([vtk.VTK_HEXAHEDRON, vtk.VTK_HEXAHEDRON], np.int8)
>>> cell1 = np.array([[0, 0, 0],
... [1, 0, 0],
... [1, 1, 0],
... [0, 1, 0],
... [0, 0, 1],
... [1, 0, 1],
... [1, 1, 1],
... [0, 1, 1]], dtype=np.float32)
>>> cell2 = np.array([[0, 0, 2],
... [1, 0, 2],
... [1, 1, 2],
... [0, 1, 2],
... [0, 0, 3],
... [1, 0, 3],
... [1, 1, 3],
... [0, 1, 3]], dtype=np.float32)
>>> points = np.vstack((cell1, cell2))
>>> grid = pyvista.UnstructuredGrid(offset, cells, cell_type, points)
"""
# Convert to vtk arrays
vtkcells = CellArray(cells, cell_type.size, deep)
if cell_type.dtype != np.uint8:
cell_type = cell_type.astype(np.uint8)
cell_type_np = cell_type
cell_type = _vtk.numpy_to_vtk(cell_type, deep=deep)
points = pyvista.vtk_points(points, deep, force_float)
self.SetPoints(points)
# vtk9 does not require an offset array
if _vtk.VTK9:
if offset is not None:
warnings.warn('VTK 9 no longer accepts an offset array', stacklevel=3)
self.SetCells(cell_type, vtkcells)
else:
if offset is None:
offset = generate_cell_offsets(cells, cell_type_np)
self.SetCells(cell_type, numpy_to_idarr(offset), vtkcells)
def _check_for_consistency(self):
"""Check if size of offsets and celltypes match the number of cells.
Checks if the number of offsets and celltypes correspond to
the number of cells. Called after initialization of the self
from arrays.
"""
if self.n_cells != self.celltypes.size:
raise ValueError(
f'Number of cell types ({self.celltypes.size}) '
                f'must match the number of cells ({self.n_cells})'
)
if _vtk.VTK9:
if self.n_cells != self.offset.size - 1:
raise ValueError(
f'Size of the offset ({self.offset.size}) '
'must be one greater than the number of cells '
f'({self.n_cells})'
)
else:
if self.n_cells != self.offset.size:
raise ValueError(
f'Size of the offset ({self.offset.size}) '
f'must match the number of cells ({self.n_cells})'
)
@property
def cells(self) -> np.ndarray:
"""Return a pointer to the cells as a numpy object.
Examples
--------
Return the indices of the first two cells from the example hex
beam. Note how the cells have "padding" indicating the number
of points per cell.
>>> import pyvista
>>> from pyvista import examples
>>> hex_beam = pyvista.read(examples.hexbeamfile)
>>> hex_beam.cells[:18] # doctest:+SKIP
array([ 8, 0, 2, 8, 7, 27, 36, 90, 81, 8, 2, 1, 4,
8, 36, 18, 54, 90])
"""
return _vtk.vtk_to_numpy(self.GetCells().GetData())
@property
def cells_dict(self) -> dict:
"""Return a dictionary that contains all cells mapped from cell types.
This function returns a :class:`numpy.ndarray` for each cell
type in an ordered fashion. Note that this function only
works with element types of fixed sizes.
Returns
-------
dict
A dictionary mapping containing all cells of this unstructured grid.
Structure: vtk_enum_type (int) -> cells (:class:`numpy.ndarray`).
Examples
--------
Return the cells dictionary of the sample hex beam. Note how
        there is only one key/value pair, as the hex beam example is
        composed solely of hexahedral cells of type
        ``vtk.VTK_HEXAHEDRON``, which evaluates to 12.
Also note how there is no padding for the cell array. This
approach may be more helpful than the ``cells`` property when
extracting cells.
>>> import pyvista
>>> from pyvista import examples
>>> hex_beam = pyvista.read(examples.hexbeamfile)
>>> hex_beam.cells_dict # doctest:+SKIP
{12: array([[ 0, 2, 8, 7, 27, 36, 90, 81],
[ 2, 1, 4, 8, 36, 18, 54, 90],
[ 7, 8, 6, 5, 81, 90, 72, 63],
...
[44, 26, 62, 98, 11, 10, 13, 17],
[89, 98, 80, 71, 16, 17, 15, 14],
[98, 62, 53, 80, 17, 13, 12, 15]])}
"""
return get_mixed_cells(self)
@property
def cell_connectivity(self) -> np.ndarray:
"""Return a the vtk cell connectivity as a numpy array.
This is effecively :attr:`UnstructuredGrid.cells` without the
padding.
.. note::
This is only available in ``vtk>=9.0.0``.
Returns
-------
numpy.ndarray
Connectivity array.
Examples
--------
Return the cell connectivity for the first two cells.
>>> import pyvista
>>> from pyvista import examples
>>> hex_beam = pyvista.read(examples.hexbeamfile)
>>> hex_beam.cell_connectivity[:16]
array([ 0, 2, 8, 7, 27, 36, 90, 81, 2, 1, 4, 8, 36, 18, 54, 90])
"""
carr = self.GetCells()
if _vtk.VTK9:
return _vtk.vtk_to_numpy(carr.GetConnectivityArray())
raise VTKVersionError(
'Install vtk>=9.0.0 for `cell_connectivity`\n'
            'Otherwise, use the legacy `cells` attribute'
) # pragma: no cover
def linear_copy(self, deep=False):
"""Return a copy of the unstructured grid containing only linear cells.
Converts the following cell types to their linear equivalents.
- ``VTK_QUADRATIC_TETRA --> VTK_TETRA``
- ``VTK_QUADRATIC_PYRAMID --> VTK_PYRAMID``
- ``VTK_QUADRATIC_WEDGE --> VTK_WEDGE``
- ``VTK_QUADRATIC_HEXAHEDRON --> VTK_HEXAHEDRON``
Parameters
----------
deep : bool
When ``True``, makes a copy of the points array. Default
``False``. Cells and cell types are always copied.
Returns
-------
pyvista.UnstructuredGrid
UnstructuredGrid containing only linear cells when
``deep=False``.
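        Examples
        --------
        A minimal sketch, assuming ``grid`` is an existing
        :class:`pyvista.UnstructuredGrid` containing quadratic cells:
        >>> lgrid = grid.linear_copy()  # doctest:+SKIP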
"""
lgrid = self.copy(deep)
# grab the vtk object
vtk_cell_type = _vtk.numpy_to_vtk(self.GetCellTypesArray(), deep=True)
celltype = _vtk.vtk_to_numpy(vtk_cell_type)
celltype[celltype == _vtk.VTK_QUADRATIC_TETRA] = _vtk.VTK_TETRA
celltype[celltype == _vtk.VTK_QUADRATIC_PYRAMID] = _vtk.VTK_PYRAMID
celltype[celltype == _vtk.VTK_QUADRATIC_WEDGE] = _vtk.VTK_WEDGE
celltype[celltype == _vtk.VTK_QUADRATIC_HEXAHEDRON] = _vtk.VTK_HEXAHEDRON
# track quad mask for later
quad_quad_mask = celltype == _vtk.VTK_QUADRATIC_QUAD
celltype[quad_quad_mask] = _vtk.VTK_QUAD
quad_tri_mask = celltype == _vtk.VTK_QUADRATIC_TRIANGLE
celltype[quad_tri_mask] = _vtk.VTK_TRIANGLE
vtk_offset = self.GetCellLocationsArray()
cells = _vtk.vtkCellArray()
cells.DeepCopy(self.GetCells())
lgrid.SetCells(vtk_cell_type, vtk_offset, cells)
        # Fix the display of quadratic quad/triangle cells: collapse the
        # midside nodes onto the first corner point so they render as linear.
if np.any(quad_quad_mask):
if _vtk.VTK9:
quad_offset = lgrid.offset[:-1][quad_quad_mask]
base_point = lgrid.cell_connectivity[quad_offset]
lgrid.cell_connectivity[quad_offset + 4] = base_point
lgrid.cell_connectivity[quad_offset + 5] = base_point
lgrid.cell_connectivity[quad_offset + 6] = base_point
lgrid.cell_connectivity[quad_offset + 7] = base_point
else:
quad_offset = lgrid.offset[quad_quad_mask]
base_point = lgrid.cells[quad_offset + 1]
lgrid.cells[quad_offset + 5] = base_point
lgrid.cells[quad_offset + 6] = base_point
lgrid.cells[quad_offset + 7] = base_point
lgrid.cells[quad_offset + 8] = base_point
if np.any(quad_tri_mask):
if _vtk.VTK9:
tri_offset = lgrid.offset[:-1][quad_tri_mask]
base_point = lgrid.cell_connectivity[tri_offset]
lgrid.cell_connectivity[tri_offset + 3] = base_point
lgrid.cell_connectivity[tri_offset + 4] = base_point
lgrid.cell_connectivity[tri_offset + 5] = base_point
else:
tri_offset = lgrid.offset[quad_tri_mask]
base_point = lgrid.cells[tri_offset + 1]
lgrid.cells[tri_offset + 4] = base_point
lgrid.cells[tri_offset + 5] = base_point
lgrid.cells[tri_offset + 6] = base_point
return lgrid
@property
def celltypes(self) -> np.ndarray:
"""Return the cell types array.
Returns
-------
numpy.ndarray
Array of VTK cell types. Some of the most popular cell types:
* ``VTK_EMPTY_CELL = 0``
* ``VTK_VERTEX = 1``
* ``VTK_POLY_VERTEX = 2``
* ``VTK_LINE = 3``
* ``VTK_POLY_LINE = 4``
* ``VTK_TRIANGLE = 5``
* ``VTK_TRIANGLE_STRIP = 6``
* ``VTK_POLYGON = 7``
* ``VTK_PIXEL = 8``
* ``VTK_QUAD = 9``
* ``VTK_TETRA = 10``
* ``VTK_VOXEL = 11``
* ``VTK_HEXAHEDRON = 12``
* ``VTK_WEDGE = 13``
* ``VTK_PYRAMID = 14``
* ``VTK_PENTAGONAL_PRISM = 15``
* ``VTK_HEXAGONAL_PRISM = 16``
* ``VTK_QUADRATIC_EDGE = 21``
* ``VTK_QUADRATIC_TRIANGLE = 22``
* ``VTK_QUADRATIC_QUAD = 23``
* ``VTK_QUADRATIC_POLYGON = 36``
* ``VTK_QUADRATIC_TETRA = 24``
* ``VTK_QUADRATIC_HEXAHEDRON = 25``
* ``VTK_QUADRATIC_WEDGE = 26``
* ``VTK_QUADRATIC_PYRAMID = 27``
* ``VTK_BIQUADRATIC_QUAD = 28``
* ``VTK_TRIQUADRATIC_HEXAHEDRON = 29``
* ``VTK_QUADRATIC_LINEAR_QUAD = 30``
* ``VTK_QUADRATIC_LINEAR_WEDGE = 31``
* ``VTK_BIQUADRATIC_QUADRATIC_WEDGE = 32``
* ``VTK_BIQUADRATIC_QUADRATIC_HEXAHEDRON = 33``
* ``VTK_BIQUADRATIC_TRIANGLE = 34``
See
https://vtk.org/doc/nightly/html/vtkCellType_8h_source.html
for all cell types.
Examples
--------
This mesh contains only linear hexahedral cells, type
``vtk.VTK_HEXAHEDRON``, which evaluates to 12.
>>> import pyvista
>>> from pyvista import examples
>>> hex_beam = pyvista.read(examples.hexbeamfile)
>>> hex_beam.celltypes # doctest:+SKIP
array([12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12],
dtype=uint8)
"""
return _vtk.vtk_to_numpy(self.GetCellTypesArray())
@property
def offset(self) -> np.ndarray:
"""Return the cell locations array.
In VTK 9, this is the location of the start of each cell in
:attr:`cell_connectivity`, and in VTK < 9, this is the
location of the start of each cell in :attr:`cells`.
Returns
-------
numpy.ndarray
Array of cell offsets indicating the start of each cell.
Examples
--------
Return the cell offset array within ``vtk==9``. Since this
mesh is composed of all hexahedral cells, note how each cell
starts at 8 greater than the prior cell.
>>> import pyvista
>>> from pyvista import examples
>>> hex_beam = pyvista.read(examples.hexbeamfile)
>>> hex_beam.offset
array([ 0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,
104, 112, 120, 128, 136, 144, 152, 160, 168, 176, 184, 192, 200,
208, 216, 224, 232, 240, 248, 256, 264, 272, 280, 288, 296, 304,
312, 320])
"""
carr = self.GetCells()
if _vtk.VTK9:
# This will be the number of cells + 1.
return _vtk.vtk_to_numpy(carr.GetOffsetsArray())
else: # this is no longer used in >= VTK9
return _vtk.vtk_to_numpy(self.GetCellLocationsArray())
def cast_to_explicit_structured_grid(self):
"""Cast to an explicit structured grid.
.. note::
This feature is only available in ``vtk>=9.0.0``
Returns
-------
pyvista.ExplicitStructuredGrid
An explicit structured grid.
Raises
------
TypeError
If the unstructured grid doesn't have the ``'BLOCK_I'``,
``'BLOCK_J'`` and ``'BLOCK_K'`` cells arrays.
See Also
--------
pyvista.ExplicitStructuredGrid.cast_to_unstructured_grid
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured()
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
>>> grid = grid.hide_cells(range(80, 120))
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
>>> grid = grid.cast_to_unstructured_grid()
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
>>> grid = grid.cast_to_explicit_structured_grid()
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
"""
if not _vtk.VTK9:
raise VTKVersionError('VTK 9 or higher is required') # pragma: no cover
s1 = {'BLOCK_I', 'BLOCK_J', 'BLOCK_K'}
s2 = self.cell_data.keys()
if not s1.issubset(s2):
raise TypeError("'BLOCK_I', 'BLOCK_J' and 'BLOCK_K' cell arrays are required")
alg = _vtk.vtkUnstructuredGridToExplicitStructuredGrid()
alg.SetInputData(self)
alg.SetInputArrayToProcess(0, 0, 0, 1, 'BLOCK_I')
alg.SetInputArrayToProcess(1, 0, 0, 1, 'BLOCK_J')
alg.SetInputArrayToProcess(2, 0, 0, 1, 'BLOCK_K')
alg.Update()
grid = _get_output(alg)
grid.cell_data.remove('ConnectivityFlags') # unrequired
return grid
class StructuredGrid(_vtk.vtkStructuredGrid, PointGrid, StructuredGridFilters):
"""Dataset used for topologically regular arrays of data.
Can be initialized in one of the following several ways:
* Create empty grid.
* Initialize from a filename.
* Initialize from a ``vtk.vtkStructuredGrid`` object.
* Initialize directly from one or more :class:`numpy.ndarray`. See the
example or the documentation of ``uinput``.
Parameters
----------
uinput : str, pathlib.Path, vtk.vtkStructuredGrid, numpy.ndarray, optional
Filename, dataset, or array to initialize the structured grid from. If
a filename is passed, pyvista will attempt to load it as a
:class:`StructuredGrid`. If passed a ``vtk.vtkStructuredGrid``, it will
be wrapped as a deep copy.
If a :class:`numpy.ndarray` is provided and ``y`` and ``z`` are empty,
this array will define the points of this :class:`StructuredGrid`.
Set the dimensions with :attr:`StructuredGrid.dimensions`.
Otherwise, this parameter will be loaded as the ``x`` points, and ``y``
and ``z`` points must be set. The shape of this array defines the shape
of the structured data and the shape should be ``(dimx, dimy,
dimz)``. Missing trailing dimensions are assumed to be ``1``.
y : numpy.ndarray, optional
Coordinates of the points in y direction. If this is passed, ``uinput``
must be a :class:`numpy.ndarray` and match the shape of ``y``.
z : numpy.ndarray, optional
Coordinates of the points in z direction. If this is passed, ``uinput``
and ``y`` must be a :class:`numpy.ndarray` and match the shape of ``z``.
**kwargs : dict, optional
Additional keyword arguments passed when reading from a file or loading
from arrays.
Examples
--------
>>> import pyvista
>>> import vtk
>>> import numpy as np
Create an empty structured grid.
>>> grid = pyvista.StructuredGrid()
Initialize from a ``vtk.vtkStructuredGrid`` object
>>> vtkgrid = vtk.vtkStructuredGrid()
>>> grid = pyvista.StructuredGrid(vtkgrid)
Create from NumPy arrays.
>>> xrng = np.arange(-10, 10, 2, dtype=np.float32)
>>> yrng = np.arange(-10, 10, 2, dtype=np.float32)
>>> zrng = np.arange(-10, 10, 2, dtype=np.float32)
>>> x, y, z = np.meshgrid(xrng, yrng, zrng)
>>> grid = pyvista.StructuredGrid(x, y, z)
>>> grid # doctest:+SKIP
StructuredGrid (0x7fb18f2a8580)
N Cells: 729
N Points: 1000
X Bounds: -1.000e+01, 8.000e+00
Y Bounds: -1.000e+01, 8.000e+00
Z Bounds: -1.000e+01, 8.000e+00
Dimensions: 10, 10, 10
N Arrays: 0
"""
_WRITERS = {'.vtk': _vtk.vtkStructuredGridWriter, '.vts': _vtk.vtkXMLStructuredGridWriter}
def __init__(self, uinput=None, y=None, z=None, **kwargs) -> None:
"""Initialize the structured grid."""
super().__init__()
if isinstance(uinput, _vtk.vtkStructuredGrid):
self.deep_copy(uinput)
elif isinstance(uinput, (str, pathlib.Path)):
self._from_file(uinput, **kwargs)
elif (
isinstance(uinput, np.ndarray)
and isinstance(y, np.ndarray)
and isinstance(z, np.ndarray)
):
self._from_arrays(uinput, y, z, **kwargs)
elif isinstance(uinput, np.ndarray) and y is None and z is None:
self.points = uinput # type: ignore
elif uinput is None:
# do nothing, initialize as empty structured grid
pass
else:
raise TypeError(
"Invalid parameters. Expecting one of the following:\n"
" - No arguments\n"
" - Filename as the only argument\n"
" - StructuredGrid as the only argument\n"
" - Single `numpy.ndarray` as the only argument"
" - Three `numpy.ndarray` as the first three arguments"
)
def __repr__(self):
"""Return the standard representation."""
return DataSet.__repr__(self)
def __str__(self):
"""Return the standard str representation."""
return DataSet.__str__(self)
def _from_arrays(self, x, y, z, force_float=True):
"""Create VTK structured grid directly from numpy arrays.
Parameters
----------
x : numpy.ndarray
Position of the points in x direction.
y : numpy.ndarray
Position of the points in y direction.
z : numpy.ndarray
Position of the points in z direction.
force_float : bool, optional
Casts the datatype to ``float32`` if points datatype is
non-float. Default ``True``. Set this to ``False`` to allow
non-float types, though this may lead to truncation of
intermediate floats when transforming datasets.
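        Examples
        --------
        A sketch of typical input shapes (sizes chosen arbitrarily):
        >>> import numpy as np
        >>> x, y, z = np.meshgrid(np.arange(2), np.arange(3), np.arange(4))
        >>> x.shape == y.shape == z.shape
        True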
"""
if not (x.shape == y.shape == z.shape):
raise ValueError('Input point array shapes must match exactly')
# make the output points the same precision as the input arrays
points = np.empty((x.size, 3), x.dtype)
points[:, 0] = x.ravel('F')
points[:, 1] = y.ravel('F')
points[:, 2] = z.ravel('F')
# ensure that the inputs are 3D
dim = list(x.shape)
while len(dim) < 3:
dim.append(1)
# Create structured grid
self.SetDimensions(dim)
self.SetPoints(pyvista.vtk_points(points, force_float=force_float))
@property
def dimensions(self):
"""Return a length 3 tuple of the grid's dimensions.
Returns
-------
tuple
Grid dimensions.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> xrng = np.arange(-10, 10, 1, dtype=np.float32)
>>> yrng = np.arange(-10, 10, 2, dtype=np.float32)
>>> zrng = np.arange(-10, 10, 5, dtype=np.float32)
>>> x, y, z = np.meshgrid(xrng, yrng, zrng)
>>> grid = pyvista.StructuredGrid(x, y, z)
>>> grid.dimensions
(10, 20, 4)
"""
return tuple(self.GetDimensions())
@dimensions.setter
def dimensions(self, dims):
"""Set the dataset dimensions. Pass a length three tuple of integers."""
nx, ny, nz = dims[0], dims[1], dims[2]
self.SetDimensions(nx, ny, nz)
self.Modified()
@property
def x(self):
"""Return the X coordinates of all points.
Returns
-------
numpy.ndarray
Numpy array of all X coordinates.
Examples
--------
>>> import pyvista
>>> import numpy as np
>>> xrng = np.arange(-10, 10, 1, dtype=np.float32)
>>> yrng = np.arange(-10, 10, 2, dtype=np.float32)
>>> zrng = np.arange(-10, 10, 5, dtype=np.float32)
>>> x, y, z = np.meshgrid(xrng, yrng, zrng)
>>> grid = pyvista.StructuredGrid(x, y, z)
>>> grid.x.shape
(10, 20, 4)
"""
return self._reshape_point_array(self.points[:, 0])
@property
def y(self):
"""Return the Y coordinates of all points."""
return self._reshape_point_array(self.points[:, 1])
@property
def z(self):
"""Return the Z coordinates of all points."""
return self._reshape_point_array(self.points[:, 2])
@property
def points_matrix(self):
"""Points as a 4-D matrix, with x/y/z along the last dimension."""
return self.points.reshape((*self.dimensions, 3), order='F')
def _get_attrs(self):
"""Return the representation methods (internal helper)."""
attrs = PointGrid._get_attrs(self)
attrs.append(("Dimensions", self.dimensions, "{:d}, {:d}, {:d}"))
return attrs
def __getitem__(self, key):
"""Slice subsets of the StructuredGrid, or extract an array field."""
# legacy behavior which looks for a point or cell array
if not isinstance(key, tuple):
return super().__getitem__(key)
# convert slice to VOI specification - only "basic indexing" is supported
voi = []
rate = []
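        # A sketch of the mapping (illustrative indices): with the logic
        # below, grid[0:2, 1, :] yields voi = [0, 1, 1, 1, 0, nz] and
        # rate = [1, 1, 1], where nz = self.dimensions[2].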
if len(key) != 3:
raise RuntimeError('Slices must have exactly 3 dimensions.')
for i, k in enumerate(key):
if isinstance(k, Iterable):
raise RuntimeError('Fancy indexing is not supported.')
if isinstance(k, numbers.Integral):
start = stop = k
step = 1
elif isinstance(k, slice):
start = k.start if k.start is not None else 0
stop = k.stop - 1 if k.stop is not None else self.dimensions[i]
step = k.step if k.step is not None else 1
voi.extend((start, stop))
rate.append(step)
return self.extract_subset(voi, rate, boundary=False)
def hide_cells(self, ind, inplace=False):
"""Hide cells without deleting them.
        Hides cells by setting the ghost_cells array to ``HIDDENCELL``.
Parameters
----------
ind : sequence
List or array of cell indices to be hidden. The array can
also be a boolean array of the same size as the number of
cells.
inplace : bool, optional
Updates mesh in-place.
Returns
-------
pyvista.StructuredGrid
Structured grid with hidden cells.
Examples
--------
Hide part of the middle of a structured surface.
>>> import pyvista as pv
>>> import numpy as np
>>> x = np.arange(-10, 10, 0.25)
>>> y = np.arange(-10, 10, 0.25)
>>> z = 0
>>> x, y, z = np.meshgrid(x, y, z)
>>> grid = pv.StructuredGrid(x, y, z)
>>> grid = grid.hide_cells(range(79*30, 79*50))
>>> grid.plot(color=True, show_edges=True)
"""
if not inplace:
return self.copy().hide_cells(ind, inplace=True)
if isinstance(ind, np.ndarray):
if ind.dtype == np.bool_ and ind.size != self.n_cells:
raise ValueError(
f'Boolean array size must match the number of cells ({self.n_cells})'
)
ghost_cells = np.zeros(self.n_cells, np.uint8)
ghost_cells[ind] = _vtk.vtkDataSetAttributes.HIDDENCELL
        # NOTE: cells cannot be removed from a structured grid, only
        # hidden. Setting ghost_cells to a value besides
        # vtk.vtkDataSetAttributes.HIDDENCELL will not hide them
        # properly. Additionally, calling self.RemoveGhostCells will
        # have no effect.
# add but do not make active
self.cell_data.set_array(ghost_cells, _vtk.vtkDataSetAttributes.GhostArrayName())
return self
def hide_points(self, ind):
"""Hide points without deleting them.
        Hides points by setting the ghost_points array to ``HIDDENPOINT``.
Parameters
----------
ind : sequence
List or array of point indices to be hidden. The array
can also be a boolean array of the same size as the number
of points.
Returns
-------
        None
            Points are hidden in place on this grid.
Examples
--------
Hide part of the middle of a structured surface.
>>> import pyvista as pv
>>> import numpy as np
>>> x = np.arange(-10, 10, 0.25)
>>> y = np.arange(-10, 10, 0.25)
>>> z = 0
>>> x, y, z = np.meshgrid(x, y, z)
>>> grid = pv.StructuredGrid(x, y, z)
>>> grid.hide_points(range(80*30, 80*50))
>>> grid.plot(color=True, show_edges=True)
"""
if isinstance(ind, np.ndarray):
if ind.dtype == np.bool_ and ind.size != self.n_points:
raise ValueError(
f'Boolean array size must match the number of points ({self.n_points})'
)
ghost_points = np.zeros(self.n_points, np.uint8)
ghost_points[ind] = _vtk.vtkDataSetAttributes.HIDDENPOINT
# add but do not make active
self.point_data.set_array(ghost_points, _vtk.vtkDataSetAttributes.GhostArrayName())
def _reshape_point_array(self, array):
"""Reshape point data to a 3-D matrix."""
return array.reshape(self.dimensions, order='F')
def _reshape_cell_array(self, array):
"""Reshape cell data to a 3-D matrix."""
cell_dims = np.array(self.dimensions) - 1
cell_dims[cell_dims == 0] = 1
return array.reshape(cell_dims, order='F')
class ExplicitStructuredGrid(_vtk.vtkExplicitStructuredGrid, PointGrid):
"""Extend the functionality of the ``vtk.vtkExplicitStructuredGrid`` class.
Can be initialized by the following:
- Creating an empty grid
- From a ``vtk.vtkExplicitStructuredGrid`` or ``vtk.vtkUnstructuredGrid`` object
- From a VTU or VTK file
- From ``dims`` and ``corners`` arrays
Examples
--------
>>> import numpy as np
>>> import pyvista as pv
>>>
>>> # grid size: ni*nj*nk cells; si, sj, sk steps
>>> ni, nj, nk = 4, 5, 6
>>> si, sj, sk = 20, 10, 1
>>>
>>> # create raw coordinate grid
>>> grid_ijk = np.mgrid[:(ni+1)*si:si, :(nj+1)*sj:sj, :(nk+1)*sk:sk]
>>>
>>> # repeat array along each Cartesian axis for connectivity
>>> for axis in range(1, 4):
... grid_ijk = grid_ijk.repeat(2, axis=axis)
>>>
>>> # slice off unnecessarily doubled edge coordinates
>>> grid_ijk = grid_ijk[:, 1:-1, 1:-1, 1:-1]
>>>
>>> # reorder and reshape to VTK order
>>> corners = grid_ijk.transpose().reshape(-1, 3)
>>>
>>> dims = np.array([ni, nj, nk]) + 1
>>> grid = pv.ExplicitStructuredGrid(dims, corners)
>>> grid = grid.compute_connectivity()
>>> grid.plot(show_edges=True) # doctest:+SKIP
"""
_WRITERS = {'.vtu': _vtk.vtkXMLUnstructuredGridWriter, '.vtk': _vtk.vtkUnstructuredGridWriter}
def __init__(self, *args, **kwargs):
"""Initialize the explicit structured grid."""
if not _vtk.VTK9:
raise VTKVersionError('VTK 9 or higher is required') # pragma: no cover
super().__init__()
n = len(args)
if n == 1:
arg0 = args[0]
if isinstance(arg0, _vtk.vtkExplicitStructuredGrid):
self.deep_copy(arg0)
elif isinstance(arg0, _vtk.vtkUnstructuredGrid):
grid = arg0.cast_to_explicit_structured_grid()
self.deep_copy(grid)
elif isinstance(arg0, (str, pathlib.Path)):
grid = UnstructuredGrid(arg0)
grid = grid.cast_to_explicit_structured_grid()
self.deep_copy(grid)
elif n == 2:
arg0, arg1 = args
if isinstance(arg0, tuple):
arg0 = np.asarray(arg0)
if isinstance(arg1, list):
arg1 = np.asarray(arg1)
arg0_is_arr = isinstance(arg0, np.ndarray)
arg1_is_arr = isinstance(arg1, np.ndarray)
if all([arg0_is_arr, arg1_is_arr]):
self._from_arrays(arg0, arg1)
def __repr__(self):
"""Return the standard representation."""
return DataSet.__repr__(self)
def __str__(self):
"""Return the standard ``str`` representation."""
return DataSet.__str__(self)
def _from_arrays(self, dims: Sequence, corners: Sequence) -> None:
"""Create a VTK explicit structured grid from NumPy arrays.
Parameters
----------
dims : Sequence
A sequence of integers with shape (3,) containing the
topological dimensions of the grid.
corners : Sequence
A sequence of floats with shape (number of corners, 3)
containing the coordinates of the corner points.
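        Examples
        --------
        A sketch of the expected shapes (a single-cell grid; values are
        made up for illustration):
        >>> import numpy as np
        >>> dims = np.array([2, 2, 2])  # two points per direction -> 1 cell
        >>> corners = np.zeros((8, 3))  # 8 corner points for that one cell
        >>> corners.shape[0] == (2 * (dims - 1)).prod()
        True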
"""
shape0 = np.asanyarray(dims) - 1
shape1 = 2 * shape0
ncells = np.prod(shape0)
cells = 8 * np.ones((ncells, 9), dtype=int)
points, indices = np.unique(corners, axis=0, return_inverse=True)
connectivity = np.asarray(
[[0, 1, 1, 0, 0, 1, 1, 0], [0, 0, 1, 1, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1, 1, 1]]
)
for c in range(ncells):
i, j, k = np.unravel_index(c, shape0, order='F')
coord = (2 * i + connectivity[0], 2 * j + connectivity[1], 2 * k + connectivity[2])
cinds = np.ravel_multi_index(coord, shape1, order='F') # type: ignore
cells[c, 1:] = indices[cinds]
cells = cells.flatten()
points = pyvista.vtk_points(points)
cells = CellArray(cells, ncells)
self.SetDimensions(dims)
self.SetPoints(points)
self.SetCells(cells)
def cast_to_unstructured_grid(self) -> 'UnstructuredGrid':
"""Cast to an unstructured grid.
Returns
-------
UnstructuredGrid
An unstructured grid. VTK adds the ``'BLOCK_I'``,
``'BLOCK_J'`` and ``'BLOCK_K'`` cell arrays. These arrays
are required to restore the explicit structured grid.
See Also
--------
pyvista.DataSetFilters.extract_cells : Extract a subset of a dataset.
pyvista.UnstructuredGrid.cast_to_explicit_structured_grid : Cast an unstructured grid to an explicit structured grid.
Notes
-----
The ghost cell array is disabled before casting the
unstructured grid in order to allow the original structure
and attributes data of the explicit structured grid to be
restored. If you don't need to restore the explicit
structured grid later or want to extract an unstructured
grid from the visible subgrid, use the ``extract_cells``
filter and the cell indices where the ghost cell array is
``0``.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest:+SKIP
>>> grid = grid.hide_cells(range(80, 120)) # doctest:+SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest:+SKIP
>>> grid = grid.cast_to_unstructured_grid() # doctest:+SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest:+SKIP
>>> grid = grid.cast_to_explicit_structured_grid() # doctest:+SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest:+SKIP
"""
grid = ExplicitStructuredGrid()
grid.copy_structure(self)
alg = _vtk.vtkExplicitStructuredGridToUnstructuredGrid()
alg.SetInputDataObject(grid)
alg.Update()
grid = _get_output(alg)
grid.cell_data.remove('vtkOriginalCellIds') # unrequired
grid.copy_attributes(self) # copy ghost cell array and other arrays
return grid
def save(self, filename, binary=True):
"""Save this VTK object to file.
Parameters
----------
filename : str
Output file name. VTU and VTK extensions are supported.
binary : bool, optional
If ``True`` (default), write as binary, else ASCII.
Notes
-----
VTK adds the ``'BLOCK_I'``, ``'BLOCK_J'`` and ``'BLOCK_K'``
cell arrays. These arrays are required to restore the explicit
structured grid.
Examples
--------
>>> import pyvista as pv
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid = grid.hide_cells(range(80, 120)) # doctest:+SKIP
>>> grid.save('grid.vtu') # doctest:+SKIP
>>> grid = pv.ExplicitStructuredGrid('grid.vtu') # doctest:+SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest:+SKIP
>>> grid.show_cells() # doctest:+SKIP
>>> grid.plot(color='w', show_edges=True, show_bounds=True) # doctest:+SKIP
"""
grid = self.cast_to_unstructured_grid()
grid.save(filename, binary)
def hide_cells(self, ind: Sequence[int], inplace=False) -> 'ExplicitStructuredGrid':
"""Hide specific cells.
Hides cells by setting the ghost cell array to ``HIDDENCELL``.
Parameters
----------
ind : sequence(int)
Cell indices to be hidden. A boolean array of the same
size as the number of cells also is acceptable.
inplace : bool, optional
            This method is applied to this grid if ``True`` or to a
            copy otherwise (default).
Returns
-------
        ExplicitStructuredGrid
            A deep copy of this grid with the hidden cells if
            ``inplace=False``, or this grid otherwise.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured()
>>> grid = grid.hide_cells(range(80, 120))
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
"""
ind_arr = np.asanyarray(ind)
if inplace:
array = np.zeros(self.n_cells, dtype=np.uint8)
array[ind_arr] = _vtk.vtkDataSetAttributes.HIDDENCELL
name = _vtk.vtkDataSetAttributes.GhostArrayName()
self.cell_data[name] = array
return self
grid = self.copy()
grid.hide_cells(ind, inplace=True)
return grid
def show_cells(self, inplace=False) -> 'ExplicitStructuredGrid':
"""Show hidden cells.
Shows hidden cells by setting the ghost cell array to ``0``
        wherever it is ``HIDDENCELL``.
Parameters
----------
inplace : bool, optional
            This method is applied to this grid if ``True`` or to a
            copy otherwise (default).
Returns
-------
ExplicitStructuredGrid
A deep copy of this grid if ``inplace=False`` with the
hidden cells shown. Otherwise, this dataset with the
shown cells.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured()
>>> grid = grid.hide_cells(range(80, 120))
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
>>> grid = grid.show_cells()
>>> grid.plot(color='w', show_edges=True, show_bounds=True)
"""
if inplace:
name = _vtk.vtkDataSetAttributes.GhostArrayName()
if name in self.cell_data.keys():
array = self.cell_data[name]
ind = np.argwhere(array == _vtk.vtkDataSetAttributes.HIDDENCELL)
array[ind] = 0
return self
else:
grid = self.copy()
grid.show_cells(inplace=True)
return grid
def _dimensions(self):
# This method is required to avoid conflict if a developer extends `ExplicitStructuredGrid`
# and reimplements `dimensions` to return, for example, the number of cells in the I, J and
# K directions.
dims = self.extent
dims = np.reshape(dims, (3, 2))
dims = np.diff(dims, axis=1)
dims = dims.flatten() + 1
return int(dims[0]), int(dims[1]), int(dims[2])
@property
def dimensions(self) -> Tuple[int, int, int]:
"""Return the topological dimensions of the grid.
Returns
-------
tuple(int)
            Number of sampling points in the I, J and K directions respectively.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid.dimensions # doctest:+SKIP
(5, 6, 7)
"""
return self._dimensions()
@property
def visible_bounds(self) -> Tuple[float, float, float, float, float, float]:
"""Return the bounding box of the visible cells.
Different from `bounds`, which returns the bounding box of the
complete grid, this method returns the bounding box of the
visible cells, where the ghost cell array is not
``HIDDENCELL``.
Returns
-------
tuple(float)
The limits of the visible grid in the X, Y and Z
directions respectively.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid = grid.hide_cells(range(80, 120)) # doctest:+SKIP
>>> grid.bounds # doctest:+SKIP
[0.0, 80.0, 0.0, 50.0, 0.0, 6.0]
>>> grid.visible_bounds # doctest:+SKIP
[0.0, 80.0, 0.0, 50.0, 0.0, 4.0]
"""
name = _vtk.vtkDataSetAttributes.GhostArrayName()
if name in self.cell_data:
array = self.cell_data[name]
grid = self.extract_cells(array == 0)
return grid.bounds
else:
return self.bounds
def cell_id(self, coords) -> Union[int, np.ndarray, None]:
"""Return the cell ID.
Parameters
----------
coords : tuple(int), list(tuple(int)) or numpy.ndarray
Cell structured coordinates.
Returns
-------
int, numpy.ndarray, or None
Cell IDs. ``None`` if ``coords`` is outside the grid extent.
See Also
--------
pyvista.ExplicitStructuredGrid.cell_coords : Return the cell structured coordinates.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid.cell_id((3, 4, 0)) # doctest:+SKIP
19
>>> coords = [(3, 4, 0),
... (3, 2, 1),
... (1, 0, 2),
... (2, 3, 2)]
>>> grid.cell_id(coords) # doctest:+SKIP
array([19, 31, 41, 54])
"""
# `vtk.vtkExplicitStructuredGrid.ComputeCellId` is not used
# here because this method returns invalid cell IDs when
# `coords` is outside the grid extent.
if isinstance(coords, list):
coords = np.asarray(coords)
if isinstance(coords, np.ndarray) and coords.ndim == 2:
ncol = coords.shape[1]
coords = [coords[:, c] for c in range(ncol)]
coords = tuple(coords)
dims = self._dimensions()
try:
ind = np.ravel_multi_index(coords, np.array(dims) - 1, order='F') # type: ignore
except ValueError:
return None
else:
return ind
def cell_coords(self, ind):
"""Return the cell structured coordinates.
Parameters
----------
ind : int or iterable(int)
Cell IDs.
Returns
-------
tuple(int), numpy.ndarray, or None
Cell structured coordinates. ``None`` if ``ind`` is
outside the grid extent.
See Also
--------
pyvista.ExplicitStructuredGrid.cell_id : Return the cell ID.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid.cell_coords(19) # doctest:+SKIP
(3, 4, 0)
>>> grid.cell_coords((19, 31, 41, 54)) # doctest:+SKIP
array([[3, 4, 0],
[3, 2, 1],
[1, 0, 2],
[2, 3, 2]])
"""
dims = self._dimensions()
try:
coords = np.unravel_index(ind, np.array(dims) - 1, order='F')
except ValueError:
return None
else:
if isinstance(coords[0], np.ndarray):
coords = np.stack(coords, axis=1)
return coords
def neighbors(self, ind, rel='connectivity') -> list:
"""Return the indices of neighboring cells.
Parameters
----------
ind : int or iterable(int)
Cell IDs.
rel : str, optional
Defines the neighborhood relationship. If
``'topological'``, returns the ``(i-1, j, k)``, ``(i+1, j,
k)``, ``(i, j-1, k)``, ``(i, j+1, k)``, ``(i, j, k-1)``
and ``(i, j, k+1)`` cells. If ``'connectivity'``
(default), returns only the topological neighbors
considering faces connectivity. If ``'geometric'``,
returns the cells in the ``(i-1, j)``, ``(i+1, j)``,
            ``(i, j-1)`` and ``(i, j+1)`` vertical cell groups whose
faces intersect.
Returns
-------
list(int)
Indices of neighboring cells.
Examples
--------
>>> import pyvista as pv
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> cell = grid.extract_cells(31) # doctest:+SKIP
>>> ind = grid.neighbors(31) # doctest:+SKIP
>>> neighbors = grid.extract_cells(ind) # doctest:+SKIP
>>>
>>> plotter = pv.Plotter()
>>> plotter.add_axes() # doctest:+SKIP
>>> plotter.add_mesh(cell, color='r', show_edges=True) # doctest:+SKIP
>>> plotter.add_mesh(neighbors, color='w', show_edges=True) # doctest:+SKIP
>>> plotter.show() # doctest:+SKIP
"""
def connectivity(ind):
indices = []
cell_coords = self.cell_coords(ind)
cell_points = self.cell_points(ind)
if cell_points.shape[0] == 8:
faces = [
[(-1, 0, 0), (0, 4, 7, 3), (1, 5, 6, 2)],
[(+1, 0, 0), (1, 2, 6, 5), (0, 3, 7, 4)],
[(0, -1, 0), (0, 1, 5, 4), (3, 2, 6, 7)],
[(0, +1, 0), (3, 7, 6, 2), (0, 4, 5, 1)],
[(0, 0, -1), (0, 3, 2, 1), (4, 7, 6, 5)],
[(0, 0, +1), (4, 5, 6, 7), (0, 1, 2, 3)],
]
for f in faces:
coords = np.sum([cell_coords, f[0]], axis=0)
ind = self.cell_id(coords)
if ind:
points = self.cell_points(ind)
if points.shape[0] == 8:
a1 = cell_points[f[1], :]
a2 = points[f[2], :]
if np.array_equal(a1, a2):
indices.append(ind)
return indices
def topological(ind):
indices = []
cell_coords = self.cell_coords(ind)
cell_neighbors = [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]
for n in cell_neighbors:
coords = np.sum([cell_coords, n], axis=0)
ind = self.cell_id(coords)
if ind:
indices.append(ind)
return indices
def geometric(ind):
indices = []
cell_coords = self.cell_coords(ind)
cell_points = self.cell_points(ind)
if cell_points.shape[0] == 8:
for k in [-1, 1]:
coords = np.sum([cell_coords, (0, 0, k)], axis=0)
ind = self.cell_id(coords)
if ind:
indices.append(ind)
faces = [
[(-1, 0, 0), (0, 4, 3, 7), (1, 5, 2, 6)],
[(+1, 0, 0), (2, 6, 1, 5), (3, 7, 0, 4)],
[(0, -1, 0), (1, 5, 0, 4), (2, 6, 3, 7)],
[(0, +1, 0), (3, 7, 2, 6), (0, 4, 1, 5)],
]
nk = self.dimensions[2]
for f in faces:
cell_z = cell_points[f[1], 2]
cell_z = np.abs(cell_z)
cell_z = cell_z.reshape((2, 2))
cell_zmin = cell_z.min(axis=1)
cell_zmax = cell_z.max(axis=1)
coords = np.sum([cell_coords, f[0]], axis=0)
for k in range(nk):
coords[2] = k
ind = self.cell_id(coords)
if ind:
points = self.cell_points(ind)
if points.shape[0] == 8:
z = points[f[2], 2]
z = np.abs(z)
z = z.reshape((2, 2))
zmin = z.min(axis=1)
zmax = z.max(axis=1)
if (
(zmax[0] > cell_zmin[0] and zmin[0] < cell_zmax[0])
or (zmax[1] > cell_zmin[1] and zmin[1] < cell_zmax[1])
or (zmin[0] > cell_zmax[0] and zmax[1] < cell_zmin[1])
or (zmin[1] > cell_zmax[1] and zmax[0] < cell_zmin[0])
):
indices.append(ind)
return indices
if isinstance(ind, int):
ind = [ind]
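        # `rel` arrives as a string naming one of the local helpers above
        # ('connectivity', 'topological' or 'geometric'); eval() resolves
        # that name to the corresponding function.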
rel = eval(rel)
indices = set()
for i in ind:
indices.update(rel(i))
return sorted(indices)
def compute_connectivity(self, inplace=False) -> 'ExplicitStructuredGrid':
"""Compute the faces connectivity flags array.
This method checks the faces connectivity of the cells with
their topological neighbors. The result is stored in the
array of integers ``'ConnectivityFlags'``. Each value in this
array must be interpreted as a binary number, where the digits
        show the faces connectivity of a cell with its topological
neighbors -Z, +Z, -Y, +Y, -X and +X respectively. For example,
a cell with ``'ConnectivityFlags'`` equal to ``27``
(``011011``) indicates that this cell is connected by faces
with their neighbors ``(0, 0, 1)``, ``(0, -1, 0)``,
``(-1, 0, 0)`` and ``(1, 0, 0)``.
Parameters
----------
inplace : bool, optional
            This method is applied to this grid if ``True`` or to a
            copy otherwise (default).
Returns
-------
ExplicitStructuredGrid
            A deep copy of this grid if ``inplace=False``, or this
            grid otherwise.
See Also
--------
ExplicitStructuredGrid.compute_connections : Compute an array with the number of connected cell faces.
Examples
--------
>>> from pyvista import examples
>>>
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid = grid.compute_connectivity() # doctest:+SKIP
>>> grid.plot(show_edges=True) # doctest:+SKIP
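        Decoding a flag value by hand (a sketch; ``27`` is the value from
        the description above, not read from a particular grid):
        >>> format(27, '06b')  # digits are -Z, +Z, -Y, +Y, -X, +X
        '011011'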
"""
if inplace:
self.ComputeFacesConnectivityFlagsArray()
return self
else:
grid = self.copy()
grid.compute_connectivity(inplace=True)
return grid
def compute_connections(self, inplace=False):
"""Compute an array with the number of connected cell faces.
This method calculates the number of topological cell
neighbors connected by faces. The results are stored in the
``'number_of_connections'`` cell array.
Parameters
----------
inplace : bool, optional
This method is applied to this grid if ``True`` or to a copy
otherwise.
Returns
-------
ExplicitStructuredGrid
            A deep copy of this grid if ``inplace=False``, or this
            grid otherwise.
See Also
--------
ExplicitStructuredGrid.compute_connectivity : Compute the faces connectivity flags array.
Examples
--------
>>> from pyvista import examples
>>> grid = examples.load_explicit_structured() # doctest:+SKIP
>>> grid = grid.compute_connections() # doctest:+SKIP
>>> grid.plot(show_edges=True) # doctest:+SKIP
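        Counting the connected faces encoded in one flag by hand (a sketch;
        ``27`` is an illustrative value, as in ``compute_connectivity``):
        >>> bin(27).count('1')
        4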
"""
if inplace:
if 'ConnectivityFlags' in self.cell_data:
array = self.cell_data['ConnectivityFlags']
else:
grid = self.compute_connectivity(inplace=False)
array = grid.cell_data['ConnectivityFlags']
array = array.reshape((-1, 1))
array = array.astype(np.uint8)
array = np.unpackbits(array, axis=1)
array = array.sum(axis=1)
self.cell_data['number_of_connections'] = array
return self
else:
return self.copy().compute_connections(inplace=True)
| 34.857391 | 125 | 0.560734 |
aced83faa1cab286594a2a83d972fcc3e502469e | 321 | py | Python | opencae2020B02/definition_of_integration_methods.py | tkoyama010/tkoyama010 | bec4d63df35770e821605d722e0eb468fed49a87 | ["CC0-1.0"] | 1 | 2021-06-10T14:51:36.000Z | 2021-06-10T14:51:36.000Z | opencae2020B02/definition_of_integration_methods.py | tkoyama010/tkoyama010 | bec4d63df35770e821605d722e0eb468fed49a87 | ["CC0-1.0"] | 4 | 2020-07-08T08:21:04.000Z | 2022-03-04T08:17:01.000Z | opencae2020B02/definition_of_integration_methods.py | tkoyama010/tkoyama010 | bec4d63df35770e821605d722e0eb468fed49a87 | ["CC0-1.0"] | 2 | 2021-12-10T07:16:24.000Z | 2022-02-26T16:21:25.000Z |
ims = []
# im_names is the array of IM names.
for im_name in im_names:
    ims.append(gf.Integ("IM_PRODUCT(" + im_name + ", " + method + ")"))
mims = []
for mesh, im in zip(meshs, ims):
mim = gf.MeshIm(mesh, im)
mims.append(mim)
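# A sketch of the descriptor string the loop above builds (names are
# illustrative; the actual entries of `im_names` and `method` come from
# the surrounding script):
# gf.Integ("IM_PRODUCT(IM_GAUSS1D(3), IM_GAUSS1D(3))")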
| 17.833333 | 35 | 0.448598 |
aced8421caa41394e764ac83b975b7bc5b210040 | 459 | py | Python | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test100.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | ["Artistic-1.0-Perl", "ISC", "bzip2-1.0.5", "TCL", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "PostgreSQL", "BSD-3-Clause"] | 450 | 2015-09-05T09:12:51.000Z | 2018-08-30T01:45:36.000Z | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test100.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | ["Artistic-1.0-Perl", "ISC", "bzip2-1.0.5", "TCL", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "PostgreSQL", "BSD-3-Clause"] | 1,274 | 2015-09-22T20:06:16.000Z | 2018-08-31T22:14:00.000Z | tools/bin/pythonSrc/pychecker-0.8.18/test_input/test100.py | YangHao666666/hawq | 10cff8350f1ba806c6fec64eb67e0e6f6f24786c | ["Artistic-1.0-Perl", "ISC", "bzip2-1.0.5", "TCL", "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "MIT", "PostgreSQL", "BSD-3-Clause"] | 278 | 2015-09-21T19:15:06.000Z | 2018-08-31T00:36:51.000Z |
# https://sourceforge.net/tracker/index.php?func=detail&aid=1563494&group_id=24686&atid=382217
# see zope.interface.declarations.Declaration
# for a real-world one line test, pycheck "import zope.interface.declarations"
'd'
class D:
'd'
__bases__ = ()
# this repr avoids getting the default repr, which shows an id
# which changes between test runs
__repr__ = classmethod(lambda cls: 'D')
# instantiating triggers the bug
d = D()
| 28.6875 | 94 | 0.716776 |
aced858aa76858ea30dcbf329ab1e52a3cc00dfd | 606 | py | Python | 7_5.py | profnssorg/claudioSchefer1 | 1d17fde455065c201bdbac7718240263ccdfd39a | ["MIT"] | null | null | null | 7_5.py | profnssorg/claudioSchefer1 | 1d17fde455065c201bdbac7718240263ccdfd39a | ["MIT"] | null | null | null | 7_5.py | profnssorg/claudioSchefer1 | 1d17fde455065c201bdbac7718240263ccdfd39a | ["MIT"] | null | null | null |
"""Program 7_5.py
Description: Write a program that reads two strings and generates a third
one in which the characters of the second have been removed from the first.
Author: Cláudio Schefer
Date:
Version: 001
"""
# Variable declarations
s1 = ""
s2 = ""
s3 = ""
# Data input
s1 = input("Enter the first string: ")
s2 = input("Enter the second string: ")
# Processing and data output
for letra in s1:
if letra not in s2:
s3 += letra
if s3 == "":
print("Todos os caracteres foram removidos.")
else:
print("Os caracteres %s foram removidos de %s, gerando: %s" %(s2,s1,s3))
| 17.314286 | 85 | 0.668317 |
aced863589477de4b277afc2e9348a4386fa2741 | 985 | py | Python | Pwn2Win/2021/crypto/cladorhizidae/chall.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | ["MIT"] | 1 | 2021-11-02T20:53:58.000Z | 2021-11-02T20:53:58.000Z | Pwn2Win/2021/crypto/cladorhizidae/chall.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | ["MIT"] | null | null | null | Pwn2Win/2021/crypto/cladorhizidae/chall.py | ruhan-islam/ctf-archives | 8c2bf6a608c821314d1a1cfaa05a6cccef8e3103 | ["MIT"] | null | null | null |
#!/usr/bin/python3
from uuid import uuid4
from cladorhizidae import hmac
def main():
k = uuid4().bytes
user_ID = uuid4().bytes
token = hmac(k, user_ID)
flag = open('flag.txt').read()
print("This is a valid user_ID: ", user_ID.hex())
print("This is the corresponding access token: ", token.hex())
print("You can make up to 2^16 queries to forge a new token")
i = 0
    while i <= 2**16:
u_ = bytes.fromhex(input())
if len(u_) in range(4, 34, 2):
t_ = hmac(k, u_)
            i += 1
print(t_.hex())
else:
extra = uuid4().bytes + uuid4().bytes
u_ = user_ID + extra
t_ = hmac(k, u_)
print("ok, now give'me the access token for: ", u_.hex())
t_user = bytes.fromhex(input())
if t_user == t_:
print(flag)
return()
else:
print('not quite right')
return()
main()
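# A sketch of the intended interaction (illustrative values, not a real
# transcript): the service prints a sample (user_ID, token) pair, answers
# hmac(k, u) for chosen messages of 4..32 (even) bytes, then asks you to
# forge the token for user_ID plus 32 random bytes.
#   > This is a valid user_ID:  00112233445566778899aabbccddeeff
#   < deadbeef                  (4-byte query; server replies with its tag)
#   < ff                        (invalid length -> starts the forgery phase)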
| 25.25641 | 69 | 0.501523 |
aced87694cf7b6a924e5b9ad3f5607f28614de80 | 154,213 | py | Python | Lib/test/test_subprocess.py | chexca/cpython | cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | ["CNRI-Python-GPL-Compatible"] | 4 | 2019-04-17T19:09:30.000Z | 2021-08-18T14:51:39.000Z | Lib/test/test_subprocess.py | chexca/cpython | cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | ["CNRI-Python-GPL-Compatible"] | 3 | 2022-03-05T03:48:13.000Z | 2022-03-05T03:49:52.000Z | Lib/test/test_subprocess.py | chexca/cpython | cfc6ce4d40f2f01314b7e283fb972a7bb3ed3faa | ["CNRI-Python-GPL-Compatible"] | 5 | 2018-12-29T15:43:57.000Z | 2020-12-14T15:29:43.000Z |
import unittest
from unittest import mock
from test import support
import subprocess
import sys
import signal
import io
import itertools
import os
import errno
import tempfile
import time
import traceback
import types
import selectors
import sysconfig
import select
import shutil
import threading
import gc
import textwrap
import json
from test.support import FakePath
try:
import _testcapi
except ImportError:
_testcapi = None
try:
import pwd
except ImportError:
pwd = None
try:
import grp
except ImportError:
grp = None
if support.PGO:
raise unittest.SkipTest("test is not helpful for PGO")
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
NONEXISTING_CMD = ('nonexisting_i_hope',)
# Ignore errors that indicate the command was not found
NONEXISTING_ERRORS = (FileNotFoundError, NotADirectoryError, PermissionError)
ZERO_RETURN_CMD = (sys.executable, '-c', 'pass')
def setUpModule():
shell_true = shutil.which('true')
if shell_true is None:
return
if (os.access(shell_true, os.X_OK) and
subprocess.run([shell_true]).returncode == 0):
global ZERO_RETURN_CMD
ZERO_RETURN_CMD = (shell_true,) # Faster than Python startup.
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
if not mswindows:
# subprocess._active is not used on Windows and is set to None.
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(
subprocess._active, "subprocess._active not empty"
)
self.doCleanups()
support.reap_children()
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call(ZERO_RETURN_CMD)
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_bytes_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=os.fsencode(sys.executable))
def test_pathlike_executable(self):
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"],
executable=FakePath(sys.executable))
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(NONEXISTING_ERRORS,
self._assert_python, pre_args,
executable=NONEXISTING_CMD[0])
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_bytes_executable_replaces_shell(self):
self._assert_python([], executable=os.fsencode(sys.executable),
shell=True)
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_pathlike_executable_replaces_shell(self):
self._assert_python([], executable=FakePath(sys.executable),
shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
with support.change_cwd(cwd):
return os.getcwd()
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
def test_cwd_with_bytes(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=os.fsencode(temp_dir))
def test_cwd_with_pathlike(self):
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=FakePath(temp_dir))
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with support.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
with p:
self.assertEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"strawberry")
def test_stderr_redirect_with_no_stdout_redirect(self):
# test stderr=STDOUT while stdout=None (not set)
# - grandchild prints to stderr
# - child redirects grandchild's stderr to its stdout
# - the parent should get grandchild's stderr in child's stdout
p = subprocess.Popen([sys.executable, "-c",
'import sys, subprocess;'
'rc = subprocess.call([sys.executable, "-c",'
' "import sys;"'
' "sys.stderr.write(\'42\')"],'
' stderr=subprocess.STDOUT);'
'sys.exit(rc)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
#NOTE: stdout should get stderr from grandchild
self.assertEqual(stdout, b'42')
self.assertEqual(stderr, b'') # should be empty
self.assertEqual(p.returncode, 0)
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
with p:
self.assertEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') == 1,
'The Python shared library cannot be loaded '
'with an empty environment.')
def test_empty_env(self):
"""Verify that env={} is as empty as possible."""
def is_env_var_to_ignore(n):
"""Determine if an environment variable is under our control."""
# This excludes some __CF_* and VERSIONER_* keys MacOS insists
# on adding even when the environment in exec is empty.
# Gentoo sandboxes also force LD_PRELOAD and SANDBOX_* to exist.
return ('VERSIONER' in n or '__CF' in n or # MacOS
n == 'LD_PRELOAD' or n.startswith('SANDBOX') or # Gentoo
n == 'LC_CTYPE') # Locale coercion triggered
with subprocess.Popen([sys.executable, "-c",
'import os; print(list(os.environ.keys()))'],
stdout=subprocess.PIPE, env={}) as p:
stdout, stderr = p.communicate()
child_env_names = eval(stdout.strip())
self.assertIsInstance(child_env_names, list)
child_env_names = [k for k in child_env_names
if not is_env_var_to_ignore(k)]
self.assertEqual(child_env_names, [])
def test_invalid_cmd(self):
# null character in the command name
cmd = sys.executable + '\0'
with self.assertRaises(ValueError):
subprocess.Popen([cmd, "-c", "pass"])
# null character in the command argument
with self.assertRaises(ValueError):
subprocess.Popen([sys.executable, "-c", "pass#\0"])
def test_invalid_env(self):
# null character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT\0VEGETABLE"] = "cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# null character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange\0VEGETABLE=cabbage"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable name
newenv = os.environ.copy()
newenv["FRUIT=ORANGE"] = "lemon"
with self.assertRaises(ValueError):
subprocess.Popen(ZERO_RETURN_CMD, env=newenv)
# equal character in the environment variable value
newenv = os.environ.copy()
newenv["FRUIT"] = "orange=lemon"
with subprocess.Popen([sys.executable, "-c",
'import sys, os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange=lemon")
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertEqual(stderr, b"pineapple")
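    # Note that communicate(timeout=...) raises TimeoutExpired *without*
    # killing the child; the caller must either kill() it or, as the
    # tests below do, call communicate() again to collect the remaining
    # output once the child finishes.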
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertEqual(stderr.encode(), b"pineapple\npear\n")
def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen(ZERO_RETURN_CMD, **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
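    # Background for the next test: an OS pipe buffer is finite
    # (typically 64 KiB on Linux).  Writing more than that to stdin with
    # a plain blocking write() while the child's stdout pipe also fills
    # up would deadlock both processes; communicate() avoids this by
    # multiplexing the pipes (selectors on POSIX, reader threads on
    # Windows).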
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertEqual(stderr, b"")
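    # In universal newlines (text) mode, "\r" and "\r\n" in the child's
    # output are translated to "\n" as they are read back.  As of this
    # writing, text=True (added in 3.7) is an alias for
    # universal_newlines=True, which the next test verifies.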
def test_universal_newlines_and_text(self):
args = [
sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");']
for extra_kwarg in ('universal_newlines', 'text'):
p = subprocess.Popen(args, **{'stdin': subprocess.PIPE,
'stdout': subprocess.PIPE,
extra_kwarg: True})
with p:
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build may push something like "[42442 refs]\n"
        # to stderr when the subprocess exits.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
for encoding in ['utf-16', 'utf-32-be']:
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding=encoding)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '1\n2\n3\n4')
def test_communicate_errors(self):
for errors, expected in [
('ignore', ''),
('replace', '\ufffd\ufffd'),
('surrogateescape', '\udc80\udc80'),
('backslashreplace', '\\x80\\x80'),
]:
code = ("import sys; "
r"sys.stdout.buffer.write(b'[\x80\x80]')")
args = [sys.executable, '-c', code]
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
encoding='utf-8',
errors=errors)
stdout, stderr = popen.communicate(input='')
self.assertEqual(stdout, '[{}]'.format(expected))
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
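    # list2cmdline() follows the quoting rules of the Microsoft C
    # runtime's argv parser: arguments containing whitespace are wrapped
    # in double quotes, literal double quotes are backslash-escaped, and
    # backslashes are doubled when they immediately precede a double
    # quote (see the list2cmdline docstring for the full rules).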
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen(ZERO_RETURN_CMD)
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
self.assertEqual(p.wait(timeout=support.SHORT_TIMEOUT), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen(ZERO_RETURN_CMD, None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen(ZERO_RETURN_CMD, bufsize=None)
self.assertEqual(p.wait(), 0)
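    # bufsize=1 means line buffering, which is only meaningful in text
    # mode: text-mode writes to stdin are flushed at each newline, while
    # in binary mode the request is ignored (with a RuntimeWarning, as
    # of this writing) and the default buffer size is used instead.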
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
with support.SuppressCrashReport():
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
with self.assertWarnsRegex(RuntimeWarning, 'line buffering'):
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(NONEXISTING_ERRORS):
subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def test_nonexisting_with_pipes(self):
        # bpo-30121: Popen with pipes must properly close its pipes on error.
# Previously, os.close() was called with a Windows handle which is not
# a valid file descriptor.
#
# Run the test in a subprocess to control how the CRT reports errors
# and to get stderr content.
try:
import msvcrt
msvcrt.CrtSetReportMode
except (AttributeError, ImportError):
self.skipTest("need msvcrt.CrtSetReportMode")
code = textwrap.dedent(f"""
import msvcrt
import subprocess
cmd = {NONEXISTING_CMD!r}
for report_type in [msvcrt.CRT_WARN,
msvcrt.CRT_ERROR,
msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(report_type, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(report_type, msvcrt.CRTDBG_FILE_STDERR)
try:
subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
pass
""")
cmd = [sys.executable, "-c", code]
proc = subprocess.Popen(cmd,
stderr=subprocess.PIPE,
universal_newlines=True)
with proc:
stderr = proc.communicate()[1]
self.assertEqual(stderr, "")
self.assertEqual(proc.returncode, 0)
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(NONEXISTING_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=support.SHORT_TIMEOUT)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = tempfile.mkstemp()
ofhandle, ofname = tempfile.mkstemp()
efhandle, efname = tempfile.mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_repr(self):
        # Run a command that waits for user input, to check the repr() of
        # a Popen object both while the subprocess runs and after it exits.
code = 'import sys; input(); sys.exit(57)'
cmd = [sys.executable, '-c', code]
result = "<Popen: returncode: {}"
with subprocess.Popen(
cmd, stdin=subprocess.PIPE, universal_newlines=True) as proc:
self.assertIsNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
proc.communicate(input='exit...\n')
proc.wait()
self.assertIsNotNone(proc.returncode)
self.assertTrue(
repr(proc).startswith(result.format(proc.returncode)) and
repr(proc).endswith('>')
)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
    # This test is Linux-specific for simplicity, so that we at least have
    # some coverage.  It is not a platform-specific bug.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
ZERO_RETURN_CMD, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_includes_filename(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.call(['/opt/nonexistent_binary', 'with', 'some', 'args'])
self.assertEqual(c.exception.filename, '/opt/nonexistent_binary')
@unittest.skipIf(mswindows, "behavior currently not supported on Windows")
def test_file_not_found_with_bad_cwd(self):
with self.assertRaises(FileNotFoundError) as c:
subprocess.Popen(['exit', '0'], cwd='/some/nonexistent/directory')
self.assertEqual(c.exception.filename, '/some/nonexistent/directory')
def test_class_getitems(self):
self.assertIsInstance(subprocess.Popen[bytes], types.GenericAlias)
self.assertIsInstance(subprocess.CompletedProcess[str], types.GenericAlias)
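# subprocess.run() is the recommended high-level entry point: it wraps
# Popen, waits for the process to finish, and returns a CompletedProcess
# carrying args, returncode and any captured stdout/stderr.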
class RunFuncTestCase(BaseTestCase):
def run_python(self, code, **kwargs):
"""Run Python code in a subprocess using subprocess.run"""
argv = [sys.executable, "-c", code]
return subprocess.run(argv, **kwargs)
def test_returncode(self):
# call() function with sequence argument
cp = self.run_python("import sys; sys.exit(47)")
self.assertEqual(cp.returncode, 47)
with self.assertRaises(subprocess.CalledProcessError):
cp.check_returncode()
def test_check(self):
with self.assertRaises(subprocess.CalledProcessError) as c:
self.run_python("import sys; sys.exit(47)", check=True)
self.assertEqual(c.exception.returncode, 47)
def test_check_zero(self):
# check_returncode shouldn't raise when returncode is zero
cp = subprocess.run(ZERO_RETURN_CMD, check=True)
self.assertEqual(cp.returncode, 0)
def test_timeout(self):
# run() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.run waits for the
# child.
with self.assertRaises(subprocess.TimeoutExpired):
self.run_python("while True: pass", timeout=0.0001)
def test_capture_stdout(self):
# capture stdout with zero return code
cp = self.run_python("print('BDFL')", stdout=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stdout)
def test_capture_stderr(self):
cp = self.run_python("import sys; sys.stderr.write('BDFL')",
stderr=subprocess.PIPE)
self.assertIn(b'BDFL', cp.stderr)
def test_check_output_stdin_arg(self):
# run() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
stdin=tf, stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
cp = self.run_python(
"import sys; sys.stdout.write(sys.stdin.read().upper())",
input=b'pear', stdout=subprocess.PIPE)
self.assertIn(b'PEAR', cp.stdout)
def test_check_output_stdin_with_input_arg(self):
# run() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError,
msg="Expected ValueError when stdin and input args supplied.") as c:
output = self.run_python("print('will not be run')",
stdin=tf, input=b'hare')
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
with self.assertRaises(subprocess.TimeoutExpired) as c:
cp = self.run_python((
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"),
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3, stdout=subprocess.PIPE)
self.assertEqual(c.exception.output, b'BDFL')
# output is aliased to stdout
self.assertEqual(c.exception.stdout, b'BDFL')
def test_run_kwargs(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
cp = self.run_python(('import sys, os;'
'sys.exit(33 if os.getenv("FRUIT")=="banana" else 31)'),
env=newenv)
self.assertEqual(cp.returncode, 33)
def test_run_with_pathlike_path(self):
# bpo-31961: test run(pathlike_object)
# the name of a command that can be run without
# any arguments that exit fast
prog = 'tree.com' if mswindows else 'ls'
path = shutil.which(prog)
if path is None:
self.skipTest(f'{prog} required for this test')
path = FakePath(path)
res = subprocess.run(path, stdout=subprocess.DEVNULL)
self.assertEqual(res.returncode, 0)
with self.assertRaises(TypeError):
subprocess.run(path, stdout=subprocess.DEVNULL, shell=True)
def test_run_with_bytes_path_and_arguments(self):
# bpo-31961: test run([bytes_object, b'additional arguments'])
path = os.fsencode(sys.executable)
args = [path, '-c', b'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_run_with_pathlike_path_and_arguments(self):
# bpo-31961: test run([pathlike_object, 'additional arguments'])
path = FakePath(sys.executable)
args = [path, '-c', 'import sys; sys.exit(57)']
res = subprocess.run(args)
self.assertEqual(res.returncode, 57)
def test_capture_output(self):
cp = self.run_python(("import sys;"
"sys.stdout.write('BDFL'); "
"sys.stderr.write('FLUFL')"),
capture_output=True)
self.assertIn(b'BDFL', cp.stdout)
self.assertIn(b'FLUFL', cp.stderr)
def test_stdout_with_capture_output_arg(self):
# run() refuses to accept 'stdout' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stdout and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stdout=tf)
self.assertIn('stdout', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
def test_stderr_with_capture_output_arg(self):
# run() refuses to accept 'stderr' with 'capture_output'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
with self.assertRaises(ValueError,
msg=("Expected ValueError when stderr and capture_output "
"args supplied.")) as c:
output = self.run_python("print('will not be run')",
capture_output=True, stderr=tf)
self.assertIn('stderr', c.exception.args[0])
self.assertIn('capture_output', c.exception.args[0])
# This test _might_ wind up a bit fragile on loaded build+test machines
# as it depends on the timing with wide enough margins for normal situations
# but does assert that it happened "soon enough" to believe the right thing
# happened.
@unittest.skipIf(mswindows, "requires posix like 'sleep' shell command")
def test_run_with_shell_timeout_and_capture_output(self):
"""Output capturing after a timeout mustn't hang forever on open filehandles."""
before_secs = time.monotonic()
try:
subprocess.run('sleep 3', shell=True, timeout=0.1,
capture_output=True) # New session unspecified.
except subprocess.TimeoutExpired as exc:
after_secs = time.monotonic()
stacks = traceback.format_exc() # assertRaises doesn't give this.
else:
self.fail("TimeoutExpired not raised.")
self.assertLess(after_secs - before_secs, 1.5,
msg="TimeoutExpired was delayed! Bad traceback:\n```\n"
f"{stacks}```")
def _get_test_grp_name():
for name_group in ('staff', 'nogroup', 'grp', 'nobody', 'nfsnobody'):
if grp:
try:
grp.getgrnam(name_group)
except KeyError:
continue
return name_group
else:
raise unittest.SkipTest('No identified group name to use for this test on this platform.')
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
# This avoids hard coding the errno value or the OS perror()
# string and instead capture the exception that we want to see
# below for comparison.
desired_exception = e
else:
self.fail("chdir to nonexistent directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
self.assertEqual(desired_exception.filename, e.filename)
else:
self.fail("Expected OSError: %s" % desired_exception)
# We mock the __del__ method for Popen in the next two tests
# because it does cleanup based on the pid returned by fork_exec
# along with issuing a resource warning if it still exists. Since
# we don't actually spawn a process in these tests we can forego
# the destructor. An alternative would be to set _child_created to
# False before the destructor is called but there is no easy way
    # to do that.
class PopenNoDestructor(subprocess.Popen):
def __del__(self):
pass
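    # The two tests below exercise the error-reporting pipe: when exec
    # fails in the child, it writes b"ExceptionName:hex_errno:description"
    # to errpipe_write and the parent re-raises a matching exception;
    # malformed pipe data must surface as a SubprocessError instead.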
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_normal(self, fork_exec):
"""Test error passing done through errpipe_write in the good case"""
def proper_error(*args):
errpipe_write = args[13]
# Write the hex for the error code EISDIR: 'is a directory'
err_code = '{:x}'.format(errno.EISDIR).encode()
os.write(errpipe_write, b"OSError:" + err_code + b":")
return 0
fork_exec.side_effect = proper_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(IsADirectoryError):
self.PopenNoDestructor(["non_existent_command"])
@mock.patch("subprocess._posixsubprocess.fork_exec")
def test_exception_errpipe_bad_data(self, fork_exec):
"""Test error passing done through errpipe_write where its not
in the expected format"""
error_data = b"\xFF\x00\xDE\xAD"
def bad_error(*args):
errpipe_write = args[13]
# Anything can be in the pipe, no assumptions should
# be made about its encoding, so we'll write some
# arbitrary hex bytes to test it out
os.write(errpipe_write, error_data)
return 0
fork_exec.side_effect = bad_error
with mock.patch("subprocess.os.waitpid",
side_effect=ChildProcessError):
with self.assertRaises(subprocess.SubprocessError) as e:
self.PopenNoDestructor(["non_existent_command"])
self.assertIn(repr(error_data), str(e.exception))
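    # Python ignores SIGPIPE (and, where present, SIGXFZ/SIGXFSZ) at
    # startup; restore_signals=True resets them to SIG_DFL in the child.
    # The SigIgn line in /proc/self/status is a hex bitmask of ignored
    # signals, so the two runs below must report different masks.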
@unittest.skipIf(not os.path.exists('/proc/self/status'),
"need /proc/self/status")
def test_restore_signals(self):
# Blindly assume that cat exists on systems with /proc/self/status...
default_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=False)
for line in default_proc_status.splitlines():
if line.startswith(b'SigIgn'):
default_sig_ign_mask = line
break
else:
self.skipTest("SigIgn not found in /proc/self/status.")
restored_proc_status = subprocess.check_output(
['cat', '/proc/self/status'],
restore_signals=True)
for line in restored_proc_status.splitlines():
if line.startswith(b'SigIgn'):
restored_sig_ign_mask = line
break
self.assertNotEqual(default_sig_ign_mask, restored_sig_ign_mask,
msg="restore_signals=True should've unblocked "
"SIGPIPE and friends.")
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
        # EPERM error from it depending on the test execution environment;
        # that still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c", "import os; print(os.getsid(0))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_sid = os.getsid(0)
child_sid = int(output)
self.assertNotEqual(parent_sid, child_sid)
@unittest.skipUnless(hasattr(os, 'setreuid'), 'no setreuid on platform')
def test_user(self):
# For code coverage of the user parameter. We don't care if we get an
        # EPERM error from it depending on the test execution environment;
        # that still indicates that it was called.
uid = os.geteuid()
test_users = [65534 if uid != 65534 else 65533, uid]
name_uid = "nobody" if sys.platform != 'darwin' else "unknown"
if pwd is not None:
try:
pwd.getpwnam(name_uid)
test_users.append(name_uid)
except KeyError:
# unknown user name
name_uid = None
for user in test_users:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(user=user, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getuid())"],
user=user,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
except OSError as e:
if e.errno not in (errno.EACCES, errno.EPERM):
raise
else:
if isinstance(user, str):
user_uid = pwd.getpwnam(user).pw_uid
else:
user_uid = user
child_user = int(output)
self.assertEqual(child_user, user_uid)
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=-1)
if pwd is None and name_uid is not None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=name_uid)
@unittest.skipIf(hasattr(os, 'setreuid'), 'setreuid() available on platform')
def test_user_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, user=65535)
@unittest.skipUnless(hasattr(os, 'setregid'), 'no setregid() on platform')
def test_group(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
if grp is not None:
group_list.append(name_group)
for group in group_list + [gid]:
# posix_spawn() may be used with close_fds=False
for close_fds in (False, True):
with self.subTest(group=group, close_fds=close_fds):
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getgid())"],
group=group,
close_fds=close_fds)
except PermissionError: # (EACCES, EPERM)
pass
else:
if isinstance(group, str):
group_gid = grp.getgrnam(group).gr_gid
else:
group_gid = group
child_group = int(output)
self.assertEqual(child_group, group_gid)
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=-1)
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=name_group)
@unittest.skipIf(hasattr(os, 'setregid'), 'setregid() available on platform')
def test_group_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, group=65535)
@unittest.skipUnless(hasattr(os, 'setgroups'), 'no setgroups() on platform')
def test_extra_groups(self):
gid = os.getegid()
group_list = [65534 if gid != 65534 else 65533]
name_group = _get_test_grp_name()
perm_error = False
if grp is not None:
group_list.append(name_group)
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os, sys, json; json.dump(os.getgroups(), sys.stdout)"],
extra_groups=group_list)
except OSError as ex:
if ex.errno != errno.EPERM:
raise
perm_error = True
else:
parent_groups = os.getgroups()
child_groups = json.loads(output)
if grp is not None:
desired_gids = [grp.getgrnam(g).gr_gid if isinstance(g, str) else g
for g in group_list]
else:
desired_gids = group_list
if perm_error:
self.assertEqual(set(child_groups), set(parent_groups))
else:
self.assertEqual(set(desired_gids), set(child_groups))
# make sure we bomb on negative values
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[-1])
if grp is None:
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD,
extra_groups=[name_group])
@unittest.skipIf(hasattr(os, 'setgroups'), 'setgroups() available on platform')
def test_extra_groups_error(self):
with self.assertRaises(ValueError):
subprocess.check_call(ZERO_RETURN_CMD, extra_groups=[])
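    # For the umask test below: open() creates the file with mode 0o666,
    # and 0o666 & ~0o053 == 0o624, which is the mode asserted after
    # masking out the execute bits.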
@unittest.skipIf(mswindows or not hasattr(os, 'umask'),
'POSIX umask() is not available.')
def test_umask(self):
tmpdir = None
try:
tmpdir = tempfile.mkdtemp()
name = os.path.join(tmpdir, "beans")
# We set an unusual umask in the child so as a unique mode
# for us to test the child's touched file for.
subprocess.check_call(
[sys.executable, "-c", f"open({name!r}, 'w').close()"],
umask=0o053)
# Ignore execute permissions entirely in our test,
# filesystems could be mounted to ignore or force that.
st_mode = os.stat(name).st_mode & 0o666
expected_mode = 0o624
self.assertEqual(expected_mode, st_mode,
msg=f'{oct(expected_mode)} != {oct(st_mode)}')
finally:
if tmpdir is not None:
shutil.rmtree(tmpdir)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_CalledProcessError_str_signal(self):
err = subprocess.CalledProcessError(-int(signal.SIGABRT), "fake cmd")
error_string = str(err)
        # We're relying on the repr() of the signal.Signals IntEnum to provide
# the word signal, the signal name and the numeric value.
self.assertIn("signal", error_string.lower())
# We're not being specific about the signal name as some signals have
# multiple names and which name is revealed can vary.
self.assertIn("SIG", error_string)
self.assertIn(str(signal.SIGABRT), error_string)
def test_CalledProcessError_str_unknown_signal(self):
err = subprocess.CalledProcessError(-9876543, "fake cmd")
error_string = str(err)
self.assertIn("unknown signal 9876543.", error_string)
def test_CalledProcessError_str_non_zero(self):
err = subprocess.CalledProcessError(2, "fake cmd")
error_string = str(err)
self.assertIn("non-zero exit status 2.", error_string)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
with p:
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, ZERO_RETURN_CMD,
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
@unittest.skipIf(
sys.platform == 'darwin', 'setrlimit() seems to fail on OS X')
def test_preexec_fork_failure(self):
# The internal code did not preserve the previous exception when
# re-enabling garbage collection
try:
from resource import getrlimit, setrlimit, RLIMIT_NPROC
except ImportError as err:
self.skipTest(err) # RLIMIT_NPROC is specific to Linux and BSD
limits = getrlimit(RLIMIT_NPROC)
[_, hard] = limits
setrlimit(RLIMIT_NPROC, (0, hard))
self.addCleanup(setrlimit, RLIMIT_NPROC, limits)
try:
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
except BlockingIOError:
# Forking should raise EAGAIN, translated to BlockingIOError
pass
else:
self.skipTest('RLIMIT_NPROC had no effect; probably superuser')
def test_args_string(self):
# args is a string
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = tempfile.mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!%s\n" % support.unix_shell)
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
with p:
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
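    # Helpers to move file descriptors out of the way and restore them
    # afterwards.  os.dup2() makes the target fd inheritable by default,
    # so each fd's inheritable flag is saved and passed back explicitly.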
def _save_fds(self, save_fds):
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
self.assertEqual(out, b'apple')
self.assertEqual(err, b'orange')
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = os.read(temp_fds[0], 1024).strip()
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [tempfile.mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = os.read(stderr_no, 1024).strip()
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
    # When duping fds, if one of the fds is 0, 1 or 2, it may be
    # overwritten before being duplicated itself (#12607).  The test below
    # covers all permutations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def _check_swap_std_fds_with_one_closed(self, from_fds, to_fds):
saved_fds = self._save_fds(range(3))
try:
for from_fd in from_fds:
with tempfile.TemporaryFile() as f:
os.dup2(f.fileno(), from_fd)
fd_to_close = (set(range(3)) - set(from_fds)).pop()
os.close(fd_to_close)
arg_names = ['stdin', 'stdout', 'stderr']
kwargs = {}
for from_fd, to_fd in zip(from_fds, to_fds):
kwargs[arg_names[to_fd]] = from_fd
code = textwrap.dedent(r'''
import os, sys
skipped_fd = int(sys.argv[1])
for fd in range(3):
if fd != skipped_fd:
os.write(fd, str(fd).encode('ascii'))
''')
skipped_fd = (set(range(3)) - set(to_fds)).pop()
rc = subprocess.call([sys.executable, '-c', code, str(skipped_fd)],
**kwargs)
self.assertEqual(rc, 0)
for from_fd, to_fd in zip(from_fds, to_fds):
os.lseek(from_fd, 0, os.SEEK_SET)
read_bytes = os.read(from_fd, 1024)
read_fds = list(map(int, read_bytes.decode('ascii')))
msg = textwrap.dedent(f"""
When testing {from_fds} to {to_fds} redirection,
parent descriptor {from_fd} got redirected
to descriptor(s) {read_fds} instead of descriptor {to_fd}.
""")
self.assertEqual([to_fd], read_fds, msg)
finally:
self._restore_fds(saved_fds)
# Check that subprocess can remap std fds correctly even
# if one of them is closed (#32844).
def test_swap_std_fds_with_one_closed(self):
for from_fds in itertools.combinations(range(3), 2):
for to_fds in itertools.permutations(range(3), 2):
self._check_swap_std_fds_with_one_closed(from_fds, to_fds)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
ZERO_RETURN_CMD,
preexec_fn=prepare)
except ValueError as err:
# Pure Python implementations keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process
env['LC_ALL'] = 'C'
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(ZERO_RETURN_CMD[0])
args = list(ZERO_RETURN_CMD[1:])
path, program = os.path.split(ZERO_RETURN_CMD[0])
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program]+args)
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'%s' %s" % (abs_program, " ".join(args).encode("utf-8"))
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program]+args, env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program]+args, env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=fds_to_keep)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse((remaining_fds - fds_to_keep) & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
        # avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
        #  +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
            # Leave two pairs of low ones available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
ZERO_RETURN_CMD,
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
# bpo-32270: Ensure that descriptors specified in pass_fds
# are inherited even if they are used in redirections.
# Contributed by @izbyshev.
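    # A sketch of the scenario (illustrative): with pass_fds=(fd,) and
    # stdout=fd, the child must end up with ``fd`` open and inheritable in
    # its own right, not merely duped onto descriptor 1.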
def test_pass_fds_redirected(self):
"""Regression test for https://bugs.python.org/issue32270."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
pass_fds = []
for _ in range(2):
fd = os.open(os.devnull, os.O_RDWR)
self.addCleanup(os.close, fd)
pass_fds.append(fd)
stdout_r, stdout_w = os.pipe()
self.addCleanup(os.close, stdout_r)
self.addCleanup(os.close, stdout_w)
pass_fds.insert(1, stdout_w)
with subprocess.Popen([sys.executable, fd_status],
stdin=pass_fds[0],
stdout=pass_fds[1],
stderr=pass_fds[2],
close_fds=True,
pass_fds=pass_fds):
output = os.read(stdout_r, 1024)
fds = {int(num) for num in output.split(b',')}
self.assertEqual(fds, {0, 1, 2} | frozenset(pass_fds), f"output={output!a}")
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen(ZERO_RETURN_CMD,
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
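        # (Sketch of the property under test) with bufsize=0 the read end
        # becomes readable as soon as the child writes any bytes, so
        # select.select([p.stdout], [], [], 0.0)[0] contains p.stdout.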
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
with support.check_warnings(('', ResourceWarning)):
p = None
os.kill(pid, signal.SIGKILL)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
        # allow some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
if mswindows:
# subprocess._active is not used on Windows and is set to None.
self.assertIsNone(subprocess._active)
else:
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError) as err:
_posixsubprocess.fork_exec(
args, exe_list,
True, (), cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
False, [], 0, -1,
func)
# Attempt to prevent
# "TypeError: fork_exec() takes exactly N arguments (M given)"
                # from passing the test. Ideally we would refactor this to
                # start with a valid *args list, confirm that a good call
                # with it works, and then mutate it in various ways to ensure
                # that bad calls with individual arg type errors raise a
                # TypeError. Saving that for a future PR...
self.assertNotIn('takes exactly', str(err.exception))
finally:
if not gc_enabled:
gc.disable()
@support.cpython_only
def test_fork_exec_sorted_fd_sanity_check(self):
# Issue #23564: sanity check the fork_exec() fds_to_keep sanity check.
import _posixsubprocess
class BadInt:
first = True
def __init__(self, value):
self.value = value
def __int__(self):
if self.first:
self.first = False
return self.value
raise ValueError
gc_enabled = gc.isenabled()
try:
gc.enable()
for fds_to_keep in (
(-1, 2, 3, 4, 5), # Negative number.
('str', 4), # Not an int.
(18, 23, 42, 2**63), # Out of range.
(5, 4), # Not sorted.
(6, 7, 7, 8), # Duplicate.
(BadInt(1), BadInt(2)),
):
with self.assertRaises(
ValueError,
msg='fds_to_keep={}'.format(fds_to_keep)) as c:
_posixsubprocess.fork_exec(
[b"false"], [b"false"],
True, fds_to_keep, None, [b"env"],
-1, -1, -1, -1,
1, 2, 3, 4,
True, True,
None, None, None, -1,
None)
self.assertIn('fds_to_keep', str(c.exception))
finally:
if not gc_enabled:
gc.disable()
def test_communicate_BrokenPipeError_stdin_close(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
proc.communicate() # Should swallow BrokenPipeError from close.
mock_proc_stdin.close.assert_called_with()
def test_communicate_BrokenPipeError_stdin_write(self):
# By not setting stdout or stderr or a timeout we force the fast path
# that just calls _stdin_write() internally due to our mock.
proc = subprocess.Popen(ZERO_RETURN_CMD)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.write.side_effect = BrokenPipeError
proc.communicate(b'stuff') # Should swallow the BrokenPipeError.
mock_proc_stdin.write.assert_called_once_with(b'stuff')
mock_proc_stdin.close.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_flush(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin, \
open(os.devnull, 'wb') as dev_null:
mock_proc_stdin.flush.side_effect = BrokenPipeError
# because _communicate registers a selector using proc.stdin...
mock_proc_stdin.fileno.return_value = dev_null.fileno()
# _communicate() should swallow BrokenPipeError from flush.
proc.communicate(b'stuff')
mock_proc_stdin.flush.assert_called_once_with()
def test_communicate_BrokenPipeError_stdin_close_with_timeout(self):
# Setting stdin and stdout forces the ._communicate() code path.
# python -h exits faster than python -c pass (but spams stdout).
proc = subprocess.Popen([sys.executable, '-h'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
with proc, mock.patch.object(proc, 'stdin') as mock_proc_stdin:
mock_proc_stdin.close.side_effect = BrokenPipeError
# _communicate() should swallow BrokenPipeError from close.
proc.communicate(timeout=999)
mock_proc_stdin.close.assert_called_once_with()
@unittest.skipUnless(_testcapi is not None
and hasattr(_testcapi, 'W_STOPCODE'),
'need _testcapi.W_STOPCODE')
def test_stopped(self):
"""Test wait() behavior when waitpid returns WIFSTOPPED; issue29335."""
args = ZERO_RETURN_CMD
proc = subprocess.Popen(args)
# Wait until the real process completes to avoid zombie process
support.wait_process(proc.pid, exitcode=0)
status = _testcapi.W_STOPCODE(3)
with mock.patch('subprocess.os.waitpid', return_value=(proc.pid, status)):
returncode = proc.wait()
self.assertEqual(returncode, -3)
def test_send_signal_race(self):
# bpo-38630: send_signal() must poll the process exit status to reduce
# the risk of sending the signal to the wrong process.
proc = subprocess.Popen(ZERO_RETURN_CMD)
# wait until the process completes without using the Popen APIs.
support.wait_process(proc.pid, exitcode=0)
# returncode is still None but the process completed.
self.assertIsNone(proc.returncode)
with mock.patch("os.kill") as mock_kill:
proc.send_signal(signal.SIGTERM)
# send_signal() didn't call os.kill() since the process already
# completed.
mock_kill.assert_not_called()
        # Don't check the returncode value: the test already read the exit
        # status, so Popen cannot read it and uses a default returncode
        # instead.
self.assertIsNotNone(proc.returncode)
def test_communicate_repeated_call_after_stdout_close(self):
proc = subprocess.Popen([sys.executable, '-c',
'import os, time; os.close(1), time.sleep(2)'],
stdout=subprocess.PIPE)
while True:
try:
proc.communicate(timeout=0.1)
return
except subprocess.TimeoutExpired:
pass
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants, because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_keywords(self):
# startupinfo argument
# We use hardcoded constants, because we do not want to
# depend on win32all.
        STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO(
            dwFlags=STARTF_USESHOWWINDOW,
wShowWindow=SW_MAXIMIZE
)
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_startupinfo_copy(self):
# bpo-34044: Popen must not modify input STARTUPINFO structure
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
startupinfo.wShowWindow = subprocess.SW_HIDE
# Call Popen() twice with the same startupinfo object to make sure
# that it's not modified
for _ in range(2):
cmd = ZERO_RETURN_CMD
with open(os.devnull, 'w') as null:
proc = subprocess.Popen(cmd,
stdout=null,
stderr=subprocess.STDOUT,
startupinfo=startupinfo)
with proc:
proc.communicate()
self.assertEqual(proc.returncode, 0)
self.assertEqual(startupinfo.dwFlags,
subprocess.STARTF_USESHOWWINDOW)
self.assertIsNone(startupinfo.hStdInput)
self.assertIsNone(startupinfo.hStdOutput)
self.assertIsNone(startupinfo.hStdError)
self.assertEqual(startupinfo.wShowWindow, subprocess.SW_HIDE)
self.assertEqual(startupinfo.lpAttributeList, {"handle_list": []})
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
@support.cpython_only
def test_issue31471(self):
# There shouldn't be an assertion failure in Popen() in case the env
# argument has a bad keys() method.
class BadEnv(dict):
keys = None
with self.assertRaises(TypeError):
subprocess.Popen(ZERO_RETURN_CMD, env=BadEnv())
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_close_fds_with_stdio(self):
import msvcrt
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
handles = []
for fd in fds:
os.set_inheritable(fd, True)
handles.append(msvcrt.get_osfhandle(fd))
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
int(stdout.strip()) # Check that stdout is an integer
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# The same as the previous call, but with an empty handle_list
handle_list = []
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handle_list}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=True)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 1)
self.assertIn(b"OSError", stderr)
# Check for a warning due to using handle_list and close_fds=False
with support.check_warnings((".*overriding close_fds", RuntimeWarning)):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": handles[:]}
p = subprocess.Popen([sys.executable, "-c",
"import msvcrt; print(msvcrt.open_osfhandle({}, 0))".format(handles[0])],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
startupinfo=startupinfo, close_fds=False)
stdout, stderr = p.communicate()
self.assertEqual(p.returncode, 0)
def test_empty_attribute_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_empty_handle_list(self):
startupinfo = subprocess.STARTUPINFO()
startupinfo.lpAttributeList = {"handle_list": []}
subprocess.call(ZERO_RETURN_CMD,
startupinfo=startupinfo)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
with p:
self.assertIn(b"physalis", p.stdout.read())
def test_shell_encodings(self):
# Run command through the shell (string)
for enc in ['ansi', 'oem']:
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv,
encoding=enc)
with p:
self.assertIn("physalis", p.stdout.read(), enc)
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
with p:
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class MiscTests(unittest.TestCase):
class RecordingPopen(subprocess.Popen):
"""A Popen that saves a reference to each instance for testing."""
instances_created = []
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.instances_created.append(self)
@mock.patch.object(subprocess.Popen, "_communicate")
def _test_keyboardinterrupt_no_kill(self, popener, mock__communicate,
**kwargs):
"""Fake a SIGINT happening during Popen._communicate() and ._wait().
This avoids the need to actually try and get test environments to send
and receive signals reliably across platforms. The net effect of a ^C
happening during a blocking subprocess execution which we want to clean
up from is a KeyboardInterrupt coming out of communicate() or wait().
"""
mock__communicate.side_effect = KeyboardInterrupt
try:
with mock.patch.object(subprocess.Popen, "_wait") as mock__wait:
# We patch out _wait() as no signal was involved so the
# child process isn't actually going to exit rapidly.
mock__wait.side_effect = KeyboardInterrupt
with mock.patch.object(subprocess, "Popen",
self.RecordingPopen):
with self.assertRaises(KeyboardInterrupt):
popener([sys.executable, "-c",
"import time\ntime.sleep(9)\nimport sys\n"
"sys.stderr.write('\\n!runaway child!\\n')"],
stdout=subprocess.DEVNULL, **kwargs)
for call in mock__wait.call_args_list[1:]:
self.assertNotEqual(
call, mock.call(timeout=None),
"no open-ended wait() after the first allowed: "
f"{mock__wait.call_args_list}")
sigint_calls = []
for call in mock__wait.call_args_list:
if call == mock.call(timeout=0.25): # from Popen.__init__
sigint_calls.append(call)
self.assertLessEqual(mock__wait.call_count, 2,
msg=mock__wait.call_args_list)
self.assertEqual(len(sigint_calls), 1,
msg=mock__wait.call_args_list)
finally:
# cleanup the forgotten (due to our mocks) child process
process = self.RecordingPopen.instances_created.pop()
process.kill()
process.wait()
self.assertEqual([], self.RecordingPopen.instances_created)
def test_call_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.call, timeout=6.282)
def test_run_keyboardinterrupt_no_kill(self):
self._test_keyboardinterrupt_no_kill(subprocess.run, timeout=6.282)
def test_context_manager_keyboardinterrupt_no_kill(self):
def popen_via_context_manager(*args, **kwargs):
with subprocess.Popen(*args, **kwargs) as unused_process:
raise KeyboardInterrupt # Test how __exit__ handles ^C.
self._test_keyboardinterrupt_no_kill(popen_via_context_manager)
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = {"list2cmdline", "Handle", "pwd", "grp"}
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces(BaseTestCase):
def setUp(self):
super().setUp()
f, fname = tempfile.mkstemp(".py", "te st")
        self.fname = fname.lower()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
with p:
self.assertEqual(
                p.stdout.read().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(NONEXISTING_ERRORS):
with subprocess.Popen(NONEXISTING_CMD,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen(ZERO_RETURN_CMD,
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
if __name__ == "__main__":
unittest.main()
| 42.529785 | 107 | 0.559985 |
aced883c0eaa48915c097b0da7b48571f437c017 | 2,425 | py | Python | custom_components/hausnet/sensor.py | HausNet/hausnet-hass | bcd3cb48d9fdc201f1a8574b8df9694e56079c88 | ["MIT"] | null | null | null | custom_components/hausnet/sensor.py | HausNet/hausnet-hass | bcd3cb48d9fdc201f1a8574b8df9694e56079c88 | ["MIT"] | null | null | null | custom_components/hausnet/sensor.py | HausNet/hausnet-hass | bcd3cb48d9fdc201f1a8574b8df9694e56079c88 | ["MIT"] | null | null | null | """Support for HausNet sensors."""
import logging
from typing import Callable, Dict, Optional, Any, Union
from homeassistant.helpers.typing import (HomeAssistantType, ConfigType)
from homeassistant.const import CONF_NAME
from hausnet.hausnet import HausNet
from hausnet.builders import DeviceAssembly
# noinspection PyUnresolvedReferences
from . import (
DOMAIN, INTERFACES, CONF_DEVICE_FQID, PLATFORM_SCHEMA, HausNetDevice
)
_LOGGER = logging.getLogger(__name__)
# noinspection PyUnusedLocal
async def async_setup_platform(
hass: HomeAssistantType,
config: ConfigType,
async_add_entities: Callable,
discovery_info: Dict
) -> None:
"""Set up a sensor. Called multiple times for each platform-based switch
in configuration.
:param hass: HA internals
:param config: Configuration from file
:param async_add_entities: Callee to add entities
:param discovery_info: Unused
"""
assert DOMAIN in hass.data, "HausNet domain must be defined"
devices = []
_LOGGER.debug("Adding HausNet sensor...")
hausnet: HausNet = hass.data[DOMAIN][INTERFACES]
interface = hausnet.device_assemblies()[config[CONF_DEVICE_FQID]]
sensor = HausNetSensor(
config[CONF_DEVICE_FQID],
interface,
config[CONF_NAME] if CONF_NAME in config else None
)
async_add_entities([sensor])
_LOGGER.debug("Added HausNet sensor: %s", sensor.unique_id)
# noinspection PyAbstractClass
class HausNetSensor(HausNetDevice):
"""Representation of a HausNet Sensor."""
def __init__(
self,
device_id: str,
device_assembly: DeviceAssembly,
name: Optional[str] = None
) -> None:
"""Set up the device_assembly to the (real) basic switch"""
super().__init__(device_id, device_assembly, name)
self._state = None
@property
def state(self) -> Union[bool, float, int]:
"""Return the current state."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of the sensor."""
return self._device_assembly.device.state.unit
def update_state_from_message(self, message: Dict[str, Any]):
"""On receipt of a state update, the parent class calls here, then
updates HASS.
"""
self._state = self._device_assembly.device.state.value
| 31.493506 | 76 | 0.68701 |
aced884d182ab0a48a4ddb58f3e873aeb44e0a88 | 1,959 | py | Python | heritagesites/filters.py | Michael-Cantley/heritagesites | 92aa3526aedd14d0d5632eb7c3f64e1c81617f89 | ["MIT"] | null | null | null | heritagesites/filters.py | Michael-Cantley/heritagesites | 92aa3526aedd14d0d5632eb7c3f64e1c81617f89 | ["MIT"] | null | null | null | heritagesites/filters.py | Michael-Cantley/heritagesites | 92aa3526aedd14d0d5632eb7c3f64e1c81617f89 | ["MIT"] | null | null | null | import django_filters
from heritagesites.models import CountryArea, HeritageSite, HeritageSiteCategory, \
IntermediateRegion, SubRegion, Region
class HeritageSiteFilter(django_filters.FilterSet):
site_name = django_filters.CharFilter(
field_name='site_name',
label='Heritage Site Name',
lookup_expr='icontains'
)
# Add description, heritage_site_category, region, sub_region and intermediate_region filters here
description = django_filters.CharFilter(
field_name='description',
label='Description',
lookup_expr='icontains'
)
heritage_site_category = django_filters.ModelChoiceFilter(
field_name='heritage_site_category',
label='Category',
queryset=HeritageSiteCategory.objects.all().order_by('category_name'),
lookup_expr='exact'
)
region = django_filters.ModelChoiceFilter(
field_name='country_area__location__region__region_name',
label='Region',
queryset=Region.objects.all().order_by('region_name'),
lookup_expr='exact'
)
sub_region = django_filters.ModelChoiceFilter(
field_name='country_area__location__sub_region__sub_region_name',
label='SubRegion',
queryset=SubRegion.objects.all().order_by('sub_region_name'),
lookup_expr='exact'
)
intermediate_region = django_filters.ModelChoiceFilter(
field_name='country_area__location__intermediate_region__intermediate_region_name',
label='Intermediate Region',
queryset=IntermediateRegion.objects.all().order_by('intermediate_region_name'),
lookup_expr='exact'
)
country_area = django_filters.ModelChoiceFilter(
field_name='country_area',
label='Country/Area',
queryset=CountryArea.objects.all().order_by('country_area_name'),
lookup_expr='exact'
)
# Add date_inscribed filter here
date_inscribed = django_filters.NumberFilter(
field_name='date_inscribed',
label='Date Inscribed',
lookup_expr = 'exact'
)
class Meta:
model = HeritageSite
# form = SearchForm
        # fields = [] is required, even if empty.
fields = []
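# Illustrative usage sketch (hypothetical view code, not part of this module):
#
#     f = HeritageSiteFilter(request.GET, queryset=HeritageSite.objects.all())
#     filtered_sites = f.qs  # the lazily-evaluated, filtered queryset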
| 28.808824 | 99 | 0.786626 |
aced88a7d9efbbc3fb94b9c09010ff4d8651084e | 7,881 | py | Python | HeasoftTools/mk_ray_trace_footprints.py | g3-raman/NITRATES | b636e22d49d5d656d651b4972193f4bf9ccfa902 | ["MIT"] | 11 | 2021-11-01T23:52:06.000Z | 2022-01-22T09:18:44.000Z | HeasoftTools/mk_ray_trace_footprints.py | g3-raman/NITRATES | b636e22d49d5d656d651b4972193f4bf9ccfa902 | ["MIT"] | 4 | 2021-11-03T17:25:10.000Z | 2022-03-10T18:44:27.000Z | HeasoftTools/mk_ray_trace_footprints.py | g3-raman/NITRATES | b636e22d49d5d656d651b4972193f4bf9ccfa902 | ["MIT"] | 2 | 2021-11-17T00:40:33.000Z | 2021-12-22T13:59:34.000Z | import os
import subprocess
from gen_tools import run_ftool, ftool_mp, run_ftool2
import argparse
import numpy as np
import time
from astropy.table import Table
def do_ray_trace(out_fname, att_fname, ra, dec, time, detmask, infile):
ftool = "batmaskwtimg"
arg_list = [out_fname, att_fname, str(ra), str(dec)]
arg_list += ["time=%.2f" %(time), "rebalance=NO",
"corrections=forward,unbalanced,flatfield",
"detmask="+detmask, "infile="+infile]
run_ftool(ftool, arg_list)
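# Illustrative call (hypothetical file names; assumes HEASoft's batmaskwtimg
# is on the PATH):
#     do_ray_trace("out.img", "attitude.fits", 123.4, -12.3, 5.0e8,
#                  "detmask.fits", "pha.pha")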
def do_ray_trace_ra_dec_list(out_fname, att_fname, ras, decs, time, detmask, infile):
ftool = "batmaskwtimg"
    for i in range(len(ras)):
outf = out_fname + '_%.2f_%.2f.img' %(ras[i], decs[i])
arg_list = [outf, att_fname, str(ras[i]), str(decs[i])]
arg_list += ["time=%.2f" %(time), "rebalance=NO",
"corrections=forward,unbalanced,flatfield",
"detmask="+detmask, "infile="+infile]
#arg_list += ["time=%.2f" %(time), "rebalance=NO",
# "corrections=forward,unbalanced,flatfield",
# "infile="+infile]
run_ftool(ftool, arg_list)
def do_ray_trace_imxy_list(out_fname, att_fname, imxs, imys, detmask, infile):
ftool = "batmaskwtimg"
    for i in range(len(imxs)):
outf = out_fname + '_%.4f_%.4f.img' %(imxs[i], imys[i])
arg_list = [outf, att_fname, str(imxs[i]), str(imys[i])]
arg_list += ["outtype=NONZERO", "detmask="+detmask,
"infile="+infile, 'coord_type=tanxy',
"aperture=CALDB:DETECTION"]
run_ftool(ftool, arg_list)
def do_footprint_imxy_list(out_fname, att_fname, imxs, imys, time, detmask, infile):
ftool = "batmaskwtimg"
    for i in range(len(imxs)):
outf = out_fname + '_%.5f_%.5f.img' %(imxs[i], imys[i])
arg_list = [outf, att_fname, str(imxs[i]), str(imys[i])]
arg_list += ["time=%.2f" %(time), "rebalance=NO",
"corrections=forward,unbalanced,flatfield",
"detmask="+detmask, "infile="+infile, 'coord_type=tanxy',
"aperture=CALDB:DETECTION"]
run_ftool(ftool, arg_list)
def do_ray_trace_imxy_tab(out_fname, att_fname, imxs, imys, detmask, infile, incat):
ftool = "batmaskwtimg"
    # for i in range(len(imxs)):
outf = out_fname + '_%.5f_%.5f_%.5f_%.5f_.img'\
%(np.min(imxs), np.min(imys), np.max(imxs), np.max(imys))
if os.path.isfile(outf):
print "already made"
return
arg_list = [outf, att_fname, "0.0", "0.0"]
arg_list += ["rebalance=NO",
"corrections=forward,unbalanced,flatfield,subpixelate",
"detmask="+detmask, "infile="+infile, 'coord_type=tanxy',
"incatalog="+incat, "racol=IMX", "deccol=IMY",
"catnumcol=NONE", "chatter=1", "distfile=CALDB"]
run_ftool(ftool, arg_list)
def mk_imxy_tab(imxs, imys, fname):
names = ['IMX', 'IMY', 'NAME']
grid_x, grid_y = np.meshgrid(imxs, imys, indexing='ij')
tab = Table()
tab['IMX'] = grid_x.ravel()
tab['IMY'] = grid_y.ravel()
    names = np.array(['%.5f %.5f' % (tab['IMX'][i], tab['IMY'][i]) for i in range(len(tab))])
tab['NAME'] = names
    print(len(tab), " positions to do")
tab.write(fname, overwrite=True)
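# Example (illustrative arguments): mk_imxy_tab(np.linspace(-1, 1, 3),
# np.linspace(-0.5, 0.5, 3), "grid.fits") writes a 9-row table with IMX, IMY
# and a "imx imy" NAME string for every point of the 3x3 grid.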
def ev2pha(infile, outfile, tstart, tstop, ebins, detmask):
ftool = "batbinevt"
arg_list = [infile, outfile, 'PHA', '0', 'uniform', ebins]
arg_list += ['tstart='+str(tstart), 'tstop='+str(tstop), 'detmask='+detmask]
run_ftool(ftool, arg_list)
def pha_sys_err(infile, auxfile):
ftool = "batupdatephakw"
arg_list = [infile, auxfile]
run_ftool(ftool, arg_list)
ftool = "batphasyserr"
arg_list = [infile, "CALDB"]
run_ftool(ftool, arg_list)
def mk_small_evt(infile, outfile):
ftool = "fextract-events"
arg_list = [infile+"[pha=100:101]", outfile, "gti=GTI"]
run_ftool(ftool, arg_list)
def mk_rt_aux_file(infile, outfile, imx, imy, dmask, attfile, ra, dec):
ftool = "batmaskwtevt"
arg_list = [infile, attfile, str(ra), str(dec)]
arg_list += ["coord_type=sky",
"auxfile="+outfile, "clobber=True",\
"detmask="+dmask]
run_ftool(ftool, arg_list)
def mk_drm(pha, outfile, dapfile):
ftool = "batdrmgen"
arg_list = [pha, outfile, dapfile, "method=TABLE"]
run_ftool(ftool, arg_list)
def bateconvert(infile, calfile):
ftool = "bateconvert"
arg_list = ['infile='+infile, 'calfile='+calfile, 'residfile=CALDB']
run_ftool(ftool, arg_list)
def detmask(infile, outfile, dmask):
ftool = "batdetmask"
arg_list = [infile, outfile, 'detmask='+dmask]
run_ftool(ftool, arg_list)
def mk_bkg_mod(infile, outfile, detmask):
ftool = "batclean"
arg_list = [infile, outfile]
arg_list += ['incatalog=NONE', 'detmask='+detmask, 'srcclean=NO', 'outversion=bkgfit']
run_ftool(ftool, arg_list)
def mk_pc_img(infile, outfile, detmask, attfile):
ftool = "batfftimage"
arg_list = [infile, outfile]
arg_list += ['detmask='+detmask, 'attitude='+attfile, 'pcodemap=YES']
run_ftool(ftool, arg_list)
def cli():
#default_ebins = '15-40, 25-60, 50-80, 70-100, 90-135, 120-165, 150-195'
parser = argparse.ArgumentParser()
parser.add_argument('--infile', type=str,\
help="In File Name needed for batmaskwtimg",\
default="/storage/work/jjd330/local/bat_data/pha.pha")
parser.add_argument('--t0', type=float,\
help="Start time in MET seconds",\
default=4e8)
parser.add_argument('--imx0', type=float,\
help="imx low value",\
default=-1.5)
parser.add_argument('--imy0', type=float,\
help="imy low value",\
default=-.9)
parser.add_argument('--imx1', type=float,\
help="imx high value",\
default=1.5)
parser.add_argument('--imy1', type=float,\
help="imy high value",\
default=0.9)
parser.add_argument('--rtstep', type=float,\
help="step size in imx/y for ray tracing",\
default=0.002)
parser.add_argument('--pcmin', type=float,\
help="Min Partial coding used",\
default=1e-2)
parser.add_argument('--imrng', type=float,\
help="range for imx/y around center point or all",\
default=0.02)
parser.add_argument('--Njobs', type=int,\
help="Number of jobs running",\
default=16)
parser.add_argument('--job_id', type=int,\
help="Which job is this",\
default=-1)
parser.add_argument('--rtdir', type=str,\
help="Directory to save foot prints to",\
default='/gpfs/scratch/jjd330/bat_data/footprint_dir/')
args = parser.parse_args()
return args
def main(args):
t_0 = time.time()
rng = args.imrng
imx_ax = np.linspace(-1.8, 1.8, 40*36+1)
imy_ax = np.linspace(-1.0, 1.0, 40*20+1)
    print(imx_ax)
    print(imy_ax)
imx_grid, imy_grid = np.meshgrid(imx_ax, imy_ax, indexing='ij')
imxs = imx_grid.ravel()
imys = imy_grid.ravel()
Npnts = len(imxs)
    print(Npnts, " total points to make")
if args.job_id >= 0:
        Nper_job = 1 + Npnts // args.Njobs  # integer division for slicing
i0 = args.job_id*Nper_job
i1 = i0 + Nper_job
imxs = imxs[i0:i1]
imys = imys[i0:i1]
Npnts = len(imxs)
        print(Npnts, " points to do in this job")
if not os.path.exists(args.rtdir):
os.makedirs(args.rtdir)
out_fname = os.path.join(args.rtdir, 'footprint')
do_ray_trace_imxy_list(out_fname, "NONE", imxs, imys, "NONE", args.infile)
print "Took %.2f seconds, %.2f minutes to do everything" %(time.time()-t_0, (time.time()-t_0)/60.)
if __name__ == '__main__':
args = cli()
main(args)
| 31.906883 | 102 | 0.601447 |
aced891651184bbb8a2bf39ff5f93d4f88a13e80 | 59,037 | py | Python | astropy/modeling/fitting.py | mehrdad-shokri/astropy | abd73b51277694338c8eca7639da956dcd06f207 | ["BSD-3-Clause"] | null | null | null | astropy/modeling/fitting.py | mehrdad-shokri/astropy | abd73b51277694338c8eca7639da956dcd06f207 | ["BSD-3-Clause"] | null | null | null | astropy/modeling/fitting.py | mehrdad-shokri/astropy | abd73b51277694338c8eca7639da956dcd06f207 | ["BSD-3-Clause"] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements classes (called Fitters) which combine optimization
algorithms (typically from `scipy.optimize`) with statistic functions to perform
fitting. Fitters are implemented as callable classes. In addition to the data
to fit, the ``__call__`` method takes an instance of
`~astropy.modeling.core.FittableModel` as input, and returns a copy of the
model with its parameters determined by the optimizer.
Optimization algorithms, called "optimizers" are implemented in
`~astropy.modeling.optimizers` and statistic functions are in
`~astropy.modeling.statistic`. The goal is to provide an easy to extend
framework and allow users to easily create new fitters by combining statistics
with optimizers.
There are two exceptions to the above scheme.
`~astropy.modeling.fitting.LinearLSQFitter` uses Numpy's `~numpy.linalg.lstsq`
function. `~astropy.modeling.fitting.LevMarLSQFitter` uses
`~scipy.optimize.leastsq` which combines optimization and statistic in one
implementation.
"""
# pylint: disable=invalid-name
import abc
import inspect
import operator
import warnings
from functools import reduce, wraps
import numpy as np
from astropy.units import Quantity
from astropy.utils.exceptions import AstropyUserWarning
from .utils import poly_map_domain, _combine_equivalency_dict
from .optimizers import (SLSQP, Simplex)
from .statistic import (leastsquare)
from .optimizers import (DEFAULT_MAXITER, DEFAULT_EPS, DEFAULT_ACC)
# Check pkg_resources exists
try:
from pkg_resources import iter_entry_points
HAS_PKG = True
except ImportError:
HAS_PKG = False
__all__ = ['LinearLSQFitter', 'LevMarLSQFitter', 'FittingWithOutlierRemoval',
'SLSQPLSQFitter', 'SimplexLSQFitter', 'JointFitter', 'Fitter']
# Statistic functions implemented in `astropy.modeling.statistic`
STATISTICS = [leastsquare]
# Optimizers implemented in `astropy.modeling.optimizers`
OPTIMIZERS = [Simplex, SLSQP]
class ModelsError(Exception):
"""Base class for model exceptions"""
class ModelLinearityError(ModelsError):
""" Raised when a non-linear model is passed to a linear fitter."""
class UnsupportedConstraintError(ModelsError, ValueError):
"""
Raised when a fitter does not support a type of constraint.
"""
class _FitterMeta(abc.ABCMeta):
"""
Currently just provides a registry for all Fitter classes.
"""
registry = set()
def __new__(mcls, name, bases, members):
cls = super().__new__(mcls, name, bases, members)
if not inspect.isabstract(cls) and not name.startswith('_'):
mcls.registry.add(cls)
return cls
def fitter_unit_support(func):
"""
This is a decorator that can be used to add support for dealing with
quantities to any __call__ method on a fitter which may not support
quantities itself. This is done by temporarily removing units from all
parameters then adding them back once the fitting has completed.
"""
@wraps(func)
def wrapper(self, model, x, y, z=None, **kwargs):
equivalencies = kwargs.pop('equivalencies', None)
data_has_units = (isinstance(x, Quantity) or
isinstance(y, Quantity) or
isinstance(z, Quantity))
model_has_units = model._has_units
if data_has_units or model_has_units:
if model._supports_unit_fitting:
# We now combine any instance-level input equivalencies with user
# specified ones at call-time.
input_units_equivalencies = _combine_equivalency_dict(
model.inputs, equivalencies, model.input_units_equivalencies)
# If input_units is defined, we transform the input data into those
# expected by the model. We hard-code the input names 'x', and 'y'
# here since FittableModel instances have input names ('x',) or
# ('x', 'y')
if model.input_units is not None:
if isinstance(x, Quantity):
x = x.to(model.input_units[model.inputs[0]],
equivalencies=input_units_equivalencies[model.inputs[0]])
if isinstance(y, Quantity) and z is not None:
y = y.to(model.input_units[model.inputs[1]],
equivalencies=input_units_equivalencies[model.inputs[1]])
# Create a dictionary mapping the real model inputs and outputs
# names to the data. This remapping of names must be done here, after
                # the input data is converted to the correct units.
rename_data = {model.inputs[0]: x}
if z is not None:
rename_data[model.outputs[0]] = z
rename_data[model.inputs[1]] = y
else:
rename_data[model.outputs[0]] = y
rename_data['z'] = None
# We now strip away the units from the parameters, taking care to
# first convert any parameters to the units that correspond to the
# input units (to make sure that initial guesses on the parameters)
# are in the right unit system
model = model.without_units_for_data(**rename_data)
# We strip away the units from the input itself
add_back_units = False
if isinstance(x, Quantity):
add_back_units = True
xdata = x.value
else:
xdata = np.asarray(x)
if isinstance(y, Quantity):
add_back_units = True
ydata = y.value
else:
ydata = np.asarray(y)
if z is not None:
if isinstance(z, Quantity):
add_back_units = True
zdata = z.value
else:
zdata = np.asarray(z)
# We run the fitting
if z is None:
model_new = func(self, model, xdata, ydata, **kwargs)
else:
model_new = func(self, model, xdata, ydata, zdata, **kwargs)
# And finally we add back units to the parameters
if add_back_units:
model_new = model_new.with_units_from_data(**rename_data)
return model_new
else:
raise NotImplementedError("This model does not support being "
"fit to data with units.")
else:
return func(self, model, x, y, z=z, **kwargs)
return wrapper
class Fitter(metaclass=_FitterMeta):
"""
Base class for all fitters.
Parameters
----------
optimizer : callable
A callable implementing an optimization algorithm
statistic : callable
Statistic function
"""
supported_constraints = []
def __init__(self, optimizer, statistic):
if optimizer is None:
raise ValueError("Expected an optimizer.")
if statistic is None:
raise ValueError("Expected a statistic function.")
if inspect.isclass(optimizer):
# a callable class
self._opt_method = optimizer()
elif inspect.isfunction(optimizer):
self._opt_method = optimizer
else:
raise ValueError("Expected optimizer to be a callable class or a function.")
if inspect.isclass(statistic):
self._stat_method = statistic()
else:
self._stat_method = statistic
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [other_args], [input coordinates]]
            other_args may include weights or any other quantities specific
            to a statistic
Notes
-----
The list of arguments (args) is set in the `__call__` method.
Fitters may overwrite this method, e.g. when statistic functions
require other arguments.
"""
model = args[0]
meas = args[-1]
_fitter_to_model_params(model, fps)
res = self._stat_method(meas, model, *args[1:-1])
return res
@abc.abstractmethod
def __call__(self):
"""
This method performs the actual fitting and modifies the parameter list
of a model.
Fitter subclasses should implement this method.
"""
raise NotImplementedError("Subclasses should implement this method.")
# TODO: I have ongoing branch elsewhere that's refactoring this module so that
# all the fitter classes in here are Fitter subclasses. In the meantime we
# need to specify that _FitterMeta is its metaclass.
class LinearLSQFitter(metaclass=_FitterMeta):
"""
A class performing a linear least square fitting.
Uses `numpy.linalg.lstsq` to do the fitting.
Given a model and data, fits the model to the data and changes the
model's parameters. Keeps a dictionary of auxiliary fitting information.
Notes
-----
Note that currently LinearLSQFitter does not support compound models.
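    Examples
    --------
    A minimal usage sketch (assumes ``Polynomial1D`` from
    `astropy.modeling.models`; data values are illustrative):

    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> x = np.arange(10, dtype=float)
    >>> y = 2 * x + 1
    >>> p_init = models.Polynomial1D(degree=1)
    >>> fitter = fitting.LinearLSQFitter()
    >>> p_fit = fitter(p_init, x, y)  # doctest: +SKIP
    >>> p_fit.c1.value  # doctest: +SKIP
    2.0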
"""
supported_constraints = ['fixed']
supports_masked_input = True
def __init__(self):
self.fit_info = {'residuals': None,
'rank': None,
'singular_values': None,
'params': None
}
@staticmethod
def _deriv_with_constraints(model, param_indices, x=None, y=None):
if y is None:
d = np.array(model.fit_deriv(x, *model.parameters))
else:
d = np.array(model.fit_deriv(x, y, *model.parameters))
if model.col_fit_deriv:
return d[param_indices]
else:
return d[..., param_indices]
def _map_domain_window(self, model, x, y=None):
"""
Maps domain into window for a polynomial model which has these
attributes.
"""
if y is None:
if hasattr(model, 'domain') and model.domain is None:
model.domain = [x.min(), x.max()]
if hasattr(model, 'window') and model.window is None:
model.window = [-1, 1]
return poly_map_domain(x, model.domain, model.window)
else:
if hasattr(model, 'x_domain') and model.x_domain is None:
model.x_domain = [x.min(), x.max()]
if hasattr(model, 'y_domain') and model.y_domain is None:
model.y_domain = [y.min(), y.max()]
if hasattr(model, 'x_window') and model.x_window is None:
model.x_window = [-1., 1.]
if hasattr(model, 'y_window') and model.y_window is None:
model.y_window = [-1., 1.]
xnew = poly_map_domain(x, model.x_domain, model.x_window)
ynew = poly_map_domain(y, model.y_domain, model.y_window)
return xnew, ynew
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, rcond=None):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
Input coordinates
y : array_like
Input coordinates
z : array_like, optional
Input coordinates.
If the dependent (``y`` or ``z``) co-ordinate values are provided
as a `numpy.ma.MaskedArray`, any masked points are ignored when
fitting. Note that model set fitting is significantly slower when
there are masked points (not just an empty mask), as the matrix
equation has to be solved for each model separately when their
co-ordinate grids differ.
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
rcond : float, optional
Cut-off ratio for small singular values of ``a``.
Singular values are set to zero if they are smaller than ``rcond``
times the largest singular value of ``a``.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
if not model.fittable:
raise ValueError("Model must be a subclass of FittableModel")
if not model.linear:
raise ModelLinearityError('Model is not linear in parameters, '
'linear fit methods should not be used.')
if hasattr(model, "submodel_names"):
raise ValueError("Model must be simple, not compound")
_validate_constraints(self.supported_constraints, model)
model_copy = model.copy()
_, fitparam_indices = _model_to_fit_params(model_copy)
if model_copy.n_inputs == 2 and z is None:
raise ValueError("Expected x, y and z for a 2 dimensional model.")
farg = _convert_input(x, y, z, n_models=len(model_copy),
model_set_axis=model_copy.model_set_axis)
has_fixed = any(model_copy.fixed.values())
if has_fixed:
# The list of fixed params is the complement of those being fitted:
fixparam_indices = [idx for idx in
range(len(model_copy.param_names))
if idx not in fitparam_indices]
# Construct matrix of user-fixed parameters that can be dotted with
# the corresponding fit_deriv() terms, to evaluate corrections to
# the dependent variable in order to fit only the remaining terms:
fixparams = np.asarray([getattr(model_copy,
model_copy.param_names[idx]).value
for idx in fixparam_indices])
if len(farg) == 2:
x, y = farg
# map domain into window
if hasattr(model_copy, 'domain'):
x = self._map_domain_window(model_copy, x)
if has_fixed:
lhs = np.asarray(self._deriv_with_constraints(model_copy,
fitparam_indices,
x=x))
fixderivs = self._deriv_with_constraints(model_copy, fixparam_indices, x=x)
else:
lhs = np.asarray(model_copy.fit_deriv(x, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x)
rhs = y
else:
x, y, z = farg
# map domain into window
if hasattr(model_copy, 'x_domain'):
x, y = self._map_domain_window(model_copy, x, y)
if has_fixed:
lhs = np.asarray(self._deriv_with_constraints(model_copy,
fitparam_indices, x=x, y=y))
fixderivs = self._deriv_with_constraints(model_copy,
fixparam_indices,
x=x, y=y)
else:
lhs = np.asanyarray(model_copy.fit_deriv(x, y, *model_copy.parameters))
sum_of_implicit_terms = model_copy.sum_of_implicit_terms(x, y)
if len(model_copy) > 1:
# Just to be explicit (rather than baking in False == 0):
model_axis = model_copy.model_set_axis or 0
if z.ndim > 2:
# For higher-dimensional z, flatten all the axes except the
# dimension along which models are stacked and transpose so
# the model axis is *last* (I think this resolves Erik's
# pending generalization from 80a6f25a):
rhs = np.rollaxis(z, model_axis, z.ndim)
rhs = rhs.reshape(-1, rhs.shape[-1])
else:
# This "else" seems to handle the corner case where the
# user has already flattened x/y before attempting a 2D fit
# but z has a second axis for the model set. NB. This is
# ~5-10x faster than using rollaxis.
rhs = z.T if model_axis == 0 else z
else:
rhs = z.flatten()
# If the derivative is defined along rows (as with non-linear models)
if model_copy.col_fit_deriv:
lhs = np.asarray(lhs).T
# Some models (eg. Polynomial1D) don't flatten multi-dimensional inputs
# when constructing their Vandermonde matrix, which can lead to obscure
# failures below. Ultimately, np.linalg.lstsq can't handle >2D matrices,
# so just raise a slightly more informative error when this happens:
if np.asanyarray(lhs).ndim > 2:
raise ValueError('{} gives unsupported >2D derivative matrix for '
'this x/y'.format(type(model_copy).__name__))
# Subtract any terms fixed by the user from (a copy of) the RHS, in
# order to fit the remaining terms correctly:
if has_fixed:
if model_copy.col_fit_deriv:
fixderivs = np.asarray(fixderivs).T # as for lhs above
rhs = rhs - fixderivs.dot(fixparams) # evaluate user-fixed terms
# Subtract any terms implicit in the model from the RHS, which, like
# user-fixed terms, affect the dependent variable but are not fitted:
if sum_of_implicit_terms is not None:
# If we have a model set, the extra axis must be added to
# sum_of_implicit_terms as its innermost dimension, to match the
# dimensionality of rhs after _convert_input "rolls" it as needed
# by np.linalg.lstsq. The vector then gets broadcast to the right
# number of sets (columns). This assumes all the models share the
# same input co-ordinates, as is currently the case.
if len(model_copy) > 1:
sum_of_implicit_terms = sum_of_implicit_terms[..., np.newaxis]
rhs = rhs - sum_of_implicit_terms
if weights is not None:
weights = np.asarray(weights, dtype=float)
if len(x) != len(weights):
raise ValueError("x and weights should have the same length")
if rhs.ndim == 2:
lhs *= weights[:, np.newaxis]
# Don't modify in-place in case rhs was the original dependent
# variable array
rhs = rhs * weights[:, np.newaxis]
else:
lhs *= weights[:, np.newaxis]
rhs = rhs * weights
scl = (lhs * lhs).sum(0)
lhs /= scl
masked = np.any(np.ma.getmask(rhs))
if len(model_copy) == 1 or not masked:
# If we're fitting one or more models over a common set of points,
# we only have to solve a single matrix equation, which is an order
# of magnitude faster than calling lstsq() once per model below:
good = ~rhs.mask if masked else slice(None) # latter is a no-op
# Solve for one or more models:
lacoef, resids, rank, sval = np.linalg.lstsq(lhs[good],
rhs[good], rcond)
else:
# Where fitting multiple models with masked pixels, initialize an
# empty array of coefficients and populate it one model at a time.
# The shape matches the number of coefficients from the Vandermonde
# matrix and the number of models from the RHS:
lacoef = np.zeros(lhs.shape[-1:] + rhs.shape[-1:], dtype=rhs.dtype)
# Loop over the models and solve for each one. By this point, the
# model set axis is the second of two. Transpose rather than using,
# say, np.moveaxis(array, -1, 0), since it's slightly faster and
# lstsq can't handle >2D arrays anyway. This could perhaps be
# optimized by collecting together models with identical masks
# (eg. those with no rejected points) into one operation, though it
# will still be relatively slow when calling lstsq repeatedly.
for model_rhs, model_lacoef in zip(rhs.T, lacoef.T):
# Cull masked points on both sides of the matrix equation:
good = ~model_rhs.mask
model_lhs = lhs[good]
model_rhs = model_rhs[good][..., np.newaxis]
# Solve for this model:
t_coef, resids, rank, sval = np.linalg.lstsq(model_lhs,
model_rhs, rcond)
model_lacoef[:] = t_coef.T
self.fit_info['residuals'] = resids
self.fit_info['rank'] = rank
self.fit_info['singular_values'] = sval
lacoef = (lacoef.T / scl).T
self.fit_info['params'] = lacoef
# TODO: Only Polynomial models currently have an _order attribute;
# maybe change this to read isinstance(model, PolynomialBase)
if hasattr(model_copy, '_order') and len(model_copy) == 1 \
and not has_fixed and rank != model_copy._order:
warnings.warn("The fit may be poorly conditioned\n",
AstropyUserWarning)
_fitter_to_model_params(model_copy, lacoef.flatten())
return model_copy
class FittingWithOutlierRemoval:
"""
This class combines an outlier removal technique with a fitting procedure.
Basically, given a maximum number of iterations ``niter``, outliers are
removed and fitting is performed for each iteration, until no new outliers
are found or ``niter`` is reached.
Parameters
----------
fitter : An Astropy fitter
An instance of any Astropy fitter, i.e., LinearLSQFitter,
LevMarLSQFitter, SLSQPLSQFitter, SimplexLSQFitter, JointFitter. For
model set fitting, this must understand masked input data (as
indicated by the fitter class attribute ``supports_masked_input``).
outlier_func : function
A function for outlier removal.
If this accepts an ``axis`` parameter like the `numpy` functions, the
appropriate value will be supplied automatically when fitting model
sets (unless overridden in ``outlier_kwargs``), to find outliers for
each model separately; otherwise, the same filtering must be performed
in a loop over models, which is almost an order of magnitude slower.
niter : int, optional
Maximum number of iterations.
outlier_kwargs : dict, optional
Keyword arguments for outlier_func.
Attributes
----------
fit_info : dict
The ``fit_info`` (if any) from the last iteration of the wrapped
``fitter`` during the most recent fit. An entry is also added with the
keyword ``niter`` that records the actual number of fitting iterations
performed (as opposed to the user-specified maximum).
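    Examples
    --------
    A minimal sketch pairing a linear fitter with sigma clipping
    (assumes `astropy.stats.sigma_clip`; ``model``, ``x`` and ``y`` are
    placeholders):

    >>> from astropy.stats import sigma_clip
    >>> fit = FittingWithOutlierRemoval(LinearLSQFitter(), sigma_clip,
    ...                                 niter=3, sigma=3.0)
    >>> fitted_model, mask = fit(model, x, y)  # doctest: +SKIP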
"""
def __init__(self, fitter, outlier_func, niter=3, **outlier_kwargs):
self.fitter = fitter
self.outlier_func = outlier_func
self.niter = niter
self.outlier_kwargs = outlier_kwargs
        self.fit_info = {'niter': None}
def __str__(self):
return ("Fitter: {0}\nOutlier function: {1}\nNum. of iterations: {2}" +
("\nOutlier func. args.: {3}"))\
.format(self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __repr__(self):
return ("{0}(fitter: {1}, outlier_func: {2}," +
" niter: {3}, outlier_kwargs: {4})")\
.format(self.__class__.__name__,
self.fitter.__class__.__name__,
self.outlier_func.__name__, self.niter,
self.outlier_kwargs)
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Parameters
----------
model : `~astropy.modeling.FittableModel`
An analytic model which will be fit to the provided data.
This also contains the initial guess for an optimization
algorithm.
x : array_like
Input coordinates.
y : array_like
Data measurements (1D case) or input coordinates (2D case).
z : array_like, optional
Data measurements (2D case).
weights : array_like, optional
Weights to be passed to the fitter.
kwargs : dict, optional
Keyword arguments to be passed to the fitter.
Returns
-------
fitted_model : `~astropy.modeling.FittableModel`
Fitted model after outlier removal.
mask : `numpy.ndarray`
Boolean mask array, identifying which points were used in the final
fitting iteration (False) and which were found to be outliers or
were masked in the input (True).
"""
# For single models, the data get filtered here at each iteration and
# then passed to the fitter, which is the historical behavior and
# works even for fitters that don't understand masked arrays. For model
# sets, the fitter must be able to filter masked data internally,
# because fitters require a single set of x/y co-ordinates whereas the
# eliminated points can vary between models. To avoid this limitation,
# we could fall back to looping over individual model fits, but it
# would likely be fiddly and involve even more overhead (and the
# non-linear fitters don't work with model sets anyway, as of writing).
if len(model) == 1:
model_set_axis = None
else:
if not hasattr(self.fitter, 'supports_masked_input') or \
self.fitter.supports_masked_input is not True:
raise ValueError("{} cannot fit model sets with masked "
"values".format(type(self.fitter).__name__))
# Fitters use their input model's model_set_axis to determine how
# their input data are stacked:
model_set_axis = model.model_set_axis
# Construct input co-ordinate tuples for fitters & models that are
# appropriate for the dimensionality being fitted:
if z is None:
coords = (x, )
data = y
else:
coords = x, y
data = z
# For model sets, construct a numpy-standard "axis" tuple for the
# outlier function, to treat each model separately (if supported):
if model_set_axis is not None:
if model_set_axis < 0:
model_set_axis += data.ndim
if 'axis' not in self.outlier_kwargs: # allow user override
# This also works for False (like model instantiation):
self.outlier_kwargs['axis'] = tuple(
n for n in range(data.ndim) if n != model_set_axis
)
loop = False
# Starting fit, prior to any iteration and masking:
fitted_model = self.fitter(model, x, y, z, weights=weights, **kwargs)
filtered_data = np.ma.masked_array(data)
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
filtered_weights = weights
last_n_masked = filtered_data.mask.sum()
n = 0 # (allow recording no. of iterations when 0)
# Perform the iterative fitting:
for n in range(1, self.niter + 1):
# (Re-)evaluate the last model:
model_vals = fitted_model(*coords, model_set_axis=False)
# Determine the outliers:
if not loop:
# Pass axis parameter if outlier_func accepts it, otherwise
# prepare for looping over models:
try:
filtered_data = self.outlier_func(
filtered_data - model_vals, **self.outlier_kwargs
)
# If this happens to catch an error with a parameter other
# than axis, the next attempt will fail accordingly:
except TypeError:
if model_set_axis is None:
raise
else:
self.outlier_kwargs.pop('axis', None)
loop = True
# Construct MaskedArray to hold filtered values:
filtered_data = np.ma.masked_array(
filtered_data,
dtype=np.result_type(filtered_data, model_vals),
copy=True
)
# Make sure the mask is an array, not just nomask:
if filtered_data.mask is np.ma.nomask:
filtered_data.mask = False
# Get views transposed appropriately for iteration
# over the set (handling data & mask separately due to
# NumPy issue #8506):
data_T = np.rollaxis(filtered_data, model_set_axis, 0)
mask_T = np.rollaxis(filtered_data.mask,
model_set_axis, 0)
if loop:
model_vals_T = np.rollaxis(model_vals, model_set_axis, 0)
for row_data, row_mask, row_mod_vals in zip(data_T, mask_T,
model_vals_T):
masked_residuals = self.outlier_func(
row_data - row_mod_vals, **self.outlier_kwargs
)
row_data.data[:] = masked_residuals.data
row_mask[:] = masked_residuals.mask
# Issue speed warning after the fact, so it only shows up when
# the TypeError is genuinely due to the axis argument.
warnings.warn('outlier_func did not accept axis argument; '
'reverted to slow loop over models.',
AstropyUserWarning)
# Recombine newly-masked residuals with model to get masked values:
filtered_data += model_vals
# Re-fit the data after filtering, passing masked/unmasked values
# for single models / sets, respectively:
if model_set_axis is None:
good = ~filtered_data.mask
if weights is not None:
filtered_weights = weights[good]
fitted_model = self.fitter(fitted_model,
*(c[good] for c in coords),
filtered_data.data[good],
weights=filtered_weights, **kwargs)
else:
fitted_model = self.fitter(fitted_model, *coords,
filtered_data,
weights=filtered_weights, **kwargs)
# Stop iteration if the masked points are no longer changing (with
# cumulative rejection we only need to compare how many there are):
this_n_masked = filtered_data.mask.sum() # (minimal overhead)
if this_n_masked == last_n_masked:
break
last_n_masked = this_n_masked
        self.fit_info = {'niter': n}
self.fit_info.update(getattr(self.fitter, 'fit_info', {}))
return fitted_model, filtered_data.mask
class LevMarLSQFitter(metaclass=_FitterMeta):
"""
Levenberg-Marquardt algorithm and least squares statistic.
Attributes
----------
fit_info : dict
The `scipy.optimize.leastsq` result for the most recent fit (see
notes).
Notes
-----
The ``fit_info`` dictionary contains the values returned by
`scipy.optimize.leastsq` for the most recent fit, including the values from
the ``infodict`` dictionary it returns. See the `scipy.optimize.leastsq`
documentation for details on the meaning of these values. Note that the
``x`` return value is *not* included (as it is instead the parameter values
of the returned model).
    One additional element of ``fit_info`` is computed whenever a
model is fit, with the key 'param_cov'. The corresponding value is the
covariance matrix of the parameters as a 2D numpy array. The order of the
matrix elements matches the order of the parameters in the fitted model
(i.e., the same order as ``model.param_names``).
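    Examples
    --------
    A minimal sketch of a non-linear fit (assumes ``Gaussian1D`` from
    `astropy.modeling.models`; data values are illustrative):

    >>> import numpy as np
    >>> from astropy.modeling import models, fitting
    >>> x = np.linspace(-5., 5., 200)
    >>> y = 3. * np.exp(-0.5 * x ** 2 / 1.2 ** 2)
    >>> g_init = models.Gaussian1D(amplitude=1., mean=0., stddev=1.)
    >>> fitter = fitting.LevMarLSQFitter()
    >>> g_fit = fitter(g_init, x, y)  # doctest: +SKIP
    >>> cov = fitter.fit_info['param_cov']  # doctest: +SKIP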
"""
supported_constraints = ['fixed', 'tied', 'bounds']
"""
The constraint types supported by this fitter type.
"""
def __init__(self):
self.fit_info = {'nfev': None,
'fvec': None,
'fjac': None,
'ipvt': None,
'qtf': None,
'message': None,
'ierr': None,
'param_jac': None,
'param_cov': None}
super().__init__()
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
parameters returned by the fitter
args : list
[model, [weights], [input coordinates]]
"""
model = args[0]
weights = args[1]
_fitter_to_model_params(model, fps)
meas = args[-1]
if weights is None:
return np.ravel(model(*args[2: -1]) - meas)
else:
return np.ravel(weights * (model(*args[2: -1]) - meas))
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None,
maxiter=DEFAULT_MAXITER, acc=DEFAULT_ACC,
epsilon=DEFAULT_EPS, estimate_jacobian=False):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
maxiter : int
maximum number of iterations
acc : float
Relative error desired in the approximate solution
epsilon : float
A suitable step length for the forward-difference
approximation of the Jacobian (if model.fjac=None). If
epsfcn is less than the machine precision, it is
assumed that the relative errors in the functions are
of the order of the machine precision.
estimate_jacobian : bool
If False (default) and if the model has a fit_deriv method,
it will be used. Otherwise the Jacobian will be estimated.
If True, the Jacobian will be estimated in any case.
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
from scipy import optimize
model_copy = _validate_model(model, self.supported_constraints)
farg = (model_copy, weights, ) + _convert_input(x, y, z)
if model_copy.fit_deriv is None or estimate_jacobian:
dfunc = None
else:
dfunc = self._wrap_deriv
init_values, _ = _model_to_fit_params(model_copy)
fitparams, cov_x, dinfo, mess, ierr = optimize.leastsq(
self.objective_function, init_values, args=farg, Dfun=dfunc,
col_deriv=model_copy.col_fit_deriv, maxfev=maxiter, epsfcn=epsilon,
xtol=acc, full_output=True)
_fitter_to_model_params(model_copy, fitparams)
self.fit_info.update(dinfo)
self.fit_info['cov_x'] = cov_x
self.fit_info['message'] = mess
self.fit_info['ierr'] = ierr
if ierr not in [1, 2, 3, 4]:
warnings.warn("The fit may be unsuccessful; check "
"fit_info['message'] for more information.",
AstropyUserWarning)
# now try to compute the true covariance matrix
if (len(y) > len(init_values)) and cov_x is not None:
sum_sqrs = np.sum(self.objective_function(fitparams, *farg)**2)
dof = len(y) - len(init_values)
self.fit_info['param_cov'] = cov_x * sum_sqrs / dof
else:
self.fit_info['param_cov'] = None
return model_copy
@staticmethod
def _wrap_deriv(params, model, weights, x, y, z=None):
"""
Wraps the method calculating the Jacobian of the function to account
for model constraints.
`scipy.optimize.leastsq` expects the function derivative to have the
above signature (parlist, (argtuple)). In order to accommodate model
constraints, instead of using p directly, we set the parameter list in
this function.
"""
if weights is None:
weights = 1.0
if any(model.fixed.values()) or any(model.tied.values()):
# update the parameters with the current values from the fitter
_fitter_to_model_params(model, params)
if z is None:
full = np.array(model.fit_deriv(x, *model.parameters))
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
else:
full = np.array([np.ravel(_) for _ in model.fit_deriv(x, y, *model.parameters)])
if not model.col_fit_deriv:
full_deriv = np.ravel(weights) * full.T
else:
full_deriv = np.ravel(weights) * full
pars = [getattr(model, name) for name in model.param_names]
fixed = [par.fixed for par in pars]
tied = [par.tied for par in pars]
tied = list(np.where([par.tied is not False for par in pars],
True, tied))
fix_and_tie = np.logical_or(fixed, tied)
ind = np.logical_not(fix_and_tie)
if not model.col_fit_deriv:
residues = np.asarray(full_deriv[np.nonzero(ind)]).T
else:
residues = full_deriv[np.nonzero(ind)]
return [np.ravel(_) for _ in residues]
else:
if z is None:
return [np.ravel(_) for _ in np.ravel(weights) *
np.array(model.fit_deriv(x, *params))]
else:
if not model.col_fit_deriv:
return [np.ravel(_) for _ in
(np.ravel(weights) * np.array(model.fit_deriv(x, y, *params)).T).T]
return [np.ravel(_) for _ in weights * np.array(model.fit_deriv(x, y, *params))]
class SLSQPLSQFitter(Fitter):
"""
SLSQP optimization algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
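    Notes
    -----
    A minimal usage sketch (``g_init``, ``x`` and ``y`` are placeholders
    for a non-linear model and data, as in `LevMarLSQFitter`):

    >>> fitter = SLSQPLSQFitter()
    >>> g_fit = fitter(g_init, x, y, verblevel=0)  # doctest: +SKIP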
"""
supported_constraints = SLSQP.supported_constraints
def __init__(self):
super().__init__(optimizer=SLSQP, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
verblevel : int
0-silent
1-print summary upon completion,
2-print summary after each iteration
maxiter : int
maximum number of iterations
epsilon : float
the step size for finite-difference derivative estimates
acc : float
Requested accuracy
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model, self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
init_values, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class SimplexLSQFitter(Fitter):
"""
Simplex algorithm and least squares statistic.
Raises
------
ModelLinearityError
A linear model is passed to a nonlinear fitter
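    Notes
    -----
    Usage mirrors `SLSQPLSQFitter` above (placeholder names; illustration
    only):

    >>> fitter = SimplexLSQFitter()
    >>> g_fit = fitter(g_init, x, y, maxiter=200)  # doctest: +SKIP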
"""
supported_constraints = Simplex.supported_constraints
def __init__(self):
super().__init__(optimizer=Simplex, statistic=leastsquare)
self.fit_info = {}
@fitter_unit_support
def __call__(self, model, x, y, z=None, weights=None, **kwargs):
"""
Fit data to this model.
Parameters
----------
model : `~astropy.modeling.FittableModel`
model to fit to x, y, z
x : array
input coordinates
y : array
input coordinates
z : array, optional
input coordinates
weights : array, optional
Weights for fitting.
For data with Gaussian uncertainties, the weights should be
1/sigma.
kwargs : dict
optional keyword arguments to be passed to the optimizer or the statistic
maxiter : int
maximum number of iterations
acc : float
Relative error in approximate solution
equivalencies : list or None, optional and keyword-only argument
            List of *additional* equivalencies that should be applied in
case x, y and/or z have units. Default is None.
Returns
-------
model_copy : `~astropy.modeling.FittableModel`
a copy of the input model with parameters set by the fitter
"""
model_copy = _validate_model(model,
self._opt_method.supported_constraints)
farg = _convert_input(x, y, z)
farg = (model_copy, weights, ) + farg
init_values, _ = _model_to_fit_params(model_copy)
fitparams, self.fit_info = self._opt_method(
self.objective_function, init_values, farg, **kwargs)
_fitter_to_model_params(model_copy, fitparams)
return model_copy
class JointFitter(metaclass=_FitterMeta):
"""
Fit models which share a parameter.
For example, fit two gaussians to two data sets but keep
the FWHM the same.
Parameters
----------
models : list
a list of model instances
jointparameters : list
a list of joint parameters
initvals : list
a list of initial values
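    Examples
    --------
    A minimal sketch fitting two Gaussians that share a ``stddev``
    (``x1``/``y1``/``x2``/``y2`` are placeholder data; shown only to
    illustrate the call signature):

    >>> g1 = models.Gaussian1D(1.0, 0.0, 0.1)
    >>> g2 = models.Gaussian1D(2.5, 0.5, 0.1)
    >>> jf = JointFitter([g1, g2],
    ...                  {g1: ['stddev'], g2: ['stddev']}, [0.2])
    >>> jf(x1, y1, x2, y2)  # doctest: +SKIP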
"""
def __init__(self, models, jointparameters, initvals):
self.models = list(models)
self.initvals = list(initvals)
self.jointparams = jointparameters
self._verify_input()
self.fitparams = self._model_to_fit_params()
# a list of model.n_inputs
self.modeldims = [m.n_inputs for m in self.models]
# sum all model dimensions
self.ndim = np.sum(self.modeldims)
def _model_to_fit_params(self):
fparams = []
fparams.extend(self.initvals)
for model in self.models:
params = model.parameters.tolist()
joint_params = self.jointparams[model]
param_metrics = model._param_metrics
for param_name in joint_params:
slice_ = param_metrics[param_name]['slice']
del params[slice_]
fparams.extend(params)
return fparams
def objective_function(self, fps, *args):
"""
Function to minimize.
Parameters
----------
fps : list
            the fitted parameters - result of one iteration of the
fitting algorithm
        args : tuple
            tuple of measured and input coordinates;
            args is always passed as a tuple from optimize.leastsq
"""
lstsqargs = list(args)
fitted = []
fitparams = list(fps)
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fitparams[:numjp]
del fitparams[:numjp]
for model in self.models:
joint_params = self.jointparams[model]
margs = lstsqargs[:model.n_inputs + 1]
del lstsqargs[:model.n_inputs + 1]
            # separate each model's separately fitted parameters
numfp = len(model._parameters) - len(joint_params)
mfparams = fitparams[:numfp]
del fitparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the
# parameter is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
modelfit = model.evaluate(margs[:-1], *mparams)
fitted.extend(modelfit - margs[-1])
return np.ravel(fitted)
def _verify_input(self):
if len(self.models) <= 1:
raise TypeError("Expected >1 models, {} is given".format(
len(self.models)))
if len(self.jointparams.keys()) < 2:
raise TypeError("At least two parameters are expected, "
"{} is given".format(len(self.jointparams.keys())))
for j in self.jointparams.keys():
if len(self.jointparams[j]) != len(self.initvals):
raise TypeError("{} parameter(s) provided but {} expected".format(
len(self.jointparams[j]), len(self.initvals)))
def __call__(self, *args):
"""
Fit data to these models keeping some of the parameters common to the
two models.
"""
from scipy import optimize
if len(args) != reduce(lambda x, y: x + 1 + y + 1, self.modeldims):
raise ValueError("Expected {} coordinates in args but {} provided"
.format(reduce(lambda x, y: x + 1 + y + 1,
self.modeldims), len(args)))
self.fitparams[:], _ = optimize.leastsq(self.objective_function,
self.fitparams, args=args)
fparams = self.fitparams[:]
numjp = len(self.initvals)
# make a separate list of the joint fitted parameters
jointfitparams = fparams[:numjp]
del fparams[:numjp]
for model in self.models:
# extract each model's fitted parameters
joint_params = self.jointparams[model]
numfp = len(model._parameters) - len(joint_params)
mfparams = fparams[:numfp]
del fparams[:numfp]
# recreate the model parameters
mparams = []
param_metrics = model._param_metrics
for param_name in model.param_names:
if param_name in joint_params:
index = joint_params.index(param_name)
# should do this with slices in case the parameter
# is not a number
mparams.extend([jointfitparams[index]])
else:
slice_ = param_metrics[param_name]['slice']
plen = slice_.stop - slice_.start
mparams.extend(mfparams[:plen])
del mfparams[:plen]
model.parameters = np.array(mparams)
def _convert_input(x, y, z=None, n_models=1, model_set_axis=0):
"""Convert inputs to float arrays."""
x = np.asanyarray(x, dtype=float)
y = np.asanyarray(y, dtype=float)
if z is not None:
z = np.asanyarray(z, dtype=float)
# For compatibility with how the linear fitter code currently expects to
# work, shift the dependent variable's axes to the expected locations
if n_models > 1:
if z is None:
if y.shape[model_set_axis] != n_models:
raise ValueError(
"Number of data sets (y array is expected to equal "
"the number of parameter sets)")
# For a 1-D model the y coordinate's model-set-axis is expected to
# be last, so that its first dimension is the same length as the x
# coordinates. This is in line with the expectations of
# numpy.linalg.lstsq:
# http://docs.scipy.org/doc/numpy/reference/generated/numpy.linalg.lstsq.html
# That is, each model should be represented by a column. TODO:
# Obviously this is a detail of np.linalg.lstsq and should be
# handled specifically by any fitters that use it...
y = np.rollaxis(y, model_set_axis, y.ndim)
else:
# Shape of z excluding model_set_axis
z_shape = z.shape[:model_set_axis] + z.shape[model_set_axis + 1:]
if not (x.shape == y.shape == z_shape):
raise ValueError("x, y and z should have the same shape")
if z is None:
farg = (x, y)
else:
farg = (x, y, z)
return farg
# TODO: These utility functions are really particular to handling
# bounds/tied/fixed constraints for scipy.optimize optimizers that do not
# support them inherently; this needs to be reworked to be clear about this
# distinction (and the fact that these are not necessarily applicable to any
# arbitrary fitter--as evidenced for example by the fact that JointFitter has
# its own versions of these)
# TODO: Most of this code should be entirely rewritten; it should not be as
# inefficient as it is.
def _fitter_to_model_params(model, fps):
"""
Constructs the full list of model parameters from the fitted and
constrained parameters.
"""
_, fit_param_indices = _model_to_fit_params(model)
has_tied = any(model.tied.values())
has_fixed = any(model.fixed.values())
has_bound = any(b != (None, None) for b in model.bounds.values())
parameters = model.parameters
if not (has_tied or has_fixed or has_bound):
# We can just assign directly
model.parameters = fps
return
fit_param_indices = set(fit_param_indices)
offset = 0
param_metrics = model._param_metrics
for idx, name in enumerate(model.param_names):
if idx not in fit_param_indices:
continue
slice_ = param_metrics[name]['slice']
shape = param_metrics[name]['shape']
# This is determining which range of fps (the fitted parameters) maps
# to parameters of the model
size = reduce(operator.mul, shape, 1)
values = fps[offset:offset + size]
# Check bounds constraints
if model.bounds[name] != (None, None):
_min, _max = model.bounds[name]
if _min is not None:
values = np.fmax(values, _min)
if _max is not None:
values = np.fmin(values, _max)
parameters[slice_] = values
offset += size
# Update model parameters before calling ``tied`` constraints.
model._array_to_parameters()
# This has to be done in a separate loop due to how tied parameters are
# currently evaluated (the fitted parameters need to actually be *set* on
# the model first, for use in evaluating the "tied" expression--it might be
    # better to change this at some point)
if has_tied:
for idx, name in enumerate(model.param_names):
if model.tied[name]:
value = model.tied[name](model)
slice_ = param_metrics[name]['slice']
# To handle multiple tied constraints, model parameters
                # need to be updated after each iteration.
parameters[slice_] = value
model._array_to_parameters()
def _model_to_fit_params(model):
"""
Convert a model instance's parameter array to an array that can be used
with a fitter that doesn't natively support fixed or tied parameters.
In particular, it removes fixed/tied parameters from the parameter
array.
These may be a subset of the model parameters, if some of them are held
constant or tied.
"""
fitparam_indices = list(range(len(model.param_names)))
if any(model.fixed.values()) or any(model.tied.values()):
params = list(model.parameters)
param_metrics = model._param_metrics
for idx, name in list(enumerate(model.param_names))[::-1]:
if model.fixed[name] or model.tied[name]:
slice_ = param_metrics[name]['slice']
del params[slice_]
del fitparam_indices[idx]
return (np.array(params), fitparam_indices)
return (model.parameters, fitparam_indices)
def _validate_constraints(supported_constraints, model):
"""Make sure model constraints are supported by the current fitter."""
message = 'Optimizer cannot handle {0} constraints.'
if (any(model.fixed.values()) and
'fixed' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('fixed parameter'))
if any(model.tied.values()) and 'tied' not in supported_constraints:
raise UnsupportedConstraintError(
message.format('tied parameter'))
if (any(tuple(b) != (None, None) for b in model.bounds.values()) and
'bounds' not in supported_constraints):
raise UnsupportedConstraintError(
message.format('bound parameter'))
if model.eqcons and 'eqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('equality'))
if model.ineqcons and 'ineqcons' not in supported_constraints:
raise UnsupportedConstraintError(message.format('inequality'))
def _validate_model(model, supported_constraints):
"""
Check that model and fitter are compatible and return a copy of the model.
"""
if not model.fittable:
raise ValueError("Model does not appear to be fittable.")
if model.linear:
warnings.warn('Model is linear in parameters; '
'consider using linear fitting methods.',
AstropyUserWarning)
elif len(model) != 1:
        # for now only single data sets can be fitted
raise ValueError("Non-linear fitters can only fit "
"one data set at a time.")
_validate_constraints(supported_constraints, model)
model_copy = model.copy()
return model_copy
def populate_entry_points(entry_points):
"""
This injects entry points into the `astropy.modeling.fitting` namespace.
    This provides a means of inserting a fitting routine without requiring
    it to be merged into astropy's core.
Parameters
----------
entry_points : a list of `~pkg_resources.EntryPoint`
        Entry points are objects which encapsulate importable objects and
        are defined when a package is installed.
Notes
-----
    An explanation of entry points can be found `here <http://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins>`_.
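    A hypothetical ``setup.cfg`` stanza registering a custom fitter through
    this mechanism (package and class names are placeholders)::

        [options.entry_points]
        astropy.modeling =
            MyFitter = my_package.fitters:MyFitter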
"""
for entry_point in entry_points:
name = entry_point.name
try:
entry_point = entry_point.load()
except Exception as e:
# This stops the fitting from choking if an entry_point produces an error.
warnings.warn(AstropyUserWarning(
f'{type(e).__name__} error occurred in entry point {name}.'))
else:
if not inspect.isclass(entry_point):
warnings.warn(AstropyUserWarning(
f'Modeling entry point {name} expected to be a Class.'))
else:
if issubclass(entry_point, Fitter):
name = entry_point.__name__
globals()[name] = entry_point
__all__.append(name)
else:
warnings.warn(AstropyUserWarning(
'Modeling entry point {} expected to extend '
                        'astropy.modeling.Fitter'.format(name)))
# this is so fitting doesn't choke if pkg_resources doesn't exist
if HAS_PKG:
populate_entry_points(iter_entry_points(group='astropy.modeling', name=None))
| 39.253324 | 157 | 0.587344 |
aced89cfd2907e72c065f4479591f5db272a0b23 | 1,198 | py | Python | core/functions.py | Escape-Guys/Escape-traffic | ddb602aa0c1e256068586057b4ba08225f21064b | [
"MIT"
] | 1 | 2021-07-09T07:39:24.000Z | 2021-07-09T07:39:24.000Z | core/functions.py | Escape-Guys/Escape-traffic | ddb602aa0c1e256068586057b4ba08225f21064b | [
"MIT"
] | null | null | null | core/functions.py | Escape-Guys/Escape-traffic | ddb602aa0c1e256068586057b4ba08225f21064b | [
"MIT"
] | 3 | 2021-06-30T10:40:42.000Z | 2021-07-09T07:39:38.000Z | import os
import cv2
import random
import numpy as np
import tensorflow as tf
import pytesseract
from core.utils import read_class_names
from core.config import cfg
# function to count objects, can return total classes or count per class
def count_objects(data, by_class = False, allowed_classes = list(read_class_names(cfg.YOLO.CLASSES).values())):
boxes, scores, classes, num_objects = data
#create dictionary to hold count of objects
counts = dict()
# if by_class = True then count objects per class
if by_class:
class_names = read_class_names(cfg.YOLO.CLASSES)
# loop through total number of objects found
if num_objects:
for i in range(num_objects):
# grab class index and convert into corresponding class name
class_index = int(classes[i])
class_name = class_names[class_index]
if class_name in allowed_classes:
counts[class_name] = counts.get(class_name, 0) + 1
else:
continue
# else count total objects found
else:
counts['total object'] = num_objects
return counts | 39.933333 | 112 | 0.647746 |
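# Example usage (illustrative; assumes `data` was already unpacked from a
# YOLO detection result into (boxes, scores, classes, num_objects)):
#
#     counts = count_objects(data, by_class=True,
#                            allowed_classes=['car', 'truck'])
#     # -> e.g. {'car': 2, 'truck': 1}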
aced8ad58ef0cd385361dcd03bda9e2bbe702555 | 1,656 | py | Python | app/labeling/model.py | liephat/ai-image-eye | 583d099e9b46c222597cab3f77a87ce4dd1d7698 | [
"MIT"
] | 1 | 2021-07-17T13:12:10.000Z | 2021-07-17T13:12:10.000Z | app/labeling/model.py | liephat/ai-image-eye | 583d099e9b46c222597cab3f77a87ce4dd1d7698 | [
"MIT"
] | 28 | 2020-12-11T21:10:05.000Z | 2021-08-05T21:00:13.000Z | app/labeling/model.py | liephat/ai-image-eye | 583d099e9b46c222597cab3f77a87ce4dd1d7698 | [
"MIT"
] | 1 | 2020-12-10T21:11:01.000Z | 2020-12-10T21:11:01.000Z | from abc import abstractmethod
import numpy as np
import onnxruntime
class Model:
"""
Baseclass for all machine learning models. The class is used to implement concrete model classes
instantiating sessions based on ONNX format models and classifying images in the instantiated
session.
"""
def __init__(self, model, labels):
"""
:param model: filename with ONNX format containing trained model
:param labels: json file containing output label dictionary
"""
self.model = model
self.labels = labels
self.session = None
def load(self):
"""
Starts an inference session in ONNX runtime.
"""
# Run the model on the backend
self.session = onnxruntime.InferenceSession(self.model, None)
@staticmethod
@abstractmethod
def _preprocess():
pass
@staticmethod
@abstractmethod
def _postprocess():
pass
@abstractmethod
def classify(self, img):
"""
Returns predicted output class labels for an image.
:param img: image file with content that shall be labeled
:return: dictionary with probabilities as keys and predicted output classes as values
"""
pass
@staticmethod
def softmax(x):
"""
Normalizes the output of a model to a probability distribution over all predicted output
classes.
:param x: output of the last layer of a network model
:return: probability distribution over the predicted output classes
"""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum(axis=0)
| 27.6 | 100 | 0.640097 |
aced8b297be7f19576ca416572ec0b87291a04ed | 298 | py | Python | kt/config/docs.py | vijaynaidu123/tasks | 37a74f7f1c039d650cb8f33920a283efab8dfd36 | [
"MIT"
] | null | null | null | kt/config/docs.py | vijaynaidu123/tasks | 37a74f7f1c039d650cb8f33920a283efab8dfd36 | [
"MIT"
] | null | null | null | kt/config/docs.py | vijaynaidu123/tasks | 37a74f7f1c039d650cb8f33920a283efab8dfd36 | [
"MIT"
] | null | null | null | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/kt"
# docs_base_url = "https://[org_name].github.io/kt"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "kt"
| 24.833333 | 68 | 0.708054 |
aced8b64a47ed21206de58fa29d5f033dbef70e0 | 7,621 | py | Python | pytorch_lightning/trainer/logging.py | tullie/pytorch-lightning | b39f4798a6859d2237b48b29b39a2390164612c1 | [
"Apache-2.0"
] | 1 | 2021-04-09T08:32:21.000Z | 2021-04-09T08:32:21.000Z | pytorch_lightning/trainer/logging.py | tullie/pytorch-lightning | b39f4798a6859d2237b48b29b39a2390164612c1 | [
"Apache-2.0"
] | null | null | null | pytorch_lightning/trainer/logging.py | tullie/pytorch-lightning | b39f4798a6859d2237b48b29b39a2390164612c1 | [
"Apache-2.0"
] | null | null | null | import os
from abc import ABC
from typing import Union, Iterable
import torch
from pytorch_lightning.core import memory
from pytorch_lightning.loggers import TensorBoardLogger, LightningLoggerBase, LoggerCollection
from pytorch_lightning.utilities.memory import recursive_detach
class TrainerLoggingMixin(ABC):
# this is just a summary on variables used in this abstract class,
# the proper values/initialisation should be done in child class
current_epoch: int
on_gpu: bool
log_gpu_memory: ...
logger: Union[LightningLoggerBase, bool]
progress_bar_metrics: ...
global_step: int
global_rank: int
use_dp: bool
use_ddp2: bool
default_root_dir: str
slurm_job_id: int
num_gpus: int
def configure_logger(self, logger):
if logger is True:
# default logger
self.logger = TensorBoardLogger(
save_dir=self.default_root_dir,
version=self.slurm_job_id,
name='lightning_logs'
)
elif logger is False:
self.logger = None
else:
if isinstance(logger, Iterable):
self.logger = LoggerCollection(logger)
else:
self.logger = logger
def log_metrics(self, metrics, grad_norm_dic, step=None):
"""Logs the metric dict passed in.
        If the `step` parameter is None and a `step` key is present in metrics,
        uses metrics["step"] as the step
Args:
metrics (dict): Metric values
grad_norm_dic (dict): Gradient norms
step (int): Step for which metrics should be logged. Default value corresponds to `self.global_step`
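        Example (illustrative values; a `step` key inside `metrics`
        overrides the argument when `step` is None)::

            self.log_metrics({'train_loss': 0.23, 'step': 42}, {})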
"""
# add gpu memory
if self.on_gpu and self.log_gpu_memory:
mem_map = memory.get_memory_profile(self.log_gpu_memory)
metrics.update(mem_map)
# add norms
metrics.update(grad_norm_dic)
# turn all tensors to scalars
scalar_metrics = self.metrics_to_scalars(metrics)
if "step" in scalar_metrics and step is None:
step = scalar_metrics.pop("step")
else:
# added metrics by Lightning for convenience
scalar_metrics['epoch'] = self.current_epoch
step = step if step is not None else self.global_step
# log actual metrics
if self.is_global_zero and self.logger is not None:
self.logger.agg_and_log_metrics(scalar_metrics, step=step)
self.logger.save()
self.dev_debugger.track_logged_metrics_history(scalar_metrics)
def add_progress_bar_metrics(self, metrics):
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
self.progress_bar_metrics[k] = v
self.dev_debugger.track_pbar_metrics_history(metrics)
def metrics_to_scalars(self, metrics):
new_metrics = {}
for k, v in metrics.items():
if isinstance(v, torch.Tensor):
v = v.item()
if isinstance(v, dict):
v = self.metrics_to_scalars(v)
new_metrics[k] = v
return new_metrics
def process_output(self, output, train=False):
"""Reduces output according to the training mode.
Separates loss from logging and progress bar metrics
"""
# --------------------------
# handle single scalar only
# --------------------------
# single scalar returned from a xx_step
if isinstance(output, torch.Tensor):
progress_bar_metrics = {}
log_metrics = {}
callback_metrics = {}
hiddens = None
return output, progress_bar_metrics, log_metrics, callback_metrics, hiddens
# ---------------
# EXTRACT CALLBACK KEYS
# ---------------
# all keys not progress_bar or log are candidates for callbacks
callback_metrics = {}
for k, v in output.items():
if k not in ['progress_bar', 'log', 'hiddens']:
callback_metrics[k] = v
if train and (self.use_dp or self.use_ddp2):
num_gpus = self.num_gpus
callback_metrics = self.reduce_distributed_output(callback_metrics, num_gpus)
# ---------------
# EXTRACT PROGRESS BAR KEYS
# ---------------
try:
progress_output = output['progress_bar']
# reduce progress metrics for progress bar when using dp
if train and (self.use_dp or self.use_ddp2):
num_gpus = self.num_gpus
progress_output = self.reduce_distributed_output(progress_output, num_gpus)
progress_bar_metrics = progress_output
except Exception:
progress_bar_metrics = {}
# ---------------
# EXTRACT LOGGING KEYS
# ---------------
# extract metrics to log to experiment
try:
log_output = output['log']
# reduce progress metrics for progress bar when using dp
if train and (self.use_dp or self.use_ddp2):
num_gpus = self.num_gpus
log_output = self.reduce_distributed_output(log_output, num_gpus)
log_metrics = log_output
except Exception:
log_metrics = {}
# ---------------
# EXTRACT LOSS
# ---------------
# if output dict doesn't have the keyword loss
# then assume the output=loss if scalar
loss = None
if train:
try:
loss = output['loss']
except Exception:
if isinstance(output, torch.Tensor):
loss = output
else:
raise RuntimeError(
'No `loss` value in the dictionary returned from `model.training_step()`.'
)
# when using dp need to reduce the loss
if self.use_dp or self.use_ddp2:
loss = self.reduce_distributed_output(loss, self.num_gpus)
# ---------------
# EXTRACT HIDDEN
# ---------------
hiddens = output.get('hiddens')
# use every metric passed in as a candidate for callback
callback_metrics.update(progress_bar_metrics)
callback_metrics.update(log_metrics)
# detach all metrics for callbacks to prevent memory leaks
# no .item() because it will slow things down
callback_metrics = recursive_detach(callback_metrics)
return loss, progress_bar_metrics, log_metrics, callback_metrics, hiddens
def reduce_distributed_output(self, output, num_gpus):
if num_gpus <= 1:
return output
# when using DP, we get one output per gpu
# average outputs and return
if isinstance(output, torch.Tensor):
return output.mean()
for k, v in output.items():
# recurse on nested dics
if isinstance(output[k], dict):
output[k] = self.reduce_distributed_output(output[k], num_gpus)
# compute the average of scalars
elif isinstance(output[k], list):
output[k] = sum(output[k]) / len(output[k])
# do nothing when there's a scalar
elif isinstance(output[k], torch.Tensor) and output[k].dim() == 0:
pass
# do not reduce metrics that have batch size > num gpus
elif output[k].size(0) <= num_gpus:
output[k] = torch.mean(output[k])
return output
| 34.022321 | 112 | 0.579189 |
aced8b6907df9275725e1d08c6beaada14ab7ec5 | 594 | py | Python | diary/migrations/0002_entry_user.py | oluwagbenga-joloko/Diary-API | 23df6008dc27142cba56907cc3f8aced8da26b6b | [
"MIT"
] | null | null | null | diary/migrations/0002_entry_user.py | oluwagbenga-joloko/Diary-API | 23df6008dc27142cba56907cc3f8aced8da26b6b | [
"MIT"
] | 5 | 2019-11-06T16:18:13.000Z | 2021-06-10T21:07:50.000Z | diary/migrations/0002_entry_user.py | oluwagbenga-joloko/Diary-API | 23df6008dc27142cba56907cc3f8aced8da26b6b | [
"MIT"
] | null | null | null | # Generated by Django 2.1.11 on 2019-12-19 17:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('diary', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.AddField(
model_name='entry',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| 24.75 | 122 | 0.664983 |
aced8bf3c991137a7f7e07671e26af9b36126960 | 2,708 | py | Python | example_problems/tutorial/triangle/services/check_best_sol_server.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 4 | 2021-06-27T13:27:24.000Z | 2022-03-24T10:46:28.000Z | example_problems/tutorial/triangle/services/check_best_sol_server.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 1 | 2021-01-23T06:50:31.000Z | 2021-03-17T15:35:18.000Z | example_problems/tutorial/triangle/services/check_best_sol_server.py | romeorizzi/TALight | 2b694cb487f41dd0d36d7aa39f5c9c5a21bfc18e | [
"MIT"
] | 5 | 2021-04-01T15:21:57.000Z | 2022-01-29T15:07:38.000Z | #!/usr/bin/env python3
from sys import stderr, exit
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
import triangle_lib as tl
# METADATA OF THIS TAL_SERVICE:
args_list = [
('n',int),
('MIN_VAL',int),
('MAX_VAL',int),
('how_to_input_the_triangle',str),
('opt_sol_val',int),
('feedback',str),
('silent',bool),
]
ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
# START CODING YOUR SERVICE:
# TRIANGLE GENERATION
if ENV['how_to_input_the_triangle'] == "my_own_triangle":
triangle = []
TAc.print(LANG.render_feedback("insert-triangle", f'Please, insert your triangle, line by line. For every i in [1,{ENV["n"]}], line i comprises i integers separated by spaces.'), "yellow", ["bold"])
for i in range(1,ENV["n"]+1):
TAc.print(LANG.render_feedback("insert-line", f'Insert line i={i}, that is, {i} integers separated by spaces:'), "yellow")
line = TALinput(int, i, token_recognizer=lambda val,TAc,LANG: tl.check_val_range(val,ENV['MIN_VAL'],ENV['MAX_VAL'],TAc,LANG), TAc=TAc, LANG=LANG)
triangle.append(line)
TAc.OK()
TAc.print(LANG.render_feedback("triangle-insertion-completed", f'Insertion complete. Your triangle has been successfully inserted.'), "green")
else:
triangle = tl.random_triangle(ENV["n"],ENV['MIN_VAL'],ENV['MAX_VAL'],int(ENV['how_to_input_the_triangle']),TAc,LANG)
print(triangle)
best_reward = tl.best_path_cost(triangle)
if ENV['opt_sol_val'] == best_reward:
    if ENV['feedback'] == "yes_no" or not ENV['silent']:
        TAc.OK()
        TAc.print(LANG.render_feedback("right-best-sol", f'We agree, the solution value you provided is the best one for your triangle.'), "green", ["bold"])
else:
    TAc.NO()
    if ENV['feedback'] == "yes_no":
        TAc.print(LANG.render_feedback("wrong-best-sol", f'We don\'t agree, the solution value you provided is not the best one for your triangle.'), "red", ["bold"])
    if ENV['feedback'] == "bigger_or_smaller":
        if ENV['opt_sol_val'] < best_reward:
            TAc.print(LANG.render_feedback("smaller-than-best", f'We don\'t agree, the solution value you provided is smaller than the best one for your triangle.'), "red", ["bold"])
        else:
            TAc.print(LANG.render_feedback("bigger-than-best", f'We don\'t agree, the solution value you provided is bigger than the best one for your triangle.'), "red", ["bold"])
# The exact optimum is revealed only under the 'true_opt_val' feedback mode.
if ENV['feedback'] == "true_opt_val":
    TAc.print(LANG.render_feedback("best-value", f'The best reward for your triangle is {best_reward}.'), "yellow", ["bold"])
exit(0)
| 43.677419 | 202 | 0.667282 |
aced8d403c55d1e88ef65ddd3039f678d41f9a7b | 1,506 | py | Python | cosim_example_demos/TVB-NEST-demo/nest_elephant_tvb/app_interscalehub.py | sontheimer/ModularScience-Cosim-Template | cc5718217a695b70d8f38c38452f1403706014d3 | [
"BSD-3-Clause"
] | 1 | 2022-03-28T17:33:35.000Z | 2022-03-28T17:33:35.000Z | cosim_example_demos/TVB-NEST-demo/nest_elephant_tvb/app_interscalehub.py | sontheimer/ModularScience-Cosim-Template | cc5718217a695b70d8f38c38452f1403706014d3 | [
"BSD-3-Clause"
] | null | null | null | cosim_example_demos/TVB-NEST-demo/nest_elephant_tvb/app_interscalehub.py | sontheimer/ModularScience-Cosim-Template | cc5718217a695b70d8f38c38452f1403706014d3 | [
"BSD-3-Clause"
] | 2 | 2022-01-27T13:48:16.000Z | 2022-03-24T18:07:33.000Z | # ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
import sys
from Interscale_hub.InterscaleHub import InterscaleHub
from Interscale_hub.parameter import Parameter
def run_wrapper(direction, path):
# def run_wrapper(path):
# print(f'****************input from pipe:{input()}')
# direction
# 1 --> nest to Tvb
# 2 --> tvb to nest
param = Parameter()
direction = int(direction) # NOTE: will be changed
# direction = 1 # NOTE: will be changed
# receive steering commands init,start,stop
# 1) init InterscaleHUB
# includes param setup, buffer creation
hub = InterscaleHub(param, direction)
# 2) Start signal
# receive, pivot, transform, send
hub.start()
# 3) Stop signal
# disconnect and close ports
hub.stop()
if __name__ == '__main__':
# args 1 = direction
sys.exit(run_wrapper(sys.argv[1],sys.argv[2]))
| 32.042553 | 81 | 0.616202 |
aced8d437d5744bc640e844af87086ece17ca3b4 | 1,856 | py | Python | boundary/alarm_create.py | jdgwartney/boundary-api-cli | f17bc252e3958656514042360c9f96fd50ab496c | [
"Apache-2.0"
] | null | null | null | boundary/alarm_create.py | jdgwartney/boundary-api-cli | f17bc252e3958656514042360c9f96fd50ab496c | [
"Apache-2.0"
] | null | null | null | boundary/alarm_create.py | jdgwartney/boundary-api-cli | f17bc252e3958656514042360c9f96fd50ab496c | [
"Apache-2.0"
] | null | null | null | #
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import AlarmModify
import requests
class AlarmCreate(AlarmModify):
def __init__(self, **kwargs):
AlarmModify.__init__(self, False)
self._kwargs = kwargs
self.method = "POST"
self._alarm_result = None
def add_arguments(self):
self.parser.add_argument('-n', '--alarm-name', dest='alarm_name', action='store', required=True,
metavar='alarm_name', help='Name of the alarm')
self.parser.add_argument('-y', '--alarm-type', dest='alarm_type', action='store', required=False,
choices=['threshold', 'host', 'api'],
                                 help='Type of the alarm: threshold, host, or api. Default: threshold')
AlarmModify.add_arguments(self)
def get_arguments(self):
"""
Extracts the specific arguments of this CLI
"""
AlarmModify.get_arguments(self)
def get_description(self):
return 'Creates an alarm definition in an {0} account'.format(self.product_name)
def good_response(self, status_code):
"""
Determines what status codes represent a good response from an API call.
"""
return status_code == requests.codes.created
| 35.018868 | 112 | 0.658405 |
aced8d637788457caa4efe9b855224121f0033aa | 793 | py | Python | example/example/urls.py | dhepper/django-model-path-converter | 1096f9b3907c16482b31576beb263c5d2723843a | [
"MIT"
] | 11 | 2018-05-14T12:04:24.000Z | 2022-01-28T05:39:49.000Z | example/example/urls.py | dhepper/django-model-path-converter | 1096f9b3907c16482b31576beb263c5d2723843a | [
"MIT"
] | 6 | 2018-05-15T03:57:50.000Z | 2021-06-10T20:30:19.000Z | example/example/urls.py | dhepper/django-model-path-converter | 1096f9b3907c16482b31576beb263c5d2723843a | [
"MIT"
] | 1 | 2018-11-19T11:56:13.000Z | 2018-11-19T11:56:13.000Z | """example URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('blog.urls'))
]
| 34.478261 | 77 | 0.702396 |
aced8dbfa621e579e3e152895770392065e9b8f3 | 138 | py | Python | niftymic_register_image.py | martaranzini/NiftyMIC | 6bd3c914dad8f2983e84ef009b944c429e1fafb3 | [
"BSD-3-Clause"
] | 86 | 2017-11-23T01:37:42.000Z | 2022-03-10T01:46:48.000Z | niftymic_register_image.py | martaranzini/NiftyMIC | 6bd3c914dad8f2983e84ef009b944c429e1fafb3 | [
"BSD-3-Clause"
] | 20 | 2018-10-26T04:14:53.000Z | 2022-03-31T07:44:58.000Z | niftymic_register_image.py | martaranzini/NiftyMIC | 6bd3c914dad8f2983e84ef009b944c429e1fafb3 | [
"BSD-3-Clause"
] | 23 | 2018-01-26T12:56:37.000Z | 2022-01-24T05:20:18.000Z | # -*- coding: utf-8 -*-
import sys
from niftymic.application.register_image import main
if __name__ == "__main__":
sys.exit(main())
| 17.25 | 52 | 0.688406 |
aced8e7ebd4ff40e2867b77f00610b5fcedfe276 | 551 | py | Python | venv/lib/python3.6/site-packages/django/db/backends/base/client.py | xiegudong45/typeidea | db6504a232d120d6ffa185730bd35b9b9ecffa6c | [
"Apache-2.0"
] | 5,079 | 2015-01-01T03:39:46.000Z | 2022-03-31T07:38:22.000Z | venv/lib/python3.6/site-packages/django/db/backends/base/client.py | xiegudong45/typeidea | db6504a232d120d6ffa185730bd35b9b9ecffa6c | [
"Apache-2.0"
] | 1,623 | 2015-01-01T08:06:24.000Z | 2022-03-30T19:48:52.000Z | virtual/lib/python3.6/site-packages/django/db/backends/base/client.py | ngishjonathan/gallery | dd67f28887316d6277927c667f6641d26317b0b8 | [
"MIT"
] | 2,033 | 2015-01-04T07:18:02.000Z | 2022-03-28T19:55:47.000Z | class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError('subclasses of BaseDatabaseClient must provide a runshell() method')
| 34.4375 | 102 | 0.704174 |
aced8f4c0bf0d6a9b90b476a03fe3e900992477f | 5,086 | py | Python | chia/harvester/harvester.py | demosglok/chia-blockchain | a0d85fc81f435df86000c39d041fdd912333010d | [
"Apache-2.0"
] | null | null | null | chia/harvester/harvester.py | demosglok/chia-blockchain | a0d85fc81f435df86000c39d041fdd912333010d | [
"Apache-2.0"
] | null | null | null | chia/harvester/harvester.py | demosglok/chia-blockchain | a0d85fc81f435df86000c39d041fdd912333010d | [
"Apache-2.0"
] | null | null | null | import asyncio
import concurrent
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Callable, Dict, List, Optional, Set, Tuple
from blspy import G1Element
import chia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from chia.consensus.constants import ConsensusConstants
from chia.plotting.plot_tools import PlotInfo
from chia.plotting.plot_tools import add_plot_directory as add_plot_directory_pt
from chia.plotting.plot_tools import get_plot_directories as get_plot_directories_pt
from chia.plotting.plot_tools import load_plots
from chia.plotting.plot_tools import remove_plot_directory as remove_plot_directory_pt
log = logging.getLogger(__name__)
class Harvester:
provers: Dict[Path, PlotInfo]
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
root_path: Path
_is_shutdown: bool
executor: ThreadPoolExecutor
state_changed_callback: Optional[Callable]
cached_challenges: List
constants: ConsensusConstants
_refresh_lock: asyncio.Lock
def __init__(self, root_path: Path, config: Dict, constants: ConsensusConstants):
self.root_path = root_path
# From filename to prover
self.provers = {}
self.failed_to_open_filenames = {}
self.no_key_filenames = set()
self._is_shutdown = False
self.farmer_public_keys = []
self.pool_public_keys = []
        self.match_str: Optional[str] = None
        self.show_memo: bool = False
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"])
        self.state_changed_callback: Optional[Callable] = None
self.server = None
self.constants = constants
self.cached_challenges = []
self.log = log
self.last_load_time: float = 0
self.plot_load_frequency = config.get("plot_loading_frequency_seconds", 120)
async def _start(self):
self._refresh_lock = asyncio.Lock()
def _close(self):
self._is_shutdown = True
self.executor.shutdown(wait=True)
async def _await_closed(self):
pass
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
def on_disconnect(self, connection: ws.WSChiaConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self._state_changed("close_connection")
def get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
response_plots: List[Dict] = []
for path, plot_info in self.provers.items():
prover = plot_info.prover
response_plots.append(
{
"filename": str(path),
"size": prover.get_size(),
"plot-seed": prover.get_id(),
"pool_contract_puzzle_hash": plot_info.pool_contract_puzzle_hash,
"plot_public_key": plot_info.plot_public_key,
"file_size": plot_info.file_size,
"time_modified": plot_info.time_modified,
}
)
return (
response_plots,
[str(s) for s, _ in self.failed_to_open_filenames.items()],
[str(s) for s in self.no_key_filenames],
)
async def refresh_plots(self):
locked: bool = self._refresh_lock.locked()
changed: bool = False
if not locked:
async with self._refresh_lock:
# Avoid double refreshing of plots
(changed, self.provers, self.failed_to_open_filenames, self.no_key_filenames,) = load_plots(
self.provers,
self.failed_to_open_filenames,
self.farmer_public_keys,
self.pool_public_keys,
self.match_str,
self.show_memo,
self.root_path,
)
if changed:
self._state_changed("plots")
def delete_plot(self, str_path: str):
path = Path(str_path).resolve()
if path in self.provers:
del self.provers[path]
# Remove absolute and relative paths
if path.exists():
path.unlink()
self._state_changed("plots")
return True
async def add_plot_directory(self, str_path: str) -> bool:
add_plot_directory_pt(str_path, self.root_path)
await self.refresh_plots()
return True
async def get_plot_directories(self) -> List[str]:
return get_plot_directories_pt(self.root_path)
async def remove_plot_directory(self, str_path: str) -> bool:
remove_plot_directory_pt(str_path, self.root_path)
return True
def set_server(self, server):
self.server = server
| 35.075862 | 108 | 0.651199 |
aced9056be53d5cb3ab9b218adb0c1684e092e11 | 5,429 | py | Python | tests/unit/testActionRules/testActionRules.py | KIZI/actionrules | 227e021fa60ce40a1492322fe9bec35f0469e19c | [
"MIT"
] | 8 | 2019-10-11T09:49:20.000Z | 2022-03-21T23:23:55.000Z | tests/unit/testActionRules/testActionRules.py | hhl60492/actionrules | cdd1f58b44278e033d2eed7c603938e29368c9fa | [
"MIT"
] | 15 | 2019-12-29T20:14:36.000Z | 2021-12-10T13:16:00.000Z | tests/unit/testActionRules/testActionRules.py | KIZI/actionrules | 227e021fa60ce40a1492322fe9bec35f0469e19c | [
"MIT"
] | 7 | 2019-10-10T15:51:36.000Z | 2022-03-23T00:33:30.000Z | import unittest
import pandas as pd
from actionrules.actionRules import ActionRules
from actionrules.desiredState import DesiredState
class TestActionRules(unittest.TestCase):
def setUp(self):
self.actionRulesDiscoveryEmptyNotNan = ActionRules([pd.DataFrame()],
[pd.DataFrame()],
[pd.DataFrame()],
DesiredState(),
[pd.Series()],
[pd.Series()])
self.actionRulesDiscoveryEmptyNan = ActionRules([pd.DataFrame()],
[pd.DataFrame()],
[pd.DataFrame()],
DesiredState(),
[pd.Series()],
[pd.Series()],
True)
def test_is_action_couple_when_stable_candidate_not_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '0', "stable")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0',), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_stable_candidate_not_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '1', "stable")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_stable_candidate_not_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('nan', '1', "stable")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_flexible_candidate_not_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '0', "flexible")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_flexible_candidate_not_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('0', '1', "flexible")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0', '1'), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_flexible_candidate_not_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNotNan._is_action_couple('nan', '1', "flexible")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_stable_candidate_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '0', "stable")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0',), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_stable_candidate_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '1', "stable")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_stable_candidate_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('nan', '1', "stable")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('1*',), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_not_flexible_candidate_nan_same_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '0', "flexible")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (False, None, True)
self.assertEqual(expected, result)
def test_is_action_couple_when_flexible_candidate_nan_different_values(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('0', '1', "flexible")
        # (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('0', '1'), False)
self.assertEqual(expected, result)
def test_is_action_couple_when_flexible_candidate_nan_missing_value(self):
result = self.actionRulesDiscoveryEmptyNan._is_action_couple('nan', '1', "flexible")
# (bool is_action_pair, (before, after) action_pair, bool break_rule)
expected = (True, ('None', '1'), False)
self.assertEqual(expected, result)
def test_get_uplift(self):
result = self.actionRulesDiscoveryEmptyNan._get_uplift(0.2, 0.8, 0.8)
expected = 0.15
self.assertAlmostEqual(expected, result)
if __name__ == '__main__':
unittest.main()
| 52.708738 | 95 | 0.634371 |
aced913b23a1113bdbceed0ec259d309af763bf0 | 1,149 | py | Python | main_job.py | vsantiago113/Netmiko-Boilerplate | 71996902f347c0b5c917c7ca0c7832c4bc653108 | [
"MIT"
] | null | null | null | main_job.py | vsantiago113/Netmiko-Boilerplate | 71996902f347c0b5c917c7ca0c7832c4bc653108 | [
"MIT"
] | null | null | null | main_job.py | vsantiago113/Netmiko-Boilerplate | 71996902f347c0b5c917c7ca0c7832c4bc653108 | [
"MIT"
] | null | null | null | from netmiko import ConnectHandler
from multiprocessing import Pool, Manager
import tqdm
def run_job(args):
device, dictionary = args
connection = ConnectHandler(**device)
output = connection.send_command('show ip int b')
dictionary[device['host']] = {'results': [output]}
connection.disconnect()
if __name__ == '__main__':
devices = [{'device_type': 'cisco_ios', 'host': '192.168.1.201', 'username': 'admin', 'password': 'Cisco123'},
{'device_type': 'cisco_ios', 'host': '192.168.1.202', 'username': 'admin', 'password': 'Cisco123'},
{'device_type': 'cisco_ios', 'host': '192.168.1.203', 'username': 'admin', 'password': 'Cisco123'}]
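    # A Manager-backed dict is needed here: a plain dict would not be shared
    # across the separate worker processes that Pool spawns.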
manager = Manager()
shared_dictionary = manager.dict()
devices = [(i, shared_dictionary) for i in devices]
pool = Pool(processes=7)
for _ in tqdm.tqdm(pool.imap_unordered(run_job, devices), total=len(devices)):
pass
pool.close()
pool.join()
for k, v in shared_dictionary.items():
print(f'Host: {k}')
for o in v['results']:
print(o)
print('~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
| 32.828571 | 114 | 0.601393 |
aced914301b5bd0fdc503b602c3b1707225d9df7 | 15,072 | py | Python | sdk/python/pulumi_azure_native/network/v20171101/application_security_group.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20171101/application_security_group.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/network/v20171101/application_security_group.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['ApplicationSecurityGroupArgs', 'ApplicationSecurityGroup']
@pulumi.input_type
class ApplicationSecurityGroupArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
application_security_group_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ApplicationSecurityGroup resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[str] application_security_group_name: The name of the application security group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_security_group_name is not None:
pulumi.set(__self__, "application_security_group_name", application_security_group_name)
if id is not None:
pulumi.set(__self__, "id", id)
if location is not None:
pulumi.set(__self__, "location", location)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationSecurityGroupName")
def application_security_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the application security group.
"""
return pulumi.get(self, "application_security_group_name")
@application_security_group_name.setter
def application_security_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "application_security_group_name", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ApplicationSecurityGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_security_group_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
An application security group in a resource group.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] application_security_group_name: The name of the application security group.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ApplicationSecurityGroupArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An application security group in a resource group.
:param str resource_name: The name of the resource.
:param ApplicationSecurityGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ApplicationSecurityGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_security_group_name: Optional[pulumi.Input[str]] = None,
id: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ApplicationSecurityGroupArgs.__new__(ApplicationSecurityGroupArgs)
__props__.__dict__["application_security_group_name"] = application_security_group_name
__props__.__dict__["id"] = id
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["tags"] = tags
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20171101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20170901:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20170901:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20171001:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20171001:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20180101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20180201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20180401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20180601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20180701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20180801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20180801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20181001:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20181001:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20181101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20181101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20181201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20181201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20190201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20190401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20190601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20190701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20190801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20190901:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20190901:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20191101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20191101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20191201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20191201:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20200301:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200301:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20200401:ApplicationSecurityGroup"), 
pulumi.Alias(type_="azure-nextgen:network/v20200401:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20200501:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200501:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20200601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200601:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20200701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200701:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20200801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20200801:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-native:network/v20201101:ApplicationSecurityGroup"), pulumi.Alias(type_="azure-nextgen:network/v20201101:ApplicationSecurityGroup")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ApplicationSecurityGroup, __self__).__init__(
'azure-native:network/v20171101:ApplicationSecurityGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ApplicationSecurityGroup':
"""
Get an existing ApplicationSecurityGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ApplicationSecurityGroupArgs.__new__(ApplicationSecurityGroupArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["resource_guid"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return ApplicationSecurityGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the application security group resource. Possible values are: 'Succeeded', 'Updating', 'Deleting', and 'Failed'.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
The resource GUID property of the application security group resource. It uniquely identifies a resource, even if the user changes its name or migrate the resource across subscriptions or resource groups.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| 57.090909 | 4,406 | 0.701566 |
aced9199eda40bd30d873dd5a9c371bba55ed884 | 1,679 | py | Python | manager/projects/ui/views/reviews.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | 30 | 2016-03-26T12:08:04.000Z | 2021-12-24T14:48:32.000Z | manager/projects/ui/views/reviews.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | 1,250 | 2016-03-23T04:56:50.000Z | 2022-03-28T02:27:58.000Z | manager/projects/ui/views/reviews.py | jlbrewe/hub | c737669e6493ad17536eaa240bed3394b20c6b7d | [
"Apache-2.0"
] | 11 | 2016-07-14T17:04:20.000Z | 2021-07-01T16:19:09.000Z | from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from projects.api.serializers import ReviewUpdateSerializer
from projects.api.views.reviews import ProjectsReviewsViewSet
def list(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
List reviews for a project.
"""
viewset = ProjectsReviewsViewSet.init("list", request, args, kwargs)
reviews = viewset.get_queryset()
context = viewset.get_response_context(queryset=reviews)
meta = viewset.get_project().get_meta()
return render(request, "projects/reviews/list.html", dict(**context, meta=meta))
def create(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Create a review for a project.
"""
viewset = ProjectsReviewsViewSet.init("create", request, args, kwargs)
serializer = viewset.get_serializer()
context = viewset.get_response_context(serializer=serializer)
meta = viewset.get_project().get_meta()
return render(request, "projects/reviews/create.html", dict(**context, meta=meta))
def retrieve(request: HttpRequest, *args, **kwargs) -> HttpResponse:
"""
Retrieve a review from a project.
"""
viewset = ProjectsReviewsViewSet.init("retrieve", request, args, kwargs)
review = viewset.get_object()
context = viewset.get_response_context(instance=review)
serializer = (
ReviewUpdateSerializer()
if context.get("is_editor") or context.get("is_user")
else None
)
meta = viewset.get_project().get_meta()
return render(
request,
"projects/reviews/retrieve.html",
dict(**context, serializer=serializer, meta=meta),
)
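# --- Illustrative urlconf wiring (a sketch; route patterns and names are
# --- assumptions, and the project defines the real ones elsewhere):
#
#   urlpatterns = [
#       path("<slug:project>/reviews/", list, name="ui-projects-reviews-list"),
#       path("<slug:project>/reviews/new/", create, name="ui-projects-reviews-create"),
#   ]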
| 34.979167 | 86 | 0.703395 |
aced91facdf4e138241689673859243b68176be9 | 13,404 | py | Python | venv/lib/python3.8/site-packages/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | venv/lib/python3.8/site-packages/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py | saeedya/docker-ansible | 6fb0cfc6bc4a5925b21380952a5a4502ec02119a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Dell EMC OpenManage Ansible Modules
# Version 4.0.0
# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r"""
---
module: ome_domain_user_groups
short_description: Create, modify, or delete an Active Directory user group on
OpenManage Enterprise and OpenManage Enterprise Modular
version_added: "4.0.0"
description: This module allows to create, modify, or delete an Active Directory user group on
OpenManage Enterprise and OpenManage Enterprise Modular.
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
options:
state:
type: str
description:
- C(present) imports or modifies the Active Directory user group.
- C(absent) deletes an existing Active Directory user group.
choices: [present, absent]
default: present
group_name:
type: str
required: True
description:
- The desired Active Directory user group name to be imported or removed.
- "Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator."
- I(group_name) value is case insensitive.
role:
type: str
description:
- The desired roles and privilege for the imported Active Directory user group.
- "OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER,
FABRIC MANAGER, VIEWER."
- "OpenManage Enterprise Roles: ADMINISTRATOR, DEVICE MANAGER, VIEWER."
- I(role) value is case insensitive.
directory_name:
type: str
description:
- The directory name set while adding the Active Directory.
- I(directory_name) is mutually exclusive with I(directory_id).
directory_id:
type: int
description:
- The ID of the Active Directory.
- I(directory_id) is mutually exclusive with I(directory_name).
domain_username:
type: str
description:
- Active directory domain username.
- "Example: username@domain or domain\\username."
domain_password:
type: str
description:
- Active directory domain password.
requirements:
- "python >= 2.7.17"
author:
- "Felix Stephen (@felixs88)"
notes:
- This module supports C(check_mode) and idempotency.
- Run this module from a system that has direct access to OpenManage Enterprise
or OpenManage Enterprise Modular.
"""
EXAMPLES = r"""
---
- name: Create Active Directory user group
dellemc.openmanage.ome_domain_user_groups:
hostname: "192.168.0.1"
username: "username"
password: "password"
state: present
group_name: account operators
directory_name: directory_name
role: administrator
domain_username: username@domain
domain_password: domain_password
- name: Update Active Directory user group
dellemc.openmanage.ome_domain_user_groups:
hostname: "192.168.0.1"
username: "username"
password: "password"
state: present
group_name: account operators
role: viewer
- name: Delete active directory user group
dellemc.openmanage.ome_domain_user_groups:
hostname: "192.168.0.1"
username: "username"
password: "password"
state: absent
group_name: administrators
"""
RETURN = r"""
---
msg:
type: str
description: Overall status of the Active Directory user group operation.
returned: always
sample: Successfully imported the active directory user group.
domain_user_status:
description: Details of the domain user operation, when I(state) is C(present).
returned: When I(state) is C(present).
type: dict
sample: {
"Description": null,
"DirectoryServiceId": 16097,
"Enabled": true,
"Id": "16617",
"IsBuiltin": false,
"IsVisible": true,
"Locked": false,
"Name": "Account Operators",
"ObjectGuid": "a491859c-031e-42a3-ae5e-0ab148ecf1d6",
"ObjectSid": null,
"Oem": null,
"Password": null,
"PlainTextPassword": null,
"RoleId": "16",
"UserName": "Account Operators",
"UserTypeId": 2
}
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
type: dict
sample: {
"error": {
"code": "Base.1.0.GeneralError",
"message": "A general error has occurred. See ExtendedInfo for more information.",
"@Message.ExtendedInfo": [
{
"MessageId": "GEN1234",
"RelatedProperties": [],
"Message": "Unable to process the request because an error occurred.",
"MessageArgs": [],
"Severity": "Critical",
"Resolution": "Retry the operation. If the issue persists, contact your system administrator."
}
]
}
}
"""
import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
ROLE_URI = "AccountService/Roles"
ACCOUNT_URI = "AccountService/Accounts"
GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider"
IMPORT_ACC_PRV = "AccountService/Actions/AccountService.ImportExternalAccountProvider"
SEARCH_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
def get_directory(module, rest_obj):
user_dir_name = module.params.get("directory_name")
user_dir_id = module.params.get("directory_id")
key = "name" if user_dir_name is not None else "id"
value = user_dir_name if user_dir_name is not None else user_dir_id
dir_id = None
if user_dir_name is None and user_dir_id is None:
module.fail_json(msg="missing required arguments: directory_name or directory_id")
directory_resp = rest_obj.invoke_request("GET", GET_AD_ACC)
for dire in directory_resp.json_data["value"]:
if user_dir_name is not None and dire["Name"] == user_dir_name:
dir_id = dire["Id"]
break
if user_dir_id is not None and dire["Id"] == user_dir_id:
dir_id = dire["Id"]
break
else:
module.fail_json(msg="Unable to complete the operation because the entered "
"directory {0} '{1}' does not exist.".format(key, value))
return dir_id
def search_directory(module, rest_obj, dir_id):
group_name, obj_gui_id, common_name = module.params["group_name"], None, None
payload = {"DirectoryServerId": dir_id, "Type": "AD",
"UserName": module.params["domain_username"],
"Password": module.params["domain_password"],
"CommonName": group_name}
try:
resp = rest_obj.invoke_request("POST", SEARCH_AD, data=payload)
for ad in resp.json_data:
if ad["CommonName"].lower() == group_name.lower():
obj_gui_id = ad["ObjectGuid"]
common_name = ad["CommonName"]
break
else:
module.fail_json(msg="Unable to complete the operation because the entered "
"group name '{0}' does not exist.".format(group_name))
except HTTPError as err:
error = json.load(err)
if error['error']['@Message.ExtendedInfo'][0]['MessageId'] in ["CGEN1004", "CSEC5022"]:
module.fail_json(msg="Unable to complete the operation because the entered "
"domain username or domain password are invalid.")
return obj_gui_id, common_name
def directory_user(module, rest_obj):
user = get_directory_user(module, rest_obj)
new_role_id = get_role(module, rest_obj)
dir_id = get_directory(module, rest_obj)
domain_resp, msg = None, ''
if user is None:
obj_gui_id, common_name = search_directory(module, rest_obj, dir_id)
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True)
payload = [
{"UserTypeId": 2, "DirectoryServiceId": dir_id, "Description": None,
"Name": common_name, "Password": "", "UserName": common_name, "RoleId": new_role_id, "Locked": False,
"IsBuiltin": False, "Enabled": True, "ObjectGuid": obj_gui_id}
]
domain_resp = rest_obj.invoke_request("POST", IMPORT_ACC_PRV, data=payload)
msg = 'imported'
else:
if (int(user["RoleId"]) == new_role_id):
user = rest_obj.strip_substr_dict(user)
module.exit_json(msg=NO_CHANGES_MSG, domain_user_status=user)
else:
payload = {"Id": str(user["Id"]), "UserTypeId": 2, "DirectoryServiceId": dir_id,
"UserName": user["UserName"], "RoleId": str(new_role_id), "Enabled": user["Enabled"]}
update_uri = "{0}('{1}')".format(ACCOUNT_URI, user['Id'])
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=payload)
domain_resp = rest_obj.invoke_request("PUT", update_uri, data=payload)
msg = 'updated'
if domain_resp is None:
module.fail_json(msg="Unable to complete the Active Directory user account.")
return domain_resp.json_data, msg
def get_role(module, rest_obj):
role_name, role_id = module.params.get("role"), None
if role_name is None:
module.fail_json(msg="missing required arguments: role")
resp_role = rest_obj.invoke_request("GET", ROLE_URI)
role_list = resp_role.json_data["value"]
for role in role_list:
if role["Name"] == role_name.upper().replace(" ", "_"):
role_id = int(role["Id"])
break
else:
module.fail_json(msg="Unable to complete the operation because the entered "
"role name '{0}' does not exist.".format(role_name))
return role_id
def get_directory_user(module, rest_obj):
user_group_name, user = module.params.get("group_name"), None
state = module.params["state"]
if user_group_name is None:
module.fail_json(msg="missing required arguments: group_name")
user_resp = rest_obj.invoke_request('GET', ACCOUNT_URI)
for usr in user_resp.json_data["value"]:
if usr["UserName"].lower() == user_group_name.lower() and usr["UserTypeId"] == 2:
user = usr
if module.check_mode and state == "absent":
user = rest_obj.strip_substr_dict(usr)
module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=user)
break
else:
if state == "absent":
module.exit_json(msg=NO_CHANGES_MSG)
return user
def delete_directory_user(rest_obj, user_id):
delete_uri, changed = "{0}('{1}')".format(ACCOUNT_URI, user_id), False
msg = "Invalid active directory user group name provided."
resp = rest_obj.invoke_request('DELETE', delete_uri)
if resp.status_code == 204:
changed = True
msg = "Successfully deleted the active directory user group."
return msg, changed
def main():
module = AnsibleModule(
argument_spec={
"hostname": {"required": True, "type": 'str'},
"username": {"required": True, "type": 'str'},
"password": {"required": True, "type": 'str', "no_log": True},
"port": {"required": False, "default": 443, "type": 'int'},
"state": {"required": False, "type": 'str', "default": "present",
"choices": ['present', 'absent']},
"group_name": {"required": True, "type": 'str'},
"role": {"required": False, "type": 'str'},
"directory_name": {"required": False, "type": 'str'},
"directory_id": {"required": False, "type": 'int'},
"domain_username": {"required": False, "type": 'str'},
"domain_password": {"required": False, "type": 'str', "no_log": True},
},
mutually_exclusive=[['directory_name', 'directory_id'], ],
supports_check_mode=True)
try:
with RestOME(module.params, req_session=True) as rest_obj:
if module.params["state"] == "present":
resp, msg = directory_user(module, rest_obj)
if isinstance(resp, list):
resp = resp[0]
module.exit_json(
msg="Successfully {0} the active directory user group.".format(msg),
domain_user_status=resp, changed=True
)
if module.params["state"] == "absent":
user = get_directory_user(module, rest_obj)
msg, changed = delete_directory_user(rest_obj, int(user["Id"]))
user = rest_obj.strip_substr_dict(user)
module.exit_json(msg=msg, changed=changed, domain_user_status=user)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError) as err:
module.fail_json(msg=str(err))
if __name__ == '__main__':
main()
| 38.965116 | 114 | 0.654208 |
aced92c0ebcbbe0421030a6bbdc60231a4821650 | 2,450 | py | Python | ppocr/data/imaug/__init__.py | YuxinZou/Ultra_light_OCR_No.4 | 2b2aaa6400e8b7bdc85affbfd1c1779286b8b3a9 | [
"Apache-2.0"
] | 7 | 2021-07-08T07:40:17.000Z | 2022-02-26T07:59:17.000Z | ppocr/data/imaug/__init__.py | YuxinZou/Ultra_light_OCR_No.4 | 2b2aaa6400e8b7bdc85affbfd1c1779286b8b3a9 | [
"Apache-2.0"
] | null | null | null | ppocr/data/imaug/__init__.py | YuxinZou/Ultra_light_OCR_No.4 | 2b2aaa6400e8b7bdc85affbfd1c1779286b8b3a9 | [
"Apache-2.0"
] | 1 | 2021-11-10T17:45:39.000Z | 2021-11-10T17:45:39.000Z | # flake8: noqa
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .iaa_augment import IaaAugment
from .make_border_map import MakeBorderMap
from .make_shrink_map import MakeShrinkMap
from .random_crop_data import EastRandomCropData, PSERandomCrop
from .rec_general import (
Albu,
Imau,
HeightRatioCrop,
Resize,
ToFloat,
Normalize,
Pad,
PatchPad,
WPad,
Transpose,
)
from .rec_img_aug import (
RecAug,
RecResizeImg,
ClsResizeImg,
SRNRecResizeImg,
Tia,
HeightCrop,
GaussBlur,
Color,
ShiftJitter,
GaussNoise,
PixelReverse,
Iaa_AdditiveGaussianNoise,
Iaa_MotionBlur,
RandomPadding,
)
from .randaugment import RandAugment
from .operators import *
from .label_ops import *
from .east_process import *
from .sast_process import *
from .pg_process import *
def transform(data, ops=None):
""" transform """
if ops is None:
ops = []
for op in ops:
data = op(data)
if data is None:
return None
return data
def create_operators(op_param_list, global_config=None):
"""
create operators based on the config
Args:
params(list): a dict list, used to create some operators
"""
    assert isinstance(op_param_list, list), 'operator config should be a list'
ops = []
for operator in op_param_list:
assert isinstance(operator,
dict) and len(operator) == 1, "yaml format error"
op_name = list(operator)[0]
param = {} if operator[op_name] is None else operator[op_name]
if global_config is not None:
param.update(global_config)
op = eval(op_name)(**param)
ops.append(op)
return ops
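# --- Illustrative usage (a sketch; assumes the listed ops need no required
# --- constructor arguments):
#
#   ops = create_operators([{'ToFloat': None}, {'Transpose': None}])
#   sample = transform({'image': img}, ops)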
| 26.630435 | 80 | 0.693878 |
aced92ec8abd0cc57bce77d3924ffeafda6888b6 | 102,713 | py | Python | app/venv/lib/python2.7/site-packages/pandas/io/tests/test_sql.py | anaheino/Ufo-sightings-map | 64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc | [
"MIT"
] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | app/venv/lib/python2.7/site-packages/pandas/io/tests/test_sql.py | anaheino/Ufo-sightings-map | 64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc | [
"MIT"
] | 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | app/venv/lib/python2.7/site-packages/pandas/io/tests/test_sql.py | anaheino/Ufo-sightings-map | 64af02093f97737cbbdfd8af9e1aeb4d8aa8fcdc | [
"MIT"
] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | """SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL, PostgreSQL)
derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback` and `TestMySQLLegacy`)
"""
from __future__ import print_function
import unittest
import sqlite3
import csv
import os
import sys
import nose
import warnings
import numpy as np
import pandas as pd
from datetime import datetime, date, time
from pandas import DataFrame, Series, Index, MultiIndex, isnull, concat
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
from pandas.core import common as com
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
from pandas.io.sql import read_sql_table, read_sql_query
import pandas.util.testing as tm
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
SQL_STRINGS = {
'create_iris': {
'sqlite': """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
'mysql': """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
'postgresql': """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)"""
},
'insert_iris': {
'sqlite': """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
'mysql': """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
'postgresql': """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);"""
},
'create_test_types': {
'sqlite': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
'mysql': """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
'postgresql': """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)"""
},
'insert_test_types': {
'sqlite': {
'query': """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'mysql': {
'query': """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
'postgresql': {
'query': """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
'fields': (
'TextCol', 'DateCol', 'DateColWithTz', 'IntDateCol', 'FloatCol',
'IntCol', 'BoolCol', 'IntColWithNull', 'BoolColWithNull'
)
},
},
'read_parameters': {
'sqlite': "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
'mysql': 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
'postgresql': 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s'
},
'read_named_parameters': {
'sqlite': """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
'mysql': """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
'postgresql': """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
"""
},
'create_view': {
'sqlite': """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
}
}
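# SQL_STRINGS is keyed first by statement kind and then by flavor, e.g.
# SQL_STRINGS['read_parameters']['sqlite'] is the parametrized query used by
# _read_sql_iris_parameter below.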
class MixInBase(object):
def tearDown(self):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute("DROP TABLE IF EXISTS %s" % sql._get_valid_mysql_name(table_name))
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute('SHOW TABLES')
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute("DROP TABLE IF EXISTS %s" % sql._get_valid_sqlite_name(table_name))
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest(unittest.TestCase):
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, 'execute'):
return self.conn
else:
return self.conn.cursor()
def _load_iris_data(self):
import io
iris_csv_file = os.path.join(tm.get_data_path(), 'iris.csv')
self.drop_table('iris')
self._get_exec().execute(SQL_STRINGS['create_iris'][self.flavor])
with io.open(iris_csv_file, mode='r', newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS['insert_iris'][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table('iris_view')
self._get_exec().execute(SQL_STRINGS['create_view'][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
self.assertTrue(
issubclass(pytype, np.floating), 'Loaded frame has incorrect type')
        # equalContents returns a bool; assert it so the check actually runs
        self.assertTrue(
            tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa']))
def _load_test1_data(self):
columns = ['index', 'A', 'B', 'C', 'D']
        data = [
            ('2000-01-03 00:00:00', 0.980268513777, 3.68573087906, -0.364216805298, -1.15973806169),
            ('2000-01-04 00:00:00', 1.04791624281, -0.0412318367011, -0.16181208307, 0.212549316967),
            ('2000-01-05 00:00:00', 0.498580885705, 0.731167677815, -0.537677223318, 1.34627041952),
            ('2000-01-06 00:00:00', 1.12020151869, 1.56762092543, 0.00364077397681, 0.67525259227)]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(dict(A=[4, 1, 3, 6],
B=['asd', 'gsq', 'ylt', 'jkl'],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=['1990-11-22', '1991-10-26', '1993-11-26', '1995-12-12']))
df['E'] = to_datetime(df['E'])
self.test_frame2 = df
def _load_test3_data(self):
columns = ['index', 'A', 'B']
        data = [
            ('2000-01-03 00:00:00', 2 ** 31 - 1, -1.987670),
            ('2000-01-04 00:00:00', -29, -0.0412318367011),
            ('2000-01-05 00:00:00', 20000, 0.731167677815),
            ('2000-01-06 00:00:00', -290867, 1.56762092543)]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table('types_test_data')
self._get_exec().execute(SQL_STRINGS['create_test_types'][self.flavor])
ins = SQL_STRINGS['insert_test_types'][self.flavor]
data = [
{
'TextCol': 'first',
'DateCol': '2000-01-03 00:00:00',
'DateColWithTz': '2000-01-01 00:00:00-08:00',
'IntDateCol': 535852800,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': 1,
'BoolColWithNull': False,
},
{
'TextCol': 'first',
'DateCol': '2000-01-04 00:00:00',
'DateColWithTz': '2000-06-01 00:00:00-07:00',
'IntDateCol': 1356998400,
'FloatCol': 10.10,
'IntCol': 1,
'BoolCol': False,
'IntColWithNull': None,
'BoolColWithNull': None,
},
]
for d in data:
self._get_exec().execute(
ins['query'],
[d[field] for field in ins['fields']]
)
def _count_rows(self, table_name):
result = self._get_exec().execute(
"SELECT count(*) AS count_1 FROM %s" % table_name).fetchone()
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS['read_parameters'][self.flavor]
params = ['Iris-setosa', 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS['read_named_parameters'][self.flavor]
params = {'name': 'Iris-setosa', 'length': 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame1')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
# Nuke table
self.drop_table('test_frame1')
def _to_sql_empty(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], 'test_frame1')
def _to_sql_fail(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
self.assertRaises(ValueError, self.pandasSQL.to_sql,
self.test_frame1, 'test_frame1', if_exists='fail')
self.drop_table('test_frame1')
def _to_sql_replace(self):
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='replace')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _to_sql_append(self):
# Nuke table just in case
self.drop_table('test_frame1')
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='fail')
# Add to table again
self.pandasSQL.to_sql(
self.test_frame1, 'test_frame1', if_exists='append')
self.assertTrue(self.pandasSQL.has_table(
'test_frame1'), 'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame1')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
self.drop_table('test_frame1')
def _roundtrip(self):
self.drop_table('test_frame_roundtrip')
self.pandasSQL.to_sql(self.test_frame1, 'test_frame_roundtrip')
result = self.pandasSQL.read_query('SELECT * FROM test_frame_roundtrip')
result.set_index('level_0', inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def _to_sql_save_index(self):
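        # writing a frame whose index is a named column should create a
        # database index on that column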
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A'])
self.pandasSQL.to_sql(df, 'test_to_sql_saves_index')
ix_cols = self._get_index_columns('test_to_sql_saves_index')
self.assertEqual(ix_cols, [['A',],])
def _transaction_test(self):
self.pandasSQL.execute("CREATE TABLE test_trans (A INT, B TEXT)")
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
# Make sure when transaction is rolled back, no rows get inserted
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise Exception('error')
except:
# ignore raised exception
pass
res = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res), 0)
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query('SELECT * FROM test_trans')
self.assertEqual(len(res2), 1)
#------------------------------------------------------------------------------
#--- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
    From this class, two subclasses are derived to run these tests for both
    the sqlalchemy mode (`TestSQLApi`) and the fallback mode
    (`TestSQLiteFallbackApi`).
These tests are run with sqlite3. Specific tests for the different
sql flavours are included in `_TestSQLAlchemy`.
    Notes:
    flavor can always be passed, even in SQLAlchemy mode, and should be
    correctly ignored.
    we don't use drop_table because that isn't part of the public API
"""
flavor = 'sqlite'
mode = None
def setUp(self):
self.conn = self.connect()
self._load_iris_data()
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query(
"SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_legacy_read_frame(self):
with tm.assert_produces_warning(FutureWarning):
iris_frame = sql.read_frame(
"SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn, flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='sqlite'), 'Table not written to DB')
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, 'test_frame2',
self.conn, flavor='sqlite', if_exists='fail')
self.assertTrue(
sql.has_table('test_frame2', self.conn, flavor='sqlite'), 'Table not written to DB')
self.assertRaises(ValueError, sql.to_sql, self.test_frame1,
'test_frame2', self.conn, flavor='sqlite', if_exists='fail')
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame3',
self.conn, flavor='sqlite', if_exists='replace')
self.assertTrue(
sql.has_table('test_frame3', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = len(self.test_frame1)
num_rows = self._count_rows('test_frame3')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='fail')
# Add to table again
sql.to_sql(self.test_frame1, 'test_frame4',
self.conn, flavor='sqlite', if_exists='append')
self.assertTrue(
sql.has_table('test_frame4', self.conn, flavor='sqlite'),
'Table not written to DB')
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows('test_frame4')
self.assertEqual(
num_rows, num_entries, "not the same number of rows as entries")
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, 'test_frame5',
self.conn, flavor='sqlite', index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype='int64'), name='series')
sql.to_sql(s, "test_series", self.conn, flavor='sqlite', index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_to_sql_panel(self):
panel = tm.makePanel()
self.assertRaises(NotImplementedError, sql.to_sql, panel,
'test_panel', self.conn, flavor='sqlite')
def test_legacy_write_frame(self):
# Assume that functionality is already tested above so just do
# quick check that it basically works
with tm.assert_produces_warning(FutureWarning):
sql.write_frame(self.test_frame1, 'test_frame_legacy', self.conn,
flavor='sqlite')
self.assertTrue(
sql.has_table('test_frame_legacy', self.conn, flavor='sqlite'),
'Table not written to DB')
def test_roundtrip(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip',
con=self.conn, flavor='sqlite')
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index('level_0', inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(self.test_frame1, 'test_frame_roundtrip', con=self.conn,
index=False, flavor='sqlite', chunksize=2)
result = sql.read_sql_query(
'SELECT * FROM test_frame_roundtrip',
con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_date_parsing(self):
        # Test date parsing in read_sql_query
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
self.assertFalse(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(
issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn,
index_col='DateCol',
parse_dates=['DateCol', 'IntDateCol'])
self.assertTrue(issubclass(df.index.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(['00:00:01', '00:00:03'], name='foo')).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql('test_timedelta', self.conn)
result = sql.read_sql_query('SELECT * FROM test_timedelta', self.conn)
tm.assert_series_equal(result['foo'], df['foo'].astype('int64'))
def test_complex(self):
df = DataFrame({'a':[1+1j, 2j]})
# Complex data type should raise error
self.assertRaises(ValueError, df.to_sql, 'test_complex', self.conn)
def test_to_sql_index_label(self):
temp_frame = DataFrame({'col1': range(4)})
# no index name, defaults to 'index'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
# using the index name
temp_frame.index.name = 'index_name'
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'index_name',
"Index name not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label='other_label')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'other_label',
"Specified index_label not written to database")
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame({'col1': range(4)},
index=MultiIndex.from_product([('A0', 'A1'), ('B0', 'B1')]))
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, 'test_index_label', self.conn)
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[0], 'level_0')
self.assertEqual(frame.columns[1], 'level_1')
# specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['A', 'B'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Specified index_labels not written to database")
# using the index name
temp_frame.index.names = ['A', 'B']
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace')
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['A', 'B'],
"Index names not written to database")
# has index name, but specifying index_label
sql.to_sql(temp_frame, 'test_index_label', self.conn,
if_exists='replace', index_label=['C', 'D'])
frame = sql.read_sql_query('SELECT * FROM test_index_label', self.conn)
self.assertEqual(frame.columns[:2].tolist(), ['C', 'D'],
"Specified index_labels not written to database")
# wrong length of index_label
self.assertRaises(ValueError, sql.to_sql, temp_frame,
'test_index_label', self.conn, if_exists='replace',
index_label='C')
def test_multiindex_roundtrip(self):
df = DataFrame.from_records([(1,2.1,'line1'), (2,1.5,'line2')],
columns=['A','B','C'], index=['A','B'])
df.to_sql('test_multiindex_roundtrip', self.conn)
result = sql.read_sql_query('SELECT * FROM test_multiindex_roundtrip',
self.conn, index_col=['A','B'])
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn,
if_exists='replace')
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn)
self.assertTrue('CREATE' in create_sql)
def test_get_schema_dtypes(self):
float_frame = DataFrame({'a':[1.1,1.2], 'b':[2.1,2.2]})
dtype = sqlalchemy.Integer if self.mode == 'sqlalchemy' else 'INTEGER'
create_sql = sql.get_schema(float_frame, 'test', 'sqlite',
con=self.conn, dtype={'b':dtype})
self.assertTrue('CREATE' in create_sql)
self.assertTrue('INTEGER' in create_sql)
def test_get_schema_keys(self):
frame = DataFrame({'Col1':[1.1,1.2], 'Col2':[2.1,2.2]})
create_sql = sql.get_schema(frame, 'test', 'sqlite',
con=self.conn, keys='Col1')
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
self.assertTrue(constraint_sentence in create_sql)
# multiple columns as key (GH10385)
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite',
con=self.conn, keys=['A', 'B'])
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
self.assertTrue(constraint_sentence in create_sql)
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
df.to_sql('test_chunksize', self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query("select * from test_chunksize",
self.conn, chunksize=5):
res2 = concat([res2, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res2)
# reading the query in chunks with read_sql_query
if self.mode == 'sqlalchemy':
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn,
chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
self.assertEqual(len(chunk), sizes[i])
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{'person_id': [1, 2, 3],
'person_name': ['John P. Doe', 'Jane Dove', 'John P. Doe']})
df2 = df.copy()
df2['person_name'] = df2['person_name'].astype('category')
df2.to_sql('test_categorical', self.conn, index=False)
res = sql.read_sql_query('SELECT * FROM test_categorical', self.conn)
tm.assert_frame_equal(res, df)
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
    Tests for `read_sql_table` are included here, as these are specific to
    the sqlalchemy mode.
"""
flavor = 'sqlite'
mode = 'sqlalchemy'
def connect(self):
if SQLALCHEMY_INSTALLED:
return sqlalchemy.create_engine('sqlite:///:memory:')
else:
raise nose.SkipTest('SQLAlchemy not installed')
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
cols = ['A', 'B']
result = sql.read_sql_table('test_frame', self.conn, columns=cols)
self.assertEqual(result.columns.tolist(), cols,
"Columns not correctly selected")
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, 'test_frame', self.conn)
result = sql.read_sql_table('test_frame', self.conn, index_col="index")
self.assertEqual(result.index.names, ["index"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
result = sql.read_sql_table('test_frame', self.conn, index_col=["A", "B"],
columns=["C", "D"])
self.assertEqual(result.index.names, ["A", "B"],
"index_col not correctly set")
self.assertEqual(result.columns.tolist(), ["C", "D"],
"columns not set correctly whith index_col")
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query(
"SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql(
"SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table('iris', self.conn)
iris_frame2 = sql.read_sql('iris', self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
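        # reading 'other_table' should not emit reflection warnings about the
        # unrelated 'invalid' table (whose column type is unknown)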
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table('other_table', self.conn)
sql.read_sql_query('SELECT * FROM other_table', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for other table")
def test_warning_case_insensitive_table_name(self):
# see GH7815.
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql('CaseSensitive', self.conn)
# Verify some things
self.assertEqual(len(w), 0, "Warning triggered for writing a table")
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
self.assertTrue(isinstance(table.table.c['time'].type, sqltypes.DateTime))
def test_to_sql_read_sql_with_database_uri(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
#db_uri = 'sqlite:///:memory:' # raises sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = 'sqlite:///' + name
table = 'iris'
test_frame1.to_sql(table, db_uri, if_exists='replace', index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = 'SELECT * FROM iris'
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table('iris', metadata,
sa.Column('SepalLength', sa.REAL),
sa.Column('SepalWidth', sa.REAL),
sa.Column('PetalLength', sa.REAL),
sa.Column('PetalWidth', sa.REAL),
sa.Column('Name', sa.TEXT)
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text('select * from iris where name=:name')
iris_df = sql.read_sql(name_text, self.conn, params={'name': 'Iris-versicolor'})
all_names = set(iris_df['Name'])
self.assertEqual(all_names, set(['Iris-versicolor']))
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(iris.c.Name == sqlalchemy.bindparam('name'))
iris_df = sql.read_sql(name_select, self.conn, params={'name': 'Iris-setosa'})
all_names = set(iris_df['Name'])
self.assertEqual(all_names, set(['Iris-setosa']))
class _EngineToConnMixin(object):
"""
    A mixin that swaps the engine created during setup for a Connection,
    so the tests run against a Connection rather than an Engine.
"""
def setUp(self):
super(_EngineToConnMixin, self).setUp()
engine = self.conn
conn = engine.connect()
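        # wrap each test in an explicit transaction that tearDown rolls back,
        # so running against a Connection leaves the database unchanged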
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
def tearDown(self):
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
super(_EngineToConnMixin, self).tearDown()
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = 'sqlite'
mode = 'fallback'
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
        # Test that database IO still works if the connection is closed
        # between the writing and the reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn,
flavor="sqlite", index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;",
conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
self.assertRaises(sql.DatabaseError, sql.read_sql, 'iris', self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b ']) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn,
flavor="sqlite", index=False)
def test_get_schema2(self):
        # without providing a connection object (available for backwards
        # compatibility)
create_sql = sql.get_schema(self.test_frame1, 'test', 'sqlite')
self.assertTrue('CREATE' in create_sql)
def test_tquery(self):
with tm.assert_produces_warning(FutureWarning):
iris_results = sql.tquery("SELECT * FROM iris", con=self.conn)
row = iris_results[0]
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, 'Iris-setosa'])
def test_uquery(self):
with tm.assert_produces_warning(FutureWarning):
rows = sql.uquery("SELECT * FROM iris LIMIT 1", con=self.conn)
self.assertEqual(rows, -1)
def _get_sqlite_column_type(self, schema, column):
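        # scan the CREATE TABLE schema line by line: the first token is the
        # (possibly quoted) column name, the second its declared type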
for col in schema.split('\n'):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError('Column %s not found' % (column))
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame({'time': to_datetime(['201412120154', '201412110254'],
utc=True)})
db = sql.SQLiteDatabase(self.conn, self.flavor)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
self.assertEqual(self._get_sqlite_column_type(schema, 'time'),
"TIMESTAMP")
#------------------------------------------------------------------------------
#--- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor = None
@classmethod
def setUpClass(cls):
cls.setup_import()
cls.setup_driver()
# test connection
try:
conn = cls.connect()
conn.connect()
except sqlalchemy.exc.OperationalError:
msg = "{0} - can't connect to {1} server".format(cls, cls.flavor)
raise nose.SkipTest(msg)
def setUp(self):
self.setup_connect()
self._load_iris_data()
self._load_raw_sql()
self._load_test1_data()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
raise nose.SkipTest('SQLAlchemy not installed')
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
raise nose.SkipTest("Can't connect to {0} server".format(self.flavor))
def test_aread_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, 'temp_frame')
self.assertTrue(
temp_conn.has_table('temp_frame'), 'Table not written to DB')
pandasSQL.drop_table('temp_frame')
self.assertFalse(
temp_conn.has_table('temp_frame'), 'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=['SepalLength', 'SepalLength'])
tm.equalContents(
iris_frame.columns.values, ['SepalLength', 'SepalLength'])
def test_read_table_absent(self):
self.assertRaises(
ValueError, sql.read_sql_table, "this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.bool_),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA values becomes object
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.object),
"BoolColWithNull loaded with incorrect type")
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={'i64':[2**62]})
df.to_sql('test_bigint', self.conn, index=False)
result = sql.read_sql_table('test_bigint', self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_datetime_with_timezone(self):
        # edge case that converts postgresql datetime with time zone types
        # to datetime64[ns, psycopg2.tz.FixedOffsetTimezone...], which is ok,
        # but datetime64[ns] is more natural, so coerce to that for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if com.is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00'))
# "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00'))
elif com.is_datetime64tz_dtype(col.dtype):
self.assertTrue(str(col.dt.tz) == 'UTC')
# "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00', tz='UTC'))
# "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00', tz='UTC'))
else:
raise AssertionError("DateCol loaded with incorrect type -> {0}".format(col.dtype))
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df,'DateColWithTz'):
raise nose.SkipTest("no column with datetime with time zone")
        # this is parsed on Travis (linux), but not on macosx for some reason,
        # even with the same versions of psycopg2 & sqlalchemy, possibly a
        # PostgreSQL server version difference
col = df.DateColWithTz
self.assertTrue(com.is_object_dtype(col.dtype) or com.is_datetime64_dtype(col.dtype) \
or com.is_datetime64tz_dtype(col.dtype),
"DateCol loaded with incorrect type -> {0}".format(col.dtype))
df = pd.read_sql_query("select * from types_test_data", self.conn, parse_dates=['DateColWithTz'])
if not hasattr(df,'DateColWithTz'):
raise nose.SkipTest("no column with datetime with time zone")
check(df.DateColWithTz)
        df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
                                              self.conn, chunksize=1)),
                       ignore_index=True)
col = df.DateColWithTz
self.assertTrue(com.is_datetime64tz_dtype(col.dtype),
"DateCol loaded with incorrect type -> {0}".format(col.dtype))
self.assertTrue(str(col.dt.tz) == 'UTC')
expected = sql.read_sql_table("types_test_data", self.conn)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz.astype('datetime64[ns, UTC]'))
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates=['DateCol'])
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn,
parse_dates={'DateCol': '%Y-%m-%d %H:%M:%S'})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
df = sql.read_sql_table("types_test_data", self.conn, parse_dates={
'DateCol': {'format': '%Y-%m-%d %H:%M:%S'}})
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=['IntDateCol'])
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': 's'})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={'IntDateCol': {'unit': 's'}})
self.assertTrue(issubclass(df.IntDateCol.dtype.type, np.datetime64),
"IntDateCol loaded with incorrect type")
def test_datetime(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.to_sql('test_datetime', self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
result = result.drop('index', axis=1)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        # datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
result = result.drop('index', axis=1)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame({'A': date_range('2013-01-01 09:00:00', periods=3),
'B': np.arange(3.0)})
df.loc[1, 'A'] = np.nan
df.to_sql('test_datetime', self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table('test_datetime', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type information -> sqlite has no native
        # datetime type
result = sql.read_sql_query('SELECT * FROM test_datetime', self.conn)
if self.flavor == 'sqlite':
self.assertTrue(isinstance(result.loc[0, 'A'], string_types))
result['A'] = to_datetime(result['A'], errors='coerce')
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False)
res = read_sql_table('test_date', self.conn)
# comes back as datetime64
tm.assert_series_equal(res['a'], to_datetime(df['a']))
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql('test_time', self.conn, index=False)
res = read_sql_table('test_time', self.conn)
tm.assert_frame_equal(res, df)
def test_mixed_dtype_insert(self):
# see GH6509
        s1 = Series(2**25 + 1, dtype=np.int32)
        s2 = Series(0.0, dtype=np.float32)
df = DataFrame({'s1': s1, 's2': s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({'A':[0, 1, 2], 'B':[0.2, np.nan, 5.6]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({'A':[0, 1, 2], 'B':[np.nan, np.nan, np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
        # with read_sql -> no type info from table -> stays None
df['B'] = df['B'].astype('object')
df['B'] = None
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({'A':[0, 1, 2], 'B':['a', 'b', np.nan]})
df.to_sql('test_nan', self.conn, index=False)
# NaNs are coming back as None
df.loc[2, 'B'] = None
# with read_table
result = sql.read_sql_table('test_nan', self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query('SELECT * FROM test_nan', self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i['column_names'] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = 'test_get_schema_create_table'
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df)
self.drop_table(tbl)
def test_dtype(self):
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables['dtype_test2'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.TEXT))
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': str})
# GH9083
df.to_sql('dtype_test3', self.conn, dtype={'B': sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables['dtype_test3'].columns['B'].type
self.assertTrue(isinstance(sqltype, sqlalchemy.String))
self.assertEqual(sqltype.length, 10)
def test_notnull_dtype(self):
        cols = {'Bool': Series([True, None]),
                'Date': Series([datetime(2012, 5, 1), None]),
                'Int': Series([1, None], dtype='object'),
                'Float': Series([1.1, None])
                }
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == 'mysql':
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
self.assertTrue(isinstance(col_dict['Bool'].type, my_type))
self.assertTrue(isinstance(col_dict['Date'].type, sqltypes.DateTime))
self.assertTrue(isinstance(col_dict['Int'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['Float'].type, sqltypes.Float))
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame({'f32':Series([V,], dtype='float32'),
'f64':Series([V,], dtype='float64'),
'f64_as_f32':Series([V,], dtype='float64'),
'i32':Series([5,], dtype='int32'),
'i64':Series([5,], dtype='int64'),
})
df.to_sql('test_dtypes', self.conn, index=False, if_exists='replace',
dtype={'f64_as_f32':sqlalchemy.Float(precision=23)})
res = sql.read_sql_table('test_dtypes', self.conn)
# check precision of float64
self.assertEqual(np.round(df['f64'].iloc[0],14),
np.round(res['f64'].iloc[0],14))
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables['test_dtypes'].columns
self.assertEqual(str(col_dict['f32'].type),
str(col_dict['f64_as_f32'].type))
self.assertTrue(isinstance(col_dict['f32'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['f64'].type, sqltypes.Float))
self.assertTrue(isinstance(col_dict['i32'].type, sqltypes.Integer))
self.assertTrue(isinstance(col_dict['i64'].type, sqltypes.BigInteger))
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pydata/pandas/issues/10104
def foo(connection):
query = 'SELECT test_foo_data FROM test_foo_data'
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name='test_foo_data', con=connection, if_exists='append')
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({'test_foo_data': [0, 1, 2]}).to_sql('test_foo_data', self.conn)
main(self.conn)
def test_temporary_table(self):
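        # a TEMPORARY table is only visible on the connection that created
        # it, so create it and read it back within one session/connection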
test_data = u'Hello, World!'
expected = DataFrame({'spam': [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = 'temp_test'
__table_args__ = {'prefixes': ['TEMPORARY']}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(
sql=sqlalchemy.select([Temporary.spam]),
con=conn,
)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
raise nose.SkipTest("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy(object):
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlalchemy.create_engine('sqlite:///:memory:')
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# sqlite has no boolean type, so integer type is returned
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Non-native Bool column with NA values stays as float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
        # IMPORTANT - sqlite has no native date type, so dates shouldn't be parsed
self.assertFalse(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({'a':[1,2]}, dtype='int64')
df.to_sql('test_bigintwarning', self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table('test_bigintwarning', self.conn)
self.assertEqual(len(w), 0, "Warning triggered for other table")
class _TestMySQLAlchemy(object):
"""
Test the sqlalchemy backend against an MySQL database.
"""
flavor = 'mysql'
@classmethod
def connect(cls):
url = 'mysql+{driver}://root@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = 'pymysql'
except ImportError:
raise nose.SkipTest('pymysql not installed')
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
self.assertTrue(issubclass(df.FloatCol.dtype.type, np.floating),
"FloatCol loaded with incorrect type")
self.assertTrue(issubclass(df.IntCol.dtype.type, np.integer),
"IntCol loaded with incorrect type")
# MySQL has no real BOOL type (it's an alias for TINYINT)
self.assertTrue(issubclass(df.BoolCol.dtype.type, np.integer),
"BoolCol loaded with incorrect type")
# Int column with NA values stays as float
self.assertTrue(issubclass(df.IntColWithNull.dtype.type, np.floating),
"IntColWithNull loaded with incorrect type")
# Bool column with NA = int column with NA values => becomes float
self.assertTrue(issubclass(df.BoolColWithNull.dtype.type, np.floating),
"BoolColWithNull loaded with incorrect type")
def test_read_procedure(self):
        # see GH7324. Although it is more of an API test, it is added to the
        # mysql tests as sqlite does not have stored procedures
df = DataFrame({'a': [1, 2, 3], 'b':[0.1, 0.2, 0.3]})
df.to_sql('test_procedure', self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc)
trans.commit()
except:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy(object):
"""
Test the sqlalchemy backend against an PostgreSQL database.
"""
flavor = 'postgresql'
@classmethod
def connect(cls):
url = 'postgresql+{driver}://postgres@localhost/pandas_nosetest'
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
try:
import psycopg2
cls.driver = 'psycopg2'
except ImportError:
raise nose.SkipTest('psycopg2 not installed')
def test_schema_support(self):
        # only test this for postgresql (schemas are not supported in mysql/sqlite)
df = DataFrame({'col1':[1, 2], 'col2':[0.1, 0.2], 'col3':['a', 'n']})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
        # write dataframe to different schemas
df.to_sql('test_schema_public', self.conn, index=False)
df.to_sql('test_schema_public_explicit', self.conn, index=False,
schema='public')
df.to_sql('test_schema_other', self.conn, index=False, schema='other')
# read dataframes back in
res1 = sql.read_sql_table('test_schema_public', self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table('test_schema_public_explicit', self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table('test_schema_public_explicit', self.conn,
schema='public')
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table('test_schema_other', self.conn,
schema='other')
tm.assert_frame_equal(df, res4)
self.assertRaises(ValueError, sql.read_sql_table, 'test_schema_other',
self.conn, schema='public')
## different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql('test_schema_other', self.conn, schema='other', index=False)
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='replace')
df.to_sql('test_schema_other', self.conn, schema='other', index=False,
if_exists='append')
res = sql.read_sql_table('test_schema_other', self.conn, schema='other')
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
## specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema='other')
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, 'test_schema_other2', index=False)
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='replace')
pdsql.to_sql(df, 'test_schema_other2', index=False, if_exists='append')
res1 = sql.read_sql_table('test_schema_other2', self.conn, schema='other')
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
#------------------------------------------------------------------------------
#--- Test Sqlite / MySQL fallback
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = 'sqlite'
@classmethod
def connect(cls):
return sqlite3.connect(':memory:')
def setUp(self):
self.conn = self.connect()
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'sqlite')
self._load_iris_data()
self._load_test1_data()
def test_invalid_flavor(self):
self.assertRaises(
NotImplementedError, sql.SQLiteDatabase, self.conn, 'oracle')
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{'one': [1., 2., 3., 4.], 'two': [4., 3., 2., 1.]})
self.pandasSQL.to_sql(temp_frame, 'drop_test_frame')
self.assertTrue(self.pandasSQL.has_table('drop_test_frame'),
'Table not written to DB')
self.pandasSQL.drop_table('drop_test_frame')
self.assertFalse(self.pandasSQL.has_table('drop_test_frame'),
'Table not deleted from DB')
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql('test_date', self.conn, index=False, flavor=self.flavor)
res = read_sql_query('SELECT * FROM test_date', self.conn)
if self.flavor == 'sqlite':
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == 'mysql':
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
        # test that it raises an error rather than failing silently (GH8341)
if self.flavor == 'sqlite':
self.assertRaises(sqlite3.InterfaceError, sql.to_sql, df,
'test_time', self.conn)
def _get_index_columns(self, tbl_name):
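        # list the table's indexes from sqlite_master, then recover each
        # index's column names via PRAGMA index_info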
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' " +
"AND tbl_name = '%s'" % tbl_name, self.conn)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(
"PRAGMA index_info(%s)" % ix_name, self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
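        # PRAGMA table_info yields (cid, name, type, notnull, dflt_value, pk)
        # rows; return the declared type of the requested column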
recs = self.conn.execute('PRAGMA table_info(%s)' % table)
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError('Table %s, column %s not found' % (table, column))
def test_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
cols = ['A', 'B']
data = [(0.8, True),
(0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql('dtype_test', self.conn)
df.to_sql('dtype_test2', self.conn, dtype={'B': 'STRING'})
# sqlite stores Boolean values as INTEGER
self.assertEqual(self._get_sqlite_column_type('dtype_test', 'B'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type('dtype_test2', 'B'), 'STRING')
self.assertRaises(ValueError, df.to_sql,
'error', self.conn, dtype={'B': bool})
def test_notnull_dtype(self):
if self.flavor == 'mysql':
raise nose.SkipTest('Not applicable to MySQL legacy')
        cols = {'Bool': Series([True, None]),
                'Date': Series([datetime(2012, 5, 1), None]),
                'Int': Series([1, None], dtype='object'),
                'Float': Series([1.1, None])
                }
df = DataFrame(cols)
tbl = 'notnull_dtype_test'
df.to_sql(tbl, self.conn)
self.assertEqual(self._get_sqlite_column_type(tbl, 'Bool'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Date'), 'TIMESTAMP')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Int'), 'INTEGER')
self.assertEqual(self._get_sqlite_column_type(tbl, 'Float'), 'REAL')
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# Raise error on blank
self.assertRaises(ValueError, df.to_sql, "", self.conn,
flavor=self.flavor)
        for ndx, weird_name in enumerate(
                ['test_weird_name]', 'test_weird_name[', 'test_weird_name`',
                 'test_weird_name"', 'test_weird_name\'',
                 '_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
                 '99beginswithnumber', '12345']):
            df.to_sql(weird_name, self.conn, flavor=self.flavor)
            sql.table_exists(weird_name, self.conn)
            df2 = DataFrame([[1, 2], [3, 4]], columns=['a', weird_name])
            c_tbl = 'test_weird_col_name%d' % ndx
            df2.to_sql(c_tbl, self.conn, flavor=self.flavor)
sql.table_exists(c_tbl, self.conn)
class TestMySQLLegacy(MySQLMixIn, TestSQLiteFallback):
"""
Test the legacy mode against a MySQL database.
"""
flavor = 'mysql'
@classmethod
def setUpClass(cls):
cls.setup_driver()
# test connection
try:
cls.connect()
except cls.driver.err.OperationalError:
raise nose.SkipTest("{0} - can't connect to MySQL server".format(cls))
@classmethod
def setup_driver(cls):
try:
import pymysql
cls.driver = pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed')
@classmethod
def connect(cls):
return cls.driver.connect(host='127.0.0.1', user='root', passwd='', db='pandas_nosetest')
def _count_rows(self, table_name):
cur = self._get_exec()
cur.execute(
"SELECT count(*) AS count_1 FROM %s" % table_name)
rows = cur.fetchall()
return rows[0][0]
def setUp(self):
try:
self.conn = self.connect()
except self.driver.err.OperationalError:
raise nose.SkipTest("Can't connect to MySQL server")
self.pandasSQL = sql.SQLiteDatabase(self.conn, 'mysql')
self._load_iris_data()
self._load_test1_data()
def test_a_deprecation(self):
with tm.assert_produces_warning(FutureWarning):
sql.to_sql(self.test_frame1, 'test_frame1', self.conn,
flavor='mysql')
self.assertTrue(
sql.has_table('test_frame1', self.conn, flavor='mysql'),
'Table not written to DB')
def _get_index_columns(self, tbl_name):
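        # SHOW INDEX returns one row per (index, column) pair; group the
        # column names by index name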
ixs = sql.read_sql_query(
"SHOW INDEX IN %s" % tbl_name, self.conn)
ix_cols = {}
for ix_name, ix_col in zip(ixs.Key_name, ixs.Column_name):
if ix_name not in ix_cols:
ix_cols[ix_name] = []
ix_cols[ix_name].append(ix_col)
return list(ix_cols.values())
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_illegal_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
# These tables and columns should be ok
for ndx, ok_name in enumerate(['99beginswithnumber','12345']):
df.to_sql(ok_name, self.conn, flavor=self.flavor, index=False,
if_exists='replace')
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', ok_name])
df2.to_sql('test_ok_col_name', self.conn, flavor=self.flavor, index=False,
if_exists='replace')
# For MySQL, these should raise ValueError
        for ndx, illegal_name in enumerate(
                ['test_illegal_name]', 'test_illegal_name[', 'test_illegal_name`',
                 'test_illegal_name"', 'test_illegal_name\'', '']):
self.assertRaises(ValueError, df.to_sql, illegal_name, self.conn,
flavor=self.flavor, index=False)
df2 = DataFrame([[1, 2], [3, 4]], columns=['a', illegal_name])
            self.assertRaises(ValueError, df2.to_sql,
                              'test_illegal_col_name%d' % ndx,
                              self.conn, flavor=self.flavor, index=False)
#------------------------------------------------------------------------------
#--- Old tests from 0.13.1 (before refactor using sqlalchemy)
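# map Python/numpy types to callables rendering a value as a SQL literal;
# used by format_query below to interpolate arguments into raw SQL strings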
_formatters = {
datetime: lambda dt: "'%s'" % date_format(dt),
str: lambda x: "'%s'" % x,
np.str_: lambda x: "'%s'" % x,
compat.text_type: lambda x: "'%s'" % x,
compat.binary_type: lambda x: "'%s'" % x,
float: lambda x: "%.8f" % x,
int: lambda x: "%s" % x,
type(None): lambda x: "NULL",
np.float64: lambda x: "%.10f" % x,
bool: lambda x: "'%s'" % x,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isnull(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
def _skip_if_no_pymysql():
try:
import pymysql
except ImportError:
raise nose.SkipTest('pymysql not installed, skipping')
class TestXSQLite(SQLiteMixIn, tm.TestCase):
def setUp(self):
self.conn = sqlite3.connect(':memory:')
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_frame("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.ix[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_frame("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'sqlite', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY ("A", "B")' in create_sql)
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
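        # inserting a duplicate primary key should raise; stdout is redirected
        # to suppress any error output printed by the legacy helpers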
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.conn)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.conn)
finally:
sys.stdout = sys.__stdout__
# Initialize connection again (needed for tearDown)
self.setUp()
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.write_frame(frame, name='test_table', con=self.conn)
result = sql.read_frame("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
frame2['Idx'] = Index(lrange(len(frame2))) + 10
sql.write_frame(frame2, name='test_table2', con=self.conn)
result = sql.read_frame("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
expected.index = Index(lrange(len(frame2))) + 10
expected.index.name = 'Idx'
tm.assert_frame_equal(expected, result)
def test_tquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.conn)
result = sql.tquery("select A from test_table", self.conn)
        expected = Series(frame.A.values, frame.index)  # built from values so it has no name
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
frame = tm.makeTimeDataFrame()
sql.write_frame(frame, name='test_table', con=self.conn)
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.conn), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
        '''
        Writing a frame whose column name is a SQL keyword ('From') should work.
        '''
        df = DataFrame({'From': np.ones(5)})
        sql.write_frame(df, con=self.conn, name='testkeywords')
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
        mono_df = DataFrame([1, 2], columns=['c0'])
        sql.write_frame(mono_df, con=self.conn, name='mono_df')
        # computing the sum via sql
        con_x = self.conn
        the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
        # it should not fail, and gives 3 (Issue #3628)
        self.assertEqual(the_sum, 3)
        result = sql.read_frame("select * from mono_df", con_x)
        tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='sqlite',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='sqlite',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='sqlite', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='sqlite', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='sqlite', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
class TestXMySQL(MySQLMixIn, tm.TestCase):
@classmethod
def setUpClass(cls):
_skip_if_no_pymysql()
# test connection
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def setUp(self):
_skip_if_no_pymysql()
import pymysql
try:
# Try Travis defaults.
# No real user should allow root access with a blank password.
self.conn = pymysql.connect(host='localhost', user='root', passwd='',
db='pandas_nosetest')
except:
pass
else:
return
try:
self.conn = pymysql.connect(read_default_group='pandas')
except pymysql.ProgrammingError as e:
raise nose.SkipTest(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
except pymysql.Error as e:
raise nose.SkipTest(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf. ")
def test_basic(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
frame.ix[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
sql.tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_frame("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame)
def test_execute(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql')
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
row = frame.ix[0].values.tolist()
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_frame("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
_skip_if_no_pymysql()
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, 'test', 'mysql')
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(' ')
if len(tokens) == 2 and tokens[0] == 'A':
self.assertTrue(tokens[1] == 'DATETIME')
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, 'test', 'mysql', keys=['A', 'B'],)
lines = create_sql.splitlines()
self.assertTrue('PRIMARY KEY (`A`, `B`)' in create_sql)
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
def test_execute_fail(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.execute,
'INSERT INTO test VALUES("foo", "bar", 7)',
self.conn)
finally:
sys.stdout = sys.__stdout__
def test_execute_closed_connection(self):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a(5), b(5))
);
"""
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
try:
sys.stdout = StringIO()
self.assertRaises(Exception, sql.tquery, "select * from test",
con=self.conn)
finally:
sys.stdout = sys.__stdout__
# Initialize connection again (needed for tearDown)
self.setUp()
def test_na_roundtrip(self):
_skip_if_no_pymysql()
pass
def _check_roundtrip(self, frame):
_skip_if_no_pymysql()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.conn, flavor='mysql')
result = sql.read_frame("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
result.index.name = frame.index.name
expected = frame
tm.assert_frame_equal(result, expected)
frame['txt'] = ['a'] * len(frame)
frame2 = frame.copy()
index = Index(lrange(len(frame2))) + 10
frame2['Idx'] = index
drop_sql = "DROP TABLE IF EXISTS test_table2"
cur = self.conn.cursor()
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "Unknown table.*")
cur.execute(drop_sql)
sql.write_frame(frame2, name='test_table2', con=self.conn, flavor='mysql')
result = sql.read_frame("select * from test_table2", self.conn,
index_col='Idx')
expected = frame.copy()
# HACK! Change this once indexes are handled properly.
expected.index = index
expected.index.names = result.index.names
tm.assert_frame_equal(expected, result)
def test_tquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.conn, flavor='mysql')
result = sql.tquery("select A from test_table", self.conn)
        expected = Series(frame.A.values, frame.index)  # built without a name, to match tquery output
result = Series(result, frame.index)
tm.assert_series_equal(result, expected)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'select * from blah', con=self.conn, retry=True)
finally:
sys.stdout = sys.__stdout__
def test_uquery(self):
try:
import pymysql
except ImportError:
raise nose.SkipTest("no pymysql")
frame = tm.makeTimeDataFrame()
drop_sql = "DROP TABLE IF EXISTS test_table"
cur = self.conn.cursor()
cur.execute(drop_sql)
sql.write_frame(frame, name='test_table', con=self.conn, flavor='mysql')
stmt = 'INSERT INTO test_table VALUES(2.314, -123.1, 1.234, 2.3)'
self.assertEqual(sql.uquery(stmt, con=self.conn), 1)
try:
sys.stdout = StringIO()
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn)
self.assertRaises(sql.DatabaseError, sql.tquery,
'insert into blah values (1)', con=self.conn,
retry=True)
finally:
sys.stdout = sys.__stdout__
def test_keyword_as_column_names(self):
        '''
        Writing a frame whose column name is a SQL keyword ('From') should work.
        '''
        _skip_if_no_pymysql()
        df = DataFrame({'From': np.ones(5)})
        sql.write_frame(df, con=self.conn, name='testkeywords',
                        if_exists='replace', flavor='mysql')
def test_if_exists(self):
_skip_if_no_pymysql()
df_if_exists_1 = DataFrame({'col1': [1, 2], 'col2': ['A', 'B']})
df_if_exists_2 = DataFrame({'col1': [3, 4, 5], 'col2': ['C', 'D', 'E']})
table_name = 'table_if_exists'
sql_select = "SELECT * FROM %s" % table_name
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
# test if invalid value for if_exists raises appropriate error
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='mysql',
if_exists='notvalidvalue')
clean_up(table_name)
# test if_exists='fail'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='mysql', if_exists='fail')
self.assertRaises(ValueError,
sql.write_frame,
frame=df_if_exists_1,
con=self.conn,
name=table_name,
flavor='mysql',
if_exists='fail')
# test if_exists='replace'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='mysql', if_exists='replace')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
# test if_exists='append'
sql.write_frame(frame=df_if_exists_1, con=self.conn, name=table_name,
flavor='mysql', if_exists='fail')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B')])
sql.write_frame(frame=df_if_exists_2, con=self.conn, name=table_name,
flavor='mysql', if_exists='append')
self.assertEqual(sql.tquery(sql_select, con=self.conn),
[(1, 'A'), (2, 'B'), (3, 'C'), (4, 'D'), (5, 'E')])
clean_up(table_name)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| 37.637596 | 147 | 0.588679 |
aced9377220b555d87e208bb213d1375f7cde20d | 1,822 | py | Python | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py | HussainAther/nipype | 7e33d086fd5cea6ef6de99ee3e35929c1d5730d4 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py | HussainAther/nipype | 7e33d086fd5cea6ef6de99ee3e35929c1d5730d4 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/semtools/utilities/tests/test_auto_BRAINSAlignMSP.py | HussainAther/nipype | 7e33d086fd5cea6ef6de99ee3e35929c1d5730d4 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brains import BRAINSAlignMSP
def test_BRAINSAlignMSP_inputs():
input_map = dict(BackgroundFillValue=dict(argstr='--BackgroundFillValue %s',
),
OutputresampleMSP=dict(argstr='--OutputresampleMSP %s',
hash_files=False,
),
args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
interpolationMode=dict(argstr='--interpolationMode %s',
),
mspQualityLevel=dict(argstr='--mspQualityLevel %d',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
rescaleIntensities=dict(argstr='--rescaleIntensities ',
),
rescaleIntensitiesOutputRange=dict(argstr='--rescaleIntensitiesOutputRange %s',
sep=',',
),
resultsDir=dict(argstr='--resultsDir %s',
hash_files=False,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
trimRescaledIntensities=dict(argstr='--trimRescaledIntensities %f',
),
verbose=dict(argstr='--verbose ',
),
writedebuggingImagesLevel=dict(argstr='--writedebuggingImagesLevel %d',
),
)
inputs = BRAINSAlignMSP.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_BRAINSAlignMSP_outputs():
output_map = dict(OutputresampleMSP=dict(),
resultsDir=dict(),
)
outputs = BRAINSAlignMSP.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| 29.387097 | 83 | 0.669045 |
aced942b402881db13cac1df58674d6197cf6151 | 7,371 | py | Python | config/NASFPN/retina_r50v1b_nasfpn_1280_7@384_25epoch.py | happywu/simpledet-1 | 5d1de1edfbe745b05b49d9c19eca1e496ded11b7 | [
"Apache-2.0"
] | 3,195 | 2019-01-29T09:08:46.000Z | 2022-03-29T08:20:44.000Z | config/NASFPN/retina_r50v1b_nasfpn_1280_7@384_25epoch.py | happywu/simpledet-1 | 5d1de1edfbe745b05b49d9c19eca1e496ded11b7 | [
"Apache-2.0"
] | 275 | 2019-01-29T10:16:12.000Z | 2022-03-15T17:56:39.000Z | config/NASFPN/retina_r50v1b_nasfpn_1280_7@384_25epoch.py | happywu/simpledet-1 | 5d1de1edfbe745b05b49d9c19eca1e496ded11b7 | [
"Apache-2.0"
] | 563 | 2019-01-29T09:32:07.000Z | 2022-03-22T06:58:01.000Z | from models.retinanet.builder import RetinaNet as Detector
from models.NASFPN.builder import ResNetV1bFPN as Backbone
from models.NASFPN.builder import NASFPNNeck as Neck
from models.NASFPN.builder import RetinaNetHeadWithBN as RpnHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 2 if is_train else 1
fp16 = True
class KvstoreParam:
kvstore = "local"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="syncbn", ndev=8, wd_mult=1.0)
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 50
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
dim_reduced = 384
num_stage = 7
S0_kernel = 1
class RpnParam:
num_class = 1 + 80
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
sync_loss = True
class anchor_generate:
scale = (4 * 2 ** 0, 4 * 2 ** (1.0 / 3.0), 4 * 2 ** (2.0 / 3.0))
ratio = (0.5, 1.0, 2.0)
stride = (8, 16, 32, 64, 128)
image_anchor = None
class head:
conv_channel = 256
mean = None
std = None
class proposal:
pre_nms_top_n = 1000
post_nms_top_n = None
nms_thr = None
min_bbox_side = None
min_det_score = 0.05 # filter score in network
class focal_loss:
alpha = 0.25
gamma = 2.0
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = None
image_roi = None
batch_image = None
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = None
stride = None
class DatasetParam:
if is_train:
image_set = ("coco_train2017", "coco_val2017")
else:
image_set = ("coco_test-dev2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head)
test_sym = None
else:
train_sym = None
test_sym = detector.get_test_symbol(backbone, neck, rpn_head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage4_unit3_relu"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = None
class schedule:
begin_epoch = 0
end_epoch = 25
lr_iter = [15272 * 15 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
15272 * 20 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.001 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
iter = 15272 * 1 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)
class TestParam:
min_det_score = 0 # filter appended boxes
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_val2017.json"
# data processing
class NormParam:
mean = (123.688, 116.779, 103.939) # RGB order
std = (58.393, 57.12, 57.375)
class ResizeParam:
short = 1280
long = 1280
scale_min = 0.8
scale_max = 1.2
class PadParam:
short = ResizeParam.short
long = ResizeParam.long
max_num_gt = 100
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.short = (160, 80, 40, 20, 10)
self.long = (160, 80, 40, 20, 10)
self.stride = (8, 16, 32, 64, 128)
scales = (4 * 2 ** 0, 4 * 2 ** (1.0 / 3.0), 4 * 2 ** (2.0 / 3.0))
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 9999
pos_thr = 0.5
neg_thr = 0.5
min_pos_thr = 0.0
class sample:
image_anchor = None
pos_fraction = None
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord
from models.NASFPN.input import RandResizeCrop2DImageBbox, ResizeCrop2DImageBbox
from models.retinanet.input import PyramidAnchorTarget2D, Norm2DImage, \
AverageFgCount
if is_train:
transform = {
"sample": [
ReadRoiRecord(None),
Norm2DImage(NormParam),
RandResizeCrop2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
PyramidAnchorTarget2D(AnchorTarget2DParam()),
RenameRecord(RenameParam.mapping)
],
"batch": [
AverageFgCount("rpn_fg_count")
]
}
data_name = ["data"]
label_name = ["rpn_cls_label", "rpn_fg_count", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
ResizeCrop2DImageBbox(ResizeParam),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
from models.retinanet import metric
rpn_acc_metric = metric.FGAccMetric(
"FGAcc",
["cls_loss_output"],
["rpn_cls_label"]
)
metric_list = [rpn_acc_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
| 29.134387 | 95 | 0.547144 |
aced9443ad1f3f1f92e9fce28f410d7d184285a9 | 41,272 | py | Python | donationlogging/dono.py | Izumi-DiscordBot/cray-cogs | ef4c3bb40dc32b458a64c789bbf7971aa2389578 | [
"MIT"
] | null | null | null | donationlogging/dono.py | Izumi-DiscordBot/cray-cogs | ef4c3bb40dc32b458a64c789bbf7971aa2389578 | [
"MIT"
] | 2 | 2021-11-19T09:17:10.000Z | 2021-11-19T09:17:52.000Z | donationlogging/dono.py | Izumi-DiscordBot/cray-cogs | ef4c3bb40dc32b458a64c789bbf7971aa2389578 | [
"MIT"
] | null | null | null | import asyncio
import re
import time
import typing
from collections import namedtuple
import discord
from discord import Member
from discord.ext.commands.converter import Greedy, RoleConverter
from discord.ext.commands.view import StringView
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.commands import RedHelpFormatter
from redbot.core.utils import mod
from redbot.core.utils.chat_formatting import humanize_list, humanize_number
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu, start_adding_reactions
from redbot.core.utils.predicates import MessagePredicate, ReactionPredicate
from .utils import *
class flags(commands.Converter):
"""
This is a custom flag parsing class made by me with help from skelmis (ethan) from menudocs."""
def __init__(self, *, delim=None, start=None):
self.delim = delim or " "
self.start = start or "--"
async def convert(self, ctx, argument):
x = True
argless = []
data = {None: []}
argument = argument.split(self.start)
if (length := len(argument)) == 1:
# No flags
argless.append(argument[0])
x = False # Don't loop
i = 0
while x:
if i >= length:
# Have consumed all
break
if self.delim in argument[i]:
# Get the arg name minus start state
arg = argument[i].split(self.delim, 1)
if len(arg) == 1:
                    # Arg has no value, so it's argless.
                    # This still strips the start marker and delimiter, but keep the text itself.
                    argless.append(arg[0])
i += 1
continue
arg_name = arg[0]
arg_value = arg[1].strip()
data[arg_name] = arg_value
else:
argless.append(argument[i])
i += 1
# Time to manipulate argless
# into the same expected string pattern
# as dpy's argparsing
for arg in argless:
view = StringView(arg)
while not view.eof:
word = view.get_quoted_word()
data[None].append(word)
view.skip_ws()
if not bool(data[None]):
data.pop(None)
return data
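# Illustrative sketch (assumed behaviour traced from flags.convert above, not
# an exhaustive spec): the trailing text of `dono add 1000 @user --note hes cute`
# is converted roughly like this:
#
#   await flags().convert(ctx, "--note hes cute")  # -> {'note': 'hes cute'}
#
# Each '--name value' pair becomes a dict entry; a segment containing no
# delimiter at all is collected word-by-word under the None key instead.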
class DonationLogging(commands.Cog):
"""
Donation logging commands. Helps you in counting and tracking user donations and automatically assigning them roles.
"""
__version__ = "1.5.0"
__author__ = ["crayyy_zee#2900"]
def __init__(self, bot: Red):
self.bot = bot
self.config = Config.get_conf(self, identifier=123_6969_420)
self.cache = {}
default_guild = {
"managers": [],
"logchannel": None,
"donations": {},
"assignroles": {},
"currency": "⏣",
"autoadd": False,
"autoremove": False,
}
default_member = {"donations": 0, "notes": {}}
self.config.register_member(**default_member)
self.config.register_guild(**default_guild)
asyncio.create_task(self.to_cache())
return
def format_help_for_context(self, ctx: commands.Context):
pre_processed = super().format_help_for_context(ctx)
n = "\n" if "\n\n" not in pre_processed else ""
text = [
f"{pre_processed}{n}",
f"Cog Version: **{self.__version__}**",
f"Author: {humanize_list(self.__author__)}",
]
return "\n".join(text)
def cog_unload(self):
asyncio.create_task(self.to_config())
async def red_delete_data_for_user(self, *, requester, user_id: int):
if requester not in ("discord_deleted_user", "user"):
return
for guild, data in self.cache.items():
try:
del data[str(user_id)]
except KeyError:
continue
async def to_cache(self):
data = await self.config.all_members()
final = {}
for guild, memberdata in data.items():
final[guild] = {}
for k, v in memberdata.items():
final[guild][k] = v["donations"]
self.cache = final
async def to_config(self):
for guild, memberdata in self.cache.items():
for member, data in memberdata.items():
await self.config.member_from_ids(int(guild), int(member)).donations.set(data)
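    # For illustration, the in-memory layout mirrored to config above is:
    #   self.cache = {guild_id: {member_id: donated_amount, ...}, ...}
    # e.g. {1234: {5678: 10000}} means member 5678 has donated 10000 in
    # guild 1234 (the ids here are placeholders).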
async def GetMessage(self, ctx: commands.Context, contentOne, contentTwo, timeout=100):
embed = discord.Embed(
title=f"{contentOne}",
description=f"{contentTwo}",
color=await ctx.embed_color(),
)
sent = await ctx.send(embed=embed)
try:
msg = await self.bot.wait_for(
"message",
timeout=timeout,
check=lambda message: message.author == ctx.author
and message.channel == ctx.channel,
)
if msg:
return msg.content
except asyncio.TimeoutError:
return False
def is_dmgr():
async def predicate(ctx):
data = await ctx.cog.config.guild(ctx.guild).managers()
if data:
for i in data:
role = ctx.guild.get_role(int(i))
if role and role in ctx.author.roles:
return True
elif ctx.author.guild_permissions.administrator == True:
return True
elif await mod.is_mod_or_superior(ctx.bot, ctx.author) == True:
return True
return commands.check(predicate)
async def open_account(self, user, guild):
data = self.cache.get(guild.id)
if data:
if user.id in data:
return False
else:
data[user.id] = 0
return True
else:
self.cache[guild.id] = {}
self.cache[guild.id][user.id] = 0
return True
async def get_data(self, user, guild):
await self.open_account(user, guild)
data = self.cache.get(guild.id)
if donos := data.get(user.id):
return donos
else:
data[user.id] = 0
donos = data[user.id]
return donos
async def donoroles(self, ctx, user: Member, amount):
if not await self.config.guild(ctx.guild).autoadd():
return f"Auto role adding is disabled for this server. Enable with `{ctx.prefix}donoset autorole add true`."
try:
data = await self.config.guild(ctx.guild).assignroles()
roles = []
for key, value in data.items():
if amount >= int(key):
if isinstance(value, list):
# role = [ctx.guild.get_role(int(i)) for i in value]
for i in value:
role = ctx.guild.get_role(int(i))
if role not in user.roles:
try:
await user.add_roles(
role,
reason=f"Automatic role adding based on donation logging, requested by {ctx.author}",
)
roles.append(f"`{role.name}`")
except:
pass
elif isinstance(value, int):
role = ctx.guild.get_role(int(value))
if role not in user.roles:
try:
await user.add_roles(
role,
reason=f"Automatic role adding based on donation logging, requested by {ctx.author}",
)
roles.append(f"`{role.name}`")
except:
pass
roleadded = (
f"The following roles were added to `{user.name}`: {humanize_list(roles)}"
if roles
else ""
)
return roleadded
except:
pass
async def remove_roles(self, ctx, user: Member, amount):
if not await self.config.guild(ctx.guild).autoremove():
return f"Auto role removing is disabled for this server. Enable with `{ctx.prefix}donoset autorole remove true`."
try:
data = await self.config.guild(ctx.guild).assignroles()
roles_removed = []
for key, value in data.items():
if amount < int(key):
if isinstance(value, list):
# role = [ctx.guild.get_role(int(i)) for i in value]
for i in value:
role = ctx.guild.get_role(int(i))
if role in user.roles:
try:
await user.remove_roles(
role,
reason=f"Automatic role removing based on donation logging, requested by {ctx.author}",
)
roles_removed.append(f"`{role.name}`")
except:
pass
elif isinstance(value, int):
role = ctx.guild.get_role(int(value))
if role in user.roles:
try:
await user.remove_roles(
role,
reason=f"Automatic role removing based on donation logging, requested by {ctx.author}",
)
roles_removed.append(f"`{role.name}`")
except:
pass
roleadded = (
f"The following roles were removed from `{user}` {humanize_list(roles_removed)}"
if roles_removed
else ""
)
return roleadded
except:
pass
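    # For illustration, the `assignroles` guild setting consumed by the two
    # methods above maps a donation threshold (stored as a string key) to one
    # role id or a list of up to two role ids, e.g.:
    #   {"10000": 111111111111111111, "50000": [222222222222222222, 333333333333333333]}
    # Reaching 50000 would add both roles; dropping below it removes them
    # again when auto-removing is enabled. (Ids here are placeholders.)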
@commands.group(
name="dono",
help="Donation logging. (most subcommands require admin perms or manager role) Run `{pre}dono setup` before using any commands.",
description="Parameters:\n\nNone",
invoke_without_command=True,
)
async def dono(self, ctx):
await ctx.send_help("dono")
@dono.command(name="setup")
@commands.guild_only()
@commands.has_permissions(administrator=True)
async def setup(self, ctx):
"""
A step by step interactive setup command.
This helps you setup the logging channel and the manager roles.
Alternatively you can use the `[p]donoset managers` and `[p]donoset logchannel` commands."""
await ctx.send(
"Ok so you want to setup donation logging for your server. Type 'yes' to start the process and `no` to cancel."
)
pred = MessagePredicate.yes_or_no(ctx, ctx.channel, ctx.author)
try:
message = await self.bot.wait_for("message", timeout=30, check=pred)
except asyncio.TimeoutError:
return await ctx.send("You didn't answer in time. Please try again and answer faster.")
if pred.result:
await ctx.send(
"ok lets do this. Please provide answers to the following questions properly."
)
else:
return await ctx.send("Some other time i guess.")
questions = [
[
"Which roles do you want to be able to manage donations?",
"You can provide multiple roles. Send their ids all in one message separated by a space.",
],
[
"Which channel do you want the donations to be logged to?",
"Type 'None' if you dont want that",
],
]
answers = {}
for i, question in enumerate(questions):
answer = await self.GetMessage(ctx, question[0], question[1])
if not answer:
await ctx.send("You didn't answer in time. Please try again and answer faster.")
return
answers[i] = answer
try:
roleids = answers[0].split()
roles = []
failed = []
for id in roleids:
role = ctx.guild.get_role(int(id))
if not role:
failed.append(id)
else:
roles.append(role)
except:
await ctx.send("You didn't provide a proper role id. Try again.")
return
if answers[1].lower() != "none":
try:
chan = re.findall(r"[0-9]+", answers[1])[0]
channel = self.bot.get_channel(int(chan))
ch = channel.id
except:
await ctx.send("You didn't provide a proper channel.")
return
else:
ch = None
emb = discord.Embed(
title="Is all this information valid?",
color=await ctx.embed_color(),
)
emb.add_field(
name=f"Question: `{questions[0][0]}`",
value=f"Answer: `{' '.join([role.name for role in roles])}\n{'Couldnt find roles with following ids'+' '.join([i for i in failed]) if failed else ''}`",
inline=False,
)
emb.add_field(
name=f"Question: `{questions[1][0]}`",
value="Answer: `{}`".format(f"#{channel.name}" if ch else "None"),
inline=False,
)
confirmation = await ctx.send(embed=emb)
start_adding_reactions(confirmation, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(confirmation, ctx.author)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=30)
except:
return await ctx.send("Request timed out.")
if not pred.result:
return await ctx.send("Aight, retry the command and do it correctly this time.")
await self.config.guild(ctx.guild).logchannel.set(ch)
await self.config.guild(ctx.guild).managers.set([role.id for role in roles])
return await ctx.send(
f"Alright. I've noted that down, Do you want to setup autoroles too? Use the `{ctx.prefix}donoset roles` command."
)
@dono.command(name="roles")
@commands.guild_only()
@commands.has_guild_permissions(administrator=True)
async def roles(self, ctx):
"""
Shows the donation autoroles setup for the server
These can be setup with `[p]donoset roles`"""
data = await self.config.guild(ctx.guild).assignroles()
embed = discord.Embed(
title=f"Donation autoroles for {ctx.guild.name}",
color=await ctx.embed_color(),
)
embed.set_footer(text=f"{ctx.guild.name}", icon_url=ctx.author.avatar.url)
emoji = await self.config.guild(ctx.guild).currency()
if data:
rolelist = ""
for key, value in data.items():
if isinstance(value, list):
role = [ctx.guild.get_role(int(i)).mention for i in value]
rolelist += "{} for amount: {} {:,}\n\n".format(
humanize_list(role), emoji, int(key)
)
elif isinstance(value, int):
role = ctx.guild.get_role(int(value))
rolelist += "{} for amount: {} {:,}\n\n".format(role.mention, emoji, int(key))
embed.description = f"{rolelist}"
elif not data:
embed.description = f"There are no autoroles setup for this guild.\nRun `{ctx.prefix}dono setroles` to set them up."
if not await self.config.guild(ctx.guild).autoadd():
embed.set_footer(
text="These roles are dull and wont be automatically added/removed since auto adding of roles is disabled for this server."
)
await ctx.send(embed=embed)
@dono.command(name="bal", aliases=["mydono"])
@commands.guild_only()
async def bal(self, ctx):
"""
        Check the amount you have donated in the current server.
        For admins: to check others' donations, use `[p]dono check`."""
donos = await self.get_data(ctx.author, ctx.guild)
emoji = await self.config.guild(ctx.guild).currency()
embed = discord.Embed(
title=f"Your donations in **__{ctx.guild.name}__**",
description="Total amount donated: {} *{:,}*".format(emoji, donos),
color=await ctx.embed_color(),
)
embed.set_author(name=ctx.author, icon_url=ctx.author.avatar.url)
embed.set_footer(
text="Thanks for donating. Keep donating for awesome perks. <3",
icon_url=ctx.guild.icon.url,
)
await ctx.send(embed=embed)
async def dono_Add(self, ctx, user, amount):
await self.open_account(user, ctx.guild)
self.cache[ctx.guild.id][user.id] += amount
return await self.get_data(user, ctx.guild)
async def dono_log(self, ctx, action, user, amount, donos, role=None, note=None):
emoji = await self.config.guild(ctx.guild).currency()
embed = discord.Embed(
title="***__Added!__***" if action.lower() == "add" else "***__Removed!__***",
description=f"{emoji} {humanize_number(amount)} was {'added to' if action.lower() == 'add' else 'removed from'} {user.name}'s donations balance.\n",
color=await ctx.embed_color(),
)
embed.add_field(
name="Note: ",
value=note if note else "No note taken.",
inline=False,
)
embed.add_field(
name="Their total donations are: ",
value="{} {:,}".format(emoji, donos),
)
embed.add_field(
name="Jump Link To The Command:",
value=f"[click here]({ctx.message.jump_url})",
)
embed.set_footer(
text=f"Command executed by: {ctx.author.display_name}",
icon_url=ctx.guild.icon.url,
)
chanid = await self.config.guild(ctx.guild).logchannel()
if chanid and chanid != "none":
if isinstance(chanid, str):
log = discord.utils.find(lambda m: m.name == chanid, ctx.guild.channels)
else:
log = await self.bot.fetch_channel(int(chanid))
if log:
await log.send(role, embed=embed)
else:
await ctx.send(role + "\n Couldn't find the logging channel.", embed=embed)
await ctx.message.add_reaction("✅")
elif not chanid:
await ctx.send(role, embed=embed)
async def add_note(self, member, message, flag={}):
if note := flag.get("note"):
data = {
"content": note,
"message_id": message.id,
"channel_id": message.channel.id,
"author": message.author.id,
"at": int(time.time()),
}
async with self.config.member(member).notes() as notes:
if not notes:
notes[1] = data
else:
notes[len(notes) + 1] = data
return data["content"]
return
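    # For illustration, the per-member `notes` config written above round-trips
    # to a shape like this (keys become strings once serialized):
    #   {"1": {"content": "hes cute", "message_id": ..., "channel_id": ...,
    #          "author": <user id>, "at": <unix timestamp>}, "2": {...}}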
@dono.command(name="add")
@is_dmgr()
@commands.guild_only()
async def add(
self,
ctx,
amount: MoniConverter,
user: typing.Optional[discord.Member] = None,
*,
flag: flags = None,
):
"""
Add an amount to someone's donation balance.
This requires either one of the donation manager roles or the bot mod role.
[flag] parameter is a flag used for setting notes for a donation
For example:
        `[p]dono add 1000 @Twentysix --note hes cute`"""
user = user or ctx.author
if not amount:
return
await self.open_account(user, ctx.guild)
donos = await self.dono_Add(ctx, user, amount)
note = await self.add_note(user, ctx.message, flag if flag else {})
role = await self.donoroles(ctx, user, donos)
await self.dono_log(ctx, "add", user, amount, donos, role, note)
async def dono_Remove(self, ctx, user, amount):
        await self.get_data(user, ctx.guild)  # ensures the account exists in the cache
self.cache[ctx.guild.id][user.id] -= amount
return await self.get_data(user, ctx.guild)
async def get_member_notes(self, member: discord.Member):
async with self.config.member(member).notes() as notes:
return notes
@dono.command(name="remove")
@is_dmgr()
@commands.guild_only()
async def remove(
self,
ctx,
amount: MoniConverter,
user: typing.Optional[discord.Member] = None,
*,
flag: flags = None,
):
"""
Remove an amount from someone's donation balance.
This requires either one of the donation manager roles or the bot mod role."""
user = user or ctx.author
if not amount:
return
donation = await self.dono_Remove(ctx, user, amount)
role = await self.remove_roles(ctx, user, donation)
note = await self.add_note(user, ctx.message, flag if flag else {})
await self.dono_log(ctx, "remove", user, amount, donation, role, note)
@dono.command(
name="reset",
description="Parameters:\n\n<user> user to reset the donation balance of.",
help="Resets a person's donation balance. Requires the manager role.",
)
@is_dmgr()
@commands.guild_only()
async def reset(self, ctx, user: discord.Member = None):
"""
Reset someone's donation balance
This will set their donations to 0.
This requires either one of the donation manager roles or the bot mod role."""
user = user or ctx.author
        await self.get_data(user, ctx.guild)  # ensures the account exists in the cache
        self.cache[ctx.guild.id][user.id] = 0
emoji = await self.config.guild(ctx.guild).currency()
embed = discord.Embed(
title="***__Reset!__***",
description=f"Resetted {user.name}'s donation bal. Their current donation amount is {emoji} 0",
color=await ctx.embed_color(),
)
embed.add_field(
name="Jump Link To The Command:",
value=f"[click here]({ctx.message.jump_url})",
)
embed.set_footer(
text=f"Command executed by: {ctx.author.display_name}",
icon_url=ctx.guild.icon.url,
)
chanid = await self.config.guild(ctx.guild).logchannel()
role = await self.remove_roles(ctx, user, 0)
if chanid and chanid != "none":
channel = await self.bot.fetch_channel(chanid)
await ctx.message.add_reaction("✅")
await channel.send(role, embed=embed)
else:
await ctx.send(role, embed=embed)
@dono.command(name="notes")
@commands.guild_only()
@is_dmgr()
async def check_notes(self, ctx, member: typing.Optional[discord.Member] = None, number=None):
EmbedField = namedtuple("EmbedField", "name value inline")
member = member or ctx.author
notes = await self.get_member_notes(member)
if not notes:
return await ctx.send(f"*{member}* has no notes!")
        if number is not None:
            note = notes.get(str(number))
if not note:
return await ctx.send(
f"That doesn't seem to a valid note! **{member}** only has *{len(notes)}* notes."
)
embed = discord.Embed(
title=f"{member.display_name.capitalize()}'s Notes!",
description=f"Note taken on <t:{int(note['at'])}:D>",
color=discord.Color.green(),
)
embed.add_field(
name=f"**Note Number {number}**",
value=f"*[{note['content']}]({(await (self.bot.get_channel(note['channel_id'])).fetch_message(int(note['message_id']))).jump_url})*",
inline=False,
)
embed.set_footer(
text=f"Note taken by {await self.bot.get_or_fetch_member(ctx.guild, note['author'])}"
)
return await ctx.send(embed=embed)
fields = []
embeds = []
emb = {
"embed": {
"title": f"{member.name.capitalize()}'s Notes!",
"description": "",
},
"footer": {"text": "", "icon_url": ctx.author.avatar.url},
"fields": [],
}
for key, value in notes.items():
field = EmbedField(
f"**Note Number {key}.**",
f"*[{value['content'][:20] if len(value['content']) > 20 else value['content']}]({(await(self.bot.get_channel(value['channel_id'])).fetch_message(int(value['message_id']))).jump_url})*",
False,
)
fields.append(field)
fieldgroups = RedHelpFormatter.group_embed_fields(fields, 200)
page_len = len(fieldgroups)
for i, group in enumerate(fieldgroups, 1):
embed = discord.Embed(color=0x303036, **emb["embed"])
emb["footer"][
"text"
] = f"Use `{ctx.prefix}notes {member} [number]` to look at a specific note.\nPage {i}/{page_len}."
embed.set_footer(**emb["footer"])
for field in group:
embed.add_field(**field._asdict())
embeds.append(embed)
if len(embeds) > 1:
return await menu(ctx, embeds, DEFAULT_CONTROLS)
else:
return await ctx.send(embed=embeds[0])
@dono.command(name="check")
@commands.guild_only()
@is_dmgr()
async def check(self, ctx, user: discord.Member = None):
"""
Check someone's donation balance.
This requires either one of the donation manager roles or the bot mod role."""
if not user:
await ctx.send("Please mention a user or provide their id to check their donations")
return
await self.open_account(user, ctx.guild)
donos = await self.get_data(user, ctx.guild)
emoji = await self.config.guild(ctx.guild).currency()
notes = len(await self.config.member(user).notes())
embed = discord.Embed(
title=f"{user.name}'s donations in **__{ctx.guild.name}__**",
description="Total amount donated: {}{:,}\n\nThey have **{}** notes".format(
emoji, donos, notes
),
color=discord.Color.random(),
)
embed.set_author(name=ctx.author, icon_url=ctx.author.avatar.url)
embed.set_footer(text=f"{ctx.guild.name}", icon_url=ctx.guild.icon.url)
await ctx.send(embed=embed)
@dono.command(
name="leaderboard",
description="Parameters:\n\n<topnumber> The amount of people to show on the leaderboard. deafaults to 5.",
help="Shows a leaderboard containing the top donators in the guild.",
aliases=["lb", "topdonators"],
)
@commands.guild_only()
async def leaderboard(self, ctx, topnumber=5):
"""
See the top donators in the server.
Use the <topnumber> parameter to see the top `x` donators."""
data = self.cache[ctx.guild.id]
data = await sortdict(data)
embed = discord.Embed(title=f"Top {topnumber} donators ", color=discord.Color.random())
emoji = await self.config.guild(ctx.guild).currency()
for index, (key, value) in enumerate(data.items(), 1):
if value != 0:
user = await self.bot.get_or_fetch_user(int(key))
embed.add_field(
name=f"{index}. **{user.name}**",
value="{} {:,}".format(emoji, value),
inline=False,
)
if (index) == topnumber:
break
embed.set_thumbnail(url=ctx.guild.icon.url)
embed.set_author(name=ctx.guild.name)
embed.set_footer(text=f"For a higher top number, do `{ctx.prefix}dono lb [amount]`")
await ctx.send(embed=embed)
@commands.group(name="donoset", invoke_without_command=True)
@commands.mod_or_permissions(administrator=True)
async def donoset(self, ctx):
"""
Base command for changing donation settings for your server."""
@donoset.group(name="autorole", invoke_without_command=True)
@commands.mod_or_permissions(administrator=True)
async def autorole(self, ctx):
"""
Change settings for Auto donation roles behaviour in your server."""
await ctx.send_help("donoset autorole")
@autorole.command(name="add")
@commands.mod_or_permissions(administrator=True)
async def ar_add(self, ctx, true_or_false: bool):
"""
        Set whether donation roles (set with `[p]donoset roles`) automatically get added to users or not.
        <true_or_false> should be either True or False.
True to enable and False to disable."""
toggle = await self.config.guild(ctx.guild).autoadd()
if toggle and true_or_false:
return await ctx.send("Auto-role adding is already enabled for this server.")
elif not toggle and not true_or_false:
return await ctx.send("Auto-role adding is already disabled for this server.")
await self.config.guild(ctx.guild).autoadd.set(true_or_false)
return await ctx.send(
f"{'Disabled' if true_or_false == False else 'Enabled'} auto role adding for this server"
)
@autorole.command(name="remove")
@commands.mod_or_permissions(administrator=True)
async def ar_remove(self, ctx, true_or_false: bool):
"""
Set whether donation roles (set with `[p]donoset roles`) automatically get removed from users or not.
        <true_or_false> should be either True or False.
True to enable and False to disable."""
toggle = await self.config.guild(ctx.guild).autoremove()
if toggle and true_or_false:
return await ctx.send("Auto-role removing is already enabled for this server.")
elif not toggle and not true_or_false:
return await ctx.send("Auto-role removing is already disabled for this server.")
await self.config.guild(ctx.guild).autoremove.set(true_or_false)
return await ctx.send(
f"{'Disabled' if true_or_false == False else 'Enabled'} auto role removing for this server"
)
@donoset.command(name="currency")
@commands.mod_or_permissions(administrator=True)
async def currency(self, ctx, icon):
"""
Change the currency symbol for donations in your server.
This symbol/icon will show up next to the amounts in all dono commands.
This defaults to ⏣"""
old_icon = await self.config.guild(ctx.guild).currency()
msg = await ctx.send(
f"Your current icon is {old_icon}. Are you sure you want to change it to {icon}?"
)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await ctx.bot.wait_for("reaction_add", timeout=30, check=pred)
except asyncio.TimeoutError:
return await ctx.send("You took too long to respond. Aborting.")
if pred.result:
await self.config.guild(ctx.guild).currency.set(icon)
return await ctx.send("New icon updated!")
else:
return await ctx.send("Okay. Thank you for wasting my time.")
@donoset.command(name="addrole")
@commands.mod_or_permissions(administrator=True)
async def addrole(self, ctx, role: discord.Role, amount: MoniConverter):
"""
Add a new autorole for a specific amount without going through the long setup command."""
data = await self.config.guild(ctx.guild).assignroles()
        if role.position > ctx.me.top_role.position:
            return await ctx.send(
                "That role's position is higher than mine, so I cannot manually assign it to people. Please move the role created by me above the assigning roles."
)
elif role.is_bot_managed():
return await ctx.send("That role is managed by a bot so it can't be assigned by me.")
elif role.position > ctx.author.top_role.position:
return await ctx.send(
"That role's postition is above you and can not be set to be assigned by you."
)
if str(amount) in data:
msg = await ctx.send(
"There is already an auto-role for that amount. Do you want to replace it or add multiple?\nReact with the tick to replace and cross to add multiple."
)
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await ctx.bot.wait_for("reaction_add", timeout=30, check=pred)
except asyncio.TimeoutError:
return await ctx.send("You took too long to respond. Aborting.")
if not pred.result:
prev = data[str(amount)]
if isinstance(prev, list):
return await ctx.send("You can't have more than 2 autoroles per amount!")
elif isinstance(prev, int):
new = [prev, role.id]
data[str(amount)] = new
else:
data[str(amount)] = role.id
await ctx.send("Done!")
else:
await ctx.send(
f"Added auto role: `@{role.name}` for amount: {humanize_number(amount)}"
)
data[str(amount)] = role.id
await self.config.guild(ctx.guild).assignroles.set(data)
@donoset.command(name="roles")
@commands.mod_or_permissions(administrator=True)
async def setroles(self, ctx):
"""
A step by step interactive process to set donation autoroles for your server.
These roles will be automaitcally assigned upon reaching a certain amount of donation.
"""
if not await self.config.guild(ctx.guild).autoadd():
return await ctx.send(
"Auto-adding for roles is disabled here. Please enable that before using this command."
)
await ctx.send(
"Let's setup autoroles for donations. Send the amount and role to be assigned in the following format:\n*__Amount:roleid__*\nKeep doing that and when you are done, just type 'done' and the process will stop."
)
ardict = {}
while True:
try:
message = await self.bot.wait_for(
"message",
timeout=90,
check=lambda message: message.author == ctx.author
and message.channel == ctx.channel,
)
except asyncio.TimeoutError:
await ctx.send("I guess that's all you want. Timed out.")
break
try:
key, value = message.content.split(":")
except:
if message.content.lower() == "done":
break
else:
return await ctx.send(
"Messages must be in the format of ***__Amount:RoleID__***. Try again."
)
amount = await MoniConverter().convert(ctx, str(key))
if not amount:
return
role = await RoleConverter().convert(ctx, value)
if not role:
return await ctx.send("Try again and provide a proper role id.")
if role.position > ctx.me.top_role.position:
return await ctx.send(
"That role's position is higher than me, I cannot manually assign it to people. Please move the role created by me, above the assigning roles."
)
elif role.is_bot_managed():
return await ctx.send(
"That role is managed by a bot so it can't be assigned by me."
)
elif role.position > ctx.author.top_role.position:
return await ctx.send(
"That role's postition is above you and can not be set to be assigned by you."
)
ardict[amount] = role.id
await message.add_reaction("✅")
embed = discord.Embed(
title=f"Autoroles setup for {ctx.guild.name}: ",
color=discord.Color.green(),
)
final = ""
ardict = await sortdict(ardict, "key")
for index, (key, value) in enumerate(ardict.items()):
final += "**Dono Role {}**\n{:,} - {}\n\n".format(index + 1, int(key), f"<@&{value}>")
embed.description = final
confirmation = await ctx.send(embed=embed)
pred = ReactionPredicate.yes_or_no(confirmation, ctx.author)
start_adding_reactions(confirmation, ReactionPredicate.YES_OR_NO_EMOJIS)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=30)
except:
return await ctx.send("You took too long. Request timed out.")
if pred.result:
await ctx.send(
"alright done. Redo the command to setup roles again if there is any problem."
)
await self.config.guild(ctx.guild).assignroles.set(ardict)
else:
await ctx.send("Aight try again later.")
@donoset.command(name="managers")
@commands.mod_or_permissions(administrator=True)
async def set_managers(self, ctx, add_or_remove, roles: Greedy[discord.Role] = None):
"""Adds or removes managers for your guild.
This is an alternative to `[p]dono setup`.
You can use this to add or remove manager roles post setup.
<add_or_remove> should be either `add` to add roles or `remove` to remove roles.
"""
if roles is None:
return await ctx.send("`Roles` is a required argument.")
for role in roles:
async with self.config.guild(ctx.guild).managers() as l:
if add_or_remove.lower() == "add":
if not role.id in l:
l.append(role.id)
elif add_or_remove.lower() == "remove":
if role.id in l:
l.remove(role.id)
return await ctx.send(
f"Successfully {'added' if add_or_remove.lower() == 'add' else 'removed'} {len([role for role in roles])} roles."
)
@donoset.command(name="logchannel")
@commands.mod_or_permissions(administrator=True)
async def set_channel(self, ctx, channel: discord.TextChannel = None):
"""Set the donation logging channel or reset it.
This is an alternative to `[p]dono setup`.
You can use this to change or reset log channel post setup.
"""
await self.config.guild(ctx.guild).logchannel.set(None if not channel else channel.id)
return await ctx.send(
f"Successfully set {channel.mention} as the donation logging channel."
if channel
else "Successfully reset the log channel."
)
@donoset.command(name="showsettings", aliases=["showset", "ss"])
    async def showsettings(self, ctx):
        """
        Show the current donation logging settings for this server."""
        data = await self.config.guild(ctx.guild).all()
embed = discord.Embed(
title=f"Donation Logging settings for {ctx.guild}",
color=0x303036,
timestamp=ctx.message.created_at,
)
managers = (
humanize_list([(ctx.guild.get_role(i)).mention for i in data["managers"]])
if data["managers"] and isinstance(data["managers"], list)
else data["managers"]
)
embed.add_field(
name="Donation Managers: ",
value=(managers) if data["managers"] else "None",
inline=False,
)
embed.add_field(
name="Log Channel: ",
value=f"<#{data['logchannel']}>" if data["logchannel"] else "None",
inline=False,
)
embed.add_field(name="Auto Add Roles: ", value=data["autoadd"], inline=False)
embed.add_field(name="Auto Remove Roles: ", value=data["autoremove"])
embed.add_field(
name="Auto Assignable roles: ",
value=f"Use `{ctx.prefix}dono roles` to see these.",
inline=False,
)
await ctx.send(embed=embed)
| 37.691324 | 220 | 0.554856 |
aced945234d284fa1d1d022030f6adf76ae5300e | 3,476 | py | Python | test.py | lsrcz/SyGuS | 5aab1b2c324d8a3c20e51f8acb2866190a1431d3 | [
"MIT"
] | 1 | 2021-07-11T08:32:32.000Z | 2021-07-11T08:32:32.000Z | test.py | lsrcz/SyGuS | 5aab1b2c324d8a3c20e51f8acb2866190a1431d3 | [
"MIT"
] | null | null | null | test.py | lsrcz/SyGuS | 5aab1b2c324d8a3c20e51f8acb2866190a1431d3 | [
"MIT"
] | 1 | 2020-12-20T16:08:10.000Z | 2020-12-20T16:08:10.000Z | #!/usr/bin/python
import datetime
import os
import os.path
import platform
import signal
import subprocess
import time
import sexp
import translator
programdir = './programs/'
testroot = './tests/'
hiddentests = testroot + 'hidden_tests/'
opentests = testroot + 'open_tests/'
class TimeoutError(Exception):
pass
def stripComments(bmFile):
noComments = '('
for line in bmFile:
line = line.split(';', 1)[0]
noComments += line
return noComments + ')'
def run_command(cmd, timeout=60):
is_linux = platform.system() == 'Linux'
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True,
preexec_fn=os.setsid if is_linux else None)
t_beginning = datetime.datetime.now()
while True:
timepassed = datetime.datetime.now() - t_beginning
        rtimepassed = timepassed.total_seconds()
if p.poll() is not None:
break
if timeout and rtimepassed > timeout:
if is_linux:
os.killpg(p.pid, signal.SIGTERM)
else:
p.terminate()
raise TimeoutError(cmd, timeout)
time.sleep(0.01)
return p.stdout.read(), rtimepassed
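# Minimal usage sketch (assumed, mirroring how my_test calls it below):
#
#   out, elapsed = run_command('echo hello', timeout=5)
#   # out is raw stdout as bytes (e.g. b'hello\n'), elapsed is wall-clock
#   # seconds as a float; a TimeoutError is raised if the timeout is hit.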
def bytesToStr(item):
if isinstance(item, bytes):
return str(item, encoding='utf-8')
if isinstance(item, list):
return list(map(bytesToStr, item))
if isinstance(item, tuple):
return tuple(map(bytesToStr, item))
return item
def my_test(cmd, outputfile, testname, timeout=300):
outputfile.write('\t%s:' % (testname))
print(cmd)
try:
result, rtime = run_command(cmd, timeout)
except TimeoutError:
outputfile.write('timeout after %i \n' % (timeout))
else:
print(result)
        with open(testname) as benchmarkFile:
            bm = stripComments(benchmarkFile)
        bmExpr = sexp.sexp.parseString(bm, parseAll=True).asList()[0]  # Parse string to python list
checker = translator.ReadQuery(bmExpr)
try:
checkresult = checker.check(str(result, encoding='utf-8'))
except Exception as e:
# outputfile.write('Wrong Answer: Invalid check result %s(%f)\n' %(result,rtime))
outputfile.write('Invalid format(%f)\n' % (rtime))
else:
            if checkresult is None:
outputfile.write('Passed(%f)\n' % (rtime))
else:
# outputfile.write('Wrong Answer: get %s(%f)\n' %(result,rtime))
outputfile.write('Wrong Answer(%f)\n' % (rtime))
if __name__ == '__main__':
timeout = 300
testresultfile = 'testresult.txt'
outfile = open(testresultfile, 'w')
i = 0
for studentname in os.listdir(programdir):
if studentname != "jry2" and studentname != "combine" and studentname != "decisiontree": continue
toexe = programdir + '\'' + studentname + '\'' + '/main.py '
outfile.write(studentname + ': \n')
# cmd = ('python3.5 ' if '3.5' in studentname else 'python ')
cmd = 'python3 '
i = i + 1
print(i)
# arg = opentests + 'three.sl'
# my_test(cmd + toexe + arg, outfile, arg, timeout)
for testgroup in [opentests]:
for test in os.listdir(testgroup):
if '.sl' not in test: continue
arg = testgroup + test
my_test(cmd + toexe + arg, outfile, arg, timeout)
| 31.6 | 105 | 0.598389 |
aced94773a5585fb843804682c7fdea34e778083 | 18,017 | py | Python | behavior_tree_learning/behavior_tree.py | jstyrud/planning-and-learning | 1ea0f306d5979d2548da5ae9a8d92d3cc4b6b9db | [
"Apache-2.0"
] | 8 | 2021-03-16T11:49:22.000Z | 2022-02-10T10:13:27.000Z | behavior_tree_learning/behavior_tree.py | jstyrud/planning-and-learning | 1ea0f306d5979d2548da5ae9a8d92d3cc4b6b9db | [
"Apache-2.0"
] | null | null | null | behavior_tree_learning/behavior_tree.py | jstyrud/planning-and-learning | 1ea0f306d5979d2548da5ae9a8d92d3cc4b6b9db | [
"Apache-2.0"
] | 2 | 2021-05-15T06:00:49.000Z | 2021-11-03T13:46:32.000Z | # pylint: disable=global-at-module-level, global-statement, global-variable-undefined
"""
Class for handling string representations of behavior trees
"""
import random
import yaml
# Below are lists of the possible node types
global FALLBACK_NODES
"""
A list of all types of fallback nodes used, typically just one
"""
global SEQUENCE_NODES
"""
A list of all types of sequence nodes used, typically just one
"""
global CONTROL_NODES
"""
Control nodes are nodes that may have one or more children/subtrees.
Subsequent nodes will be children/subtrees until the corresponding up character is reached
List will contain FALLBACK_NODES, SEQUENCE_NODES and any other control nodes, e.g. parallel nodes
"""
global CONDITION_NODES
"""
Condition nodes are childless leaf nodes that never return RUNNING state.
They may never be the last child of any parent.
"""
global ACTION_NODES
"""
Action nodes are also childless leaf nodes but may return the RUNNING state. They may
also be the last child of any parent.
"""
global ATOMIC_FALLBACK_NODES
"""
Atomic fallback nodes are fallback nodes that have a predetermined set of children/subtrees
that cannot be changed. They behave mostly like action nodes except that they may not be the
children of fallback nodes. Length is counted as one.
"""
global ATOMIC_SEQUENCE_NODES
"""
Atomic sequence nodes are sequence nodes that have a predetermined set of children/subtrees
that cannot be changed. They behave mostly like action nodes except that they may not be the
children of sequence nodes. Length is counted as one.
"""
global UP_NODE
"""
The up node is not really a node but a character that marks the end of a control nodes
set of children and subtrees
"""
global LEAF_NODES
"""
CONDITION_NODES + ACTION_NODES + ATOMIC_FALLBACK_NODES + ATOMIC_SEQUENCE_NODES
Any kind of leaf node.
"""
global BEHAVIOR_NODES
"""
ACTION_NODES + ATOMIC_FALLBACK_NODES + ATOMIC_SEQUENCE_NODES
Basically leaf nodes that actually do something and that may be implemented as the last child.
"""
global ALL_NODES
"""
A list of all the nodes
"""
def load_settings_from_file(file):
# pylint: disable=too-many-statements
"""
Sets the lists of allowed nodes module wide.
"""
global FALLBACK_NODES
global SEQUENCE_NODES
global CONTROL_NODES
global CONDITION_NODES
global ACTION_NODES
global ATOMIC_FALLBACK_NODES
global ATOMIC_SEQUENCE_NODES
global UP_NODE
global LEAF_NODES
global BEHAVIOR_NODES
global ALL_NODES
FALLBACK_NODES = []
SEQUENCE_NODES = []
CONTROL_NODES = []
CONDITION_NODES = []
ACTION_NODES = []
ATOMIC_FALLBACK_NODES = []
ATOMIC_SEQUENCE_NODES = []
LEAF_NODES = []
BEHAVIOR_NODES = []
ALL_NODES = []
with open(file) as f:
bt_settings = yaml.load(f, Loader=yaml.FullLoader)
try:
FALLBACK_NODES = bt_settings["fallback_nodes"]
if FALLBACK_NODES is None:
FALLBACK_NODES = []
except KeyError:
pass
try:
SEQUENCE_NODES = bt_settings["sequence_nodes"]
if SEQUENCE_NODES is None:
SEQUENCE_NODES = []
except KeyError:
pass
try:
CONTROL_NODES = bt_settings["control_nodes"]
except KeyError:
pass
CONTROL_NODES += FALLBACK_NODES
CONTROL_NODES += SEQUENCE_NODES
ALL_NODES += CONTROL_NODES
try:
CONDITION_NODES = bt_settings["condition_nodes"]
except KeyError:
pass
LEAF_NODES += CONDITION_NODES
ALL_NODES += CONDITION_NODES
try:
ACTION_NODES = bt_settings["action_nodes"]
except KeyError:
pass
BEHAVIOR_NODES += ACTION_NODES
ALL_NODES += ACTION_NODES
try:
ATOMIC_FALLBACK_NODES = bt_settings["atomic_fallback_nodes"]
if ATOMIC_FALLBACK_NODES is None:
ATOMIC_FALLBACK_NODES = []
except KeyError:
pass
BEHAVIOR_NODES += ATOMIC_FALLBACK_NODES
ALL_NODES += ATOMIC_FALLBACK_NODES
try:
ATOMIC_SEQUENCE_NODES = bt_settings["atomic_sequence_nodes"]
if ATOMIC_SEQUENCE_NODES is None:
ATOMIC_SEQUENCE_NODES = []
except KeyError:
pass
BEHAVIOR_NODES += ATOMIC_SEQUENCE_NODES
ALL_NODES += ATOMIC_SEQUENCE_NODES
try:
UP_NODE = bt_settings["up_node"]
except KeyError:
pass
ALL_NODES += UP_NODE
LEAF_NODES += BEHAVIOR_NODES
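# A minimal settings file this loader accepts might look like the following
# (the keys are the ones read above; the node strings are purely illustrative):
#
#   fallback_nodes: ['f(']
#   sequence_nodes: ['s(']
#   condition_nodes: ['b?', 'c?']
#   action_nodes: ['a!', 'b!']
#   atomic_fallback_nodes: []
#   atomic_sequence_nodes: []
#   up_node: [')']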
def get_action_list():
"""
Returns list of actions
"""
global ACTION_NODES
return ACTION_NODES
class BT:
"""
Class for handling string representations of behavior trees
"""
def __init__(self, bt):
"""
Creates a bt
"""
self.bt = bt[:]
def set(self, bt):
"""
Sets bt string
"""
self.bt = bt[:]
return self
def random(self, length):
"""
Creates a random bt of the given length
Tries to follow some of the rules for valid trees to speed up the process
"""
self.bt = []
while not self.is_valid():
if length == 1:
self.bt = [random.choice(BEHAVIOR_NODES)]
else:
self.bt = [random.choice(CONTROL_NODES)]
for _ in range(length - 1):
if self.bt[-1] in CONTROL_NODES:
                        child = BT.random_node()
                        while child in UP_NODE:
                            child = BT.random_node()
                        self.bt += [child]
else:
self.bt += [BT.random_node()]
if self.bt[-1] in ACTION_NODES:
self.bt += [UP_NODE[0]]
for _ in range(length - self.length() - 1):
# add nodes to match the number of individuals defined in length
# this is required when random node gives 'up' nodes
# condition nodes make it more likely to be valid
self.bt += [random.choice(CONDITION_NODES)]
if self.length() < length:
self.bt += [random.choice(BEHAVIOR_NODES)]
self.close()
return self.bt
def is_valid(self):
"""
Checks if bt is a valid behavior tree.
Checks are somewhat in order of likelihood to fail.
"""
valid = True
#Empty string
if len(self.bt) <= 0:
valid = False
# The first element cannot be a leaf if after it there are other elements
elif (self.bt[0] not in CONTROL_NODES) and (len(self.bt) != 1):
valid = False
else:
for i in range(len(self.bt) - 1):
#'up' directly after a control node
if (self.bt[i] in CONTROL_NODES) and (self.bt[i+1] in UP_NODE):
valid = False
#Identical condition nodes directly after one another - waste
elif self.bt[i] in CONDITION_NODES and self.bt[i] == self.bt[i+1]:
valid = False
# check for non-BT elements
elif self.bt[i] not in ALL_NODES:
valid = False
if valid:
# Check on the bt depth: to be > 0
depth = self.depth()
if (depth < 0) or (depth == 0 and len(self.bt) > 1):
valid = False
if valid and self.bt[0] in CONTROL_NODES:
fallback_allowed = True
sequence_allowed = True
if self.bt[0] in FALLBACK_NODES:
fallback_allowed = False
elif self.bt[0] in SEQUENCE_NODES:
sequence_allowed = False
valid = self.is_subtree_valid(self.bt[1:], fallback_allowed, sequence_allowed)
return valid
def is_subtree_valid(self, string, fallback_allowed, sequence_allowed):
# pylint: disable=too-many-return-statements, too-many-branches
"""
        Checks whether the subtree starting with string[0] is valid according to a couple of rules:
1. Fallbacks must not be children of fallbacks
2. Sequences must not be children of sequences
"""
while len(string) > 0:
node = string.pop(0)
if node in UP_NODE:
return True
if node in ATOMIC_FALLBACK_NODES:
if not fallback_allowed:
return False
elif node in ATOMIC_SEQUENCE_NODES:
if not sequence_allowed:
return False
elif node in CONTROL_NODES:
if node in FALLBACK_NODES:
if fallback_allowed:
if not self.is_subtree_valid(string, False, True):
return False
else:
return False
elif node in SEQUENCE_NODES:
if sequence_allowed:
if not self.is_subtree_valid(string, True, False):
return False
else:
return False
else:
if not self.is_subtree_valid(string, True, True):
return False
return False
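    # Illustrative strings, assuming 'f(' is a fallback node, 's(' a sequence
    # node, 'a' an action and ')' the up character:
    #   ['f(', 's(', 'a', ')', ')']  is valid (a fallback wraps a sequence)
    #   ['f(', 'f(', 'a', ')', ')']  is not (fallback child of a fallback)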
def close(self):
"""
Adds missing up nodes at the end, or removes from the end if too many
"""
open_subtrees = 0
#Make sure tree always ends with up node if starts with control node
if len(self.bt) > 0:
if self.bt[0] in CONTROL_NODES and self.bt[len(self.bt)-1] not in UP_NODE:
self.bt += UP_NODE
for node in self.bt:
if node in CONTROL_NODES:
open_subtrees += 1
elif node in UP_NODE:
open_subtrees -= 1
if open_subtrees > 0:
for _ in range(open_subtrees):
self.bt += UP_NODE
elif open_subtrees < 0:
for _ in range(-open_subtrees):
#Do not remove the very last node, and only up nodes
for j in range(len(self.bt) - 2, 0, -1): # pragma: no branch, we will always find an up
if self.bt[j] in UP_NODE:
self.bt.pop(j)
break
def trim(self):
"""
Removes control nodes with only one child
"""
for index in range(len(self.bt) -1, 0, -1):
if self.bt[index] in CONTROL_NODES:
children = self.find_children(index)
if len(children) <= 1:
up_node_index = self.find_up_node(index)
self.bt.pop(up_node_index)
if len(children) == 1:
parent = self.find_parent(index)
if parent is not None and self.bt[parent] == self.bt[children[0]]:
#Parent and only child will be identical control nodes,
#child can be removed
up_node_index = self.find_up_node(children[0])
self.bt.pop(up_node_index)
self.bt.pop(children[0])
self.bt.pop(index)
def depth(self):
"""
Returns depth of the bt
"""
depth = 0
max_depth = 0
for i in range(len(self.bt)):
if self.bt[i] in CONTROL_NODES:
depth += 1
max_depth = max(depth, max_depth)
elif self.bt[i] in UP_NODE:
depth -= 1
            if (depth < 0) or (depth == 0 and i != len(self.bt) - 1):
return -1
if depth != 0:
return -1
return max_depth
def length(self):
"""
Counts number of nodes in bt. Doesn't count up characters.
"""
length = 0
for node in self.bt:
if node not in UP_NODE:
length += 1
return length
@staticmethod
def random_node():
"""
Returns a random node.
Usually the set of leaf nodes is much larger than the set of
control nodes but we still typically want the final distribution to
be approximately 50-50 between node types so this function reflects that.
(Typically slightly more leaf nodes than control nodes, but this really depends)
"""
if random.random() < 0.5:
return random.choice(CONTROL_NODES)
return random.choice(LEAF_NODES)
def change_node(self, index, new_node=None):
"""
Changes node at index
"""
if self.bt[index] in UP_NODE:
return
if new_node is None:
new_node = BT.random_node()
# Change control node to leaf node, remove whole subtree
if new_node in LEAF_NODES and self.bt[index] in CONTROL_NODES:
self.delete_node(index)
self.bt.insert(index, new_node)
# Change leaf node to control node. Add up and extra condition/behavior node child
elif new_node in CONTROL_NODES and self.bt[index] in LEAF_NODES:
old_node = self.bt[index]
self.bt[index] = new_node
if old_node in BEHAVIOR_NODES:
self.bt.insert(index + 1, random.choice(LEAF_NODES))
self.bt.insert(index + 2, old_node)
else: #CONDITION_NODE
self.bt.insert(index + 1, old_node)
self.bt.insert(index + 2, random.choice(BEHAVIOR_NODES))
self.bt.insert(index + 3, UP_NODE[0])
else:
self.bt[index] = new_node
def add_node(self, index, new_node=None):
"""
Adds new node at index
"""
if new_node is None:
new_node = BT.random_node()
if new_node in CONTROL_NODES:
if index == 0:
#Adding new control node to encapsulate entire tree
self.bt.insert(index, new_node)
self.bt.append(UP_NODE[0])
else:
self.bt.insert(index, new_node)
self.bt.insert(index + 1, random.choice(LEAF_NODES))
self.bt.insert(index + 2, random.choice(BEHAVIOR_NODES))
self.bt.insert(index + 3, UP_NODE[0])
else:
self.bt.insert(index, new_node)
def delete_node(self, index):
"""
Deletes node at index
"""
if self.bt[index] in UP_NODE:
return
if self.bt[index] in CONTROL_NODES:
up_node_index = self.find_up_node(index)
for i in range(up_node_index, index, -1):
self.bt.pop(i)
self.bt.pop(index)
def find_parent(self, index):
"""
Returns index of the closest parent to the node at input index
"""
if index == 0:
return None
parent = index
siblings_left = 0
while parent > 0:
parent -= 1
if self.bt[parent] in CONTROL_NODES:
if siblings_left == 0:
return parent
siblings_left -= 1
elif self.bt[parent] in UP_NODE:
siblings_left += 1
return None
def find_children(self, index):
"""
Finds all children to the node at index
"""
children = []
if self.bt[index] in CONTROL_NODES:
child = index + 1
level = 0
while level >= 0:
if self.bt[child] in UP_NODE:
level -= 1
elif level == 0:
children.append(child)
if self.bt[child] in CONTROL_NODES:
level += 1
child += 1
return children
def find_up_node(self, index):
"""
Returns index of the up node connected to the control node at input index
"""
if self.bt[index] not in CONTROL_NODES:
raise Exception('Invalid call. Node at index not a control node')
if index == 0:
if self.bt[len(self.bt)-1] in UP_NODE:
index = len(self.bt) - 1
else:
raise Exception('Changing invalid BT. Missing up.')
else:
level = 1
while level > 0:
index += 1
if index == len(self.bt):
raise Exception('Changing invalid BT. Missing up.')
if self.bt[index] in CONTROL_NODES:
level += 1
elif self.bt[index] in UP_NODE:
level -= 1
return index
def get_subtree(self, index):
"""
Get subtree starting at index
"""
subtree = []
if self.bt[index] in LEAF_NODES:
subtree = [self.bt[index]]
elif self.bt[index] in CONTROL_NODES:
subtree = self.bt[index : self.find_up_node(index) + 1]
else:
subtree = []
return subtree
def insert_subtree(self, subtree, index):
"""
Insert subtree at given index
"""
for i in range(len(subtree)):
self.bt.insert(index + i, subtree.pop(0))
def swap_subtrees(self, bt2, index1, index2):
"""
Swaps two subtrees at given indices
"""
subtree1 = self.get_subtree(index1)
subtree2 = bt2.get_subtree(index2)
if subtree1 != [] and subtree2 != []:
# Remove subtrees that will be replaced
for _ in range(len(subtree1)):
self.bt.pop(index1)
for _ in range(len(subtree2)):
bt2.bt.pop(index2)
self.insert_subtree(subtree2, index1)
bt2.insert_subtree(subtree1, index2)
def is_subtree(self, index):
"""
Checks if node at index is root of a subtree
"""
return bool(0 <= index < len(self.bt) and self.bt[index] not in UP_NODE)
| 32.230769 | 103 | 0.557751 |
aced94db2adef7a555f283c567b9a4f5eb406267 | 6,049 | py | Python | maze.py | raymondchiang/Maze | 880305f85af8e3d796b96fa4027d3370797c902d | [
"MIT"
] | 1 | 2016-09-24T01:48:57.000Z | 2016-09-24T01:48:57.000Z | maze.py | raymondchiang/Maze | 880305f85af8e3d796b96fa4027d3370797c902d | [
"MIT"
] | null | null | null | maze.py | raymondchiang/Maze | 880305f85af8e3d796b96fa4027d3370797c902d | [
"MIT"
] | null | null | null | if __name__ == '__main__':
print('Initialization...')
#----------- Built-in Packages -------------------------------
import os
import sys
import random
import time
from datetime import date
from termcolor import cprint, colored #for color
#----------- Custom Packages -------------------------------------------
from printing import (
Menu, Centerize, GetUnicode,
Clear, PaddingPrint, HideCursor,
ShowCursor, ResetCursor,
PrintFiglet)
from level import Level
from generator import MazeGenerator
from constants import *
from loader import ListLevels, LoadLevel
#-------------------------------------------
PRINT_BLOCKS = {
BLOCK_AIR : CB.WHITE + ' ',
BLOCK_WALL : CB.GREEN + ' ',
BLOCK_EXIT : CB.RED + ' ',
BLOCK_PLAYER : CB.CYAN + ' ',
BLOCK_BORDER : CB.GREEN + ' ',
BLOCK_KEY : CB.WHITE + CF.YELLOW + '@=',
BLOCK_GATE : CB.YELLOW + CF.RED + 'XX',
BLOCK_PORTAL : CB.BLUE + '*>',
BLOCK_VEIW_BUSTER : CB.WHITE + CF.MAGENTA + ' +'
}
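# Each tile is rendered two characters wide so a square maze keeps a roughly
# square aspect ratio in the terminal; ShowMatrix below additionally repeats
# rows and columns `zoom` times.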
#--------------------------------------------------------------------
# How many levels are prepared
TOTAL_LEVELS = 6
#--------------------------------------------------------------------
def GetLevel(level_no=0):
if level_no >= TOTAL_LEVELS:
return None
level_id = str(level_no)
while len(level_id) < 3:
level_id = '0' + level_id
level = LoadLevel('levels/%s.lvl' % level_id)
if level_no+1 < TOTAL_LEVELS:
# Register next level
level.next_level_generator = lambda: GetLevel(level_no+1)
return level
def ShowMatrix(matrix, zoom=1):
for row in matrix:
line = ''
length = 0
for cell in row:
for _ in range(zoom):
                line += PRINT_BLOCKS[cell]
            length += 2
line += CS.RESET_ALL
line = Centerize(line, length=length)
for _ in range(zoom):
PaddingPrint(line, centerize=False)
class Game:
def __init__(self):
self.level = None
self.zoom = 3
self.cheated = False
def MainMenu(self):
        menu = ['Start Game', 'Daily Run', 'Random Level',
'Level Select', 'Exit']
selected = Menu(menu, Large=True)
if selected == 0:
self.Play()
elif selected == 1:
            self.DailyRun()
elif selected == 2:
self.RandomLevel()
elif selected == 3:
self.LevelSelect()
else:
self.Exit()
def Exit(self):
Clear()
sys.exit()
def Pause(self):
Esc = Menu(['Continue','Back to Menu','Exit'],
header=lambda: PrintFiglet('Pause'), Large=True)
#pause_menu():
if Esc==0 or Esc==-1:
Clear()
elif Esc==1:
self.MainMenu()
else:
self.Exit()
def Cheat(self):
self.cheated = True
Clear()
print()
PaddingPrint('You cheat!', 'white', 'on_red')
print()
ShowMatrix(self.level.Maze())
print()
PaddingPrint('You cheat!', 'white', 'on_red')
print()
time.sleep(CHEAT_PEEK_TIMER)
Clear()
def Play(self):
Clear()
if not self.level:
self.level = GetLevel()
update_needed = True
self.zoom = 3
self.cheated = False
while True:
if update_needed:
self.FrameUpdate()
if self.level.gameover:
self.GameOver()
return
update_needed = False
code = GetUnicode()
if code in [27, ord('q'), ord('Q')]:
self.Pause()
update_needed = True
elif code == ord('c'):
self.Cheat()
update_needed = True
elif code == ord('z'):
self.zoom += 1
if self.zoom >= 4:
self.zoom = 1
update_needed = True
Clear()
elif code in KEY_TO_DIRECTION.keys():
update_needed = self.level.Move(KEY_TO_DIRECTION[code])
def GameOver(self):
next_level = self.level.NextLevel()
self.level.Reset()
menu = ['Main Menu', 'Exit']
header = lambda: PrintFiglet('Maze Solved!',color='yellow')
if next_level:
menu = ['Next Level'] + menu
selected = Menu(menu, header=header, Large=True)
if not next_level:
selected += 1
if selected == 0:
self.level = next_level
self.Play()
elif selected == 1:
self.MainMenu()
else:
self.Exit()
def Help(self):
pass
def RandomLevel(self):
mg = MazeGenerator()
self.level = mg.to_level()
self.Play()
def LevelSelect(self):
levels = ListLevels()
levelnames = [os.path.basename(x) for x in levels]
selected = Menu(levelnames+['< Back'], header=lambda: PrintFiglet('Select a Level'), Large=True)
if selected in [len(levelnames),-1]:
# Back
self.MainMenu()
else:
self.level = GetLevel(selected)
self.Play()
    def DailyRun(self):
today = date.today().strftime('%Y-%m-%d')
mg = MazeGenerator(seed=today+'MAZEPY')
self.level = mg.to_level()
        self.level.name = 'Daily Run'
self.level.subname = today
self.Play()
def FrameUpdate(self):
#Clear()
ResetCursor()
print()
PaddingPrint(self.level.name)
if self.level.subname:
PaddingPrint(self.level.subname, 'cyan')
print()
ShowMatrix(self.level.View(), self.zoom)
print()
PaddingPrint('Step:'+str(self.level.step))
print()
#------------------------------------------------------------
if __name__ == '__main__':
HideCursor()
Clear()
g = Game()
g.MainMenu()
ShowCursor()
Clear()
| 28.804762 | 104 | 0.497768 |
aced95355734aeb857b888919702bc7779bc4cf5 | 496 | py | Python | part2/arrays.py | nazariinyzhnyk/fluent-python | 466303a5191c2cae9c9fd986fb8b71c3eeea8479 | [
"MIT"
] | null | null | null | part2/arrays.py | nazariinyzhnyk/fluent-python | 466303a5191c2cae9c9fd986fb8b71c3eeea8479 | [
"MIT"
] | null | null | null | part2/arrays.py | nazariinyzhnyk/fluent-python | 466303a5191c2cae9c9fd986fb8b71c3eeea8479 | [
"MIT"
] | null | null | null | import array
import random
from collections import deque
import numpy as np
a = [random.random() for _ in range(100000)]
print(a)
a = array.array('d', (random.random() for _ in range(100000)))
print(a)
a = np.arange(12)
print(a)
print(a.shape)
a.shape = 3, 4
print(a)
a = a.transpose()
print(a)
dq = deque(range(10), maxlen=10)
print(dq)
dq.rotate(3)
print(dq)
dq.rotate(-4)
print(dq)
dq.appendleft(-1)
print(dq)
dq.extend([11, 12, 12])
print(dq)
dq.extendleft([11, 23, 45])
print(dq)
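# Note: deque.rotate(n) moves elements from the right end to the front for
# n > 0, e.g. deque([0, 1, 2]).rotate(1) gives deque([2, 0, 1]); appendleft on
# a full bounded deque silently drops the rightmost element.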
| 12.717949 | 62 | 0.671371 |
aced95387be1c8c94d07a583562874639e6698ed | 1,005 | py | Python | meggie/actions/raw_resample/__init__.py | Teekuningas/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 4 | 2020-04-29T08:57:11.000Z | 2021-01-15T21:21:51.000Z | meggie/actions/raw_resample/__init__.py | Teekuningas/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 16 | 2019-05-03T10:31:16.000Z | 2021-05-06T14:59:55.000Z | meggie/actions/raw_resample/__init__.py | cibr-jyu/meggie | 0790559febb990a5487d4f0c92987066632e1d99 | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2020-12-12T09:57:00.000Z | 2020-12-20T17:12:05.000Z | """ Contains implementation for raw resample.
"""
import logging
import matplotlib.pyplot as plt
import numpy as np
from meggie.utilities.messaging import messagebox
from meggie.utilities.messaging import exc_messagebox
from meggie.utilities.threading import threaded
from meggie.actions.raw_resample.dialogs.resamplingDialogMain import ResamplingDialog
from meggie.mainwindow.dynamic import Action
from meggie.mainwindow.dynamic import subject_action
class Resample(Action):
""" Shows a dialog for getting parameters and allows
resampling data of the subject.
"""
@subject_action
def handler(self, subject, params):
"""
"""
@threaded
def resample_fun():
subject.get_raw().resample(params['rate'])
resample_fun(do_meanwhile=self.window.update_ui)
subject.save()
def run(self):
resampling_dialog = ResamplingDialog(
self.window, self.experiment, self.handler)
resampling_dialog.show()
| 26.447368 | 85 | 0.720398 |
aced965b37617ff6d2429b08bfdcf220acf30eca | 2,219 | py | Python | hypergan/losses/ali_loss.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | 1 | 2020-01-02T06:29:56.000Z | 2020-01-02T06:29:56.000Z | hypergan/losses/ali_loss.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | 218 | 2021-05-25T01:46:15.000Z | 2022-02-11T01:08:52.000Z | hypergan/losses/ali_loss.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | null | null | null | import tensorflow as tf
import hyperchamber as hc
from hypergan.losses.base_loss import BaseLoss
class AliLoss(BaseLoss):
def required(self):
return "reduce".split()
def _create(self, d_real, d_fake):
ops = self.ops
config = self.config
gan = self.gan
pq = d_real
pp = d_fake
zeros = tf.zeros_like(d_fake)
ones = tf.ones_like(d_fake)
if config.type == 'original':
d_loss = -tf.log(tf.nn.sigmoid(pq))-tf.log(1-tf.nn.sigmoid(pp))
g_loss = -tf.log(1-tf.nn.sigmoid(pq))-tf.log(tf.nn.sigmoid(pp))
elif config.type == 'least_squares':
a,b,c = config.labels
square = ops.lookup('square')
d_loss = 0.5*square(d_real - b) + 0.5*square(d_fake - a)
g_loss = 0.5*square(d_fake - c) + 0.5*square(d_real - a)
#g_loss = 0.5*square(d_fake - c) - d_real
#g_loss = 0.5*square(d_fake - c) + 0.5*(b-d_real)
elif config.type == 'logistic':
d_loss = tf.nn.softplus(-d_real) + tf.nn.softplus(d_fake)
g_loss = tf.nn.softplus(-d_fake) + tf.nn.softplus(d_real)
elif config.type == 'wasserstein':
d_loss = -pq+pp
g_loss = pq-pp
elif config.type == 'label_smoothing':
generator_target_probability = config.generator_target_probability or 0.8
label_smooth = config.label_smooth or 0.2
g_loss = self.sigmoid_kl_with_logits(d_fake, generator_target_probability) + \
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=zeros)
d_loss = self.sigmoid_kl_with_logits(d_real, 1.-label_smooth) + \
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros)
else:
g_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=zeros) + \
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=ones)
d_loss = tf.nn.sigmoid_cross_entropy_with_logits(logits=d_real, labels=zeros) + \
tf.nn.sigmoid_cross_entropy_with_logits(logits=d_fake, labels=ones)
return [d_loss, g_loss]
| 39.625 | 93 | 0.605678 |
aced96c9abe2247fd8ff0b08de46cd5ad3564c24 | 659 | py | Python | turboflow/callbacks.py | Chutlhu/TurboSuperResultion | 6ef6db8669270681a07e028cf3fd17a747b3ba5d | [
"Apache-2.0"
] | null | null | null | turboflow/callbacks.py | Chutlhu/TurboSuperResultion | 6ef6db8669270681a07e028cf3fd17a747b3ba5d | [
"Apache-2.0"
] | null | null | null | turboflow/callbacks.py | Chutlhu/TurboSuperResultion | 6ef6db8669270681a07e028cf3fd17a747b3ba5d | [
"Apache-2.0"
] | null | null | null | import torch
from pytorch_lightning.callbacks import Callback
class RefineSolution(Callback):
def __init__(self, thr: float = 1e-5):
super().__init__()
self.monitor = 'valid_loss'
self.thr = thr
self.is_enabled = False
def on_validation_end(self, trainer, model):
logs = trainer.callback_metrics
        metric = logs.get(self.monitor)
        # guard against a missing metric and use the configured threshold
        if (not self.is_enabled) and metric is not None and metric < self.thr:
print('Switched to LBFGS')
trainer.optimizers = [torch.optim.LBFGS(model.parameters(), lr=1e-4)]
self.is_enabled = True
# trainer.lr_schedulers = trainer.configure_schedulers([new_schedulers]) | 36.611111 | 84 | 0.649469 |
aced9937b9e3e5c98acada73bd2c84d9d6a85cc5 | 2,066 | py | Python | other/dft_workflow/mof_screen/pymofscreen/kpts_handler.py | arosen93/QMOF | c3faff291bba31727132a29371d04e6246377321 | [
"MIT"
] | 47 | 2020-10-29T19:31:52.000Z | 2022-03-23T07:29:43.000Z | other/dft_workflow/mof_screen/pymofscreen/kpts_handler.py | pspiewak/QMOF | 13b57ab9884a65eea3c95283f2e76190cfa0a2d9 | [
"MIT"
] | 25 | 2020-10-30T07:47:53.000Z | 2021-10-05T02:58:46.000Z | other/dft_workflow/mof_screen/pymofscreen/kpts_handler.py | pspiewak/QMOF | 13b57ab9884a65eea3c95283f2e76190cfa0a2d9 | [
"MIT"
] | 14 | 2021-01-05T23:00:48.000Z | 2022-03-10T11:54:24.000Z | import numpy as np
import os
try:
import pymatgen as pm
from pymatgen.io.cif import CifParser
from pymatgen.io.vasp.inputs import Kpoints
has_pm = True
except:
has_pm = False
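# Sketch of the pymatgen calls used below, assuming a local file 'mof.cif' and
# a k-point density (KPPA) of 1000; all names here are illustrative:
#   structure = CifParser('mof.cif').get_structures(primitive=True)[0]
#   auto_kpts = Kpoints.automatic_density(structure, 1000)
#   grid = auto_kpts.kpts[0]                 # e.g. [2, 2, 3]
#   gamma = auto_kpts.style.name == 'Gamma'  # gamma-centered grid?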
def get_kpts(screener,cif_file,level):
"""
Obtain the number of kpoints
Args:
screener (class): pymofscreen.screener class
cif_file (string): name of CIF file
level (string): accuracy level
Returns:
kpts (list of ints): kpoint grid
gamma (bool): True for gamma-centered
"""
niggli = screener.niggli
mofpath = screener.mofpath
kpts_path = screener.kpts_path
kppas = screener.kppas
kpts = None
if not mofpath:
mofpath = ''
if kpts_path == 'Auto' and has_pm:
if level == 'low':
kppa = kppas[0]
elif level == 'high':
kppa = kppas[1]
else:
raise ValueError('kpoints accuracy level not defined')
filepath = os.path.join(mofpath,cif_file)
if '.cif' in cif_file:
parser = CifParser(filepath)
pm_mof = parser.get_structures(primitive=niggli)[0]
else:
pm_mof = pm.Structure.from_file(filepath,primitive=niggli)
pm_kpts = Kpoints.automatic_density(pm_mof,kppa)
kpts = pm_kpts.kpts[0]
if pm_kpts.style.name == 'Gamma':
gamma = True
else:
gamma = None
elif kpts_path == 'Auto' and not has_pm:
raise ValueError('Pymatgen not installed. Please provide a kpts file.')
else:
old_cif_name = cif_file.split('.cif')[0].split('_')[0]
infile = open(kpts_path,'r')
lines = infile.read().splitlines()
infile.close()
for i in range(len(lines)):
if old_cif_name in lines[i]:
if level == 'low':
kpts = lines[i+1]
gamma = lines[i+2]
elif level == 'high':
kpts = lines[i+3]
gamma = lines[i+4]
else:
raise ValueError('Incompatible KPPA with prior runs')
break
kpts = np.squeeze(np.asarray(np.matrix(kpts))).tolist()
if not kpts or len(kpts) != 3:
raise ValueError('Error parsing k-points for '+cif_file)
if gamma == 'True':
gamma = True
elif gamma == 'False':
gamma = False
else:
raise ValueError('Error parsing gamma for '+cif_file)
return kpts, gamma | 24.305882 | 73 | 0.677638 |
aced997be88908388535e74f2f559a8b41bff882 | 353 | py | Python | src/settings.py | tim-barnes/bind-docker | 5021e8e161e37c59c16859dbc8e3c51c0a67e334 | [
"Unlicense"
] | null | null | null | src/settings.py | tim-barnes/bind-docker | 5021e8e161e37c59c16859dbc8e3c51c0a67e334 | [
"Unlicense"
] | null | null | null | src/settings.py | tim-barnes/bind-docker | 5021e8e161e37c59c16859dbc8e3c51c0a67e334 | [
"Unlicense"
] | null | null | null | import os
from pydantic import BaseSettings
class Settings(BaseSettings):
zone: str = '.muppets.things'
hostname: str = os.environ.get('HOSTNAME')
# uncomment this when doing encryption
# private_key: str = ...
class Config:
fields = {
'hostname': {
'alias': 'hostname'
}
} | 20.764706 | 46 | 0.560907 |
aced99ea255c2f32d779a17cbc4d6da0683b14ec | 660 | py | Python | plenum/test/bls/test_send_txns_no_bls.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/bls/test_send_txns_no_bls.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | plenum/test/bls/test_send_txns_no_bls.py | steptan/indy-plenum | 488bf63c82753a74a92ac6952da784825ffd4a3d | [
"Apache-2.0"
] | null | null | null | from plenum.test.bls.helper import check_bls_multi_sig_after_send
from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \
client1, wallet1, client1Connected
nodeCount = 4
nodes_wth_bls = 0
def test_each_node_has_bls(txnPoolNodeSet):
for node in txnPoolNodeSet:
assert node.bls_bft
assert node.replicas[0]._bls_bft_replica
def test_send_txns_no_bls(looper, txnPoolNodeSet,
client1, client1Connected, wallet1):
check_bls_multi_sig_after_send(looper, txnPoolNodeSet,
client1, wallet1,
saved_multi_sigs_count=0)
| 33 | 78 | 0.689394 |
aced9a1d3832ba662a2aeeded6ed874b11767315 | 4,355 | py | Python | babynames.py | ChaoYiChenTW/stanCode | d660df265e01e7a58f30dc4e45161be935a74526 | [
"MIT"
] | null | null | null | babynames.py | ChaoYiChenTW/stanCode | d660df265e01e7a58f30dc4e45161be935a74526 | [
"MIT"
] | null | null | null | babynames.py | ChaoYiChenTW/stanCode | d660df265e01e7a58f30dc4e45161be935a74526 | [
"MIT"
] | null | null | null | """
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
Reads baby-name ranking data from files into a dict, then prints it or
searches the names for a target substring.
"""
import sys
def add_data_for_name(name_data, year, rank, name):
"""
Adds the given year and rank to the associated name in the name_data dict.
Input:
name_data (dict): dict holding baby name data
year (str): the year of the data entry to add
rank (str): the rank of the data entry to add
name (str): the name of the data entry to add
Output:
This function modifies the name_data dict to store the provided
name, year, and rank. This function does not return any values.
"""
if name not in name_data:
name_data[name] = {year: rank}
elif year in name_data[name]:
if int(rank) < int(name_data[name][year]):
name_data[name][year] = rank
else:
name_data[name][year] = rank
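# Example: add_data_for_name(d, '2000', '10', 'Ada') followed by
# add_data_for_name(d, '2000', '4', 'Ada') leaves d == {'Ada': {'2000': '4'}},
# since the better (lower) rank wins for a duplicate name/year pair.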
def add_file(name_data, filename):
"""
Reads the information from the specified file and populates the name_data
dict with the data found in the file.
Input:
name_data (dict): dict holding baby name data
filename (str): name of the file holding baby name data
Output:
This function modifies the name_data dict to store information from
the provided file name. This function does not return any value.
"""
with open(filename, 'r') as f:
year = f.readline().strip()
for line in f.readlines():
rank = line.split(',')[0].strip()
nameBoy = line.split(',')[1].strip()
nameGirl = line.split(',')[2].strip()
add_data_for_name(name_data, year, rank, nameBoy)
add_data_for_name(name_data, year, rank, nameGirl)
def read_files(filenames):
"""
Reads the data from all files specified in the provided list
into a single name_data dict and then returns that dict.
Input:
filenames (List[str]): a list of filenames containing baby name data
Returns:
name_data (dict): the dict storing all baby name data in a structured manner
"""
name_data = {}
for filename in filenames:
add_file(name_data, filename)
return name_data
def search_names(name_data, target):
"""
Given a name_data dict that stores baby name information and a target string,
returns a list of all names in the dict that contain the target string. This
function should be case-insensitive with respect to the target string.
Input:
name_data (dict): a dict containing baby name data organized by name
target (str): a string to look for in the names contained within name_data
Returns:
matching_names (List[str]): a list of all names from name_data that contain
the target string
"""
matching_names = []
for key in name_data:
if target.lower() in key.lower():
matching_names.append(key)
return matching_names
def print_names(name_data):
"""
(provided, DO NOT MODIFY)
Given a name_data dict, print out all its data, one name per line.
The names are printed in alphabetical order,
with the corresponding years data displayed in increasing order.
Input:
name_data (dict): a dict containing baby name data organized by name
Returns:
This function does not return anything
"""
for key, value in sorted(name_data.items()):
print(key, sorted(value.items()))
def main():
# (provided, DO NOT MODIFY)
args = sys.argv[1:]
# Two command line forms
# 1. file1 file2 file3 ..
# 2. -search target file1 file2 file3 ..
# Assume no search, so list of filenames to read
# is the args list
filenames = args
# Check if we are doing search, set target variable
target = ''
if len(args) >= 2 and args[0] == '-search':
target = args[1]
filenames = args[2:] # Update filenames to skip first 2
# Read in all the filenames: baby-1990.txt, baby-2000.txt, ...
names = read_files(filenames)
# Either we do a search or just print everything.
if len(target) > 0:
search_results = search_names(names, target)
for name in search_results:
print(name)
else:
print_names(names)
if __name__ == '__main__':
main()
| 29.828767 | 84 | 0.642939 |
aced9ac91e674c4a2aeea1437cc2694b9e1e2597 | 823 | py | Python | Client/unsafe_tester.py | PoCInnovation/Hexapod | 7c1941f7dba906bd3fb3d768b58edbb03b5875a0 | [
"MIT"
] | 8 | 2020-11-02T10:46:10.000Z | 2022-03-21T08:30:32.000Z | Client/unsafe_tester.py | PoCFrance/hexapod | 7c1941f7dba906bd3fb3d768b58edbb03b5875a0 | [
"MIT"
] | null | null | null | Client/unsafe_tester.py | PoCFrance/hexapod | 7c1941f7dba906bd3fb3d768b58edbb03b5875a0 | [
"MIT"
] | 1 | 2020-07-14T12:53:39.000Z | 2020-07-14T12:53:39.000Z | #!/usr/bin/python3
"""
This is just used to debug or test commands quickly.
Refers to ../Doc/lynxmotion_ssc-32u_usb_user_guide.pdf
to learn how commands are formatted
"""
from values import *
from hexapod_connection import*
import sys
class Hexapod:
def __init__(self, mode):
self.connection = HexapodConnection(mode=mode)
self.start_prompt()
def start_prompt(self):
while True:
try:
command = input("$ ")
except:
self.connection.close()
exit(0)
self.send_command(command)
def send_command(self, command):
self.connection.send_command(command, 0)
if __name__ == '__main__':
if len(sys.argv) == 2 and sys.argv[1] == "--wire":
Hexapod("wire")
else:
Hexapod("wifi")
| 22.243243 | 54 | 0.605103 |
aced9b947e14776f1a4738279dcf151629de16db | 4,939 | py | Python | neo/Core/TX/ClaimTransaction.py | kartava/neo-python | accc431cbf8d5846c8bf1d6433f4e952f8731534 | [
"MIT"
] | null | null | null | neo/Core/TX/ClaimTransaction.py | kartava/neo-python | accc431cbf8d5846c8bf1d6433f4e952f8731534 | [
"MIT"
] | null | null | null | neo/Core/TX/ClaimTransaction.py | kartava/neo-python | accc431cbf8d5846c8bf1d6433f4e952f8731534 | [
"MIT"
] | null | null | null | from itertools import groupby
from neo.Core.TX.Transaction import TransactionType, Transaction
from neo.Core.Fixed8 import Fixed8
from neo.Core.Blockchain import Blockchain
from neo.Core.CoinReference import CoinReference
from neo.Core.Size import GetVarSize
from neo.logging import log_manager
logger = log_manager.getLogger()
class ClaimTransaction(Transaction):
Claims = set()
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
return super(ClaimTransaction, self).Size() + GetVarSize(self.Claims)
def __init__(self, *args, **kwargs):
"""
Create an instance.
Args:
*args:
**kwargs:
"""
super(ClaimTransaction, self).__init__(*args, **kwargs)
self.Type = TransactionType.ClaimTransaction
def NetworkFee(self):
"""
Get the network fee for a claim transaction.
Returns:
Fixed8: currently fixed to 0.
"""
return Fixed8(0)
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
Raises:
Exception: If the transaction type is incorrect or if there are no claims.
"""
self.Type = TransactionType.ClaimTransaction
if self.Version != 0:
raise Exception('Format Exception')
numrefs = reader.ReadVarInt()
claims = []
for i in range(0, numrefs):
c = CoinReference()
c.Deserialize(reader)
claims.append(c)
self.Claims = claims
if len(self.Claims) == 0:
raise Exception('Format Exception')
def GetScriptHashesForVerifying(self):
"""
Get a list of script hashes for verifying transactions.
Raises:
Exception: if there are no valid transactions to claim from.
Returns:
list: of UInt160 type script hashes.
"""
hashes = super(ClaimTransaction, self).GetScriptHashesForVerifying()
for hash, group in groupby(self.Claims, lambda x: x.PrevHash):
tx, height = Blockchain.Default().GetTransaction(hash)
if tx is None:
raise Exception("Invalid Claim Operation")
for claim in group:
if len(tx.outputs) <= claim.PrevIndex:
raise Exception("Invalid Claim Operation")
script_hash = tx.outputs[claim.PrevIndex].ScriptHash
if script_hash not in hashes:
hashes.append(script_hash)
hashes.sort()
return hashes
def SerializeExclusiveData(self, writer):
"""
Serialize object.
Args:
writer (neo.IO.BinaryWriter):
"""
writer.WriteSerializableArray(self.Claims)
def ToJson(self):
"""
Convert object members to a dictionary that can be parsed as JSON.
Returns:
dict:
"""
json = super(ClaimTransaction, self).ToJson()
json['claims'] = [claim.ToJson() for claim in self.Claims]
return json
def Verify(self, mempool):
"""
Verify the transaction.
Args:
mempool:
Returns:
bool: True if verified. False otherwise.
"""
if not super(ClaimTransaction, self).Verify(mempool):
return False
# wat does this do
# get all claim transactions from mempool list
# that are not this claim
# and gather all the claims of those claim transactions
# and see if they intersect the claims of this transaction
# and if that number is greater than zero that we do not verify
# (now, to do that in python)
# if (mempool.OfType < ClaimTransaction > ().Where(p => p != this).SelectMany(p= > p.Claims).Intersect(Claims).Count() > 0)
# return false;
        other_claim_txs = [tx for tx in mempool
                           if isinstance(tx, ClaimTransaction) and tx is not self]
        for other in other_claim_txs:
            # reject if any claim in this transaction is also claimed by
            # another claim transaction already in the mempool
            if any(claim in self.Claims for claim in other.Claims):
                return False
txResult = None
for tx in self.GetTransactionResults():
if tx.AssetId == Blockchain.SystemCoin().Hash:
txResult = tx
break
if txResult is None or txResult.Amount > Fixed8(0):
return False
try:
return Blockchain.CalculateBonusIgnoreClaimed(self.Claims, False) == -txResult.Amount
except Exception as e:
logger.error('Could not calculate bonus: %s ' % e)
return False
| 29.052941 | 131 | 0.589796 |
aced9ba936d041042d94ca33f46954ff1286dfca | 11,985 | py | Python | userbot/modules/virus.py | akborana/Devil | 30ef9c5ac910d6344e206921e343a0932ffd6460 | [
"MIT"
] | 1 | 2021-05-06T18:30:50.000Z | 2021-05-06T18:30:50.000Z | userbot/modules/virus.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null | userbot/modules/virus.py | hellboi-atul/javes-3.0 | 8777d482bd1ee877a96332a2cd84d880c151fa43 | [
"MIT"
] | null | null | null |
#telegram javes05
import asyncio
import math
import os
import time
import requests
import datetime
import asyncio
from telethon import events
from telethon.errors.rpcerrorlist import YouBlockedUserError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from userbot.events import javes05, rekcah05
import json, os
a = 0
try:
    from virustotal_python import Virustotal
    a = 1
except ImportError:
    a = 2
Vapi = os.environ.get("VTOTAL_API", None)
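# VTOTAL_API must hold a VirusTotal API key for !vscan to work, e.g. exported
# in the environment before starting the bot (illustrative value):
#   export VTOTAL_API="0123456789abcdef"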
from userbot import CMD_HELP, ALIVE_NAME, PM_MESSAGE, JAVES_NAME, JAVES_MSG, ORI_MSG, bot, TEMP_DOWNLOAD_DIRECTORY, BOTLOG, BOTLOG_CHATID
JAVES_NNAME = str(JAVES_NAME) if JAVES_NAME else str(JAVES_MSG)
javes = bot
from pathlib import Path
import json, os, subprocess, time, math, asyncio
from pySmartDL import SmartDL
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
from telethon.tl.types import DocumentAttributeVideo
from userbot import LOGS, CMD_HELP, TEMP_DOWNLOAD_DIRECTORY
text = "scanning"
async def progress(current, total, event, start, type_of_ps, file_name=None):
now = time.time()
diff = now - start
if round(diff % 10.00) == 0 or current == total:
percentage = current * 100 / total
speed = current / diff
elapsed_time = round(diff) * 1000
time_to_completion = round((total - current) / speed) * 1000
estimated_total_time = elapsed_time + time_to_completion
progress_str = "[{0}{1}] {2}%\n".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░" for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
tmp = progress_str + \
"{0} of {1}\nETA: {2}".format(
humanbytes(current),
humanbytes(total),
time_formatter(estimated_total_time)
)
if file_name:
await event.edit("{}\nFile Name: `{}`\n{}".format(
type_of_ps, file_name, tmp))
else:
await event.edit("{}\n{}".format(type_of_ps, tmp))
def humanbytes(size):
if not size:
return ""
power = 2**10
raised_to_pow = 0
dict_power_n = {0: "", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
while size > power:
size /= power
raised_to_pow += 1
return str(round(size, 2)) + " " + dict_power_n[raised_to_pow] + "B"
def time_formatter(milliseconds: int) -> str:
seconds, milliseconds = divmod(int(milliseconds), 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
days, hours = divmod(hours, 24)
tmp = ((str(days) + " day(s), ") if days else "") + \
((str(hours) + " hour(s), ") if hours else "") + \
((str(minutes) + " minute(s), ") if minutes else "") + \
((str(seconds) + " second(s), ") if seconds else "") + \
((str(milliseconds) + " millisecond(s), ") if milliseconds else "")
return tmp[:-2]
@javes05(pattern=r"^\!vscan(?: |$)(.*)", outgoing=True)
@javes.on(rekcah05(pattern=f"vscan(?: |$)(.*)", allow_sudo=True))
async def vt(event):
await event.edit(f"Analyzing Datas......")
input_str = event.pattern_match.group(1)
if not os.path.isdir(TEMP_DOWNLOAD_DIRECTORY):
os.makedirs(TEMP_DOWNLOAD_DIRECTORY)
if "|" in input_str:
url, file_name = input_str.split("|")
url = url.strip()
file_name = file_name.strip()
head, tail = os.path.split(file_name)
if head:
if not os.path.isdir(os.path.join(TEMP_DOWNLOAD_DIRECTORY, head)):
os.makedirs(os.path.join(TEMP_DOWNLOAD_DIRECTORY, head))
file_name = os.path.join(head, tail)
downloaded_file_name = TEMP_DOWNLOAD_DIRECTORY + "" + file_name
downloader = SmartDL(url, downloaded_file_name, progress_bar=False)
downloader.start(blocking=False)
c_time = time.time()
display_message = None
while not downloader.isFinished():
status = downloader.get_status().capitalize()
total_length = downloader.filesize if downloader.filesize else None
downloaded = downloader.get_dl_size()
now = time.time()
diff = now - c_time
percentage = downloader.get_progress() * 100
speed = downloader.get_speed()
elapsed_time = round(diff) * 1000
progress_str = "[{0}{1}] {2}%".format(
''.join(["█" for i in range(math.floor(percentage / 10))]),
''.join(["░"
for i in range(10 - math.floor(percentage / 10))]),
round(percentage, 2))
estimated_total_time = downloader.get_eta(human=True)
try:
current_message = f"{status}..\
\nURL: {url}\
\nFile Name: {file_name}\
\n{progress_str}\
\n{humanbytes(downloaded)} of {humanbytes(total_length)}\
\nETA: {estimated_total_time}"
if round(diff %
10.00) == 0 and current_message != display_message:
await event.edit(current_message)
display_message = current_message
except Exception as e:
LOGS.info(str(e))
if downloader.isSuccessful():
await event.edit(f"{text} \n\nDownloaded successfully !!")
else:
await event.edit("Incorrect URL\n{}".format(url))
elif event.reply_to_msg_id:
try:
c_time = time.time()
downloaded_file_name = await event.client.download_media(
await event.get_reply_message(),
TEMP_DOWNLOAD_DIRECTORY,
progress_callback=lambda d, t: asyncio.get_event_loop(
).create_task(
progress(d, t, event, c_time, f"{text} \n\nDownloading...")))
except Exception as e: # pylint:disable=C0103,W0703
await event.edit(str(e))
else:
await event.edit(f"{text} \n\nDownloaded successfully !!")
else:
return await event.edit(f"Error\n`Reply to a file to scan.`")
await event.edit(" `Scanning......`")
vscan = downloaded_file_name
if not vscan:
return await event.edit("`downloaded_file missing`")
try:
vtotal = Virustotal(Vapi)
except:
return await event.edit("Failed to connect virus total , is api key added? type `!help virus_scan` for more info")
try:
vr = vtotal.file_scan(vscan)
test = vr['json_resp'] ; link = test['permalink'] ; scan_id = test['scan_id'] ; response_code = test['response_code']
return await event.edit(""
f"• **Virus Total Response Code:** `{response_code}`\n"
f"• **Scan Results:** [ClickHere]({link}) ")
except:
url = "https://www.virustotal.com/vtapi/v2/file/scan"
params = {"apikey": Vapi}
files = {"file": (downloaded_file_name, open(downloaded_file_name, "rb"))}
response = requests.post(url, files=files, params=params)
try:
a = response.json()
b = a["permalink"]
except Exception as e:
await event.edit(str(e))
try:
await event.edit(
f"<b><u> File Scan Request Complete</u></b>\n\n<b>Link of the report:-</b>\n{b}\n\nNote:- Please open the link after 5-10 minutes.",
parse_mode="HTML",
)
except Exception as e:
await event.edit(str(e))
else:
await event.edit("Some Internal Issus")
@javes05(outgoing=True, pattern="^!scan2(?: |$)(.*)")
async def _(event):
reply_message = await event.get_reply_message()
if not reply_message or not event.reply_to_msg_id or not reply_message.media or not reply_message.media:
return await event.edit("```Reply to a media message```")
chat = "@DrWebBot"
sender = reply_message.sender
await event.edit(" `Scanning......`")
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=161163358))
await conv.send_message(reply_message)
song2 = await conv.get_response()
return await event.edit(f"**{JAVES_NNAME}:** {song2.message}")
except:
return await event.reply(f"Please unblock @DrWebBot and try again")
@javes.on(rekcah05(pattern=f"scan2(?: |$)(.*)", allow_sudo=True))
async def _(event):
reply_message = await event.get_reply_message()
if not reply_message or not event.reply_to_msg_id or not reply_message.media or not reply_message.media:
return await event.reply("```Reply to a media message```")
chat = "@DrWebBot"
sender = reply_message.sender
rkp = await event.reply(" `Scanning......`")
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=161163358))
await conv.send_message(reply_message)
song2 = await conv.get_response()
return await rkp.edit(f"**{JAVES_NNAME}:** {song2.message}")
except:
return await event.reply(f"Please unblock @DrWebBot and try again")
@javes05(outgoing=True, pattern="^!uscan(?: |$)(.*)")
async def _(event):
rksong = event.pattern_match.group(1)
if not rksong:
return await event.edit("`Give a link to scan.....`")
await event.edit(" `Scanning url.........`")
chat = "@DrWebBot"
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=161163358))
await conv.send_message(rksong)
song2 = await conv.get_response()
return await event.edit(f"**{JAVES_NNAME}**: {song2.message}")
except :
return await event.reply("Please unblock @DrWebBot and try again")
@javes.on(rekcah05(pattern=f"uscan(?: |$)(.*)", allow_sudo=True))
async def _(event):
rksong = event.pattern_match.group(1)
if not rksong:
return await event.reply("`Give a link to scan.....`")
rkp = await event.reply(" `Scanning url.........`")
chat = "@DrWebBot"
async with bot.conversation(chat) as conv:
try:
response = conv.wait_event(events.NewMessage(incoming=True,from_users=161163358))
await conv.send_message(rksong)
song2 = await conv.get_response()
return await rkp.edit(f"**{JAVES_NNAME}**: {song2.message}")
except :
return await event.reply("Please unblock @DrWebBot and try again")
CMD_HELP.update({
"virus_scan":
"`!scan <file path>`\
\n**Example:** `!scan reply to a file` \
\n**Usage:** Scan file in https://www.virustotal.com/gui/\
\n\n`!scan2 <reply to a message>`\
\n**Usage:** scan the file\
\n`!uscan <url>`\
\n**Usage:** scan the url\
    \n\n**All commands support sudo; type !help sudo for more info**\
"
})
| 38.66129 | 153 | 0.557363 |
aced9cb087c0787a61b0197819d47f3413ee23f0 | 1,657 | py | Python | util/third_party/tensorflow_extra/tool/tflite/tflite/ReverseSequenceOptions.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 82 | 2016-06-29T17:24:43.000Z | 2021-04-16T06:49:17.000Z | util/third_party/tensorflow_extra/tool/tflite/tflite/ReverseSequenceOptions.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | util/third_party/tensorflow_extra/tool/tflite/tflite/ReverseSequenceOptions.py | PascalGuenther/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 56 | 2016-08-02T10:50:50.000Z | 2021-07-19T08:57:34.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class ReverseSequenceOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsReverseSequenceOptions(cls, buf, offset=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = ReverseSequenceOptions()
x.Init(buf, n + offset)
return x
@classmethod
def ReverseSequenceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# ReverseSequenceOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# ReverseSequenceOptions
def SeqDim(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
# ReverseSequenceOptions
def BatchDim(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int32Flags, o + self._tab.Pos)
return 0
def ReverseSequenceOptionsStart(builder): builder.StartObject(2)
def ReverseSequenceOptionsAddSeqDim(builder, seqDim): builder.PrependInt32Slot(0, seqDim, 0)
def ReverseSequenceOptionsAddBatchDim(builder, batchDim): builder.PrependInt32Slot(1, batchDim, 0)
def ReverseSequenceOptionsEnd(builder): return builder.EndObject()
| 36.822222 | 114 | 0.726011 |
aced9cb467350f1485f323992a85202a023f805b | 1,861 | py | Python | data_engine/rebuild_dataset_from_config.py | angelnm/nmt-keras-click | 0deb0a4f23be3d77fa914372123627f504edd2b0 | [
"MIT"
] | 16 | 2019-03-07T19:33:09.000Z | 2021-01-07T02:10:09.000Z | data_engine/rebuild_dataset_from_config.py | angelnm/nmt-keras-click | 0deb0a4f23be3d77fa914372123627f504edd2b0 | [
"MIT"
] | null | null | null | data_engine/rebuild_dataset_from_config.py | angelnm/nmt-keras-click | 0deb0a4f23be3d77fa914372123627f504edd2b0 | [
"MIT"
] | 8 | 2019-05-15T10:49:18.000Z | 2021-04-12T03:28:47.000Z | # -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import logging
import ast
logging.basicConfig(level=logging.INFO, format='[%(asctime)s] %(message)s', datefmt='%d/%m/%Y %H:%M:%S')
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser("Rebuilds a dataset object from a given config instance.")
parser.add_argument("-c", "--config", required=False, help="Config pkl for loading the model configuration. "
"If not specified, hyperparameters "
"are read from config.py")
parser.add_argument("-ch", "--changes", nargs="*", help="Changes to the config. Following the syntax Key=Value",
default="")
return parser.parse_args()
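# Example invocation (file names illustrative): rebuild the dataset from a
# stored config while overriding two hyperparameters.
#   python rebuild_dataset_from_config.py -c trained_model/config.pkl \
#       -ch BATCH_SIZE=32 VERBOSE=1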
if __name__ == "__main__":
args = parse_args()
if args.config is None:
logger.info("Reading parameters from config.py")
from config import load_parameters
params = load_parameters()
else:
logger.info("Loading parameters from %s" % str(args.config))
from keras_wrapper.extra.read_write import pkl2dict
params = pkl2dict(args.config)
try:
for arg in args.changes:
try:
k, v = arg.split('=')
except ValueError:
                print('Overwritten arguments must have the form Key=Value.\nCurrently they are: %s' % str(args.changes))
exit(1)
try:
params[k] = ast.literal_eval(v)
except ValueError:
params[k] = v
except ValueError:
        print('Error processing argument: (%s, %s)' % (k, v))
exit(2)
params['REBUILD_DATASET'] = True
from prepare_data import build_dataset
dataset = build_dataset(params)
| 38.770833 | 118 | 0.58302 |
aced9d0261c5b2d5e96491a5d94d83973fc5c999 | 102 | py | Python | sfftkplus/__init__.py | emdb-empiar/sfftk-plus | 7ceca24b78c540169bddb3fd433b4aed050f40ec | [
"Apache-2.0"
] | null | null | null | sfftkplus/__init__.py | emdb-empiar/sfftk-plus | 7ceca24b78c540169bddb3fd433b4aed050f40ec | [
"Apache-2.0"
] | null | null | null | sfftkplus/__init__.py | emdb-empiar/sfftk-plus | 7ceca24b78c540169bddb3fd433b4aed050f40ec | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
BASE_DIR = os.path.dirname(__file__)
SFFTKPLUS_VERSION = 'v0.4.7'
| 14.571429 | 36 | 0.666667 |
aced9e4fc5a33caf12311da57c2cf401b563b694 | 2,202 | py | Python | seq2seq/data_provider.py | wondervictor/DeepLearningWithPaddle | 55fa817584fd207430e083c20cfadb18727209e1 | [
"MIT"
] | 5 | 2017-06-20T06:13:01.000Z | 2021-04-21T03:54:08.000Z | seq2seq/data_provider.py | GuangyanZhang/DeepLearningWithPaddle | 55fa817584fd207430e083c20cfadb18727209e1 | [
"MIT"
] | null | null | null | seq2seq/data_provider.py | GuangyanZhang/DeepLearningWithPaddle | 55fa817584fd207430e083c20cfadb18727209e1 | [
"MIT"
] | 2 | 2017-07-16T03:19:48.000Z | 2019-01-16T05:33:20.000Z | # -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2017 Vic Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
__all__ = ['create_reader']
def open_file(path):
data = []
with open(path, 'r') as ff:
lines = ff.readlines()
for line in lines:
line = line.rstrip('\r\n')
            seq = list(map(int, line.split(',')))  # list() so concatenation below works on Python 3
data.append(seq)
return data
GO_ID = 1
EOS_ID = 2
# def test():
# answer_path = 'data/train_answers'
# question_path = 'data/train_questions'
#
# answers = open_file(answer_path)
# questions = open_file(question_path)
#
# test()
def create_reader(is_train=True):
def reader():
if is_train:
answer_path = 'data/train_answers'
question_path = 'data/train_questions'
size = 20000
else:
answer_path = 'data/test_answers'
question_path = 'data/test_questions'
size = 9000
questions = open_file(question_path)
answers = open_file(answer_path)
for i in range(size):
yield ([GO_ID]+questions[i]+[EOS_ID]), \
([GO_ID]+answers[i]), \
(answers[i]+[EOS_ID])
return reader
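# Usage sketch with the PaddlePaddle v1 reader API (batch size illustrative):
#   train_reader = paddle.batch(create_reader(is_train=True), batch_size=64)
# Each sample is (source ids with GO/EOS, target input starting with GO,
# target labels ending in EOS).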
| 31.014085 | 78 | 0.670754 |
aced9e940ac962c44f4ca150fcac91218fcadcd6 | 425 | py | Python | panflute-dash-vertical.py | ayhy/pandoc-filters-ja-util | da0088e640dc9cf678613c8c8b9ecb8570135aa5 | [
"MIT"
] | null | null | null | panflute-dash-vertical.py | ayhy/pandoc-filters-ja-util | da0088e640dc9cf678613c8c8b9ecb8570135aa5 | [
"MIT"
] | null | null | null | panflute-dash-vertical.py | ayhy/pandoc-filters-ja-util | da0088e640dc9cf678613c8c8b9ecb8570135aa5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
- Pandoc filter to convert the horizontal bar ― into the vertical line │,
  so that a double dash ―― renders as one continuous line in vertical text.
"""
from panflute import run_filter, Str
import re
def dash_to_vertical(elem, doc):
if type(elem) == Str:
elem.text = elem.text.replace("―","│")
return elem
def main(doc=None):
    return run_filter(dash_to_vertical, doc=doc)
if __name__ == "__main__":
main() | 18.478261 | 57 | 0.625882 |
aced9eb78f31d9ec8a44a90874055da69975d9dc | 1,155 | py | Python | whatistheplan/views/user_login.py | Cookie150CC/whatistheplan.com | bcee8f769a0e820b4bc8f619b3fb118fd6f1e68c | [
"MIT"
] | 5 | 2015-04-06T16:56:20.000Z | 2017-03-27T15:34:12.000Z | whatistheplan/views/user_login.py | Cookie150CC/whatistheplan.com | bcee8f769a0e820b4bc8f619b3fb118fd6f1e68c | [
"MIT"
] | 48 | 2015-04-03T23:15:42.000Z | 2018-10-05T19:08:50.000Z | whatistheplan/views/user_login.py | Cookie150CC/whatistheplan.com | bcee8f769a0e820b4bc8f619b3fb118fd6f1e68c | [
"MIT"
] | 7 | 2015-04-10T20:50:17.000Z | 2018-09-07T18:28:09.000Z | """view controller for user login route"""
from django.template import RequestContext
from django.shortcuts import render
from django.contrib.auth import authenticate, login
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
def user_login(request):
"""Log in view"""
if request.method == 'POST':
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user:
if user.is_active:
login(request, user)
return HttpResponseRedirect(reverse('Home'))
else:
return render(
request,
'login.html',
{'login_msg': 'user is inactive, please talk to an admin.'}
)
else:
return render(
request,
'login.html',
{'login_msg': 'invalid credentials!'}
)
else:
return render(
request,
'login.html',
{'login_msg': 'please log in:'}
)
| 30.394737 | 79 | 0.547186 |
aced9ec6791f90094fbb87101636ae46b11f99c3 | 6,466 | py | Python | home.py | krbundy/GradeApp116 | 6248ae42bd189a84aac0c3ab74181bc154d66f8a | [
"MIT"
] | null | null | null | home.py | krbundy/GradeApp116 | 6248ae42bd189a84aac0c3ab74181bc154d66f8a | [
"MIT"
] | null | null | null | home.py | krbundy/GradeApp116 | 6248ae42bd189a84aac0c3ab74181bc154d66f8a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 12:17:13 2020
@author: kenne
"""
from wtforms import (Form, validators, SubmitField, DecimalField)
import numpy as np
from flask import Flask
from flask import request
from flask import render_template
class ReusableForm(Form):
#Grade entries
test_one_score = DecimalField("Enter First Exam Percentage",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
test_two_score = DecimalField("Enter Second Exam Percentage",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
test_three_score = DecimalField("Enter Third Exam Percentage",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
test_four_score = DecimalField("Enter Fourth Exam Percentage",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
final_exam_score = DecimalField("Enter Final Exam Percentage",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
quiz_average = DecimalField("Enter Average Quiz Grade",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
homework_average = DecimalField("Enter Average Homework Grade",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
attendance_score = DecimalField("Enter Attendance Grade",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
video_quiz_average = DecimalField("Enter Video Quiz Average",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
project_score = DecimalField("Enter Project Average",
validators=[validators.InputRequired(),
validators.NumberRange(min=0.0,
max=120.0,
                                                             message = 'Score must be between 0 and 120')])
#Submit button
submit = SubmitField("Calculate")
app=Flask(__name__)
#Homepage for the app
@app.route("/",methods=['GET','POST'])
def home():
form=ReusableForm(request.form)
if request.method=='POST' and form.validate():
#Extract all of the data fields from the webform
exam_one_score = request.form['test_one_score']
exam_two_score = request.form['test_two_score']
exam_three_score = request.form['test_three_score']
exam_four_score = request.form['test_four_score']
final_exam_score = request.form['final_exam_score']
attendance_score = request.form['attendance_score']
homework_average = request.form['homework_average']
quiz_average = request.form['quiz_average']
video_quiz_average = request.form['video_quiz_average']
project_score = request.form['project_score']
#grades = np.array((exam_one_score,exam_two_score,exam_three_score,exam_four_score,final_exam_score,
# homework_average,quiz_average,attendance_score),dtype=np.float32)
#
#weights = np.array((0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.1),dtype=np.float32)
course_grade = float(np.dot(np.array((exam_one_score,exam_two_score,exam_three_score,exam_four_score,final_exam_score,
homework_average,quiz_average,attendance_score,video_quiz_average,project_score),dtype=np.float32).reshape((1,10)),
np.array((0.1,0.1,0.1,0.1,0.2,0.1,0.1,0.05,0.05,0.1),dtype=np.float32).reshape((10,1))))
return render_template('filled.html', input=str(course_grade))
return render_template('index.html',form=form)
#app.run(host='0.0.0.0',port=5000)
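# Worked example (illustrative, not wired into the app): the weight vector
# above sums to 1.0 (4*0.1 + 0.2 + 2*0.1 + 2*0.05 + 0.1), so uniform scores
# pass through unchanged.
def _example_course_grade():
    grades = np.full((1, 10), 80.0, dtype=np.float32)
    weights = np.array((0.1, 0.1, 0.1, 0.1, 0.2, 0.1, 0.1, 0.05, 0.05, 0.1),
                       dtype=np.float32).reshape((10, 1))
    return float(np.dot(grades, weights))  # ~80.0 (up to float32 rounding)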
| 50.124031 | 144 | 0.444943 |
aced9f56831313a757373db0d29115d3401ad978 | 4,927 | py | Python | tests/test_lag_2.py | starlant/sonic-mgmt | 14338a9199648a89bfc6d1cb1670e0406abbc0f1 | [
"Apache-2.0"
] | null | null | null | tests/test_lag_2.py | starlant/sonic-mgmt | 14338a9199648a89bfc6d1cb1670e0406abbc0f1 | [
"Apache-2.0"
] | null | null | null | tests/test_lag_2.py | starlant/sonic-mgmt | 14338a9199648a89bfc6d1cb1670e0406abbc0f1 | [
"Apache-2.0"
] | null | null | null | import pytest
import json
import time
import logging
import os
from ptf_runner import ptf_runner
from common.devices import AnsibleHostBase
@pytest.fixture(scope="module")
def common_setup_teardown(duthost, ptfhost, testbed, conn_graph_facts):
logging.info("########### Setup for lag testing ###########")
lag_facts = duthost.lag_facts(host = duthost.hostname)['ansible_facts']['lag_facts']
if lag_facts['names'] == []:
pytest.skip("No lag configuration found in %s" % duthost.hostname)
mg_facts = duthost.minigraph_facts(host=duthost.hostname)['ansible_facts']
logging.info("dut hostname: %s" % mg_facts)
vm_neighbors = mg_facts['minigraph_neighbors']
# Copy PTF test into PTF-docker for test LACP DU
test_files = ['lag_test.py', 'acs_base_test.py', 'router_utils.py']
for test_file in test_files:
src = "../ansible/roles/test/files/acstests/%s" % test_file
dst = "/tmp/%s" % test_file
ptfhost.copy(src=src, dest=dst)
# Copy tests to the PTF-docker
ptfhost.copy(src="ptftests", dest="/root")
    # Include testbed topology configuration
testbed_type = testbed['topo']['name']
support_testbed_types = frozenset(['t1-lag', 't0', 't0-116'])
if testbed_type not in support_testbed_types:
pytest.skip("Not support given test bed type %s" % testbed_type)
yield ptfhost, testbed, vm_neighbors, mg_facts, lag_facts
def test_lag_2(common_setup_teardown, nbrhosts):
ptfhost, testbed, vm_neighbors, mg_facts, lag_facts = common_setup_teardown
# Test for each lag
for lag_name in lag_facts['names']:
check_single_lag_lacp_rate(common_setup_teardown, nbrhosts, lag_name)
def check_single_lag_lacp_rate(common_setup_teardown, nbrhosts, lag_name):
ptfhost, testbed, vm_neighbors, mg_facts, lag_facts = common_setup_teardown
logging.info("Start checking single lap lacp rate for: %s" % lag_name)
po_interfaces = lag_facts['lags'][lag_name]['po_config']['ports']
    intf = list(lag_facts['lags'][lag_name]['po_config']['ports'].keys())[0]  # list() keeps this working on Python 3
# Figure out remote VM and interface info
peer_device = vm_neighbors[intf]['name']
    # Collect the PTF port indices behind the remote VM's LAG members; the PTF docker uses them to check that the LACP DU packet rate is correct
iface_behind_lag_member = []
for neighbor_int in mg_facts['minigraph_neighbors'].keys():
if peer_device == mg_facts['minigraph_neighbors'][neighbor_int]['name']:
iface_behind_lag_member.append(mg_facts['minigraph_port_indices'][neighbor_int])
neighbor_lag_intfs = []
for po_interface in po_interfaces:
neighbor_lag_intfs.append(vm_neighbors[po_interface]['port'])
try:
lag_rate_current_setting = None
        # Get the VM host (vEOS) by its hostname
vm_host = nbrhosts[peer_device]
# Make sure all lag members on VM are set to fast
logging.info("Changing lacp rate to fast for %s" % neighbor_lag_intfs[0])
set_interface_lacp_rate(vm_host, neighbor_lag_intfs[0], 'fast')
lag_rate_current_setting = 'fast'
time.sleep(5)
for iface_behind_lag in iface_behind_lag_member:
verify_lag_lacp_timing(ptfhost, peer_device, 1, iface_behind_lag)
# Make sure all lag members on VM are set to slow
set_interface_lacp_rate(vm_host, neighbor_lag_intfs[0], 'normal')
lag_rate_current_setting = 'slow'
time.sleep(5)
for iface_behind_lag in iface_behind_lag_member:
verify_lag_lacp_timing(ptfhost, peer_device, 30, iface_behind_lag)
finally:
# Restore lag rate setting on VM in case of failure
if lag_rate_current_setting == 'fast':
set_interface_lacp_rate(vm_host, neighbor_lag_intfs[0], 'normal')
def verify_lag_lacp_timing(ptfhost, vm_name, lacp_timer, exp_iface):
if exp_iface is None:
return
# Check LACP timing
params = {
'exp_iface': exp_iface,
'timeout': 35,
'packet_timing': lacp_timer,
'ether_type': 0x8809,
'interval_count': 3
}
ptf_runner(ptfhost, '/tmp', "lag_test.LacpTimingTest", '/root/ptftests', params=params)
@pytest.fixture(scope="module")
def conn_graph_facts(testbed_devices):
dut = testbed_devices["dut"]
return get_conn_graph_facts(testbed_devices, dut.hostname)
def get_conn_graph_facts(testbed_devices, host):
localhost = testbed_devices["localhost"]
base_path = os.path.dirname(os.path.realpath(__file__))
lab_conn_graph_file = os.path.join(base_path, "../ansible/files/lab_connection_graph.xml")
result = localhost.conn_graph_facts(host=host, filename=lab_conn_graph_file)['ansible_facts']
return result
def set_interface_lacp_rate(vm_host, intf, mode):
vm_host.eos_config(
lines=['lacp rate %s' % mode],
parents='interface %s' % intf)
logging.info("Set interface [%s] lacp rate to [%s]" % (intf, mode)) | 39.103175 | 111 | 0.701441 |
aced9fc4d8340888a001a76b3a4997aeb8180fb3 | 12,518 | py | Python | pyhgvs/models/variants.py | SACGF/hgvs | eef2514c916d3f3a8909f48fca5f3021fa27cfef | [
"MIT"
] | null | null | null | pyhgvs/models/variants.py | SACGF/hgvs | eef2514c916d3f3a8909f48fca5f3021fa27cfef | [
"MIT"
] | null | null | null | pyhgvs/models/variants.py | SACGF/hgvs | eef2514c916d3f3a8909f48fca5f3021fa27cfef | [
"MIT"
] | null | null | null | """
Methods for manipulating genetic variants.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
_COMP = dict(A='T', C='G', G='C', T='A', N='N',
a='t', c='g', g='c', t='a', n='n')
class Position(object):
"""A position in the genome."""
def __init__(self, chrom, chrom_start, chrom_stop, is_forward_strand):
self.chrom = chrom
self.chrom_start = chrom_start
self.chrom_stop = chrom_stop
self.is_forward_strand = is_forward_strand
def __repr__(self):
return "<Position %s[%d:%d]>" % (
self.chrom, self.chrom_start, self.chrom_stop)
def revcomp(seq):
"""Reverse complement."""
return ''.join(_COMP[base] for base in reversed(seq))
def get_sequence(genome, chrom, start, end, is_forward_strand=True):
"""Return a sequence for the genomic region.
Coordinates are 0-based, end-exclusive.
"""
# Prevent fetching negative coordinates.
start = max(start, 0)
if start >= end:
return ''
else:
seq = genome[str(chrom)][start:end]
if not is_forward_strand:
seq = -seq
return str(seq).upper()
def get_sequence_from_position(genome, position):
"""Return a sequence for the genomic region
Position is 0-based, end-exclusive.
"""
return get_sequence(genome, position.chrom,
position.chrom_start, position.chrom_stop,
position.is_forward_strand)
def justify_indel(start, end, indel, seq, justify):
"""
Justify an indel to the left or right along a sequence 'seq'.
start, end: 0-based, end-exclusive coordinates of 'indel' within the
sequence 'seq'. Inserts denote the insertion point using start=end
and deletions indicate the deleted region with (start,end).
indel: indel sequence, can be insertion or deletion.
seq: a larger sequence containing the indel. Can be a fragment from the
genome.
justify: Which direction to justify the indel ('left', 'right').
"""
# No justification needed for empty indel.
if len(indel) == 0:
return start, end, indel
if justify == 'left':
while start > 0 and seq[start - 1] == indel[-1]:
seq_added = seq[start - 1]
indel = seq_added + indel[:-1]
start -= 1
end -= 1
elif justify == 'right':
while end < len(seq) and seq[end] == indel[0]:
seq_added = seq[end]
indel = indel[1:] + seq_added
start += 1
end += 1
else:
raise ValueError('unknown justify "%s"' % justify)
return start, end, indel
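# Worked example (illustrative): shifting a 1bp deletion through the
# homopolymer run of 'CAAAT'. Deleting seq[3:4] left-justifies to seq[1:2]
# and right-justifies back again.
def _example_justify_indel():
    seq = 'CAAAT'
    assert justify_indel(3, 4, 'A', seq, 'left') == (1, 2, 'A')
    assert justify_indel(1, 2, 'A', seq, 'right') == (3, 4, 'A')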
def justify_genomic_indel(genome, chrom, start, end, indel, justify,
flank_length=20):
"""
start, end: 0-based, end-exclusive coordinates of 'indel'.
"""
ref_len = end - start
while True:
seq_start = max(start - flank_length, 0)
indel_len = len(indel)
fetch_len = indel_len + 2 * flank_length
seq = get_sequence(
genome, chrom, seq_start, seq_start + fetch_len)
seq_end = seq_start + len(seq)
if seq_end <= end and justify == 'right':
# Indel is at end of chromosome, cannot justify right any further.
return start, end, indel
chrom_end = seq_end if seq_end < seq_start + fetch_len else 1e100
# Get coordinates of indel within seq.
indel_start = flank_length
indel_end = flank_length + indel_len
indel_start, indel_end, indel = justify_indel(
indel_start, indel_end, indel, seq, justify)
# Get indel coordinates with chrom.
start = seq_start + indel_start
end = start + ref_len
if ((indel_start > 0 or seq_start == 0) and
(indel_end < len(seq) or seq_end == chrom_end)):
return start, end, indel
# Since indel was justified to edge of seq, see if more justification
# can be done.
def normalize_variant(chrom, offset, ref_sequence, alt_sequences, genome,
justify='left', flank_length=30, indels_start_with_same_base=True):
"""
Normalize variant according to the GATK/VCF standard.
    chrom: chromosome containing variant.
offset: 1-based coordinate of reference allele in the genome.
ref_sequence: reference allele.
alt_sequences: list of all alternate sequences.
    genome: pygr-compatible genome object.
"""
start = offset - 1
end = start + len(ref_sequence)
position = Position(
chrom=chrom,
chrom_start=start,
chrom_stop=end,
is_forward_strand=True)
return NormalizedVariant(position, ref_sequence, alt_sequences,
genome=genome, justify=justify, indels_start_with_same_base=indels_start_with_same_base)
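# Minimal sketch (illustrative): on the forward strand, any mapping supporting
# genome[chrom][start:end] works as the genome, so a plain dict of strings is
# enough here. A right-shifted 'AT' deletion is left-aligned and 1bp-padded.
def _example_normalize_variant():
    genome = {'chr1': 'G' * 20 + 'C' + 'AT' * 4 + 'C'}
    norm = normalize_variant('chr1', 25, 'TAT', ['T'], genome)
    assert norm.variant == ('chr1', 21, 'CAT', ['C'])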
class NormalizedVariant(object):
"""
Normalizes variant representation to match GATK/VCF.
"""
def __init__(self, position, ref_allele, alt_alleles,
seq_5p='', seq_3p='', genome=None, justify='left',
indels_start_with_same_base=True):
"""
        position: a 0-indexed genomic Position.
ref_allele: the reference allele sequence.
alt_alleles: a list of alternate allele sequences.
seq_5p: 5 prime flanking sequence of variant.
seq_3p: 3 prime flanking sequence of variant.
genome: a pygr compatible genome object (optional).
        indels_start_with_same_base: DML - the reason this is required is
            unclear; kept for backwards compatibility.
"""
self.position = position
self.alleles = [ref_allele] + list(alt_alleles)
self.seq_5p = seq_5p
self.seq_3p = seq_3p
self.genome = genome
self.log = []
self.indels_start_with_same_base = indels_start_with_same_base
self._on_forward_strand()
self._trim_common_prefix()
self._trim_common_suffix()
self._align(justify)
self._1bp_pad()
self._set_1based_position()
def _on_forward_strand(self):
"""
Ensure variant is on forward strand.
"""
if not self.position.is_forward_strand:
self.log.append('flip strand')
seq_5p = self.seq_5p
seq_3p = self.seq_3p
self.seq_5p = revcomp(seq_3p)
self.seq_3p = revcomp(seq_5p)
self.alleles = [revcomp(allele) for allele in self.alleles]
def _trim_common_prefix(self):
"""
Trim the common prefix amongst all alleles.
"""
minlength = min(map(len, self.alleles))
common_prefix = 0
for i in range(minlength):
if len(set(allele[i] for allele in self.alleles)) > 1:
# Not all alleles match at this site, so common prefix ends.
break
common_prefix = i + 1
# Remove common prefix from all alleles.
if common_prefix:
self.log.append('trim common prefix')
self.position.chrom_start += common_prefix
self.seq_5p += self.alleles[0][:common_prefix]
for i, allele in enumerate(self.alleles):
self.alleles[i] = allele[common_prefix:]
def _trim_common_suffix(self):
"""
Trim the common suffix amongst all alleles.
"""
minlength = min(map(len, self.alleles))
common_suffix = 0
for i in range(1, minlength + 1):
if len(set(allele[-i] for allele in self.alleles)) > 1:
# Not all alleles match at this site, so common suffix ends.
break
common_suffix = i
# Remove common prefix from all alleles.
if common_suffix:
self.log.append('trim common suffix')
self.position.chrom_stop -= common_suffix
self.seq_3p = self.alleles[0][-common_suffix:] + self.seq_3p
for i, allele in enumerate(self.alleles):
self.alleles[i] = allele[:-common_suffix]
def _align(self, justify):
"""
Align variant as far to the left or right as possible.
"""
# Aligning only makes sense for INDELs.
if self.molecular_class != "INDEL":
return
# Identify the inserted or deleted sequence.
alleles_with_seq = [i for i, allele in enumerate(self.alleles)
if allele]
# Can only left-align biallelic, non ins-plus-del indels.
if len(alleles_with_seq) == 1:
i = alleles_with_seq[0]
allele = self.alleles[i]
if self.genome:
start, end, allele = justify_genomic_indel(
self.genome, self.position.chrom,
self.position.chrom_start, self.position.chrom_stop,
allele, justify)
# if right-aligning an insertion, insert at the end
if justify == 'right' and i != 0:
start += len(allele)
end += len(allele)
self.position.chrom_start = start
self.position.chrom_stop = end
flank_length = 30
self.seq_5p = get_sequence(self.genome, self.position.chrom,
start - flank_length, start)
self.seq_3p = get_sequence(self.genome, self.position.chrom,
end, end + flank_length)
self.alleles[i] = allele
else:
offset = len(self.seq_5p)
offset2, _, allele = justify_indel(
offset, offset, allele, self.seq_5p, justify)
delta = offset - offset2
if delta > 0:
self.position.chrom_start -= delta
self.position.chrom_stop -= delta
self.seq_5p = self.seq_5p[:-delta]
seq = self.ref_allele + self.seq_3p
self.seq_3p = seq[:delta] + self.seq_3p
self.alleles[i] = allele
def _1bp_pad(self):
"""
Ensure no alleles are the empty string by padding to the left 1bp.
"""
# Padding is only required for INDELs.
if self.molecular_class != "INDEL":
return
# Pad sequences with one 5-prime base before the mutation event.
empty_seq = any(not allele for allele in self.alleles)
uniq_starts = set(allele[0] for allele in self.alleles if allele)
if empty_seq or (self.indels_start_with_same_base and len(uniq_starts) > 1):
# Fetch more 5p flanking sequence if needed.
if self.genome and self.seq_5p == '':
start = self.position.chrom_start
self.seq_5p = get_sequence(
self.genome, self.position.chrom, start - 5, start)
self.log.append('1bp pad')
if self.seq_5p:
for i, allele in enumerate(self.alleles):
self.alleles[i] = self.seq_5p[-1] + self.alleles[i]
self.seq_5p = self.seq_5p[:-1]
self.position.chrom_start -= 1
else:
# According to VCF standard, if there is no 5prime sequence,
# use 3prime sequence instead.
assert self.seq_3p
for i, allele in enumerate(self.alleles):
self.alleles[i] = self.alleles[i] + self.seq_3p[0]
self.seq_3p = self.seq_3p[1:]
self.position.chrom_stop += 1
if self.indels_start_with_same_base:
if len(set(a[0] for a in self.alleles)) != 1:
raise AssertionError(
"All INDEL alleles should start with same base.")
def _set_1based_position(self):
"""
Convert to 1-based end-inclusive coordinates.
"""
self.position.chrom_start += 1
@property
def molecular_class(self):
for allele in self.alleles:
if len(allele) != 1:
return 'INDEL'
return 'SNP'
@property
def ref_allele(self):
return self.alleles[0]
@property
def alt_alleles(self):
return sorted(self.alleles[1:])
@property
def variant(self):
return (self.position.chrom, self.position.chrom_start,
self.ref_allele, self.alt_alleles)
| 35.971264 | 117 | 0.581323 |
aceda205c32b2386ab719fd5203f336f5742ea3b | 4,308 | py | Python | numericclub/settings.py | GiggleLiu/numericclub | 93e1453eff5379f68a006a827006425f8ffc59d2 | [
"Apache-2.0"
] | 1 | 2018-03-09T06:50:03.000Z | 2018-03-09T06:50:03.000Z | numericclub/settings.py | GiggleLiu/numericclub | 93e1453eff5379f68a006a827006425f8ffc59d2 | [
"Apache-2.0"
] | 1 | 2018-03-16T05:37:14.000Z | 2018-03-16T13:31:06.000Z | numericclub/settings.py | GiggleLiu/numericclub | 93e1453eff5379f68a006a827006425f8ffc59d2 | [
"Apache-2.0"
] | null | null | null | """
Django settings for numericclub project.
Generated by 'django-admin startproject' using Django 2.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'w_l10&nehl^++%b&l+jo=7@@$2ybx3dq=w*!%pk5xzvm-@wlut'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEBUG_PROPAGATE_EXCEPTIONS = True
ALLOWED_HOSTS = ['localhost', '127.0.0.1', '[::1]', '.v2nobel.com', '121.40.82.247', '172.96.235.144', '.v2nobel.xyz']
# Application definition
INSTALLED_APPS = [
'topics.apps.TopicsConfig',
'talks.apps.TalksConfig',
'being.apps.BeingConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrapform',
'mathfilters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'numericclub.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'numericclub.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'  # a URL prefix, not a filesystem path
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'  # a URL prefix, not a filesystem path
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
AUTH_USER_MODEL = 'being.AdvancedUser'
THUMB_SIZE = (100,100)
#### EMAIL #####
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_PORT = 25
EMAIL_BLOCK_LIST = ['aadddss@sina.com', 'dfc@dfc.com']
# Local
#EMAIL_USE_TLS = False
#EMAIL_HOST = 'localhost'
#EMAIL_HOST_USER = ''
#EMAIL_HOST_PASSWORD = ''
#DEFAULT_FROM_EMAIL = 'Server <server@v2nobel.com>'
# QQ
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.qq.com'
EMAIL_HOST_USER = '602668429@qq.com'
EMAIL_HOST_PASSWORD = 'jnbxjmmfjmsgbfbj'
DEFAULT_FROM_EMAIL = 'NumericClub <602668429@qq.com>'
# Sina
#EMAIL_USE_TLS = False
#EMAIL_HOST = 'smtp.sina.com'
#EMAIL_HOST_USER = 'aadddss@sina.com'
#EMAIL_HOST_PASSWORD = 'cacate0129'
#DEFAULT_FROM_EMAIL = "NumericClub <aadddss@sina.com>"
| 26.592593 | 118 | 0.70195 |
aceda20e8fcfb5550f0a35f3088b710f39639a23 | 1,364 | py | Python | api_connection.py | wangejay/smartoffice2 | a842b2f6b93a8c4fd536c67f4dd4b279c7e0352b | [
"Apache-2.0"
] | null | null | null | api_connection.py | wangejay/smartoffice2 | a842b2f6b93a8c4fd536c67f4dd4b279c7e0352b | [
"Apache-2.0"
] | null | null | null | api_connection.py | wangejay/smartoffice2 | a842b2f6b93a8c4fd536c67f4dd4b279c7e0352b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os.path
import sys
import json
try:
import apiai
except ImportError:
sys.path.append(
os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)
)
import apiai
CLIENT_ACCESS_TOKEN = '9837df6bcc2a435cbcfac3698d24db42'
def main():
ai = apiai.ApiAI(CLIENT_ACCESS_TOKEN)
request = ai.text_request()
    request.lang = 'en'  # optional; defaults to 'en'
# request.session_id = "<SESSION ID, UNIQUE FOR EACH USER>"
    if len(sys.argv) > 1:  # guard against a missing CLI argument
request.query = sys.argv[1]
else:
request.query = "how to save the power"
response = request.getresponse()
data_string= response.read()
print (data_string)
data = json.loads(data_string)
print (data["result"]["parameters"]["date"])
print (data["result"])
print (data["id"])
id_test=data["id"]
print (id_test[3:5])
date_test= str(data["result"]["parameters"]["date"])
date_string =date_test[3:13]
print (date_string)
any_test= str(data["result"]["parameters"]["any"])
any_string =any_test
print (any_string)
    if len(sys.argv) > 1:
        print(sys.argv[1])
p_comment= "python /Users/wangejay/Github/smartoffice/calendar_manage.py "+ date_string +" "+any_string
os.system(p_comment)
if __name__ == '__main__':
main() | 22.733333 | 107 | 0.643695 |
aceda2b3c2af3b2935693cdaa33858e54a65a5a4 | 3,988 | py | Python | queue-health/poll/poller.py | crassirostris/test-infra | 25db62f5a275343e395f5d2032f112e880be4d6f | [
"Apache-2.0"
] | null | null | null | queue-health/poll/poller.py | crassirostris/test-infra | 25db62f5a275343e395f5d2032f112e880be4d6f | [
"Apache-2.0"
] | 1 | 2021-03-20T05:41:39.000Z | 2021-03-20T05:41:39.000Z | queue-health/poll/poller.py | crassirostris/test-infra | 25db62f5a275343e395f5d2032f112e880be4d6f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cStringIO
import datetime
import pprint
import subprocess
import sys
import time
import traceback
import requests
def get_submit_queue_json(path):
for n in range(3):
uri = 'https://submit-queue.k8s.io/%s' % path
print >>sys.stderr, 'GET %s' % uri
resp = requests.get(uri, allow_redirects=True)
if resp.ok:
break
time.sleep(2**n)
resp.raise_for_status()
return resp.json()
def is_blocked():
ci = get_submit_queue_json('health')
return ci['MergePossibleNow'] != True
def get_stats():
stats = get_submit_queue_json('sq-stats')
return stats['Initialized'] == True, stats['MergesSinceRestart']
def poll():
prs = get_submit_queue_json('prs')
e2e = get_submit_queue_json('github-e2e-queue')
online, merge_count = get_stats()
return (
online, # Is mergebot initialized?
len(prs['PRStatus']), # number of open PRs
len(e2e['E2EQueue']), # number of items in the e2e queue
len(e2e['E2ERunning']), # Worthless: number of keys in this dict.
is_blocked(), # Whether we can merge
merge_count, # Number of merges the bot has done
)
def load_stats(uri):
while True:
try:
return subprocess.check_output(['gsutil', '-q', 'cat', uri])
except subprocess.CalledProcessError:
traceback.print_exc()
time.sleep(5)
def save_stats(uri, buf):
proc = subprocess.Popen(
# TODO(fejta): add -Z if this gets resolved:
# https://github.com/GoogleCloudPlatform/gsutil/issues/364
['gsutil', '-q', '-h', 'Content-Type:text/plain',
'cp', '-a', 'public-read', '-', uri],
stdin=subprocess.PIPE)
proc.communicate(buf.getvalue())
code = proc.wait()
if code:
print >>sys.stderr, 'Failed to copy stats to %s: %d' % (uri, code)
def poll_forever(uri, service_account=None):
if service_account:
print >>sys.stderr, 'Activating service account using: %s' % service_account
subprocess.check_call(
['gcloud', 'auth', 'activate-service-account', '--key-file=%s' % service_account])
print >>sys.stderr, 'Loading historical stats from %s...' % uri
buf = cStringIO.StringIO()
buf.write(load_stats(uri))
secs = 60
while True:
try:
print >>sys.stderr, 'Waiting %ds...' % secs
time.sleep(secs)
now = datetime.datetime.now()
print >>sys.stderr, 'Polling current status...'
online, prs, queue, running, blocked, merge_count = False, 0, 0, 0, False, 0
try:
online, prs, queue, running, blocked, merge_count = poll()
except KeyboardInterrupt:
raise
except (KeyError, IOError):
traceback.print_exc()
continue
data = '{} {} {} {} {} {} {}\n'.format(now, online, prs, queue, running, blocked, merge_count)
print >>sys.stderr, 'Appending to history: %s' % data
buf.write(data)
print >>sys.stderr, 'Saving historical stats to %s...' % uri
save_stats(uri, buf)
except KeyboardInterrupt:
break
if __name__ == '__main__':
# log all arguments.
pp = pprint.PrettyPrinter(stream=sys.stderr)
pp.pprint(sys.argv)
poll_forever(*sys.argv[1:])
| 31.650794 | 106 | 0.621615 |
aceda38372c69c803c71e4c9ea5f6df7e4364f2e | 203 | py | Python | 29 Execution_Time/sleepFun.py | codewithsandy/Python-Basic-Exp | 4c70ada4a042923a94301453c7bd76e704cd2989 | [
"MIT"
] | 3 | 2021-05-08T13:11:41.000Z | 2021-05-14T02:43:20.000Z | 29 Execution_Time/sleepFun.py | codewithsandy/Python-Basic-Exp | 4c70ada4a042923a94301453c7bd76e704cd2989 | [
"MIT"
] | null | null | null | 29 Execution_Time/sleepFun.py | codewithsandy/Python-Basic-Exp | 4c70ada4a042923a94301453c7bd76e704cd2989 | [
"MIT"
] | null | null | null | import time
initial = time.time()
#print(initial)
k = 0
while(k<10):
print("This is sandy program")
time.sleep(2)
k+=1
print("while loop execution time: ", time.time() - initial, "Seconds")
| 18.454545 | 70 | 0.64532 |
aceda4a4aa055df160adb8c1bb74dbea8eca1f08 | 216 | py | Python | pav_propms/pav_property_management_solution/doctype/receipt_note/test_receipt_note.py | alkuhlani/pav_propms | 2b1a9f9b2430fd45083ea750ab0d0df0243f6d17 | [
"MIT"
] | null | null | null | pav_propms/pav_property_management_solution/doctype/receipt_note/test_receipt_note.py | alkuhlani/pav_propms | 2b1a9f9b2430fd45083ea750ab0d0df0243f6d17 | [
"MIT"
] | null | null | null | pav_propms/pav_property_management_solution/doctype/receipt_note/test_receipt_note.py | alkuhlani/pav_propms | 2b1a9f9b2430fd45083ea750ab0d0df0243f6d17 | [
"MIT"
] | 3 | 2021-03-24T13:43:14.000Z | 2021-06-20T09:02:24.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2021, Patrner Team and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class TestReceiptNote(unittest.TestCase):
pass
| 19.636364 | 51 | 0.763889 |
aceda4ca077ef88f8c623e1f98ac49a59a00653c | 338 | py | Python | Python/ENEM.py | jacksontenorio8/python | a484f019960faa5aa29177eff44a1bb1e3f3b9d0 | [
"MIT"
] | null | null | null | Python/ENEM.py | jacksontenorio8/python | a484f019960faa5aa29177eff44a1bb1e3f3b9d0 | [
"MIT"
] | null | null | null | Python/ENEM.py | jacksontenorio8/python | a484f019960faa5aa29177eff44a1bb1e3f3b9d0 | [
"MIT"
] | null | null | null | n1 = float(input('Redação:'))
n2 = float(input('Ciências da Natureza e suas Tecnologias:'))
n3 = float(input('Ciências Humanas e suas Tecnologias:'))
n4 = float(input('Linguagens, Códigos e suas Tecnologias:'))
n5 = float(input('Matemática e suas Tecnologias:'))
x = (n1 + n2 + n3 + n4 + n5) / 5
print('Sua média no Enem é: {}'.format(x)) | 48.285714 | 61 | 0.680473 |
aceda4fe2f436ae3e2b0b280a8c46f8c33079688 | 9,771 | py | Python | libraries/unified-model/unified_model/ensemble_utils.py | felixridinger/machine-learning-lab | 410e2f5fecb7ea91dcec12a5b9cb9161331191bf | [
"Apache-2.0",
"MIT"
] | 55 | 2020-08-28T12:26:15.000Z | 2022-02-01T08:57:26.000Z | libraries/unified-model/unified_model/ensemble_utils.py | felixridinger/machine-learning-lab | 410e2f5fecb7ea91dcec12a5b9cb9161331191bf | [
"Apache-2.0",
"MIT"
] | 38 | 2020-09-01T17:17:22.000Z | 2022-03-31T15:44:57.000Z | libraries/unified-model/unified_model/ensemble_utils.py | felixridinger/machine-learning-lab | 410e2f5fecb7ea91dcec12a5b9cb9161331191bf | [
"Apache-2.0",
"MIT"
] | 19 | 2020-08-31T16:38:09.000Z | 2022-03-09T13:59:58.000Z | import atexit
import operator
import os
import shutil
import tempfile
import pandas as pd
from unified_model import UnifiedModel, NotSupportedException
from unified_model.model_types import DEFAULT_LIMIT, RecommendationModel
from unified_model.utils import ITEM_COLUMN, SCORE_COLUMN, overrides
class EnsembleStrategy:
RELATIVE_SCORE = "relative_score"
ONE_VOTE = "one_vote"
TOTAL_SCORE = "total_score"
RANK_VOTE = "rank_vote"
RANK_AVERAGING = "rank_averaging"
HIGHEST_SCORES = "highest_scores"
def combine_predictions(models: list, data, limit: int = DEFAULT_LIMIT, strategy: str = EnsembleStrategy.RELATIVE_SCORE,
multiply_limit: float = 1):
predicted_labels = {}
for model in models:
model_weight = 1
if isinstance(model, tuple):
model_weight = model[1]
model = model[0]
if limit:
request_limit = int(limit * multiply_limit)
else:
request_limit = None
        if strategy == EnsembleStrategy.RANK_AVERAGING:
            # rank averaging needs the full ranking, not a truncated one
            request_limit = None
predictions = model.predict(data, limit=request_limit)
total_score = 0
for index, row in predictions.iterrows():
total_score += row[SCORE_COLUMN]
for index, row in predictions.iterrows():
item = row[ITEM_COLUMN]
score = row[SCORE_COLUMN]
if item not in predicted_labels:
predicted_labels[item] = 0
if strategy == EnsembleStrategy.ONE_VOTE:
predicted_labels[item] += 1 * model_weight
elif strategy == EnsembleStrategy.RELATIVE_SCORE:
predicted_labels[item] += (score / total_score) * model_weight
elif strategy == EnsembleStrategy.HIGHEST_SCORES:
if score > predicted_labels[item]:
predicted_labels[item] = score
elif strategy == EnsembleStrategy.TOTAL_SCORE:
predicted_labels[item] += score * model_weight
elif strategy == EnsembleStrategy.RANK_VOTE:
min_value = 0.5
predicted_labels[item] += min_value + (len(predictions) - (index - 1)) * (
(1 - min_value) / len(predictions)) * model_weight
elif strategy == EnsembleStrategy.RANK_AVERAGING:
predicted_labels[item] += (len(predictions) - (index - 1)) * (
1 / len(predictions)) * model_weight
else:
raise NotSupportedException("Voting strategy " + strategy + " is not supported")
if limit is None:
limit = len(predicted_labels)
sorted_predictions = sorted(predicted_labels.items(), key=operator.itemgetter(1), reverse=True)[:limit]
if strategy == EnsembleStrategy.RANK_AVERAGING:
temp_sorted_predictions = sorted_predictions
sorted_predictions = []
for prediction in temp_sorted_predictions:
# Average scores:
sorted_predictions.append([prediction[0], prediction[1] / len(models)])
return pd.DataFrame(sorted_predictions,
columns=[ITEM_COLUMN, SCORE_COLUMN])
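# Minimal sketch (illustrative): _StubModel is a hypothetical stand-in that
# only implements the predict() interface combine_predictions relies on; the
# second entry demonstrates the (model, weight) tuple form.
class _StubModel(object):
    def __init__(self, rows):
        self._rows = rows

    def predict(self, data, limit=None):
        df = pd.DataFrame(self._rows, columns=[ITEM_COLUMN, SCORE_COLUMN])
        return df if limit is None else df.head(limit)


def _example_combine_predictions():
    models = [_StubModel([['a', 0.9], ['b', 0.5]]),
              (_StubModel([['b', 0.8], ['c', 0.4]]), 2.0)]
    return combine_predictions(models, data=None, limit=2,
                               strategy=EnsembleStrategy.RANK_VOTE)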
class VotingEnsemble(RecommendationModel):
"""
Initialize voting ensemble.
# Arguments
models (list): List of unified models
strategy (str): Ensemble Strategy (relative_score, one_vote, total_score, rank_vote, rank_averaging, highest_scores)
multiply_limit (float): Will determine how much more data is requested for every model prediction (optional)
**kwargs: Provide additional keyword-based parameters.
"""
def __init__(self, models: list, strategy: str = EnsembleStrategy.RELATIVE_SCORE, multiply_limit: float = 1,
**kwargs):
super(VotingEnsemble, self).__init__(**kwargs)
self.models = models
for model in models:
if not isinstance(model, RecommendationModel):
self._log.warn("Model " + str(model) + " is not a recommendation model and might fail.")
if not self.name:
self.name = "voting_ensemble_" + strategy
self.strategy = strategy
self.multiply_limit = multiply_limit
self.model_keys = []
@overrides
def _init_model(self):
self.models = []
try:
# Check if install requirements was set
self._install_requirements
except AttributeError:
# Otherwise set false as default
self._install_requirements = False
for model_key in self.model_keys:
self.models.append(UnifiedModel.load(self.get_file(model_key),
install_requirements=self._install_requirements))
@overrides
def _save_model(self, output_path):
temp_folder = tempfile.mkdtemp()
# automatically remove temp directory if process exits
def cleanup():
shutil.rmtree(temp_folder)
atexit.register(cleanup)
for model in self.models:
model_key = str(model) + "_" + str(id(model))
model_path = model.save(os.path.join(temp_folder, model_key))
self.add_file(os.path.basename(model_path), model_path)
self.model_keys.append(os.path.basename(model_path))
del self.models
@overrides
def _predict(self, data, limit=None, **kwargs):
return combine_predictions(self.models, data, limit=limit,
strategy=self.strategy,
multiply_limit=self.multiply_limit)
def get_empty_dataframe(row_size):
return pd.DataFrame(
columns=[ITEM_COLUMN, SCORE_COLUMN],
data=[["", ""]] * row_size)
class StackedEnsembleModel(UnifiedModel):
def __init__(self, first_stage_model: RecommendationModel, second_stage_models: dict, **kwargs):
super(StackedEnsembleModel, self).__init__(**kwargs)
self.first_stage_model = first_stage_model
self.second_stage_models = second_stage_models
@overrides
def _init_model(self, **kwargs):
try:
# Check if install requirements was set
self._install_requirements
except AttributeError:
# Otherwise set false as default
self._install_requirements = False
self.second_stage_models = {}
self.first_stage_model = UnifiedModel.load(
self.get_file(self.first_stage_model_key),
install_requirements=self._install_requirements)
for category, key in self.second_stage_model_keys.items():
self.second_stage_models[category] = UnifiedModel.load(
self.get_file(key),
install_requirements=self._install_requirements)
@overrides
def _save_model(self, output_path):
temp_folder = tempfile.mkdtemp()
# automatically remove temp directory if process exits
def cleanup():
shutil.rmtree(temp_folder)
atexit.register(cleanup)
self.second_stage_model_keys = {}
self._store_model(
self.first_stage_model,
temp_folder,
second_stage_category=None)
for category, model in self.second_stage_models.items():
self._store_model(
model,
temp_folder,
second_stage_category=category)
del self.first_stage_model
del self.second_stage_models
def _predict(self, data, limit=None, **kwargs):
if limit:
limit = int(limit)
first_stage_pred = self.first_stage_model.predict(data, limit=1)
prediction = first_stage_pred[ITEM_COLUMN][0]
if prediction not in self.second_stage_models:
return get_empty_dataframe(1)
second_stage_model = self.second_stage_models[prediction]
return second_stage_model.predict(data, limit=limit)
def predict_batch(self, data, limit=None, **kwargs):
# TODO update for new batch prediction
first_stage_pred = self.first_stage_model.predict_batch(
data,
limit=1)
first_stage_pred[ITEM_COLUMN] = first_stage_pred.apply(lambda x: x[ITEM_COLUMN][0], axis=1)
first_stage_pred["data"] = data
unique_first_stage_pred = first_stage_pred[ITEM_COLUMN].unique()
result = []
for unique in unique_first_stage_pred:
select_df = first_stage_pred.loc[first_stage_pred[ITEM_COLUMN] == unique]
if unique in self.second_stage_models:
second_stage = self.second_stage_models[unique].predict_batch(select_df["data"], limit=limit)
else:
# TODO empty list for prediction X
second_stage = get_empty_dataframe(len(select_df))
# second_stage = give_default_preds(len(select_df),limit)
second_stage.index = select_df.index
result.append(second_stage)
return pd.concat(result).sort_index(ascending=True)
def _store_model(self, model, temp_folder, second_stage_category=None):
model_key = str(model) + "_" + str(id(model))
model_path = model.save(os.path.join(temp_folder, model_key))
model_basename = os.path.basename(model_path)
if not second_stage_category:
self.first_stage_model_key = model_basename
else:
self.second_stage_model_keys[second_stage_category] = model_basename
self.add_file(model_basename, model_path)
@overrides
def evaluate(self, test_data: list, target_predictions: list, **kwargs) -> dict:
# Not easily done since we do not know the types of the second stage models
raise NotImplementedError('Method not implemented')
| 36.055351 | 124 | 0.640364 |
aceda54ba7032009682cb9d6ee3e4f9e2b981418 | 1,553 | py | Python | transfer/data/conmssqlserver/connection.py | geezylucas/transfer-git | c47af7565f607840376ada2e9f27d849e9cc2af4 | [
"MIT"
] | null | null | null | transfer/data/conmssqlserver/connection.py | geezylucas/transfer-git | c47af7565f607840376ada2e9f27d849e9cc2af4 | [
"MIT"
] | null | null | null | transfer/data/conmssqlserver/connection.py | geezylucas/transfer-git | c47af7565f607840376ada2e9f27d849e9cc2af4 | [
"MIT"
] | null | null | null | import pyodbc
import time
class ConnMSSQLServer(object):
def __init__(self, server, database, user, password):
self.__server = server
self.__database = database
self.__user = user
self.__password = password
self.__conn = None
self.open_connection()
def open_connection(self):
self.__conn = pyodbc.connect('Driver={ODBC Driver 17 for SQL Server};'
f'Server={self.__server};'
f'Database={self.__database};'
f'UID={self.__user};'
f'PWD={self.__password};')
def execute_query(self, query):
try:
cursor = self.__conn.cursor()
return cursor.execute(query)
except pyodbc.Error as err:
print(err)
def insert_many(self, query, params):
cursor = self.__conn.cursor()
try:
start = time.time()
cursor.fast_executemany = True
cursor.executemany(query, params)
cursor.commit()
end = time.time()
print(end - start)
except pyodbc.Error as err:
print(err)
def __del__(self):
self.__conn.close()
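    # Usage sketch (illustrative; assumes a reachable SQL Server and a
    # hypothetical dbo.items table):
    #   conn = ConnMSSQLServer('localhost', 'mydb', 'user', 'password')
    #   conn.insert_many('INSERT INTO dbo.items (id, name) VALUES (?, ?)',
    #                    [(1, 'a'), (2, 'b')])
    #   rows = conn.execute_query('SELECT id, name FROM dbo.items').fetchall()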
# def insert_many_for(self, query, params):
# cursor = self.__conn.cursor()
# try:
# for p in params:
# cursor.execute(query, p)
# cursor.commit()
# except pyodbc.Error as err:
# print(err)
| 27.245614 | 78 | 0.513844 |
aceda8b6f66e0c2af6eeeb108cb7e6b5c3297a00 | 55,246 | py | Python | qa/rpc-tests/test_framework/mininode.py | hashtagcoin/hashtagcoinCORE | 1c7c0dd4aed70364909d4666f150c45e741e6f76 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/mininode.py | hashtagcoin/hashtagcoinCORE | 1c7c0dd4aed70364909d4666f150c45e741e6f76 | [
"MIT"
] | null | null | null | qa/rpc-tests/test_framework/mininode.py | hashtagcoin/hashtagcoinCORE | 1c7c0dd4aed70364909d4666f150c45e741e6f76 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Hashtagcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Hashtagcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a hashtagcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# hashtagcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from .util import hex_str_to_bytes, bytes_to_hex_str
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from test_framework.siphash import siphash256
BIP0031_VERSION = 60000
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_INV_SZ = 50000
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 HASH in satoshis
NODE_NETWORK = (1 << 0)
NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def ripemd160(s):
return hashlib.new('ripemd160', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
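# Worked example (illustrative): CompactSize uses a single byte below 253,
# then a 0xFD/0xFE/0xFF marker followed by a little-endian 16/32/64-bit value.
def _example_compact_size():
    assert ser_compact_size(252) == b"\xfc"
    assert ser_compact_size(253) == b"\xfd\xfd\x00"
    assert deser_compact_size(BytesIO(ser_compact_size(2**32))) == 2**32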
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
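# Round-trip sketch (illustrative): uint256 values are serialized as 32
# little-endian bytes, so the ser/deser helpers above are inverses.
def _example_uint256_roundtrip():
    value = 0x1234
    assert uint256_from_str(ser_uint256(value)) == value
    assert deser_uint256(BytesIO(ser_uint256(value))) == value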
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
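# Worked example (illustrative): regtest's nBits 0x207fffff expands to
# 0x7fffff shifted left by 8 * (0x20 - 3) bits, i.e. a near-maximal target.
def _example_compact_target():
    assert uint256_from_compact(0x207fffff) == 0x7fffff << (8 * (0x20 - 3))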
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return bytes_to_hex_str(obj.serialize())
# Objects that map to hashtagcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
MSG_WITNESS_FLAG = 1<<30
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), bytes_to_hex_str(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
bytes_to_hex_str(self.scriptPubKey))
class CScriptWitness(object):
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([bytes_to_hex_str(x) for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness(object):
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness(object):
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in hashtagcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is without witness -- must explicitly
# call serialize_with_witness to include witness data.
def serialize(self):
return self.serialize_without_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
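# API sketch (illustrative): build a bare 1-input/1-output transaction and
# compute its txid. The outpoint and scripts below are dummies.
def _example_transaction():
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(0xffff, 0), b"", 0xffffffff))
    tx.vout.append(CTxOut(1 * COIN, b"\x51"))  # scriptPubKey = OP_TRUE
    tx.rehash()
    return tx.hash  # 64-char hex txid (computed without witness data)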
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx)
return r
# Calculate the merkle root given a vector of transaction hashes
def get_merkle_root(self, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
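    # Worked example (illustrative): with leaf hashes [a, b, c], the loop above
    # pairs them as hash256(a+b) and hash256(c+c) -- an odd count duplicates the
    # last entry via i2 = min(i+1, len-1) -- then hashes the two results together.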
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
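    # Usage sketch: after populating vtx, fix up the merkle root and grind:
    #   block.hashMerkleRoot = block.calc_merkle_root()
    #   block.solve()  # bumps nNonce until sha256 <= target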
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
        if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest(object):
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
        self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
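    # Example (sketch): absolute indexes [1, 2, 5] round-trip through the
    # differential form [1, 0, 2]:
    #   req.from_absolute([1, 2, 5])  # req.indexes == [1, 0, 2]
    #   req.to_absolute()             # -> [1, 2, 5]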
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions(object):
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
        self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=False):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions)
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
            except Exception:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
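# Handshake sketch: NodeConn pushes a msg_version at connect time;
# NodeConnCB.on_version() answers with msg_verack, and tests typically gate on
# wait_for_verack() before proceeding.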
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Avoid a shared mutable default: deserialize() would otherwise mutate
        # one CTransaction instance across every msg_tx.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic(object):
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_witness_block(msg_block):
def serialize(self):
r = self.block.serialize(with_witness=True)
return r
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in hashtagcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
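# Usage sketch (assuming `cb` is the test's NodeConnCB): poll, under
# mininode_lock, until the peer's verack arrives, giving up after roughly
# ten seconds:
#   assert wait_until(lambda: cb.verack_received, timeout=10)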
class msg_feefilter(object):
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct(object):
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock(object):
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn(object):
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn(object):
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_witness_blocktxn(msg_blocktxn):
def serialize(self):
r = b""
r += self.block_transactions.serialize(with_witness=True)
return r
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
# Remember the services our peer has advertised
self.peer_services = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
            except Exception:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
conn.nServices = message.nServices
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_open(self, conn): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
def on_feefilter(self, conn, message): pass
def on_sendheaders(self, conn, message): pass
def on_sendcmpct(self, conn, message): pass
def on_cmpctblock(self, conn, message): pass
def on_getblocktxn(self, conn, message): pass
def on_blocktxn(self, conn, message): pass
# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def send_and_ping(self, message):
self.send_message(message)
self.sync_with_ping()
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout=timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
b"feefilter": msg_feefilter,
b"sendheaders": msg_sendheaders,
b"sendcmpct": msg_sendcmpct,
b"cmpctblock": msg_cmpctblock,
b"getblocktxn": msg_getblocktxn,
b"blocktxn": msg_blocktxn
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=NODE_NETWORK, send_version=True):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
self.nServices = 0
if send_version:
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Hashtagcoin Node IP # ' + dstaddr + ':' \
+ str(dstport))
try:
self.connect((dstaddr, dstport))
        except Exception:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
if self.state != "connected":
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
self.cb.on_open(self)
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
        except Exception:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
        except Exception:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
# actually connect
if self.state == "connecting":
self.handle_connect()
if not self.writable():
return
try:
sent = self.send(self.sendbuf)
            except Exception:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
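    # Wire framing handled below: magic(4) | command(12, NUL-padded) |
    # length(4, little-endian) | checksum(4) | payload, where the checksum is
    # the first 4 bytes of sha256(sha256(payload)); pre-209 peers omit it.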
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
# loop to workaround the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
| 30.556416 | 262 | 0.593057 |
aceda9b59ad5f83d1e461425f15b9cf88937ebb4 | 5,290 | py | Python | src/mmf_setup/notebook_configuration.py | mforbes/mmf-setup-fork | 774d4ac32caf353abf0d527d78d44b7aaca92684 | [
"BSD-3-Clause"
] | null | null | null | src/mmf_setup/notebook_configuration.py | mforbes/mmf-setup-fork | 774d4ac32caf353abf0d527d78d44b7aaca92684 | [
"BSD-3-Clause"
] | null | null | null | src/mmf_setup/notebook_configuration.py | mforbes/mmf-setup-fork | 774d4ac32caf353abf0d527d78d44b7aaca92684 | [
"BSD-3-Clause"
] | null | null | null | """Jupyter Notebook initialization.
Usage:
1) Add the following to the first code cell of your notebook:
import mmf_setup; mmf_setup.nbinit()
2) Execute and save the results.
3) Trust the notebook (File->Trust Notebook).
This module provides customization for Jupyter notebooks including
styling and some pre-defined MathJaX macros.
"""
import logging
import os.path
try:
from IPython.display import HTML, Javascript, display, clear_output
except (ImportError, KeyError):
HTML = Javascript = display = clear_output = None
__all__ = ["nbinit"]
_HERE = os.path.abspath(os.path.dirname(__file__))
_DATA = os.path.join(_HERE, "_data")
_NBTHEMES = os.path.join(_DATA, "nbthemes")
_MESSAGE = r"""
<i>
<p>This cell contains some definitions
for equations and some CSS for styling the notebook.
If things look a bit strange, please try the following:
<ul>
<li>Choose "Trust Notebook" from the "File" menu.</li>
<li>Re-execute this cell.</li>
<li>Reload the notebook.</li>
</ul>
</p>
</i>
"""
_TOGGLE_CODE = r"""<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit"
value="Click here to toggle on/off the raw code."></form>
"""
def log(msg, level=logging.INFO):
logging.getLogger(__name__).log(level=level, msg=msg)
class MyFormatter(logging.Formatter):
"""Custom logging formatter for sending info to Jupyter console."""
def __init__(self):
logging.Formatter.__init__(
self,
fmt="[%(levelname)s %(asctime)s %(name)s] %(message)s",
datefmt="%H:%M:%S",
)
def format(self, record):
record.levelname = record.levelname[0]
msg = logging.Formatter.format(self, record)
if record.levelno >= logging.WARNING:
msg += "\n{}{}:{}".format(" " * 14, record.filename, record.lineno)
return msg
def nbinit(
theme="default",
set_path=True,
toggle_code=False,
debug=False,
console_logging=True,
quiet=False,
):
"""Initialize a notebook.
This function displays a set of CSS and javascript code to customize the
notebook, for example, defining some MathJaX latex commands. Saving the
notebook with this output should allow the notebook to render correctly on
nbviewer.org etc.
Arguments
---------
theme : str
Choose a theme.
set_path : bool
If `True`, then call `mmf_setup.set_path()` to add the root directory to
the path so that top-level packages can be imported without installation.
toggle_code : bool
If `True`, then provide a function to toggle the visibility of input
code. (This should be replaced by an extension.)
debug : bool
If `True`, then return the list of CSS etc. code displayed to the
notebook.
console_logging : bool
If `True`, then add an error handler that logs messages to the console.
quiet : bool
If `True`, then do not display message about reloading and trusting notebook.
"""
clear_output()
####################
# Logging to jupyter console.
# Not exactly sure why this works, but here we add a handler
# to send output to the main console.
# https://stackoverflow.com/a/39331977/1088938
if console_logging:
logger = logging.getLogger()
handler = None
for h in logger.handlers:
try:
if h.stream.fileno() == 1:
handler = h
break
except Exception:
pass
if not handler:
handler = logging.StreamHandler(open(1, "w", encoding="utf-8"))
logger.addHandler(handler)
handler.setFormatter(MyFormatter())
handler.setLevel("DEBUG")
logger.setLevel("DEBUG")
####################
# Accumulate output for notebook to setup MathJaX etc.
res = []
def _load(ext, theme=theme):
"""Try loading resource from theme, fallback to default"""
for _theme in [theme, "default"]:
_file = os.path.join(
_NBTHEMES, "{theme}{ext}".format(theme=_theme, ext=ext)
)
if os.path.exists(_file):
with open(_file) as _f:
return _f.read()
return ""
def _display(val, wrapper=HTML):
res.append((val, wrapper))
display(wrapper(val))
# CSS
_display(r"<style>{}</style>".format(_load(".css")))
# Javascript
_display(_load(".js"), wrapper=Javascript)
# LaTeX commands
_template = r'<script id="MathJax-Element-48" type="math/tex">{}</script>'
_display(_template.format(_load(".tex").strip()))
# Remaining HTML
_display(_load(".html"))
message = _MESSAGE
if set_path:
from .set_path import set_path
path = set_path()
if path:
message = message.replace(
"This cell", f"This cell adds {path} to your path, and"
)
# Message
if not quiet:
_display(message)
if toggle_code:
_display(_TOGGLE_CODE)
if debug:
return res
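
# Typical first notebook cell (sketch; both arguments are optional):
#   import mmf_setup; mmf_setup.nbinit(theme="default", quiet=True)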
| 26.989796 | 84 | 0.619471 |
acedaa881d215c88e258e88c74f739af0f0f94b9 | 3,551 | py | Python | ch04/negative_sampling_layer.py | ytgw/deep-learning-from-scratch-2 | e20364cc91ca8c94e4625a080569d6d2f4d604af | [
"MIT"
] | null | null | null | ch04/negative_sampling_layer.py | ytgw/deep-learning-from-scratch-2 | e20364cc91ca8c94e4625a080569d6d2f4d604af | [
"MIT"
] | null | null | null | ch04/negative_sampling_layer.py | ytgw/deep-learning-from-scratch-2 | e20364cc91ca8c94e4625a080569d6d2f4d604af | [
"MIT"
] | null | null | null | # coding: utf-8
import sys
sys.path.append('..')
from common.config import np, GPU
from common.layers import Embedding, SigmoidWithLoss
import collections
class EmbeddingDot:
def __init__(self, W):
self.embed = Embedding(W)
self.params = self.embed.params
self.grads = self.embed.grads
self.cache = None
def forward(self, h, idx):
target_W = self.embed.forward(idx)
out = np.sum(target_W * h, axis=1)
self.cache = (h, target_W)
return out
def backward(self, dout):
h, target_W = self.cache
dout = dout.reshape(dout.shape[0], 1)
dtarget_W = dout * h
self.embed.backward(dtarget_W)
dh = dout * target_W
return dh
class UnigramSampler:
def __init__(self, corpus, power, sample_size):
self.sample_size = sample_size
self.vocab_size = None
self.word_p = None
counts = collections.Counter()
for word_id in corpus:
counts[word_id] += 1
vocab_size = len(counts)
self.vocab_size = vocab_size
self.word_p = np.zeros(vocab_size)
for i in range(vocab_size):
self.word_p[i] = counts[i]
self.word_p = np.power(self.word_p, power)
self.word_p /= np.sum(self.word_p)
def get_negative_sample(self, target):
batch_size = target.shape[0]
if not GPU:
negative_sample = np.zeros((batch_size, self.sample_size), dtype=np.int32)
for i in range(batch_size):
p = self.word_p.copy()
target_idx = target[i]
p[target_idx] = 0
p /= p.sum()
negative_sample[i, :] = np.random.choice(self.vocab_size, size=self.sample_size, replace=False, p=p)
else:
            # When computing on the GPU (cupy), favour speed:
            # the negative samples may occasionally include the target itself.
negative_sample = np.random.choice(self.vocab_size, size=(batch_size, self.sample_size),
replace=True, p=self.word_p)
return negative_sample
class NegativeSamplingLoss:
def __init__(self, W, corpus, power=0.75, sample_size=5):
self.sample_size = sample_size
self.sampler = UnigramSampler(corpus, power, sample_size)
self.loss_layers = [SigmoidWithLoss() for _ in range(sample_size + 1)]
self.embed_dot_layers = [EmbeddingDot(W) for _ in range(sample_size + 1)]
self.params, self.grads = [], []
for layer in self.embed_dot_layers:
self.params += layer.params
self.grads += layer.grads
def forward(self, h, target):
batch_size = target.shape[0]
negative_sample = self.sampler.get_negative_sample(target)
        # Forward pass for the positive example
score = self.embed_dot_layers[0].forward(h, target)
correct_label = np.ones(batch_size, dtype=np.int32)
loss = self.loss_layers[0].forward(score, correct_label)
        # Forward pass for the negative examples
negative_label = np.zeros(batch_size, dtype=np.int32)
for i in range(self.sample_size):
negative_target = negative_sample[:, i]
score = self.embed_dot_layers[1+i].forward(h, negative_target)
loss += self.loss_layers[1+i].forward(score, negative_label)
return loss
def backward(self, dout=1):
dh = 0
for loss_layer, embed_dot_layer in zip(self.loss_layers, self.embed_dot_layers):
dscore = loss_layer.backward(dout)
dh += embed_dot_layer.backward(dscore)
return dh
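
# Minimal usage sketch (illustrative only; the corpus and sizes are toy values):
if __name__ == '__main__':
    corpus = np.array([0, 1, 2, 3, 4, 1, 2, 3])
    sampler = UnigramSampler(corpus, power=0.75, sample_size=2)
    # Draw 2 negative word ids for each of the 3 targets, excluding the target.
    print(sampler.get_negative_sample(np.array([1, 3, 0])))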
| 31.990991 | 116 | 0.606871 |
acedaa93b413968723968964e86e3430dc1d02da | 5,908 | py | Python | attic/FIRST_TEST/test_XTP_417.py | ska-telescope/skampi | cd2f95bd56594888c8d0c3476824b438dfcfcf71 | [
"BSD-3-Clause"
] | null | null | null | attic/FIRST_TEST/test_XTP_417.py | ska-telescope/skampi | cd2f95bd56594888c8d0c3476824b438dfcfcf71 | [
"BSD-3-Clause"
] | 3 | 2019-10-25T13:38:56.000Z | 2022-03-30T09:13:33.000Z | attic/FIRST_TEST/test_XTP_417.py | ska-telescope/skampi | cd2f95bd56594888c8d0c3476824b438dfcfcf71 | [
"BSD-3-Clause"
] | 2 | 2019-11-04T09:59:06.000Z | 2020-05-07T11:05:42.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_calc
----------------------------------
Acceptance tests for MVP.
"""
from types import SimpleNamespace
import logging
from pytest_bdd import scenario, given, when, then
import pytest
from time import sleep
#from tango import DeviceProxy
from ska_ser_skallop.mvp_fixtures.env_handling import ExecEnv
from ska_ser_skallop.mvp_fixtures.base import ExecSettings
from ska_ser_skallop.mvp_control.entry_points.base import EntryPoint
from ska_ser_skallop.mvp_control.subarray.compose import SBConfig
from ska_ser_skallop.mvp_control.entry_points import types as conf_types
from ska_ser_skallop.mvp_control.event_waiting import wait
from ska_ser_skallop.mvp_control.describing import mvp_names
from ska_ser_skallop.event_handling import builders
from ska_ser_skallop.connectors.configuration import get_device_proxy
from ska_ser_skallop.subscribing.helpers import get_attr_value_as_str
from ska_ser_skallop.mvp_fixtures.context_management import TelescopeContext
from ska_ser_skallop.event_handling.logging import device_logging_context, LogSpec
logger = logging.getLogger(__name__)
class Context(SimpleNamespace):
pass
def wait_for_read_attr(attr: str, required_value: str, device_name: str, poll_period = 0.5, timeout = 3):
proxy = get_device_proxy(device_name)
current = get_attr_value_as_str(proxy.read_attribute(attr))
iterations = int(timeout/poll_period)
if current != required_value:
for count in range(iterations):
sleep(poll_period)
current = get_attr_value_as_str(proxy.read_attribute(attr))
if current == required_value:
                logger.warning(
                    f"got an event for {device_name} on {attr} == {required_value}, but reading the attribute only caught up after {count*poll_period}s"
                )
break
if current != required_value:
        raise Exception(
            f"got an event for {device_name} on {attr} == {required_value} but reading the attr still gives {current} after {timeout}s"
        )
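# Usage sketch (mirrors the call made in the steps below): block until the SDP
# subarray reports IDLE:
#   wait_for_read_attr("obsState", "IDLE", mvp_names.Mid.sdp.subarray(1).__str__())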
@pytest.fixture(name="context")
def fxt_context():
return Context()
@pytest.fixture(name="init")
def fxt_init(exec_env: ExecEnv, exec_settings: ExecSettings):
exec_env.entrypoint = "tmc"
#exec_settings.log_enabled = True
@pytest.mark.skamid
@pytest.mark.sst587
#@pytest.mark.skip("test is still WIP")
@scenario("1_XR-13_XTP-494.feature", "A1-Test, Sub-array resource allocation")
def test_allocate_resources(init):
"""Assign Resources."""
@given("A running telescope for executing observations on a subarray")
def set_to_running(running_telescope: TelescopeContext):
pass
@when("I allocate 4 dishes to subarray 1")
def allocate(
tmp_path,
context,
sb_config: SBConfig,
exec_settings: ExecSettings,
running_telescope: TelescopeContext,
entry_point: EntryPoint,
):
subarray_id = 1
nr_of_dishes = 4
    receptors = list(range(1, nr_of_dishes + 1))
# check sdp subarray has polling set up
#sdp_subarray = mvp_names.Mid.sdp.subarray(subarray_id).__str__()
#sdp_proxy = DeviceProxy(sdp_subarray)
#polling = sdp_proxy.get_attribute_poll_period('obsState')
#logger.info(f'Note {sdp_subarray} is polled with {polling}ms')
composition = conf_types.CompositionByFile(tmp_path, conf_types.CompositionType.STANDARD)
builder = builders.get_message_board_builder()
checker = (
builder.check_that(str(mvp_names.Mid.tm.subarray(subarray_id)))
.transits_according_to(["EMPTY", ("RESOURCING", "ahead"), "IDLE"])
.on_attr("obsState")
.when_transit_occur_on(
mvp_names.SubArrays(subarray_id).subtract("tm").subtract("cbf domain").list
)
)
running_telescope.release_subarray_when_finished(subarray_id, receptors, exec_settings)
# logging sdp_subarry as it is suspect
devices_to_log = LogSpec().add_log(
device_name=mvp_names.Mid.sdp.subarray(subarray_id).__str__()
).add_log(
device_name=mvp_names.Mid.tm.subarray(subarray_id).__str__()
).add_log(
device_name=mvp_names.Mid.csp.subarray(subarray_id).__str__()
)
running_telescope.push_context_onto_test(device_logging_context(builder, devices_to_log))
board = running_telescope.push_context_onto_test(wait.waiting_context(builder))
context.board = board
context.checker = checker
exec_settings.touched = True
entry_point.compose_subarray(
subarray_id,
receptors,
composition,
sb_config.sbid,
)
@then("I have a subarray composed of 4 dishes")
def check_subarray_composition(context):
board: wait.MessageBoardBase = context.board
try:
wait.wait(context.board, 3*60, live_logging=False)
wait_for_read_attr(
attr = 'obsState',
required_value = 'IDLE',
device_name = mvp_names.Mid.sdp.subarray(1).__str__()
) # hack to circumvent possible synchronization fault in event generated data vs queried data
except wait.EWhilstWaiting as exception:
logs = board.play_log_book(filter_log=False,log_filter_pattern="txn")
logger.info(f"Log messages during waiting:\n{logs}")
raise exception
logs = board.play_log_book(filter_log=False,log_filter_pattern="txn")
logger.info(f"Log messages and events captured whilst test ran:\n{logs}")
@then("the subarray is in the condition that allows scan configurations to take place")
def check_subarry_state(context):
checker: builders.Occurrences = context.checker
checking_logs = checker.print_outcome_for(checker.subject_device)
logger.info(f"Results of checking:\n{checking_logs}")
checker.assert_that(checker.subject_device).is_ahead_of_all_on_transit("RESOURCING")
checker.assert_that(checker.subject_device).is_behind_all_on_transit("IDLE")
| 38.116129 | 159 | 0.730366 |
acedab00937ae7c2396d62a0230c0b8c71581ad6 | 292 | py | Python | loops/sumToN.py | Awes35/python-functions | f6484cddcf0144e627b8219272d964bf039d1e53 | [
"MIT"
] | null | null | null | loops/sumToN.py | Awes35/python-functions | f6484cddcf0144e627b8219272d964bf039d1e53 | [
"MIT"
] | null | null | null | loops/sumToN.py | Awes35/python-functions | f6484cddcf0144e627b8219272d964bf039d1e53 | [
"MIT"
] | null | null | null | #author Kollen Gruizenga
#function to compute whether any 2 numbers in integer list L
#sum to equal specified integer N
def sumToN(L, N):
for x in range(len(L)):
for y in range(x+1, len(L)):
if (L[x]+L[y] == N):
return True
return False
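
# Quick usage sketch (values are arbitrary examples):
if __name__ == "__main__":
    print(sumToN([1, 2, 3, 4], 7))    # True: 3 + 4 == 7
    print(sumToN([1, 2, 3, 4], 100))  # False: no pair sums to 100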
| 20.857143 | 60 | 0.575342 |
acedab346ea17c47d3972725b62cbedb3e9946ca | 8,669 | py | Python | src/llvm.py | strellic/PulseLang | 72730ec4c2967f004e9bf19dcd7bbef097824973 | [
"MIT"
] | 1 | 2020-09-24T05:57:25.000Z | 2020-09-24T05:57:25.000Z | src/llvm.py | strellic/PulseLang | 72730ec4c2967f004e9bf19dcd7bbef097824973 | [
"MIT"
] | null | null | null | src/llvm.py | strellic/PulseLang | 72730ec4c2967f004e9bf19dcd7bbef097824973 | [
"MIT"
] | null | null | null | # Pulse llvmgen -> turns intermediate code to llvm ir
from collections import ChainMap
from functools import partialmethod
from llvmlite.ir import (
Module, IRBuilder, Function, IntType, DoubleType, VoidType, Constant,
GlobalVariable, FunctionType
)
int_type = IntType(32) # 32-bit integer
float_type = DoubleType() # 64-bit float
byte_type = IntType(8) # 8-bit integer
void_type = VoidType() # Void type. This is a special type
# used for internal functions returning
# no value
LLVM_TYPE_MAPPING = {
'I': int_type,
'F': float_type,
'B': byte_type,
None: void_type
}
class GenerateLLVM(object):
def __init__(self):
self.module = Module('module')
self.globals = { }
self.blocks = { }
self.declare_runtime_library()
def declare_runtime_library(self):
self.runtime = {}
self.runtime['_print_int'] = Function(self.module,
FunctionType(void_type, [int_type]),
name="_print_int")
self.runtime['_print_float'] = Function(self.module,
FunctionType(void_type, [float_type]),
name="_print_float")
self.runtime['_print_byte'] = Function(self.module,
FunctionType(void_type, [byte_type]),
name="_print_byte")
def generate_code(self, ir_function):
self.function = Function(self.module,
FunctionType(
LLVM_TYPE_MAPPING[ir_function.return_type],
[LLVM_TYPE_MAPPING[ptype] for _, ptype in ir_function.parameters]
),
name=ir_function.name)
self.block = self.function.append_basic_block('entry')
self.builder = IRBuilder(self.block)
self.globals[ir_function.name] = self.function
self.locals = { }
self.vars = ChainMap(self.locals, self.globals)
self.temps = { }
for n, (pname, ptype) in enumerate(ir_function.parameters):
self.vars[pname] = self.builder.alloca(LLVM_TYPE_MAPPING[ptype], name=pname)
self.builder.store(self.function.args[n], self.vars[pname])
if ir_function.return_type:
self.vars['return'] = self.builder.alloca(
LLVM_TYPE_MAPPING[ir_function.return_type], name='return')
self.return_block = self.function.append_basic_block('return')
for opcode, *args in ir_function.code:
if hasattr(self, 'emit_'+opcode):
getattr(self, 'emit_'+opcode)(*args)
else:
print('Warning: No emit_'+opcode+'() method')
if not self.block.is_terminated:
self.builder.branch(self.return_block)
self.builder.position_at_end(self.return_block)
self.builder.ret(self.builder.load(self.vars['return'], 'return'))
def get_block(self, block_name):
block = self.blocks.get(block_name)
if block is None:
block = self.function.append_basic_block(block_name)
self.blocks[block_name] = block
return block
def emit_MOV(self, value, target, val_type):
self.temps[target] = Constant(val_type, value)
emit_MOVI = partialmethod(emit_MOV, val_type=int_type)
emit_MOVF = partialmethod(emit_MOV, val_type=float_type)
emit_MOVB = partialmethod(emit_MOV, val_type=byte_type)
def emit_VAR(self, name, var_type):
var = GlobalVariable(self.module, var_type, name=name)
var.initializer = Constant(var_type, 0)
self.globals[name] = var
emit_VARI = partialmethod(emit_VAR, var_type=int_type)
emit_VARF = partialmethod(emit_VAR, var_type=float_type)
emit_VARB = partialmethod(emit_VAR, var_type=byte_type)
def emit_ALLOC(self, name, var_type):
self.locals[name] = self.builder.alloca(var_type, name=name)
emit_ALLOCI = partialmethod(emit_ALLOC, var_type=int_type)
emit_ALLOCF = partialmethod(emit_ALLOC, var_type=float_type)
emit_ALLOCB = partialmethod(emit_ALLOC, var_type=byte_type)
def emit_LOADI(self, name, target):
self.temps[target] = self.builder.load(self.vars[name], name=target)
emit_LOADF = emit_LOADI
emit_LOADB = emit_LOADI
def emit_STOREI(self, source, target):
self.builder.store(self.temps[source], self.vars[target])
emit_STOREF = emit_STOREI
emit_STOREB = emit_STOREI
def emit_ADDI(self, left, right, target):
self.temps[target] = self.builder.add(self.temps[left], self.temps[right], name=target)
def emit_ADDF(self, left, right, target):
self.temps[target] = self.builder.fadd(self.temps[left], self.temps[right], name=target)
def emit_SUBI(self, left, right, target):
self.temps[target] = self.builder.sub(self.temps[left], self.temps[right], name=target)
def emit_SUBF(self, left, right, target):
self.temps[target] = self.builder.fsub(self.temps[left], self.temps[right], name=target)
def emit_MULI(self, left, right, target):
self.temps[target] = self.builder.mul(self.temps[left], self.temps[right], name=target)
def emit_MULF(self, left, right, target):
self.temps[target] = self.builder.fmul(self.temps[left], self.temps[right], name=target)
def emit_DIVI(self, left, right, target):
self.temps[target] = self.builder.sdiv(self.temps[left], self.temps[right], name=target)
def emit_DIVF(self, left, right, target):
self.temps[target] = self.builder.fdiv(self.temps[left], self.temps[right], name=target)
def emit_PRINT(self, source, runtime_name):
self.builder.call(self.runtime[runtime_name], [self.temps[source]])
emit_PRINTI = partialmethod(emit_PRINT, runtime_name="_print_int")
emit_PRINTF = partialmethod(emit_PRINT, runtime_name="_print_float")
emit_PRINTB = partialmethod(emit_PRINT, runtime_name="_print_byte")
def emit_CMPI(self, operator, left, right, target):
if operator == "=":
operator = "=="
tmp = self.builder.icmp_signed(operator, self.temps[left], self.temps[right], 'tmp')
self.temps[target] = self.builder.zext(tmp, int_type, target)
def emit_CMPF(self, operator, left, right, target):
if operator == "=":
operator = "=="
tmp = self.builder.fcmp_ordered(operator, self.temps[left], self.temps[right], 'tmp')
self.temps[target] = self.builder.zext(tmp, int_type, target)
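    # Note: icmp/fcmp yield an i1; zext widens it to i32 so boolean temporaries
    # stay integers, and emit_CBRANCH truncates back to i1 before branching.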
emit_CMPB = emit_CMPI
def emit_AND(self, left, right, target):
self.temps[target] = self.builder.and_(self.temps[left], self.temps[right], target)
def emit_OR(self, left, right, target):
self.temps[target] = self.builder.or_(self.temps[left], self.temps[right], target)
def emit_XOR(self, left, right, target):
self.temps[target] = self.builder.xor(self.temps[left], self.temps[right], target)
def emit_LABEL(self, lbl_name):
self.block = self.get_block(lbl_name)
self.builder.position_at_end(self.block)
def emit_BRANCH(self, dst_label):
if not self.block.is_terminated:
self.builder.branch(self.get_block(dst_label))
def emit_CBRANCH(self, test_target, true_label, false_label):
true_block = self.get_block(true_label)
false_block = self.get_block(false_label)
testvar = self.temps[test_target]
self.builder.cbranch(self.builder.trunc(testvar, IntType(1)), true_block, false_block)
def emit_RET(self, register):
self.builder.store(self.temps[register], self.vars['return'])
self.builder.branch(self.return_block)
def emit_CALL(self, func_name, *registers):
args = [self.temps[r] for r in registers[:-1]]
target = registers[-1]
self.temps[target] = self.builder.call(self.globals[func_name], args)
def compile_llvm(source):
from ir import compile_ircode
generator = GenerateLLVM()
ir_functions = compile_ircode(source)
for ir_func in ir_functions:
generator.generate_code(ir_func)
return str(generator.module)
def main():
import sys
    if len(sys.argv) != 2:
        raise SystemExit("usage: llvm.py <sourcefile>")
source = open(sys.argv[1]).read()
llvm_code = compile_llvm(source)
print(llvm_code)
if __name__ == '__main__':
main()
| 35.970954 | 102 | 0.630176 |
acedab4b50c728019dba842d5d62c4836acfcfc5 | 2,536 | py | Python | Comd/level.py | Hotkota/Am | da55c4a710e6c90577e1db1f93f107d171462959 | [
"MIT"
] | 3 | 2020-08-18T04:45:36.000Z | 2021-01-22T15:58:44.000Z | Comd/level.py | Hotkota/Am | da55c4a710e6c90577e1db1f93f107d171462959 | [
"MIT"
] | null | null | null | Comd/level.py | Hotkota/Am | da55c4a710e6c90577e1db1f93f107d171462959 | [
"MIT"
] | null | null | null | import sqlite3
import discord
from discord.ext import commands
class Level(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(aliases = ["level", "lvl", "xp", "опыт"])
async def уровень(self, ctx, *, arg):
if arg != 18:
pass
else:
if member.bot:
await ctx.send("У ботов нет профиля")
else:
with sqlite3.connect("../am/data/DB/Database.db") as conn:
cursor = conn.cursor()
for row in cursor.execute(f"SELECT lvl, xp, name FROM users where id={arg}").fetchall():
emb = discord.Embed(title = f"Профиль {row[1]}",colour = discord.Color.red())
emb.description = f"Уровень: **{row[0]}**\nопыт: **{row[1]}**\nДо нового уровня: **{(5*row[0]**2+50*row[0]+100)-row[1]}**"
await ctx.send(embed = emb)
@commands.command(aliases = ["level", "lvl", "xp", "опыт"])
async def уровень(self, ctx, member: discord.Member):
if member.bot:
await ctx.send("У ботов нет профиля")
else:
with sqlite3.connect("../am/data/DB/Database.db") as conn:
cursor = conn.cursor()
for row in cursor.execute(f"SELECT lvl, xp FROM users where id={member.id}").fetchall():
emb = discord.Embed(title = f"Профиль {ctx.message.author.name}",colour = discord.Color.red())
emb.set_thumbnail(url = ctx.message.author.avatar_url)
emb.description = f"Уровень: **{row[0]}**\nопыт: **{row[1]}**\nДо нового уровня: **{(5*row[0]**2+50*row[0]+100)-row[1]}**"
await ctx.send(embed = emb)
@уровень.error
async def Level_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
with sqlite3.connect("../am/data/DB/Database.db") as conn:
cursor = conn.cursor()
for row in cursor.execute(f"SELECT lvl, xp FROM users where id={ctx.message.author.id}").fetchall():
emb = discord.Embed(title = f"Профиль {ctx.message.author.name}",colour = discord.Color.red())
emb.set_thumbnail(url = ctx.message.author.avatar_url)
emb.description = f"Уровень: **{row[0]}**\nопыт: **{row[1]}**\nДо нового уровня: **{(5*row[0]**2+50*row[0]+100)-row[1]}**"
await ctx.send(embed = emb)
def setup(client):
client.add_cog(Level(client)) | 50.72 | 146 | 0.554416 |