hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4794c83d8776c8c5b558d4635d40488a4106450a | 133 | py | Python | dreamerv2/models/__init__.py | magamba/dreamerv2 | ad7d54e256a03378bfd483e8a4c389ab2b444078 | [
"MIT"
] | 97 | 2021-07-08T07:05:22.000Z | 2022-03-29T11:47:49.000Z | dreamerv2/models/__init__.py | magamba/dreamerv2 | ad7d54e256a03378bfd483e8a4c389ab2b444078 | [
"MIT"
] | 2 | 2021-09-01T09:37:07.000Z | 2022-01-28T15:59:54.000Z | dreamerv2/models/__init__.py | magamba/dreamerv2 | ad7d54e256a03378bfd483e8a4c389ab2b444078 | [
"MIT"
] | 14 | 2021-07-08T07:51:47.000Z | 2022-03-30T14:58:54.000Z | from .actor import DiscreteActionModel
from .rssm import RSSM
from .dense import DenseModel
from .pixel import ObsDecoder, ObsEncoder | 33.25 | 41 | 0.842105 |
125dd99536826e1f3866683480f3484f8a41e82e | 559 | py | Python | test/selenium/src/lib/ui/daily_emails_ui_facade.py | pbedn/ggrc-core | 12ae4720a430730835f1d02def62c0f6ef453521 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-08-26T06:56:01.000Z | 2021-07-08T13:56:20.000Z | test/selenium/src/lib/ui/daily_emails_ui_facade.py | pbedn/ggrc-core | 12ae4720a430730835f1d02def62c0f6ef453521 | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2021-02-02T23:04:30.000Z | 2022-03-02T09:54:47.000Z | test/selenium/src/lib/ui/daily_emails_ui_facade.py | pbedn/ggrc-core | 12ae4720a430730835f1d02def62c0f6ef453521 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2016-08-23T10:51:19.000Z | 2016-08-23T10:51:19.000Z | # Copyright (C) 2019 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Daily emails UI facade."""
from lib.service import daily_emails_service
def get_emails_by_user_names(user_names):
  """Open the daily digest page and return {user_name: email} for each name."""
  service = daily_emails_service.DailyEmailsService()
  service.open_daily_digest()
  # one service lookup per requested name, collected directly into a dict
  return {name: service.get_email_by_user_name(name) for name in user_names}
| 34.9375 | 78 | 0.779964 |
62585215cf3a7190102a9918346fc067f059335b | 124 | py | Python | example24.py | wnykuang/fluentPython | b0edea9971627f100b3e53a1acda8589fd728552 | [
"MIT"
] | null | null | null | example24.py | wnykuang/fluentPython | b0edea9971627f100b3e53a1acda8589fd728552 | [
"MIT"
] | null | null | null | example24.py | wnykuang/fluentPython | b0edea9971627f100b3e53a1acda8589fd728552 | [
"MIT"
] | null | null | null | colors = ['black', 'white']
sizes = ['S', 'M', 'L']
# cartesian product of colors x sizes, color-major order
tshirts = [(c, s) for c in colors for s in sizes]
print(tshirts) | 31 | 57 | 0.612903 |
96e6d335e4eac3c3aa430cc20b2e23d85a15f945 | 923 | py | Python | cli.py | midnights-straychild/weatherman | 50354f0639fbcdde01e1ac6290bf71379581868b | [
"MIT"
] | null | null | null | cli.py | midnights-straychild/weatherman | 50354f0639fbcdde01e1ac6290bf71379581868b | [
"MIT"
] | null | null | null | cli.py | midnights-straychild/weatherman | 50354f0639fbcdde01e1ac6290bf71379581868b | [
"MIT"
] | null | null | null | #!/usr/bin/python
import sys
from app.db import DB
def main():
    """Dispatch on the first CLI argument: 'initdb', 'inittestdata', or both.

    With no argument, both the schema and the test data are imported.
    """
    try:
        function = sys.argv[1]
    except IndexError:
        function = "do all"
    print("You have chosen to " + function)
    # BUG FIX: the original compared with 'is' (identity), which never
    # matches a string taken from sys.argv, so the 'initdb' and
    # 'inittestdata' branches were unreachable. Use '==' for equality.
    if function == "initdb":
        print("init db")
        initdb()
    elif function == "inittestdata":
        print("init testdata")
        inittestdata()
    else:
        initdb()
        inittestdata()
def initdb():
    """Create the database schema from the tables dump."""
    importDumpFromPath("db/tables.sql")
def inittestdata():
    """Populate the database with test data from the testdata dump."""
    importDumpFromPath("db/testdata.sql")
def importDumpFromPath(path):
    """Execute every SQL statement in the dump file at *path*.

    Statements in the dump are separated by blank lines ('\\n\\n'). The
    connection is committed only if every statement executes successfully;
    on failure the exception propagates and nothing is committed.
    """
    dump = readFile(path)
    commands = dump.split('\n\n')
    db = DB()
    connection = db.connect()
    cursor = connection.cursor()
    try:
        for command in commands:
            if command:  # skip empty fragments from consecutive blank lines
                cursor.execute(command)
    finally:
        # FIX: the original leaked the cursor if execute() raised;
        # always close it before committing or propagating.
        cursor.close()
    connection.commit()
def readFile(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        return handle.read()
# NOTE(review): this runs at import time as well as when executed directly;
# consider guarding with `if __name__ == '__main__':`.
main()
ca0e5de98e5883b8639e5b9170a4e249f762d408 | 484 | py | Python | app/product/serializers.py | chiti07/lulo-exercise | 425bf2d05732ebad5302e44306187a3b83921285 | [
"MIT"
] | null | null | null | app/product/serializers.py | chiti07/lulo-exercise | 425bf2d05732ebad5302e44306187a3b83921285 | [
"MIT"
] | null | null | null | app/product/serializers.py | chiti07/lulo-exercise | 425bf2d05732ebad5302e44306187a3b83921285 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from core.models import Tag, Product
class TagSerializer(serializers.ModelSerializer):
    """Serializer for Tag objects; exposes 'id' (read-only) and 'name'."""

    class Meta:
        model = Tag
        fields = ('id', 'name')
        read_only_fields = ('id',)
class ProductSerializer(serializers.ModelSerializer):
    """Serializer for Product objects; 'id' is read-only."""

    class Meta:
        model = Product
        fields = ('id', 'code', 'description', 'picture')
        read_only_fields = ('id',)
class ProductDetailSerializer(ProductSerializer):
    """Detail-view serializer; currently identical to ProductSerializer.

    Placeholder subclass — presumably intended to add detail-only fields
    (e.g. nested tags) later; verify against the view that uses it.
    """
| 22 | 57 | 0.657025 |
7347f382a42d99dedfbe3a9f3d08ad0debb3da97 | 3,605 | py | Python | ctripspider/ctripspider/middlewares.py | JoeJing/ctripspider | 02861129ff37d766589c644f6989bfdeff85301e | [
"MIT"
] | null | null | null | ctripspider/ctripspider/middlewares.py | JoeJing/ctripspider | 02861129ff37d766589c644f6989bfdeff85301e | [
"MIT"
] | 1 | 2022-03-02T14:59:20.000Z | 2022-03-02T14:59:20.000Z | ctripspider/ctripspider/middlewares.py | JoeJing/ctripspider | 02861129ff37d766589c644f6989bfdeff85301e | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class CtripspiderSpiderMiddleware(object):
    """Spider middleware for the ctripspider project.

    Matches the ``scrapy startproject`` boilerplate: every hook is a
    pass-through, so requests, responses and items flow through unchanged.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, dict or Item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Response, dict
        # or Item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        # Signal handler wired up in from_crawler(); logs spider start.
        spider.logger.info('Spider opened: %s' % spider.name)
class CtripspiderDownloaderMiddleware(object):
    """Downloader middleware for the ctripspider project.

    Matches the ``scrapy startproject`` boilerplate: requests and
    responses pass through unmodified.
    """
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either;
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        # Signal handler wired up in from_crawler(); logs spider start.
        spider.logger.info('Spider opened: %s' % spider.name)
| 34.663462 | 78 | 0.666852 |
2b65b21a9edb415ae1affd4e2f5549927203ccf9 | 1,173 | py | Python | src/sw/arp-poisoning/server.py | nniranjhana/dps-p4 | 0b704f98570c3a0d2b44b4c5189ca48b2d1a00a3 | [
"Apache-2.0"
] | 4 | 2019-07-22T04:32:55.000Z | 2022-02-25T02:24:18.000Z | src/sw/arp-poisoning/server.py | nniranjhana/dps-p4 | 0b704f98570c3a0d2b44b4c5189ca48b2d1a00a3 | [
"Apache-2.0"
] | null | null | null | src/sw/arp-poisoning/server.py | nniranjhana/dps-p4 | 0b704f98570c3a0d2b44b4c5189ca48b2d1a00a3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
import os
from scapy.all import sniff, sendp, get_if_list, get_if_hwaddr, get_if_raw_hwaddr
from scapy.all import Ether, ARP
def send_response(pkt):
    """Forge an ARP reply claiming this host's MAC, send it, then exit.

    Answers the ARP request in *pkt* with our own hardware address
    (ARP poisoning), so the requester caches our MAC for the queried IP.
    """
    client_hw_addr = pkt[Ether].src
    client_ip_addr = pkt[ARP].psrc
    print "request detected from client with MAC: %s and IP: %s" % (client_hw_addr, client_ip_addr)
    # pick the first 'eth*' interface (Python 2: filter() returns a list)
    ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
    iface = ifaces[0]
    print "sending response packet from interface %s" % iface
    # op=2 is an ARP reply ("is-at"); the source MAC is ours, not the
    # real owner's of client's queried IP
    rpkt = Ether(src = get_if_hwaddr(iface), dst = client_hw_addr)
    rpkt = rpkt / ARP(op = 2, hwsrc = get_if_hwaddr(iface), hwdst = client_hw_addr, pdst = client_ip_addr)
    rpkt.show2()
    sendp(rpkt, iface = iface, verbose = False) # sendp works at layer 2
    # answer exactly one request, then terminate the whole process
    exit(1)
def handle_pkt(pkt):
    """Sniff callback: respond (and exit) on the first ARP packet seen."""
    if ARP in pkt:
        print "got an ARP packet"
        pkt.show2()
        # send_response() calls exit(1), so only one ARP packet is answered
        send_response(pkt)
def main():
    """Sniff on the first 'eth*' interface, passing packets to handle_pkt."""
    # Python 2: filter() returns a list, so indexing is safe
    ifaces = filter(lambda i: 'eth' in i, os.listdir('/sys/class/net/'))
    iface = ifaces[0]
    print "sniffing on %s" % iface
    sys.stdout.flush()
    sniff(iface = iface,
          prn = lambda x: handle_pkt(x))
    # sniff function passes the packet object as the one arg into prn: func
# Script entry point: start the sniffer only when executed directly.
if __name__ == '__main__':
    main()
f46c7e8c20bd613e31d73e77bc01d6c16742b684 | 40,751 | py | Python | tempest/scenario/test_network_basic_ops.py | Dajvid/tempest | 7fdd39c6dbde37bccd419c4037e1e352a5189c5a | [
"Apache-2.0"
] | 1 | 2020-01-14T03:20:44.000Z | 2020-01-14T03:20:44.000Z | tempest/scenario/test_network_basic_ops.py | Dajvid/tempest | 7fdd39c6dbde37bccd419c4037e1e352a5189c5a | [
"Apache-2.0"
] | 1 | 2019-08-08T10:36:44.000Z | 2019-08-09T05:58:23.000Z | tempest/scenario/test_network_basic_ops.py | Dajvid/tempest | 7fdd39c6dbde37bccd419c4037e1e352a5189c5a | [
"Apache-2.0"
] | 1 | 2019-08-08T09:21:25.000Z | 2019-08-08T09:21:25.000Z | # Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from oslo_log import log as logging
import testtools
from tempest.common import utils
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest.scenario import manager
CONF = config.CONF
LOG = logging.getLogger(__name__)

# (floating_ip, server) pair tracking which server currently holds the
# suite's floating IP; 'server' is None while the IP is disassociated.
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
                                           ['floating_ip', 'server'])
class TestNetworkBasicOps(manager.NetworkScenarioTest):
"""The test suite of network basic operations
This smoke test suite assumes that Nova has been configured to
boot VM's with Neutron-managed networking, and attempts to
verify network connectivity as follows:
There are presumed to be two types of networks: tenant and
public. A tenant network may or may not be reachable from the
Tempest host. A public network is assumed to be reachable from
the Tempest host, and it should be possible to associate a public
('floating') IP address with a tenant ('fixed') IP address to
facilitate external connectivity to a potentially unroutable
tenant IP address.
This test suite can be configured to test network connectivity to
a VM via a tenant network, a public network, or both. If both
networking types are to be evaluated, tests that need to be
executed remotely on the VM (via ssh) will only be run against
one of the networks (to minimize test execution time).
Determine which types of networks to test as follows:
* Configure tenant network checks (via the
'project_networks_reachable' key) if the Tempest host should
have direct connectivity to tenant networks. This is likely to
be the case if Tempest is running on the same host as a
single-node devstack installation with IP namespaces disabled.
* Configure checks for a public network if a public network has
been configured prior to the test suite being run and if the
Tempest host should have connectivity to that public network.
Checking connectivity for a public network requires that a
value be provided for 'public_network_id'. A value can
optionally be provided for 'public_router_id' if tenants will
use a shared router to access a public network (as is likely to
be the case when IP namespaces are not enabled). If a value is
not provided for 'public_router_id', a router will be created
for each tenant and use the network identified by
'public_network_id' as its gateway.
"""
@classmethod
def skip_checks(cls):
    """Skip the suite unless the deployment can satisfy its network needs."""
    super(TestNetworkBasicOps, cls).skip_checks()
    reachable = CONF.network.project_networks_reachable
    public_id = CONF.network.public_network_id
    if not reachable and not public_id:
        raise cls.skipException(
            'Either project_networks_reachable must be "true", or '
            'public_network_id must be defined.')
    for extension in ['router', 'security-group']:
        if utils.is_extension_enabled(extension, 'network'):
            continue
        raise cls.skipException("%s extension not enabled." % extension)
    if not CONF.network_feature_enabled.floating_ips:
        raise cls.skipException("Floating ips are not available")
@classmethod
def setup_credentials(cls):
    """Request credentials without pre-provisioned network resources.

    The tests build their own networks/subnets/routers.
    """
    # Create no network resources for these tests.
    cls.set_network_resources()
    super(TestNetworkBasicOps, cls).setup_credentials()
def setUp(self):
    super(TestNetworkBasicOps, self).setUp()
    # keypair name -> keypair dict; used by _get_server_key()
    self.keypairs = {}
    # every server booted by _create_server(), for connectivity checks
    self.servers = []
def _setup_network_and_servers(self, **kwargs):
    """Create network/subnet/router, boot a server, attach a floating IP.

    Remaining kwargs are forwarded to create_networks(). Pass
    boot_with_port=True to pre-create a port and boot the server on it.
    Sets self.network/subnet/router/ports and self.floating_ip_tuple.
    """
    boot_with_port = kwargs.pop('boot_with_port', False)
    self.network, self.subnet, self.router = self.create_networks(**kwargs)
    self.check_networks()

    self.ports = []
    port_id = None
    if boot_with_port:
        # create a port on the network and boot with that
        port_id = self.create_port(self.network['id'])['id']
        self.ports.append({'port': port_id})

    server = self._create_server(self.network, port_id)
    ssh_login = CONF.validation.image_ssh_user
    # NOTE: the loop variable shadows 'server'; with a single booted
    # server they are the same object, so the floating IP below still
    # lands on the server created above.
    for server in self.servers:
        # call the common method in the parent class
        self.check_tenant_network_connectivity(
            server, ssh_login, self._get_server_key(server),
            servers_for_debug=self.servers)

    floating_ip = self.create_floating_ip(server)
    self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
def check_networks(self):
    """Verify the created network, subnet and router appear in listings.

    Lists networks, subnets and routers via the admin clients and
    asserts the resources created by this test are present.
    """
    nets = self.os_admin.networks_client.list_networks()['networks']
    self.assertIn(self.network['name'], [net['name'] for net in nets])
    self.assertIn(self.network['id'], [net['id'] for net in nets])

    if self.subnet:
        subnets = self.os_admin.subnets_client.list_subnets()['subnets']
        self.assertIn(self.network['id'],
                      [sub['network_id'] for sub in subnets])
        self.assertIn(self.subnet['id'], [sub['id'] for sub in subnets])

    if self.router:
        routers = self.os_admin.routers_client.list_routers()['routers']
        self.assertIn(self.router['name'],
                      [rtr['name'] for rtr in routers])
        self.assertIn(self.router['id'], [rtr['id'] for rtr in routers])
def _create_server(self, network, port_id=None):
    """Boot a server on *network* with a fresh keypair and security group.

    The keypair is cached in self.keypairs (see _get_server_key) and the
    server appended to self.servers. If *port_id* is given the server
    boots attached to that pre-created port.
    """
    keypair = self.create_keypair()
    self.keypairs[keypair['name']] = keypair
    security_groups = [
        {'name': self._create_security_group()['name']}
    ]
    network = {'uuid': network['id']}
    if port_id is not None:
        network['port'] = port_id

    server = self.create_server(
        networks=[network],
        key_name=keypair['name'],
        security_groups=security_groups)
    self.servers.append(server)
    return server
def _get_server_key(self, server):
    """Return the private key of the keypair *server* was booted with."""
    keypair = self.keypairs[server['key_name']]
    return keypair['private_key']
def _check_public_network_connectivity(
        self, should_connect=True, msg=None,
        should_check_floating_ip_status=True, mtu=None):
    """Verifies connectivity to a VM via public network and floating IP

    and verifies floating IP has resource status is correct.

    :param should_connect: bool. determines if connectivity check is
    negative or positive.
    :param msg: Failure message to add to Error message. Should describe
    the place in the test scenario where the method was called,
    to indicate the context of the failure
    :param should_check_floating_ip_status: bool. should status of
    floating_ip be checked or not
    :param mtu: int. MTU network to use for connectivity validation
    """
    ssh_login = CONF.validation.image_ssh_user
    floating_ip, server = self.floating_ip_tuple
    ip_address = floating_ip['floating_ip_address']
    private_key = None
    floatingip_status = 'DOWN'
    if should_connect:
        # only fetch a login key when we expect to connect; a reachable
        # floating IP is expected to be reported ACTIVE, a detached one DOWN
        private_key = self._get_server_key(server)
        floatingip_status = 'ACTIVE'

    # Check FloatingIP Status before initiating a connection
    if should_check_floating_ip_status:
        self.check_floating_ip_status(floating_ip, floatingip_status)

    message = 'Public network connectivity check failed'
    if msg:
        message += '. Reason: %s' % msg

    self.check_vm_connectivity(
        ip_address, ssh_login, private_key, should_connect,
        message, server, mtu=mtu)
def _disassociate_floating_ips(self):
    """Detach the floating IP from its port and clear the tracked server."""
    fip, _ = self.floating_ip_tuple
    fip = self.floating_ips_client.update_floatingip(
        fip['id'], port_id=None)['floatingip']
    self.assertIsNone(fip['port_id'])
    # no server holds the IP anymore
    self.floating_ip_tuple = Floating_IP_tuple(fip, None)
def _reassociate_floating_ips(self):
    """Attach the floating IP to a freshly booted server's port."""
    fip, _ = self.floating_ip_tuple
    # create a new server for the floating ip
    new_server = self._create_server(self.network)
    port_id, _ = self._get_server_port_id_and_ip4(new_server)
    fip = self.floating_ips_client.update_floatingip(
        fip['id'], port_id=port_id)['floatingip']
    self.assertEqual(port_id, fip['port_id'])
    self.floating_ip_tuple = Floating_IP_tuple(fip, new_server)
def _create_new_network(self, create_gateway=False):
    """Create self.new_net plus a subnet, optionally with a gateway IP.

    Without a gateway the subnet is isolated (no default route).
    """
    self.new_net = self._create_network()
    subnet_kwargs = {} if create_gateway else {'gateway_ip': None}
    self.new_subnet = self.create_subnet(network=self.new_net,
                                         **subnet_kwargs)
def _hotplug_server(self):
    """Attach a NIC on self.new_net to the running server and bring it up.

    Polls Neutron until the new port appears and the guest until the new
    interface is visible, then configures the port's fixed IP on the NIC
    inside the guest over SSH.
    """
    old_floating_ip, server = self.floating_ip_tuple
    ip_address = old_floating_ip['floating_ip_address']
    private_key = self._get_server_key(server)
    ssh_client = self.get_remote_client(
        ip_address, private_key=private_key, server=server)
    # snapshot the guest's NICs so the hotplugged one can be diffed later
    old_nic_list = self._get_server_nics(ssh_client)
    # get a port from a list of one item
    port_list = self.os_admin.ports_client.list_ports(
        device_id=server['id'])['ports']
    self.assertEqual(1, len(port_list))
    old_port = port_list[0]
    interface = self.interface_client.create_interface(
        server_id=server['id'],
        net_id=self.new_net['id'])['interfaceAttachment']
    self.addCleanup(self.ports_client.wait_for_resource_deletion,
                    interface['port_id'])
    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.interface_client.delete_interface,
                    server['id'], interface['port_id'])

    def check_ports():
        # stores the result on self so the failure message below can use it
        self.new_port_list = [
            port for port in
            self.os_admin.ports_client.list_ports(
                device_id=server['id'])['ports']
            if port['id'] != old_port['id']
        ]
        return len(self.new_port_list) == 1

    if not test_utils.call_until_true(
            check_ports, CONF.network.build_timeout,
            CONF.network.build_interval):
        raise exceptions.TimeoutException(
            "No new port attached to the server in time (%s sec)! "
            "Old port: %s. Number of new ports: %d" % (
                CONF.network.build_timeout, old_port,
                len(self.new_port_list)))
    new_port = self.new_port_list[0]

    def check_new_nic():
        new_nic_list = self._get_server_nics(ssh_client)
        self.diff_list = [n for n in new_nic_list if n not in old_nic_list]
        return len(self.diff_list) == 1

    if not test_utils.call_until_true(
            check_new_nic, CONF.network.build_timeout,
            CONF.network.build_interval):
        raise exceptions.TimeoutException("Interface not visible on the "
                                          "guest after %s sec"
                                          % CONF.network.build_timeout)
    _, new_nic = self.diff_list[0]
    ip_output = ssh_client.exec_command('ip a')
    ip_address = new_port['fixed_ips'][0]['ip_address']
    ip_mask = CONF.network.project_network_mask_bits
    # check if the address is not already in use, if not, set it
    if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
        ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
            ip_address, ip_mask, new_nic))
        ssh_client.exec_command("sudo ip link set %s up" % new_nic)
def _get_server_nics(self, ssh_client):
    """Return [(index, nic_name), ...] parsed from the guest's 'ip address'."""
    pattern = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+)[@]?.*:')
    return pattern.findall(ssh_client.exec_command("ip address"))
def _check_network_internal_connectivity(self, network,
                                         should_connect=True):
    """via ssh check VM internal connectivity:
    - ping internal gateway and DHCP port, implying in-tenant connectivity
    pinging both, because L3 and DHCP agents might be on different nodes
    - ping internal compute port, implying connectivity to other VMs on
    this network
    """
    floating_ip, server = self.floating_ip_tuple
    # collect the fixed IPs of every network-service and compute port
    # owned by this tenant on the target network
    ports = self.os_admin.ports_client.list_ports(
        tenant_id=server['tenant_id'],
        network_id=network['id'])['ports']
    internal_ips = [
        port['fixed_ips'][0]['ip_address'] for port in ports
        if port['device_owner'].startswith(('network', 'compute'))
    ]

    self._check_server_connectivity(floating_ip, internal_ips,
                                    should_connect)
def _check_network_external_connectivity(self):
    """ping default gateway to imply external connectivity"""
    if not CONF.network.public_network_id:
        msg = 'public network not defined.'
        LOG.info(msg)
        return

    # We ping the external IP from the instance using its floating IP
    # which is always IPv4, so we must only test connectivity to
    # external IPv4 IPs if the external network is dualstack.
    v4_subnets = [
        s for s in self.os_admin.subnets_client.list_subnets(
            network_id=CONF.network.public_network_id)['subnets']
        if s['ip_version'] == 4
    ]
    # exactly one IPv4 external subnet is assumed; its gateway is pinged
    self.assertEqual(1, len(v4_subnets),
                     "Found %d IPv4 subnets" % len(v4_subnets))

    external_ips = [v4_subnets[0]['gateway_ip']]
    self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                    external_ips)
def _check_server_connectivity(self, floating_ip, address_list,
                               should_connect=True):
    """ssh in via *floating_ip* and ping every address in *address_list*."""
    server = self.floating_ip_tuple.server
    ssh_source = self.get_remote_client(
        floating_ip['floating_ip_address'],
        private_key=self._get_server_key(server),
        server=server)

    for remote_ip in address_list:
        self.check_remote_connectivity(ssh_source, remote_ip,
                                       should_connect)
def _update_router_admin_state(self, router, admin_state_up):
    """Set the router's admin_state_up flag and assert it took effect."""
    updated = self.routers_client.update_router(
        router['id'], admin_state_up=admin_state_up)['router']
    self.assertEqual(admin_state_up, updated['admin_state_up'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('f323b3ba-82f8-4db7-8ea6-6a895869ec49')
@utils.services('compute', 'network')
def test_network_basic_ops(self):
    """Basic network operation test

    For a freshly-booted VM with an IP address ("port") on a given
        network:

    - the Tempest host can ping the IP address.  This implies, but
     does not guarantee (see the ssh check that follows), that the
     VM has been assigned the correct IP address and has
     connectivity to the Tempest host.

    - the Tempest host can perform key-based authentication to an
     ssh server hosted at the IP address.  This check guarantees
     that the IP address is associated with the target VM.

    - the Tempest host can ssh into the VM via the IP address and
     successfully execute the following:

     - ping an external IP address, implying external connectivity.

     - ping an external hostname, implying that dns is correctly
       configured.

     - ping an internal IP address, implying connectivity to another
       VM on the same network.

    - detach the floating-ip from the VM and verify that it becomes
    unreachable

    - associate detached floating ip to a new VM and verify connectivity.
    VMs are created with unique keypair so connectivity also asserts
    that floating IP is associated with the new VM instead of the old
    one

    Verifies that floating IP status is updated correctly after each change
    """
    self._setup_network_and_servers()
    self._check_public_network_connectivity(should_connect=True)
    self._check_network_internal_connectivity(network=self.network)
    self._check_network_external_connectivity()
    # detaching the floating IP must make the VM unreachable...
    self._disassociate_floating_ips()
    self._check_public_network_connectivity(should_connect=False,
                                            msg="after disassociate "
                                                "floating ip")
    # ...and re-attaching it to a new VM must restore reachability
    self._reassociate_floating_ips()
    self._check_public_network_connectivity(should_connect=True,
                                            msg="after re-associate "
                                                "floating ip")
@decorators.idempotent_id('b158ea55-472e-4086-8fa9-c64ac0c6c1d0')
@testtools.skipUnless(utils.is_extension_enabled('net-mtu', 'network'),
                      'No way to calculate MTU for networks')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_mtu_sized_frames(self):
    """Validate that network MTU sized frames fit through."""
    self._setup_network_and_servers()
    # first check that connectivity works in general for the instance
    self._check_public_network_connectivity(should_connect=True)
    # now that we checked general connectivity, test that full size frames
    # can also pass between nodes; the network's advertised MTU (from the
    # net-mtu extension) is used as the ping payload size
    self._check_public_network_connectivity(
        should_connect=True, mtu=self.network['mtu'])
@decorators.idempotent_id('1546850e-fbaa-42f5-8b5f-03d8a6a95f15')
@testtools.skipIf(CONF.network.shared_physical_network,
                  'Connectivity can only be tested when in a '
                  'multitenant network environment')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_connectivity_between_vms_on_different_networks(self):
    """Test connectivity between VMs on different networks

    For a freshly-booted VM with an IP address ("port") on a given
        network:

    - the Tempest host can ping the IP address.

    - the Tempest host can ssh into the VM via the IP address and
     successfully execute the following:

     - ping an external IP address, implying external connectivity.

     - ping an external hostname, implying that dns is correctly
       configured.

     - ping an internal IP address, implying connectivity to another
       VM on the same network.

    - Create another network on the same tenant with subnet, create
    an VM on the new network.

     - Ping the new VM from previous VM failed since the new network
     was not attached to router yet.

     - Attach the new network to the router, Ping the new VM from
     previous VM succeed.

    """
    self._setup_network_and_servers()
    self._check_public_network_connectivity(should_connect=True)
    self._check_network_internal_connectivity(network=self.network)
    self._check_network_external_connectivity()
    self._create_new_network(create_gateway=True)
    new_server = self._create_server(self.new_net)
    new_server_ips = [addr['addr'] for addr in
                      new_server['addresses'][self.new_net['name']]]

    # Assert that pinging the new VM fails since the new network is not
    # connected to a router
    self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                    new_server_ips, should_connect=False)
    router_id = self.router['id']
    # attaching the new subnet to the router makes the new VM routable
    self.routers_client.add_router_interface(
        router_id, subnet_id=self.new_subnet['id'])

    self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    self.routers_client.remove_router_interface,
                    router_id, subnet_id=self.new_subnet['id'])
    self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                    new_server_ips, should_connect=True)
@decorators.idempotent_id('c5adff73-e961-41f1-b4a9-343614f18cfa')
@testtools.skipUnless(CONF.compute_feature_enabled.interface_attach,
                      'NIC hotplug not available')
@testtools.skipIf(CONF.network.port_vnic_type in ['direct', 'macvtap'],
                  'NIC hotplug not supported for '
                  'vnic_type direct or macvtap')
@utils.services('compute', 'network')
def test_hotplug_nic(self):
    """Test hotplug network interface

    1. Create a network and a VM.
    2. Check connectivity to the VM via a public network.
    3. Create a new network, with no gateway.
    4. Bring up a new interface
    5. check the VM reach the new network

    """
    self._setup_network_and_servers()
    self._check_public_network_connectivity(should_connect=True)
    # no gateway: the new network is reachable only via the hotplugged NIC
    self._create_new_network()
    self._hotplug_server()
    self._check_network_internal_connectivity(network=self.new_net)
@decorators.idempotent_id('04b9fe4e-85e8-4aea-b937-ea93885ac59f')
@testtools.skipIf(CONF.network.shared_physical_network,
                  'Router state can be altered only with multitenant '
                  'networks capabilities')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_update_router_admin_state(self):
    """Test to update admin state up of router

    1. Check public connectivity before updating
            admin_state_up attribute of router to False
    2. Check public connectivity after updating
            admin_state_up attribute of router to False
    3. Check public connectivity after updating
            admin_state_up attribute of router to True
    """
    self._setup_network_and_servers()
    self._check_public_network_connectivity(
        should_connect=True, msg="before updating "
        "admin_state_up of router to False")
    # downing the router must cut the floating-IP path
    self._update_router_admin_state(self.router, False)
    # TODO(alokmaurya): Remove should_check_floating_ip_status=False check
    # once bug 1396310 is fixed

    self._check_public_network_connectivity(
        should_connect=False, msg="after updating "
        "admin_state_up of router to False",
        should_check_floating_ip_status=False)
    self._update_router_admin_state(self.router, True)
    self._check_public_network_connectivity(
        should_connect=True, msg="after updating "
        "admin_state_up of router to True")
@decorators.idempotent_id('d8bb918e-e2df-48b2-97cd-b73c95450980')
@testtools.skipIf(CONF.network.shared_physical_network,
'network isolation not available')
@testtools.skipUnless(CONF.scenario.dhcp_client,
"DHCP client is not available.")
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_subnet_details(self):
"""Tests that subnet's extra configuration details are affecting VMs.
This test relies on non-shared, isolated tenant networks.
NOTE: Neutron subnets push data to servers via dhcp-agent, so any
update in subnet requires server to actively renew its DHCP lease.
1. Configure subnet with dns nameserver
2. retrieve the VM's configured dns and verify it matches the one
configured for the subnet.
3. update subnet's dns
4. retrieve the VM's configured dns and verify it matches the new one
configured for the subnet.
TODO(yfried): add host_routes
any resolution check would be testing either:
* l3 forwarding (tested in test_network_basic_ops)
* Name resolution of an external DNS nameserver - out of scope for
Tempest
"""
# this test check only updates (no actual resolution) so using
# arbitrary ip addresses as nameservers, instead of parsing CONF
initial_dns_server = '1.2.3.4'
alt_dns_server = '9.8.7.6'
# Original timeouts are suggested by salvatore-orlando in
# https://bugs.launchpad.net/neutron/+bug/1412325/comments/3
#
# Compared to that renew_delay was increased, because
# busybox's udhcpc accepts SIGUSR1 as a renew request. Internally
# it goes into RENEW_REQUESTED state. If it receives a 2nd SIGUSR1
# signal while in that state then it calls the deconfig script
# ("/sbin/cirros-dhcpc deconfig" in sufficiently new cirros versions)
# which leads to the address being transiently deconfigured which
# for our case is unwanted.
renew_delay = 3 * CONF.network.build_interval
renew_timeout = CONF.network.build_timeout
self._setup_network_and_servers(dns_nameservers=[initial_dns_server])
self._check_public_network_connectivity(should_connect=True)
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip['floating_ip_address']
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(
ip_address, private_key=private_key, server=server)
dns_servers = [initial_dns_server]
servers = ssh_client.get_dns_servers()
self.assertEqual(set(dns_servers), set(servers),
'Looking for servers: {trgt_serv}. '
'Retrieved DNS nameservers: {act_serv} '
'From host: {host}.'
.format(host=ssh_client.ssh_client.host,
act_serv=servers,
trgt_serv=dns_servers))
self.subnet = self.subnets_client.update_subnet(
self.subnet['id'], dns_nameservers=[alt_dns_server])['subnet']
# asserts that Neutron DB has updated the nameservers
self.assertEqual([alt_dns_server], self.subnet['dns_nameservers'],
"Failed to update subnet's nameservers")
def check_new_dns_server():
# NOTE: Server needs to renew its dhcp lease in order to get new
# definitions from subnet
# NOTE(amuller): we are renewing the lease as part of the retry
# because Neutron updates dnsmasq asynchronously after the
# subnet-update API call returns.
ssh_client.renew_lease(fixed_ip=floating_ip['fixed_ip_address'],
dhcp_client=CONF.scenario.dhcp_client)
if ssh_client.get_dns_servers() != [alt_dns_server]:
LOG.debug("Failed to update DNS nameservers")
return False
return True
self.assertTrue(test_utils.call_until_true(check_new_dns_server,
renew_timeout,
renew_delay),
msg="DHCP renewal failed to fetch "
"new DNS nameservers")
@decorators.idempotent_id('f5dfcc22-45fd-409f-954c-5bd500d7890b')
@testtools.skipUnless(CONF.network_feature_enabled.port_admin_state_change,
"Changing a port's admin state is not supported "
"by the test environment")
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_update_instance_port_admin_state(self):
"""Test to update admin_state_up attribute of instance port
1. Check public and project connectivity before updating
admin_state_up attribute of instance port to False
2. Check public and project connectivity after updating
admin_state_up attribute of instance port to False
3. Check public and project connectivity after updating
admin_state_up attribute of instance port to True
"""
self._setup_network_and_servers()
_, server = self.floating_ip_tuple
server_id = server['id']
port_id = self.os_admin.ports_client.list_ports(
device_id=server_id)['ports'][0]['id']
server_pip = server['addresses'][self.network['name']][0]['addr']
server2 = self._create_server(self.network)
server2_fip = self.create_floating_ip(server2)
private_key = self._get_server_key(server2)
ssh_client = self.get_remote_client(server2_fip['floating_ip_address'],
private_key=private_key,
server=server2)
self._check_public_network_connectivity(
should_connect=True, msg="before updating "
"admin_state_up of instance port to False")
self.check_remote_connectivity(ssh_client, dest=server_pip,
should_succeed=True)
self.ports_client.update_port(port_id, admin_state_up=False)
self._check_public_network_connectivity(
should_connect=False, msg="after updating "
"admin_state_up of instance port to False",
should_check_floating_ip_status=False)
self.check_remote_connectivity(ssh_client, dest=server_pip,
should_succeed=False)
self.ports_client.update_port(port_id, admin_state_up=True)
self._check_public_network_connectivity(
should_connect=True, msg="after updating "
"admin_state_up of instance port to True")
self.check_remote_connectivity(ssh_client, dest=server_pip,
should_succeed=True)
@decorators.idempotent_id('759462e1-8535-46b0-ab3a-33aa45c55aaa')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_preserve_preexisting_port(self):
"""Test preserve pre-existing port
Tests that a pre-existing port provided on server boot is not deleted
if the server is deleted.
Nova should unbind the port from the instance on delete if the port was
not created by Nova as part of the boot request.
We should also be able to boot another server with the same port.
"""
# Setup the network, create a port and boot the server from that port.
self._setup_network_and_servers(boot_with_port=True)
_, server = self.floating_ip_tuple
self.assertEqual(1, len(self.ports),
'There should only be one port created for '
'server %s.' % server['id'])
port_id = self.ports[0]['port']
self.assertIsNotNone(port_id,
'Server should have been created from a '
'pre-existing port.')
# Assert the port is bound to the server.
port_list = self.os_admin.ports_client.list_ports(
device_id=server['id'], network_id=self.network['id'])['ports']
self.assertEqual(1, len(port_list),
'There should only be one port created for '
'server %s.' % server['id'])
self.assertEqual(port_id, port_list[0]['id'])
# Delete the server.
self.servers_client.delete_server(server['id'])
waiters.wait_for_server_termination(self.servers_client, server['id'])
# Assert the port still exists on the network but is unbound from
# the deleted server.
port = self.ports_client.show_port(port_id)['port']
self.assertEqual(self.network['id'], port['network_id'])
self.assertEqual('', port['device_id'])
self.assertEqual('', port['device_owner'])
# Boot another server with the same port to make sure nothing was
# left around that could cause issues.
server = self._create_server(self.network, port['id'])
port_list = self.os_admin.ports_client.list_ports(
device_id=server['id'], network_id=self.network['id'])['ports']
self.assertEqual(1, len(port_list),
'There should only be one port created for '
'server %s.' % server['id'])
self.assertEqual(port['id'], port_list[0]['id'])
@utils.requires_ext(service='network', extension='l3_agent_scheduler')
@decorators.idempotent_id('2e788c46-fb3f-4ac9-8f82-0561555bea73')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_router_rescheduling(self):
"""Tests that router can be removed from agent and add to a new agent.
1. Verify connectivity
2. Remove router from all l3-agents
3. Verify connectivity is down
4. Assign router to new l3-agent (or old one if no new agent is
available)
5. Verify connectivity
"""
# TODO(yfried): refactor this test to be used for other agents (dhcp)
# as well
list_hosts = (self.os_admin.routers_client.
list_l3_agents_hosting_router)
schedule_router = (self.os_admin.network_agents_client.
create_router_on_l3_agent)
unschedule_router = (self.os_admin.network_agents_client.
delete_router_from_l3_agent)
agent_list_alive = set(
a["id"] for a in
self.os_admin.network_agents_client.list_agents(
agent_type="L3 agent")['agents'] if a["alive"] is True
)
self._setup_network_and_servers()
# NOTE(kevinbenton): we have to use the admin credentials to check
# for the distributed flag because self.router only has a project view.
admin = self.os_admin.routers_client.show_router(
self.router['id'])
if admin['router'].get('distributed', False):
msg = "Rescheduling test does not apply to distributed routers."
raise self.skipException(msg)
self._check_public_network_connectivity(should_connect=True)
# remove resource from agents
hosting_agents = set(a["id"] for a in
list_hosts(self.router['id'])['agents'])
no_migration = agent_list_alive == hosting_agents
LOG.info("Router will be assigned to {mig} hosting agent".
format(mig="the same" if no_migration else "a new"))
for hosting_agent in hosting_agents:
unschedule_router(hosting_agent, self.router['id'])
self.assertNotIn(hosting_agent,
[a["id"] for a in
list_hosts(self.router['id'])['agents']],
'unscheduling router failed')
# verify resource is un-functional
self._check_public_network_connectivity(
should_connect=False,
msg='after router unscheduling',
)
# schedule resource to new agent
target_agent = list(hosting_agents if no_migration else
agent_list_alive - hosting_agents)[0]
schedule_router(target_agent,
router_id=self.router['id'])
self.assertEqual(
target_agent,
list_hosts(self.router['id'])['agents'][0]['id'],
"Router failed to reschedule. Hosting agent doesn't match "
"target agent")
# verify resource is functional
self._check_public_network_connectivity(
should_connect=True,
msg='After router rescheduling')
@utils.requires_ext(service='network', extension='port-security')
@testtools.skipUnless(CONF.compute_feature_enabled.interface_attach,
'NIC hotplug not available')
@decorators.idempotent_id('7c0bb1a2-d053-49a4-98f9-ca1a1d849f63')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_port_security_macspoofing_port(self):
"""Tests port_security extension enforces mac spoofing
Neutron security groups always apply anti-spoof rules on the VMs. This
allows traffic to originate and terminate at the VM as expected, but
prevents traffic to pass through the VM. Anti-spoof rules are not
required in cases where the VM routes traffic through it.
The test steps are:
1. Create a new network.
2. Connect (hotplug) the VM to a new network.
3. Check the VM can ping a server on the new network ("peer")
4. Spoof the mac address of the new VM interface.
5. Check the Security Group enforces mac spoofing and blocks pings via
spoofed interface (VM cannot ping the peer).
6. Disable port-security of the spoofed port- set the flag to false.
7. Retest 3rd step and check that the Security Group allows pings via
the spoofed interface.
"""
spoof_mac = "00:00:00:00:00:01"
# Create server
self._setup_network_and_servers()
self._check_public_network_connectivity(should_connect=True)
self._create_new_network()
self._hotplug_server()
fip, server = self.floating_ip_tuple
new_ports = self.os_admin.ports_client.list_ports(
device_id=server["id"], network_id=self.new_net["id"])['ports']
spoof_port = new_ports[0]
private_key = self._get_server_key(server)
ssh_client = self.get_remote_client(fip['floating_ip_address'],
private_key=private_key,
server=server)
spoof_nic = ssh_client.get_nic_name_by_mac(spoof_port["mac_address"])
peer = self._create_server(self.new_net)
peer_address = peer['addresses'][self.new_net['name']][0]['addr']
self.check_remote_connectivity(ssh_client, dest=peer_address,
nic=spoof_nic, should_succeed=True)
# Set a mac address by making nic down temporary
cmd = ("sudo ip link set {nic} down;"
"sudo ip link set dev {nic} address {mac};"
"sudo ip link set {nic} up").format(nic=spoof_nic,
mac=spoof_mac)
ssh_client.exec_command(cmd)
new_mac = ssh_client.get_mac_address(nic=spoof_nic)
self.assertEqual(spoof_mac, new_mac)
self.check_remote_connectivity(ssh_client, dest=peer_address,
nic=spoof_nic, should_succeed=False)
self.ports_client.update_port(spoof_port["id"],
port_security_enabled=False,
security_groups=[])
self.check_remote_connectivity(ssh_client, dest=peer_address,
nic=spoof_nic, should_succeed=True)
| 46.046328 | 79 | 0.635175 |
2ac6b1dd618e3d49ff96ae041c33b0c95b2de1cc | 1,248 | py | Python | errors.py | vlee489/Turnip-Bot | 2571846607d6ca57171325211c5eb6572013c767 | [
"MIT"
] | 5 | 2020-04-19T22:47:28.000Z | 2020-06-01T04:37:12.000Z | errors.py | vlee489/Turnip-Bot | 2571846607d6ca57171325211c5eb6572013c767 | [
"MIT"
] | 7 | 2020-04-21T23:25:20.000Z | 2021-04-20T07:50:11.000Z | errors.py | vlee489/Turnip-Bot | 2571846607d6ca57171325211c5eb6572013c767 | [
"MIT"
] | null | null | null | """
This file contains errors thrown by Turnip Bot internally
"""
class EndPointValidation(Exception):
"""
Raised when an invalid API endpoint is given
"""
pass
class InvalidAPICall(Exception):
"""
Raised when an invalid API returns something other than 200
"""
pass
class FileNotCreated(Exception):
"""
Raised when a requested file to upload hasn't been created
"""
pass
class AWSError(Exception):
"""
For catching AWS error like S3 and dynamoDB
"""
pass
class InvalidDateTime(Exception):
"""
For catching invalid date/time
"""
pass
class InvalidPeriod(Exception):
"""
Raised when an invalid period is given
"""
pass
class InvalidDateFormat(Exception):
"""
Raised when an invalid date format is given
"""
pass
class BellsOutOfRange(Exception):
"""
Raised when the value in bells is out of range
"""
pass
class NoData(Exception):
"""
Raised when there's no data to work with
"""
pass
class DataCorrect(Exception):
"""
Raised when data in DB is already valid
"""
pass
class InternalError(Exception):
"""
Report an internal error has happened
"""
pass
| 15.407407 | 63 | 0.632212 |
d517a29d36c1321f23dd60b5ba7f629b81d7cdda | 737 | py | Python | jevtagram/images/migrations/0003_auto_20181107_1452.py | jjun0214z/FullStack-Instagram-Clone | 142cf37be8a90983a74c576766e5abd1cd0771c1 | [
"MIT"
] | null | null | null | jevtagram/images/migrations/0003_auto_20181107_1452.py | jjun0214z/FullStack-Instagram-Clone | 142cf37be8a90983a74c576766e5abd1cd0771c1 | [
"MIT"
] | 11 | 2020-06-05T19:32:03.000Z | 2022-02-26T14:26:35.000Z | jevtagram/images/migrations/0003_auto_20181107_1452.py | jjun0214z/FullStack-Instagram-Clone | 142cf37be8a90983a74c576766e5abd1cd0771c1 | [
"MIT"
] | 2 | 2019-07-29T14:56:16.000Z | 2019-11-22T01:54:17.000Z | # Generated by Django 2.0.9 on 2018-11-07 05:52
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('images', '0002_auto_20181106_1038'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='images.Image'),
),
migrations.AlterField(
model_name='like',
name='image',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='images.Image'),
),
]
| 29.48 | 136 | 0.632293 |
2a6a2cbb3d72fa8112e58e7c1005f3d604cf47ec | 19,358 | py | Python | tests/interop/test_calls.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/interop/test_calls.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | [
"Apache-2.0",
"MIT"
] | null | null | null | tests/interop/test_calls.py | ProvoK/trio-asyncio | 8098e93a63eedf7188545cbda45e54c0bcdd85fc | [
"Apache-2.0",
"MIT"
] | null | null | null | import pytest
import asyncio
import trio
import sniffio
from trio_asyncio import aio_as_trio, trio_as_aio
from tests import aiotest
from functools import partial
import sys
from .. import utils as test_utils
class Seen:
flag = 0
async def async_gen_to_list(generator):
result = []
async for item in generator:
result.append(item)
return result
class TrioContext:
def __init__(self, parent):
self.parent = parent
async def __aenter__(self):
assert self.parent.did_it == 0
self.parent.did_it = 1
if sys.version_info >= (3, 7):
assert sniffio.current_async_library() == "trio"
await trio.sleep(0.01)
self.parent.did_it = 2
return self
async def __aexit__(self, *tb):
assert self.parent.did_it == 3
self.parent.did_it = 4
class AioContext:
def __init__(self, parent, loop):
self.parent = parent
self.loop = loop
async def __aenter__(self):
assert self.parent.did_it == 0
self.parent.did_it = 1
if sys.version_info >= (3, 7):
assert sniffio.current_async_library() == "asyncio"
await asyncio.sleep(0.01, loop=self.loop)
self.parent.did_it = 2
return self
async def __aexit__(self, *tb):
assert self.parent.did_it == 3
self.parent.did_it = 4
class TestCalls(aiotest.TestCase):
async def call_t_a(self, proc, *args, loop=None):
"""called from Trio"""
return await aio_as_trio(proc, loop=loop)(*args)
async def call_t_a_depr(self, proc, *args, loop=None):
"""called from Trio"""
with test_utils.deprecate(self):
return await loop.run_asyncio(proc, *args)
async def call_a_t(self, proc, *args, loop=None):
"""call from asyncio to an async trio function"""
return await trio_as_aio(proc, loop=loop)(*args)
async def call_a_t_depr(self, proc, *args, loop=None):
"""call from asyncio to an async trio function"""
with test_utils.deprecate(self):
return await loop.run_trio(proc, *args)
async def call_a_ts(self, proc, *args, loop=None):
"""called from asyncio to a sync trio function"""
return proc(*args)
@pytest.mark.trio
async def test_call_at(self, loop):
async def delay(t):
done = asyncio.Event(loop=loop)
loop.call_at(t, done.set)
await done.wait()
t = loop.time() + 0.1
await aio_as_trio(delay, loop=loop)(t)
@pytest.mark.trio
async def test_call_at_depr(self, loop):
async def delay(t):
done = asyncio.Event(loop=loop)
loop.call_at(t, done.set)
await done.wait()
t = loop.time() + 0.1
with test_utils.deprecate(self):
await loop.run_asyncio(delay, t)
@pytest.mark.trio
async def test_asyncio_trio(self, loop):
"""Call asyncio from trio"""
async def dly_trio(seen):
await trio.sleep(0.01)
seen.flag |= 2
return 8
seen = Seen()
res = await aio_as_trio(partial(self.call_a_t, loop=loop), loop=loop)(dly_trio, seen)
assert res == 8
assert seen.flag == 2
@pytest.mark.trio
async def test_asyncio_trio_depr(self, loop):
"""Call asyncio from trio"""
async def dly_trio(seen):
await trio.sleep(0.01)
seen.flag |= 2
return 8
seen = Seen()
res = await aio_as_trio(partial(self.call_a_t_depr, loop=loop), loop=loop)(dly_trio, seen)
assert res == 8
assert seen.flag == 2
@pytest.mark.trio
async def test_call_asyncio_ctx(self, loop):
self.did_it = 0
async with aio_as_trio(AioContext(self, loop), loop=loop) as ctx:
assert ctx.parent is self
assert self.did_it == 2
self.did_it = 3
assert self.did_it == 4
@pytest.mark.trio
async def test_call_trio_ctx(self, loop):
async def _call_trio_ctx():
self.did_it = 0
async with trio_as_aio(TrioContext(self)) as ctx:
assert ctx.parent is self
assert self.did_it == 2
self.did_it = 3
assert self.did_it == 4
await aio_as_trio(_call_trio_ctx, loop=loop)()
@pytest.mark.trio
async def test_call_trio_ctx_depr(self, loop):
async def _call_trio_ctx():
self.did_it = 0
async with loop.wrap_trio_context(TrioContext(self)) as ctx:
assert ctx.parent is self
assert self.did_it == 2
self.did_it = 3
assert self.did_it == 4
with test_utils.deprecate(self):
await loop.run_asyncio(_call_trio_ctx)
@pytest.mark.trio
async def test_asyncio_trio_sync(self, loop):
"""Call asyncio from trio"""
def dly_trio(seen):
seen.flag |= 2
return 8
seen = Seen()
res = await aio_as_trio(partial(self.call_a_ts, loop=loop), loop=loop)(dly_trio, seen)
assert res == 8
assert seen.flag == 2
@pytest.mark.trio
async def test_asyncio_trio_sync_depr(self, loop):
"""Call asyncio from trio"""
def dly_trio(seen):
seen.flag |= 2
return 8
seen = Seen()
with test_utils.deprecate(self):
res = await loop.run_asyncio(partial(self.call_a_ts, loop=loop), dly_trio, seen)
assert res == 8
assert seen.flag == 2
@pytest.mark.trio
async def test_trio_asyncio(self, loop):
async def dly_asyncio(seen):
await asyncio.sleep(0.01, loop=loop)
seen.flag |= 1
return 4
seen = Seen()
res = await self.call_t_a(dly_asyncio, seen, loop=loop)
assert res == 4
assert seen.flag == 1
@pytest.mark.trio
async def test_trio_asyncio_depr(self, loop):
async def dly_asyncio(seen):
await asyncio.sleep(0.01, loop=loop)
seen.flag |= 1
return 4
seen = Seen()
res = await self.call_t_a_depr(dly_asyncio, seen, loop=loop)
assert res == 4
assert seen.flag == 1
@pytest.mark.trio
async def test_asyncio_trio_error(self, loop):
async def err_trio():
await trio.sleep(0.01)
raise RuntimeError("I has another owie")
with pytest.raises(RuntimeError) as err:
await aio_as_trio(partial(self.call_a_t, loop=loop), loop=loop)(err_trio)
assert err.value.args[0] == "I has another owie"
@pytest.mark.trio
async def test_asyncio_trio_error_depr1(self, loop):
async def err_trio():
await trio.sleep(0.01)
raise RuntimeError("I has another owie")
with pytest.raises(RuntimeError) as err:
await aio_as_trio(partial(self.call_a_t_depr, loop=loop), loop=loop)(err_trio)
assert err.value.args[0] == "I has another owie"
@pytest.mark.trio
async def test_asyncio_trio_error_depr2(self, loop):
async def err_trio():
await trio.sleep(0.01)
raise RuntimeError("I has another owie")
with pytest.raises(RuntimeError) as err:
with test_utils.deprecate(self):
await loop.run_asyncio(partial(self.call_a_t, loop=loop), err_trio)
assert err.value.args[0] == "I has another owie"
@pytest.mark.trio
async def test_asyncio_trio_error_depr3(self, loop):
async def err_trio():
await trio.sleep(0.01)
raise RuntimeError("I has another owie")
with pytest.raises(RuntimeError) as err:
with test_utils.deprecate(self):
await loop.run_asyncio(partial(self.call_a_t_depr, loop=loop), err_trio)
assert err.value.args[0] == "I has another owie"
@pytest.mark.trio
async def test_asyncio_trio_sync_error(self, loop):
def err_trio_sync():
loop.time() # verify that the loop is running
raise RuntimeError("I has more owie")
with pytest.raises(RuntimeError) as err:
await aio_as_trio(partial(self.call_a_ts, loop=loop), loop=loop)(err_trio_sync)
assert err.value.args[0] == "I has more owie"
@pytest.mark.trio
async def test_asyncio_trio_sync_error_depr(self, loop):
def err_trio_sync():
loop.time() # verify that the loop is running
raise RuntimeError("I has more owie")
with pytest.raises(RuntimeError) as err:
with test_utils.deprecate(self):
await loop.run_asyncio(partial(self.call_a_ts, loop=loop), err_trio_sync)
assert err.value.args[0] == "I has more owie"
@pytest.mark.trio
async def test_trio_asyncio_error(self, loop):
async def err_asyncio():
await asyncio.sleep(0.01, loop=loop)
raise RuntimeError("I has an owie")
with pytest.raises(RuntimeError) as err:
await self.call_t_a(err_asyncio, loop=loop)
assert err.value.args[0] == "I has an owie"
@pytest.mark.trio
async def test_trio_asyncio_error_depr(self, loop):
async def err_asyncio():
await asyncio.sleep(0.01, loop=loop)
raise RuntimeError("I has an owie")
with pytest.raises(RuntimeError) as err:
await self.call_t_a_depr(err_asyncio, loop=loop)
assert err.value.args[0] == "I has an owie"
@pytest.mark.trio
async def test_asyncio_trio_cancel_out(self, loop):
async def cancelled_trio(seen):
seen.flag |= 1
await trio.sleep(0.01)
scope = trio.hazmat.current_task()._cancel_stack[-1]
scope.cancel()
seen.flag |= 2
await trio.sleep(0.01)
seen.flag |= 4
seen = Seen()
with pytest.raises(asyncio.CancelledError):
await aio_as_trio(partial(self.call_a_t, loop=loop), loop=loop)(cancelled_trio, seen)
assert seen.flag == 3
@pytest.mark.trio
async def test_trio_asyncio_cancel_out(self, loop):
async def cancelled_asyncio(seen):
seen.flag |= 1
await asyncio.sleep(0.01, loop=loop)
f = asyncio.Future(loop=loop)
f.cancel()
return f.result() # raises error
def cancelled_future(seen):
seen.flag |= 1
f = asyncio.Future(loop=loop)
f.cancel()
return f # contains error
async def check_cancel(proc, seen):
with trio.CancelScope() as scope:
with pytest.raises(asyncio.CancelledError):
await self.call_t_a(proc, seen, loop=loop)
assert not scope.cancel_called
seen.flag |= 4
seen = Seen()
await check_cancel(cancelled_future, seen)
assert seen.flag == 1 | 4
seen = Seen()
await check_cancel(cancelled_asyncio, seen)
assert seen.flag == 1 | 4
@pytest.mark.trio
async def test_trio_asyncio_cancel_out_depr(self, loop):
async def cancelled_asyncio(seen):
seen.flag |= 1
await asyncio.sleep(0.01, loop=loop)
f = asyncio.Future(loop=loop)
f.cancel()
return f.result() # raises error
def cancelled_future(seen):
seen.flag |= 1
f = asyncio.Future(loop=loop)
f.cancel()
return f # contains error
async def check_cancel(proc, seen):
with trio.CancelScope() as scope:
with pytest.raises(asyncio.CancelledError):
await self.call_t_a_depr(proc, seen, loop=loop)
assert not scope.cancel_called
seen.flag |= 4
seen = Seen()
await check_cancel(cancelled_future, seen)
assert seen.flag == 1 | 4
seen = Seen()
await check_cancel(cancelled_asyncio, seen)
assert seen.flag == 1 | 4
@pytest.mark.trio
async def test_asyncio_trio_cancel_in(self, loop):
async def in_trio(started, seen):
started.set()
try:
await trio.sleep(1)
except trio.Cancelled:
seen.flag |= 1
else:
seen.flag |= 4
finally:
seen.flag |= 2
async def cancel_asyncio(seen):
started = asyncio.Event(loop=loop)
f = asyncio.ensure_future(self.call_a_t(in_trio, started, seen, loop=loop))
await started.wait()
f.cancel()
with pytest.raises(asyncio.CancelledError):
await f
seen.flag |= 8
seen = Seen()
await aio_as_trio(cancel_asyncio, loop=loop)(seen)
assert seen.flag == 1 | 2 | 8
@pytest.mark.trio
async def test_trio_asyncio_cancel_in(self, loop):
async def in_asyncio(started, seen):
started.set()
try:
await asyncio.sleep(9999, loop=loop)
except asyncio.CancelledError:
seen.flag |= 1
except trio.Cancelled:
seen.flag |= 16
else:
seen.flag |= 4
finally:
seen.flag |= 2
async def cancel_trio(seen):
started = trio.Event()
async with trio.open_nursery() as nursery:
nursery.start_soon(partial(self.call_t_a, loop=loop), in_asyncio, started, seen)
await started.wait()
nursery.cancel_scope.cancel()
seen.flag |= 8
seen = Seen()
await cancel_trio(seen)
assert seen.flag == 1 | 2 | 8
@pytest.mark.trio
async def test_trio_asyncio_cancel_in_depr(self, loop):
async def in_asyncio(started, seen):
started.set()
try:
await asyncio.sleep(9999, loop=loop)
except asyncio.CancelledError:
seen.flag |= 1
except trio.Cancelled:
seen.flag |= 16
else:
seen.flag |= 4
finally:
seen.flag |= 2
async def cancel_trio(seen):
started = trio.Event()
async with trio.open_nursery() as nursery:
nursery.start_soon(
partial(self.call_t_a_depr, loop=loop), in_asyncio, started, seen
)
await started.wait()
nursery.cancel_scope.cancel()
seen.flag |= 8
seen = Seen()
await cancel_trio(seen)
assert seen.flag == 1 | 2 | 8
@pytest.mark.trio
async def test_trio_asyncio_cancel_direct(self, loop):
def in_asyncio(started, seen):
# This is intentionally not async
seen.flag |= 1
raise asyncio.CancelledError()
async def cancel_trio(seen):
started = trio.Event()
try:
async with trio.open_nursery() as nursery:
nursery.start_soon(
partial(self.call_t_a, loop=loop), in_asyncio, started, seen
)
await started.wait()
nursery.cancel_scope.cancel()
finally:
seen.flag |= 8
seen = Seen()
with pytest.raises(asyncio.CancelledError):
await cancel_trio(seen)
assert seen.flag == 1 | 8
@pytest.mark.trio
async def test_trio_asyncio_cancel_direct_depr(self, loop):
def in_asyncio(started, seen):
# This is intentionally not async
seen.flag |= 1
raise asyncio.CancelledError()
async def cancel_trio(seen):
started = trio.Event()
try:
async with trio.open_nursery() as nursery:
nursery.start_soon(
partial(self.call_t_a_depr, loop=loop), in_asyncio, started, seen
)
await started.wait()
nursery.cancel_scope.cancel()
finally:
seen.flag |= 8
seen = Seen()
with pytest.raises(asyncio.CancelledError):
await cancel_trio(seen)
assert seen.flag == 1 | 8
@pytest.mark.trio
async def test_trio_asyncio_error_direct(self, loop):
def err_asyncio():
# This is intentionally not async
raise RuntimeError("I has an owie")
with pytest.raises(RuntimeError) as err:
await self.call_t_a(err_asyncio, loop=loop)
assert err.value.args[0] == "I has an owie"
@pytest.mark.trio
async def test_trio_asyncio_error_direct_depr(self, loop):
def err_asyncio():
# This is intentionally not async
raise RuntimeError("I has an owie")
with pytest.raises(RuntimeError) as err:
await self.call_t_a_depr(err_asyncio, loop=loop)
assert err.value.args[0] == "I has an owie"
@pytest.mark.trio
async def test_trio_asyncio_generator(self, loop):
async def dly_asyncio():
yield 1
await asyncio.sleep(0.01, loop=loop)
yield 2
with test_utils.deprecate(self):
res = await async_gen_to_list(loop.wrap_generator(dly_asyncio))
assert res == [1, 2]
@pytest.mark.trio
async def test_trio_asyncio_generator_with_error(self, loop):
async def dly_asyncio():
yield 1
raise RuntimeError("I has an owie")
yield 2
with test_utils.deprecate(self):
with pytest.raises(RuntimeError) as err:
await async_gen_to_list(loop.wrap_generator(dly_asyncio))
assert err.value.args[0] == "I has an owie"
@pytest.mark.trio
async def test_trio_asyncio_generator_with_cancellation(self, loop):
async def dly_asyncio(hold, seen):
yield 1
seen.flag |= 1
await hold.wait()
async def cancel_soon(nursery):
await trio.sleep(0.01)
nursery.cancel_scope.cancel()
hold = asyncio.Event(loop=loop)
seen = Seen()
with test_utils.deprecate(self):
async with trio.open_nursery() as nursery:
nursery.start_soon(async_gen_to_list, loop.wrap_generator(dly_asyncio, hold, seen))
nursery.start_soon(cancel_soon, nursery)
assert nursery.cancel_scope.cancel_called
assert seen.flag == 1
@pytest.mark.trio
async def test_trio_asyncio_iterator(self, loop):
async def slow_nums():
for n in range(1, 6):
asyncio.sleep(0.01, loop=loop)
yield n
sum = 0
async for n in aio_as_trio(slow_nums()):
sum += n
assert sum == 15
@pytest.mark.trio
async def test_trio_asyncio_iterator_depr(self, loop):
async def slow_nums():
for n in range(1, 6):
asyncio.sleep(0.01, loop=loop)
yield n
sum = 0
# with test_utils.deprecate(self): ## not yet
async for n in aio_as_trio(
slow_nums(), loop=loop
):
sum += n
assert sum == 15
| 32.479866 | 99 | 0.576506 |
c8e77318fef4550e46073e24bef2340a841bfc6d | 1,269 | py | Python | models/alexnet.py | kamwoh/dpn | b4d721e0e164188399307a4b5944c0a655882c3c | [
"BSD-3-Clause"
] | 2 | 2021-07-06T08:10:33.000Z | 2021-12-03T09:17:36.000Z | models/alexnet.py | kamwoh/dpn | b4d721e0e164188399307a4b5944c0a655882c3c | [
"BSD-3-Clause"
] | null | null | null | models/alexnet.py | kamwoh/dpn | b4d721e0e164188399307a4b5944c0a655882c3c | [
"BSD-3-Clause"
] | 1 | 2021-12-08T09:26:26.000Z | 2021-12-08T09:26:26.000Z | import torch
import torch.nn as nn
from torchvision.models import alexnet
from models import register_network
@register_network('alexnet')
class AlexNet(nn.Module):
def __init__(self, nbit, nclass, pretrained=False, freeze_weight=False, **kwargs):
super(AlexNet, self).__init__()
model = alexnet(pretrained=pretrained)
self.features = model.features
self.avgpool = model.avgpool
fc = []
for i in range(6):
fc.append(model.classifier[i])
self.fc = nn.Sequential(*fc)
in_features = model.classifier[6].in_features
self.ce_fc = nn.Linear(in_features, nclass)
self.hash_fc = nn.Linear(in_features, nbit, bias=False)
nn.init.normal_(self.hash_fc.weight, std=0.01)
# nn.init.zeros_(self.hash_fc.bias)
self.extrabit = 0
if freeze_weight:
for param in self.features.parameters():
param.requires_grad_(False)
for param in self.fc.parameters():
param.requires_grad_(False)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
u = self.ce_fc(x)
v = self.hash_fc(x)
return u, v
| 28.2 | 86 | 0.608353 |
8bdc0071f3a9653c751bc4ca1d00b538f7490323 | 503 | py | Python | DiRa_Software/Reference/Source code final 2018-2019/PTIT-Fast-and-Fiery_Digital_race_2019/test_fps.py | lamhoangtung/DiRa | 6e92f465c0197f3bd60b1e5719c1cc8fa06c5e4c | [
"MIT"
] | 34 | 2019-05-07T08:44:27.000Z | 2020-05-26T13:52:32.000Z | DiRa_Software/Reference/Source code final 2018-2019/PTIT-Fast-and-Fiery_Digital_race_2019/test_fps.py | giangnt071098/DiRa | 71da5c9f13f3fb32d4cc1efd96d981139fb66ee5 | [
"MIT"
] | 3 | 2019-10-21T04:37:48.000Z | 2019-11-11T12:16:04.000Z | DiRa_Software/Reference/Source code final 2018-2019/PTIT-Fast-and-Fiery_Digital_race_2019/test_fps.py | giangnt071098/DiRa | 71da5c9f13f3fb32d4cc1efd96d981139fb66ee5 | [
"MIT"
] | 58 | 2019-03-13T09:15:15.000Z | 2021-11-19T08:32:27.000Z | from time import time
from keras.models import load_model
import keras.backend as K
import tensorflow as tf
# NOTE(review): this file is Python 2 (see the `print` statement below) and
# targets TF1.x-style Keras sessions.
# Cap TensorFlow's GPU memory so the benchmark can share the device.
sess = tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.2)))
K.set_session(sess)
t = time()  # load-time stamp; never reported — overwritten inside the loop
model = load_model("updated.h5")
import numpy as np
fps = 0
count = 0
# Benchmark loop: predict a fixed dummy batch forever and print the mean FPS
# over every 50 predictions. Batch is 16 single-channel 48x48 images.
while(True):
    t = time()
    model.predict(np.ones((16, 48, 48, 1)))
    fps += 1/(time()-t)
    count += 1
    if count == 50:
        print fps/count  # Python 2 print statement
        count = 0
        fps = 0
| 23.952381 | 104 | 0.664016 |
9e279411f2b0edac18e35472e253c15fd9829fe2 | 2,219 | py | Python | cfgparse/test_package.py | fyhuang/archive_box | 1cbbd839012fed5e12341b78027d132c72dd01df | [
"BSD-3-Clause"
] | null | null | null | cfgparse/test_package.py | fyhuang/archive_box | 1cbbd839012fed5e12341b78027d132c72dd01df | [
"BSD-3-Clause"
] | null | null | null | cfgparse/test_package.py | fyhuang/archive_box | 1cbbd839012fed5e12341b78027d132c72dd01df | [
"BSD-3-Clause"
] | null | null | null | import unittest
from typing import Optional, Tuple, List, Dict, NamedTuple
from . import *
class NestedNt(NamedTuple):
    """Inner record used to exercise recursive NamedTuple parsing in the tests below."""
    n_a: float
    n_b: str
class TestNt(NamedTuple):
    """Fixture covering every field flavor `mapping_to_nt` must handle:
    primitives, Optional, Tuple, List, Dict, and nested NamedTuples."""
    f_str: str = ""
    f_int: int = 0
    f_optional: Optional[int] = None
    f_tuple: Tuple[int, float] = (0, 0.0)
    # NOTE(review): the [] / {} defaults are single objects shared by every
    # instance built with defaults — harmless for these read-only fixtures,
    # but they would alias if ever mutated.
    f_list: List[str] = []
    f_dict: Dict[str, int] = {}
    f_optional_nested: Optional[NestedNt] = None
    f_list_nested: List[NestedNt] = []
    f_dict_nested: Dict[int, NestedNt] = {}
class MappingToNtTests(unittest.TestCase):
    """Behavioral tests for `mapping_to_nt` across every field flavor of TestNt."""

    def test_wrong_primitive(self) -> None:
        # Swapped types on the primitive fields must surface as ParseError.
        bad_mapping = {"f_str": 123, "f_int": "hello"}
        with self.assertRaises(ParseError):
            mapping_to_nt(bad_mapping, TestNt)

    def test_primitive(self) -> None:
        parsed = mapping_to_nt({"f_str": "hello", "f_int": 123}, TestNt)
        self.assertEqual(TestNt(f_str="hello", f_int=123), parsed)

    def test_optional(self) -> None:
        # Both the None arm and the int arm of Optional[int] must round-trip.
        for value in (None, 100):
            parsed = mapping_to_nt({"f_optional": value}, TestNt)
            self.assertEqual(TestNt(f_optional=value), parsed)

    def test_tuple(self) -> None:
        parsed = mapping_to_nt({"f_tuple": [1, 2.0]}, TestNt)
        self.assertEqual(TestNt(f_tuple=(1, 2.0)), parsed)

    def test_list(self) -> None:
        parsed = mapping_to_nt({"f_list": ["a", "b"]}, TestNt)
        self.assertEqual(TestNt(f_list=["a", "b"]), parsed)

    def test_dict(self) -> None:
        parsed = mapping_to_nt({"f_dict": {"a": 1}}, TestNt)
        self.assertEqual(TestNt(f_dict={"a": 1}), parsed)

    def test_optional_nested(self) -> None:
        parsed = mapping_to_nt({"f_optional_nested": {"n_a": 2.0, "n_b": "abc"}}, TestNt)
        self.assertEqual(TestNt(f_optional_nested=NestedNt(2.0, "abc")), parsed)

    def test_list_nested(self) -> None:
        parsed = mapping_to_nt({"f_list_nested": [{"n_a": 2.0, "n_b": "abc"}]}, TestNt)
        self.assertEqual(TestNt(f_list_nested=[NestedNt(2.0, "abc")]), parsed)

    def test_dict_nested(self) -> None:
        parsed = mapping_to_nt({"f_dict_nested": {23: {"n_a": 2.0, "n_b": "abc"}}}, TestNt)
        self.assertEqual(TestNt(f_dict_nested={23: NestedNt(2.0, "abc")}), parsed)
| 35.222222 | 87 | 0.605228 |
3bf442327faccabdfd2bc547e888a866194b1a5d | 1,084 | py | Python | ensysmod/model/energy_storage.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | [
"MIT"
] | 1 | 2021-12-10T19:41:01.000Z | 2021-12-10T19:41:01.000Z | ensysmod/model/energy_storage.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | [
"MIT"
] | 83 | 2021-10-20T22:54:28.000Z | 2022-03-24T19:07:06.000Z | ensysmod/model/energy_storage.py | NOWUM/EnSysMod | 18c8a2198db3510e667c1f0298d00a3dfcb0aab7 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, Integer, ForeignKey, Float
from sqlalchemy.orm import relationship
from ensysmod.database.base_class import Base
class EnergyStorage(Base):
    """
    EnergyStorage table definition: storage-specific parameters of an energy
    component, keyed by the owning component row.
    See https://vsa-fine.readthedocs.io/en/latest/storageClassDoc.html
    """
    # The owning component id doubles as the primary key (one storage row per component).
    ref_component = Column(Integer, ForeignKey("energy_component.id"), index=True, nullable=False, primary_key=True)
    # Commodity being stored.
    ref_commodity = Column(Integer, ForeignKey("energy_commodity.id"), index=True, nullable=False)
    # Efficiency / loss parameters — presumably fractions in [0, 1] as in FINE's
    # storage model; TODO confirm units against the writer side.
    charge_efficiency = Column(Float, nullable=True)
    discharge_efficiency = Column(Float, nullable=True)
    self_discharge = Column(Float, nullable=True)
    cyclic_lifetime = Column(Integer, nullable=True)
    # Rate limits — assumed relative to installed capacity (per FINE); verify.
    charge_rate = Column(Float, nullable=True)
    discharge_rate = Column(Float, nullable=True)
    state_of_charge_min = Column(Float, nullable=True)
    state_of_charge_max = Column(Float, nullable=True)
    # Relationships
    component = relationship("EnergyComponent")
    commodity = relationship("EnergyCommodity", back_populates="energy_storages")
| 38.714286 | 116 | 0.760148 |
cd980e92c0164d7dbda0264c2fbca7ce5bdc561b | 46,408 | py | Python | torch/_jit_internal.py | fossabot/pytorch | 42ba67992cbb29033c65d76cc3374ea9fbf4127d | [
"Intel"
] | null | null | null | torch/_jit_internal.py | fossabot/pytorch | 42ba67992cbb29033c65d76cc3374ea9fbf4127d | [
"Intel"
] | null | null | null | torch/_jit_internal.py | fossabot/pytorch | 42ba67992cbb29033c65d76cc3374ea9fbf4127d | [
"Intel"
] | null | null | null | """
The weak_script annotation needs to be here instead of inside torch/jit/ so it
can be used in other places in torch/ (namely torch.nn) without running into
circular dependency problems
"""
import contextlib
import collections
import enum
import inspect
import ast
import weakref
import warnings
from textwrap import dedent
import torch
import sys
import builtins
import typing
import io
import pickle
# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
# Explicitly ask to import `torch.distributed.__init__` first.
# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
import torch.distributed.rpc
from torch._C import Future as CFuture
from torch._sources import get_source_lines_and_file, parse_def, fake_range
from torch.futures import Future
import torch.package._mangling as package_mangling
from typing import Any, Callable, Dict, Generic, List, Optional, Tuple, Type, TypeVar, Union # noqa: F401
if sys.version_info[:2] > (3, 7):
from typing import Final
else:
from typing_extensions import Final
LockType: Type
try:
import _thread
LockType = _thread.LockType
except ImportError:
import _dummy_thread
LockType = _dummy_thread.LockType
# Wrapper functions that can call either of 2 functions depending on a boolean
# argument
boolean_dispatched: 'weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]' = weakref.WeakKeyDictionary() # noqa: T484
def createResolutionCallbackFromEnv(lookup_base):
    """
    Creates a resolution callback that will look up qualified names in an
    environment, starting with `lookup_base` for the base of any qualified
    names, then proceeding down the lookup chain with the resolved object.
    You should not use this directly, it should only be used from the other
    createResolutionCallbackFrom* functions.
    """
    # Resolve a dotted name ("a.b.c") by chained getattr starting at `module`.
    def lookupInModule(qualified_name, module):
        if '.' in qualified_name:
            parts = qualified_name.split('.')
            base = parts[0]
            remaining_pieces = '.'.join(parts[1:])
            module_value = getattr(module, base)
            return lookupInModule(remaining_pieces, module_value)
        else:
            return getattr(module, qualified_name)
    # Recursive-descent parse of a possibly-subscripted type expression such as
    # "Dict[str, List[int]]". Returns (resolved object, number of characters of
    # `expr` consumed). The index arithmetic is delicate and intentionally
    # left exactly as written.
    def parseNestedExpr(expr, module) -> Tuple[Any, int]:
        i = 0
        # Scan the base identifier up to a delimiter.
        while i < len(expr) and expr[i] not in (',', '[', ']'):
            i += 1
        # Special case logic for the empty Tuple as a subscript (used
        # in the type annotation `Tuple[()]`)
        if expr[:i] == '()':
            return (), i
        base = lookupInModule(expr[:i].strip(), module)
        assert base is not None, f"Unresolvable type {expr[:i]}"
        if i == len(expr) or expr[i] != '[':
            return base, i
        assert expr[i] == '['
        parts = []
        # Consume comma-separated subscript arguments until the closing ']'.
        while expr[i] != ']':
            part_len = 0
            i += 1
            part, part_len = parseNestedExpr(expr[i:], module)
            parts.append(part)
            i += part_len
        if len(parts) > 1:
            return base[tuple(parts)], i + 1
        else:
            return base[parts[0]], i + 1
    def parseExpr(expr, module):
        try:
            value, len_parsed = parseNestedExpr(expr, module)
            assert len_parsed == len(expr), "whole expression was not parsed, falling back to c++ parser"
            return value
        except Exception:
            """
            The python resolver fails in several cases in known unit tests, and is intended
            to fall back gracefully to the c++ resolver in general. For example, python 2 style
            annotations which are frequent in our unit tests often fail with types e.g. int not
            resolvable from the calling frame.
            """
            return None
    # None signals "unresolved"; the caller falls back to the C++ resolver.
    return lambda expr: parseExpr(expr, lookup_base)
def createResolutionCallbackFromFrame(frames_up: int = 0):
    """
    Creates a function which, given a string variable name,
    returns the value of the variable in the scope of the caller of
    the function which called createResolutionCallbackFromFrame (by default).
    This is used to enable access in-scope Python variables inside
    TorchScript fragments.
    frames_up is number of additional frames to go up on the stack.
    The default value is 0, which correspond to the frame of the caller
    of createResolutionCallbackFromFrame. Also for example, if frames_up is set
    to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame
    will be taken.
    For example, the following program prints 2::
        def bar():
            cb = createResolutionCallbackFromFrame(1)
            print(cb("foo"))
        def baz():
            foo = 2
            bar()
        baz()
    """
    # NOTE: the walk count is relative to *this* frame, so this body must not
    # be refactored into helper calls — that would change the stack depth.
    frame = inspect.currentframe()
    i = 0
    while i < frames_up + 1:
        assert frame is not None
        frame = frame.f_back
        i += 1
    assert frame is not None
    # Snapshot the target frame's scope.
    f_locals = frame.f_locals
    f_globals = frame.f_globals
    # Exposed via attribute access so createResolutionCallbackFromEnv can walk
    # qualified names with plain getattr calls. Lookup order: locals, then
    # globals, then builtins; implicitly returns None when the name is absent.
    class env(object):
        def __getattr__(self, key):
            if key in f_locals:
                return f_locals[key]
            elif key in f_globals:
                return f_globals[key]
            elif key in dir(builtins):
                return getattr(builtins, key)
    return createResolutionCallbackFromEnv(env())
def get_closure(fn):
    """
    Build a name -> value mapping of everything `fn` can see: its module
    globals, overlaid with the variables captured from enclosing scopes.
    """
    env = dict(fn.__globals__)
    cells = fn.__closure__ or ()
    for name, cell in zip(fn.__code__.co_freevars, cells):
        env[name] = cell.cell_contents
    return env
# [local resolution in python]
# Depending on where a variable is defined, and where it is used, we may
# or may not be able to recover its value when recursively compiling a
# script function. Remember in the general case, a module or function is
# first defined and then later scripted. This means we do not have a
# chance to capture the active frames when the function is defined. Hence any
# name resolution has to happen later on the created closure. The way
# python captures type annotations restricts what we can recover. The
# follow example illustrates the different cases:
#
# class MyGlobalClass:
# ...
# def my_local_scope():
# @torch.jit.script
# class MyClass:
# ...
# @torch.jit.script
# class MyClassUsedAsVar:
# ...
# def eg(x: MyClass, y: MyGlobalClass):
# a_local_capture : Foo
# return MyClassUsedAsVar(x)
#
# MyGlobalClass is defined in the __globals__ dictionary of function
# 'eg', so it is always recoverable. my_local_scope introduces a new local
# variable scope in the function. Classes defined here are only visible as
# local variables. For the case of MyClassUsedAsVar, it is captured
# because it is used as a variable inside the body of the function, and we
# can resolve it using the captures returned from `get_closure`. However,
# the type annotations are not captured by the closure. In Python
# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
# annotations on `eg``, but starting in Python 4.0, they will represented as
# strings and no longer present. Furthermore, since the body of `eg` does
# not reference those names, they do not appear in the list of closed over
# variables. In Python 2.x, type annotations are in comments, leading to a
# similar situation where their definitions are not available. We anticipate
# that most users will not run into this issue because their modules and
# functions will be defined at a global scope like MyGlobalClass. In cases
# where they are not, it is possible to work around issues by declaring the
# values global in the function.
# In Python 3.9 declaring class as global will make it invisible to
# `inspect.getsource`, see https://bugs.python.org/issue42666 .
# This could be worked around by manualy adding it to `global()` dictionary.
def createResolutionCallbackFromClosure(fn):
    """
    Build a resolution callback by introspecting `fn`'s closure and globals
    instead of walking the enclosing stack frames.
    """
    captures = get_closure(fn)

    class closure_lookup(object):
        # Wrapped in a class because `captures` is a dict and
        # createResolutionCallbackFromEnv resolves names via `getattr` calls.
        def __getattr__(self, key):
            if key in captures:
                return captures[key]
            # Fall back to the typing module, then to builtins.
            for module in (typing, builtins):
                if hasattr(module, key):
                    return getattr(module, key)
            return None

    return createResolutionCallbackFromEnv(closure_lookup())
def can_compile_class(cls) -> bool:
    """
    Heuristically decide whether TorchScript can compile `cls`: it must not be
    ignored, must not be one of the known builtin bases, and every routine on
    it must carry a Python code object (i.e. not be a C-bound builtin).
    """
    if is_ignored_fn(cls):
        return False
    # Ignore the following list of built-in classes.
    if issubclass(cls, (torch.nn.Module, tuple, list, Exception)):
        return False
    routines = [
        getattr(cls, name)
        for name in cls.__dict__
        if inspect.isroutine(getattr(cls, name, None))
    ]
    return all(hasattr(fn, '__code__') for fn in routines)
def get_callable_argument_names(fn) -> List[str]:
    """
    Return the names of `fn`'s POSITIONAL_OR_KEYWORD parameters, or an empty
    list when the signature is unavailable or contains any other parameter
    kind (varargs, kwargs, keyword-only, positional-only).

    Used by `torch.jit.trace` to assign meaningful argument names to traced
    functions and modules.
    """
    # Signatures cannot be recovered for every callable (e.g. some builtins).
    try:
        params = inspect.signature(fn).parameters.values()
    except Exception:
        return []

    # Any other parameter kind does not map to a single named value, so the
    # whole result is discarded rather than partially reported.
    if any(p.kind is not p.POSITIONAL_OR_KEYWORD for p in params):
        return []
    return [p.name for p in params]
def get_annotation_str(annotation):
    """
    Convert an AST node containing a type annotation back to the string that
    spelled it in the source. Returns None for node types not handled here
    (those are left for ScriptTypeParser).
    """
    if isinstance(annotation, ast.Name):
        return annotation.id
    if isinstance(annotation, ast.Attribute):
        return f"{get_annotation_str(annotation.value)}.{annotation.attr}"
    if isinstance(annotation, ast.Subscript):
        # In Python3.9+ subscript indices are no longer wrapped in ast.Index.
        inner = annotation.slice if sys.version_info >= (3, 9) else annotation.slice.value  # type: ignore[attr-defined]
        return f"{get_annotation_str(annotation.value)}[{get_annotation_str(inner)}]"
    if isinstance(annotation, ast.Tuple):
        return ",".join(get_annotation_str(elt) for elt in annotation.elts)
    if isinstance(annotation, (ast.Constant, ast.NameConstant)):
        return f"{annotation.value}"
    return None
def get_type_hint_captures(fn):
    """
    Get a dictionary containing type resolution mappings necessary to resolve types
    for the literal annotations on 'fn'. These are not considered to be closed-over by fn
    and must be obtained separately (e.g. using this function).
    Args:
        fn: A callable.
    Returns:
        A Dict[str, Any] mapping each literal annotation string used on fn's
        signature to the Python object it refers to.
    """
    # Gather a dictionary of parameter name -> type, skipping any parameters whose annotated
    # types are strings. These are only understood by TorchScript in the context of a type annotation
    # that refers to a class in its own definition, but trying to include a mapping for this in the result
    # function would cause infinite recursion because the class is currently being compiled.
    # In addition, there is logic in ScriptTypeParser to handle this.
    signature = inspect.signature(fn)
    name_to_type = {
        name: parameter.annotation
        for name, parameter in signature.parameters.items()
        if parameter.annotation is not inspect.Parameter.empty and not isinstance(parameter.annotation, str)
    }
    # Then, get the literal type annotations from the function declaration
    # by source inspection. This accounts for the case in which aliases are used
    # to annotate the arguments (e.g device_t = torch.device, and then d: device_t).
    src = inspect.getsource(fn)
    # frontend.py cannot be used here because it includes _jit_internal, so use ast instead.
    a = ast.parse(dedent(src))
    if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef):
        raise RuntimeError(f"Expected {fn} to be a function")
    f = a.body[0]
    # Prepare a dictionary of source annotation -> type, which will be the final result of this function,
    # by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping
    # them to the type object corresponding to the annotation via name_to_type using the parameter name.
    annotation_to_type = {}
    for arg in f.args.args:
        # Get the source type annotation string for this argument if possible.
        arg_annotation_str = get_annotation_str(arg.annotation) if arg.annotation else None
        # If the argument has no annotation or get_annotation_str cannot convert it to a string,
        # arg_annotation_str will be None. Skip this arg; ScriptTypeParser will probably handle
        # this in the latter case.
        if arg_annotation_str is None:
            continue
        # Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not
        # be present in name_to_type is that the annotation itself is a string and not a type object
        # (common for self-refential annotations in classes). Once again, let ScriptTypeParser handle this.
        arg_name = arg.arg
        if arg_name in name_to_type:
            annotation_to_type[arg_annotation_str] = name_to_type[arg_name]
    # If there is a valid return annotation, include it in annotation_to_type. As with argument annotations,
    # the literal annotation has to be convertible to a string by get_annotation_str, and the actual type
    # of the annotation cannot be a string.
    literal_return_annotation = get_annotation_str(f.returns)
    valid_literal_annotation = literal_return_annotation is not None
    return_annotation = signature.return_annotation
    valid_return_annotation_type = return_annotation is not inspect.Parameter.empty and not isinstance(return_annotation, str)
    if valid_literal_annotation and valid_return_annotation_type:
        annotation_to_type[literal_return_annotation] = return_annotation
    return annotation_to_type
def createResolutionCallbackForClassMethods(cls):
    """
    Aggregate the closed-over variables and type-annotation captures of every
    method defined on `cls` into one mapping, and return a lookup function
    over it that falls back to Python builtins.
    """
    # On a type (as opposed to an instance) methods are plain, unbound
    # functions, so `inspect.isroutine` is the right filter here.
    methods = [getattr(cls, name) for name in cls.__dict__ if inspect.isroutine(getattr(cls, name))]
    captures: dict = {}
    for method in methods:
        captures.update(get_closure(method))
        captures.update(get_type_hint_captures(method))

    def lookup_in_class(key):
        return captures[key] if key in captures else getattr(builtins, key, None)

    return lookup_in_class
def boolean_dispatch(arg_name, arg_index, default, if_true, if_false, module_name, func_name):
    """
    Build a wrapper that dispatches to `if_true` or `if_false` based on a
    boolean argument, looked up first by keyword `arg_name`, then by position
    `arg_index`. In TorchScript the flag must be a compile-time constant so
    the correct branch can be selected during compilation.
    """
    def fn(*args, **kwargs):
        if arg_name in kwargs:
            flag = kwargs[arg_name]
        elif arg_index < len(args):
            flag = args[arg_index]
        else:
            flag = False
        chosen = if_true if flag else if_false
        return chosen(*args, **kwargs)

    # Propagate a docstring: exactly one of the two branches may carry one;
    # it is copied to the other branch and to the wrapper itself.
    true_doc, false_doc = if_true.__doc__, if_false.__doc__
    if true_doc is None and false_doc is None:
        doc = None  # neither function has a docstring
    elif true_doc is None:
        doc = if_true.__doc__ = false_doc
    elif false_doc is None:
        doc = if_false.__doc__ = true_doc
    else:
        raise RuntimeError("only one function can have a docstring")
    fn.__doc__ = doc

    if module_name is not None:
        fn.__module__ = module_name
    if func_name is not None:
        fn.__name__ = func_name

    # Record the dispatch metadata so the TorchScript compiler can recover
    # both branches later.
    boolean_dispatched[fn] = {
        "if_true": if_true,
        "if_false": if_false,
        "index": arg_index,
        "default": default,
        "arg_name": arg_name
    }
    return fn
class FunctionModifiers(object):
    """
    Used to denote the behavior of a function in TorchScript. See export() and
    ignore() for details.
    """
    # Values are human-readable strings so diagnostics can print them directly.
    UNUSED = "unused (ignored and replaced with raising of an exception)"
    IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
    EXPORT = "export (compile this function even if nothing calls it)"
    DEFAULT = "default (compile if called from a exported function / forward)"
    COPY_TO_SCRIPT_WRAPPER = \
        "if this method is not scripted, copy the python method onto the scripted model"
def export(fn):
    """
    This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a
    :class:`ScriptModule` and should be compiled.
    ``forward`` implicitly is assumed to be an entry point, so it does not need this decorator.
    Functions and methods called from ``forward`` are compiled as they are seen
    by the compiler, so they do not need this decorator either.
    Example (using ``@torch.jit.export`` on a method):
    .. testcode::
        import torch
        import torch.nn as nn
        class MyModule(nn.Module):
            def implicitly_compiled_method(self, x):
                return x + 99
            # `forward` is implicitly decorated with `@torch.jit.export`,
            # so adding it here would have no effect
            def forward(self, x):
                return x + 10
            @torch.jit.export
            def another_forward(self, x):
                # When the compiler sees this call, it will compile
                # `implicitly_compiled_method`
                return self.implicitly_compiled_method(x)
            def unused_method(self, x):
                return x - 20
        # `m` will contain compiled methods:
        #     `forward`
        #     `another_forward`
        #     `implicitly_compiled_method`
        # `unused_method` will not be compiled since it was not called from
        # any compiled methods and wasn't decorated with `@torch.jit.export`
        m = torch.jit.script(MyModule())
    """
    # The compiler reads this attribute to decide whether to compile `fn`.
    fn._torchscript_modifier = FunctionModifiers.EXPORT
    return fn
def unused(fn):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and replaced with the raising of an exception. This allows you
    to leave code in your model that is not yet TorchScript compatible and still
    export your model.
    Example (using ``@torch.jit.unused`` on a method)::
        import torch
        import torch.nn as nn
        class MyModule(nn.Module):
            def __init__(self, use_memory_efficient):
                super(MyModule, self).__init__()
                self.use_memory_efficient = use_memory_efficient
            @torch.jit.unused
            def memory_efficient(self, x):
                import pdb
                pdb.set_trace()
                return x + 10
            def forward(self, x):
                # Use not-yet-scriptable memory efficient mode
                if self.use_memory_efficient:
                    return self.memory_efficient(x)
                else:
                    return x + 10
        m = torch.jit.script(MyModule(use_memory_efficient=False))
        m.save("m.pt")
        m = torch.jit.script(MyModule(use_memory_efficient=True))
        # exception raised
        m(torch.rand(100))
    """
    # Properties are marked on their getter (and setter, if any) rather than
    # on the property object itself.
    if isinstance(fn, property):
        prop = fn
        setattr(prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED) # noqa: B010
        if prop.fset:
            setattr(prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED) # noqa: B010
        return prop
    fn._torchscript_modifier = FunctionModifiers.UNUSED
    return fn
# No op context manager from python side
class _IgnoreContextManager(contextlib.AbstractContextManager):
    """Python-side no-op stand-in context manager: accepts and discards any
    keyword arguments and does nothing on entry or exit."""
    def __init__(self, **kwargs):
        pass
    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        pass
def ignore(drop=False, **kwargs):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and left as a Python function. This allows you to leave code in
    your model that is not yet TorchScript compatible. If called from TorchScript,
    ignored functions will dispatch the call to the Python interpreter. Models with ignored
    functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.
    Example (using ``@torch.jit.ignore`` on a method)::
        import torch
        import torch.nn as nn
        class MyModule(nn.Module):
            @torch.jit.ignore
            def debugger(self, x):
                import pdb
                pdb.set_trace()
            def forward(self, x):
                x += 10
                # The compiler would normally try to compile `debugger`,
                # but since it is `@ignore`d, it will be left as a call
                # to Python
                self.debugger(x)
                return x
        m = torch.jit.script(MyModule())
        # Error! The call `debugger` cannot be saved since it calls into Python
        m.save("m.pt")
    Example (using ``@torch.jit.ignore(drop=True)`` on a method):
    .. testcode::
        import torch
        import torch.nn as nn
        class MyModule(nn.Module):
            @torch.jit.ignore(drop=True)
            def training_method(self, x):
                import pdb
                pdb.set_trace()
            def forward(self, x):
                if self.training:
                    self.training_method(x)
                return x
        m = torch.jit.script(MyModule())
        # This is OK since `training_method` is not saved, the call is replaced
        # with a `raise`.
        m.save("m.pt")
    .. testcleanup::
        import os
        os.remove('m.pt')
    """
    if callable(drop):
        # used without any args, so drop is actually a function
        #   @torch.jit.ignore
        #   def fn(...):
        fn = drop
        fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn
    if not isinstance(drop, bool):
        raise RuntimeError("Argument to @torch.jit.ignore must be a bool or "
                           f"a function but got {drop}")
    # for backwards compat
    drop_on_export = kwargs.pop("drop_on_export", None)
    if drop_on_export:
        warnings.warn("ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
                      "call on compilation. Use torch.jit.unused now. {}", category=FutureWarning)
        drop = drop_on_export
    elif drop:
        warnings.warn("ignore(True) has been deprecated. TorchScript will now drop the function "
                      "call on compilation. Use torch.jit.unused now. {}", category=FutureWarning)
    # Called with arguments: return the real decorator. `drop=True` downgrades
    # to UNUSED semantics (call replaced with a raise at compile time).
    def decorator(fn):
        if drop:
            fn._torchscript_modifier = FunctionModifiers.UNUSED
        else:
            fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn
    return decorator
def _copy_to_script_wrapper(fn):
    # Mark `fn` so that, when its module is scripted, the original Python
    # method is copied onto the scripted wrapper instead of being compiled.
    fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
    return fn
def module_has_exports(mod):
    """Return True if any callable attribute of `mod` is marked @torch.jit.export."""
    for name in dir(mod):
        # Attributes listed by dir() may not actually resolve; treat those as absent.
        item = getattr(mod, name, None)
        if callable(item) and get_torchscript_modifier(item) is FunctionModifiers.EXPORT:
            return True
    return False
# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
# allow JIT'd code to still be covered.
def should_drop(fn) -> bool:
    """True only for @torch.jit.unused functions: their calls are replaced
    with an exception-raising stub at compile time."""
    attr = get_torchscript_modifier(fn)
    if attr is None:
        return False
    return attr is FunctionModifiers.UNUSED
def is_ignored_fn(fn) -> bool:
    """True for functions marked @torch.jit.unused or @torch.jit.ignore."""
    mod = get_torchscript_modifier(fn)
    return mod is FunctionModifiers.UNUSED or mod is FunctionModifiers.IGNORE
def is_static_fn(cls, fn) -> bool:
    """True if attribute `fn` on `cls` is declared as a staticmethod."""
    return isinstance(inspect.getattr_static(cls, fn, default=None), staticmethod)
def get_static_fn(cls, fn):
    """Return the plain function wrapped by the staticmethod named `fn` on `cls`."""
    return inspect.getattr_static(cls, fn).__func__
def get_torchscript_modifier(fn):
    """Return the FunctionModifiers marker for `fn` (DEFAULT if unmarked),
    or None for non-callables. Bound methods are unwrapped first."""
    if not callable(fn):
        return None
    if hasattr(fn, '__func__'):
        fn = fn.__func__
    return getattr(fn, '_torchscript_modifier', FunctionModifiers.DEFAULT)
def copy_torchscript_modifier(orig, new) -> None:
    """Copy `orig`'s TorchScript modifier marker onto `new` (no-op if none)."""
    attr = get_torchscript_modifier(orig)
    if attr is None:
        return
    new._torchscript_modifier = attr
# overloading registration
# overloads get registered in this file, and compiled in torch/jit/__init__.py
# so that they can be imported in nn/functional.py without an import cycle
# qualified_name => list[overload_functions]
_overloaded_fns : Dict[str, List[Callable]] = {} # noqa: T484
_OVERLOAD_EXAMPLE = '''
Example usage of overload function:
@torch.jit._overload
def my_function(x: type0) -> type0: # decl 1
pass
@torch.jit._overload
def my_function(x: type1) -> type1: # decl 2
pass
def my_function(x): # implementation
if isinstance(x, type0):
return x
elif isinstance(x, type1):
return x
'''
def get_overload_no_implementation_error_message(kind, obj):
    """
    Build the error shown when @overload declarations exist for `obj` but no
    final implementation was provided after them.

    Args:
        kind: human-readable kind of the object ("function" / "method").
        obj: the overloaded callable the declarations belong to.
    Returns:
        The formatted error message, including the declaration's source.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    # Fix: `filename` was previously unpacked but unused while the message
    # printed a hardcoded "(unknown)" placeholder; report the real file.
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f'sure a definition is provided and defined after all overload declarations.\n'
        f'File "{filename}", line {file_lineno}:\n' + ''.join(sourcelines) + "\n" + _OVERLOAD_EXAMPLE
    )
def _check_overload_body(func):
    """
    Validate that an @overload declaration's body is exactly one `pass`
    statement or a bare `...`; raise RuntimeError otherwise.
    """
    parsed_def = parse_def(func)
    body = parsed_def.ast.body[0].body

    def _is_stub(stmt):
        if isinstance(stmt, ast.Pass):
            return True
        return isinstance(stmt, ast.Expr) and isinstance(stmt.value, ast.Ellipsis)

    if len(body) != 1 or not _is_stub(body[0]):
        msg = "Only `pass` statement or `...` can be the body of overload declaration:\n"
        msg += '\n'.join(parsed_def.source.split("\n")[:3])
        msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
        raise RuntimeError(msg)
def _overload(func):
    """
    Register `func` as one overload declaration under its qualified name.
    Registration happens here (and compilation in torch/jit/__init__.py) so
    nn/functional.py can use overloads without an import cycle.
    """
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_fns
    _overloaded_fns.setdefault(qual_name, []).append(func)
    return func
def _get_fn_overloads(qual_name):
    # Returns the list of overload declarations registered under `qual_name`,
    # or None if the function was never overloaded.
    return _overloaded_fns.get(qual_name)
def _clear_fn_overloads(qual_name) -> None:
    # Drops the registrations once compiled; raises KeyError if never registered.
    del _overloaded_fns[qual_name]
def get_class_name_lineno(method) -> Tuple[str, int]:
    # Walks the interpreter stack to identify the class body being executed
    # when @_overload_method runs. The fixed 2-frame offset means this helper
    # must only ever be called directly from _overload_method.
    current_frame = inspect.currentframe()
    # one for the get_class_name call, one for _overload_method call
    for i in range(2):
        assert current_frame is not None # assert current frame is not an Optional[FrameType]
        current_frame = current_frame.f_back
    assert current_frame is not None # same here
    class_name = current_frame.f_code.co_name
    line_no = current_frame.f_code.co_firstlineno
    return class_name, line_no
# At the the point the decorator is applied to class methods the method
# has no reference to its owning class. _qualified_name would not include
# the class it is defined in, so any methods with the same name in the same file
# would have the same _qualified_name, even if they were defined in different
# classes. This problem only exists in python 2.
# We get around this problem by looking at the stack frame and identifying
# the class name, and throwing an error whenever overloads are used
# when modules of the same name are in the same file
# qualified_name => class name => list[overload_functions]
_overloaded_methods : Dict[str, Dict[str, List[Callable]]] = {} # noqa: T484
# (qualified_name, class name) => class_fileno
_overloaded_method_class_fileno = {}
def _overload_method(func):
    """
    Register a method overload declaration, keyed by qualified name and the
    (stack-derived) name of the class currently being defined.
    """
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_methods
    class_name_map = _overloaded_methods.setdefault(qual_name, {})

    # NOTE: must be called directly from this decorator so the fixed frame
    # offset inside get_class_name_lineno stays correct.
    class_name, line_no = get_class_name_lineno(func)
    if class_name in class_name_map:
        # Same class name seen before: only legal when it is literally the
        # same class body, identified by its first line number.
        if _overloaded_method_class_fileno[(qual_name, class_name)] != line_no:
            raise RuntimeError("Cannot currently overload the same method name in two different"
                               " classes with the same name in the same module")
        overloads = class_name_map[class_name]
    else:
        overloads = class_name_map[class_name] = []
        _overloaded_method_class_fileno[(qual_name, class_name)] = line_no

    overloads.append(func)
    return func
def _get_overloaded_methods(method, mod_class):
    """
    Look up registered overload declarations for `method` on `mod_class`,
    verifying the method's source actually lies within the class body.
    Returns None when no overloads are registered.
    """
    # TODO: __name__ not set for submodules in recursive script
    if not hasattr(method, "__name__"):
        return None
    overloads_by_class = _overloaded_methods.get(_qualified_name(method), None)
    if overloads_by_class is None:
        return None
    overloads = overloads_by_class.get(mod_class.__name__, None)
    if overloads is None:
        return None

    method_line_no = get_source_lines_and_file(method)[1]
    class_lines, class_start_line = get_source_lines_and_file(mod_class)[:2]
    class_end_line = class_start_line + len(class_lines)
    # A method defined outside [class start, class end] means the module class
    # was redeclared in the same file — the overloads cannot be trusted then.
    if not (class_start_line <= method_line_no <= class_end_line):
        raise Exception("Overloads are not useable when a module is redeclared within the same file: " + str(method))
    return overloads
def is_tuple(ann) -> bool:
    """True if ``ann`` is a subscripted ``typing.Tuple`` annotation, e.g. Tuple[int, str]."""
    if ann is Tuple:
        raise_error_container_parameter_missing("Tuple")
    # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type rule
    if not hasattr(ann, '__module__'):
        return False
    if ann.__module__ != 'typing':
        return False
    origin = getattr(ann, '__origin__', None)
    return origin is Tuple or origin is tuple
def is_list(ann) -> bool:
    """True if ``ann`` is a subscripted ``typing.List`` annotation, e.g. List[int]."""
    if ann is List:
        raise_error_container_parameter_missing("List")
    if not hasattr(ann, '__module__'):
        return False
    if ann.__module__ != 'typing':
        return False
    origin = getattr(ann, '__origin__', None)
    return origin is List or origin is list
def is_dict(ann) -> bool:
    """True if ``ann`` is a subscripted ``typing.Dict`` annotation, e.g. Dict[str, int]."""
    if ann is Dict:
        raise_error_container_parameter_missing("Dict")
    if not hasattr(ann, '__module__'):
        return False
    if ann.__module__ != 'typing':
        return False
    origin = getattr(ann, '__origin__', None)
    return origin is Dict or origin is dict
def is_optional(ann) -> bool:
    """True if ``ann`` is Optional[T], i.e. a two-member Union with exactly one NoneType."""
    if ann is Optional:
        raise_error_container_parameter_missing("Optional")

    def is_none_type(the_type):
        # Don't throw if `the_type` isn't a class type (e.g. if it is
        # another type annotation instance)
        return inspect.isclass(the_type) and issubclass(the_type, type(None))

    if not hasattr(ann, '__module__'):
        return False
    if ann.__module__ != 'typing':
        return False
    origin = getattr(ann, '__origin__', None)
    if origin is Union:
        args = getattr(ann, '__args__', ())
        if len(args) == 2:
            # Optional is sugar for Union[T, None]: exactly one side is NoneType.
            return is_none_type(args[0]) != is_none_type(args[1])
    # Older interpreters exposed Optional itself as the origin.
    return origin is Optional
def is_future(ann) -> bool:
    """True if ``ann`` is a subscripted ``Future[T]`` annotation."""
    if ann is Future:
        # A bare Future annotation is ambiguous; require a contained type.
        raise RuntimeError(
            "Attempted to use Future without a "
            "contained type. Please add a contained type, e.g. "
            "Future[int]"
        )
    origin = getattr(ann, "__origin__", None)
    return origin is Future
# RRef helpers exist only when the RPC module was built in; otherwise a stub
# reports that nothing is an RRef instance.
if torch.distributed.rpc.is_available():
    from torch.distributed.rpc import RRef
    from torch._C._distributed_rpc import PyRRef
    def is_rref(ann) -> bool:
        # Reject the bare (unsubscripted) RRef annotation, mirroring is_future.
        if ann is RRef:
            raise RuntimeError(
                "Attempted to use RRef without a "
                "contained type. Please add a contained type, e.g. "
                "RRef[int]"
            )
        return getattr(ann, "__origin__", None) is RRef
    def is_rref_instance(obj) -> bool:
        # isinstance against PyRRef -- presumably the C-extension base of RRef,
        # so C++-created RRefs are covered too; TODO confirm.
        return isinstance(obj, PyRRef)
else:
    def is_rref_instance(obj) -> bool:
        # If the RPC module doesn't exist then RRefs don't exist either.
        return False
def is_final(ann) -> bool:
    """True if ``ann`` is a ``Final`` annotation (bare or subscripted)."""
    if ann.__module__ not in {'typing', 'typing_extensions'}:
        return False
    return getattr(ann, '__origin__', None) is Final or isinstance(ann, type(Final))
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls(object):
    """Placeholder that tolerates subscripting (``instance[...]``) and yields None."""
    def __getitem__(self, types):
        return None
# mypy doesn't support parameters on types, so we have to explicitly type each
# list size
BroadcastingList1 = BroadcastingListCls()
# BroadcastingList2 .. BroadcastingList6 are aliases of the same placeholder.
for i in range(2, 7):
    globals()[f"BroadcastingList{i}"] = BroadcastingList1
def is_scripting() -> bool:
    r"""
    Function that returns True when in compilation and False otherwise. This
    is useful especially with the @unused decorator to leave code in your
    model that is not yet TorchScript compatible.
    .. testcode::
        import torch
        @torch.jit.unused
        def unsupported_linear_op(x):
            return x
        def linear(x):
            if torch.jit.is_scripting():
                return torch.linear(x)
            else:
                return unsupported_linear_op(x)
    """
    # Eager-mode stub: always False here. (TorchScript presumably substitutes
    # True during compilation -- that behavior is not visible in this file.)
    return False
# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
def _qualified_name(obj) -> str:
    """Return the TorchScript qualified name (``__torch__.module.Name``) for obj.

    Honors a ``_jit_override_qualname`` attribute, short-circuits for
    ScriptFunction and torchbind classes, demangles torch.package module
    names, and maps ``__main__`` into the ``__torch__`` namespace.
    Raises RuntimeError when no usable name/module can be determined.
    """
    # This special case allows us to override the qualified name on a type.
    # It's currently used in conjunction with tracing, where we create a
    # fake module to filter only supported attributes. However, since this
    # new type is defined as a local class, we need a mechanism to override
    # its qualname so it appears correctly in the TorchScript system. This,
    # we set '_jit_override_qualname' with the original traced module's
    # qualified name, which is picked up here
    if hasattr(obj, '_jit_override_qualname'):
        return obj._jit_override_qualname
    # short-circuit in cases where the object already has a known qualified name
    if isinstance(obj, torch._C.ScriptFunction):
        return obj.qualified_name
    if getattr(obj, "__name__", None):
        name = obj.__name__
    # Enum classes do not have `__name__` attr, instead they have `name`.
    elif isinstance(obj, enum.Enum):
        name = obj.name
    else:
        raise RuntimeError("Could not get name of python class object")
    if name == '<lambda>':
        name = '_lambda' # make name a valid identifier
    module_name = obj.__module__
    # If the module is actually a torchbind module, then we should short circuit
    if module_name == "torch._classes":
        return obj.qualified_name
    # The Python docs are very clear that `__module__` can be None, but I can't
    # figure out when it actually would be.
    if module_name is None:
        raise RuntimeError(f"Could not get qualified name for class '{name}': "
                           "__module__ can't be None.")
    # if getattr(sys.modules[module_name], name) is not obj:
    #     raise RuntimeError(f"Could not get qualified name for class '{name}': "
    #                        f"the attr {name} on module {module_name} is not the the class")
    # torch.package and TorchScript have separate mangling schemes to avoid
    # name collisions from multiple packages. To avoid them interfering with
    # each other, remove the package mangling here.
    module_name = package_mangling.demangle(module_name)
    # __main__ is a builtin module, so rewrite it to "__torch__".
    if module_name == "__main__":
        module_name = "__torch__"
    else:
        # Everything else gets a "__torch__" prefix to avoid name collisions
        # with the names of user values.
        module_name = "__torch__." + module_name
    # Dotted names cannot be used verbatim as identifiers in the JIT.
    if "." in name:
        raise RuntimeError(f"Could not get qualified name for class '{name}': "
                           f"'{name}' is not a valid identifier")
    return module_name + "." + name
def _try_get_dispatched_fn(fn):
    """Look up ``fn`` in the boolean-dispatch table; None if absent or not callable."""
    if callable(fn):
        return boolean_dispatched.get(fn)
    return None
def _get_named_tuple_properties(obj):
    """Return (name, fields, annotations, defaults) describing a NamedTuple class.

    ``annotations`` holds JIT types: the declared annotation converted via
    ann_to_type where present, otherwise an inferred TensorType.
    """
    assert issubclass(obj, tuple) and hasattr(obj, '_fields')
    if hasattr(obj, "_field_defaults"):
        # _field_defaults only exists on namedtuples declared with defaults.
        defaults = [obj._field_defaults[field]
                    for field in obj._fields
                    if field in obj._field_defaults]
    else:
        defaults = []
    annotations = []
    has_annotations = hasattr(obj, '__annotations__')
    for field in obj._fields:
        if has_annotations and field in obj.__annotations__:
            the_type = torch.jit.annotations.ann_to_type(obj.__annotations__[field], fake_range())
            annotations.append(the_type)
        else:
            annotations.append(torch._C.TensorType.getInferred())
    # NOTE(review): obj is a class here (see the issubclass assert), so
    # type(obj).__name__ is the metaclass name rather than the namedtuple's
    # own name -- confirm this is intended.
    return type(obj).__name__, obj._fields, annotations, defaults
def _create_named_tuple(t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]):
# mypy: namedtuple() expects a string literal as the first argument
if sys.version_info < (3, 7, 0):
TupleType = collections.namedtuple(unqual_name, field_names) # type: ignore[no-redef, misc]
TupleType.__new__.__defaults__ = defaults # type: ignore[attr-defined]
else:
TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
return TupleType(*t)
@contextlib.contextmanager
def _disable_emit_hooks():
hooks = torch._C._jit_get_emit_hooks()
torch._C._jit_set_emit_hooks(None, None)
yield
torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None:  # noqa: F811
    # NOTE(review): the inner functions below are defined but never attached to
    # anything and the argument is unused, so calling this is effectively a
    # no-op; it looks like an unfinished context-manager-class variant of
    # _disable_emit_hooks.
    def __enter__(self) -> None:
        self.hooks = torch._C._jit_get_emit_hooks()
        torch._C._jit_set_emit_hooks(None, None)
    def __exit__(self, *args) -> None:
        torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
def _is_exception(obj) -> bool:
if not inspect.isclass(obj):
return False
return issubclass(obj, Exception)
def raise_error_container_parameter_missing(target_type) -> None:
    """Raise RuntimeError for a bare container annotation used without
    contained types (List, Dict, Tuple, Optional)."""
    if target_type == 'Dict':
        # Dict needs two contained types, so its example differs.
        message = ("Attempted to use Dict without "
                   "contained types. Please add contained type, e.g. "
                   "Dict[int, int]")
    else:
        message = (f"Attempted to use {target_type} without a "
                   "contained type. Please add a contained type, e.g. "
                   f"{target_type}[int]")
    raise RuntimeError(message)
def get_origin(target_type):
    """Return the ``__origin__`` of a subscripted typing alias, or None."""
    origin = getattr(target_type, "__origin__", None)
    return origin
def get_args(target_type):
    """Return the ``__args__`` tuple of a subscripted typing alias, or None."""
    args = getattr(target_type, "__args__", None)
    return args
def check_args_exist(target_type) -> None:
    """Reject bare container annotations (List, Tuple, Dict, Optional used
    without contained types) via raise_error_container_parameter_missing."""
    bare_forms = ((List, list, "List"),
                  (Tuple, tuple, "Tuple"),
                  (Dict, dict, "Dict"),
                  (None, Optional, "Optional"))
    for typing_form, builtin_form, name in bare_forms:
        if target_type is typing_form or target_type is builtin_form:
            raise_error_container_parameter_missing(name)
# supports List/Dict/Tuple and Optional types
# TODO support future
def container_checker(obj, target_type) -> bool:
    """Recursively check ``obj`` against a subscripted container annotation.

    Supports List[T], Dict[K, V], Tuple[...], and Optional[T]. A Union origin
    is only treated as Optional: just its first argument is inspected (see the
    comment below). Any other origin yields False.
    """
    origin_type = get_origin(target_type)
    check_args_exist(target_type)
    if origin_type is list or origin_type is List:
        if not isinstance(obj, list):
            return False
        arg_type = get_args(target_type)[0]
        arg_origin = get_origin(arg_type)
        for el in obj:
            # check if nested container, ex: List[List[str]]
            if arg_origin: # processes nested container, ex: List[List[str]]
                if not container_checker(el, arg_type):
                    return False
            elif not isinstance(el, arg_type):
                return False
        return True
    elif origin_type is Dict or origin_type is dict:
        if not isinstance(obj, dict):
            return False
        key_type = get_args(target_type)[0]
        val_type = get_args(target_type)[1]
        for key, val in obj.items():
            # check if keys are of right type
            if not isinstance(key, key_type):
                return False
            # values may themselves be subscripted containers; recurse if so
            val_origin = get_origin(val_type)
            if val_origin:
                if not container_checker(val, val_type):
                    return False
            elif not isinstance(val, val_type):
                return False
        return True
    elif origin_type is Tuple or origin_type is tuple:
        if not isinstance(obj, tuple):
            return False
        arg_types = get_args(target_type)
        # fixed-length tuple annotation: arity must match exactly
        if len(obj) != len(arg_types):
            return False
        for el, el_type in zip(obj, arg_types):
            el_origin = get_origin(el_type)
            if el_origin:
                if not container_checker(el, el_type):
                    return False
            elif not isinstance(el, el_type):
                return False
        return True
    elif origin_type is Union:  # actually handles Optional Case
        if obj is None:  # check before recursion because None is always fine
            return True
        # Only the first Union member is considered -- this relies on the
        # Union being Optional[T] (general Unions are not supported here).
        optional_type = get_args(target_type)[0]
        optional_origin = get_origin(optional_type)
        if optional_origin:
            return container_checker(obj, optional_type)
        elif isinstance(obj, optional_type):
            return True
    return False
def _isinstance(obj, target_type) -> bool:
    """isinstance() replacement that also understands subscripted typing containers."""
    if isinstance(target_type, collections.abc.Container):
        # Like builtin isinstance, a tuple of candidates means "any of these".
        if not isinstance(target_type, tuple):
            raise RuntimeError("The second argument to "
                               "`torch.jit.isinstance` must be a type "
                               "or a tuple of types")
        return any(_isinstance(obj, candidate) for candidate in target_type)
    if get_origin(target_type):
        # Subscripted container annotation, e.g. List[int] / Dict[str, int]
        return container_checker(obj, target_type)
    # Check to handle weird python type behaviors
    # 1. python 3.6 returns None for origin of containers without
    #    contained type (intead of returning outer container type)
    # 2. non-typed optional origin returns as none instead
    #    of as optional in 3.6-3.8
    check_args_exist(target_type)
    # handle non-containers
    return isinstance(obj, target_type)
class _TensorExtractor(pickle.Pickler):
    """Pickler that records every torch.Tensor it encounters into ``tensors``
    instead of serializing it (see _extract_tensors)."""
    def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        # Caller-owned accumulator; appended to during dump().
        self.tensors = tensors
    def persistent_id(self, obj):
        # Returning a non-None id makes pickle skip serializing the object;
        # "" serves as a dummy id for everything handled here.
        if isinstance(obj, torch.Tensor):
            self.tensors.append(obj)
            return ""
        # Since we just want to extract tensors, we don't mind if an object is
        # unpicklable if it doesn't contain tensors, as we can just ignore/skip
        # it. To play it safe, we only do so for common objects that we're sure
        # don't contain tensors. Feel free to add new types here. Note also that
        # even if a type isn't listed here this won't block users, since they
        # can just add a __getstate__ or __reduce__ method to their class.
        if isinstance(obj, LockType):
            return ""
        # Futures and RRefs don't technically contain a value, they just offer
        # the means to access a value.
        if isinstance(obj, CFuture) or is_rref_instance(obj):
            return ""
        if isinstance(obj, torch.cuda.Event):
            return ""
        # None => serialize normally (and recurse into the object's contents).
        return None
def _extract_tensors(obj):
    r"""
    This function is exclusively called from C++.
    See ``torch/csrc/jit/python/python_ivalue.h``.
    It extracts the tensors contained in the given object, through pickling.
    """
    tensors: List[torch.Tensor] = []
    # The pickle output itself is discarded; only the side effect of
    # persistent_id collecting tensors matters.
    _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors).dump(obj)
    return tensors
| 37.096723 | 130 | 0.664433 |
3282787186a8b09b8e4ee2181664f748f271066d | 878 | py | Python | examples/conversation_read_webhook.py | smadivad/sreemessage | b9642c32bfea65ec7b523bffd7b23306b235e651 | [
"BSD-2-Clause"
] | 57 | 2015-04-07T21:20:12.000Z | 2022-03-30T17:27:48.000Z | examples/conversation_read_webhook.py | smadivad/sreemessage | b9642c32bfea65ec7b523bffd7b23306b235e651 | [
"BSD-2-Clause"
] | 33 | 2015-09-24T21:29:48.000Z | 2022-02-11T22:17:29.000Z | examples/conversation_read_webhook.py | smadivad/sreemessage | b9642c32bfea65ec7b523bffd7b23306b235e651 | [
"BSD-2-Clause"
] | 59 | 2015-01-08T13:00:13.000Z | 2022-03-09T16:55:46.000Z | #!/usr/bin/env python
import messagebird
import argparse
# CLI: both the API access key and the webhook id are mandatory.
parser = argparse.ArgumentParser()
parser.add_argument('--accessKey', help='access key for MessageBird API', type=str, required=True)
parser.add_argument('--webhookId', help='webhook that you want to read', type=str, required=True)
args = vars(parser.parse_args())
try:
    # Authenticate and fetch the conversation webhook by id.
    client = messagebird.Client(args['accessKey'])
    webhook = client.conversation_read_webhook(args['webhookId'])
    # Print the object information.
    print('The following information was returned as a Webhook object:')
    print(webhook)
except messagebird.client.ErrorException as e:
    # The API reports failures as a list of structured error objects.
    print('An error occured while requesting a Webhook object:')
    for error in e.errors:
        print(' code : %d' % error.code)
        print(' description : %s' % error.description)
        print(' parameter : %s\n' % error.parameter)
f59f05d49d5be5ee0cce05a0d061fa931e483f1c | 5,526 | py | Python | src/dochive/__main__.py | Boldware/dochive | e93d794bbf4bb931cbc41f9284632d861bfe300e | [
"Apache-2.0"
] | null | null | null | src/dochive/__main__.py | Boldware/dochive | e93d794bbf4bb931cbc41f9284632d861bfe300e | [
"Apache-2.0"
] | null | null | null | src/dochive/__main__.py | Boldware/dochive | e93d794bbf4bb931cbc41f9284632d861bfe300e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
DocHive
"""
import logging
import sys
import os
from . import __version__
from . import utils
from . import config
from .commands import add, digest
import click
import textwrap
import shutil
log = logging.getLogger(__name__)
config_help = "Provide a specific DocHive config"
class ColorFormatter(logging.Formatter):
    """Log formatter that prefixes records with a (colorized) level name and
    wraps the message body to the detected terminal width."""

    colors = {
        'CRITICAL': 'red',
        'ERROR': 'red',
        'WARNING': 'yellow',
        'DEBUG': 'blue'
    }

    text_wrapper = textwrap.TextWrapper(
        width=shutil.get_terminal_size(fallback=(0, 0)).columns,
        replace_whitespace=False,
        break_long_words=False,
        break_on_hyphens=False,
        initial_indent=' ' * 12,
        subsequent_indent=' ' * 12
    )

    def format(self, record):
        formatted = super().format(record)
        prefix = f'{record.levelname:<8} - '
        if record.levelname in self.colors:
            prefix = click.style(prefix, fg=self.colors[record.levelname])
        if not self.text_wrapper.width:
            # No terminal width detected: emit the message unwrapped.
            return prefix + formatted
        wrapped = '\n'.join(
            self.text_wrapper.fill(line) for line in formatted.splitlines()
        )
        # Prepend prefix after wrapping so that color codes don't affect length
        return prefix + wrapped[12:]
class State:
    ''' Maintain logging level.'''
    # Configures the package logger: a colorized stream handler at the
    # requested level plus a shared counting handler (from utils) used to
    # tally warnings/errors.
    def __init__(self, log_name='DocHive', level=logging.INFO):
        self.logger = logging.getLogger(log_name)
        # Don't restrict level on logger; use handler
        self.logger.setLevel(1)
        self.logger.propagate = False
        self.stream = logging.StreamHandler()
        self.stream.setFormatter(ColorFormatter())
        self.stream.setLevel(level)
        self.stream.name = 'ProfileBuilderStreamHandler'
        self.logger.addHandler(self.stream)
        # Add CountHandler for strict mode
        # (shared instance; only counts WARNING and above)
        self.counter = utils.log_counter
        self.counter.setLevel(logging.WARNING)
        self.logger.addHandler(self.counter)
pass_state = click.make_pass_decorator(State, ensure=True)
def add_options(opts):
    """Return a decorator applying every decorator in ``opts`` to a function,
    with the first list entry ending up outermost."""
    def apply_all(func):
        for option in reversed(opts):
            func = option(func)
        return func
    return apply_all
def verbose_option(f):
    # Click decorator for `-v/--verbose`: as a side effect it raises the shared
    # State stream handler to DEBUG; the flag value itself is not passed on
    # (expose_value=False).
    def callback(ctx, param, value):
        state = ctx.ensure_object(State)
        if value:
            state.stream.setLevel(logging.DEBUG)
    return click.option('-v', '--verbose',
                        is_flag=True,
                        expose_value=False,
                        help='Enable verbose output',
                        callback=callback)(f)
# Decorator bundle shared by every subcommand.
common_options = add_options([verbose_option])
# Might have a use for this, might not
# (kept as a template; config_help above would feed these options)
# common_config_options = add_options([
#     click.option('-f', '--config-file', type=click.File('rb'), help=config_help),
#     # Don't override config value if user did not specify --strict flag
#     # Conveniently, load_config drops None values
#     click.option('-s', '--strict', is_flag=True, default=None, help=strict_help),
#     click.option('-t', '--theme', type=click.Choice(theme_choices), help=theme_help),
#     # As with --strict, set the default to None so that this doesn't incorrectly
#     # override the config file
#     click.option('--use-directory-urls/--no-directory-urls', is_flag=True, default=None, help=use_directory_urls_help)
#     ])
# Interpreter/package location info interpolated into the --version message.
PYTHON_VERSION = sys.version[:3]
PKG_DIR = os.path.dirname(os.path.abspath(__file__))
@click.group(context_settings={'help_option_names': ['-h', '--help']})
@click.version_option(
    __version__,
    '-V', '--version',
    message=f'%(prog)s, version %(version)s from { PKG_DIR } (Python { PYTHON_VERSION })'
)
#@common_options
def cli():
    """
    DocHive - Automate document archives.
    """
@cli.command(name="add")
@click.option('-t', '--templates', type=click.Path(), help='template help', default='./templates')
@click.option('-c', '--config-file', type=click.File(), help='config help', default='./config.yml')
@click.option('-C', '--config', help='config help', multiple=True)
@click.option('-n', '--nav', help='nav help')
@click.option('-d', '--timestamp', help='timestamp help')
@click.option('-o', '--output-dir', type=click.Path(), help="output help", default='../docs/blog')
@click.option('-m', '--mkdocs-file', type=click.File(), help="mkdocs help", default='../mkdocs.yml')
@common_options
# @common_config_options
def add_command(templates, config_file, config, nav, timestamp, output_dir, mkdocs_file, **kwargs):
    """Generate new blog according to arguments"""
    # Thin CLI wrapper: all real work is delegated to commands.add.add().
    add.add(templates=templates, config_file=config_file, configs=config, nav=nav, \
        timestamp=timestamp, output_dir=output_dir, mkdocs_file=mkdocs_file, **kwargs)
# This command creates the blog digest page, explaining the site section and has recent posts at the bottom of the page as cards
@cli.command(name="digest")
@click.option('-t', '--template', type=click.Path(), help='template help')
@click.option('-l', '--limit', default=3, show_default=True)
@click.option('-i', '--input-dir', type=click.Path(), help='config help')
@click.option('-o', '--output-file', type=click.STRING, help="output help")
@common_options
# @common_config_options
def digest_command(template, limit, input_dir, output_file, **kwargs):
    """Generate new digest according to arguments"""
    # Thin CLI wrapper: all real work is delegated to commands.digest.digest().
    digest.digest(template=template, limit=limit, input_dir=input_dir, output_file=output_file, **kwargs)
if __name__ == '__main__':
cli() | 35.423077 | 128 | 0.656352 |
59f5a141d578ccb0133547f8cea4edfcf26facff | 2,096 | py | Python | GUI/SoundCloudDL.py | hassaanaliw/SoundCloud-Downloader | 09e7bb9a9cf17d572fcc8cd117aeaddb48334ee9 | [
"MIT"
] | 2 | 2017-11-19T09:30:44.000Z | 2020-04-07T09:18:16.000Z | GUI/SoundCloudDL.py | hassaanaliw/SoundCloud-Downloader | 09e7bb9a9cf17d572fcc8cd117aeaddb48334ee9 | [
"MIT"
] | null | null | null | GUI/SoundCloudDL.py | hassaanaliw/SoundCloud-Downloader | 09e7bb9a9cf17d572fcc8cd117aeaddb48334ee9 | [
"MIT"
] | null | null | null | import urllib
import re,requests,json,sys
useragent = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5"
html = {"User-Agent": useragent, "Content-Type": "application/json", "Accept-Encoding": "gzip"}
client_id='40ae937e42ea2d4d4a04b1e22bb5d371'
def main(url):
valid = r'^(?:https?://)?(?:www\.)?soundcloud\.com/([\w\d-]+)/([\w\d-]+)'
matches = re.match(valid, url)
if matches is None:
print "Invalid URL"
sys.exit()
accountname = matches.group(1)
song =matches.group(2)
simple_title = accountname + u'-' + song
url = 'http://soundcloud.com/%s/%s' % (accountname, song)
next = 'http://api.soundcloud.com/resolve.json?url=' + url + '&client_id=%s' % client_id
info_json = requests.get(next, headers=html).text
info = json.loads(info_json)
video_id = info['id']
final = "https://api.sndcdn.com/i1/tracks/%s/streams?client_id=%s"%(video_id,client_id)
next_json = requests.get(final,headers=html).text
nextdata = json.loads(next_json)
try:
dl_link = nextdata["http_mp3_128_url"]
except KeyError:
print("Sorry. A stream only(rtmp) link was returned. This song cannot be downloaded. ")
raw_input("Enter to exit:")
sys.exit(1)
##########DOWNLOAD############
dl_link = nextdata["http_mp3_128_url"]
download(dl_link,song,".mp3")
def reporthook(blocknum, blocksize, totalsize):
    """urlretrieve progress callback: print percent complete, or the raw byte
    count when the total size is unknown (totalsize <= 0)."""
    readsofar = blocknum * blocksize
    if totalsize > 0:
        # BUG FIX: the final block usually overshoots the total, so the
        # unclamped percentage could exceed 100%; clamp it.
        percent = min(readsofar * 1e2 / totalsize, 100.0)
        s = "\r%5.1f%% %*d / %d" % (
            percent, len(str(totalsize)), readsofar, totalsize)
        print(s)
        if readsofar >= totalsize:  # near the end
            print("Done")
    else:  # total size is unknown
        print("read %d\n" % (readsofar,))
def download(url, filename, extension):
    # Fetch `url` to <filename><extension>, reporting progress via reporthook.
    # (Python 2: urllib.urlretrieve lives at the urllib top level.)
    print("Downloading "+filename+extension)
    urllib.urlretrieve(url,filename+extension,reporthook)
if __name__ == "__main__":
main(raw_input("Please Enter A Valid Soundcloud Link: ")) | 24.658824 | 120 | 0.62834 |
ef4ada01f6fa06aa216b1853e5a45bcdd2709ff9 | 1,836 | py | Python | vega/algorithms/nas/modnas/compat/importer.py | shaido987/vega | 14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6 | [
"MIT"
] | 1 | 2021-05-08T07:47:44.000Z | 2021-05-08T07:47:44.000Z | vega/algorithms/nas/modnas/compat/importer.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | [
"MIT"
] | null | null | null | vega/algorithms/nas/modnas/compat/importer.py | WholeG/vega | d1ccf1c3ce68a118bdb6775594ceed0f895911e7 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Import hooks for ModularNAS (PEP 302)."""
import sys
import importlib
import importlib.util
class ModNASImporter():
    """ModularNAS Importer class."""
    # Import-path fragments that must NOT be redirected.
    path_exclude = [
        'modnas.registry'
    ]
    # (public prefix, vendored location); order matters -- the more specific
    # 'modnas.contrib...'/'modnas.arch_space' entries must precede bare 'modnas'.
    path_spec = [
        ('modnas.contrib.arch_space', 'zeus.networks.pytorch.customs.modnas.contrib.arch_space'),
        ('modnas.arch_space', 'zeus.networks.pytorch.customs.modnas.arch_space'),
        ('modnas', 'vega.algorithms.nas.modnas'),
    ]
    def find_spec(self, fullname, path, target=None):
        """Handle ModularNAS imports."""
        # NOTE(review): exclusion tests substring containment, not a prefix
        # match -- confirm that is intended.
        for exc_path in self.path_exclude:
            if exc_path in fullname:
                return
        for ori_path, _ in self.path_spec:
            if fullname.startswith(ori_path):
                return importlib.util.spec_from_loader(fullname, self)
    def load_module(self, fullname):
        """Load ModularNAS module by import path."""
        for ori_path, cur_path in self.path_spec:
            if not fullname.startswith(ori_path):
                continue
            cur_fullname = fullname.replace(ori_path, cur_path)
            # NOTE(review): dict.get evaluates its default eagerly, so
            # import_module(cur_fullname) runs even when the module is already
            # present in sys.modules -- confirm that is acceptable.
            mod = sys.modules.get(fullname, sys.modules.get(cur_fullname, importlib.import_module(cur_fullname)))
            # Re-brand the vendored module under its public name.
            mod.__package__ = fullname
            mod.__name__ = fullname
            sys.modules[fullname] = mod
            return mod
sys.meta_path.append(ModNASImporter())
| 34 | 113 | 0.661765 |
250ffadbb766e7ece8c5ce56b39d27e56b4a532b | 609 | py | Python | pms/student/migrations/0016_auto_20190406_1608.py | iammeliodas/pms_django | 535f59ff2dc5260ed9c6da62defa6d740afc0289 | [
"Apache-2.0"
] | null | null | null | pms/student/migrations/0016_auto_20190406_1608.py | iammeliodas/pms_django | 535f59ff2dc5260ed9c6da62defa6d740afc0289 | [
"Apache-2.0"
] | 7 | 2020-06-05T20:21:00.000Z | 2021-09-22T18:18:42.000Z | pms/student/migrations/0016_auto_20190406_1608.py | meet86/pms_django | 535f59ff2dc5260ed9c6da62defa6d740afc0289 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-04-06 10:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces place_comapany_name with an integer place_comapany_id column.
    # NOTE(review): field names retain the upstream "comapany" typo, and
    # IntegerField ignores max_length -- left untouched because rewriting an
    # applied migration would desynchronize the migration history.
    dependencies = [
        ('student', '0015_auto_20190406_1551'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='registerdstudents',
            name='place_comapany_name',
        ),
        migrations.AddField(
            model_name='registerdstudents',
            name='place_comapany_id',
            field=models.IntegerField(default=0, max_length=4),
        ),
    ]
| 24.36 | 63 | 0.62069 |
5e2f89c8f56062fb5ecd05e99a823841b5e30ec5 | 6,940 | py | Python | src/GAMUTRawData/odmservices/service_manager.py | ODM2/h2outility | 78801e2de68a309069a602f5253a35f57fbc8ffd | [
"BSD-3-Clause"
] | null | null | null | src/GAMUTRawData/odmservices/service_manager.py | ODM2/h2outility | 78801e2de68a309069a602f5253a35f57fbc8ffd | [
"BSD-3-Clause"
] | 30 | 2017-07-19T21:20:36.000Z | 2021-12-13T19:40:25.000Z | src/GAMUTRawData/odmservices/service_manager.py | ODM2/h2outility | 78801e2de68a309069a602f5253a35f57fbc8ffd | [
"BSD-3-Clause"
] | 1 | 2017-07-20T20:51:15.000Z | 2017-07-20T20:51:15.000Z | import sys
import urllib
import os
from sqlalchemy.exc import SQLAlchemyError, DBAPIError
import utilities as util
from series_service import SeriesService
from cv_service import CVService
from edit_service import EditService
from record_service import RecordService
from export_service import ExportService
class ServiceManager():
    """Loads, stores and persists ODM database connection settings
    (connection.config) and builds the service objects that use them."""

    def __init__(self, debug=False):
        self.debug = debug
        f = self.__get_file('r')
        self._connections = []
        self.version = 0
        self._connection_format = "%s+%s://%s:%s@%s:%s/%s"
        self._connection_format_nopassword = "%s+%s://%s@%s:%s/%s"
        # Read all lines (connections) in the connection.cfg file
        while True:
            line = f.readline()
            if not line:
                break
            else:
                line = line.split()
                if len(line) >= 6:
                    line_dict = {'engine': line[0], 'user': line[1], 'password': line[2], 'address': line[3],
                                 'port': line[4], 'db': line[5]}
                    self._connections.append(line_dict)
        # BUG FIX: was `if len(self._connections) is not 0:` -- an identity
        # comparison against an int literal; use plain truthiness instead.
        if self._connections:
            # The current connection defaults to the most recent (i.e. the last written to the file)
            self._current_connection = self._connections[-1]
        else:
            self._current_connection = None
        f.close()

    def get_connections(self):
        """Return every known connection dict."""
        return self._connections

    def get_current_connection(self):
        """Return the active connection dict (None when no connections exist)."""
        return self._current_connection

    def add_connection(self, conn_dict):
        """conn_dict must be a dictionary with keys: engine, user, password, address, db"""
        # remove earlier connections that are identical to this one
        self.delete_connection(conn_dict)
        self._connections.append(conn_dict)
        self._current_connection = self._connections[-1]
        # write changes to connection file
        self.__save_connections()

    def test_connection(self, conn_dict):
        """Return False on SQLAlchemy-level failures; True otherwise.

        DBAPIError subclasses SQLAlchemyError, so its handler must stay first:
        driver-level errors are deliberately tolerated here.
        """
        try:
            self.version = self.get_db_version(conn_dict)
        except DBAPIError:
            pass
            # print e.message
        except SQLAlchemyError:
            return False
        return True

    def delete_connection(self, conn_dict):
        """Remove every stored connection equal to conn_dict (in place)."""
        self._connections[:] = [x for x in self._connections if x != conn_dict]

    # Create and return services based on the currently active connection
    def get_db_version(self, conn_dict):
        """Return (and cache) the ODM schema version reachable via conn_dict."""
        conn_string = self.__build_connection_string(conn_dict)
        service = SeriesService(conn_string)
        if not self.version:
            self.version = service.get_db_version()
        return self.version

    def get_series_service(self):
        """SeriesService bound to the current connection."""
        conn_string = self.__build_connection_string(self._current_connection)
        return SeriesService(conn_string, self.debug)

    def get_cv_service(self):
        """CVService bound to the current connection."""
        conn_string = self.__build_connection_string(self._current_connection)
        return CVService(conn_string, self.debug)

    def get_edit_service(self, series_id, connection):
        """EditService for ``series_id`` using an existing DB ``connection``."""
        conn_string = self.__build_connection_string(self._current_connection)
        return EditService(series_id, connection=connection, connection_string=conn_string, debug=self.debug)

    def get_record_service(self, script, series_id, connection):
        """RecordService wrapping an EditService for ``series_id``."""
        return RecordService(script, self.get_edit_service(series_id, connection),
                             self.__build_connection_string(self.get_current_connection()))

    def get_export_service(self):
        """ExportService wrapping a fresh SeriesService."""
        return ExportService(self.get_series_service())

    ## ###################
    # private variables
    ## ###################

    def __get_file(self, mode):
        """Open connection.config in ``mode``, creating it first when missing."""
        fn = util.resource_path('connection.config')
        config_file = None
        try:
            config_file = open(fn, mode)
        except (IOError, OSError):
            # BUG FIX: narrowed from a bare `except:` so that unrelated errors
            # (KeyboardInterrupt, SystemExit, ...) are no longer swallowed.
            open(fn, 'w').close()
            config_file = open(fn, mode)
        return config_file

    def __build_connection_string(self, conn_dict):
        """Build the SQLAlchemy connection URL for conn_dict, honoring
        per-engine quirks (FreeTDS DSNs on non-Windows mssql, sqlite paths,
        native-client detection on Windows)."""
        # BUG FIX: this was reset to a 6-placeholder template
        # ("%s+%s://%s:%s@%s/%s") while constringBuilder interpolates seven
        # values (including the port), raising TypeError for the
        # mysql/postgresql/default branches; keep the port placeholder.
        self._connection_format = "%s+%s://%s:%s@%s:%s/%s"
        if conn_dict['engine'] == 'mssql' and sys.platform != 'win32':
            driver = "pyodbc"
            quoted = urllib.quote_plus(
                'DRIVER={FreeTDS};DSN=%s;UID=%s;PWD=%s;' % (conn_dict['address'], conn_dict['user'],
                                                            conn_dict['password']))
            # quoted = urllib.quote_plus('DRIVER={FreeTDS};DSN=%s;UID=%s;PWD=%s;DATABASE=%s' %
            #                            (conn_dict['address'], conn_dict['user'], conn_dict['password'],conn_dict['db'],
            #                            ))
            conn_string = 'mssql+pyodbc:///?odbc_connect={}'.format(quoted)
        elif conn_dict['engine'] == 'sqlite':
            connformat = "%s:///%s:%s"
            conn_string = connformat % (conn_dict['engine'], conn_dict['address'], conn_dict['port'])
        else:
            if conn_dict['engine'] == 'mssql':
                driver = "pyodbc"
                conn = "%s+%s://%s:%s@%s:%s/%s?driver=SQL+Server"
                if "sqlncli11.dll" in os.listdir("C:\\Windows\\System32"):
                    conn = "%s+%s://%s:%s@%s:%s/%s?driver=SQL+Server+Native+Client+11.0"
                self._connection_format = conn
                conn_string = self._connection_format % (
                    conn_dict['engine'], driver, conn_dict['user'], conn_dict['password'], conn_dict['address'],
                    conn_dict['port'], conn_dict['db'])
            elif conn_dict['engine'] == 'mysql':
                driver = "pymysql"
                conn_string = self.constringBuilder(conn_dict, driver)
            elif conn_dict['engine'] == 'postgresql':
                driver = "psycopg2"
                conn_string = self.constringBuilder(conn_dict, driver)
            else:
                driver = "None"
                conn_string = self.constringBuilder(conn_dict, driver)
        # print "******", conn_string
        return conn_string

    def constringBuilder(self, conn_dict, driver):
        """Interpolate conn_dict into the (no-)password URL template."""
        if conn_dict['password'] is None or not conn_dict['password']:
            conn_string = self._connection_format_nopassword % (
                conn_dict['engine'], driver, conn_dict['user'], conn_dict['address'], conn_dict['port'],
                conn_dict['db'])
        else:
            conn_string = self._connection_format % (
                conn_dict['engine'], driver, conn_dict['user'], conn_dict['password'], conn_dict['address'],
                conn_dict['port'], conn_dict['db'])
        # print "******", conn_string
        return conn_string

    def __save_connections(self):
        """Rewrite connection.config with every stored connection."""
        f = self.__get_file('w')
        for conn in self._connections:
            f.write("%s %s %s %s %s %s\n" % (conn['engine'], conn['user'], conn['password'], conn['address'], conn['port'],
                                             conn['db']))
        f.close()
| 38.77095 | 123 | 0.585303 |
4ddc25728aeac741d566f944ddb191462dcae750 | 1,248 | py | Python | blog/forms.py | J-Chaudhary/blog_site | 6b966a3560cc5d0cbdd8f82203bc7ad4c95dbc95 | [
"MIT"
] | null | null | null | blog/forms.py | J-Chaudhary/blog_site | 6b966a3560cc5d0cbdd8f82203bc7ad4c95dbc95 | [
"MIT"
] | null | null | null | blog/forms.py | J-Chaudhary/blog_site | 6b966a3560cc5d0cbdd8f82203bc7ad4c95dbc95 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
def should_be_empty(value):
    # Honeypot validator: any non-empty value marks the submission as spam
    # (wired to the hidden ContactForm.forcefield field).
    if value:
        raise forms.ValidationError('Field is not empty...!')
class ContactForm(forms.Form):
    """Simple contact form with an anti-spam honeypot field."""
    name = forms.CharField(max_length=80)
    message = forms.CharField(widget=forms.Textarea)
    email = forms.EmailField()
    # Hidden honeypot: humans never see it, bots tend to fill it, which trips
    # the should_be_empty validator.
    forcefield = forms.CharField(
        required=False, widget=forms.HiddenInput,
        label="Leave empty", validators=[should_be_empty])
class UserCreateForm(UserCreationForm):
    """Registration form that additionally requires a unique email address."""
    email = forms.EmailField(required=True, label='email', error_messages={'exists': 'This already exists.'})

    class Meta:
        model = User
        fields = ('username', 'email', 'password1', 'password2')

    def save(self, commit=True):
        """Persist the user, copying the validated email onto the model."""
        user = super(UserCreateForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        if commit:
            user.save()
        return user

    def clean_email(self):
        """Reject emails already registered to another account.

        BUG FIX: this hook was previously named ``clean_emai``, so Django's
        form validation never invoked it; it also read ``self.clean_data`` and
        ``error_message`` instead of ``self.cleaned_data`` and
        ``error_messages``, which would have raised AttributeError/KeyError.
        """
        if User.objects.filter(email=self.cleaned_data['email']).exists():
            raise forms.ValidationError(self.fields['email'].error_messages['exists'])
        return self.cleaned_data['email']
a94747442bcf34293e604c03ae09879b0976b72f | 9,365 | py | Python | zerver/tests/test_logging_handlers.py | yakkl/yakkl | 89ecf4ee8998554a0634667067e16f428e4c480c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | zerver/tests/test_logging_handlers.py | yakkl/yakkl | 89ecf4ee8998554a0634667067e16f428e4c480c | [
"ECL-2.0",
"Apache-2.0"
] | 4 | 2020-06-06T00:51:42.000Z | 2022-02-10T21:38:40.000Z | zerver/tests/test_logging_handlers.py | yakkl/yakkl | 89ecf4ee8998554a0634667067e16f428e4c480c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import logging
import sys
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.http import HttpRequest
from django.test import TestCase
from django.utils.log import AdminEmailHandler
from functools import wraps
from mock import MagicMock, patch
from mypy_extensions import NoReturn
from typing import Any, Callable, Dict, Iterator, Optional, Tuple, Type
from types import TracebackType
from zerver.lib.types import ViewFuncT
from zerver.lib.test_classes import YakklTestCase
from zerver.logging_handlers import AdminNotifyHandler
# Filled in by capture_and_throw() so tests can inspect the request and the
# exception raised while handling it.
captured_request = None  # type: Optional[HttpRequest]
captured_exc_info = None  # type: Tuple[Optional[Type[BaseException]], Optional[BaseException], Optional[TracebackType]]
def capture_and_throw(domain: Optional[str]=None) -> Callable[[ViewFuncT], ViewFuncT]:
    """Decorator factory used by the tests to instrument a view.

    The wrapped view records the incoming ``HttpRequest`` and the exc_info
    of a deliberately raised exception into the module-level globals
    ``captured_request`` / ``captured_exc_info``, then re-raises so the
    server-error path under test runs.  ``domain`` only mirrors the real
    decorator's signature; it is never used.
    """
    def wrapper(view_func: ViewFuncT) -> ViewFuncT:
        @wraps(view_func)
        def wrapped_view(request: HttpRequest, *args: Any, **kwargs: Any) -> NoReturn:
            global captured_request
            captured_request = request
            try:
                raise Exception("Request error")
            except Exception as e:
                global captured_exc_info
                captured_exc_info = sys.exc_info()
                raise e
        return wrapped_view  # type: ignore # https://github.com/python/mypy/issues/1927
    return wrapper
class AdminNotifyHandlerTest(YakklTestCase):
logger = logging.getLogger('django')
def setUp(self) -> None:
self.handler = AdminNotifyHandler()
# Prevent the exceptions we're going to raise from being printed
# You may want to disable this when debugging tests
settings.LOGGING_ENABLED = False
global captured_exc_info
global captured_request
captured_request = None
captured_exc_info = None
def tearDown(self) -> None:
settings.LOGGING_ENABLED = True
def get_admin_yakkl_handler(self) -> AdminNotifyHandler:
return [
h for h in logging.getLogger('').handlers
if isinstance(h, AdminNotifyHandler)
][0]
@patch('zerver.logging_handlers.try_git_describe')
def test_basic(self, mock_function: MagicMock) -> None:
mock_function.return_value = None
"""A random exception passes happily through AdminNotifyHandler"""
handler = self.get_admin_yakkl_handler()
try:
raise Exception("Testing Error!")
except Exception:
exc_info = sys.exc_info()
record = self.logger.makeRecord('name', logging.ERROR, 'function', 16,
'message', {}, exc_info)
handler.emit(record)
def simulate_error(self) -> logging.LogRecord:
email = self.example_email('hamlet')
self.login(email)
with patch("zerver.decorator.rate_limit") as rate_limit_patch:
rate_limit_patch.side_effect = capture_and_throw
result = self.client_get("/json/users")
self.assert_json_error(result, "Internal server error", status_code=500)
rate_limit_patch.assert_called_once()
record = self.logger.makeRecord('name', logging.ERROR, 'function', 15,
'message', {}, captured_exc_info)
record.request = captured_request # type: ignore # this field is dynamically added
return record
def run_handler(self, record: logging.LogRecord) -> Dict[str, Any]:
with patch('zerver.lib.error_notify.notify_server_error') as patched_notify:
self.handler.emit(record)
patched_notify.assert_called_once()
return patched_notify.call_args[0][0]
@patch('zerver.logging_handlers.try_git_describe')
def test_long_exception_request(self, mock_function: MagicMock) -> None:
mock_function.return_value = None
"""A request with no stack and multi-line report.getMessage() is handled properly"""
record = self.simulate_error()
record.exc_info = None
record.msg = 'message\nmoremesssage\nmore'
report = self.run_handler(record)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertIn("stack_trace", report)
self.assertEqual(report['stack_trace'], 'message\nmoremesssage\nmore')
self.assertEqual(report['message'], 'message')
@patch('zerver.logging_handlers.try_git_describe')
def test_request(self, mock_function: MagicMock) -> None:
mock_function.return_value = None
"""A normal request is handled properly"""
record = self.simulate_error()
report = self.run_handler(record)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertIn("stack_trace", report)
# Test that `add_request_metadata` throwing an exception is fine
with patch("zerver.logging_handlers.traceback.print_exc"):
with patch("zerver.logging_handlers.add_request_metadata",
side_effect=Exception("Unexpected exception!")):
report = self.run_handler(record)
self.assertNotIn("user_email", report)
self.assertIn("message", report)
self.assertEqual(report["stack_trace"], "See /var/log/yakkl/errors.log")
# Check anonymous user is handled correctly
record.request.user = AnonymousUser() # type: ignore # this field is dynamically added
report = self.run_handler(record)
self.assertIn("host", report)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertIn("stack_trace", report)
# Now simulate a DisallowedHost exception
def get_host_error() -> None:
raise Exception("Get Host Failure!")
orig_get_host = record.request.get_host # type: ignore # this field is dynamically added
record.request.get_host = get_host_error # type: ignore # this field is dynamically added
report = self.run_handler(record)
record.request.get_host = orig_get_host # type: ignore # this field is dynamically added
self.assertIn("host", report)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertIn("stack_trace", report)
# Test an exception_filter exception
with patch("zerver.logging_handlers.get_exception_reporter_filter",
return_value=15):
record.request.method = "POST" # type: ignore # this field is dynamically added
report = self.run_handler(record)
record.request.method = "GET" # type: ignore # this field is dynamically added
self.assertIn("host", report)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertIn("stack_trace", report)
# Test the catch-all exception handler doesn't throw
with patch('zerver.lib.error_notify.notify_server_error',
side_effect=Exception("queue error")):
self.handler.emit(record)
with self.settings(STAGING_ERROR_NOTIFICATIONS=False):
with patch('zerver.logging_handlers.queue_json_publish',
side_effect=Exception("queue error")):
self.handler.emit(record)
# Test no exc_info
record.exc_info = None
report = self.run_handler(record)
self.assertIn("host", report)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertEqual(report["stack_trace"], 'No stack trace available')
# Test arbitrary exceptions from request.user
record.request.user = None # type: ignore # this field is dynamically added
with patch("zerver.logging_handlers.traceback.print_exc"):
report = self.run_handler(record)
self.assertIn("host", report)
self.assertIn("user_email", report)
self.assertIn("message", report)
self.assertIn("stack_trace", report)
class LoggingConfigTest(TestCase):
    """Checks on the process-wide logging configuration."""
    @staticmethod
    def all_loggers() -> Iterator[logging.Logger]:
        """Yield every named Logger currently registered."""
        # There is no documented API for enumerating the loggers; but the
        # internals of `logging` haven't changed in ages, so just use them.
        loggerDict = logging.Logger.manager.loggerDict  # type: ignore
        for logger in loggerDict.values():
            # The dict also contains PlaceHolder entries; skip those.
            if not isinstance(logger, logging.Logger):
                continue
            yield logger
    def test_django_emails_disabled(self) -> None:
        """No logger may still carry Django's email-on-error handler."""
        for logger in self.all_loggers():
            # The `handlers` attribute is undocumented, but see comment on
            # `all_loggers`.
            for handler in logger.handlers:
                assert not isinstance(handler, AdminEmailHandler)
class ErrorFiltersTest(TestCase):
    """Tests for the query-string scrubbing used in error reports."""
    def test_clean_data_from_query_parameters(self) -> None:
        """Every query-parameter value must be masked; key names are kept."""
        # Local import: keeps the dependency confined to this test.
        from zerver.filters import clean_data_from_query_parameters
        self.assertEqual(clean_data_from_query_parameters("api_key=abcdz&stream=1"),
                         "api_key=******&stream=******")
        self.assertEqual(clean_data_from_query_parameters("api_key=abcdz&stream=foo&topic=bar"),
                         "api_key=******&stream=******&topic=******")
| 44.383886 | 120 | 0.660651 |
728cd7e76585c51f464f4b7815f21ed50e78b62c | 1,104 | py | Python | coreapp/migrations/0044_auto_20200327_1016.py | Quanscendence/braynai | ab828ca95571c6dffef2b2392522e6a4160a2304 | [
"MIT"
] | null | null | null | coreapp/migrations/0044_auto_20200327_1016.py | Quanscendence/braynai | ab828ca95571c6dffef2b2392522e6a4160a2304 | [
"MIT"
] | null | null | null | coreapp/migrations/0044_auto_20200327_1016.py | Quanscendence/braynai | ab828ca95571c6dffef2b2392522e6a4160a2304 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2020-03-27 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration (2020-03-27).

    Removes two ProjectBillingPrms fields and converts four ProjectPricing
    counters to floats defaulting to 0.0.  The name ``dahboard`` matches the
    (misspelled) field being removed — do not "fix" the spelling here or the
    migration no longer targets the field.
    """
    dependencies = [
        ('coreapp', '0043_auto_20200326_1521'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='projectbillingprms',
            name='admin',
        ),
        migrations.RemoveField(
            model_name='projectbillingprms',
            name='dahboard',
        ),
        migrations.AlterField(
            model_name='projectpricing',
            name='disk_space',
            field=models.FloatField(default=0.0),
        ),
        migrations.AlterField(
            model_name='projectpricing',
            name='end_point',
            field=models.FloatField(default=0.0),
        ),
        migrations.AlterField(
            model_name='projectpricing',
            name='iqs',
            field=models.FloatField(default=0.0),
        ),
        migrations.AlterField(
            model_name='projectpricing',
            name='user',
            field=models.FloatField(default=0.0),
        ),
    ]
| 26.285714 | 49 | 0.55163 |
82eb8c44760a23abe2f0ba9b1792e4080728df10 | 1,141 | py | Python | nbheapanalytics/__init__.py | oreillymedia/nbheapanalytics | 0ec4e0ac0c5c2a911a7413dbd36d7a478064f61d | [
"BSD-3-Clause"
] | null | null | null | nbheapanalytics/__init__.py | oreillymedia/nbheapanalytics | 0ec4e0ac0c5c2a911a7413dbd36d7a478064f61d | [
"BSD-3-Clause"
] | null | null | null | nbheapanalytics/__init__.py | oreillymedia/nbheapanalytics | 0ec4e0ac0c5c2a911a7413dbd36d7a478064f61d | [
"BSD-3-Clause"
] | null | null | null | from notebook.services.config import ConfigManager
from traitlets.config import Configurable
from traitlets import Unicode
def _jupyter_server_extension_paths():
return [{
'module': 'nbheapanalytics',
}]
def _jupyter_nbextension_paths():
return [
{
# Load this on all the pages!
"section": "common",
"dest": "nbheapanalytics",
"src": "static",
"require": "nbheapanalytics/main"
}
]
class HeapAnalytics(Configurable):
    """Traitlets-configurable holder for the Heap Analytics tracking id.

    ``setup_config`` pushes the id into the notebook's 'common' frontend
    config so the bundled JS extension can read it.
    Fix: corrected the user-visible help-text typo "Trackin" -> "Tracking".
    """
    tracking_id = Unicode(
        None,
        allow_none=True,
        help="""
        The Heap Analytics Tracking ID to use.
        Set to None to disable tracking.
        """,
        config=True
    )
    def setup_config(self):
        """Write ``tracking_id`` into the 'common' frontend config section.

        Returns the ConfigManager used, so callers can chain further updates.
        """
        # NOTE(review): original comment suspected ConfigManager behaves like
        # a singleton — unverified; instances do operate on the same files.
        cm = ConfigManager()
        cm.update(
            'common',
            {
                'HeapAnalytics': {
                    'tracking_id': self.tracking_id
                }
            }
        )
        return cm
def load_jupyter_server_extension(nbapp):
    """Server-extension entry point: configure analytics for *nbapp*."""
    analytics = HeapAnalytics(parent=nbapp)
    return analytics.setup_config()
| 23.285714 | 51 | 0.5539 |
382bc30f40272ffd005a39929af580b4b7498a7e | 42 | py | Python | tests/test_model.py | tyo-yo/cookiecutter_sample_9 | 56c2e7e6c9b4a7d7164d1d4190df59b264e9486f | [
"MIT"
] | null | null | null | tests/test_model.py | tyo-yo/cookiecutter_sample_9 | 56c2e7e6c9b4a7d7164d1d4190df59b264e9486f | [
"MIT"
] | null | null | null | tests/test_model.py | tyo-yo/cookiecutter_sample_9 | 56c2e7e6c9b4a7d7164d1d4190df59b264e9486f | [
"MIT"
def test_example():
    """Smoke test that the test harness runs (renamed from typo 'test_exmaple')."""
    assert 1 + 1 == 2
| 14 | 21 | 0.571429 |
3360ca6b4e734a5b4f684162fb9239a01aba6aa9 | 1,230 | py | Python | _unittests/ut_documentation/test_nb_logregclus.py | sdpython/mlinsights | bae59cda775a69bcce83b16b88df2f34a092cb60 | [
"MIT"
] | 48 | 2017-11-19T14:59:41.000Z | 2022-03-03T15:50:24.000Z | _unittests/ut_documentation/test_nb_logregclus.py | sdpython/mlinsights | bae59cda775a69bcce83b16b88df2f34a092cb60 | [
"MIT"
] | 87 | 2017-11-20T00:10:32.000Z | 2021-11-20T01:48:09.000Z | _unittests/ut_documentation/test_nb_logregclus.py | sdpython/mlinsights | bae59cda775a69bcce83b16b88df2f34a092cb60 | [
"MIT"
] | 12 | 2019-05-09T07:45:52.000Z | 2021-06-28T06:55:53.000Z | # -*- coding: utf-8 -*-
"""
@brief test log(time=23s)
"""
import os
import unittest
from sklearn import __version__ as sklver
from pyquickhelper.loghelper import fLOG
from pyquickhelper.pycode import add_missing_development_version
from pyquickhelper.ipythonhelper import test_notebook_execution_coverage
from pyquickhelper.texthelper import compare_module_version
import mlinsights
class TestNotebookLogRegClus(unittest.TestCase):
    """Executes the logistic_regression_clustering notebook end to end."""
    def setUp(self):
        # Notebook execution needs jyquickhelper; pull in a dev checkout if missing.
        add_missing_development_version(["jyquickhelper"], __file__, hide=True)
    def test_notebook_logregclus(self):
        """Run the notebook under coverage; skip on old scikit-learn."""
        fLOG(
            __file__,
            self._testMethodName,
            OutputPrint=__name__ == "__main__")
        self.assertTrue(mlinsights is not None)
        folder = os.path.join(os.path.dirname(__file__),
                              "..", "..", "_doc", "notebooks", "sklearn")
        try:
            test_notebook_execution_coverage(
                __file__, "logistic_regression_clustering",
                folder, 'mlinsights', fLOG=fLOG)
        except Exception as e:
            # Notebook requires scikit-learn >= 0.24; quietly skip on older.
            if compare_module_version(sklver, "0.24") < 0:
                return
            raise e
if __name__ == "__main__":
unittest.main()
| 30 | 79 | 0.656098 |
5d901c212d4ce4c3b2710e6ee828866ca7656bb6 | 27,990 | py | Python | collaboration/activity.py | vipulgupta2048/activity-turtle-confusion | 8d24b4e48e76ed8401fa6ae89a10c4dd73c3dcd2 | [
"MIT"
] | null | null | null | collaboration/activity.py | vipulgupta2048/activity-turtle-confusion | 8d24b4e48e76ed8401fa6ae89a10c4dd73c3dcd2 | [
"MIT"
] | null | null | null | collaboration/activity.py | vipulgupta2048/activity-turtle-confusion | 8d24b4e48e76ed8401fa6ae89a10c4dd73c3dcd2 | [
"MIT"
] | null | null | null | # Copyright (C) 2007, Red Hat, Inc.
# Copyright (C) 2010 Collabora Ltd. <http://www.collabora.co.uk/>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""UI interface to an activity in the presence service
STABLE.
"""
import logging
from functools import partial
import dbus
from dbus import PROPERTIES_IFACE
from gi.repository import GObject
from telepathy.client import Channel
from telepathy.interfaces import (CHANNEL,
CHANNEL_INTERFACE_GROUP,
CHANNEL_TYPE_TUBES,
CHANNEL_TYPE_TEXT,
CONNECTION,
PROPERTIES_INTERFACE)
from telepathy.constants import (CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES,
HANDLE_TYPE_ROOM,
HANDLE_TYPE_CONTACT,
PROPERTY_FLAG_WRITE)
CONN_INTERFACE_ACTIVITY_PROPERTIES = 'org.laptop.Telepathy.ActivityProperties'
CONN_INTERFACE_BUDDY_INFO = 'org.laptop.Telepathy.BuddyInfo'
_logger = logging.getLogger('sugar3.presence.activity')
class Activity(GObject.GObject):
"""UI interface for an Activity in the presence service
Activities in the presence service represent your and other user's
shared activities.
Properties:
id
color
name
type
joined
"""
__gsignals__ = {
'buddy-joined': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,
([GObject.TYPE_PYOBJECT])),
'buddy-left': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,
([GObject.TYPE_PYOBJECT])),
'new-channel': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,
([GObject.TYPE_PYOBJECT])),
'joined': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,
([GObject.TYPE_PYOBJECT, GObject.TYPE_PYOBJECT])),
}
__gproperties__ = {
'id': (str, None, None, None, GObject.PARAM_READABLE),
'name': (str, None, None, None, GObject.PARAM_READWRITE),
'tags': (str, None, None, None, GObject.PARAM_READWRITE),
'color': (str, None, None, None, GObject.PARAM_READWRITE),
'type': (str, None, None, None, GObject.PARAM_READABLE),
'private': (bool, None, None, True, GObject.PARAM_READWRITE),
'joined': (bool, None, None, False, GObject.PARAM_READABLE),
}
def __init__(self, account_path, connection, room_handle=None,
properties=None):
if room_handle is None and properties is None:
raise ValueError('Need to pass one of room_handle or properties')
if properties is None:
properties = {}
GObject.GObject.__init__(self)
self._account_path = account_path
self.telepathy_conn = connection
self.telepathy_text_chan = None
self.telepathy_tubes_chan = None
self.room_handle = room_handle
self._join_command = None
self._share_command = None
self._id = properties.get('id', None)
self._color = properties.get('color', None)
self._name = properties.get('name', None)
self._type = properties.get('type', None)
self._tags = properties.get('tags', None)
self._private = properties.get('private', True)
self._joined = properties.get('joined', False)
self._channel_self_handle = None
self._text_channel_group_flags = 0
self._buddies = {}
self._get_properties_call = None
if self.room_handle is not None:
self._start_tracking_properties()
def _start_tracking_properties(self):
bus = dbus.SessionBus()
self._get_properties_call = bus.call_async(
self.telepathy_conn.requested_bus_name,
self.telepathy_conn.object_path,
CONN_INTERFACE_ACTIVITY_PROPERTIES,
'GetProperties',
'u',
(self.room_handle,),
reply_handler=self.__got_properties_cb,
error_handler=self.__error_handler_cb,
utf8_strings=True)
# As only one Activity instance is needed per activity process,
# we can afford listening to ActivityPropertiesChanged like this.
self.telepathy_conn.connect_to_signal(
'ActivityPropertiesChanged',
self.__activity_properties_changed_cb,
dbus_interface=CONN_INTERFACE_ACTIVITY_PROPERTIES)
def __activity_properties_changed_cb(self, room_handle, properties):
print('%r: Activity properties changed to %r', self, properties)
self._update_properties(properties)
def __got_properties_cb(self, properties):
print('__got_properties_cb %r', properties)
self._get_properties_call = None
self._update_properties(properties)
def __error_handler_cb(self, error):
print('__error_handler_cb %r', error)
def _update_properties(self, new_props):
val = new_props.get('name', self._name)
if isinstance(val, str) and val != self._name:
self._name = val
self.notify('name')
val = new_props.get('tags', self._tags)
if isinstance(val, str) and val != self._tags:
self._tags = val
self.notify('tags')
val = new_props.get('color', self._color)
if isinstance(val, str) and val != self._color:
self._color = val
self.notify('color')
val = bool(new_props.get('private', self._private))
if val != self._private:
self._private = val
self.notify('private')
val = new_props.get('id', self._id)
if isinstance(val, str) and self._id is None:
self._id = val
self.notify('id')
val = new_props.get('type', self._type)
if isinstance(val, str) and self._type is None:
self._type = val
self.notify('type')
def object_path(self):
"""Get our dbus object path"""
return self._object_path
def do_get_property(self, pspec):
"""Retrieve a particular property from our property dictionary"""
if pspec.name == 'joined':
return self._joined
if self._get_properties_call is not None:
print('%r: Blocking on GetProperties() because someone '
'wants property %s', self, pspec.name)
self._get_properties_call.block()
if pspec.name == 'id':
return self._id
elif pspec.name == 'name':
return self._name
elif pspec.name == 'color':
return self._color
elif pspec.name == 'type':
return self._type
elif pspec.name == 'tags':
return self._tags
elif pspec.name == 'private':
return self._private
def do_set_property(self, pspec, val):
"""Set a particular property in our property dictionary"""
# FIXME: need an asynchronous API to set these properties,
# particularly 'private'
if pspec.name == 'name':
self._name = val
elif pspec.name == 'color':
self._color = val
elif pspec.name == 'tags':
self._tags = val
elif pspec.name == 'private':
self._private = val
else:
raise ValueError('Unknown property %r', pspec.name)
self._publish_properties()
def set_private(self, val, reply_handler, error_handler):
print('set_private %r', val)
self._activity.SetProperties({'private': bool(val)},
reply_handler=reply_handler,
error_handler=error_handler)
def get_joined_buddies(self):
"""Retrieve the set of Buddy objects attached to this activity
returns list of presence Buddy objects that we can successfully
create from the buddy object paths that PS has for this activity.
"""
return self._buddies.values()
def get_buddy_by_handle(self, handle):
"""Retrieve the Buddy object given a telepathy handle.
buddy object paths are cached in self._handle_to_buddy_path,
so we can get the buddy without calling PS.
"""
object_path = self._handle_to_buddy_path.get(handle, None)
if object_path:
buddy = self._ps_new_object(object_path)
return buddy
return None
def invite(self, buddy, message, response_cb):
"""Invite the given buddy to join this activity.
The callback will be called with one parameter: None on success,
or an exception on failure.
"""
if not self._joined:
raise RuntimeError('Cannot invite a buddy to an activity that is'
'not shared.')
self.telepathy_text_chan.AddMembers(
[buddy.contact_handle],
message,
dbus_interface=CHANNEL_INTERFACE_GROUP,
reply_handler=partial(self.__invite_cb, response_cb),
error_handler=partial(self.__invite_cb, response_cb))
def __invite_cb(self, response_cb, error=None):
response_cb(error)
def set_up_tubes(self, reply_handler, error_handler):
raise NotImplementedError()
def __joined_cb(self, join_command, error):
print('%r: Join finished %r', self, error)
if error is not None:
self.emit('joined', error is None, str(error))
self.telepathy_text_chan = join_command.text_channel
self.telepathy_tubes_chan = join_command.tubes_channel
self._channel_self_handle = join_command.channel_self_handle
self._text_channel_group_flags = join_command.text_channel_group_flags
self._start_tracking_buddies()
self._start_tracking_channel()
def _start_tracking_buddies(self):
group = self.telepathy_text_chan[CHANNEL_INTERFACE_GROUP]
group.GetAllMembers(reply_handler=self.__get_all_members_cb,
error_handler=self.__error_handler_cb)
group.connect_to_signal('MembersChanged',
self.__text_channel_members_changed_cb)
def _start_tracking_channel(self):
channel = self.telepathy_text_chan[CHANNEL]
channel.connect_to_signal('Closed', self.__text_channel_closed_cb)
def __get_all_members_cb(self, members, local_pending, remote_pending):
print('__get_all_members_cb %r %r', members,
self._text_channel_group_flags)
if self._channel_self_handle in members:
members.remove(self._channel_self_handle)
if not members:
return
self._resolve_handles(members, reply_cb=self._add_initial_buddies)
def _resolve_handles(self, input_handles, reply_cb):
def get_handle_owners_cb(handles):
self.telepathy_conn.InspectHandles(
HANDLE_TYPE_CONTACT,
handles,
reply_handler=reply_cb,
error_handler=self.__error_handler_cb,
dbus_interface=CONNECTION)
if self._text_channel_group_flags & \
CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
group = self.telepathy_text_chan[CHANNEL_INTERFACE_GROUP]
group.GetHandleOwners(input_handles,
reply_handler=get_handle_owners_cb,
error_handler=self.__error_handler_cb)
else:
get_handle_owners_cb(input_handles)
def _add_initial_buddies(self, contact_ids):
print('__add_initial_buddies %r', contact_ids)
# for contact_id in contact_ids:
# self._buddies[contact_id] = self._get_buddy(contact_id)
# Once we have the initial members, we can finish the join process
self._joined = True
self.emit('joined', True, None)
def __text_channel_members_changed_cb(self, message, added, removed,
local_pending, remote_pending,
actor, reason):
print('__text_channel_members_changed_cb %r',
[added, message, added, removed, local_pending,
remote_pending, actor, reason])
if self._channel_self_handle in added:
added.remove(self._channel_self_handle)
if added:
self._resolve_handles(added, reply_cb=self._add_buddies)
if self._channel_self_handle in removed:
removed.remove(self._channel_self_handle)
if removed:
self._resolve_handles(added, reply_cb=self._remove_buddies)
def _add_buddies(self, contact_ids):
for contact_id in contact_ids:
if contact_id not in self._buddies:
buddy = self._get_buddy(contact_id)
self.emit('buddy-joined', buddy)
self._buddies[contact_id] = buddy
def _remove_buddies(self, contact_ids):
for contact_id in contact_ids:
if contact_id in self._buddies:
buddy = self._get_buddy(contact_id)
self.emit('buddy-left', buddy)
del self._buddies[contact_id]
def _get_buddy(self, contact_id):
return None
# if contact_id in self._buddies:
# return self._buddies[contact_id]
# else:
# return Buddy(self._account_path, contact_id)
def join(self):
"""Join this activity.
Emits 'joined' and otherwise does nothing if we're already joined.
"""
if self._join_command is not None:
return
if self._joined:
self.emit('joined', True, None)
return
print('%r: joining', self)
self._join_command = _JoinCommand(self.telepathy_conn,
self.room_handle)
self._join_command.connect('finished', self.__joined_cb)
self._join_command.run()
def share(self, share_activity_cb, share_activity_error_cb):
if self.room_handle is not None:
raise ValueError('Already have a room handle')
self._share_command = _ShareCommand(self.telepathy_conn, self._id)
self._share_command.connect('finished',
partial(self.__shared_cb,
share_activity_cb,
share_activity_error_cb))
self._share_command.run()
def __shared_cb(self, share_activity_cb, share_activity_error_cb,
share_command, error):
print('%r: Share finished %r', self, error)
if error is None:
print "There was no error!"
self._joined = True
self.room_handle = share_command.room_handle
self.telepathy_text_chan = share_command.text_channel
self.telepathy_tubes_chan = share_command.tubes_channel
self._channel_self_handle = share_command.channel_self_handle
self._text_channel_group_flags = \
share_command.text_channel_group_flags
self._publish_properties()
self._start_tracking_properties()
self._start_tracking_buddies()
self._start_tracking_channel()
share_activity_cb(self)
else:
print("error = %s" % error)
share_activity_error_cb(self, error)
def _publish_properties(self):
properties = {}
if self._color is not None:
properties['color'] = str(self._color)
if self._name is not None:
properties['name'] = str(self._name)
if self._type is not None:
properties['type'] = self._type
if self._tags is not None:
properties['tags'] = self._tags
properties['private'] = self._private
self.telepathy_conn.SetProperties(
self.room_handle,
properties,
dbus_interface=CONN_INTERFACE_ACTIVITY_PROPERTIES)
def __share_error_cb(self, share_activity_error_cb, error):
logging.debug('%r: Share failed because: %s', self, error)
share_activity_error_cb(self, error)
# GetChannels() wrapper
def get_channels(self):
"""Retrieve communications channel descriptions for the activity
Returns a tuple containing:
- the D-Bus well-known service name of the connection
(FIXME: this is redundant; in Telepathy it can be derived
from that of the connection)
- the D-Bus object path of the connection
- a list of D-Bus object paths representing the channels
associated with this activity
"""
bus_name = self.telepathy_conn.requested_bus_name
connection_path = self.telepathy_conn.object_path
channels = [self.telepathy_text_chan.object_path,
self.telepathy_tubes_chan.object_path]
print('%r: bus name is %s, connection is %s, channels are %r',
self, bus_name, connection_path, channels)
return bus_name, connection_path, channels
# Leaving
def __text_channel_closed_cb(self):
self._joined = False
self.emit('joined', False, 'left activity')
def leave(self):
"""Leave this shared activity"""
print('%r: leaving', self)
self.telepathy_text_chan.Close()
class _BaseCommand(GObject.GObject):
    """Shared state and 'finished' signal for the async share/join commands.

    Subclasses drive telepathy D-Bus calls, fill in the channel attributes
    as replies arrive, and emit 'finished' with None on success or the
    error object on failure.
    """
    __gsignals__ = {
        'finished': (GObject.SIGNAL_RUN_FIRST, GObject.TYPE_NONE,
                     ([object])),
    }
    def __init__(self):
        GObject.GObject.__init__(self)
        # Populated by subclasses as the command progresses.
        self.text_channel = None
        self.text_channel_group_flags = None
        self.tubes_channel = None
        self.room_handle = None
        self.channel_self_handle = None
    def run(self):
        """Start the command; subclasses must override."""
        raise NotImplementedError()
class _ShareCommand(_BaseCommand):
    """Create (share) a new activity room and register it with the server.

    Resolves a room handle for the activity id, joins the room through
    _JoinCommand, then advertises the activity via the BuddyInfo interface.
    Emits 'finished' with None on success or with the error.
    """

    def __init__(self, connection, activity_id):
        _BaseCommand.__init__(self)
        self._connection = connection
        self._activity_id = activity_id
        self._finished = False
        self._join_command = None

    def run(self):
        # Map the activity id onto a telepathy room handle; the flow
        # continues asynchronously in __got_handles_cb.
        self._connection.RequestHandles(
            HANDLE_TYPE_ROOM,
            [self._activity_id],
            reply_handler=self.__got_handles_cb,
            error_handler=self.__error_handler_cb,
            dbus_interface=CONNECTION)

    def __got_handles_cb(self, handles):
        logging.debug('__got_handles_cb %r', handles)
        self.room_handle = handles[0]
        self._join_command = _JoinCommand(self._connection, self.room_handle)
        self._join_command.connect('finished', self.__joined_cb)
        self._join_command.run()

    def __joined_cb(self, join_command, error):
        # Fix: this was print('%r: Join finished %r', self, error) — in this
        # Python 2 module that prints a tuple, not a formatted message; it is
        # clearly a mangled log call, so route it through logging like the
        # sibling callback above.
        logging.debug('%r: Join finished %r', self, error)
        if error is not None:
            self._finished = True
            self.emit('finished', error)
            return
        self.text_channel = join_command.text_channel
        self.text_channel_group_flags = join_command.text_channel_group_flags
        self.tubes_channel = join_command.tubes_channel
        self._connection.AddActivity(
            self._activity_id,
            self.room_handle,
            reply_handler=self.__added_activity_cb,
            error_handler=self.__error_handler_cb,
            dbus_interface=CONN_INTERFACE_BUDDY_INFO)

    def __added_activity_cb(self):
        self._finished = True
        self.emit('finished', None)

    def __error_handler_cb(self, error):
        self._finished = True
        self.emit('finished', error)
class _JoinCommand(_BaseCommand):
    """Join a chat room over Telepathy.

    Requests the room's text and tubes channels in parallel, waits until
    both are ready, adds ourselves to the text channel's member group and,
    once membership is confirmed, configures the room's properties.

    Emits 'finished' with ``None`` on success or with the error object on
    failure.
    """

    def __init__(self, connection, room_handle):
        _BaseCommand.__init__(self)
        self._connection = connection
        self._finished = False
        self.room_handle = room_handle
        self._global_self_handle = None
        # Bug fix: initialize channel state up front. _tubes_ready()
        # inspects these attributes as soon as *either* channel becomes
        # ready; previously they were only created inside the ready
        # callbacks, raising AttributeError when one channel became ready
        # before the other.
        self.text_channel = None
        self.tubes_channel = None
        self.channel_self_handle = None
        self.text_channel_group_flags = 0

    def run(self):
        """Start the join sequence by fetching our connection-wide handle."""
        if self._finished:
            raise RuntimeError('This command has already finished')

        self._connection.Get(
            CONNECTION,
            'SelfHandle',
            reply_handler=self.__get_self_handle_cb,
            error_handler=self.__error_handler_cb,
            dbus_interface=PROPERTIES_IFACE)

    def __get_self_handle_cb(self, handle):
        self._global_self_handle = handle

        # Request the text and tubes channels concurrently; _tubes_ready()
        # proceeds only after both report ready.
        self._connection.RequestChannel(
            CHANNEL_TYPE_TEXT,
            HANDLE_TYPE_ROOM,
            self.room_handle, True,
            reply_handler=self.__create_text_channel_cb,
            error_handler=self.__error_handler_cb,
            dbus_interface=CONNECTION)

        self._connection.RequestChannel(
            CHANNEL_TYPE_TUBES,
            HANDLE_TYPE_ROOM,
            self.room_handle, True,
            reply_handler=self.__create_tubes_channel_cb,
            error_handler=self.__error_handler_cb,
            dbus_interface=CONNECTION)

    def __create_text_channel_cb(self, channel_path):
        Channel(self._connection.requested_bus_name, channel_path,
                ready_handler=self.__text_channel_ready_cb)

    def __create_tubes_channel_cb(self, channel_path):
        print("Creating tubes channel with bus name %s" %
              (self._connection.requested_bus_name))
        print("Creating tubes channel with channel path %s" % (channel_path))
        Channel(self._connection.requested_bus_name, channel_path,
                ready_handler=self.__tubes_channel_ready_cb)

    def __error_handler_cb(self, error):
        self._finished = True
        self.emit('finished', error)

    def __tubes_channel_ready_cb(self, channel):
        # Bug fix: these prints used logging-style ('%r ...', args) calls,
        # which printed the raw format string followed by the args; format
        # explicitly instead.
        print('%r: Tubes channel %r is ready' % (self, channel))
        self.tubes_channel = channel
        self._tubes_ready()

    def __text_channel_ready_cb(self, channel):
        print('%r: Text channel %r is ready' % (self, channel))
        self.text_channel = channel
        self._tubes_ready()

    def _tubes_ready(self):
        # Wait until both channels have reported ready.
        if self.text_channel is None or \
           self.tubes_channel is None:
            return

        print('%r: finished setting up tubes' % (self,))
        self._add_self_to_channel()

    def __text_channel_group_flags_changed_cb(self, added, removed):
        print('__text_channel_group_flags_changed_cb %r %r' %
              (added, removed))
        self.text_channel_group_flags |= added
        self.text_channel_group_flags &= ~removed

    def _add_self_to_channel(self):
        # FIXME: cope with non-Group channels here if we want to support
        # non-OLPC-compatible IMs

        group = self.text_channel[CHANNEL_INTERFACE_GROUP]

        def got_all_members(members, local_pending, remote_pending):
            print('got_all_members members %r local_pending %r '
                  'remote_pending %r' % (members, local_pending,
                                         remote_pending))

            # Channels with channel-specific handles identify us by a
            # per-channel handle rather than the connection-wide one.
            if self.text_channel_group_flags & \
                    CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
                self_handle = self.channel_self_handle
            else:
                self_handle = self._global_self_handle

            if self_handle in local_pending:
                print('%r: We are in local pending - entering' % (self,))
                group.AddMembers(
                    [self_handle],
                    '',
                    reply_handler=lambda: None,
                    error_handler=lambda e:
                    self._join_failed_cb(e, 'got_all_members AddMembers'))

            if members:
                self.__text_channel_members_changed_cb('', members, (),
                                                       (), (), 0, 0)

        def got_group_flags(flags):
            self.text_channel_group_flags = flags
            # by the time we hook this, we need to know the group flags
            group.connect_to_signal('MembersChanged',
                                    self.__text_channel_members_changed_cb)

            # bootstrap by getting the current state. This is where we find
            # out whether anyone was lying to us in their PEP info
            group.GetAllMembers(reply_handler=got_all_members,
                                error_handler=self.__error_handler_cb)

        def got_self_handle(channel_self_handle):
            self.channel_self_handle = channel_self_handle
            group.connect_to_signal(
                'GroupFlagsChanged',
                self.__text_channel_group_flags_changed_cb)
            group.GetGroupFlags(reply_handler=got_group_flags,
                                error_handler=self.__error_handler_cb)

        group.GetSelfHandle(reply_handler=got_self_handle,
                            error_handler=self.__error_handler_cb)

    def __text_channel_members_changed_cb(self, message, added, removed,
                                          local_pending, remote_pending,
                                          actor, reason):
        print('__text_channel_members_changed_cb added %r removed %r '
              'local_pending %r remote_pending %r channel_self_handle '
              '%r' % (added, removed, local_pending, remote_pending,
                      self.channel_self_handle))

        if self.text_channel_group_flags & \
                CHANNEL_GROUP_FLAG_CHANNEL_SPECIFIC_HANDLES:
            self_handle = self.channel_self_handle
        else:
            self_handle = self._global_self_handle

        if self_handle in added:
            # We are now a member; if the channel supports properties,
            # configure the room, otherwise we are done.
            if PROPERTIES_INTERFACE not in self.text_channel:
                self._finished = True
                self.emit('finished', None)
            else:
                self.text_channel[PROPERTIES_INTERFACE].ListProperties(
                    reply_handler=self.__list_properties_cb,
                    error_handler=self.__error_handler_cb)

    def __list_properties_cb(self, prop_specs):
        # FIXME: invite-only ought to be set on private activities; but
        # since only the owner can change invite-only, that would break
        # activity scope changes.
        props = {
            # otherwise buddy resolution breaks
            'anonymous': False,
            # anyone who knows about the channel can join
            'invite-only': False,
            # so non-owners can invite others
            'invite-restricted': False,
            # vanish when there are no members
            'persistent': False,
            # don't appear in server room lists
            'private': True,
        }
        props_to_set = []
        for ident, name, sig_, flags in prop_specs:
            value = props.pop(name, None)
            if value is not None:
                if flags & PROPERTY_FLAG_WRITE:
                    props_to_set.append((ident, value))
                # FIXME: else error, but only if we're creating the room?
        # FIXME: if props is nonempty, then we want to set props that aren't
        # supported here - raise an error?

        if props_to_set:
            self.text_channel[PROPERTIES_INTERFACE].SetProperties(
                props_to_set, reply_handler=self.__set_properties_cb,
                error_handler=self.__error_handler_cb)
        else:
            self._finished = True
            self.emit('finished', None)

    def __set_properties_cb(self):
        self._finished = True
        self.emit('finished', None)
| 38.029891 | 79 | 0.621222 |
266345479e3032fb741e2f11d45b80aa5fda8eae | 524 | py | Python | regex-and-parsing/validating-parsing-email-address.py | anishLearnsToCode/hackerrank-python | 7d707c07af051e7b00471ebe547effd7e1d6d9d9 | [
"MIT"
] | 2 | 2020-05-21T16:18:26.000Z | 2021-11-28T15:44:33.000Z | regex-and-parsing/validating-parsing-email-address.py | anishLearnsToCode/hackerrank-python | 7d707c07af051e7b00471ebe547effd7e1d6d9d9 | [
"MIT"
] | null | null | null | regex-and-parsing/validating-parsing-email-address.py | anishLearnsToCode/hackerrank-python | 7d707c07af051e7b00471ebe547effd7e1d6d9d9 | [
"MIT"
] | 2 | 2020-11-01T00:55:53.000Z | 2021-10-31T16:05:17.000Z | # https://www.hackerrank.com/challenges/validating-named-email-addresses/problem
import email.utils
import re
# A valid address: a username starting with a letter followed by word
# characters, dashes or dots; an alphabetic domain; and a 1-3 letter TLD.
email_pattern = r'([a-zA-Z](\w|\d|_|-|[.])*)@([a-zA-Z])*[.]([a-zA-Z]{1,3})'


def is_valid_email_address(person):
    """Return True when the (name, address) pair holds a valid address."""
    address = person[1]
    match = re.fullmatch(email_pattern, address)
    return match is not None
# Read n lines, parse each into a (name, address) pair, then echo back
# only the entries whose address is valid.
people = [email.utils.parseaddr(input()) for _ in range(int(input()))]
for person in people:
    if is_valid_email_address(person):
        print(email.utils.formataddr(person))
| 22.782609 | 80 | 0.683206 |
ee4ac62feb1974c6f6bedd51dac813349480d81f | 14,154 | py | Python | skbio/io/format/blast6.py | jairideout/scikit-bio | 81a1ce5acb434603c537f832caee64a76db19190 | [
"BSD-3-Clause"
] | null | null | null | skbio/io/format/blast6.py | jairideout/scikit-bio | 81a1ce5acb434603c537f832caee64a76db19190 | [
"BSD-3-Clause"
] | null | null | null | skbio/io/format/blast6.py | jairideout/scikit-bio | 81a1ce5acb434603c537f832caee64a76db19190 | [
"BSD-3-Clause"
] | null | null | null | """
BLAST+6 format (:mod:`skbio.io.format.blast6`)
==============================================
.. currentmodule:: skbio.io.format.blast6
The BLAST+6 format (``blast+6``) stores the results of a BLAST [1]_ database
search. The results are stored in a simple tabular format with no column
headers. Values are separated by the tab character.
An example BLAST+6-formatted file comparing two protein sequences, taken
from [2]_ (tab characters represented by ``<tab>``)::
moaC<tab>gi|15800534|ref|NP_286546.1|<tab>100.00<tab>161<tab>0<tab>0<tab>1\
<tab>161<tab>1<tab>161<tab>3e-114<tab>330
moaC<tab>gi|170768970|ref|ZP_02903423.1|<tab>99.38<tab>161<tab>1<tab>0\
<tab>1<tab>161<tab>1<tab>161<tab>9e-114<tab>329
Format Support
--------------
**Has Sniffer: No**
**State: Experimental as of 0.4.0-dev.**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |No |:mod:`pandas.DataFrame` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
BLAST+6 format is a tabular text-based format produced by both BLAST+ output
format 6 (``-outfmt 6``) and legacy BLAST output format 8 (``-m 8``). It is
tab-separated and has no column headers. With BLAST+, users can specify the
columns that are present in their BLAST output file by specifying column names
(e.g., ``-outfmt "6 qseqid sseqid bitscore qstart sstart"``), if the default
columns output by BLAST are not desired.
BLAST Column Types
^^^^^^^^^^^^^^^^^^
The following column types are output by BLAST and supported by scikit-bio.
This information is taken from [3]_.
+-----------+------------------------------------+-----+
|Name |Description |Type |
+===========+====================================+=====+
|qseqid |Query Seq-id |str |
+-----------+------------------------------------+-----+
|qgi |Query GI |int |
+-----------+------------------------------------+-----+
|qacc |Query accession |str |
+-----------+------------------------------------+-----+
|qaccver |Query accession.version |str |
+-----------+------------------------------------+-----+
|qlen |Query sequence length |int |
+-----------+------------------------------------+-----+
|sseqid |Subject Seq-id |str |
+-----------+------------------------------------+-----+
|sallseqid |All subject Seq-id(s), separated by |str |
| |a ';' | |
+-----------+------------------------------------+-----+
|sgi |Subject GI |int |
+-----------+------------------------------------+-----+
|sallgi |All subject GIs |int |
+-----------+------------------------------------+-----+
|sacc |Subject accession |str |
+-----------+------------------------------------+-----+
|saccver |Subject accession.version |str |
+-----------+------------------------------------+-----+
|sallacc |All subject accessions |str |
+-----------+------------------------------------+-----+
|slen |Subject sequence length |int |
+-----------+------------------------------------+-----+
|qstart |Start of alignment in query |int |
+-----------+------------------------------------+-----+
|qend |End of alignment in query |int |
+-----------+------------------------------------+-----+
|sstart |Start of alignment in subject |int |
+-----------+------------------------------------+-----+
|send |End of alignment in subject |int |
+-----------+------------------------------------+-----+
|qseq |Aligned part of query sequence |str |
+-----------+------------------------------------+-----+
|sseq |Aligned part of subject sequence |str |
+-----------+------------------------------------+-----+
|evalue |Expect value |float|
+-----------+------------------------------------+-----+
|bitscore |Bit score |float|
+-----------+------------------------------------+-----+
|score |Raw score |int |
+-----------+------------------------------------+-----+
|length |Alignment length |int |
+-----------+------------------------------------+-----+
|pident |Percent of identical matches |float|
+-----------+------------------------------------+-----+
|nident |Number of identical matches |int |
+-----------+------------------------------------+-----+
|mismatch |Number of mismatches |int |
+-----------+------------------------------------+-----+
|positive |Number of positive-scoring matches |int |
+-----------+------------------------------------+-----+
|gapopen |Number of gap openings |int |
+-----------+------------------------------------+-----+
|gaps |Total number of gaps |int |
+-----------+------------------------------------+-----+
|ppos |Percentage of positive-scoring matc\|float|
| |hes | |
+-----------+------------------------------------+-----+
|frames |Query and subject frames separated |str |
| |by a '/' | |
+-----------+------------------------------------+-----+
|qframe |Query frame |int |
+-----------+------------------------------------+-----+
|sframe |Subject frame |int |
+-----------+------------------------------------+-----+
|btop |Blast traceback operations (BTOP) |int |
+-----------+------------------------------------+-----+
|staxids |Unique Subject Taxonomy ID(s), sepa\|str |
| |rated by a ';' (in numerical order) | |
+-----------+------------------------------------+-----+
|sscinames |Unique Subject Scientific Name(s), |str |
| |separated by a ';' | |
+-----------+------------------------------------+-----+
|scomnames |Unique Subject Common Name(s), sepa\|str |
| |rated by a ';' | |
+-----------+------------------------------------+-----+
|sblastnames|unique Subject Blast Name(s), separ\|str |
| |ated by a ';' (in alphabetical | |
| |order) | |
+-----------+------------------------------------+-----+
|sskingdoms |unique Subject Super Kingdom(s), se\|str |
| |parated by a ';' (in alphabetical | |
| |order) | |
+-----------+------------------------------------+-----+
|stitle |Subject Title |str |
+-----------+------------------------------------+-----+
|sstrand |Subject Strand |str |
+-----------+------------------------------------+-----+
|salltitles |All Subject Title(s), separated by |str |
| |a '<>' | |
+-----------+------------------------------------+-----+
|qcovs |Query Coverage Per Subject |int |
+-----------+------------------------------------+-----+
|qcovhsp |Query Coverage Per HSP |int |
+-----------+------------------------------------+-----+
.. note:: When a BLAST+6-formatted file contains ``N/A`` values, scikit-bio
will convert these values into ``np.nan``, matching pandas' convention for
representing missing data.
.. note:: scikit-bio stores columns of type ``int`` as type ``float`` in the
returned ``pd.DataFrame``. This is necessary in order to allow ``N/A``
values in integer columns (this is currently a limitation of pandas).
Format Parameters
-----------------
The following format parameters are available in ``blast+6`` format:
- ``default_columns``: ``False`` by default. If ``True``, will use the default
columns output by BLAST, which are qseqid, sseqid, pident, length, mismatch,
gapopen, qstart, qend, sstart, send, evalue, and bitscore.
.. warning:: When reading legacy BLAST files, you must pass
``default_columns=True`` because legacy BLAST does not allow users to
specify which columns are present in the output file.
- ``columns``: ``None`` by default. If provided, must be a list of column names
in the order they will appear in the file.
.. note:: Either ``default_columns`` or ``columns`` must be provided, as
``blast+6`` does not contain column headers.
Examples
--------
Suppose we have a ``blast+6`` file with default columns:
>>> from io import StringIO
>>> import skbio.io
>>> import pandas as pd
>>> fs = '\\n'.join([
... 'moaC\\tgi|15800534|ref|NP_286546.1|\\t100.00\\t161\\t0\\t0\\t1\\t161\
\\t1\\t161\\t3e-114\\t330',
... 'moaC\\tgi|170768970|ref|ZP_02903423.1|\\t99.38\\t161\\t1\\t0\\t1\\t\
161\\t1\\t161\\t9e-114\\t329'
... ])
>>> fh = StringIO(fs)
Read the file into a ``pd.DataFrame`` and specify that default columns should
be used:
>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
... default_columns=True)
>>> df # doctest: +NORMALIZE_WHITESPACE
qseqid sseqid pident length mismatch gapopen \\
0 moaC gi|15800534|ref|NP_286546.1| 100.00 161 0 0
1 moaC gi|170768970|ref|ZP_02903423.1| 99.38 161 1 0
<BLANKLINE>
qstart qend sstart send evalue bitscore
0 1 161 1 161 3.000000e-114 330
1 1 161 1 161 9.000000e-114 329
Suppose we have a ``blast+6`` file with user-supplied (non-default) columns:
>>> from io import StringIO
>>> import skbio.io
>>> import pandas as pd
>>> fs = '\\n'.join([
... 'moaC\\t100.00\\t0\\t161\\t0\\t161\\t330\\t1',
... 'moaC\\t99.38\\t1\\t161\\t0\\t161\\t329\\t1'
... ])
>>> fh = StringIO(fs)
Read the file into a ``pd.DataFrame`` and specify which columns are present
in the file:
>>> df = skbio.io.read(fh, format="blast+6", into=pd.DataFrame,
... columns=['qseqid', 'pident', 'mismatch', 'length',
... 'gapopen', 'qend', 'bitscore', 'sstart'])
>>> df # doctest: +NORMALIZE_WHITESPACE
qseqid pident mismatch length gapopen qend bitscore sstart
0 moaC 100.00 0 161 0 161 330 1
1 moaC 99.38 1 161 0 161 329 1
References
----------
.. [1] Altschul, S.F., Gish, W., Miller, W., Myers, E.W. & Lipman, D.J. (1990)
"Basic local alignment search tool." J. Mol. Biol. 215:403-410.
.. [2] http://blastedbio.blogspot.com/2014/11/column-headers-in-blast-tabular-\
and-csv.html
.. [3] http://www.ncbi.nlm.nih.gov/books/NBK279675/
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import functools
import pandas as pd
from skbio.io import create_format
# Registered skbio I/O format object for BLAST+ output format 6.
blast6 = create_format('blast+6')
# Maps each column name BLAST+ can produce to the dtype used when reading it.
# Columns that are conceptually integers are typed as float so that pandas
# can represent missing values ('N/A' -> NaN) in them.
_possible_columns = {'qseqid': str, 'qgi': float, 'qacc': str, 'qaccver': str,
                     'qlen': float, 'sseqid': str, 'sallseqid': str,
                     'sgi': float, 'sallgi': float, 'sacc': str,
                     'saccver': str, 'sallacc': str, 'slen': float,
                     'qstart': float, 'qend': float, 'sstart': float,
                     'send': float, 'qseq': str, 'sseq': str,
                     'evalue': float, 'bitscore': float, 'score': float,
                     'length': float, 'pident': float, 'nident': float,
                     'mismatch': float, 'positive': float, 'gapopen': float,
                     'gaps': float, 'ppos': float, 'frames': str,
                     'qframe': float, 'sframe': float, 'btop': float,
                     'staxids': str, 'sscinames': str, 'scomnames': str,
                     'sblastnames': str, 'sskingdoms': str, 'stitle': str,
                     'salltitles': str, 'sstrand': str, 'qcovs': float,
                     'qcovhsp': float}
# Columns BLAST+ emits for `-outfmt 6` when the user does not customize them
# (also what legacy BLAST `-m 8` produces).
_default_columns = ['qseqid', 'sseqid', 'pident', 'length', 'mismatch',
                    'gapopen', 'qstart', 'qend', 'sstart', 'send',
                    'evalue', 'bitscore']
@blast6.reader(pd.DataFrame, monkey_patch=False)
def _blast6_to_data_frame(fh, columns=None, default_columns=False):
    """Deserialize a BLAST+6 file into a ``pd.DataFrame``.

    Exactly one of ``columns`` (explicit column names, in file order) or
    ``default_columns=True`` (BLAST's standard 12 columns) must be given.
    """
    if default_columns and columns is not None:
        raise ValueError("`columns` and `default_columns` cannot both be"
                         " provided.")
    if not default_columns and columns is None:
        raise ValueError("Either `columns` or `default_columns` must be"
                         " provided.")
    if default_columns:
        columns = _default_columns
    else:
        # Validate in order so the first unknown name is the one reported.
        for name in columns:
            if name not in _possible_columns:
                raise ValueError("Unrecognized column (%r)."
                                 " Supported columns:\n%r" %
                                 (name, set(_possible_columns.keys())))

    # Options shared by both reads: tab-separated, headerless, and only
    # 'N/A' treated as missing.
    shared_kwargs = dict(na_values='N/A', sep='\t', header=None,
                         keep_default_na=False)

    # Peek at the first row to verify the column count before parsing all.
    first_row = pd.read_csv(fh, nrows=1, **shared_kwargs)
    if len(first_row.columns) != len(columns):
        raise ValueError("Specified number of columns (%d) does not"
                         " match the number of columns in the file (%d)."
                         % (len(columns), len(first_row.columns)))
    fh.seek(0)

    return pd.read_csv(fh, names=columns, dtype=_possible_columns,
                       **shared_kwargs)
| 46.86755 | 79 | 0.436626 |
47659e07608a00ac7484888e9eb444f46ddb2b71 | 3,454 | py | Python | test1/helper.py | omr00t/Semati | 08f8020b934548818f02de5d6a3ec819098d51eb | [
"BSD-2-Clause"
] | null | null | null | test1/helper.py | omr00t/Semati | 08f8020b934548818f02de5d6a3ec819098d51eb | [
"BSD-2-Clause"
] | null | null | null | test1/helper.py | omr00t/Semati | 08f8020b934548818f02de5d6a3ec819098d51eb | [
"BSD-2-Clause"
] | null | null | null | from .models import Personality, Question
from json import loads
def get_all_book_questions():
"""
This returns the questions in the way that the book presents them.
"""
questions = []
for i in range(0, 6):
for j in Personality.objects.all()[i].first_three_questions:
questions.append(j)
for i in range(0, 6):
for j in Personality.objects.all()[i].middle_three_questions:
questions.append(j)
for i in range(0, 6):
for j in Personality.objects.all()[i].last_three_questions:
questions.append(j)
return questions
def get_result(questions, test_id):
"""
Returns the result in the {personality:value} format according to the appropriate test_id.
"""
# Get the questions' actual IDs.
question_IDs = []
for q in questions:
question_IDs.append(q.strip('q'))
question_objects = list(Question.objects.filter(id__in=question_IDs))
result = {}
if(test_id == 1):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points1)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 2):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points2)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 3):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points3)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 4):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points4)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 5):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points5)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 6):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points6)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 7):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points7)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
elif(test_id == 8):
for i in range(0, 6):
l = loads(Personality.objects.all()[i].personality_points8)
result.update({ Personality.objects.all()[i] : l[len(list(set(Personality.objects.all()[i].all_questions).intersection(question_objects)))]})
else:
# This should not happen
pass
# Sort the result by their values:
sorted_result = {k: v for k, v in sorted(result.items(), key=lambda item: item[1], reverse=True)}
return sorted_result
| 40.162791 | 153 | 0.640996 |
08ea0fbc48e236ea1b80aa400f8ac9359f711d22 | 391 | py | Python | allwork/asgi.py | Eysen/allwork_django | 9b15a8a1f93d33752e3530352339bf4bfea4ae41 | [
"MIT"
] | null | null | null | allwork/asgi.py | Eysen/allwork_django | 9b15a8a1f93d33752e3530352339bf4bfea4ae41 | [
"MIT"
] | null | null | null | allwork/asgi.py | Eysen/allwork_django | 9b15a8a1f93d33752e3530352339bf4bfea4ae41 | [
"MIT"
] | null | null | null | """
ASGI config for allwork project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'allwork.settings')
application = get_asgi_application()
| 23 | 78 | 0.785166 |
de254fed62dc52d2020106b0a446361b1d45a20d | 738 | py | Python | python/runner.py | xHeliotrope/led_tweet_writer | ab6de46097e11d79ba7f9a49ea388a09d829da04 | [
"BSD-2-Clause"
] | null | null | null | python/runner.py | xHeliotrope/led_tweet_writer | ab6de46097e11d79ba7f9a49ea388a09d829da04 | [
"BSD-2-Clause"
] | null | null | null | python/runner.py | xHeliotrope/led_tweet_writer | ab6de46097e11d79ba7f9a49ea388a09d829da04 | [
"BSD-2-Clause"
] | null | null | null | from subprocess import Popen, call, PIPE
import os
def run_processes():
my_env = os.environ.copy()
my_env["PYTHONPATH"] = ".:/home/pi/Development/rpi_ws281x/python/build/lib.linux-arm7l-2.7/:/home/pi/Development/rpi_ws281x/pyton/examples/"
proc1 = Popen("%s %s %s" % ('sudo', '/usr/bin/python', 'examples/scroll_text.py'), env=my_env, stdout=PIPE, shell=True)
proc2 = Popen("%s %s" % ('/home/pi/.nvm/versions/node/v8.8.1/bin/node', 'tweet_writer.js'), stdout=PIPE, shell=True)
(out, err) = proc1.communicate()
(out2, err2) = proc2.communicate()
proc1.wait()
proc2.wait()
#call(['sudo', 'PYTHONPATH=".:build/lib.linux-arm7l-2.7"', 'python', 'examples/scroll_text.py'], shell=True)
run_processes()
| 46.125 | 144 | 0.665312 |
e03289d6a7e035b3362fe4678d8e5529cbd06406 | 2,017 | py | Python | python-algorithm/leetcode/problem_206.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 5 | 2017-06-11T09:19:34.000Z | 2019-01-16T16:58:31.000Z | python-algorithm/leetcode/problem_206.py | isudox/leetcode-solution | 60085e64deaf396a171367affc94b18114565c43 | [
"MIT"
] | 5 | 2020-03-22T13:53:54.000Z | 2020-03-23T08:49:35.000Z | python-algorithm/leetcode/problem_206.py | isudox/nerd-algorithm | c1fbe153953cf3fc24395f75d102016fdf9ea0fa | [
"MIT"
] | 1 | 2019-03-02T15:50:43.000Z | 2019-03-02T15:50:43.000Z | """206. Reverse Linked List
https://leetcode.com/problems/reverse-linked-list/
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively.
Could you implement both?
"""
from typing import Optional
from common.list_node import ListNode
class Solution:
    def reverse_list(self, head: ListNode) -> ListNode:
        """Reverse the list in place, iteratively.

        time complexity: O(N)
        space complexity: O(1)
        """
        if head is None or head.next is None:
            return head
        prev = None
        curr = head
        while curr is not None:
            # Re-point the current node backwards and advance both cursors.
            curr.next, prev, curr = prev, curr, curr.next
        return prev

    def reverse_list1(self, head: ListNode) -> ListNode:
        """Reverse the list in place, recursively.

        time complexity: O(N)
        space complexity: O(N)
        """
        if head is None or head.next is None:
            return head
        rest = head.next
        head.next = None
        new_head = self.reverse_list1(rest)
        # After the rest is reversed, its old first node is now its tail;
        # attach the detached head behind it.
        rest.next = head
        return new_head

    def reverse_list2(self, head: ListNode) -> ListNode:
        """Reverse by copying every node and linking the copies backwards."""
        if head is None:
            return None
        copies = []
        node = head
        while node is not None:
            copies.append(ListNode(node.val))
            node = node.next
        for idx in range(len(copies) - 1, 0, -1):
            copies[idx].next = copies[idx - 1]
        return copies[-1]

    def reverse_list3(self, head: Optional[ListNode]) -> Optional[ListNode]:
        """Reverse recursively, tracking the new head via a nonlocal."""
        new_head = None

        def reverse_from(node: Optional[ListNode]) -> Optional[ListNode]:
            nonlocal new_head
            if node is None:
                return None
            if node.next is None:
                # The old tail becomes the new head.
                new_head = node
                return node
            tail = reverse_from(node.next)
            tail.next = node
            node.next = None
            return node

        reverse_from(head)
        return new_head
| 25.2125 | 76 | 0.539911 |
2b7d916a27826dff5deb79e71597498d79f7bf45 | 5,695 | py | Python | pylot/perception/fusion/fusion_operator.py | mageofboy/pylot | c3154dc24c9429b9916274894c72ef92e03c946d | [
"Apache-2.0"
] | 231 | 2019-06-05T00:22:00.000Z | 2022-03-28T06:15:00.000Z | pylot/perception/fusion/fusion_operator.py | mageofboy/pylot | c3154dc24c9429b9916274894c72ef92e03c946d | [
"Apache-2.0"
] | 108 | 2019-06-27T16:28:01.000Z | 2022-03-28T19:14:18.000Z | pylot/perception/fusion/fusion_operator.py | mageofboy/pylot | c3154dc24c9429b9916274894c72ef92e03c946d | [
"Apache-2.0"
] | 80 | 2019-06-07T01:08:13.000Z | 2022-03-28T01:44:42.000Z | from collections import deque
import erdos
import numpy as np
from pylot.perception.messages import ObstaclePositionsSpeedsMessage
class FusionOperator(erdos.Operator):
"""Fusion Operator
Args:
rgbd_max_range (:obj:`float`): Maximum distance of the rgbd frame
camera_fov (:obj:`float`): Angular field of view in radians of the RGBD
and RGB cameras used to infer depth info and generate bounding
boxes respectively. Note that camera position, orientation, and
FOV must be identical for both.
"""
def __init__(self,
pose_stream,
obstacles_stream,
depth_camera_stream,
fused_stream,
flags,
camera_fov=np.pi / 4,
rgbd_max_range=1000):
self.pose_stream = pose_stream
self.obstacles_stream = obstacles_stream
self.depth_camera_stream = depth_camera_stream
pose_stream.add_callback(self.update_pos)
obstacles_stream.add_callback(self.update_obstacles)
depth_camera_stream.add_callback(self.update_distances)
self._fused_stream = fused_stream
self._logger = erdos.utils.setup_logging(self.config.name,
self.config.log_file_name)
self._flags = flags
self._segments = []
self._rgbd_max_range = rgbd_max_range
# TODO(ionel): Check fov is same as the camera fov.
self._camera_fov = camera_fov
self._car_positions = deque()
self._distances = deque()
self._obstacles = deque()
@staticmethod
def connect(pose_stream, obstacles_stream, depth_camera_stream):
fused_stream = erdos.WriteStream()
return [fused_stream]
def __calc_obstacle_positions(self, obstacle_bboxes, distances,
car_position, car_orientation):
obstacle_positions = []
for bbox in obstacle_bboxes:
bounding_box_center = np.average(
[[bbox.x_min, bbox.x_max], [bbox.y_min, bbox.y_max]], axis=1)
distance = np.median(distances[bbox.x_min:bbox.x_max,
bbox.y_min:bbox.y_max])
vertical_angle, horizontal_angle = (
self._camera_fov * (bounding_box_center - distances.shape) /
distances.shape)
horizontal_diagonal = distance * np.cos(vertical_angle)
forward_distance = horizontal_diagonal * np.cos(horizontal_angle)
right_distance = horizontal_diagonal * np.sin(horizontal_angle)
# TODO(peter): check that this is right
position_x = car_position[0] + forward_distance * np.cos(
car_orientation) - right_distance * np.sin(car_orientation)
position_y = car_position[1] + forward_distance * np.sin(
car_orientation) - right_distance * np.cos(car_orientation)
obstacle_positions.append([position_x, position_y])
return obstacle_positions
def __discard_old_data(self):
"""Discards stored data that are too old to be used for fusion"""
oldest_timestamp = min([
self._car_positions[-1][0], self._distances[-1][0],
self._obstacles[-1][0]
])
for queue in [self._car_positions, self._distances, self._obstacles]:
while queue[0][0] < oldest_timestamp:
queue.popleft()
@erdos.profile_method()
def fuse(self):
# Return if we don't have car position, distances or obstacles.
if min(
map(len,
[self._car_positions, self._distances, self._obstacles
])) == 0:
return
self.__discard_old_data()
obstacle_positions = self.__calc_obstacle_positions(
self._obstacles[0][1], self._distances[0][1],
self._car_positions[0][1][0],
np.arccos(self._car_positions[0][1][1][0]))
timestamp = self._obstacles[0][0]
output_msg = ObstaclePositionsSpeedsMessage(timestamp,
obstacle_positions)
self._fused_stream.send(output_msg)
def update_pos(self, msg):
vehicle_pos = ((msg.data.transform.location.x,
msg.data.transform.location.y,
msg.data.transform.location.z),
(msg.data.transform.forward_vector.x,
msg.data.transform.forward_vector.y,
msg.data.transform.forward_vector.z))
self._car_positions.append((msg.timestamp, vehicle_pos))
def update_obstacles(self, msg):
# Filter obstacles
self._logger.info("Received update obstacles")
vehicle_bounds = []
for obstacle in msg.obstacles:
self._logger.info("%s received: %s ", self.config.name, obstacle)
# TODO(ionel): Deal with different types of labels.
if obstacle.label in {"truck", "car"}:
vehicle_bounds.append(obstacle.bounding_box_2D)
self._obstacles.append((msg.timestamp, vehicle_bounds))
def update_distances(self, msg):
self._distances.append((msg.timestamp, msg.frame.as_numpy_array()))
def run(self):
while True:
pose_msg = self.pose_stream.read()
obstacles_msg = self.obstacles_stream.read()
depth_camera_msg = self.depth_camera_stream.read()
self.update_pos(pose_msg)
self.update_obstacles(obstacles_msg)
self.update_distances(depth_camera_msg)
self.fuse()
| 40.678571 | 79 | 0.607902 |
115558f200245f83b4d3485f706365f731a4ee3a | 3,823 | py | Python | examples/classical_ml/misc/neural_network.py | Learn-Live/activity_recognition | 76fa7bcecc3e422f1ea59fd1aaf576669e1248fb | [
"Apache-2.0"
] | 1 | 2022-01-10T21:02:50.000Z | 2022-01-10T21:02:50.000Z | examples/classical_ml/misc/neural_network.py | Learn-Live/activity_recognition | 76fa7bcecc3e422f1ea59fd1aaf576669e1248fb | [
"Apache-2.0"
] | null | null | null | examples/classical_ml/misc/neural_network.py | Learn-Live/activity_recognition | 76fa7bcecc3e422f1ea59fd1aaf576669e1248fb | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv1D, GlobalMaxPooling1D, Embedding
from keras.datasets import imdb
from keras.utils import plot_model
from keras import optimizers
import matplotlib.pyplot as plt
import numpy as np
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot

#
# Get data: IMDB sentiment dataset, keeping the 10000 most frequent words.
#
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)
test_data = x_test
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
# Pad/truncate every review to a fixed length of 800 tokens.
x_train = sequence.pad_sequences(sequences=x_train, maxlen=800)
x_test = sequence.pad_sequences(sequences=x_test, maxlen=800)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)
print('Build model...')
model = Sequential()
#
# Prepare model: embedding -> 1D conv -> global max pool -> dense classifier.
#
model.add(Embedding(input_dim=10000, output_dim=100, input_length=800))
model.add(Dropout(0.5))
model.add(Conv1D(filters=250, kernel_size=3, padding='valid', activation='relu', strides=1))
model.add(GlobalMaxPooling1D())
model.add(Dense(250))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# model.summary()
SVG(model_to_dot(model, show_shapes=True).create(prog='dot', format='svg'))
#
# Training
#
history = model.fit(x_train, y_train, batch_size=32, epochs=10, validation_data=(x_test, y_test))
#
# Evaluation
#
results = model.evaluate(x_test, y_test)
print("Accuracy on test set:", results)
print('Test loss:', results[0])
print('Test accuracy:', results[1])
#
# Plot training/validation loss and accuracy side by side.
#
val_loss = history.history['val_loss']
loss = history.history['loss']
accuracy = history.history['acc']
val_accuracy = history.history['val_acc']
epochs = range(1, len(accuracy) + 1)
plt.rcParams['figure.figsize'] = [10, 5]
plt.subplot(1, 2, 1)
plt.plot(epochs, loss, 'bo', label='Training loss', color='red')
plt.plot(epochs, val_loss, 'b', label='Validation loss', color='green')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, accuracy, 'bo', label='Training acc', color='red')
plt.plot(epochs, val_accuracy, 'b', label='Validation acc', color='green')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.tight_layout()
plt.show()
#
# PREDICTION model (smaller embedding, no dropout before the output layer).
#
model_prediction = Sequential()
model_prediction.add(Embedding(10000, 50, input_length=800))
model_prediction.add(Dropout(0.5))
model_prediction.add(Conv1D(filters=250, kernel_size=3, padding='valid', activation='relu', strides=1))
model_prediction.add(GlobalMaxPooling1D())
model_prediction.add(Dense(250))
model_prediction.add(Activation('relu'))
model_prediction.add(Dense(1))
model_prediction.add(Activation('sigmoid'))
model_prediction.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
#
# Training
#
history = model_prediction.fit(x_train, y_train, batch_size=32, epochs=3, validation_data=(x_test, y_test))
#
# Evaluation
# BUG FIX: previously re-evaluated the first `model`; evaluate the freshly
# trained `model_prediction` instead.
#
results = model_prediction.evaluate(x_test, y_test)
print("Accuracy on test set:", results)
print('Test loss:', results[0])
print('Test accuracy:', results[1])
#
# Plot the distribution of predicted probabilities.
#
plt.hist(model_prediction.predict(x_test))
#
# Prediction: count false positives (predicted positive, labelled negative).
#
y_pred = model_prediction.predict(x_test)
prediction_is_positive = y_pred > 0.5
# BUG FIX: avoid the hard-coded test-set size of 25000.
label_is_negative = y_test.reshape((-1, 1)) == 0
incorrect_cases = np.where(np.logical_and(prediction_is_positive, label_is_negative))[0]
# print ("All incorrect cases: ",incorrect_cases[0:])
print("Number of false positives:", len(incorrect_cases))
24fe1754aa024484abfd062797340cf633a239ad | 904 | py | Python | tests/journal.api/help_notes.py | pyre/pyre | 0f903836f52450bf81216c5dfdfdfebb16090177 | [
"BSD-3-Clause"
] | 25 | 2018-04-23T01:45:39.000Z | 2021-12-10T06:01:23.000Z | tests/journal.api/help_notes.py | pyre/pyre | 0f903836f52450bf81216c5dfdfdfebb16090177 | [
"BSD-3-Clause"
] | 53 | 2018-05-31T04:55:00.000Z | 2021-10-07T21:41:32.000Z | tests/journal.api/help_notes.py | pyre/pyre | 0f903836f52450bf81216c5dfdfdfebb16090177 | [
"BSD-3-Clause"
] | 12 | 2018-04-23T22:50:40.000Z | 2022-02-20T17:27:23.000Z | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
# (c) 1998-2021 all rights reserved
def test():
    """
    Verify access to the channel metadata
    """
    # pull in the journal package
    import journal

    # build a help channel
    channel = journal.help("test.channel")
    # grab its metadata and adjust it
    notes = channel.notes
    notes["application"] = "help_notes"
    notes["author"] = "michael"

    # re-fetch the metadata so we know the adjustments persisted
    notes = channel.notes
    # and compare against the expected values
    expected = {
        "application": "help_notes",
        "author": "michael",
        "channel": "test.channel",
        "severity": "help",
    }
    for key, value in expected.items():
        assert notes[key] == value

    # all done
    return
# bootstrap: run the test when executed as a script
if __name__ == "__main__":
    test()

# end of file
| 21.023256 | 69 | 0.629425 |
79c11158a7ee9c120ddd69d9fa1c69bf314c981d | 4,084 | py | Python | game/round_game.py | DarainS/texas-hold-em-poker | 6bb98f88b19a025affb2898c89ff0bc8c2abf840 | [
"MIT"
] | 1 | 2017-10-13T08:05:47.000Z | 2017-10-13T08:05:47.000Z | game/round_game.py | DarainS/texas-holdem-poker | 6bb98f88b19a025affb2898c89ff0bc8c2abf840 | [
"MIT"
] | null | null | null | game/round_game.py | DarainS/texas-holdem-poker | 6bb98f88b19a025affb2898c89ff0bc8c2abf840 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
from decimal import Decimal
from card import SevenCard
from deck import Deck
def show(s):
    """Print *s* to stdout; single indirection point for round-game output."""
    print(s)
class RoundGame():
"""
pot 算法:记录每个玩家的筹码投入量,然后从高到低计算 value ,玩家获得小于等于其最大筹码数的其他玩家的筹码。
"""
def __init__(self):
self.players = []
self.livingPlayers = []
self.smallBlind = Decimal()
self.bigBlind = Decimal()
self.pot = {}
self.buttonIndex = 0
self.deck = Deck()
def begin(self):
for p in self.players:
self.pot[p] = 0
self.livingPlayers = self.players.copy()
self.activePlayers = self.players.copy()
def gameEnd(self):
for p in self.players:
p.hands = []
def dealPlayersHands(self):
for i in [0, 1]:
for p in self.players:
p.hands.append(self.deck.dealOne())
for p in self.players:
p.sortHands()
def playerBet(self, player, betNum):
num = Decimal(str(betNum))
player.currentMoney -= num
self.pot[player] += num
def playerRaise(self, player, betNum):
self.pot[player] += Decimal(str(betNum))
def playerCheck(self, player):
pass
def playerFold(self, player):
self.livingPlayers.remove(player)
self.activePlayers.remove(player)
def playerAllIn(self, player):
self.activePlayers.remove(player)
def askBehaviours(self):
index = self.buttonIndex
for i in range(len(self.livingPlayers)):
index = self.nextLivingPlayerIndex(index)
if self.players[index].currentMoney <= 0:
continue
self.askPlayerBehaviour(self.players[index])
while not self.isThisTurnFinsh():
index = self.nextActivePlayerIndex(index)
self.askPlayerBehaviour(self.players[index])
def askPlayerBehaviour(self, player):
self.playerBet(player, Decimal(1))
print(str(player) + " " + 'checked')
def isThisTurnFinsh(self):
if len(self.pot.values()) <= 1:
return True
potMax = max(self.pot.values())
for p in self.livingPlayers:
if p.currentMoney == 0:
continue
if self.pot[p] < potMax:
return False
return True
def turnFinish(self):
pass
def nextLivingPlayerIndex(self, index):
id2 = index
while True:
id2 += 1
if id2 >= len(self.players):
id2 = 0
p = self.players[id2]
if p in self.livingPlayers:
return id2
def nextActivePlayerIndex(self, index):
id2 = index
while True:
id2 += 1
if id2 >= len(self.players):
index = 0
p = self.players[id2]
if p in self.livingPlayers and p.currentMoney > 0:
return id2
def isShowDownTime(self):
if len(self.deck.showList) >= 5:
return True
if len(self.activePlayers) <= 1:
return True
return False
def goPreFlop(self):
self.dealPlayersHands()
self.askBehaviours()
def goFlop(self):
for i in range(0, 3):
self.deck.dealAndShow()
showList = self.deck.showList
show('flop:' + str(showList[0]) + str(showList[1]) + str(showList[2]))
self.askBehaviours()
def goTurn(self):
self.deck.dealAndShow()
show('turn:' + str(self.deck.showList[3]))
self.askBehaviours()
def goRiver(self):
self.deck.dealAndShow()
show('river:' + str(self.deck.showList[4]))
self.askBehaviours()
def makePlayersHandsValue(self):
for p in self.livingPlayers:
seven = SevenCard.fromCardArray(self.deck.showList, p.hands)
seven.caculateAll()
p.handsValue = seven.value
def makeResult(self):
self.makePlayersHandsValue()
while (len(self.livingPlayers) >= 1):
winners = self.getMaxValuePlayers()
winnerNum = len(winners)
for player in winners:
toWinNum = Decimal()
playPotNum = self.pot[player]
for key in self.pot.keys():
if self.pot[key] >= playPotNum:
self.pot[key] -= playPotNum / winnerNum
toWinNum += playPotNum
else:
toWinNum += self.pot[key] / winnerNum
self.pot[key] = Decimal('0')
if self.pot[key] <= 0 and key in self.livingPlayers:
self.livingPlayers.remove(key)
player.currentMoney += toWinNum
for p in self.players:
print(p)
def getMaxValuePlayers(self):
result = []
maxValue = max(p.handsValue for p in self.livingPlayers)
for p in self.livingPlayers:
if p.handsValue == maxValue:
result.append(p)
return result
| 23.337143 | 72 | 0.680705 |
0ab8a5f6ca2d19559651dd9fe67d1cadd520cf92 | 1,470 | py | Python | tests/tracer/test_version.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 308 | 2016-12-07T16:49:27.000Z | 2022-03-15T10:06:45.000Z | tests/tracer/test_version.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1,928 | 2016-11-28T17:13:18.000Z | 2022-03-31T21:43:19.000Z | tests/tracer/test_version.py | p7g/dd-trace-py | 141ac0ab6e9962e3b3bafc9de172076075289a19 | [
"Apache-2.0",
"BSD-3-Clause"
] | 311 | 2016-11-27T03:01:49.000Z | 2022-03-18T21:34:03.000Z | import sys
import mock
import pkg_resources
from ddtrace.version import get_version
from tests.tracer import _version # noqa: F401 -> we need to import it so that it can be swapped with the test module
def test_get_version_from_version_file():
    """The generated _version module takes priority when present."""
    fake_module = sys.modules["tests.tracer._version"]
    with mock.patch.dict(sys.modules, {"ddtrace._version": fake_module}):
        assert get_version() == "my_test_version_from_generated_file"
def test_get_version_from_pkg_resources():
    """With no _version module, the version comes from pkg_resources."""
    with mock.patch.dict(sys.modules, {"ddtrace._version": None}):
        with mock.patch("pkg_resources.get_distribution") as get_dist:
            get_dist.return_value = FakeDistribution()
            assert get_version() == "my_test_version_from_pkg_resources"
            get_dist.assert_called_with("ddtrace.version")
def test_get_version_dev_fallback():
    """Fall back to "dev" when the distribution cannot be located."""
    with mock.patch.dict(sys.modules, {"ddtrace._version": None}):
        with mock.patch("pkg_resources.get_distribution") as get_dist:
            get_dist.side_effect = pkg_resources.DistributionNotFound()
            assert get_version() == "dev"
class FakeDistributionIterator:
    """Iterator stub that is immediately exhausted."""

    def __init__(self, distribution):
        # the distribution argument is accepted but ignored
        pass

    def __next__(self):
        # always exhausted
        raise StopIteration
class FakeDistribution:
    """Distribution stub exposing a fixed version string."""

    version = "my_test_version_from_pkg_resources"

    def __iter__(self):
        # hand back the always-empty iterator stub
        return FakeDistributionIterator(self)
| 33.409091 | 118 | 0.738095 |
84918b67f40dfd7db20d55819605fedec44cde70 | 5,614 | py | Python | yt_dlp/extractor/scrippsnetworks.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 80 | 2021-05-25T11:33:49.000Z | 2022-03-29T20:36:53.000Z | yt_dlp/extractor/scrippsnetworks.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 53 | 2017-04-12T19:53:18.000Z | 2022-02-22T10:33:13.000Z | yt_dlp/extractor/scrippsnetworks.py | mrBliss/yt-dlp | aecd021656b672dbb617e5bae54a8986f9c4ebaf | [
"Unlicense"
] | 22 | 2021-05-07T05:01:27.000Z | 2022-03-26T19:10:54.000Z | # coding: utf-8
from __future__ import unicode_literals
import json
import hashlib
from .aws import AWSIE
from .anvato import AnvatoIE
from .common import InfoExtractor
from ..utils import (
smuggle_url,
urlencode_postdata,
xpath_text,
)
class ScrippsNetworksWatchIE(AWSIE):
    """Extractor for watch.geniuskitchen.com.

    Authenticates against AWS (Cognito guest identity + STS temporary
    credentials) before querying the SNI digital API for the Anvato media id,
    then delegates extraction to AnvatoIE.
    """
    IE_NAME = 'scrippsnetworks:watch'
    _VALID_URL = r'''(?x)
                    https?://
                        watch\.
                        (?P<site>geniuskitchen)\.com/
                        (?:
                            player\.[A-Z0-9]+\.html\#|
                            show/(?:[^/]+/){2}|
                            player/
                        )
                    (?P<id>\d+)
                    '''
    _TESTS = [{
        'url': 'http://watch.geniuskitchen.com/player/3787617/Ample-Hills-Ice-Cream-Bike/',
        'info_dict': {
            'id': '4194875',
            'ext': 'mp4',
            'title': 'Ample Hills Ice Cream Bike',
            'description': 'Courtney Rada churns up a signature GK Now ice cream with The Scoopmaster.',
            'uploader': 'ANV',
            'upload_date': '20171011',
            'timestamp': 1507698000,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [AnvatoIE.ie_key()],
    }]

    # maps the URL's site key to the SNI brand name used in the API path
    _SNI_TABLE = {
        'geniuskitchen': 'genius',
    }

    _AWS_API_KEY = 'E7wSQmq0qK6xPrF13WmzKiHo4BQ7tip4pQcSXVl1'
    _AWS_PROXY_HOST = 'web.api.video.snidigital.com'

    _AWS_USER_AGENT = 'aws-sdk-js/2.80.0 callback'

    def _real_extract(self, url):
        mobj = self._match_valid_url(url)
        site_id, video_id = mobj.group('site', 'id')

        # Step 1: request an OpenId token for a fixed unauthenticated Cognito
        # identity (the hard-coded IdentityId acts as the site's guest user).
        aws_identity_id_json = json.dumps({
            'IdentityId': '%s:7655847c-0ae7-4d9b-80d6-56c062927eb3' % self._AWS_REGION
        }).encode('utf-8')
        token = self._download_json(
            'https://cognito-identity.%s.amazonaws.com/' % self._AWS_REGION, video_id,
            data=aws_identity_id_json,
            headers={
                'Accept': '*/*',
                'Content-Type': 'application/x-amz-json-1.1',
                'Referer': url,
                'X-Amz-Content-Sha256': hashlib.sha256(aws_identity_id_json).hexdigest(),
                'X-Amz-Target': 'AWSCognitoIdentityService.GetOpenIdToken',
                'X-Amz-User-Agent': self._AWS_USER_AGENT,
            })['Token']

        # Step 2: trade the OpenId token for temporary AWS credentials via
        # STS AssumeRoleWithWebIdentity (the response is XML).
        sts = self._download_xml(
            'https://sts.amazonaws.com/', video_id, data=urlencode_postdata({
                'Action': 'AssumeRoleWithWebIdentity',
                'RoleArn': 'arn:aws:iam::710330595350:role/Cognito_WebAPIUnauth_Role',
                'RoleSessionName': 'web-identity',
                'Version': '2011-06-15',
                'WebIdentityToken': token,
            }), headers={
                'Referer': url,
                'X-Amz-User-Agent': self._AWS_USER_AGENT,
                'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8',
            })

        def get(key):
            # helper: pull a credential field out of the namespaced STS XML
            return xpath_text(
                sts, './/{https://sts.amazonaws.com/doc/2011-06-15/}%s' % key,
                fatal=True)

        # Step 3: query the SNI API (request presumably signed by the AWSIE
        # base-class helper -- confirm) to resolve the Anvato mcpId.
        mcp_id = self._aws_execute_api({
            'uri': '/1/web/brands/%s/episodes/scrid/%s' % (self._SNI_TABLE[site_id], video_id),
            'access_key': get('AccessKeyId'),
            'secret_key': get('SecretAccessKey'),
            'session_token': get('SessionToken'),
        }, video_id)['results'][0]['mcpId']

        # Step 4: delegate extraction to AnvatoIE, geo-restricted to the US.
        return self.url_result(
            smuggle_url(
                'anvato:anvato_scripps_app_web_prod_0837996dbe373629133857ae9eb72e740424d80a:%s' % mcp_id,
                {'geo_countries': ['US']}),
            AnvatoIE.ie_key(), video_id=mcp_id)
class ScrippsNetworksIE(InfoExtractor):
    """Extractor for Scripps Networks brand sites (Cooking Channel, Discovery,
    DIY Network, Food Network, HGTV, Travel Channel) that maps a page URL to
    the matching ThePlatform media URL."""
    _VALID_URL = r'https?://(?:www\.)?(?P<site>cookingchanneltv|discovery|(?:diy|food)network|hgtv|travelchannel)\.com/videos/[0-9a-z-]+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.cookingchanneltv.com/videos/the-best-of-the-best-0260338',
        'info_dict': {
            'id': '0260338',
            'ext': 'mp4',
            'title': 'The Best of the Best',
            'description': 'Catch a new episode of MasterChef Canada Tuedsay at 9/8c.',
            'timestamp': 1475678834,
            'upload_date': '20161005',
            'uploader': 'SCNI-SCND',
        },
        'add_ie': ['ThePlatform'],
    }, {
        'url': 'https://www.diynetwork.com/videos/diy-barnwood-tablet-stand-0265790',
        'only_matching': True,
    }, {
        'url': 'https://www.foodnetwork.com/videos/chocolate-strawberry-cake-roll-7524591',
        'only_matching': True,
    }, {
        'url': 'https://www.hgtv.com/videos/cookie-decorating-101-0301929',
        'only_matching': True,
    }, {
        'url': 'https://www.travelchannel.com/videos/two-climates-one-bag-5302184',
        'only_matching': True,
    }, {
        'url': 'https://www.discovery.com/videos/guardians-of-the-glades-cooking-with-tom-cobb-5578368',
        'only_matching': True,
    }]
    # brand site key -> ThePlatform account id
    _ACCOUNT_MAP = {
        'cookingchanneltv': 2433005105,
        'discovery': 2706091867,
        'diynetwork': 2433004575,
        'foodnetwork': 2433005105,
        'hgtv': 2433004575,
        'travelchannel': 2433005739,
    }
    _TP_TEMPL = 'https://link.theplatform.com/s/ip77QC/media/guid/%d/%s?mbr=true'

    def _real_extract(self, url):
        match = self._match_valid_url(url)
        site = match.group('site')
        guid = match.group('id')
        # build the ThePlatform media URL for the brand's account
        platform_url = self._TP_TEMPL % (self._ACCOUNT_MAP[site], guid)
        return self.url_result(
            smuggle_url(platform_url, {'force_smil_url': True}),
            'ThePlatform', guid)
| 36.934211 | 149 | 0.550053 |
8f94a27439b84c42abf3df7168d5e6a9698570a5 | 870 | py | Python | isi_sdk_8_1_0/test/test_ntp_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_1_0/test/test_ntp_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_1_0/test/test_ntp_settings.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.ntp_settings import NtpSettings # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestNtpSettings(unittest.TestCase):
    """NtpSettings unit test stubs"""

    def setUp(self):
        """No fixtures are required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testNtpSettings(self):
        """Test NtpSettings"""
        # FIXME: construct object with mandatory attributes with example values
        # model = isi_sdk_8_1_0.models.ntp_settings.NtpSettings()  # noqa: E501
        pass
# Run the stub test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 21.219512 | 79 | 0.694253 |
011c9d049977379f58cddffd512c0577ecd0797d | 1,355 | py | Python | password_generator.py | cavadsalman/password_generator | 6f1bb027220fbc2a1076138b8bedff2b87a3e2c1 | [
"MIT"
] | 1 | 2022-03-01T16:34:34.000Z | 2022-03-01T16:34:34.000Z | password_generator.py | cavadsalman/password_generator | 6f1bb027220fbc2a1076138b8bedff2b87a3e2c1 | [
"MIT"
] | null | null | null | password_generator.py | cavadsalman/password_generator | 6f1bb027220fbc2a1076138b8bedff2b87a3e2c1 | [
"MIT"
] | null | null | null | import random
def get_order():
    """Prompt the user for password settings.

    Returns a dict with keys ``length`` (int) and ``content_indexes``
    (string of digits selecting character pools, see ``Characters.indexes``).
    """
    pw_length = int(input('Length: '))
    selection = input("""Content;
    1. Lowercases
    2. Uppercases
    3. Digits
    4. Symbols
    5. Identific
    Select: """)
    return {'length': pw_length, 'content_indexes': selection}
class Characters():
    """Pools of ASCII character codes used to build random passwords.

    ``new_pass`` draws characters from one or more pools selected by the
    digit indexes listed in ``indexes``.
    """

    # Ascii codes of characters
    lowercase = list(range(97, 123))
    uppercase = list(range(65, 91))
    digits = list(range(48, 58))
    symbols = list(range(33, 48)) + list(range(58, 65))
    # the characters accepted by str.isidentifier(): letters, digits and '_'
    identific = lowercase + uppercase + digits + [95]
    # selection index -> attribute name of the matching pool
    indexes = {'1': 'lowercase', '2': 'uppercase', '3': 'digits', '4': 'symbols', '5': 'identific'}

    def new_pass(self, length=8, content_indexes='123'):
        """Return a random password of ``length`` characters drawn from the
        pools selected by ``content_indexes`` (a string of pool indexes).

        :raises KeyError: if ``content_indexes`` contains an unknown index.
        """
        # SECURITY FIX: use the cryptographically secure ``secrets`` RNG
        # instead of ``random`` -- passwords are security-sensitive.
        import secrets
        contents = []
        for i in content_indexes:
            pool_name = self.indexes[i]  # attribute name of selected pool
            contents.extend(getattr(self, pool_name))
        # return result as string
        return ''.join(chr(secrets.choice(contents)) for _ in range(length))
# Interactive driver: repeatedly prompt for password settings and print a
# freshly generated password until the process is interrupted.
if __name__ == '__main__':
    c = Characters()
    while True:
        order = get_order()
        print('Result:', c.new_pass(**order), end='\n\n')
| 33.04878 | 146 | 0.64059 |
c0e3f902ec25f6244714ea0b99ff5f7a763c4d18 | 133,609 | py | Python | ospgrillage/osp_grillage.py | MonashSmartStructures/ops-grillage | 9b7fea6c1279f704e4ca0a7836538fd8963b60cc | [
"MIT"
] | null | null | null | ospgrillage/osp_grillage.py | MonashSmartStructures/ops-grillage | 9b7fea6c1279f704e4ca0a7836538fd8963b60cc | [
"MIT"
] | 1 | 2021-07-21T06:43:25.000Z | 2021-07-21T06:52:46.000Z | ospgrillage/osp_grillage.py | MonashSmartStructures/ops-grillage | 9b7fea6c1279f704e4ca0a7836538fd8963b60cc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This module contain the parent class OspGrillage which handles input information and outputs the grillage model instance
or executable py file. This is done by wrapping `OpenSeesPy` commands for creating models (nodes/elements).
This module also handles all load case assignment, analysis, and results by wrapping `OpenSeesPy` command for analysis
"""
from datetime import datetime
from itertools import combinations
import openseespy.opensees as ops
from ospgrillage.load import *
from ospgrillage.mesh import *
from ospgrillage.material import *
from ospgrillage.members import *
from ospgrillage.postprocessing import *
import xarray as xr
def create_grillage(**kwargs):
    """
    User interface for creating an :class:`~ospgrillage.osp_grillage.OspGrillage` object.

    Keyword arguments forwarded to the constructor:

    :param model_type: Name string of model type - default is "beam"
    :type model_type: str
    :param bridge_name: Name of bridge model and output .py file
    :type bridge_name: str
    :param long_dim: Length of the model in the longitudinal direction (default: x axis)
    :type long_dim: int or float
    :param width: Width of the model in the transverse direction (default: z axis)
    :type width: int or float
    :param skew: Skew angle of the start and end edges of model
    :type skew: int or float
    :param num_long_grid: Number of grid lines in longitudinal direction
    :type num_long_grid: int
    :param num_trans_grid: Number of grid lines in the transverse direction
    :type num_trans_grid: int
    :param edge_beam_dist: Distance of edge beam node lines to exterior main beam node lines
    :type edge_beam_dist: int or float
    :param mesh_type: Type of mesh either "Ortho" or "Oblique" - default "Ortho"
    :type mesh_type: string
    :param kwargs: See below

    :keyword:
        * ext_to_int_dist: (Int or Float, or a List of Int or Float) distance between internal beams and exterior main beams (on both sides)

    The ``model_type`` keyword selects the concrete class that is returned.

    :returns: :class:`~ospgrillage.osp_grillage.OspGrillageBeam` or :class:`~ospgrillage.osp_grillage.OspGrillageShell`
    """
    selected_type = kwargs.get("model_type", "beam_only")
    # shell model type gets the shell grillage class; everything else falls
    # back to the default beam-element grillage
    grillage_class = (
        OspGrillageShell if selected_type == "shell_beam" else OspGrillageBeam
    )
    return grillage_class(**kwargs)
class GrillageElement:
    """Container for grillage element data used to build ops.element() commands.

    Handled by the OspGrillage class to move information between the
    GrillageMember, Mesh, and OspGrillage classes.
    """

    def __init__(self):
        # node tags of the element, populated during mesh assignment
        self.ele_node_list = []
        # TODO trial with set_member() function
class OspGrillage:
"""
Base class of grillage model. Stores information about mesh and grillage elements. Also provides methods to
add load cases to grillage model for analysis.
The class constructor provides an interface for the user to specify the geometric properties of the grillage model
and its mesh.
"""
def __init__(
self,
bridge_name,
long_dim,
width,
skew: Union[list, float, int],
num_long_grid: int,
num_trans_grid: int,
edge_beam_dist: Union[list, float, int],
mesh_type="Ortho",
model="3D",
**kwargs
):
"""
Init the OspGrillage class
:param bridge_name: Name of bridge model and output .py file
:type bridge_name: str
:param long_dim: Length of the model in the longitudinal direction (default: x axis)
:type long_dim: int or float
:param width: Width of the model in the transverse direction (default: z axis)
:type width: int or float
:param skew: Skew angle of the start and end edges of model
:type skew: int or float
:param num_long_grid: Number of grid lines in longitudinal direction
:type num_long_grid: int
:param num_trans_grid: Number of grid lines in the transverse direction -
:type num_trans_grid: int
:param edge_beam_dist: Distance of edge beam node lines to exterior main beam node lines
:type edge_beam_dist: int or float
:param mesh_type: Type of mesh either "Ortho" for orthogonal mesh or "Oblique" for oblique mesh
:type mesh_type: string
:param kwargs: See below
:keyword:
* ext_to_int_dist: (Int or Float, or a List of Int or Float) distance between internal beams and exterior main beams (on both sides)
:raises ValueError: If skew angle is greater than 90. If number of transverse grid line is less than 2.
"""
# store geometry input
self.mesh_type = mesh_type # mesh type either orthogonal or oblique
self.model_name = bridge_name # name string
self.long_dim = long_dim # span , defined c/c between support bearings
self.width = width # length of the bearing support - if skew = 0 , this corresponds to width of bridge
self.num_long_gird = num_long_grid # number of longitudinal beams
self.num_trans_grid = num_trans_grid # number of grids for transverse members
self.edge_width = edge_beam_dist # width of cantilever edge beam
# if skew is a list containing 2 skew angles, then set angles to the start and end edge of span
if isinstance(skew, list):
self.skew_a = skew[0]
if len(skew) >= 2:
self.skew_b = skew[1]
else: # both start and end edge span edges have same angle
self.skew_a = skew # angle in degrees
self.skew_b = skew # angle in degrees
# check if angle is greater than 90
if any([np.abs(self.skew_a) > 90, np.abs(self.skew_b) > 90]):
raise ValueError(
"Skew angle either start or end edge exceeds 90 degrees. Allowable range is -90 to 90"
)
# next check if arctan (L/w)
# check if transverse grid lines is not less than or equal to 2
if num_trans_grid <= 2:
raise ValueError(
"invalid num_trans_grid value - hint: should be greater than 2 to have at least 3 grid "
"lines"
)
# check if edge beam dist is provided as a list of size 2, set to edge_beam a and edge_beam b respectively
if isinstance(edge_beam_dist, list):
self.edge_width_a = edge_beam_dist[0]
if len(edge_beam_dist) >= 2:
self.edge_width_b = edge_beam_dist[1]
else:
self.edge_width_b = edge_beam_dist[0]
else: # same edge distance, set to both a and b
self.edge_width_a = edge_beam_dist
self.edge_width_b = edge_beam_dist
# exterior to interior beam distance, get from kwargs
ext_to_int_dist = kwargs.get("ext_to_int_dist", None)
if isinstance(ext_to_int_dist, list):
self.ext_to_int_a = ext_to_int_dist[0]
if len(ext_to_int_dist) >= 2:
self.ext_to_int_b = ext_to_int_dist[1]
else:
self.ext_to_int_b = ext_to_int_dist[0]
else: # set same
self.ext_to_int_a = ext_to_int_dist
self.ext_to_int_b = ext_to_int_dist
# instantiate variables
self.global_mat_object = [] # material matrix
self.global_line_int_dict = []
# list of components tags
self.element_command_list = [] # list of str of ops.element() commands
self.section_command_list = [] # list of str of ops.section() commands
self.material_command_list = [] # list of str of ops.material() commands
# list of common grillage elements - base class variable
self.common_grillage_element_keys = [
"edge_beam",
"exterior_main_beam_1",
"interior_main_beam",
"exterior_main_beam_2",
"start_edge",
"end_edge",
"transverse_slab",
]
# prefix index of members after longitudinal members
self.long_member_index = 4 # 0,1,2,3 correspond to edge, ext_a, interior_beam,
# dict storing information
self.common_grillage_element = dict() # of common grillage
self.section_dict = {} # of section tags
self.material_dict = {} # of material tags
# variables related to analysis - which can be unique to element/material/ types
self.constraint_type = "Plain" # base class - plain
# collect mesh groups
self.mesh_group = [] # for future release
if self.mesh_type == "Ortho":
self.ortho_mesh = True
else:
self.ortho_mesh = False
self.y_elevation = 0 # default model plane is orthogonal plane of y = 0
self.min_grid_ortho = 3 # for orthogonal mesh (skew>skew_threshold) region of orthogonal area default 3
# set model space and degree's of freedom according to user input for model space
if model == "2D":
self.__ndm = 2 # OpenSess dimension 2
self.__ndf = 3 # Degrees' of Freedom per node 3
else:
self.__ndm = 3 # OpenSees dimension 3
self.__ndf = 6 # Degrees' of Freedom per node 6
# default vector for standard (for 2D grillage in x - z plane) - 1 represent fix for [Vx,Vy,Vz, Mx, My, Mz]
self.fix_val_pin = [1, 1, 1, 0, 0, 0] # pinned
self.fix_val_roller_x = [0, 1, 1, 0, 0, 0] # roller
self.fix_val_fixed = [1, 1, 1, 1, 1, 1] # rigid /fixed support
# default dict for support conditions
self.fixity_vector = {
"pin": [1, 1, 1, 0, 0, 0],
"roller": [0, 1, 1, 0, 0, 0],
"fixed": [1, 1, 1, 1, 1, 1],
}
# special rules for grillage - alternative to Properties of grillage definition - use for special dimensions
self.skew_threshold = [
10,
30,
] # threshold for grillage to allow option of mesh choices
self.deci_tol = 4 # tol of decimal places
# dict for load cases and load types
self.global_load_str = [] # store load() commands
self.global_patch_int_dict = dict() # store patch intersection grid information
self.load_case_list = (
[]
) # list of dict, example [{'loadcase':LoadCase object, 'load_command': list of str}..]
self.load_combination_dict = (
dict()
) # example {0:[{'loadcase':LoadCase object, 'load_command': list of str},
# {'loadcase':LoadCase object, 'load_command': list of str}....]}
self.moving_load_case_dict = dict() # example [ list of load_case_dict]\
# counters to keep track of ops time series and ops pattern objects for loading
self.global_time_series_counter = 1
self.global_pattern_counter = 1
# file name for output py file
self.filename = "{}_op.py".format(self.model_name)
# calculate edge length of grillage
self.trans_dim = self.width / math.cos(self.skew_a / 180 * math.pi)
# Mesh objects, pyfile flag, and verbose flag
self.pyfile = None
self.results = None
self.diagnostics = kwargs.get(
"diagnostics", False
) # flag for diagnostics printed to terminal
# kwargs for rigid link modelling option
self.model_type = kwargs.get(
"model_type", "beam_only"
) # accepts int type 1 or 2
# create mesh object of grillage
self.Mesh_obj = self._create_mesh(
long_dim=self.long_dim,
width=self.width,
trans_dim=self.trans_dim,
num_trans_beam=self.num_trans_grid,
num_long_beam=self.num_long_gird,
ext_to_int_a=self.ext_to_int_a,
ext_to_int_b=self.ext_to_int_b,
skew_1=self.skew_a,
edge_dist_a=self.edge_width_a,
edge_dist_b=self.edge_width_b,
skew_2=self.skew_b,
orthogonal=self.ortho_mesh,
**kwargs
)
# create dict of standard elements from the generated Mesh obj
self._create_standard_element_list() # base class method, concrete classes may overwrite this
def _create_mesh(self, **kwargs):
"""
Private function to create mesh. Creates the concrete Mesh class based on mesh type specified
"""
if self.model_type == "beam_link":
mesh_obj = BeamLinkMesh(**kwargs)
elif self.model_type == "shell_beam":
mesh_obj = ShellLinkMesh(**kwargs)
elif self.model_type == "beam_only":
mesh_obj = BeamMesh(**kwargs)
else:
mesh_obj = None
if self.diagnostics:
print("Meshing complete")
return mesh_obj
# interface function
    def create_osp_model(self, pyfile=False):
        """
        Function to create model instance in OpenSees model space. If pyfile input is True, function creates an
        executable pyfile for generating the grillage model in OpenSees model space.

        :param pyfile: if True returns an executable py file instead of creating OpenSees instance of model.
        :type pyfile: bool

        """
        self.pyfile = pyfile

        # if output mode, create the py file
        if self.pyfile:
            with open(self.filename, "w") as file_handle:
                # create py file or overwrite existing
                # writing headers and description at top of file
                file_handle.write(
                    "# Grillage generator wizard\n# Model name: {}\n".format(
                        self.model_name
                    )
                )
                # time
                now = datetime.now()
                dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
                file_handle.write("# Constructed on:{}\n".format(dt_string))
                # write imports
                file_handle.write(
                    "import numpy as np\nimport math\nimport openseespy.opensees as ops"
                    "\nimport openseespy.postprocessing.Get_Rendering as opsplt\n"
                )
        # emit the ops.model() setup (written to file or executed, depending
        # on the pyfile flag)
        self._write_op_model()
        # run model generation in OpenSees or write generation command to py file
        self._run_mesh_generation()
        # create the result object for the grillage model
        self.results = Results(self.Mesh_obj)
# function to run mesh generation
    def _run_mesh_generation(self):
        """
        Private function to write / execute commands. This function communicates with OpenSees framework.

        Emits, in this order: node commands, support fixity commands, geometric
        transformation commands, then the previously collected material,
        section and element command strings. In pyfile mode the strings are
        appended to the output script; otherwise each string is executed
        against the live OpenSees instance via eval().
        """
        # 2 generate command lines in output py file
        self._write_op_node(self.Mesh_obj)  # write node() commands
        self._write_op_fix(self.Mesh_obj)  # write fix() command for support nodes
        self._write_geom_transf(self.Mesh_obj)  # x dir members
        # write / execute material and sections
        for mat_str in self.material_command_list:
            if self.pyfile:
                with open(self.filename, "a") as file_handle:
                    file_handle.write("# Material definition \n")
                    file_handle.write(mat_str)
            else:
                # NOTE: eval() executes internally generated ops.* command
                # strings; acceptable only because the strings are built by
                # this module, never from user input.
                eval(mat_str)
        for sec_str in self.section_command_list:
            if self.pyfile:
                with open(self.filename, "a") as file_handle:
                    file_handle.write("# Create section: \n")
                    file_handle.write(sec_str)
            else:
                eval(sec_str)
        # write /execute element commands
        for ele_dict in self.element_command_list:
            for ele_list in ele_dict.values():
                for ele_str in ele_list:
                    if self.pyfile:
                        with open(self.filename, "a") as file_handle:
                            file_handle.write(ele_str)
                    else:
                        eval(ele_str)
# interface function
def set_boundary_condition(
self, edge_group_counter=[1], new_restraint_vector=None, group_to_exclude=[0]
):
"""
Function to set or modify customized support conditions.
.. note::
This feature to be available for future release.
"""
if new_restraint_vector:
self.fix_val_pin = [1, 1, 1, 0, 0, 0] # pinned
self.fix_val_roller_x = [0, 1, 1, 0, 0, 0] # roller
self.fix_val_fixed = [1, 1, 1, 1, 1, 1]
pass
# private functions to write ops commands to output py file.
def _write_geom_transf(self, mesh_obj, transform_type="Linear"):
"""
Private function to write ops.geomTransf() to output py file.
:param transform_type: transformation type
:type transform_type: str
"""
# loop all transform dict items,
for k, v in mesh_obj.transform_dict.items():
vxz = k.split("|")[0] # first substring is vector xz
offset = k.split("|")[
1
] # second substring is global offset of node i and j of element
if eval(offset):
offset_list = eval(
offset
) # list of global offset of node i entry 0 and node j entry 1
if self.pyfile:
with open(self.filename, "a") as file_handle:
file_handle.write(
'ops.geomTransf("{type}", {tag}, *{vxz}, {offset_i}, {offset_j})\n'.format(
type=transform_type,
tag=v,
vxz=eval(vxz),
offset_i=offset_list[0],
offset_j=offset_list[1],
)
)
else:
ops.geomTransf(
transform_type, v, *eval(vxz), *offset_list[0], *offset_list[1]
)
else:
if self.pyfile:
with open(self.filename, "a") as file_handle:
file_handle.write("# create transformation {}\n".format(v))
file_handle.write(
'ops.geomTransf("{type}", {tag}, *{vxz})\n'.format(
type=transform_type, tag=v, vxz=eval(vxz)
)
)
else:
ops.geomTransf(transform_type, v, *eval(vxz))
# loop to add geom transf obj for additional transformation i.e. element with rigid links
def _write_op_model(self):
"""
Private function to instantiate the OpenSees model
space. If pyfile flagged as True, this function writes the instantiating commands e.g. ops.model() to the
output py file.
.. note:
For 3-D model, the default model dimension and node degree-of-freedoms are 3 and 6 respectively.
This method automatically sets the aforementioned parameters to 2 and 4 respectively, for a 2-D problem.
"""
# check if write or eval command
if self.pyfile:
with open(self.filename, "a") as file_handle:
file_handle.write("ops.wipe()\n")
file_handle.write(
"ops.model('basic', '-ndm', {ndm}, '-ndf', {ndf})\n".format(
ndm=self.__ndm, ndf=self.__ndf
)
)
else:
ops.wipe()
ops.model("basic", "-ndm", self.__ndm, "-ndf", self.__ndf)
def _write_op_node(self, mesh_obj):
"""
Private function to write or execute the ops.node command to
create nodes in OpenSees model space. If pyfile is flagged true, writes the ops.nodes() command to py file
instead.
"""
# check if write mode, write header for node commands
if self.pyfile:
with open(self.filename, "a") as file_handle:
file_handle.write("# Model nodes\n")
# loop all node in dict, write or eval node command
for (
k,
nested_v,
) in mesh_obj.node_spec.items():
coordinate = nested_v["coordinate"]
if self.pyfile:
with open(self.filename, "a") as file_handle:
file_handle.write(
"ops.node({tag}, {x:.4f}, {y:.4f}, {z:.4f})\n".format(
tag=nested_v["tag"],
x=coordinate[0],
y=coordinate[1],
z=coordinate[2],
)
)
else: # indices correspondence . 0 - x , 1 - y, 2 - z
ops.node(nested_v["tag"], coordinate[0], coordinate[1], coordinate[2])
def _write_op_fix(self, mesh_obj):
"""
Private function to write the ops.fix() command for
boundary condition definition in the grillage model. If pyfile is flagged true, writes
the ops.fix() command to py file instead.
"""
if self.pyfile:
with open(self.filename, "a") as file_handle:
file_handle.write("# Boundary condition implementation\n")
# TODO generalize for user input of boundary condition
for node_tag, edge_group_num in mesh_obj.edge_node_recorder.items():
# if node is an edge beam - is part of common group z ==0 ,do not assign any fixity
if (
mesh_obj.node_spec[node_tag]["z_group"]
in mesh_obj.common_z_group_element[0]
): # here [0] is first group
pass # move to next node in edge recorder
elif edge_group_num == 0: # 0 is edge of start of span
if self.pyfile: # if writing py file
with open(self.filename, "a") as file_handle:
file_handle.write(
"ops.fix({}, *{})\n".format(
node_tag, self.fixity_vector["pin"]
)
)
else: # run instance
ops.fix(node_tag, *self.fixity_vector["pin"])
elif edge_group_num == 1: # 1 is edge of end of span
if self.pyfile: # if writing py file
with open(self.filename, "a") as file_handle:
file_handle.write(
"ops.fix({}, *{})\n".format(
node_tag, self.fixity_vector["roller"]
)
)
else: # run instance
ops.fix(node_tag, *self.fixity_vector["roller"])
def _write_material(
self, member: GrillageMember = None, material: Material = None
) -> int:
"""
Private function to write Material command of the model class.
"""
material_obj = None
# check if material input is valid,
if member is None and material is None:
raise Exception(
"Uniaxial material has no input GrillageMember or Material Object"
)
elif member is None:
# This is for the option of updating preivously defined material commands
material_obj = material
elif material is None:
material_obj = member.material
if not member.material_command_flag:
return 1 # placeholder num, no material command is written/executed
# access member class object's material - get the material arguments and command
(
material_type,
op_mat_arg,
) = member.material.get_material_args() # get the material arguments
# - write unique material tag and input argument to store as key for dict
material_str = [
material_type,
op_mat_arg,
] # repr both variables as a list for keyword definition
# if section is specified, get the materialtagcounter for material() assignment
if not bool(self.material_dict):
lastmaterialtag = 0 # if dict empty, start counter at 1
else: # set materialtagcounter as the latest defined element - i.e. max of section_dict
lastmaterialtag = self.material_dict[list(self.material_dict)[-1]]
material_tag = self.material_dict.setdefault(
repr(material_str), lastmaterialtag + 1
) # set key for material
# check if the material_tag is a previously assigned key, if not, append to materal_command_list variable
if material_tag != lastmaterialtag:
mat_str = member.material.get_ops_material_command(
material_tag=material_tag
)
self.material_command_list.append(mat_str)
else: # material tag defined, skip, print to terminal
if self.diagnostics:
print(
"Material {} with tag {} has been previously defined".format(
material_type, material_tag
)
)
return material_tag
def _write_section(self, grillage_member_obj: GrillageMember) -> int:
"""
Private function to write section() command for the elements.
"""
# checks if grillage member's element type requires the generation of ops.section()
if not grillage_member_obj.section_command_flag:
return 1 # return a placeholder num, no section is written
# extract section variables from Section class
section_type = grillage_member_obj.section # get section type
section_arg = grillage_member_obj.get_section_arguments() # get arguments
section_str = [
section_type,
section_arg,
] # repr both variables as a list for keyword definition
# if section is specified, get the sectiontagcounter for section assignment
if not bool(self.section_dict):
lastsectioncounter = 0 # if dict empty, start counter at 0
else: # dict not empty, get default value as latest defined tag
lastsectioncounter = self.section_dict[list(self.section_dict)[-1]]
# set section tag or get section tag if already been assigned
previously_defined_section = list(self.section_dict.values())
sectiontagcounter = self.section_dict.setdefault(
repr(section_str), lastsectioncounter + 1
)
if sectiontagcounter not in previously_defined_section:
sec_str = grillage_member_obj.get_ops_section_command(
section_tag=sectiontagcounter
)
self.section_command_list.append(sec_str)
# print to terminal
if self.diagnostics:
print(
"Section {}, of tag {} created".format(
section_type, sectiontagcounter
)
)
else:
if self.diagnostics:
print(
"Section {} with tag {} has been previously defined".format(
section_type, sectiontagcounter
)
)
return sectiontagcounter
def _create_standard_element_list(self):
"""
Private method to populate common_grillage_element dict. This is the base class variant -concrete classes of
grillage may have different elements
Base class variant is beam grillage model.
"""
# loop through base dict for grillage elements, sort members based on four groups (edge,ext_a,int,ext_b).
for key, val in zip(
self.common_grillage_element_keys[0 : self.long_member_index],
sort_list_into_four_groups(self.Mesh_obj.model_plane_z_groups).values(),
):
self.common_grillage_element.update({key: val})
# populate start edge and end edge entries
self.common_grillage_element[self.common_grillage_element_keys[4]] = [0]
self.common_grillage_element[self.common_grillage_element_keys[5]] = [1]
# interface function
    def set_member(self, grillage_member_obj: GrillageMember, member=None):
        """
        Function to set grillage member class object to elements of grillage members.

        Writes (or records) the member's section, material and element
        commands; element command strings are appended to
        ``self.element_command_list`` keyed by the member category.

        :param grillage_member_obj: `GrillageMember` class object
        :type grillage_member_obj: GrillageMember
        :param member: str of member category - see below table for the available name strings
        :type member: str

        ===================================== ======================================
        Standard grillage elements name str   Description
        ===================================== ======================================
        edge_beam                             Elements along x axis at top and bottom edges of mesh (z = 0, z = width)
        exterior_main_beam_1                  Elements along first grid line after bottom edge (z = 0)
        interior_main_beam                    For all elements in x direction between grid lines of exterior_main_beam_1 and exterior_main_beam_2
        exterior_main_beam_1                  Elements along first grid line after top edge (z = width)
        start_edge                            Elements along z axis where longitudinal grid line x = 0
        end_edge                              Elements along z axis where longitudinal grid line x = Length
        transverse_slab                       For all elements in transverse direction between start_edge and end_edge
        ===================================== ======================================

        :raises: ValueError If missing argument for member=
        """
        if self.diagnostics:
            print("Setting member: {} of model".format(member))
        if member is None:
            raise ValueError(
                "Missing target elements of grillage model to be assigned. Hint, member="
            )
        # check and write member's section command
        section_tag = self._write_section(grillage_member_obj)
        # check and write member's material command
        material_tag = self._write_material(member=grillage_member_obj)
        # dictionary for key = common member tag, val is list of str for ops.element()
        ele_command_dict = dict()
        ele_command_list = []
        # if option for pyfile is True, write the header for element group commands
        if self.pyfile:
            with open(self.filename, "a") as file_handle:
                file_handle.write(
                    "# Element generation for member: {}\n".format(member)
                )
        # lookup member grouping
        # z_flag, x_flag, edge_flag, common_member_tag = self._create_standard_element_list(namestring=member)
        ele_width = 1  # set default ele width 1
        # if member properties is based on unit width (e.g. slab elements), get width of element and assign properties
        if grillage_member_obj.section.unit_width:
            # last entry of common_grillage_element_keys is the transverse slab group
            if member == self.common_grillage_element_keys[-1]:
                for ele in self.Mesh_obj.trans_ele:
                    n1 = ele[1]  # node i
                    n2 = ele[2]  # node j
                    node_tag_list = [n1, n2]
                    # get node width of node_i and node_j
                    lis_1 = self.Mesh_obj.node_width_x_dict[n1]
                    lis_2 = self.Mesh_obj.node_width_x_dict[n2]
                    ele_width = 1
                    ele_width_record = []
                    # for the two list of vicinity nodes, find their distance and store in ele_width_record
                    for lis in [lis_1, lis_2]:
                        if len(lis) == 1:
                            ele_width_record.append(
                                np.sqrt(
                                    lis[0][0] ** 2 + lis[0][1] ** 2 + lis[0][2] ** 2
                                )
                                / 2
                            )
                        elif len(lis) == 2:
                            ele_width_record.append(
                                (
                                    np.sqrt(
                                        lis[0][0] ** 2 + lis[0][1] ** 2 + lis[0][2] ** 2
                                    )
                                    + np.sqrt(
                                        lis[1][0] ** 2 + lis[1][1] ** 2 + lis[1][2] ** 2
                                    )
                                )
                                / 2
                            )
                        else:
                            # NOTE(review): breaking here can leave
                            # ele_width_record short or empty, in which case
                            # np.mean([]) below yields nan - TODO confirm
                            # this is the intended behaviour
                            break  # has assigned element, continue to next check
                    ele_width = np.mean(
                        ele_width_record
                    )  # if node lies between a triangular and quadrilateral grid, get mean between
                    # both width
                    # here take the average width in x directions
                    ele_str = grillage_member_obj.get_element_command_str(
                        ele_tag=ele[0],
                        node_tag_list=node_tag_list,
                        transf_tag=ele[4],
                        ele_width=ele_width,
                        materialtag=material_tag,
                        sectiontag=section_tag,
                    )
                    ele_command_list.append(ele_str)
            elif member == "start_edge" or member == "end_edge":
                # unit-width edge members get a fixed nominal width
                for edge_group in self.common_grillage_element[member]:
                    for edge_ele in self.Mesh_obj.edge_group_to_ele[edge_group]:
                        edge_ele_width = 0.5  # nominal half -m width
                        node_tag_list = [edge_ele[1], edge_ele[2]]
                        ele_str = grillage_member_obj.get_element_command_str(
                            ele_tag=edge_ele[0],
                            node_tag_list=node_tag_list,
                            transf_tag=edge_ele[4],
                            ele_width=edge_ele_width,
                            materialtag=material_tag,
                            sectiontag=section_tag,
                        )
                        ele_command_list.append(ele_str)
        else:  # non-unit width member assignment
            # if start and end edge elements
            if member == "start_edge" or member == "end_edge":
                for edge_group in self.common_grillage_element[member]:
                    ele_command_list += self._get_element_command_list(
                        grillage_member_obj=grillage_member_obj,
                        list_of_ele=self.Mesh_obj.edge_group_to_ele[edge_group],
                        material_tag=material_tag,
                        section_tag=section_tag,
                    )
            # check if non-unit width transverse slab assigment
            elif member == self.common_grillage_element_keys[-1]:
                ele_command_list = self._get_element_command_list(
                    grillage_member_obj=grillage_member_obj,
                    list_of_ele=self.Mesh_obj.trans_ele,
                    material_tag=material_tag,
                    section_tag=section_tag,
                )
            else:  # longitudinal members
                for z_group in self.common_grillage_element[member]:
                    ele_command_list += self._get_element_command_list(
                        grillage_member_obj=grillage_member_obj,
                        list_of_ele=self.Mesh_obj.z_group_to_ele[z_group],
                        material_tag=material_tag,
                        section_tag=section_tag,
                    )
        # record the element commands under this member category
        ele_command_dict[member] = ele_command_list
        self.element_command_list.append(ele_command_dict)
# subfunctions of set_member function
@staticmethod
def _get_element_command_list(
grillage_member_obj, list_of_ele, material_tag, section_tag
):
"""
Private unction to get list of element command string
:param grillage_member_obj:
:param list_of_ele:
:param material_tag:
:param section_tag:
:return: list of string consisting element() commands for creating elements
"""
ele_command_list = []
for ele in list_of_ele:
n1 = ele[1] # node i
n2 = ele[2] # node j
node_tag_list = [n1, n2]
ele_width = 1
ele_str = grillage_member_obj.get_element_command_str(
ele_tag=ele[0],
node_tag_list=node_tag_list,
transf_tag=ele[4],
ele_width=ele_width,
materialtag=material_tag,
sectiontag=section_tag,
)
ele_command_list.append(ele_str)
return ele_command_list
# interface function
def set_material(self, material_obj):
"""
Function to define a global material model. This function proceeds to write write the material() command to
output file. By default, function is only called and handled within set_member function. When called by user,
function creates a material object instance to be set for the OpenSees instance.
.. note::
Currently, function does not have overwriting feature yet.
"""
# set material to global material object
self.global_mat_object = material_obj # material matrix for
# write uniaxialMaterial() command to output file
self._write_material(material=material_obj)
# ---------------------------------------------------------------
# Functions to query nodes or grids correspond to a point or line + distributing
# loads to grillage nodes. These are not accessible part of API
# private procedure to find elements within a grid
def _get_elements(self, node_tag_combo):
# abstracted procedure to find and return the long and trans elements within a grid of 4 or 3 nodes
record_long = []
record_trans = []
record_edge = []
for combi in node_tag_combo:
long_mem_index = [
i
for i, x in enumerate(
[
combi[0] in n[1:3] and combi[1] in n[1:3]
for n in self.Mesh_obj.long_ele
]
)
if x
]
trans_mem_index = [
i
for i, x in enumerate(
[
combi[0] in n[1:3] and combi[1] in n[1:3]
for n in self.Mesh_obj.trans_ele
]
)
if x
]
edge_mem_index = [
i
for i, x in enumerate(
[
combi[0] in n[1:3] and combi[1] in n[1:3]
for n in self.Mesh_obj.edge_span_ele
]
)
if x
]
record_long = record_long + long_mem_index # record
record_trans = record_trans + trans_mem_index # record
record_edge = record_edge + edge_mem_index
return record_long, record_trans, record_edge
# Getter for Points Loads nodes
def _get_point_load_nodes(self, point):
# procedure
# 1 find the closest node 2 find the respective grid within the closest node
# extract points
loading_point = None
grid = None
if type(point) is float or type(point) is list:
x = point[0]
y = point[1] # default y = self.y_elevation = 0
z = point[2]
# set point to tuple
loading_point = Point(x, y, z)
elif isinstance(point, LoadPoint):
loading_point = point
for grid_tag, grid_nodes in self.Mesh_obj.grid_number_dict.items():
# get grid nodes coordinate as named tuple Point
point_list = []
for node_tag in grid_nodes:
coord = self.Mesh_obj.node_spec[node_tag]["coordinate"]
coord_point = Point(coord[0], coord[1], coord[2])
point_list.append(coord_point)
if check_point_in_grid(loading_point, point_list):
node_list = point_list
grid = grid_tag
node_list = self.Mesh_obj.grid_number_dict.get(grid, None)
return node_list, grid # grid = grid number
# Getter for Line loads nodes
    def _get_line_load_nodes(self, line_load_obj=None, list_of_load_vertices=None):
        """
        Find the grids crossed by a line load and the intersection points of
        the line segment within each grid.

        Exactly one of the two inputs must be provided.

        :param line_load_obj: line load object providing ``load_point_1`` and
            ``line_end_point`` (mutually exclusive with
            ``list_of_load_vertices``)
        :param list_of_load_vertices: two load vertices [start, end]
        :return: tuple (edited_dict, colinear_spec). ``edited_dict`` maps
            grid number -> {"long_intersect", "trans_intersect",
            "edge_intersect", "ends"} coordinate lists; ``colinear_spec`` is
            a list of [ele_tag, point_a, point_b] records for elements
            colinear with the line.
        :raises Exception: when both or neither input is given
        """
        # from starting point of line load
        # initiate variables
        next_grid = []
        x = 0
        z = 0
        x_start = []
        z_start = []
        colinear_spec = (
            []
        )  # list storing coordinates *sublist of element coinciding points
        # colinear_spec has the following properties: key (ele number), [point1, point2]
        intersect_spec = (
            dict()
        )  # a sub dict for characterizing the line segment's intersecting points within grid
        grid_inter_points = []
        # process inputs - exactly one of the two forms is accepted
        if line_load_obj is None and list_of_load_vertices is not None:
            start_load_vertex = list_of_load_vertices[0]  # first point is start point
            end_load_vertex = list_of_load_vertices[1]  # second point is end point
        elif line_load_obj is not None and list_of_load_vertices is None:
            start_load_vertex = line_load_obj.load_point_1
            end_load_vertex = line_load_obj.line_end_point
        else:
            raise Exception(
                "Error is defining points of line/patch on grillage: hint check load points vertices of "
                "load obj"
            )
        # sub_dict has the following keys:
        # {bound: , long_intersect: , trans_intersect, edge_intersect, ends:}
        # find grids where start point of line load lies in
        start_nd, start_grid = self._get_point_load_nodes(start_load_vertex)
        last_nd, last_grid = self._get_point_load_nodes(end_load_vertex)
        line_grid_intersect = dict()
        # loop each grid check if line segment lies in grid
        for grid_tag, grid_nodes in self.Mesh_obj.grid_number_dict.items():
            point_list = []
            # get coordinates of all nodes in grid
            for node_tag in grid_nodes:
                coord = self.Mesh_obj.node_spec[node_tag]["coordinate"]
                coord_point = Point(coord[0], coord[1], coord[2])
                point_list.append(coord_point)
            # get long, trans and edge elements in the grids. This is for searching intersection later on
            element_combi = combinations(grid_nodes, 2)
            long_ele_index, trans_ele_index, edge_ele_index = self._get_elements(
                element_combi
            )
            (
                Rz,
                Rx,
                Redge,
                R_z_col,
                R_x_col,
                R_edge_col,
            ) = self._get_intersecting_elements(
                grid_tag,
                start_grid,
                last_grid,
                start_load_vertex,
                end_load_vertex,
                long_ele_index,
                trans_ele_index,
                edge_ele_index,
            )
            # if colinear, assign to colinear_spec
            if any([R_z_col, R_x_col, R_edge_col]):
                if R_z_col:
                    colinear_spec += R_z_col
                if R_x_col:
                    colinear_spec += R_x_col
                if R_edge_col:
                    colinear_spec += R_edge_col
            # if no intersection, continue to next grid
            elif Rz == [] and Rx == [] and Redge == []:
                continue
            else:  # intersection point exist, record to intersect_spec and set to dict
                intersect_spec.setdefault("long_intersect", Rz)
                intersect_spec.setdefault("trans_intersect", Rx)
                #
                intersect_spec.setdefault("edge_intersect", Redge)
                grid_inter_points += Rz + Rx + Redge
                # check if point is not double assigned
                line_grid_intersect.setdefault(grid_tag, intersect_spec)
                intersect_spec = dict()
        # update line_grid_intersect by removing grids if line coincide with elements and multiple grids of vicinity
        # grids are returned with same values
        removed_key = []
        edited_dict = line_grid_intersect.copy()
        # if line does not intersect any grid, overwrite edited_dict
        if not edited_dict:
            for key in self.Mesh_obj.grid_number_dict.keys():
                edited_dict.setdefault(
                    key,
                    {"long_intersect": [], "trans_intersect": [], "edge_intersect": []},
                )
        # update line_grid_intersect adding start and end points of line segment to the dict within grid key
        for grid_key, int_list in edited_dict.items():
            point_tuple_list = []
            int_list.setdefault("ends", [])  # set the key pair to empty list
            for node_tag in self.Mesh_obj.grid_number_dict[grid_key]:
                coord = self.Mesh_obj.node_spec[node_tag]["coordinate"]
                coord_point = Point(coord[0], coord[1], coord[2])
                point_tuple_list.append(coord_point)
            if check_point_in_grid(start_load_vertex, point_tuple_list):
                int_list["ends"].append(
                    [start_load_vertex.x, start_load_vertex.y, start_load_vertex.z]
                )
            # NOTE(review): this else pairs only with the end-vertex check
            # below; the start-vertex check above has no else branch - TODO
            # confirm the asymmetry is intended
            if check_point_in_grid(end_load_vertex, point_tuple_list):
                int_list["ends"].append(
                    [end_load_vertex.x, end_load_vertex.y, end_load_vertex.z]
                )
            else:
                int_list.setdefault("ends", [])
        # loop to remove empty entries
        for grid_key, int_list in list(edited_dict.items()):
            if all([val == [] for val in int_list.values()]):
                del edited_dict[grid_key]
        # last check to remove duplicate grids due to having colinear conditions
        # i.e. where two vicinity grids with same intersection points are stored in edited_dict
        for grid_key, int_list in line_grid_intersect.items():
            if grid_key not in removed_key:
                check_dup_list = [
                    int_list == val for val in line_grid_intersect.values()
                ]
                # if there are duplicates check_dup_list will be greater than 1,
                # another case to remove if
                if sum(check_dup_list) > 1:
                    # check if grid key is a vicinity grid of current grid_key
                    for dup_key in [
                        key
                        for (count, key) in enumerate(line_grid_intersect.keys())
                        if check_dup_list[count] and key is not grid_key
                    ]:
                        if dup_key in [start_grid, last_grid]:
                            continue
                        elif (
                            dup_key
                            in self.Mesh_obj.grid_vicinity_dict[grid_key].values()
                        ):
                            removed_key.append(dup_key)
                            del edited_dict[dup_key]
        return edited_dict, colinear_spec
# private function to find intersection points of line/patch edge within grid
def _get_intersecting_elements(
self,
current_grid,
line_start_grid,
line_end_grid,
start_point,
end_point,
long_ele_index,
trans_ele_index,
edge_ele_index,
):
# instantiate variables
R_z = (
[]
) # variables with _ are elements of the main variable without _ i.e. R_z is an element of Rz
Rz = []
R_x = []
Rx = []
R_edge = []
Redge = []
R_x_col = []
R_z_col = []
R_edge_col = []
# get line segment - p_1 and p_2 correspond to start and end point of line
p_1 = start_point # start point of line
p_2 = end_point
# get line equation for checking intersections
L2 = line([p_1.x, p_1.z], [p_2.x, p_2.z])
# loop through long elements in grid, find intersection points
for long_ele in [self.Mesh_obj.long_ele[i] for i in long_ele_index]:
pz1 = self.Mesh_obj.node_spec[long_ele[1]]["coordinate"] # point z 1
pz2 = self.Mesh_obj.node_spec[long_ele[2]]["coordinate"] # point z 2
pz1 = Point(pz1[0], pz1[1], pz1[2]) # convert to point namedtuple
pz2 = Point(pz2[0], pz2[1], pz2[2]) # convert to point namedtuple
# get the line segment within the grid. Line segment defined by two points assume model plane = 0 [x_1, z_1
# ], and [x_2, z_2]
# if neither special case, check intersection
intersect_z, colinear_z = check_intersect(pz1, pz2, p_1, p_2)
if colinear_z and intersect_z:
# if colinear, find the colinear points
first = is_between(p_1, pz1, p_2)
second = is_between(p_1, pz2, p_2)
if first and second:
R_z_col.append([long_ele[0], pz1, pz2])
elif first: # second point not in between
if is_between(pz1, p_2, pz2):
R_z_col.append([long_ele[0], pz1, p_2])
else:
R_z_col.append([long_ele[0], pz1, p_1])
elif second: # second only
if is_between(pz1, p_1, pz2):
R_z_col.append([long_ele[0], p_1, pz2])
else:
R_z_col.append([long_ele[0], p_2, pz2])
elif intersect_z:
L1 = line([pz1.x, pz1.z], [pz2.x, pz2.z])
R_z = intersection(L1, L2)
Rz.append([R_z[0], pz1.y, R_z[1]])
# loop through trans elements in grid, find intersection points
for trans_ele in [self.Mesh_obj.trans_ele[i] for i in trans_ele_index]:
px1 = self.Mesh_obj.node_spec[trans_ele[1]]["coordinate"] # point z 1
px2 = self.Mesh_obj.node_spec[trans_ele[2]]["coordinate"] # point z 2
px1 = Point(px1[0], px1[1], px1[2]) # convert to point namedtuple
px2 = Point(px2[0], px2[1], px2[2]) # convert to point namedtuple
# check potential for intersection or co linear condition
intersect_x, colinear_x = check_intersect(px1, px2, p_1, p_2)
if colinear_x and intersect_x:
first = is_between(p_1, px1, p_2)
second = is_between(p_1, px2, p_2)
if first and second:
R_z_col.append([trans_ele[0], px1, px2])
elif first: # second point not in between
if is_between(px1, p_2, px2):
R_z_col.append([trans_ele[0], px1, p_2])
else:
R_z_col.append([trans_ele[0], px1, p_1])
elif second: # second only
if is_between(px1, p_1, px2):
R_z_col.append([trans_ele[0], p_1, px2])
else:
R_z_col.append([trans_ele[0], p_2, px2])
elif intersect_x:
L1 = line([px1.x, px1.z], [px2.x, px2.z])
R_x = intersection(L1, L2)
Rx.append([R_x[0], px1.y, R_x[1]])
# loop through edge elements in grid, find intersection points
for edge_ele in [self.Mesh_obj.edge_span_ele[i] for i in edge_ele_index]:
p_edge_1 = self.Mesh_obj.node_spec[edge_ele[1]]["coordinate"] # point z 1
p_edge_2 = self.Mesh_obj.node_spec[edge_ele[2]]["coordinate"] # point z 2
p_edge_1 = Point(
p_edge_1[0], p_edge_1[1], p_edge_1[2]
) # convert to point namedtuple
p_edge_2 = Point(
p_edge_2[0], p_edge_2[1], p_edge_2[2]
) # convert to point namedtuple
intersect_edge, colinear_edge = check_intersect(
p_edge_1, p_edge_2, p_1, p_2
)
if colinear_edge and intersect_edge:
first = is_between(p_1, p_edge_1, p_2)
second = is_between(p_1, p_edge_2, p_2)
if first and second:
R_z_col.append([edge_ele[0], p_edge_1, p_edge_2])
elif first: # second point not in between
if is_between(p_edge_1, p_2, p_edge_2):
R_z_col.append([edge_ele[0], p_edge_1, p_2])
else:
R_z_col.append([edge_ele[0], p_edge_1, p_1])
elif second: # second only
if is_between(p_edge_1, p_1, p_edge_2):
R_z_col.append([edge_ele[0], p_1, p_edge_2])
else:
R_z_col.append([edge_ele[0], p_2, p_edge_2])
elif intersect_edge:
L1 = line([p_edge_1.x, p_edge_1.z], [p_edge_2.x, p_edge_2.z])
R_edge = intersection(L1, L2) # temporary R_edge variable
Redge.append(
[R_edge[0], p_edge_1.y, R_edge[1]]
) # Redge variable to be returned - as list
return Rz, Rx, Redge, R_z_col, R_x_col, R_edge_col
# Getter for Patch loads
def _get_bounded_nodes(self, patch_load_obj):
# function to return nodes bounded by patch load
point_list = [
patch_load_obj.load_point_1,
patch_load_obj.load_point_2,
patch_load_obj.load_point_3,
patch_load_obj.load_point_4,
]
bounded_node = []
bounded_grids = []
for node_tag, node_spec in self.Mesh_obj.node_spec.items():
coordinate = node_spec["coordinate"]
node = Point(coordinate[0], coordinate[1], coordinate[2])
flag = check_point_in_grid(node, point_list)
if flag:
# node is inside
bounded_node.append(node_tag)
# check if nodes form grid
for grid_number, grid_nodes in self.Mesh_obj.grid_number_dict.items():
check = all([nodes in bounded_node for nodes in grid_nodes])
if check:
bounded_grids.append(grid_number)
return bounded_node, bounded_grids
# Setter for Point loads
    def _assign_point_to_four_node(self, point, mag, shape_func="linear"):
        """
        Distribute a point load of magnitude ``mag`` at ``point`` to the
        nodes of the containing grid using shape functions.

        :param point: [x, y, z] position of the point load
        :param mag: load magnitude applied in the model's vertical (y) axis
        :param shape_func: "linear" (default) or "hermite"; hermite also
            distributes nodal moments mx / mz
        :return: list of "ops.load(...)" command strings (empty when the
            point lies outside every grid)
        """
        node_mx = []
        node_mz = []
        # search grid where the point lies in
        grid_nodes, _ = self._get_point_load_nodes(point=point)
        if grid_nodes is None:
            load_str = []
            return load_str
        # if corner or edge grid with 3 nodes, run specific assignment for triangular grids
        # extract coordinates
        p1 = self.Mesh_obj.node_spec[grid_nodes[0]]["coordinate"]
        p2 = self.Mesh_obj.node_spec[grid_nodes[1]]["coordinate"]
        p3 = self.Mesh_obj.node_spec[grid_nodes[2]]["coordinate"]
        point_list = [
            Point(p1[0], p1[1], p1[2]),
            Point(p2[0], p2[1], p2[2]),
            Point(p3[0], p3[1], p3[2]),
        ]
        if len(grid_nodes) == 3:
            # triangular grid: linear triangular shape function only
            sorted_list, sorted_node_tag = sort_vertices(point_list, grid_nodes)
            Nv = ShapeFunction.linear_triangular(
                x=point[0],
                z=point[2],
                x1=sorted_list[0].x,
                z1=sorted_list[0].z,
                x2=sorted_list[1].x,
                z2=sorted_list[1].z,
                x3=sorted_list[2].x,
                z3=sorted_list[2].z,
            )
            node_load = [mag * n for n in Nv]
            # no nodal moments for the triangular case
            node_mx = np.zeros(len(node_load))
            node_mz = np.zeros(len(node_load))
        else:  # else run assignment for quadrilateral grids
            # extract coordinates of fourth point
            p4 = self.Mesh_obj.node_spec[grid_nodes[3]]["coordinate"]
            point_list.append(Point(p4[0], p4[1], p4[2]))
            sorted_list, sorted_node_tag = sort_vertices(point_list, grid_nodes)
            # mapping coordinates to natural coordinate, then finds eta (x) and zeta (z) of the point xp,zp
            eta, zeta = solve_zeta_eta(
                xp=point[0],
                zp=point[2],
                x1=sorted_list[0].x,
                z1=sorted_list[0].z,
                x2=sorted_list[1].x,
                z2=sorted_list[1].z,
                x3=sorted_list[2].x,
                z3=sorted_list[2].z,
                x4=sorted_list[3].x,
                z4=sorted_list[3].z,
            )
            # access shape function of line load
            if shape_func == "hermite":
                Nv, Nmx, Nmz = ShapeFunction.hermite_shape_function_2d(eta, zeta)
                # Mx
                node_mx = [mag * n for n in Nmx]
                # Mz
                node_mz = [mag * n for n in Nmz]
            else:  # linear shape function
                Nv = ShapeFunction.linear_shape_function(eta, zeta)
            # Fy
            node_load = [mag * n for n in Nv]
        load_str = []
        if shape_func == "hermite":
            # vertical force plus distributed moments about x and z
            for count, node in enumerate(sorted_node_tag):
                load_str.append(
                    "ops.load({pt}, *{val})\n".format(
                        pt=node,
                        val=[0, node_load[count], 0, node_mx[count], 0, node_mz[count]],
                    )
                )
        else:
            # vertical force only
            for count, node in enumerate(sorted_node_tag):
                load_str.append(
                    "ops.load({pt}, *{val})\n".format(
                        pt=node, val=[0, node_load[count], 0, 0, 0, 0]
                    )
                )
        return load_str
# Setter for Line loads and above
def _assign_line_to_four_node(
self, line_load_obj, line_grid_intersect, line_ele_colinear
) -> list:
# Function to assign line load to mesh. Procedure to assign line load is as follows:
# . get properties of line on the grid
# . convert line load to equivalent point load
# . Find position of equivalent point load
# . Runs assignment for point loads function (assign_point_to_four_node) using equivalent point load
# loop each grid
load_str_line = []
for grid, points in line_grid_intersect.items():
if (
"ends" not in points.keys()
): # hard code fix to solve colinear problems - see API notes
continue # continue to next load assignment
# extract two point of intersections within the grid
# depending on the type of line intersections
if len(points["long_intersect"]) >= 2: # long, long
p1 = points["long_intersect"][0]
p2 = points["long_intersect"][1]
elif len(points["trans_intersect"]) >= 2: # trans trans
p1 = points["trans_intersect"][0]
p2 = points["trans_intersect"][1]
elif points["long_intersect"] and points["trans_intersect"]: # long, trans
p1 = points["long_intersect"][0]
p2 = points["trans_intersect"][0]
elif points["long_intersect"] and points["edge_intersect"]: # long, edge
p1 = points["long_intersect"][0]
p2 = points["edge_intersect"][0]
elif points["trans_intersect"] and points["edge_intersect"]: # trans, edge
p1 = points["trans_intersect"][0]
p2 = points["edge_intersect"][0]
elif points["long_intersect"] and points["ends"]: # long, ends
p1 = points["long_intersect"][0]
p2 = points["ends"][0]
elif points["trans_intersect"] and points["ends"]: # trans, ends
p1 = points["trans_intersect"][0]
p2 = points["ends"][0]
elif points["edge_intersect"] and points["ends"]: # edge, ends
p1 = points["edge_intersect"][0]
p2 = points["ends"][0]
else:
p1 = [0, 0, 0]
p2 = p1
continue
# get length of line
L = np.sqrt((p1[0] - p2[0]) ** 2 + (p1[2] - p2[2]) ** 2)
# get magnitudes at point 1 and point 2
w1 = line_load_obj.interpolate_udl_magnitude([p1[0], 0, p1[1]])
w2 = line_load_obj.interpolate_udl_magnitude([p2[0], 0, p2[1]])
W = (w1 + w2) / 2
# get mid point of line
x_bar = ((2 * w1 + w2) / (w1 + w2)) * L / 3 # from p2
load_point = line_load_obj.get_point_given_distance(
xbar=x_bar, point_coordinate=[p2[0], self.y_elevation, p2[2]]
)
# uses point load assignment function to assign load point and mag to four nodes in grid
load_str = self._assign_point_to_four_node(
point=load_point, mag=W, shape_func=line_load_obj.shape_function
)
load_str_line += load_str # append to major list for line load
# loop through all colinear elements
# for each colinear element, assign line load to two nodes of element
assigned_ele = []
for ele in line_ele_colinear:
if ele[0] not in assigned_ele:
p1 = ele[1]
p2 = ele[2]
# get magnitudes at point 1 and point 2
L = get_distance(p1, p2)
w1 = line_load_obj.interpolate_udl_magnitude([p1.x, p1.y, p1.z])
w2 = line_load_obj.interpolate_udl_magnitude([p2.x, p2.y, p2.z])
W = (w1 + w2) / 2
mag = W * L
# mag = W
# get mid point of line
x_bar = ((2 * w1 + w2) / (w1 + w2)) * L / 3 # from p2
load_point = line_load_obj.get_point_given_distance(
xbar=x_bar, point_coordinate=[p2.x, p2.y, p2.z]
)
load_str = self._assign_point_to_four_node(point=load_point, mag=mag)
load_str_line += load_str # append to major list for line load
assigned_ele.append(ele[0])
return load_str_line
def _assign_beam_ele_line_load(self, line_load_obj: LineLoading):
load_str_line = []
ele_group = []
width_dict = None
if line_load_obj.long_beam_ele_load_flag:
ele_group = self.Mesh_obj.long_ele
width_dict = self.Mesh_obj.node_width_z_dict
elif line_load_obj.trans_beam_ele_load_flag:
ele_group = self.Mesh_obj.trans_ele
width_dict = self.Mesh_obj.node_width_x_dict
for ele in ele_group:
if ele[3] != 0: # exclude edge beams
p1 = ele[1] # node tag i
p2 = ele[2] # node tag j
# convert to point load tuple
p1_list = self.Mesh_obj.node_spec[p1]["coordinate"]
p2_list = self.Mesh_obj.node_spec[p2]["coordinate"]
p1_point = create_point(x=p1_list[0], z=p1_list[2])
p2_point = create_point(x=p2_list[0], z=p2_list[2])
L = get_distance(
p1_point, p2_point
) # distance between two point tuples of ele
w1 = line_load_obj.load_point_1.p # magnitude at vertex 1
w2 = line_load_obj.line_end_point.p # magnitude at vertex 2
d1 = np.sum(width_dict.get(p1)) # width of node j
d2 = np.sum(width_dict.get(p2)) # width of node j
d = (d1 + d2) / 2 # average width
W = (w1 + w2) / 2 # average mag
mag = W * L * d # convert UDL (N/m2) to point load, q * Length * width
# get mid point of line
x_bar = ((2 * w1 + w2) / (w1 + w2)) * L / 3 # from p2
load_point = line_load_obj.get_point_given_distance(
xbar=x_bar, point_coordinate=[p2_point.x, p2_point.y, p2_point.z]
)
load_str = self._assign_point_to_four_node(point=load_point, mag=mag)
load_str_line += load_str # append to major list for line load
return load_str_line
# setter for patch loads
def _assign_patch_load(self, patch_load_obj: PatchLoading) -> list:
# searches grid that encompass the patch load
# use getter for line load, 4 times for each point
# between 4 dictionaries record the common grids as having the corners of the patch - to be evaluated different
bound_node, bound_grid = self._get_bounded_nodes(patch_load_obj)
patch_load_str = [] # final return str list
# assign patch for grids fully bounded by patch
for grid in bound_grid:
nodes = self.Mesh_obj.grid_number_dict[grid] # read grid nodes
# get p value of each node
p_list = []
for tag in nodes:
coord = self.Mesh_obj.node_spec[tag]["coordinate"]
p = patch_load_obj.patch_mag_interpolate(coord[0], coord[2])[
0
] # object function returns array like
p_list.append(LoadPoint(coord[0], coord[1], coord[2], p))
# get centroid of patch on grid
xc, yc, zc = get_patch_centroid(p_list)
inside_point = Point(xc, yc, zc)
# volume = area of base x average height
A = self._get_node_area(inside_point=inside_point, p_list=p_list)
# _, A = calculate_area_given_four_points(inside_point, p_list[0], p_list[1], p_list[2], p_list[3])
mag = A * sum([point.p for point in p_list]) / len(p_list)
# assign point and mag to 4 nodes of grid
load_str = self._assign_point_to_four_node(
point=[xc, yc, zc], mag=mag, shape_func=patch_load_obj.shape_function
)
patch_load_str += load_str
# apply patch for full bound grids completed
# search the intersecting grids using line load function
intersect_grid_1, _ = self._get_line_load_nodes(
list_of_load_vertices=[
patch_load_obj.load_point_1,
patch_load_obj.load_point_2,
]
)
intersect_grid_2, _ = self._get_line_load_nodes(
list_of_load_vertices=[
patch_load_obj.load_point_2,
patch_load_obj.load_point_3,
]
)
intersect_grid_3, _ = self._get_line_load_nodes(
list_of_load_vertices=[
patch_load_obj.load_point_3,
patch_load_obj.load_point_4,
]
)
intersect_grid_4, _ = self._get_line_load_nodes(
list_of_load_vertices=[
patch_load_obj.load_point_4,
patch_load_obj.load_point_1,
]
)
# merging process of the intersect grid dicts
merged = check_dict_same_keys(intersect_grid_1, intersect_grid_2)
merged = check_dict_same_keys(merged, intersect_grid_3)
merged = check_dict_same_keys(merged, intersect_grid_4)
self.global_patch_int_dict.update(
merged
) # save intersect grid dict to global dict
# all lines are ordered in path counter clockwise - sorted hereafter via sort_vertices
# get nodes in grid that are left (check inside variable greater than 0)
for grid, int_point_list in merged.items(): # [x y z]
grid_nodes = self.Mesh_obj.grid_number_dict[grid] # read grid nodes
# get two grid nodes bounded by patch
node_in_grid = [
x
for x, y in zip(grid_nodes, [node in bound_node for node in grid_nodes])
if y
]
node_list = int_point_list # sort
p_list = []
# loop each int points - add extract coordinates, get patch magnitude using interpolation ,
# convert coordinate to namedtuple Loadpoint and append to point list p_list
for int_list in int_point_list.values():
for int_point in int_list: # [x y z]
p = (
patch_load_obj.patch_mag_interpolate(int_point[0], int_point[2])
if int_point != []
else []
) # object function returns array like
# p is array object, extract
p_list.append(
LoadPoint(int_point[0], int_point[1], int_point[2], p[0])
if int_point != []
else []
)
# loop each node in grid points
for items in node_in_grid:
coord = self.Mesh_obj.node_spec[items]["coordinate"]
p = patch_load_obj.patch_mag_interpolate(coord[0], coord[2])[
0
] # object function returns array like
p_list.append(LoadPoint(coord[0], coord[1], coord[2], p))
# Loop each p_list object to find duplicates if any, remove duplicate
for count, point in enumerate(p_list):
dupe = [point == val for val in p_list]
# if duplicate, remove value
if sum(dupe) > 1:
p_list.pop(count)
# sort points in counterclockwise
p_list, _ = sort_vertices(p_list) # sort takes namedtuple
# get centroid of patch on grid
xc, yc, zc = get_patch_centroid(p_list)
inside_point = Point(xc, yc, zc)
# volume = area of base x average height
# _, A = calculate_area_given_four_points(inside_point, p_list[0], p_list[1], p_list[2], p_list[3])
A = self._get_node_area(inside_point=inside_point, p_list=p_list)
mag = A * sum([point.p for point in p_list]) / len(p_list)
# assign point and mag to 4 nodes of grid
load_str = self._assign_point_to_four_node(
point=[xc, yc, zc], mag=mag, shape_func=patch_load_obj.shape_function
)
patch_load_str += load_str
return patch_load_str
@staticmethod
def _get_node_area(inside_point, p_list) -> float:
A = calculate_area_given_vertices(p_list)
return A
# ----------------------------------------------------------------------------------------------------------
# functions to add load case and load combination
def _distribute_load_types_to_model(
self, load_case_obj: Union[LoadCase, CompoundLoad]
) -> list:
global load_groups
load_str = []
# check the input parameter type, set load_groups parameter according to its type
if isinstance(load_case_obj, LoadCase):
load_groups = load_case_obj.load_groups
elif isinstance(load_case_obj.load_groups[0]["load"], CompoundLoad):
load_groups = load_case_obj.load_groups[0]["load"].compound_load_obj_list
# loop through each load object
load_str = []
for load_dict in load_groups:
load_obj = load_dict["load"]
# if compound load, distribute each individual load types within the compound load
if isinstance(load_obj, CompoundLoad):
# load_obj is a Compound load class, start a nested loop through each load class within compound load
# nested loop through each load in compound load, assign and get
for nested_list_of_load in load_obj.compound_load_obj_list:
if isinstance(nested_list_of_load, NodalLoad):
load_str += nested_list_of_load.get_nodal_load_str()
elif isinstance(nested_list_of_load, PointLoad):
load_str += self._assign_point_to_four_node(
point=list(nested_list_of_load.load_point_1)[:-1],
mag=nested_list_of_load.load_point_1.p,
shape_func=nested_list_of_load.shape_function,
)
elif isinstance(nested_list_of_load, LineLoading):
if any(
[
nested_list_of_load.long_beam_ele_load_flag,
nested_list_of_load.trans_beam_ele_load_flag,
]
):
load_str += self._assign_beam_ele_line_load(
line_load_obj=nested_list_of_load
)
else:
(
line_grid_intersect,
line_ele_colinear,
) = self._get_line_load_nodes(
line_load_obj=nested_list_of_load
) # returns self.line_grid_intersect
self.global_line_int_dict.append(line_grid_intersect)
load_str += self._assign_line_to_four_node(
nested_list_of_load,
line_grid_intersect=line_grid_intersect,
line_ele_colinear=line_ele_colinear,
)
elif isinstance(nested_list_of_load, PatchLoading):
load_str += self._assign_patch_load(nested_list_of_load)
# else, a single load type, assign it as it is
else:
# run single assignment of load type (load_obj is a load class)
if isinstance(load_obj, NodalLoad):
load_str += [
load_obj.get_nodal_load_str()
] # here return load_str as list with single element
elif isinstance(load_obj, PointLoad):
load_str += self._assign_point_to_four_node(
point=list(load_obj.load_point_1)[:-1],
mag=load_obj.load_point_1.p,
shape_func=load_obj.shape_function,
)
elif isinstance(load_obj, LineLoading):
if any(
[
load_obj.long_beam_ele_load_flag,
load_obj.trans_beam_ele_load_flag,
]
):
load_str += self._assign_beam_ele_line_load(
line_load_obj=load_obj
)
else:
(
line_grid_intersect,
line_ele_colinear,
) = self._get_line_load_nodes(
line_load_obj=load_obj
) # returns self.line_grid_intersect
self.global_line_int_dict.append(line_grid_intersect)
load_str += self._assign_line_to_four_node(
load_obj,
line_grid_intersect=line_grid_intersect,
line_ele_colinear=line_ele_colinear,
)
elif isinstance(load_obj, PatchLoading):
load_str += self._assign_patch_load(load_obj)
return load_str
# ---------------------------------------------------------------
# interface functions for load analysis utilities
def add_load_case(self, load_case_obj: Union[LoadCase, MovingLoad], load_factor=1):
"""
Function to add load cases to Ospllage grillage model. Function also adds moving load cases
:param load_factor: Optional load factor for the prescribed load case. Default = 1
:param load_case_obj: LoadCase or MovingLoad object
:type load_case_obj: LoadCase,MovingLoad
"""
if isinstance(load_case_obj, LoadCase):
# update the load command list of load case object
load_str = self._distribute_load_types_to_model(load_case_obj=load_case_obj)
# store load case + load command in dict and add to load_case_list
load_case_dict = {
"name": load_case_obj.name,
"loadcase": deepcopy(load_case_obj),
"load_command": load_str,
"load_factor": load_factor,
} # FORMATTING HERE
self.load_case_list.append(load_case_dict)
if self.diagnostics:
print("Load Case: {} added".format(load_case_obj.name))
elif isinstance(load_case_obj, MovingLoad):
# get the list of individual load cases
list_of_incr_load_case_dict = []
moving_load_obj = load_case_obj
# object method to create incremental load cases representing the position of the load
moving_load_obj.parse_moving_load_cases()
# for each load case, find the load commands of load distribution
for moving_load_case_list in moving_load_obj.moving_load_case:
for increment_load_case in moving_load_case_list:
load_str = self._distribute_load_types_to_model(
load_case_obj=increment_load_case
)
increment_load_case_dict = {
"name": increment_load_case.name,
"loadcase": increment_load_case,
"load_command": load_str,
"load_factor": load_factor,
}
list_of_incr_load_case_dict.append(increment_load_case_dict)
self.moving_load_case_dict[
moving_load_obj.name
] = list_of_incr_load_case_dict
if self.diagnostics:
print("Moving load case: {} created".format(moving_load_obj.name))
else:
raise ValueError(
"Input of add_load_case not a valid object. Hint:accepts only LoadCase or MovingLoad "
"objects"
)
    def analyze(self, **kwargs):
        """
        Function to analyze defined load

        :keyword:
        * all (`bool`): If True, runs all load cases. If not provided, default to True.
        * load_case ('list' or 'str'): String or list of name strings for selected load case to be analyzed.
        * set_verbose(`bool`): If True, incremental load case report is not printed to terminal (default True)
        :except: raise ValueError if missing arguments for either load_case=, or all=

        NOTE(review): ``set_verbose`` is documented above but is not read by this
        implementation - printing is controlled by ``self.diagnostics``. Confirm
        whether the kwarg should be wired in or removed from the docstring.
        """
        # analyze all load case defined in self.load_case_dict for OspGrillage instance
        # loop each load case dict
        # get run options from kwargs
        all_flag = True  # Default true
        selected_load_case: list = kwargs.get("load_case", None)  #
        if selected_load_case:
            all_flag = False  # overwrite all flag to be false
        selected_moving_load_lc_list = None
        # check if kwargs other than load_case are specified
        # if all([kwargs, selected_load_case is None]):
        # raise Exception("Error in analyze(options): only accepts load_case= ")
        # if selected_load_case kwargs given, filter and select load case from load case list to run
        # if given selected load case as a list, select load cases matching names in list
        if isinstance(selected_load_case, list):
            selected_basic_lc = [
                lc for lc in self.load_case_list if lc["name"] in selected_load_case
            ]
            # collect moving load cases whose name appears in the selection
            selected_moving_load_lc_list = [
                {ml_name: lc}
                for ml_name, lc in self.moving_load_case_dict.items()
                if ml_name in selected_load_case
            ]
            if selected_moving_load_lc_list:
                selected_moving_load_lc_list = selected_moving_load_lc_list[
                    0
                ]  # get first entry
        # if single string of load case name
        elif isinstance(selected_load_case, str):
            selected_basic_lc = [
                lc for lc in self.load_case_list if lc["name"] == selected_load_case
            ]
            selected_moving_load_lc_list = [
                {ml_name: lc}
                for (ml_name, lc) in self.moving_load_case_dict.items()
                if ml_name == selected_load_case
            ]
            if selected_moving_load_lc_list:
                selected_moving_load_lc_list = selected_moving_load_lc_list[
                    0
                ]  # get first entry
        elif all_flag:  # else, run all load case in list
            selected_basic_lc = self.load_case_list
            selected_moving_load_lc_list = self.moving_load_case_dict
        else:
            raise Exception(
                "missing kwargs for run options: hint: requires input for `load_case=`"
            )
        # run basic load case
        for load_case_dict in selected_basic_lc:
            # create analysis object, run and get results
            load_case_obj = load_case_dict["loadcase"]
            load_command = load_case_dict["load_command"]
            load_factor = load_case_dict["load_factor"]
            # each load case gets its own Analysis object; counters are passed
            # in and read back so tags stay unique across analyses
            load_case_analysis = Analysis(
                analysis_name=load_case_obj.name,
                ops_grillage_name=self.model_name,
                pyfile=self.pyfile,
                time_series_counter=self.global_time_series_counter,
                pattern_counter=self.global_pattern_counter,
                node_counter=self.Mesh_obj.node_counter,
                ele_counter=self.Mesh_obj.element_counter,
                constraint_type=self.constraint_type,
                load_case=load_case_obj,
            )
            load_case_analysis.add_load_command(load_command, load_factor=load_factor)
            # run the Analysis object, collect results, and store Analysis object in the list for Analysis load case
            (
                self.global_time_series_counter,
                self.global_pattern_counter,
                node_disp,
                ele_force,
            ) = load_case_analysis.evaluate_analysis()
            # print to terminal
            if self.diagnostics:
                print("Analysis: {} completed".format(load_case_obj.name))
            # store result in Recorder object
            self.results.insert_analysis_results(analysis_obj=load_case_analysis)
        # run moving load case
        list_of_inc_analysis = []
        # for moving_load_obj, load_case_dict_list in self.moving_load_case_dict.items():
        if selected_moving_load_lc_list:
            # each moving load is a list of incremental load cases; analyze each
            # increment in sequence, threading the global counters through
            for ml_name, load_case_dict_list in selected_moving_load_lc_list.items():
                for load_case_dict in load_case_dict_list:
                    load_case_obj = load_case_dict["loadcase"]  # maybe unused
                    load_command = load_case_dict["load_command"]
                    load_factor = load_case_dict["load_factor"]
                    incremental_analysis = Analysis(
                        analysis_name=load_case_obj.name,
                        ops_grillage_name=self.model_name,
                        pyfile=self.pyfile,
                        time_series_counter=self.global_time_series_counter,
                        pattern_counter=self.global_pattern_counter,
                        node_counter=self.Mesh_obj.node_counter,
                        ele_counter=self.Mesh_obj.element_counter,
                        constraint_type=self.constraint_type,
                        load_case=load_case_obj,
                    )
                    incremental_analysis.add_load_command(
                        load_command, load_factor=load_factor
                    )
                    (
                        self.global_time_series_counter,
                        self.global_pattern_counter,
                        node_disp,
                        ele_force,
                    ) = incremental_analysis.evaluate_analysis()
                    list_of_inc_analysis.append(incremental_analysis)
                    if self.diagnostics:
                        print("Analysis: {} completed".format(load_case_obj.name))
                # store result in Recorder object
                self.results.insert_analysis_results(
                    list_of_inc_analysis=list_of_inc_analysis
                )
                if self.diagnostics:
                    print("Analysis: {} completed".format(ml_name))
def add_load_combination(
self, load_combination_name: str, load_case_and_factor_dict: dict
):
"""
Function to add load combination to analysis. Load combinations are defined through a dict with
load case name str to be included in combination as keys, and load factor (type float/int) as value of dict.
:param load_combination_name: Name string of load combination
:type load_combination_name: str
:param load_case_and_factor_dict: dict with name string of load cases within the combination as key,
corresponding load factor as value.
:type load_case_and_factor_dict: str
Example format of input dict for add_load_combination::
load_comb = {"name_of_load_case_1":1.2, "name_of_load_case_2": 1.5}
.. note::
As of release 0.1.0, load combinations can be directly obtained (calculated on the fly) by specifying
``combination`` kwarg in :func:`~ospgrillage.osp_grillage.OspGrillage.get_results`. Hence, `add_combination`
is here for adding and storing information of load combination to :class:`~ospgrillage.osp_grillage.OspGrillage`
object.
"""
load_case_dict_list = [] # list of dict: structure of dict See line
# create dict with key (combination name) and val (list of dict of load cases)
for (
load_case_name,
combination_load_factor,
) in load_case_and_factor_dict.items():
# lookup basic load cases for load_case_name
index_list = [
index
for (index, val) in enumerate(self.load_case_list)
if val["name"] == load_case_name
]
# copy lc objects in index list if present
if index_list:
ind = index_list[0]
load_case_dict = deepcopy(self.load_case_list[ind])
load_case_dict["load_factor"] = combination_load_factor
load_case_dict_list.append(load_case_dict)
# else look up in moving load cases
elif load_case_name in self.moving_load_case_dict.keys():
for inc_load_case_dict in self.moving_load_case_dict[load_case_name]:
inc_load_case_dict["load_factor"] = combination_load_factor
load_case_dict_list.append(inc_load_case_dict)
# get the dict from self.load_case_list
# self.load_case_list has this format [{'loadcase':LoadCase object, 'load_command': list of str}...]
self.load_combination_dict.setdefault(
load_combination_name, load_case_dict_list
)
if self.diagnostics:
print("Load Combination: {} created".format(load_combination_name))
def get_results(self, **kwargs):
"""
Function to get results from specific or all load cases. Alternatively, function process and returns load combination if
"combina+tions" argument is provided. Result format is xarray DataSet. If a "save_file_name" is provided, saves
xarray DataSet to NetCDF format to current working directory.
:keyword:
* combinations (`bool`): If provided, returns a modified DataSet according to combinations defined. Format of argument is dict()
with keys of load case name string and values of load factors (`int` of `float`)
* save_file_name (`str`): Name string of file name. Saves to NetCDF.
* load_case (`str`): str or list of name string of specific load case to extract. Returned DataSet with the specified Load cases only
:return: Xarray DataSet of analysis results - extracted based on keyword option specified.
If combination is True, returns a list of DataSet, with each element correspond to
a load combination.
"""
# instantiate variables
list_of_moving_load_case = []
coordinate_name_list = None
# get kwargs
comb = kwargs.get("combinations", False) # if Boolean true
save_filename = kwargs.get("save_filename", None) # str of file name
specific_load_case = kwargs.get("load_case", None) # str of fil
local_force_flag = kwargs.get("local_forces", False)
basic_da = self.results.compile_data_array(local_force_option=local_force_flag)
if isinstance(specific_load_case, str):
specific_load_case = [specific_load_case]
# filter extract specific load case, overwriting basic da
if specific_load_case:
storing_da = None
for load_case_name in specific_load_case:
# lookup in basic load cases
namelist = [lc["name"] for lc in self.load_case_list]
for name in namelist:
if load_case_name == name:
extract_da = basic_da.sel(Loadcase=name)
if storing_da is None:
storing_da = extract_da
else: # storing_da is not none, concat in "loadcase" dimension
storing_da = xr.concat(
[storing_da, extract_da], dim="Loadcase"
)
if self.diagnostics:
print("Extracted load case data for : {}".format(name))
# lookup in moving load cases
for moving_name in self.moving_load_case_dict.keys():
if load_case_name == moving_name:
# get all string of moving name, then slice
incremental_lc_name_list = [
a["name"] for a in self.moving_load_case_dict[moving_name]
]
for name in incremental_lc_name_list:
extract_da = basic_da.sel(Loadcase=name)
if storing_da is None:
storing_da = extract_da
else: # storing_da is not none, concat in "loadcase" dimension
storing_da = xr.concat(
[storing_da, extract_da], dim="Loadcase"
)
basic_da = (
storing_da # Overwrite basic_da, proceed to check/evaluate combinations
)
# if combinations
if comb:
# output_load_comb_dict = [] # {name: datarray, .... name: dataarray}
# load comb name, load case in load comb
# this format: self.load_combination_dict.setdefault(load_combination_name, load_case_dict_list)
# comb = [{road:1.2, DL: 1.5},{} , {} ]
if not isinstance(comb, dict):
raise Exception(
"Combination argument requires a dict or a list of dict: e.g. {'DL':1.2,'SIDL':1.5}"
)
# for load_case_dict_list in comb: # {0:[{'loadcase':LoadCase object, 'load_command': list of str}
if self.diagnostics:
print("Obtaining load combinations ....")
summation_array = None # instantiate
factored_array = None # instantiate
# check and add load cases to load combinations for basic non moving load cases
for (
load_case_name,
load_factor,
) in (
comb.items()
): # [{'loadcase':LoadCase object, 'load_command': list of str}.]
# if load case is a moving load, skip to next step
if load_case_name in self.moving_load_case_dict.keys():
list_of_moving_load_case.append(
{load_case_name: load_factor}
) # store dict combination for later
continue
# load_case_name = load_case_dict['loadcase'].name
# if first load case, the first extracted array becomes the summation array
# TODO, coordinate is now Load case Object
if summation_array is None:
summation_array = (
basic_da.sel(Loadcase=load_case_name) * load_factor
)
else: # add to summation array
summation_array += (
basic_da.sel(Loadcase=load_case_name) * load_factor
)
# check and add load cases to load combinations for moving load cases
# get the list of increm load case correspond to matching moving load case of load combination
# list_of_moving_load_case.append(self.moving_load_case_dict.get(load_case_name, []))
for moving_lc_combo_dict in list_of_moving_load_case:
coordinate_name_list = []
for moving_lc_name, load_factor in moving_lc_combo_dict.items():
for incremental_load_case_dict in self.moving_load_case_dict[
moving_lc_name
]:
load_case_name = incremental_load_case_dict["name"]
if factored_array is None:
factored_array = (
basic_da.sel(Loadcase=load_case_name) * load_factor
+ summation_array
)
else:
factored_array = xr.concat(
[
factored_array,
basic_da.sel(Loadcase=load_case_name) * load_factor
+ summation_array,
],
dim="Loadcase",
)
# store new coordinate name for load case
coordinate_name_list.append(load_case_name)
# check if combination has moving load, if no, combination output is array summed among basic load case
if not factored_array:
combination_array = summation_array
else: # comb has moving load, assign the coordinates along the load case dimension for identification
combination_array = factored_array.assign_coords(
Loadcase=coordinate_name_list
)
return combination_array
else:
# return raw data array for manual post processing
if save_filename:
basic_da.to_netcdf(save_filename)
return basic_da
    def get_element(self, **kwargs):
        """
        Function to query properties of elements in grillage model.

        :keyword:
        * member (`str`): name string of the grillage element group to query
          (e.g. "transverse_slab", "start_edge", "end_edge", or a longitudinal
          member group name)
        * options (`str`): string for element data option. Either "elements" or "nodes" (default)
        * z_group_num (`int`): group number [0 to N] for N is the number of groups within a specific grillage element group.
          this is needed for interior beams, where users which to query specific group (e.g. 2nd group)
          within this "interior_main_beam" element group.
        * x_group_num (`int`): ditto for z_group_num but for x_group
        * edge_group_num(`int`): ditto for z_group_num but for edge groups

        :return: List of element data (tag)

        NOTE(review): the longitudinal branch appends a nested list of sorted
        node tags (list of one list) while the edge branch returns a flat list -
        confirm whether callers rely on this shape difference.
        """
        # get query member details
        namestring = kwargs.get("member", None)
        select_z_group = kwargs.get(
            "z_group_num", 0
        )  # optional z_group number for internal beam members
        select_x_group = kwargs.get("x_group_num", None)  # currently unused
        select_edge_group = kwargs.get("edge_group_num", None)  # currently unused
        # prefix namestring variables
        element_option = "elements"
        node_option = "nodes"
        # instantiate variables
        sorted_return_list = []
        extracted_ele = []
        # read kwargs
        options = kwargs.get(
            "options", node_option
        )  # similar to ops_vis, "nodes","element","node_i","node_j"
        if not options:
            raise Exception(
                'Options not defined: Hint arg option= "nodes","element","node_i","node_j"'
            )
        # reading common elements off namestring
        if namestring == "transverse_slab":
            extracted_ele = self.Mesh_obj.trans_ele
            # TODO
        elif namestring == "start_edge" or namestring == "end_edge":
            # collect elements of each edge group; note extracted_ele is
            # overwritten each iteration, so only the last group's elements
            # remain after the loop
            for edge_group in self.common_grillage_element[namestring]:
                extracted_ele = self.Mesh_obj.edge_group_to_ele[edge_group]
            if options == node_option:
                sorted_return_list = [
                    key
                    for key, val in self.Mesh_obj.edge_node_recorder.items()
                    if val == self.common_grillage_element[namestring][0]
                ]
            elif options == element_option:
                sorted_return_list = [ele[0] for ele in extracted_ele]
        else:  # longitudinal members
            extracted_ele = [
                self.Mesh_obj.z_group_to_ele[num]
                for num in self.common_grillage_element[namestring]
            ][select_z_group]
            if options == node_option:
                first_list = [i[1] for i in extracted_ele]  # first list of nodes
                second_list = [i[2] for i in extracted_ele]  # second list of nodes
                return_list = first_list + list(
                    set(second_list) - set(first_list)
                )  # get only unique nodes
                # sort nodes based on x coordinate
                node_x = [
                    self.Mesh_obj.node_spec[tag]["coordinate"][0] for tag in return_list
                ]
                sorted_return_list.append(
                    [x for _, x in sorted(zip(node_x, return_list))]
                )
            elif options == element_option:
                sorted_return_list = [i[0] for i in extracted_ele]
        return sorted_return_list
def get_nodes(self):
"""
Function to return all information for nodes in grillage model
:return: dict contain node information
"""
return self.Mesh_obj.node_spec
def clear_load_cases(self, **kwargs):
"""
Function to remove all/specific load cases from model. This function also resets the results stored in the
model - users are require to re- :func:`~ospgrillage.osp_grillage.OspGrillage.analyze`.
"""
specific_lc = kwargs.get("load_case", None)
if isinstance(specific_lc, str):
specific_lc = [specific_lc]
if specific_lc:
for lc in specific_lc:
check_match = [lc in lc_name["name"] for lc_name in self.load_case_list]
if any(check_match):
ind = [i for i, x in enumerate(check_match) if x][
0
] # list of 1 element
self.load_case_list.pop(ind)
else:
self.load_case_list = [] # reset load case
# remove all results
self.results = Results(self.Mesh_obj) # reset results
# ---------------------------------------------------------------------------------------------------------------------
class Analysis:
    """
    Main class to handle the run/execution of load case, including incremental load cases of a moving load analysis.
    Analysis class is created and handled by the OspGrillage class.
    The following are the roles of Analysis object:
    * store information of ops commands for performing static (default) analysis of single/multiple load case(s).
    * execute the required ops commands to perform analysis using the OspGrillage model instance.
    * if flagged, writes an executable py file instead which performs the exact analysis as it would for an OspGrillage instance instead.
    * manages multiple load case's ops.load() commands, applying the specified load factors to the load cases for load combinations
    """
    # template string for ops.remove('loadPattern', <tag>) - filled via .format()
    remove_pattern_command: str
    def __init__(
        self,
        analysis_name: str,
        ops_grillage_name: str,
        pyfile: bool,
        node_counter,
        ele_counter,
        analysis_type="Static",
        time_series_counter=1,
        pattern_counter=1,
        load_case: LoadCase = None,
        **kwargs
    ):
        """
        :param analysis_name: name of this analysis / load case.
        :param ops_grillage_name: name of the parent OspGrillage model.
        :param pyfile: if True, ops commands are written to an executable py
            file instead of being evaluated on the live OpenSees instance.
        :param node_counter: current node tag counter of the parent Mesh.
        :param ele_counter: current element tag counter of the parent Mesh.
        :param analysis_type: OpenSees analysis type string (default "Static").
        :param time_series_counter: starting tag for ops.timeSeries() commands.
        :param pattern_counter: starting tag for ops.pattern() commands.
        :param load_case: LoadCase object; a deepcopy is stored on the instance.
        :keyword constraint_type: handler for ops.constraints() (default "Plain").
        """
        self.analysis_name = analysis_name
        self.ops_grillage_name = ops_grillage_name
        self.time_series_tag = None
        self.pattern_tag = None
        self.analysis_type = analysis_type
        self.pyfile = pyfile
        self.analysis_file_name = (
            self.analysis_name + "of" + self.ops_grillage_name + ".py"
        )  # py file name
        # list recording load commands, time series and pattern for the input load case
        self.load_cases_dict_list = (
            []
        )  # entries are dicts: [{time_series, pattern, load_command}, ... ]
        # counters
        self.time_series_counter = time_series_counter
        self.plain_counter = pattern_counter
        # variables from keyword args
        self.constraint_type = kwargs.get("constraint_type", "Plain")  # Default plain
        # Variables recording results of analysis
        self.node_disp = dict()  # key node tag, val list of dof
        self.ele_force = (
            dict()
        )  # key ele tag, val list of forces on nodes of ele[ order according to ele tag]
        self.global_ele_force = (
            dict()
        )  # key ele tag, val list of forces on nodes of ele[ order according to ele tag]
        self.ele_force_shell = dict()  # ditto for ele force except only for shells
        self.global_ele_force_shell = (
            dict()
        )  # ditto for global ele force except only for shells
        # preset ops analysis commands (stored as strings; written or eval'd later)
        self.wipe_command = "ops.wipeAnalysis()\n"  # default wipe command
        self.numberer_command = "ops.numberer('Plain')\n"  # default numberer is Plain
        self.system_command = "ops.system('BandGeneral')\n"  # default band general
        self.constraint_command = 'ops.constraints("{type}")\n'.format(
            type=self.constraint_type
        )  # default plain
        self.algorithm_command = "ops.algorithm('Linear')\n"  # default linear
        self.analyze_command = "ops.analyze(1)\n"  # default 1 step
        self.analysis_command = 'ops.analysis("{}")\n'.format(analysis_type)
        # NOTE: "intergrator" attribute name is a historical typo; kept as-is
        # for backward compatibility with any external callers.
        self.intergrator_command = "ops.integrator('LoadControl', 1)\n"
        self.mesh_node_counter = node_counter  # set node counter based on current Mesh
        self.mesh_ele_counter = ele_counter  # set ele counter based on current Mesh
        self.remove_pattern_command = (
            "ops.remove('loadPattern',{})\n"  # default remove load command
        )
        # save deepcopy of load case object
        self.load_cases_obj = deepcopy(load_case)
        # if true for pyfile, create pyfile for analysis command
        if self.pyfile:
            with open(self.analysis_file_name, "w") as file_handle:
                # create py file or overwrite existing
                # writing headers and description at top of file
                file_handle.write(
                    "# Executable py file for Analysis of \n# Model name: {}\n".format(
                        self.ops_grillage_name
                    )
                )
                file_handle.write("# Load case: {}\n".format(self.analysis_name))
                # timestamp the generated file
                now = datetime.now()
                dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
                file_handle.write("# Constructed on:{}\n".format(dt_string))
                # write imports needed by the generated script
                file_handle.write(
                    "import numpy as np\nimport math\nimport openseespy.opensees as ops"
                    "\nimport openseespy.postprocessing.Get_Rendering as opsplt\n"
                )
    def _time_series_command(self, load_factor):
        """Format an ops.timeSeries() command string using the current tag and
        the given load factor, then increment the time series counter."""
        time_series = "ops.timeSeries('Constant', {}, '-factor',{})\n".format(
            self.time_series_counter, load_factor
        )
        self.time_series_counter += 1  # update counter by 1
        return time_series
    def _pattern_command(self):
        """Format an ops.pattern() command string referencing the most recent
        time series, then increment the pattern (plain) counter."""
        pattern_command = "ops.pattern('Plain', {}, {})\n".format(
            self.plain_counter, self.time_series_counter - 1
        )
        # minus 1 on the time series counter because _time_series_command()
        # precedes _pattern_command() and already incremented the counter
        self.plain_counter += 1
        return pattern_command
    def add_load_command(self, load_str: list, load_factor):
        """Register a group of pre-formatted ops.load() command strings under
        a fresh time series / load pattern pair.

        :param load_str: list of ops.load() command strings for one load case.
        :param load_factor: factor applied to the load case via its time series.
        """
        # create time series for added load case
        time_series = self._time_series_command(
            load_factor
        )  # get time series command - LF default 1
        pattern_command = self._pattern_command()  # get pattern command
        time_series_dict = {
            "time_series": time_series,
            "pattern": pattern_command,
            "load_command": load_str,
        }
        self.load_cases_dict_list.append(time_series_dict)  # add dict to list
    def evaluate_analysis(self):
        """Write (pyfile mode) or execute (instance mode) all stored time
        series, pattern, load and analysis commands, then extract responses.

        :return: tuple (time_series_counter, plain_counter, node_disp,
            ele_force) so OspGrillage can update its global counters/results.
        """
        # write/execute ops.load commands for load groups
        if self.pyfile:
            with open(self.analysis_file_name, "a") as file_handle:
                file_handle.write(self.wipe_command)
                for load_dict in self.load_cases_dict_list:
                    file_handle.write(load_dict["time_series"])
                    file_handle.write(load_dict["pattern"])
                    for load_command in load_dict["load_command"]:
                        file_handle.write(load_command)
                file_handle.write(self.intergrator_command)
                file_handle.write(self.numberer_command)
                file_handle.write(self.system_command)
                file_handle.write(self.constraint_command)
                file_handle.write(self.algorithm_command)
                file_handle.write(self.analysis_command)
                file_handle.write(self.analyze_command)
        else:
            # instance mode: eval() executes the pre-formatted OpenSeesPy call
            # strings built by this class; no external input reaches eval here
            eval(self.wipe_command)
            if (
                self.plain_counter - 1 != 1
            ):  # plain counter increments by 1 upon self.pattern_command function, so -1 here
                for count in range(1, self.plain_counter - 1):
                    remove_command = self.remove_pattern_command.format(count)
                    eval(remove_command)  # remove previous load pattern if any
            for load_dict in self.load_cases_dict_list:
                eval(load_dict["time_series"])
                eval(load_dict["pattern"])
                for load_command in load_dict["load_command"]:
                    eval(load_command)
            eval(self.intergrator_command)
            eval(self.numberer_command)
            eval(self.system_command)
            eval(self.constraint_command)
            eval(self.algorithm_command)
            eval(self.analysis_command)
            eval(self.analyze_command)
        # extract results (no-op in pyfile mode, see extract_grillage_responses)
        self.extract_grillage_responses()
        # return time series and plain counter so OspGrillage can update its
        # global counters
        return (
            self.time_series_counter,
            self.plain_counter,
            self.node_disp,
            self.ele_force,
        )
    # function to extract grillage model responses (dx,dy,dz,rotx,roty,rotz,N,Vy,Vz,Mx,My,Mz) and store to Result class
    def extract_grillage_responses(self):
        """
        Function that wraps OpenSeesPy nodeDisp() and eleResponse(), gets results of current analysis - model instance
        in OpenSees.

        :return: Stores results in global_ele_force and node_disp class variable
        """
        if not self.pyfile:
            # first loop extract node displacements
            for node_tag in ops.getNodeTags():
                disp_list = ops.nodeDisp(node_tag)
                self.node_disp.setdefault(node_tag, disp_list)
            # loop through all elements in Mesh, extract local forces
            for ele_tag in ops.getEleTags():
                ele_force = ops.eleResponse(ele_tag, "localForces")
                self.ele_force.setdefault(ele_tag, ele_force)
                global_ele_force = ops.eleResponse(ele_tag, "forces")
                self.global_ele_force.setdefault(ele_tag, global_ele_force)
        else:
            # pyfile mode: no live model exists to query, so nothing is stored
            print(
                "OspGrillage is at output mode, pyfile = True. Procedure for {} are generated.".format(
                    self.analysis_name
                )
            )
class Results:
    """
    Main class to store results of an Analysis class object, process into data array output for post processing/plotting.
    Class object is accessed within OspGrillage class object.
    """
    def __init__(self, mesh_obj: Mesh):
        """
        :param mesh_obj: Mesh object of the grillage model whose analysis
            results are being recorded.
        """
        # instantiate variables
        self.basic_load_case_record = dict()  # local-force records per load case
        self.basic_load_case_record_global_forces = dict()  # global-force records
        self.moving_load_case_record = []  # one dict of increments per moving load
        self.moving_load_case_record_global_forces = []
        self.moving_load_counter = 0
        # store mesh data of holding model
        self.mesh_obj = mesh_obj
        # coordinates for dimensions
        self.displacement_component = [
            "dx",
            "dy",
            "dz",
            "theta_x",
            "theta_y",
            "theta_z",
        ]
        # force components for beam elements (2 nodes: i and j)
        self.force_component = [
            "Vx_i",
            "Vy_i",
            "Vz_i",
            "Mx_i",
            "My_i",
            "Mz_i",
            "Vx_j",
            "Vy_j",
            "Vz_j",
            "Mx_j",
            "My_j",
            "Mz_j",
        ]
        # for force component of shell model (4 nodes)
        self.force_component_shell = [
            "Vx_i",
            "Vy_i",
            "Vz_i",
            "Mx_i",
            "My_i",
            "Mz_i",
            "Vx_j",
            "Vy_j",
            "Vz_j",
            "Mx_j",
            "My_j",
            "Mz_j",
            "Vx_k",
            "Vy_k",
            "Vz_k",
            "Mx_k",
            "My_k",
            "Mz_k",
            "Vx_l",
            "Vy_l",
            "Vz_l",
            "Mx_l",
            "My_l",
            "Mz_l",
        ]
        # dimension names
        self.dim = ["Loadcase", "Node", "Component"]
        self.dim2 = ["Loadcase", "Element", "Component"]
        self.dim_ele_beam = ["i", "j"]
        self.dim_ele_shell = ["i", "j", "k", "l"]
    def insert_analysis_results(
        self, analysis_obj: Analysis = None, list_of_inc_analysis: list = None
    ):
        """Record the responses of a completed analysis.

        Exactly one of the two arguments is expected: a single Analysis object
        for a basic load case, or a list of incremental Analysis objects for a
        moving load case.

        :param analysis_obj: completed Analysis of a basic load case.
        :param list_of_inc_analysis: list of completed incremental Analysis
            objects belonging to one moving load case.
        """
        # Create/parse data based on incoming analysis object or list of analysis obj (moving load)
        if analysis_obj:
            # compile ele forces for each node
            node_disp = analysis_obj.node_disp
            ele_force_dict = dict.fromkeys(
                list(ops.getEleTags())
            )  # dict key is element tag, value is local ele force
            global_ele_force_dict = dict.fromkeys(
                list(ops.getEleTags())
            )  # dict key is element tag, value is global ele force
            ele_nodes_dict = dict.fromkeys(list(ops.getEleTags()))
            # extract element forces and sort them according to nodes
            for ele_num, ele_forces in analysis_obj.ele_force.items():
                ele_force_dict.update({ele_num: ele_forces})
                # get ele nodes
                ele_nodes = ops.eleNodes(ele_num)
                ele_nodes_dict.update({ele_num: ele_nodes})
            self.basic_load_case_record.setdefault(
                analysis_obj.analysis_name, [node_disp, ele_force_dict, ele_nodes_dict]
            )
            # repeat to extract global forces instead
            for ele_num, ele_forces in analysis_obj.global_ele_force.items():
                global_ele_force_dict.update({ele_num: ele_forces})
                ele_nodes = ops.eleNodes(ele_num)  # get ele nodes
                ele_nodes_dict.update({ele_num: ele_nodes})
            self.basic_load_case_record_global_forces.setdefault(
                analysis_obj.analysis_name,
                [node_disp, global_ele_force_dict, ele_nodes_dict],
            )
        # if moving load, input is a list of analysis obj
        elif list_of_inc_analysis:
            inc_load_case_record = dict()
            inc_load_case_global_force_record = (
                dict()
            )  # inc_load_case_record but with forces in global
            for inc_analysis_obj in list_of_inc_analysis:
                # compile ele forces for each node
                node_disp = inc_analysis_obj.node_disp
                ele_force_dict = dict.fromkeys(list(ops.getEleTags()))
                ele_force_global_dict = dict.fromkeys(list(ops.getEleTags()))
                ele_nodes_dict = dict.fromkeys(list(ops.getEleTags()))
                # extract element forces and sort them according to nodes
                for ele_num, ele_forces in inc_analysis_obj.ele_force.items():
                    ele_force_dict.update({ele_num: ele_forces})
                    # get ele nodes
                    ele_nodes = ops.eleNodes(ele_num)
                    ele_nodes_dict.update({ele_num: ele_nodes})
                inc_load_case_record.setdefault(
                    inc_analysis_obj.analysis_name,
                    [node_disp, ele_force_dict, ele_nodes_dict],
                )
                for ele_num, ele_forces in inc_analysis_obj.global_ele_force.items():
                    ele_force_global_dict.update({ele_num: ele_forces})
                inc_load_case_global_force_record.setdefault(
                    inc_analysis_obj.analysis_name,
                    [node_disp, ele_force_global_dict, ele_nodes_dict],
                )
            self.moving_load_case_record.append(inc_load_case_record)
            self.moving_load_case_record_global_forces.append(
                inc_load_case_global_force_record
            )
    def compile_data_array(self, local_force_option=False):
        """Compile all recorded load case results into an xarray Dataset.

        :param local_force_option: if True compile forces in the elements'
            local axes, otherwise compile global forces.
        :return: xr.Dataset of displacements, forces and element nodes, or
            None when no results have been recorded.
        """
        # Coordinates of dimension
        node = list(self.mesh_obj.node_spec.keys())  # for Node
        ele = list(ops.getEleTags())
        # Sort data for dataArrays
        # for basic load case {loadcasename:[{1:,2:...},{1:,2:...}], ... , loadcasename:[{1:,2:...},{1:,2:...} }
        basic_array_list = []
        basic_load_case_coord = []
        basic_ele_force_list = []
        extracted_ele_nodes_list = False  # a 2D array of ele node i and ele node j
        ele_nodes_list = []
        # FIX: ele_tag was previously only assigned inside the loops below,
        # raising NameError at the np.array(ele_tag) conversion when no load
        # case results exist. Initialise it so the no-result path returns None.
        ele_tag = []
        base_ele_force_list_beam = []
        base_ele_force_list_shell = []
        # check if force option is global or local
        if local_force_option:
            basic_dict = self.basic_load_case_record
            moving_dict = self.moving_load_case_record
        else:  # global forces
            basic_dict = self.basic_load_case_record_global_forces
            moving_dict = self.moving_load_case_record_global_forces
        # loop all basic load case
        for load_case_name, resp_list_of_2_dict in basic_dict.items():
            # extract displacement
            basic_array_list.append(
                [a for a in list(resp_list_of_2_dict[0].values())]
            )  # list index 0 is disp
            # extract force
            # NOTE: basic_ele_force_list mixes beam and shell entries and is
            # not used downstream (beam/shell lists below are); kept for now.
            basic_ele_force_list.append(
                [a for a in list(resp_list_of_2_dict[1].values())]
            )  # list index 1 is force
            # extract based on element type (distinguished by force vector length)
            base_ele_force_list_beam.append(
                [
                    a
                    for a in list(resp_list_of_2_dict[1].values())
                    if len(a) == len(self.force_component)
                ]
            )
            base_ele_force_list_shell.append(
                [
                    a
                    for a in list(resp_list_of_2_dict[1].values())
                    if len(a) == len(self.force_component_shell)
                ]
            )
            if not extracted_ele_nodes_list:
                ele_nodes_list = list(
                    resp_list_of_2_dict[2].values()
                )  # list index 2 is ele nodes variable
                extracted_ele_nodes_list = (
                    True  # set to true, only extract the first time around
                )
                ele_tag = list(resp_list_of_2_dict[2].keys())
            # Coordinate of Load Case dimension
            basic_load_case_coord.append(load_case_name)
            # combine disp and force with respect to Component axis : size 12
        # loop all moving load cases
        for moving_load_case_inc_dict in moving_dict:
            # for each load case increment in moving load case
            for (
                increment_load_case_name,
                inc_resp_list_of_2_dict,
            ) in moving_load_case_inc_dict.items():
                basic_array_list.append(
                    [a for a in list(inc_resp_list_of_2_dict[0].values())]
                )
                basic_ele_force_list.append(
                    [a for a in list(inc_resp_list_of_2_dict[1].values())]
                )
                base_ele_force_list_beam.append(
                    [
                        a
                        for a in list(inc_resp_list_of_2_dict[1].values())
                        if len(a) == len(self.force_component)
                    ]
                )
                base_ele_force_list_shell.append(
                    [
                        a
                        for a in list(inc_resp_list_of_2_dict[1].values())
                        if len(a) == len(self.force_component_shell)
                    ]
                )
                # Coordinate of Load Case dimension
                basic_load_case_coord.append(increment_load_case_name)
                if not extracted_ele_nodes_list:
                    ele_nodes_list = list(inc_resp_list_of_2_dict[2].values())
                    ele_tag = list(inc_resp_list_of_2_dict[2].keys())
                    extracted_ele_nodes_list = True
        # convert to np array format
        basic_array = np.array(basic_array_list, dtype=object)
        force_array = np.array(basic_ele_force_list, dtype=object)  # unused downstream
        ele_array = np.array(ele_nodes_list, dtype=object)
        # beam elements have 2 nodes, shell elements have more
        ele_array_shell = [e for e in ele_array if len(e) > 2]
        ele_array_beam = [e for e in ele_array if len(e) == 2]
        ele_tag = np.array(ele_tag)
        ele_tag_shell = [tag for tag, e in zip(ele_tag, ele_array) if len(e) > 2]
        ele_tag_beam = [tag for tag, e in zip(ele_tag, ele_array) if len(e) == 2]
        force_array_shell = np.array(base_ele_force_list_shell)
        force_array_beam = np.array(base_ele_force_list_beam)
        # create data array for each basic load case if any, else return None
        if basic_array.size:
            # displacement data array
            basic_da = xr.DataArray(
                data=basic_array,
                dims=self.dim,
                coords={
                    self.dim[0]: basic_load_case_coord,
                    self.dim[1]: node,
                    self.dim[2]: self.displacement_component,
                },
            )
            force_da_beam = xr.DataArray(
                data=force_array_beam,
                dims=self.dim2,
                coords={
                    self.dim2[0]: basic_load_case_coord,
                    self.dim2[1]: ele_tag_beam,
                    self.dim2[2]: self.force_component,
                },
            )
            ele_nodes_beam = xr.DataArray(
                data=ele_array_beam,
                dims=[self.dim2[1], "Nodes"],
                coords={self.dim2[1]: ele_tag_beam, "Nodes": self.dim_ele_beam},
            )
            # create data set; shell meshes additionally carry shell variables
            if isinstance(self.mesh_obj, ShellLinkMesh):
                force_da_shell = xr.DataArray(
                    data=force_array_shell,
                    dims=self.dim2,
                    coords={
                        self.dim2[0]: basic_load_case_coord,
                        self.dim2[1]: ele_tag_shell,
                        self.dim2[2]: self.force_component_shell,
                    },
                )
                ele_nodes_shell = xr.DataArray(
                    data=ele_array_shell,
                    dims=[self.dim2[1], "Nodes"],
                    coords={self.dim2[1]: ele_tag_shell, "Nodes": self.dim_ele_shell},
                )
                result = xr.Dataset(
                    {
                        "displacements": basic_da,
                        "forces_beam": force_da_beam,
                        "forces_shell": force_da_shell,
                        "ele_nodes_beam": ele_nodes_beam,
                        "ele_nodes_shell": ele_nodes_shell,
                    }
                )
            else:
                result = xr.Dataset(
                    {
                        "displacements": basic_da,
                        "forces": force_da_beam,
                        "ele_nodes": ele_nodes_beam,
                    }
                )
        else:  # no result return None
            result = None
        return result
# ---------------------------------------------------------------------------------------------------------------------
# concrete classes of grillage model
class OspGrillageBeam(OspGrillage):
    """
    Concrete class for beam grillage model type.
    """
    def __init__(
        self,
        bridge_name,
        long_dim,
        width,
        skew: Union[list, float, int],
        num_long_grid: int,
        num_trans_grid: int,
        edge_beam_dist: Union[list, float, int],
        mesh_type="Ortho",
        model="3D",
        **kwargs
    ):
        # create mesh and model
        # NOTE(review): the incoming ``model`` argument is ignored - the base
        # class always receives model="3D"; presumably beam grillages are
        # inherently 3D here, but confirm this is intentional.
        super().__init__(
            bridge_name,
            long_dim,
            width,
            skew,
            num_long_grid,
            num_trans_grid,
            edge_beam_dist,
            mesh_type,
            model="3D",
            **kwargs
        )
class OspGrillageShell(OspGrillage):
    """
    Concrete class for shell model type
    """
    def __init__(
        self,
        bridge_name,
        long_dim,
        width,
        skew: Union[list, float, int],
        num_long_grid: int,
        num_trans_grid: int,
        edge_beam_dist: Union[list, float, int],
        mesh_type="Ortho",
        model="3D",
        **kwargs
    ):
        """
        Shell-specific keyword arguments (defaults apply when not specified):

        :keyword offset_beam_y_dist: vertical offset of beams (default 0).
        :keyword max_mesh_size_x: maximum mesh size in x direction (default 1).
        :keyword max_mesh_size_z: maximum mesh size in z direction (default 1).
        """
        # input variables specific to shell model - see default parameters if not specified
        self.offset_beam_y_dist = kwargs.get("offset_beam_y_dist", 0)  # default 0
        self.mesh_size_x = kwargs.get("max_mesh_size_x", 1)  # default 1 unit meter
        self.mesh_size_z = kwargs.get("max_mesh_size_z", 1)  # default 1 unit meter
        # model variables specific to Shell type
        self.shell_element_command_list = (
            []
        )  # list of str for ops.element() shell command
        # create mesh and model
        # NOTE(review): as with OspGrillageBeam, the incoming ``model``
        # argument is ignored - super() always receives model="3D".
        super().__init__(
            bridge_name,
            long_dim,
            width,
            skew,
            num_long_grid,
            num_trans_grid,
            edge_beam_dist,
            mesh_type,
            model="3D",
            **kwargs
        )
        # overwrite variables specific to shell mesh (after super().__init__)
        self.constraint_type = (
            "Transformation"  # constraint type to allow MP constraint objects
        )
        # overwrite standard element list for shell model
        self._create_standard_element_list()
    # ----------------------------------------------------------------------------------------------------------------
    # overwrite functions of base Mesh class - specific for shell model
    def create_osp_model(self, pyfile=False):
        """
        Function to create model instance in OpenSees model space. If pyfile input is True, function creates an
        executable pyfile for generating the grillage model in OpenSees model space.

        :param pyfile: if True returns an executable py file instead of creating OpenSees instance of model.
        :type pyfile: bool
        """
        self.pyfile = pyfile
        if self.pyfile:
            with open(self.filename, "w") as file_handle:
                # create py file or overwrite existing
                # writing headers and description at top of file
                file_handle.write(
                    "# Grillage generator wizard\n# Model name: {}\n".format(
                        self.model_name
                    )
                )
                # timestamp the generated file
                now = datetime.now()
                dt_string = now.strftime("%d/%m/%Y %H:%M:%S")
                file_handle.write("# Constructed on:{}\n".format(dt_string))
                # write imports needed by the generated script
                file_handle.write(
                    "import numpy as np\nimport math\nimport openseespy.opensees as ops"
                    "\nimport openseespy.postprocessing.Get_Rendering as opsplt\n"
                )
        # model() command
        self._write_op_model()
        # create grillage mesh object + beam element groups
        self._run_mesh_generation()
        # create shell element commands (built earlier by set_shell_members)
        for ele_str in self.shell_element_command_list:
            if self.pyfile:
                with open(self.filename, "a") as file_handle:
                    file_handle.write(ele_str)
            else:
                # eval() executes the pre-formatted ops.element() string
                eval(ele_str)
        # create rigid link command
        self._write_rigid_link()
        # create the result file for the Mesh object
        self.results = Results(self.Mesh_obj)
    # overwrites base class for beam element grillage - specific for Shell model
    def _create_standard_element_list(self):
        """
        Function to create standard element list for grillage model type.
        This child class overwrite parent class's function for beam grillage model type.

        :return: None; updates ``self.common_grillage_element`` in place.
        """
        # standard element for beam class
        for key, val in zip(
            self.common_grillage_element_keys[0 : self.long_member_index],
            sort_list_into_four_groups(
                self.Mesh_obj.offset_z_groups, option="shell"
            ).values(),
        ):
            self.common_grillage_element.update({key: val})
        # update edge beam groups' value (first and last model plane z groups)
        self.common_grillage_element.update(
            {
                self.common_grillage_element_keys[0]: [
                    self.Mesh_obj.model_plane_z_groups[0],
                    self.Mesh_obj.model_plane_z_groups[-1],
                ]
            }
        )
    # ----------------------------------------------------------------------------------------------------------------
    # functions specific to Shell model class
    def set_shell_members(
        self, grillage_member_obj: GrillageMember, quad=True, tri=False
    ):
        """
        Function to set shell/quad members across entire mesh grid.

        :param quad: Boolean to flag setting quad shell members
        :param tri: Boolean to flag setting triangular shell members
        :param grillage_member_obj: GrillageMember object
        :type grillage_member_obj: GrillageMember
        :raises ValueError: If GrillageMember object was not specified for quad or shell element. Also raises this error
            if components of GrillageMember object (e.g. section or material) is not a valid property
            for the specific shell element type in accordance with OpenSees conventions.

        .. note::
            Feature to be updated with class segregation later on 0.1.1
        """
        # this function creates shell elements out of the node grids of Mesh object
        shell_counter = self.Mesh_obj.element_counter
        # check and write member's section command if any
        section_tag = self._write_section(grillage_member_obj)
        # check and write member's material command if any
        material_tag = self._write_material(member=grillage_member_obj)
        # for each grid in Mesh, create a shell element command (executed later
        # by create_osp_model)
        for grid_nodes_list in self.Mesh_obj.grid_number_dict.values():
            ele_str = grillage_member_obj.get_element_command_str(
                ele_tag=shell_counter,
                node_tag_list=grid_nodes_list,
                materialtag=material_tag,
                sectiontag=section_tag,
            )
            self.shell_element_command_list.append(ele_str)
            shell_counter += 1
    # overwrite base fix() command procedure
    def _write_op_fix(self, mesh_obj):
        """
        Overwritten sub procedure to create ops.fix() command for
        boundary condition definition in the grillage model. If pyfile is flagged true, writes
        the ops.fix() command to py file instead.
        """
        if self.pyfile:
            with open(self.filename, "a") as file_handle:
                file_handle.write("# Boundary condition implementation\n")
        for node_tag, edge_group_num in mesh_obj.edge_support_nodes.items():
            if edge_group_num == 0:  # 0 is edge of start of span -> pinned
                if self.pyfile:  # if writing py file
                    with open(self.filename, "a") as file_handle:
                        file_handle.write(
                            "ops.fix({}, *{})\n".format(
                                node_tag, self.fixity_vector["pin"]
                            )
                        )
                else:  # run instance
                    ops.fix(node_tag, *self.fixity_vector["pin"])
            elif edge_group_num == 1:  # 1 is edge of end of span -> roller
                if self.pyfile:  # if writing py file
                    with open(self.filename, "a") as file_handle:
                        file_handle.write(
                            "ops.fix({}, *{})\n".format(
                                node_tag, self.fixity_vector["roller"]
                            )
                        )
                else:  # run instance
                    ops.fix(node_tag, *self.fixity_vector["roller"])
    def _write_rigid_link(self):
        """
        Private procedure to write or execute OpenSeesPy rigidLink() command. Reads rigid link data from link_str_list
        variable
        """
        # loop all rigidLink command, write or eval rigid link command. note link_str is already formatted
        for link_str in self.Mesh_obj.link_str_list:
            if self.pyfile:
                with open(self.filename, "a") as file_handle:
                    file_handle.write(link_str)
            else:
                eval(link_str)
| 45.260501 | 149 | 0.561137 |
86aab1f38c5fffada075fdb4e1ed3c8286bb402f | 98 | py | Python | setup.py | GlenWalker/apipkg | 4322f61343204b030bb5922ef4330f6348b9ffca | [
"MIT"
] | 25 | 2017-06-06T07:06:45.000Z | 2021-11-27T13:50:13.000Z | setup.py | GlenWalker/apipkg | 4322f61343204b030bb5922ef4330f6348b9ffca | [
"MIT"
] | 21 | 2017-09-02T18:50:17.000Z | 2022-03-11T22:49:34.000Z | setup.py | GlenWalker/apipkg | 4322f61343204b030bb5922ef4330f6348b9ffca | [
"MIT"
] | 12 | 2017-09-02T13:25:24.000Z | 2021-09-21T14:12:22.000Z | from setuptools import setup
# Version is derived from SCM tags via setuptools-scm and written to
# src/apipkg/version.py at build time.
setup(
    use_scm_version={"write_to": "src/apipkg/version.py"},
)
| 16.333333 | 58 | 0.72449 |
46e299d516e358d26a8c45d4830e23e813d6e399 | 1,224 | py | Python | setup.py | simonw/dbf-to-sqlite | 1d1e1e8d64feb74d8a96b99200fb558b06a6957b | [
"Apache-2.0"
] | 30 | 2019-01-31T20:57:55.000Z | 2022-01-25T20:23:11.000Z | setup.py | fg-fly/dbf-to-sqlite | 1d1e1e8d64feb74d8a96b99200fb558b06a6957b | [
"Apache-2.0"
] | 3 | 2019-04-09T01:06:11.000Z | 2020-11-30T03:24:28.000Z | setup.py | fg-fly/dbf-to-sqlite | 1d1e1e8d64feb74d8a96b99200fb558b06a6957b | [
"Apache-2.0"
] | 10 | 2019-04-09T15:34:51.000Z | 2022-01-09T12:05:30.000Z | from setuptools import setup, find_packages
import io
import os
VERSION = "0.1"


def get_long_description():
    """Return the contents of README.md, used as the PyPI long description."""
    readme_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "README.md"
    )
    with io.open(readme_path, encoding="utf8") as readme:
        return readme.read()
setup(
name="dbf-to-sqlite",
description="CLI tool for converting DBF files (dBase, FoxPro etc) to SQLite",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Simon Willison",
version=VERSION,
license="Apache License, Version 2.0",
packages=find_packages(),
install_requires=["dbf==0.97.11", "click", "sqlite_utils"],
entry_points="""
[console_scripts]
dbf-to-sqlite=dbf_to_sqlite.cli:cli
""",
url="https://github.com/simonw/dbf-to-sqlite",
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: End Users/Desktop",
"Topic :: Database",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
)
| 29.142857 | 82 | 0.634804 |
3e902501a19c49dee5443abf7599cb7ae511d510 | 2,875 | py | Python | gemynd/api/telegramapi/user.py | gemynd/gemynd | e2bca0dc48d19cb2aecf0921e2e5ae5be7a2e2f1 | [
"Apache-2.0"
] | null | null | null | gemynd/api/telegramapi/user.py | gemynd/gemynd | e2bca0dc48d19cb2aecf0921e2e5ae5be7a2e2f1 | [
"Apache-2.0"
] | null | null | null | gemynd/api/telegramapi/user.py | gemynd/gemynd | e2bca0dc48d19cb2aecf0921e2e5ae5be7a2e2f1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# A library that provides a Gemynd AI bot interface
# Copyright (C) 2016
# Gemynd AI Team <devs@gemynd.ai>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import logging
import pg8000
from gemynd import Config
from gemynd import Database
logger = logging.getLogger(__name__)
class Users:
    """Resolves Telegram accounts to core user ids, creating the core user
    record and the telegram-to-core mapping on first sight of an account.
    Successful lookups are cached in-memory in ``self.telegramUsers``.
    """
    def __init__(self, config):
        """Connect to the database described by ``config`` and prepare the
        in-memory telegram_id -> user_id cache."""
        try:
            if (config['verbose'] == 'on'):
                logger.setLevel(logging.DEBUG)
            self.db = Database(config)
            self.db.connect()
            self.telegramUsers = dict()  # cache: telegram_id -> core user_id
        except Exception as ex:  # fixed Python-2-only "except Exception, ex" syntax
            logger.error('Exception occured initializing user list')
            logger.error('%s' % str(ex))
    def getTelegramUser(self, telegram_id):
        """Return the core user id mapped to ``telegram_id``, consulting the
        in-memory cache first and falling back to the telegram.users table.
        Returns None when no mapping exists."""
        if not telegram_id in self.telegramUsers:
            retset = self.db.fetch(
                """select user_id
                from telegram.users
                where telegram_id = %d
                """ % telegram_id)
            if len(retset) > 0:
                self.telegramUsers[telegram_id] = retset[0][0]
        return self.telegramUsers.get(telegram_id)
    def getCoreUser(self, user_name):
        """Return the result set for the core user row named ``user_name``."""
        # SECURITY(review): user_name is interpolated directly into the SQL
        # string - vulnerable to SQL injection. The Database wrapper should
        # expose parameterised queries; do not pass untrusted names here.
        retset = self.db.fetch(
            """select id
            from core.users
            where name = '%s'
            """ % user_name)
        return retset
    def addTelegramUser(self, user_id, telegram_id):
        """Insert a telegram_id -> user_id mapping row."""
        self.db.call(
            """insert into telegram.users (user_id, telegram_id)
            values (%d, %d)""" % (user_id, telegram_id))
        return
    def addCoreUser(self, user_name):
        """Create a core user row and return its id row via getCoreUser."""
        # SECURITY(review): same string-built SQL injection risk as getCoreUser.
        self.db.call(
            """insert into core.users (name)
            values ('%s')""" % user_name)
        return self.getCoreUser(user_name)
    def getUser(self, telegram_id, user_name):
        """Return the core user id for a Telegram account, creating the core
        user and mapping rows if they do not yet exist."""
        user_id = self.getTelegramUser(telegram_id)
        if user_id is None:
            retset = self.getCoreUser(user_name)
            if len(retset) == 0:
                retset = self.addCoreUser(user_name)
            user_id = retset[0][0]
            self.addTelegramUser(user_id, telegram_id)
            # warms the cache with the new mapping; return value unused
            self.getTelegramUser(telegram_id)
        logger.debug("Telegram user id '%d' name '%s' has core id '%d'" %
                     (telegram_id, user_name, user_id))
        return user_id
    def close(self):
        """Close the underlying database connection."""
        self.db.close()
0eccdd9b6215b10722bff50c9168ccf60e8c0aee | 2,792 | py | Python | legacy/steps/preprocesser/base_preprocesser.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | [
"Apache-2.0"
] | 1,275 | 2020-11-19T14:18:25.000Z | 2021-08-13T07:31:39.000Z | legacy/steps/preprocesser/base_preprocesser.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | [
"Apache-2.0"
] | 62 | 2020-11-30T16:06:14.000Z | 2021-08-10T08:34:52.000Z | legacy/steps/preprocesser/base_preprocesser.py | ParikhKadam/zenml | 867e4d4c982a50447bd182b30af37f2141dac5a4 | [
"Apache-2.0"
] | 75 | 2020-12-22T19:15:08.000Z | 2021-08-13T03:07:50.000Z | # Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
import json
from typing import Dict, Text, List
from tfx.proto import transform_pb2
from zenml.enums import StepTypes
from zenml.steps import BaseStep
from zenml.steps.trainer.utils import TRAIN_SPLITS
SPLIT_MAPPING = 'split_mapping'


def build_split_mapping(args):
    """Build a tfx ``SplitsConfig`` from a JSON-encoded argument string.

    Returns None when no custom split mapping is present in ``args``.
    """
    parsed = json.loads(args)
    mapping = parsed.get(SPLIT_MAPPING)
    if not mapping:
        return None
    assert TRAIN_SPLITS in mapping, (
        f'When you are defining a custom split mapping, please define '
        f'{TRAIN_SPLITS}!'
    )
    splits_config = transform_pb2.SplitsConfig()
    for phase, split_names in mapping.items():
        for split_name in split_names:
            if phase == TRAIN_SPLITS:
                # NOTE: mirrors the original control flow exactly - only
                # training splits are registered for analyze AND transform.
                splits_config.analyze.append(split_name)
                splits_config.transform.append(split_name)
    return splits_config
class BasePreprocesserStep(BaseStep):
    """
    Base class for all preprocessing steps. These steps are used to
    specify transformation and filling operations on data that occur before
    the machine learning model is trained.
    """
    # identifies this step family to the pipeline machinery
    STEP_TYPE = StepTypes.preprocesser.name
    def __init__(self,
                 split_mapping: Dict[Text, List[Text]] = None,
                 **kwargs):
        """
        Base preprocessing step constructor. Custom preprocessing steps need
        to override the `preprocessing_fn` class method.

        Args:
            split_mapping: optional mapping from process name to list of
                split names, forwarded to the base step.
            **kwargs: Additional keyword arguments.
        """
        super(BasePreprocesserStep, self).__init__(split_mapping=split_mapping,
                                                   **kwargs)
    def preprocessing_fn(self, inputs: Dict):
        """
        Function used in the Transform component. Override this to do custom
        preprocessing logic.

        Args:
            inputs (dict): Inputs where keys are feature names and values are
            tensors which represent the values of the features.

        Returns:
            outputs (dict): Inputs where keys are transformed feature names
                and values are tensors with the transformed values of the
                features.
        """
        pass
| 34.04878 | 79 | 0.661175 |
6a10bcfd8cb498763c98f8713d652c9041dfca86 | 922 | py | Python | kubernetes_asyncio/test/test_v1_taint.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | 1 | 2021-01-13T09:28:57.000Z | 2021-01-13T09:28:57.000Z | kubernetes_asyncio/test/test_v1_taint.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/test/test_v1_taint.py | hubo1016/kubernetes_asyncio | d57e9e9be11f6789e1ce8d5b161acb64d29acf35 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.12.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes_asyncio.client
from kubernetes_asyncio.client.models.v1_taint import V1Taint # noqa: E501
from kubernetes_asyncio.client.rest import ApiException
class TestV1Taint(unittest.TestCase):
    """Unit-test stubs for the V1Taint model."""

    def setUp(self):
        # No fixtures are needed for these generated stubs.
        return None

    def tearDown(self):
        # Nothing to clean up.
        return None

    def testV1Taint(self):
        """Test V1Taint"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes_asyncio.client.models.v1_taint.V1Taint()  # noqa: E501
        return None
if __name__ == '__main__':
    # Allow running this test module directly through the unittest CLI.
    unittest.main()
| 22.487805 | 119 | 0.701735 |
fc38623c40b74424fc69f8b4359491545cea1959 | 706 | py | Python | poop/hfdp/factory/challenge/zone_factory.py | cassiobotaro/poop | fc218fbf638c50da8ea98dab7de26ad2a52e83f5 | [
"MIT"
] | 37 | 2020-12-27T00:13:07.000Z | 2022-01-31T19:30:18.000Z | poop/hfdp/factory/challenge/zone_factory.py | cassiobotaro/poop | fc218fbf638c50da8ea98dab7de26ad2a52e83f5 | [
"MIT"
] | null | null | null | poop/hfdp/factory/challenge/zone_factory.py | cassiobotaro/poop | fc218fbf638c50da8ea98dab7de26ad2a52e83f5 | [
"MIT"
] | 7 | 2020-12-26T22:33:47.000Z | 2021-11-07T01:29:59.000Z | from poop.hfdp.factory.challenge.zone import Zone
from poop.hfdp.factory.challenge.zone_central import ZoneCentral
from poop.hfdp.factory.challenge.zone_eastern import ZoneEastern
from poop.hfdp.factory.challenge.zone_mountain import ZoneMountain
from poop.hfdp.factory.challenge.zone_pacific import ZonePacific
class ZoneFactory:
    """Factory mapping a US time-zone identifier to its Zone implementation."""

    def create_zone(self, zoneId: str) -> Zone | None:
        # Guard-clause style: return as soon as an identifier matches.
        if zoneId == "US/Pacific":
            return ZonePacific()
        if zoneId == "US/Mountain":
            return ZoneMountain()
        if zoneId == "US/Central":
            return ZoneCentral()
        if zoneId == "US/Eastern":
            return ZoneEastern()
        # Unknown identifiers produce no zone.
        return None
| 35.3 | 66 | 0.681303 |
9fb60fabd4f17e55a9bb6998eab1cd6685540613 | 1,506 | py | Python | tests/test_utils_tracing.py | gva-jjoyce/mabel | eb99e02d0287b851e65ad9a75b5f4188805d4ec9 | [
"Apache-2.0"
] | null | null | null | tests/test_utils_tracing.py | gva-jjoyce/mabel | eb99e02d0287b851e65ad9a75b5f4188805d4ec9 | [
"Apache-2.0"
] | 2 | 2022-01-24T14:56:42.000Z | 2022-01-24T16:04:20.000Z | tests/test_utils_tracing.py | gva-jjoyce/mabel | eb99e02d0287b851e65ad9a75b5f4188805d4ec9 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import string
sys.path.insert(1, os.path.join(sys.path[0], ".."))
from mabel.flows.internals.trace_blocks import TraceBlocks
from mabel.data.formats.json import parse, serialize
from mabel.utils import entropy
from rich import traceback
traceback.install()
def test_hashes():
    """Each trace block must record its data hash and chain to the hash of
    the previous block."""
    expected_hashes = [
        entropy.random_string(length=32, characters=string.hexdigits),
        entropy.random_string(length=32, characters=string.hexdigits),
    ]

    trace = TraceBlocks()
    trace.add_block(data_hash=expected_hashes[0])
    trace.add_block(data_hash=expected_hashes[1])

    chain = parse(str(trace))

    prior = ""
    for position, current in enumerate(chain):
        # The first block is a seed and carries no data hash of its own.
        if position > 0:
            # The block must hold the data hash we wrote to it.
            assert current.get("data_hash") == expected_hashes[position - 1]
            # The block must chain to the hash of the previous block.
            recomputed = trace.hash(prior)
            assert recomputed == current.get("previous_block_hash")
            # (Proof-of-work validation intentionally not checked here; the
            # proof prepended to the previous hash should rehash to a value
            # ending in '0' or '5'.)
        prior = current
if __name__ == "__main__":  # pragma: no cover
    # Manual entry point: run the test directly without a test runner.
    test_hashes()
    print("okay")
| 28.961538 | 103 | 0.656707 |
cb3aaa4e121c694e0988b5d91573cea276923aff | 15,349 | py | Python | bin/mapped_2hic_dnase.py | gauravj49/nfhic | d21761f50ad39d29fcaaa69a93824793a952c803 | [
"MIT"
] | null | null | null | bin/mapped_2hic_dnase.py | gauravj49/nfhic | d21761f50ad39d29fcaaa69a93824793a952c803 | [
"MIT"
] | null | null | null | bin/mapped_2hic_dnase.py | gauravj49/nfhic | d21761f50ad39d29fcaaa69a93824793a952c803 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# HiC-Pro
# Copyleft 2015 Institut Curie
# Author(s): Nicolas Servant, Eric Viara
# Contact: nicolas.servant@curie.fr
# This software is distributed without any guarantee under the terms of the
# GNU General
# Public License, either Version 2, June 1991 or Version 3, June 2007.
"""
Script to keep only valid pairs when no restriction enzyme are used (i.e. DNAse or Micro-HiC)
"""
import getopt
import sys
import os
import re
import pysam
def usage():
    """Print the command-line usage summary for this script."""
    help_lines = (
        "Usage : python mapped_2hic_dnase.py",
        "-r/--mappedReadsFile <BAM/SAM file of mapped reads>",
        "[-o/--outputDir] <Output directory. Default is current directory>",
        "[-d/--minCisDist] <Minimum distance between intrachromosomal contact to consider>",
        "[-g/--gtag] <Genotype tag. If specified, this tag will be reported in the valid pairs output for allele specific classification>",
        "[-a/--all] <Write all additional output files, with information about the discarded reads (self-circle, dangling end, etc.)>",
        "[-v/--verbose] <Verbose>",
        "[-h/--help] <Help>",
    )
    for help_line in help_lines:
        print(help_line)
    return
def get_args():
    """Parse the command-line options for this script.

    Returns:
        List of ``(option, value)`` pairs as produced by ``getopt``.

    Exits (via ``sys.exit``) after printing usage on any unrecognised
    option.
    """
    try:
        opts, args = getopt.getopt(
            sys.argv[1:],
            "r:o:d:g:avh",
            ["mappedReadsFile=",
             "outputDir=",
             # Bug fix: the long names previously declared here were
             # 'minDist=' and 'gatg', which never matched the option
             # handlers ('--minCisDist' / '--gtag') and 'gatg' also lacked
             # the '=' marking a required argument.  Using either long form
             # therefore hit the handlers' "unhandled option" assertion.
             "minCisDist=", "gtag=",
             "all", "verbose", "help"])
    except getopt.GetoptError:
        usage()
        sys.exit(-1)
    return opts
def get_read_strand(read):
    """
    Return the naive strand of an aligned read.

    Parameters
    ----------
    read : aligned read object exposing ``is_reverse``

    Returns
    -------
    str
        "-" when the alignment is on the reverse strand, "+" otherwise.
    """
    return "-" if read.is_reverse else "+"
def get_read_pos(read, st="start"):
    """
    Return the read position (zero-based) used for the intersection with
    the restriction fragment.

    The 5' end is not a good choice for the reverse reads (which contain
    part of the restriction site, and thus overlap the next restriction
    fragment).  Using the left-most position (5' for forward, 3' for
    reverse) or the middle of the read should work, but the middle of the
    reads might be more safe.

    Parameters
    ----------
    read : aligned read object (pysam AlignedRead-like)
    st : str
        Position strategy: "start" (5' end), "middle" or "left".

    Raises
    ------
    ValueError
        If *st* is not a supported strategy.  (Previously an unknown value
        fell through and raised an UnboundLocalError on the return.)
    """
    if st == "middle":
        # read.alen is the aligned length; assumed non-negative.
        return read.pos + read.alen // 2
    if st == "start":
        return get_read_start(read)
    if st == "left":
        return read.pos
    raise ValueError("unknown read position strategy: %r" % (st,))
def get_read_start(read):
    """Return the zero-based position of the read's 5' end."""
    if not read.is_reverse:
        return read.pos
    # On the reverse strand the 5' end is the right-most aligned base.
    return read.pos + read.alen - 1
def get_ordered_reads(read1, read2):
    """
    Reorient a read pair so that the first returned read always comes
    before the second: ordered by chromosome id, then by position within
    the same chromosome.  Sequencing itself is not oriented, hence the
    possible swap.

    read1, read2 : aligned read objects
    """
    if read1.tid == read2.tid:
        keep_order = get_read_pos(read1) < get_read_pos(read2)
    else:
        keep_order = read1.tid < read2.tid
    return (read1, read2) if keep_order else (read2, read1)
def isIntraChrom(read1, read2):
    """
    Return True when both reads of the pair align to the same chromosome
    (i.e. share the same tid), False otherwise.

    read1, read2 : aligned read objects
    """
    return read1.tid == read2.tid
def get_valid_orientation(read1, read2):
    """
    Classify the orientation of a read pair after reordering it:
    ->-> = "FF", <-<- = "RR", -><- = "FR", <--> = "RF".

    Both reads are expected to be on different restriction fragments.

    read1, read2 : aligned read objects
    """
    first, second = get_ordered_reads(read1, read2)
    strand_pair = get_read_strand(first) + get_read_strand(second)
    # Map the ordered strand combination onto its label; strands are
    # always '+' or '-', so every combination is covered.
    return {"++": "FF", "--": "RR", "+-": "FR", "-+": "RF"}.get(strand_pair)
def get_cis_dist(read1, read2):
    """
    Return the genomic distance between the two mates of a pair, or None
    when it cannot be computed.

    The distance is only defined when both mates are mapped and align to
    the same chromosome.

    read1, read2 : aligned read objects
    """
    dist = None
    # Bug fix: the original body tested ``r1``/``r2``, names left over from
    # a commented-out call to get_ordered_reads, so every call raised a
    # NameError.  The function parameters are ``read1``/``read2``.
    if not read1.is_unmapped and not read2.is_unmapped:
        # Contact distances can be calculated for intrachromosomal reads only
        if isIntraChrom(read1, read2):
            dist = abs(get_read_pos(read1) - get_read_pos(read2))
    return dist
def get_read_tag(read, tag):
    """Return the value of the first SAM tag named *tag*, or None if the
    read carries no such tag."""
    return next((value for name, value in read.tags if name == tag), None)
if __name__ == "__main__":
    # Entry point: stream read pairs from a BAM/SAM file, classify each pair
    # as valid (VI), single-end (SI), distance-filtered (FILT) or dumped
    # (DUMP), write the per-class pair files, and emit an .RSstat summary.
    # Read command line arguments
    opts = get_args()
    verbose = False
    allOutput = False
    # minInsertSize/maxInsertSize are never used below — presumably kept for
    # parity with the restriction-fragment variant of this script (TODO confirm).
    minInsertSize = None
    maxInsertSize = None
    minDist = None
    outputDir = "."
    gtag = None
    if len(opts) == 0:
        usage()
        sys.exit()
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt in ("-r", "--mappedReadsFile"):
            mappedReadsFile = arg
        elif opt in ("-o", "--outputDir"):
            outputDir = arg
        elif opt in ("-d", "--minCisDist"):
            minDist = arg
        elif opt in ("-g", "--gtag"):
            gtag = arg
        elif opt in ("-a", "--all"):
            allOutput = True
        elif opt in ("-v", "--verbose"):
            verbose = True
        else:
            assert False, "unhandled option"
    # Verbose mode
    if verbose:
        print("## overlapMapped2HiCFragments.py")
        print("## mappedReadsFile=", mappedReadsFile)
        print("## minCisDist=", minDist)
        print("## allOuput=", allOutput)
        print("## verbose=", verbose, "\n")
    # Initialize variables
    reads_counter = 0
    valid_counter = 0
    valid_counter_FF = 0
    valid_counter_RR = 0
    valid_counter_FR = 0
    valid_counter_RF = 0
    single_counter = 0
    dump_counter = 0
    filt_counter = 0
    # AS counter: allele-specific pair counts keyed by the genotype of each
    # mate (1 = reference genome, 2 = alternate, 0 = unassigned, 3 = conflict).
    G1G1_ascounter = 0
    G2G2_ascounter = 0
    G1U_ascounter = 0
    UG1_ascounter = 0
    G2U_ascounter = 0
    UG2_ascounter = 0
    G1G2_ascounter = 0
    G2G1_ascounter = 0
    UU_ascounter = 0
    CF_ascounter = 0
    baseReadsFile = os.path.basename(mappedReadsFile)
    baseReadsFile = re.sub(r'\.bam$|\.sam$', '', baseReadsFile)
    # Open handlers for output files
    handle_valid = open(outputDir + '/' + baseReadsFile + '.validPairs', 'w')
    if allOutput:
        handle_dump = open(outputDir + '/' + baseReadsFile + '.DumpPairs', 'w')
        handle_single = open(outputDir + '/' + baseReadsFile + '.SinglePairs','w')
        handle_filt = open(outputDir + '/' + baseReadsFile + '.FiltPairs','w')
    # Read the SAM/BAM file
    if verbose:
        print("## Opening SAM/BAM file '", mappedReadsFile, "'...")
    samfile = pysam.Samfile(mappedReadsFile, "rb")
    # Reads are 0-based too (for both SAM and BAM format)
    # Loop on all reads.  The file is assumed to be name-sorted so that the
    # two mates of a pair are adjacent (r1 stored, then processed with r2).
    for read in samfile.fetch(until_eof=True):
        reads_counter += 1
        cur_handler = None
        interactionType = None
        htag = ""
        # First mate: remember it and its chromosome for the second mate.
        if read.is_read1:
            r1 = read
            if not r1.is_unmapped:
                r1_chrom = samfile.getrname(r1.tid)
            else:
                r1_chrom = None
        # Second mate: classify the (r1, r2) pair.
        elif read.is_read2:
            r2 = read
            if not r2.is_unmapped:
                r2_chrom = samfile.getrname(r2.tid)
            else:
                r2_chrom = None
            if isIntraChrom(r1,r2):
                dist = get_cis_dist(r1, r2)
            else:
                dist = None
            # Check singleton
            if r1.is_unmapped or r2.is_unmapped:
                interactionType = "SI"
                single_counter += 1
                cur_handler = handle_single if allOutput else None
            # Check Distance criteria - Filter
            if (minDist is not None and dist is not None and dist < int(minDist)):
                interactionType = "FILT"
                filt_counter += 1
                cur_handler = handle_filt if allOutput else None
            # By default pair is valid
            if interactionType == None:
                interactionType = "VI"
                valid_counter += 1
                cur_handler = handle_valid
                validType = get_valid_orientation(r1, r2)
                if validType == "RR":
                    valid_counter_RR += 1
                elif validType == "FF":
                    valid_counter_FF += 1
                elif validType == "FR":
                    valid_counter_FR += 1
                elif validType == "RF":
                    valid_counter_RF += 1
            else:
                # NOTE(review): pairs already flagged SI/FILT are re-labelled
                # DUMP here and routed to the dump file — confirm this matches
                # the upstream HiC-Pro intent.
                interactionType = "DUMP"
                dump_counter += 1
                cur_handler = handle_dump if allOutput else None
            # Split valid pairs based on XA tag
            if gtag is not None:
                r1as = get_read_tag(r1, gtag)
                r2as = get_read_tag(r2, gtag)
                if r1as == 1 and r2as == 1:
                    G1G1_ascounter += 1
                elif r1as == 2 and r2as == 2:
                    G2G2_ascounter += 1
                elif r1as == 1 and r2as == 0:
                    G1U_ascounter += 1
                elif r1as == 0 and r2as == 1:
                    UG1_ascounter += 1
                elif r1as == 2 and r2as == 0:
                    G2U_ascounter += 1
                elif r1as == 0 and r2as == 2:
                    UG2_ascounter += 1
                elif r1as == 1 and r2as == 2:
                    G1G2_ascounter += 1
                elif r1as == 2 and r2as == 1:
                    G2G1_ascounter += 1
                elif r1as == 3 or r2as == 3:
                    CF_ascounter += 1
                else:
                    UU_ascounter += 1
            if cur_handler is not None:
                if not r1.is_unmapped and not r2.is_unmapped:
                    ##reorient reads to ease duplicates removal
                    or1, or2 = get_ordered_reads(r1, r2)
                    or1_chrom = samfile.getrname(or1.tid)
                    or2_chrom = samfile.getrname(or2.tid)
                    ##reset as tag now that the reads are oriented
                    r1as = get_read_tag(or1, gtag)
                    r2as = get_read_tag(or2, gtag)
                    if gtag is not None:
                        htag = str(r1as)+"-"+str(r2as)
                    # Valid-pairs line: positions are written 1-based.
                    cur_handler.write(
                        or1.qname + "\t" +
                        or1_chrom + "\t" +
                        str(get_read_pos(or1)+1) + "\t" +
                        str(get_read_strand(or1)) + "\t" +
                        or2_chrom + "\t" +
                        str(get_read_pos(or2)+1) + "\t" +
                        str(get_read_strand(or2)) + "\t" +
                        "NA" + "\t" + ##dist
                        "NA" + "\t" + ##resfrag1
                        "NA" + "\t" + ##resfrag2
                        str(or1.mapping_quality) + "\t" +
                        str(or2.mapping_quality) + "\t" +
                        str(htag) + "\n")
                elif r2.is_unmapped and not r1.is_unmapped:
                    # Singleton with only mate 1 mapped.
                    cur_handler.write(
                        r1.qname + "\t" +
                        r1_chrom + "\t" +
                        str(get_read_pos(r1)+1) + "\t" +
                        str(get_read_strand(r1)) + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        str(r1.mapping_quality) + "\t" +
                        "*" + "\n")
                elif r1.is_unmapped and not r2.is_unmapped:
                    # Singleton with only mate 2 mapped.
                    cur_handler.write(
                        r2.qname + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        r2_chrom + "\t" +
                        str(get_read_pos(r2)+1) + "\t" +
                        str(get_read_strand(r2)) + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        "*" + "\t" +
                        str(r2.mapping_quality) + "\n")
            if (reads_counter % 100000 == 0 and verbose):
                print("##", reads_counter)
    # Close handler
    handle_valid.close()
    if allOutput:
        handle_dump.close()
        handle_single.close()
        handle_filt.close()
    # Write stats file
    handle_stat = open(outputDir + '/' + baseReadsFile + '.RSstat', 'w')
    handle_stat.write("## Hi-C processing - no restriction fragments\n")
    handle_stat.write("Valid_interaction_pairs\t" + str(valid_counter) + "\n")
    handle_stat.write(
        "Valid_interaction_pairs_FF\t" + str(valid_counter_FF) + "\n")
    handle_stat.write(
        "Valid_interaction_pairs_RR\t" + str(valid_counter_RR) + "\n")
    handle_stat.write(
        "Valid_interaction_pairs_RF\t" + str(valid_counter_RF) + "\n")
    handle_stat.write(
        "Valid_interaction_pairs_FR\t" + str(valid_counter_FR) + "\n")
    handle_stat.write("Single-end_pairs\t" + str(single_counter) + "\n")
    handle_stat.write("Filtered_pairs\t" + str(filt_counter) + "\n")
    handle_stat.write("Dumped_pairs\t" + str(dump_counter) + "\n")
    ## Write AS report
    if gtag is not None:
        handle_stat.write("## ======================================\n")
        handle_stat.write("## Allele specific information\n")
        handle_stat.write("Valid_pairs_from_ref_genome_(1-1)\t" + str(G1G1_ascounter) + "\n")
        handle_stat.write("Valid_pairs_from_ref_genome_with_one_unassigned_mate_(0-1/1-0)\t" + str(UG1_ascounter+G1U_ascounter) + "\n")
        handle_stat.write("Valid_pairs_from_alt_genome_(2-2)\t" + str(G2G2_ascounter) + "\n")
        handle_stat.write("Valid_pairs_from_alt_genome_with_one_unassigned_mate_(0-2/2-0)\t" + str(UG2_ascounter+G2U_ascounter) + "\n")
        handle_stat.write("Valid_pairs_from_alt_and_ref_genome_(1-2/2-1)\t" + str(G1G2_ascounter+G2G1_ascounter) + "\n")
        handle_stat.write("Valid_pairs_with_both_unassigned_mated_(0-0)\t" + str(UU_ascounter) + "\n")
        handle_stat.write("Valid_pairs_with_at_least_one_conflicting_mate_(3-)\t" + str(CF_ascounter) + "\n")
    handle_stat.close()
| 33.008602 | 207 | 0.506026 |
8ac5fef0408f0e055d132563ea4e98b7db9b0f89 | 5,749 | py | Python | nltk/corpus/util.py | FGDBTKD/nltk | 384e46e82789c7f47a7fb521ef976f82c3c4c3fb | [
"Apache-2.0"
] | null | null | null | nltk/corpus/util.py | FGDBTKD/nltk | 384e46e82789c7f47a7fb521ef976f82c3c4c3fb | [
"Apache-2.0"
] | null | null | null | nltk/corpus/util.py | FGDBTKD/nltk | 384e46e82789c7f47a7fb521ef976f82c3c4c3fb | [
"Apache-2.0"
] | null | null | null | # Natural Language Toolkit: Corpus Reader Utility Functions
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Edward Loper <edloper@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
######################################################################
#{ Lazy Corpus Loader
######################################################################
from __future__ import unicode_literals
import re
import gc
import nltk
from nltk.compat import python_2_unicode_compatible
TRY_ZIPFILE_FIRST = False
@python_2_unicode_compatible
class LazyCorpusLoader(object):
    """
    To see the API documentation for this lazily loaded corpus, first
    run corpus.ensure_loaded(), and then run help(this_corpus).

    LazyCorpusLoader is a proxy object which is used to stand in for a
    corpus object before the corpus is loaded.  This allows NLTK to
    create an object for each corpus, but defer the costs associated
    with loading those corpora until the first time that they're
    actually accessed.

    The first time this object is accessed in any way, it will load
    the corresponding corpus, and transform itself into that corpus
    (by modifying its own ``__class__`` and ``__dict__`` attributes).

    If the corpus can not be found, then accessing this object will
    raise an exception, displaying installation instructions for the
    NLTK data package.  Once they've properly installed the data
    package (or modified ``nltk.data.path`` to point to its location),
    they can then use the corpus object without restarting python.

    :param name: The name of the corpus
    :type name: str
    :param reader_cls: The specific CorpusReader class, e.g. PlaintextCorpusReader, WordListCorpusReader
    :type reader: nltk.corpus.reader.api.CorpusReader
    :param nltk_data_subdir: The subdirectory where the corpus is stored.
    :type nltk_data_subdir: str
    :param *args: Any other non-keywords arguments that `reader_cls` might need.
    :param *kargs: Any other keywords arguments that `reader_cls` might need.
    """
    def __init__(self, name, reader_cls, *args, **kwargs):
        # Imported lazily to avoid a circular import at module load time.
        from nltk.corpus.reader.api import CorpusReader
        assert issubclass(reader_cls, CorpusReader)
        # Double-underscore names are mangled, so they survive the
        # __dict__ swap performed in __load without clashing.
        self.__name = self.__name__ = name
        self.__reader_cls = reader_cls
        # If nltk_data_subdir is set explicitly
        if 'nltk_data_subdir' in kwargs:
            # Use the specified subdirectory path
            self.subdir = kwargs['nltk_data_subdir']
            # Pops the `nltk_data_subdir` argument, we don't need it anymore.
            kwargs.pop('nltk_data_subdir', None)
        else: # Otherwise use 'nltk_data/corpora'
            self.subdir = 'corpora'
        self.__args = args
        self.__kwargs = kwargs
    def __load(self):
        # Find the corpus root directory.  zip_name rewrites e.g.
        # 'treebank/combined' into 'treebank.zip/treebank/combined/'.
        zip_name = re.sub(r'(([^/]+)(/.*)?)', r'\2.zip/\1/', self.__name)
        if TRY_ZIPFILE_FIRST:
            try:
                root = nltk.data.find('{}/{}'.format(self.subdir, zip_name))
            except LookupError as e:
                try: root = nltk.data.find('{}/{}'.format(self.subdir, self.__name))
                except LookupError: raise e
        else:
            try:
                root = nltk.data.find('{}/{}'.format(self.subdir, self.__name))
            except LookupError as e:
                try: root = nltk.data.find('{}/{}'.format(self.subdir, zip_name))
                except LookupError: raise e
        # Load the corpus.
        corpus = self.__reader_cls(root, *self.__args, **self.__kwargs)
        # This is where the magic happens!  Transform ourselves into
        # the corpus by modifying our own __dict__ and __class__ to
        # match that of the corpus.
        # Capture these locals first: after the __dict__ swap below the
        # mangled attributes are gone, but the closure keeps them alive.
        args, kwargs = self.__args, self.__kwargs
        name, reader_cls = self.__name, self.__reader_cls
        self.__dict__ = corpus.__dict__
        self.__class__ = corpus.__class__
        # _unload support: assign __dict__ and __class__ back, then do GC.
        # after reassigning __dict__ there shouldn't be any references to
        # corpus data so the memory should be deallocated after gc.collect()
        def _unload(self):
            lazy_reader = LazyCorpusLoader(name, reader_cls, *args, **kwargs)
            self.__dict__ = lazy_reader.__dict__
            self.__class__ = lazy_reader.__class__
            gc.collect()
        self._unload = _make_bound_method(_unload, self)
    def __getattr__(self, attr):
        # Fix for inspect.isclass under Python 2.6
        # (see http://bugs.python.org/issue1225107).
        # Without this fix tests may take extra 1.5GB RAM
        # because all corpora gets loaded during test collection.
        if attr == '__bases__':
            raise AttributeError("LazyCorpusLoader object has no attribute '__bases__'")
        self.__load()
        # This looks circular, but its not, since __load() changes our
        # __class__ to something new:
        return getattr(self, attr)
    def __repr__(self):
        return '<%s in %r (not loaded yet)>' % (
            self.__reader_cls.__name__, '.../corpora/'+self.__name)
    def _unload(self):
        # If an exception occures during corpus loading then
        # '_unload' method may be unattached, so __getattr__ can be called;
        # we shouldn't trigger corpus loading again in this case.
        pass
def _make_bound_method(func, self):
"""
Magic for creating bound methods (used for _unload).
"""
class Foo(object):
def meth(self): pass
f = Foo()
bound_method = type(f.meth)
try:
return bound_method(func, self, self.__class__)
except TypeError: # python3
return bound_method(func, self)
| 39.648276 | 104 | 0.644808 |
3c265a3a4741958163dd36d8398244eeee3e26b0 | 15,837 | py | Python | tests/test_octodns_provider_powerdns.py | swisstxt/octodns-2 | 34e36372b5bd60e27c84fce18d5429978bdfa6b7 | [
"MIT"
] | null | null | null | tests/test_octodns_provider_powerdns.py | swisstxt/octodns-2 | 34e36372b5bd60e27c84fce18d5429978bdfa6b7 | [
"MIT"
] | null | null | null | tests/test_octodns_provider_powerdns.py | swisstxt/octodns-2 | 34e36372b5bd60e27c84fce18d5429978bdfa6b7 | [
"MIT"
] | null | null | null | #
#
#
from __future__ import absolute_import, division, print_function, \
unicode_literals
from json import loads, dumps
from os.path import dirname, join
from requests import HTTPError
from requests_mock import ANY, mock as requests_mock
from six import text_type
from unittest import TestCase
from octodns.record import Record
from octodns.provider.powerdns import PowerDnsProvider
from octodns.provider.yaml import YamlProvider
from octodns.zone import Zone
# Canned PowerDNS API zone response with no rrsets (an empty zone).
EMPTY_TEXT = '''
{
    "account": "",
    "dnssec": false,
    "id": "xunit.tests.",
    "kind": "Master",
    "last_check": 0,
    "masters": [],
    "name": "xunit.tests.",
    "notified_serial": 0,
    "rrsets": [],
    "serial": 2017012801,
    "soa_edit": "",
    "soa_edit_api": "INCEPTION-INCREMENT",
    "url": "api/v1/servers/localhost/zones/xunit.tests."
}
'''
# Full zone fixture with a populated rrsets list, loaded from disk.
with open('./tests/fixtures/powerdns-full-data.json') as fh:
    FULL_TEXT = fh.read()
class TestPowerDnsProvider(TestCase):
    """Tests for PowerDnsProvider using requests_mock to fake the PowerDNS
    HTTP API (no live server is contacted)."""

    def test_provider_version_detection(self):
        """Server version is detected once, cached, and re-detected only
        after the cached value is reset."""
        provider = PowerDnsProvider('test', 'non.existent', 'api-key',
                                    nameserver_values=['8.8.8.8.',
                                                      '9.9.9.9.'])
        # Bad auth
        with requests_mock() as mock:
            mock.get(ANY, status_code=401, text='Unauthorized')
            with self.assertRaises(Exception) as ctx:
                provider.powerdns_version
            self.assertTrue('unauthorized' in text_type(ctx.exception))
        # Api not found
        with requests_mock() as mock:
            mock.get(ANY, status_code=404, text='Not Found')
            with self.assertRaises(Exception) as ctx:
                provider.powerdns_version
            self.assertTrue('404' in text_type(ctx.exception))
        # Test version detection
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.1.10"})
            self.assertEquals(provider.powerdns_version, [4, 1, 10])
        # Test version detection for second time (should stay at 4.1.10)
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.2.0"})
            self.assertEquals(provider.powerdns_version, [4, 1, 10])
        # Test version detection
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.2.0"})
            # Reset version, so detection will try again
            provider._powerdns_version = None
            self.assertNotEquals(provider.powerdns_version, [4, 1, 10])

    def test_provider_version_config(self):
        """soa_edit_api and check_status_not_found follow the detected
        server version (4.1.x vs 4.2.x vs 4.3.x)."""
        provider = PowerDnsProvider('test', 'non.existent', 'api-key',
                                    nameserver_values=['8.8.8.8.',
                                                      '9.9.9.9.'])
        # Test version 4.1.0
        provider._powerdns_version = None
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.1.10"})
            self.assertEquals(provider.soa_edit_api, 'INCEPTION-INCREMENT')
            self.assertFalse(
                provider.check_status_not_found,
                'check_status_not_found should be false '
                'for version 4.1.x and below')
        # Test version 4.2.0
        provider._powerdns_version = None
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.2.0"})
            self.assertEquals(provider.soa_edit_api, 'INCEPTION-INCREMENT')
            self.assertTrue(
                provider.check_status_not_found,
                'check_status_not_found should be true for version 4.2.x')
        # Test version 4.3.0
        provider._powerdns_version = None
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.3.0"})
            self.assertEquals(provider.soa_edit_api, 'DEFAULT')
            self.assertTrue(
                provider.check_status_not_found,
                'check_status_not_found should be true for version 4.3.x')

    def test_provider(self):
        """End-to-end populate/plan/apply behaviour across auth failures,
        server errors, missing zones (per server version) and zone
        creation."""
        provider = PowerDnsProvider('test', 'non.existent', 'api-key',
                                    nameserver_values=['8.8.8.8.',
                                                      '9.9.9.9.'])
        # Test version detection
        with requests_mock() as mock:
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': "4.1.10"})
            self.assertEquals(provider.powerdns_version, [4, 1, 10])
        # Bad auth
        with requests_mock() as mock:
            mock.get(ANY, status_code=401, text='Unauthorized')
            with self.assertRaises(Exception) as ctx:
                zone = Zone('unit.tests.', [])
                provider.populate(zone)
            self.assertTrue('unauthorized' in text_type(ctx.exception))
        # General error
        with requests_mock() as mock:
            mock.get(ANY, status_code=502, text='Things caught fire')
            with self.assertRaises(HTTPError) as ctx:
                zone = Zone('unit.tests.', [])
                provider.populate(zone)
            self.assertEquals(502, ctx.exception.response.status_code)
        # Non-existent zone in PowerDNS <4.3.0 doesn't populate anything
        with requests_mock() as mock:
            mock.get(ANY, status_code=422,
                     json={'error': "Could not find domain 'unit.tests.'"})
            zone = Zone('unit.tests.', [])
            provider.populate(zone)
            self.assertEquals(set(), zone.records)
        # Non-existent zone in PowerDNS >=4.2.0 doesn't populate anything
        provider._powerdns_version = [4, 2, 0]
        with requests_mock() as mock:
            mock.get(ANY, status_code=404, text='Not Found')
            zone = Zone('unit.tests.', [])
            provider.populate(zone)
            self.assertEquals(set(), zone.records)
        provider._powerdns_version = [4, 1, 0]
        # The rest of this is messy/complicated b/c it's dealing with mocking
        expected = Zone('unit.tests.', [])
        source = YamlProvider('test', join(dirname(__file__), 'config'))
        source.populate(expected)
        expected_n = len(expected.records) - 2
        self.assertEquals(16, expected_n)
        # No diffs == no changes
        with requests_mock() as mock:
            mock.get(ANY, status_code=200, text=FULL_TEXT)
            zone = Zone('unit.tests.', [])
            provider.populate(zone)
            self.assertEquals(16, len(zone.records))
            changes = expected.changes(zone, provider)
            self.assertEquals(1, len(changes))
        # Used in a minute
        def assert_rrsets_callback(request, context):
            # Verify the PATCH body carries one rrset per expected record.
            data = loads(request.body)
            self.assertEquals(expected_n, len(data['rrsets']))
            return ''
        # No existing records -> creates for every record in expected
        with requests_mock() as mock:
            mock.get(ANY, status_code=200, text=EMPTY_TEXT)
            # post 201, is response to the create with data
            mock.patch(ANY, status_code=201, text=assert_rrsets_callback)
            plan = provider.plan(expected)
            self.assertEquals(expected_n, len(plan.changes))
            self.assertEquals(expected_n, provider.apply(plan))
            self.assertTrue(plan.exists)
        # Non-existent zone -> creates for every record in expected
        # OMG this is fucking ugly, probably better to ditch requests_mocks and
        # just mock things for real as it doesn't seem to provide a way to get
        # at the request params or verify that things were called from what I
        # can tell
        not_found = {'error': "Could not find domain 'unit.tests.'"}
        with requests_mock() as mock:
            # get 422's, unknown zone
            mock.get(ANY, status_code=422, text=dumps(not_found))
            # patch 422's, unknown zone
            mock.patch(ANY, status_code=422, text=dumps(not_found))
            # post 201, is response to the create with data
            mock.post(ANY, status_code=201, text=assert_rrsets_callback)
            plan = provider.plan(expected)
            self.assertEquals(expected_n, len(plan.changes))
            self.assertEquals(expected_n, provider.apply(plan))
            self.assertFalse(plan.exists)
        provider._powerdns_version = [4, 2, 0]
        with requests_mock() as mock:
            # get 404's, unknown zone
            mock.get(ANY, status_code=404, text='')
            # patch 404's, unknown zone
            mock.patch(ANY, status_code=404, text=dumps(not_found))
            # post 201, is response to the create with data
            mock.post(ANY, status_code=201, text=assert_rrsets_callback)
            plan = provider.plan(expected)
            self.assertEquals(expected_n, len(plan.changes))
            self.assertEquals(expected_n, provider.apply(plan))
            self.assertFalse(plan.exists)
        provider._powerdns_version = [4, 1, 0]
        with requests_mock() as mock:
            # get 422's, unknown zone
            mock.get(ANY, status_code=422, text=dumps(not_found))
            # patch 422's,
            data = {'error': "Key 'name' not present or not a String"}
            mock.patch(ANY, status_code=422, text=dumps(data))
            with self.assertRaises(HTTPError) as ctx:
                plan = provider.plan(expected)
                provider.apply(plan)
            response = ctx.exception.response
            self.assertEquals(422, response.status_code)
            self.assertTrue('error' in response.json())
        with requests_mock() as mock:
            # get 422's, unknown zone
            mock.get(ANY, status_code=422, text=dumps(not_found))
            # patch 500's, things just blew up
            mock.patch(ANY, status_code=500, text='')
            with self.assertRaises(HTTPError):
                plan = provider.plan(expected)
                provider.apply(plan)
        with requests_mock() as mock:
            # get 422's, unknown zone
            mock.get(ANY, status_code=422, text=dumps(not_found))
            # patch 500's, things just blew up
            mock.patch(ANY, status_code=422, text=dumps(not_found))
            # post 422's, something wrong with create
            mock.post(ANY, status_code=422, text='Hello Word!')
            with self.assertRaises(HTTPError):
                plan = provider.plan(expected)
                provider.apply(plan)

    def test_small_change(self):
        """Deleting a single record (the SPF record) produces exactly one
        change and sends the matching DELETE rrset."""
        provider = PowerDnsProvider('test', 'non.existent', 'api-key')
        expected = Zone('unit.tests.', [])
        source = YamlProvider('test', join(dirname(__file__), 'config'))
        source.populate(expected)
        self.assertEquals(18, len(expected.records))
        # A small change to a single record
        with requests_mock() as mock:
            mock.get(ANY, status_code=200, text=FULL_TEXT)
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': '4.1.0'})
            missing = Zone(expected.name, [])
            # Find and delete the SPF record
            for record in expected.records:
                if record._type != 'SPF':
                    missing.add_record(record)
            def assert_delete_callback(request, context):
                # The PATCH body must contain exactly the DELETE rrset for
                # the removed SPF record.
                self.assertEquals({
                    'rrsets': [{
                        'records': [
                            {'content': '"v=spf1 ip4:192.168.0.1/16-all"',
                             'disabled': False}
                        ],
                        'changetype': 'DELETE',
                        'type': 'SPF',
                        'name': 'spf.unit.tests.',
                        'ttl': 600
                    }]
                }, loads(request.body))
                return ''
            mock.patch(ANY, status_code=201, text=assert_delete_callback)
            plan = provider.plan(missing)
            self.assertEquals(1, len(plan.changes))
            self.assertEquals(1, provider.apply(plan))

    def test_existing_nameservers(self):
        """Root NS records configured via nameserver_values: matching
        remote NS rrsets yield no change, a ttl diff or an empty zone
        yields one change."""
        ns_values = ['8.8.8.8.', '9.9.9.9.']
        provider = PowerDnsProvider('test', 'non.existent', 'api-key',
                                    nameserver_values=ns_values)
        expected = Zone('unit.tests.', [])
        ns_record = Record.new(expected, '', {
            'type': 'NS',
            'ttl': 600,
            'values': ns_values
        })
        expected.add_record(ns_record)
        # no changes
        with requests_mock() as mock:
            data = {
                'rrsets': [{
                    'comments': [],
                    'name': 'unit.tests.',
                    'records': [
                        {
                            'content': '8.8.8.8.',
                            'disabled': False
                        },
                        {
                            'content': '9.9.9.9.',
                            'disabled': False
                        }
                    ],
                    'ttl': 600,
                    'type': 'NS'
                }, {
                    'comments': [],
                    'name': 'unit.tests.',
                    'records': [{
                        'content': '1.2.3.4',
                        'disabled': False,
                    }],
                    'ttl': 60,
                    'type': 'A'
                }]
            }
            mock.get(ANY, status_code=200, json=data)
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': '4.1.0'})
            unrelated_record = Record.new(expected, '', {
                'type': 'A',
                'ttl': 60,
                'value': '1.2.3.4'
            })
            expected.add_record(unrelated_record)
            plan = provider.plan(expected)
            self.assertFalse(plan)
            # remove it now that we don't need the unrelated change any longer
            expected._remove_record(unrelated_record)
        # ttl diff
        with requests_mock() as mock:
            data = {
                'rrsets': [{
                    'comments': [],
                    'name': 'unit.tests.',
                    'records': [
                        {
                            'content': '8.8.8.8.',
                            'disabled': False
                        },
                        {
                            'content': '9.9.9.9.',
                            'disabled': False
                        },
                    ],
                    'ttl': 3600,
                    'type': 'NS'
                }]
            }
            mock.get(ANY, status_code=200, json=data)
            mock.get('http://non.existent:8081/api/v1/servers/localhost',
                     status_code=200, json={'version': '4.1.0'})
            plan = provider.plan(expected)
            self.assertEquals(1, len(plan.changes))
        # create
        with requests_mock() as mock:
            data = {
                'rrsets': []
            }
            mock.get(ANY, status_code=200, json=data)
            plan = provider.plan(expected)
            self.assertEquals(1, len(plan.changes))
| 38.816176 | 79 | 0.531856 |
cce3c31828f5a4b45c4a3595dd5129052ffaed81 | 699 | py | Python | docker/storperf-master/storperf/workloads/_ssd_preconditioning.py | hashnfv/hashnfv-storperf | 9eebe429ae9ec58a593611063da5b541634f8932 | [
"Apache-2.0"
] | null | null | null | docker/storperf-master/storperf/workloads/_ssd_preconditioning.py | hashnfv/hashnfv-storperf | 9eebe429ae9ec58a593611063da5b541634f8932 | [
"Apache-2.0"
] | null | null | null | docker/storperf-master/storperf/workloads/_ssd_preconditioning.py | hashnfv/hashnfv-storperf | 9eebe429ae9ec58a593611063da5b541634f8932 | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright (c) 2015 EMC and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
from storperf.workloads import _base_workload
class _ssd_preconditioning(_base_workload._base_workload):
    """Workload that preconditions an SSD with a single random-write pass."""

    def setup(self):
        """Populate the fio option dict for the preconditioning run."""
        self.options.update({
            'name': 'ssd_preconditioning',
            'rw': 'randwrite',
            'loops': '1',
        })
| 38.833333 | 78 | 0.567954 |
215175067cd84152cc5dc18459dffae8113b643f | 6,823 | py | Python | env/chessandcard.py | jidiai/ai_lab | 2a9d5a940e3153e1698c2e1800e29723121b529b | [
"MIT"
] | null | null | null | env/chessandcard.py | jidiai/ai_lab | 2a9d5a940e3153e1698c2e1800e29723121b529b | [
"MIT"
] | null | null | null | env/chessandcard.py | jidiai/ai_lab | 2a9d5a940e3153e1698c2e1800e29723121b529b | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
# Time : 2021/10/25 下午2:28
# Author: Yahui Cui
import copy
from gym.utils import seeding
from env.simulators.game import Game
from env.obs_interfaces.observation import *
from utils.discrete import Discrete
class ChessAndCard(Game, DictObservation):
    """Adapter exposing a PettingZoo "classic" turn-based env through the Game API.

    The concrete env is named by ``conf['game_name']`` and imported dynamically
    from ``pettingzoo.classic``.
    """
    def __init__(self, conf):
        """Build the wrapped env from ``conf`` and capture the first observation."""
        super(ChessAndCard, self).__init__(conf['n_player'], conf['is_obs_continuous'], conf['is_act_continuous'],
                                           conf['game_name'], conf['agent_nums'], conf['obs_type'])
        self.seed = None
        self.done = False
        self.dones = {}
        self.step_cnt = 0
        self.max_step = int(conf["max_step"])
        env_name = conf["game_name"]
        # Dynamically import the requested PettingZoo classic environment:
        # effectively "from pettingzoo.classic import <env_name> as env_imported".
        import_path = "from pettingzoo.classic import " + env_name + " as env_imported"
        exec(import_path)
        func_name = "env_imported"
        self.env_core = None
        self.env_core = eval(func_name).env()
        if self.env_core is None:
            raise Exception("ChessAndCard env_core is None!")
        self.init_info = None
        self.won = {}
        self.n_return = [0] * self.n_player
        self.step_cnt = 0
        self.done = False
        self.env_core.reset()
        # Map PettingZoo agent names <-> dense integer player ids.
        self.player_id_map, self.player_id_reverse_map = self.get_player_id_map(self.env_core.agents)
        # set up action spaces
        self.new_action_spaces = self.load_action_space()
        self.joint_action_space = self.set_action_space()
        self.action_dim = self.joint_action_space
        self.input_dimension = self.env_core.observation_spaces
        # set up first all_observes
        obs, _, _, _ = self.env_core.last()
        self.current_state = obs
        self.all_observes = self.get_all_observes()
    def reset(self):
        """Reset the wrapped env and all bookkeeping; return initial observations."""
        self.step_cnt = 0
        self.done = False
        self.init_info = None
        self.env_core.reset()
        obs, _, _, _ = self.env_core.last()
        self.current_state = obs
        self.all_observes = self.get_all_observes()
        self.won = {}
        self.n_return = [0] * self.n_player
        return self.all_observes
    def step(self, joint_action):
        """Apply one joint action; return (all_observes, reward, done, info_before, info_after)."""
        self.is_valid_action(joint_action)
        info_before = self.step_before_info()
        joint_action_decode = self.decode(joint_action)
        self.env_core.step(joint_action_decode)
        obs, reward, _, info_after = self.env_core.last()
        # NOTE(review): the env's info payload is discarded and replaced with
        # an empty string -- confirm this is intentional.
        info_after = ''
        self.current_state = obs
        self.all_observes = self.get_all_observes()
        # print("debug all observes ", type(self.all_observes[0]["obs"]))
        self.set_n_return()
        self.step_cnt += 1
        done = self.is_terminal()
        return self.all_observes, reward, done, info_before, info_after
    def is_valid_action(self, joint_action):
        """Raise if the joint action is mis-sized or missing for the agent to act."""
        if len(joint_action) != self.n_player:
            raise Exception("Input joint action dimension should be {}, not {}.".format(
                self.n_player, len(joint_action)))
        current_player_id = self.player_id_map[self.env_core.agent_selection]
        # The agent currently selected to act must supply a non-None action.
        if (self.env_core.agent_selection in self.env_core.agents) and \
                (not self.env_core.dones[self.env_core.agent_selection]):
            if joint_action[current_player_id] is None or joint_action[current_player_id][0] is None:
                raise Exception("Action of current player is needed. Current player is {}, {}".format(
                    current_player_id, self.env_core.agent_selection))
        # Any action that is supplied must be a one-hot of the right width.
        for i in range(self.n_player):
            if joint_action[i] is None or joint_action[i][0] is None:
                continue
            if len(joint_action[i][0]) != self.joint_action_space[i][0].n:
                raise Exception("The input action dimension for player {} should be {}, not {}.".format(
                    i, self.joint_action_space[i][0].n, len(joint_action[i][0])))
    def step_before_info(self, info=''):
        """Hook returning extra info gathered before the env step (default: '')."""
        return info
    def is_terminal(self):
        """Flag the game done on step limit, empty agent list, or all agents done."""
        if self.step_cnt >= self.max_step:
            self.done = True
        if not self.env_core.agents:
            self.done = True
        if all(self.env_core.dones.values()):
            self.done = True
        return self.done
    def get_single_action_space(self, player_id):
        """Return the action-space list for one player id."""
        return self.joint_action_space[player_id]
    def load_action_space(self):
        """Re-key the env's per-agent Discrete action spaces by integer player id."""
        origin_action_spaces = self.env_core.action_spaces
        new_action_spaces = {}
        for key, action_space in origin_action_spaces.items():
            changed_key = self.player_id_map[key]
            new_action_spaces[changed_key] = Discrete(action_space.n)
        return new_action_spaces
    def set_action_space(self):
        """Build the joint action space as one single-element list per player."""
        action_space = [[self.new_action_spaces[i]] for i in range(self.n_player)]
        return action_space
    def check_win(self):
        """Return '-1' on a full tie, else the winning player index (or index list) as a string."""
        if self.all_equals(self.n_return):
            return '-1'
        index = []
        max_n = max(self.n_return)
        for i in range(len(self.n_return)):
            if self.n_return[i] == max_n:
                index.append(i)
        if len(index) == 1:
            return str(index[0])
        else:
            return str(index)
    def decode(self, joint_action):
        """Convert the acting player's one-hot action to a scalar index, or None."""
        if self.env_core.agent_selection not in self.env_core.agents or \
                self.env_core.dones[self.env_core.agent_selection]:
            return None
        current_player_id = self.player_id_map[self.env_core.agent_selection]
        if joint_action[current_player_id] is None or joint_action[current_player_id][0] is None:
            return None
        joint_action_decode = joint_action[current_player_id][0].index(1)
        return joint_action_decode
    def set_n_return(self):
        """Accumulate the env's per-agent rewards into per-player totals."""
        for player_key, player_reward in self.env_core.rewards.items():
            player_id = self.player_id_map[player_key]
            self.n_return[player_id] += player_reward
    def get_player_id_map(self, player_keys):
        """Return (name -> index, index -> name) maps for the agent names."""
        player_id_map = {}
        player_id_reverse_map = {}
        for i, key in enumerate(player_keys):
            player_id_map[key] = i
            player_id_reverse_map[i] = key
        return player_id_map, player_id_reverse_map
    def create_seed(self):
        """Draw a fresh 4-byte random seed via gym's seeding helper."""
        seed = seeding.create_seed(None, max_bytes=4)
        return seed
    def set_seed(self, seed=None):
        """Seed the wrapped env and remember the value."""
        self.env_core.seed(seed)
        self.seed = seed
    def get_all_observes(self):
        """Give every player a deep copy of the current state, tagged with its identity."""
        all_observes = []
        for i in range(self.n_player):
            player_name = self.player_id_reverse_map[i]
            each_obs = copy.deepcopy(self.current_state)
            each = {"obs": each_obs, "controlled_player_index": i, "controlled_player_name": player_name}
            all_observes.append(each)
        return all_observes
    def all_equals(self, list_to_compare):
        """True when every element of the list is identical."""
        return len(set(list_to_compare)) == 1
| 36.486631 | 114 | 0.630808 |
c35ebf2550d5775ccde04c0bfdcdfa902f306f89 | 7,500 | py | Python | admin/rdm_timestampadd/views.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
] | null | null | null | admin/rdm_timestampadd/views.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
] | 20 | 2020-03-24T16:48:03.000Z | 2022-03-08T22:38:38.000Z | admin/rdm_timestampadd/views.py | rdm-dev12/RDM-osf.io | 14d9a924b8c6bc7d79fd34b87830ffa29acafed1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from admin.base import settings
from admin.rdm.utils import RdmPermissionMixin, get_dummy_institution
from django.contrib.auth.mixins import UserPassesTestMixin
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.generic import ListView, View, TemplateView
from osf.models import Institution, Node, AbstractNode, Guid
from website.util import timestamp
import json
class InstitutionList(RdmPermissionMixin, UserPassesTestMixin, ListView):
    """Institution chooser for timestamp-add.

    Super admins see the paginated institution list; institution admins are
    redirected straight to their own institution's node list.
    """
    paginate_by = 25
    template_name = 'rdm_timestampadd/list.html'
    ordering = 'name'
    raise_exception = True
    model = Institution

    def test_func(self):
        """Allow only authenticated super admins or institution admins."""
        if not self.is_authenticated:
            return False
        return bool(self.is_super_admin or self.is_admin)

    def get(self, request, *args, **kwargs):
        """Render the institution list, or redirect an admin to their nodes."""
        user = self.request.user
        if self.is_super_admin:
            self.object_list = self.get_queryset()
            ctx = self.get_context_data()
            return self.render_to_response(ctx)
        elif self.is_admin:
            # Fall back to a dummy institution when the admin has none
            # affiliated; either way redirect to that institution's nodes.
            institution = user.affiliated_institutions.first() or get_dummy_institution()
            return redirect(reverse('timestampadd:nodes', args=[institution.id]))

    def get_queryset(self):
        """All institutions, ordered by name."""
        return Institution.objects.all().order_by(self.ordering)

    def get_context_data(self, **kwargs):
        """Paginate the queryset and expose it plus the OSF logo host."""
        query_set = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(query_set)
        paginator, page, query_set, is_paginated = self.paginate_queryset(query_set, page_size)
        kwargs.setdefault('institutions', query_set)
        kwargs.setdefault('page', page)
        kwargs.setdefault('logohost', settings.OSF_URL)
        return super(InstitutionList, self).get_context_data(**kwargs)
class InstitutionNodeList(RdmPermissionMixin, UserPassesTestMixin, ListView):
    """Paginated list of one institution's nodes (projects)."""
    template_name = 'rdm_timestampadd/node_list.html'
    paginate_by = 25
    ordering = '-modified'
    raise_exception = True
    model = Node
    def test_func(self):
        """validate user permissions"""
        institution_id = int(self.kwargs.get('institution_id'))
        return self.has_auth(institution_id)
    def get_queryset(self):
        """Nodes affiliated with the URL's institution, newest-modified first."""
        inst = self.kwargs['institution_id']
        return Node.objects.filter(affiliated_institutions=inst).order_by(self.ordering)
    def get_context_data(self, **kwargs):
        """Paginate and expose the nodes, the institution, and the OSF logo host."""
        query_set = kwargs.pop('object_list', self.object_list)
        page_size = self.get_paginate_by(query_set)
        paginator, page, query_set, is_paginated = self.paginate_queryset(query_set, page_size)
        kwargs.setdefault('nodes', query_set)
        kwargs.setdefault('institution', Institution.objects.get(id=self.kwargs['institution_id']))
        kwargs.setdefault('page', page)
        kwargs.setdefault('logohost', settings.OSF_URL)
        return super(InstitutionNodeList, self).get_context_data(**kwargs)
class TimeStampAddList(RdmPermissionMixin, TemplateView):
    """Page showing a project's timestamp-error file list."""
    template_name = 'rdm_timestampadd/timestampadd.html'
    ordering = 'provider'
    def get_context_data(self, **kwargs):
        """Collect the project's timestamp errors plus identifiers for the template."""
        ctx = super(TimeStampAddList, self).get_context_data(**kwargs)
        absNodeData = AbstractNode.objects.get(id=self.kwargs['guid'])
        ctx['init_project_timestamp_error_list'] = timestamp.get_error_list(absNodeData._id)
        ctx['project_title'] = absNodeData.title
        ctx['guid'] = self.kwargs['guid']
        ctx['institution_id'] = self.kwargs['institution_id']
        return ctx
class VerifyTimeStampAddList(RdmPermissionMixin, View):
    """Return the project's full timestamp file list as JSON."""
    def post(self, request, *args, **kwargs):
        """Echo the POST data back plus the provider list fetched as a node admin."""
        json_data = dict(self.request.POST.iterlists())
        # Copy every POST field (multi-valued lists) into the response context.
        ctx = {}
        for key in json_data.keys():
            ctx.update({key: json_data[key]})
        guid = Guid.objects.get(object_id=self.kwargs['guid'], content_type_id=ContentType.objects.get_for_model(AbstractNode).id)
        absNodeData = AbstractNode.objects.get(id=self.kwargs['guid'])
        # Node Admin
        # Temporarily swap request.user to the node's first admin contributor
        # just to read its user id, then restore the requesting admin.
        admin_osfuser_list = list(absNodeData.get_admin_contributors(absNodeData.contributors))
        source_user = self.request.user
        self.request.user = admin_osfuser_list[0]
        uid = self.request.user.id
        # Admin User
        self.request.user = source_user
        ctx['provider_list'] = timestamp.get_full_list(uid, guid._id, absNodeData)
        return HttpResponse(json.dumps(ctx), content_type='application/json')
class TimestampVerifyData(RdmPermissionMixin, View):
    """Verify a single file's timestamp on behalf of a node admin."""
    def test_func(self):
        """validate user permissions"""
        institution_id = int(self.kwargs.get('institution_id'))
        return self.has_auth(institution_id)
    def post(self, request, *args, **kwargs):
        """Run timestamp.check_file_timestamp as the node's first admin contributor."""
        json_data = dict(self.request.POST.iterlists())
        request_data = {}
        for key in json_data.keys():
            request_data.update({key: json_data[key]})
        # Flatten multi-valued POST fields to their first value.
        data = {}
        for key in request_data.keys():
            data.update({key: request_data[key][0]})
        absNodeData = AbstractNode.objects.get(id=self.kwargs['guid'])
        # Node Admin
        # Temporarily impersonate the node's first admin contributor for the
        # check, then restore the requesting admin.
        admin_osfuser_list = list(absNodeData.get_admin_contributors(absNodeData.contributors))
        source_user = self.request.user
        self.request.user = admin_osfuser_list[0]
        response = timestamp.check_file_timestamp(self.request.user.id, absNodeData, data)
        # Admin User
        self.request.user = source_user
        return HttpResponse(json.dumps(response), content_type='application/json')
class AddTimeStampResultList(RdmPermissionMixin, TemplateView):
    """Page showing the timestamp-error list after a timestamp add."""
    template_name = 'rdm_timestampadd/timestampadd.html'
    def test_func(self):
        """validate user permissions"""
        institution_id = int(self.kwargs.get('institution_id'))
        return self.has_auth(institution_id)
    def get_context_data(self, **kwargs):
        """Expose the project's current timestamp-error file list."""
        ctx = super(AddTimeStampResultList, self).get_context_data(**kwargs)
        guid = Guid.objects.get(object_id=self.kwargs['guid'], content_type_id=ContentType.objects.get_for_model(AbstractNode).id)
        ctx['provider_file_list'] = timestamp.get_error_list(guid._id)
        return ctx
class AddTimestampData(RdmPermissionMixin, View):
    """Add a timestamp token to a file on behalf of the node's admin."""
    def test_func(self):
        """validate user permissions"""
        institution_id = int(self.kwargs.get('institution_id'))
        return self.has_auth(institution_id)

    def post(self, request, *args, **kwargs):
        """Run timestamp.add_token as the node's first admin contributor.

        Returns a JSON body of the form ``{"result": ...}``.
        """
        absNodeData = AbstractNode.objects.get(id=self.kwargs['guid'])
        request_data = dict(self.request.POST.iterlists())
        # Flatten multi-valued POST fields to their first value.
        data = {key: values[0] for key, values in request_data.items()}
        # Change user Node-Admin: temporarily impersonate the node's first
        # admin contributor, and (consistently with the other timestamp
        # views in this module) restore the requesting admin afterwards.
        admin_osfuser_list = list(absNodeData.get_admin_contributors(absNodeData.contributors))
        source_user = self.request.user
        self.request.user = admin_osfuser_list[0]
        try:
            result = timestamp.add_token(self.request.user.id, absNodeData, data)
        finally:
            self.request.user = source_user
        return HttpResponse(
            json.dumps({'result': result}),
            content_type='application/json'
        )
| 40.106952 | 130 | 0.689067 |
f7101a8b865c1f87f28c1270c97bd9246634db2e | 69 | py | Python | emailtrail/__init__.py | akshaykmr/emailtrail | 8298e4b68c70f9b64198f54e4f3baf77d5fe54fa | [
"MIT"
] | 11 | 2020-04-05T07:24:46.000Z | 2021-01-10T06:58:00.000Z | emailtrail/__init__.py | akshaykmr/emailtrail | 8298e4b68c70f9b64198f54e4f3baf77d5fe54fa | [
"MIT"
] | 1 | 2021-09-09T16:46:18.000Z | 2021-09-09T16:46:18.000Z | emailtrail/__init__.py | akshaykmr/emailtrail | 8298e4b68c70f9b64198f54e4f3baf77d5fe54fa | [
"MIT"
] | 1 | 2020-10-26T17:50:10.000Z | 2020-10-26T17:50:10.000Z | from .module import * # noqa
from .models import Trail, Hop # noqa
| 17.25 | 37 | 0.695652 |
717d75b35f7a398934a5d8ee400199c4c2274e34 | 4,763 | py | Python | nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py | bopopescu/nova_audit | 1cd2901802f82d39411adfa04cf2f432ff3bf280 | [
"Apache-2.0"
] | 1 | 2020-02-21T19:19:11.000Z | 2020-02-21T19:19:11.000Z | nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py | bopopescu/nova_audit | 1cd2901802f82d39411adfa04cf2f432ff3bf280 | [
"Apache-2.0"
] | null | null | null | nova/tests/api/openstack/compute/plugins/v3/test_extension_info.py | bopopescu/nova_audit | 1cd2901802f82d39411adfa04cf2f432ff3bf280 | [
"Apache-2.0"
] | 1 | 2020-07-24T09:15:58.000Z | 2020-07-24T09:15:58.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import plugins
from nova.api.openstack.compute.plugins.v3 import extension_info
from nova import exception
from nova import policy
from nova import test
from nova.tests.api.openstack import fakes
class fake_extension(object):
    """Minimal stand-in for a loaded API extension plugin.

    The real extension exposes its description through the class docstring,
    so the instance's ``__doc__`` is set to mirror that.
    """

    def __init__(self, name, alias, description, namespace, version):
        self.name, self.alias = name, alias
        self.__doc__ = description
        self.namespace, self.version = namespace, version
# Fake loaded extensions keyed by alias; installed as
# LoadedExtensionInfo.extensions in ExtensionInfoTest below.
fake_extensions = {
    'ext1-alias': fake_extension('ext1', 'ext1-alias', 'ext1 description',
                                 'ext1 namespace', 1),
    'ext2-alias': fake_extension('ext2', 'ext2-alias', 'ext2 description',
                                 'ext2 namespace', 2),
    'ext3-alias': fake_extension('ext3', 'ext3-alias', 'ext3 description',
                                 'ext3 namespace', 1)
}
def fake_policy_enforce(context, action, target, do_raise=True):
    """Policy stub that authorizes every action unconditionally."""
    return True
def fake_policy_enforce_selective(context, action, target, do_raise=True):
    """Policy stub denying only the ext1-alias 'discoverable' check."""
    if action == 'compute_extension:v3:ext1-alias:discoverable':
        raise exception.NotAuthorized
    return True
class ExtensionInfoTest(test.NoDBTestCase):
    """Tests for the v3 extension-info API controller."""
    def setUp(self):
        """Build a controller whose loaded-extension registry is the fakes above."""
        super(ExtensionInfoTest, self).setUp()
        ext_info = plugins.LoadedExtensionInfo()
        ext_info.extensions = fake_extensions
        self.controller = extension_info.ExtensionInfoController(ext_info)
    def test_extension_info_list(self):
        """index() lists all three fake extensions with every field intact."""
        self.stubs.Set(policy, 'enforce', fake_policy_enforce)
        req = fakes.HTTPRequestV3.blank('/extensions')
        res_dict = self.controller.index(req)
        self.assertEqual(3, len(res_dict['extensions']))
        for e in res_dict['extensions']:
            self.assertIn(e['alias'], fake_extensions)
            self.assertEqual(e['name'], fake_extensions[e['alias']].name)
            self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
            self.assertEqual(e['description'],
                             fake_extensions[e['alias']].__doc__)
            self.assertEqual(e['namespace'],
                             fake_extensions[e['alias']].namespace)
            self.assertEqual(e['version'],
                             fake_extensions[e['alias']].version)
    def test_extension_info_show(self):
        """show() returns exactly the one requested extension."""
        self.stubs.Set(policy, 'enforce', fake_policy_enforce)
        req = fakes.HTTPRequestV3.blank('/extensions/ext1-alias')
        res_dict = self.controller.show(req, 'ext1-alias')
        self.assertEqual(1, len(res_dict))
        self.assertEqual(res_dict['extension']['name'],
                         fake_extensions['ext1-alias'].name)
        self.assertEqual(res_dict['extension']['alias'],
                         fake_extensions['ext1-alias'].alias)
        self.assertEqual(res_dict['extension']['description'],
                         fake_extensions['ext1-alias'].__doc__)
        self.assertEqual(res_dict['extension']['namespace'],
                         fake_extensions['ext1-alias'].namespace)
        self.assertEqual(res_dict['extension']['version'],
                         fake_extensions['ext1-alias'].version)
    def test_extension_info_list_not_all_discoverable(self):
        """index() hides extensions whose 'discoverable' policy check fails."""
        self.stubs.Set(policy, 'enforce', fake_policy_enforce_selective)
        req = fakes.HTTPRequestV3.blank('/extensions')
        res_dict = self.controller.index(req)
        # ext1-alias is denied by the selective policy, leaving two.
        self.assertEqual(2, len(res_dict['extensions']))
        for e in res_dict['extensions']:
            self.assertNotEqual('ext1-alias', e['alias'])
            self.assertIn(e['alias'], fake_extensions)
            self.assertEqual(e['name'], fake_extensions[e['alias']].name)
            self.assertEqual(e['alias'], fake_extensions[e['alias']].alias)
            self.assertEqual(e['description'],
                             fake_extensions[e['alias']].__doc__)
            self.assertEqual(e['namespace'],
                             fake_extensions[e['alias']].namespace)
            self.assertEqual(e['version'],
                             fake_extensions[e['alias']].version)
| 42.90991 | 78 | 0.636574 |
7bb86419d7eef865f279f7f4ae745c93a7d77598 | 14,677 | py | Python | codebase/third_party/spos_ofa/ofa/imagenet_classification/data_providers/cifar2.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | codebase/third_party/spos_ofa/ofa/imagenet_classification/data_providers/cifar2.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | codebase/third_party/spos_ofa/ofa/imagenet_classification/data_providers/cifar2.py | qdmy/Adelaidet-Quantization | e88cf41c62dc3944d2bd57ffc1d365535b0a1c4b | [
"Apache-2.0"
] | null | null | null | from typing import Any, Callable, Optional, Tuple
import numpy as np
import torch
import copy
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from PIL import Image
from torchvision.datasets import CIFAR100
from torch.utils.data.dataset import Dataset, T_co
# Mapping from CIFAR-100 fine label (list index) to its coarse superclass id.
class2superclass = [4, 1, 14, 8, 0, 6, 7, 7, 18, 3,
                    3, 14, 9, 18, 7, 11, 3, 9, 7, 11,
                    6, 11, 5, 10, 7, 6, 13, 15, 3, 15,
                    0, 11, 1, 10, 12, 14, 16, 9, 11, 5,
                    5, 19, 8, 8, 15, 13, 14, 17, 18, 10,
                    16, 4, 17, 4, 2, 0, 17, 4, 18, 17,
                    10, 3, 2, 12, 12, 16, 12, 1, 9, 19,
                    2, 10, 0, 1, 16, 12, 9, 13, 15, 13,
                    16, 19, 2, 4, 6, 19, 5, 5, 8, 19,
                    18, 1, 2, 15, 6, 0, 17, 8, 14, 13]


def sparse2coarse(targets):
    """Convert a tensor of CIFAR-100 fine targets to coarse (superclass) targets.

    Usage:
        trainset = torchvision.datasets.CIFAR100(path)
        trainset.targets = sparse2coarse(trainset.targets)
    """
    # Build the lookup table on the same device as the targets and index it.
    mapping = torch.as_tensor(class2superclass, dtype=torch.long,
                              device=targets.device)
    return mapping[targets]
def get_cifar_transforms(dataset):
    """Return (train_transform, test_transform) for ``dataset``.

    ``dataset`` must be "cifar10" or "cifar100"; the train pipeline adds
    random crop + horizontal flip on top of the shared normalization.

    Raises:
        ValueError: if ``dataset`` is not a supported CIFAR variant
            (previously this fell through to a NameError on CIFAR_MEAN).
    """
    if dataset == "cifar100":
        CIFAR_MEAN = [0.50705882, 0.48666667, 0.44078431]
        CIFAR_STD = [0.26745098, 0.25568627, 0.27607843]
    elif dataset == "cifar10":
        CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
        CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
    else:
        raise ValueError("Unsupported dataset: {}".format(dataset))
    train_transform = transforms.Compose(
        [
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
        ]
    )
    test_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(CIFAR_MEAN, CIFAR_STD), ]
    )
    return train_transform, test_transform
def get_cifar_train_loader(args, train_transform):
    """Build a shuffled training DataLoader for CIFAR-10/100.

    NOTE(review): any other ``args.dataset`` value leaves ``o_trainset``
    unbound and raises NameError below -- confirm callers only pass
    "cifar10"/"cifar100".
    """
    if args.dataset == "cifar10":
        o_trainset = datasets.CIFAR10(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    elif args.dataset == "cifar100":
        o_trainset = datasets.CIFAR100(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    train_sampler = torch.utils.data.RandomSampler(o_trainset)
    # A sampler is always supplied here, so ``shuffle`` evaluates to False;
    # shuffling is delegated to the RandomSampler.
    train_loader = torch.utils.data.DataLoader(
        o_trainset,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        pin_memory=True,
        sampler=train_sampler,
        num_workers=args.num_workers,
    )
    return train_loader
def get_cifar_train_val_loader(args, train_transform):
    """Split the CIFAR train set into train/val loaders by ``args.train_portion``.

    NOTE(review): the validation split reuses the training transforms
    (augmentation included) since both loaders wrap the same dataset object.
    """
    if args.dataset == "cifar10":
        o_trainset = datasets.CIFAR10(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    elif args.dataset == "cifar100":
        o_trainset = datasets.CIFAR100(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    num_train = len(o_trainset)
    indices = list(range(num_train))
    # First `split` indices go to train, the rest to validation.
    split = int(np.floor(args.train_portion * num_train))
    train_sampler = torch.utils.data.SubsetRandomSampler(indices[:split])
    val_sampler = torch.utils.data.SubsetRandomSampler(indices[split:num_train])
    train_loader = torch.utils.data.DataLoader(
        o_trainset,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        pin_memory=True,
        sampler=train_sampler,
        num_workers=args.num_workers,
    )
    val_loader = torch.utils.data.DataLoader(
        o_trainset,
        batch_size=args.batch_size,
        shuffle=(val_sampler is None),
        pin_memory=True,
        sampler=val_sampler,
        num_workers=args.num_workers,
    )
    return train_loader, val_loader
def get_cifar_superclass_train_loader(args, train_transform):
    """Training loader; for cifar100 it uses the superclass-aware dataset."""
    if args.dataset == "cifar10":
        o_trainset = datasets.CIFAR10(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    elif args.dataset == "cifar100":
        o_trainset = CIFAR100Coarse(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
        # Move superclass bookkeeping tensors to GPU; requires CUDA.
        o_trainset.superclass_masks = torch.tensor(
            o_trainset.superclass_masks
        ).cuda()
        o_trainset.coarse_labels = torch.tensor(o_trainset.coarse_labels).cuda()
    train_sampler = torch.utils.data.RandomSampler(o_trainset)
    # Sampler is always non-None, so shuffle evaluates to False by design.
    train_loader = torch.utils.data.DataLoader(
        o_trainset,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        pin_memory=True,
        sampler=train_sampler,
        num_workers=args.num_workers,
    )
    return train_loader
def split_train_val(dataset, split_ratio):
    """Split each superclass's sample indices into train/val parts.

    For every per-superclass index list, the first ``floor(split_ratio * n)``
    entries go to train and the remainder to validation.

    Returns:
        (sorted global train indices, sorted global val indices,
         per-superclass local train indices, per-superclass local val indices)
    """
    global_train, global_val = [], []
    local_train, local_val = [], []
    for indices in dataset.superclass_samples_indices:
        n = len(indices)
        cut = int(np.floor(split_ratio * n))
        global_train.extend(indices[:cut])
        global_val.extend(indices[cut:])
        # Positions *within* this superclass, split at the same point.
        local = list(range(n))
        local_train.append(local[:cut])
        local_val.append(local[cut:])
    return (
        sorted(global_train),
        sorted(global_val),
        local_train,
        local_val,
    )
def get_cifar_superclass_train_val_loader(args, train_transform, test_transform):
    """Build superclass-aware train/val loaders for cifar100 (only)."""
    if args.dataset == "cifar100":
        dataset = CIFAR100Coarse(
            root=args.cifar_root, train=True, download=True, transform=train_transform, test_transform=test_transform
        )
        # Move superclass bookkeeping tensors to GPU; requires CUDA.
        dataset.superclass_masks = torch.tensor(dataset.superclass_masks).cuda()
        dataset.coarse_labels = torch.tensor(dataset.coarse_labels).cuda()
        # dataset.superclass_samples_indices = torch.tensor(
        #     dataset.superclass_samples_indices, device=device
        # )
        (
            total_train_indices,
            total_val_indices,
            superclass_train_indices,
            superclass_val_indices,
        ) = split_train_val(dataset, args.train_portion)
        train_dataset = Subset(dataset, total_train_indices, superclass_train_indices, train=True)
        val_dataset = Subset(dataset, total_val_indices, superclass_val_indices, train=False)
    else:
        raise NotImplementedError
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        pin_memory=True,
        num_workers=args.num_workers,
    )
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        pin_memory=True,
        num_workers=args.num_workers,
    )
    return train_loader, val_loader
def get_cifar_test_loader(args, test_transform):
    """Build a sequential (unshuffled) test DataLoader for CIFAR-10/100."""
    if args.dataset == "cifar10":
        o_testset = datasets.CIFAR10(
            root=args.cifar_root, train=False, download=True, transform=test_transform
        )
    elif args.dataset == "cifar100":
        o_testset = datasets.CIFAR100(
            root=args.cifar_root, train=False, download=True, transform=test_transform
        )
    test_sampler = torch.utils.data.SequentialSampler(o_testset)
    # Sampler is always non-None, so shuffle evaluates to False by design.
    test_loader = torch.utils.data.DataLoader(
        o_testset,
        batch_size=args.batch_size,
        shuffle=(test_sampler is None),
        pin_memory=True,
        sampler=test_sampler,
        num_workers=args.num_workers,
    )
    return test_loader
def get_cifar_superclass_test_loader(args, test_transform):
    """Sequential test loader; for cifar100 it uses the superclass-aware dataset."""
    if args.dataset == "cifar10":
        o_testset = datasets.CIFAR10(
            root=args.cifar_root, train=False, download=True, transform=test_transform
        )
    elif args.dataset == "cifar100":
        o_testset = CIFAR100Coarse(
            root=args.cifar_root, train=False, download=True, transform=test_transform, test_transform=test_transform
        )
        # Move superclass bookkeeping tensors to GPU; requires CUDA.
        o_testset.superclass_masks = torch.tensor(
            o_testset.superclass_masks
        ).cuda()
        o_testset.coarse_labels = torch.tensor(o_testset.coarse_labels).cuda()
    test_sampler = torch.utils.data.SequentialSampler(o_testset)
    test_loader = torch.utils.data.DataLoader(
        o_testset,
        batch_size=args.batch_size,
        shuffle=(test_sampler is None),
        pin_memory=True,
        sampler=test_sampler,
        num_workers=args.num_workers,
    )
    return test_loader
def get_cifar_bn_subset_loader(args, train_transform):
    """Loader over a seeded random subset of the train set (for BN recalibration)."""
    if args.dataset == "cifar10":
        o_trainset = datasets.CIFAR10(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    elif args.dataset == "cifar100":
        o_trainset = datasets.CIFAR100(
            root=args.cifar_root, train=True, download=True, transform=train_transform
        )
    n_samples = len(o_trainset)
    # Seeded generator makes the chosen subset reproducible across runs.
    g = torch.Generator()
    g.manual_seed(args.seed)
    rand_indexes = torch.randperm(n_samples, generator=g).tolist()
    chosen_indexes = rand_indexes[: args.bn_subset_size]
    sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
    train_loader = torch.utils.data.DataLoader(
        o_trainset,
        batch_size=args.batch_size,
        shuffle=(sub_sampler is None),
        pin_memory=True,
        sampler=sub_sampler,
        num_workers=args.num_workers,
    )
    return train_loader
class CIFAR100Coarse(CIFAR100):
    """CIFAR-100 with 20-superclass (coarse) bookkeeping.

    In train mode items are served from the full dataset; in test mode items
    come only from the superclass selected via set_superclass_id(), and the
    separate ``test_transform`` is applied instead of ``transform``.
    """
    def __init__(
        self, root, train=True, transform=None, target_transform=None, download=False, test_transform=None
    ):
        super(CIFAR100Coarse, self).__init__(
            root, train, transform, target_transform, download
        )
        self.test_transform = test_transform
        # update labels
        # Coarse (superclass) label for each sample, via the module-level map.
        self.coarse_labels = np.array(class2superclass)
        self.super_targets = self.coarse_labels[self.targets]
        # update classes
        self.n_superclass = 20
        self.superclass_id = 0
        # Human-readable fine-class names grouped by superclass (row i lists
        # the five fine classes of superclass i); overrides CIFAR100.classes.
        self.classes = [
            ["beaver", "dolphin", "otter", "seal", "whale"],
            ["aquarium_fish", "flatfish", "ray", "shark", "trout"],
            ["orchid", "poppy", "rose", "sunflower", "tulip"],
            ["bottle", "bowl", "can", "cup", "plate"],
            ["apple", "mushroom", "orange", "pear", "sweet_pepper"],
            ["clock", "keyboard", "lamp", "telephone", "television"],
            ["bed", "chair", "couch", "table", "wardrobe"],
            ["bee", "beetle", "butterfly", "caterpillar", "cockroach"],
            ["bear", "leopard", "lion", "tiger", "wolf"],
            ["bridge", "castle", "house", "road", "skyscraper"],
            ["cloud", "forest", "mountain", "plain", "sea"],
            ["camel", "cattle", "chimpanzee", "elephant", "kangaroo"],
            ["fox", "porcupine", "possum", "raccoon", "skunk"],
            ["crab", "lobster", "snail", "spider", "worm"],
            ["baby", "boy", "girl", "man", "woman"],
            ["crocodile", "dinosaur", "lizard", "snake", "turtle"],
            ["hamster", "mouse", "rabbit", "shrew", "squirrel"],
            ["maple_tree", "oak_tree", "palm_tree", "pine_tree", "willow_tree"],
            ["bicycle", "bus", "motorcycle", "pickup_truck", "train"],
            ["lawn_mower", "rocket", "streetcar", "tank", "tractor"],
        ]
        # Per-superclass views: data slices, targets, 0/1 fine-class masks,
        # and the global sample indices belonging to each superclass.
        self.superclass_masks = []
        self.superclass_data = []
        self.superclass_targets = []
        self.superclass_samples_indices = []
        for i in range(20):
            idx = (self.super_targets == i).nonzero()[0]
            self.superclass_data.append(self.data[idx])
            self.superclass_targets.append(np.array(self.targets)[idx].tolist())
            superclass_mask = (self.coarse_labels == i).astype("int32")
            self.superclass_masks.append(superclass_mask)
            self.superclass_samples_indices.append(idx.tolist())
        self.superclass_masks = np.vstack(self.superclass_masks)
    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index
        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        if self.train:
            img, target = self.data[index], self.targets[index]
        else:
            # Test mode indexes into the currently selected superclass only.
            img, target = (
                self.superclass_data[self.superclass_id][index],
                self.superclass_targets[self.superclass_id][index],
            )
        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)
        if self.train:
            if self.transform is not None:
                img = self.transform(img)
        else:
            if self.test_transform is not None:
                img = self.test_transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def __len__(self) -> int:
        # Train mode covers the whole set; test mode only the active superclass.
        if self.train:
            data = self.data
        else:
            data = self.superclass_data[self.superclass_id]
        return len(data)
    def set_superclass_id(self, superclass_index: int):
        """Select which superclass test-mode indexing and length refer to."""
        self.superclass_id = superclass_index
class Subset(Dataset):
    r"""Train/val view over a dataset with per-superclass index lists.

    Arguments:
        dataset (Dataset): The whole Dataset
        indices (sequence): Indices in the whole set selected for subset

    In train mode items are served through ``indices`` (global positions in
    the wrapped dataset); otherwise items come from the index list of the
    currently selected superclass.
    """

    def __init__(self, dataset, indices, subperclass_indices, train):
        # Deep-copy so toggling `train` / superclass id here does not mutate
        # the dataset object shared with other Subset instances.
        self.dataset = copy.deepcopy(dataset)
        self.dataset.train = train
        self.indices = indices
        self.superclass_indices = subperclass_indices
        self.superclass_masks = dataset.superclass_masks
        self.n_superclass = dataset.n_superclass
        self.superclass_id = 0
        self.train = train

    def _active_indices(self):
        # Index list in effect for the current mode.
        return (self.indices if self.train
                else self.superclass_indices[self.superclass_id])

    def __getitem__(self, idx):
        return self.dataset[self._active_indices()[idx]]

    def __len__(self):
        return len(self._active_indices())

    def set_superclass_id(self, superclass_index: int):
        """Select the active superclass on this view and the wrapped dataset."""
        self.superclass_id = superclass_index
        self.dataset.set_superclass_id(superclass_index)
| 36.419355 | 117 | 0.633985 |
fe05905f74e18c9930938a46cd7dc883db126f57 | 11,693 | py | Python | ycm_extra_conf.py | amiller27/dotfiles | ba17fbc2dab044999f04db106cb1b2f92e953d1f | [
"Apache-2.0"
] | null | null | null | ycm_extra_conf.py | amiller27/dotfiles | ba17fbc2dab044999f04db106cb1b2f92e953d1f | [
"Apache-2.0"
] | 10 | 2019-01-23T20:51:41.000Z | 2020-04-13T23:43:44.000Z | ycm_extra_conf.py | amiller27/dotfiles | ba17fbc2dab044999f04db106cb1b2f92e953d1f | [
"Apache-2.0"
] | 2 | 2019-08-02T20:14:46.000Z | 2020-04-28T19:07:15.000Z | # -*- coding: utf-8 -*-
##########################################################################
# YouCompleteMe configuration for ROS #
# Original Author: Gaël Ecorchard (2015) #
# Heavily Modified by: Aaron Miller #
# #
# The file requires the definition of the $ROS_WORKSPACE variable in #
# your shell. #
# Name this file .ycm_extra_conf.py and place it in $ROS_WORKSPACE to #
# use it. #
# #
# Tested with Ubuntu 14.04 and Indigo. #
# #
# License: CC0 #
##########################################################################
import os
import ycm_core
def find_workspace_above(dirname):
    '''
    Return the first workspace at or above `dirname`, or None if there isn't one
    '''
    # Iterative walk up the directory tree (the original recursed).
    current = dirname
    while True:
        with open('/tmp/ycm.log', 'a') as logfile:
            logfile.write('Testing directory {} for workspace...'.format(current))
            # .catkin_workspace is dropped by `catkin_make`,
            # .catkin_tools by `catkin build`.
            if (os.path.exists(os.path.join(current, '.catkin_workspace'))
                    or os.path.exists(os.path.join(current, '.catkin_tools'))):
                logfile.write('Succeeded\n')
                return current
            logfile.write('Failed\n')
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding a workspace marker.
            return None
        current = parent
def is_ignored(dirname, workspace_dir):
    """Walk upward from `dirname` looking for a CATKIN_IGNORE marker,
    stopping as soon as the path no longer lies inside `workspace_dir`
    (substring containment, as in the original)."""
    current = dirname
    while workspace_dir in current:
        if os.path.exists(os.path.join(current, 'CATKIN_IGNORE')):
            return True
        parent = os.path.dirname(current)
        if parent == current:
            return False
        current = parent
    return False
def GetRosIncludePaths(filename):
    """Return a list of potential include directories.

    Collected from (in order): every workspace's devel/include, every
    'include' directory under each workspace's src/ tree (skipping
    CATKIN_IGNORE'd packages), every installed ROS package's include dir,
    and /opt/ros/<distro>/include.  Returns [] when rospkg is unavailable.
    """
    try:
        import rospkg
    except ImportError:
        return []
    rospack = rospkg.RosPack()
    includes = []
    # Workspaces reachable from the ROS search paths plus the one that
    # contains `filename`; `- { None }` drops lookups that found nothing.
    workspace_paths = {
        find_workspace_above(path) for path in rospkg.get_ros_paths()
    }.union({ find_workspace_above(filename) }) - { None }
    includes.extend(
        os.path.join(path, 'devel', 'include')
        for path in workspace_paths)
    for workspace_dir in workspace_paths:
        for dirpath, dirnames, _ in os.walk(
                os.path.join(workspace_dir, 'src'), followlinks=False):
            if is_ignored(dirpath, workspace_dir): continue
            for dirname in dirnames:
                if dirname == 'include':
                    includes.append(os.path.join(dirpath, dirname))
    # Installed (non-workspace) ROS packages.
    for p in rospack.list():
        if os.path.exists(rospack.get_path(p) + '/include'):
            includes.append(rospack.get_path(p) + '/include')
    for distribution in os.listdir('/opt/ros'):
        includes.append('/opt/ros/' + distribution + '/include')
    # Debug trace of everything we found.
    with open('/tmp/ycm.log', 'a') as f:
        f.write('########## INCLUDES: ############\n')
        for l in includes:
            f.write(l + '\n')
    return includes
def GetRosIncludeFlags(filename):
    """Build '-isystem <dir>' compiler flag pairs for every ROS include path."""
    flags = []
    for include_dir in GetRosIncludePaths(filename):
        flags.extend(('-isystem', include_dir))
    return flags
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
# You can get CMake to generate the compilation_commands.json file for you by
# adding:
#   set(CMAKE_EXPORT_COMPILE_COMMANDS 1)
# to your CMakeLists.txt file or by once entering
#   catkin config --cmake-args '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON'
# in your shell.
DEFAULT_FLAGS = [
    '-Wall',
    '-Wextra',
    # '-Werror',
    '-Wno-long-long',
    '-Wno-variadic-macros',
    '-fexceptions',
    '-DNDEBUG',
    '-DROS_ASSERT_ENABLED',
    # THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know
    # which language to use when compiling headers. So it will guess. Badly. So
    # C++ headers will be compiled as C headers. You don't want that so ALWAYS
    # specify a "-std=<something>".
    # For a C project, you would set this to something like 'c99' instead of
    # 'c++11'.
    '-std=c++17',
    # ...and the same thing goes for the magic -x option which specifies the
    # language that the files to be compiled are written in. This is mostly
    # relevant for c++ headers.
    # For a C project, you would set this to 'c' instead of 'c++'.
    '-x',
    'c++',
    '-I',
    '.',
    # include third party libraries
    '-isystem', '/usr/include/eigen3',
    '-isystem', '/usr/include/OGRE',
    '-isystem', '/usr/include/qt4',
    # NOTE(review): the listdir below runs at import time and raises
    # FileNotFoundError on machines without /usr/include/qt4 — confirm this
    # is acceptable for all deployment targets.
] + sum([['-isystem', os.path.join('/usr/include/qt4', d)]
         for d in os.listdir('/usr/include/qt4')],
        [])
def GetCompilationDatabaseFolder(filename):
    """Return the directory potentially containing compilation_commands.json

    Return the absolute path to the folder (NOT the file!) containing the
    compile_commands.json file to use that instead of 'flags'. See here for
    more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html.
    The compilation_commands.json for the given file is returned by getting
    the package the file belongs to.  Returns '' when rospkg is unavailable
    or the file is not inside a ROS package.
    """
    try:
        import rospkg
    except ImportError:
        return ''
    pkg_name = rospkg.get_package_name(filename)
    if not pkg_name:
        return ''
    # catkin places each package's build artifacts (including the compile
    # database) under <workspace>/build/<package>.
    # Fix: renamed the local from `dir` to avoid shadowing the builtin.
    db_dir = os.path.join(os.path.expandvars('$ROS_WORKSPACE'), 'build', pkg_name)
    return db_dir
def GetDatabase(compilation_database_folder):
    """Return a ycm_core.CompilationDatabase for the folder, or None when it
    holds no compile_commands.json."""
    db_file = os.path.join(compilation_database_folder, 'compile_commands.json')
    if not os.path.exists(db_file):
        return None
    return ycm_core.CompilationDatabase(compilation_database_folder)
# Extensions considered "source files" when pairing a header with an
# implementation file to borrow compile flags from.
SOURCE_EXTENSIONS = ['.cpp', '.cxx', '.cc', '.c', '.m', '.mm']
def DirectoryOfThisScript():
    """Absolute path of the directory holding this configuration file."""
    script_path = os.path.abspath(__file__)
    return os.path.dirname(script_path)
def MakeRelativePathsInFlagsAbsolute(flags, working_directory):
    """Rewrite relative include/sysroot paths in compiler flags so they are
    absolute with respect to `working_directory`.

    Handles both the split form ('-I', 'path') and the fused form
    ('-Ipath', '--sysroot=path').  With no working directory the flags are
    returned as a shallow copy, untouched.
    """
    if not working_directory:
        return list(flags)
    path_prefixes = ['-isystem', '-I', '-iquote', '--sysroot=']
    absolute_flags = []
    expect_path = False
    for flag in flags:
        updated = flag
        if expect_path:
            # Previous flag was a bare path option: this token is its path.
            expect_path = False
            if not flag.startswith('/'):
                updated = os.path.join(working_directory, flag)
        for prefix in path_prefixes:
            if flag == prefix:
                expect_path = True
                break
            if flag.startswith(prefix):
                tail = flag[len(prefix):]
                # os.path.join leaves already-absolute tails alone.
                updated = prefix + os.path.join(working_directory, tail)
                break
        if updated:
            absolute_flags.append(updated)
    return absolute_flags
def IsHeaderFile(filename):
    """True when `filename` carries a C/C++ header extension."""
    return os.path.splitext(filename)[1] in ('.h', '.hxx', '.hpp', '.hh')
def GetCompilationInfoForHeaderSameDir(headerfile, database):
    """Return compile flags for a source file with the same basename in the
    same directory as `headerfile`, or None when no such file yields flags."""
    base_path = os.path.splitext(headerfile)[0]
    for ext in SOURCE_EXTENSIONS:
        candidate = base_path + ext
        if not os.path.exists(candidate):
            continue
        info = database.GetCompilationInfoForFile(candidate)
        if info.compiler_flags_:
            return info
    return None
def GetCompilationInfoForHeaderRos(headerfile, database):
    """Return the compile flags for the corresponding src file in ROS

    Return the compile flags for the source file corresponding to the header
    file in the ROS package where the header file is.  Returns None when
    rospkg is unavailable, the header is not in a package, or no matching
    source file with usable flags is found.
    """
    try:
        import rospkg
    except ImportError:
        return None
    pkg_name = rospkg.get_package_name(headerfile)
    if not pkg_name:
        return None
    try:
        pkg_path = rospkg.RosPack().get_path(pkg_name)
    except rospkg.ResourceNotFound:
        return None
    filename_no_ext = os.path.splitext(headerfile)[0]
    hdr_basename_no_ext = os.path.basename(filename_no_ext)
    # Scan the whole package tree for a source file whose basename matches
    # the header's, and use the first one the database has flags for.
    for path, dirs, files in os.walk(pkg_path):
        for src_filename in files:
            src_basename_no_ext = os.path.splitext(src_filename)[0]
            if hdr_basename_no_ext != src_basename_no_ext:
                continue
            for extension in SOURCE_EXTENSIONS:
                if src_filename.endswith(extension):
                    compilation_info = database.GetCompilationInfoForFile(
                        path + os.path.sep + src_filename)
                    if compilation_info.compiler_flags_:
                        return compilation_info
    return None
def GetCompilationInfoForFile(filename, database):
    """Look up compile flags for `filename` in `database`, with header fallbacks.

    The compilation_commands.json file generated by CMake does not have
    entries for header files. So we do our best by asking the db for flags
    for a corresponding source file, if any. If one exists, the flags for
    that file should be good enough.
    Corresponding source files are looked for in the same directory first,
    then anywhere in the same ROS package.
    """
    if IsHeaderFile(filename):
        # Look in the same directory.
        compilation_info = GetCompilationInfoForHeaderSameDir(
            filename, database)
        if compilation_info:
            return compilation_info
        # Look in the package.
        compilation_info = GetCompilationInfoForHeaderRos(filename, database)
        if compilation_info:
            return compilation_info
    # Non-header files (or headers with no sibling source) go straight to
    # the database; may still return an entry with empty flags.
    return database.GetCompilationInfoForFile(filename)
def FlagsForFile(filename):
    """YouCompleteMe entry point: return the compile flags for `filename`.

    Prefers flags from the package's compilation database (made absolute
    against the recorded working directory); falls back to DEFAULT_FLAGS
    plus the discovered ROS include directories.  Progress is traced to
    /tmp/ycm_flags.log.
    """
    with open('/tmp/ycm_flags.log', 'a') as logfile:
        logfile.write('COMPILATION FLAGS FOR {}\n'.format(filename))
        database = GetDatabase(GetCompilationDatabaseFolder(filename))
        logfile.write('FOUND DATABASE? {}\n'.format(bool(database)))
        if database:
            # Bear in mind that compilation_info.compiler_flags_ does NOT return a
            # python list, but a "list-like" StringVec object
            compilation_info = GetCompilationInfoForFile(filename, database)
            logfile.write(
                'FOUND COMPILATION_INFO? {}\n'.format(
                    bool(compilation_info)))
            if compilation_info:
                final_flags = MakeRelativePathsInFlagsAbsolute(
                    compilation_info.compiler_flags_,
                    compilation_info.compiler_working_dir_)
                # Bug fix: this previously referenced an undefined name
                # `default_flags`, raising NameError on every database hit;
                # the module-level DEFAULT_FLAGS constant was intended.
                final_flags += DEFAULT_FLAGS
            else:
                # Return the default flags defined above.
                final_flags = DEFAULT_FLAGS + GetRosIncludeFlags(filename)
        else:
            relative_to = DirectoryOfThisScript()
            final_flags = MakeRelativePathsInFlagsAbsolute(
                DEFAULT_FLAGS + GetRosIncludeFlags(filename), relative_to)
        logfile.write('FLAGS: {}\n'.format(final_flags))
    return {
        'flags': final_flags,
        'do_cache': True
    }
| 36.77044 | 83 | 0.595912 |
a90f07b564716efb0eccbf6a4c2ebdeca50db04e | 666 | py | Python | ASE/celery.py | 5wimming/ase | 0d506add3a83caf9afd01f216c256c4678010918 | [
"Apache-2.0"
] | 10 | 2021-07-13T02:15:15.000Z | 2022-02-21T07:27:54.000Z | ASE/celery.py | 5wimming/ase | 0d506add3a83caf9afd01f216c256c4678010918 | [
"Apache-2.0"
] | 3 | 2022-02-21T08:59:01.000Z | 2022-03-05T02:45:34.000Z | ASE/celery.py | 5wimming/ase | 0d506add3a83caf9afd01f216c256c4678010918 | [
"Apache-2.0"
] | 3 | 2021-08-19T07:54:39.000Z | 2022-02-21T07:27:55.000Z | # coding:utf-8
from __future__ import absolute_import, unicode_literals
import os
# NOTE(review): `platforms` and `settings` are imported but unused here.
from celery import Celery, platforms
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ASE.settings')
# Celery application backed by a local Redis instance for both the result
# backend and the message broker.
# NOTE(review): the commented-out AMQP URL below embeds credentials; avoid
# committing secrets, even in comments.
app = Celery('ASE', backend='redis://127.0.0.1:6379/0', broker='redis://127.0.0.1:6379/0')  # amqp://asemq:Ase.mq.005 @127.0.0.1:5672/ase
# Read the Celery configuration from Django's settings.py.
app.config_from_object('django.conf:settings')
# Auto-discover tasks from all registered Django apps.
app.autodiscover_tasks()
# Asynchronous task used for testing.
@app.task(bind=True)
def debug_task(self):
    print('Request: {0!r}'.format(self.request))
12da7a042b2e9ce3e36e2416e766509384cc3513 | 4,236 | py | Python | reflex_gripper/src/reflex_gripper/motor.py | rpiRobotics/reflex-gripper-ros-pkg | 12388049d65ebcc7b54c650a87e16718801e6fdb | [
"Apache-2.0"
] | null | null | null | reflex_gripper/src/reflex_gripper/motor.py | rpiRobotics/reflex-gripper-ros-pkg | 12388049d65ebcc7b54c650a87e16718801e6fdb | [
"Apache-2.0"
] | null | null | null | reflex_gripper/src/reflex_gripper/motor.py | rpiRobotics/reflex-gripper-ros-pkg | 12388049d65ebcc7b54c650a87e16718801e6fdb | [
"Apache-2.0"
] | null | null | null | #############################################################################
# Code Modified by Gregory Grebe for use with the CATS Robotics Lab
# http://github.com/rpiRobotics/reflex-gripper-ros-pkg
# Aug 17, 2015
#
# Original License:
#
# Copyright 2015 Right Hand Robotics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################################################################
import rospy
from std_msgs.msg import Float64
import reflex_gripper_msgs.msg
class Motor(object):
    """Base class for a single ReFlex gripper motor.

    Caches speed/travel/load limits from the ROS parameter server and keeps
    the most recent Motor state message.  Hardware-specific commands and the
    state callback are abstract (raise NotImplementedError) and must be
    supplied by concrete subclasses.
    """
    def __init__(self, name):
        '''
        Assumes that "name" is the name of the controller with a preceding
        slash, e.g. /reflex_sf_f1
        '''
        self.name = name[1:]
        # Limits fetched from the ROS parameter server under `name`.
        self._DEFAULT_MOTOR_SPEED = rospy.get_param(name + '/default_motor_speed')
        self._MAX_MOTOR_SPEED = rospy.get_param(name + '/max_motor_speed')
        self._MAX_MOTOR_TRAVEL = rospy.get_param(name + '/max_motor_travel')
        self._OVERLOAD_THRESHOLD = rospy.get_param(name + '/overload_threshold')
        # Most recent state message; updated by subclass state callbacks.
        self._motor_msg = reflex_gripper_msgs.msg.Motor()
        self._in_control_force_mode = False
    # --- Read-only accessors over the cached state message ---
    def get_current_joint_angle(self):
        return self._motor_msg.joint_angle
    def get_load(self):
        return self._motor_msg.load
    def get_velocity(self):
        return self._motor_msg.velocity
    def get_motor_msg(self):
        return self._motor_msg
    # --- Abstract hardware interface: implemented by concrete motor types ---
    def set_motor_angle(self, goal_pos):
        raise NotImplementedError
    def _check_motor_angle_command(self, angle_command):
        raise NotImplementedError
    def set_motor_speed(self, goal_speed):
        raise NotImplementedError
    def reset_motor_speed(self):
        raise NotImplementedError
    def set_motor_velocity(self, goal_vel):
        raise NotImplementedError
    def tighten(self, tighten_angle=0.05):
        raise NotImplementedError
    def loosen(self, loosen_angle=0.05):
        raise NotImplementedError
    def _receive_state_cb(self, data):
        raise NotImplementedError
    def _handle_motor_load(self, load):
        raise NotImplementedError
    def _check_motor_speed_command(self, goal_speed):
        '''
        Returns absolute of given command if within the allowable range,
        returns bounded command if out of range. Always returns positive
        '''
        bounded_command = min(abs(goal_speed), self._MAX_MOTOR_SPEED)
        return bounded_command
    def enable_force_control(self):
        # Seed the integral controller state from the current joint angle.
        self._in_control_force_mode = True
        self.previous_load_control_output = self.get_current_joint_angle()
        self.previous_load_control_error = 0.0
    def disable_force_control(self):
        self._in_control_force_mode = False
    def set_force_cmd(self, force_cmd):
        '''
        Bounds the given goal load and sets it as the goal
        '''
        # Clamped to [0, overload threshold].
        self.force_cmd = min(max(force_cmd, 0.0), self._OVERLOAD_THRESHOLD)
    def _control_force(self, current_force, k):
        '''
        Uses discrete integral control to try and maintain goal force
        k is Compensator gain - higher gain has faster response and is more unstable
        '''
        # Accumulates k * (current + previous error) onto the last commanded
        # angle — a trapezoidal-style integral update, presumably; verify
        # against the controller design docs.
        current_error = self.force_cmd - current_force
        output = self.previous_load_control_output + k * (current_error + self.previous_load_control_error)
        self.set_motor_angle(output)
        self.previous_load_control_output = output
        self.previous_load_control_error = current_error
    def _loosen_if_overloaded(self, load):
        '''
        Takes the given load and checks against threshold, loosen motor if over
        '''
        if abs(load) > self._OVERLOAD_THRESHOLD:
            rospy.logwarn("Motor %s overloaded at %f, loosening", self.name, load)
            self.loosen()
| 34.721311 | 107 | 0.673513 |
5cb774271d9d5b9cac65bad3c6f85c033316a5b6 | 1,199 | py | Python | test/generator_spa_test.py | fmcooper/matchingproblems | 0d99e442c581427a7f09dce9f64d487f276e906c | [
"MIT"
] | null | null | null | test/generator_spa_test.py | fmcooper/matchingproblems | 0d99e442c581427a7f09dce9f64d487f276e906c | [
"MIT"
] | 1 | 2021-11-14T03:39:48.000Z | 2021-11-14T03:39:48.000Z | test/generator_spa_test.py | fmcooper/matchingproblems | 0d99e442c581427a7f09dce9f64d487f276e906c | [
"MIT"
] | null | null | null | import matchingproblems
from matchingproblems.generator import generator_spa as generator
import unittest
"""Testing class for the SPA instance generator."""
class TestSPAGenerator(unittest.TestCase):
    """Tests for the SPA (Student-Project Allocation) instance generator."""

    def test_instance_generation(self):
        """A fully-specified instance should serialise to the expected text."""
        gen = generator.Generator_spa()
        instance = gen.create_instance(
            n1=3,
            n2=4,
            n3=2,
            pref_lists_students=[
                [2, 4, 1],
                [1, 3],
                [4, 2]],
            st_ties=[
                [1, 0, 1],
                [1, 0],
                [0, 1]],
            project_lecturers=[1, 1, 2, 2],
            lower_quotas=[1, 1, 0, 0],
            upper_quotas=[2, 1, 1, 1],
            pref_lists_lecturers=[
                [2, 1, 3],
                [1, 3, 2]],
            lec_ties=[
                [1, 0, 1],
                [1, 1, 1]],
            lec_lower_quotas=[1, 0],
            lec_targets=[1, 1],
            lec_upper_quotas=[2, 2],
            instance_info='info')
        instance_test = (
            '3 4 2\n'
            '1: (2 4) 1\n'
            '2: (1 3)\n'
            '3: 4 2\n'
            '1: 1: 2: 1\n'
            '2: 1: 1: 1\n'
            '3: 0: 1: 2\n'
            '4: 0: 1: 2\n'
            '1: 1: 1: 2: (2 1) 3\n'
            '2: 0: 1: 2: (1 3 2)\n\n'
            'info')
        # Fix: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual.
        self.assertEqual(instance, instance_test)
| 23.509804 | 65 | 0.473728 |
b82632d36f01e1889ce2cb7a6dd429ac43eca8b5 | 8,457 | py | Python | transform_sintel.py | AhmedAlaa10/Consistent_Video_Depth_Estimation | 1a8868eadcf0b2082cdfea8ed339865f0ba8ea01 | [
"MIT"
] | null | null | null | transform_sintel.py | AhmedAlaa10/Consistent_Video_Depth_Estimation | 1a8868eadcf0b2082cdfea8ed339865f0ba8ea01 | [
"MIT"
] | null | null | null | transform_sintel.py | AhmedAlaa10/Consistent_Video_Depth_Estimation | 1a8868eadcf0b2082cdfea8ed339865f0ba8ea01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import re
import sys
from posix import listdir
from shutil import copyfile
from pathlib import Path
import importlib.util
#from pyquaternion import Quaternion
import numpy as np
from math import sqrt
import cv2 #TODO: pip install opencv-python
import os
from os.path import isfile, join
from PIL import Image
# --- Scene / rendering-pass selection ---
name = "alley_1" #TODO
img_type = "clean" #TODO "clean"/ "final" / "albedo"
data_type = "training" #TODO "training"/ "test"
collab=False
method="FN"
#local:
dest_path = "./data/FN" #TODO
sintel_complete_path = "../MPI-Sintel-complete" #TODO
sintel_depth_path = "../MPI-Sintel-depth-training-20150305" #TODO
#server:
#dest_path = "/cluster_HDD/char/practicum_project_b/results"
#sintel_complete_path = "/cluster_HDD/char/practicum_project_b/MPI-Sintel-complete"
#sintel_depth_path = "/cluster_HDD/char/practicum_project_b/MPI-Sintel-depth"
#collab:
if collab:
    dest_path = "/content/consistent_depth/results"
    sintel_complete_path = "/content/consistent_depth/data/sintel-complete"
    sintel_depth_path = "/content/consistent_depth/data/sintel-depth"
fps = 15 #TODO
fps_input_vid = fps
# Optional CLI overrides: argv[1] = scene name, argv[2] = method.
if len(sys.argv) > 1:
    name = str(sys.argv[1])
if len(sys.argv) > 2:
    method = str(sys.argv[2])
    dest_path = "./data/"+method+"/" #TODO
#sintel_io = importlib.import_module(os.path.join(sintel_depth_path,"sdk/python/sintel_io.py"))
#spec = importlib.util.spec_from_file_location("sintel_io", os.path.join(sintel_depth_path,"sdk/python/sintel_io.py"))
#sintel_io = importlib.util.module_from_spec(spec)
#spec.loader.exec_module(sintel_io)
#sintel_io.cam_read("path")
# Build dest_path/<name>/<img_type>/{color_full, colmap_dense/pose_init}
# (directories are created eagerly at import time).
Path(dest_path).mkdir(parents=True, exist_ok=True)
dest_path = os.path.join(dest_path, name)
Path(dest_path).mkdir(parents=True, exist_ok=True)
dest_path = os.path.join(dest_path, img_type)
Path(dest_path).mkdir(parents=True, exist_ok=True)
img_path = os.path.join(dest_path, "color_full")
cam_path = os.path.join(dest_path, "colmap_dense")
Path(img_path).mkdir(parents=True, exist_ok=True)
Path(cam_path).mkdir(parents=True, exist_ok=True)
cam_path = os.path.join(cam_path, "pose_init")
Path(cam_path).mkdir(parents=True, exist_ok=True)
# Source directories inside the Sintel datasets.
src_img_path = os.path.join(sintel_complete_path, data_type, img_type, name)
src_cam_path = os.path.join(sintel_depth_path, data_type, "camdata_left", name)
def cam_read(filename):  # Copied from sintel_io.py from http://sintel.is.tue.mpg.de/depth
    """ Read camera data, return (M,N) tuple.

    M is the intrinsic matrix, N is the extrinsic matrix, so that
      x = M*N*X,
    with x being a point in homogeneous image pixel coordinates, X being a
    point in homogeneous world coordinates.
    """
    # Fix: the file handle was opened but never closed; use a context manager.
    with open(filename, 'rb') as f:
        # Leading float32 is the format's check/magic value; reading it also
        # advances the cursor to the matrix data.
        check = np.fromfile(f, dtype=np.float32, count=1)[0]
        M = np.fromfile(f, dtype='float64', count=9).reshape((3, 3))
        N = np.fromfile(f, dtype='float64', count=12).reshape((3, 4))
    return M, N
def trace_method(matrix): #Copied from https://github.com/KieranWynn/pyquaternion/blob/master/pyquaternion/quaternion.py
    """
    Convert a 3x3 rotation matrix to a quaternion.

    This code uses a modification of the algorithm described in:
    https://d3cw3dd2w32x2b.cloudfront.net/wp-content/uploads/2015/01/matrix-to-quat.pdf
    which is itself based on the method described here:
    http://www.euclideanspace.com/maths/geometry/rotations/conversions/matrixToQuaternion/
    Altered to work with the column vector convention instead of row vectors

    The four branches pick the numerically largest quaternion component to
    divide by, avoiding the loss of precision a plain trace formula suffers
    near 180-degree rotations.
    """
    m = matrix.conj().transpose() # This method assumes row-vector and postmultiplication of that vector
    if m[2, 2] < 0:
        if m[0, 0] > m[1, 1]:
            # x is the dominant component.
            t = 1 + m[0, 0] - m[1, 1] - m[2, 2]
            q = [m[1, 2]-m[2, 1], t, m[0, 1]+m[1, 0], m[2, 0]+m[0, 2]]
        else:
            # y is the dominant component.
            t = 1 - m[0, 0] + m[1, 1] - m[2, 2]
            q = [m[2, 0]-m[0, 2], m[0, 1]+m[1, 0], t, m[1, 2]+m[2, 1]]
    else:
        if m[0, 0] < -m[1, 1]:
            # z is the dominant component.
            t = 1 - m[0, 0] - m[1, 1] + m[2, 2]
            q = [m[0, 1]-m[1, 0], m[2, 0]+m[0, 2], m[1, 2]+m[2, 1], t]
        else:
            # w (scalar) is the dominant component.
            t = 1 + m[0, 0] + m[1, 1] + m[2, 2]
            q = [t, m[1, 2]-m[2, 1], m[2, 0]-m[0, 2], m[0, 1]-m[1, 0]]
    q = np.array(q).astype('float64')
    # Scale so the quaternion is unit length (t equals 4*component^2 here).
    q *= 0.5 / sqrt(t)
    return q
def quaternion_from_matrix(matrix):
    """Convert a 3x3 rotation matrix to a quaternion via trace_method
    (ordering appears scalar-first, i.e. [w, x, y, z] — confirm against
    the consumer before relying on it)."""
    return trace_method(matrix)
def video_from_frames():  # Adapted from: https://medium.com/@iKhushPatel/convert-video-to-images-images-to-video-using-opencv-python-db27a128a481
    """Assemble the PNG frames in module-level `img_path` into
    `dest_path`/video.mp4 at `fps_input_vid` frames per second.

    Fix: the original listed the directory, sorted it, and filled the frame
    list twice; the first pass was dead work and has been removed.  The
    effective ordering (sort by the filename slice [5:-4]) is preserved.
    """
    pathIn = img_path
    pathOut = os.path.join(dest_path, "video.mp4")
    files = [f for f in os.listdir(pathIn) if isfile(join(pathIn, f))]
    # for sorting the file names properly
    files.sort(key=lambda x: x[5:-4])
    frame_array = []
    size = None
    for fname in files:
        filename = os.path.join(pathIn, fname)
        # reading each frame
        img = cv2.imread(filename)
        height, width, layers = img.shape
        size = (width, height)
        # inserting the frames into an image array
        frame_array.append(img)
    out = cv2.VideoWriter(pathOut, cv2.VideoWriter_fourcc(*'DIVX'), fps_input_vid, size)
    for frame in frame_array:
        # writing each frame to the video
        out.write(frame)
    out.release()
#os.chdir(path)
#print(os.listdir("."))
# Frame dimensions are captured from the first image copied below.
width, height = (None,None)
# Copy and rename images:
# Sintel names frames frame_0001.png (1-based); the output uses 0-based,
# zero-padded 6-digit indices (frame_000000.png).  Skipped if the target
# directory is already populated.
if len(listdir(img_path))==0:
    files = listdir(src_img_path)
    for file in files:
        split1 = file.split("_")
        split2 = split1[1].split(".")
        index = int(split2[0])-1
        index =str(index).zfill(6)
        file_new = str(split1[0]+"_"+index+".png")
        #print(file_new)
        #copyfile(os.path.join(src_img_path, file), os.path.join(img_path, file_new))
        im = Image.open(os.path.join(src_img_path, file))
        im.save(os.path.join(img_path, file_new), "PNG")
        if width is None:
            width, height = im.size
# Copy and transform cam data:
# Writes COLMAP-style cameras.txt / images.txt / points3D.txt plus a
# frames.txt with per-frame timestamps.  Skipped if already populated.
if len(listdir(cam_path))==0:
    cameras_file = open(os.path.join(cam_path, "cameras.txt"),"w")
    images_file = open(os.path.join(cam_path, "images.txt"),"w")
    points3D_file = open(os.path.join(cam_path, "points3D.txt"),"w")
    points3D_file.close()
    frames_file = open(os.path.join(dest_path, "frames.txt"),"w")
    frame_cams = listdir(src_cam_path)
    frame_cams.sort()
    cams =[]
    for i , frame_cam in enumerate(frame_cams):
        I,E = cam_read(os.path.join(src_cam_path, frame_cam))
        print(frame_cam)
        #if I not in cams:
        #    cams.append(I)
        #cam_id = cams.index(I)+1
        # Deduplicate intrinsics: reuse an existing camera id when the
        # intrinsic matrix I matches one already seen.
        cam_id=-1
        new = True
        for j, cam in enumerate(cams):
            if (I==cam).all():
                new =False
                cam_id =j +1
                break
        if new:
            cam_id = len(cams) +1
            cams.append(I)
        # Same 1-based -> 0-based renaming as the image copy above.
        split1 = frame_cam.split("_")
        split2 = split1[1].split(".")
        index = int(split2[0])-1
        index =str(index).zfill(6)
        frame_name = str(split1[0]+"_"+index+".png")
        # Rotation = extrinsic matrix E with its translation column dropped;
        # pose is written as quaternion + translation (COLMAP images.txt).
        R = np.delete(E, np.s_[3], axis=1)
        q = quaternion_from_matrix(matrix=R)
        line = str(i+1)+" "+str(q[0])+" "+str(q[1])+" "+str(q[2])+" "+str(q[3])+" "+str(E[0,3])+" "+str(E[1,3])+" "+str(E[2,3])+" "+str(cam_id)+" "+frame_name+"\n" +"\n" #TODO: remove one \n?
        images_file.write(line)
    images_file.close()
    # One PINHOLE camera entry per distinct intrinsic matrix.
    for i , I in enumerate(cams):
        line = str(i+1)+" "+"PINHOLE"+" "+str(width)+" "+str(height)+" "+str(I[0,0])+" "+str(I[1,1])+" "+str(I[0,2])+" "+str(I[1,2])+"\n"
        cameras_file.write(line)
    cameras_file.close()
    # frames.txt: frame count, dimensions, then one timestamp per frame.
    number_of_frames=len(frame_cams)
    line = str(number_of_frames)+"\n"
    frames_file.write(line)
    line = str(width)+"\n"
    frames_file.write(line)
    line = str(height)+"\n"
    frames_file.write(line)
    # NOTE(review): this simplifies to 1/fps regardless of frame count.
    step_size=(float(number_of_frames)/float(fps))/float(number_of_frames)
    time= 0.
    for i in range(number_of_frames):
        line = str(time)+"\n"
        frames_file.write(line)
        time+=step_size
    frames_file.close()
| 33.963855 | 192 | 0.614639 |
28052532948fd20618d5c689dd7818672002e0fd | 688 | py | Python | travellite/migrations/0005_review.py | Greko2017/kenda-travel-new | 9fc1fcbd160847a8d266a50345f2bda9a2da99de | [
"MIT"
] | 25 | 2019-01-16T16:24:08.000Z | 2022-03-03T04:43:29.000Z | travellite/migrations/0005_review.py | Greko2017/kenda-travel-new | 9fc1fcbd160847a8d266a50345f2bda9a2da99de | [
"MIT"
] | 3 | 2018-08-02T08:13:37.000Z | 2020-10-15T13:41:17.000Z | travellite/migrations/0005_review.py | Greko2017/kenda-travel-new | 9fc1fcbd160847a8d266a50345f2bda9a2da99de | [
"MIT"
] | 16 | 2019-03-25T11:19:49.000Z | 2021-12-03T10:04:17.000Z | # Generated by Django 2.0.4 on 2018-05-08 00:20
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 2.0.4: creates the Review model.
    # Applied migrations should not be edited beyond comments.

    dependencies = [
        ('travellite', '0004_auto_20180507_1920'),
    ]
    operations = [
        migrations.CreateModel(
            name='Review',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('review', models.CharField(max_length=1000)),
                ('rating', models.IntegerField()),
                ('author', models.CharField(max_length=30)),
                ('submissionDate', models.DateField()),
            ],
        ),
    ]
| 28.666667 | 114 | 0.56686 |
4c9897a36dff8ecac8168b73bf0927a1b150d53e | 1,063 | py | Python | setup.py | byhyu/explainable-ai-demo | 6e798544facb74b54169f9d55bd1503953db8727 | [
"RSA-MD"
] | null | null | null | setup.py | byhyu/explainable-ai-demo | 6e798544facb74b54169f9d55bd1503953db8727 | [
"RSA-MD"
] | 7 | 2021-02-02T23:12:35.000Z | 2022-01-13T03:32:52.000Z | setup.py | byhyu/explainable-ai-demo | 6e798544facb74b54169f9d55bd1503953db8727 | [
"RSA-MD"
] | null | null | null | from distutils.core import setup
def readme():
    """Import the README.md Markdown file and try to convert it to RST format."""
    try:
        # Falls back to the raw Markdown when pypandoc (or pandoc itself,
        # surfacing as IOError) is unavailable.
        import pypandoc
        return pypandoc.convert('README.md', 'rst')
    except (IOError, ImportError):
        with open('README.md') as fh:
            return fh.read()
# Package metadata and dependencies for the chicagocrime distribution.
setup(
    name='chicagocrime',
    version='0.1',
    description='predictive modeling for chicago crime data',
    # Long description rendered from README.md (RST when pypandoc works).
    long_description=readme(),
    classifiers=[
        'Programming Language :: Python :: 3',
    ],
    url='https://github.com/quantiply-labs/dslab-HongYu',
    author='Hong Yu',
    author_email='hongyu.us@gmail.com',
    license='...',
    packages=['chicagocrime'],
    install_requires=[
        'pypandoc>=1.4',
        'watermark>=1.8.1',
        'pandas>=0.24.2',
        'scikit-learn>=0.20.3',
        'scipy>=1.2.1',
        'matplotlib>=3.0.3',
        'pytest>=4.3.1',
        'pytest-runner>=4.4',
        'click>=7.0'
    ],
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
)
| 25.926829 | 81 | 0.584196 |
b2e20bcf97b45ad680e23fa25f257ee700ae53fe | 2,928 | py | Python | VGG-backbone-32/utils.py | ashleylqx/AIB | 77e418cac52f0ca5f2a7c54927468a7bd75a8fc9 | [
"MIT"
] | 5 | 2021-05-23T13:05:45.000Z | 2022-02-13T21:40:59.000Z | VGG-backbone-32/utils.py | ashleylqx/AIB | 77e418cac52f0ca5f2a7c54927468a7bd75a8fc9 | [
"MIT"
] | null | null | null | VGG-backbone-32/utils.py | ashleylqx/AIB | 77e418cac52f0ca5f2a7c54927468a7bd75a8fc9 | [
"MIT"
] | 3 | 2021-08-11T03:23:31.000Z | 2021-11-17T01:48:52.000Z | from nested_dict import nested_dict
from functools import partial
import torch
from torch.nn.init import kaiming_normal_
from torch.nn.parallel._functions import Broadcast
from torch.nn.parallel import scatter, parallel_apply, gather
import torch.nn.functional as F
def distillation(y, teacher_scores, labels, T, alpha):
    """Knowledge-distillation loss: alpha * KL(student, teacher) at
    temperature T plus (1 - alpha) * cross-entropy on the hard labels."""
    soft_log_probs = F.log_softmax(y / T, dim=1)
    soft_targets = F.softmax(teacher_scores / T, dim=1)
    # T**2 rescaling keeps gradient magnitudes comparable across temperatures.
    kl_term = F.kl_div(soft_log_probs, soft_targets, reduction='sum') * (T ** 2) / y.shape[0]
    ce_term = F.cross_entropy(y, labels)
    return alpha * kl_term + (1. - alpha) * ce_term
def at(x):
    """L2-normalized spatial attention map: mean squared activation over the
    channel dimension, flattened per sample."""
    attention = x.pow(2).mean(1)
    return F.normalize(attention.view(attention.size(0), -1))
def at_loss(x, y):
    """Attention-transfer loss: mean squared difference of attention maps."""
    diff = at(x) - at(y)
    return diff.pow(2).mean()
def cast(params, dtype='float'):
    """Recursively cast tensors (or dicts of tensors) to `dtype` (a tensor
    method name like 'float'/'double'), moving them to GPU when available."""
    if isinstance(params, dict):
        return {key: cast(value, dtype) for key, value in params.items()}
    tensor = params.cuda() if torch.cuda.is_available() else params
    return getattr(tensor, dtype)()
def conv_params(ni, no, k=1):
    """Kaiming-initialised conv weight of shape (out, in, k, k)."""
    weight = torch.Tensor(no, ni, k, k)
    return kaiming_normal_(weight)
def linear_params(ni, no):
    """Kaiming-initialised weight plus zero bias for a linear layer."""
    weight = kaiming_normal_(torch.Tensor(no, ni))
    return {'weight': weight, 'bias': torch.zeros(no)}
def bnparams(n):
    """Batch-norm parameter dict: uniform-random weight, zero bias, and
    identity running statistics (mean 0, variance 1)."""
    params = {
        'weight': torch.rand(n),
        'bias': torch.zeros(n),
        'running_mean': torch.zeros(n),
        'running_var': torch.ones(n),
    }
    return params
def data_parallel(f, input, params, mode, device_ids, output_device=None):
    """Replicate `f` and `params` across `device_ids`, scatter `input`,
    run the replicas in parallel, and gather results on `output_device`."""
    device_ids = list(device_ids)
    if output_device is None:
        output_device = device_ids[0]
    # Single device: skip the replication machinery entirely.
    if len(device_ids) == 1:
        return f(input, params, mode)
    broadcast = Broadcast.apply(device_ids, *params.values())
    n_params = len(params)
    # Re-assemble one parameter dict per device from the flat broadcast list.
    replica_params = [
        {k: broadcast[i + j * n_params] for i, k in enumerate(params.keys())}
        for j in range(len(device_ids))
    ]
    replicas = [partial(f, params=p, mode=mode) for p in replica_params]
    inputs = scatter([input], device_ids)
    outputs = parallel_apply(replicas, inputs)
    return gather(outputs, output_device)
def flatten(params):
    """Flatten a nested parameter dict into {'a.b.c': tensor}, joining key
    paths with '.' and dropping None values.

    NOTE(review): relies on the third-party `nested_dict` package's
    items_flat(), which yields (key_tuple, value) pairs — confirm version
    compatibility before upgrading that dependency.
    """
    return {'.'.join(k): v for k, v in nested_dict(params).items_flat() if v is not None}
def batch_norm(x, params, base, mode):
    """Functional batch-norm whose parameters live in the flat dict `params`
    under keys '<base>.weight', '<base>.bias', '<base>.running_mean' and
    '<base>.running_var'; `mode` selects training vs. eval statistics."""
    def lookup(suffix):
        return params['{}.{}'.format(base, suffix)]
    return F.batch_norm(x,
                        weight=lookup('weight'),
                        bias=lookup('bias'),
                        running_mean=lookup('running_mean'),
                        running_var=lookup('running_var'),
                        training=mode)
def print_tensor_dict(params):
    """Pretty-print each entry's index, key, shape, tensor type and
    requires_grad flag, with keys aligned to the widest one."""
    widest_key = max(len(key) for key in params.keys())
    for idx, (key, tensor) in enumerate(params.items()):
        print(str(idx).ljust(5), key.ljust(widest_key + 3),
              str(tuple(tensor.shape)).ljust(23),
              torch.typename(tensor), tensor.requires_grad)
def set_requires_grad_except_bn_(params):
    """Enable gradients on every tensor except batch-norm running statistics
    (keys ending in 'running_mean' or 'running_var'). Mutates in place."""
    for key, tensor in params.items():
        is_running_stat = key.endswith('running_mean') or key.endswith('running_var')
        if not is_running_stat:
            tensor.requires_grad = True
| 32.533333 | 118 | 0.635587 |
f225b35b5ef9ac310633b62b6790878ea239faf8 | 8,625 | py | Python | src/AI.py | Progitiel/Slideways | 64b8672860893eea6be245b5138f5ec48e97ed8c | [
"MIT"
] | null | null | null | src/AI.py | Progitiel/Slideways | 64b8672860893eea6be245b5138f5ec48e97ed8c | [
"MIT"
] | null | null | null | src/AI.py | Progitiel/Slideways | 64b8672860893eea6be245b5138f5ec48e97ed8c | [
"MIT"
] | null | null | null | import random
import copy
import numpy as np
from Config import *
def coup_AI(plateau, playerNumber, line=None, column=None, direction=None):
    """
    Simulate a player's move on a board without mutating the original.

    Inputs:
        plateau [[int,int,...],[int,int,...],...] : list of lists representing the board
        playerNumber [int] : player number
        line [int] : row coordinate
        column [int] : column coordinate
        direction [+1 or -1] : shift direction
    Returns a deep copy of `plateau` with the move applied (unchanged copy
    when no complete move is given).
    """
    board = copy.deepcopy(plateau)
    # Fix: use `is not None` instead of `!= None` (PEP 8). Identity checks
    # are also safe for coordinate 0, which is a valid row/column.
    if line is not None and column is not None:  # place a piece on a cell
        board[line][column] = playerNumber
    elif line is not None and direction is not None:  # shift a whole row
        board[line] = np.roll(board[line], direction)
    return board
class AI():
    def __init__(self, app):
        """Initialise the AI from `app`, the game object providing board
        state and valid-move queries."""
        self.app = app
        # Player mode selects the AI variant; see play().
        self.mode = self.app.getPlayerMode()
    def play(self):
        """
        Choose the AI's move.
        Returns:
            [(line, column, direction)] : tuple describing the chosen move
        """
        # mode -1: full alpha-beta search; -2/-3: depth-limited search with
        # a max- or mean-based heuristic (heuristique_mean is defined
        # elsewhere in the class — TODO confirm).
        if self.mode == -1:
            return self.alphaBeta(self.app.getBoard(), self.app.getCurrentPlayer())[0]
        elif self.mode == -2:
            self.heuristique = self.heuristique_max
            return self.alphaBetaHeuristique(self.app.getBoard(), self.app.getCurrentPlayer())[0]
        elif self.mode == -3:
            self.heuristique = self.heuristique_mean
            return self.alphaBetaHeuristique(self.app.getBoard(), self.app.getCurrentPlayer())[0]
    def alphaBeta(self, board, player, depth=2, maxi=True):
        """
        Minimax search with pruning.
        Inputs:
            board [[int,int,...],[int,int,...],...] : list of lists representing the board
            player [int] : player number
            depth [int] : remaining search depth
            maxi [bool] : whether the current player is maximising
        Returns:
            ((line, column, direction), score) : one best move (chosen at
            random among ties) together with its score
        """
        # Players are numbered 1 and 2; this maps one to the other.
        otherPlayer = (player % 2)+1
        if depth == 0:
            # Leaf: score the position as WIN/LOSS/DRAW for `player`
            # (constants presumably from `Config` via the star import).
            AI_tie, AI_win_player = self.app.getWinner(board)
            if AI_win_player == player:
                res = WIN
            elif AI_win_player == otherPlayer:
                res = LOSS
            else:
                res = DRAW
            return (None, res)
        meilleurscore = -INFINITE if maxi else +INFINITE
        meilleurcoup = []
        # Candidate moves: cell placements plus row shifts in both directions.
        caseCoup = [(line, column, None)
                    for line, column in self.app.getValidCase()]
        shiftCoup = [(line, None, direction)
                     for line, direction in self.app.getValidShift(-1)]
        shiftCoup += [(line, None, direction)
                      for line, direction in self.app.getValidShift(+1)]
        for coup in caseCoup + shiftCoup:
            # Apply the move in place, recurse, then undo it (shift moves
            # are undone by shifting in the opposite direction).
            board, pres = self.app.setCoup(board, line=coup[0],
                                           column=coup[1], playerNumber=player, direction=coup[2])
            score = self.alphaBeta(
                board, otherPlayer, depth=depth-1, maxi=not maxi)[1]
            board, pres = self.app.setCoup(board, line=coup[0],
                                           column=coup[1], playerNumber=pres, direction=(+1 if coup[2] == -1 else -1))
            if (maxi and score > meilleurscore) or (not maxi and score < meilleurscore):
                meilleurscore = score
                meilleurcoup = [(coup, score)]
            elif meilleurscore == score:
                meilleurcoup.append((coup, score))
            else:
                # NOTE(review): abandoning the remaining moves on the first
                # strictly-worse score looks like an aggressive cutoff —
                # confirm this is the intended pruning rule.
                break
        return random.choice(meilleurcoup)
    def alphaBetaHeuristique(self, board, playerNumber, depth=2, maxi=True):
        """Game-tree search scored by self.heuristique at depth 0.

        Args:
            board: list of lists representing the board.
            playerNumber: number of the player to move.
            depth: remaining search depth; 0 evaluates with the heuristic.
            maxi: True when maximising for the searching player.
        Returns:
            (coup, score) where coup is (line, column, direction), chosen at
            random among the equally-scored best moves.
        """
        otherPlayer = (playerNumber % 2)+1
        if depth == 0:
            # Leaf: evaluate with the configured heuristic for the side whose
            # turn is being scored.
            return (None, self.heuristique(board, playerNumber if maxi else otherPlayer))
        meilleurscore = -INFINITE if maxi else +INFINITE
        meilleurcoup = []
        valid = self.app.getValid()
        for coup in valid:
            # Play the move, recurse on the opponent, then undo it by
            # replaying the previous owner / the opposite direction.
            board, pres = self.app.setCoup(board, line=coup[0],
                                           column=coup[1], playerNumber=playerNumber, direction=coup[2])
            score = self.alphaBetaHeuristique(
                board, otherPlayer, depth=depth-1, maxi=not maxi)[1]
            board, pres = self.app.setCoup(board, line=coup[0],
                                           column=coup[1], playerNumber=pres, direction=(+1 if coup[2] == -1 else -1))
            if (maxi and score > meilleurscore) or (not maxi and score < meilleurscore):
                meilleurscore = score
                meilleurcoup = [(coup, score)]
            elif meilleurscore == score:
                meilleurcoup.append((coup, score))
            else:
                # NOTE(review): prunes on the first strictly worse move — not
                # classic alpha-beta; confirm intended.
                break
        return random.choice(meilleurcoup)
def heuristique_max(self, board, playerNumber):
"""Evalue la probabilité que le player a de gagner sur cette board"""
otherPlayer = (playerNumber % 2)+1
# case du joueur par colonne
ccp = np.count_nonzero(board == playerNumber, axis=0)
ccpMax = np.amax(ccp)/NUMBER_CASE_TO_WIN
# case du joueur par ligne
clp = np.count_nonzero(board == playerNumber, axis=1)
clpMax = np.amax(clp)/NUMBER_CASE_TO_WIN
# case de l'autre joueur par colonne
cco = np.count_nonzero(board == otherPlayer, axis=0)
ccoMax = np.amax(cco)/NUMBER_CASE_TO_WIN
# case de l'autre joueur par ligne
clo = np.count_nonzero(board == otherPlayer, axis=1)
cloMax = np.amax(clo)/NUMBER_CASE_TO_WIN
diagonals = [np.diagonal(board, i) for i in range((-BOARD_SIZE+1)+NUMBER_CASE_TO_WIN-1,
(BOARD_SIZE+(BOARD_SIZE-1)*2)-NUMBER_CASE_TO_WIN+1)] # Diagonnales de S-O vers N-E
flipBoard = np.flipud(board)
diagonals += [np.diagonal(flipBoard, i) for i in range((-BOARD_SIZE+1)+NUMBER_CASE_TO_WIN-1,
(BOARD_SIZE+(BOARD_SIZE-1)*2)-NUMBER_CASE_TO_WIN+1)] # Diagonnales de N-O vers S-E
diagonals = np.array(diagonals)
cdp = np.count_nonzero(diagonals == playerNumber, axis=1)
cdpMax = np.amax(ccp)/NUMBER_CASE_TO_WIN
cdo = np.count_nonzero(diagonals == otherPlayer, axis=1)
cdoMax = np.amax(ccp)/NUMBER_CASE_TO_WIN
return 100*((ccpMax + clpMax + cdpMax)/3) - 80*((ccoMax + cloMax + cdoMax)/3)
def heuristique_mean(self, board, playerNumber):
"""Evalue la probabilité que le player a de gagner sur cette board"""
otherPlayer = (playerNumber % 2)+1
# case du joueur par colonne
ccp = np.count_nonzero(board == playerNumber, axis=0)
ccpMean = np.mean(ccp)/NUMBER_CASE_TO_WIN
# case du joueur par ligne
clp = np.count_nonzero(board == playerNumber, axis=1)
clpMean = np.mean(clp)/NUMBER_CASE_TO_WIN
# case de l'autre joueur par colonne
cco = np.count_nonzero(board == otherPlayer, axis=0)
ccoMean = np.mean(cco)/NUMBER_CASE_TO_WIN
# case de l'autre joueur par ligne
clo = np.count_nonzero(board == otherPlayer, axis=1)
cloMean = np.mean(clo)/NUMBER_CASE_TO_WIN
diagonals = [np.diagonal(board, i) for i in range((-BOARD_SIZE+1)+NUMBER_CASE_TO_WIN-1,
(BOARD_SIZE+(BOARD_SIZE-1)*2)-NUMBER_CASE_TO_WIN+1)] # Diagonnales de S-O vers N-E
flipBoard = np.flipud(board)
diagonals += [np.diagonal(flipBoard, i) for i in range((-BOARD_SIZE+1)+NUMBER_CASE_TO_WIN-1,
(BOARD_SIZE+(BOARD_SIZE-1)*2)-NUMBER_CASE_TO_WIN+1)] # Diagonnales de N-O vers S-E
diagonals = np.array(diagonals)
cdp = np.count_nonzero(diagonals == playerNumber, axis=1)
cdpMean = np.mean(ccp)/NUMBER_CASE_TO_WIN
cdo = np.count_nonzero(diagonals == otherPlayer, axis=1)
cdoMean = np.mean(ccp)/NUMBER_CASE_TO_WIN
return 100*((ccpMean + clpMean + cdpMean)/3) - 80*((ccoMean + cloMean + cdoMean)/3)
| 38.333333 | 146 | 0.574609 |
d24d40d992379da2b47471a62b3c0adc861bca62 | 4,625 | py | Python | finishedgames/catalogsources/management/commands/fetch_platforms.py | Kartones/finished-games | 9b1f86aee3ea26be50b666887e3bdecad2c8f757 | [
"Unlicense"
] | 7 | 2019-01-23T20:09:00.000Z | 2021-12-19T17:50:48.000Z | finishedgames/catalogsources/management/commands/fetch_platforms.py | Kartones/finished-games | 9b1f86aee3ea26be50b666887e3bdecad2c8f757 | [
"Unlicense"
] | 2 | 2019-08-11T11:16:00.000Z | 2019-09-04T00:07:04.000Z | finishedgames/catalogsources/management/commands/fetch_platforms.py | Kartones/finished-games | 9b1f86aee3ea26be50b666887e3bdecad2c8f757 | [
"Unlicense"
] | 2 | 2019-01-23T20:09:05.000Z | 2020-09-06T10:43:25.000Z | from typing import Any, Dict, List
from catalogsources.helpers import clean_string_field
from catalogsources.management.helpers import TimeProfiler, source_class_from_id, wait_if_needed
from catalogsources.models import FetchedPlatform
from django.core.management.base import BaseCommand, CommandParser
class Command(BaseCommand):
    """Management command that fetches platforms from the given catalog
    source ids and upserts them as FetchedPlatform rows."""
    help = "Fetches platforms from specified source ids"
    def add_arguments(self, parser: CommandParser) -> None:
        # One or more source identifiers, fetched sequentially.
        parser.add_argument("sources", nargs="+", type=str)
    def handle(self, *args: Any, **options: Dict) -> None:
        """Entry point: print the output legend, then fetch each source."""
        self._display_legend()
        for source_id in options["sources"]:
            self._fetch_source(source_id=source_id)
    def _fetch_source(self, source_id: str) -> None:
        """Fetch all platform batches from one source adapter and upsert them.

        Stops fetching as soon as the adapter reports an error.
        """
        had_errors = False
        self.stdout.write(self.style.WARNING("> Started fetching platforms from '{}'".format(source_id)))
        adapter_class = source_class_from_id(source_id)
        # Remember the adapter's placeholder publish date so _upsert_results
        # can distinguish real dates from the default.
        self.default_publish_date = adapter_class.DEFAULT_PUBLISH_DATE
        with adapter_class(stdout=self.stdout, stdout_color_style=self.style) as adapter:
            self.stdout.write(self.style.WARNING("> Batch size:{}".format(adapter.batch_size())))
            while adapter.has_more_items() and not had_errors:
                total = adapter.total_results if adapter.total_results != adapter.UNKOWN_TOTAL_RESULTS_VALUE else "-"
                self.stdout.write("\n> Fetch call: {current}/{total}".format(current=adapter.next_offset, total=total))
                with TimeProfiler(use_performance_counter=True) as profiler:
                    platforms = adapter.fetch_platforms_block()
                    self._upsert_results(results=platforms)
                had_errors = adapter.has_errored()
                # Throttle between calls, based on how long the fetch took.
                wait_if_needed(profiler.duration)
        self.stdout.write("")
        if had_errors:
            self.stdout.write(self.style.WARNING("> Finished fetching '{}' with errors".format(source_id)))
        else:
            self.stdout.write(self.style.SUCCESS("> Finished fetching '{}'".format(source_id)))
    def _upsert_results(self, results: List[FetchedPlatform]) -> None:
        """Insert new fetched platforms or update existing ones.

        Platforms are keyed by (source_platform_id, source_id). One status
        glyph per item is written (see _display_legend); errors are collected
        and printed at the end instead of aborting the batch.
        """
        errors = []
        count = 0
        for platform in results:
            self.stdout.write("{}:".format(platform.source_platform_id), ending="")
            platform.name = clean_string_field(platform.name)
            platform.shortname = clean_string_field(platform.shortname)
            try:
                existing_platform = FetchedPlatform.objects.get(
                    source_platform_id=platform.source_platform_id, source_id=platform.source_id
                )
                existing_platform.name = platform.name
                existing_platform.source_platform_id = platform.source_platform_id
                existing_platform.source_id = platform.source_id
                existing_platform.source_url = platform.source_url
                # Keep the stored publish date unless the fetch produced a
                # non-default one.
                if platform.publish_date != self.default_publish_date:
                    existing_platform.publish_date = platform.publish_date
                last_modified_date = existing_platform.last_modified_date
                existing_platform.save()
                # last_modified_date presumably only changes when save()
                # detects modified fields — confirm against the model.
                if existing_platform.last_modified_date != last_modified_date:
                    self.stdout.write(self.style.SUCCESS("☑ "), ending="")
                else:
                    self.stdout.write("☐ ", ending="")
            except FetchedPlatform.DoesNotExist:
                platform.save()
                self.stdout.write(self.style.SUCCESS("✓ "), ending="")
            except Exception as error:
                errors.append(str(error))
                self.stdout.write(self.style.ERROR("✗ "), ending="")
            count += 1
            if count % 10 == 0:
                # Line break every 10 glyphs to keep the progress readable.
                self.stdout.write("")
        if errors:
            self.stdout.write(self.style.ERROR("\nErrors:"))
            for error_item in errors:
                self.stdout.write(self.style.ERROR(error_item))
    def _display_legend(self) -> None:
        """Print the meaning of the per-item status glyphs."""
        self.stdout.write(self.style.WARNING("Legend: "))
        self.stdout.write(self.style.SUCCESS("✓ "), ending="")
        self.stdout.write("Added new platform")
        self.stdout.write(self.style.SUCCESS("☑ "), ending="")
        self.stdout.write("Updated existing platform (new changes)")
        self.stdout.write("☐ ", ending="")
        self.stdout.write("Existing platform not updated (no changes)")
        self.stdout.write(self.style.ERROR("✗ "), ending="")
        self.stdout.write("Error adding/updating platform")
        self.stdout.write(self.style.WARNING("-------\n"))
| 45.792079 | 119 | 0.635676 |
1bbe636b1d0d1a3d902a35bbb0d747d630dc1bad | 2,118 | py | Python | src/pyotp/otp.py | ddboline/pyotp | 147885b1b860a4439ca4a89030c0e3c6b97e7de0 | [
"MIT"
] | null | null | null | src/pyotp/otp.py | ddboline/pyotp | 147885b1b860a4439ca4a89030c0e3c6b97e7de0 | [
"MIT"
] | null | null | null | src/pyotp/otp.py | ddboline/pyotp | 147885b1b860a4439ca4a89030c0e3c6b97e7de0 | [
"MIT"
] | null | null | null | import base64
import hashlib
import hmac
from typing import Any, Optional
class OTP(object):
    """
    Base class for OTP handlers.

    Implements the HOTP value computation of RFC 4226: an HMAC over the
    big-endian counter, dynamic truncation, then reduction modulo
    10**digits, zero-padded to a fixed width.
    """
    def __init__(self, s: str, digits: int = 6, digest: Any = hashlib.sha1, name: Optional[str] = None,
                 issuer: Optional[str] = None) -> None:
        # s: base32-encoded shared secret (padding optional).
        self.digits = digits
        self.digest = digest
        self.secret = s
        self.name = name or 'Secret'
        self.issuer = issuer
    def generate_otp(self, input: int) -> str:
        """
        Return the zero-padded OTP string for the given HMAC input.

        :param input: the HMAC counter value to use as the OTP input.
            Usually either the counter, or the computed integer based on
            the Unix timestamp.
        :raises ValueError: if input is negative.
        """
        if input < 0:
            raise ValueError('input must be positive integer')
        hasher = hmac.new(self.byte_secret(), self.int_to_bytestring(input), self.digest)
        hmac_hash = bytearray(hasher.digest())
        # RFC 4226 dynamic truncation: the low nibble of the last byte picks
        # a 4-byte window; the top bit of that window is masked off.
        offset = hmac_hash[-1] & 0xf
        code = ((hmac_hash[offset] & 0x7f) << 24 |
                (hmac_hash[offset + 1] & 0xff) << 16 |
                (hmac_hash[offset + 2] & 0xff) << 8 |
                (hmac_hash[offset + 3] & 0xff))
        # zfill keeps leading zeros so the OTP is always `digits` wide.
        return str(code % 10 ** self.digits).zfill(self.digits)
    def byte_secret(self) -> bytes:
        """Decode the base32 secret, tolerating missing '=' padding."""
        # Bug fix: pad a local copy instead of mutating self.secret — the
        # original appended padding to the stored secret as a side effect
        # of this getter.
        secret = self.secret
        missing_padding = len(secret) % 8
        if missing_padding != 0:
            secret += '=' * (8 - missing_padding)
        return base64.b32decode(secret, casefold=True)
    @staticmethod
    def int_to_bytestring(i: int, padding: int = 8) -> bytes:
        """
        Turns an integer to the OATH specified
        bytestring, which is fed to the HMAC
        along with the secret
        """
        result = bytearray()
        while i != 0:
            result.append(i & 0xFF)
            i >>= 8
        # Reverse to big-endian and left-pad with NUL bytes to `padding`.
        return bytes(bytearray(reversed(result)).rjust(padding, b'\0'))
| 35.3 | 103 | 0.576487 |
7e4ad8f928601eb39b474758a597621539885e9e | 2,390 | py | Python | taotao-cloud-python/taotao-cloud-django/taotao_cloud_auto_cmdb/cmdb_server/api/service/disk.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 47 | 2021-04-13T10:32:13.000Z | 2022-03-31T10:30:30.000Z | taotao-cloud-python/taotao-cloud-django/taotao_cloud_auto_cmdb/cmdb_server/api/service/disk.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 1 | 2021-11-01T07:41:04.000Z | 2021-11-01T07:41:10.000Z | taotao-cloud-python/taotao-cloud-django/taotao_cloud_auto_cmdb/cmdb_server/api/service/disk.py | shuigedeng/taotao-cloud-paren | 3d281b919490f7cbee4520211e2eee5da7387564 | [
"Apache-2.0"
] | 21 | 2021-04-13T10:32:17.000Z | 2022-03-26T07:43:22.000Z | from repository import models
class Disk(object):
    """Synchronise collected disk/slot information for a server with the CMDB
    Disk records, logging additions, removals and field changes as
    AssetRecord entries."""
    def __init__(self):
        pass
    @classmethod
    def initial(cls):
        # Factory hook; presumably invoked by the collection framework —
        # confirm against the plugin loader.
        return cls()
    def process(self, new, server_obj):
        """Apply one collection payload to `server_obj`.

        `new` is expected to be {'status': bool, 'data': {slot: fields}} —
        confirm against the agent. Slots are diffed against the stored Disk
        rows: new slots are bulk-created, vanished slots deleted, and common
        slots field-diffed and updated.
        """
        new_data = new.get('data')
        if new.get('status'):
            new_slot = list(new_data.keys())  # slots reported by this collection
            objs = server_obj.disk.all()
            old_slot = [obj.slot for obj in objs]  # slots currently stored
            add_slot = list(set(new_slot).difference(set(old_slot)))  # slots to create
            del_slot = list(set(old_slot).difference(set(new_slot)))  # slots to delete
            update_slot = list(set(new_slot).intersection(set(old_slot)))  # slots to diff
            # Create records for newly seen slots.
            log = []
            add_objs = []
            for slot in add_slot:
                new_data[slot]['server_obj_id'] = server_obj.id
                log.append('硬盘信息:新增槽位{slot};型号{model};容量GB{capacity};类型{pd_type}'.format(**new_data[slot]))
                add_objs.append(models.Disk(**new_data.get(slot)))
            print(add_objs, '...add_objs..')
            models.Disk.objects.bulk_create(add_objs, 10)
            if log:
                models.AssetRecord.objects.create(asset_obj=server_obj.asset, content='新增硬盘:%s' % (';'.join(log)))
            # Delete records for slots that disappeared.
            if del_slot:
                models.Disk.objects.filter(server_obj=server_obj, slot__in=del_slot).delete()
                models.AssetRecord.objects.create(asset_obj=server_obj.asset, content='移除硬盘:%s' % (';'.join(del_slot)))
            # Update records whose fields changed, logging each difference.
            log = []
            field_map = {'model': '型号', 'capacity': '容量', 'pd_type': '类型'}
            for slot in update_slot:
                slot_data = new_data.get(slot)
                slot_obj = models.Disk.objects.filter(slot=slot, server_obj=server_obj).first()
                for k, v in slot_data.items():
                    if k == 'capacity':
                        # Stored capacity compares as a float; normalise first.
                        v = float(v)
                    value = getattr(slot_obj, k)
                    if v != value:
                        log.append('硬盘槽位:%s,%s由%s变为%s' % (slot, field_map.get(k), value, v))
                        setattr(slot_obj, k, v)
                slot_obj.save()
            if log:
                models.AssetRecord.objects.create(asset_obj=server_obj.asset, content=';'.join(log))
        else:
            # The collection itself failed: keep the raw payload for debugging.
            models.ErrorLog.objects.create(title='硬盘信息采集出错', content=new_data)
| 41.206897 | 119 | 0.535565 |
414797794c963888b85142bc4f206d8fac4cb40a | 1,319 | py | Python | models/han_origin/args.py | Mrmoore98/hedwig | dc8c2f1f5e6886b9ce9999bbd071bce02cfbbaf1 | [
"Apache-2.0"
] | null | null | null | models/han_origin/args.py | Mrmoore98/hedwig | dc8c2f1f5e6886b9ce9999bbd071bce02cfbbaf1 | [
"Apache-2.0"
] | null | null | null | models/han_origin/args.py | Mrmoore98/hedwig | dc8c2f1f5e6886b9ce9999bbd071bce02cfbbaf1 | [
"Apache-2.0"
] | null | null | null | import os
import models.args
def get_args():
    """Extend the shared base parser with HAN-specific options and parse them.

    Returns the parsed argparse.Namespace.
    """
    han_parser = models.args.get_args()
    embeddings_dir = os.path.join(os.pardir, 'hedwig-data', 'embeddings', 'word2vec')
    # Model / data selection.
    han_parser.add_argument('--mode', type=str, default='static', choices=['rand', 'static', 'non-static'])
    han_parser.add_argument('--dataset', type=str, default='Reuters', choices=['Reuters', 'AAPD', 'IMDB', 'Yelp2014'])
    # Architecture hyper-parameters.
    han_parser.add_argument('--output-channel', type=int, default=100)
    han_parser.add_argument('--words-dim', type=int, default=300)
    han_parser.add_argument('--embed-dim', type=int, default=300)
    han_parser.add_argument('--dropout', type=float, default=0.5)
    han_parser.add_argument('--epoch-decay', type=int, default=15)
    han_parser.add_argument('--weight-decay', type=float, default=0)
    han_parser.add_argument('--word-num-hidden', type=int, default=50)
    han_parser.add_argument('--sentence-num-hidden', type=int, default=50)
    # Embedding and checkpoint locations.
    han_parser.add_argument('--word-vectors-dir', default=embeddings_dir)
    han_parser.add_argument('--word-vectors-file', default='GoogleNews-vectors-negative300.txt')
    han_parser.add_argument('--save-path', type=str, default=os.path.join('model_checkpoints', 'han'))
    han_parser.add_argument('--resume-snapshot', type=str)
    han_parser.add_argument('--trained-model', type=str)
    return han_parser.parse_args()
| 47.107143 | 120 | 0.682335 |
b40db11cb05c9fdcad4f0c60733cfc9527da2b2c | 18,468 | py | Python | datasets/vcoco.py | iloveat/HoiTransformer | 904736d80e42158bda988565b12cb65e823e5597 | [
"Apache-2.0"
] | null | null | null | datasets/vcoco.py | iloveat/HoiTransformer | 904736d80e42158bda988565b12cb65e823e5597 | [
"Apache-2.0"
] | null | null | null | datasets/vcoco.py | iloveat/HoiTransformer | 904736d80e42158bda988565b12cb65e823e5597 | [
"Apache-2.0"
] | null | null | null | # ------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License")
# ------------------------------------------------------------------------
# Modified from DETR (https://github.com/facebookresearch/detr)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# ------------------------------------------------------------------------
from torchvision.datasets.vision import VisionDataset
import torchvision
import torch
import numpy as np
import json
import cv2
import random
import PIL
import torchvision.transforms as T
import torchvision.transforms.functional as F
from util.box_ops import box_xyxy_to_cxcywh
from PIL import Image
coco_classes_originID = {
"person": 1,
"bicycle": 2,
"car": 3,
"motorcycle": 4,
"airplane": 5,
"bus": 6,
"train": 7,
"truck": 8,
"boat": 9,
"traffic light": 10,
"fire hydrant": 11,
"stop sign": 13,
"parking meter": 14,
"bench": 15,
"bird": 16,
"cat": 17,
"dog": 18,
"horse": 19,
"sheep": 20,
"cow": 21,
"elephant": 22,
"bear": 23,
"zebra": 24,
"giraffe": 25,
"backpack": 27,
"umbrella": 28,
"handbag": 31,
"tie": 32,
"suitcase": 33,
"frisbee": 34,
"skis": 35,
"snowboard": 36,
"sports ball": 37,
"kite": 38,
"baseball bat": 39,
"baseball glove": 40,
"skateboard": 41,
"surfboard": 42,
"tennis racket": 43,
"bottle": 44,
"wine glass": 46,
"cup": 47,
"fork": 48,
"knife": 49,
"spoon": 50,
"bowl": 51,
"banana": 52,
"apple": 53,
"sandwich": 54,
"orange": 55,
"broccoli": 56,
"carrot": 57,
"hot dog": 58,
"pizza": 59,
"donut": 60,
"cake": 61,
"chair": 62,
"couch": 63,
"potted plant": 64,
"bed": 65,
"dining table": 67,
"toilet": 70,
"tv": 72,
"laptop": 73,
"mouse": 74,
"remote": 75,
"keyboard": 76,
"cell phone": 77,
"microwave": 78,
"oven": 79,
"toaster": 80,
"sink": 81,
"refrigerator": 82,
"book": 84,
"clock": 85,
"vase": 86,
"scissors": 87,
"teddy bear": 88,
"hair drier": 89,
"toothbrush": 90,
}
coco_instance_ID_to_name = {
1: "person",
2: "bicycle",
3: "car",
4: "motorcycle",
5: "airplane",
6: "bus",
7: "train",
8: "truck",
9: "boat",
10: "traffic light",
11: "fire hydrant",
13: "stop sign",
14: "parking meter",
15: "bench",
16: "bird",
17: "cat",
18: "dog",
19: "horse",
20: "sheep",
21: "cow",
22: "elephant",
23: "bear",
24: "zebra",
25: "giraffe",
27: "backpack",
28: "umbrella",
31: "handbag",
32: "tie",
33: "suitcase",
34: "frisbee",
35: "skis",
36: "snowboard",
37: "sports ball",
38: "kite",
39: "baseball bat",
40: "baseball glove",
41: "skateboard",
42: "surfboard",
43: "tennis racket",
44: "bottle",
46: "wine glass",
47: "cup",
48: "fork",
49: "knife",
50: "spoon",
51: "bowl",
52: "banana",
53: "apple",
54: "sandwich",
55: "orange",
56: "broccoli",
57: "carrot",
58: "hot dog",
59: "pizza",
60: "donut",
61: "cake",
62: "chair",
63: "couch",
64: "potted plant",
65: "bed",
67: "dining table",
70: "toilet",
72: "tv",
73: "laptop",
74: "mouse",
75: "remote",
76: "keyboard",
77: "cell phone",
78: "microwave",
79: "oven",
80: "toaster",
81: "sink",
82: "refrigerator",
84: "book",
85: "clock",
86: "vase",
87: "scissors",
88: "teddy bear",
89: "hair drier",
90: "toothbrush",
}
hoi_interaction_names = json.loads(
open('/data/DATA/VCOCO/vcoco_verb_names.json', 'r').readlines()[0])['verb_names']
def convert_xywh2x1y1x2y2(box, shape, flip):
ih, iw = shape[:2]
x, y, w, h = box
if flip == 1:
x1_org = x
x2_org = x + w - 1
x2 = iw - 1 - x1_org
x1 = iw - 1 - x2_org
else:
x1 = x
x2 = x + w - 1
x1 = max(x1, 0)
x2 = min(x2, iw-1)
y1 = max(y, 0)
y2 = min(y + h - 1, ih-1)
return [x1, y1, x2, y2]
def get_det_annotation_from_odgt(item, shape, flip, gt_size_min=1):
total_boxes, gt_boxes, ignored_boxes = [], [], []
for annot in item['gtboxes']:
box = convert_xywh2x1y1x2y2(annot['box'], shape, flip)
x1, y1, x2, y2 = box
cls_id = coco_classes_originID[annot['tag']]
total_boxes.append([x1, y1, x2, y2, cls_id, ])
if annot['tag'] not in coco_classes_originID:
continue
if annot.get('extra', {}).get('ignore', 0) == 1:
ignored_boxes.append(box)
continue
if (x2 - x1 + 1) * (y2 - y1 + 1) < gt_size_min ** 2:
ignored_boxes.append(box)
continue
if x2 <= x1 or y2 <= y1:
ignored_boxes.append(box)
continue
gt_boxes.append([x1, y1, x2, y2, cls_id, ])
return gt_boxes, ignored_boxes, total_boxes
def get_interaction_box(human_box, object_box, hoi_id):
hx1, hy1, hx2, hy2, hid = human_box
ox1, oy1, ox2, oy2, oid = object_box
# hcx, hcy = (hx1 + hx2) / 2, (hy1 + hy2) / 2
# ocx, ocy = (ox1 + ox2) / 2, (oy1 + oy2) / 2
# dx = (hcx - ocx) / 5
# dy = (hcy - ocy) / 5
# xx1, yy1, xx2, yy2 = list(map(int, [ox1 + dx, oy1 + dy, ox2 + dx, oy2 + dy]))
xx1, yy1, xx2, yy2 = min(hx1, ox1), min(hy1, oy1), max(hx2, ox2), max(hy2, oy2)
return [xx1, yy1, xx2, yy2, hoi_id]
def xyxy_to_cxcywh(box):
x0, y0, x1, y1, cid = box
return [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0), cid]
def get_hoi_annotation_from_odgt(item, total_boxes, scale):
human_boxes, object_boxes, action_boxes = [], [], []
human_labels, object_labels, action_labels = [], [], []
img_hh, img_ww = item['height'], item['width']
for hoi in item.get('hoi', []):
x1, y1, x2, y2, cls_id = list(map(int, total_boxes[hoi['subject_id']]))
human_box = x1 // scale, y1 // scale, x2 // scale, y2 // scale, cls_id
if cls_id == -1 or x1 >= x2 or y1 >= y2:
continue
x1, y1, x2, y2, cls_id = list(map(int, total_boxes[hoi['object_id']]))
object_box = x1 // scale, y1 // scale, x2 // scale, y2 // scale, cls_id
if cls_id == -1 or x1 >= x2 or y1 >= y2:
continue
hoi_id = hoi_interaction_names.index(hoi['interaction'])
hoi_box = get_interaction_box(human_box=human_box, object_box=object_box, hoi_id=hoi_id)
human_boxes.append(human_box[0:4])
object_boxes.append(object_box[0:4])
action_boxes.append(hoi_box[0:4])
human_labels.append(human_box[4])
object_labels.append(object_box[4])
action_labels.append(hoi_box[4])
return dict(
human_boxes=torch.from_numpy(np.array(human_boxes).astype(np.float32)),
human_labels=torch.from_numpy(np.array(human_labels)),
object_boxes=torch.from_numpy(np.array(object_boxes).astype(np.float32)),
object_labels=torch.from_numpy(np.array(object_labels)),
action_boxes=torch.from_numpy(np.array(action_boxes).astype(np.float32)),
action_labels=torch.from_numpy(np.array(action_labels)),
image_id=item['file_name'],
org_size=torch.as_tensor([int(img_hh), int(img_ww)]),
)
def parse_one_gt_line(gt_line, scale=1):
item = json.loads(gt_line)
img_name = item['file_name']
img_shape = item['height'], item['width']
gt_boxes, ignored_boxes, total_boxes = get_det_annotation_from_odgt(item, img_shape, flip=0)
interaction_boxes = get_hoi_annotation_from_odgt(item, total_boxes, scale)
return dict(image_id=img_name, annotations=interaction_boxes)
def hflip(image, target):
flipped_image = F.hflip(image)
w, h = image.size
target = target.copy()
if "human_boxes" in target:
boxes = target["human_boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["human_boxes"] = boxes
if "object_boxes" in target:
boxes = target["object_boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["object_boxes"] = boxes
if "action_boxes" in target:
boxes = target["action_boxes"]
boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0])
target["action_boxes"] = boxes
return flipped_image, target
class RandomHorizontalFlip(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return hflip(img, target)
return img, target
class RandomAdjustImage(object):
def __init__(self, p=0.5):
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
img = F.adjust_brightness(img, random.choice([0.8, 0.9, 1.0, 1.1, 1.2]))
if random.random() < self.p:
img = F.adjust_contrast(img, random.choice([0.8, 0.9, 1.0, 1.1, 1.2]))
return img, target
class RandomSelect(object):
"""
Randomly selects between transforms1 and transforms2,
with probability p for transforms1 and (1 - p) for transforms2
"""
def __init__(self, transforms1, transforms2, p=0.5):
self.transforms1 = transforms1
self.transforms2 = transforms2
self.p = p
def __call__(self, img, target):
if random.random() < self.p:
return self.transforms1(img, target)
return self.transforms2(img, target)
def resize(image, target, size, max_size=None):
def get_size_with_aspect_ratio(image_size, size, max_size=None):
w, h = image_size
if max_size is not None:
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
if max_original_size / min_original_size * size > max_size:
size = int(round(max_size * min_original_size / max_original_size))
if (w <= h and w == size) or (h <= w and h == size):
return h, w
if w < h:
ow = size
oh = int(size * h / w)
else:
oh = size
ow = int(size * w / h)
return oh, ow
rescale_size = get_size_with_aspect_ratio(image_size=image.size, size=size, max_size=max_size)
rescaled_image = F.resize(image, rescale_size)
if target is None:
return rescaled_image, None
ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size))
ratio_width, ratio_height = ratios
target = target.copy()
if "human_boxes" in target:
boxes = target["human_boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["human_boxes"] = scaled_boxes
if "object_boxes" in target:
boxes = target["object_boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["object_boxes"] = scaled_boxes
if "action_boxes" in target:
boxes = target["action_boxes"]
scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height])
target["action_boxes"] = scaled_boxes
return rescaled_image, target
class RandomResize(object):
def __init__(self, sizes, max_size=None):
assert isinstance(sizes, (list, tuple))
self.sizes = sizes
self.max_size = max_size
def __call__(self, img, target=None):
size = random.choice(self.sizes)
return resize(img, target, size, self.max_size)
def crop(image, org_target, region):
cropped_image = F.crop(image, *region)
target = org_target.copy()
i, j, h, w = region
fields = ["human_labels", "object_labels", "action_labels"]
if "human_boxes" in target:
boxes = target["human_boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["human_boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("human_boxes")
if "object_boxes" in target:
boxes = target["object_boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["object_boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("object_boxes")
if "action_boxes" in target:
boxes = target["action_boxes"]
max_size = torch.as_tensor([w, h], dtype=torch.float32)
cropped_boxes = boxes - torch.as_tensor([j, i, j, i])
cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size)
cropped_boxes = cropped_boxes.clamp(min=0)
target["action_boxes"] = cropped_boxes.reshape(-1, 4)
fields.append("action_boxes")
# remove elements for which the boxes or masks that have zero area
if "human_boxes" in target and "object_boxes" in target:
cropped_boxes = target['human_boxes'].reshape(-1, 2, 2)
keep1 = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
cropped_boxes = target['object_boxes'].reshape(-1, 2, 2)
keep2 = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1)
keep = keep1 * keep2
if keep.any().sum() == 0:
return image, org_target
for field in fields:
target[field] = target[field][keep]
return cropped_image, target
class RandomSizeCrop(object):
def __init__(self, min_size: int, max_size: int):
self.min_size = min_size
self.max_size = max_size
def __call__(self, img: PIL.Image.Image, target: dict):
w = random.randint(self.min_size, min(img.width, self.max_size))
h = random.randint(self.min_size, min(img.height, self.max_size))
region = T.RandomCrop.get_params(img, (h, w))
return crop(img, target, region)
class ToTensor(object):
def __call__(self, img, target):
return torchvision.transforms.functional.to_tensor(img), target
class Normalize(object):
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, image, target):
image = torchvision.transforms.functional.normalize(image, mean=self.mean, std=self.std)
if target is None:
return image, None
target = target.copy()
h, w = image.shape[-2:]
if "human_boxes" in target:
boxes = target["human_boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["human_boxes"] = boxes
if "object_boxes" in target:
boxes = target["object_boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["object_boxes"] = boxes
if "action_boxes" in target:
boxes = target["action_boxes"]
boxes = box_xyxy_to_cxcywh(boxes)
boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32)
target["action_boxes"] = boxes
return image, target
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
def make_hico_transforms(image_set, test_scale=-1):
scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]
normalize = Compose([
ToTensor(),
Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
if image_set == 'train':
return Compose([
RandomHorizontalFlip(),
RandomAdjustImage(),
RandomSelect(
RandomResize(scales, max_size=1333),
Compose([
RandomResize([400, 500, 600]),
RandomSizeCrop(384, 600),
RandomResize(scales, max_size=1333),
])
),
normalize,
])
if image_set == 'test':
if test_scale == -1:
return Compose([
normalize,
])
assert 400 <= test_scale <= 800, test_scale
return Compose([
RandomResize([test_scale], max_size=1333),
normalize,
])
raise ValueError(f'unknown {image_set}')
class HoiDetection(VisionDataset):
def __init__(self, root, annFile, transform=None, target_transform=None, transforms=None):
super(HoiDetection, self).__init__(root, transforms, transform, target_transform)
self.annotations = [parse_one_gt_line(l.strip()) for l in open(annFile, 'r').readlines()]
self.transforms = transforms
def __getitem__(self, index):
ann = self.annotations[index]
img_name = ann['image_id']
target = ann['annotations']
if 'train2014' in img_name:
img_path = './data/vcoco/images/train2014/%s' % img_name
elif 'val2014' in img_name:
img_path = './data/vcoco/images/val2014/%s' % img_name
else:
raise NotImplementedError()
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = Image.fromarray(img[:, :, ::-1]).convert('RGB')
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.annotations)
def build(image_set, test_scale=-1):
assert image_set in ['train', 'test'], image_set
if image_set == 'train':
annotation_file = './data/vcoco/vcoco_trainval_retag_hoitr.odgt'
else:
annotation_file = './data/vcoco/vcoco_test_retag_hoitr.odgt'
dataset = HoiDetection(root='./data/vcoco', annFile=annotation_file,
transforms=make_hico_transforms(image_set, test_scale))
return dataset
| 32.174216 | 104 | 0.580355 |
d754fa31a01365082584d76e182fcd659f22c333 | 5,747 | py | Python | rxneqn/half_rxn_balancer.py | djinnome/rxneqn | 357b2caf3ba7081f79a6c3356431b1ed5fca4b20 | [
"MIT"
] | 1 | 2019-02-12T22:56:50.000Z | 2019-02-12T22:56:50.000Z | rxneqn/half_rxn_balancer.py | djinnome/rxneqn | 357b2caf3ba7081f79a6c3356431b1ed5fca4b20 | [
"MIT"
] | null | null | null | rxneqn/half_rxn_balancer.py | djinnome/rxneqn | 357b2caf3ba7081f79a6c3356431b1ed5fca4b20 | [
"MIT"
] | null | null | null | from . import Reaction, Mixture
from .utils import LCM, LCD
import pandas as pd
from fractions import Fraction
class HalfReactionBalancer:
def __init__( self ):
pass
def custom_half_reaction( self, C, H, O, N, charge=0):
"""generate custom half reaction from empirical formula
(n-c)/d CO2 + c/d NH4+ + c/d HCO3- + (d+f)/d H+ + E- ==> 1/d CnHaObNc + (2*n -b + c)/d H2O
where d = (4*n + a - 2*b - 3*c)
"""
n,a,b,c, f = Fraction(C),Fraction(H),Fraction(O),Fraction(N), charge
d = (4*n + a - 2*b -3*c - f)
if f == 0:
formula_charge = ''
elif f == 1:
formula_charge = '+'
elif f > 1:
formula_charge = '+{}'.format(f)
else:
formula_charge = '{}'.format(f)
stoichiometry = dict(n=C,a=H, b=O,c=N,
CO2=(n-c)/d, NH4 = c/d, HCO3 = c/d,
biomass=1/d,H2O = (2*n - b + c)/d, proton=(d+f)/d,
charge=formula_charge)
eqn = '{CO2} CO2 + {NH4} NH4+ + {HCO3} HCO3- + {proton} H+ + E- ==> {biomass} C{n}H{a}O{b}N{c}{charge} + {H2O} H2O'
return Reaction(eqn.format(**stoichiometry))
def balance_half_reaction( self, oxidized_form, reduced_form, nitrogen_source='NH3' ):
return self.normalize_by_electron(
self.balance_charge(
self.balance_hydrogen(
self.balance_oxygen(
self.balance_nonwater_atoms(
self.add_species(
self.setup_reduction(
oxidized_form, reduced_form),
nitrogen_source))))))
def setup_reduction( self, oxidized_form, reduced_form ):
return Reaction(str(oxidized_form) + ' ==> ' + str(reduced_form) )
def balance_element( self, rxn, elements_to_be_balanced ):
rxn = Reaction( str(rxn))
cc = rxn.get_chemical_composition().loc[elements_to_be_balanced]
molecules_of_elements = [m for m in cc.columns if cc[m].any()]
lcm = LCM([cc.loc[e,m] for m in molecules_of_elements for e in elements_to_be_balanced])
stoich = pd.Series(lcm, index=molecules_of_elements)
return rxn.multiply_factor(stoich)
def add_species( self, rxn1, nitrogen_source ):
if 'N' in rxn1.get_chemical_composition().index:
reactant2 = rxn1.rxn['reactant'].add_to_mixture(Mixture('H2O + ' + str(nitrogen_source)))
else:
reactant2 = rxn1.rxn['reactant'].add_to_mixture(Mixture('H2O'))
product2 = Mixture(str(rxn1.rxn['product']))
return Reaction( str(reactant2) + ' ==> ' + str(product2))
def balance_nonwater_atoms( self, rxn2 ):
step3 = Reaction( str( rxn2 ))
elements_to_be_balanced = []
for element in rxn2.get_chemical_composition().index:
if element not in ['H', 'O', 'Charge']:
elements_to_be_balanced.append( element )
step3 = self.balance_element( step3, elements_to_be_balanced )
charge = step3.get_charge()
if 'C' in step3.get_chemical_composition().index:
if step3.get_charge() < 0:
step3.rxn['reactant'] = step3.rxn['reactant'].\
add_to_mixture(Mixture('{c:} HCO3-'.format(c=-step3.get_charge()))).\
subtract_from_mixture(Mixture('{c:} CO2'.format(c=-step3.get_charge())))
elif step3.get_charge() > 0:
step3.rxn['reactant'] = step3.rxn['reactant'].\
subtract_from_mixture(Mixture('{c:} HCO3-'.format(c=step3.get_charge()))).\
add_to_mixture(Mixture('{c:} CO2'.format(c=step3.get_charge())))
return step3
def balance_oxygen( self, rxn3 ):
step4 = Reaction( str( rxn3 ))
num_O = step4.rxn['reactant'].get_number_of_atoms('O') - step4.rxn['product'].get_number_of_atoms('O')
water = '{} H2O ==> {} H2O'
if num_O > 0:
return step4.subtract_reaction( Reaction(water.format(num_O, 0)))
else:
return step4.subtract_reaction( Reaction(water.format(0, -num_O)))
def balance_hydrogen( self, rxn4 ):
step5 = Reaction( str( rxn4 ))
num_H = step5.rxn['reactant'].get_number_of_atoms('H') - step5.rxn['product'].get_number_of_atoms('H')
protons = '{} H+ ==> {} H+'
if num_H > 0:
return step5.subtract_reaction( Reaction( protons.format(num_H, 0)))
else:
return step5.subtract_reaction( Reaction( protons.format(0, -num_H)))
def balance_charge( self, rxn5 ):
step6 = Reaction( str( rxn5 ))
num_charge = step6.rxn['reactant'].get_charge_of_mixture() - step6.rxn['product'].get_charge_of_mixture()
charge = '{} E- ==> {} E-'
if num_charge >0:
return step6.add_reaction( Reaction( charge.format( num_charge, 0)))
else:
return step6.add_reaction( Reaction( charge.format( 0, -num_charge)))
def normalize_by_electron( self, rxn ):
step7 = Reaction( str( rxn ))
electrons = rxn.get_stoichiometry_of_species( 'E-')
if electrons > 0:
factor = pd.Series(Fraction(1,electrons), index=[str(s) for s in step7.get_species()], dtype='object')
return step7.multiply_factor( factor )
elif electrons == 0:
return step7
else:
factor = pd.Series(Fraction(1,electrons), index=[str(s) for s in step7.get_species()], dtype='object')
return step7.multiply_factor( -factor )
| 48.294118 | 123 | 0.562728 |
54dc1da6cab05f45ff308b2e8db93a9b3cc7758d | 6,564 | py | Python | aqt/utils/hparams_utils.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-19T04:26:12.000Z | 2022-03-19T04:26:12.000Z | aqt/utils/hparams_utils.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | aqt/utils/hparams_utils.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to load/save the hparams to/from a config dict."""
import dataclasses
import json
import os
import typing
from typing import Any, Dict, Optional, Type, TypeVar
import dacite
import jax
import ml_collections
from aqt.jax import quant_config
from aqt.jax import quantization
from aqt.jax.flax import struct as flax_struct
# Generic type variable used by the dataclass loader helpers in this module.
T = TypeVar('T')

# At runtime use the project's flax-struct dataclass decorator; during static
# type checking fall back to the stdlib one, which type checkers understand.
dataclass = flax_struct.dataclass if not typing.TYPE_CHECKING else dataclasses.dataclass


@dataclass
class HParamsMetadata:
  """Metadata associated with an experiment configuration."""
  # Human-readable description of this hparams configuration. Mainly
  # useful for hand inspection of serialized JSON files.
  description: str
  # Creation time of the configuration in the format of seconds from epoch.
  # Used for versioning different hyperparameter settings for the same
  # model configuration.
  last_updated_time: Optional[float]
  # By default, it is used to name the model directory and label the
  # experiment in tensorboard.
  hyper_str: Optional[str] = None
# TODO(abdolrashidi): Add unit tests for the functions below.
def save_dataclass_to_disk(data, path):
  """Serializes the given dataclass to a JSON file on disk.

  Args:
    data: A dataclass instance.
    path: Path to save the dataclass to.
  """
  data_dict = dataclasses.asdict(data)
  # Pin the encoding so the output does not depend on the platform default.
  with open(path, 'w', encoding='utf-8') as file:
    json.dump(data_dict, file, indent=2)
def write_hparams_to_file_with_host_id_check(hparams,
                                             output_dir):
  """Writes hparams to file for master host.

  Only the process with jax.host_id() == 0 writes; all other hosts (and a
  None output_dir) are no-ops.

  Args:
    hparams: Hparams.
    output_dir: Output directory to save hparams to, saves as output_dir /
      'hparams_config.json.
  """
  # NOTE(review): jax.host_id() is deprecated in recent JAX releases in favor
  # of jax.process_index() -- consider migrating.
  if jax.host_id() == 0 and output_dir is not None:
    # The directory is usually created automatically by the time we reach here,
    # but on some training runs it appears not to be.
    # MakeDirs will create the directory if it doesn't already exist and is a
    # no-op if it already exists.
    os.makedirs(output_dir, exist_ok=True)
    save_dataclass_to_disk(hparams,
                           os.path.join(output_dir, 'hparams_config.json'))
def load_dataclass_from_dict(dataclass_name,
                             data_dict):
  """Builds a dataclass instance from a dictionary parsed out of JSON.

  Args:
    dataclass_name: The dataclass to instantiate.
    data_dict: Dictionary parsed from JSON.

  Returns:
    An instance of `dataclass_name` populated with the data from `data_dict`.
  """
  # Enums reachable from a TrainingHParams instance are stored as plain
  # strings in the JSON; dacite must be told which classes to cast those
  # strings back into.  See https://github.com/konradhalas/dacite#casting.
  cast_targets = [
      quantization.QuantOps.ActHParams.InputDistribution,
      quantization.QuantType,
      quant_config.QuantGranularity,
  ]
  return dacite.from_dict(
      data_class=dataclass_name,
      data=_convert_lists_to_tuples(data_dict),
      config=dacite.Config(cast=cast_targets))
T = TypeVar('T')
def _convert_lists_to_tuples(node):
"""Recursively converts all lists to tuples in a nested structure.
Recurses into all lists and dictionary values referenced by 'node',
converting all lists to tuples.
Args:
node: A Python structure corresponding to JSON (a dictionary, a list,
scalars, and compositions thereof)
Returns:
A Python structure identical to the input, but with lists replaced by
tuples.
"""
if isinstance(node, dict):
return {key: _convert_lists_to_tuples(value) for key, value in node.items()}
elif isinstance(node, (list, tuple)):
return tuple([_convert_lists_to_tuples(value) for value in node])
else:
return node
def load_dataclass_from_json(dataclass_name, json_data):
  """Parses a JSON string and deserializes it into `dataclass_name`.

  Args:
    dataclass_name: Name of the dataclass to deserialize the JSON into.
    json_data: A Python string containing JSON.

  Returns:
    An instance of 'dataclass' populated with the JSON data.
  """
  return load_dataclass_from_dict(dataclass_name, json.loads(json_data))
# TODO(shivaniagrawal): functionality `load_hparams_from_file` is created for a
# generic (model hparams independent) train_hparams class; either we should move
# towards shared TrainHparams or remove the following functionalities.
def load_hparams_from_config_dict(hparams_classname,
                                  model_classname,
                                  config_dict):
  """Loads hparams from a configdict, and populates its model object.

  Args:
    hparams_classname: Name of the hparams class.
    model_classname: Name of the model class within the hparams class
    config_dict: A config dict mirroring the structure of hparams.

  Returns:
    An instance of 'hparams_classname' populated with the data from
    'config_dict', with its `model_hparams` field deserialized into
    'model_classname'.
  """
  parsed_hparams = load_dataclass_from_config_dict(hparams_classname,
                                                   config_dict)
  # The nested model hparams come back as a plain dict; deserialize them into
  # the concrete model dataclass in a second pass.
  parsed_hparams.model_hparams = load_dataclass_from_dict(
      model_classname, parsed_hparams.model_hparams)
  return parsed_hparams
def load_dataclass_from_config_dict(
    dataclass_name, config_dict):
  """Creates a dataclass instance from a configdict.

  Args:
    dataclass_name: Name of the dataclass to deserialize the configdict into.
    config_dict: A config dict mirroring the structure of 'dataclass_name'.

  Returns:
    An instance of 'dataclass_name' populated with the data from 'config_dict'.
  """
  # Round-trip through JSON instead of a plain dict conversion so that all
  # recursive field references fully resolve into a form dacite can consume.
  return load_dataclass_from_json(dataclass_name, config_dict.to_json())
| 32.49505 | 88 | 0.731261 |
dee4a5a4fb2eb2ae6134a0d6568e50638b695519 | 21,138 | py | Python | src/nnicotine/datasets/cath.py | KIT-MBS/nnicotine | 3681391d05d0a2d92e16431f5bc985cd6ff606fd | [
"MIT"
] | 2 | 2021-03-28T02:02:30.000Z | 2021-09-09T23:27:39.000Z | src/nnicotine/datasets/cath.py | KIT-MBS/nnicotine | 3681391d05d0a2d92e16431f5bc985cd6ff606fd | [
"MIT"
] | null | null | null | src/nnicotine/datasets/cath.py | KIT-MBS/nnicotine | 3681391d05d0a2d92e16431f5bc985cd6ff606fd | [
"MIT"
] | null | null | null | import os
import sys
from collections import OrderedDict
import gzip
import re
import numpy as np
import h5py
import torch
from Bio.PDB.MMCIFParser import MMCIFParser
from Bio import AlignIO
from Bio import pairwise2
from torchvision.datasets.utils import download_url
from tqdm import tqdm
from .utils import cath_train_test_split, get_pdb_filename, protein_letters_3to1
# NOTE silence warnings for the biopython parser
import warnings
from Bio import BiopythonWarning
warnings.simplefilter('ignore', BiopythonWarning)
urlbase = "ftp://orengoftp.biochem.ucl.ac.uk/cath/releases/"
class CATHDerived(torch.utils.data.Dataset):
    """
    root: root directory of dataset
    mode: toy, train or val. there is no test set. A good test set would be a set of CASP targets released after the CATH version release
    version: either daily, a date in yyyymmdd format or valid CATH plus version (latest at time of writing is v4.2.0) or latest (the most recent CATH plus release)
    generate: whether to generate a dataset (does not automatically generate MSAs, only a list of sequences to generate MSAs from). Default is False.
    transform: transformation to apply to sample input
    target_transform: transformation to apply to sample target
    """
    def __init__(self, root, mode='train', version='daily', generate=False, transform=None, target_transform=None, msa_format='clustal', **kwargs):
        self.root = root
        self.mode = mode
        self.msa_format = msa_format
        # NOTE there is no validation set for now, test set is for validation, casp set is for 'testing'
        self.modes = ["toy", "train", "test"]
        assert mode in self.modes
        # Resolve the download URL for the requested CATH release.
        if version == 'daily' or version == 'newest':
            self.url = urlbase + "daily-release/newest/cath-b-s35-newest.gz"
            version = 'newest'
        elif version == 'latest':
            raise NotImplementedError("CATH plus versions are not supported")
            # NOTE(review): unreachable -- the assignment below is dead code after the raise.
            self.url = urlbase + "all-releases/{}/".format()
        elif version[0] == 'v':
            raise NotImplementedError("CATH plus versions are not supported")
            # NOTE(review): unreachable -- dead code after the raise.
            self.url = urlbase + "all-releases/{}/".format(version.replace('.', '_')) # TODO put in the actual file
        else:
            # A yyyymmdd date string: use the corresponding archived daily release.
            y = version[0:4]
            m = version[4:6]
            d = version[6:]
            self.url = urlbase + "daily-release/archive/cath-b-{}{}{}-s35-all.gz".format(y, m, d)
        self.version = version
        self.versionroot = os.path.join(self.root, self.version)
        os.makedirs(self.versionroot, exist_ok=True)
        self.cathfile = os.path.join(self.versionroot, "cath-b-s35.gz")
        self.h5pyfilename = os.path.join(root, "{}/{}.hdf5".format(version, mode))
        self.transform = transform
        self.target_transform = target_transform
        if generate:
            # Downloads the CATH domain list and builds the HDF5 files
            # (MSAs themselves are expected to exist on disk already).
            self.download()
            self.preprocess(**kwargs)
        self.h5pyfile = h5py.File(self.h5pyfilename, 'r')
        self.num_samples = len(self.h5pyfile['ids'])
    def __getitem__(self, index):
        """Return ``(sample, target)``: sample holds the domain sequence and its
        MSA; target holds the C-alpha / C-beta coordinate arrays."""
        if index >= len(self):
            raise IndexError()
        msa_extensions = {'stockholm': '.sto', 'clustal': '.clu'}
        domain_id = self.h5pyfile['ids'][index].tostring().decode('utf-8')
        # MSAs are sharded by the two middle characters of the domain id.
        msa_file = os.path.join(self.root, f'{self.version}/{self.msa_format}/{domain_id[1:3]}/{domain_id}{msa_extensions[self.msa_format]}')
        sequence = self.h5pyfile[domain_id]['sequence'][...].tostring().decode('utf-8')
        msa = AlignIO.read(open(msa_file), self.msa_format)
        # Sanity check: the first MSA row (ungapped) must match the stored sequence.
        assert sequence == str(msa[0].seq).replace('-', '')
        sample = OrderedDict([('sequence', sequence), ('msa', msa)])
        target = OrderedDict([('ca', self.h5pyfile[domain_id]['ca'][...]), ('cb', self.h5pyfile[domain_id]['cb'][...])])
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
    def __len__(self):
        """Number of domains in this split's HDF5 file."""
        return self.num_samples
    def download(self):
        """Fetch the CATH domain list archive unless it is already on disk."""
        if os.path.exists(os.path.join(self.versionroot, self.cathfile)):
            return
        download_url(self.url, self.versionroot, filename=self.cathfile)
    def preprocess(self, **kwargs):
        """Parse the CATH domain list, split into toy/train/test, and write one
        HDF5 file per split containing each domain's sequence and C-alpha /
        C-beta coordinates extracted from local PDB mmCIF files."""
        if os.path.exists(self.h5pyfilename):
            print("using existing file")
            return
        pdb_root = kwargs.get('pdb_root', os.path.join(self.root, '../pdb'))
        if not os.path.exists(pdb_root):
            raise RuntimeError("A PDB containing structural data on CATH domains is required.")
        lines = []
        with gzip.open(self.cathfile, 'rt') as f:
            for line in f:
                lines.append(line)
        domains = [line.split() for line in lines]
        train_domains, test_domains = cath_train_test_split(domains, **kwargs)
        toy_domains = train_domains[:10]
        for mode, domains in [('toy', toy_domains), ('train', train_domains), ('test', test_domains)]:
            h5pyfilename = os.path.join(self.root, "{}/{}.hdf5".format(self.version, mode))
            with h5py.File(h5pyfilename, 'w') as handle:
                id_dset = handle.create_dataset('ids', (len(domains),), dtype='S7')
                for i, domain in enumerate(tqdm(domains)): # NOTE decided to ignore obsolete files because they are even more of a pain than the normal ones
                # The commented loops below pin single problematic domains for debugging:
                # for i, domain in enumerate(["4hubI02 v4_2_0 1.10.10.250.4 71-156:I".split()]):
                # for i, domain in enumerate(["2f07B00 v4_2_0 1.10.357.10.117 303-193:B".split()]):
                # for i, domain in enumerate(["3frhA01 v4_2_0 1.10.8.10.2 -1-56:A".split(), "3bbzA00 v4_2_0 1.10.8.10.18 344-391:A".split(), "2zs0D00 v4_2_0 1.10.490.10.13 1-145:D".split()]):
                # for i, domain in enumerate(["3mlgA00 v4_2_0 1.20.120.1980.1 13-181:A".split()]):
                # for i, domain in enumerate(["6guxA00 putative 1.20.1070.10.1 8-247:A".split()]): # NOTE this can't even pe parsed because of duplicate atom names. i just modified the file. i'm so tired
                # for i, domain in enumerate(["1vs9S01 v4_2_0 2.30.30.30.32 1-72:S".split()]): # NOTE superfluously added insertion code to residue id. in some positions. in others they just put letters randomly at the end of the sid? wtf
                # for i, domain in enumerate(["4ck4A00 v4_2_0 2.40.128.20.7 3-162:A".split()]): # NOTE these guys didn't bother to put the mutated sequence anywhere, TODO i put it in as a quick fix. it would be better to have something scalable
                # for i, domain in enumerate(["6sxtA02 putative 2.80.10.50.32 338-501:A".split()]):
                # for i, domain in enumerate(["1br7003 v4_2_0 3.10.110.10.34 430-546:0".split()]): # NOTE this ones seems to be completely bonkers. it's been superseded 20 years ago... why is it still in a cath database?
                # for i, domain in enumerate(["4k5yA01 v4_2_0 1.20.1070.10.22 115-1002:A,224-368:A".split()]): # NOTE this one has a tag at the end with a weird numbering that is still part of the domain for some reason. i'll tread the upper limit as exclusive, even though i don't think it is (have not seen documentation on this)
                    domain_id, version, cath_code, boundaries = domain
                    pdb_id = domain_id[:4]
                    id_dset[i] = np.string_(domain_id)
                    if ',' in boundaries:
                        # NOTE if the domain consists of several fragments: pick only the longest one # TODO should be pick the one with the most resolved residues
                        fragment_boundaries = boundaries.split(',')
                        fragment_sizes = []
                        for b in fragment_boundaries:
                            b = b.split(':')[0]
                            lower, upper = re.findall("-*\d+", b)
                            lower = int(lower)
                            # NOTE tread upper limit as exclusive
                            upper = int(upper[1:])
                            fragment_sizes.append(upper-lower)
                        boundaries = fragment_boundaries[np.argmax(fragment_sizes)]
                    boundaries, chain_id = boundaries.split(':')
                    lower, upper = re.findall("-*\d+", boundaries)
                    lower = int(lower)
                    # upper[1:] drops the leading '-' of the range separator.
                    upper = int(upper[1:])
                    # NOTE if lower > upper assume they included a prefix with some arcane numbering scheme. set lower to 1 and hope nothing to idiiotic happens by sticking to upper. inspired by 2f07B00 303-193
                    if lower > upper: lower = 1
                    assert lower != upper
                    l = upper - lower
                    pdb_file = get_pdb_filename(pdb_id, pdb_root)
                    parser = MMCIFParser()
                    with gzip.open(pdb_file, 'rt') as f:
                        s = parser.get_structure(pdb_id, f)
                    c = s[0][chain_id]
                    # NOTE some cifs are formatted weirdly
                    strand_ids = parser._mmcif_dict["_entity_poly.pdbx_strand_id"]
                    seq_idx = [idx for idx, s in enumerate(strand_ids) if chain_id in s][0]
                    # refseq = _get_reference_sequence(parser._mmcif_dict, seq_idx)
                    refseq = parser._mmcif_dict["_entity_poly.pdbx_seq_one_letter_code_can"][seq_idx]
                    refseq = ''.join(refseq.split())
                    try:
                        try:
                            # TODO read the _struct_ref_seq_dif entries to modify the reference sequence
                            seq, ca_coords, cb_coords = construct_sample(lower, upper, c, refseq)
                        except ValueError as e:
                            # Fall back to the struct_ref sequence when the canonical one fails.
                            refseq = parser._mmcif_dict["_struct_ref.pdbx_seq_one_letter_code"][seq_idx]
                            refseq = ''.join(refseq.split())
                            seq, ca_coords, cb_coords = construct_sample(lower, upper, c, refseq)
                    except (AssertionError, RuntimeError) as e:
                        print(domain_id)
                        raise(e)
                    # residues = []
                    # for i in range(lower, upper):
                    #     if i in c:
                    #         residues.append(protein_letters_3to1[c[i].get_resname().capitalize()])
                    #     else:
                    #         residues.append('-')
                    # strucseq = [aa for aa in residues]
                    # if '-' in residues:
                    #     strucseq = _resolve_gaps(strucseq, seq)
                    #     if ''.join(strucseq) not in seq:
                    #         degapped_strucseq = ''.join([subseq for subseq in ''.join(strucseq).split('-') if subseq != ''])
                    #         if degapped_strucseq not in seq:
                    #             raise RuntimeError("Reconstructed sequence does not match the expected data base sequence:\n{}\nsequence from atom entries: {}\nsequence without unresolved gaps:{}\nreference sequence: {}".format(domain_id, ''.join(strucseq), degapped_strucseq, seq))
                    #         else:
                    #             print("Warning: Missing residues could not be filled in from sequence information. Keeping unresolved gaps.", file=sys.stderr)
                    # ca_coords = np.full((l, 3), float('nan'), dtype=np.float32)
                    # cb_coords = np.full((l, 3), float('nan'), dtype=np.float32)
                    # for i in range(lower, upper):
                    #     if i in c:
                    #         r = c[i]
                    #         if 'CA' in r:
                    #             ca_coords[i-lower, :] = r['CA'].get_coord()[:]
                    #         if 'CB' in r:
                    #             cb_coords[i-lower, :] = r['CB'].get_coord()[:]
                    #         if r.get_resname() == 'GLY':
                    #             cb_coords[i-lower, :] = ca_coords[i-lower, :]
                    sample_group = handle.create_group(domain_id)
                    sequence_ds = sample_group.create_dataset('sequence', data=np.string_(seq))
                    ca_ds = sample_group.create_dataset('ca', data=ca_coords)
                    cb_ds = sample_group.create_dataset('cb', data=cb_coords)
        return
def _divide_by_alignment(segment, sequence):
    """Globally align a gap-free *segment* to the reference *sequence* and
    return its maximal gap-free pieces.

    Raises ValueError when the alignment would have to open a gap inside the
    reference, i.e. the segment cannot be embedded into the sequence as-is.
    """
    assert '-' not in segment
    alignments = pairwise2.align.globalms(
        segment, sequence,
        match=10, mismatch=-100, open=-5, extend=0.,
        penalize_end_gaps=False)
    aligned_segment, aligned_reference = alignments[0][0:2]
    if '-' in aligned_reference:
        raise ValueError("unable to align \n{} to \n{} without introducing a gap in to reference".format(aligned_segment, aligned_reference))
    return re.findall(r'\w+', aligned_segment)
# TODO optimize a little
# NOTE this does not work exactly right if there is e.g. a long gap and than a single AA at the end
# NOTE this will probably underestimate the size of the gap
# TODO unsolved: in 4hub there is a gap in the structure that is not properly indexed. solution: align only the subsequence to the reference, find the gap, fix resolved subsequences, continue
def _align_subseqs(strucseq, seq):
    """Anchor the resolved (non-gap) pieces of *strucseq* onto the reference
    *seq* and return ``(aligned_strucseq, seq)``, where ``aligned_strucseq``
    has the same length as *seq* and '-' at every unresolved position."""
    # subseqs = re.findall(r'\w+|-+', ''.join(strucseq))
    # resolved_subseqs = [s for s in subseqs if s[0] != '-']
    resolved_subseqs = re.findall(r'\w+', ''.join(strucseq))
    # NOTE check if all resolved subsequences are contained in the reference sequence
    # NOTE if not assume a gap was not indexed correctly, find the gap by alignment
    l = len(resolved_subseqs)
    i= 0
    while i < l:
        s = resolved_subseqs[i]
        if s not in seq:
            # Split the offending piece at the hidden gap found by alignment.
            new_subseqs = _divide_by_alignment(s, seq)
            resolved_subseqs[i:i+1] = new_subseqs
            l = len(resolved_subseqs)
        i+=1
    # NOTE use longest resolved subsequence as anchor into reference sequence
    longest_resolved = max(resolved_subseqs, key=len)
    # longest_subseqs_index = subseqs.index(longest_resolved)
    longest_resolved_index = resolved_subseqs.index(longest_resolved)
    # longest_start_seq = seq.find(longest_resolved)
    # NOTE inspired by 3mlgA00 which consists of two identical pieces concatenated
    longest_start_seq = sum(len(s) for s in resolved_subseqs[:longest_resolved_index]) + seq[sum(len(s) for s in resolved_subseqs[:longest_resolved_index]):].find(longest_resolved)
    assert longest_start_seq >= 0
    longest_start_struc = ''.join(strucseq).find(longest_resolved)
    offset = longest_start_seq - sum(len(s) for s in resolved_subseqs[:longest_resolved_index])
    # Initial guess: pieces packed back-to-back starting at the anchor offset.
    # subseqs_starts = [offset + sum([len(s) for s in subseqs[:i]]) for i in range(len(subseqs))]
    resolved_subseqs_starts = [offset + sum([len(s) for s in resolved_subseqs[:i]]) for i in range(len(resolved_subseqs))]
    # NOTE fix false indices from too long gaps before anchor
    # Slide each piece before the anchor leftwards until it matches the reference.
    for i in range(longest_resolved_index-1, -1, -1):
        s = resolved_subseqs[i]
        while s != seq[resolved_subseqs_starts[i]:resolved_subseqs_starts[i]+len(s)]:
            for j in range(i, -1, -1):
                resolved_subseqs_starts[j] -= 1
                assert resolved_subseqs_starts[j] >= 0
    # # NOTE fix false indices from too long gaps after anchor
    # Slide each piece after the anchor rightwards until it matches the reference.
    for i in range(longest_resolved_index+1, len(resolved_subseqs)):
        s = resolved_subseqs[i]
        while s != seq[resolved_subseqs_starts[i]:resolved_subseqs_starts[i]+len(s)]:
            # assert s in seq[resolved_subseqs_starts[i]:]
            for j in range(i, len(resolved_subseqs)):
                resolved_subseqs_starts[j] += 1
                assert resolved_subseqs_starts[j] <= len(seq)
    assert all([x >= 0 for x in resolved_subseqs_starts])
    # Write the resolved pieces into a gap-filled template of len(seq).
    result = ['-']*len(seq)
    for i in range(len(resolved_subseqs)):
        for j in range(len(resolved_subseqs[i])):
            result[resolved_subseqs_starts[i]+j] = resolved_subseqs[i][j]
    # # NOTE resolve gaps
    # for i in range((longest_subseqs_index+1)%2, len(subseqs), 2):
    #     # s = [x for x in subseqs[i]]
    #     s = ['-' for x in range(subseqs_starts[i+1]-subseqs_starts[i])]
    #     for j in range(subseqs_starts[i+1]-subseqs_starts[i]):
    #         s[j] = seq[subseqs_starts[i]+j]
    #     subseqs[i] = ''.join(s)
    # return ''.join(subseqs)
    return ''.join(result), seq
def construct_sample(lower, upper, c, sequence):
    """Extract per-residue sequence and C-alpha / C-beta coordinates for the
    residue range [lower, upper) of chain *c*, aligned to *sequence*.

    Args:
        lower, upper: residue id range of the domain (upper exclusive).
        c: a Bio.PDB chain object.
        sequence: the reference (database) sequence to align against.

    Returns:
        (out_seq, out_ca_coords, out_cb_coords) where out_seq is the aligned
        sequence string and the coordinate arrays have one row per residue,
        with NaN rows at unresolved positions.

    Raises:
        RuntimeError: if the reconstructed sequence is not a substring of the
            reference sequence.
    """
    # Sequence of resolved residues only, and of the whole range with '-' gaps.
    resolvedseq = ''.join([protein_letters_3to1[c[i].get_resname().capitalize()] for i in range(lower, upper) if i in c])
    chainseq = ''.join([protein_letters_3to1[c[i].get_resname().capitalize()] if i in c else '-' for i in range(lower, upper)])
    # TODO remove subseqs shorter than 3 residues
    # Count unresolved residues at the start and end of the requested range.
    unresolved_prefix_length = 0
    for i in range(lower, upper):
        if i in c: break
        unresolved_prefix_length+=1
    unresolved_postfix_length = 0
    for i in range(upper-1, lower-1, -1):
        if i in c: break
        unresolved_postfix_length+=1
    # Collect CA/CB coordinates for resolved residues; NaN when an atom is missing.
    ca_coords = []
    cb_coords = []
    for i in range(lower, upper):
        if i in c:
            r = c[i]
            if 'CA' in r:
                ca_coords.append(r['CA'].get_coord())
            else:
                ca_coords.append(np.full((3), float('nan'), dtype=np.float32))
            if 'CB' in r:
                cb_coords.append(r['CB'].get_coord())
            else:
                cb_coords.append(np.full((3), float('nan'), dtype=np.float32))
                # Glycine has no CB; use CA as its stand-in.
                if r.get_resname() == 'GLY':
                    cb_coords[-1] = ca_coords[-1]
    assert len(resolvedseq) == len(ca_coords) == len(cb_coords)
    # Anchor the gapped chain sequence onto the reference sequence.
    alignment = _align_subseqs(chainseq, sequence)
    aseq1, aseq2 = alignment
    # Leading/trailing gap runs in the aligned structure sequence.
    aligned_gap_prefix_length = 0
    for i in range(len(aseq1)):
        if aseq1[i] != '-': break
        aligned_gap_prefix_length += 1
    aligned_gap_postfix_length = 0
    for i in range(len(aseq1)-1, -1, -1):
        if aseq1[i] != '-': break
        aligned_gap_postfix_length += 1
    out_seq = []
    out_ca_coords = []
    out_cb_coords = []
    j = 0
    for i in range(aligned_gap_prefix_length-unresolved_prefix_length, min(len(aseq1)-aligned_gap_postfix_length + unresolved_postfix_length, len(aseq1))): # NOTE the min check is required by 6sxtA02, where the reference sequence is apparently missing the last two residues. the last three residues are not resolved so we just ignore the last two
        if aseq1[i] == '-':
            # Unresolved position: take the residue from the reference, NaN coords.
            out_seq.append(aseq2[i])
            out_ca_coords.append(np.array([float('nan')]*3))
            out_cb_coords.append(np.array([float('nan')]*3))
        else:
            out_seq.append(aseq1[i])
            assert aseq1[i] == aseq2[i]
            assert aseq1[i] == resolvedseq[j]
            out_ca_coords.append(ca_coords[j])
            out_cb_coords.append(cb_coords[j])
            j += 1
    out_seq = ''.join(out_seq)
    out_ca_coords = np.array(out_ca_coords)
    out_cb_coords = np.array(out_cb_coords)
    if out_seq not in sequence:
        # BUGFIX: the previous message referenced names (domain_id, strucseq,
        # degapped_strucseq, seq) that do not exist in this scope, so this
        # path raised NameError instead of the intended RuntimeError.
        raise RuntimeError(
            "Reconstructed sequence does not match the expected data base sequence:\n"
            "sequence from atom entries: {}\nreconstructed sequence: {}\nreference sequence: {}".format(
                resolvedseq, out_seq, sequence))
    return out_seq, out_ca_coords, out_cb_coords
def _get_reference_sequence(mmcif_dict, seq_idx):
    """Pick a reference sequence from an mmCIF dict (DISABLED).

    Deliberately disabled: everything after the raise is unreachable
    exploratory code kept for reference.
    """
    raise NotImplementedError()
    # NOTE this entire thing does not work as intended yet
    # NOTE the entity poly entry is missing mutations sometimes, while the struct ref one is missing residues (probably not resolved ones?) inspired by 2zs0D00
    struct_seq = None
    if "_struct_ref.pdbx_seq_one_letter_code" in mmcif_dict:
        struct_seq = mmcif_dict["_struct_ref.pdbx_seq_one_letter_code"][seq_idx]
        struct_seq = ''.join(struct_seq.split())
    # TODO replace (MSE) with M and find a solution for similar non-canonical residues
    entity_seq = mmcif_dict["_entity_poly.pdbx_seq_one_letter_code_can"][seq_idx]
    entity_seq = ''.join(entity_seq.split())
    print("\nstruct: ", ''.join(struct_seq.split()), 'AAAAAAAAAAAAAAAAAAAAAA')
    print("entity: ", ''.join(entity_seq.split()), 'BBBBBBBBBBBBBBBBBBBBBB')
    if struct_seq is None or struct_seq == '?' or struct_seq in entity_seq or '_struct_ref_seq_dif.align_id' in mmcif_dict: # NOTE last condition inspired by 3bbzA00
        refseq = entity_seq
    else:
        # NOTE assuming that if we can't use entity_seq as reference we can use struct_seq without modifications
        assert '(' not in struct_seq and ')' not in struct_seq
        assert '(' not in entity_seq and ')' not in entity_seq
        # from Bio.pairwise2 import format_alignment
        # alignments = pairwise2.align.globalms(struct_seq, entity_seq, match=10, mismatch=-100, open=-5, extend=0., penalize_end_gaps=False)
        # aligned_struct_seq, aligned_entity_seq = alignments[0][0:2]
        # for a in alignments:
        #     print(format_alignment(*a))
        # break
        refseq = struct_seq
    return refseq
| 50.8125 | 346 | 0.612593 |
99aa624f19efa1afde630b3576e0d7e527c447d3 | 2,297 | py | Python | tests/test_text_preprocess.py | crystina-z/nirtools | c2ac5ac515f85353f05b25570a85169babc5f26e | [
"MIT"
] | 1 | 2020-06-19T06:07:05.000Z | 2020-06-19T06:07:05.000Z | tests/test_text_preprocess.py | crystina-z/nir-tools | 19018fa2870c0f0156b7f0cf05a85e81a02cff30 | [
"MIT"
] | 1 | 2020-09-30T12:22:37.000Z | 2020-09-30T12:22:37.000Z | tests/test_text_preprocess.py | crystina-z/nirtools | c2ac5ac515f85353f05b25570a85169babc5f26e | [
"MIT"
] | null | null | null | from nirtools.text import preprocess
def test_get_lang_reserved_words():
    """get_lang_reserved_words('ruby') returns the full Ruby reserved-word list."""
    words = preprocess.get_lang_reserved_words("ruby")
    assert words == [
        "__ENCODING__", "__LINE__", "__FILE__", "BEGIN", "END", "alias", "and", "begin",
        "break", "case", "class", "def", "defined", "do", "else", "elsif", "end", "ensure",
        "false", "for", "if", "in", "module", "next", "nil", "not", "or", "redo", "rescue",
        "retry", "return", "self", "super", "then", "true", "undef", "unless", "until",
        "when", "while", "yield"]
def test_code_tokenize():
    """code_tokenize splits camelCase identifiers into words and lowercases them."""
    code = "func ( t * SecondaryTree ) SeekFirst ( ) ( e * SecondaryEnumerator , err error ) { q := t . first if q == nil { return nil , io . EOF } return btEPool2 . get ( nil , true , 0 , q . d [ 0 ] . k , q , t , atomic . LoadUint64 ( & t . ver ) ) , nil }"
    expected = "func ( t * secondary tree ) seek first ( ) ( e * secondary enumerator , err error ) { q := t . " \
               "first if q == nil { return nil , io . eof } return bt e pool2 . get ( nil , true , 0 , q . d [ 0 ] . " \
               "k , q , t , atomic . load uint64 ( & t . ver ) ) , nil }"
    assert preprocess.code_tokenize(code) == expected
def test_remove_non_alphabet():
    """remove_non_alphabet strips punctuation and digits, keeping alphabetic text."""
    code = "func ( t * SecondaryTree ) SeekFirst ( ) ( e * SecondaryEnumerator , err error ) { q := t . first if q == nil { return nil , io . EOF } return btEPool2 . get ( nil , true , 0 , q . d [ 0 ] . k , q , t , atomic . LoadUint64 ( & t . ver ) ) , nil }"
    expected = "func t SecondaryTree SeekFirst e SecondaryEnumerator err error q t first if q nil return nil io EOF " \
               "return btEPool get nil true q d k q t atomic LoadUint t ver nil"
    assert preprocess.remove_non_alphabet(code) == expected
def test_remove_unicharacter():
    """remove_unicharacter drops all tokens of length one (t, e, q, ...)."""
    code = "func ( t * SecondaryTree ) SeekFirst ( ) ( e * SecondaryEnumerator , err error ) { q := t . first if q == nil { return nil , io . EOF } return btEPool2 . get ( nil , true , 0 , q . d [ 0 ] . k , q , t , atomic . LoadUint64 ( & t . ver ) ) , nil }"
    expected = "func SecondaryTree SeekFirst SecondaryEnumerator err error := first if == nil return nil io " \
               "EOF return btEPool2 get nil true atomic LoadUint64 ver nil"
    assert preprocess.remove_unicharacter(code) == expected
| 65.628571 | 259 | 0.588158 |
c76621fbd6ea46c51dafb892b056abb73fe77f1b | 2,997 | py | Python | ert3py/node/Pub.py | trihome/eRT3_ros2 | 237cf026b9575fcd2b4341ec6b45dd7e87436ed6 | [
"MIT"
] | null | null | null | ert3py/node/Pub.py | trihome/eRT3_ros2 | 237cf026b9575fcd2b4341ec6b45dd7e87436ed6 | [
"MIT"
] | null | null | null | ert3py/node/Pub.py | trihome/eRT3_ros2 | 237cf026b9575fcd2b4341ec6b45dd7e87436ed6 | [
"MIT"
] | null | null | null | #!/usr/bin/env /usr/bin/python3
# -*- coding: utf-8 -*-
# -----------------------------------------------
# ROS2 Node 送信側
#
# The MIT License (MIT)
# Copyright (C) 2020 myasu.
# -----------------------------------------------
import rclpy
from rclpy.node import Node
# カスタムメッセージ
from ert3_mes.msg import DioMsg
class MyPublisher(Node):
    """
    Publisher side: steps a pedestrian-signal style lamp sequence and
    publishes one DioMsg per relay change.
    """
    # Node name
    SELFNODE = "ert3pub"
    # Topic name
    SELFTOPIC = "mes_" + SELFNODE

    def __init__(self):
        """
        Constructor.
        """
        # Initialize the node
        super().__init__(self.SELFNODE)
        # Show progress on the console
        self.get_logger().info(f"{self.SELFNODE} initializing...")
        # Create the publisher instance
        self.pub = self.create_publisher(DioMsg, self.SELFTOPIC, 10)
        # Create the timer instance (fires every 0.5 seconds)
        self.create_timer(0.5, self.callback)
        # Reset the sequence number
        self.sequence = 0
        # Show progress on the console
        self.get_logger().info(f"{self.SELFNODE} do...")

    def __del__(self):
        """
        Destructor.
        """
        # Show progress on the console
        self.get_logger().info(f"{self.SELFNODE} done.")

    def callback(self):
        """
        Timer body: advances the pedestrian-signal lamp sequence one step.
        """
        # Sequence control
        if self.sequence == 0:
            # Initialization (runs only once)
            pass
        if self.sequence == 1:
            # Red lamp on
            self.sendmsg(1, 1)
            # Green lamp off
            self.sendmsg(2, 0)
        elif self.sequence == 10:
            # Red lamp off
            self.sendmsg(1, 0)
            # Green lamp on
            self.sendmsg(2, 1)
        elif self.sequence in [21, 23, 25, 27, 29]:
            # Green lamp off (blinking phase)
            self.sendmsg(2, 0)
        elif self.sequence in [22, 24, 26, 28]:
            # Green lamp on (blinking phase)
            self.sendmsg(2, 1)
        elif self.sequence == 30:
            # Red lamp on
            self.sendmsg(1, 1)
            # Green lamp off
            self.sendmsg(2, 0)
        elif self.sequence > 30:
            # End of the sequence: restart from the beginning
            self.sequence = 0
        # Increment the sequence number
        self.sequence += 1

    def sendmsg(self, relay, value):
        """
        Publish a DioMsg.

        Parameters
        ----------
        relay : int
            Relay number to drive
        value : int
            0: off, >=1: on
        """
        # Build the message to send
        msg = DioMsg()
        msg.relay = relay
        msg.value = value
        # Publish it
        self.pub.publish(msg)
        # Show the log
        self.get_logger().info(
            f"Publish [{self.sequence} / relay: {relay}, val: {value}]")
def main(args=None):
    """
    Entry point: initialize rclpy, spin the publisher node until the
    process is interrupted, then shut everything down.

    Parameters
    ----------
    args : list, optional
        Command-line arguments forwarded to rclpy.init.
    """
    node = None
    try:
        # Initialize rclpy
        rclpy.init(args=args)
        # Create the node instance
        node = MyPublisher()
        # Idle until the process terminates
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass
    finally:
        # Shutdown. BUG FIX: if rclpy.init() or MyPublisher() raised above,
        # `node` was never bound and the original code raised NameError here;
        # guard the teardown so the real exception propagates instead.
        if node is not None:
            node.destroy_node()
        rclpy.shutdown()
# Run the node only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| 22.877863 | 73 | 0.446446 |
2126cbaf09e551a0654a23721db3febb956a2b85 | 1,546 | py | Python | simplecep/models.py | cauethenorio/django-simplecep | acab8a99fe3df8c6a2f01909c07fa36b1ea2d922 | [
"MIT"
] | 3 | 2020-10-18T16:29:32.000Z | 2021-09-22T12:10:19.000Z | simplecep/models.py | cauethenorio/django-simplecep | acab8a99fe3df8c6a2f01909c07fa36b1ea2d922 | [
"MIT"
] | 6 | 2019-11-26T01:18:43.000Z | 2022-02-10T12:50:16.000Z | simplecep/models.py | cauethenorio/django-simplecep | acab8a99fe3df8c6a2f01909c07fa36b1ea2d922 | [
"MIT"
] | 1 | 2020-03-31T03:52:13.000Z | 2020-03-31T03:52:13.000Z | from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from simplecep import CEPAddress
from simplecep.conf import simplecep_settings
class ValidCepsManager(models.Manager):
    """Manager that returns only cache rows still fresh per CEP_CACHE_MAXAGE."""
    def get_queryset(self):
        # Rows last updated before the cutoff are considered stale and hidden.
        cutoff = timezone.now() - simplecep_settings["CEP_CACHE_MAXAGE"]
        queryset = super().get_queryset()
        return queryset.filter(updated_at__gte=cutoff)
class CepCache(models.Model):
    """Local cache of Brazilian postal-code (CEP) lookups.

    Rows are written by ``update_from_cep_address`` and expire logically via
    ``valid_ceps`` (see ValidCepsManager), which filters on ``updated_at``.
    """
    # 8-digit CEP, used directly as the primary key.
    cep = models.CharField(_("CEP"), max_length=8, primary_key=True)
    state = models.CharField(_("State"), max_length=2, null=False)
    city = models.CharField(_("City"), max_length=128, null=False)
    # district/street may be absent for city-wide CEPs, hence null=True.
    district = models.CharField(_("District"), max_length=128, null=True)
    street = models.CharField(_("Address"), max_length=128, null=True)
    # Which backend service supplied this row.
    provider = models.CharField(_("Provider"), max_length=128)
    # auto_now: refreshed on every save; indexed for the freshness filter.
    updated_at = models.DateTimeField(_("Updated at"), auto_now=True, db_index=True)
    # valid_ceps is declared first, so it is the model's default manager.
    valid_ceps = ValidCepsManager()
    all_ceps = models.Manager()
    @classmethod
    def update_from_cep_address(cls, cep_address: CEPAddress):
        """Insert or refresh the cache row for the given CEPAddress."""
        cls.all_ceps.update_or_create(
            cep=cep_address.cep, defaults=cep_address.to_dict(with_provider=True)
        )
    def to_cep_address(self) -> CEPAddress:
        """Convert this cache row back into a CEPAddress value object."""
        return CEPAddress(
            cep=self.cep,
            street=self.street,
            state=self.state,
            district=self.district,
            city=self.city,
            provider=self.provider,
        )
| 32.208333 | 87 | 0.660414 |
de689f14fd9dd0104cdedeef011654c1fff2ea5e | 1,556 | py | Python | apps/cms/urls.py | dengjinshan/xfz | 2387a214a81b1487518c358291944d84736d7fff | [
"MIT"
] | null | null | null | apps/cms/urls.py | dengjinshan/xfz | 2387a214a81b1487518c358291944d84736d7fff | [
"MIT"
] | null | null | null | apps/cms/urls.py | dengjinshan/xfz | 2387a214a81b1487518c358291944d84736d7fff | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from . import course_views, staff_views
from django.conf.urls.static import static
from django.conf import settings
# URL configuration for the CMS app (namespaced as 'cms').
app_name = 'cms'

urlpatterns = [
    path('index/', views.index, name='index'),
    path('news_list/', views.NewsListView.as_view(), name='news_list'),
    path('write_news/', views.CMSView.as_view(), name='write_news'),
    path('edit_news/', views.EditNewsView.as_view(), name='edit_newses'),
    path('delete_news/', views.delete_news, name='delete_news'),
    path('news_category/', views.news_category, name='news_category'),
    path('add_news_category/', views.NewsAddView.as_view(), name='add_news_category'),
    # NOTE(review): this route is named 'edit_news', confusingly close to the
    # 'edit_newses' route above — kept as-is since templates may already
    # reverse it by this name.
    path('edit_news_category/', views.edit_news_category, name='edit_news'),
    # BUG FIX: this route previously reused name='delete_news', duplicating
    # the post-deletion route above, so reverse('cms:delete_news') resolved
    # to the category deleter. Give the category route a unique name.
    path('delete_news_category/', views.delete_news_category, name='delete_news_category'),
    path('upload_file/', views.upload_file, name='upload_file'),
    path('qntoken/', views.qntoken, name='qntoken'),
    path('banners/', views.banner, name='banners'),
    path('add_banner/', views.add_banner, name='add_banners'),
    # NOTE(review): the three routes below lack the trailing slash used by
    # the rest of the file — left unchanged to avoid breaking existing links.
    path('banner_list', views.banner_list, name='banner_list'),
    path('delete_banner', views.delete_banner, name='delete_banner'),
    path('edit_banner', views.edit_banner, name='edit_banner'),
]

# Course-related URL mappings
urlpatterns += [
    path('pub_course/', course_views.PubCourseView.as_view(), name='pub_course'),
]

# Staff management
urlpatterns += [
    path('staffs/', staff_views.staffs_view, name='staffs'),
    path('add_staff/', staff_views.AddStaffView.as_view(), name='add_staff'),
]
65d6fc9e42ad286a76244a5bf5c404d35f4ae617 | 24,950 | py | Python | vkontakte_wall/migrations/0015_auto__del_comment__del_field_post_likes__del_field_post_comments__del_.py | ramusus/django-vkontakte-wall | dbbc71f79b2651c6367c706d0cbe66f4dcd1b277 | [
"BSD-3-Clause"
] | 10 | 2015-01-10T15:34:25.000Z | 2021-07-30T11:14:22.000Z | vkontakte_wall/migrations/0015_auto__del_comment__del_field_post_likes__del_field_post_comments__del_.py | gorelikspb/django-vkontakte-wall | 09b921034d909d7162ee48e8a3eb1c29c0747f40 | [
"BSD-3-Clause"
] | 2 | 2015-06-11T15:28:52.000Z | 2015-08-04T11:53:13.000Z | vkontakte_wall/migrations/0015_auto__del_comment__del_field_post_likes__del_field_post_comments__del_.py | gorelikspb/django-vkontakte-wall | 09b921034d909d7162ee48e8a3eb1c29c0747f40 | [
"BSD-3-Clause"
] | 7 | 2015-01-29T15:51:38.000Z | 2020-09-01T03:14:47.000Z | # -*- coding: utf-8 -*-
from django.db import models
from south.db import db
from south.utils import datetime_utils as datetime
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration for vkontakte_wall.

    forwards(): drops the old Comment model and its M2M table, replaces
    Post's wall_owner_*/likes/comments/reposts columns with owner_*/
    *_count columns, and recreates the likes/reposts M2M tables under
    new names.

    backwards(): restores the old schema; NOTE it is only partially
    reversible — it raises RuntimeError part-way through because several
    dropped columns' values cannot be restored (the code after each raise
    is unreachable, generated-as-documentation only).
    """
    def forwards(self, orm):
        """Apply the schema change (auto-generated by South)."""
        # Deleting model 'Comment'
        db.delete_table(u'vkontakte_wall_comment')
        # Removing M2M table for field like_users on 'Comment'
        db.delete_table(db.shorten_name(u'vkontakte_wall_comment_like_users'))
        # Deleting field 'Post.likes'
        db.delete_column(u'vkontakte_wall_post', 'likes')
        # Deleting field 'Post.comments'
        db.delete_column(u'vkontakte_wall_post', 'comments')
        # Deleting field 'Post.wall_owner_content_type'
        db.delete_column(u'vkontakte_wall_post', 'wall_owner_content_type_id')
        # Deleting field 'Post.wall_owner_id'
        db.delete_column(u'vkontakte_wall_post', 'wall_owner_id')
        # Deleting field 'Post.reposts'
        db.delete_column(u'vkontakte_wall_post', 'reposts')
        # Adding field 'Post.owner_content_type'
        db.add_column(u'vkontakte_wall_post', 'owner_content_type',
                      self.gf('django.db.models.fields.related.ForeignKey')(
                          related_name=u'content_type_owners_vkontakte_wall_posts', null=True, to=orm['contenttypes.ContentType']),
                      keep_default=False)
        # Adding field 'Post.owner_id'
        db.add_column(u'vkontakte_wall_post', 'owner_id',
                      self.gf('django.db.models.fields.BigIntegerField')(null=True, db_index=True),
                      keep_default=False)
        # Adding field 'Post.likes_count'
        db.add_column(u'vkontakte_wall_post', 'likes_count',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True, db_index=True),
                      keep_default=False)
        # Adding field 'Post.comments_count'
        db.add_column(u'vkontakte_wall_post', 'comments_count',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True),
                      keep_default=False)
        # Adding field 'Post.reposts_count'
        db.add_column(u'vkontakte_wall_post', 'reposts_count',
                      self.gf('django.db.models.fields.PositiveIntegerField')(null=True, db_index=True),
                      keep_default=False)
        # Removing M2M table for field repost_users on 'Post'
        db.delete_table(db.shorten_name(u'vkontakte_wall_post_repost_users'))
        # Removing M2M table for field like_users on 'Post'
        db.delete_table(db.shorten_name(u'vkontakte_wall_post_like_users'))
        # Adding M2M table for field likes_users on 'Post'
        m2m_table_name = db.shorten_name(u'vkontakte_wall_post_likes_users')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm[u'vkontakte_wall.post'], null=False)),
            ('user', models.ForeignKey(orm[u'vkontakte_users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['post_id', 'user_id'])
        # Adding M2M table for field reposts_users on 'Post'
        m2m_table_name = db.shorten_name(u'vkontakte_wall_post_reposts_users')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm[u'vkontakte_wall.post'], null=False)),
            ('user', models.ForeignKey(orm[u'vkontakte_users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['post_id', 'user_id'])
        # Changing field 'Post.remote_id'
        db.alter_column(u'vkontakte_wall_post', 'remote_id', self.gf(
            'django.db.models.fields.CharField')(unique=True, max_length=20))
        # Changing field 'Post.author_content_type'
        db.alter_column(u'vkontakte_wall_post', 'author_content_type_id', self.gf(
            'django.db.models.fields.related.ForeignKey')(null=True, to=orm['contenttypes.ContentType']))
        # Changing field 'Post.author_id'
        db.alter_column(u'vkontakte_wall_post', 'author_id', self.gf(
            'django.db.models.fields.BigIntegerField')(null=True))
    def backwards(self, orm):
        """Partially revert the schema change; raises RuntimeError at the
        first irreversible column (auto-generated by South)."""
        # Adding model 'Comment'
        db.create_table(u'vkontakte_wall_comment', (
            ('archived', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('remote_id', self.gf('django.db.models.fields.CharField')(max_length='20', unique=True)),
            ('fetched', self.gf('django.db.models.fields.DateTimeField')(blank=True, null=True, db_index=True)),
            ('raw_html', self.gf('django.db.models.fields.TextField')()),
            ('post', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='wall_comments', to=orm['vkontakte_wall.Post'])),
            ('text', self.gf('django.db.models.fields.TextField')()),
            ('raw_json', self.gf('annoying.fields.JSONField')(default={}, null=True)),
            ('author_content_type', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='comments', to=orm['contenttypes.ContentType'])),
            ('wall_owner_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            ('reply_for_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, db_index=True)),
            ('reply_to', self.gf('django.db.models.fields.related.ForeignKey')
             (to=orm['vkontakte_wall.Comment'], null=True)),
            ('wall_owner_content_type', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='vkontakte_wall_comments', to=orm['contenttypes.ContentType'])),
            ('likes', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True)),
            ('reply_for_content_type', self.gf('django.db.models.fields.related.ForeignKey')
             (related_name='replies', null=True, to=orm['contenttypes.ContentType'])),
            ('date', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
            ('author_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('from_id', self.gf('django.db.models.fields.IntegerField')(null=True)),
        ))
        db.send_create_signal(u'vkontakte_wall', ['Comment'])
        # Adding M2M table for field like_users on 'Comment'
        m2m_table_name = db.shorten_name(u'vkontakte_wall_comment_like_users')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('comment', models.ForeignKey(orm[u'vkontakte_wall.comment'], null=False)),
            ('user', models.ForeignKey(orm[u'vkontakte_users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['comment_id', 'user_id'])
        # Adding field 'Post.likes'
        db.add_column(u'vkontakte_wall_post', 'likes',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
                      keep_default=False)
        # Adding field 'Post.comments'
        db.add_column(u'vkontakte_wall_post', 'comments',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
                      keep_default=False)
        # User chose to not deal with backwards NULL issues for 'Post.wall_owner_content_type'
        raise RuntimeError(
            "Cannot reverse this migration. 'Post.wall_owner_content_type' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct
        # migration # Adding field 'Post.wall_owner_content_type'
        db.add_column(u'vkontakte_wall_post', 'wall_owner_content_type',
                      self.gf('django.db.models.fields.related.ForeignKey')(
                          related_name='vkontakte_wall_posts', to=orm['contenttypes.ContentType']),
                      keep_default=False)
        # User chose to not deal with backwards NULL issues for 'Post.wall_owner_id'
        raise RuntimeError("Cannot reverse this migration. 'Post.wall_owner_id' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct
        # migration # Adding field 'Post.wall_owner_id'
        db.add_column(u'vkontakte_wall_post', 'wall_owner_id',
                      self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True),
                      keep_default=False)
        # Adding field 'Post.reposts'
        db.add_column(u'vkontakte_wall_post', 'reposts',
                      self.gf('django.db.models.fields.PositiveIntegerField')(default=0, db_index=True),
                      keep_default=False)
        # Deleting field 'Post.owner_content_type'
        db.delete_column(u'vkontakte_wall_post', 'owner_content_type_id')
        # Deleting field 'Post.owner_id'
        db.delete_column(u'vkontakte_wall_post', 'owner_id')
        # Deleting field 'Post.likes_count'
        db.delete_column(u'vkontakte_wall_post', 'likes_count')
        # Deleting field 'Post.comments_count'
        db.delete_column(u'vkontakte_wall_post', 'comments_count')
        # Deleting field 'Post.reposts_count'
        db.delete_column(u'vkontakte_wall_post', 'reposts_count')
        # Adding M2M table for field repost_users on 'Post'
        m2m_table_name = db.shorten_name(u'vkontakte_wall_post_repost_users')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm[u'vkontakte_wall.post'], null=False)),
            ('user', models.ForeignKey(orm[u'vkontakte_users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['post_id', 'user_id'])
        # Adding M2M table for field like_users on 'Post'
        m2m_table_name = db.shorten_name(u'vkontakte_wall_post_like_users')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('post', models.ForeignKey(orm[u'vkontakte_wall.post'], null=False)),
            ('user', models.ForeignKey(orm[u'vkontakte_users.user'], null=False))
        ))
        db.create_unique(m2m_table_name, ['post_id', 'user_id'])
        # Removing M2M table for field likes_users on 'Post'
        db.delete_table(db.shorten_name(u'vkontakte_wall_post_likes_users'))
        # Removing M2M table for field reposts_users on 'Post'
        db.delete_table(db.shorten_name(u'vkontakte_wall_post_reposts_users'))
        # Changing field 'Post.remote_id'
        db.alter_column(u'vkontakte_wall_post', 'remote_id', self.gf(
            'django.db.models.fields.CharField')(max_length='20', unique=True))
        # User chose to not deal with backwards NULL issues for 'Post.author_content_type'
        raise RuntimeError(
            "Cannot reverse this migration. 'Post.author_content_type' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Changing field 'Post.author_content_type'
        db.alter_column(u'vkontakte_wall_post', 'author_content_type_id', self.gf(
            'django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType']))
        # User chose to not deal with backwards NULL issues for 'Post.author_id'
        raise RuntimeError("Cannot reverse this migration. 'Post.author_id' and its values cannot be restored.")
        # The following code is provided here to aid in writing a correct migration
        # Changing field 'Post.author_id'
        db.alter_column(u'vkontakte_wall_post', 'author_id', self.gf('django.db.models.fields.PositiveIntegerField')())
    # Frozen ORM snapshot used by South to build the model state for this
    # migration. Auto-generated — do not edit by hand.
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'vkontakte_comments.comment': {
            'Meta': {'object_name': 'Comment'},
            'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'content_type_authors_vkontakte_comments_comments'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'author_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'likes_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'likes_users': ('m2m_history.fields.ManyToManyHistoryField', [], {'related_name': "'like_comments'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
            'object_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_objects_vkontakte_comments'", 'to': u"orm['contenttypes.ContentType']"}),
            'object_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True'}),
            'owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'content_type_owners_vkontakte_comments_comments'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'owner_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'reply_for_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'replies'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'reply_for_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_comments.Comment']", 'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        },
        u'vkontakte_places.city': {
            'Meta': {'object_name': 'City'},
            'area': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cities'", 'null': 'True', 'to': u"orm['vkontakte_places.Country']"}),
            'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'region': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
        },
        u'vkontakte_places.country': {
            'Meta': {'object_name': 'Country'},
            'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'remote_id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'})
        },
        u'vkontakte_users.user': {
            'Meta': {'object_name': 'User'},
            'about': ('django.db.models.fields.TextField', [], {}),
            'activity': ('django.db.models.fields.TextField', [], {}),
            'albums': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'audios': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'bdate': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'books': ('django.db.models.fields.TextField', [], {}),
            'city': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_places.City']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'counters_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'country': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['vkontakte_places.Country']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
            'facebook': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'facebook_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'faculty': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'faculty_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'followers': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'friends_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'friends_users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'followers_users'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
            'games': ('django.db.models.fields.TextField', [], {}),
            'graduation': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'has_avatar': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'has_mobile': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'interests': ('django.db.models.fields.TextField', [], {}),
            'is_deactivated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'livejournal': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'movies': ('django.db.models.fields.TextField', [], {}),
            'mutual_friends': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'notes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'photo': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'photo_big': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'photo_medium': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'photo_medium_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'photo_rec': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'rate': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'relation': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
            'remote_id': ('django.db.models.fields.BigIntegerField', [], {'primary_key': 'True'}),
            'screen_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'sex': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'skype': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'subscriptions': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'sum_counters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'timezone': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'tv': ('django.db.models.fields.TextField', [], {}),
            'twitter': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'university': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'university_name': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'user_photos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'user_videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'videos': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'wall_comments': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
        },
        u'vkontakte_wall.post': {
            'Meta': {'object_name': 'Post'},
            'archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'attachments': ('django.db.models.fields.TextField', [], {}),
            'author_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'content_type_authors_vkontakte_wall_posts'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'author_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'comments_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'copy_owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'vkontakte_wall_copy_posts'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'copy_owner_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'copy_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'wall_reposts'", 'null': 'True', 'to': u"orm['vkontakte_wall.Post']"}),
            'copy_text': ('django.db.models.fields.TextField', [], {}),
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'fetched': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'geo': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'likes_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'likes_users': ('m2m_history.fields.ManyToManyHistoryField', [], {'related_name': "'like_posts'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
            'media': ('django.db.models.fields.TextField', [], {}),
            'online': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            'owner_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'content_type_owners_vkontakte_wall_posts'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'owner_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'post_source': ('django.db.models.fields.TextField', [], {}),
            'raw_html': ('django.db.models.fields.TextField', [], {}),
            'raw_json': ('annoying.fields.JSONField', [], {'default': '{}', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'reply_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'reposts_count': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'reposts_users': ('m2m_history.fields.ManyToManyHistoryField', [], {'related_name': "'reposts_posts'", 'symmetrical': 'False', 'to': u"orm['vkontakte_users.User']"}),
            'signer_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {})
        }
    }
    complete_apps = ['vkontakte_wall']
| 68.922652 | 217 | 0.618637 |
730c7a8660cd5bcaf3c61c637c0730768d6a9a27 | 1,905 | py | Python | src/schmetterling/tag/git_delivery.py | bjuvensjo/schmetterling | 0cdbfe4f379a081d9d4711dd21866b90983365cf | [
"Apache-2.0"
] | null | null | null | src/schmetterling/tag/git_delivery.py | bjuvensjo/schmetterling | 0cdbfe4f379a081d9d4711dd21866b90983365cf | [
"Apache-2.0"
] | null | null | null | src/schmetterling/tag/git_delivery.py | bjuvensjo/schmetterling | 0cdbfe4f379a081d9d4711dd21866b90983365cf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
from vang.core.core import pmap_unordered
from vang.pio.shell import run_command
from schmetterling.core.git import get_commit, get_tag
from schmetterling.core.log import log_params_return
from schmetterling.setup.state import SetupState
from schmetterling.tag.state import Repo, Tag, TagState
TAG_PREFIX = 'delivery'
@log_params_return('debug')
def create_state(tagged_repos):
    """Build a TagState from the per-repo tagging results of do_tag."""
    repos = []
    for result in tagged_repos:
        source = result['repo']
        tag = Tag(result['tag'], result['commit'])
        repos.append(Repo(source.project, source.name, source.path, tag))
    return TagState(__name__, repos)
@log_params_return('debug')
def create_tag(timestamp):
    """Return the delivery tag name for the given timestamp."""
    return '{}/{}'.format(TAG_PREFIX, timestamp)
@log_params_return('debug')
def get_repos(state):
    """Collect repos from every SetupState that lack a delivery tag."""
    pending = []
    for sub_state in state:
        if not isinstance(sub_state, SetupState):
            continue
        for repo in sub_state.repos:
            if not has_delivery_tag(repo):
                pending.append(repo)
    return pending
@log_params_return('debug')
def has_delivery_tag(repo):
    """Return True if the repo's HEAD tag starts with the delivery prefix."""
    tag = get_tag(repo.path)
    if not tag:
        return False
    return tag.startswith(TAG_PREFIX)
@log_params_return('debug')
def do_tag(repos, tag):
    """Create annotated tag ``tag`` on every repo in parallel; return results."""
    command = f'git tag -a {tag} -m {tag}'
    @log_params_return('debug')
    def f(repo):
        # check=False: a failed tag is reported via 'success', not raised.
        returncode, output = run_command(
            command, return_output=True, cwd=repo.path, check=False)
        result = {'repo': repo}
        result['commit'] = get_commit(repo.path)
        result['tag'] = tag
        result['success'] = returncode == 0
        result['output'] = output
        return result
    return list(pmap_unordered(f, repos))
# TODO: consult the previous state so repos already tagged are skipped.
# Is tagging really necessary, or could the HEAD commit sha serve instead?
@log_params_return('info')
def execute(state, timestamp):
    """Tag all untagged setup repos with delivery/<timestamp>; return a TagState."""
    pending = get_repos(state)
    results = do_tag(pending, create_tag(timestamp))
    tag_state = create_state(results)
    return tag_state
| 25.4 | 70 | 0.645669 |
b6eaac90086c01b09d3b865d0cd29bb5a4fa52eb | 1,524 | py | Python | custom/icds_reports/data_pull/exporter.py | scottwedge/commcare-hq | 900ccf81c9f23fb3b435962f065648669817f37a | [
"BSD-3-Clause"
] | null | null | null | custom/icds_reports/data_pull/exporter.py | scottwedge/commcare-hq | 900ccf81c9f23fb3b435962f065648669817f37a | [
"BSD-3-Clause"
] | null | null | null | custom/icds_reports/data_pull/exporter.py | scottwedge/commcare-hq | 900ccf81c9f23fb3b435962f065648669817f37a | [
"BSD-3-Clause"
] | null | null | null | import zipfile
from django.utils.functional import cached_property
from corehq.util.context_managers import prevent_parallel_execution
from custom.icds.const import DATA_PULL_CACHE_KEY
from custom.icds_reports.const import CUSTOM_DATA_PULLS
from custom.icds_reports.data_pull.data_pulls import DirectDataPull
class DataExporter(object):
    """Run an ICDS data pull and package its result files into a zip."""
    def __init__(self, slug_or_file, db_alias, month, location_id):
        """
        run data export by either passing slug to a custom data pull
        or file name/path to a sql file which will be read as a single query

        :param slug_or_file: key in CUSTOM_DATA_PULLS, or a .sql file path
        :param db_alias: database alias the pull runs against
        :param month: month filter; date objects are stringified below
        :param location_id: location filter passed through to the pull
        """
        if month:
            # convert to string if date object received
            month = str(month)
        self.slug_or_file = slug_or_file
        self.month = month
        self.location_id = location_id
        # Unknown slugs fall back to DirectDataPull, which treats the value
        # as a path to a SQL file (passed via query_file_path).
        data_pull_class = CUSTOM_DATA_PULLS.get(self.slug_or_file, DirectDataPull)
        self.data_pull_obj = data_pull_class(
            db_alias,
            query_file_path=self.slug_or_file,
            month=self.month, location_id=self.location_id
        )
    @prevent_parallel_execution(DATA_PULL_CACHE_KEY)
    def export(self):
        """Run the pull and write each result file into a zip; return its name.

        Serialized via DATA_PULL_CACHE_KEY so only one export runs at a time.
        """
        zip_file_name = "%s-DataPull.zip" % self.data_pull_obj.name
        # NOTE(review): mode='a' appends to an existing archive of the same
        # name if one is already on disk — presumably intentional for resumed
        # pulls; confirm before changing to 'w'.
        with zipfile.ZipFile(zip_file_name, mode='a') as z:
            for filename, string_buffer in self.data_pull_obj.run().items():
                z.writestr(filename, string_buffer.getvalue())
        return zip_file_name
    @cached_property
    def queries(self):
        # Cached: the query list does not change for a given exporter instance.
        return self.data_pull_obj.get_queries()
| 37.170732 | 82 | 0.698819 |
5eb8146f5e102a7c44eba317b8de6767b046a6f7 | 11,353 | py | Python | graphene_django_extras/fields.py | keithhackbarth/graphene-django-extras | ba7cb36e2eedb603b9369b59ecac3999dc1e5bf9 | [
"MIT"
] | null | null | null | graphene_django_extras/fields.py | keithhackbarth/graphene-django-extras | ba7cb36e2eedb603b9369b59ecac3999dc1e5bf9 | [
"MIT"
] | null | null | null | graphene_django_extras/fields.py | keithhackbarth/graphene-django-extras | ba7cb36e2eedb603b9369b59ecac3999dc1e5bf9 | [
"MIT"
] | 1 | 2022-03-11T11:27:34.000Z | 2022-03-11T11:27:34.000Z | # -*- coding: utf-8 -*-
import operator
from functools import partial
from graphene import Field, List, ID, Argument
from graphene.types.structures import Structure, NonNull
from graphene_django.fields import DjangoListField as DLF
from graphene_django.filter.utils import get_filtering_args_from_filterset
from graphene_django.utils import (
maybe_queryset,
is_valid_django_model,
DJANGO_FILTER_INSTALLED,
)
from graphene_django_extras.filters.filter import get_filterset_class
from graphene_django_extras.settings import graphql_api_settings
from .base_types import DjangoListObjectBase
from .paginations.pagination import BaseDjangoGraphqlPagination
from .utils import get_extra_filters, queryset_factory, get_related_fields, find_field
# *********************************************** #
# *********** FIELD FOR SINGLE OBJECT *********** #
# *********************************************** #
class DjangoObjectField(Field):
    """Field resolving a single Django object by its primary key."""
    def __init__(self, _type, *args, **kwargs):
        # A single-object lookup always requires an "id" argument.
        kwargs["id"] = ID(
            required=True, description="Django object unique identification field"
        )
        super(DjangoObjectField, self).__init__(_type, *args, **kwargs)
    @property
    def model(self):
        """The Django model backing this field's node type."""
        return self.type._meta.node._meta.model
    @staticmethod
    def object_resolver(manager, root, info, **kwargs):
        """Fetch the instance with the given pk, or None when absent."""
        pk = kwargs.get("id", None)
        try:
            return manager.get_queryset().get(pk=pk)
        except manager.model.DoesNotExist:
            return None
    def wrap_resolve(self, parent_resolver):
        return partial(self.object_resolver, self.type._meta.model._default_manager)
# *********************************************** #
# *************** FIELDS FOR LIST *************** #
# *********************************************** #
class DjangoListField(DLF):
    """List field that always exposes ``[Type!]`` (non-null items)."""
    def __init__(self, _type, *args, **kwargs):
        inner = _type.of_type if isinstance(_type, NonNull) else _type
        # Deliberately bypass DLF.__init__ via super(DLF, ...): the type is
        # wrapped here, so we go straight to the plain Field initializer.
        super(DLF, self).__init__(List(NonNull(inner)), *args, **kwargs)
class DjangoFilterListField(Field):
    """
    List field whose results can be narrowed with django-filter arguments.
    Builds a FilterSet from the type's meta (or the supplied overrides) and
    exposes each declared filter as a GraphQL argument, plus an "id" argument.
    """
    def __init__(
        self,
        _type,
        fields=None,
        extra_filter_meta=None,
        filterset_class=None,
        *args,
        **kwargs,
    ):
        # Filtering machinery is only wired up when django-filter is installed.
        if DJANGO_FILTER_INSTALLED:
            _fields = _type._meta.filter_fields
            _model = _type._meta.model
            self.fields = fields or _fields
            meta = dict(model=_model, fields=self.fields)
            if extra_filter_meta:
                meta.update(extra_filter_meta)
            filterset_class = filterset_class or _type._meta.filterset_class
            self.filterset_class = get_filterset_class(filterset_class, **meta)
            # One GraphQL argument per filter declared on the FilterSet.
            self.filtering_args = get_filtering_args_from_filterset(
                self.filterset_class, _type
            )
            kwargs.setdefault("args", {})
            kwargs["args"].update(self.filtering_args)
            # Always allow narrowing by primary key, unless the caller has
            # already declared its own "id" argument.
            if "id" not in kwargs["args"].keys():
                self.filtering_args.update(
                    {
                        "id": Argument(
                            ID, description="Django object unique identification field"
                        )
                    }
                )
                kwargs["args"].update(
                    {
                        "id": Argument(
                            ID, description="Django object unique identification field"
                        )
                    }
                )
        if not kwargs.get("description", None):
            kwargs["description"] = "{} list".format(_type._meta.model.__name__)
        super(DjangoFilterListField, self).__init__(List(_type), *args, **kwargs)
    @property
    def model(self):
        # Unwrap the List wrapper, then the node, to reach the Django model.
        return self.type.of_type._meta.node._meta.model
    @staticmethod
    def list_resolver(manager, filterset_class, filtering_args, root, info, **kwargs):
        """
        Resolve the list. When resolving a relation of a parent object, the
        parent's related manager is preferred; otherwise a queryset is built
        from the default manager and run through the FilterSet.
        """
        qs = None
        field = None
        if root and is_valid_django_model(root._meta.model):
            # Nested resolution: find which relation of the parent model is
            # being requested by this GraphQL selection.
            available_related_fields = get_related_fields(root._meta.model)
            field = find_field(info.field_nodes[0], available_related_fields)
        filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
        if field is not None:
            try:
                # Call parent.<relation>.filter(...) or parent.<relation>.all()
                # dynamically through attrgetter on the relation name.
                if filter_kwargs:
                    qs = operator.attrgetter(
                        "{}.filter".format(
                            getattr(field, "related_name", None) or field.name
                        )
                    )(root)(**filter_kwargs)
                else:
                    qs = operator.attrgetter(
                        "{}.all".format(
                            getattr(field, "related_name", None) or field.name
                        )
                    )(root)()
            except AttributeError:
                # Relation attribute missing on the instance: fall back to
                # the manager-based path below.
                qs = None
        if qs is None:
            qs = queryset_factory(manager, info.field_nodes, info.fragments, **kwargs)
            qs = filterset_class(
                data=filter_kwargs, queryset=qs, request=info.context
            ).qs
            if root and is_valid_django_model(root._meta.model):
                # Constrain the queryset to rows related to the parent object.
                extra_filters = get_extra_filters(root, manager.model)
                qs = qs.filter(**extra_filters)
        return maybe_queryset(qs)
    def wrap_resolve(self, parent_resolver):
        # Peel off List/NonNull wrappers to reach the underlying object type.
        current_type = self.type
        while isinstance(current_type, Structure):
            current_type = current_type.of_type
        return partial(
            self.list_resolver,
            current_type._meta.model._default_manager,
            self.filterset_class,
            self.filtering_args,
        )
class DjangoFilterPaginateListField(Field):
    """
    Filterable, paginated list field: combines django-filter arguments with
    a pagination style whose controls are exposed as extra GraphQL arguments.
    """
    def __init__(
        self,
        _type,
        pagination=None,
        fields=None,
        extra_filter_meta=None,
        filterset_class=None,
        *args,
        **kwargs,
    ):
        _fields = _type._meta.filter_fields
        _model = _type._meta.model
        self.fields = fields or _fields
        meta = dict(model=_model, fields=self.fields)
        if extra_filter_meta:
            meta.update(extra_filter_meta)
        filterset_class = filterset_class or _type._meta.filterset_class
        self.filterset_class = get_filterset_class(filterset_class, **meta)
        # One GraphQL argument per filter declared on the FilterSet.
        self.filtering_args = get_filtering_args_from_filterset(
            self.filterset_class, _type
        )
        kwargs.setdefault("args", {})
        kwargs["args"].update(self.filtering_args)
        if "id" not in kwargs["args"]:
            # Same de-duplicated construction used by DjangoListObjectField.
            id_description = "Django object unique identification field"
            self.filtering_args.update(
                {"id": Argument(ID, description=id_description)}
            )
            kwargs["args"].update({"id": Argument(ID, description=id_description)})
        # NOTE(review): if DEFAULT_PAGINATION_CLASS were configured as None
        # this call would raise TypeError; presumably the setting is always a
        # pagination class -- confirm.
        pagination = pagination or graphql_api_settings.DEFAULT_PAGINATION_CLASS()
        if pagination is not None:
            assert isinstance(pagination, BaseDjangoGraphqlPagination), (
                'You need to pass a valid DjangoGraphqlPagination in DjangoFilterPaginateListField, received "{}".'
            ).format(pagination)
            # Expose the pagination controls (e.g. limit/offset) as arguments.
            pagination_kwargs = pagination.to_graphql_fields()
            self.pagination = pagination
            kwargs.update(**pagination_kwargs)
        if not kwargs.get("description", None):
            kwargs["description"] = "{} list".format(_type._meta.model.__name__)
        super(DjangoFilterPaginateListField, self).__init__(
            List(NonNull(_type)), *args, **kwargs
        )
    @property
    def model(self):
        # Unwrap the List wrapper, then the node, to reach the Django model.
        return self.type.of_type._meta.node._meta.model
    def get_queryset(self, manager, info, **kwargs):
        """Build the base queryset, selecting only the requested fields."""
        return queryset_factory(manager, info.field_nodes, info.fragments, **kwargs)
    def list_resolver(
        self, manager, filterset_class, filtering_args, root, info, **kwargs
    ):
        """Filter, constrain to the parent object (if any), then paginate."""
        filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
        qs = self.get_queryset(manager, info, **kwargs)
        qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
        if root and is_valid_django_model(root._meta.model):
            # Constrain the queryset to rows related to the parent object.
            extra_filters = get_extra_filters(root, manager.model)
            qs = qs.filter(**extra_filters)
        if getattr(self, "pagination", None):
            qs = self.pagination.paginate_queryset(qs, **kwargs)
        return maybe_queryset(qs)
    def wrap_resolve(self, parent_resolver):
        # Peel off List/NonNull wrappers to reach the underlying object type.
        current_type = self.type
        while isinstance(current_type, Structure):
            current_type = current_type.of_type
        return partial(
            self.list_resolver,
            current_type._meta.model._default_manager,
            self.filterset_class,
            self.filtering_args,
        )
class DjangoListObjectField(Field):
    """
    Field returning filtered results together with their total count, wrapped
    in a DjangoListObjectBase container.
    """
    def __init__(
        self,
        _type,
        fields=None,
        extra_filter_meta=None,
        filterset_class=None,
        *args,
        **kwargs,
    ):
        # Filtering machinery is only wired up when django-filter is installed.
        if DJANGO_FILTER_INSTALLED:
            _fields = _type._meta.filter_fields
            _model = _type._meta.model
            self.fields = fields or _fields
            meta = dict(model=_model, fields=self.fields)
            if extra_filter_meta:
                meta.update(extra_filter_meta)
            filterset_class = filterset_class or _type._meta.filterset_class
            self.filterset_class = get_filterset_class(filterset_class, **meta)
            # One GraphQL argument per filter declared on the FilterSet.
            self.filtering_args = get_filtering_args_from_filterset(
                self.filterset_class, _type
            )
            kwargs.setdefault("args", {})
            kwargs["args"].update(self.filtering_args)
            # Always allow narrowing by primary key unless already declared.
            if "id" not in kwargs["args"].keys():
                id_description = "Django object unique identification field"
                self.filtering_args.update(
                    {"id": Argument(ID, description=id_description)}
                )
                kwargs["args"].update({"id": Argument(ID, description=id_description)})
        if not kwargs.get("description", None):
            kwargs["description"] = "{} list".format(_type._meta.model.__name__)
        super(DjangoListObjectField, self).__init__(_type, *args, **kwargs)
    @property
    def model(self):
        return self.type._meta.model
    def list_resolver(
        self, manager, filterset_class, filtering_args, root, info, **kwargs
    ):
        """Build and filter the queryset, returning results plus their count."""
        qs = queryset_factory(manager, info.field_nodes, info.fragments, **kwargs)
        filter_kwargs = {k: v for k, v in kwargs.items() if k in filtering_args}
        qs = filterset_class(data=filter_kwargs, queryset=qs, request=info.context).qs
        # Total number of rows after filtering.
        count = qs.count()
        return DjangoListObjectBase(
            count=count,
            results=maybe_queryset(qs),
            results_field_name=self.type._meta.results_field_name,
        )
    def wrap_resolve(self, parent_resolver):
        return partial(
            self.list_resolver,
            self.type._meta.model._default_manager,
            self.filterset_class,
            self.filtering_args,
        )
| 34.299094 | 115 | 0.583987 |
59329debecf86d90055447448d499bace3dc46ca | 14,106 | py | Python | libcxx/utils/libcxx/test/dsl.py | Diatrus/llvm-project | de78ef470a60fd6eee4b57f656315b0212101ba4 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/libcxx/test/dsl.py | Diatrus/llvm-project | de78ef470a60fd6eee4b57f656315b0212101ba4 | [
"Apache-2.0"
] | null | null | null | libcxx/utils/libcxx/test/dsl.py | Diatrus/llvm-project | de78ef470a60fd6eee4b57f656315b0212101ba4 | [
"Apache-2.0"
] | null | null | null | #===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import libcxx.test.newformat
import lit
import lit.util
import os
import pipes
import platform
import tempfile
def _memoize(f):
cache = dict()
def memoized(x):
if x not in cache:
cache[x] = f(x)
return cache[x]
return memoized
def _executeScriptInternal(test, commands):
    """
    Returns (stdout, stderr, exitCode, timeoutInfo)

    Runs `commands` for `test` through Lit's internal script runner, creating
    the test's execution directory first if it does not exist.

    TODO: This really should be easier to access from Lit itself
    """
    class FakeLitConfig(object):
        # Minimal stand-in for a LitConfig: provides only the attributes
        # that executeScriptInternal reads.
        def __init__(self):
            self.isWindows = platform.system() == 'Windows'
            # 0 disables the per-test timeout.
            self.maxIndividualTestTime = 0
    litConfig = FakeLitConfig()
    _, tmpBase = lit.TestRunner.getTempPaths(test)
    execDir = os.path.dirname(test.getExecPath())
    if not os.path.exists(execDir):
        os.makedirs(execDir)
    res = lit.TestRunner.executeScriptInternal(test, litConfig, tmpBase, commands, execDir)
    if isinstance(res, lit.Test.Result):
        # Lit presumably returns a Result object instead of a tuple when it
        # could not run the script; normalize that to a generic failure.
        res = ('', '', 127, None)
    return res
def _makeConfigTest(config):
    """
    Create a throwaway Lit test, backed by an empty temporary .cpp file,
    that can be used to run substitution-based scripts against `config`.
    The returned object is a context manager that removes the temporary
    source file on exit.
    """
    sourceRoot = os.path.join(config.test_exec_root, '__config_src__')
    execRoot = os.path.join(config.test_exec_root, '__config_exec__')
    suite = lit.Test.TestSuite('__config__', sourceRoot, execRoot, config)
    if not os.path.exists(sourceRoot):
        os.makedirs(sourceRoot)
    # delete=False: the file must outlive this function; cleanup happens in
    # the wrapper's __exit__.
    tmp = tempfile.NamedTemporaryFile(dir=sourceRoot, delete=False, suffix='.cpp')
    tmp.close()
    pathInSuite = [os.path.relpath(tmp.name, sourceRoot)]
    class TestWrapper(lit.Test.Test):
        # Context-manager sugar so callers can write `with _makeConfigTest(...)`.
        def __enter__(self): return self
        def __exit__(self, *args): os.remove(tmp.name)
    return TestWrapper(suite, pathInSuite, config)
def sourceBuilds(config, source):
    """
    Whether a program consisting of the string `source` compiles and links
    successfully with the %{cxx} substitution of `config`.
    """
    with _makeConfigTest(config) as test:
        with open(test.getSourcePath(), 'w') as f:
            f.write(source)
        buildScript = libcxx.test.newformat.parseScript(
            test,
            preamble=[
                "mkdir -p %T",
                "%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
            ],
            fileDependencies=['%t.exe'])
        _, _, exitCode, _ = _executeScriptInternal(test, buildScript)
        # Best-effort removal of the produced executable.
        cleanupScript = libcxx.test.newformat.parseScript(
            test, preamble=['rm %t.exe'], fileDependencies=[])
        _executeScriptInternal(test, cleanupScript)
        return exitCode == 0
def hasCompileFlag(config, flag):
    """
    Whether the configured compiler accepts `flag`, determined by running a
    syntax-only compilation of an empty input with that flag under -Werror.
    """
    with _makeConfigTest(config) as test:
        cmd = "%{{cxx}} -xc++ {} -Werror -fsyntax-only %{{flags}} %{{compile_flags}} {}".format(os.devnull, flag)
        script = libcxx.test.newformat.parseScript(test, preamble=[cmd], fileDependencies=[])
        _, _, exitCode, _ = _executeScriptInternal(test, script)
        return exitCode == 0
def hasLocale(config, locale):
    """
    Return whether the runtime execution environment supports a given locale.
    This is done by executing a program that tries to set the given locale using
    %{exec} -- this means that the command may be executed on a remote host
    depending on the %{exec} substitution.
    """
    with _makeConfigTest(config) as test:
        # Probe program: exits 0 iff setlocale() accepts the requested locale.
        with open(test.getSourcePath(), 'w') as source:
            source.write("""
            #include <locale.h>
            int main(int, char** argv) {
              if (::setlocale(LC_ALL, argv[1]) != NULL) return 0;
              else return 1;
            }
            """)
        commands = [
            "mkdir -p %T",
            "%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
            # Quote the locale name in case it contains shell-special chars.
            "%{{exec}} %t.exe {}".format(pipes.quote(locale)),
        ]
        commands = libcxx.test.newformat.parseScript(test, preamble=commands, fileDependencies=['%t.exe'])
        out, err, exitCode, timeoutInfo = _executeScriptInternal(test, commands)
        # Best-effort removal of the compiled probe.
        cleanup = libcxx.test.newformat.parseScript(test, preamble=['rm %t.exe'], fileDependencies=[])
        _executeScriptInternal(test, cleanup)
        return exitCode == 0
def compilerMacros(config, flags=''):
    """
    Return the compiler's predefined macros as a {name: definition} dict.
    An optional `flags` string is appended to the compiler invocation used
    to dump the macros.
    """
    with _makeConfigTest(config) as test:
        cmd = "%{{cxx}} -xc++ {} -dM -E %{{flags}} %{{compile_flags}} {}".format(os.devnull, flags)
        script = libcxx.test.newformat.parseScript(test, preamble=[cmd], fileDependencies=[])
        output, _, _, _ = _executeScriptInternal(test, script)
        macros = {}
        for rawLine in output.split('\n'):
            if not rawLine.startswith('#define '):
                continue
            # Each line looks like "#define NAME DEFINITION" where the
            # definition may be empty.
            name, _sep, definition = rawLine.strip()[len('#define '):].partition(' ')
            macros[name] = definition
        return macros
def featureTestMacros(config, flags=''):
    """
    Return the feature-test macros (``__cpp_*``) as a name -> int mapping.
    """
    result = {}
    for name, value in compilerMacros(config, flags).items():
        if name.startswith('__cpp_'):
            # Strip integer-literal suffixes (e.g. 201703L) before converting.
            result[name] = int(value.rstrip('LlUu'))
    return result
class Feature(object):
    """
    Represents a Lit available feature that is enabled whenever it is supported.
    A feature like this informs the test suite about a capability of the compiler,
    platform, etc. Unlike Parameters, it does not make sense to explicitly
    control whether a Feature is enabled -- it should be enabled whenever it
    is supported.
    """
    def __init__(self, name, compileFlag=None, linkFlag=None, when=lambda _: True):
        """
        Create a Lit feature for consumption by a test suite.
        - name
            The name of the feature. This is what will end up in Lit's available
            features if the feature is enabled. This can be either a string or a
            callable, in which case it is passed the TestingConfig and should
            generate a string representing the name of the feature.
        - compileFlag
            An optional compile flag to add when this feature is added to a
            TestingConfig. If provided, this must be a string representing a
            compile flag that will be appended to the end of the %{compile_flags}
            substitution of the TestingConfig.
        - linkFlag
            An optional link flag to add when this feature is added to a
            TestingConfig. If provided, this must be a string representing a
            link flag that will be appended to the end of the %{link_flags}
            substitution of the TestingConfig.
        - when
            A callable that gets passed a TestingConfig and should return a
            boolean representing whether the feature is supported in that
            configuration. For example, this can use `hasCompileFlag` to
            check whether the compiler supports the flag that the feature
            represents. If omitted, the feature will always be considered
            supported.
        """
        self._name = name
        self._compileFlag = compileFlag
        self._linkFlag = linkFlag
        self._isSupported = when
    def isSupported(self, config):
        """
        Return whether the feature is supported by the given TestingConfig.
        """
        return self._isSupported(config)
    def enableIn(self, config):
        """
        Enable a feature in a TestingConfig.
        The name of the feature is added to the set of available features of
        `config`, and any compile or link flags provided upon construction of
        the Feature are added to the end of the corresponding substitution in
        the config.
        It is an error to call `f.enableIn(cfg)` if the feature `f` is not
        supported in that TestingConfig (i.e. if `not f.isSupported(cfg)`).
        """
        assert self.isSupported(config), \
            "Trying to enable feature {} that is not supported in the given configuration".format(self._name)
        # Append `flag` to the substitution named `sub`, leaving every other
        # substitution untouched.
        addTo = lambda subs, sub, flag: [(s, x + ' ' + flag) if s == sub else (s, x) for (s, x) in subs]
        if self._compileFlag:
            # Flags may be static strings or callables evaluated per-config.
            compileFlag = self._compileFlag(config) if callable(self._compileFlag) else self._compileFlag
            config.substitutions = addTo(config.substitutions, '%{compile_flags}', compileFlag)
        if self._linkFlag:
            linkFlag = self._linkFlag(config) if callable(self._linkFlag) else self._linkFlag
            config.substitutions = addTo(config.substitutions, '%{link_flags}', linkFlag)
        # The feature name itself may also be computed from the config.
        name = self._name(config) if callable(self._name) else self._name
        config.available_features.add(name)
def _str_to_bool(s):
"""
Convert a string value to a boolean.
True values are "y", "yes", "t", "true", "on" and "1", regardless of capitalization.
False values are "n", "no", "f", "false", "off" and "0", regardless of capitalization.
"""
trueVals = ["y", "yes", "t", "true", "on", "1"]
falseVals = ["n", "no", "f", "false", "off", "0"]
lower = s.lower()
if lower in trueVals:
return True
elif lower in falseVals:
return False
else:
raise ValueError("Got string '{}', which isn't a valid boolean".format(s))
class Parameter(object):
    """
    A command-line knob for a Lit test suite.

    A parameter is set by running Lit with ``--param <name>=<value>``. It
    accepts a finite set of values and may fall back to a default (possibly
    computed from the TestingConfig) when not given explicitly. Resolving a
    parameter produces a Feature (or None) through its `feature` callback,
    which can then be enabled in the TestingConfig. It is an error for the
    resolved value to lie outside the declared choices, or for the produced
    Feature to be unsupported by the configuration (e.g. selecting C++17
    when the compiler does not support `-std=c++17`).

    Keeping the set of choices finite is a deliberate design constraint: it
    bounds the number of configurations the test suite can run in, which
    keeps the supported-configuration matrix maintainable. Open-ended
    options (such as the path to the compiler) belong in `lit.cfg`, not in
    a Parameter.
    """
    def __init__(self, name, choices, type, help, feature, default=None):
        """
        Create a new test-suite parameter.

        - name
            Non-empty string used on the command line: ``--param <name>=<value>``.
        - choices
            Iterable of all acceptable parsed values; must be non-empty. Any
            resolved value (explicit or default) must be a member of it.
        - type
            Callable parsing the raw command-line string. ``bool`` is
            special-cased so that boolean-like strings are understood.
        - help
            Human-readable description of the parameter.
        - feature
            Callable mapping the parsed value to a Feature or None.
        - default
            Optional fallback used when the parameter is absent from the Lit
            parameters. May be a callable taking the TestingConfig.
        """
        self._name = name
        if len(self._name) == 0:
            raise ValueError("Parameter name must not be the empty string")
        self._choices = list(choices)  # must be finite
        if len(self._choices) == 0:
            raise ValueError("Parameter '{}' must be given at least one possible value".format(self._name))
        def parseValue(raw):
            # Accept boolean-ish strings ("yes", "off", ...) when the
            # declared type is bool.
            if type is bool and isinstance(raw, str):
                return _str_to_bool(raw)
            return type(raw)
        self._parse = parseValue
        self._help = help
        self._feature = feature
        self._default = default
    @property
    def name(self):
        """
        The command-line name of this parameter (``--param <name>=<value>``).
        """
        return self._name
    def getFeature(self, config, litParams):
        """Resolve the parameter's value and map it through `feature`."""
        raw = litParams.get(self.name, None)
        if raw is None and self._default is None:
            raise ValueError("Parameter {} doesn't have a default value, but it was not specified in the Lit parameters".format(self.name))
        if raw is not None:
            value = self._parse(raw)
        elif callable(self._default):
            value = self._default(config)
        else:
            value = self._default
        if value not in self._choices:
            raise ValueError("Got value '{}' for parameter '{}', which is not in the provided set of possible choices: {}".format(value, self.name, self._choices))
        return self._feature(value)
| 41.125364 | 157 | 0.688005 |
133e40fd0f3371147f4d81065f5ea25dacf62bbc | 5,839 | py | Python | intersight/models/hyperflex_abstract_app_setting_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | intersight/models/hyperflex_abstract_app_setting_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | intersight/models/hyperflex_abstract_app_setting_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | [
"Apache-2.0"
] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class HyperflexAbstractAppSettingRef(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    # swagger_types: attribute name -> attribute type.
    swagger_types = {
        'object_type': 'str',
        'moid': 'str',
        'selector': 'str'
    }
    # attribute_map: attribute name -> JSON key in the API definition.
    attribute_map = {
        'object_type': 'ObjectType',
        'moid': 'Moid',
        'selector': 'Selector'
    }

    def __init__(self, object_type=None, moid=None, selector=None):
        """HyperflexAbstractAppSettingRef - a model defined in Swagger"""
        self._object_type = None
        self._moid = None
        self._selector = None
        # Route non-None constructor arguments through the property setters.
        for attr_name, value in (('object_type', object_type),
                                 ('moid', moid),
                                 ('selector', selector)):
            if value is not None:
                setattr(self, attr_name, value)

    @property
    def object_type(self):
        """The Object Type of the referenced REST resource (str)."""
        return self._object_type

    @object_type.setter
    def object_type(self, object_type):
        self._object_type = object_type

    @property
    def moid(self):
        """The Moid of the referenced REST resource (str)."""
        return self._moid

    @moid.setter
    def moid(self, moid):
        self._moid = moid

    @property
    def selector(self):
        """
        An OData $filter expression describing the REST resource to reference.
        Clients may set this instead of 'moid'; if 'moid' is set this field is
        ignored. The expression must match exactly one REST resource, e.g.
        "Serial eq '3AA8B7T11'".
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        self._selector = selector

    @staticmethod
    def _serialize(value):
        """Convert one attribute value into plain dict/list form."""
        if isinstance(value, list):
            return [item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value]
        if hasattr(value, "to_dict"):
            return value.to_dict()
        if isinstance(value, dict):
            return {key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()}
        return value

    def to_dict(self):
        """Return the model properties as a dict."""
        return {attr: self._serialize(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return true if both objects are equal."""
        if not isinstance(other, HyperflexAbstractAppSettingRef):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return true if both objects are not equal."""
        return not self == other
| 32.082418 | 576 | 0.614831 |
e82f9bbf0aa8c865c524a93802b10a1a7820a2be | 8,717 | py | Python | openquake/hazardlib/geo/line.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 1 | 2019-08-01T00:28:24.000Z | 2019-08-01T00:28:24.000Z | openquake/hazardlib/geo/line.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 4 | 2018-08-31T14:14:35.000Z | 2021-10-11T12:53:13.000Z | openquake/hazardlib/geo/line.py | gfzriesgos/shakyground-lfs | 2caf67cc32e6800286eded2df1efb05973ccf41b | [
"BSD-3-Clause"
] | 3 | 2018-08-31T14:11:00.000Z | 2019-07-17T10:06:02.000Z | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2018 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module :mod:`openquake.hazardlib.geo.line` defines :class:`Line`.
"""
import numpy
from openquake.hazardlib.geo import geodetic
from openquake.hazardlib.geo import utils
class Line(object):
"""
This class represents a geographical line, which is basically
a sequence of geographical points.
A line is defined by at least one point.
:param points:
The sequence of points defining this line.
:type points:
list of :class:`~openquake.hazardlib.geo.point.Point` instances
"""
def __init__(self, points):
self.points = utils.clean_points(points)
if len(self.points) < 1:
raise ValueError("One point needed to create a line!")
    def __eq__(self, other):
        """
        Two lines are equal when their point sequences match element-wise,
        in the same order.

        >>> from openquake.hazardlib.geo.point import Point
        >>> points = [Point(1, 2), Point(3, 4)]; Line(points) == Line(points)
        True
        >>> Line(points) == Line(list(reversed(points)))
        False
        """
        return self.points == other.points
def __ne__(self, other):
"""
>>> from openquake.hazardlib.geo.point import Point
>>> Line([Point(1, 2)]) != Line([Point(1, 2)])
False
>>> Line([Point(1, 2)]) != Line([Point(2, 1)])
True
"""
return not self.__eq__(other)
    def __len__(self):
        # Number of points defining this line.
        return len(self.points)
def __getitem__(self, key):
return self.points.__getitem__(key)
def on_surface(self):
"""
Check if this line is defined on the surface (i.e. all points
are on the surfance, depth=0.0).
:returns bool:
True if this line is on the surface, false otherwise.
"""
return all(point.on_surface() for point in self.points)
def horizontal(self):
"""
Check if this line is horizontal (i.e. all depths of points
are equal).
:returns bool:
True if this line is horizontal, false otherwise.
"""
return all(p.depth == self[0].depth for p in self)
    def average_azimuth(self):
        """
        Calculate and return weighted average azimuth of all line's segments
        in decimal degrees.
        Uses formula from
        http://en.wikipedia.org/wiki/Mean_of_circular_quantities
        >>> from openquake.hazardlib.geo.point import Point as P
        >>> '%.1f' % Line([P(0, 0), P(1e-5, 1e-5)]).average_azimuth()
        '45.0'
        >>> '%.1f' % Line([P(0, 0), P(0, 1e-5), P(1e-5, 1e-5)]).average_azimuth()
        '45.0'
        >>> line = Line([P(0, 0), P(-2e-5, 0), P(-2e-5, 1.154e-5)])
        >>> '%.1f' % line.average_azimuth()
        '300.0'
        """
        # Single segment: its azimuth is the answer, no averaging needed.
        if len(self.points) == 2:
            return self.points[0].azimuth(self.points[1])
        lons = numpy.array([point.longitude for point in self.points])
        lats = numpy.array([point.latitude for point in self.points])
        # Per-segment azimuths and lengths; the lengths act as weights.
        azimuths = geodetic.azimuth(lons[:-1], lats[:-1], lons[1:], lats[1:])
        distances = geodetic.geodetic_distance(lons[:-1], lats[:-1],
                                               lons[1:], lats[1:])
        azimuths = numpy.radians(azimuths)
        # convert polar coordinates to Cartesian ones and calculate
        # the average coordinate of each component
        avg_x = numpy.mean(distances * numpy.sin(azimuths))
        avg_y = numpy.mean(distances * numpy.cos(azimuths))
        # find the mean azimuth from that mean vector
        azimuth = numpy.degrees(numpy.arctan2(avg_x, avg_y))
        # Normalize from (-180, 180] to [0, 360).
        if azimuth < 0:
            azimuth += 360
        return azimuth
def resample(self, section_length):
"""
Resample this line into sections.
The first point in the resampled line corresponds
to the first point in the original line.
Starting from the first point in the original line, a line
segment is defined as the line connecting the last point in the
resampled line and the next point in the original line.
The line segment is then split into sections of length equal to
``section_length``. The resampled line is obtained
by concatenating all sections.
The number of sections in a line segment is calculated as follows:
``round(segment_length / section_length)``.
Note that the resulting line has a length that is an exact multiple of
``section_length``, therefore its length is in general smaller
or greater (depending on the rounding) than the length
of the original line.
For a straight line, the difference between the resulting length
and the original length is at maximum half of the ``section_length``.
For a curved line, the difference my be larger,
because of corners getting cut.
:param section_length:
The length of the section, in km.
:type section_length:
float
:returns:
A new line resampled into sections based on the given length.
:rtype:
An instance of :class:`Line`
"""
if len(self.points) < 2:
return Line(self.points)
resampled_points = []
# 1. Resample the first section. 2. Loop over the remaining points
# in the line and resample the remaining sections.
# 3. Extend the list with the resampled points, except the first one
# (because it's already contained in the previous set of
# resampled points).
resampled_points.extend(
self.points[0].equally_spaced_points(self.points[1],
section_length)
)
# Skip the first point, it's already resampled
for i in range(2, len(self.points)):
points = resampled_points[-1].equally_spaced_points(
self.points[i], section_length
)
resampled_points.extend(points[1:])
return Line(resampled_points)
def get_length(self):
"""
Calculate and return the length of the line as a sum of lengths
of all its segments.
:returns:
Total length in km.
"""
length = 0
for i, point in enumerate(self.points):
if i != 0:
length += point.distance(self.points[i - 1])
return length
    def resample_to_num_points(self, num_points):
        """
        Resample the line to a specified number of points.

        The total line length is divided into ``num_points - 1`` equal
        sections; the resampled points are placed at those section
        boundaries by walking the original segments with an accumulated
        distance counter.

        :param num_points:
            Integer number of points the resulting line should have.
        :returns:
            A new line with that many points as requested.
        """
        assert len(self.points) > 1, "can not resample the line of one point"
        # spacing between consecutive resampled points, in km
        section_length = self.get_length() / (num_points - 1)
        resampled_points = [self.points[0]]
        # index of the original segment currently being walked
        segment = 0
        # cumulative length of all fully traversed original segments
        acc_length = 0
        last_segment_length = 0
        for i in range(num_points - 1):
            # target distance of the (i+1)-th resampled point from the start
            tot_length = (i + 1) * section_length
            # advance along original segments until the target falls inside one
            while tot_length > acc_length and segment < len(self.points) - 1:
                last_segment_length = self.points[segment].distance(
                    self.points[segment + 1]
                )
                acc_length += last_segment_length
                segment += 1
            # endpoints of the segment containing the target distance
            p1, p2 = self.points[segment - 1:segment + 1]
            # distance of the target point from the segment's first vertex
            offset = tot_length - (acc_length - last_segment_length)
            if offset < 1e-5:
                # forward geodetic transformations for very small distances
                # are very inefficient (and also unneeded). if target point
                # is just 1 cm away from original (non-resampled) line vertex,
                # don't even bother doing geodetic calculations.
                resampled = p1
            else:
                resampled = p1.equally_spaced_points(p2, offset)[1]
            resampled_points.append(resampled)
        return Line(resampled_points)
| 35.72541 | 81 | 0.602616 |
944e5faa8c684a2aa973e306e54b14f9ef436444 | 17,657 | py | Python | custard/tests/test.py | kunitoki/django-custard | 3cf3aa5acf84de2f653e96469e2f9c42813df50a | [
"MIT"
] | 6 | 2015-06-15T07:40:26.000Z | 2016-06-27T08:01:34.000Z | custard/tests/test.py | kunitoki/django-custard | 3cf3aa5acf84de2f653e96469e2f9c42813df50a | [
"MIT"
] | 3 | 2015-03-11T22:43:01.000Z | 2015-06-07T21:50:36.000Z | custard/tests/test.py | kunitoki/django-custard | 3cf3aa5acf84de2f653e96469e2f9c42813df50a | [
"MIT"
] | 6 | 2015-03-11T22:19:57.000Z | 2021-03-10T15:40:52.000Z | from __future__ import unicode_literals
from datetime import date, time, datetime
import django
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db.models import Q
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase, Client
from django.test.client import RequestFactory
from django.test.utils import override_settings
from custard.conf import (CUSTOM_TYPE_TEXT, CUSTOM_TYPE_INTEGER,
CUSTOM_TYPE_BOOLEAN, CUSTOM_TYPE_FLOAT,
CUSTOM_TYPE_DATE, CUSTOM_TYPE_DATETIME,
CUSTOM_TYPE_TIME, settings)
from custard.builder import CustomFieldsBuilder
from custard.utils import import_class
from .models import (SimpleModelWithManager, SimpleModelWithoutManager,
CustomFieldsModel, CustomValuesModel, builder,
SimpleModelUnique, CustomFieldsUniqueModel, CustomValuesUniqueModel, builder_unique)
#==============================================================================
class SimpleModelWithManagerForm(builder.create_modelform()):
    # ModelForm whose base class is generated by the module-level
    # CustomFieldsBuilder, so every custom field registered for the model
    # shows up as a regular form field.
    class Meta:
        model = SimpleModelWithManager
        fields = '__all__'
#class ExampleAdmin(admin.ModelAdmin):
# form = ExampleForm
# search_fields = ('name',)
#
# def get_search_results(self, request, queryset, search_term):
# queryset, use_distinct = super(ExampleAdmin, self).get_search_results(request, queryset, search_term)
# queryset |= self.model.objects.search(search_term)
# return queryset, use_distinct
#
# admin.site.register(Example, ExampleAdmin)
#==============================================================================
class CustomModelsTestCase(TestCase):
    """Exercise the custard custom-fields models, values, forms and search."""

    def setUp(self):
        # One custom field per supported data type, all attached to
        # SimpleModelWithManager, plus one model instance to hang values off.
        self.factory = RequestFactory()
        self.simple_with_manager_ct = ContentType.objects.get_for_model(SimpleModelWithManager)
        self.simple_without_manager_ct = ContentType.objects.get_for_model(SimpleModelWithoutManager)
        self.simple_unique = ContentType.objects.get_for_model(SimpleModelUnique)
        self.cf = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                   name='text_field',
                                                   label="Text field",
                                                   data_type=CUSTOM_TYPE_TEXT)
        self.cf.save()
        # required and not searchable: relied on by the form and search tests
        self.cf2 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='another_text_field',
                                                    label="Text field 2",
                                                    data_type=CUSTOM_TYPE_TEXT,
                                                    required=True,
                                                    searchable=False)
        self.cf2.clean()
        self.cf2.save()
        self.cf3 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='int_field', label="Integer field",
                                                    data_type=CUSTOM_TYPE_INTEGER)
        self.cf3.save()
        self.cf4 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='boolean_field', label="Boolean field",
                                                    data_type=CUSTOM_TYPE_BOOLEAN)
        self.cf4.save()
        self.cf5 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='float_field', label="Float field",
                                                    data_type=CUSTOM_TYPE_FLOAT)
        self.cf5.save()
        self.cf6 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='date_field', label="Date field",
                                                    data_type=CUSTOM_TYPE_DATE)
        self.cf6.save()
        self.cf7 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='datetime_field', label="Datetime field",
                                                    data_type=CUSTOM_TYPE_DATETIME)
        self.cf7.save()
        self.cf8 = CustomFieldsModel.objects.create(content_type=self.simple_with_manager_ct,
                                                    name='time_field', label="Time field",
                                                    data_type=CUSTOM_TYPE_TIME)
        self.cf8.save()
        self.obj = SimpleModelWithManager.objects.create(name='old test')
        self.obj.save()

    def tearDown(self):
        # deleting the field definitions also discards their attached values
        CustomFieldsModel.objects.all().delete()

    def test_import_class(self):
        # import_class must resolve a dotted path to the actual class object
        self.assertEqual(import_class('custard.builder.CustomFieldsBuilder'), CustomFieldsBuilder)

    def test_model_repr(self):
        # repr of a field is its name; repr of a value is "name: value"
        self.assertEqual(repr(self.cf), "<CustomFieldsModel: text_field>")
        val = CustomValuesModel.objects.create(custom_field=self.cf,
                                               object_id=self.obj.pk,
                                               value="abcdefg")
        val.save()
        self.assertEqual(repr(val), "<CustomValuesModel: text_field: abcdefg>")

    @override_settings(CUSTOM_CONTENT_TYPES=['tests.SimpleModelWithManager'])
    def test_field_creation(self):
        # a builder restricted to one content type must emit a matching query
        builder2 = CustomFieldsBuilder('tests.CustomFieldsModel',
                                       'tests.CustomValuesModel',
                                       settings.CUSTOM_CONTENT_TYPES)
        class TestCustomFieldsModel(builder2.create_fields()):
            class Meta:
                app_label = 'tests'
        self.assertQuerysetEqual(ContentType.objects.filter(builder2.content_types_query),
                                 ContentType.objects.filter(Q(app_label__in=['tests'],
                                                              model__in=['SimpleModelWithManager'])))

    def test_mixin(self):
        # the mixin exposes get/set accessors for custom fields and values
        self.assertIn(self.cf, self.obj.get_custom_fields())
        self.assertIn(self.cf, SimpleModelWithManager.get_model_custom_fields())
        # no value stored yet for cf2
        with self.assertRaises(ObjectDoesNotExist):
            self.obj.get_custom_value(self.cf2)
        val = CustomValuesModel.objects.create(custom_field=self.cf,
                                               object_id=self.obj.pk,
                                               value="123456")
        val.save()
        self.assertEqual("123456", self.obj.get_custom_value(self.cf).value)
        self.obj.set_custom_value(self.cf, "abcdefg")
        self.assertEqual("abcdefg", self.obj.get_custom_value(self.cf).value)
        val.delete()

    def test_field_model_clean(self):
        # a brand-new field name validates fine
        cf = CustomFieldsUniqueModel.objects.create(content_type=self.simple_unique,
                                                    name='xxx',
                                                    label="Field not present anywhere",
                                                    data_type=CUSTOM_TYPE_TEXT)
        cf.full_clean()
        cf.save()
        # a duplicate custom-field name must fail validation
        cf = CustomFieldsUniqueModel.objects.create(content_type=self.simple_unique,
                                                    name='xxx',
                                                    label="Field already in custom fields",
                                                    data_type=CUSTOM_TYPE_TEXT)
        with self.assertRaises(ValidationError):
            cf.full_clean()
        # a name clashing with a concrete model field must fail too
        cf = CustomFieldsUniqueModel.objects.create(content_type=self.simple_unique,
                                                    name='name',
                                                    label="Field already present in model",
                                                    data_type=CUSTOM_TYPE_INTEGER)
        with self.assertRaises(ValidationError):
            cf.full_clean()

    def test_value_model_clean(self):
        # storing the same value twice for the same object/field must not validate
        val = CustomValuesModel.objects.create(custom_field=self.cf2,
                                               object_id=self.obj.pk)
        val.value = "qwertyuiop"
        val.save()
        val = CustomValuesModel.objects.create(custom_field=self.cf2,
                                               object_id=self.obj.pk)
        val.value = "qwertyuiop"
        with self.assertRaises(ValidationError):
            val.full_clean()

    def test_value_types_accessor(self):
        # the .value accessor must work for every data type, both when the
        # value is left unset and when a typed value is provided
        val = CustomValuesModel.objects.create(custom_field=self.cf2,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf2,
                                               object_id=self.obj.pk,
                                               value="xxxxxxxxxxxxx")
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf3,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf3,
                                               object_id=self.obj.pk,
                                               value=1)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf4,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf4,
                                               object_id=self.obj.pk,
                                               value=True)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf5,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf5,
                                               object_id=self.obj.pk,
                                               value=3.1456)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf6,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf6,
                                               object_id=self.obj.pk,
                                               value=date.today())
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf7,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf7,
                                               object_id=self.obj.pk,
                                               value=datetime.now())
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf8,
                                               object_id=self.obj.pk)
        val.save()
        val = val.value
        val = CustomValuesModel.objects.create(custom_field=self.cf8,
                                               object_id=self.obj.pk,
                                               value=datetime.now().time())
        val.save()
        val = val.value

    def test_value_creation(self):
        # a created value inherits its content type from the custom field
        val = CustomValuesModel.objects.create(custom_field=self.cf,
                                               object_id=self.obj.pk,
                                               value="qwertyuiop")
        val.save()
        self.assertEqual(val.content_type, self.simple_with_manager_ct)
        self.assertEqual(val.content_type, val.custom_field.content_type)
        self.assertEqual(val.value_text, "qwertyuiop")
        self.assertEqual(val.value, "qwertyuiop")

    def test_value_search(self):
        # manager search() must match substrings in searchable custom values
        newobj = SimpleModelWithManager.objects.create(name='new simple')
        newobj.save()
        v1 = CustomValuesModel.objects.create(custom_field=self.cf,
                                              object_id=self.obj.pk,
                                              value="qwertyuiop")
        v1.save()
        v2 = CustomValuesModel.objects.create(custom_field=self.cf,
                                              object_id=newobj.pk,
                                              value="qwertyuiop")
        v2.save()
        v3 = CustomValuesModel.objects.create(custom_field=self.cf,
                                              object_id=newobj.pk,
                                              value="000asdf123")
        v3.save()
        qs1 = SimpleModelWithManager.objects.search("asdf")
        self.assertQuerysetEqual(qs1, [repr(newobj)])
        qs2 = SimpleModelWithManager.objects.search("qwerty")
        self.assertQuerysetEqual(qs2, [repr(self.obj), repr(newobj)], ordered=False)

    def test_value_search_not_searchable_field(self):
        # values of a searchable=False field must never appear in results
        v1 = CustomValuesModel.objects.create(custom_field=self.cf,
                                              object_id=self.obj.pk,
                                              value="12345")
        v1.save()
        v2 = CustomValuesModel.objects.create(custom_field=self.cf2,
                                              object_id=self.obj.pk,
                                              value="67890")
        v2.save()
        qs1 = SimpleModelWithManager.objects.search("12345")
        self.assertQuerysetEqual(qs1, [repr(self.obj)])
        qs2 = SimpleModelWithManager.objects.search("67890")
        self.assertQuerysetEqual(qs2, [])

    def test_get_formfield_for_field(self):
        # a CUSTOM_FIELD_TYPES override must change the generated form field class
        with self.settings(CUSTOM_FIELD_TYPES={CUSTOM_TYPE_TEXT: 'django.forms.fields.EmailField'}):
            builder2 = CustomFieldsBuilder('tests.CustomFieldsModel', 'tests.CustomValuesModel')
            class SimpleModelWithManagerForm2(builder2.create_modelform(field_types=settings.CUSTOM_FIELD_TYPES)):
                class Meta:
                    model = SimpleModelWithManager
                    fields = '__all__'
            form = SimpleModelWithManagerForm2(data={}, instance=self.obj)
            self.assertIsNotNone(form.get_formfield_for_field(self.cf))
            self.assertEqual(django.forms.fields.EmailField, form.get_formfield_for_field(self.cf).__class__)

    def test_get_widget_for_field(self):
        # a CUSTOM_WIDGET_TYPES override must change the generated widget class
        with self.settings(CUSTOM_WIDGET_TYPES={CUSTOM_TYPE_TEXT: 'django.forms.widgets.CheckboxInput'}):
            builder2 = CustomFieldsBuilder('tests.CustomFieldsModel', 'tests.CustomValuesModel')
            class SimpleModelWithManagerForm2(builder2.create_modelform(widget_types=settings.CUSTOM_WIDGET_TYPES)):
                class Meta:
                    fields = '__all__'
                    model = SimpleModelWithManager
            form = SimpleModelWithManagerForm2(data={}, instance=self.obj)
            self.assertIsNotNone(form.get_widget_for_field(self.cf))
            self.assertEqual(django.forms.widgets.CheckboxInput, form.get_widget_for_field(self.cf).__class__)

    def test_form(self):
        class TestForm(builder.create_modelform()):
            custom_name = 'My Custom Fields'
            custom_description = 'Edit the Example custom fields here'
            custom_classes = 'zzzap-class'
            class Meta:
                fields = '__all__'
                model = SimpleModelWithManager
        # missing the required custom field -> invalid form, save() raises
        request = self.factory.post('/', { 'text_field': '123' })
        form = TestForm(request.POST, instance=self.obj)
        self.assertFalse(form.is_valid())
        self.assertIn('another_text_field', form.errors)
        self.assertRaises(ValueError, lambda: form.save())
        # complete data -> custom values are persisted by save()
        request = self.factory.post('/', { 'id': self.obj.pk,
                                           'name': 'xxx',
                                           'text_field': '000111222333',
                                           'another_text_field': 'wwwzzzyyyxxx' })
        form = TestForm(request.POST, instance=self.obj)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(self.obj.get_custom_value(self.cf).value, '000111222333')
        self.assertEqual(self.obj.get_custom_value(self.cf2).value, 'wwwzzzyyyxxx')
        self.assertEqual(self.obj.name, 'xxx')
        # with commit=False custom values are written only by save_custom_fields()
        request = self.factory.post('/', { 'id': self.obj.pk,
                                           'name': 'aaa',
                                           'another_text_field': 'qqqwwweeerrrtttyyyy'})
        form = TestForm(request.POST, instance=self.obj)
        self.assertTrue(form.is_valid())
        obj = form.save(commit=False)
        obj.save()
        self.assertEqual(self.obj.get_custom_value(self.cf2).value, 'wwwzzzyyyxxx')
        form.save_m2m()
        form.save_custom_fields()
        self.assertEqual(self.obj.get_custom_value(self.cf2).value, 'qqqwwweeerrrtttyyyy')
        self.assertEqual(obj.name, 'aaa')
        #self.assertInHTML(TestForm.custom_name, form.as_p())
        #self.assertInHTML(TestForm.custom_description, form.as_p())
        #self.assertInHTML(TestForm.custom_classes, form.as_p())

    def test_admin(self):
        # only checks that a ModelAdmin subclass can be generated; the
        # admin-client interaction below was never enabled
        modeladmin_class = builder.create_modeladmin()

        #c = Client()
        #if c.login(username='fred', password='secret'):
        #    response = c.get('/admin/', follow=True)
        #    print(response)
0895e610f9355d7f226588ec8e02e27b320e2d48 | 969 | py | Python | takaggle/tools/create_pickle_data.py | takapy0210/takaggle | fcaa6ef23f3fd2a5a8ebe15e66b66c99d684d8d0 | [
"MIT"
] | 3 | 2021-03-21T02:28:25.000Z | 2022-02-12T07:28:56.000Z | takaggle/tools/create_pickle_data.py | takapy0210/takaggle | fcaa6ef23f3fd2a5a8ebe15e66b66c99d684d8d0 | [
"MIT"
] | null | null | null | takaggle/tools/create_pickle_data.py | takapy0210/takaggle | fcaa6ef23f3fd2a5a8ebe15e66b66c99d684d8d0 | [
"MIT"
] | null | null | null | import os
import pandas as pd
import yaml
import load_data
from util import get_logger, reduce_mem_usage
logger = get_logger()
# Directory containing this script; pickle paths are built relative to it.
file_path = os.path.dirname(__file__)
CONFIG_FILE = '../config/config.yaml'
with open(CONFIG_FILE) as file:
    # An explicit Loader is mandatory from PyYAML 6.0 on, and calling
    # yaml.load() without one was already deprecated/unsafe before that.
    # SafeLoader is sufficient for a plain configuration file.
    yml = yaml.load(file, Loader=yaml.SafeLoader)
INPUT_DIR_NAME = yml['SETTING']['INPUT_DIR_NAME']
def create_pickle(train, test, specs, train_labels):
    """Dump the four dataframes as pickle files into the input directory."""
    logger.info('save pickle file')
    targets = (
        (train, 'train.pkl'),
        (test, 'test.pkl'),
        (specs, 'specs.pkl'),
        (train_labels, 'train_labels.pkl'),
    )
    for frame, filename in targets:
        frame.to_pickle(file_path + INPUT_DIR_NAME + filename)
if __name__ == '__main__':
    # Load the raw input tables, shrink their memory footprint, then cache
    # them as pickle files for faster subsequent loading.
    train = reduce_mem_usage(load_data.read_train())
    test = reduce_mem_usage(load_data.read_test())
    specs = reduce_mem_usage(load_data.read_specs())
    train_labels = reduce_mem_usage(load_data.read_train_labels())
    create_pickle(train, test, specs, train_labels)
| 32.3 | 75 | 0.750258 |
3c83924f1b83b4725fe4f1034bb0e5e5c315f3f8 | 1,848 | py | Python | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/project/views/project_version_view.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 6 | 2018-11-26T08:42:52.000Z | 2020-06-01T08:33:48.000Z | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/project/views/project_version_view.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | null | null | null | teamcat_service/docker_build/target/one_step_build/teamcat/doraemon/project/views/project_version_view.py | zhangyin2088/Teamcat | be9be8d7c1e58c8d2d22ab78d25783d9aee4de71 | [
"Apache-2.0"
] | 1 | 2019-01-22T06:45:36.000Z | 2019-01-22T06:45:36.000Z | #coding=utf-8
# coding=utf-8
'''
Created on 2014-1-5
@author: ETHAN
'''
from django.shortcuts import render_to_response
from django.http import HttpResponse
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from gatesidelib.common.simplelogger import SimpleLogger
from doraemon.project.pagefactory.project_version_pageworker import ProjectVersionPageWorker
from business.project.version_service import VersionService
from doraemon.resources.project.resource_string import Version
@login_required
def all(request, projectid):
    '''Render the full project-version index page.

    Note: the view name "all" shadows the builtin of the same name, but it
    is kept because the URL configuration refers to it.
    '''
    worker = ProjectVersionPageWorker(request)
    return worker.get_full_page(request, projectid)
@login_required
def create(request, projectid):
    '''Create a new version for the project.

    Responds with "True" on success or with the localized failure message.
    '''
    outcome = True
    try:
        VersionService.create_version(request, projectid)
    except Exception as ex:
        SimpleLogger.error(ex)
        outcome = Version.version_save_fail
    return HttpResponse(outcome)
@login_required
def delete(request, projectid, version_id):
    '''Delete the given version.

    Responds with "True" on success or with the localized failure message.
    '''
    outcome = True
    try:
        VersionService.delete_version(request, version_id)
    except Exception as ex:
        SimpleLogger.error(ex)
        outcome = Version.version_delete_fail
    return HttpResponse(outcome)
@login_required
def update_version(request, projectid, version_id):
    '''Update the given version's fields.

    Responds with "True" on success or with the localized failure message.
    '''
    outcome = True
    try:
        VersionService.update_version(request, version_id)
    except Exception as ex:
        SimpleLogger.error(ex)
        outcome = Version.version_update_fail
    return HttpResponse(outcome)
@login_required
def update_date(request, projectid, version_id):
    '''Update the given version's date.

    Responds with "True" on success or with the localized failure message.
    '''
    outcome = True
    try:
        VersionService.update_date(request, version_id)
    except Exception as ex:
        SimpleLogger.error(ex)
        outcome = Version.version_update_fail
    return HttpResponse(outcome)
| 24.64 | 92 | 0.752706 |
c77c4eecbdd638bedf7fba794f1af0294ff95ec8 | 383 | py | Python | alembic/versions/f4e906862bfa_.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | 1 | 2022-03-20T21:46:05.000Z | 2022-03-20T21:46:05.000Z | alembic/versions/f4e906862bfa_.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | null | null | null | alembic/versions/f4e906862bfa_.py | danieliheonu/bigfastapi | 483554776195c9f38bb46ba719b613360eda1028 | [
"MIT"
] | null | null | null | """empty message
Revision ID: f4e906862bfa
Revises: 647f600ed6a8, 709ea24ea2ce
Create Date: 2022-02-13 22:58:14.523431
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f4e906862bfa'
# Two parent revisions: this is a merge revision joining both branches.
down_revision = ('647f600ed6a8', '709ea24ea2ce')
branch_labels = None
depends_on = None
def upgrade():
    """No-op: this merge revision introduces no schema changes."""
    pass
def downgrade():
    """No-op: nothing to revert for this merge revision."""
    pass
| 15.32 | 48 | 0.741514 |
914004cb45fc6ae292b1fe99b6051d586293d9d6 | 7,660 | py | Python | kaggle/src/divide.py | hpecl-sspku/hpecl-2017 | 895757eb7d5f984e0268ab99da95663172bc2f50 | [
"MIT"
] | null | null | null | kaggle/src/divide.py | hpecl-sspku/hpecl-2017 | 895757eb7d5f984e0268ab99da95663172bc2f50 | [
"MIT"
] | 8 | 2018-03-19T03:24:56.000Z | 2018-07-31T15:25:25.000Z | kaggle/src/divide.py | hpecl-sspku/hpecl-2017 | 895757eb7d5f984e0268ab99da95663172bc2f50 | [
"MIT"
] | 3 | 2018-11-13T06:46:51.000Z | 2020-07-20T05:53:56.000Z | import numpy as np
def detect_and_disply(model, img, return_mode="thres", verbose=1):
    """Run instance detection on a single image and unpack the first result.

    Returns the ``(masks, scores)`` pair of the first detection result, or
    ``(None, None)`` when the model reports nothing.
    """
    results = model.detect([img], return_mode=return_mode)
    if results is None:
        print("No instance has been detect")
        return None, None
    first = results[0]
    if verbose == 1:
        print("total nuclei detected: ", first['masks'].shape[2])
    return first['masks'], first['scores']
def find_match(l_ind, r_ind, l_mask, r_mask, threshold=20):
    """Pair up left/right mask indices whose overlap exceeds ``threshold``.

    Overlap is measured in pixels where both masks are set.  Each left
    index is matched to at most one right index (the first that qualifies);
    a right index may appear in several pairs.
    """
    matches = []
    for left_idx in l_ind:
        for right_idx in r_ind:
            combined = l_mask[:, :, left_idx] + r_mask[:, :, right_idx]
            if np.count_nonzero(combined > 1) > threshold:
                matches.append((left_idx, right_idx))
                break
    return matches
def expand_unmatch(a, a_score, ind, raw_shape, mode="lr"):
    """Embed the selected sub-masks into a zero canvas of ``raw_shape``.

    ``mode`` picks the edge the sub-masks are anchored to: "ud"/"lr" pin
    them to the top/left, "du"/"rl" to the bottom/right.  Returns the
    padded masks together with the corresponding scores.
    """
    selected = a[:, :, ind]
    selected_scores = a_score[ind]
    assert mode in {"ud", "du", "lr", "rl"}
    sub_height, sub_width = selected.shape[0], selected.shape[1]
    raw_height, raw_width = raw_shape[0], raw_shape[1]
    canvas = np.zeros((raw_height, raw_width, selected.shape[2]))
    if mode == "ud":
        canvas[:sub_height, :, :] += selected
    elif mode == "du":
        canvas[raw_height - sub_height:, :, :] += selected
    elif mode == "lr":
        canvas[:, :sub_width, :] += selected
    elif mode == "rl":
        canvas[:, raw_width - sub_width:, :] += selected
    return canvas, selected_scores
def combine_mask(a, b, score_a, score_b, match, raw_shape, mode="lr", binary=True):
    """Merge matched mask pairs from the two halves onto full-size canvases.

    For every ``(l, r)`` pair in ``match`` the l-th mask of ``a`` (left
    half) and the r-th mask of ``b`` (right half) are summed on a canvas of
    size ``raw_shape``.  With ``binary=True`` pixels covered by both masks
    (value exactly 2.0) are collapsed back to 1.0; otherwise the values in
    the overlap strip are averaged.  Returns the stacked masks as an
    (H, W, K) array plus the per-pair mean scores.  ``mode`` is accepted for
    symmetry with the other helpers but is unused.
    """
    a_width = a.shape[1]
    b_width = b.shape[1]
    raw_height, raw_width = raw_shape[0], raw_shape[1]
    merged_masks = []
    merged_scores = []
    for l, r in match:
        canvas = np.zeros((raw_height, raw_width))
        canvas[:, :a_width] += a[:, :, l]
        canvas[:, raw_width - b_width:] += b[:, :, r]
        if binary:
            canvas[np.where(canvas == 2.0)] = 1.0
        else:
            # halve the doubled values inside the overlap strip
            overlap = canvas[:, raw_width - b_width:a_width]
            canvas[:, raw_width - b_width:a_width] = overlap / 2
        merged_scores.append((score_a[l] + score_b[r]) / 2)
        merged_masks.append(canvas)
    stacked = np.transpose(np.array(merged_masks), [1, 2, 0])
    return stacked, np.array(merged_scores)
def divide_recursive_detect(model,img,inter_width,max_edge,combine_threshold,return_mode,verbose = 0):
    """Detect instances on an image larger than ``max_edge`` by recursion.

    The image is recursively halved along the width (and, via a transpose,
    along the height) with an overlap strip of ``inter_width`` pixels
    between the two halves.  Masks from the two halves that overlap inside
    the strip by more than ``combine_threshold`` pixels are stitched back
    together; the remaining masks are padded to full size.  Returns a
    ``(masks, scores)`` pair, or ``(None, None)`` when nothing is detected.

    NOTE(review): when ``return_mode`` is neither "thres" nor "raw" the
    recombination branch falls through and implicitly returns ``None``
    (a single value, not a pair) — see the no-op comparison flagged below.
    """
    miss_a_flag = 0
    miss_b_flag = 0
    a_match = []
    b_match = []
    match = []
    raw_shape = img.shape
    # base case: small enough to be detected in a single pass
    if raw_shape[0] <= max_edge and raw_shape[1] <= max_edge:
        return detect_and_disply(model,img,return_mode,verbose = verbose)
    elif raw_shape[1] > max_edge:
        # placeholders used when a half yields no detections / no matches
        cscore = np.zeros((1))
        expa_score = np.zeros((1))
        expb_score = np.zeros((1))
        cmask = np.zeros((raw_shape[0],raw_shape[1],1))
        expb = np.zeros((raw_shape[0],raw_shape[1],1))
        expa = np.zeros((raw_shape[0],raw_shape[1],1))
        # split into halves that overlap by inter_width columns
        a_width = int(raw_shape[1]/2)
        b_width = raw_shape[1] - a_width +inter_width
        if verbose:
            print("the width is",raw_shape[1])
            print("divding in to ",a_width,"and ",b_width)
        left = img[:,:a_width,:]
        right = img[:,raw_shape[1]-b_width:,:]
        if verbose:
            # NOTE(review): plt is not imported in this module; any verbose
            # run will raise NameError unless matplotlib.pyplot is injected
            # elsewhere — confirm before enabling verbose.
            plt.imshow(left)
            plt.show()
            plt.imshow(right)
            plt.show()
        # recurse on the left half; collect the masks touching the strip
        a,sa = divide_recursive_detect(model,left,inter_width,max_edge,combine_threshold,return_mode,verbose)
        if a is not None:
            #print("1",type(a))
            inter_a = a[:,a_width-inter_width:a_width,:]
            # a mask "touches" the strip if its bounding box there is > 2x2
            mask_height = np.sum(np.any(inter_a, axis=0), axis=0)
            mask_width = np.sum(np.any(inter_a, axis=1), axis=0)
            flag = (mask_height > 2) * (mask_width > 2)
            #print("2",flag)
            inter_ind_l = list(np.argwhere(flag == True).squeeze(1))
            empty_ind_l = list(np.argwhere(flag == False).squeeze(1))
        else :
            miss_a_flag +=1
        # recurse on the right half with the same strip bookkeeping
        b,sb = divide_recursive_detect(model,right,inter_width,max_edge,combine_threshold,return_mode,verbose)
        if b is not None:
            inter_b = b[:,:inter_width,:]
            mask_height = np.sum(np.any(inter_b, axis=0), axis=0)
            mask_width = np.sum(np.any(inter_b, axis=1), axis=0)
            flag = (mask_height > 2) * (mask_width > 2)
            inter_ind_r = list(np.argwhere(flag == True).squeeze(1))
            empty_ind_r = list(np.argwhere(flag == False).squeeze(1))
        else:
            miss_b_flag +=1
        # both halves empty: no detections at all
        if miss_a_flag+miss_b_flag == 2:
            return None,None
        # both halves produced masks: stitch the ones overlapping the strip
        if miss_a_flag+miss_b_flag == 0 :
            match = find_match(inter_ind_l,inter_ind_r,inter_a,inter_b,combine_threshold)
            #cmask= combine_mask(a,b,match,img.shape[0:2],mode="lr")
            #print("find ",len(match), " match!!!!")
            if len(match) != 0:
                #print("start combining")
                if return_mode == "thres":
                    binary = True
                else:
                    # NOTE(review): this is a no-op comparison, not an
                    # assignment — most likely 'return_mode = "raw"' was
                    # intended so the final elif below always matches.
                    return_mode == "raw"
                    binary = False
                cmask,cscore= combine_mask(a,b,sa,sb,match,raw_shape,mode="lr",binary=binary)
                #print("combining mask shape",cmask.shape,cscore.shape)
                #print("combine mask",cmask.sum())
                a_match = list(zip(*match))[0]
                b_match = list(zip(*match))[1]
        # pad every unmatched mask of each half back to the full image size
        if miss_a_flag == 0:
            a_unmatch = set(empty_ind_l)|set(inter_ind_l) - set(a_match)
            expa,expa_score = expand_unmatch(a,sa,list(a_unmatch),raw_shape[0:2],mode="lr")
        if miss_b_flag == 0 :
            b_unmatch = set(empty_ind_r)|set(inter_ind_r) - set(b_match)
            #print(b.dtype)
            expb,expb_score = expand_unmatch(b,sb,list(b_unmatch),raw_shape[0:2],mode="rl")
        """
        print(expa.dtype)
        print(expb.dtype)
        print(cmask.dtype)
        """
        # NOTE(review): casting the concatenated scores to uint8 truncates
        # fractional confidence values to 0 — confirm this is intended.
        if return_mode == "thres":
            return np.concatenate((expa,expb,cmask),axis=2).astype(np.uint8), np.concatenate((expa_score,expb_score,cscore),axis=0).astype(np.uint8)
        elif return_mode == "raw":
            return np.concatenate((expa,expb,cmask),axis=2).astype(np.float32), np.concatenate((expa_score,expb_score,cscore),axis=0).astype(np.float32)
    elif raw_shape[0] > max_edge:
        # too tall: transpose, recurse on the rotated image, transpose back
        img = np.transpose(img,[1,0,2])
        ret_mask,ret_score = divide_recursive_detect(model,img,inter_width,max_edge,combine_threshold,return_mode,verbose)
        if ret_mask is not None:
            return np.transpose(ret_mask,[1,0,2]),ret_score
        else:
            return None,None
| 33.744493 | 152 | 0.55483 |
d640a7a5b2cb84f5e9d014a19da06b435a0be078 | 8,832 | py | Python | tests/test_torchtrain/test_callbacks/test_abstract.py | mv1388/AIToolbox | c64ac4810a02d230ce471d86b758e82ea232a7e7 | [
"MIT"
] | 3 | 2019-10-12T12:24:09.000Z | 2020-08-02T02:42:43.000Z | tests/test_torchtrain/test_callbacks/test_abstract.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | 3 | 2020-04-10T14:07:07.000Z | 2020-04-22T19:04:38.000Z | tests/test_torchtrain/test_callbacks/test_abstract.py | mv1388/aitoolbox | 1060435e6cbdfd19abcb726c4080b663536b7467 | [
"MIT"
] | null | null | null | import unittest
from tests.utils import *
from aitoolbox.torchtrain.callbacks.abstract import AbstractCallback, AbstractExperimentCallback
from aitoolbox.torchtrain.train_loop import TrainLoop, TrainLoopCheckpointEndSave
from tests.utils import function_exists, NetUnifiedBatchFeed, CallbackTracker
class TestAbstractCallback(unittest.TestCase):
    """Tests for the bare AbstractCallback interface."""

    def test_abstract_callback_has_hook_methods(self):
        """Every TrainLoop hook must exist on a plain AbstractCallback."""
        callback = AbstractCallback('test_callback')
        expected_hooks = (
            'on_train_loop_registration',
            'on_epoch_begin',
            'on_epoch_end',
            'on_train_begin',
            'on_train_end',
            'on_batch_begin',
            'on_batch_end',
            'on_after_gradient_update',
            'on_after_optimizer_step',
        )
        for hook_name in expected_hooks:
            self.assertTrue(function_exists(callback, hook_name))

    def test_on_train_loop_registration_hook(self):
        """Registering a train loop fires on_train_loop_registration exactly once."""
        loop = TrainLoop(NetUnifiedBatchFeed(), None, None, None, None, None)
        tracker = CallbackTracker()
        tracker.register_train_loop_object(loop)
        self.assertIsInstance(tracker, AbstractCallback)
        self.assertEqual(tracker.callback_calls, ['on_train_loop_registration'])
class TestAbstractExperimentCallback(unittest.TestCase):
def test_init(self):
callback = AbstractExperimentCallback('test_callback')
self.assertTrue(function_exists(callback, 'try_infer_experiment_details'))
self.assertIsNone(callback.project_name)
self.assertIsNone(callback.experiment_name)
self.assertIsNone(callback.local_model_result_folder_path)
def test_try_infer_experiment_details_fail(self):
callback = AbstractExperimentCallback('test_callback')
model = NetUnifiedBatchFeed()
train_loop_non_exp = TrainLoop(model, None, None, None, None, None)
train_loop_non_exp.callbacks_handler.register_callbacks([callback])
with self.assertRaises(AttributeError):
callback.try_infer_experiment_details(infer_cloud_details=False)
with self.assertRaises(AttributeError):
callback.try_infer_experiment_details(infer_cloud_details=True)
def test_try_infer_experiment_details(self):
callback = AbstractExperimentCallback('test_callback')
model = NetUnifiedBatchFeed()
project_name = 'test_project'
experiment_name = 'test_experiment'
local_path = 'my_local_path'
train_loop = TrainLoopCheckpointEndSave(model, None, [], None, DummyOptimizer(), None,
project_name=project_name, experiment_name=experiment_name,
local_model_result_folder_path=local_path,
hyperparams={}, val_result_package=DummyResultPackageExtend(),
cloud_save_mode=None,
lazy_experiment_save=True)
train_loop.callbacks_handler.register_callbacks([callback])
callback.try_infer_experiment_details(infer_cloud_details=False)
self.assertEqual(callback.project_name, project_name)
self.assertEqual(callback.experiment_name, experiment_name)
self.assertEqual(callback.local_model_result_folder_path, local_path)
def test_try_infer_experiment_details_cloud(self):
callback = AbstractExperimentCallback('test_callback')
model = NetUnifiedBatchFeed()
project_name = 'test_project'
experiment_name = 'test_experiment'
local_path = 'my_local_path'
train_loop = TrainLoopCheckpointEndSave(model, None, [], None, DummyOptimizer(), None,
project_name=project_name, experiment_name=experiment_name,
local_model_result_folder_path=local_path,
hyperparams={}, val_result_package=DummyResultPackageExtend(),
lazy_experiment_save=True)
train_loop.callbacks_handler.register_callbacks([callback])
callback.try_infer_experiment_details(infer_cloud_details=True)
self.assertEqual(callback.project_name, project_name)
self.assertEqual(callback.experiment_name, experiment_name)
self.assertEqual(callback.local_model_result_folder_path, local_path)
self.assertEqual(callback.cloud_save_mode, train_loop.cloud_save_mode)
self.assertEqual(callback.bucket_name, train_loop.bucket_name)
self.assertEqual(callback.cloud_dir_prefix, train_loop.cloud_dir_prefix)
def test_try_infer_experiment_details_cloud_spec(self):
    """Explicit cloud settings given to the train loop are propagated
    unchanged to the registered callback."""
    callback = AbstractExperimentCallback('test_callback')
    model = NetUnifiedBatchFeed()
    project_name = 'test_project'
    experiment_name = 'test_experiment'
    local_path = 'my_local_path'
    cloud_save_mode = 's3'
    bucket_name = 'my_fancy_bucket'
    cloud_dir_prefix = 'MyFolder_prefix'
    train_loop = TrainLoopCheckpointEndSave(model, None, [], None, DummyOptimizer(), None,
                                            project_name=project_name, experiment_name=experiment_name,
                                            local_model_result_folder_path=local_path,
                                            hyperparams={}, val_result_package=DummyResultPackageExtend(),
                                            cloud_save_mode=cloud_save_mode, bucket_name=bucket_name,
                                            cloud_dir_prefix=cloud_dir_prefix,
                                            lazy_experiment_save=True)
    train_loop.callbacks_handler.register_callbacks([callback])
    callback.try_infer_experiment_details(infer_cloud_details=True)
    self.assertEqual(callback.project_name, project_name)
    self.assertEqual(callback.experiment_name, experiment_name)
    self.assertEqual(callback.local_model_result_folder_path, local_path)
    # Checked both against the train loop attributes and against the
    # original literals, proving the loop stored them unmodified.
    self.assertEqual(callback.cloud_save_mode, train_loop.cloud_save_mode)
    self.assertEqual(callback.bucket_name, train_loop.bucket_name)
    self.assertEqual(callback.cloud_dir_prefix, train_loop.cloud_dir_prefix)
    self.assertEqual(callback.cloud_save_mode, cloud_save_mode)
    self.assertEqual(callback.bucket_name, bucket_name)
    self.assertEqual(callback.cloud_dir_prefix, cloud_dir_prefix)
def test_override_train_loop_values_in_callback(self):
    """Values passed directly to the callback constructor take precedence
    over the train loop's own (differing, 'TL_'-prefixed) values; the
    train loop's attributes stay untouched."""
    project_name = 'test_project'
    experiment_name = 'test_experiment'
    local_path = 'my_local_path'
    cloud_save_mode = 'gcs'
    bucket_name = 'my_fancy_bucket'
    cloud_dir_prefix = 'MyFolder_prefix'
    # The callback is constructed with its own explicit experiment details.
    callback = AbstractExperimentCallback('test_callback',
                                          project_name, experiment_name, local_path,
                                          cloud_save_mode, bucket_name, cloud_dir_prefix)
    model = NetUnifiedBatchFeed()
    # The train loop deliberately uses different ('TL_'-prefixed) values.
    train_loop = TrainLoopCheckpointEndSave(model, None, [], None, DummyOptimizer(), None,
                                            project_name=f'TL_{project_name}', experiment_name=f'TL_{experiment_name}',
                                            local_model_result_folder_path=f'TL_{local_path}',
                                            hyperparams={}, val_result_package=DummyResultPackageExtend(),
                                            cloud_save_mode='s3', bucket_name=f'TL_{bucket_name}',
                                            cloud_dir_prefix=f'TL_{cloud_dir_prefix}',
                                            lazy_experiment_save=True)
    train_loop.callbacks_handler.register_callbacks([callback])
    callback.try_infer_experiment_details(infer_cloud_details=True)
    # The callback keeps its own explicitly-set values.
    self.assertEqual(callback.project_name, project_name)
    self.assertEqual(callback.experiment_name, experiment_name)
    self.assertEqual(callback.local_model_result_folder_path, local_path)
    self.assertEqual(callback.cloud_save_mode, cloud_save_mode)
    self.assertEqual(callback.bucket_name, bucket_name)
    self.assertEqual(callback.cloud_dir_prefix, cloud_dir_prefix)
    # The train loop's values remain unchanged.
    self.assertEqual(train_loop.cloud_save_mode, 's3')
    self.assertEqual(train_loop.bucket_name, f'TL_{bucket_name}')
    self.assertEqual(train_loop.cloud_dir_prefix, f'TL_{cloud_dir_prefix}')
| 52.886228 | 123 | 0.673913 |
b57e53f6573269b496be59883c155128c360eb78 | 34,713 | py | Python | core/domain/opportunity_services.py | kaylahardie/oppia | e93ed02dfc7f654ef4fb62268c1a9b9d9ded30ec | [
"Apache-2.0"
] | 2 | 2020-10-13T12:59:08.000Z | 2020-10-13T17:10:26.000Z | core/domain/opportunity_services.py | kaylahardie/oppia | e93ed02dfc7f654ef4fb62268c1a9b9d9ded30ec | [
"Apache-2.0"
] | 1 | 2020-03-02T21:05:42.000Z | 2020-03-03T07:09:51.000Z | core/domain/opportunity_services.py | kaylahardie/oppia | e93ed02dfc7f654ef4fb62268c1a9b9d9ded30ec | [
"Apache-2.0"
] | 1 | 2020-11-05T12:26:10.000Z | 2020-11-05T12:26:10.000Z | # coding: utf-8
#
# Copyright 2019 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commands that can be used to operate on opportunity models."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import logging
from constants import constants
from core.domain import exp_fetchers
from core.domain import opportunity_domain
from core.domain import question_fetchers
from core.domain import story_fetchers
from core.domain import topic_fetchers
from core.platform import models
import feconf
import utils
(opportunity_models,) = models.Registry.import_models(
[models.NAMES.opportunity])
def is_exploration_available_for_contribution(exp_id):
    """Checks whether a given exploration id belongs to a curated list of
    exploration i.e, whether it's used as the chapter of any story.

    Args:
        exp_id: str. The id of the exploration which is needed to be checked.

    Returns:
        bool. Whether the given exp_id belongs to the curated explorations.
    """
    model = opportunity_models.ExplorationOpportunitySummaryModel.get(
        exp_id, strict=False)
    # strict=False makes get() return None instead of raising when the
    # model is absent, so existence reduces to an identity check. The
    # original `True if model is not None else False` was redundant.
    return model is not None
def get_exploration_opportunity_summary_from_model(model):
    """Returns the ExplorationOpportunitySummary object out of the model.

    Args:
        model: ExplorationOpportunitySummaryModel. The exploration opportunity
            summary model.

    Returns:
        ExplorationOpportunitySummary. The corresponding
        ExplorationOpportunitySummary object.
    """
    # We're making sure that the audio language codes in any exploration
    # opportunity domain object match the ones in
    # constants.SUPPORTED_AUDIO_LANGUAGES.
    set_of_all_languages = set(
        model.incomplete_translation_language_codes +
        model.need_voice_artist_in_language_codes +
        model.assigned_voice_artist_in_language_codes)
    supported_language_codes = set([language['id'] for language in (
        constants.SUPPORTED_AUDIO_LANGUAGES)])
    # Any supported language that appears in none of the three stored lists
    # (e.g. a language added to the constants after the model was written).
    missing_language_codes = list(
        supported_language_codes - set_of_all_languages)
    if missing_language_codes:
        logging.info(
            'Missing language codes %s in exploration opportunity model with '
            'id %s' % (missing_language_codes, model.id))
    # Missing languages are treated as not-yet-translated, so they are
    # appended to the incomplete-translation list in the domain object.
    new_incomplete_translation_language_codes = (
        model.incomplete_translation_language_codes + missing_language_codes)
    return opportunity_domain.ExplorationOpportunitySummary(
        model.id, model.topic_id, model.topic_name, model.story_id,
        model.story_title, model.chapter_title, model.content_count,
        new_incomplete_translation_language_codes, model.translation_counts,
        model.need_voice_artist_in_language_codes,
        model.assigned_voice_artist_in_language_codes)
def _save_multi_exploration_opportunity_summary(
        exploration_opportunity_summary_list):
    """Persists a batch of ExplorationOpportunitySummary domain objects as
    ExplorationOpportunitySummaryModel entities.

    Args:
        exploration_opportunity_summary_list: list(
            ExplorationOpportunitySummary). A list of exploration opportunity
            summary object.
    """
    # Map each domain object to its datastore model counterpart, then write
    # them all in a single put_multi call.
    models_to_put = [
        opportunity_models.ExplorationOpportunitySummaryModel(
            id=summary.id,
            topic_id=summary.topic_id,
            topic_name=summary.topic_name,
            story_id=summary.story_id,
            story_title=summary.story_title,
            chapter_title=summary.chapter_title,
            content_count=summary.content_count,
            incomplete_translation_language_codes=(
                summary.incomplete_translation_language_codes),
            translation_counts=summary.translation_counts,
            need_voice_artist_in_language_codes=(
                summary.need_voice_artist_in_language_codes),
            assigned_voice_artist_in_language_codes=(
                summary.assigned_voice_artist_in_language_codes))
        for summary in exploration_opportunity_summary_list]
    opportunity_models.ExplorationOpportunitySummaryModel.put_multi(
        models_to_put)
def _create_exploration_opportunity_summary(topic, story, exploration):
    """Create an ExplorationOpportunitySummary object with the given topic,
    story and exploration object.

    Args:
        topic: Topic. The topic object to which the opportunity belongs.
        story: Story. The story object to which the opportunity belongs.
        exploration: Exploration. The exploration object to which the
            opportunity belongs.

    Returns:
        ExplorationOpportunitySummary. The exploration opportunity summary
        object.
    """
    # Start from all supported audio languages; those without a complete
    # translation are "incomplete", the rest are candidates for voiceover.
    audio_language_codes = set([
        language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES])
    complete_translation_languages = set(
        exploration.get_languages_with_complete_translation())
    incomplete_translation_language_codes = (
        audio_language_codes - complete_translation_languages)
    need_voice_artist_in_language_codes = complete_translation_languages
    if exploration.language_code in incomplete_translation_language_codes:
        # Removing exploration language from incomplete translation
        # languages list as exploration does not need any translation in
        # its own language.
        incomplete_translation_language_codes.discard(
            exploration.language_code)
        # Adding exploration language to voiceover required languages
        # list as exploration can be voiceovered in it's own language.
        need_voice_artist_in_language_codes.add(exploration.language_code)
    content_count = exploration.get_content_count()
    translation_counts = exploration.get_translation_counts()
    # The chapter title comes from the story node linked to this exploration.
    story_node = story.story_contents.get_node_with_corresponding_exp_id(
        exploration.id)
    # TODO(#7376): Once the voiceover application functionality is
    # implemented change this method such that it also populates the
    # assigned_voice_artist_in_language_codes with the required data.
    exploration_opportunity_summary = (
        opportunity_domain.ExplorationOpportunitySummary(
            exploration.id, topic.id, topic.name, story.id, story.title,
            story_node.title, content_count,
            list(incomplete_translation_language_codes), translation_counts,
            list(need_voice_artist_in_language_codes), []))
    return exploration_opportunity_summary
def add_new_exploration_opportunities(story_id, exp_ids):
    """Creates opportunity entries for the given explorations of a story.

    Args:
        story_id: str. ID of the story.
        exp_ids: list(str). A list of exploration ids for which new
            opportunities are to be created. All exp_ids must be part of the
            given story.
    """
    story = story_fetchers.get_story_by_id(story_id)
    # The topic is looked up via the story's corresponding_topic_id link.
    _create_exploration_opportunities(
        story,
        topic_fetchers.get_topic_by_id(story.corresponding_topic_id),
        exp_ids)
def create_exploration_opportunities_for_story(story_id, topic_id):
    """Creates exploration opportunities corresponding to the supplied
    published story ID iff the topic linked to the story is published.

    Args:
        story_id: str. The ID of the story domain object.
        topic_id: str. The ID of the topic domain object corresponding to the
            supplied story.

    Raises:
        Exception. A topic with the given ID doesn't exist.
        Exception. The topic rights could not be found.
    """
    story = story_fetchers.get_story_by_id(story_id)
    topic = topic_fetchers.get_topic_by_id(topic_id)
    # Guard clause: opportunities are only created for published topics.
    topic_rights = topic_fetchers.get_topic_rights(topic.id)
    if not topic_rights.topic_is_published:
        return
    linked_exp_ids = story.story_contents.get_all_linked_exp_ids()
    _create_exploration_opportunities(story, topic, linked_exp_ids)
def create_exploration_opportunities_for_topic(topic_id):
    """Creates exploration opportunities corresponding to each of the supplied
    published topic's published stories.

    Args:
        topic_id: str. The ID of the topic domain object.
    """
    topic = topic_fetchers.get_topic_by_id(topic_id)
    for reference in topic.get_all_story_references():
        # Only published stories that actually exist produce opportunities.
        if not reference.story_is_published:
            continue
        story = story_fetchers.get_story_by_id(
            reference.story_id, strict=False)
        if story is None:
            continue
        _create_exploration_opportunities(
            story, topic, story.story_contents.get_all_linked_exp_ids())
def _create_exploration_opportunities(story, topic, exp_ids):
    """Builds and persists opportunity summaries for the supplied story,
    topic, and exploration IDs.

    Args:
        story: Story. The story domain object corresponding to the exploration
            opportunities.
        topic: Topic. The topic domain object corresponding to the exploration
            opportunities.
        exp_ids: list(str). A list of exploration ids for which new
            opportunities are to be created. All exp_ids must be part of the
            given story.
    """
    explorations = exp_fetchers.get_multiple_explorations_by_id(exp_ids)
    # One summary per fetched exploration, saved in a single batch.
    summaries = [
        _create_exploration_opportunity_summary(topic, story, exploration)
        for exploration in explorations.values()]
    _save_multi_exploration_opportunity_summary(summaries)
def update_opportunity_with_updated_exploration(exp_id):
    """Updates the opportunities models with the changes made in the
    exploration.

    Args:
        exp_id: str. The exploration id which is also the id of the opportunity
            model.
    """
    updated_exploration = exp_fetchers.get_exploration_by_id(exp_id)
    content_count = updated_exploration.get_content_count()
    translation_counts = updated_exploration.get_translation_counts()
    complete_translation_language_list = (
        updated_exploration.get_languages_with_complete_translation())
    model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
    exploration_opportunity_summary = (
        get_exploration_opportunity_summary_from_model(model))
    exploration_opportunity_summary.content_count = content_count
    exploration_opportunity_summary.translation_counts = translation_counts
    # Languages whose translation is now complete are removed from the
    # incomplete-translation list.
    exploration_opportunity_summary.incomplete_translation_language_codes = (
        utils.compute_list_difference(
            exploration_opportunity_summary
            .incomplete_translation_language_codes,
            complete_translation_language_list
        )
    )
    # Newly-completed languages that do not already have an assigned voice
    # artist become voiceover candidates.
    new_languages_for_voiceover = set(complete_translation_language_list) - set(
        exploration_opportunity_summary.assigned_voice_artist_in_language_codes)
    # We only append new languages to need_voice_artist_in_language_codes(
    # instead of adding all of the complete_translation_language_list), as the
    # complete translation languages list will be dynamic based on some
    # content text are changed, where as the voiceover is a long term work and
    # we can allow a voice_artist to work for an exploration which needs a
    # little bit update in text translation.
    need_voice_artist_in_language_codes_set = set(
        exploration_opportunity_summary.need_voice_artist_in_language_codes)
    need_voice_artist_in_language_codes_set |= set(new_languages_for_voiceover)
    exploration_opportunity_summary.need_voice_artist_in_language_codes = list(
        need_voice_artist_in_language_codes_set)
    exploration_opportunity_summary.validate()
    _save_multi_exploration_opportunity_summary(
        [exploration_opportunity_summary])
def update_exploration_opportunities_with_story_changes(story, exp_ids):
    """Refreshes story/chapter titles on the given opportunity models.

    Args:
        story: Story. The new story object.
        exp_ids: list(str). A list of exploration IDs whose exploration
            opportunity summary models need to be updated.
    """
    opportunity_model_list = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
            exp_ids))
    updated_summaries = []
    for opportunity_model in opportunity_model_list:
        summary = get_exploration_opportunity_summary_from_model(
            opportunity_model)
        # Both the story title and the title of the chapter (story node)
        # linked to this exploration may have changed.
        summary.story_title = story.title
        summary.chapter_title = (
            story.story_contents.get_node_with_corresponding_exp_id(
                summary.id).title)
        summary.validate()
        updated_summaries.append(summary)
    _save_multi_exploration_opportunity_summary(updated_summaries)
def update_exploration_voiceover_opportunities(
        exp_id, assigned_voice_artist_in_language_code):
    """Updates the assigned_voice_artist_in_language_codes of exploration
    opportunity model.

    Args:
        exp_id: str. The ID of the exploration.
        assigned_voice_artist_in_language_code: str. The language code in which
            a voice artist is assigned to the exploration.
    """
    model = opportunity_models.ExplorationOpportunitySummaryModel.get(exp_id)
    exploration_opportunity_summary = (
        get_exploration_opportunity_summary_from_model(model))
    # Move the language from the "needs a voice artist" list to the
    # "voice artist assigned" list. NOTE(review): .remove() raises
    # ValueError if the language was not in the needs-list — presumably
    # callers guarantee it is; verify against callers.
    exploration_opportunity_summary.need_voice_artist_in_language_codes.remove(
        assigned_voice_artist_in_language_code)
    (
        exploration_opportunity_summary
        .assigned_voice_artist_in_language_codes.append(
            assigned_voice_artist_in_language_code))
    exploration_opportunity_summary.validate()
    _save_multi_exploration_opportunity_summary(
        [exploration_opportunity_summary])
def delete_exploration_opportunities(exp_ids):
    """Deletes the ExplorationOpportunitySummaryModel models corresponding to
    the given exp_ids.

    Args:
        exp_ids: list(str). A list of exploration IDs whose opportunity summary
            models are to be deleted.
    """
    # get_multi returns None for ids with no model; only existing models
    # are handed to delete_multi.
    existing_models = [
        opportunity_model
        for opportunity_model in (
            opportunity_models.ExplorationOpportunitySummaryModel.get_multi(
                exp_ids))
        if opportunity_model is not None]
    opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
        existing_models)
def delete_exploration_opportunities_corresponding_to_topic(topic_id):
    """Deletes the ExplorationOpportunitySummaryModel models which corresponds
    to the given topic_id.

    Args:
        topic_id: str. The ID of the topic.
    """
    # get_by_topic fetches all opportunity models tagged with this topic;
    # they are removed in a single batch delete.
    exp_opportunity_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
            topic_id))
    opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
        exp_opportunity_models)
def get_exploration_opportunity_ids_corresponding_to_topic(topic_id):
    """Returns the exploration IDs corresponding to the
    ExplorationOpportunitySummaryModels that are associated with the supplied
    topic ID.

    Args:
        topic_id: str. The ID of the topic.

    Returns:
        list(str). The exploration IDs.
    """
    exp_opportunity_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
            topic_id))
    # The model id doubles as the exploration id; the None filter is a
    # defensive guard on the query result.
    return [model.id for model in exp_opportunity_models if model is not None]
def update_exploration_opportunities(old_story, new_story):
    """Updates the opportunities models according to the changes made in the
    story.

    Args:
        old_story: Story. The old story object which is now updated.
        new_story: Story. The new story object.
    """
    # `set([])` replaced with `set()` (idiom).
    model_ids_need_update = set()
    exp_ids_in_old_story = old_story.story_contents.get_all_linked_exp_ids()
    exp_ids_in_new_story = new_story.story_contents.get_all_linked_exp_ids()
    new_added_exp_ids = set(exp_ids_in_new_story) - set(exp_ids_in_old_story)
    deleted_exp_ids = set(exp_ids_in_old_story) - set(exp_ids_in_new_story)
    unchanged_exp_ids = set(exp_ids_in_new_story) - new_added_exp_ids
    if old_story.title != new_story.title:
        # A story title change affects every opportunity of the story.
        model_ids_need_update |= unchanged_exp_ids
    else:
        # Otherwise only opportunities whose chapter title changed need a
        # refresh.
        for exp_id in unchanged_exp_ids:
            new_node = (
                new_story.story_contents.get_node_with_corresponding_exp_id(
                    exp_id))
            old_node = (
                old_story.story_contents.get_node_with_corresponding_exp_id(
                    exp_id))
            if old_node.title != new_node.title:
                model_ids_need_update.add(exp_id)
    update_exploration_opportunities_with_story_changes(
        new_story, list(model_ids_need_update))
    # Pass lists, matching the documented list(str) contract of the
    # helpers (previously a set was passed for the added ids).
    add_new_exploration_opportunities(new_story.id, list(new_added_exp_ids))
    delete_exploration_opportunities(list(deleted_exp_ids))
def get_translation_opportunities(language_code, cursor):
    """Returns a list of opportunities available for translation in a specific
    language.

    Args:
        language_code: str. The language for which translation opportunities
            should be fetched.
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(ExplorationOpportunitySummary). A list of
                ExplorationOpportunitySummary domain objects.
            cursor: str or None. A query cursor pointing to the next batch of
                results. If there are no more results, this might be None.
            more: bool. If True, there are (probably) more results after this
                batch. If False, there are no further results after this batch.
    """
    page_size = feconf.OPPORTUNITIES_PAGE_SIZE
    exp_opportunity_summary_models, cursor, more = (
        opportunity_models
        .ExplorationOpportunitySummaryModel.get_all_translation_opportunities(
            page_size, cursor, language_code))
    opportunities = []
    for exp_opportunity_summary_model in exp_opportunity_summary_models:
        exp_opportunity_summary = (
            get_exploration_opportunity_summary_from_model(
                exp_opportunity_summary_model))
        opportunities.append(exp_opportunity_summary)
    return opportunities, cursor, more
def get_voiceover_opportunities(language_code, cursor):
    """Returns a list of opportunities available for voiceover in a specific
    language.

    Args:
        language_code: str. The language for which voiceover opportunities
            to be fetched.
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(ExplorationOpportunitySummary). A list of
                ExplorationOpportunitySummary domain objects.
            cursor: str or None. A query cursor pointing to the next
                batch of results. If there are no more results, this might
                be None.
            more: bool. If True, there are (probably) more results after
                this batch. If False, there are no further results after
                this batch.
    """
    page_size = feconf.OPPORTUNITIES_PAGE_SIZE
    exp_opportunity_summary_models, cursor, more = (
        opportunity_models.ExplorationOpportunitySummaryModel
        .get_all_voiceover_opportunities(page_size, cursor, language_code))
    opportunities = []
    for exp_opportunity_summary_model in exp_opportunity_summary_models:
        exp_opportunity_summary = (
            get_exploration_opportunity_summary_from_model(
                exp_opportunity_summary_model))
        opportunities.append(exp_opportunity_summary)
    return opportunities, cursor, more
def get_exploration_opportunity_summaries_by_ids(ids):
    """Returns a list of ExplorationOpportunitySummary objects corresponding to
    the given list of ids.

    Args:
        ids: list(str). A list of opportunity ids.

    Returns:
        list(ExplorationOpportunitySummary). A list of
        ExplorationOpportunitySummary domain objects corresponding to the
        supplied ids. Missing ids are skipped (with a warning), so the
        result may be shorter than the input list.
    """
    exp_opportunity_summary_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_multi(ids))
    opportunities = []
    for exp_opportunity_summary_model in exp_opportunity_summary_models:
        if exp_opportunity_summary_model is not None:
            exp_opportunity_summary = (
                get_exploration_opportunity_summary_from_model(
                    exp_opportunity_summary_model))
            opportunities.append(exp_opportunity_summary)
        else:
            # get_multi returns None for ids without a model; log rather
            # than fail so one missing id does not break the whole batch.
            logging.warning(
                'When getting the exploration opportunity summary models for '
                'ids: %s, one of the models was None.' % ids)
    return opportunities
def update_opportunities_with_new_topic_name(topic_id, topic_name):
    """Rewrites the topic name on every opportunity of the given topic.

    Args:
        topic_id: str. The corresponding topic id of the opportunity.
        topic_name: str. The new topic name.
    """
    topic_opportunity_models = (
        opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
            topic_id))
    updated_summaries = []
    for topic_opportunity_model in topic_opportunity_models:
        summary = get_exploration_opportunity_summary_from_model(
            topic_opportunity_model)
        summary.topic_name = topic_name
        summary.validate()
        updated_summaries.append(summary)
    # Persist all updated summaries in one batch write.
    _save_multi_exploration_opportunity_summary(updated_summaries)
def get_skill_opportunity_from_model(model):
    """Builds a SkillOpportunity domain object from a SkillOpportunityModel.

    Args:
        model: SkillOpportunityModel. The skill opportunity model.

    Returns:
        SkillOpportunity. The corresponding SkillOpportunity object.
    """
    skill_id = model.id
    description = model.skill_description
    question_count = model.question_count
    return opportunity_domain.SkillOpportunity(
        skill_id, description, question_count)
def get_skill_opportunities(cursor):
    """Returns one page of skill opportunities available for questions.

    Args:
        cursor: str or None. If provided, the list of returned entities
            starts from this datastore cursor. Otherwise, the returned
            entities start from the beginning of the full list of entities.

    Returns:
        3-tuple(opportunities, cursor, more). where:
            opportunities: list(SkillOpportunity). A list of SkillOpportunity
                domain objects.
            cursor: str or None. A query cursor pointing to the next
                batch of results. If there are no more results, this might
                be None.
            more: bool. If True, there are (probably) more results after
                this batch. If False, there are no further results after
                this batch.
    """
    skill_opportunity_models, cursor, more = (
        opportunity_models.SkillOpportunityModel
        .get_skill_opportunities(feconf.OPPORTUNITIES_PAGE_SIZE, cursor))
    # Convert each model in the page into its domain object.
    opportunities = [
        get_skill_opportunity_from_model(skill_opportunity_model)
        for skill_opportunity_model in skill_opportunity_models]
    return opportunities, cursor, more
def get_skill_opportunities_by_ids(ids):
    """Returns a list of SkillOpportunity domain objects corresponding to the
    given list of ids.

    Args:
        ids: list(str). A list of the opportunity ids.

    Returns:
        list(SkillOpportunity). A list of SkillOpportunity domain objects
        corresponding to the supplied ids. Ids without a corresponding
        model are skipped (with a warning), so the result may be shorter
        than the input list.
    """
    skill_opportunity_models = (
        opportunity_models.SkillOpportunityModel.get_multi(ids))
    opportunities = []
    for skill_opportunity_model in skill_opportunity_models:
        # get_multi returns None for missing ids; the original code did not
        # guard against this and would raise AttributeError. Skip and warn
        # instead, consistent with
        # get_exploration_opportunity_summaries_by_ids above.
        if skill_opportunity_model is None:
            logging.warning(
                'When getting the skill opportunity models for ids: %s, one '
                'of the models was None.' % ids)
            continue
        opportunities.append(
            get_skill_opportunity_from_model(skill_opportunity_model))
    return opportunities
def create_skill_opportunity(skill_id, skill_description):
    """Creates a SkillOpportunityModel entity in the datastore.

    Args:
        skill_id: str. The skill_id of the opportunity.
        skill_description: str. The skill_description of the opportunity.

    Raises:
        Exception. If a SkillOpportunityModel corresponding to the supplied
            skill_id already exists.
    """
    skill_opportunity_model = (
        opportunity_models.SkillOpportunityModel.get_by_id(skill_id))
    if skill_opportunity_model is not None:
        raise Exception(
            'SkillOpportunity corresponding to skill ID %s already exists.' % (
                skill_id))
    # The initial question_count is the number of questions already linked
    # to this skill (capped at MAX_QUESTIONS_PER_SKILL by the fetch).
    questions, _, _ = (
        question_fetchers.get_questions_and_skill_descriptions_by_skill_ids(
            constants.MAX_QUESTIONS_PER_SKILL, [skill_id], ''))
    skill_opportunity = opportunity_domain.SkillOpportunity(
        skill_id=skill_id,
        skill_description=skill_description,
        question_count=len(questions)
    )
    _save_skill_opportunities([skill_opportunity])
def _save_skill_opportunities(skill_opportunities):
    """Validates and persists SkillOpportunity domain objects as
    SkillOpportunityModel entities.

    Args:
        skill_opportunities: list(SkillOpportunity). A list of SkillOpportunity
            domain objects.
    """
    models_to_put = []
    for opportunity in skill_opportunities:
        # Each domain object is validated before being converted to a model.
        opportunity.validate()
        models_to_put.append(
            opportunity_models.SkillOpportunityModel(
                id=opportunity.id,
                skill_description=opportunity.skill_description,
                question_count=opportunity.question_count,
            ))
    opportunity_models.SkillOpportunityModel.put_multi(models_to_put)
def update_skill_opportunity_skill_description(skill_id, new_description):
    """Rewrites the skill_description on the SkillOpportunityModel for the
    given skill.

    Args:
        skill_id: str. The corresponding skill_id of the opportunity.
        new_description: str. The new skill_description.
    """
    skill_opportunity = _get_skill_opportunity(skill_id)
    # Silently a no-op when no opportunity exists for this skill.
    if skill_opportunity is None:
        return
    skill_opportunity.skill_description = new_description
    _save_skill_opportunities([skill_opportunity])
def _get_skill_opportunity(skill_id):
    """Fetches the SkillOpportunity domain object for the given skill_id.

    Args:
        skill_id: str. The corresponding skill_id of the opportunity.

    Returns:
        SkillOpportunity or None. The domain object representing a
        SkillOpportunity with the supplied skill_id, or None if it does not
        exist.
    """
    model = opportunity_models.SkillOpportunityModel.get_by_id(skill_id)
    if model is None:
        return None
    return get_skill_opportunity_from_model(model)
def delete_skill_opportunity(skill_id):
    """Removes the SkillOpportunityModel for the supplied skill_id, if any.

    Args:
        skill_id: str. The skill_id corresponding to the to-be-deleted
            SkillOpportunityModel.
    """
    model = opportunity_models.SkillOpportunityModel.get_by_id(skill_id)
    # Deleting a non-existent opportunity is a no-op.
    if model is not None:
        opportunity_models.SkillOpportunityModel.delete(model)
def increment_question_counts(skill_ids, delta):
    """Increments question_count(s) of SkillOpportunityModel(s) with
    corresponding skill_ids.

    Args:
        skill_ids: list(str). A list of skill_ids corresponding to
            SkillOpportunityModel(s).
        delta: int. The delta for which to increment each question_count
            (may be negative).
    """
    # The helper skips ids without a model, so missing skills are ignored.
    updated_skill_opportunities = (
        _get_skill_opportunities_with_updated_question_counts(skill_ids, delta))
    _save_skill_opportunities(updated_skill_opportunities)
def update_skill_opportunities_on_question_linked_skills_change(
        old_skill_ids, new_skill_ids):
    """Updates question_count(s) of SkillOpportunityModel(s) corresponding to
    the change in linked skill IDs for a question from old_skill_ids to
    new_skill_ids, e.g. if skill_id1 is in old_skill_ids, but not in
    new_skill_ids, the question_count of the SkillOpportunityModel for skill_id1
    would be decremented.

    NOTE: Since this method is updating the question_counts based on the change
    of skill_ids from old_skill_ids to new_skill_ids, the input skill_id lists
    must be related.

    Args:
        old_skill_ids: list(str). A list of old skill_id(s).
        new_skill_ids: list(str). A list of new skill_id(s).
    """
    old_ids = set(old_skill_ids)
    new_ids = set(new_skill_ids)
    # Skills gained by the question get +1; skills it lost get -1.
    added_skill_ids = new_ids - old_ids
    removed_skill_ids = old_ids - new_ids
    updated_opportunities = (
        _get_skill_opportunities_with_updated_question_counts(
            added_skill_ids, 1) +
        _get_skill_opportunities_with_updated_question_counts(
            removed_skill_ids, -1))
    _save_skill_opportunities(updated_opportunities)
def _get_skill_opportunities_with_updated_question_counts(skill_ids, delta):
    """Loads the SkillOpportunities for the given skill_ids and applies delta
    to each question_count.

    Args:
        skill_ids: iterable(str). The IDs of the matching SkillOpportunityModels
            in the datastore.
        delta: int. The delta by which to update each question_count (can be
            negative).

    Returns:
        list(SkillOpportunity). The updated SkillOpportunities.
    """
    updated = []
    for model in opportunity_models.SkillOpportunityModel.get_multi(skill_ids):
        # Ids without a stored model yield None and are skipped.
        if model is None:
            continue
        opportunity = get_skill_opportunity_from_model(model)
        opportunity.question_count += delta
        updated.append(opportunity)
    return updated
def regenerate_opportunities_related_to_topic(
        topic_id, delete_existing_opportunities=False):
    """Regenerates the exploration opportunity models belonging to a topic.

    Args:
        topic_id: str. The ID of the topic.
        delete_existing_opportunities: bool. Whether to first delete all the
            existing opportunities related to the given topic.

    Returns:
        int. The number of opportunity models created.

    Raises:
        Exception. A story or exploration linked to the topic no longer
            exists in the datastore.
    """
    if delete_existing_opportunities:
        existing_models = (
            opportunity_models.ExplorationOpportunitySummaryModel.get_by_topic(
                topic_id))
        opportunity_models.ExplorationOpportunitySummaryModel.delete_multi(
            existing_models)
    topic = topic_fetchers.get_topic_by_id(topic_id)
    story_ids = topic.get_canonical_story_ids()
    stories = story_fetchers.get_stories_by_ids(story_ids)
    # Collect every linked exploration ID while recording stories that no
    # longer exist (get_stories_by_ids yields None for missing stories).
    missing_story_ids = []
    linked_exp_ids = []
    for story_id, story in zip(story_ids, stories):
        if story is None:
            missing_story_ids.append(story_id)
        else:
            linked_exp_ids += story.story_contents.get_all_linked_exp_ids()
    exp_ids_to_exp = exp_fetchers.get_multiple_explorations_by_id(
        linked_exp_ids, strict=False)
    missing_exp_ids = set(linked_exp_ids) - set(exp_ids_to_exp.keys())
    # Refuse to regenerate from inconsistent data: every referenced story
    # and exploration must still exist.
    if missing_exp_ids or missing_story_ids:
        raise Exception(
            'Failed to regenerate opportunities for topic id: %s, '
            'missing_exp_with_ids: %s, missing_story_with_ids: %s' % (
                topic_id, list(missing_exp_ids), missing_story_ids))
    summaries = []
    for story in stories:
        for exp_id in story.story_contents.get_all_linked_exp_ids():
            summaries.append(
                _create_exploration_opportunity_summary(
                    topic, story, exp_ids_to_exp[exp_id]))
    _save_multi_exploration_opportunity_summary(summaries)
    return len(summaries)
def delete_all_exploration_opportunity_summary_models():
    """Deletes all of the ExplorationOpportunitySummaryModel."""
    # Bulk wipe of the whole model class; intended for maintenance or
    # full-regeneration flows only.
    opportunity_models.ExplorationOpportunitySummaryModel.delete_all()
def delete_all_skill_opportunity_models():
    """Deletes all of the SkillOpportunityModels from the datastore."""
    # Bulk wipe of the whole model class; intended for maintenance or
    # full-regeneration flows only.
    opportunity_models.SkillOpportunityModel.delete_all()
| 40.505251 | 80 | 0.735431 |
7cdc04c65383cc9b3588d5045d9274feee8cc8ae | 3,522 | py | Python | lib/django-1.3/django/contrib/markup/templatetags/markup.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 790 | 2015-01-03T02:13:39.000Z | 2020-05-10T19:53:57.000Z | django/contrib/markup/templatetags/markup.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | [
"BSD-3-Clause"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | django/contrib/markup/templatetags/markup.py | mradziej/django | 5d38965743a369981c9a738a298f467f854a2919 | [
"BSD-3-Clause"
] | 155 | 2015-01-08T22:59:31.000Z | 2020-04-08T08:01:53.000Z | """
Set of "markup" template filters for Django. These filters transform plain text
markup syntaxes to HTML; currently there is support for:
* Textile, which requires the PyTextile library available at
http://loopcore.com/python-textile/
* Markdown, which requires the Python-markdown library from
http://www.freewisdom.org/projects/python-markdown
* reStructuredText, which requires docutils from http://docutils.sf.net/
"""
from django import template
from django.conf import settings
from django.utils.encoding import smart_str, force_unicode
from django.utils.safestring import mark_safe
register = template.Library()
def textile(value):
    """Render Textile markup to HTML.

    If the PyTextile library is not installed, the input is returned
    unchanged (as unicode); under settings.DEBUG a TemplateSyntaxError
    is raised instead so the misconfiguration is visible.
    """
    try:
        import textile
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError("Error in {% textile %} filter: The Python textile library isn't installed.")
        return force_unicode(value)
    rendered = textile.textile(smart_str(value), encoding='utf-8', output='utf-8')
    return mark_safe(force_unicode(rendered))
textile.is_safe = True
def markdown(value, arg=''):
    """
    Runs Markdown over a given value, optionally using various
    extensions python-markdown supports.

    Syntax::

        {{ value|markdown:"extension1_name,extension2_name..." }}

    To enable safe mode, which strips raw HTML and only returns HTML
    generated by actual Markdown syntax, pass "safe" as the first
    extension in the list.

    If the version of Markdown in use does not support extensions,
    they will be silently ignored.
    """
    try:
        import markdown
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError("Error in {% markdown %} filter: The Python markdown library isn't installed.")
        return force_unicode(value)
    else:
        # markdown.version was first added in 1.6b. The only version of markdown
        # to fully support extensions before 1.6b was the shortlived 1.6a.
        if hasattr(markdown, 'version'):
            extensions = [e for e in arg.split(",") if e]
            if len(extensions) > 0 and extensions[0] == "safe":
                extensions = extensions[1:]
                safe_mode = True
            else:
                safe_mode = False
            # Unicode support only in markdown v1.7 or above. version_info
            # exists only in markdown v1.6.2rc-2 or above; treat a missing
            # version_info as an old release. Using (0,) as the getattr
            # fallback (rather than None) keeps this comparison valid on
            # Python 3, where ``None < tuple`` raises TypeError, while
            # preserving the Python 2 behavior (missing -> "old" branch).
            if getattr(markdown, "version_info", (0,)) < (1, 7):
                return mark_safe(force_unicode(markdown.markdown(smart_str(value), extensions, safe_mode=safe_mode)))
            else:
                return mark_safe(markdown.markdown(force_unicode(value), extensions, safe_mode=safe_mode))
        else:
            return mark_safe(force_unicode(markdown.markdown(smart_str(value))))
markdown.is_safe = True
def restructuredtext(value):
    """Render reStructuredText markup to an HTML fragment via docutils.

    If docutils is not installed, the input is returned unchanged (as
    unicode); under settings.DEBUG a TemplateSyntaxError is raised
    instead so the misconfiguration is visible.
    """
    try:
        from docutils.core import publish_parts
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError("Error in {% restructuredtext %} filter: The Python docutils library isn't installed.")
        return force_unicode(value)
    # Site-wide writer overrides may be supplied through Django settings.
    overrides = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", {})
    parts = publish_parts(
        source=smart_str(value),
        writer_name="html4css1",
        settings_overrides=overrides)
    return mark_safe(force_unicode(parts["fragment"]))
restructuredtext.is_safe = True
# Expose each renderer as a template filter under its function name.
register.filter(textile)
register.filter(markdown)
register.filter(restructuredtext)
| 38.282609 | 134 | 0.687394 |
3e1ca8d37f3169718c376bf5d7849566f0d481ac | 25 | py | Python | tests/__init__.py | Polydynamical/morfeus | b50d35477faed6ed40a1f769d27e6ac68c874525 | [
"MIT"
] | 62 | 2021-04-28T23:36:53.000Z | 2022-03-30T07:42:42.000Z | tests/__init__.py | Polydynamical/morfeus | b50d35477faed6ed40a1f769d27e6ac68c874525 | [
"MIT"
] | 5 | 2021-05-25T14:47:14.000Z | 2022-03-29T01:42:22.000Z | tests/__init__.py | Polydynamical/morfeus | b50d35477faed6ed40a1f769d27e6ac68c874525 | [
"MIT"
] | 10 | 2021-04-30T06:42:31.000Z | 2022-03-26T04:47:04.000Z | """Tests for Mᴏʀғᴇᴜs."""
| 12.5 | 24 | 0.6 |
479820918616856f3864ca886786083dfad6e163 | 1,545 | py | Python | sympy/utilities/source.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/utilities/source.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | sympy/utilities/source.py | msgoff/sympy | 1e7daef7514902f5e89718fa957b7b36c6669a10 | [
"BSD-3-Clause"
] | null | null | null | """
This module adds several functions for interactive source code inspection.
"""
from __future__ import print_function, division
from sympy.core.decorators import deprecated
import inspect
@deprecated(
    useinstead="?? in IPython/Jupyter or inspect.getsource",
    issue=14905,
    deprecated_since_version="1.3",
)
def source(object):
    """
    Prints the source code of a given object.
    """
    # Report the defining file first, then the object's source text,
    # both resolved through the stdlib ``inspect`` module.
    print("In file: %s" % inspect.getsourcefile(object))
    print(inspect.getsource(object))
def get_class(lookup_view):
    """
    Convert a string version of a class name to the object.

    For example, get_class('sympy.core.Basic') will return
    class Basic located in module sympy.core
    """
    # Non-string inputs (and bare module paths with no dot) pass through
    # unchanged.
    if not isinstance(lookup_view, str):
        return lookup_view
    mod_name, func_name = get_mod_func(lookup_view)
    if func_name == "":
        return lookup_view
    target = getattr(__import__(mod_name, {}, {}, ["*"]), func_name)
    if not callable(target):
        raise AttributeError(
            "'%s.%s' is not a callable." % (mod_name, func_name)
        )
    return target


def get_mod_func(callback):
    """
    splits the string path to a class into a string path to the module
    and the name of the class.

    Examples
    ========

    >>> from sympy.utilities.source import get_mod_func
    >>> get_mod_func('sympy.core.basic.Basic')
    ('sympy.core.basic', 'Basic')

    """
    if "." not in callback:
        return callback, ""
    mod_name, _, func_name = callback.rpartition(".")
    return mod_name, func_name
| 26.186441 | 81 | 0.634951 |
aa0bd12879d74bf71ce3756aea06695e9cb62eca | 8,267 | py | Python | testinfra/modules/file.py | degibenz/testinfra | 3105cea40116b389f9185901f78e3c6db4ebfb18 | [
"Apache-2.0"
] | null | null | null | testinfra/modules/file.py | degibenz/testinfra | 3105cea40116b389f9185901f78e3c6db4ebfb18 | [
"Apache-2.0"
] | null | null | null | testinfra/modules/file.py | degibenz/testinfra | 3105cea40116b389f9185901f78e3c6db4ebfb18 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
from testinfra.modules.base import Module
class File(Module):
    """Test various files attributes"""
    def __init__(self, path):
        # ``path`` is interpolated verbatim into the shell commands run by
        # the helpers below.
        self.path = path
        super(File, self).__init__()
    @property
    def exists(self):
        """Test if file exists

        >>> host.file("/etc/passwd").exists
        True
        >>> host.file("/nonexistent").exists
        False
        """
        return self.run_test("test -e %s", self.path).rc == 0
    @property
    def is_file(self):
        # True when the path is a regular file (shell ``test -f``).
        return self.run_test("test -f %s", self.path).rc == 0
    @property
    def is_directory(self):
        # True when the path is a directory (shell ``test -d``).
        return self.run_test("test -d %s", self.path).rc == 0
    @property
    def is_pipe(self):
        # True when the path is a named pipe / FIFO (shell ``test -p``).
        return self.run_test("test -p %s", self.path).rc == 0
    @property
    def is_socket(self):
        # True when the path is a unix domain socket (shell ``test -S``).
        return self.run_test("test -S %s", self.path).rc == 0
    @property
    def is_symlink(self):
        # True when the path is a symbolic link (shell ``test -L``).
        return self.run_test("test -L %s", self.path).rc == 0
    @property
    def linked_to(self):
        """Resolve symlink

        >>> host.file("/var/lock").linked_to
        '/run/lock'
        """
        return self.check_output("readlink -f %s", self.path)
    @property
    def user(self):
        """Return file owner as string

        >>> host.file("/etc/passwd").user
        'root'
        """
        # Implemented by the OS-specific subclasses below.
        raise NotImplementedError
    @property
    def uid(self):
        """Return file user id as integer

        >>> host.file("/etc/passwd").uid
        0
        """
        raise NotImplementedError
    @property
    def group(self):
        """Return file group name as string (OS-specific subclasses)."""
        raise NotImplementedError
    @property
    def gid(self):
        """Return file group id as integer (OS-specific subclasses)."""
        raise NotImplementedError
    @property
    def mode(self):
        """Return file mode as octal integer

        >>> host.file("/etc/passwd").mode
        384  # 0o600 (octal)
        >>> host.file("/etc/password").mode == 0o600
        True
        >>> oct(host.file("/etc/password").mode) == '0600'
        True

        Note: Python 3 oct(x)_ function will produce ``'0o600'``

        You can also utilize the file mode constants from
        the stat_ library for testing file mode.

        >>> import stat
        >>> host.file("/etc/password").mode == stat.S_IRUSR | stat.S_IWUSR
        True

        .. _oct(x): https://docs.python.org/3.5/library/functions.html#oct
        .. _stat: https://docs.python.org/2/library/stat.html
        """
        raise NotImplementedError
    def contains(self, pattern):
        # True when the file content matches ``pattern`` (grep regex);
        # ``-s`` suppresses errors for missing files.
        return self.run_test("grep -qs -- %s %s", pattern, self.path).rc == 0
    @property
    def md5sum(self):
        """Return the md5 checksum of the file content as a hex string."""
        raise NotImplementedError
    @property
    def sha256sum(self):
        """Return the sha256 checksum of the file content as a hex string."""
        raise NotImplementedError
    def _get_content(self, decode):
        # Shared implementation behind ``content`` and ``content_string``:
        # cat the file and return either decoded text or raw bytes.
        out = self.run_test("cat -- %s", self.path)
        if out.rc != 0:
            raise RuntimeError("Unexpected output %s" % (out,))
        if decode:
            return out.stdout
        return out.stdout_bytes
    @property
    def content(self):
        """Return file content as bytes

        >>> host.file("/tmp/foo").content
        b'caf\\xc3\\xa9'
        """
        return self._get_content(False)
    @property
    def content_string(self):
        """Return file content as string

        >>> host.file("/tmp/foo").content_string
        'café'
        """
        return self._get_content(True)
    @property
    def mtime(self):
        """Return time of last modification as datetime.datetime object

        >>> host.file("/etc/passwd").mtime
        datetime.datetime(2015, 3, 15, 20, 25, 40)
        """
        raise NotImplementedError
    @property
    def size(self):
        """Return size of file in bytes"""
        raise NotImplementedError
    @property
    def listdir(self):
        """Return list of items under the directory

        >>> host.file("/tmp").listdir
        ['foo_file', 'bar_dir']
        """
        # ``-q`` replaces non-printable characters so the output stays
        # line-oriented and safe to split.
        out = self.run_test("ls -1 -q -- %s", self.path)
        if out.rc != 0:
            raise RuntimeError("Unexpected output %s" % (out,))
        return out.stdout.splitlines()
    def __repr__(self):
        return "<file %s>" % (self.path,)
    def __eq__(self, other):
        # Files compare equal by path, against either another File or a
        # plain string path.
        if isinstance(other, File):
            return self.path == other.path
        if isinstance(other, str):
            return self.path == other
        return False
    def __ne__(self, other):
        return not self.__eq__(other)
    @classmethod
    def get_module_class(cls, host):
        # Dispatch to the platform-specific implementation: the ``stat``
        # command-line options differ between GNU coreutils and the BSDs.
        if host.system_info.type == "linux":
            return GNUFile
        if host.system_info.type == "netbsd":
            return NetBSDFile
        if host.system_info.type.endswith("bsd"):
            return BSDFile
        if host.system_info.type == "darwin":
            return DarwinFile
        raise NotImplementedError
class GNUFile(File):
    """File implementation for Linux, backed by GNU coreutils
    ``stat -c``/``md5sum``/``sha256sum``.
    """
    @property
    def user(self):
        return self.check_output("stat -c %%U %s", self.path)
    @property
    def uid(self):
        return int(self.check_output("stat -c %%u %s", self.path))
    @property
    def group(self):
        return self.check_output("stat -c %%G %s", self.path)
    @property
    def gid(self):
        return int(self.check_output("stat -c %%g %s", self.path))
    @property
    def mode(self):
        # Supply a base of 8 when parsing an octal integer
        # e.g. int('644', 8) -> 420
        return int(self.check_output("stat -c %%a %s", self.path), 8)
    @property
    def mtime(self):
        # %Y prints the mtime as seconds since the epoch.
        ts = self.check_output("stat -c %%Y %s", self.path)
        return datetime.datetime.fromtimestamp(float(ts))
    @property
    def size(self):
        return int(self.check_output("stat -c %%s %s", self.path))
    @property
    def md5sum(self):
        # md5sum prints "<hash>  <filename>"; keep only the hash field.
        return self.check_output("md5sum %s | cut -d' ' -f1", self.path)
    @property
    def sha256sum(self):
        return self.check_output(
            "sha256sum %s | cut -d ' ' -f 1", self.path)
class BSDFile(File):
    """File implementation for the BSDs, backed by BSD ``stat -f`` and the
    ``md5``/``sha256`` utilities.
    """
    @property
    def user(self):
        return self.check_output("stat -f %%Su %s", self.path)
    @property
    def uid(self):
        return int(self.check_output("stat -f %%u %s", self.path))
    @property
    def group(self):
        return self.check_output("stat -f %%Sg %s", self.path)
    @property
    def gid(self):
        return int(self.check_output("stat -f %%g %s", self.path))
    @property
    def mode(self):
        # Supply a base of 8 when parsing an octal integer
        # e.g. int('644', 8) -> 420
        return int(self.check_output("stat -f %%Lp %s", self.path), 8)
    @property
    def mtime(self):
        # %m prints the mtime as seconds since the epoch.
        ts = self.check_output("stat -f %%m %s", self.path)
        return datetime.datetime.fromtimestamp(float(ts))
    @property
    def size(self):
        return int(self.check_output("stat -f %%z %s", self.path))
    @property
    def md5sum(self):
        # Reading from stdin makes md5/sha256 print only the bare digest.
        return self.check_output("md5 < %s", self.path)
    @property
    def sha256sum(self):
        return self.check_output(
            "sha256 < %s", self.path)
class DarwinFile(BSDFile):
    """File implementation for macOS."""
    @property
    def linked_to(self):
        # macOS lacks GNU ``readlink -f``, so emulate it: follow each link
        # in a shell loop until a non-link target is reached, then print
        # its physical (symlink-free) absolute path.
        link_script = '''
        TARGET_FILE='{0}'
        cd `dirname $TARGET_FILE`
        TARGET_FILE=`basename $TARGET_FILE`
        while [ -L "$TARGET_FILE" ]
        do
            TARGET_FILE=`readlink $TARGET_FILE`
            cd `dirname $TARGET_FILE`
            TARGET_FILE=`basename $TARGET_FILE`
        done
        PHYS_DIR=`pwd -P`
        RESULT=$PHYS_DIR/$TARGET_FILE
        echo $RESULT
        '''.format(self.path)
        return self.check_output(link_script)
class NetBSDFile(BSDFile):
    """File implementation for NetBSD, whose ``cksum`` takes an ``-a``
    algorithm switch instead of shipping a dedicated ``sha256`` binary.
    """
    @property
    def sha256sum(self):
        return self.check_output(
            "cksum -a sha256 < %s", self.path)
| 25.996855 | 77 | 0.581952 |
9e37486f8e2ec82c4186330f33bd6327e820e4eb | 79,487 | py | Python | salt/config.py | allmightyspiff/salt | 80e54f8b294ea07e0fbb711f661f4ba79eb2c5be | [
"Apache-2.0"
] | null | null | null | salt/config.py | allmightyspiff/salt | 80e54f8b294ea07e0fbb711f661f4ba79eb2c5be | [
"Apache-2.0"
] | null | null | null | salt/config.py | allmightyspiff/salt | 80e54f8b294ea07e0fbb711f661f4ba79eb2c5be | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
All salt configuration loading and defaults should be in this module
'''
from __future__ import absolute_import, generators
# Import python libs
import os
import re
import sys
import glob
import time
import codecs
import logging
from copy import deepcopy
# Import third party libs
import yaml
try:
yaml.Loader = yaml.CLoader
yaml.Dumper = yaml.CDumper
except Exception:
pass
# pylint: disable=import-error,no-name-in-module
import salt.ext.six as six
from salt.ext.six import string_types, text_type
from salt.ext.six.moves.urllib.parse import urlparse
# pylint: enable=import-error,no-name-in-module
# Import salt libs
import salt.utils
import salt.utils.network
import salt.syspaths
import salt.utils.validate.path
import salt.utils.xdg
import salt.exceptions
log = logging.getLogger(__name__)
# Default date and message formats shared by the console and logfile
# logging handlers (overridable via the log_* config options below).
_DFLT_LOG_DATEFMT = '%H:%M:%S'
_DFLT_LOG_DATEFMT_LOGFILE = '%Y-%m-%d %H:%M:%S'
_DFLT_LOG_FMT_CONSOLE = '[%(levelname)-8s] %(message)s'
_DFLT_LOG_FMT_LOGFILE = (
    '%(asctime)s,%(msecs)03.0f [%(name)-17s][%(levelname)-8s][%(process)d] %(message)s'
)
# Directory containing the ioflo behavior scripts referenced by the
# *_floscript defaults below.
FLO_DIR = os.path.join(
    os.path.dirname(__file__),
    'daemons', 'flo')
VALID_OPTS = {
'master': str,
'master_port': int,
'master_type': str,
'master_finger': str,
'master_shuffle': bool,
'master_alive_interval': int,
'master_sign_key_name': str,
'master_sign_pubkey': bool,
'verify_master_pubkey_sign': bool,
'always_verify_signature': bool,
'master_pubkey_signature': str,
'master_use_pubkey_signature': bool,
'syndic_finger': str,
'user': str,
'root_dir': str,
'pki_dir': str,
'id': str,
'cachedir': str,
'cache_jobs': bool,
'conf_file': str,
'sock_dir': str,
'backup_mode': str,
'renderer': str,
'failhard': bool,
'autoload_dynamic_modules': bool,
'environment': str,
'state_top': str,
'startup_states': str,
'sls_list': list,
'top_file': str,
'file_client': str,
'use_master_when_local': bool,
'file_roots': dict,
'pillar_roots': dict,
'hash_type': str,
'disable_modules': list,
'disable_returners': list,
'whitelist_modules': list,
'module_dirs': list,
'returner_dirs': list,
'states_dirs': list,
'grains_dirs': list,
'render_dirs': list,
'outputter_dirs': list,
'utils_dirs': list,
'providers': dict,
'clean_dynamic_modules': bool,
'open_mode': bool,
'multiprocessing': bool,
'mine_interval': int,
'ipc_mode': str,
'ipv6': bool,
'file_buffer_size': int,
'tcp_pub_port': int,
'tcp_pull_port': int,
'log_file': str,
'log_level': bool,
'log_level_logfile': bool,
'log_datefmt': str,
'log_datefmt_logfile': str,
'log_fmt_console': str,
'log_fmt_logfile': tuple,
'log_granular_levels': dict,
'max_event_size': int,
'test': bool,
'cython_enable': bool,
'show_timeout': bool,
'show_jid': bool,
'state_verbose': bool,
'state_output': str,
'state_auto_order': bool,
'state_events': bool,
'acceptance_wait_time': float,
'acceptance_wait_time_max': float,
'rejected_retry': bool,
'loop_interval': float,
'verify_env': bool,
'grains': dict,
'permissive_pki_access': bool,
'default_include': str,
'update_url': bool,
'update_restart_services': list,
'retry_dns': float,
'recon_max': float,
'recon_default': float,
'recon_randomize': float,
'event_return': str,
'event_return_queue': int,
'event_return_whitelist': list,
'event_return_blacklist': list,
'win_repo_cachefile': str,
'pidfile': str,
'range_server': str,
'tcp_keepalive': bool,
'tcp_keepalive_idle': float,
'tcp_keepalive_cnt': float,
'tcp_keepalive_intvl': float,
'interface': str,
'publish_port': int,
'auth_mode': int,
'pub_hwm': int,
'rep_hwm': int,
'worker_threads': int,
'ret_port': int,
'keep_jobs': int,
'master_roots': dict,
'gitfs_remotes': list,
'gitfs_mountpoint': str,
'gitfs_root': str,
'gitfs_base': str,
'gitfs_user': str,
'gitfs_password': str,
'gitfs_insecure_auth': bool,
'gitfs_privkey': str,
'gitfs_pubkey': str,
'gitfs_passphrase': str,
'gitfs_env_whitelist': list,
'gitfs_env_blacklist': list,
'hgfs_remotes': list,
'hgfs_mountpoint': str,
'hgfs_root': str,
'hgfs_base': str,
'hgfs_branch_method': str,
'hgfs_env_whitelist': list,
'hgfs_env_blacklist': list,
'svnfs_remotes': list,
'svnfs_mountpoint': str,
'svnfs_root': str,
'svnfs_trunk': str,
'svnfs_branches': str,
'svnfs_tags': str,
'svnfs_env_whitelist': list,
'svnfs_env_blacklist': list,
'minionfs_env': str,
'minionfs_mountpoint': str,
'minionfs_whitelist': list,
'minionfs_blacklist': list,
'ext_pillar': list,
'pillar_version': int,
'pillar_opts': bool,
'pillar_safe_render_error': bool,
'pillar_source_merging_strategy': str,
'ping_on_rotate': bool,
'peer': dict,
'preserve_minion_cache': bool,
'syndic_master': str,
'runner_dirs': list,
'client_acl': dict,
'client_acl_blacklist': dict,
'sudo_acl': bool,
'external_auth': dict,
'token_expire': int,
'file_recv': bool,
'file_recv_max_size': int,
'file_ignore_regex': bool,
'file_ignore_glob': bool,
'fileserver_backend': list,
'fileserver_followsymlinks': bool,
'fileserver_ignoresymlinks': bool,
'fileserver_limit_traversal': bool,
'max_open_files': int,
'auto_accept': bool,
'autosign_timeout': int,
'master_tops': bool,
'order_masters': bool,
'job_cache': bool,
'ext_job_cache': str,
'master_job_cache': str,
'minion_data_cache': bool,
'publish_session': int,
'reactor': list,
'reactor_refresh_interval': int,
'reactor_worker_threads': int,
'reactor_worker_hwm': int,
'serial': str,
'search': str,
'search_index_interval': int,
'nodegroups': dict,
'key_logfile': str,
'win_repo': str,
'win_repo_mastercachefile': str,
'win_gitrepos': list,
'modules_max_memory': int,
'grains_refresh_every': int,
'enable_lspci': bool,
'syndic_wait': int,
'jinja_lstrip_blocks': bool,
'jinja_trim_blocks': bool,
'minion_id_caching': bool,
'sign_pub_messages': bool,
'keysize': int,
'transport': str,
'enumerate_proxy_minions': bool,
'gather_job_timeout': int,
'auth_timeout': int,
'auth_tries': int,
'auth_safemode': bool,
'random_master': bool,
'random_reauth_delay': int,
'syndic_event_forward_timeout': float,
'syndic_max_event_process_time': float,
'ssh_passwd': str,
'ssh_port': str,
'ssh_sudo': bool,
'ssh_timeout': float,
'ssh_user': str,
'ssh_scan_ports': str,
'ssh_scan_timeout': float,
'ioflo_verbose': int,
'ioflo_period': float,
'ioflo_realtime': bool,
'ioflo_console_logdir': str,
'raet_port': int,
'raet_alt_port': int,
'raet_mutable': bool,
'raet_main': bool,
'raet_clear_remotes': bool,
'raet_clear_remote_masters': bool,
'cluster_mode': bool,
'cluster_masters': list,
'sqlite_queue_dir': str,
'queue_dirs': list,
'ping_interval': int,
'cli_summary': bool,
'max_minions': int,
'username': str,
'password': str,
'zmq_filtering': bool,
'con_cache': bool,
'rotate_aes_key': bool,
'cache_sreqs': bool,
'cmd_safe': bool,
'dummy_publisher': bool,
}
# default configurations
DEFAULT_MINION_OPTS = {
'interface': '0.0.0.0',
'master': 'salt',
'master_type': 'str',
'master_port': '4506',
'master_finger': '',
'master_shuffle': False,
'master_alive_interval': 0,
'verify_master_pubkey_sign': False,
'always_verify_signature': False,
'master_sign_key_name': 'master_sign',
'syndic_finger': '',
'user': 'root',
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'minion'),
'id': None,
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'minion'),
'cache_jobs': False,
'grains_cache': False,
'grains_cache_expiration': 300,
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'minion'),
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'minion'),
'backup_mode': '',
'renderer': 'yaml_jinja',
'failhard': False,
'autoload_dynamic_modules': True,
'environment': None,
'extension_modules': '',
'state_top': 'top.sls',
'startup_states': '',
'sls_list': [],
'top_file': '',
'file_client': 'remote',
'use_master_when_local': False,
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR],
},
'fileserver_limit_traversal': False,
'file_recv': False,
'file_recv_max_size': 100,
'file_ignore_regex': None,
'file_ignore_glob': None,
'fileserver_backend': ['roots'],
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'pillar_roots': {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR],
},
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
'gitfs_base': 'master',
'gitfs_user': '',
'gitfs_password': '',
'gitfs_insecure_auth': False,
'gitfs_privkey': '',
'gitfs_pubkey': '',
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'hash_type': 'md5',
'disable_modules': [],
'disable_returners': [],
'whitelist_modules': [],
'module_dirs': [],
'returner_dirs': [],
'grains_dirs': [],
'states_dirs': [],
'render_dirs': [],
'outputter_dirs': [],
'utils_dirs': [],
'providers': {},
'clean_dynamic_modules': True,
'open_mode': False,
'auto_accept': True,
'autosign_timeout': 120,
'multiprocessing': True,
'mine_interval': 60,
'ipc_mode': 'ipc',
'ipv6': False,
'file_buffer_size': 262144,
'tcp_pub_port': 4510,
'tcp_pull_port': 4511,
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'minion'),
'log_level': None,
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_granular_levels': {},
'max_event_size': 1048576,
'test': False,
'ext_job_cache': '',
'cython_enable': False,
'state_verbose': True,
'state_output': 'full',
'state_auto_order': True,
'state_events': False,
'state_aggregate': False,
'acceptance_wait_time': 10,
'acceptance_wait_time_max': 0,
'rejected_retry': False,
'loop_interval': 1,
'verify_env': True,
'grains': {},
'permissive_pki_access': False,
'default_include': 'minion.d/*.conf',
'update_url': False,
'update_restart_services': [],
'retry_dns': 30,
'recon_max': 10000,
'recon_default': 1000,
'recon_randomize': True,
'random_reauth_delay': 10,
'win_repo_cachefile': 'salt://win/repo/winrepo.p',
'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-minion.pid'),
'range_server': 'range:80',
'tcp_keepalive': True,
'tcp_keepalive_idle': 300,
'tcp_keepalive_cnt': -1,
'tcp_keepalive_intvl': -1,
'modules_max_memory': -1,
'grains_refresh_every': 0,
'minion_id_caching': True,
'keysize': 2048,
'transport': 'zeromq',
'auth_timeout': 60,
'auth_tries': 7,
'auth_safemode': False,
'random_master': False,
'minion_floscript': os.path.join(FLO_DIR, 'minion.flo'),
'caller_floscript': os.path.join(FLO_DIR, 'caller.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.1,
'ioflo_realtime': True,
'ioflo_console_logdir': '',
'raet_port': 4510,
'raet_alt_port': 4511,
'raet_mutable': False,
'raet_main': False,
'raet_clear_remotes': True,
'raet_clear_remote_masters': True,
'cluster_mode': False,
'cluster_masters': [],
'restart_on_error': False,
'ping_interval': 0,
'username': None,
'password': None,
'zmq_filtering': False,
'zmq_monitor': False,
'cache_sreqs': True,
'cmd_safe': True,
}
DEFAULT_MASTER_OPTS = {
'interface': '0.0.0.0',
'publish_port': '4505',
'pub_hwm': 1000,
'rep_hwm': 50000,
'auth_mode': 1,
'user': 'root',
'worker_threads': 5,
'sock_dir': os.path.join(salt.syspaths.SOCK_DIR, 'master'),
'ret_port': '4506',
'timeout': 5,
'keep_jobs': 24,
'root_dir': salt.syspaths.ROOT_DIR,
'pki_dir': os.path.join(salt.syspaths.CONFIG_DIR, 'pki', 'master'),
'cachedir': os.path.join(salt.syspaths.CACHE_DIR, 'master'),
'file_roots': {
'base': [salt.syspaths.BASE_FILE_ROOTS_DIR],
},
'master_roots': {
'base': [salt.syspaths.BASE_MASTER_ROOTS_DIR],
},
'pillar_roots': {
'base': [salt.syspaths.BASE_PILLAR_ROOTS_DIR],
},
'gitfs_remotes': [],
'gitfs_mountpoint': '',
'gitfs_root': '',
'gitfs_base': 'master',
'gitfs_user': '',
'gitfs_password': '',
'gitfs_insecure_auth': False,
'gitfs_privkey': '',
'gitfs_pubkey': '',
'gitfs_passphrase': '',
'gitfs_env_whitelist': [],
'gitfs_env_blacklist': [],
'hgfs_remotes': [],
'hgfs_mountpoint': '',
'hgfs_root': '',
'hgfs_base': 'default',
'hgfs_branch_method': 'branches',
'hgfs_env_whitelist': [],
'hgfs_env_blacklist': [],
'show_timeout': True,
'show_jid': False,
'svnfs_remotes': [],
'svnfs_mountpoint': '',
'svnfs_root': '',
'svnfs_trunk': 'trunk',
'svnfs_branches': 'branches',
'svnfs_tags': 'tags',
'svnfs_env_whitelist': [],
'svnfs_env_blacklist': [],
'max_event_size': 1048576,
'minionfs_env': 'base',
'minionfs_mountpoint': '',
'minionfs_whitelist': [],
'minionfs_blacklist': [],
'ext_pillar': [],
'pillar_version': 2,
'pillar_opts': False,
'pillar_safe_render_error': True,
'pillar_source_merging_strategy': 'smart',
'ping_on_rotate': False,
'peer': {},
'preserve_minion_cache': False,
'syndic_master': '',
'runner_dirs': [],
'outputter_dirs': [],
'client_acl': {},
'client_acl_blacklist': {},
'sudo_acl': False,
'external_auth': {},
'token_expire': 43200,
'extension_modules': os.path.join(salt.syspaths.CACHE_DIR, 'extmods'),
'file_recv': False,
'file_recv_max_size': 100,
'file_buffer_size': 1048576,
'file_ignore_regex': None,
'file_ignore_glob': None,
'fileserver_backend': ['roots'],
'fileserver_followsymlinks': True,
'fileserver_ignoresymlinks': False,
'fileserver_limit_traversal': False,
'max_open_files': 100000,
'hash_type': 'md5',
'conf_file': os.path.join(salt.syspaths.CONFIG_DIR, 'master'),
'open_mode': False,
'auto_accept': False,
'renderer': 'yaml_jinja',
'failhard': False,
'state_top': 'top.sls',
'master_tops': {},
'order_masters': False,
'job_cache': True,
'ext_job_cache': '',
'master_job_cache': 'local_cache',
'minion_data_cache': True,
'enforce_mine_cache': False,
'ipv6': False,
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'master'),
'log_level': None,
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_granular_levels': {},
'pidfile': os.path.join(salt.syspaths.PIDFILE_DIR, 'salt-master.pid'),
'publish_session': 86400,
'range_server': 'range:80',
'reactor': [],
'reactor_refresh_interval': 60,
'reactor_worker_threads': 10,
'reactor_worker_hwm': 10000,
'event_return': '',
'event_return_queue': 0,
'event_return_whitelist': [],
'event_return_blacklist': [],
'serial': 'msgpack',
'state_verbose': True,
'state_output': 'full',
'state_auto_order': True,
'state_events': False,
'state_aggregate': False,
'search': '',
'search_index_interval': 3600,
'loop_interval': 60,
'nodegroups': {},
'cython_enable': False,
'enable_gpu_grains': False,
# XXX: Remove 'key_logfile' support in 2014.1.0
'key_logfile': os.path.join(salt.syspaths.LOGS_DIR, 'key'),
'verify_env': True,
'permissive_pki_access': False,
'default_include': 'master.d/*.conf',
'win_repo': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR, 'win', 'repo'),
'win_repo_mastercachefile': os.path.join(salt.syspaths.BASE_FILE_ROOTS_DIR,
'win', 'repo', 'winrepo.p'),
'win_gitrepos': ['https://github.com/saltstack/salt-winrepo.git'],
'syndic_wait': 5,
'jinja_lstrip_blocks': False,
'jinja_trim_blocks': False,
'sign_pub_messages': False,
'keysize': 2048,
'transport': 'zeromq',
'enumerate_proxy_minions': False,
'gather_job_timeout': 5,
'syndic_event_forward_timeout': 0.5,
'syndic_max_event_process_time': 0.5,
'ssh_passwd': '',
'ssh_port': '22',
'ssh_sudo': False,
'ssh_timeout': 60,
'ssh_user': 'root',
'ssh_scan_ports': '22',
'ssh_scan_timeout': 0.01,
'master_floscript': os.path.join(FLO_DIR, 'master.flo'),
'worker_floscript': os.path.join(FLO_DIR, 'worker.flo'),
'maintenance_floscript': os.path.join(FLO_DIR, 'maint.flo'),
'ioflo_verbose': 0,
'ioflo_period': 0.01,
'ioflo_realtime': True,
'ioflo_console_logdir': '',
'raet_port': 4506,
'raet_alt_port': 4511,
'raet_mutable': False,
'raet_main': True,
'raet_clear_remotes': False,
'raet_clear_remote_masters': True,
'cluster_mode': False,
'cluster_masters': [],
'sqlite_queue_dir': os.path.join(salt.syspaths.CACHE_DIR, 'master', 'queues'),
'queue_dirs': [],
'cli_summary': False,
'max_minions': 0,
'master_sign_key_name': 'master_sign',
'master_sign_pubkey': False,
'master_pubkey_signature': 'master_pubkey_signature',
'master_use_pubkey_signature': False,
'zmq_filtering': False,
'con_cache': False,
'rotate_aes_key': True,
'cache_sreqs': True,
'dummy_pub': False,
}
# ----- Salt Cloud Configuration Defaults ----------------------------------->
CLOUD_CONFIG_DEFAULTS = {
'verify_env': True,
'default_include': 'cloud.conf.d/*.conf',
# Global defaults
'ssh_auth': '',
'keysize': 4096,
'os': '',
'script': 'bootstrap-salt',
'start_action': None,
'enable_hard_maps': False,
'delete_sshkeys': False,
# Custom deploy scripts
'deploy_scripts_search_path': 'cloud.deploy.d',
# Logging defaults
'log_file': os.path.join(salt.syspaths.LOGS_DIR, 'cloud'),
'log_level': None,
'log_level_logfile': None,
'log_datefmt': _DFLT_LOG_DATEFMT,
'log_datefmt_logfile': _DFLT_LOG_DATEFMT_LOGFILE,
'log_fmt_console': _DFLT_LOG_FMT_CONSOLE,
'log_fmt_logfile': _DFLT_LOG_FMT_LOGFILE,
'log_granular_levels': {},
}
DEFAULT_API_OPTS = {
# ----- Salt master settings overridden by Salt-API --------------------->
'pidfile': '/var/run/salt-api.pid',
'logfile': '/var/log/salt/api',
# <---- Salt master settings overridden by Salt-API ----------------------
}
VM_CONFIG_DEFAULTS = {
'default_include': 'cloud.profiles.d/*.conf',
}
PROVIDER_CONFIG_DEFAULTS = {
'default_include': 'cloud.providers.d/*.conf',
}
# <---- Salt Cloud Configuration Defaults ------------------------------------
def _validate_file_roots(opts):
    '''
    Sanity-check the ``file_roots`` option.

    A non-dict value is discarded in favor of the packaged default; any
    environment whose directory list is malformed (e.g. ``None``) is reset
    to an empty list. Shell globbing is then applied to every entry.
    '''
    file_roots = opts['file_roots']
    if not isinstance(file_roots, dict):
        log.warning('The file_roots parameter is not properly formatted,'
                    ' using defaults')
        return {'base': _expand_glob_path([salt.syspaths.BASE_FILE_ROOTS_DIR])}
    for saltenv in list(file_roots):
        dirs = file_roots[saltenv]
        if not isinstance(dirs, (list, tuple)):
            # Malformed entry (None, string, ...): drop it
            dirs = []
        file_roots[saltenv] = _expand_glob_path(dirs)
    return file_roots
def _expand_glob_path(file_roots):
'''
Applies shell globbing to a set of directories and returns
the expanded paths
'''
unglobbed_path = []
for path in file_roots:
if glob.has_magic(path):
unglobbed_path.extend(glob.glob(path))
else:
unglobbed_path.append(path)
return unglobbed_path
def _validate_opts(opts):
    '''
    Check that all of the types of values passed into the config are
    of the right types.

    :param opts: The loaded options dictionary.
    :return: ``True`` when every known key has a value of the expected
        type, ``False`` otherwise (each problem is logged as a warning).
    '''
    errors = []
    err = ('Key {0} with value {1} has an invalid type of {2}, a {3} is '
           'required for this value')
    for key, val in six.iteritems(opts):
        if key not in VALID_OPTS:
            # Unknown keys are not type-checked
            continue
        # Instantiate the expected type once so it can be inspected
        expected = VALID_OPTS[key]()
        # BUG FIX: the dict check below used to be a separate `if` with an
        # `else`, so list-typed keys holding a bad value also fell into the
        # generic conversion branch and could be reported twice.
        if isinstance(expected, list):
            if not isinstance(val, VALID_OPTS[key]):
                errors.append(err.format(key, val, type(val), 'list'))
        elif isinstance(expected, dict):
            if not isinstance(val, VALID_OPTS[key]):
                errors.append(err.format(key, val, type(val), 'dict'))
        else:
            # Scalar type: accept anything the type constructor can convert
            try:
                VALID_OPTS[key](val)
            except (ValueError, TypeError):
                errors.append(
                    err.format(key, val, type(val), VALID_OPTS[key])
                )
    for error in errors:
        log.warning(error)
    if errors:
        return False
    return True
def _append_domain(opts):
'''
Append a domain to the existing id if it doesn't already exist
'''
# Domain already exists
if opts['id'].endswith(opts['append_domain']):
return opts['id']
# Trailing dot should mean an FQDN that is terminated, leave it alone.
if opts['id'].endswith('.'):
return opts['id']
return '{0[id]}.{0[append_domain]}'.format(opts)
def _read_conf_file(path):
    '''
    Parse a single YAML configuration file into a dictionary.

    Invalid YAML, or YAML that does not describe a mapping, yields an
    empty dict. The minion ``id`` is coerced to a string and unicode
    values are re-encoded as utf-8 byte strings.
    '''
    log.debug('Reading configuration from {0}'.format(path))
    with salt.utils.fopen(path, 'r') as conf_file:
        try:
            conf_opts = yaml.safe_load(conf_file.read()) or {}
        except yaml.YAMLError as err:
            log.error(
                'Error parsing configuration file: {0} - {1}'.format(path, err)
            )
            conf_opts = {}
        # only interpret documents as a valid conf, not things like strings,
        # which might have been caused by invalid yaml syntax
        if not isinstance(conf_opts, dict):
            log.error(
                'Error parsing configuration file: {0} - conf should be a '
                'document, not {1}.'.format(path, type(conf_opts))
            )
            conf_opts = {}
        # allow using numeric ids: convert int to string
        if 'id' in conf_opts:
            conf_opts['id'] = str(conf_opts['id'])
        # We do not want unicode settings, re-encode them as byte strings
        for key in list(conf_opts):
            value = conf_opts[key]
            if isinstance(value, text_type):
                conf_opts[key] = value.encode('utf-8')
        return conf_opts
def _absolute_path(path, relative_to=None):
'''
Return an absolute path. In case ``relative_to`` is passed and ``path`` is
not an absolute path, we try to prepend ``relative_to`` to ``path``and if
that path exists, return that one
'''
if path and os.path.isabs(path):
return path
if path and relative_to is not None:
_abspath = os.path.join(relative_to, path)
if os.path.isfile(_abspath):
log.debug(
'Relative path {0!r} converted to existing absolute path {1!r}'.format(
path, _abspath
)
)
return _abspath
return path
def load_config(path, env_var, default_path=None):
    '''
    Returns configuration dict from parsing either the file described by
    ``path`` or the environment variable described by ``env_var`` as YAML.

    Resolution order:

    1. ``path is None`` returns an empty dict (defaults only).
    2. When ``env_var`` points at a readable file *and* ``path`` equals
       ``default_path``, the environment variable wins.
    3. A missing configuration file is seeded from an adjacent
       ``<path>.template`` file (minus its first header line), if present.
    '''
    if path is None:
        # When the passed path is None, we just want the configuration
        # defaults, not actually loading the whole configuration.
        return {}
    if default_path is None:
        # This is most likely not being used from salt, i.e., could be salt-cloud
        # or salt-api which have not yet migrated to the new default_path
        # argument. Let's issue a warning message that the environ vars won't
        # work.
        import inspect
        previous_frame = inspect.getframeinfo(inspect.currentframe().f_back)
        log.warning(
            'The function \'{0}()\' defined in {1!r} is not yet using the '
            'new \'default_path\' argument to `salt.config.load_config()`. '
            'As such, the {2!r} environment variable will be ignored'.format(
                previous_frame.function, previous_frame.filename, env_var
            )
        )
        # In this case, maintain old behavior
        default_path = DEFAULT_MASTER_OPTS['conf_file']
    # Default to the environment variable path, if it exists
    env_path = os.environ.get(env_var, path)
    if not env_path or not os.path.isfile(env_path):
        env_path = path
    # If non-default path from `-c`, use that over the env variable
    if path != default_path:
        env_path = path
    path = env_path
    # If the configuration file is missing, attempt to copy the template,
    # after removing the first header line.
    if not os.path.isfile(path):
        template = '{0}.template'.format(path)
        if os.path.isfile(template):
            log.debug('Writing {0} based on {1}'.format(path, template))
            with salt.utils.fopen(path, 'w') as out:
                with salt.utils.fopen(template, 'r') as ifile:
                    ifile.readline()  # skip first line
                    out.write(ifile.read())
    if salt.utils.validate.path.is_readable(path):
        opts = _read_conf_file(path)
        # Record where the configuration actually came from
        opts['conf_file'] = path
        return opts
    log.debug('Missing configuration file: {0}'.format(path))
    return {}
def include_config(include, orig_path, verbose):
    '''
    Parses extra configuration file(s) specified in an include list in the
    main config file.
    '''
    configuration = {}
    # Protect against an empty include option, and against a None base
    # path (which means only the defaults were requested)
    if not include or orig_path is None:
        return configuration
    # A single glob may be given as a bare string
    patterns = [include] if isinstance(include, str) else include
    for pattern in patterns:
        # Allow for includes like ~/foo
        pattern = os.path.expanduser(pattern)
        if not os.path.isabs(pattern):
            pattern = os.path.join(os.path.dirname(orig_path), pattern)
        matches = sorted(glob.glob(pattern))
        # Catch situation where user typos path in configuration; also warns
        # for empty include directory (which might be by design)
        if not matches and verbose:
            log.warn(
                'Warning parsing configuration file: "include" path/glob '
                '{0!r} matches no files'.format(pattern)
            )
        for fn_ in matches:
            log.debug('Including configuration from {0!r}'.format(fn_))
            configuration.update(_read_conf_file(fn_))
    return configuration
def prepend_root_dir(opts, path_options):
    '''
    Prepends the options that represent filesystem paths with value of the
    'root_dir' option.

    :param opts: The options dictionary, mutated in place.
    :param path_options: Iterable of option names whose values are
        filesystem paths to re-root under ``opts['root_dir']``.
    '''
    root_dir = os.path.abspath(opts['root_dir'])
    # Normalized form of the configured root used for prefix detection
    root_opt = opts['root_dir'].rstrip(os.sep)
    for path_option in path_options:
        if path_option in opts:
            path = opts[path_option]
            # BUG FIX: the previous code stripped the root_dir prefix with a
            # bare startswith()/slice, so a sibling path such as
            # '<root_dir>extra/...' was mangled. Only strip the prefix on a
            # path-component boundary; anything else is joined as-is.
            if path == root_opt:
                path = ''
            elif path.startswith(root_opt + os.sep):
                path = path[len(root_opt + os.sep):]
            opts[path_option] = salt.utils.path_join(root_dir, path)
def insert_system_path(opts, paths):
    '''
    Inserts path into python path taking into consideration 'root_dir' option.
    '''
    # Accept a single path as well as a list of paths
    path_list = [paths] if isinstance(paths, str) else paths
    for entry in path_list:
        # Reuse the path-option machinery to re-root the entry under
        # root_dir before considering it
        holder = {'path': entry, 'root_dir': opts['root_dir']}
        prepend_root_dir(holder, holder)
        rooted = holder['path']
        if os.path.isdir(rooted) and rooted not in sys.path:
            sys.path.insert(0, rooted)
def minion_config(path,
                  env_var='SALT_MINION_CONFIG',
                  defaults=None,
                  minion_id=False):
    '''
    Reads in the minion configuration file and sets up special options

    This is useful for Minion-side operations, such as the
    :py:class:`~salt.client.Caller` class, and manually running the loader
    interface.

    .. code-block:: python

        import salt.client
        minion_opts = salt.config.minion_config('/etc/salt/minion')
    '''
    if defaults is None:
        defaults = DEFAULT_MINION_OPTS
    if not os.environ.get(env_var, None):
        # No valid setting was given using the configuration variable.
        # Lets see is SALT_CONFIG_DIR is of any use
        salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
        if salt_config_dir:
            candidate = os.path.join(salt_config_dir, 'minion')
            if os.path.isfile(candidate):
                # We can get a configuration file using SALT_CONFIG_DIR,
                # let's update the environment with this information
                os.environ[env_var] = candidate
    overrides = load_config(path, env_var, DEFAULT_MINION_OPTS['conf_file'])
    # Resolve include directives (defaults first, then explicit includes)
    default_include = overrides.get('default_include',
                                    defaults['default_include'])
    include = overrides.get('include', [])
    overrides.update(include_config(default_include, path, verbose=False))
    overrides.update(include_config(include, path, verbose=True))
    opts = apply_minion_config(overrides, defaults, minion_id=minion_id)
    _validate_opts(opts)
    return opts
def syndic_config(master_config_path,
                  minion_config_path,
                  master_env_var='SALT_MASTER_CONFIG',
                  minion_env_var='SALT_MINION_CONFIG',
                  minion_defaults=None,
                  master_defaults=None):
    '''
    Build the configuration for a syndic daemon.

    A syndic is simultaneously a master (to its own minions) and a minion
    (to a higher-level master), so both configuration files are loaded and
    merged — the minion configuration taking precedence — before the
    syndic-specific settings are applied on top.

    :param master_config_path: Path to the master configuration file.
    :param minion_config_path: Path to the minion configuration file.
    :param master_env_var: Environment variable that may point at an
        alternative master configuration file.
    :param minion_env_var: Environment variable that may point at an
        alternative minion configuration file.
    :param minion_defaults: Default minion options (DEFAULT_MINION_OPTS
        when not provided).
    :param master_defaults: Default master options (DEFAULT_MASTER_OPTS
        when not provided).
    :return: The merged syndic options dictionary.
    '''
    if minion_defaults is None:
        minion_defaults = DEFAULT_MINION_OPTS
    if master_defaults is None:
        master_defaults = DEFAULT_MASTER_OPTS
    opts = {}
    master_opts = master_config(
        master_config_path, master_env_var, master_defaults
    )
    minion_opts = minion_config(
        minion_config_path, minion_env_var, minion_defaults
    )
    # BUG FIX: these two assignments were previously swapped, recording the
    # master's conf_file under '_minion_conf_file' and vice-versa.
    opts['_minion_conf_file'] = minion_opts['conf_file']
    opts['_master_conf_file'] = master_opts['conf_file']
    # Minion options win on conflicts, since the syndic connects upstream
    # as a minion
    opts.update(master_opts)
    opts.update(minion_opts)
    syndic_opts = {
        '__role': 'syndic',
        'root_dir': opts.get('root_dir', salt.syspaths.ROOT_DIR),
        'pidfile': opts.get('syndic_pidfile', 'salt-syndic.pid'),
        'log_file': opts.get('syndic_log_file', 'salt-syndic.log'),
        'id': minion_opts['id'],
        'pki_dir': minion_opts['pki_dir'],
        'master': opts['syndic_master'],
        'master_port': int(
            opts.get(
                # The user has explicitly defined the syndic master port
                'syndic_master_port',
                opts.get(
                    # No syndic_master_port, grab master_port from opts
                    'master_port',
                    # No master_opts, grab from the provided minion defaults
                    minion_defaults.get(
                        'master_port',
                        # Not on the provided minion defaults, load from the
                        # static minion defaults
                        DEFAULT_MINION_OPTS['master_port']
                    )
                )
            )
        ),
        'user': opts.get('syndic_user', opts['user']),
        'sock_dir': os.path.join(
            opts['cachedir'], opts.get('syndic_sock_dir', opts['sock_dir'])
        ),
    }
    opts.update(syndic_opts)
    # Prepend root_dir to other paths
    prepend_root_dirs = [
        'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
        'autosign_file', 'autoreject_file', 'token_dir'
    ]
    for config_key in ('log_file', 'key_logfile'):
        # Only local (non-URL) log destinations live under root_dir
        if urlparse(opts.get(config_key, '')).scheme == '':
            prepend_root_dirs.append(config_key)
    prepend_root_dir(opts, prepend_root_dirs)
    return opts
# ----- Salt Cloud Configuration Functions ---------------------------------->
def apply_sdb(opts, sdb_opts=None):
    '''
    Recurse for sdb:// links for opts
    '''
    # On the first call, walk the full configuration dictionary
    if sdb_opts is None:
        sdb_opts = opts
    if isinstance(sdb_opts, string_types) and sdb_opts.startswith('sdb://'):
        # Leaf sdb:// URL: resolve it through the sdb subsystem
        return salt.utils.sdb.sdb_get(sdb_opts, opts)
    if isinstance(sdb_opts, dict):
        for key in sdb_opts:
            if sdb_opts[key] is not None:
                sdb_opts[key] = apply_sdb(opts, sdb_opts[key])
    elif isinstance(sdb_opts, list):
        for idx, item in enumerate(sdb_opts):
            if item is not None:
                sdb_opts[idx] = apply_sdb(opts, item)
    return sdb_opts
def cloud_config(path, env_var='SALT_CLOUD_CONFIG', defaults=None,
                 master_config_path=None, master_config=None,
                 providers_config_path=None, providers_config=None,
                 profiles_config_path=None, profiles_config=None):
    '''
    Read in the salt cloud config and return the dict

    The final options are assembled from four sources, applied in order:
    the master configuration, the main cloud configuration (plus its
    includes), the cloud providers configuration, and the VM profiles
    configuration. For the master, providers and profiles settings, either
    a path or a pre-built dictionary may be passed — never both.
    '''
    # Load the cloud configuration
    overrides = load_config(
        path,
        env_var,
        os.path.join(salt.syspaths.CONFIG_DIR, 'cloud')
    )
    # Relative paths inside the config are resolved against the config dir
    if path:
        config_dir = os.path.dirname(path)
    else:
        config_dir = salt.syspaths.CONFIG_DIR
    if defaults is None:
        defaults = CLOUD_CONFIG_DEFAULTS
    # Load cloud configuration from any default or provided includes
    default_include = overrides.get(
        'default_include', defaults['default_include']
    )
    overrides.update(
        salt.config.include_config(default_include, path, verbose=False)
    )
    include = overrides.get('include', [])
    overrides.update(
        salt.config.include_config(include, path, verbose=True)
    )
    # The includes have been evaluated, let's see if master, providers and
    # profiles configuration settings have been included and if not, set the
    # default value
    if 'master_config' in overrides and master_config_path is None:
        # The configuration setting is being specified in the main cloud
        # configuration file
        master_config_path = overrides['master_config']
    elif 'master_config' not in overrides and not master_config \
            and not master_config_path:
        # The configuration setting is not being provided in the main cloud
        # configuration file, and
        master_config_path = os.path.join(config_dir, 'master')
    # Convert relative to absolute paths if necessary
    master_config_path = _absolute_path(master_config_path, config_dir)
    if 'providers_config' in overrides and providers_config_path is None:
        # The configuration setting is being specified in the main cloud
        # configuration file
        providers_config_path = overrides['providers_config']
    elif 'providers_config' not in overrides and not providers_config \
            and not providers_config_path:
        providers_config_path = os.path.join(config_dir, 'cloud.providers')
    # Convert relative to absolute paths if necessary
    providers_config_path = _absolute_path(providers_config_path, config_dir)
    if 'profiles_config' in overrides and profiles_config_path is None:
        # The configuration setting is being specified in the main cloud
        # configuration file
        profiles_config_path = overrides['profiles_config']
    elif 'profiles_config' not in overrides and not profiles_config \
            and not profiles_config_path:
        profiles_config_path = os.path.join(config_dir, 'cloud.profiles')
    # Convert relative to absolute paths if necessary
    profiles_config_path = _absolute_path(profiles_config_path, config_dir)
    # Prepare the deploy scripts search path
    deploy_scripts_search_path = overrides.get(
        'deploy_scripts_search_path',
        defaults.get('deploy_scripts_search_path', 'cloud.deploy.d')
    )
    if isinstance(deploy_scripts_search_path, string_types):
        deploy_scripts_search_path = [deploy_scripts_search_path]
    # Check the provided deploy scripts search path removing any non existing
    # entries.
    # NOTE(review): pop(idx) indexes into the live list while enumerating a
    # copy, so after an earlier removal a later pop can drop the wrong
    # entry — confirm against upstream before relying on multi-entry paths.
    for idx, entry in enumerate(deploy_scripts_search_path[:]):
        if not os.path.isabs(entry):
            # Let's try if adding the provided path's directory name turns the
            # entry into a proper directory
            entry = os.path.join(os.path.dirname(path), entry)
        if os.path.isdir(entry):
            # Path exists, let's update the entry (its path might have been
            # made absolute)
            deploy_scripts_search_path[idx] = entry
            continue
        # It's not a directory? Remove it from the search path
        deploy_scripts_search_path.pop(idx)
    # Add the built-in scripts directory to the search path (last resort)
    deploy_scripts_search_path.append(
        os.path.abspath(
            os.path.join(
                os.path.dirname(__file__),
                'cloud',
                'deploy'
            )
        )
    )
    # Let's make the search path a tuple and add it to the overrides.
    overrides.update(
        deploy_scripts_search_path=tuple(deploy_scripts_search_path)
    )
    # Grab data from the 4 sources
    # 1st - Master config
    if master_config_path is not None and master_config is not None:
        raise salt.exceptions.SaltCloudConfigError(
            'Only pass `master_config` or `master_config_path`, not both.'
        )
    elif master_config_path is None and master_config is None:
        master_config = salt.config.master_config(
            overrides.get(
                # use the value from the cloud config file
                'master_config',
                # if not found, use the default path
                os.path.join(salt.syspaths.CONFIG_DIR, 'master')
            )
        )
    elif master_config_path is not None and master_config is None:
        master_config = salt.config.master_config(master_config_path)
    # 2nd - salt-cloud configuration which was loaded before so we could
    # extract the master configuration file if needed.
    # Override master configuration with the salt cloud(current overrides)
    master_config.update(overrides)
    # We now set the overridden master_config as the overrides
    overrides = master_config
    if providers_config_path is not None and providers_config is not None:
        raise salt.exceptions.SaltCloudConfigError(
            'Only pass `providers_config` or `providers_config_path`, '
            'not both.'
        )
    elif providers_config_path is None and providers_config is None:
        providers_config_path = overrides.get(
            # use the value from the cloud config file
            'providers_config',
            # if not found, use the default path
            os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
        )
    if profiles_config_path is not None and profiles_config is not None:
        raise salt.exceptions.SaltCloudConfigError(
            'Only pass `profiles_config` or `profiles_config_path`, not both.'
        )
    elif profiles_config_path is None and profiles_config is None:
        profiles_config_path = overrides.get(
            # use the value from the cloud config file
            'profiles_config',
            # if not found, use the default path
            os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
        )
    # Apply the salt-cloud configuration
    opts = apply_cloud_config(overrides, defaults)
    # 3rd - Include Cloud Providers
    if 'providers' in opts:
        if providers_config is not None:
            raise salt.exceptions.SaltCloudConfigError(
                'Do not mix the old cloud providers configuration with '
                'the passing a pre-configured providers configuration '
                'dictionary.'
            )
        if providers_config_path is not None:
            providers_confd = os.path.join(
                os.path.dirname(providers_config_path),
                'cloud.providers.d', '*'
            )
            if (os.path.isfile(providers_config_path) or
                    glob.glob(providers_confd)):
                raise salt.exceptions.SaltCloudConfigError(
                    'Do not mix the old cloud providers configuration with '
                    'the new one. The providers configuration should now go '
                    'in the file `{0}` or a separate `*.conf` file within '
                    '`cloud.providers.d/` which is relative to `{0}`.'.format(
                        os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
                    )
                )
        # No exception was raised? It's the old configuration alone
        providers_config = opts['providers']
    elif providers_config_path is not None:
        # Load from configuration file, even if that files does not exist since
        # it will be populated with defaults.
        providers_config = cloud_providers_config(providers_config_path)
    # Let's assign back the computed providers configuration
    opts['providers'] = providers_config
    # 4th - Include VM profiles config
    if profiles_config is None:
        # Load profiles configuration from the provided file
        profiles_config = vm_profiles_config(profiles_config_path,
                                             providers_config)
    opts['profiles'] = profiles_config
    # recurse opts for sdb configs
    apply_sdb(opts)
    # Return the final options
    return opts
def apply_cloud_config(overrides, defaults=None):
    '''
    Return a cloud config

    Merges ``overrides`` on top of ``defaults`` and normalizes any
    ``providers`` section so every entry lives under
    ``config['providers'][alias][driver]`` with its ``provider`` value in
    the ``alias:driver`` form.
    '''
    if defaults is None:
        defaults = CLOUD_CONFIG_DEFAULTS
    config = defaults.copy()
    if overrides:
        config.update(overrides)
    # If the user defined providers in salt cloud's main configuration file, we
    # need to take care for proper and expected format.
    if 'providers' in config:
        # Keep a copy of the defined providers
        providers = config['providers'].copy()
        # Reset the providers dictionary
        config['providers'] = {}
        # Populate the providers dictionary
        for alias, details in six.iteritems(providers):
            if isinstance(details, list):
                for detail in details:
                    if 'provider' not in detail:
                        raise salt.exceptions.SaltCloudConfigError(
                            'The cloud provider alias {0!r} has an entry '
                            'missing the required setting \'provider\''.format(
                                alias
                            )
                        )
                    driver = detail['provider']
                    if ':' in driver:
                        # Weird, but...
                        # NOTE(review): this rebinds the loop variable
                        # `alias`, so subsequent details in the same list are
                        # filed under the rebound alias — confirm this is the
                        # intended behavior before relying on it.
                        alias, driver = driver.split(':')
                    if alias not in config['providers']:
                        config['providers'][alias] = {}
                    detail['provider'] = '{0}:{1}'.format(alias, driver)
                    config['providers'][alias][driver] = detail
            elif isinstance(details, dict):
                if 'provider' not in details:
                    raise salt.exceptions.SaltCloudConfigError(
                        'The cloud provider alias {0!r} has an entry '
                        'missing the required setting \'provider\''.format(
                            alias
                        )
                    )
                driver = details['provider']
                if ':' in driver:
                    # Weird, but...
                    alias, driver = driver.split(':')
                if alias not in config['providers']:
                    config['providers'][alias] = {}
                details['provider'] = '{0}:{1}'.format(alias, driver)
                config['providers'][alias][driver] = details
    # Migrate old configuration
    config = old_to_new(config)
    return config
def old_to_new(opts):
    '''
    Migrate pre-"providers" cloud configuration (flat keys such as
    ``EC2.id`` or ``RACKSPACE.apikey``) into the modern nested
    ``opts['providers'][alias][driver]`` layout.

    :param opts: The cloud configuration dictionary, mutated in place.
    :return: The same dictionary, with a ``providers`` entry added for
        every legacy provider section that was found.
    '''
    providers = (
        'AWS',
        'CLOUDSTACK',
        'DIGITAL_OCEAN',
        'EC2',
        'GOGRID',
        'IBMSCE',
        'JOYENT',
        'LINODE',
        'OPENSTACK',
        # BUG FIX: a missing comma previously fused 'PARALLELS' and
        # 'RACKSPACE' into the single entry 'PARALLELSRACKSPACE', so the
        # legacy settings of both providers were silently never migrated.
        'PARALLELS',
        'RACKSPACE',
        'SALTIFY'
    )
    for provider in providers:
        provider_config = {}
        for opt, val in opts.items():
            if provider not in opt:
                continue
            if '.' not in opt:
                # Not a '<PROVIDER>.<setting>' style key; nothing to migrate
                continue
            name = opt.split('.', 1)[1]
            provider_config[name] = val
        lprovider = provider.lower()
        if provider_config:
            provider_config['provider'] = lprovider
            opts.setdefault('providers', {})
            # provider alias
            opts['providers'][lprovider] = {}
            # provider alias, provider driver
            opts['providers'][lprovider][lprovider] = provider_config
    return opts
def vm_profiles_config(path,
                       providers,
                       env_var='SALT_CLOUDVM_CONFIG',
                       defaults=None):
    '''
    Read in the salt cloud VM config file
    '''
    if defaults is None:
        defaults = VM_CONFIG_DEFAULTS
    fallback = os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.profiles')
    overrides = salt.config.load_config(path, env_var, fallback)
    # Capture both include directives before merging any included files
    default_include = overrides.get('default_include',
                                    defaults['default_include'])
    include = overrides.get('include', [])
    overrides.update(
        salt.config.include_config(default_include, path, verbose=False))
    overrides.update(
        salt.config.include_config(include, path, verbose=True))
    return apply_vm_profiles_config(providers, overrides, defaults)
def apply_vm_profiles_config(providers, overrides, defaults=None):
    '''
    Merge the loaded VM profiles configuration over the defaults, register
    each profile with its provider and resolve ``extends`` inheritance
    between profiles. Profiles pointing at providers with no valid
    configuration are dropped from the returned mapping.
    '''
    if defaults is None:
        defaults = VM_CONFIG_DEFAULTS
    config = defaults.copy()
    if overrides:
        config.update(overrides)
    # Every remaining key is expected to be a profile definition
    vms = {}
    for key, val in six.iteritems(config):
        if key in ('conf_file', 'include', 'default_include', 'user'):
            continue
        if not isinstance(val, dict):
            raise salt.exceptions.SaltCloudConfigError(
                'The VM profiles configuration found in {0[conf_file]!r} is '
                'not in the proper format'.format(config)
            )
        val['profile'] = key
        vms[key] = val
    # Is any VM profile extending data!?
    for profile, details in six.iteritems(vms.copy()):
        if 'extends' not in details:
            if ':' in details['provider']:
                # Provider given as '<alias>:<driver>'
                alias, driver = details['provider'].split(':')
                if alias not in providers or driver not in providers[alias]:
                    log.trace(
                        'The profile {0!r} is defining {1[provider]!r} as the '
                        'provider. Since there\'s no valid configuration for '
                        'that provider, the profile will be removed from the '
                        'available listing'.format(profile, details)
                    )
                    vms.pop(profile)
                    continue
                if 'profiles' not in providers[alias][driver]:
                    providers[alias][driver]['profiles'] = {}
                providers[alias][driver]['profiles'][profile] = details
            # NOTE(review): an 'alias:driver' provider that was just
            # registered above still falls through to this membership check
            # (an 'alias:driver' string is never a bare alias key), so the
            # profile is popped from the returned listing — looks like a
            # missing `continue` after the registration; confirm upstream.
            if details['provider'] not in providers:
                log.trace(
                    'The profile {0!r} is defining {1[provider]!r} as the '
                    'provider. Since there\'s no valid configuration for '
                    'that provider, the profile will be removed from the '
                    'available listing'.format(profile, details)
                )
                vms.pop(profile)
                continue
            # Bare alias: pick the alias' first driver and qualify the name
            driver = next(iter(list(providers[details['provider']].keys())))
            providers[details['provider']][driver].setdefault(
                'profiles', {}).update({profile: details})
            details['provider'] = '{0[provider]}:{1}'.format(details, driver)
            vms[profile] = details
            continue
        extends = details.pop('extends')
        if extends not in vms:
            log.error(
                'The {0!r} profile is trying to extend data from {1!r} '
                'though {1!r} is not defined in the salt profiles loaded '
                'data. Not extending and removing from listing!'.format(
                    profile, extends
                )
            )
            vms.pop(profile)
            continue
        # Base profile data first, the extending profile's keys win
        extended = vms.get(extends).copy()
        extended.pop('profile')
        extended.update(details)
        if ':' not in extended['provider']:
            if extended['provider'] not in providers:
                log.trace(
                    'The profile {0!r} is defining {1[provider]!r} as the '
                    'provider. Since there\'s no valid configuration for '
                    'that provider, the profile will be removed from the '
                    'available listing'.format(profile, extended)
                )
                vms.pop(profile)
                continue
            driver = next(iter(list(providers[extended['provider']].keys())))
            providers[extended['provider']][driver].setdefault(
                'profiles', {}).update({profile: extended})
            extended['provider'] = '{0[provider]}:{1}'.format(extended, driver)
        else:
            alias, driver = extended['provider'].split(':')
            if alias not in providers or driver not in providers[alias]:
                log.trace(
                    'The profile {0!r} is defining {1[provider]!r} as the '
                    'provider. Since there\'s no valid configuration for '
                    'that provider, the profile will be removed from the '
                    'available listing'.format(profile, extended)
                )
                vms.pop(profile)
                continue
            providers[alias][driver].setdefault('profiles', {}).update(
                {profile: extended}
            )
        # Update the profile's entry with the extended data
        vms[profile] = extended
    return vms
def cloud_providers_config(path,
                           env_var='SALT_CLOUD_PROVIDERS_CONFIG',
                           defaults=None):
    '''
    Read in the salt cloud providers configuration file
    '''
    if defaults is None:
        defaults = PROVIDER_CONFIG_DEFAULTS
    fallback = os.path.join(salt.syspaths.CONFIG_DIR, 'cloud.providers')
    overrides = salt.config.load_config(path, env_var, fallback)
    # Capture both include directives before merging any included files
    default_include = overrides.get('default_include',
                                    defaults['default_include'])
    include = overrides.get('include', [])
    overrides.update(
        salt.config.include_config(default_include, path, verbose=False))
    overrides.update(
        salt.config.include_config(include, path, verbose=True))
    return apply_cloud_providers_config(overrides, defaults)
def apply_cloud_providers_config(overrides, defaults=None):
    '''
    Apply the loaded cloud providers configuration.

    Normalizes the configuration into ``providers[alias][driver]`` form,
    resolves ``extends`` references between provider entries (possibly
    over several passes), and finally removes any placeholder entries that
    existed only to be extended from.
    '''
    if defaults is None:
        defaults = PROVIDER_CONFIG_DEFAULTS
    config = defaults.copy()
    if overrides:
        config.update(overrides)
    # Is the user still using the old format in the new configuration file?!
    for name, settings in six.iteritems(config.copy()):
        if '.' in name:
            log.warn(
                'Please switch to the new providers configuration syntax'
            )
            # Let's help out and migrate the data
            config = old_to_new(config)
            # old_to_new will migrate the old data into the 'providers' key of
            # the config dictionary. Let's map it correctly
            for prov_name, prov_settings in six.iteritems(config.pop('providers')):
                config[prov_name] = prov_settings
            # One migration pass covers every legacy key, so stop scanning
            break
    providers = {}
    # Counter used to name entries that only exist to be extended from
    ext_count = 0
    for key, val in six.iteritems(config):
        if key in ('conf_file', 'include', 'default_include', 'user'):
            continue
        if not isinstance(val, (list, tuple)):
            val = [val]
        else:
            # Need to check for duplicate cloud provider entries per "alias" or
            # we won't be able to properly reference it.
            handled_providers = set()
            for details in val:
                if 'provider' not in details:
                    if 'extends' not in details:
                        log.error(
                            'Please check your cloud providers configuration. '
                            'There\'s no \'provider\' nor \'extends\' '
                            'definition. So it\'s pretty useless.'
                        )
                    continue
                if details['provider'] in handled_providers:
                    log.error(
                        'You can only have one entry per cloud provider. For '
                        'example, if you have a cloud provider configuration '
                        'section named, \'production\', you can only have a '
                        'single entry for EC2, Joyent, Openstack, and so '
                        'forth.'
                    )
                    raise salt.exceptions.SaltCloudConfigError(
                        'The cloud provider alias {0!r} has multiple entries '
                        'for the {1[provider]!r} driver.'.format(key, details)
                    )
                handled_providers.add(details['provider'])
        for entry in val:
            if 'provider' not in entry:
                # Placeholder entry that only exists to be extended from
                entry['provider'] = '-only-extendable-{0}'.format(ext_count)
                ext_count += 1
            if key not in providers:
                providers[key] = {}
            provider = entry['provider']
            if provider not in providers[key]:
                providers[key][provider] = entry
    # Is any provider extending data!?
    # First pass(es): fully qualify every 'extends' reference as
    # '<alias>:<driver>', looping until no unresolved reference remains.
    while True:
        keep_looping = False
        for provider_alias, entries in six.iteritems(providers.copy()):
            for driver, details in six.iteritems(entries):
                # Set a holder for the defined profiles
                # NOTE(review): this resets 'profiles' on every pass of the
                # while-loop, discarding anything stored there earlier —
                # confirm this is intended.
                providers[provider_alias][driver]['profiles'] = {}
                if 'extends' not in details:
                    continue
                extends = details.pop('extends')
                if ':' in extends:
                    alias, provider = extends.split(':')
                    if alias not in providers:
                        raise salt.exceptions.SaltCloudConfigError(
                            'The {0!r} cloud provider entry in {1!r} is '
                            'trying to extend data from {2!r} though {2!r} '
                            'is not defined in the salt cloud providers '
                            'loaded data.'.format(
                                details['provider'],
                                provider_alias,
                                alias
                            )
                        )
                    if provider not in providers.get(alias):
                        raise salt.exceptions.SaltCloudConfigError(
                            'The {0!r} cloud provider entry in {1!r} is '
                            'trying to extend data from \'{2}:{3}\' though '
                            '{3!r} is not defined in {1!r}'.format(
                                details['provider'],
                                provider_alias,
                                alias,
                                provider
                            )
                        )
                    details['extends'] = '{0}:{1}'.format(alias, provider)
                    # change provider details '-only-extendable-' to extended provider name
                    details['provider'] = provider
                elif providers.get(extends):
                    raise salt.exceptions.SaltCloudConfigError(
                        'The {0!r} cloud provider entry in {1!r} is trying '
                        'to extend from {2!r} and no provider was specified. '
                        'Not extending!'.format(
                            details['provider'], provider_alias, extends
                        )
                    )
                elif extends not in providers:
                    raise salt.exceptions.SaltCloudConfigError(
                        'The {0!r} cloud provider entry in {1!r} is trying '
                        'to extend data from {2!r} though {2!r} is not '
                        'defined in the salt cloud providers loaded '
                        'data.'.format(
                            details['provider'], provider_alias, extends
                        )
                    )
                else:
                    if driver in providers.get(extends):
                        details['extends'] = '{0}:{1}'.format(extends, driver)
                    # NOTE(review): `in` on a dict tests for an exact key, but
                    # placeholder keys carry a counter suffix
                    # ('-only-extendable-0', ...), so this branch can never
                    # match and ext_count may not name the right entry —
                    # confirm against upstream.
                    elif '-only-extendable-' in providers.get(extends):
                        details['extends'] = '{0}:{1}'.format(
                            extends, '-only-extendable-{0}'.format(ext_count)
                        )
                    else:
                        # We're still not aware of what we're trying to extend
                        # from. Let's try on next iteration
                        details['extends'] = extends
                        keep_looping = True
        if not keep_looping:
            break
    while True:
        # Merge provided extends
        keep_looping = False
        for alias, entries in six.iteritems(providers.copy()):
            for driver, details in six.iteritems(entries):
                if 'extends' not in details:
                    # Extends resolved or non existing, continue!
                    continue
                if 'extends' in details['extends']:
                    # Since there's a nested extends, resolve this one in the
                    # next iteration
                    keep_looping = True
                    continue
                # Let's get a reference to what we're supposed to extend
                extends = details.pop('extends')
                # Split the setting in (alias, driver)
                ext_alias, ext_driver = extends.split(':')
                # Grab a copy of what should be extended
                extended = providers.get(ext_alias).get(ext_driver).copy()
                # Merge the data to extend with the details
                extended.update(details)
                # Update the providers dictionary with the merged data
                providers[alias][driver] = extended
                # Update name of the driver, now that it's populated with extended information
                if driver.startswith('-only-extendable-'):
                    providers[alias][ext_driver] = providers[alias][driver]
                    # Delete driver with old name to maintain dictionary size
                    del providers[alias][driver]
        if not keep_looping:
            break
    # Now clean up any providers entry that was just used to be a data tree to
    # extend from
    for provider_alias, entries in six.iteritems(providers.copy()):
        for driver, details in six.iteritems(entries.copy()):
            if not driver.startswith('-only-extendable-'):
                continue
            log.info(
                'There\'s at least one cloud driver details under the {0!r} '
                'cloud provider alias which does not have the required '
                '\'provider\' setting. Was probably just used as a holder '
                'for additional data. Removing it from the available '
                'providers listing'.format(
                    provider_alias
                )
            )
            providers[provider_alias].pop(driver)
        if not providers[provider_alias]:
            providers.pop(provider_alias)
    return providers
def get_cloud_config_value(name, vm_, opts, default=None, search_global=True):
    '''
    Look a setting up through the known configuration layers and return the
    last (highest priority) match, in this order:

        1. The salt cloud (global) configuration, when ``search_global`` is on
        2. The virtual machine's profile configuration
        3. The virtual machine's provider configuration
        4. The virtual machine's own configuration
        5. The supplied ``default`` when nothing else matched

    When the value collected so far is a dictionary, later layers are merged
    on top of it in place instead of replacing it outright.
    '''
    def _layer(current, candidate):
        # Dictionaries are merged in place; any other type is replaced by a
        # deep copy so callers never share state with the config dicts.
        if isinstance(current, dict):
            current.update(candidate.copy())
            return current
        return deepcopy(candidate)

    # As a last resort, return the default
    value = default
    # Lowest precedence beyond the default: the cloud (global) configuration.
    if search_global is True and opts.get(name, None) is not None:
        value = deepcopy(opts[name])
    if vm_ and name:
        # Profile layer, when the VM was spawned from a profile.
        if vm_.get('profile') is not None:
            profile_defs = opts['profiles'][vm_['profile']]
            if name in profile_defs:
                value = _layer(value, profile_defs[name])
        # Provider layer.
        if ':' in vm_['provider']:
            # Fully qualified <provider-alias>:<provider-name> form.
            alias, driver = vm_['provider'].split(':')
            providers = opts['providers']
            if alias in providers and driver in providers[alias]:
                driver_defs = providers[alias][driver]
                if name in driver_defs:
                    value = _layer(value, driver_defs[name])
        elif len(opts['providers'].get(vm_['provider'], ())) > 1:
            # Bare alias with several drivers beneath it. The lookup below
            # will blindly take the first one, so warn loudly.
            log.error(
                'The {0!r} cloud provider definition has more than one '
                'entry. Your VM configuration should be specifying the '
                'provider as \'provider: {0}:<provider-engine>\'. Since '
                'it\'s not, we\'re returning the first definition which '
                'might not be what you intended.'.format(
                    vm_['provider']
                )
            )
        if vm_['provider'] in opts['providers']:
            # Bare alias: take the first (possibly only) driver beneath it.
            alias_defs = opts['providers'].get(vm_['provider'])
            first_driver = next(iter(list(alias_defs.keys())))
            driver_defs = alias_defs[first_driver]
            if name in driver_defs:
                value = _layer(value, driver_defs[name])
    # Highest precedence: the VM's own configuration.
    if name and vm_ and name in vm_:
        value = _layer(value, vm_[name])
    return value
def is_provider_configured(opts, provider, required_keys=()):
    '''
    Check and return the first matching and fully configured cloud provider
    configuration.

    ``provider`` may be a fully qualified ``<alias>:<driver>`` string or a
    bare driver name; the bare form scans every alias for a match. Returns
    the matching configuration dict, or ``False`` when nothing qualifies.
    '''
    if ':' in provider:
        # Fully qualified <alias>:<driver> lookup.
        alias, driver = provider.split(':')
        providers = opts['providers']
        if alias not in providers or driver not in providers[alias]:
            return False
        details = providers[alias][driver]
        for key in required_keys:
            if details.get(key, None) is not None:
                continue
            # There's at least one require configuration key which is not
            # set.
            log.trace(
                'The required {0!r} configuration setting is missing on '
                'the {1!r} driver(under the {2!r} alias)'.format(
                    key, provider, alias
                )
            )
            return False
        # If we reached this far, there's a properly configured provider,
        # return it!
        return details

    # Bare driver name: scan every alias for a matching, complete driver.
    for alias, drivers in six.iteritems(opts['providers']):
        for driver, provider_details in six.iteritems(drivers):
            if driver != provider:
                continue
            # Matching driver found; verify every required key is present
            # and non-None before accepting it.
            incomplete = False
            for key in required_keys:
                if provider_details.get(key, None) is not None:
                    continue
                log.trace(
                    'The required {0!r} configuration setting is missing '
                    'on the {1!r} driver(under the {2!r} alias)'.format(
                        key, provider, alias
                    )
                )
                incomplete = True
                break
            if incomplete:
                continue
            # All required keys were present.
            return provider_details
    # If we reached this point, the provider is not configured.
    return False
# <---- Salt Cloud Configuration Functions -----------------------------------
def _cache_id(minion_id, cache_file):
    '''
    Persist the minion id to ``cache_file`` so later runs can reuse it.

    Caching is best-effort: failures to write are logged and otherwise
    ignored so id detection never aborts startup.
    '''
    try:
        with salt.utils.fopen(cache_file, 'w') as cache_fh:
            cache_fh.write(minion_id)
    except (IOError, OSError) as exc:
        log.error('Could not cache minion ID: {0}'.format(exc))
def get_id(opts, minion_id=False):
    '''
    Guess the id of the minion.

    If CONFIG_DIR/minion_id exists, use the cached minion ID from that file.
    If no minion id is configured, use multiple sources to find a FQDN.
    If no FQDN is found you may get an ip address.

    Returns two values: the detected ID, and a boolean value noting whether or
    not an IP address is being used for the ID.
    '''
    if opts['root_dir'] is None:
        root_dir = salt.syspaths.ROOT_DIR
    else:
        root_dir = opts['root_dir']
    config_dir = salt.syspaths.CONFIG_DIR
    if config_dir.startswith(salt.syspaths.ROOT_DIR):
        # Strip the packaged root so the path can be re-rooted under the
        # configured root_dir below.
        config_dir = config_dir.split(salt.syspaths.ROOT_DIR, 1)[-1]
    # Check for cached minion ID
    id_cache = os.path.join(root_dir,
                            config_dir.lstrip(os.path.sep),
                            'minion_id')
    if opts.get('minion_id_caching', True):
        try:
            with salt.utils.fopen(id_cache) as idf:
                name = idf.readline().strip()
                # NOTE(review): codecs.BOM is a bytes object; startswith on a
                # str works only under Python 2 semantics -- confirm on py3.
                if name.startswith(codecs.BOM):  # Remove BOM if exists
                    name = name.replace(codecs.BOM, '', 1)
            if name:
                log.debug('Using cached minion ID from {0}: {1}'.format(id_cache, name))
                return name, False
        except (IOError, OSError):
            # No readable cache file; fall through and detect the id.
            pass
    # BUGFIX: message previously read 'explicitly in set', garbling the hint.
    log.debug('Guessing ID. The id can be explicitly set in {0}'
              .format(os.path.join(salt.syspaths.CONFIG_DIR, 'minion')))
    newid = salt.utils.network.generate_minion_id()
    log.info('Found minion id from generate_minion_id(): {0}'.format(newid))
    if minion_id and opts.get('minion_id_caching', True):
        _cache_id(newid, id_cache)
    # Crude IPv4 heuristic: four dot-separated groups with no letters.
    is_ipv4 = newid.count('.') == 3 and not any(c.isalpha() for c in newid)
    return newid, is_ipv4
def apply_minion_config(overrides=None,
                        defaults=None,
                        minion_id=False):
    '''
    Returns minion configurations dict.

    Starts from ``defaults`` (``DEFAULT_MINION_OPTS`` when not supplied),
    layers ``overrides`` on top, then derives dependent options: minion id
    detection, extension-module and utils paths, root_dir-relative path
    expansion and scheduler setup. When ``minion_id`` is truthy the detected
    id may be written to the id cache by ``get_id``.
    '''
    if defaults is None:
        defaults = DEFAULT_MINION_OPTS
    # Work on a copy so the module-level defaults dict is never mutated.
    opts = defaults.copy()
    opts['__role'] = 'minion'
    if overrides:
        opts.update(overrides)
    # NOTE(review): heuristic -- relocate the unix socket dir under cachedir
    # when the configured path is much longer, presumably to stay inside
    # AF_UNIX socket path length limits; confirm the intent of the +10 slack.
    if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
        opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
    # No ID provided. Will getfqdn save us?
    using_ip_for_id = False
    if opts['id'] is None:
        opts['id'], using_ip_for_id = get_id(
            opts,
            minion_id=minion_id)
    # it does not make sense to append a domain to an IP based id
    if not using_ip_for_id and 'append_domain' in opts:
        opts['id'] = _append_domain(opts)
    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts['open_mode'] = opts['open_mode'] is True
    # set up the extension_modules location from the cachedir
    opts['extension_modules'] = (
        opts.get('extension_modules') or
        os.path.join(opts['cachedir'], 'extmods')
    )
    # Set up the utils_dirs location from the extension_modules location
    opts['utils_dirs'] = (
        opts.get('utils_dirs') or
        [os.path.join(opts['extension_modules'], 'utils')]
    )
    # Insert all 'utils_dirs' directories to the system path
    insert_system_path(opts, opts['utils_dirs'])
    # Prepend root_dir to other paths
    prepend_root_dirs = [
        'pki_dir', 'cachedir', 'sock_dir', 'extension_modules', 'pidfile',
    ]
    # These can be set to syslog, so, not actual paths on the system
    for config_key in ('log_file', 'key_logfile'):
        if urlparse(opts.get(config_key, '')).scheme == '':
            prepend_root_dirs.append(config_key)
    prepend_root_dir(opts, prepend_root_dirs)
    # if there is no schedule option yet, add an empty scheduler
    if 'schedule' not in opts:
        opts['schedule'] = {}
    return opts
def master_config(path, env_var='SALT_MASTER_CONFIG', defaults=None):
    '''
    Reads in the master configuration file and sets up default options

    This is useful for running the actual master daemon. For running
    Master-side client interfaces that need the master opts see
    :py:func:`salt.client.client_config`.
    '''
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS
    if not os.environ.get(env_var, None):
        # No valid setting was given using the configuration variable.
        # Let's see if SALT_CONFIG_DIR is of any use
        salt_config_dir = os.environ.get('SALT_CONFIG_DIR', None)
        if salt_config_dir:
            env_config_file_path = os.path.join(salt_config_dir, 'master')
            if salt_config_dir and os.path.isfile(env_config_file_path):
                # We can get a configuration file using SALT_CONFIG_DIR, let's
                # update the environment with this information
                os.environ[env_var] = env_config_file_path
    # Load the base config file, then fold in any files pulled in through the
    # 'default_include' and 'include' settings (later includes win).
    overrides = load_config(path, env_var, DEFAULT_MASTER_OPTS['conf_file'])
    default_include = overrides.get('default_include',
                                    defaults['default_include'])
    include = overrides.get('include', [])
    overrides.update(include_config(default_include, path, verbose=False))
    overrides.update(include_config(include, path, verbose=True))
    opts = apply_master_config(overrides, defaults)
    _validate_opts(opts)
    # If 'nodegroups:' is uncommented in the master config file, and there are
    # no nodegroups defined, opts['nodegroups'] will be None. Fix this by
    # reverting this value to the default, as if 'nodegroups:' was commented
    # out or not present.
    if opts.get('nodegroups') is None:
        opts['nodegroups'] = DEFAULT_MASTER_OPTS.get('nodegroups', {})
    # NOTE(review): the RAET transport drops the zeromq 'aes' key here,
    # presumably because RAET does its own key handling -- confirm.
    if opts.get('transport') == 'raet' and 'aes' in opts:
        opts.pop('aes')
    return opts
def apply_master_config(overrides=None, defaults=None):
    '''
    Returns master configurations dict.

    Starts from ``defaults`` (``DEFAULT_MASTER_OPTS`` when not supplied),
    layers ``overrides`` on top, then derives dependent options: master id,
    root_dir-relative path expansion, file_roots and file-ignore validation,
    and a minimum worker-thread count when the peer system is enabled.
    '''
    import salt.crypt
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS
    # Work on a copy so the module-level defaults dict is never mutated.
    opts = defaults.copy()
    opts['__role'] = 'master'
    if overrides:
        opts.update(overrides)
    # NOTE(review): heuristic -- relocate the unix socket dir under cachedir
    # when the configured path is much longer, presumably to stay inside
    # AF_UNIX socket path length limits; confirm the intent of the +10 slack.
    if len(opts['sock_dir']) > len(opts['cachedir']) + 10:
        opts['sock_dir'] = os.path.join(opts['cachedir'], '.salt-unix')
    opts['extension_modules'] = (
        opts.get('extension_modules') or
        os.path.join(opts['cachedir'], 'extmods')
    )
    opts['token_dir'] = os.path.join(opts['cachedir'], 'tokens')
    using_ip_for_id = False
    append_master = False
    if opts.get('id') is None:
        opts['id'], using_ip_for_id = get_id(
            opts,
            minion_id=None)
        append_master = True
    # it does not make sense to append a domain to an IP based id
    if not using_ip_for_id and 'append_domain' in opts:
        opts['id'] = _append_domain(opts)
    if append_master:
        opts['id'] += '_master'
    # Prepend root_dir to other paths
    prepend_root_dirs = [
        'pki_dir', 'cachedir', 'pidfile', 'sock_dir', 'extension_modules',
        'autosign_file', 'autoreject_file', 'token_dir', 'sqlite_queue_dir'
    ]
    # These can be set to syslog, so, not actual paths on the system
    for config_key in ('log_file', 'key_logfile'):
        log_setting = opts.get(config_key, '')
        if log_setting is None:
            continue
        if urlparse(log_setting).scheme == '':
            prepend_root_dirs.append(config_key)
    prepend_root_dir(opts, prepend_root_dirs)
    # Enabling open mode requires that the value be set to True, and
    # nothing else!
    opts['open_mode'] = opts['open_mode'] is True
    opts['auto_accept'] = opts['auto_accept'] is True
    opts['file_roots'] = _validate_file_roots(opts)
    if opts['file_ignore_regex']:
        # If file_ignore_regex was given, make sure it's wrapped in a list.
        # Only keep valid regex entries for improved performance later on.
        if isinstance(opts['file_ignore_regex'], list):
            ignore_regex = opts['file_ignore_regex']
        else:
            # BUGFIX: previously only `str` values were wrapped here, so any
            # other truthy type (unicode on Python 2, a tuple, ...) left
            # `ignore_regex` unbound and crashed the loop below with
            # UnboundLocalError. Wrap everything and let the compile check
            # below weed out invalid entries.
            ignore_regex = [opts['file_ignore_regex']]
        opts['file_ignore_regex'] = []
        for regex in ignore_regex:
            try:
                # Can't store compiled regex itself in opts (breaks
                # serialization)
                re.compile(regex)
                opts['file_ignore_regex'].append(regex)
            except Exception:
                log.warning(
                    'Unable to parse file_ignore_regex. Skipping: {0}'.format(
                        regex
                    )
                )
    if opts['file_ignore_glob']:
        # If file_ignore_glob was given, make sure it's wrapped in a list.
        # NOTE(review): a Python 2 unicode value would slip through this
        # isinstance(str) check unwrapped -- confirm downstream tolerance.
        if isinstance(opts['file_ignore_glob'], str):
            opts['file_ignore_glob'] = [opts['file_ignore_glob']]
    # Let's make sure `worker_threads` does not drop below 3 which has proven
    # to make `salt.modules.publish` not work under the test-suite.
    if opts['worker_threads'] < 3 and opts.get('peer', None):
        log.warning(
            'The \'worker_threads\' setting on {0!r} cannot be lower than 3. '
            'Resetting it to the default value of 3.'.format(
                opts['conf_file']
            )
        )
        opts['worker_threads'] = 3
    opts.setdefault('pillar_source_merging_strategy', 'smart')
    return opts
def client_config(path, env_var='SALT_CLIENT_CONFIG', defaults=None):
    '''
    Load Master configuration data

    Usage:

    .. code-block:: python

        import salt.config
        master_opts = salt.config.client_config('/etc/salt/master')

    Returns a dictionary of the Salt Master configuration file with necessary
    options needed to communicate with a locally-running Salt Master daemon.
    This function searches for client specific configurations and adds them to
    the data from the master configuration.

    This is useful for master-side operations like
    :py:class:`~salt.client.LocalClient`.
    '''
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS
    # Prefer the XDG config directory when it exists; otherwise fall back to
    # a dot file in the user's home directory.
    xdg_dir = salt.utils.xdg.xdg_config_dir()
    if os.path.isdir(xdg_dir):
        client_config_dir = xdg_dir
        saltrc_config_file = 'saltrc'
    else:
        client_config_dir = '~'
        saltrc_config_file = '.saltrc'
    # Get the token file path from the provided defaults. If not found, specify
    # our own, sane, default
    opts = {
        'token_file': defaults.get(
            'token_file',
            os.path.join(client_config_dir, 'salt_token')
        )
    }
    # Update options with the master configuration, either from the provided
    # path, salt's defaults or provided defaults
    opts.update(
        master_config(path, defaults=defaults)
    )
    # Update with the users salt dot file or with the environment variable
    saltrc_config = os.path.join(client_config_dir, saltrc_config_file)
    opts.update(
        load_config(
            saltrc_config,
            env_var,
            saltrc_config
        )
    )
    # Make sure we have a proper and absolute path to the token file
    if 'token_file' in opts:
        opts['token_file'] = os.path.abspath(
            os.path.expanduser(
                opts['token_file']
            )
        )
    # If the token file exists, read and store the contained token
    if os.path.isfile(opts['token_file']):
        # Make sure token is still valid: compare the file's mtime plus the
        # configured expiry (default 43200s = 12h) against the current time.
        expire = opts.get('token_expire', 43200)
        if os.stat(opts['token_file']).st_mtime + expire > time.mktime(time.localtime()):
            with salt.utils.fopen(opts['token_file']) as fp_:
                opts['token'] = fp_.read().strip()
    # On some platforms, like OpenBSD, 0.0.0.0 won't catch a master running on localhost
    if opts['interface'] == '0.0.0.0':
        opts['interface'] = '127.0.0.1'
    # Make sure the master_uri is set
    if 'master_uri' not in opts:
        opts['master_uri'] = 'tcp://{ip}:{port}'.format(
            ip=salt.utils.ip_bracket(opts['interface']),
            port=opts['ret_port']
        )
    # Return the client options
    _validate_opts(opts)
    return opts
def api_config(path):
    '''
    Read in the salt master config file and add additional configs that
    need to be stubbed out for salt-api
    '''
    # Let's grab a copy of salt's master default opts.
    # BUGFIX: take an actual copy. Binding the module-level dict directly and
    # then calling .update() on it leaked the salt-api defaults into
    # DEFAULT_MASTER_OPTS for every later consumer of this module.
    defaults = DEFAULT_MASTER_OPTS.copy()
    # Let's override them with salt-api's required defaults
    defaults.update(DEFAULT_API_OPTS)
    return client_config(path, defaults=defaults)
| 35.296181 | 94 | 0.59489 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.