code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
from ....pgrest import *
from ...constants import Constants
from ..rcconstants import REDCapConstants
from ..rcaptable import RcapTable
__all__ = ["RcapDailyItems6MoV036MonthDaily"]
class RcapDailyItems6MoV036MonthDaily(RcapTable):
"""Daily Items 6 Mo V03 6Month Daily"""
__redcap_form_name = "daily_items_6_mo_v03_6month_daily"
daily_items_6_mo_v03_6month_daily_id = Constants.SERIAL_PRIMARY_KEY_COLUMN
daily_items_6_mo_v03_6month_daily_complete = Column(
Integer, ForeignKey("status.status_id")
)
# 1. Please rate your knee pain by choosing the number that bes...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6moworstkneepainscl = Column(Integer, nullable=True, comments=None)
# 2. Please rate your knee pain by choosing the number that bes...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6moavgkneepainscl = Column(Integer, nullable=True, comments=None)
# 3. Please rate how much your knee pain has interfered with yo...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6mokneepaininterscl = Column(Integer, nullable=True, comments=None)
# Please rate the overal QUALITY of your SLEEP in the past 24 h...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6mosleepqualscl = Column(Integer, nullable=True, comments=None)
# During the past 24 hours, how physically active were you?
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6mophysactscl = Column(Integer, nullable=True, comments=None)
# During the past 24 hours, did you take any kind of medication...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mopainmeduseyn = Column(Boolean, nullable=True, comments=None)
# Over-the-counter pain relievers (e.g., acetaminophen Tylenol,...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mootcuseyn = Column(Boolean, nullable=True, comments=None)
# Opioid pain relievers (e.g., oxycodone, Percocet, Nucynta, ta...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6moopiateuseyn = Column(Boolean, nullable=True, comments=None)
# THC/CBD or marijuana products (edibles, gummies, CBD oil, wee...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mocannabuseyn = Column(Boolean, nullable=True, comments=None)
# Gabapentin or pregabalin (Neurontin, Lyrica, etc)
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mogabapuseyn = Column(Boolean, nullable=True, comments=None)
# Duloxetine (Cymbalta) or venlafaxine (Wellbutrin)
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6moduloxuseyn = Column(Boolean, nullable=True, comments=None)
# Other, not specified above
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mootheruseyn = Column(Boolean, nullable=True, comments=None) | src/vbr/tableclasses/redcap/autogenerated/daily_items_6_mo_v03_6month_daily.py | from ....pgrest import *
from ...constants import Constants
from ..rcconstants import REDCapConstants
from ..rcaptable import RcapTable
__all__ = ["RcapDailyItems6MoV036MonthDaily"]
class RcapDailyItems6MoV036MonthDaily(RcapTable):
"""Daily Items 6 Mo V03 6Month Daily"""
__redcap_form_name = "daily_items_6_mo_v03_6month_daily"
daily_items_6_mo_v03_6month_daily_id = Constants.SERIAL_PRIMARY_KEY_COLUMN
daily_items_6_mo_v03_6month_daily_complete = Column(
Integer, ForeignKey("status.status_id")
)
# 1. Please rate your knee pain by choosing the number that bes...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6moworstkneepainscl = Column(Integer, nullable=True, comments=None)
# 2. Please rate your knee pain by choosing the number that bes...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6moavgkneepainscl = Column(Integer, nullable=True, comments=None)
# 3. Please rate how much your knee pain has interfered with yo...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6mokneepaininterscl = Column(Integer, nullable=True, comments=None)
# Please rate the overal QUALITY of your SLEEP in the past 24 h...
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6mosleepqualscl = Column(Integer, nullable=True, comments=None)
# During the past 24 hours, how physically active were you?
# Field Type: radio
# Choices: 0, 0 | 1, 1 | 2, 2 | 3, 3 | 4, 4 | 5, 5 | 6, 6 | 7, 7 | 8, 8 | 9, 9 | 10, 10
traj6mophysactscl = Column(Integer, nullable=True, comments=None)
# During the past 24 hours, did you take any kind of medication...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mopainmeduseyn = Column(Boolean, nullable=True, comments=None)
# Over-the-counter pain relievers (e.g., acetaminophen Tylenol,...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mootcuseyn = Column(Boolean, nullable=True, comments=None)
# Opioid pain relievers (e.g., oxycodone, Percocet, Nucynta, ta...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6moopiateuseyn = Column(Boolean, nullable=True, comments=None)
# THC/CBD or marijuana products (edibles, gummies, CBD oil, wee...
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mocannabuseyn = Column(Boolean, nullable=True, comments=None)
# Gabapentin or pregabalin (Neurontin, Lyrica, etc)
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mogabapuseyn = Column(Boolean, nullable=True, comments=None)
# Duloxetine (Cymbalta) or venlafaxine (Wellbutrin)
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6moduloxuseyn = Column(Boolean, nullable=True, comments=None)
# Other, not specified above
# Field Type: radio
# Choices: 1, Yes | 0, No
traj6mootheruseyn = Column(Boolean, nullable=True, comments=None) | 0.510496 | 0.370453 |
import logging
from json import JSONDecodeError
from celery import shared_task
from django.core.cache import cache
from nacl.exceptions import BadSignatureError
from thenewboston.blocks.signatures import verify_signature
from thenewboston.utils.format import format_address
from thenewboston.utils.messages import get_message_hash
from thenewboston.utils.network import fetch
from thenewboston.utils.tools import sort_and_encode
from v1.cache_tools.cache_keys import HEAD_BLOCK_HASH
from v1.confirmation_blocks.serializers.confirmation_block import ConfirmationBlockSerializerCreate
from .confirmation_block_queue import process_confirmation_block_queue
logger = logging.getLogger('thenewboston')
"""
Functions used by confirmation validators when syncing with a primary validator
Logic handles:
- initial sync (when the confirmation validator first comes online)
- syncing to new primary validator (when directed by most trusted bank)
"""
def get_confirmation_block(*, address, block_identifier):
"""
Return confirmation block chain segment
"""
url = f'{address}/confirmation_blocks/{block_identifier}'
results = fetch(url=url, headers={})
return results
def get_confirmation_block_chain_segment(*, address, block_identifier):
"""
Return confirmation block chain segment
"""
url = f'{address}/confirmation_block_chain_segment/{block_identifier}'
try:
results = fetch(url=url, headers={})
return results
except JSONDecodeError:
return []
except Exception as e:
print(e)
return []
def get_confirmation_block_from_results(*, block_identifier, results):
"""
Return the confirmation block from results list
"""
return next((i for i in results if i['message']['block_identifier'] == block_identifier), None)
def populate_confirmation_block_queue(*, address, error_handler, initial_block_identifier):
"""
Fetch confirmation blocks from primary validator starting with initial_block_identifier
Add all confirmation blocks to confirmation block queue
"""
block_identifier = initial_block_identifier
results = get_confirmation_block_chain_segment(address=address, block_identifier=block_identifier)
error = False
while results and not error:
confirmation_block = get_confirmation_block_from_results(
block_identifier=block_identifier,
results=results
)
while confirmation_block:
message = confirmation_block['message']
try:
verify_signature(
message=sort_and_encode(message),
signature=confirmation_block['signature'],
verify_key=confirmation_block['node_identifier']
)
except BadSignatureError as e:
error_handler(e)
error = True
break
except Exception as e:
error_handler(e)
error = True
break
serializer = ConfirmationBlockSerializerCreate(data=message)
if serializer.is_valid():
_bid = serializer.save()
print(_bid)
else:
error_handler(serializer.errors)
error = True
break
block_identifier = get_message_hash(message=message)
confirmation_block = get_confirmation_block_from_results(
block_identifier=block_identifier,
results=results
)
if error:
break
results = get_confirmation_block_chain_segment(address=address, block_identifier=block_identifier)
@shared_task
def sync_to_new_primary_validator(*, ip_address, port, protocol):
"""
Sync to new primary validator (as directed by most trusted bank)
"""
address = format_address(
ip_address=ip_address,
port=port,
protocol=protocol
)
populate_confirmation_block_queue(
address=address,
error_handler=logger.exception,
initial_block_identifier=cache.get(HEAD_BLOCK_HASH)
)
process_confirmation_block_queue() | v1/tasks/sync.py | import logging
from json import JSONDecodeError
from celery import shared_task
from django.core.cache import cache
from nacl.exceptions import BadSignatureError
from thenewboston.blocks.signatures import verify_signature
from thenewboston.utils.format import format_address
from thenewboston.utils.messages import get_message_hash
from thenewboston.utils.network import fetch
from thenewboston.utils.tools import sort_and_encode
from v1.cache_tools.cache_keys import HEAD_BLOCK_HASH
from v1.confirmation_blocks.serializers.confirmation_block import ConfirmationBlockSerializerCreate
from .confirmation_block_queue import process_confirmation_block_queue
logger = logging.getLogger('thenewboston')
"""
Functions used by confirmation validators when syncing with a primary validator
Logic handles:
- initial sync (when the confirmation validator first comes online)
- syncing to new primary validator (when directed by most trusted bank)
"""
def get_confirmation_block(*, address, block_identifier):
"""
Return confirmation block chain segment
"""
url = f'{address}/confirmation_blocks/{block_identifier}'
results = fetch(url=url, headers={})
return results
def get_confirmation_block_chain_segment(*, address, block_identifier):
"""
Return confirmation block chain segment
"""
url = f'{address}/confirmation_block_chain_segment/{block_identifier}'
try:
results = fetch(url=url, headers={})
return results
except JSONDecodeError:
return []
except Exception as e:
print(e)
return []
def get_confirmation_block_from_results(*, block_identifier, results):
"""
Return the confirmation block from results list
"""
return next((i for i in results if i['message']['block_identifier'] == block_identifier), None)
def populate_confirmation_block_queue(*, address, error_handler, initial_block_identifier):
"""
Fetch confirmation blocks from primary validator starting with initial_block_identifier
Add all confirmation blocks to confirmation block queue
"""
block_identifier = initial_block_identifier
results = get_confirmation_block_chain_segment(address=address, block_identifier=block_identifier)
error = False
while results and not error:
confirmation_block = get_confirmation_block_from_results(
block_identifier=block_identifier,
results=results
)
while confirmation_block:
message = confirmation_block['message']
try:
verify_signature(
message=sort_and_encode(message),
signature=confirmation_block['signature'],
verify_key=confirmation_block['node_identifier']
)
except BadSignatureError as e:
error_handler(e)
error = True
break
except Exception as e:
error_handler(e)
error = True
break
serializer = ConfirmationBlockSerializerCreate(data=message)
if serializer.is_valid():
_bid = serializer.save()
print(_bid)
else:
error_handler(serializer.errors)
error = True
break
block_identifier = get_message_hash(message=message)
confirmation_block = get_confirmation_block_from_results(
block_identifier=block_identifier,
results=results
)
if error:
break
results = get_confirmation_block_chain_segment(address=address, block_identifier=block_identifier)
@shared_task
def sync_to_new_primary_validator(*, ip_address, port, protocol):
"""
Sync to new primary validator (as directed by most trusted bank)
"""
address = format_address(
ip_address=ip_address,
port=port,
protocol=protocol
)
populate_confirmation_block_queue(
address=address,
error_handler=logger.exception,
initial_block_identifier=cache.get(HEAD_BLOCK_HASH)
)
process_confirmation_block_queue() | 0.528777 | 0.173113 |
from django.shortcuts import render
from rest_framework import generics
from authentication import models
from .serializers import UsuarioSerializer, UsuarioSigninSerializer
from . import serializers
from .authlog import token_expire_handler, expires_in
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK,
)
from rest_framework.response import Response
class ListUsuario(generics.ListCreateAPIView):
queryset = models.Usuario.objects.all()
serializer_class = serializers.UsuarioSerializer
class DetailUsuario(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Usuario.objects.all()
serializer_class = serializers.UsuarioSerializer
@api_view(["POST"])
@permission_classes((AllowAny,)) # here we specify permission by default we set IsAuthenticated
def signin(request):
signin_serializer = UsuarioSigninSerializer(data = request.data)
if not signin_serializer.is_valid():
return Response(signin_serializer.errors, status = HTTP_400_BAD_REQUEST)
user = authenticate(
username = signin_serializer.data['username'],
password = signin_serializer.data['password']
)
if not user:
return Response({'detail': 'Invalid Credentials or activate account'}, status=HTTP_404_NOT_FOUND)
#TOKEN STUFF
token, _ = Token.objects.get_or_create(user = user)
#token_expire_handler will check, if the token is expired it will generate new one
is_expired, token = token_expire_handler(token) # The implementation will be described further
user_serialized = UsuarioSerializer(user)
return Response({
'user': user_serialized.data,
'expires_in': expires_in(token),
'token': token.key
}, status=HTTP_200_OK) | firstdjango/api/views.py | from django.shortcuts import render
from rest_framework import generics
from authentication import models
from .serializers import UsuarioSerializer, UsuarioSigninSerializer
from . import serializers
from .authlog import token_expire_handler, expires_in
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import AllowAny
from rest_framework.status import (
HTTP_400_BAD_REQUEST,
HTTP_404_NOT_FOUND,
HTTP_200_OK,
)
from rest_framework.response import Response
class ListUsuario(generics.ListCreateAPIView):
queryset = models.Usuario.objects.all()
serializer_class = serializers.UsuarioSerializer
class DetailUsuario(generics.RetrieveUpdateDestroyAPIView):
queryset = models.Usuario.objects.all()
serializer_class = serializers.UsuarioSerializer
@api_view(["POST"])
@permission_classes((AllowAny,)) # here we specify permission by default we set IsAuthenticated
def signin(request):
signin_serializer = UsuarioSigninSerializer(data = request.data)
if not signin_serializer.is_valid():
return Response(signin_serializer.errors, status = HTTP_400_BAD_REQUEST)
user = authenticate(
username = signin_serializer.data['username'],
password = signin_serializer.data['password']
)
if not user:
return Response({'detail': 'Invalid Credentials or activate account'}, status=HTTP_404_NOT_FOUND)
#TOKEN STUFF
token, _ = Token.objects.get_or_create(user = user)
#token_expire_handler will check, if the token is expired it will generate new one
is_expired, token = token_expire_handler(token) # The implementation will be described further
user_serialized = UsuarioSerializer(user)
return Response({
'user': user_serialized.data,
'expires_in': expires_in(token),
'token': token.key
}, status=HTTP_200_OK) | 0.416678 | 0.067886 |
import logging
import time
import threading
import kombu
import socket
from umbra.browser import BrowserPool, BrowsingException
class AmqpBrowserController:
"""
Consumes amqp messages representing requests to browse urls, from the
specified amqp queue (default: "urls") on the specified amqp exchange
(default: "umbra"). Incoming amqp message is a json object with 3
attributes:
{
"clientId": "umbra.client.123",
"url": "http://example.com/my_fancy_page",
"metadata": {"arbitrary":"fields", "etc":4}
}
"url" is the url to browse.
"clientId" uniquely identifies the client of umbra. Umbra uses the clientId
as the amqp routing key, to direct information via amqp back to the client.
It sends this information on the same specified amqp exchange (default:
"umbra").
Each url requested in the browser is published to amqp this way. The
outgoing amqp message is a json object:
{
"url": "http://example.com/images/embedded_thing.jpg",
"method": "GET",
"headers": {"User-Agent": "...", "Accept": "...", ...},
"parentUrl": "http://example.com/my_fancy_page",
"parentUrlMetadata": {"arbitrary":"fields", "etc":4, ...}
}
POST requests have an additional field, postData.
"""
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, amqp_url='amqp://guest:guest@localhost:5672/%2f',
chrome_exe='chromium-browser', max_active_browsers=1,
queue_name='urls', exchange_name='umbra', routing_key='urls'):
self.amqp_url = amqp_url
self.queue_name = queue_name
self.exchange_name = exchange_name
self.routing_key = routing_key
self.max_active_browsers = max_active_browsers
self._browser_pool = BrowserPool(size=max_active_browsers, chrome_exe=chrome_exe)
def start(self):
self._browsing_threads = set()
self._browsing_threads_lock = threading.Lock()
self._exchange = kombu.Exchange(name=self.exchange_name, type='direct',
durable=True)
self._reconnect_requested = False
self._producer = None
self._producer_lock = threading.Lock()
with self._producer_lock:
self._producer_conn = kombu.Connection(self.amqp_url)
self._producer = self._producer_conn.Producer(serializer='json')
self._consumer_thread = threading.Thread(target=self._consume_amqp, name='AmqpConsumerThread')
self._consumer_stop = threading.Event()
self._consumer_thread.start()
def shutdown(self):
self.logger.info("shutting down amqp consumer {}".format(self.amqp_url))
self._consumer_stop.set()
self._consumer_thread.join()
def shutdown_now(self):
self._consumer_stop.set()
self._browser_pool.shutdown_now()
self._consumer_thread.join()
def reconnect(self, *args, **kwargs):
self._reconnect_requested = True
self._browser_pool.shutdown_now()
def _wait_for_and_browse_urls(self, conn, consumer, timeout):
start = time.time()
browser = None
consumer.qos(prefetch_count=self.max_active_browsers)
while not self._consumer_stop.is_set() and time.time() - start < timeout and not self._reconnect_requested:
try:
browser = self._browser_pool.acquire() # raises KeyError if none available
browser.start()
def callback(body, message):
try:
client_id, url, metadata = body['clientId'], body['url'], body['metadata']
except:
self.logger.error("unable to decipher message {}".format(message), exc_info=True)
self.logger.error("discarding bad message")
message.reject()
browser.stop()
self._browser_pool.release(browser)
return
self._start_browsing_page(browser, message, client_id, url, metadata)
consumer.callbacks = [callback]
while True:
try:
conn.drain_events(timeout=0.5)
break # out of "while True" to acquire another browser
except socket.timeout:
pass
except socket.error:
self.logger.error("problem consuming messages from AMQP, will try reconnecting after active browsing finishes", exc_info=True)
self._reconnect_requested = True
if self._consumer_stop.is_set() or time.time() - start >= timeout or self._reconnect_requested:
browser.stop()
self._browser_pool.release(browser)
break
except KeyError:
# no browsers available
time.sleep(0.5)
except:
self.logger.critical("problem with browser initialization", exc_info=True)
time.sleep(0.5)
finally:
consumer.callbacks = None
def _wait_for_active_browsers(self):
self.logger.info("waiting for browsing threads to finish")
while True:
with self._browsing_threads_lock:
if len(self._browsing_threads) == 0:
break
time.sleep(0.5)
self.logger.info("active browsing threads finished")
def _consume_amqp(self):
# XXX https://webarchive.jira.com/browse/ARI-3811
# After running for some amount of time (3 weeks in the latest case),
# consumer looks normal but doesn't consume any messages. Not clear if
# it's hanging in drain_events() or not. As a temporary measure for
# mitigation (if it works) or debugging (if it doesn't work), close and
# reopen the connection every 2.5 hours
RECONNECT_AFTER_SECONDS = 150 * 60
url_queue = kombu.Queue(self.queue_name, exchange=self._exchange, routing_key=self.routing_key)
while not self._consumer_stop.is_set():
try:
self.logger.info("connecting to amqp exchange={} at {}".format(self._exchange.name, self.amqp_url))
self._reconnect_requested = False
with kombu.Connection(self.amqp_url) as conn:
with conn.Consumer(url_queue) as consumer:
self._wait_for_and_browse_urls(conn, consumer, timeout=RECONNECT_AFTER_SECONDS)
# need to wait for browsers to finish here, before closing
# the amqp connection, because they use it to do
# message.ack() after they finish browsing a page
self._wait_for_active_browsers()
except BaseException as e:
self.logger.error("caught exception {}".format(e), exc_info=True)
time.sleep(0.5)
self.logger.error("attempting to reopen amqp connection")
def _start_browsing_page(self, browser, message, client_id, url, parent_url_metadata):
def on_request(chrome_msg):
payload = chrome_msg['params']['request']
payload['parentUrl'] = url
payload['parentUrlMetadata'] = parent_url_metadata
self.logger.debug('sending to amqp exchange={} routing_key={} payload={}'.format(self.exchange_name, client_id, payload))
with self._producer_lock:
publish = self._producer_conn.ensure(self._producer, self._producer.publish)
publish(payload, exchange=self._exchange, routing_key=client_id)
def browse_page_sync():
self.logger.info('browser={} client_id={} url={}'.format(browser, client_id, url))
try:
browser.browse_page(url, on_request=on_request)
message.ack()
except BrowsingException as e:
self.logger.warn("browsing did not complete normally, requeuing url {} - {}".format(url, e))
message.requeue()
except:
self.logger.critical("problem browsing page, requeuing url {}, may have lost browser process".format(url), exc_info=True)
message.requeue()
finally:
browser.stop()
self._browser_pool.release(browser)
def browse_thread_run_then_cleanup():
browse_page_sync()
with self._browsing_threads_lock:
self._browsing_threads.remove(threading.current_thread())
import random
thread_name = "BrowsingThread{}-{}".format(browser.chrome_port,
''.join((random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(6))))
th = threading.Thread(target=browse_thread_run_then_cleanup, name=thread_name)
with self._browsing_threads_lock:
self._browsing_threads.add(th)
th.start() | umbra/controller.py |
import logging
import time
import threading
import kombu
import socket
from umbra.browser import BrowserPool, BrowsingException
class AmqpBrowserController:
"""
Consumes amqp messages representing requests to browse urls, from the
specified amqp queue (default: "urls") on the specified amqp exchange
(default: "umbra"). Incoming amqp message is a json object with 3
attributes:
{
"clientId": "umbra.client.123",
"url": "http://example.com/my_fancy_page",
"metadata": {"arbitrary":"fields", "etc":4}
}
"url" is the url to browse.
"clientId" uniquely identifies the client of umbra. Umbra uses the clientId
as the amqp routing key, to direct information via amqp back to the client.
It sends this information on the same specified amqp exchange (default:
"umbra").
Each url requested in the browser is published to amqp this way. The
outgoing amqp message is a json object:
{
"url": "http://example.com/images/embedded_thing.jpg",
"method": "GET",
"headers": {"User-Agent": "...", "Accept": "...", ...},
"parentUrl": "http://example.com/my_fancy_page",
"parentUrlMetadata": {"arbitrary":"fields", "etc":4, ...}
}
POST requests have an additional field, postData.
"""
logger = logging.getLogger(__module__ + "." + __qualname__)
def __init__(self, amqp_url='amqp://guest:guest@localhost:5672/%2f',
chrome_exe='chromium-browser', max_active_browsers=1,
queue_name='urls', exchange_name='umbra', routing_key='urls'):
self.amqp_url = amqp_url
self.queue_name = queue_name
self.exchange_name = exchange_name
self.routing_key = routing_key
self.max_active_browsers = max_active_browsers
self._browser_pool = BrowserPool(size=max_active_browsers, chrome_exe=chrome_exe)
def start(self):
self._browsing_threads = set()
self._browsing_threads_lock = threading.Lock()
self._exchange = kombu.Exchange(name=self.exchange_name, type='direct',
durable=True)
self._reconnect_requested = False
self._producer = None
self._producer_lock = threading.Lock()
with self._producer_lock:
self._producer_conn = kombu.Connection(self.amqp_url)
self._producer = self._producer_conn.Producer(serializer='json')
self._consumer_thread = threading.Thread(target=self._consume_amqp, name='AmqpConsumerThread')
self._consumer_stop = threading.Event()
self._consumer_thread.start()
def shutdown(self):
self.logger.info("shutting down amqp consumer {}".format(self.amqp_url))
self._consumer_stop.set()
self._consumer_thread.join()
def shutdown_now(self):
self._consumer_stop.set()
self._browser_pool.shutdown_now()
self._consumer_thread.join()
def reconnect(self, *args, **kwargs):
self._reconnect_requested = True
self._browser_pool.shutdown_now()
def _wait_for_and_browse_urls(self, conn, consumer, timeout):
start = time.time()
browser = None
consumer.qos(prefetch_count=self.max_active_browsers)
while not self._consumer_stop.is_set() and time.time() - start < timeout and not self._reconnect_requested:
try:
browser = self._browser_pool.acquire() # raises KeyError if none available
browser.start()
def callback(body, message):
try:
client_id, url, metadata = body['clientId'], body['url'], body['metadata']
except:
self.logger.error("unable to decipher message {}".format(message), exc_info=True)
self.logger.error("discarding bad message")
message.reject()
browser.stop()
self._browser_pool.release(browser)
return
self._start_browsing_page(browser, message, client_id, url, metadata)
consumer.callbacks = [callback]
while True:
try:
conn.drain_events(timeout=0.5)
break # out of "while True" to acquire another browser
except socket.timeout:
pass
except socket.error:
self.logger.error("problem consuming messages from AMQP, will try reconnecting after active browsing finishes", exc_info=True)
self._reconnect_requested = True
if self._consumer_stop.is_set() or time.time() - start >= timeout or self._reconnect_requested:
browser.stop()
self._browser_pool.release(browser)
break
except KeyError:
# no browsers available
time.sleep(0.5)
except:
self.logger.critical("problem with browser initialization", exc_info=True)
time.sleep(0.5)
finally:
consumer.callbacks = None
def _wait_for_active_browsers(self):
self.logger.info("waiting for browsing threads to finish")
while True:
with self._browsing_threads_lock:
if len(self._browsing_threads) == 0:
break
time.sleep(0.5)
self.logger.info("active browsing threads finished")
def _consume_amqp(self):
# XXX https://webarchive.jira.com/browse/ARI-3811
# After running for some amount of time (3 weeks in the latest case),
# consumer looks normal but doesn't consume any messages. Not clear if
# it's hanging in drain_events() or not. As a temporary measure for
# mitigation (if it works) or debugging (if it doesn't work), close and
# reopen the connection every 2.5 hours
RECONNECT_AFTER_SECONDS = 150 * 60
url_queue = kombu.Queue(self.queue_name, exchange=self._exchange, routing_key=self.routing_key)
while not self._consumer_stop.is_set():
try:
self.logger.info("connecting to amqp exchange={} at {}".format(self._exchange.name, self.amqp_url))
self._reconnect_requested = False
with kombu.Connection(self.amqp_url) as conn:
with conn.Consumer(url_queue) as consumer:
self._wait_for_and_browse_urls(conn, consumer, timeout=RECONNECT_AFTER_SECONDS)
# need to wait for browsers to finish here, before closing
# the amqp connection, because they use it to do
# message.ack() after they finish browsing a page
self._wait_for_active_browsers()
except BaseException as e:
self.logger.error("caught exception {}".format(e), exc_info=True)
time.sleep(0.5)
self.logger.error("attempting to reopen amqp connection")
def _start_browsing_page(self, browser, message, client_id, url, parent_url_metadata):
def on_request(chrome_msg):
payload = chrome_msg['params']['request']
payload['parentUrl'] = url
payload['parentUrlMetadata'] = parent_url_metadata
self.logger.debug('sending to amqp exchange={} routing_key={} payload={}'.format(self.exchange_name, client_id, payload))
with self._producer_lock:
publish = self._producer_conn.ensure(self._producer, self._producer.publish)
publish(payload, exchange=self._exchange, routing_key=client_id)
def browse_page_sync():
self.logger.info('browser={} client_id={} url={}'.format(browser, client_id, url))
try:
browser.browse_page(url, on_request=on_request)
message.ack()
except BrowsingException as e:
self.logger.warn("browsing did not complete normally, requeuing url {} - {}".format(url, e))
message.requeue()
except:
self.logger.critical("problem browsing page, requeuing url {}, may have lost browser process".format(url), exc_info=True)
message.requeue()
finally:
browser.stop()
self._browser_pool.release(browser)
def browse_thread_run_then_cleanup():
browse_page_sync()
with self._browsing_threads_lock:
self._browsing_threads.remove(threading.current_thread())
import random
thread_name = "BrowsingThread{}-{}".format(browser.chrome_port,
''.join((random.choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(6))))
th = threading.Thread(target=browse_thread_run_then_cleanup, name=thread_name)
with self._browsing_threads_lock:
self._browsing_threads.add(th)
th.start() | 0.597138 | 0.091788 |
import asyncio
import itertools
import functools
import math
import re
from attr import __description__
import discord
import os
from discord.ext import tasks, commands
from discord.ext.commands.errors import MissingPermissions
from discord.user import Profile
from discord.utils import get
from dns.message import Message
import aiomysql
import random
import pyowm
import threading
import time
import datetime
import dbl
class Utilities(commands.Cog):
    """General-purpose utility commands: liveness check, user IDs, bot info,
    weather lookups, coin flips, the changelog and random integers."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(name = "alive",
                      description = "A simple check for bot responsiveness. If you get no response, there may be an issue with permissions or the bot itself.",
                      brief = "A simple check for bot responsiveness.",
                      aliases = ["hb", "heartbeat"])
    async def alive(self, ctx):
        """Reply with a fixed message so users can confirm the bot is responsive."""
        await ctx.send(ctx.message.author.mention + " I'm alive")

    @commands.command(name = "id",
                      description = "Returns the User ID of a specified user when @mentioned. If no user is @mentioned, it returns your ID.",
                      brief = "Returns the User ID of a specified user.")
    async def id(self, ctx, user: discord.User = None):
        """Send the caller's own ID, or the ID of the @mentioned user."""
        # No argument given, or the caller @mentioned themselves
        # ('is None' replaces the '== None' comparison; PEP 8).
        if user is None or user.id == ctx.message.author.id:
            await ctx.send(ctx.message.author.mention + ", your User ID is: "+ str(ctx.message.author.id))
        else:
            # The user passed something other than a reference to themselves.
            try:
                await ctx.send(user.display_name+"'s ID is: "+ str(user.id))
            except Exception:
                # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
                # are no longer swallowed.
                raise self.bot.BOTrasedError("402 Sorry, there was an issue getting that user's ID. Check that the account of the user you are obtaining the ID of hasn't deleted their account. Please note that only @mentions will be taken as valid arguments, and that @role mentions will not work.")

    @commands.command(name = "info",
                      description = "Displays information about BOTrased including the name, a description, server count and the creator of the bot.",
                      brief = "Displays information about BOTrased.",
                      aliases = ["i"])
    async def info(self, ctx):
        """Build and send an embed describing the bot and its creator."""
        # Fetch the owner's profile using the configured owner ID.
        ownerProfile = await self.bot.fetch_user(int(self.bot.ownerID))
        embed = discord.Embed(title = "BOTrased", description = "A Discord Bot written entirely in Python.", colour = discord.Colour.dark_purple())
        # Large image: the owner's avatar; thumbnail: the bot's own avatar.
        embed.set_image(url = str(ownerProfile.avatar_url))
        embed.set_thumbnail(url = str(self.bot.user.avatar_url))
        # Server count, invite/support/vote links, and the creator's tag.
        embed.add_field(name = "Currently serving:", value = str(len(self.bot.guilds)) + " servers", inline = False)
        embed.add_field(name="Invite Bot", value="[Invite link](https://discord.com/oauth2/authorize?client_id=541373621873016866&scope=bot&permissions=439610486)")
        embed.add_field(name = "Support Server", value = "[Server Invite](https://discord.gg/KUSWws6XAA)")
        embed.add_field(name = "Vote", value = "[Vote for BOTrased](https://top.gg/bot/541373621873016866/vote)")
        embed.add_field(name = "Creator", value = ownerProfile.display_name+"#"+ownerProfile.discriminator, inline = False)
        await ctx.send(embed = embed)

    @commands.command(name = "weather",
                      description = "Gets the weather for a specified location and displays it as an embed.",
                      brief = "Check the weather for a specified location.")
    async def weather(self, ctx, *, location = None):
        """Look up current weather for `location` via pyowm and send it as an embed."""
        if location is None:
            raise self.bot.BOTrasedError("403")
        try:
            weather = self.bot.mgr.weather_at_place(location)
        except Exception:
            # pyowm raises library-specific errors for unknown locations or
            # API failures; surface them all as a generic 500.
            raise self.bot.BOTrasedError("500")
        data = weather.weather
        # Visibility is reported in metres; convert to kilometres.
        distance = int(data.visibility_distance)/1000
        embed = discord.Embed(title = "Weather for " + location.title())
        embed.set_thumbnail(url = data.weather_icon_url(size = '4x'))
        embed.add_field(name = (data.detailed_status.title()), value = "\u200b", inline = False)
        embed.add_field(name = "Temperature:", value = str(data.temperature(unit = 'celsius')['temp']) + "°C", inline = False)
        embed.add_field(name = "Humidity:", value = str(data.humidity)+"%", inline=False)
        embed.add_field(name = "Wind Speed:", value = str(data.wind()['speed'])+"m/s", inline = False)
        embed.add_field(name = "Cloud Cover:", value = str(data.clouds)+"%", inline = False)
        embed.add_field(name = "Pressure:", value = str(data.pressure['press'])+"hPa", inline = False)
        embed.add_field(name = "Visibility Distance:", value = str(distance)+"KM", inline = False)
        await ctx.send(embed = embed)

    @commands.command(name = "flip",
                      description = "Flips a coin and returns heads or tails.",
                      brief = "Flips a coin and returns heads or tails.",
                      aliases = ["coin"])
    async def flip(self, ctx):
        """Send a coin emote, pause briefly for effect, then announce the result."""
        if random.randint(1,2) == 1:
            await ctx.send("<:heads:809568187707817994>")
            await asyncio.sleep(0.3)
            await ctx.send(ctx.message.author.mention+", you got **heads**.")
        else:
            await ctx.send("<:tails:809568669029236766>")
            await asyncio.sleep(0.3)
            await ctx.send(ctx.message.author.mention+", you got **tails**.")

    @commands.command(name = "changelog",
                      description = "View all the changes made to BOTrased since the last update.",
                      brief = "View the changelog.")
    async def changelog(self, ctx):
        """Read changelog.txt and send it as an embed (first line is the title)."""
        async with ctx.typing():
            # 'with' guarantees the handle is closed even if embed creation
            # fails (previously it leaked on any exception before close()).
            with open("changelog.txt", "r") as changeLog:
                changeLogLines = changeLog.readlines()
            embed = discord.Embed(title = changeLogLines[0], description = "".join(changeLogLines[1:]), colour = discord.Colour.dark_purple())
            embed.set_footer(text = "Note: \"Silent changes\" are changes that should not impact user experience, and instead only code stability or maintainability.")
            await ctx.send(embed = embed)

    @commands.command(name = "randomint",
                      description = "Generates a random integer within a given range",
                      brief = "Generates a random integer within a given range")
    async def randomInt(self, ctx, val1 = None, val2 = None):
        """Send a random integer in [0, val1], or [val1, val2] if both given.

        Raises BOTrasedError("403") when no bound is supplied, and
        BOTrasedError("400") for non-integer bounds or an empty range.
        """
        if val1 is None:
            raise self.bot.BOTrasedError("403")
        try:
            low = int(val1)
            if val2 is not None:
                high = int(val2)
                if low >= high:
                    # Same outcome as the original 'assert', but not
                    # stripped when Python runs with -O.
                    raise ValueError("empty or inverted range")
        except (TypeError, ValueError):
            raise self.bot.BOTrasedError("400")
        if val2 is None:
            await ctx.send("Your number is " + str(random.randint(0, low)))
        else:
            await ctx.send("Your number is " + str(random.randint(low, high)))
def setup(bot):
    """Extension entry point used by discord.py's load_extension hook."""
    bot.add_cog(Utilities(bot))
import itertools
import functools
import math
import re
from attr import __description__
import discord
import os
from discord.ext import tasks, commands
from discord.ext.commands.errors import MissingPermissions
from discord.user import Profile
from discord.utils import get
from dns.message import Message
import aiomysql
import random
import pyowm
import threading
import time
import datetime
import dbl
class Utilities(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command(name = "alive",
description = "A simple check for bot responsiveness. If you get no response, there may be an issue with permissions or the bot itself.",
brief = "A simple check for bot responsiveness.",
aliases = ["hb", "heartbeat"])
async def alive(self, ctx):
await ctx.send(ctx.message.author.mention + " I'm alive")
@commands.command(name = "id",
description = "Returns the User ID of a specified user when @mentioned. If no user is @mentioned, it returns your ID.",
brief = "Returns the User ID of a specified user.")
async def id(self, ctx, user: discord.User = None):
#Has the user presented an argument, or have they passed an @mention of themselves as an argument?
if user == None or user.id == ctx.message.author.id:
#Send the user a message presenting their ID that addresses them directly.
await ctx.send(ctx.message.author.mention + ", your User ID is: "+ str(ctx.message.author.id))
#In this case, the user must have passed something other than a reference to themselves
else:
#Try to get the ID of that argument, and send it to the channel
try:
await ctx.send(user.display_name+"'s ID is: "+ str(user.id))
#If there's an issue doing this, send an error message in the chat.
except:
raise self.bot.BOTrasedError("402 Sorry, there was an issue getting that user's ID. Check that the account of the user you are obtaining the ID of hasn't deleted their account. Please note that only @mentions will be taken as valid arguments, and that @role mentions will not work.")
@commands.command(name = "info",
description = "Displays information about BOTrased including the name, a description, server count and the creator of the bot.",
brief = "Displays information about BOTrased.",
aliases = ["i"])
async def info(self, ctx):
#Fetch my user profile using my ID
ownerProfile = await self.bot.fetch_user(int(self.bot.ownerID))
#Initialise the embed object and assign it to a local variable called "embed". Set the title and description and set the colour for the sidebar.
embed = discord.Embed(title = "BOTrased", description = "A Discord Bot written entirely in Python.", colour = discord.Colour.dark_purple())
#Set the content of the embed to an image type and pass the URL of my user profile
embed.set_image(url = str(ownerProfile.avatar_url))
#Set the content of the thumbnail (the image displayed in the top right corner of the embed) and pass the URL of the bot's user profile
embed.set_thumbnail(url = str(self.bot.user.avatar_url))
#Add a field which will display the server count of the bot
embed.add_field(name = "Currently serving:", value = str(len(self.bot.guilds)) + " servers", inline = False)
#Add a field which will provide an invite link to add the bot to other servers
embed.add_field(name="Invite Bot", value="[Invite link](https://discord.com/oauth2/authorize?client_id=541373621873016866&scope=bot&permissions=439610486)")
embed.add_field(name = "Support Server", value = "[Server Invite](https://discord.gg/KUSWws6XAA)")
embed.add_field(name = "Vote", value = "[Vote for BOTrased](https://top.gg/bot/541373621873016866/vote)")
embed.add_field(name = "Creator", value = ownerProfile.display_name+"#"+ownerProfile.discriminator, inline = False)
#Send the embed object as an embed type message into the channel
await ctx.send(embed = embed)
@commands.command(name = "weather",
description = "Gets the weather for a specified location and displays it as an embed.",
brief = "Check the weather for a specified location.")
async def weather(self, ctx, *, location = None):
if location == None:
raise self.bot.BOTrasedError("403")
try:
weather = self.bot.mgr.weather_at_place(location)
except:
raise self.bot.BOTrasedError("500")
data = weather.weather
distance = int(data.visibility_distance)/1000
embed = discord.Embed(title = "Weather for " + location.title())
embed.set_thumbnail(url = data.weather_icon_url(size = '4x'))
embed.add_field(name = (data.detailed_status.title()), value = "\u200b", inline = False)
embed.add_field(name = "Temperature:", value = str(data.temperature(unit = 'celsius')['temp']) + "°C", inline = False)
embed.add_field(name = "Humidity:", value = str(data.humidity)+"%", inline=False)
embed.add_field(name = "Wind Speed:", value = str(data.wind()['speed'])+"m/s", inline = False)
embed.add_field(name = "Cloud Cover:", value = str(data.clouds)+"%", inline = False)
embed.add_field(name = "Pressure:", value = str(data.pressure['press'])+"hPa", inline = False)
embed.add_field(name = "Visibility Distance:", value = str(distance)+"KM", inline = False)
await ctx.send(embed = embed)
@commands.command(name = "flip",
description = "Flips a coin and returns heads or tails.",
brief = "Flips a coin and returns heads or tails.",
aliases = ["coin"])
async def flip(self, ctx):
if random.randint(1,2) == 1:
await ctx.send("<:heads:809568187707817994>")
await asyncio.sleep(0.3)
await ctx.send(ctx.message.author.mention+", you got **heads**.")
else:
await ctx.send("<:tails:809568669029236766>")
await asyncio.sleep(0.3)
await ctx.send(ctx.message.author.mention+", you got **tails**.")
@commands.command(name = "changelog",
description = "View all the changes made to BOTrased since the last update.",
brief = "View the changelog.")
async def changelog(self, ctx):
async with ctx.typing():
changeLog = open("changelog.txt", "r")
changeLogBody = ""
changeLogLines = []
for line in changeLog:
changeLogLines.append(line)
for i in range(1, len(changeLogLines)):
changeLogBody += changeLogLines[i]
embed = discord.Embed(title = changeLogLines[0], description = changeLogBody, colour = discord.Colour.dark_purple())
embed.set_footer(text = "Note: \"Silent changes\" are changes that should not impact user experience, and instead only code stability or maintainability.")
changeLog.close()
await ctx.send(embed = embed)
@commands.command(name = "randomint",
description = "Generates a random integer within a given range",
brief = "Generates a random integer within a given range")
async def randomInt(self, ctx, val1 = None, val2 = None):
if val1 == None:
raise self.bot.BOTrasedError("403")
try:
int(val1)
if val2 != None:
int(val2)
assert int(val1) < int(val2)
except:
raise self.bot.BOTrasedError("400")
if val2 == None:
await ctx.send("Your number is " + str(random.randint(0, int(val1))))
else:
await ctx.send("Your number is " + str(random.randint(int(val1), int(val2))))
def setup(bot):
bot.add_cog(Utilities(bot)) | 0.283385 | 0.127979 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.lib.base import BaseController, render
import lr.lib.helpers as h
import urllib2, json, datetime
from lr.model import LRNode as sourceLRNode, NodeServiceModel
log = logging.getLogger(__name__)
class ServicesController(BaseController):
    """REST Controller styled on the Atom Publishing Protocol"""

    # To properly map this controller, ensure your config/routing.py
    # file has a resource setup:
    #     map.resource('services', 'services')

    def index(self, format='html'):
        """GET /services: All items in the collection.

        Returns a JSON document describing this node and every service it
        exposes, or a plain-text notice when the administrative service is
        unavailable.
        """
        # 'not ...' replaces the '== False' comparison (PEP 8); the method
        # name indicates a boolean return.
        if not sourceLRNode.isServiceAvailable('Network Node Services'):
            return "Administrative service is not available"
        data = {
            'timestamp': str(datetime.datetime.utcnow()),
            'node_id': sourceLRNode.nodeDescription.node_id,
            'active': sourceLRNode.nodeDescription.active,
            'node_name': sourceLRNode.nodeDescription.node_name,
            # One entry per registered node service.
            'services': [s.specData for s in sourceLRNode.nodeServices],
        }
        return json.dumps(data)
        # url('services')

    def create(self):
        """POST /services: Create a new item"""
        # url('services')

    def new(self, format='html'):
        """GET /services/new: Form to create a new item"""
        # url('new_services')

    def update(self, id):
        """PUT /services/id: Update an existing item"""
        # Forms posted to this method should contain a hidden field:
        #    <input type="hidden" name="_method" value="PUT" />
        # Or using helpers:
        #    h.form(url('services', id=ID), method='put')
        # url('services', id=ID)

    def delete(self, id):
        """DELETE /services/id: Delete an existing item"""
        # Forms posted to this method should contain a hidden field:
        #    <input type="hidden" name="_method" value="DELETE" />
        # Or using helpers:
        #    h.form(url('services', id=ID), method='delete')
        # url('services', id=ID)

    def show(self, id, format='html'):
        """GET /services/id: Show a specific item"""
        # url('services', id=ID)

    def edit(self, id, format='html'):
        """GET /services/id/edit: Form to edit an existing item"""
        # url('edit_services', id=ID)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.lib.base import BaseController, render
import lr.lib.helpers as h
import urllib2, json, datetime
from lr.model import LRNode as sourceLRNode, NodeServiceModel
log = logging.getLogger(__name__)
class ServicesController(BaseController):
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('services', 'services')
def index(self, format='html'):
"""GET /services: All items in the collection"""
if sourceLRNode.isServiceAvailable('Network Node Services') == False:
return "Administrative service is not available"
data = {}
data['timestamp'] = str(datetime.datetime.utcnow())
data['node_id'] = sourceLRNode.nodeDescription.node_id
data['active'] = sourceLRNode.nodeDescription.active
data['node_name'] = sourceLRNode.nodeDescription.node_name
data['services'] = []
for s in sourceLRNode.nodeServices:
data['services'].append(s.specData)
return json.dumps(data)
# url('services')
def create(self):
"""POST /services: Create a new item"""
# url('services')
def new(self, format='html'):
"""GET /services/new: Form to create a new item"""
# url('new_services')
def update(self, id):
"""PUT /services/id: Update an existing item"""
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="PUT" />
# Or using helpers:
# h.form(url('services', id=ID),
# method='put')
# url('services', id=ID)
def delete(self, id):
"""DELETE /services/id: Delete an existing item"""
# Forms posted to this method should contain a hidden field:
# <input type="hidden" name="_method" value="DELETE" />
# Or using helpers:
# h.form(url('services', id=ID),
# method='delete')
# url('services', id=ID)
def show(self, id, format='html'):
"""GET /services/id: Show a specific item"""
# url('services', id=ID)
def edit(self, id, format='html'):
"""GET /services/id/edit: Form to edit an existing item"""
# url('edit_services', id=ID) | 0.514644 | 0.136292 |
from Queue import Empty, Queue
from boto.exception import S3ResponseError
from boto.pyami.config import Config
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto import utils
from filechunkio import FileChunkIO
from logging import handlers
from multiprocessing import Pool
from threading import Thread
from time import sleep
import argparse
import fcntl
import logging
import math
import os
import pyinotify
import signal
import stat
import sys
import time
import traceback
#Default filename for the config file
CONFIG_FILE = './s3ingest.conf'
access_key_id = None # needed global because multiprocessing cannot pickle certain objects
secret_access_key = None # needed global because multiprocessing cannot pickle certain objects
# Must be global to be passed around
def upload_progress_cb(bytes_so_far, total_bytes):
    """Progress callback for boto transfers: log bytes sent vs. total."""
    progress_msg = "{0:d} / {1:d} bytes transferred".format(bytes_so_far, total_bytes)
    logging.info(progress_msg)
# Must be global to be passed around
def _upload_part(target_bucket_name, multipart_id, part_num, file_path, offset, bytes, amount_of_retries=10):
    """Upload one chunk of a multipart S3 upload, retrying on failure.

    Runs in a worker process (see S3Util.multipart_upload_file), which is why
    it is a module-level function: multiprocessing must be able to pickle it.

    :param target_bucket_name: name of the destination S3 bucket
    :param multipart_id: id of the multipart upload this part belongs to
    :param part_num: 1-based part number within the multipart upload
    :param file_path: path of the local source file
    :param offset: byte offset of this part within the source file
    :param bytes: number of bytes to upload starting at `offset`
    :param amount_of_retries: attempts remaining before the error is re-raised
    """
    cb = upload_progress_cb
    def _upload(retries_left=amount_of_retries):
        try:
            logging.info("Start uploading part #{0:d} of {1}".format(part_num, file_path))
            # Fresh connection per attempt: connections cannot be pickled and
            # passed in from the parent process.
            target_bucket = S3Connection(access_key_id, secret_access_key).get_bucket(target_bucket_name)
            # Locate this upload among the bucket's in-flight multipart uploads.
            for mp in target_bucket.get_all_multipart_uploads():
                if mp.id == multipart_id:
                    with FileChunkIO(file_path, 'r', offset=offset, bytes=bytes) as fp:
                        # MD5 is supplied so S3 can verify the part's integrity.
                        hex_digest, base64_digest, data_size = utils.compute_md5(fp, size=bytes)
                        mp.upload_part_from_file(fp=fp, part_num=part_num, cb=cb, num_cb=1, md5=(hex_digest, base64_digest))
                    break
        except Exception, exc:  # Python 2 syntax, consistent with the rest of the file
            if retries_left:
                # Recursive retry; depth is bounded by amount_of_retries.
                _upload(retries_left=retries_left - 1)
            else:
                logging.error("Failed uploading part #{0:d} of {1}".format(part_num, file_path))
                raise exc
        else:
            logging.info("Completed uploading part #{0:d} of {1}".format(part_num, file_path))
    _upload()
class S3Util:
    """Coordinates uploading files from a watched directory tree to S3.

    Small files go up in a single request; files larger than
    _file_split_threshold_bytes use S3 multipart upload driven by a process
    pool.  A queue decouples the inotify producer from the S3Uploader
    worker threads.
    """

    # Max file size in bytes before an upload is split into parts.
    _file_split_threshold_bytes = 100 * 1024 * 1024
    # Number of processes used to upload parts in parallel.
    _parallel_processes = 2

    def __init__(self, access_key_id, secret_access_key):
        self._AWS_ACCESS_KEY_ID = access_key_id
        self._AWS_SECRET_ACCESS_KEY = secret_access_key
        # inotify plumbing, populated by start_monitoring().
        self._watch_manager = None
        self._watch_descriptor = None
        self._notifier = None
        self._connection = None
        self._watched_dir_offset = None
        self._watched_dir = None
        self._target_bucket_name = None
        self._logger = None
        # Mutable runtime state now lives on the instance; it was previously
        # declared as class attributes, shared by every S3Util instance.
        self._queue = Queue()               # files waiting to be uploaded
        self._currently_processing = set()  # files being uploaded right now
        self._exit_flag = False
        self._active_flag = False

    def connect(self):
        """Open and cache a connection (kept for API compatibility)."""
        logging.debug("Connecting to S3")
        self._connection = S3Connection(self._AWS_ACCESS_KEY_ID, self._AWS_SECRET_ACCESS_KEY)
        logging.debug("Connected to S3")

    def get_connection(self):
        """Return a fresh S3 connection for the caller's exclusive use."""
        return S3Connection(self._AWS_ACCESS_KEY_ID, self._AWS_SECRET_ACCESS_KEY)

    def start_monitoring(self, dir_name):
        """Begin watching dir_name recursively for file-system events."""
        self._watched_dir_offset = len(dir_name)
        self._watched_dir = dir_name
        self._watch_manager = pyinotify.WatchManager()
        # IN_CLOSE_WRITE is used because it ensures the file is completely
        # written to disk before upload begins.
        mask = pyinotify.IN_DELETE | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE
        self._notifier = pyinotify.ThreadedNotifier(self._watch_manager, S3Handler(self))
        self._notifier.start()
        self._watch_descriptor = self._watch_manager.add_watch(dir_name, mask, rec=True, auto_add=True)
        logging.debug("Monitoring: {0}".format(dir_name))

    def list_buckets(self):
        """Print the name of every bucket visible to these credentials."""
        bucket_rs = self.get_connection().get_all_buckets()
        for bucket in bucket_rs:
            # Parenthesised single-argument print: valid in Python 2 and 3.
            print("Bucket found: {0}".format(bucket.name))

    def list_keys(self, bucket_name, path, min_size_bytes=0, max_size_bytes=sys.maxsize):
        """Print keys under `path` whose size lies within the given bounds.

        sys.maxsize replaces the Python-2-only sys.maxint as the default
        upper bound.
        """
        bucket = self.get_connection().get_bucket(bucket_name)
        bucket_list = bucket.list(path)
        print("Keys in bucket {0}, path {1}, greater than {2} bytes and less than {3} bytes".format(bucket_name, path, min_size_bytes, max_size_bytes))
        for key in bucket_list:
            if (key.size >= min_size_bytes) and (key.size <= max_size_bytes):
                print("{0}: {1} ".format(bucket_name, key.name))

    def set_target_bucket_name(self, target_bucket_name):
        self._target_bucket_name = target_bucket_name

    def get_target_bucket_name(self):
        return self._target_bucket_name

    def get_target_bucket(self):
        return self.get_connection().get_bucket(self._target_bucket_name)

    def get_bucket(self, bucket_name):
        return self.get_connection().get_bucket(bucket_name)

    def multipart_upload_file(self, file_path, keyname):
        """Upload file_path as `keyname` using S3 multipart upload.

        Chunks are uploaded by _upload_part in a pool of worker processes;
        the upload is completed only when every part arrived, otherwise it
        is cancelled.
        """
        mp = self.get_target_bucket().initiate_multipart_upload(keyname, headers={}, reduced_redundancy=False)
        source_size = os.stat(file_path).st_size
        # Chunk size grows with the file (sqrt scaling), never below S3's
        # 5 MB (5242880-byte) minimum part size.
        bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)), 5242880)
        chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
        pool = Pool(processes=self._parallel_processes)
        for i in range(chunk_amount):
            offset = i * bytes_per_chunk
            remaining_bytes = source_size - offset
            bytes = min([bytes_per_chunk, remaining_bytes])
            part_num = i + 1
            pool.apply_async(_upload_part, [self.get_target_bucket_name(), mp.id, part_num, file_path, offset, bytes])
        pool.close()
        pool.join()
        if len(mp.get_all_parts()) == chunk_amount:
            mp.complete_upload()
            logging.info("Completed upload of {0}".format(file_path))
        else:
            logging.error("Failed upload {0} because parts missing".format(file_path))
            self._currently_processing.discard(file_path)
            mp.cancel_upload()

    def upload_file(self, file_path):
        """Upload one file (plain or multipart) and delete the local copy.

        The local file is removed only when the same path has not been
        queued again while the upload was running.
        """
        self._currently_processing.add(file_path)
        key = Key(self.get_target_bucket())
        # Key name is the path relative to the watched directory root.
        rel_path = str(file_path[self._watched_dir_offset:])
        key.key = rel_path
        if os.path.isfile(file_path) and os.stat(file_path).st_size > self._file_split_threshold_bytes:
            self.multipart_upload_file(file_path, key.key)
        else:
            # 'with' fixes a handle leak: the file was previously opened for
            # the MD5 computation and never closed.
            with open(file_path, "r") as fp:
                hex_digest, base64_digest, data_size = utils.compute_md5(fp)
            key.set_contents_from_filename(file_path, cb=upload_progress_cb, num_cb=1, md5=(hex_digest, base64_digest))
        # Check the queue since the same file path may have been added again
        # while this one was uploading.
        if os.path.isfile(file_path) and not self.is_queued(file_path):
            os.remove(file_path)
        self._currently_processing.discard(file_path)

    def get_next(self):
        """Pop the next queued file path; raises Empty after a 5 s timeout."""
        return self._queue.get(timeout=5)

    def add_to_queue(self, file_path):
        """Queue a path for upload, skipping zero-byte files and duplicates."""
        if os.path.isfile(file_path) and not os.path.getsize(file_path) > 0:
            logging.error("Got zero-byte file, {0}, (ignoring)".format(file_path))
            return
        if not self.is_queued(file_path):
            self._queue.put(file_path)

    def task_done(self):
        self._queue.task_done()

    def wait_for_completion(self):
        """Block until every queued item has been marked done."""
        self._queue.join()

    def is_exit(self):
        return self._exit_flag

    def set_active(self, is_active):
        self._active_flag = is_active

    def is_active(self):
        return self._active_flag

    def is_queued(self, file_path):
        # Peeks at the queue's underlying deque; O(n), but queues stay small.
        return file_path in self._queue.queue

    def is_currently_processing(self, file_path):
        return file_path in self._currently_processing

    def remove_currently_processing(self, file_path):
        self._currently_processing.discard(file_path)

    def signal_handler(self, signal, frame):
        """SIGINT/SIGTERM handler: stop monitoring and exit the process."""
        self._exit_flag = True
        logging.debug("Stopping monitors")
        # Destroy the inotify instance on this interrupt (stop monitoring).
        self._watch_manager.rm_watch(self._watch_descriptor.values())
        self._notifier.stop()
        logging.debug("Monitors stopped. Exiting")
        sys.exit(0)
"""Removes filepath items from a queue and begins the upload process to Amazon.
"""
class S3Uploader(Thread):
    """Worker thread: pops file paths off the shared queue and uploads them.

    Several instances run concurrently (see main()); they coordinate through
    the owning S3Util's queue and its currently-processing set.
    """

    def __init__(self, s3_util):
        Thread.__init__(self)
        self.s3_util = s3_util

    def run(self):
        # Loop until the owning S3Util raises its exit flag; only an
        # "active" node actually takes work off the queue.
        while True:
            if self.s3_util.is_active():
                try:
                    file_path = self.s3_util.get_next()
                    if self.s3_util.is_currently_processing(file_path):
                        #Return removed filepath to queue and continue (needed if same file is sent again)
                        self.s3_util.task_done()
                        self.s3_util.add_to_queue(file_path)
                        continue
                    else:
                        try:
                            logging.info("{0} upload started by thread {1}".format(file_path, self.name))
                            self.s3_util.upload_file(file_path)
                            logging.info("{0} upload completed by thread {1}".format(file_path, self.name))
                        except Exception as e:
                            # Log the full traceback; the item is still marked
                            # done below so the queue can drain.
                            tb = traceback.format_exc()
                            logging.error("{0} upload failed in thread {1}, error: {2}".format(file_path, self.name, tb))
                            self.s3_util.remove_currently_processing(file_path)
                        self.s3_util.task_done()
                except Empty:
                    #Ignore if queue is empty, just try again
                    pass
            # End if main thread is closing
            if self.s3_util.is_exit():
                return
            sleep(2)
"""Adds filepath items to a queue when the file/dir is fully copied to the filesystem.
"""
class S3Handler(pyinotify.ProcessEvent):
    """Translates inotify events into queue additions on the shared S3Util."""

    # The S3Util whose queue receives discovered paths.
    _s3_util = None

    def __init__(self, s3_util):
        self._s3_util = s3_util

    def process_IN_CLOSE_WRITE(self, event):
        # Create files this way since this ensures that the entire file is written before starting transfer
        file_path = os.path.join(event.path, event.name)
        logging.debug("{0} close_write event received, adding to queue".format(file_path))
        self._s3_util.add_to_queue(file_path)

    def process_IN_CREATE(self, event):
        # Only create directories this way
        try:
            if event.is_dir:
                #file_path = os.path.join(event.path, event.name)
                # NOTE(review): this enqueues event.path (the parent directory),
                # not the newly created directory itself (event.path/event.name,
                # as in the commented-out line above) — confirm this is intended.
                self._s3_util.add_to_queue(event.path)
        except AttributeError:
            pass
            # Ignore since most events would be files, so hasattr(event, 'is_dir') would be slow

    def process_IN_DELETE(self, event):
        # Deletions require no S3 action.
        pass
        #print "\nRemoved: {0}".format(os.path.join(event.path, event.name))
def main(argv):
    """Entry point: load configuration, start directory monitoring and worker
    threads, then run the active/passive heartbeat loop.

    Several hosts may run this script against a shared filesystem; the pid
    file decides which node is "active" (i.e. uploading) at any moment.
    """
    parser = argparse.ArgumentParser(description='Upload assets to Amazon')
    parser.add_argument('--config',
                        dest='config_filename',
                        action='store',
                        default=CONFIG_FILE,
                        help='optional custom configuration filename')
    parser.add_argument('--node',
                        dest='node_name_override',
                        action='store',
                        default=False,
                        help='optional override for the pid-id specified in the config file')
    parameters = parser.parse_args()
    # Resolve the config file relative to this script's own directory.
    current_defaults_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), parameters.config_filename)
    config = Config(path=current_defaults_filename)
    # Credentials are module globals so multiprocessing workers can use them
    # without pickling a connection object.
    global access_key_id
    global secret_access_key
    access_key_id = config.get('Amazon', 'aws_access_key_id')
    secret_access_key = config.get('Amazon', 'aws_secret_access_key')
    log_file_path = config.get('General', 'log_file_path', '/var/log/s3ingest.log')
    log_level = config.getint('General', 'log_level', 20)
    target_bucket_name = config.get('Amazon', 's3_bucket_name')
    monitored_dir_name = config.get('General', 'monitored_directory')
    worker_threads = config.getint('General', 'worker_threads', 5)
    pid_file_path = config.get('General', 'pid_file_path', './s3ingest.semaphore')
    # The node's identity for the active/passive election.
    if not parameters.node_name_override:
        pid_id = config.get('General', 'pid_id').rstrip()
    else:
        pid_id = parameters.node_name_override.rstrip()
    HEART_BEAT_TIME_SECS = config.getint('General', 'heart_beat_time_secs', 300)
    MIN_MODIFIED_INTERVAL_SECS = 3600 # 3600 secs = 1 hr. Keep high to allow time for large files to upload and reduce false positives
    if not os.path.exists(monitored_dir_name):
        print "The directory to be monitored '{0}' does not exist".format(monitored_dir_name)
        sys.exit(1)
    logging.basicConfig(filename=log_file_path, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=log_level)
    # Errors are additionally emailed to the configured recipients.
    mailhost = config.get('Mail', 'mailhost')
    fromaddr = config.get('Mail', 'fromaddr')
    toaddrs = config.get('Mail', 'toaddrs')
    smtp_handler = handlers.SMTPHandler(mailhost, fromaddr, toaddrs, 'S3Util error occurred')
    smtp_handler.setLevel(logging.ERROR)
    logging.getLogger().addHandler(smtp_handler)
    s3_util = S3Util(access_key_id, secret_access_key)
    s3_util.set_target_bucket_name(target_bucket_name)
    signal.signal(signal.SIGINT, s3_util.signal_handler)
    signal.signal(signal.SIGTERM, s3_util.signal_handler)
    # Check for pid file and create if not found
    # NOTE(review): two nodes starting simultaneously may both see the file
    # missing and both write their id — the flock is taken only after open.
    if not os.path.exists(pid_file_path):
        pid_file = open(pid_file_path, "w+")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_EX)
        pid_file.write(str(pid_id))
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        pid_file.close()
    s3_util.start_monitoring(monitored_dir_name)
    logging.debug("Starting worker threads")
    for i in range(worker_threads):
        t = S3Uploader(s3_util)
        t.setDaemon(True)
        t.start()
    logging.debug("Worker threads started")
    # Heartbeat loop: the node named in the pid file is active; a node whose
    # heartbeat (pid-file mtime) goes stale is replaced by whoever reads next.
    while True:
        pid_file = open(pid_file_path, "r+")
        logging.debug("Waiting for lock")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_SH)
        logging.debug("Acquired lock")
        current_pid = pid_file.readline().rstrip()
        st = os.stat(pid_file_path)
        now = time.time()
        pid_modified_time = st[stat.ST_MTIME]
        logging.debug("pid file: {0}, current_host: {1}".format(current_pid, pid_id))
        if pid_id == current_pid:
            logging.debug("State - Active")
            # Touch the pid file: its mtime is this node's heartbeat.
            os.utime(pid_file_path, None)
            s3_util.set_active(True)
            # Find files have been unmodified for a defined threshold and assume that they need to be queued
            for dirpath, dirnames, filenames in os.walk(monitored_dir_name):
                for name in filenames:
                    file_path = os.path.normpath(os.path.join(dirpath, name))
                    last_modifed_time = os.path.getmtime(file_path)
                    if ((now - last_modifed_time) > MIN_MODIFIED_INTERVAL_SECS and not
                        (s3_util.is_queued(file_path) or s3_util.is_currently_processing(file_path))):
                        logging.info("Directory scan found file '{0}' older than {1} seconds and added to queue".format(file_path, (now - last_modifed_time)))
                        s3_util.add_to_queue(file_path)
        else:
            if now - pid_modified_time > HEART_BEAT_TIME_SECS:
                # The active node's heartbeat is stale — take over.
                logging.debug("Stale pid file found, setting state - Active")
                pid_file.truncate(0)
                pid_file.seek(0)
                pid_file.write(str(pid_id))
                s3_util.set_active(True)
            else:
                logging.debug("State - Inactive")
                s3_util.set_active(False)
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        logging.debug("Released lock")
        pid_file.close()
        #Play nice
        sleep(5)
    # NOTE(review): unreachable — the loop above never breaks; shutdown
    # happens via signal_handler's sys.exit().
    s3_util.wait_for_completion()
    logging.debug("Exiting")
    sys.exit(0)
# Script entry point: forward command-line arguments to main().
if __name__ == "__main__":
    main(sys.argv)
from boto.exception import S3ResponseError
from boto.pyami.config import Config
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto import utils
from filechunkio import FileChunkIO
from logging import handlers
from multiprocessing import Pool
from threading import Thread
from time import sleep
import argparse
import fcntl
import logging
import math
import os
import pyinotify
import signal
import stat
import sys
import time
import traceback
#Default filename for the config file
CONFIG_FILE = './s3ingest.conf'
access_key_id = None # needed global because multiprocessing cannot pickle certain objects
secret_access_key = None # needed global because multiprocessing cannot pickle certain objects
# Must be global to be passed around
def upload_progress_cb(bytes_so_far, total_bytes):
    """Progress callback handed to boto uploads; logs bytes transferred so far."""
    logging.info("{0:d} / {1:d} bytes transferred".format(bytes_so_far, total_bytes))
# Must be global to be passed around
def _upload_part(target_bucket_name, multipart_id, part_num, file_path, offset, bytes, amount_of_retries=10):
    """Upload one chunk of a multipart S3 upload, retrying on failure.

    Runs inside a multiprocessing Pool worker, so it opens its own S3
    connection (module-global credentials) instead of sharing one.

    :param target_bucket_name: destination bucket name
    :param multipart_id: id of the multipart upload this chunk belongs to
    :param part_num: 1-based part number
    :param file_path: local file being uploaded
    :param offset: byte offset of this chunk within the file
    :param bytes: size of this chunk in bytes
    :param amount_of_retries: attempts before giving up and re-raising
    """
    cb = upload_progress_cb
    def _upload(retries_left=amount_of_retries):
        try:
            logging.info("Start uploading part #{0:d} of {1}".format(part_num, file_path))
            target_bucket = S3Connection(access_key_id, secret_access_key).get_bucket(target_bucket_name)
            # Locate the matching in-progress multipart upload on the bucket.
            for mp in target_bucket.get_all_multipart_uploads():
                if mp.id == multipart_id:
                    with FileChunkIO(file_path, 'r', offset=offset, bytes=bytes) as fp:
                        # Supply an MD5 so S3 verifies the part's integrity.
                        hex_digest, base64_digest, data_size = utils.compute_md5(fp, size=bytes)
                        mp.upload_part_from_file(fp=fp, part_num=part_num, cb=cb, num_cb=1, md5=(hex_digest, base64_digest))
                    break
        except Exception as exc:  # 'as' syntax is valid on Python 2.6+ and 3 (old comma form is 2-only)
            if retries_left:
                _upload(retries_left=retries_left - 1)
            else:
                logging.error("Failed uploading part #{0:d} of {1}".format(part_num, file_path))
                raise  # bare raise preserves the original traceback
        else:
            logging.info("Completed uploading part #{0:d} of {1}".format(part_num, file_path))
    _upload()
class S3Util:
    """Shared state and S3 operations for the ingest daemon.

    Holds the upload queue, the set of in-flight files, and the pyinotify
    watch.  NOTE(review): these are class-level (shared) attributes, so all
    instances share one queue/processing set — fine for the single-instance
    usage in main(), but worth confirming before reuse.
    """
    _AWS_ACCESS_KEY_ID = None
    _AWS_SECRET_ACCESS_KEY = None
    _watch_manager = None
    _watch_descriptor = None
    _notifier = None
    _connection = None
    _watched_dir_offset = None
    _watched_dir = None
    _target_bucket_name = None
    _logger = None
    _queue = Queue() #Files that are waiting to be uploaded
    _currently_processing = set() #Files which have been taken off the queue and are being uploaded
    _exit_flag = False
    _active_flag = False
    _file_split_threshold_bytes = 100 * 1024 * 1024 #Max file size bytes before upload is done in separate parts
    _parallel_processes = 2 #Number of processes for uploading parts
    def __init__(self, access_key_id, secret_access_key):
        self._AWS_ACCESS_KEY_ID = access_key_id
        self._AWS_SECRET_ACCESS_KEY = secret_access_key
    def connect(self):
        """Open and cache an S3 connection (cached handle is otherwise unused)."""
        logging.debug("Connecting to S3")
        self._connection = S3Connection(self._AWS_ACCESS_KEY_ID, self._AWS_SECRET_ACCESS_KEY)
        logging.debug("Connected to S3")
    def get_connection(self):
        """Return a fresh S3 connection (one per call; safe across threads)."""
        return S3Connection(self._AWS_ACCESS_KEY_ID, self._AWS_SECRET_ACCESS_KEY)
    def start_monitoring(self, dir_name):
        """Start a pyinotify watcher (recursive) feeding S3Handler events."""
        self._watched_dir_offset = len(dir_name)
        self._watched_dir = dir_name
        self._watch_manager = pyinotify.WatchManager()
        #IN_CLOSE_WRITE used because it ensures file is completely written to disk before upload begins
        mask = pyinotify.IN_DELETE | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_CREATE
        self._notifier = pyinotify.ThreadedNotifier(self._watch_manager, S3Handler(self))
        self._notifier.start()
        self._watch_descriptor = self._watch_manager.add_watch(dir_name, mask, rec=True, auto_add=True)
        logging.debug("Monitoring: {0}".format(dir_name))
    def list_buckets(self):
        """Print all bucket names visible to the configured credentials."""
        bucket_rs = self.get_connection().get_all_buckets()
        for bucket in bucket_rs:
            print "Bucket found: {0}".format(bucket.name)
    def list_keys(self, bucket_name, path, min_size_bytes=0, max_size_bytes=sys.maxint):
        """Print keys under `path` whose size lies within the given bounds."""
        bucket = self.get_connection().get_bucket(bucket_name)
        bucket_list = bucket.list(path)
        print "Keys in bucket {0}, path {1}, greater than {2} bytes and less than {3} bytes".format(bucket_name, path, min_size_bytes, max_size_bytes)
        for key in bucket_list:
            if (key.size >= min_size_bytes ) and (key.size <= max_size_bytes):
                print "{0}: {1} ".format(bucket_name, key.name)
    def set_target_bucket_name(self, target_bucket_name):
        self._target_bucket_name = target_bucket_name
    def get_target_bucket_name(self):
        return self._target_bucket_name
    def get_target_bucket(self):
        return self.get_connection().get_bucket(self._target_bucket_name)
    def get_bucket(self, bucket_name):
        return self.get_connection().get_bucket(bucket_name)
    def multipart_upload_file(self, file_path, keyname):
        """Upload a large file in parallel chunks via a multiprocessing Pool."""
        mp = self.get_target_bucket().initiate_multipart_upload(keyname, headers={}, reduced_redundancy=False)
        source_size = os.stat(file_path).st_size
        # chunk size grows with sqrt(file size) but never below S3's 5 MiB part minimum
        bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)), 5242880)
        chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
        pool = Pool(processes=self._parallel_processes)
        for i in range(chunk_amount):
            offset = i * bytes_per_chunk
            remaining_bytes = source_size - offset
            bytes = min([bytes_per_chunk, remaining_bytes])
            part_num = i + 1
            pool.apply_async(_upload_part, [self.get_target_bucket_name(), mp.id, part_num, file_path, offset, bytes])
        pool.close()
        pool.join()
        # only finalize when every part made it; otherwise abort the upload
        if len(mp.get_all_parts()) == chunk_amount:
            mp.complete_upload()
            logging.info("Completed upload of {0}".format(file_path))
        else:
            logging.error("Failed upload {0} because parts missing".format(file_path))
            self._currently_processing.discard(file_path)
            mp.cancel_upload()
    def upload_file(self, file_path):
        """Upload one file (multipart if large) and delete the local copy on success."""
        self._currently_processing.add(file_path)
        key = Key(self.get_target_bucket())
        # key name is the path relative to the watched directory
        rel_path = str(file_path[self._watched_dir_offset:])
        key.key = rel_path
        if os.path.isfile(file_path) and os.stat(file_path).st_size > self._file_split_threshold_bytes:
            self.multipart_upload_file(file_path, key.key)
        else:
            # NOTE(review): fp is opened only to compute the MD5 and is never closed
            fp = open(file_path, "r")
            hex_digest, base64_digest, data_size = utils.compute_md5(fp)
            key.set_contents_from_filename(file_path, cb=upload_progress_cb, num_cb=1, md5=(hex_digest, base64_digest))
        # Check in queue since the same file path may have been added again while this one was uploading
        if os.path.isfile(file_path) and not self.is_queued(file_path):
            os.remove(file_path)
        self._currently_processing.discard(file_path)
    def get_next(self):
        """Pop the next queued path; raises Queue.Empty after a 5s timeout."""
        return self._queue.get(timeout=5)
    def add_to_queue(self, file_path):
        """Queue a path for upload, skipping zero-byte files and duplicates."""
        if os.path.isfile(file_path) and not os.path.getsize(file_path) > 0:
            logging.error("Got zero-byte file, {0}, (ignoring)".format(file_path))
            return
        if not self.is_queued(file_path):
            self._queue.put(file_path)
    def task_done(self):
        self._queue.task_done()
    def wait_for_completion(self):
        self._queue.join()
    def is_exit(self):
        return self._exit_flag
    def set_active(self, is_active):
        self._active_flag = is_active
    def is_active(self):
        return self._active_flag
    def is_queued(self, file_path):
        # NOTE(review): peeks at the Queue's internal deque without its lock
        return file_path in self._queue.queue
    def is_currently_processing(self, file_path):
        return file_path in self._currently_processing
    def remove_currently_processing(self, file_path):
        self._currently_processing.discard(file_path)
    def signal_handler(self, signal, frame):
        """SIGINT/SIGTERM handler: stop watches and exit the process."""
        self._exit_flag = True
        logging.debug("Stopping monitors")
        # destroy the inotify's instance on this interrupt (stop monitoring)
        self._watch_manager.rm_watch(self._watch_descriptor.values())
        self._notifier.stop()
        logging.debug("Monitors stopped. Exiting")
        sys.exit(0)
"""Removes filepath items from a queue and begins the upload process to Amazon.
"""
class S3Uploader(Thread):
    """Worker thread: pulls file paths off the shared queue and uploads them."""
    def __init__(self, s3_util):
        Thread.__init__(self)
        self.s3_util = s3_util
    def run(self):
        """Upload loop; returns (ending the thread) once the exit flag is set."""
        while True:
            if self.s3_util.is_active():
                try:
                    # blocks up to 5s, then raises Queue.Empty
                    file_path = self.s3_util.get_next()
                    if self.s3_util.is_currently_processing(file_path):
                        #Return removed filepath to queue and continue (needed if same file is sent again)
                        self.s3_util.task_done()
                        self.s3_util.add_to_queue(file_path)
                        continue
                    else:
                        try:
                            logging.info("{0} upload started by thread {1}".format(file_path, self.name))
                            self.s3_util.upload_file(file_path)
                            logging.info("{0} upload completed by thread {1}".format(file_path, self.name))
                        except Exception as e:
                            # log and keep the worker alive; the file remains on
                            # disk and will be re-queued by the directory rescan
                            tb = traceback.format_exc()
                            logging.error("{0} upload failed in thread {1}, error: {2}".format(file_path, self.name, tb))
                            self.s3_util.remove_currently_processing(file_path)
                        self.s3_util.task_done()
                except Empty:
                    #Ignore if queue is empty, just try again
                    pass
            # End if main thread is closing
            if self.s3_util.is_exit():
                return
            sleep(2)
"""Adds filepath items to a queue when the file/dir is fully copied to the filesystem.
"""
class S3Handler(pyinotify.ProcessEvent):
    """pyinotify event handler that feeds the S3Util upload queue."""
    _s3_util = None
    def __init__(self, s3_util):
        self._s3_util = s3_util
    def process_IN_CLOSE_WRITE(self, event):
        """Queue a file once its writer closes it (file fully on disk)."""
        # Create files this way since this ensures that the entire file is written before starting transfer
        file_path = os.path.join(event.path, event.name)
        logging.debug("{0} close_write event received, adding to queue".format(file_path))
        self._s3_util.add_to_queue(file_path)
    def process_IN_CREATE(self, event):
        """Queue newly created directories (files wait for CLOSE_WRITE)."""
        # Only create directories this way
        try:
            if event.is_dir:
                #file_path = os.path.join(event.path, event.name)
                self._s3_util.add_to_queue(event.path)
        except AttributeError:
            pass
        # Ignore since most events would be files, so hasattr(event, 'is_dir') would be slow
    def process_IN_DELETE(self, event):
        # deletions are intentionally ignored
        pass
        #print "\nRemoved: {0}".format(os.path.join(event.path, event.name))
def main(argv):
    """Entry point: load config, start watchers/workers, run the heartbeat loop.

    Active/standby election works through a shared pid file: the node whose
    id is in the file is active and touches the file as a heartbeat; a
    standby node takes over by rewriting the file once the heartbeat is
    older than HEART_BEAT_TIME_SECS.
    """
    parser = argparse.ArgumentParser(description='Upload assets to Amazon')
    parser.add_argument('--config',
                        dest='config_filename',
                        action='store',
                        default=CONFIG_FILE,
                        help='optional custom configuration filename')
    parser.add_argument('--node',
                        dest='node_name_override',
                        action='store',
                        default=False,
                        help='optional override for the pid-id specified in the config file')
    parameters = parser.parse_args()
    # config path is resolved relative to this script's directory
    current_defaults_filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), parameters.config_filename)
    config = Config(path=current_defaults_filename)
    # globals because the multiprocessing workers (_upload_part) need them
    global access_key_id
    global secret_access_key
    access_key_id = config.get('Amazon', 'aws_access_key_id')
    secret_access_key = config.get('Amazon', 'aws_secret_access_key')
    log_file_path = config.get('General', 'log_file_path', '/var/log/s3ingest.log')
    log_level = config.getint('General', 'log_level', 20)
    target_bucket_name = config.get('Amazon', 's3_bucket_name')
    monitored_dir_name = config.get('General', 'monitored_directory')
    worker_threads = config.getint('General', 'worker_threads', 5)
    pid_file_path = config.get('General', 'pid_file_path', './s3ingest.semaphore')
    if not parameters.node_name_override:
        pid_id = config.get('General', 'pid_id').rstrip()
    else:
        pid_id = parameters.node_name_override.rstrip()
    HEART_BEAT_TIME_SECS = config.getint('General', 'heart_beat_time_secs', 300)
    MIN_MODIFIED_INTERVAL_SECS = 3600 # 3600 secs = 1 hr. Keep high to allow time for large files to upload and reduce false positives
    if not os.path.exists(monitored_dir_name):
        print "The directory to be monitored '{0}' does not exist".format(monitored_dir_name)
        sys.exit(1)
    logging.basicConfig(filename=log_file_path, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p', level=log_level)
    # errors are additionally emailed via an SMTP log handler
    mailhost = config.get('Mail', 'mailhost')
    fromaddr = config.get('Mail', 'fromaddr')
    toaddrs = config.get('Mail', 'toaddrs')
    smtp_handler = handlers.SMTPHandler(mailhost, fromaddr, toaddrs, 'S3Util error occurred')
    smtp_handler.setLevel(logging.ERROR)
    logging.getLogger().addHandler(smtp_handler)
    s3_util = S3Util(access_key_id, secret_access_key)
    s3_util.set_target_bucket_name(target_bucket_name)
    signal.signal(signal.SIGINT, s3_util.signal_handler)
    signal.signal(signal.SIGTERM, s3_util.signal_handler)
    # Check for pid file and create if not found
    if not os.path.exists(pid_file_path):
        pid_file = open(pid_file_path, "w+")
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_EX)
        pid_file.write(str(pid_id))
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        pid_file.close()
    s3_util.start_monitoring(monitored_dir_name)
    logging.debug("Starting worker threads")
    for i in range(worker_threads):
        t = S3Uploader(s3_util)
        t.setDaemon(True)
        t.start()
    logging.debug("Worker threads started")
    # heartbeat / rescan loop; only the signal handler ever ends the process
    while True:
        pid_file = open(pid_file_path, "r+")
        logging.debug("Waiting for lock")
        # NOTE(review): a shared lock is taken even though the stale-pid branch
        # below writes to the file — confirm LOCK_EX is not needed there
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_SH)
        logging.debug("Acquired lock")
        current_pid = pid_file.readline().rstrip()
        st = os.stat(pid_file_path)
        now = time.time()
        pid_modified_time = st[stat.ST_MTIME]
        logging.debug("pid file: {0}, current_host: {1}".format(current_pid, pid_id))
        if pid_id == current_pid:
            logging.debug("State - Active")
            # touching the pid file is this node's heartbeat
            os.utime(pid_file_path, None)
            s3_util.set_active(True)
            # Find files have been unmodified for a defined threshold and assume that they need to be queued
            for dirpath, dirnames, filenames in os.walk(monitored_dir_name):
                for name in filenames:
                    file_path = os.path.normpath(os.path.join(dirpath, name))
                    last_modifed_time = os.path.getmtime(file_path)
                    if ((now - last_modifed_time) > MIN_MODIFIED_INTERVAL_SECS and not
                        (s3_util.is_queued(file_path) or s3_util.is_currently_processing(file_path))):
                        logging.info("Directory scan found file '{0}' older than {1} seconds and added to queue".format(file_path, (now - last_modifed_time)))
                        s3_util.add_to_queue(file_path)
        else:
            if now - pid_modified_time > HEART_BEAT_TIME_SECS:
                # other node's heartbeat is stale: take over as the active node
                logging.debug("Stale pid file found, setting state - Active")
                pid_file.truncate(0)
                pid_file.seek(0)
                pid_file.write(str(pid_id))
                s3_util.set_active(True)
            else:
                logging.debug("State - Inactive")
                s3_util.set_active(False)
        fcntl.flock(pid_file.fileno(), fcntl.LOCK_UN)
        logging.debug("Released lock")
        pid_file.close()
        #Play nice
        sleep(5)
    # NOTE(review): unreachable — the while-loop above never breaks
    s3_util.wait_for_completion()
    logging.debug("Exiting")
    sys.exit(0)
if __name__ == "__main__":
main(sys.argv) | 0.334372 | 0.086093 |
import copy
# a potential has a set of variables and a CPT
variable_c = ('c', 2)
variable_s = ('s', 2)
variable_r = ('r', 2)
variable_w = ('w', 2)
pc_vars = (variable_c,)
pc_cpt = {
(0,) : 0.5,
(1,) : 0.5
}
pot_c = (pc_vars, pc_cpt)
pr_vars = (variable_c, variable_r)
pr_cpt = {
(0, 0) : 0.8,
(0, 1) : 0.2,
(1, 0) : 0.2,
(1, 1) : 0.8
}
pot_r = (pr_vars, pr_cpt)
ps_vars = (variable_c, variable_s)
ps_cpt = {
(0, 0) : 0.5,
(0, 1) : 0.5,
(1, 0) : 0.9,
(1, 1) : 0.1
}
pot_s = (ps_vars, ps_cpt)
pw_vars = (variable_r, variable_s, variable_w)
pw_cpt = {
(0, 0, 0): 1,
(0, 0, 1): 0,
(0, 1, 0): 0.1,
(0, 1, 1): 0.9,
(1, 0, 0): 0.1,
(1, 0, 1): 0.9,
(1, 1, 0): 0.01,
(1, 1, 1): 0.99
}
pot_w = (pw_vars, pw_cpt)
def get_var_names(var_list):
return [v[0] for v in var_list]
def get_var_vals(var_list):
return [v[1] for v in var_list]
def init_cpt(variables):
var_vals = get_var_vals(variables)
def init_recurse (loc, var_vals, indices, cpt):
if (loc == len(indices)):
cpt[tuple(indices)] = 0
return
for i in range(var_vals[loc]):
indices[loc] = i
init_recurse(loc+1, var_vals, indices, cpt)
cpt = {}
init_recurse(0, var_vals, [-1]*len(var_vals), cpt)
return cpt
# a number of operations are defined over potentials
# one is marginalization wrt a subset of variables
def marg(pot, subset, project=False) :
if not project:
subset = tuple(set(pot[0])-set(subset))
else:
subset = tuple(subset)
num_vars = len(pot[0])
ind = [0] * num_vars
for i in range(num_vars):
if (pot[0][i] in subset):
ind[i] = 1
assert (set(subset).issubset(pot[0]))
p2_cpt = init_cpt(subset)
for indices, p in pot[1].iteritems():
indices2 = tuple([indices[i] for i in range(num_vars) if ind[i]])
p2_cpt[indices2] += p
return (subset, p2_cpt)
# another one is factor multiplication
def mult(pot1, pot2):
p3_vars = tuple(set(pot1[0]).union(set(pot2[0])))
num_vars = len(p3_vars)
ind1 = [0] * len(p3_vars)
ind2 = [0] * len(p3_vars)
for i in range(num_vars):
if p3_vars[i] in pot1[0]:
ind1[i] = 1
if p3_vars[i] in pot2[0]:
ind2[i] = 1
p3_cpt = init_cpt(p3_vars)
for indices, p in p3_cpt.iteritems():
indices1 = tuple([indices[i] for i in range(num_vars) if ind1[i]])
indices2 = tuple([indices[i] for i in range(num_vars) if ind2[i]])
p3_cpt[indices] = pot1[1][indices1] * pot2[1][indices2]
return (p3_vars, p3_cpt)
print mult(pot_c, pot_w)
print marg(pot_w, (variable_r, variable_s), project=False)
def var_elim(var_set, pot_list):
""" the variable elimination algorithm
:param var_set: the set of query (remaining) variables
:param pot_set: a list of potentials
:returns: a set of potentials
"""
init_vars = set()
pots = []
for pot in pot_list:
init_vars = init_vars.union(pot[0])
for pot in pot_list:
pot2 = copy.deepcopy(pot)
current_vars = set(pot2[0])
if not current_vars.issubset(var_set):
rem = var_set.intersection(current_vars)
pot2 = marg(pot, rem, project=True)
pots.append(pot2)
return pots
def bucket_elim(var_list, pot_list):
""" the bucket elimination algorithm
:param var_list: the ordered list of buckets
:param pot_list: a list of potentials
:returns: a list of factors over the remaining variables
"""
# TODO turns out that you don't need a set
buckets = {}
for var in var_list:
buckets[var] = []
pot_ind = [True] * len(pot_list)
for var in var_list:
for i in range(len(pot_list)):
if var in pot_list[i][0] and pot_ind[i]:
buckets[var].append(pot_list[i])
pot_ind[i] = False
# remember that there might be remaining potentials
rest = [pot[i] for i in range(len(pot_list)) if pot_ind[i]]
# the elimination phase
for i in range(len(var_list)):
current_var = var_list[i]
current_bucket = buckets[current_var]
# multiply the factors
bucket_mult = current_bucket[0]
for j in range(1, len(current_bucket)):
bucket_mult = mult(bucket_mult, current_bucket[j])
# marginalize
pot_m = marg(bucket_mult, (current_var,))
# now move this to another bucket or rest
found = False
for j in range(i+1, len(var_list)):
if var_list[j] in pot_m[0]:
buckets[var_list[j]].append(pot_m)
found = True
if not found:
rest.append(pot_m)
return rest
p_list = [pot_c, pot_r, pot_s]
v_set = {variable_c, variable_r}
p = var_elim(v_set, p_list)
print (p) | my_engine/notebooks/potential.py | import copy
# a potential has a set of variables and a CPT
variable_c = ('c', 2)
variable_s = ('s', 2)
variable_r = ('r', 2)
variable_w = ('w', 2)
pc_vars = (variable_c,)
pc_cpt = {
(0,) : 0.5,
(1,) : 0.5
}
pot_c = (pc_vars, pc_cpt)
pr_vars = (variable_c, variable_r)
pr_cpt = {
(0, 0) : 0.8,
(0, 1) : 0.2,
(1, 0) : 0.2,
(1, 1) : 0.8
}
pot_r = (pr_vars, pr_cpt)
ps_vars = (variable_c, variable_s)
ps_cpt = {
(0, 0) : 0.5,
(0, 1) : 0.5,
(1, 0) : 0.9,
(1, 1) : 0.1
}
pot_s = (ps_vars, ps_cpt)
pw_vars = (variable_r, variable_s, variable_w)
pw_cpt = {
(0, 0, 0): 1,
(0, 0, 1): 0,
(0, 1, 0): 0.1,
(0, 1, 1): 0.9,
(1, 0, 0): 0.1,
(1, 0, 1): 0.9,
(1, 1, 0): 0.01,
(1, 1, 1): 0.99
}
pot_w = (pw_vars, pw_cpt)
def get_var_names(var_list):
    """Return the name (first element) of every (name, cardinality) pair."""
    return [entry[0] for entry in var_list]
def get_var_vals(var_list):
    """Return the cardinality (second element) of every (name, cardinality) pair."""
    return [entry[1] for entry in var_list]
def init_cpt(variables):
    """Build a CPT dict with one zero-valued entry per joint assignment.

    :param variables: iterable of (name, cardinality) pairs
    :returns: dict mapping every index tuple (one index per variable,
        0 <= index < cardinality) to 0; for no variables the single key
        is the empty tuple, matching the original recursive version
    """
    from itertools import product
    cardinalities = [var[1] for var in variables]
    # itertools.product enumerates the full joint domain, replacing the
    # original hand-rolled recursion over a mutable index list
    return dict.fromkeys(product(*[range(c) for c in cardinalities]), 0)
# a number of operations are defined over potentials
# one is marginalization wrt a subset of variables
def marg(pot, subset, project=False):
    """Marginalize a potential onto a subset of its variables.

    :param pot: (variables, cpt) pair
    :param subset: the variables to KEEP (in the given order) when project
        is True; otherwise the variables to sum out
    :param project: selects the interpretation of `subset` above
    :returns: a new (variables, cpt) potential over the kept variables

    NOTE(review): when project is False the kept-variable order comes from
    ``tuple(set(...))`` and is unspecified — confirm callers don't rely on
    a particular ordering.
    """
    if not project:
        # keep everything except the summed-out variables
        subset = tuple(set(pot[0]) - set(subset))
    else:
        subset = tuple(subset)
    num_vars = len(pot[0])
    # ind[i] == 1 iff variable i of the input potential is kept
    ind = [0] * num_vars
    for i in range(num_vars):
        if pot[0][i] in subset:
            ind[i] = 1
    assert set(subset).issubset(pot[0])
    p2_cpt = init_cpt(subset)
    # accumulate probabilities over the dropped variables
    for indices, p in pot[1].items():  # .items() works on Python 2 and 3; iteritems() is 2-only
        indices2 = tuple(indices[i] for i in range(num_vars) if ind[i])
        p2_cpt[indices2] += p
    return (subset, p2_cpt)
# another one is factor multiplication
def mult(pot1, pot2):
    """Pointwise product of two potentials (factor multiplication).

    :param pot1: (variables, cpt) pair
    :param pot2: (variables, cpt) pair
    :returns: a potential over the union of both variable sets whose cpt
        entry for each joint assignment is the product of the two input
        cpt entries on the matching sub-assignments

    NOTE(review): the output variable order comes from ``tuple(set(...))``
    and is unspecified — confirm callers don't rely on it.
    """
    from itertools import product
    p3_vars = tuple(set(pot1[0]).union(set(pot2[0])))
    num_vars = len(p3_vars)
    # indN[i] == 1 iff output variable i occurs in potN
    ind1 = [0] * num_vars
    ind2 = [0] * num_vars
    for i in range(num_vars):
        if p3_vars[i] in pot1[0]:
            ind1[i] = 1
        if p3_vars[i] in pot2[0]:
            ind2[i] = 1
    p3_cpt = {}
    # enumerate the full joint domain directly, instead of pre-zeroing a cpt
    # with init_cpt and rewriting it while iterating (the old iteritems()
    # call was also Python-2-only)
    for indices in product(*[range(v[1]) for v in p3_vars]):
        indices1 = tuple(indices[i] for i in range(num_vars) if ind1[i])
        indices2 = tuple(indices[i] for i in range(num_vars) if ind2[i])
        p3_cpt[indices] = pot1[1][indices1] * pot2[1][indices2]
    return (p3_vars, p3_cpt)
print mult(pot_c, pot_w)
print marg(pot_w, (variable_r, variable_s), project=False)
def var_elim(var_set, pot_list):
    """ the variable elimination algorithm
    :param var_set: the set of query (remaining) variables
    :param pot_set: a list of potentials
    :returns: a set of potentials
    """
    # NOTE(review): init_vars is accumulated but never used afterwards
    init_vars = set()
    pots = []
    for pot in pot_list:
        init_vars = init_vars.union(pot[0])
    for pot in pot_list:
        # NOTE(review): the deepcopy is discarded whenever marg() runs below,
        # since marg builds a fresh potential from the original `pot`
        pot2 = copy.deepcopy(pot)
        current_vars = set(pot2[0])
        if not current_vars.issubset(var_set):
            # project the potential onto the query variables it mentions
            rem = var_set.intersection(current_vars)
            pot2 = marg(pot, rem, project=True)
        pots.append(pot2)
    return pots
def bucket_elim(var_list, pot_list):
    """ the bucket elimination algorithm
    :param var_list: the ordered list of buckets (variables to eliminate)
    :param pot_list: a list of potentials
    :returns: a list of factors over the remaining variables
    """
    # distribute each potential into the first bucket (in elimination order)
    # whose variable it mentions
    buckets = {}
    for var in var_list:
        buckets[var] = []
    pot_ind = [True] * len(pot_list)
    for var in var_list:
        for i in range(len(pot_list)):
            if var in pot_list[i][0] and pot_ind[i]:
                buckets[var].append(pot_list[i])
                pot_ind[i] = False
    # remember that there might be remaining potentials
    # (BUGFIX: was `pot[i]`, indexing the stale loop variable instead of pot_list)
    rest = [pot_list[i] for i in range(len(pot_list)) if pot_ind[i]]
    # the elimination phase
    for i in range(len(var_list)):
        current_var = var_list[i]
        current_bucket = buckets[current_var]
        if not current_bucket:
            continue  # no potential mentions this variable; nothing to eliminate
        # multiply the factors
        bucket_mult = current_bucket[0]
        for j in range(1, len(current_bucket)):
            bucket_mult = mult(bucket_mult, current_bucket[j])
        # marginalize out the current variable
        pot_m = marg(bucket_mult, (current_var,))
        # now move this to another bucket or rest
        found = False
        for j in range(i + 1, len(var_list)):
            if var_list[j] in pot_m[0]:
                buckets[var_list[j]].append(pot_m)
                found = True
                # BUGFIX: place the marginal in exactly one (the first) later
                # bucket; appending to every matching bucket multiplies the
                # factor in multiple times and corrupts the result
                break
        if not found:
            rest.append(pot_m)
    return rest
p_list = [pot_c, pot_r, pot_s]
v_set = {variable_c, variable_r}
p = var_elim(v_set, p_list)
print (p) | 0.231354 | 0.397997 |
import logging
import warnings
INFO = 25
DETAILED_INFO = 20
try:
import mpi4py
MPISIZE = mpi4py.MPI.COMM_WORLD.Get_size()
MPIRANK = mpi4py.MPI.COMM_WORLD.Get_rank()
USING_MPI = MPISIZE > 1
except (ImportError, AttributeError):
USING_MPI = False
def configure_logging(verbosity="standard", module=False, timestamp=False,
stats_file=None, logfile=None):
"""Configuration of Bingo logging
Parameters
----------
verbosity : str or int
verbosity options are "quiet", "standard", "detailed", "debug", or an
integer (0 - 100) that corresponds to typical python log level.
module : bool
whether to show the module name on logging output. Default False
timestamp :
whether to show a time stamp on logging output. Default False
stats_file : str
(optional) file name for evolution statistics to be logged to
logfile : str
(optional) file name for a copy of the log to be saved
"""
level = _get_log_level_from_verbosity(verbosity)
root_logger = logging.getLogger()
root_logger.setLevel(level)
root_logger.handlers=[] # remove current handlers
console_handler = _make_console_handler(level, module, timestamp)
root_logger.addHandler(console_handler)
if logfile is not None:
logfile_handler = _make_logfile_handler(logfile, level, module,
timestamp)
root_logger.addHandler(logfile_handler)
if stats_file is not None:
stats_file_handler = _make_stats_file_handler(stats_file)
root_logger.addHandler(stats_file_handler)
def _make_console_handler(level, module, timestamp):
console_handler = logging.StreamHandler()
console_handler.setLevel(level)
format_string = _get_console_format_string(module, timestamp)
formatter = logging.Formatter(format_string)
console_handler.setFormatter(formatter)
console_handler.addFilter(StatsFilter(filter_out=True))
console_handler.addFilter(MpiFilter())
return console_handler
def _make_logfile_handler(filename, level, module, timestamp):
file_handler = logging.FileHandler(filename)
file_handler.setLevel(level)
format_string = _get_console_format_string(module, timestamp)
formatter = logging.Formatter(format_string)
file_handler.setFormatter(formatter)
file_handler.addFilter(StatsFilter(filter_out=True))
file_handler.addFilter(MpiFilter())
return file_handler
def _get_log_level_from_verbosity(verbosity):
verbosity_map = {"quiet": logging.WARNING,
"standard": INFO,
"detailed": DETAILED_INFO,
"debug": logging.DEBUG}
if isinstance(verbosity, str):
return verbosity_map[verbosity]
if isinstance(verbosity, int):
return verbosity
warnings.warn("Unrecognized verbosity level provided. "
"Using standard verbosity.")
return INFO
def _get_console_format_string(module, timestamp):
format_string = "%(message)s"
if module:
format_string = "%(module)s\t" + format_string
if timestamp:
format_string = "%(asctime)s\t" + format_string
return format_string
def _make_stats_file_handler(stats_file):
file_handler = logging.FileHandler(stats_file)
file_handler.setLevel(INFO)
formatter = logging.Formatter("%(message)s")
file_handler.setFormatter(formatter)
file_handler.addFilter(StatsFilter(filter_out=False))
file_handler.addFilter(MpiFilter())
return file_handler
class MpiFilter(logging.Filter):
"""
This is a filter which filters out messages from auxiliary processes at the
INFO level
Parameters
----------
add_proc_number : bool (optional)
Add processor identifier to multi-processor log messages. default True.
"""
def __init__(self, add_proc_number=True):
super().__init__()
self._add_proc_number = add_proc_number
def filter(self, record):
if USING_MPI:
if record.levelno == INFO:
return MPIRANK == 0
if self._add_proc_number:
record.msg = "{}>\t".format(MPIRANK) + record.msg
return True
class StatsFilter(logging.Filter):
"""This is a filter which filters based on the identifier "<stats>" at the
beginning of a log message
Parameters
----------
filter_out : bool
Whether to filter-out or filter-in stats messages
"""
def __init__(self, filter_out):
super().__init__()
self._filter_out = filter_out
def filter(self, record):
if "stats" in record.__dict__:
return not self._filter_out == record.stats
return self._filter_out | bingo/util/log.py | import logging
import warnings
INFO = 25
DETAILED_INFO = 20
# Detect whether we are running under MPI with more than one process.
try:
    # BUGFIX: `import mpi4py` alone does not load the MPI submodule, so the
    # attribute access below always raised AttributeError and MPI mode could
    # never activate; importing mpi4py.MPI loads it properly.
    import mpi4py.MPI
    MPISIZE = mpi4py.MPI.COMM_WORLD.Get_size()
    MPIRANK = mpi4py.MPI.COMM_WORLD.Get_rank()
    USING_MPI = MPISIZE > 1
except (ImportError, AttributeError):
    # mpi4py unavailable: fall back to single-process behavior
    USING_MPI = False
def configure_logging(verbosity="standard", module=False, timestamp=False,
                      stats_file=None, logfile=None):
    """Configuration of Bingo logging

    Installs a console handler on the root logger (replacing any existing
    handlers) and optionally a log-file copy and an evolution-stats file.

    Parameters
    ----------
    verbosity : str or int
        verbosity options are "quiet", "standard", "detailed", "debug", or an
        integer (0 - 100) that corresponds to typical python log level.
    module : bool
        whether to show the module name on logging output. Default False
    timestamp :
        whether to show a time stamp on logging output. Default False
    stats_file : str
        (optional) file name for evolution statistics to be logged to
    logfile : str
        (optional) file name for a copy of the log to be saved
    """
    level = _get_log_level_from_verbosity(verbosity)
    root = logging.getLogger()
    root.setLevel(level)
    root.handlers = []  # drop any previously installed handlers
    root.addHandler(_make_console_handler(level, module, timestamp))
    if logfile is not None:
        root.addHandler(
            _make_logfile_handler(logfile, level, module, timestamp))
    if stats_file is not None:
        root.addHandler(_make_stats_file_handler(stats_file))
def _make_console_handler(level, module, timestamp):
    """Build the console stream handler (stats records filtered out)."""
    handler = logging.StreamHandler()
    handler.setLevel(level)
    handler.setFormatter(
        logging.Formatter(_get_console_format_string(module, timestamp)))
    handler.addFilter(StatsFilter(filter_out=True))
    handler.addFilter(MpiFilter())
    return handler
def _make_logfile_handler(filename, level, module, timestamp):
    """Build a file handler mirroring the console output (stats excluded)."""
    handler = logging.FileHandler(filename)
    handler.setLevel(level)
    handler.setFormatter(
        logging.Formatter(_get_console_format_string(module, timestamp)))
    handler.addFilter(StatsFilter(filter_out=True))
    handler.addFilter(MpiFilter())
    return handler
def _get_log_level_from_verbosity(verbosity):
    """Translate a verbosity name or numeric level into a logging level.

    Recognized strings: "quiet", "standard", "detailed", "debug".  Integers
    pass through unchanged.  Anything else — including an unrecognized
    string, which previously raised a bare KeyError — warns and falls back
    to standard verbosity (INFO).
    """
    verbosity_map = {"quiet": logging.WARNING,
                     "standard": INFO,
                     "detailed": DETAILED_INFO,
                     "debug": logging.DEBUG}
    if isinstance(verbosity, str):
        try:
            return verbosity_map[verbosity]
        except KeyError:
            pass  # unknown name: fall through to the warning below
    elif isinstance(verbosity, int):
        return verbosity
    warnings.warn("Unrecognized verbosity level provided. "
                  "Using standard verbosity.")
    return INFO
def _get_console_format_string(module, timestamp):
format_string = "%(message)s"
if module:
format_string = "%(module)s\t" + format_string
if timestamp:
format_string = "%(asctime)s\t" + format_string
return format_string
def _make_stats_file_handler(stats_file):
    """Build a file handler that records only stats messages, bare format."""
    handler = logging.FileHandler(stats_file)
    handler.setLevel(INFO)
    handler.setFormatter(logging.Formatter("%(message)s"))
    handler.addFilter(StatsFilter(filter_out=False))
    handler.addFilter(MpiFilter())
    return handler
class MpiFilter(logging.Filter):
    """
    This is a filter which filters out messages from auxiliary processes at the
    INFO level
    Parameters
    ----------
    add_proc_number : bool (optional)
        Add processor identifier to multi-processor log messages. default True.
    """
    def __init__(self, add_proc_number=True):
        super().__init__()
        self._add_proc_number = add_proc_number
    def filter(self, record):
        """Decide whether this MPI rank should emit `record`."""
        if USING_MPI:
            if record.levelno == INFO:
                # INFO-level messages come from the master rank only
                return MPIRANK == 0
            if self._add_proc_number:
                # tag other levels with the originating rank, e.g. "3>\t..."
                record.msg = "{}>\t".format(MPIRANK) + record.msg
        return True
class StatsFilter(logging.Filter):
    """This is a filter which filters based on the identifier "<stats>" at the
    beginning of a log message
    Parameters
    ----------
    filter_out : bool
        Whether to filter-out or filter-in stats messages
    """
    def __init__(self, filter_out):
        super().__init__()
        self._filter_out = filter_out
    def filter(self, record):
        # records logged with extra={"stats": ...} carry a `stats` attribute
        if "stats" in record.__dict__:
            # keep the record when its stats flag disagrees with filter_out
            return not self._filter_out == record.stats
return self._filter_out | 0.627951 | 0.127462 |
from py_tests_common import *
def TypeofOperatorDeclaration_Test0():
	# typeof over an integer literal compiles inside a type alias.
	c_program_text= """
		type T= typeof(0);
	"""
	tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test1():
	# typeof over an arithmetic expression compiles inside a type alias.
	c_program_text= """
		type T= typeof( 55 * 88 );
	"""
	tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test2():
	# typeof usable as the element type of an array type.
	c_program_text= """
		type T= [ typeof( 0.25 ), 64 ];
	"""
	tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test3():
	# typeof over a string literal compiles inside a type alias.
	c_program_text= """
		type T= typeof( "str" );
	"""
	tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test5():
	# typeof over a call to a (prototype-only) function compiles.
	c_program_text= """
		fn Foo() : i32;
		type T= typeof( Foo() );
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test0():
	# A variable declared via typeof(Baz()) gets the callee's return type (i32).
	c_program_text= """
		fn Baz() : i32 { return 666; }
		fn Foo()
		{
			var typeof( Baz() ) x= Baz(); // Type will be "i32"
			var i32 x_copy= x;
		}
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test1():
	# typeof strips the reference modifier of a function's return type.
	c_program_text= """
		fn Pass( f64& x ) : f64& { return x; }
		fn Foo()
		{
			var f64 x= 0.52;
			var typeof( Pass(x) ) x_copy= x; // Type will be "f64", function reference modifier ignored
		}
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test2():
	# typeof usable in a global type alias; the alias works for declarations.
	c_program_text= """
		type PiType= typeof(3.14f); // Typeof for global typedef
		var PiType e= 2.718281828f;
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test3():
	# typeof usable as a function return type.
	c_program_text= """
		struct S {}
		var S constexpr s{};
		fn GetS() : typeof(s)& // Typeof for function return type
		{
			return s;
		}
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test4():
	# typeof usable as a function parameter type.
	c_program_text= """
		struct S {}
		var S constexpr s{};
		fn CopyS( typeof(s) mut arg ) : S // Typeof for function argument type
		{
			return move(arg);
		}
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test5():
	# typeof usable as a class field type, referring to a class constant.
	c_program_text= """
		struct S
		{
			auto constexpr SomeConstant= "8"c8;
			typeof(SomeConstant) field; // Typeof for class field
		}
	"""
	tests_lib.build_program( c_program_text )
def Typeof_Test6():
	# typeof works for a string-literal type and inside typeinfo</ />.
	c_program_text= """
		fn Foo()
		{
			auto &constexpr str= "Some String";
			var typeof(str) str_storage= zero_init; // Typeof for string type
			static_assert( typeinfo</ typeof(str) />.element_count == size_type(11) ); // Typeof for typeinfo
		}
	"""
	tests_lib.build_program( c_program_text )
def TypeofHasNoEffects_Test0():
	# The expression inside typeof is only type-evaluated, never executed:
	# Inc(x) must not actually increment x at runtime.
	c_program_text= """
		fn Inc( i32 &mut x ) : i32 { ++x; return x; }
		fn Foo()
		{
			var i32 mut x= 666;
			var typeof( Inc(x) ) x_copy= x; // Only type evalueated for expression 'Inc(x)', no actual code generated.
			halt if( x != 666 );
			halt if( x_copy != 666 );
		}
	"""
	tests_lib.build_program( c_program_text )
	tests_lib.run_function( "_Z3Foov" )
def Typeof_ChecksExpression_Test0():
c_program_text= """
type T= typeof( CallUnknownFunction() );
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "NameNotFound" )
assert( errors_list[0].src_loc.line == 2 ) | source/tests/py_tests/typeof_test.py | from py_tests_common import *
def TypeofOperatorDeclaration_Test0():
c_program_text= """
type T= typeof(0);
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test1():
c_program_text= """
type T= typeof( 55 * 88 );
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test2():
c_program_text= """
type T= [ typeof( 0.25 ), 64 ];
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test3():
c_program_text= """
type T= typeof( "str" );
"""
tests_lib.build_program( c_program_text )
def TypeofOperatorDeclaration_Test5():
c_program_text= """
fn Foo() : i32;
type T= typeof( Foo() );
"""
tests_lib.build_program( c_program_text )
def Typeof_Test0():
c_program_text= """
fn Baz() : i32 { return 666; }
fn Foo()
{
var typeof( Baz() ) x= Baz(); // Type will be "i32"
var i32 x_copy= x;
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test1():
c_program_text= """
fn Pass( f64& x ) : f64& { return x; }
fn Foo()
{
var f64 x= 0.52;
var typeof( Pass(x) ) x_copy= x; // Type will be "f64", function reference modifier ignored
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test2():
c_program_text= """
type PiType= typeof(3.14f); // Typeof for global typedef
var PiType e= 2.718281828f;
"""
tests_lib.build_program( c_program_text )
def Typeof_Test3():
c_program_text= """
struct S {}
var S constexpr s{};
fn GetS() : typeof(s)& // Typeof for function return type
{
return s;
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test4():
c_program_text= """
struct S {}
var S constexpr s{};
fn CopyS( typeof(s) mut arg ) : S // Typeof for function argument type
{
return move(arg);
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test5():
c_program_text= """
struct S
{
auto constexpr SomeConstant= "8"c8;
typeof(SomeConstant) field; // Typeof for class field
}
"""
tests_lib.build_program( c_program_text )
def Typeof_Test6():
c_program_text= """
fn Foo()
{
auto &constexpr str= "Some String";
var typeof(str) str_storage= zero_init; // Typeof for string type
static_assert( typeinfo</ typeof(str) />.element_count == size_type(11) ); // Typeof for typeinfo
}
"""
tests_lib.build_program( c_program_text )
def TypeofHasNoEffects_Test0():
c_program_text= """
fn Inc( i32 &mut x ) : i32 { ++x; return x; }
fn Foo()
{
var i32 mut x= 666;
var typeof( Inc(x) ) x_copy= x; // Only type evalueated for expression 'Inc(x)', no actual code generated.
halt if( x != 666 );
halt if( x_copy != 666 );
}
"""
tests_lib.build_program( c_program_text )
tests_lib.run_function( "_Z3Foov" )
def Typeof_ChecksExpression_Test0():
c_program_text= """
type T= typeof( CallUnknownFunction() );
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "NameNotFound" )
assert( errors_list[0].src_loc.line == 2 ) | 0.47244 | 0.206594 |
import nose.tools
import dcase_util
from dcase_util.containers import ListDictContainer
from nose.tools import *
import tempfile
import os
def test_container():
data = ListDictContainer([
{
'key1': 100,
'key2': 400,
},
{
'key1': 200,
'key2': 300,
},
{
'key1': 300,
'key2': 200,
},
{
'key1': 400,
'key2': 100,
},
])
column = data.get_field(field_name='key1')
nose.tools.eq_(column, [100, 200, 300, 400])
column = data.get_field(field_name='key2')
nose.tools.eq_(column, [400, 300, 200, 100])
nose.tools.eq_(data.search(key='key1', value=100), {'key1': 100, 'key2': 400})
nose.tools.eq_(data.search(key='key1', value=123), None)
def test_save():
# Empty content
ListDictContainer({}).save(filename=os.path.join(tempfile.gettempdir(), 'saved.yaml'))
# Content
data = [
{
'key1': 100,
'key2': 402.2,
},
{
'key1': 200,
'key2': 302.2,
},
{
'key1': 300,
'key2': 202.3,
},
{
'key1': 400,
'key2': 101.2,
},
]
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.yaml')).save().load()
nose.tools.assert_list_equal(d, data)
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.csv')).save().load(
fields=['key1', 'key2']
)
nose.tools.assert_list_equal(d, data)
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.csv')).save(
fields=['key1', 'key2']
).load(
fields=['key1', 'key2']
)
nose.tools.assert_list_equal(d, data)
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.cpickle')).save().load()
nose.tools.assert_list_equal(d, data)
@raises(IOError)
def test_load_not_found2():
with dcase_util.utils.DisableLogger():
ListDictContainer().load(filename=os.path.join(tempfile.gettempdir(), 'wrong.txt'))
@raises(IOError)
def test_load_wrong_type():
with dcase_util.utils.DisableLogger():
ListDictContainer().load(filename=os.path.join(tempfile.gettempdir(), 'wrong.cpickle'))
@raises(IOError)
def test_load_wrong_type2():
with dcase_util.utils.DisableLogger():
ListDictContainer().load(filename=os.path.join(tempfile.gettempdir(), 'wrong.abc')) | tests/containers/test_ListDictContainer.py |
import nose.tools
import dcase_util
from dcase_util.containers import ListDictContainer
from nose.tools import *
import tempfile
import os
def test_container():
data = ListDictContainer([
{
'key1': 100,
'key2': 400,
},
{
'key1': 200,
'key2': 300,
},
{
'key1': 300,
'key2': 200,
},
{
'key1': 400,
'key2': 100,
},
])
column = data.get_field(field_name='key1')
nose.tools.eq_(column, [100, 200, 300, 400])
column = data.get_field(field_name='key2')
nose.tools.eq_(column, [400, 300, 200, 100])
nose.tools.eq_(data.search(key='key1', value=100), {'key1': 100, 'key2': 400})
nose.tools.eq_(data.search(key='key1', value=123), None)
def test_save():
# Empty content
ListDictContainer({}).save(filename=os.path.join(tempfile.gettempdir(), 'saved.yaml'))
# Content
data = [
{
'key1': 100,
'key2': 402.2,
},
{
'key1': 200,
'key2': 302.2,
},
{
'key1': 300,
'key2': 202.3,
},
{
'key1': 400,
'key2': 101.2,
},
]
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.yaml')).save().load()
nose.tools.assert_list_equal(d, data)
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.csv')).save().load(
fields=['key1', 'key2']
)
nose.tools.assert_list_equal(d, data)
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.csv')).save(
fields=['key1', 'key2']
).load(
fields=['key1', 'key2']
)
nose.tools.assert_list_equal(d, data)
d = ListDictContainer(data, filename=os.path.join(tempfile.gettempdir(), 'saved.cpickle')).save().load()
nose.tools.assert_list_equal(d, data)
@raises(IOError)
def test_load_not_found2():
with dcase_util.utils.DisableLogger():
ListDictContainer().load(filename=os.path.join(tempfile.gettempdir(), 'wrong.txt'))
@raises(IOError)
def test_load_wrong_type():
with dcase_util.utils.DisableLogger():
ListDictContainer().load(filename=os.path.join(tempfile.gettempdir(), 'wrong.cpickle'))
@raises(IOError)
def test_load_wrong_type2():
with dcase_util.utils.DisableLogger():
ListDictContainer().load(filename=os.path.join(tempfile.gettempdir(), 'wrong.abc')) | 0.327023 | 0.33158 |
import pandas as pd
import quandl, math, datetime
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot') # specifying the type of plotting chart to use
df = quandl.get('WIKI/GOOGL') # add stock to right of WIKI
# Adding columns to stock value charts
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
df.fillna(-99999, inplace=True)
forecast_out = int(math.ceil(0.01 * len(df)))
df['label'] = df[forecast_col].shift(-forecast_out)
X = np.array(df.drop(['label'], 1)) # finding out X and Y values
X = preprocessing.scale(X)
X = X[:-forecast_out]
X_lately = X[-forecast_out:]
df.dropna(inplace=True)
Y = np.array(df['label'])
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.2)
clf = LinearRegression(n_jobs=-1) # figuring out the linear regression within the data set
clf.fit(X_train, Y_train)
accuracy = clf.score(X_test, Y_test) # getting the confidence of the linear regression
forecast_set = clf.predict(X_lately) # predicting the future prices
print(df.head()) # prints the prices of the stock when it first came out
print()
print('--------------------------------------------')
print()
print(df.tail()) # prints the most recent prices for the stock
print()
print('--------------------------------------------')
print()
print('Predicted Prices over next ', forecast_out, ' days') # Displays timeline of predicted prices
print()
print(forecast_set) # Displays the future prices
print()
print('--------------------------------------------')
print()
print('Accuracy: ', accuracy) # Displays the confidence of the linear regression
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400 # number of seconds in one day
next_unix = last_unix + one_day
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix += one_day
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i] # loops through columns to get numbers to graph
# plots and create the graph with all numbers
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=10) # specifies the location of the key
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.show() | app.py | import pandas as pd
import quandl, math, datetime
import numpy as np
from sklearn import preprocessing, cross_validation, svm
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
from matplotlib import style
style.use('ggplot') # specifying the type of plotting chart to use
df = quandl.get('WIKI/GOOGL') # add stock to right of WIKI
# Adding columns to stock value charts
df = df[['Adj. Open', 'Adj. High', 'Adj. Low', 'Adj. Close', 'Adj. Volume']]
df['HL_PCT'] = (df['Adj. High'] - df['Adj. Close']) / df['Adj. Close'] * 100
df['PCT_Change'] = (df['Adj. Close'] - df['Adj. Open']) / df['Adj. Open'] * 100
df = df[['Adj. Close', 'HL_PCT', 'PCT_Change', 'Adj. Volume']]
forecast_col = 'Adj. Close'
df.fillna(-99999, inplace=True)
forecast_out = int(math.ceil(0.01 * len(df)))
df['label'] = df[forecast_col].shift(-forecast_out)
X = np.array(df.drop(['label'], 1)) # finding out X and Y values
X = preprocessing.scale(X)
X = X[:-forecast_out]
X_lately = X[-forecast_out:]
df.dropna(inplace=True)
Y = np.array(df['label'])
X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.2)
clf = LinearRegression(n_jobs=-1) # figuring out the linear regression within the data set
clf.fit(X_train, Y_train)
accuracy = clf.score(X_test, Y_test) # getting the confidence of the linear regression
forecast_set = clf.predict(X_lately) # predicting the future prices
print(df.head()) # prints the prices of the stock when it first came out
print()
print('--------------------------------------------')
print()
print(df.tail()) # prints the most recent prices for the stock
print()
print('--------------------------------------------')
print()
print('Predicted Prices over next ', forecast_out, ' days') # Displays timeline of predicted prices
print()
print(forecast_set) # Displays the future prices
print()
print('--------------------------------------------')
print()
print('Accuracy: ', accuracy) # Displays the confidence of the linear regression
df['Forecast'] = np.nan
last_date = df.iloc[-1].name
last_unix = last_date.timestamp()
one_day = 86400 # number of seconds in one day
next_unix = last_unix + one_day
for i in forecast_set:
next_date = datetime.datetime.fromtimestamp(next_unix)
next_unix += one_day
df.loc[next_date] = [np.nan for _ in range(len(df.columns)-1)] + [i] # loops through columns to get numbers to graph
# plots and create the graph with all numbers
df['Adj. Close'].plot()
df['Forecast'].plot()
plt.legend(loc=10) # specifies the location of the key
plt.xlabel('Date')
plt.ylabel('Stock Price')
plt.show() | 0.636805 | 0.471102 |
import torch
import torch.nn as nn
class MultiEmbeddings(nn.Module):
def __init__(self, *variable_params):
# example: *[(name, num_embeddings, embedding_dim), ... ]
super().__init__()
self.params = variable_params
self.embeddings = nn.ModuleDict({
name: nn.Embedding(s, e) for (name, s, e) in variable_params
})
def forward(self, input):
return torch.cat([self.embeddings[name](input[name]) for (name, _, _) in self.params], dim=2)
class Empty(nn.Module):
def __init__(self, size):
self.size = size
super().__init__()
def forward(self, x):
return x
def extra_repr(self):
return f"{self.size}"
class Inputs(nn.Module):
def __init__(self, inputs_config=None):
super().__init__()
self.inputs_config = inputs_config
if inputs_config is not None:
self.numerical = inputs_config.get("numerical")
self.categorical = inputs_config.get("categorical")
self.output_size = 0
if self.categorical is not None:
self.categorical_inputs = MultiEmbeddings(*self.categorical)
self.output_size += sum([i[2] for i in self.categorical])
if self.numerical is not None:
self.numerical_inputs = nn.ModuleDict({name: Empty(size) for (name, size) in self.numerical})
self.output_size += sum([i[1] for i in self.numerical])
else:
self.output_size = 0
def forward(self, feed_dict):
# batch, seq, N
if self.inputs_config is not None:
outputs = []
if self.categorical is not None:
outputs.append(self.categorical_inputs(feed_dict))
if self.numerical is not None:
for (name, _) in self.numerical:
outputs.append(self.numerical_inputs[name](feed_dict[name]))
return torch.cat(outputs, dim=2)
else:
return None | deepseries/model/seq2seq/utils.py | import torch
import torch.nn as nn
class MultiEmbeddings(nn.Module):
def __init__(self, *variable_params):
# example: *[(name, num_embeddings, embedding_dim), ... ]
super().__init__()
self.params = variable_params
self.embeddings = nn.ModuleDict({
name: nn.Embedding(s, e) for (name, s, e) in variable_params
})
def forward(self, input):
return torch.cat([self.embeddings[name](input[name]) for (name, _, _) in self.params], dim=2)
class Empty(nn.Module):
def __init__(self, size):
self.size = size
super().__init__()
def forward(self, x):
return x
def extra_repr(self):
return f"{self.size}"
class Inputs(nn.Module):
def __init__(self, inputs_config=None):
super().__init__()
self.inputs_config = inputs_config
if inputs_config is not None:
self.numerical = inputs_config.get("numerical")
self.categorical = inputs_config.get("categorical")
self.output_size = 0
if self.categorical is not None:
self.categorical_inputs = MultiEmbeddings(*self.categorical)
self.output_size += sum([i[2] for i in self.categorical])
if self.numerical is not None:
self.numerical_inputs = nn.ModuleDict({name: Empty(size) for (name, size) in self.numerical})
self.output_size += sum([i[1] for i in self.numerical])
else:
self.output_size = 0
def forward(self, feed_dict):
# batch, seq, N
if self.inputs_config is not None:
outputs = []
if self.categorical is not None:
outputs.append(self.categorical_inputs(feed_dict))
if self.numerical is not None:
for (name, _) in self.numerical:
outputs.append(self.numerical_inputs[name](feed_dict[name]))
return torch.cat(outputs, dim=2)
else:
return None | 0.922474 | 0.325279 |
import sys
from os.path import exists as path_exists
from pyscaffold.api import create_project
from pyscaffold.cli import run
from pyscaffold.extensions.github_actions import GithubActions
def test_create_project_with_github_actions(tmpfolder):
# Given options with the GithubActions extension,
opts = dict(project_path="proj", extensions=[GithubActions()])
# when the project is created,
create_project(opts)
# then files from GithubActions extension should exist
assert path_exists("proj/.github/workflows/ci.yml")
def test_create_project_without_github_actions(tmpfolder):
# Given options without the GithubActions extension,
opts = dict(project_path="proj")
# when the project is created,
create_project(opts)
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
def test_cli_with_github_actions(tmpfolder):
# Given the command line with the GithubActions option,
sys.argv = ["pyscaffold", "--github-actions", "proj"]
# when pyscaffold runs,
run()
# then files from GithubActions and other extensions automatically added should
# exist
assert path_exists("proj/.github/workflows/ci.yml")
assert path_exists("proj/tox.ini")
assert path_exists("proj/.pre-commit-config.yaml")
def test_cli_with_github_actions_and_pretend(tmpfolder):
# Given the command line with the GithubActions and pretend options
sys.argv = ["pyscaffold", "--pretend", "--github-actions", "proj"]
# when pyscaffold runs,
run()
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
# (or the project itself)
assert not path_exists("proj")
def test_cli_without_github_actions(tmpfolder):
# Given the command line without the GithubActions option,
sys.argv = ["pyscaffold", "proj"]
# when pyscaffold runs,
run()
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml") | tests/extensions/test_github_actions.py | import sys
from os.path import exists as path_exists
from pyscaffold.api import create_project
from pyscaffold.cli import run
from pyscaffold.extensions.github_actions import GithubActions
def test_create_project_with_github_actions(tmpfolder):
# Given options with the GithubActions extension,
opts = dict(project_path="proj", extensions=[GithubActions()])
# when the project is created,
create_project(opts)
# then files from GithubActions extension should exist
assert path_exists("proj/.github/workflows/ci.yml")
def test_create_project_without_github_actions(tmpfolder):
# Given options without the GithubActions extension,
opts = dict(project_path="proj")
# when the project is created,
create_project(opts)
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
def test_cli_with_github_actions(tmpfolder):
# Given the command line with the GithubActions option,
sys.argv = ["pyscaffold", "--github-actions", "proj"]
# when pyscaffold runs,
run()
# then files from GithubActions and other extensions automatically added should
# exist
assert path_exists("proj/.github/workflows/ci.yml")
assert path_exists("proj/tox.ini")
assert path_exists("proj/.pre-commit-config.yaml")
def test_cli_with_github_actions_and_pretend(tmpfolder):
# Given the command line with the GithubActions and pretend options
sys.argv = ["pyscaffold", "--pretend", "--github-actions", "proj"]
# when pyscaffold runs,
run()
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml")
# (or the project itself)
assert not path_exists("proj")
def test_cli_without_github_actions(tmpfolder):
# Given the command line without the GithubActions option,
sys.argv = ["pyscaffold", "proj"]
# when pyscaffold runs,
run()
# then GithubActions files should not exist
assert not path_exists("proj/.github/workflows/ci.yml") | 0.236693 | 0.285447 |
from exo3 import lireCSV # fonction de lecture et de construction
# de la liste de dictionnaires
# fonctions d'affichage
from exo4 import printTableformatee, printTableformateeDeco
########################################################################
def printTitre( texte ) :
l = len(texte)
print(" " * 10 + "╔"+ "═" * (l+4) + "╗")
print(" " * 10 + "║ ",texte,"║")
print(" " * 10 + "╚"+ "═" * (l+4) + "╝")
########################################################################
# exo5 1 : compter les enregistrements dont le code postal est
# inférieur à codePostal
########################################################################
def exo5_1 (table) :
printTitre("Exercice 5.1 : statistiques sur le code postal")
codePostal = ""
while codePostal == "" :
try : # essaye ce qui suit
codePostal = int(input("Donner un nombre à 5 chiffres de code postal :"))
print ("\n\n\nRequète : recherche des gens qui habitent ",
"dans un département dont le code postal est ",
"inférieur à ", str(codePostal) )
except : # est exécuté si l'essai a conduit à un retour d'erreur
print("SVP rentrer un nombre valide de code Postal")
reponse = [] #initialisation de la liste des réponses attendues
####################################################################
for enregistrement in table :
if int(enregistrement["code Postal"]) < 30000 :
reponse.append(enregistrement)
####################################################################
nRep = len (reponse) # nombre de réponses
if nRep == 0 :
print("Il n'y a aucun enregistrement qui correspond à la ",
"requète")
else :
print ("Il y a " + str(nRep) + " réponses : ")
printTableformateeDeco (reponse)
########################################################################
# exo5 2 : compter les fiches ou enregistrements dont le numéro de
# dossier est inférieur à numDossier
########################################################################
def exo5_2 (table) :
printTitre("Exercice 5.2 : statistiques sur le numéro de dossier")
numDossier = 0
while numDossier <= 0 or numDossier > 9999 :
try : # essaye ce qui suit
numDossier = int(input("Donner un numéro de dossier à 4 chiffres :"))
except : # est exécuté si l'essai a conduit à un retour d'erreur
print("SVP rentrer un nombre à 4 chiffres !")
print("Requète : pourcentage d'enregistrements dont le numéro de ",
"dossier est plus grand que ", numDossier ," inclus")
reponse = [] #initialisation de la liste des réponses attendues
####################################################################
for enregistrement in table :
if int(enregistrement["Dossier num"]) >= numDossier :
reponse.append(enregistrement)
####################################################################
nRep = len (reponse) # nombre de réponses
if nRep == 0 :
print("Il n'y a aucun enregistrement qui correspond à la ",
"requète")
else :
print("Il y a ",nRep / len(table) * 100, "% des enregistrements",
" qui correspondent à la requète.")
printTitre("Table des fiches dont le numéro de dossier est plus "+
"grand que "+str( numDossier)+ " inclus")
printTableformateeDeco (reponse)
########################################################################
# exo5 3 : pourcentage d'enregistrements dont le nom commence par lettre
########################################################################
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def exo5_3 (table) :
texte =" Exercice 5.3 : pourcentage d'enregistrements dont le nom commence par une lettre à choisir :"
printTitre(texte)
lettre = ""
while lettre.lower() not in alphabet :
try : # essaye ce qui suit
lettre = input("Donner une lettre de l'alphabet :")
except : # est exécuté si l'essai a conduit à un retour d'erreur
print("Donner une seule lettre de l'alphabet !")
print("Requète : pourcentage d'enregistrements dont le nom ",
"commence par ", lettre)
reponse = [] #initialisation de la liste des réponses attendues
####################################################################
for enregistrement in table :
if enregistrement["Nom"][0].lower() == lettre :
reponse.append(enregistrement)
####################################################################
nRep = len (reponse) # nombre de réponses
if nRep == 0 :
print("Il n'y a aucun enregistrement qui correspond à la ",
"requète")
else :
print("Il y a ",nRep / len(table) * 100, "% des enregistrements",
" qui correspondent à la requète.")
printTitre("Table des fiches dont le nom commence par "+ lettre)
printTableformateeDeco (reponse)
########################################################################
def viderEcran () :
import os
#os.system('cls') # efface l'écran de la console cmd.exe sur windows
os.system('clear') # on linux / os x
print("Le fichier BDD.csv est lu et converti en liste de ",
"dictionnaires")
########################################################################
# les codes suivants sont des codes de test :
########################################################################
def main() :
"""
Fonction de test de lireCSV
"""
print("Efface l'écran : "+ 57 * "\n")
print("___________________________________________________________",
"____")
table = lireCSV("BDD.csv")
# test de la deuxième fonction d'affichage décoratif
printTableformateeDeco (table)
print("\nLa table contient ", len(table),
"lignes ou fiches ou enregistrements.")
printTitre("Choix de l'exercice à tester")
for i in range(1,4) :
print("- exercice n° 5.", i)
n = int(input("numéro :"))
if n == 1 : exo5_1 (table)
elif n == 2 : exo5_2 (table)
elif n == 3 : exo5_3 (table)
else : print("Dommage, vous ne savez pas lire !")
if __name__ == "__main__":
"""
Ne fonctionne que si c'est ce fichier qui est activé directement
La variable __name__ prend la valeur du fichier activé en premier.
"""
main() | programmeNSI/cours/exo/exo5.py | from exo3 import lireCSV # fonction de lecture et de construction
# de la liste de dictionnaires
# fonctions d'affichage
from exo4 import printTableformatee, printTableformateeDeco
########################################################################
def printTitre( texte ) :
l = len(texte)
print(" " * 10 + "╔"+ "═" * (l+4) + "╗")
print(" " * 10 + "║ ",texte,"║")
print(" " * 10 + "╚"+ "═" * (l+4) + "╝")
########################################################################
# exo5 1 : compter les enregistrements dont le code postal est
# inférieur à codePostal
########################################################################
def exo5_1 (table) :
printTitre("Exercice 5.1 : statistiques sur le code postal")
codePostal = ""
while codePostal == "" :
try : # essaye ce qui suit
codePostal = int(input("Donner un nombre à 5 chiffres de code postal :"))
print ("\n\n\nRequète : recherche des gens qui habitent ",
"dans un département dont le code postal est ",
"inférieur à ", str(codePostal) )
except : # est exécuté si l'essai a conduit à un retour d'erreur
print("SVP rentrer un nombre valide de code Postal")
reponse = [] #initialisation de la liste des réponses attendues
####################################################################
for enregistrement in table :
if int(enregistrement["code Postal"]) < 30000 :
reponse.append(enregistrement)
####################################################################
nRep = len (reponse) # nombre de réponses
if nRep == 0 :
print("Il n'y a aucun enregistrement qui correspond à la ",
"requète")
else :
print ("Il y a " + str(nRep) + " réponses : ")
printTableformateeDeco (reponse)
########################################################################
# exo5 2 : compter les fiches ou enregistrements dont le numéro de
# dossier est inférieur à numDossier
########################################################################
def exo5_2 (table) :
printTitre("Exercice 5.2 : statistiques sur le numéro de dossier")
numDossier = 0
while numDossier <= 0 or numDossier > 9999 :
try : # essaye ce qui suit
numDossier = int(input("Donner un numéro de dossier à 4 chiffres :"))
except : # est exécuté si l'essai a conduit à un retour d'erreur
print("SVP rentrer un nombre à 4 chiffres !")
print("Requète : pourcentage d'enregistrements dont le numéro de ",
"dossier est plus grand que ", numDossier ," inclus")
reponse = [] #initialisation de la liste des réponses attendues
####################################################################
for enregistrement in table :
if int(enregistrement["Dossier num"]) >= numDossier :
reponse.append(enregistrement)
####################################################################
nRep = len (reponse) # nombre de réponses
if nRep == 0 :
print("Il n'y a aucun enregistrement qui correspond à la ",
"requète")
else :
print("Il y a ",nRep / len(table) * 100, "% des enregistrements",
" qui correspondent à la requète.")
printTitre("Table des fiches dont le numéro de dossier est plus "+
"grand que "+str( numDossier)+ " inclus")
printTableformateeDeco (reponse)
########################################################################
# exo5 3 : pourcentage d'enregistrements dont le nom commence par lettre
########################################################################
alphabet = ['a','b','c','d','e','f','g','h','i','j','k','l','m', 'n',
'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def exo5_3 (table) :
texte =" Exercice 5.3 : pourcentage d'enregistrements dont le nom commence par une lettre à choisir :"
printTitre(texte)
lettre = ""
while lettre.lower() not in alphabet :
try : # essaye ce qui suit
lettre = input("Donner une lettre de l'alphabet :")
except : # est exécuté si l'essai a conduit à un retour d'erreur
print("Donner une seule lettre de l'alphabet !")
print("Requète : pourcentage d'enregistrements dont le nom ",
"commence par ", lettre)
reponse = [] #initialisation de la liste des réponses attendues
####################################################################
for enregistrement in table :
if enregistrement["Nom"][0].lower() == lettre :
reponse.append(enregistrement)
####################################################################
nRep = len (reponse) # nombre de réponses
if nRep == 0 :
print("Il n'y a aucun enregistrement qui correspond à la ",
"requète")
else :
print("Il y a ",nRep / len(table) * 100, "% des enregistrements",
" qui correspondent à la requète.")
printTitre("Table des fiches dont le nom commence par "+ lettre)
printTableformateeDeco (reponse)
########################################################################
def viderEcran () :
import os
#os.system('cls') # efface l'écran de la console cmd.exe sur windows
os.system('clear') # on linux / os x
print("Le fichier BDD.csv est lu et converti en liste de ",
"dictionnaires")
########################################################################
# les codes suivants sont des codes de test :
########################################################################
def main() :
"""
Fonction de test de lireCSV
"""
print("Efface l'écran : "+ 57 * "\n")
print("___________________________________________________________",
"____")
table = lireCSV("BDD.csv")
# test de la deuxième fonction d'affichage décoratif
printTableformateeDeco (table)
print("\nLa table contient ", len(table),
"lignes ou fiches ou enregistrements.")
printTitre("Choix de l'exercice à tester")
for i in range(1,4) :
print("- exercice n° 5.", i)
n = int(input("numéro :"))
if n == 1 : exo5_1 (table)
elif n == 2 : exo5_2 (table)
elif n == 3 : exo5_3 (table)
else : print("Dommage, vous ne savez pas lire !")
if __name__ == "__main__":
"""
Ne fonctionne que si c'est ce fichier qui est activé directement
La variable __name__ prend la valeur du fichier activé en premier.
"""
main() | 0.073715 | 0.327991 |
from enum import Enum
class NamedEntityScoreEnum(Enum):
"""Enum for the score of a named entity"""
# The score is based on the number of models that detect the entity.
# The used models are AWS Comprehend, NLTK and Spacy
# The score is high if all models detected the entity
HIGH = "HIGH"
# The score is medium if 2 models detected the entity
MEDIUM = "MEDIUM"
# The score is low if only 1 model detected the entity
LOW = "LOW"
class NamedEntityTypeEnum(Enum):
    """Category of a named entity."""

    # A branded product.
    PRODUCT = "PRODUCT"
    # A full date (for example, 11/25/2017), day (Tuesday), month (May), or time (8:30 a.m.).
    DATE = "DATE"
    # An event, such as a festival, concert, election, etc.
    EVENT = "EVENT"
    # A specific location, such as a country, city, lake, building, etc.
    LOCATION = "LOCATION"
    # Large organizations, such as a government, company, religion, sports team, etc.
    ORGANIZATION = "ORGANIZATION"
    # Individuals, groups of people, nicknames, fictional characters.
    PERSON = "PERSON"
    # A quantified amount, such as currency, percentages, numbers, bytes, etc.
    QUANTITY = "QUANTITY"
    # An official name given to any creation or creative work, such as movies, books, songs, etc.
    TITLE = "TITLE"
    # Entities that don't fit into any of the other entity categories.
    OTHER = "OTHER"
class NamedEntityRelationshipEnum(Enum):
    """How a named entity relates to the document it was found in."""

    # The named entity is quoted in a document.
    QUOTED = "QUOTED"
    # The named entity is referenced in a document.
    REFERENCED = "REFERENCED"
class NamedEntity:
    """A named entity detected in a document's text."""

    # Surface form of the entity as it appears in the text.
    text: str
    # Agreement-based confidence (how many models found the entity).
    score: NamedEntityScoreEnum
    # Percentage score reported by AWS Comprehend only; may be unset.
    aws_score: float
    type: NamedEntityTypeEnum
    # Character offsets of the entity within the document text.
    begin_offset: int
    end_offset: int
    # Whether the entity is quoted or referenced in the document.
    relationship: NamedEntityRelationshipEnum

    @staticmethod
    def from_json(data):
        """Build a NamedEntity from its JSON dict representation."""
        entity = NamedEntity()
        entity.text = data["text"]
        entity.score = NamedEntityScoreEnum(data["score"])
        # aws_score is optional: the attribute stays unset when absent.
        if "aws_score" in data:
            entity.aws_score = data["aws_score"]
        entity.type = NamedEntityTypeEnum(data["type"])
        entity.begin_offset = data["begin_offset"]
        entity.end_offset = data["end_offset"]
        entity.relationship = NamedEntityRelationshipEnum(data["relationship"])
        return entity
class NamedEntityScoreEnum(Enum):
    """Confidence score of a named entity.

    The score reflects how many of the detection models (AWS Comprehend,
    NLTK and Spacy) agreed on the entity.
    """

    # Every model detected the entity.
    HIGH = "HIGH"
    # Two of the models detected the entity.
    MEDIUM = "MEDIUM"
    # Only one model detected the entity.
    LOW = "LOW"
class NamedEntityTypeEnum(Enum):
    """Category of a named entity."""

    # A branded product.
    PRODUCT = "PRODUCT"
    # A full date (for example, 11/25/2017), day (Tuesday), month (May), or time (8:30 a.m.).
    DATE = "DATE"
    # An event, such as a festival, concert, election, etc.
    EVENT = "EVENT"
    # A specific location, such as a country, city, lake, building, etc.
    LOCATION = "LOCATION"
    # Large organizations, such as a government, company, religion, sports team, etc.
    ORGANIZATION = "ORGANIZATION"
    # Individuals, groups of people, nicknames, fictional characters.
    PERSON = "PERSON"
    # A quantified amount, such as currency, percentages, numbers, bytes, etc.
    QUANTITY = "QUANTITY"
    # An official name given to any creation or creative work, such as movies, books, songs, etc.
    TITLE = "TITLE"
    # Entities that don't fit into any of the other entity categories.
    OTHER = "OTHER"
class NamedEntityRelationshipEnum(Enum):
    """How a named entity relates to the document it was found in."""

    # The named entity is quoted in a document.
    QUOTED = "QUOTED"
    # The named entity is referenced in a document.
    REFERENCED = "REFERENCED"
class NamedEntity:
    """A named entity detected in a document's text."""

    # Surface form of the entity as it appears in the text.
    text: str
    # Agreement-based confidence (how many models found the entity).
    score: NamedEntityScoreEnum
    # Percentage score reported by AWS Comprehend only; may be unset.
    aws_score: float
    type: NamedEntityTypeEnum
    # Character offsets of the entity within the document text.
    begin_offset: int
    end_offset: int
    # Whether the entity is quoted or referenced in the document.
    relationship: NamedEntityRelationshipEnum

    @staticmethod
    def from_json(data):
        """Build a NamedEntity from its JSON dict representation."""
        entity = NamedEntity()
        entity.text = data["text"]
        entity.score = NamedEntityScoreEnum(data["score"])
        # aws_score is optional: the attribute stays unset when absent.
        if "aws_score" in data:
            entity.aws_score = data["aws_score"]
        entity.type = NamedEntityTypeEnum(data["type"])
        entity.begin_offset = data["begin_offset"]
        entity.end_offset = data["end_offset"]
        entity.relationship = NamedEntityRelationshipEnum(data["relationship"])
        return entity
import csv
def calc_edit_dist(word1, word2):
    """Return the Levenshtein (edit) distance between two words.

    Classic Wagner-Fischer dynamic programming with unit costs for
    insertion, deletion and substitution.

    Review fix: the previous version (self-marked "needs fixing") only
    propagated costs along the matrix diagonal and therefore did not
    compute an edit distance at all.
    """
    n1, n2 = len(word1), len(word2)
    # dp[i][j] = edit distance between word1[:i] and word2[:j].
    dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]
    for i in range(n1 + 1):
        dp[i][0] = i          # delete all i characters of word1[:i]
    for j in range(n2 + 1):
        dp[0][j] = j          # insert all j characters of word2[:j]
    for i in range(1, n1 + 1):
        for j in range(1, n2 + 1):
            substitution_cost = 0 if word1[i - 1] == word2[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,                      # deletion
                dp[i][j - 1] + 1,                      # insertion
                dp[i - 1][j - 1] + substitution_cost,  # substitution/match
            )
    return dp[n1][n2]
def create_comparision_matrix(word1, word2):
    """Build the initial DP matrix used by ``calc_edit_dist``.

    Returns a len(word1) x len(word2) matrix of zeros whose first row
    and first column have been seeded with boundary costs.

    NOTE(review): a textbook edit-distance matrix is (len+1) x (len+1)
    with borders 0..n; this variant charges 2 for first-column character
    mismatches and falls back to cost 1 once word2 runs out of
    characters (the bare ``except`` below swallows the IndexError).
    Confirm these costs are intentional before reusing this helper.
    """
    word1_length = len(word1)
    word2_length = len(word2)
    # Start from an all-zero matrix.
    comparison_matrix = []
    for i in range(word1_length):
        comparison_matrix.append([])
        for j in range(word2_length):
            comparison_matrix[i].append(0)
    if word1[0] != word2[0]:
        comparison_matrix[0][0] = 2
    # Seed the first column; mismatching characters cost 2.
    for r in range(1, word1_length):
        try:
            if word1[r] == word2[r]:
                comparison_matrix[r][0] = comparison_matrix[r-1][0]
            else:
                comparison_matrix[r][0] = comparison_matrix[r-1][0] + 2
        except:  # word2 shorter than word1 -> IndexError; charge cost 1
            comparison_matrix[r][0] = comparison_matrix[r-1][0] + 1
    # Seed the first row with unit insertion costs.
    for c in range(1, word2_length):
        comparison_matrix[0][c] = comparison_matrix[0][c-1] + 1
    return comparison_matrix
def load_dictionary_as_list():
    """Load the word list, one lower-case word per line, stripped.

    Returns:
        list[str]: every line of ``corncob_lowercase.txt`` with
        surrounding whitespace (including the newline) removed.
    """
    # ``with`` guarantees the handle is closed (the original leaked it).
    with open('corncob_lowercase.txt', 'r') as word_file:
        return [line.strip() for line in word_file]
def suggest_word(input_text, dictionary):
    """Suggest the shortest dictionary word completing ``input_text``.

    A candidate must be strictly longer than the typed prefix and start
    with it; ties keep the first candidate seen.  Returns '' when no
    candidate exists.

    Review fix: the original used an 86-underscore sentinel string, so
    any completion longer than 86 characters was silently rejected;
    ``None`` as the sentinel removes that limit.
    """
    closest_word = None
    for word in dictionary:
        # Candidate must strictly extend the typed text.
        if len(word) <= len(input_text):
            continue
        if word.startswith(input_text):
            if closest_word is None or len(word) < len(closest_word):
                closest_word = word
    return closest_word if closest_word is not None else ''
def autocorrect_word(input_text, dictionary):
    """Return the best correction for ``input_text``.

    If the text is already a dictionary word it is returned unchanged.
    Otherwise the three dictionary words with the smallest edit distance
    are collected and the most frequent of them (per the frequency CSV)
    is returned.

    Review fix: the original "replace the first slot I beat" loop did
    not actually keep the three closest words; a stable sort by distance
    does.
    """
    if input_text in dictionary:
        return input_text
    # (edit_distance, word) for every dictionary word.
    scored = [(calc_edit_dist(word, input_text), word) for word in dictionary]
    # Smallest distance first; ties keep dictionary order (sort is stable).
    scored.sort(key=lambda pair: pair[0])
    possible_words = [word for _, word in scored[:3]]
    print(f"These were the possible words: {possible_words}")
    if not possible_words:
        # Empty dictionary: nothing to suggest.
        return ''
    closest_word = find_most_frequent_word(possible_words)
    return closest_word
def find_most_frequent_word(possible_words):
    """Return the candidate that occurs most often in the frequency table.

    Rows of the table are [rank, word, count] (word in column 1, count in
    column 2).  Falls back to the first candidate when none of them
    appears in the table.
    """
    best_word = possible_words[0]
    best_count = 0
    for row in convert_frequency_csv_to_array():
        table_word = row[1]
        if table_word in possible_words:
            count = int(row[2])
            if count > best_count:
                best_count = count
                best_word = table_word
    return best_word
def convert_frequency_csv_to_array():
    """Read word_frequency.csv and return its data rows (header dropped)."""
    with open('word_frequency.csv') as frequency_file:
        rows = list(csv.reader(frequency_file))
    return rows[1:]
def main():
    """Interactive loop: read a word, print autocorrect/completion hints.

    Review fixes:
    - the dictionary is loaded once, not on every iteration;
    - ``closest_word`` was printed even when it had never been assigned
      (single-character input on the first loop raised NameError); the
      autocorrect line is now printed only when a correction exists.
    """
    dictionary = load_dictionary_as_list()
    while True:
        input_text = input('Enter a word: ')
        if len(input_text) == 0:
            continue
        closest_word = None
        if len(input_text) < 2:
            suggested_word = suggest_word(input_text, dictionary)
        else:
            closest_word = autocorrect_word(input_text, dictionary)
            suggested_word = suggest_word(input_text, dictionary)
        if closest_word is not None:
            print(f"Did you mean this word? {closest_word}")
        print(f"Were you about to type: {suggested_word}")


main()
def calc_edit_dist(word1, word2):
    """Return the Levenshtein (edit) distance between two words.

    Classic Wagner-Fischer dynamic programming with unit costs for
    insertion, deletion and substitution.

    Review fix: the previous version (self-marked "needs fixing") only
    propagated costs along the matrix diagonal and therefore did not
    compute an edit distance at all.
    """
    n1, n2 = len(word1), len(word2)
    # dp[i][j] = edit distance between word1[:i] and word2[:j].
    dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]
    for i in range(n1 + 1):
        dp[i][0] = i          # delete all i characters of word1[:i]
    for j in range(n2 + 1):
        dp[0][j] = j          # insert all j characters of word2[:j]
    for i in range(1, n1 + 1):
        for j in range(1, n2 + 1):
            substitution_cost = 0 if word1[i - 1] == word2[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,                      # deletion
                dp[i][j - 1] + 1,                      # insertion
                dp[i - 1][j - 1] + substitution_cost,  # substitution/match
            )
    return dp[n1][n2]
def create_comparision_matrix(word1, word2):
    """Build the initial DP matrix used by ``calc_edit_dist``.

    Returns a len(word1) x len(word2) matrix of zeros whose first row
    and first column have been seeded with boundary costs.

    NOTE(review): a textbook edit-distance matrix is (len+1) x (len+1)
    with borders 0..n; this variant charges 2 for first-column character
    mismatches and falls back to cost 1 once word2 runs out of
    characters (the bare ``except`` below swallows the IndexError).
    Confirm these costs are intentional before reusing this helper.
    """
    word1_length = len(word1)
    word2_length = len(word2)
    # Start from an all-zero matrix.
    comparison_matrix = []
    for i in range(word1_length):
        comparison_matrix.append([])
        for j in range(word2_length):
            comparison_matrix[i].append(0)
    if word1[0] != word2[0]:
        comparison_matrix[0][0] = 2
    # Seed the first column; mismatching characters cost 2.
    for r in range(1, word1_length):
        try:
            if word1[r] == word2[r]:
                comparison_matrix[r][0] = comparison_matrix[r-1][0]
            else:
                comparison_matrix[r][0] = comparison_matrix[r-1][0] + 2
        except:  # word2 shorter than word1 -> IndexError; charge cost 1
            comparison_matrix[r][0] = comparison_matrix[r-1][0] + 1
    # Seed the first row with unit insertion costs.
    for c in range(1, word2_length):
        comparison_matrix[0][c] = comparison_matrix[0][c-1] + 1
    return comparison_matrix
def load_dictionary_as_list():
    """Load the word list, one lower-case word per line, stripped.

    Returns:
        list[str]: every line of ``corncob_lowercase.txt`` with
        surrounding whitespace (including the newline) removed.
    """
    # ``with`` guarantees the handle is closed (the original leaked it).
    with open('corncob_lowercase.txt', 'r') as word_file:
        return [line.strip() for line in word_file]
def suggest_word(input_text, dictionary):
    """Suggest the shortest dictionary word completing ``input_text``.

    A candidate must be strictly longer than the typed prefix and start
    with it; ties keep the first candidate seen.  Returns '' when no
    candidate exists.

    Review fix: the original used an 86-underscore sentinel string, so
    any completion longer than 86 characters was silently rejected;
    ``None`` as the sentinel removes that limit.
    """
    closest_word = None
    for word in dictionary:
        # Candidate must strictly extend the typed text.
        if len(word) <= len(input_text):
            continue
        if word.startswith(input_text):
            if closest_word is None or len(word) < len(closest_word):
                closest_word = word
    return closest_word if closest_word is not None else ''
def autocorrect_word(input_text, dictionary):
    """Return the best correction for ``input_text``.

    If the text is already a dictionary word it is returned unchanged.
    Otherwise the three dictionary words with the smallest edit distance
    are collected and the most frequent of them (per the frequency CSV)
    is returned.

    Review fix: the original "replace the first slot I beat" loop did
    not actually keep the three closest words; a stable sort by distance
    does.
    """
    if input_text in dictionary:
        return input_text
    # (edit_distance, word) for every dictionary word.
    scored = [(calc_edit_dist(word, input_text), word) for word in dictionary]
    # Smallest distance first; ties keep dictionary order (sort is stable).
    scored.sort(key=lambda pair: pair[0])
    possible_words = [word for _, word in scored[:3]]
    print(f"These were the possible words: {possible_words}")
    if not possible_words:
        # Empty dictionary: nothing to suggest.
        return ''
    closest_word = find_most_frequent_word(possible_words)
    return closest_word
def find_most_frequent_word(possible_words):
    """Return the candidate that occurs most often in the frequency table.

    Rows of the table are [rank, word, count] (word in column 1, count in
    column 2).  Falls back to the first candidate when none of them
    appears in the table.
    """
    best_word = possible_words[0]
    best_count = 0
    for row in convert_frequency_csv_to_array():
        table_word = row[1]
        if table_word in possible_words:
            count = int(row[2])
            if count > best_count:
                best_count = count
                best_word = table_word
    return best_word
def convert_frequency_csv_to_array():
    """Read word_frequency.csv and return its data rows (header dropped)."""
    with open('word_frequency.csv') as frequency_file:
        rows = list(csv.reader(frequency_file))
    return rows[1:]
def main():
    """Interactive loop: read a word, print autocorrect/completion hints.

    Review fixes:
    - the dictionary is loaded once, not on every iteration;
    - ``closest_word`` was printed even when it had never been assigned
      (single-character input on the first loop raised NameError); the
      autocorrect line is now printed only when a correction exists.
    """
    dictionary = load_dictionary_as_list()
    while True:
        input_text = input('Enter a word: ')
        if len(input_text) == 0:
            continue
        closest_word = None
        if len(input_text) < 2:
            suggested_word = suggest_word(input_text, dictionary)
        else:
            closest_word = autocorrect_word(input_text, dictionary)
            suggested_word = suggest_word(input_text, dictionary)
        if closest_word is not None:
            print(f"Did you mean this word? {closest_word}")
        print(f"Were you about to type: {suggested_word}")


main()
# NOTE(review): this is a Python 2 test script -- the old-style octal
# literals (0666, 0777) and the list-returning map() inside the generated
# SConstruct mean it will not even parse under Python 3 (which needs 0o666).
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Verify that the Chmod() Action works.
"""

import os
import os.path
import stat

import TestSCons

test = TestSCons.TestSCons()

# Note: Windows basically has two modes that it can os.chmod() files to
# 0444 and 0666, and directories to 0555 and 0777, so we can only really
# oscillate between those values.

# The SConstruct under test exercises Chmod() three ways: as a standalone
# Execute() action, inside Command() action lists, and with $FILE /
# $SOURCE / $TARGET variable substitution.
test.write('SConstruct', """
Execute(Chmod('f1', 0666))
Execute(Chmod('d2', 0777))
def cat(env, source, target):
    target = str(target[0])
    source = map(str, source)
    f = open(target, "wb")
    for src in source:
        f.write(open(src, "rb").read())
    f.close()
Cat = Action(cat)
env = Environment()
env.Command('bar.out', 'bar.in', [Cat,
                                  Chmod("f3", 0666),
                                  Chmod("d4", 0777)])
env = Environment(FILE = 'f5')
env.Command('f6.out', 'f6.in', [Chmod('$FILE', 0666), Cat])
env.Command('f7.out', 'f7.in', [Cat,
                                Chmod('Chmod-$SOURCE', 0666),
                                Chmod('${TARGET}-Chmod', 0666)])
""")

# Create the fixture files/directories the build will chmod.
test.write('f1', "f1\n")
test.subdir('d2')
test.write(['d2', 'file'], "d2/file\n")
test.write('bar.in', "bar.in\n")
test.write('f3', "f3\n")
test.subdir('d4')
test.write(['d4', 'file'], "d4/file\n")
test.write('f5', "f5\n")
test.write('f6.in', "f6.in\n")
test.write('f7.in', "f7.in\n")
test.write('Chmod-f7.in', "Chmod-f7.in\n")
test.write('f7.out-Chmod', "f7.out-Chmod\n")

# Force everything read-only so the Chmod actions have a visible effect.
os.chmod(test.workpath('f1'), 0444)
os.chmod(test.workpath('d2'), 0555)
os.chmod(test.workpath('f3'), 0444)
os.chmod(test.workpath('d4'), 0555)
os.chmod(test.workpath('f5'), 0444)
os.chmod(test.workpath('Chmod-f7.in'), 0444)
os.chmod(test.workpath('f7.out-Chmod'), 0444)

# Expected stdout for the dry run below (actions printed, not executed).
expect = test.wrap_stdout(read_str = 'Chmod("f1", 0666)\nChmod("d2", 0777)\n',
                          build_str = """\
cat(["bar.out"], ["bar.in"])
Chmod("f3", 0666)
Chmod("d4", 0777)
Chmod("f5", 0666)
cat(["f6.out"], ["f6.in"])
cat(["f7.out"], ["f7.in"])
Chmod("Chmod-f7.in", 0666)
Chmod("f7.out-Chmod", 0666)
""")
# Dry run (-n): modes must stay read-only and no targets may be built.
test.run(options = '-n', arguments = '.', stdout = expect)

s = stat.S_IMODE(os.stat(test.workpath('f1'))[stat.ST_MODE])
test.fail_test(s != 0444)
s = stat.S_IMODE(os.stat(test.workpath('d2'))[stat.ST_MODE])
test.fail_test(s != 0555)
test.must_not_exist('bar.out')
s = stat.S_IMODE(os.stat(test.workpath('f3'))[stat.ST_MODE])
test.fail_test(s != 0444)
s = stat.S_IMODE(os.stat(test.workpath('d4'))[stat.ST_MODE])
test.fail_test(s != 0555)
s = stat.S_IMODE(os.stat(test.workpath('f5'))[stat.ST_MODE])
test.fail_test(s != 0444)
test.must_not_exist('f6.out')
test.must_not_exist('f7.out')
s = stat.S_IMODE(os.stat(test.workpath('Chmod-f7.in'))[stat.ST_MODE])
test.fail_test(s != 0444)
s = stat.S_IMODE(os.stat(test.workpath('f7.out-Chmod'))[stat.ST_MODE])
test.fail_test(s != 0444)

# Real run: every Chmod must have taken effect and every target built.
test.run()

s = stat.S_IMODE(os.stat(test.workpath('f1'))[stat.ST_MODE])
test.fail_test(s != 0666)
s = stat.S_IMODE(os.stat(test.workpath('d2'))[stat.ST_MODE])
test.fail_test(s != 0777)
test.must_match('bar.out', "bar.in\n")
s = stat.S_IMODE(os.stat(test.workpath('f3'))[stat.ST_MODE])
test.fail_test(s != 0666)
s = stat.S_IMODE(os.stat(test.workpath('d4'))[stat.ST_MODE])
test.fail_test(s != 0777)
s = stat.S_IMODE(os.stat(test.workpath('f5'))[stat.ST_MODE])
test.fail_test(s != 0666)
test.must_match('f6.out', "f6.in\n")
test.must_match('f7.out', "f7.in\n")
s = stat.S_IMODE(os.stat(test.workpath('Chmod-f7.in'))[stat.ST_MODE])
test.fail_test(s != 0666)
s = stat.S_IMODE(os.stat(test.workpath('f7.out-Chmod'))[stat.ST_MODE])
test.fail_test(s != 0666)
test.pass_test() | test/Chmod.py |
# NOTE(review): this is a Python 2 test script -- the old-style octal
# literals (0666, 0777) and the list-returning map() inside the generated
# SConstruct mean it will not even parse under Python 3 (which needs 0o666).
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

"""
Verify that the Chmod() Action works.
"""

import os
import os.path
import stat

import TestSCons

test = TestSCons.TestSCons()

# Note: Windows basically has two modes that it can os.chmod() files to
# 0444 and 0666, and directories to 0555 and 0777, so we can only really
# oscillate between those values.

# The SConstruct under test exercises Chmod() three ways: as a standalone
# Execute() action, inside Command() action lists, and with $FILE /
# $SOURCE / $TARGET variable substitution.
test.write('SConstruct', """
Execute(Chmod('f1', 0666))
Execute(Chmod('d2', 0777))
def cat(env, source, target):
    target = str(target[0])
    source = map(str, source)
    f = open(target, "wb")
    for src in source:
        f.write(open(src, "rb").read())
    f.close()
Cat = Action(cat)
env = Environment()
env.Command('bar.out', 'bar.in', [Cat,
                                  Chmod("f3", 0666),
                                  Chmod("d4", 0777)])
env = Environment(FILE = 'f5')
env.Command('f6.out', 'f6.in', [Chmod('$FILE', 0666), Cat])
env.Command('f7.out', 'f7.in', [Cat,
                                Chmod('Chmod-$SOURCE', 0666),
                                Chmod('${TARGET}-Chmod', 0666)])
""")

# Create the fixture files/directories the build will chmod.
test.write('f1', "f1\n")
test.subdir('d2')
test.write(['d2', 'file'], "d2/file\n")
test.write('bar.in', "bar.in\n")
test.write('f3', "f3\n")
test.subdir('d4')
test.write(['d4', 'file'], "d4/file\n")
test.write('f5', "f5\n")
test.write('f6.in', "f6.in\n")
test.write('f7.in', "f7.in\n")
test.write('Chmod-f7.in', "Chmod-f7.in\n")
test.write('f7.out-Chmod', "f7.out-Chmod\n")

# Force everything read-only so the Chmod actions have a visible effect.
os.chmod(test.workpath('f1'), 0444)
os.chmod(test.workpath('d2'), 0555)
os.chmod(test.workpath('f3'), 0444)
os.chmod(test.workpath('d4'), 0555)
os.chmod(test.workpath('f5'), 0444)
os.chmod(test.workpath('Chmod-f7.in'), 0444)
os.chmod(test.workpath('f7.out-Chmod'), 0444)

# Expected stdout for the dry run below (actions printed, not executed).
expect = test.wrap_stdout(read_str = 'Chmod("f1", 0666)\nChmod("d2", 0777)\n',
                          build_str = """\
cat(["bar.out"], ["bar.in"])
Chmod("f3", 0666)
Chmod("d4", 0777)
Chmod("f5", 0666)
cat(["f6.out"], ["f6.in"])
cat(["f7.out"], ["f7.in"])
Chmod("Chmod-f7.in", 0666)
Chmod("f7.out-Chmod", 0666)
""")
# Dry run (-n): modes must stay read-only and no targets may be built.
test.run(options = '-n', arguments = '.', stdout = expect)

s = stat.S_IMODE(os.stat(test.workpath('f1'))[stat.ST_MODE])
test.fail_test(s != 0444)
s = stat.S_IMODE(os.stat(test.workpath('d2'))[stat.ST_MODE])
test.fail_test(s != 0555)
test.must_not_exist('bar.out')
s = stat.S_IMODE(os.stat(test.workpath('f3'))[stat.ST_MODE])
test.fail_test(s != 0444)
s = stat.S_IMODE(os.stat(test.workpath('d4'))[stat.ST_MODE])
test.fail_test(s != 0555)
s = stat.S_IMODE(os.stat(test.workpath('f5'))[stat.ST_MODE])
test.fail_test(s != 0444)
test.must_not_exist('f6.out')
test.must_not_exist('f7.out')
s = stat.S_IMODE(os.stat(test.workpath('Chmod-f7.in'))[stat.ST_MODE])
test.fail_test(s != 0444)
s = stat.S_IMODE(os.stat(test.workpath('f7.out-Chmod'))[stat.ST_MODE])
test.fail_test(s != 0444)

# Real run: every Chmod must have taken effect and every target built.
test.run()

s = stat.S_IMODE(os.stat(test.workpath('f1'))[stat.ST_MODE])
test.fail_test(s != 0666)
s = stat.S_IMODE(os.stat(test.workpath('d2'))[stat.ST_MODE])
test.fail_test(s != 0777)
test.must_match('bar.out', "bar.in\n")
s = stat.S_IMODE(os.stat(test.workpath('f3'))[stat.ST_MODE])
test.fail_test(s != 0666)
s = stat.S_IMODE(os.stat(test.workpath('d4'))[stat.ST_MODE])
test.fail_test(s != 0777)
s = stat.S_IMODE(os.stat(test.workpath('f5'))[stat.ST_MODE])
test.fail_test(s != 0666)
test.must_match('f6.out', "f6.in\n")
test.must_match('f7.out', "f7.in\n")
s = stat.S_IMODE(os.stat(test.workpath('Chmod-f7.in'))[stat.ST_MODE])
test.fail_test(s != 0666)
s = stat.S_IMODE(os.stat(test.workpath('f7.out-Chmod'))[stat.ST_MODE])
test.fail_test(s != 0666)
test.pass_test() | 0.334372 | 0.200558 |
from math import *
from MITgcmutils import rdmds
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import pylab as pl
import scipy.io
import scipy as spy
import sys
lib_path = os.path.abspath('../../Building_canyon/BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
sys.path.append(lib_path)
import ReadOutTools_MITgcm as rout
import MetricsPythonTools as mpt
### -----------------------------------------------------------------------------------------------------------------------------------
def main():
    """Compute canyon transect areas and shelf volumes for one MITgcm run.

    Command line: ``<script> <expPath> <run>``.  Reads the run's grid
    datasets, integrates cell-face areas over hard-coded index ranges
    that define the canyon transects, and writes a one-row CSV to
    results/metricsDataFrames/Canyon_AreasVolumes_NoC.csv.
    """
    expPath = sys.argv[1]
    run = sys.argv[2]
    Grid1, GridOut1, State1,StateOut1,Ptracers1, PtracersOut1 = mpt.getDatasets(expPath, run)
    # Nominal grid dimensions for this configuration (reference only).
    nx = 360
    ny = 360
    nz = 90
    nt = 19 # t dimension size
    rc = GridOut1.variables['RC']
    xc = rout.getField(Grid1, 'XC') # x coords tracer cells
    yc = rout.getField(Grid1, 'YC') # y coords tracer cells
    drF = GridOut1.variables['drF'] # vertical distance between faces
    dxF = rout.getField(Grid1,'dxF')
    dyF = rout.getField(Grid1,'dyF')
    MaskCan = rout.getMask(Grid1,'HFacC')
    hFacCCan = rout.getField(Grid1,'HFacC')
    rACan = rout.getField(Grid1,'rA')
    drFCan=GridOut1.variables['drF']
    print('Finished reading grid variables')
    # Transect definitions as index bounds [x1, x2, y1, y2, z1, z2]:
    # CS* = cross-shelf sections, AS* = along-shelf, LID* = canyon lid.
    # NOTE(review): indices are hard-coded (360x360x90 grid, shelf break
    # near j=227 presumably) -- confirm before reusing on another grid.
    CS1 = [0,40,227,227,0,29]
    CS2 = [40,120,227,227,0,29]
    CS3 = [120,240,267,267,0,29]
    CS3sb = [120,240,227,227,0,29 ]
    CS4 = [240,320,227,227,0,29 ]
    CS5 = [320,359,227,227,0,29 ]
    AS1 = [120,120,227,267,0,29 ]
    AS2 = [240,240,227,267,0,29 ]
    LID1 = [120,180,227,267,29,29 ]
    LID2 = [180,240,227,267,29,29 ]
    # Per-cell face areas on each transect (V_*/W_* use dxF, U_* use dyF).
    V_CS1a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS1[0],CS1[1],CS1[2],CS1[3],CS1[4],CS1[5])
    V_CS2a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS2[0],CS2[1],CS2[2],CS2[3],CS2[4],CS2[5])
    V_CS3a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS3[0],CS3[1],CS3[2],CS3[3],CS3[4],CS3[5])
    V_CS4a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS4[0],CS4[1],CS4[2],CS4[3],CS4[4],CS4[5])
    V_CS5a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS5[0],CS5[1],CS5[2],CS5[3],CS5[4],CS5[5])
    V_CS3sba = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS3sb[0],CS3sb[1],CS3sb[2],CS3sb[3],CS3sb[4],CS3sb[5])
    U_AS1a = mpt.slice_area( dyF,drFCan,rACan,hFacCCan,AS1[0],AS1[1],AS1[2],AS1[3],AS1[4],AS1[5])
    U_AS2a = mpt.slice_area( dyF,drFCan,rACan,hFacCCan,AS2[0],AS2[1],AS2[2],AS2[3],AS2[4],AS2[5])
    W_LID1a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,LID1[0],LID1[1],LID1[2],LID1[3],LID1[4],LID1[5])
    W_LID2a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,LID2[0],LID2[1],LID2[2],LID2[3],LID2[4],LID2[5])
    # Integrate each transect to a single area value.
    V_CS1 = np.sum(V_CS1a)
    V_CS2 = np.sum(V_CS2a)
    V_CS3 = np.sum(V_CS3a )
    V_CS4 = np.sum(V_CS4a )
    V_CS5 = np.sum(V_CS5a )
    V_CS3sb = np.sum(V_CS3sba )
    U_AS1 = np.sum(U_AS1a )
    U_AS2 = np.sum(U_AS2a )
    W_LID1 = np.sum(W_LID1a)
    W_LID2 = np.sum(W_LID2a)
    # Shelf volume split into the canyon "hole" box and the rest.
    yin = 227
    zfin = 30
    [VolShNoHole,VolHole] = mpt.Volume_Sh_and_Hole(MaskCan,rACan,hFacCCan,drFCan,yin,zfin,xh1=120,xh2=240,yh1=227,yh2=267)
    # Assemble a single-row table and persist it.
    raw_data = {'CS1area': V_CS1, 'CS2area': V_CS2, 'CS3area': V_CS3, 'CS3sbarea': V_CS3sb, 'CS4area': V_CS4, 'CS5area': V_CS5, 'AS1area':U_AS1, 'AS2area': U_AS2,'LID1area': W_LID1, 'LID2area': W_LID2,'VolHole': VolHole,'VolShNoHole':VolShNoHole}
    df = pd.DataFrame(raw_data, columns = ['CS1area', 'CS2area', 'CS3area', 'CS3sbarea', 'CS4area', 'CS5area', 'AS1area', 'AS2area', 'LID1area', 'LID2area','VolHole','VolShNoHole'], index=[0])
    filename1 = ('results/metricsDataFrames/Canyon_AreasVolumes_NoC.csv')
    df.to_csv(filename1)
    print(filename1)
    print('Done')
main() | PythonScripts/CS_Sections_Areas.py | from math import *
from MITgcmutils import rdmds
from netCDF4 import Dataset
import numpy as np
import os
import pandas as pd
import pylab as pl
import scipy.io
import scipy as spy
import sys
lib_path = os.path.abspath('../../Building_canyon/BuildCanyon/PythonModulesMITgcm') # Add absolute path to my python scripts
sys.path.append(lib_path)
import ReadOutTools_MITgcm as rout
import MetricsPythonTools as mpt
### -----------------------------------------------------------------------------------------------------------------------------------
def main():
    """Compute canyon transect areas and shelf volumes for one MITgcm run.

    Command line: ``<script> <expPath> <run>``.  Reads the run's grid
    datasets, integrates cell-face areas over hard-coded index ranges
    that define the canyon transects, and writes a one-row CSV to
    results/metricsDataFrames/Canyon_AreasVolumes_NoC.csv.
    """
    expPath = sys.argv[1]
    run = sys.argv[2]
    Grid1, GridOut1, State1,StateOut1,Ptracers1, PtracersOut1 = mpt.getDatasets(expPath, run)
    # Nominal grid dimensions for this configuration (reference only).
    nx = 360
    ny = 360
    nz = 90
    nt = 19 # t dimension size
    rc = GridOut1.variables['RC']
    xc = rout.getField(Grid1, 'XC') # x coords tracer cells
    yc = rout.getField(Grid1, 'YC') # y coords tracer cells
    drF = GridOut1.variables['drF'] # vertical distance between faces
    dxF = rout.getField(Grid1,'dxF')
    dyF = rout.getField(Grid1,'dyF')
    MaskCan = rout.getMask(Grid1,'HFacC')
    hFacCCan = rout.getField(Grid1,'HFacC')
    rACan = rout.getField(Grid1,'rA')
    drFCan=GridOut1.variables['drF']
    print('Finished reading grid variables')
    # Transect definitions as index bounds [x1, x2, y1, y2, z1, z2]:
    # CS* = cross-shelf sections, AS* = along-shelf, LID* = canyon lid.
    # NOTE(review): indices are hard-coded (360x360x90 grid, shelf break
    # near j=227 presumably) -- confirm before reusing on another grid.
    CS1 = [0,40,227,227,0,29]
    CS2 = [40,120,227,227,0,29]
    CS3 = [120,240,267,267,0,29]
    CS3sb = [120,240,227,227,0,29 ]
    CS4 = [240,320,227,227,0,29 ]
    CS5 = [320,359,227,227,0,29 ]
    AS1 = [120,120,227,267,0,29 ]
    AS2 = [240,240,227,267,0,29 ]
    LID1 = [120,180,227,267,29,29 ]
    LID2 = [180,240,227,267,29,29 ]
    # Per-cell face areas on each transect (V_*/W_* use dxF, U_* use dyF).
    V_CS1a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS1[0],CS1[1],CS1[2],CS1[3],CS1[4],CS1[5])
    V_CS2a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS2[0],CS2[1],CS2[2],CS2[3],CS2[4],CS2[5])
    V_CS3a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS3[0],CS3[1],CS3[2],CS3[3],CS3[4],CS3[5])
    V_CS4a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS4[0],CS4[1],CS4[2],CS4[3],CS4[4],CS4[5])
    V_CS5a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS5[0],CS5[1],CS5[2],CS5[3],CS5[4],CS5[5])
    V_CS3sba = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,CS3sb[0],CS3sb[1],CS3sb[2],CS3sb[3],CS3sb[4],CS3sb[5])
    U_AS1a = mpt.slice_area( dyF,drFCan,rACan,hFacCCan,AS1[0],AS1[1],AS1[2],AS1[3],AS1[4],AS1[5])
    U_AS2a = mpt.slice_area( dyF,drFCan,rACan,hFacCCan,AS2[0],AS2[1],AS2[2],AS2[3],AS2[4],AS2[5])
    W_LID1a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,LID1[0],LID1[1],LID1[2],LID1[3],LID1[4],LID1[5])
    W_LID2a = mpt.slice_area( dxF,drFCan,rACan,hFacCCan,LID2[0],LID2[1],LID2[2],LID2[3],LID2[4],LID2[5])
    # Integrate each transect to a single area value.
    V_CS1 = np.sum(V_CS1a)
    V_CS2 = np.sum(V_CS2a)
    V_CS3 = np.sum(V_CS3a )
    V_CS4 = np.sum(V_CS4a )
    V_CS5 = np.sum(V_CS5a )
    V_CS3sb = np.sum(V_CS3sba )
    U_AS1 = np.sum(U_AS1a )
    U_AS2 = np.sum(U_AS2a )
    W_LID1 = np.sum(W_LID1a)
    W_LID2 = np.sum(W_LID2a)
    # Shelf volume split into the canyon "hole" box and the rest.
    yin = 227
    zfin = 30
    [VolShNoHole,VolHole] = mpt.Volume_Sh_and_Hole(MaskCan,rACan,hFacCCan,drFCan,yin,zfin,xh1=120,xh2=240,yh1=227,yh2=267)
    # Assemble a single-row table and persist it.
    raw_data = {'CS1area': V_CS1, 'CS2area': V_CS2, 'CS3area': V_CS3, 'CS3sbarea': V_CS3sb, 'CS4area': V_CS4, 'CS5area': V_CS5, 'AS1area':U_AS1, 'AS2area': U_AS2,'LID1area': W_LID1, 'LID2area': W_LID2,'VolHole': VolHole,'VolShNoHole':VolShNoHole}
    df = pd.DataFrame(raw_data, columns = ['CS1area', 'CS2area', 'CS3area', 'CS3sbarea', 'CS4area', 'CS5area', 'AS1area', 'AS2area', 'LID1area', 'LID2area','VolHole','VolShNoHole'], index=[0])
    filename1 = ('results/metricsDataFrames/Canyon_AreasVolumes_NoC.csv')
    df.to_csv(filename1)
    print(filename1)
    print('Done')
main() | 0.18101 | 0.207235 |
import numpy as np
import scipy.sparse
import math
import multiprocessing as mp
import itertools
import sys
import os
import gc
from sklearn.neighbors import NearestNeighbors, kneighbors_graph, KDTree
from sklearn.metrics.pairwise import euclidean_distances
def chunks(lst, n):
    """Yield successive n-sized slices of ``lst`` (last may be shorter)."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
def density_broad_search_star(a_b):
    """Unpack a pair and return their pairwise Euclidean distances.

    Helper for ``multiprocessing.Pool.map``, which passes one argument;
    ``a_b`` is the pair ``[query_rows, candidate_rows]``.

    Review fix: the original wrapped any failure in ``raise
    Exception(e)``, which discards the exception type and traceback;
    letting the original exception propagate preserves both, and the
    caller's ``except Exception`` still catches it.
    """
    return euclidean_distances(a_b[1], a_b[0])
def build_CCgraph(X, k, cutoff, n_jobs):
    """Build the mutual-kNN graph of X and label its connected components.

    Args:
        X: (n_samples, n_features) data array.
        k: number of neighbours for the kNN graph.
        cutoff: components with <= cutoff points are treated as outliers
            (their labels are set to NaN).
        n_jobs: parallelism passed to sklearn's NearestNeighbors.

    Returns:
        components: float array of component labels (NaN for outliers).
        CCmat: sparse mutual-kNN distance matrix.
        knn_radius: distance from each point to its k-th neighbour.
    """
    n = X.shape[0]
    kdt = NearestNeighbors(n_neighbors = k, metric = 'euclidean', n_jobs = n_jobs, algorithm = 'kd_tree').fit(X)
    CCmat = kdt.kneighbors_graph(X, mode = 'distance')
    distances, _ = kdt.kneighbors(X)
    knn_radius = distances[:, k-1]
    # Keep an edge only if it appears in both directions (mutual kNN).
    CCmat = CCmat.minimum(CCmat.T)
    # Now to remove outlying points: points in very small (<= cutoff) components.
    # NOTE(review): directed='False' is a *string*, which is truthy, so
    # scipy treats the graph as directed and returns weak components;
    # directed=False (the boolean) was most likely intended.
    _, components = scipy.sparse.csgraph.connected_components(CCmat, directed = 'False', return_labels =True)
    comp_labs, comp_count = np.unique(components, return_counts = True)
    outlier_components = comp_labs[comp_count <= cutoff]
    nanidx = np.in1d(components, outlier_components)
    # NaN requires a float array, hence the cast.
    components = components.astype(float)
    if sum(nanidx) > 0:
        components[nanidx] = np.nan
    return components, CCmat, knn_radius
def get_density_dists_bb(X, k, components, knn_radius, n_jobs):
    """For every point, find its "big brother" and the distance to it.

    A point's big brother is its nearest point with a strictly smaller
    kNN radius (i.e. a denser point) within the same connected component;
    a component's densest point becomes its own big brother at distance
    inf.  Big brothers are found from the kNN neighbourhood first, then
    by (optionally pooled) brute-force search for the leftovers.

    Review fixes:
    - ``utils.chunks`` / ``utils.density_broad_search_star`` referenced a
      module that is never imported (the helpers are defined in this
      file) and raised NameError; they are now called directly.
    - ``print("POOL ERROR: " + e)`` raised TypeError (str + Exception);
      the exception is now stringified.

    Returns:
        best_distance, big_brother, ps (per-component search stats).
    """
    best_distance = np.empty((X.shape[0]))
    best_distance[:] = np.nan
    big_brother = np.empty((X.shape[0]))
    big_brother[:] = np.nan
    comps = np.unique((components[~np.isnan(components)])).astype(int)
    ps = np.zeros((1, 2))
    for cc in comps:
        cc_idx = np.where(components == cc)[0]
        nc = len(cc_idx)
        kcc = min(k, nc-1)
        kdt = NearestNeighbors(n_neighbors = kcc, metric = 'euclidean', n_jobs = n_jobs, algorithm = 'kd_tree').fit(X[cc_idx, :])
        distances, neighbors = kdt.kneighbors(X[cc_idx, :])
        cc_knn_radius = knn_radius[cc_idx]
        cc_best_distance = np.empty((nc))
        cc_big_brother = np.empty((nc))
        # Positive entry => that neighbour is denser (smaller radius).
        cc_radius_diff = cc_knn_radius[:, np.newaxis] - cc_knn_radius[neighbors]
        rows, cols = np.where(cc_radius_diff > 0)
        rows, unidx = np.unique(rows, return_index = True)
        del cc_radius_diff
        gc.collect()
        cols = cols[unidx]
        # Points whose big brother lies inside their own kNN neighbourhood.
        cc_big_brother[rows] = neighbors[rows, cols]
        cc_best_distance[rows] = distances[rows, cols]
        # The rest need a broad search over all denser points.
        search_idx = list(np.setdiff1d(list(range(X[cc_idx, :].shape[0])), rows))
        ps = np.vstack((ps, [len(cc_idx), len(search_idx)/len(cc_idx)]))
        for indx_chunk in chunks(search_idx, 100):
            search_radius = cc_knn_radius[indx_chunk]
            GT_radius = cc_knn_radius < search_radius[:, np.newaxis]
            if any(np.sum(GT_radius, axis = 1) == 0):
                # Densest point(s) of the component: no denser point exists.
                max_i = [i for i in range(GT_radius.shape[0]) if np.sum(GT_radius[i,:]) ==0]
                if len(max_i) > 1:
                    # Tie-break: point the later maxima at the first one.
                    for max_j in max_i[1:len(max_i)]:
                        GT_radius[max_j, indx_chunk[max_i[0]]] = True
                max_i = max_i[0]
                cc_big_brother[indx_chunk[max_i]] = indx_chunk[max_i]
                cc_best_distance[indx_chunk[max_i]] = np.inf
                del indx_chunk[max_i]
                GT_radius = np.delete(GT_radius, max_i, 0)
            GT_distances = ([X[cc_idx[indx_chunk[i]],np.newaxis], X[cc_idx[GT_radius[i,:]],:]] for i in range(len(indx_chunk)))
            if (GT_radius.shape[0]>50):
                # Large chunk: fan the distance computations out to a pool.
                try:
                    pool = mp.Pool(processes=n_jobs)
                    N = 25
                    distances = []
                    i = 0
                    while True:
                        distance_comp = pool.map(density_broad_search_star, itertools.islice(GT_distances, N))
                        if distance_comp:
                            distances.append(distance_comp)
                            i += 1
                        else:
                            break
                    distances = [dis_pair for dis_list in distances for dis_pair in dis_list]
                    argmin_distance = [np.argmin(l) for l in distances]
                    pool.terminate()
                except Exception as e:
                    print("POOL ERROR: " + str(e))
                    pool.close()
                    pool.terminate()
            else:
                distances = list(map(density_broad_search_star, list(GT_distances)))
                argmin_distance = [np.argmin(l) for l in distances]
            for i in range(GT_radius.shape[0]):
                cc_big_brother[indx_chunk[i]] = np.where(GT_radius[i,:] == 1)[0][argmin_distance[i]]
                cc_best_distance[indx_chunk[i]] = distances[i][argmin_distance[i]]
        # Map component-local big brothers back to global indices.
        big_brother[cc_idx] = [cc_idx[i] for i in cc_big_brother.astype(int)]
        best_distance[cc_idx] = cc_best_distance
    return best_distance, big_brother, ps
def get_y(CCmat, components, knn_radius, best_distance, big_brother, rho, alpha, d):
    """Assign cluster labels by following each point's "big brother".

    Within every connected component, candidate cluster centres are the
    points with the largest best_distance / knn_radius ratio ("peaked").
    Candidates are accepted until a level-set connectivity test (driven
    by rho, alpha and the dimension d) indicates that further candidates
    merge into existing centres.  Each point is then labelled by the
    centre its big-brother chain leads to.

    Returns:
        y_pred: int array of cluster labels (-1 for NaN/outlier points).
        peaks: list of global indices of the chosen cluster centres.
    """
    n = components.shape[0]
    y_pred = np.repeat(-1, n)
    peaks = []
    n_cent = 0  # running label offset so labels are unique across components
    comps = np.unique((components[~np.isnan(components)])).astype(int)
    for cc in comps:
        cc_idx = np.where(components == cc)[0]
        nc = len(cc_idx)
        tested = []
        cc_knn_radius = knn_radius[cc_idx]
        cc_best_distance = best_distance[cc_idx]
        # Lines to convert Big Brother (global indices) into CC_Big_brother
        # (component-local indices).
        index = np.argsort(cc_idx)
        sorted_x = cc_idx[index]
        sorted_index = np.searchsorted(sorted_x, big_brother[cc_idx])
        cc_big_brother = np.take(index, sorted_index, mode="clip")
        not_tested = np.ones(nc, dtype = bool)
        # "Peakedness": how isolated a point is relative to its density.
        peaked = cc_best_distance/cc_knn_radius
        peaked[(cc_best_distance==0)*(cc_knn_radius==0)] = np.inf
        cc_centers = [np.argmax(peaked)]
        not_tested[cc_centers[0]] = False
        while True:
            # Make sure not all points have been assessed.
            if np.sum(not_tested) == 0:
                break
            # The next most peaked untested point is the candidate centre.
            subset_idx = np.argmax(peaked[not_tested])
            prop_cent = np.arange(peaked.shape[0])[not_tested][subset_idx]
            tested.append(np.arange(peaked.shape[0])[not_tested][subset_idx])
            CCmat_level = CCmat[cc_idx, :][:, cc_idx]
            # If the candidate's radius exceeds every accepted centre's,
            # stop when its level set forms a single connected piece.
            # NOTE(review): directed='False' / 'True' below are truthy
            # *strings*, so scipy always treats the graph as directed;
            # boolean arguments were most likely intended.
            if cc_knn_radius[prop_cent] > max(cc_knn_radius[~not_tested]):
                cc_level_set = np.where(cc_knn_radius <= cc_knn_radius[prop_cent])[0]
                CCmat_check = CCmat_level[cc_level_set, :][:, cc_level_set]
                n_cc, _ = scipy.sparse.csgraph.connected_components(CCmat_check, directed = 'False', return_labels =True)
                if n_cc == 1:
                    break
            # Prune vertices/edges above the rho/alpha cutoffs; the zero
            # -radius case uses non-strict comparisons.
            if cc_knn_radius[prop_cent] > 0:
                v_cutoff = cc_knn_radius[prop_cent]/(rho**(1/d))
                e_cutoff = cc_knn_radius[prop_cent]/alpha
                e_mask = np.abs(CCmat_level.data) > e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius < v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            else:
                v_cutoff = cc_knn_radius[prop_cent]/(rho**(1/d))
                e_cutoff = cc_knn_radius[prop_cent]/alpha
                e_mask = np.abs(CCmat_level.data) >= e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius <= v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            # Now to check if the point's level set contains any previous centers.
            _, cc_labels = scipy.sparse.csgraph.connected_components(CCmat_level, directed = 'False', return_labels =True)
            del CCmat_level
            gc.collect()
            center_comp = cc_labels[np.isin(cc_cut_idx, cc_centers)]
            prop_cent_comp = cc_labels[np.where(cc_cut_idx == prop_cent)[0]]
            # We want to check all points that have gamma equal to the
            # gamma of the existing centers: ties keep the search going,
            # anything less peaked ends it.
            if np.isin(prop_cent_comp, center_comp):
                if peaked[prop_cent] == min(peaked[cc_centers]):
                    not_tested[prop_cent] = False
                    continue
                else:
                    break
            else:
                cc_centers.append(prop_cent)
                not_tested[prop_cent] = False
        cc_centers = np.array(cc_centers)
        peaks.extend(cc_idx[cc_centers])
        # Build the big-brother forest (centres point at themselves) and
        # label each tree as one cluster via connected components.
        BBTree = np.zeros((nc, 2))
        BBTree[:, 0] = range(nc)
        BBTree[:, 1] = cc_big_brother
        BBTree[cc_centers,1] = cc_centers
        BBTree = BBTree.astype(int)
        Clustmat = scipy.sparse.csr_matrix((np.ones((nc)), (BBTree[:,0], BBTree[:, 1])), shape = (nc, nc))
        n_clusts, cc_y_pred = scipy.sparse.csgraph.connected_components(Clustmat, directed = 'True', return_labels =True)
        cc_y_pred += n_cent
        n_cent += n_clusts
        y_pred[cc_idx] = cc_y_pred
    return y_pred, peaks
def get_y_match(CCmat, img_label, components, knn_radius, best_distance, big_brother, rho, alpha, d):
    """Assign cluster labels within each connected component, merging
    big-brother trees only when the two clusters share no image label.

    Parameters mirror :func:`get_y`, plus ``img_label`` giving one label per
    point; a child cluster is merged into its parent cluster only if their
    label sets are disjoint (used for instance matching across images).

    Returns
    -------
    (y_pred, peaks) : per-point integer labels (-1 for outliers) and the
    global indices of the selected density peaks.
    """
    n = components.shape[0]
    y_pred = np.repeat(-1, n)
    peaks = []
    n_cent = 0
    comps = np.unique(components[~np.isnan(components)]).astype(int)
    for cc in comps:
        cc_idx = np.where(components == cc)[0]
        nc = len(cc_idx)
        cc_knn_radius = knn_radius[cc_idx]
        cc_best_distance = best_distance[cc_idx]
        cc_img = img_label[cc_idx]
        # Convert global big-brother indices into component-local indices.
        index = np.argsort(cc_idx)
        sorted_x = cc_idx[index]
        sorted_index = np.searchsorted(sorted_x, big_brother[cc_idx])
        cc_big_brother = np.take(index, sorted_index, mode="clip")
        not_tested = np.ones(nc, dtype=bool)
        peaked = cc_best_distance / cc_knn_radius
        # 0/0 marks an isolated density maximum: force it to be the top peak.
        peaked[(cc_best_distance == 0) * (cc_knn_radius == 0)] = np.inf
        cc_centers = [np.argmax(peaked)]
        not_tested[cc_centers[0]] = False
        while True:
            # Stop once every point has been assessed.
            if np.sum(not_tested) == 0:
                break
            # Next most "peaked" untested point is the candidate centre.
            subset_idx = np.argmax(peaked[not_tested])
            prop_cent = np.arange(peaked.shape[0])[not_tested][subset_idx]
            CCmat_level = CCmat[cc_idx, :][:, cc_idx]
            # If the candidate's level set is one connected component, no
            # further centres can emerge: stop searching.
            if cc_knn_radius[prop_cent] > max(cc_knn_radius[~not_tested]):
                cc_level_set = np.where(cc_knn_radius <= cc_knn_radius[prop_cent])[0]
                CCmat_check = CCmat_level[cc_level_set, :][:, cc_level_set]
                # BUG FIX: directed was the *string* 'False' (truthy); pass a
                # real boolean so the graph is treated as undirected.
                n_cc, _ = scipy.sparse.csgraph.connected_components(CCmat_check, directed=False, return_labels=True)
                if n_cc == 1:
                    break
            if cc_knn_radius[prop_cent] > 0:
                v_cutoff = cc_knn_radius[prop_cent] / (rho ** (1 / d))
                e_cutoff = cc_knn_radius[prop_cent] / alpha
                e_mask = np.abs(CCmat_level.data) > e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius < v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            else:
                # Zero radius: use inclusive cutoffs so ties survive the prune.
                v_cutoff = cc_knn_radius[prop_cent] / (rho ** (1 / d))
                e_cutoff = cc_knn_radius[prop_cent] / alpha
                e_mask = np.abs(CCmat_level.data) >= e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius <= v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            # Does the candidate's pruned level set already contain a centre?
            # BUG FIX: directed='False' (string) -> False, as above.
            _, cc_labels = scipy.sparse.csgraph.connected_components(CCmat_level, directed=False, return_labels=True)
            del CCmat_level
            gc.collect()
            center_comp = cc_labels[np.isin(cc_cut_idx, cc_centers)]
            prop_cent_comp = cc_labels[np.where(cc_cut_idx == prop_cent)[0]]
            if np.isin(prop_cent_comp, center_comp):
                # Ties in gamma with an existing centre keep the scan going.
                if peaked[prop_cent] == min(peaked[cc_centers]):
                    not_tested[prop_cent] = False
                    continue
                else:
                    break
            else:
                cc_centers.append(prop_cent)
                not_tested[prop_cent] = False
        cc_centers = np.array(cc_centers)
        peaks.extend(cc_idx[cc_centers])
        # Merge big-brother trees bottom-up, in order of increasing distance,
        # but only when parent and child clusters share no image label.
        cluster_member = np.arange(len(cc_idx))
        cc_big_brother[cc_centers] = -1
        cc_best_distance[cc_centers] = 0
        sorted_idx = np.argsort(cc_best_distance)
        for j in range(0, len(cc_idx)):
            idx = sorted_idx[j]
            parent_idx = cc_big_brother[idx]
            if parent_idx != -1:
                x = np.take(cc_img, np.where(cluster_member == cluster_member[parent_idx]))
                y = np.take(cc_img, np.where(cluster_member == cluster_member[idx]))
                isin_truth = np.isin(x, y)
                # Merge only when the label sets are disjoint.
                if not (isin_truth.any()):
                    cluster_member[cluster_member == cluster_member[idx]] = cluster_member[parent_idx]
        # (Removed an unused np.unique debugging computation present in the
        # original; it had no side effects.)
        y_pred[cc_idx] = cluster_member + n_cent
        n_cent += max(cluster_member) + 1
    return y_pred, peaks
class CPFcluster:
    """Component-wise Peak Finding (CPF) clustering estimator.

    Parameters
    ----------
    k : int
        Number of nearest neighbours used to build the kNN graph.
    rho : float, default 0.4
        Level-set cutoff parameter for centre selection.
    alpha : float, default 1
        Edge-weight cutoff parameter for centre selection.
    n_jobs : int, default 1
        Worker count for neighbour searches and distance computations.
    remove_duplicates : bool, default False
        If True, duplicate rows of X are dropped before fitting.
    cutoff : int, default 1
        Connected components with <= cutoff points are treated as outliers.

    After ``fit``, ``memberships`` holds per-point labels (-1 = outlier)
    and ``peaks`` the indices of the selected density peaks.
    """

    def __init__(self, k, rho = 0.4, alpha = 1, n_jobs = 1, remove_duplicates = False, cutoff = 1):
        self.k = k
        self.rho = rho
        self.alpha = alpha
        self.n_jobs = n_jobs
        self.remove_duplicates = remove_duplicates
        self.cutoff = cutoff

    def fit(self, X):
        """Cluster the rows of X (an n x d array); results stored on self."""
        # BUG FIX: was `type(X) is not np.ndarray`, which rejected ndarray
        # subclasses; isinstance is the idiomatic, backward-compatible check.
        if not isinstance(X, np.ndarray):
            raise ValueError("X must be an n x d numpy array.")
        if self.remove_duplicates:
            X = np.unique(X, axis=0)
        n, d = X.shape
        if self.k > n:
            raise ValueError("k cannot be larger than n.")
        self.components, self.CCmat, knn_radius = build_CCgraph(X, self.k, self.cutoff, self.n_jobs)
        best_distance, big_brother, self.ps = get_density_dists_bb(X, self.k, self.components, knn_radius, self.n_jobs)
        self.memberships, self.peaks = get_y(self.CCmat, self.components, knn_radius, best_distance, big_brother, self.rho, self.alpha, d)
class CPFmatch:
    """CPF clustering with image-label matching (see :class:`CPFcluster`).

    Identical to CPFcluster except that ``fit`` also takes ``img_label``
    (one label per row of X) and uses :func:`get_y_match`, which refuses to
    merge clusters whose image-label sets overlap.
    """

    def __init__(self, k, rho = 0.4, alpha = 1, n_jobs = 1, remove_duplicates = False, cutoff = 1):
        self.k = k
        self.rho = rho
        self.alpha = alpha
        self.n_jobs = n_jobs
        self.remove_duplicates = remove_duplicates
        self.cutoff = cutoff

    def fit(self, X, img_label):
        """Cluster the rows of X with label-aware merging; results on self."""
        # BUG FIX: isinstance instead of an exact type() comparison, so
        # np.ndarray subclasses are accepted as well.
        if not isinstance(X, np.ndarray):
            raise ValueError("X must be an n x d numpy array.")
        if self.remove_duplicates:
            X = np.unique(X, axis=0)
        n, d = X.shape
        if self.k > n:
            raise ValueError("k cannot be larger than n.")
        self.components, self.CCmat, knn_radius = build_CCgraph(X, self.k, self.cutoff, self.n_jobs)
        best_distance, big_brother, self.ps = get_density_dists_bb(X, self.k, self.components, knn_radius, self.n_jobs)
        self.memberships, self.peaks = get_y_match(self.CCmat, img_label, self.components, knn_radius, best_distance, big_brother, self.rho, self.alpha, d)

# Retained from the mangled source line that followed this class: the next
# module copy begins with this import.
import numpy as np
import scipy.sparse
import math
import multiprocessing as mp
import itertools
import sys
import os
import gc
from sklearn.neighbors import NearestNeighbors, kneighbors_graph, KDTree
from sklearn.metrics.pairwise import euclidean_distances
def chunks(lst, n):
    """Yield successive slices of *lst*, each of length at most *n*."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def density_broad_search_star(a_b):
    """Pool.map helper: unpack a (target_point, candidate_points) pair and
    return the euclidean distances from the candidates to the target.

    BUG FIX: the original wrapped every error in ``raise Exception(e)``,
    which discarded the exception type and traceback while adding nothing;
    errors now propagate unchanged (still caught by callers' broad
    ``except Exception`` handlers).
    """
    return euclidean_distances(a_b[1], a_b[0])
def build_CCgraph(X, k, cutoff, n_jobs):
    """Build the mutual-kNN graph of X and label its connected components.

    Parameters
    ----------
    X : (n, d) array of points.
    k : number of neighbours for the kNN graph.
    cutoff : components with <= cutoff points are marked as outliers.
    n_jobs : parallel workers for the neighbour search.

    Returns
    -------
    components : float array of per-point component labels, NaN for outliers.
    CCmat : sparse symmetric distance matrix of the mutual-kNN graph.
    knn_radius : distance from each point to its k-th nearest neighbour.
    """
    kdt = NearestNeighbors(n_neighbors=k, metric='euclidean', n_jobs=n_jobs, algorithm='kd_tree').fit(X)
    CCmat = kdt.kneighbors_graph(X, mode='distance')
    distances, _ = kdt.kneighbors(X)
    knn_radius = distances[:, k - 1]
    # Keep only mutual edges: an edge survives iff present in both directions.
    CCmat = CCmat.minimum(CCmat.T)
    # Remove outlying points: those in very small (<= cutoff) components.
    # BUG FIX: directed was the *string* 'False', which is truthy; pass a real
    # boolean so the symmetric graph is treated as undirected.
    _, components = scipy.sparse.csgraph.connected_components(CCmat, directed=False, return_labels=True)
    comp_labs, comp_count = np.unique(components, return_counts=True)
    outlier_components = comp_labs[comp_count <= cutoff]
    nanidx = np.in1d(components, outlier_components)
    components = components.astype(float)
    if sum(nanidx) > 0:
        components[nanidx] = np.nan
    return components, CCmat, knn_radius
def get_density_dists_bb(X, k, components, knn_radius, n_jobs):
    """For every point, find its "big brother": the nearest point with a
    strictly smaller kNN radius (i.e. higher density), per component.

    Returns
    -------
    best_distance : distance to the big brother (inf for density maxima).
    big_brother : global index of the big brother (a maximum points to itself).
    ps : per-component (size, fraction needing brute-force search) stats.

    BUG FIXES vs. original: ``utils.chunks`` and
    ``utils.density_broad_search_star`` raised NameError (no ``utils`` module
    is imported; both helpers are defined at module level), and
    ``"POOL ERROR: " + e`` raised TypeError when the pool failed.
    """
    best_distance = np.empty((X.shape[0]))
    best_distance[:] = np.nan
    big_brother = np.empty((X.shape[0]))
    big_brother[:] = np.nan
    comps = np.unique(components[~np.isnan(components)]).astype(int)
    ps = np.zeros((1, 2))
    for cc in comps:
        cc_idx = np.where(components == cc)[0]
        nc = len(cc_idx)
        kcc = min(k, nc - 1)
        kdt = NearestNeighbors(n_neighbors=kcc, metric='euclidean', n_jobs=n_jobs, algorithm='kd_tree').fit(X[cc_idx, :])
        distances, neighbors = kdt.kneighbors(X[cc_idx, :])
        cc_knn_radius = knn_radius[cc_idx]
        cc_best_distance = np.empty((nc))
        cc_big_brother = np.empty((nc))
        # Fast path: points whose big brother already sits among their k
        # nearest neighbours (first neighbour with strictly smaller radius).
        cc_radius_diff = cc_knn_radius[:, np.newaxis] - cc_knn_radius[neighbors]
        rows, cols = np.where(cc_radius_diff > 0)
        rows, unidx = np.unique(rows, return_index=True)
        del cc_radius_diff
        gc.collect()
        cols = cols[unidx]
        cc_big_brother[rows] = neighbors[rows, cols]
        cc_best_distance[rows] = distances[rows, cols]
        # The rest need a brute-force search over the whole component.
        search_idx = list(np.setdiff1d(list(range(X[cc_idx, :].shape[0])), rows))
        ps = np.vstack((ps, [len(cc_idx), len(search_idx) / len(cc_idx)]))
        for indx_chunk in chunks(search_idx, 100):  # BUG FIX: was utils.chunks
            search_radius = cc_knn_radius[indx_chunk]
            GT_radius = cc_knn_radius < search_radius[:, np.newaxis]
            if any(np.sum(GT_radius, axis=1) == 0):
                # Density maxima have no denser point: they become their own
                # big brother at infinite distance; extra maxima in the same
                # chunk are pointed at the first one.
                max_i = [i for i in range(GT_radius.shape[0]) if np.sum(GT_radius[i, :]) == 0]
                if len(max_i) > 1:
                    for max_j in max_i[1:len(max_i)]:
                        GT_radius[max_j, indx_chunk[max_i[0]]] = True
                max_i = max_i[0]
                cc_big_brother[indx_chunk[max_i]] = indx_chunk[max_i]
                cc_best_distance[indx_chunk[max_i]] = np.inf
                del indx_chunk[max_i]
                GT_radius = np.delete(GT_radius, max_i, 0)
            GT_distances = ([X[cc_idx[indx_chunk[i]], np.newaxis], X[cc_idx[GT_radius[i, :]], :]] for i in range(len(indx_chunk)))
            if (GT_radius.shape[0] > 50):
                # Large chunk: compute candidate distances in parallel.
                try:
                    pool = mp.Pool(processes=n_jobs)
                    N = 25
                    distances = []
                    while True:
                        # BUG FIX: was utils.density_broad_search_star.
                        distance_comp = pool.map(density_broad_search_star, itertools.islice(GT_distances, N))
                        if distance_comp:
                            distances.append(distance_comp)
                        else:
                            break
                    distances = [dis_pair for dis_list in distances for dis_pair in dis_list]
                    argmin_distance = [np.argmin(l) for l in distances]
                    pool.terminate()
                except Exception as e:
                    # BUG FIX: "POOL ERROR: " + e was a TypeError (str + Exception).
                    print("POOL ERROR: " + str(e))
                    pool.close()
                    pool.terminate()
            else:
                distances = list(map(density_broad_search_star, list(GT_distances)))
                argmin_distance = [np.argmin(l) for l in distances]
            for i in range(GT_radius.shape[0]):
                cc_big_brother[indx_chunk[i]] = np.where(GT_radius[i, :] == 1)[0][argmin_distance[i]]
                cc_best_distance[indx_chunk[i]] = distances[i][argmin_distance[i]]
        # Map component-local big-brother indices back to global indices.
        big_brother[cc_idx] = [cc_idx[i] for i in cc_big_brother.astype(int)]
        best_distance[cc_idx] = cc_best_distance
    return best_distance, big_brother, ps
def get_y(CCmat, components, knn_radius, best_distance, big_brother, rho, alpha, d):
    """Assign cluster labels within each connected component.

    Iteratively proposes density peaks (points with a large
    best_distance/knn_radius ratio) as cluster centres, accepting a proposal
    only if its pruned level set is disconnected from every existing centre.
    Points are then clustered by following their big-brother tree to a centre.

    Returns
    -------
    (y_pred, peaks) : per-point integer labels (-1 for outliers) and the
    global indices of the selected density peaks.
    """
    n = components.shape[0]
    y_pred = np.repeat(-1, n)
    peaks = []
    n_cent = 0
    comps = np.unique(components[~np.isnan(components)]).astype(int)
    for cc in comps:
        cc_idx = np.where(components == cc)[0]
        nc = len(cc_idx)
        cc_knn_radius = knn_radius[cc_idx]
        cc_best_distance = best_distance[cc_idx]
        # Convert global big-brother indices into component-local indices.
        index = np.argsort(cc_idx)
        sorted_x = cc_idx[index]
        sorted_index = np.searchsorted(sorted_x, big_brother[cc_idx])
        cc_big_brother = np.take(index, sorted_index, mode="clip")
        not_tested = np.ones(nc, dtype=bool)
        peaked = cc_best_distance / cc_knn_radius
        # 0/0 marks an isolated density maximum: force it to be the top peak.
        peaked[(cc_best_distance == 0) * (cc_knn_radius == 0)] = np.inf
        cc_centers = [np.argmax(peaked)]
        not_tested[cc_centers[0]] = False
        while True:
            # Stop once every point has been assessed.
            if np.sum(not_tested) == 0:
                break
            # Next most "peaked" untested point is the candidate centre.
            subset_idx = np.argmax(peaked[not_tested])
            prop_cent = np.arange(peaked.shape[0])[not_tested][subset_idx]
            CCmat_level = CCmat[cc_idx, :][:, cc_idx]
            # If the candidate's level set is one connected component, no
            # further centres can emerge: stop searching.
            if cc_knn_radius[prop_cent] > max(cc_knn_radius[~not_tested]):
                cc_level_set = np.where(cc_knn_radius <= cc_knn_radius[prop_cent])[0]
                CCmat_check = CCmat_level[cc_level_set, :][:, cc_level_set]
                # BUG FIX: directed was the *string* 'False' (truthy); pass a
                # real boolean so the graph is treated as undirected.
                n_cc, _ = scipy.sparse.csgraph.connected_components(CCmat_check, directed=False, return_labels=True)
                if n_cc == 1:
                    break
            if cc_knn_radius[prop_cent] > 0:
                v_cutoff = cc_knn_radius[prop_cent] / (rho ** (1 / d))
                e_cutoff = cc_knn_radius[prop_cent] / alpha
                e_mask = np.abs(CCmat_level.data) > e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius < v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            else:
                # Zero radius: use inclusive cutoffs so ties survive the prune.
                v_cutoff = cc_knn_radius[prop_cent] / (rho ** (1 / d))
                e_cutoff = cc_knn_radius[prop_cent] / alpha
                e_mask = np.abs(CCmat_level.data) >= e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius <= v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            # Does the candidate's pruned level set already contain a centre?
            # BUG FIX: directed='False' (string) -> False, as above.
            _, cc_labels = scipy.sparse.csgraph.connected_components(CCmat_level, directed=False, return_labels=True)
            del CCmat_level
            gc.collect()
            center_comp = cc_labels[np.isin(cc_cut_idx, cc_centers)]
            prop_cent_comp = cc_labels[np.where(cc_cut_idx == prop_cent)[0]]
            if np.isin(prop_cent_comp, center_comp):
                # Ties in gamma with an existing centre keep the scan going.
                if peaked[prop_cent] == min(peaked[cc_centers]):
                    not_tested[prop_cent] = False
                    continue
                else:
                    break
            else:
                cc_centers.append(prop_cent)
                not_tested[prop_cent] = False
        cc_centers = np.array(cc_centers)
        peaks.extend(cc_idx[cc_centers])
        # Build the big-brother forest; centres point to themselves so each
        # tree (= one cluster) is a weakly connected component.
        BBTree = np.zeros((nc, 2))
        BBTree[:, 0] = range(nc)
        BBTree[:, 1] = cc_big_brother
        BBTree[cc_centers, 1] = cc_centers
        BBTree = BBTree.astype(int)
        Clustmat = scipy.sparse.csr_matrix((np.ones((nc)), (BBTree[:, 0], BBTree[:, 1])), shape=(nc, nc))
        # BUG FIX: directed='True' (string) -> True; weak connectivity groups
        # each big-brother tree into a single cluster.
        n_clusts, cc_y_pred = scipy.sparse.csgraph.connected_components(Clustmat, directed=True, return_labels=True)
        cc_y_pred += n_cent
        n_cent += n_clusts
        y_pred[cc_idx] = cc_y_pred
    return y_pred, peaks
def get_y_match(CCmat, img_label, components, knn_radius, best_distance, big_brother, rho, alpha, d):
    """Assign cluster labels within each connected component, merging
    big-brother trees only when the two clusters share no image label.

    Parameters mirror :func:`get_y`, plus ``img_label`` giving one label per
    point; a child cluster is merged into its parent cluster only if their
    label sets are disjoint (used for instance matching across images).

    Returns
    -------
    (y_pred, peaks) : per-point integer labels (-1 for outliers) and the
    global indices of the selected density peaks.
    """
    n = components.shape[0]
    y_pred = np.repeat(-1, n)
    peaks = []
    n_cent = 0
    comps = np.unique(components[~np.isnan(components)]).astype(int)
    for cc in comps:
        cc_idx = np.where(components == cc)[0]
        nc = len(cc_idx)
        cc_knn_radius = knn_radius[cc_idx]
        cc_best_distance = best_distance[cc_idx]
        cc_img = img_label[cc_idx]
        # Convert global big-brother indices into component-local indices.
        index = np.argsort(cc_idx)
        sorted_x = cc_idx[index]
        sorted_index = np.searchsorted(sorted_x, big_brother[cc_idx])
        cc_big_brother = np.take(index, sorted_index, mode="clip")
        not_tested = np.ones(nc, dtype=bool)
        peaked = cc_best_distance / cc_knn_radius
        # 0/0 marks an isolated density maximum: force it to be the top peak.
        peaked[(cc_best_distance == 0) * (cc_knn_radius == 0)] = np.inf
        cc_centers = [np.argmax(peaked)]
        not_tested[cc_centers[0]] = False
        while True:
            # Stop once every point has been assessed.
            if np.sum(not_tested) == 0:
                break
            # Next most "peaked" untested point is the candidate centre.
            subset_idx = np.argmax(peaked[not_tested])
            prop_cent = np.arange(peaked.shape[0])[not_tested][subset_idx]
            CCmat_level = CCmat[cc_idx, :][:, cc_idx]
            # If the candidate's level set is one connected component, no
            # further centres can emerge: stop searching.
            if cc_knn_radius[prop_cent] > max(cc_knn_radius[~not_tested]):
                cc_level_set = np.where(cc_knn_radius <= cc_knn_radius[prop_cent])[0]
                CCmat_check = CCmat_level[cc_level_set, :][:, cc_level_set]
                # BUG FIX: directed was the *string* 'False' (truthy); pass a
                # real boolean so the graph is treated as undirected.
                n_cc, _ = scipy.sparse.csgraph.connected_components(CCmat_check, directed=False, return_labels=True)
                if n_cc == 1:
                    break
            if cc_knn_radius[prop_cent] > 0:
                v_cutoff = cc_knn_radius[prop_cent] / (rho ** (1 / d))
                e_cutoff = cc_knn_radius[prop_cent] / alpha
                e_mask = np.abs(CCmat_level.data) > e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius < v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            else:
                # Zero radius: use inclusive cutoffs so ties survive the prune.
                v_cutoff = cc_knn_radius[prop_cent] / (rho ** (1 / d))
                e_cutoff = cc_knn_radius[prop_cent] / alpha
                e_mask = np.abs(CCmat_level.data) >= e_cutoff
                CCmat_level.data[e_mask] = 0
                CCmat_level.eliminate_zeros()
                cc_cut_idx = np.where(cc_knn_radius <= v_cutoff)[0]
                CCmat_level = CCmat_level[cc_cut_idx, :][:, cc_cut_idx]
            # Does the candidate's pruned level set already contain a centre?
            # BUG FIX: directed='False' (string) -> False, as above.
            _, cc_labels = scipy.sparse.csgraph.connected_components(CCmat_level, directed=False, return_labels=True)
            del CCmat_level
            gc.collect()
            center_comp = cc_labels[np.isin(cc_cut_idx, cc_centers)]
            prop_cent_comp = cc_labels[np.where(cc_cut_idx == prop_cent)[0]]
            if np.isin(prop_cent_comp, center_comp):
                # Ties in gamma with an existing centre keep the scan going.
                if peaked[prop_cent] == min(peaked[cc_centers]):
                    not_tested[prop_cent] = False
                    continue
                else:
                    break
            else:
                cc_centers.append(prop_cent)
                not_tested[prop_cent] = False
        cc_centers = np.array(cc_centers)
        peaks.extend(cc_idx[cc_centers])
        # Merge big-brother trees bottom-up, in order of increasing distance,
        # but only when parent and child clusters share no image label.
        cluster_member = np.arange(len(cc_idx))
        cc_big_brother[cc_centers] = -1
        cc_best_distance[cc_centers] = 0
        sorted_idx = np.argsort(cc_best_distance)
        for j in range(0, len(cc_idx)):
            idx = sorted_idx[j]
            parent_idx = cc_big_brother[idx]
            if parent_idx != -1:
                x = np.take(cc_img, np.where(cluster_member == cluster_member[parent_idx]))
                y = np.take(cc_img, np.where(cluster_member == cluster_member[idx]))
                isin_truth = np.isin(x, y)
                # Merge only when the label sets are disjoint.
                if not (isin_truth.any()):
                    cluster_member[cluster_member == cluster_member[idx]] = cluster_member[parent_idx]
        # (Removed an unused np.unique debugging computation present in the
        # original; it had no side effects.)
        y_pred[cc_idx] = cluster_member + n_cent
        n_cent += max(cluster_member) + 1
    return y_pred, peaks
class CPFcluster:
    """Component-wise Peak Finding (CPF) clustering estimator.

    Parameters
    ----------
    k : int
        Number of nearest neighbours used to build the kNN graph.
    rho : float, default 0.4
        Level-set cutoff parameter for centre selection.
    alpha : float, default 1
        Edge-weight cutoff parameter for centre selection.
    n_jobs : int, default 1
        Worker count for neighbour searches and distance computations.
    remove_duplicates : bool, default False
        If True, duplicate rows of X are dropped before fitting.
    cutoff : int, default 1
        Connected components with <= cutoff points are treated as outliers.

    After ``fit``, ``memberships`` holds per-point labels (-1 = outlier)
    and ``peaks`` the indices of the selected density peaks.
    """

    def __init__(self, k, rho = 0.4, alpha = 1, n_jobs = 1, remove_duplicates = False, cutoff = 1):
        self.k = k
        self.rho = rho
        self.alpha = alpha
        self.n_jobs = n_jobs
        self.remove_duplicates = remove_duplicates
        self.cutoff = cutoff

    def fit(self, X):
        """Cluster the rows of X (an n x d array); results stored on self."""
        # BUG FIX: was `type(X) is not np.ndarray`, which rejected ndarray
        # subclasses; isinstance is the idiomatic, backward-compatible check.
        if not isinstance(X, np.ndarray):
            raise ValueError("X must be an n x d numpy array.")
        if self.remove_duplicates:
            X = np.unique(X, axis=0)
        n, d = X.shape
        if self.k > n:
            raise ValueError("k cannot be larger than n.")
        self.components, self.CCmat, knn_radius = build_CCgraph(X, self.k, self.cutoff, self.n_jobs)
        best_distance, big_brother, self.ps = get_density_dists_bb(X, self.k, self.components, knn_radius, self.n_jobs)
        self.memberships, self.peaks = get_y(self.CCmat, self.components, knn_radius, best_distance, big_brother, self.rho, self.alpha, d)
class CPFmatch:
    """CPF clustering with image-label matching (see :class:`CPFcluster`).

    Identical to CPFcluster except that ``fit`` also takes ``img_label``
    (one label per row of X) and uses :func:`get_y_match`, which refuses to
    merge clusters whose image-label sets overlap.
    """

    def __init__(self, k, rho = 0.4, alpha = 1, n_jobs = 1, remove_duplicates = False, cutoff = 1):
        self.k = k
        self.rho = rho
        self.alpha = alpha
        self.n_jobs = n_jobs
        self.remove_duplicates = remove_duplicates
        self.cutoff = cutoff

    def fit(self, X, img_label):
        """Cluster the rows of X with label-aware merging; results on self."""
        # BUG FIX: isinstance instead of an exact type() comparison, so
        # np.ndarray subclasses are accepted as well.
        if not isinstance(X, np.ndarray):
            raise ValueError("X must be an n x d numpy array.")
        if self.remove_duplicates:
            X = np.unique(X, axis=0)
        n, d = X.shape
        if self.k > n:
            raise ValueError("k cannot be larger than n.")
        self.components, self.CCmat, knn_radius = build_CCgraph(X, self.k, self.cutoff, self.n_jobs)
        best_distance, big_brother, self.ps = get_density_dists_bb(X, self.k, self.components, knn_radius, self.n_jobs)
        self.memberships, self.peaks = get_y_match(self.CCmat, img_label, self.components, knn_radius, best_distance, big_brother, self.rho, self.alpha, d)
import pymysql
import os
import datetime
from prettytable import PrettyTable
# Module-level MySQL connection/cursor shared by every menu handler below.
# NOTE(review): empty root password and hard-coded credentials — fine for a
# local demo, not for deployment.
con = pymysql.connect('localhost','root','','Bank5')
cur = con.cursor()
def signup() :
    """Register a new customer: insert a `customer` row and matching `login`
    credentials, then commit. Uses the module-level `con`/`cur`."""
    os.system('clear')
    now = str(datetime.datetime.now())
    name = input("\nEnter Your Name -: ");
    address = input("\nEnter Your Address -: ")
    date = input("\nEnter date(yyy-mm-dd) -: ")
    contact = input("\nEnter Your Contact Number -: ")
    email = input("\nEnter Your Email -: ")
    try:
        # Account number = 1001 prefix followed by the 10-digit contact number.
        cur.execute("insert into customer (account_no,name,address,email,contact_no,account_type,balance,open_date,status) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)",(1001*(10**10)+int(contact),name,address,email,contact,"",int("0"),date,"open"))
        print("\n\nYou Have Successfully Registered with our Bank...")
        l = name.split()
        # Default password is the first name plus a fixed suffix.
        password = l[0]+"<PASSWORD>"
        username = 1001*(10**10)+int(contact)
        print("Your Username is -: ",username)
        print("Your Password is -: ",password)
        cur.execute("insert into login (user_name,password) values(%s,%s)",(username,password))
    except Exception as e:
        print(e)
        input()
        # NOTE(review): recursive retry on failure; the commit below still
        # runs after the nested call returns.
        signup()
    con.commit()
    input()
def signin() :
    """Authenticate a customer against the `login` table and run the account
    menu loop (address change, account opening, deposit, withdrawal,
    statement, transfer, closure, loan) until the user logs out.

    Uses the module-level `con`/`cur`. NOTE(review): indentation was
    reconstructed from flattened source; ambiguous attachments are flagged
    inline — verify against the original file.
    """
    os.system('clear')
    username = input("Enter Username -: ")
    password = input("Enter Password -: ")
    cur.execute("select * from login")
    data = cur.fetchall()
    for i in data:
        if i[0]==username and i[1]==password:
            print("Login Successfull...\nPress Enter to Continue..")
            input()
            os.system('clear')
            ch = 0
            # ch is later read as a *string* from input(), so "7" != 7 and the
            # loop in practice only ends via the `return`s below.
            while ch!=7:
                cur.execute("select * from customer where account_no = %s",(username))
                check = cur.fetchall()
                # Column 8 of `customer` is the account status.
                if check[0][8]=='open' or check[0][8]=='Open':
                    print("1. Address Change..\n2. Open New Account\n3. Money Deposit..\n4. Money Withdrawl..\n5. Print Statement..\n6. Transfer Money..\n7. Account Closure..\n8. Avail Loan\n9. Customer Logout..")
                    ch = input("\nEnter Your Choice -:")
                    if ch=="1": ## Address Change
                        address = input("\nEnter New Address to Update -: ")
                        try:
                            cur.execute("update customer set address = %s where account_no = %s",(address,username))
                            print("Address Changed Successfully..\nPress Enter to Continue..")
                            input()
                        except Exception as e:
                            print(e)
                            input()
                        con.commit()
                    elif ch=="2": # Open New Account
                        print("1. Open Saving Account..\n2. Open Current Account..\n3. Open FD..\n")
                        select = input("Enter Account Option -: ")
                        cur.execute("select * from transaction")
                        # Next transaction id is current row count + 1.
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        if select=="1":
                            cur.execute("select * from customer")
                            cust = cur.fetchall()
                            for x in cust:
                                if x[0]==i[0]:
                                    if x[5]=="saving" or x[5]=="current" or x[5]=="FD":
                                        print("Account Already exist......\n")
                                        input()
                                    else:
                                        balance = int(input("Enter Balance to Deposit -:"))
                                        try:
                                            cur.execute("update customer set balance = %s where account_no = %s",(balance,i[0]))
                                            cur.execute("update customer set account_type = %s where account_no = %s",("saving",i[0]))
                                            cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,"saving"))
                                        except Exception as e:
                                            print(e)
                                            input()
                                        con.commit()
                        elif select=="2":
                            cur.execute("select * from customer")
                            cust = cur.fetchall()
                            for x in cust:
                                if x[0]==i[0]:
                                    if x[5]=="saving" or x[5]=="current" or x[5]=="FD":
                                        print("Account Already exist......\n")
                                        input()
                                    else:
                                        balance = int(input("\nEnter Balance to deposit -: "))
                                        # Current accounts require a 5000 Rs. minimum.
                                        while balance<5000:
                                            print("Minimum Balance Should be 5000 Rs.")
                                            balance = int(input("Enter Balance to Deposit -:"))
                                        try:
                                            cur.execute("update customer set balance = %s where account_no = %s",(balance,i[0]))
                                            cur.execute("update customer set account_type = %s where account_no = %s",("current",i[0]))
                                            cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,"current"))
                                        except Exception as e:
                                            print(e)
                                            input()
                                        con.commit()
                                        break;
                        # NOTE(review): plain `if`, not `elif`, in the original.
                        if select=="3":
                            cur.execute("select * from customer")
                            cust = cur.fetchall()
                            for x in cust:
                                if x[0]==i[0]:
                                    if x[5]=="saving" or x[5]=="current":
                                        print("Account Already exist......\n")
                                        input()
                                    else:
                                        # Derive the next FD number from the first digit of
                                        # any existing FD belonging to this customer.
                                        c = ""
                                        cur.execute("select * from fd")
                                        fd = cur.fetchall()
                                        for y in fd:
                                            if y[0]==i[0]:
                                                c = str(y[1])
                                                c = c[:1]
                                        if c=="":
                                            fdaccount_no = "1FD"+i[0][4:]
                                        else:
                                            c = int(c)+1
                                            fdaccount_no = str(c)+"FD"+i[0][4:]
                                        balance = int(input("Enter Balance to Deposit in FD -: "))
                                        # NOTE(review): the check enforces 1000 but the
                                        # message says 5000 — likely a copy-paste slip.
                                        while balance<1000:
                                            print("Minimum Balance Should be 5000 Rs.")
                                            balance = int(input("Enter Balance to Deposit -:"))
                                        duration = int(input("Enter Duration of FD (in Months) -:"))
                                        while duration<12:
                                            print("Minimum Duration Should be 12 months.")
                                            duration = int(input("Enter Duration of FD (in Months) -:"))
                                        try:
                                            cur.execute("update customer set balance = %s where account_no = %s",(balance+int(x[6]),i[0]))
                                            cur.execute("update customer set account_type = %s where account_no = %s",("FD",i[0]))
                                            cur.execute("insert into fd (account_no,fd_account_no,amount,duration) values(%s,%s,%s,%s)",(i[0],fdaccount_no,balance,duration))
                                            cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,fdaccount_no))
                                            print("\nMoney Successfully Deposited in FD.....\n")
                                            input()
                                        except Exception as e:
                                            print(e)
                                            input()
                                        con.commit()
                    elif ch=="3": # Money Deposit
                        cur.execute("select * from customer")
                        money = cur.fetchall()
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        for j in money:
                            if j[0]==i[0]:
                                if j[5]!="FD":
                                    amount = int(input("\nEnter Amount to be deposited -: "))
                                    newamount = amount + int(j[6])
                                    try:
                                        cur.execute("update customer set balance = %s where account_no = %s",(newamount,i[0]))
                                        print("Amount Deposited Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",j[0],now[:11],amount,j[5]))
                                        input()
                                        break
                                    except Exception as e:
                                        print(e)
                                        input()
                                else :
                                    # FD accounts are credited via their FD number.
                                    fdno = input("\nEnter FD number to Deposit Money -:")
                                    cur.execute("select * from fd")
                                    fd_data = cur.fetchall()
                                    flag = -1
                                    for z in fd_data:
                                        if z[1]==fdno:
                                            flag = 0
                                            amount = int(input("\nEnter Amount to be deposited -: "))
                                            newamount = amount + int(j[6])
                                            try:
                                                cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                                print("\nAmount Deposited Successfully..\n")
                                                cur.execute("update fd set amount = %s where fd_account_no = %s",(z[2]+amount,fdno))
                                                print("Total FD Amount is -: ",z[2]+amount)
                                                cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",j[0],now[:11],amount,fdno))
                                                input()
                                                break
                                            except Exception as e:
                                                print(e)
                                    if flag==-1:
                                        print("\nSorry FD Number Does Not Exist.....\n")
                                        input()
                        con.commit()
                    elif ch=="4": #Money Withdrawl
                        cur.execute("select * from customer")
                        money = cur.fetchall()
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        flag = -1
                        for j in money:
                            # Withdrawal only from saving/current accounts.
                            if j[0]==i[0] and (j[5]=="current" or j[5]=="saving"):
                                flag = 0
                                amount = int(input("\nEnter Amount to be withdrawl -: "))
                                if amount<int(j[6]):
                                    newamount = int(j[6]) - amount
                                    try:
                                        cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                        print("Amount Withdrawl Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Debited",j[0],now[:11],amount,j[5]))
                                        input()
                                        con.commit()
                                    except Exception as e:
                                        print(e)
                                else :
                                    print("Entered Amount is greater than your balance..\nPress Enter to continue...")
                                    input()
                                break
                        if flag==-1:
                            print("\nMoney Cannot be Withdrawl From FD....")
                            input()
                        con.commit()
                    elif ch=="5": # Print Statement
                        cur.execute("select * from transaction")
                        statement = cur.fetchall()
                        cur.execute("select * from customer where account_no=%s",i[0])
                        acc = cur.fetchall()
                        print("\nName : ",acc[0][1],"\tEmail Id : ",acc[0][3],"\nMobile No. : ",acc[0][4],"\tAccount Type : ",acc[0][5],"\nBalance : ",acc[0][6])
                        cur.execute("select account_type from customer where account_no = %s ",i[0])
                        sorc = cur.fetchall()
                        if sorc[0][0]=="saving" or sorc[0][0]=="current":
                            t = PrettyTable(['Date','Transaction Type','Amount'])
                            for j in statement:
                                if i[0]==j[2]:
                                    t.add_row([j[3],j[1],j[4]])
                            print("\n",t)
                        else:
                            t = PrettyTable(['Date','FD Number','Amount Deposited'])
                            for j in statement:
                                if i[0]==j[2]:
                                    t.add_row([j[3],j[5],j[4]])
                            print("\n",t)
                    elif ch=="6": # Transfer Money
                        cur.execute("select * from customer")
                        transf = cur.fetchall()
                        for w in transf:
                            if w[0]==i[0] and w[5]=="FD":
                                print("Money Cannot be Transferred From FD Account...")
                                input()
                            elif w[0]==i[0] and w[5]!="FD":
                                cur.execute("select * from transaction")
                                count = cur.rowcount
                                now = str(datetime.datetime.now())
                                accno = input("\nEnter Account Number to Transfer Money -: ")
                                flag = -1
                                # m = the sender's customer row.
                                for j in transf:
                                    if i[0]==j[0]:
                                        m = j
                                for j in transf:
                                    if accno==j[0] and j[5]!="FD":
                                        # l = the recipient's customer row.
                                        l = j
                                        flag = 0
                                        print("\nName : ",l[1])
                                        amount = int(input("\nEnter Amount to be Transfer -: "))
                                        if amount<int(m[6]):
                                            newamount = int(m[6]) - amount
                                            try:
                                                cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                                cur.execute("update customer set balance = %s where account_no = %s",(amount+l[6],accno))
                                                print("Amount Transferred Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                                cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Debited",m[0],now[:11],amount,m[5]))
                                                cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+2,"Credited",accno,now[:11],amount,l[5]))
                                                input()
                                                con.commit()
                                            except Exception as e:
                                                print(e)
                                        else :
                                            print("Entered Amount is greater than your balance..\nPress Enter to continue...")
                                            input()
                                        break
                                    elif accno==j[0] and j[5]=="FD":
                                        # Recipient holds an FD: credit a specific FD number.
                                        l = j
                                        fdno = input("Enter FD Number to transfer Money -: ")
                                        flag1 = -1
                                        cur.execute("select * from fd")
                                        fddata = cur.fetchall()
                                        for q in fddata:
                                            if q[1]==fdno:
                                                flag1 = 0
                                                print("\nName : ",j[1])
                                                amount = int(input("\nEnter Amount to be Transfer -: "))
                                                if amount<int(m[6]):
                                                    newamount = int(m[6]) - amount
                                                    try:
                                                        cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                                        cur.execute("update customer set balance = %s where account_no = %s",(amount+l[6],accno))
                                                        print("Amount Transferred Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Debited",m[0],now[:11],amount,m[5]))
                                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+2,"Credited",accno,now[:11],amount,fdno))
                                                        cur.execute("update fd set amount = %s where fd_account_no = %s",(q[2]+amount,fdno))
                                                        input()
                                                        con.commit()
                                                    except Exception as e:
                                                        print(e)
                                                else :
                                                    print("Entered Amount is greater than your balance..\nPress Enter to continue...")
                                                    input()
                                                break
                                        if flag1==-1:
                                            print("\nFD Number Does not Exist...")
                                            input()
                                else:
                                    # NOTE(review): reconstructed as a for/else — runs when
                                    # no break occurred, i.e. accno matched no row; the
                                    # original flattened source is ambiguous here — confirm.
                                    print("\nAccount Number Does not Exist...")
                                    input()
                    elif ch=="7": # Account Closure
                        cur.execute("select * from customer")
                        status = cur.fetchall()
                        choice = input("\nWant to Close the Account(Y/N) -: ")
                        # NOTE(review): only lowercase "y" is accepted.
                        if choice=="y" :
                            try:
                                cur.execute("update customer set status = %s where account_no = %s",("close",username))
                                con.commit()
                            except Exception as e:
                                print(e)
                    elif ch=="8":
                        # Avail Loan — mirrors the FD-number derivation above.
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        cur.execute("select * from customer")
                        cust = cur.fetchall()
                        for x in cust:
                            if x[0]==i[0]:
                                if x[5]=="FD" or x[5]=="current":
                                    print("Loan Facility Not Available......\n")
                                    input()
                                else:
                                    c = ""
                                    cur.execute("select * from loan")
                                    ln = cur.fetchall()
                                    for y in ln:
                                        if y[0]==i[0]:
                                            c = str(y[1])
                                            c = c[:1]
                                    if c=="":
                                        lnaccount_no = "1LN"+i[0][4:]
                                    else:
                                        c = int(c)+1
                                        lnaccount_no = str(c)+"LN"+i[0][4:]
                                    balance = int(input("Enter Loan Amount -: "))
                                    # Loan capped at twice the current balance.
                                    while balance>2*int(x[6]):
                                        print("Loan Amount Should be Less than ",2*int(x[6]))
                                        balance = int(input("Enter Loan Amount -:"))
                                    duration = int(input("Enter Duration of Repayment (in Months) -:"))
                                    try:
                                        cur.execute("update customer set balance = %s where account_no = %s",(balance+int(x[6]),i[0]))
                                        cur.execute("insert into loan (account_no,loan_no,amount,repayment_term) values(%s,%s,%s,%s)",(i[0],lnaccount_no,balance,duration))
                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,lnaccount_no))
                                        print("\nLoan Passed Successfully.....\n")
                                        input()
                                        con.commit()
                                    except Exception as e:
                                        print(e)
                                        input()
                    elif ch=="9": # Log Out
                        print("You Have been loged out Successfully\nPress Enter to Continue..")
                        input()
                        return
                    else:
                        print("\nINVALID CHOICE...")
                        input()
                else:
                    print("\nAccount is Closed.....\nSorry Can't Perform Operations...")
                    input()
                    return
    # Reached when no login row matched the entered credentials.
    print("INVALID ID AND PASSWORD...")
    input()
    return
# Top-level menu loop: customer sign-up/sign-in plus a simple admin console.
ch = 0
while ch!=4:
    os.system('clear')
    print("1. Sign Up(New Customer) \n2. Sign In(Existing Customer) \n3. Admin Sign In \n4. Exit..")
    print("Enter Your Choice -: ")
    ch = int(input())
    if ch==1:
        signup()
    elif ch==2:
        signin()
    elif ch==3:
        os.system('clear')
        q = 0
        # q is read as a string, so "4" != 4: this loop exits via `break` below.
        while q!=4:
            print("1. Print Closed Account History..\n2. FD Report..\n3. Loan Report..\n4. Admin log out..")
            q = input("Enter Your Choice -: ")
            if q=="1":
                cur.execute("select * from customer where status = %s","close")
                data = cur.fetchall()
                t = PrettyTable(['Account Number','Name','Status'])
                for i in data:
                    t.add_row([i[0],i[1],i[8]])
                print(t)
            elif q=="2":
                cur.execute("select * from fd")
                data = cur.fetchall()
                t = PrettyTable(['Account Number','FD Number','Amount','Duration'])
                for i in data:
                    t.add_row([i[0],i[1],i[2],i[3]])
                print(t)
            elif q=="3":
                cur.execute("select * from loan")
                data = cur.fetchall()
                t = PrettyTable(['Account Number','Loan Number','Amount','Repayment Term'])
                for i in data:
                    t.add_row([i[0],i[1],i[2],i[3]])
                print(t)
            elif q=="4":
                print("You Have Been Logged out Successfully...")
                input()
                break
            else:
                print("Invalid Choice...")
                input()
cur.close() | bank5.py | import pymysql
import os
import datetime
from prettytable import PrettyTable
con = pymysql.connect('localhost','root','','Bank5')
cur = con.cursor()
def signup():
    """Interactively register a new customer and create a login row.

    Prompts for the customer's details on stdin, inserts a ``customer`` row
    (account type and balance start empty/zero) plus a matching ``login``
    row, then commits.  On any database/conversion error the whole dialog is
    restarted.  Uses the module-level MySQL handles ``cur`` and ``con``.
    """
    os.system('clear')
    name = input("\nEnter Your Name -: ")
    address = input("\nEnter Your Address -: ")
    date = input("\nEnter date(yyyy-mm-dd) -: ")
    contact = input("\nEnter Your Contact Number -: ")
    email = input("\nEnter Your Email -: ")
    try:
        # Account-number/username scheme: fixed 1001 prefix followed by the
        # 10-digit contact number (original convention, kept as-is).
        username = 1001*(10**10)+int(contact)
        cur.execute("insert into customer (account_no,name,address,email,contact_no,account_type,balance,open_date,status) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)",(username,name,address,email,contact,"",0,date,"open"))
        print("\n\nYou Have Successfully Registered with our Bank...")
        l = name.split()
        password = l[0]+"<PASSWORD>"
        print("Your Username is -: ",username)
        print("Your Password is -: ",password)
        cur.execute("insert into login (user_name,password) values(%s,%s)",(username,password))
    except Exception as e:
        # Show the error and restart the whole dialog.  Return afterwards so
        # the FAILED attempt does not also fall through to the commit below
        # (the original committed and prompted again after the retry).
        print(e)
        input()
        signup()
        return
    con.commit()
    input()
def signin() :
    """Interactive banking session for an existing customer.

    Authenticates the typed username/password against the ``login`` table,
    then repeatedly shows a text menu of account operations (address change,
    account opening, deposits, withdrawals, statements, transfers, closure,
    loans) until the customer logs out or the account is found closed.
    Relies on the module-level MySQL handles ``cur`` and ``con``.
    """
    os.system('clear')
    username = input("Enter Username -: ")
    password = input("Enter Password -: ")
    cur.execute("select * from login")
    data = cur.fetchall()
    # Credential check is a linear scan over the whole login table; ``i``
    # stays bound to the matching login row for the rest of the session.
    for i in data:
        if i[0]==username and i[1]==password:
            print("Login Successfull...\nPress Enter to Continue..")
            input()
            os.system('clear')
            ch = 0
            # Menu loop: the customer row is re-read on every pass so a
            # closure during this session is noticed immediately.  ``ch`` is
            # a string after input(), so the loop only ends via return.
            while ch!=7:
                cur.execute("select * from customer where account_no = %s",(username))
                check = cur.fetchall()
                if check[0][8]=='open' or check[0][8]=='Open':
                    print("1. Address Change..\n2. Open New Account\n3. Money Deposit..\n4. Money Withdrawl..\n5. Print Statement..\n6. Transfer Money..\n7. Account Closure..\n8. Avail Loan\n9. Customer Logout..")
                    ch = input("\nEnter Your Choice -:")
                    if ch=="1": ## Address Change
                        address = input("\nEnter New Address to Update -: ")
                        try:
                            cur.execute("update customer set address = %s where account_no = %s",(address,username))
                            print("Address Changed Successfully..\nPress Enter to Continue..")
                            input()
                        except Exception as e:
                            print(e)
                            input()
                        con.commit()
                    elif ch=="2": # Open New Account
                        print("1. Open Saving Account..\n2. Open Current Account..\n3. Open FD..\n")
                        select = input("Enter Account Option -: ")
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        if select=="1":
                            cur.execute("select * from customer")
                            cust = cur.fetchall()
                            for x in cust:
                                if x[0]==i[0]:
                                    if x[5]=="saving" or x[5]=="current" or x[5]=="FD":
                                        print("Account Already exist......\n")
                                        input()
                                    else:
                                        balance = int(input("Enter Balance to Deposit -:"))
                                        try:
                                            cur.execute("update customer set balance = %s where account_no = %s",(balance,i[0]))
                                            cur.execute("update customer set account_type = %s where account_no = %s",("saving",i[0]))
                                            cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,"saving"))
                                        except Exception as e:
                                            print(e)
                                            input()
                                        con.commit()
                        elif select=="2":
                            cur.execute("select * from customer")
                            cust = cur.fetchall()
                            for x in cust:
                                if x[0]==i[0]:
                                    if x[5]=="saving" or x[5]=="current" or x[5]=="FD":
                                        print("Account Already exist......\n")
                                        input()
                                    else:
                                        # Current accounts require a 5000 Rs. opening balance.
                                        balance = int(input("\nEnter Balance to deposit -: "))
                                        while balance<5000:
                                            print("Minimum Balance Should be 5000 Rs.")
                                            balance = int(input("Enter Balance to Deposit -:"))
                                        try:
                                            cur.execute("update customer set balance = %s where account_no = %s",(balance,i[0]))
                                            cur.execute("update customer set account_type = %s where account_no = %s",("current",i[0]))
                                            cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,"current"))
                                        except Exception as e:
                                            print(e)
                                            input()
                                        con.commit()
                                    break;
                        if select=="3":
                            cur.execute("select * from customer")
                            cust = cur.fetchall()
                            for x in cust:
                                if x[0]==i[0]:
                                    if x[5]=="saving" or x[5]=="current":
                                        print("Account Already exist......\n")
                                        input()
                                    else:
                                        # FD account numbers look like "<n>FD<suffix>": the
                                        # leading digit of the customer's last FD plus one.
                                        c = ""
                                        cur.execute("select * from fd")
                                        fd = cur.fetchall()
                                        for y in fd:
                                            if y[0]==i[0]:
                                                c = str(y[1])
                                                c = c[:1]
                                        if c=="":
                                            fdaccount_no = "1FD"+i[0][4:]
                                        else:
                                            c = int(c)+1
                                            fdaccount_no = str(c)+"FD"+i[0][4:]
                                        balance = int(input("Enter Balance to Deposit in FD -: "))
                                        # NOTE(review): threshold is 1000 but the message says
                                        # 5000 -- confirm which one is intended.
                                        while balance<1000:
                                            print("Minimum Balance Should be 5000 Rs.")
                                            balance = int(input("Enter Balance to Deposit -:"))
                                        duration = int(input("Enter Duration of FD (in Months) -:"))
                                        while duration<12:
                                            print("Minimum Duration Should be 12 months.")
                                            duration = int(input("Enter Duration of FD (in Months) -:"))
                                        try:
                                            cur.execute("update customer set balance = %s where account_no = %s",(balance+int(x[6]),i[0]))
                                            cur.execute("update customer set account_type = %s where account_no = %s",("FD",i[0]))
                                            cur.execute("insert into fd (account_no,fd_account_no,amount,duration) values(%s,%s,%s,%s)",(i[0],fdaccount_no,balance,duration))
                                            cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,fdaccount_no))
                                            print("\nMoney Successfully Deposited in FD.....\n")
                                            input()
                                        except Exception as e:
                                            print(e)
                                            input()
                                        con.commit()
                    elif ch=="3": # Money Deposit
                        cur.execute("select * from customer")
                        money = cur.fetchall()
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        for j in money:
                            if j[0]==i[0]:
                                if j[5]!="FD":
                                    amount = int(input("\nEnter Amount to be deposited -: "))
                                    newamount = amount + int(j[6])
                                    try:
                                        cur.execute("update customer set balance = %s where account_no = %s",(newamount,i[0]))
                                        print("Amount Deposited Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",j[0],now[:11],amount,j[5]))
                                        input()
                                        break
                                    except Exception as e:
                                        print(e)
                                        input()
                                else :
                                    # FD holders must name the FD to top up.
                                    fdno = input("\nEnter FD number to Deposit Money -:")
                                    cur.execute("select * from fd")
                                    fd_data = cur.fetchall()
                                    flag = -1
                                    for z in fd_data:
                                        if z[1]==fdno:
                                            flag = 0
                                            amount = int(input("\nEnter Amount to be deposited -: "))
                                            newamount = amount + int(j[6])
                                            try:
                                                cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                                print("\nAmount Deposited Successfully..\n")
                                                cur.execute("update fd set amount = %s where fd_account_no = %s",(z[2]+amount,fdno))
                                                print("Total FD Amount is -: ",z[2]+amount)
                                                cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",j[0],now[:11],amount,fdno))
                                                input()
                                                break
                                            except Exception as e:
                                                print(e)
                                    if flag==-1:
                                        print("\nSorry FD Number Does Not Exist.....\n")
                                        input()
                        con.commit()
                    elif ch=="4": #Money Withdrawl
                        cur.execute("select * from customer")
                        money = cur.fetchall()
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        flag = -1
                        for j in money:
                            if j[0]==i[0] and (j[5]=="current" or j[5]=="saving"):
                                flag = 0
                                amount = int(input("\nEnter Amount to be withdrawl -: "))
                                if amount<int(j[6]):
                                    newamount = int(j[6]) - amount
                                    try:
                                        cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                        print("Amount Withdrawl Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Debited",j[0],now[:11],amount,j[5]))
                                        input()
                                        con.commit()
                                    except Exception as e:
                                        print(e)
                                else :
                                    print("Entered Amount is greater than your balance..\nPress Enter to continue...")
                                    input()
                                break
                        if flag==-1:
                            print("\nMoney Cannot be Withdrawl From FD....")
                            input()
                        con.commit()
                    elif ch=="5": # Print Statement
                        cur.execute("select * from transaction")
                        statement = cur.fetchall()
                        cur.execute("select * from customer where account_no=%s",i[0])
                        acc = cur.fetchall()
                        print("\nName : ",acc[0][1],"\tEmail Id : ",acc[0][3],"\nMobile No. : ",acc[0][4],"\tAccount Type : ",acc[0][5],"\nBalance : ",acc[0][6])
                        cur.execute("select account_type from customer where account_no = %s ",i[0])
                        sorc = cur.fetchall()
                        # Saving/current statements list transaction types; FD
                        # statements list the FD number per deposit instead.
                        if sorc[0][0]=="saving" or sorc[0][0]=="current":
                            t = PrettyTable(['Date','Transaction Type','Amount'])
                            for j in statement:
                                if i[0]==j[2]:
                                    t.add_row([j[3],j[1],j[4]])
                            print("\n",t)
                        else:
                            t = PrettyTable(['Date','FD Number','Amount Deposited'])
                            for j in statement:
                                if i[0]==j[2]:
                                    t.add_row([j[3],j[5],j[4]])
                            print("\n",t)
                    elif ch=="6": # Transfer Money
                        cur.execute("select * from customer")
                        transf = cur.fetchall()
                        for w in transf:
                            if w[0]==i[0] and w[5]=="FD":
                                print("Money Cannot be Transferred From FD Account...")
                                input()
                            elif w[0]==i[0] and w[5]!="FD":
                                cur.execute("select * from transaction")
                                count = cur.rowcount
                                now = str(datetime.datetime.now())
                                accno = input("\nEnter Account Number to Transfer Money -: ")
                                flag = -1
                                # ``m`` = the sender's own customer row.
                                for j in transf:
                                    if i[0]==j[0]:
                                        m = j
                                for j in transf:
                                    if accno==j[0] and j[5]!="FD":
                                        l = j
                                        flag = 0
                                        print("\nName : ",l[1])
                                        amount = int(input("\nEnter Amount to be Transfer -: "))
                                        if amount<int(m[6]):
                                            newamount = int(m[6]) - amount
                                            try:
                                                cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                                cur.execute("update customer set balance = %s where account_no = %s",(amount+l[6],accno))
                                                print("Amount Transferred Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                                cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Debited",m[0],now[:11],amount,m[5]))
                                                cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+2,"Credited",accno,now[:11],amount,l[5]))
                                                input()
                                                con.commit()
                                            except Exception as e:
                                                print(e)
                                        else :
                                            print("Entered Amount is greater than your balance..\nPress Enter to continue...")
                                            input()
                                        break
                                    elif accno==j[0] and j[5]=="FD":
                                        # Receiving account is an FD: the money goes both to the
                                        # holder's balance and into the named FD row.
                                        l = j
                                        fdno = input("Enter FD Number to transfer Money -: ")
                                        flag1 = -1
                                        cur.execute("select * from fd")
                                        fddata = cur.fetchall()
                                        for q in fddata:
                                            if q[1]==fdno:
                                                flag1 = 0
                                                print("\nName : ",j[1])
                                                amount = int(input("\nEnter Amount to be Transfer -: "))
                                                if amount<int(m[6]):
                                                    newamount = int(m[6]) - amount
                                                    try:
                                                        cur.execute("update customer set balance = %s where account_no = %s",(newamount,username))
                                                        cur.execute("update customer set balance = %s where account_no = %s",(amount+l[6],accno))
                                                        print("Amount Transferred Successfully..\nYour Total Balance is ",newamount,"\nPress Enter to Continue..")
                                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Debited",m[0],now[:11],amount,m[5]))
                                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+2,"Credited",accno,now[:11],amount,fdno))
                                                        cur.execute("update fd set amount = %s where fd_account_no = %s",(q[2]+amount,fdno))
                                                        input()
                                                        con.commit()
                                                    except Exception as e:
                                                        print(e)
                                                else :
                                                    print("Entered Amount is greater than your balance..\nPress Enter to continue...")
                                                    input()
                                            break
                                        if flag1==-1:
                                            print("\nFD Number Does not Exist...")
                                            input()
                                    else:
                                        print("\nAccount Number Does not Exist...")
                                        input()
                    elif ch=="7": # Account Closure
                        cur.execute("select * from customer")
                        status = cur.fetchall()
                        choice = input("\nWant to Close the Account(Y/N) -: ")
                        if choice=="y" :
                            try:
                                cur.execute("update customer set status = %s where account_no = %s",("close",username))
                                con.commit()
                            except Exception as e:
                                print(e)
                    elif ch=="8":
                        # Avail Loan: only saving-account holders qualify; loan
                        # ceiling is twice the current balance.
                        cur.execute("select * from transaction")
                        count = cur.rowcount
                        now = str(datetime.datetime.now())
                        cur.execute("select * from customer")
                        cust = cur.fetchall()
                        for x in cust:
                            if x[0]==i[0]:
                                if x[5]=="FD" or x[5]=="current":
                                    print("Loan Facility Not Available......\n")
                                    input()
                                else:
                                    c = ""
                                    cur.execute("select * from loan")
                                    ln = cur.fetchall()
                                    for y in ln:
                                        if y[0]==i[0]:
                                            c = str(y[1])
                                            c = c[:1]
                                    if c=="":
                                        lnaccount_no = "1LN"+i[0][4:]
                                    else:
                                        c = int(c)+1
                                        lnaccount_no = str(c)+"LN"+i[0][4:]
                                    balance = int(input("Enter Loan Amount -: "))
                                    while balance>2*int(x[6]):
                                        print("Loan Amount Should be Less than ",2*int(x[6]))
                                        balance = int(input("Enter Loan Amount -:"))
                                    duration = int(input("Enter Duration of Repayment (in Months) -:"))
                                    try:
                                        cur.execute("update customer set balance = %s where account_no = %s",(balance+int(x[6]),i[0]))
                                        cur.execute("insert into loan (account_no,loan_no,amount,repayment_term) values(%s,%s,%s,%s)",(i[0],lnaccount_no,balance,duration))
                                        cur.execute("insert into transaction (trans_id,trans_type,account_no,date,amount,account_type) values(%s,%s,%s,%s,%s,%s)",(count+1,"Credited",i[0],now[:11],balance,lnaccount_no))
                                        print("\nLoan Passed Successfully.....\n")
                                        input()
                                        con.commit()
                                    except Exception as e:
                                        print(e)
                                        input()
                    elif ch=="9": # Log Out
                        print("You Have been loged out Successfully\nPress Enter to Continue..")
                        input()
                        return
                    else:
                        print("\nINVALID CHOICE...")
                        input()
                else:
                    print("\nAccount is Closed.....\nSorry Can't Perform Operations...")
                    input()
                    return
    print("INVALID ID AND PASSWORD...")
    input()
    return
# --- Top-level menu -------------------------------------------------------
# Entry loop of the script: dispatches to customer sign-up / sign-in or the
# in-line admin console until option 4 (Exit) is chosen.
ch = 0
while ch!=4:
    os.system('clear')
    print("1. Sign Up(New Customer) \n2. Sign In(Existing Customer) \n3. Admin Sign In \n4. Exit..")
    print("Enter Your Choice -: ")
    # NOTE(review): int() raises ValueError on non-numeric input and aborts
    # the whole program -- confirm whether that is acceptable before hardening.
    ch = int(input())
    if ch==1:
        signup()
    elif ch==2:
        signin()
    elif ch==3:
        # Admin console: report-only views over the customer/fd/loan tables.
        os.system('clear')
        q = 0
        while q!=4:
            print("1. Print Closed Account History..\n2. FD Report..\n3. Loan Report..\n4. Admin log out..")
            q = input("Enter Your Choice -: ")
            if q=="1":
                # Closed accounts only; column 8 of customer is the status.
                cur.execute("select * from customer where status = %s","close")
                data = cur.fetchall()
                t = PrettyTable(['Account Number','Name','Status'])
                for i in data:
                    t.add_row([i[0],i[1],i[8]])
                print(t)
            elif q=="2":
                cur.execute("select * from fd")
                data = cur.fetchall()
                t = PrettyTable(['Account Number','FD Number','Amount','Duration'])
                for i in data:
                    t.add_row([i[0],i[1],i[2],i[3]])
                print(t)
            elif q=="3":
                cur.execute("select * from loan")
                data = cur.fetchall()
                t = PrettyTable(['Account Number','Loan Number','Amount','Repayment Term'])
                for i in data:
                    t.add_row([i[0],i[1],i[2],i[3]])
                print(t)
            elif q=="4":
                # ``q`` is a string after input(), so ``while q!=4`` never goes
                # false on its own; this break is the real loop exit.
                print("You Have Been Logged out Successfully...")
                input()
                break
            else:
                print("Invalid Choice...")
                input()
cur.close() | 0.038665 | 0.139778 |
from bs4 import BeautifulSoup
import requests
import pandas as pd

# Scrape the books.toscrape.com catalogue: one row per book with its title,
# star-rating CSS class and detail-page link (stored under the column name
# "Cost", kept for compatibility with the original output even though the
# values are hrefs, not prices).
landing_html = requests.get("http://books.toscrape.com/").text
landing = BeautifulSoup(landing_html, 'html.parser')
# The pager text looks like "Page 1 of 50"; keep the trailing number.
page_total = int(landing.select_one('.current').text.split('of')[-1].strip())

titles = []
star_ratings = []
links = []
for page_no in range(1, page_total + 1):
    page_html = requests.get("http://books.toscrape.com/catalogue/page-{}.html".format(page_no)).text
    page_soup = BeautifulSoup(page_html, 'html.parser')
    images = page_soup.select('.image_container >a>img')
    stars = page_soup.select('p.star-rating')
    anchors = page_soup.select('.image_container >a')
    # zip() pairs the image/rating/anchor of each product card; assumes each
    # card yields exactly one of each element -- TODO confirm on live markup.
    for img, star, anchor in zip(images, stars, anchors):
        titles.append(img['alt'])
        star_ratings.append(star.attrs['class'][-1])
        links.append(anchor['href'])

df = pd.DataFrame({"Title": titles, "Ratings": star_ratings, "Cost": links})
print(df)
df.to_csv('Titlebooks.csv')
---------------------------------------
---------------------------------------
from bs4 import BeautifulSoup
import requests
import pandas as pd

# Collect the relative href of every book in the catalogue, then visit each
# detail page once and keep its first paragraph (the product description).
#
# Fixes over the original script:
#   * the detail-page loop used to sit INSIDE the page loop, so each page
#     iteration re-fetched every href collected so far (quadratic work and
#     duplicated rows);
#   * the DataFrame was built with an always-empty "Cost" column, which made
#     pandas raise "arrays must all be same length" before anything was saved.
res = requests.get("http://books.toscrape.com/").text
soup = BeautifulSoup(res, 'html.parser')
# The pager reads "Page 1 of 50"; the trailing number is the page count.
pagecount = int(soup.select_one('.current').text.split('of')[-1].strip())

title = []
for page in range(1, pagecount + 1):
    finalurl = "http://books.toscrape.com/catalogue/page-{}.html".format(page)
    res = requests.get(finalurl).text
    soup = BeautifulSoup(res, 'html.parser')
    for c in soup.select('.image_container >a'):
        title.append(c['href'])

# One detail-page request per book, after all listing pages are done.
ratings = []
for inter in title:
    url_lib = "http://books.toscrape.com/catalogue/{}".format(inter)
    res2 = requests.get(url_lib).text
    soup2 = BeautifulSoup(res2, 'html.parser')
    for item in soup2.findAll("div", {"class": "content"}):
        ratings.append({'book_href': item.find('p').text})

# Only the columns that actually received data; lengths line up row-for-row
# assuming each detail page contributes exactly one "content" div -- TODO
# confirm against the live markup.
df = pd.DataFrame({"Title": title, "Ratings": ratings})
print(df)
df.to_csv('Titlebooks.csv')
df.to_csv('Titlebooks.csv') | Prueba_cadabook.py | from bs4 import BeautifulSoup
import requests
import pandas as pd
# Scrape every listing page of books.toscrape.com and save one row per book:
# title, star-rating CSS class, and the detail-page href (the latter lives
# under the misleading column name "Cost").
# NOTE(review): BeautifulSoup is referenced below; its import sits on the
# preceding (mangled) line of this dump.
res=requests.get("http://books.toscrape.com/").text
soup=BeautifulSoup(res,'html.parser')
#Get the total page count (pager text looks like "Page 1 of 50")
pagecount=soup.select_one('.current').text.split('of')[-1].strip()
title=[]
ratings=[]
cost=[]
for page in range(1,int(pagecount)+1):
    finalurl="http://books.toscrape.com/catalogue/page-{}.html".format(page)
    res=requests.get(finalurl).text
    soup=BeautifulSoup(res,'html.parser')
    # The three selectors walk the same product cards; zip() pairs each
    # book's image (title), rating element and anchor (href).  Assumes one
    # of each per card -- TODO confirm on live markup.
    for t,r,c in zip(soup.select('.image_container >a>img'),soup.select('p.star-rating'),soup.select('.image_container >a')):
        title.append(t['alt'])
        ratings.append(r.attrs['class'][-1])
        cost.append(c['href'])
df = pd.DataFrame({"Title":title,"Ratings":ratings,"Cost":cost})
print(df)
df.to_csv('Titlebooks.csv')
---------------------------------------
---------------------------------------
from bs4 import BeautifulSoup
import requests
import pandas as pd

# Collect the relative href of every book in the catalogue, then visit each
# detail page once and keep its first paragraph (the product description).
#
# Fixes over the original script:
#   * the detail-page loop used to sit INSIDE the page loop, so each page
#     iteration re-fetched every href collected so far (quadratic work and
#     duplicated rows);
#   * the DataFrame was built with an always-empty "Cost" column, which made
#     pandas raise "arrays must all be same length" before anything was saved.
res = requests.get("http://books.toscrape.com/").text
soup = BeautifulSoup(res, 'html.parser')
# The pager reads "Page 1 of 50"; the trailing number is the page count.
pagecount = int(soup.select_one('.current').text.split('of')[-1].strip())

title = []
for page in range(1, pagecount + 1):
    finalurl = "http://books.toscrape.com/catalogue/page-{}.html".format(page)
    res = requests.get(finalurl).text
    soup = BeautifulSoup(res, 'html.parser')
    for c in soup.select('.image_container >a'):
        title.append(c['href'])

# One detail-page request per book, after all listing pages are done.
ratings = []
for inter in title:
    url_lib = "http://books.toscrape.com/catalogue/{}".format(inter)
    res2 = requests.get(url_lib).text
    soup2 = BeautifulSoup(res2, 'html.parser')
    for item in soup2.findAll("div", {"class": "content"}):
        ratings.append({'book_href': item.find('p').text})

# Only the columns that actually received data; lengths line up row-for-row
# assuming each detail page contributes exactly one "content" div -- TODO
# confirm against the live markup.
df = pd.DataFrame({"Title": title, "Ratings": ratings})
print(df)
df.to_csv('Titlebooks.csv') | 0.156008 | 0.086748 |
import os
import time
import logging
import numpy as np
import unittest
import matplotlib.pyplot as plt
from home_platform.rendering import Panda3dRenderer
from home_platform.suncg import SunCgSceneLoader, loadModel, SunCgModelLights
from panda3d.core import LMatrix4f, TransformState, LVecBase3
from home_platform.core import Scene
from home_platform.utils import Viewer
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data")
TEST_SUNCG_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data", "suncg")
class TestPanda3dRenderer(unittest.TestCase):
    """Smoke tests for Panda3dRenderer against the bundled SUNCG test assets."""
    def testObjectWithViewer(self):
        """Load a single object model, center it, and run a short viewer loop.

        NOTE(review): if ``Panda3dRenderer(...)`` itself raises, ``renderer``
        and ``viewer`` are unbound and the ``finally`` block raises
        NameError instead -- confirm this is acceptable for a test helper.
        """
        scene = Scene()
        modelId = '83'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object", str(modelId), str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        model.hide()
        objectsNp = scene.scene.attachNewNode('objects')
        objNp = objectsNp.attachNewNode('object-' + str(modelId))
        model.reparentTo(objNp)
        # Calculate the center of this object
        minBounds, maxBounds = model.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0
        # Add offset transform to make position relative to the center
        model.setTransform(TransformState.makePos(-centerPos))
        try:
            renderer = Panda3dRenderer(scene, shadowing=False)
            viewer = Viewer(scene, interactive=False)
            viewer.disableMouse()
            viewer.cam.setTransform(TransformState.makePos(LVecBase3(5.0, 0.0, 0.0)))
            viewer.cam.lookAt(model)
            # Step the viewer a few frames so the scene actually renders.
            for _ in range(20):
                viewer.step()
            time.sleep(1.0)
        finally:
            renderer.destroy()
            viewer.destroy()
            viewer.graphicsEngine.removeAllWindows()
    def testStep(self):
        """Render one offscreen frame of a test house and sanity-check RGB/depth."""
        scene = SunCgSceneLoader.loadHouseFromJson("0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
        modelLightsInfo = SunCgModelLights(os.path.join(TEST_SUNCG_DATA_DIR, 'metadata', 'suncgModelLights.json'))
        renderer = Panda3dRenderer(scene, shadowing=True, mode='offscreen', modelLightsInfo=modelLightsInfo)
        renderer.showRoomLayout(showCeilings=False)
        # Hard-coded camera pose (row-major 4x4) for the agent.
        mat = np.array([0.999992, 0.00394238, 0, 0,
                        -0.00295702, 0.750104, -0.661314, 0,
                        -0.00260737, 0.661308, 0.75011, 0,
                        43.621, -55.7499, 12.9722, 1])
        scene.agents[0].setMat(LMatrix4f(*mat.ravel()))
        renderer.step(dt=0.1)
        image = renderer.getRgbImages()['agent-0']
        depth = renderer.getDepthImages(mode='distance')['agent-0']
        # Depth values must lie inside the renderer's clip range.
        self.assertTrue(np.min(depth) >= renderer.zNear)
        self.assertTrue(np.max(depth) <= renderer.zFar)
        fig = plt.figure(figsize=(16,8))
        plt.axis("off")
        ax = plt.subplot(121)
        ax.imshow(image)
        ax = plt.subplot(122)
        ax.imshow(depth/np.max(depth), cmap='binary')
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)
        renderer.destroy()
# Script entry point: configure logging and numpy error handling before the
# unittest runner (unittest.main() follows on the next line) takes over.
if __name__ == '__main__':
    logging.basicConfig(level=logging.WARN)
    # Turn numpy floating-point warnings into exceptions so silent numerical
    # issues fail the tests loudly.
    np.seterr(all='raise')
unittest.main() | tests/multimodalmaze/test_rendering.py |
import os
import time
import logging
import numpy as np
import unittest
import matplotlib.pyplot as plt
from home_platform.rendering import Panda3dRenderer
from home_platform.suncg import SunCgSceneLoader, loadModel, SunCgModelLights
from panda3d.core import LMatrix4f, TransformState, LVecBase3
from home_platform.core import Scene
from home_platform.utils import Viewer
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data")
TEST_SUNCG_DATA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "data", "suncg")
class TestPanda3dRenderer(unittest.TestCase):
    """Smoke tests for Panda3dRenderer against the bundled SUNCG test assets."""
    def testObjectWithViewer(self):
        """Load a single object model, center it, and run a short viewer loop.

        NOTE(review): if ``Panda3dRenderer(...)`` itself raises, ``renderer``
        and ``viewer`` are unbound and the ``finally`` block raises
        NameError instead -- confirm this is acceptable for a test helper.
        """
        scene = Scene()
        modelId = '83'
        modelFilename = os.path.join(TEST_SUNCG_DATA_DIR, "object", str(modelId), str(modelId) + ".egg")
        assert os.path.exists(modelFilename)
        model = loadModel(modelFilename)
        model.setName('model-' + str(modelId))
        model.hide()
        objectsNp = scene.scene.attachNewNode('objects')
        objNp = objectsNp.attachNewNode('object-' + str(modelId))
        model.reparentTo(objNp)
        # Calculate the center of this object
        minBounds, maxBounds = model.getTightBounds()
        centerPos = minBounds + (maxBounds - minBounds) / 2.0
        # Add offset transform to make position relative to the center
        model.setTransform(TransformState.makePos(-centerPos))
        try:
            renderer = Panda3dRenderer(scene, shadowing=False)
            viewer = Viewer(scene, interactive=False)
            viewer.disableMouse()
            viewer.cam.setTransform(TransformState.makePos(LVecBase3(5.0, 0.0, 0.0)))
            viewer.cam.lookAt(model)
            # Step the viewer a few frames so the scene actually renders.
            for _ in range(20):
                viewer.step()
            time.sleep(1.0)
        finally:
            renderer.destroy()
            viewer.destroy()
            viewer.graphicsEngine.removeAllWindows()
    def testStep(self):
        """Render one offscreen frame of a test house and sanity-check RGB/depth."""
        scene = SunCgSceneLoader.loadHouseFromJson("0004d52d1aeeb8ae6de39d6bd993e992", TEST_SUNCG_DATA_DIR)
        modelLightsInfo = SunCgModelLights(os.path.join(TEST_SUNCG_DATA_DIR, 'metadata', 'suncgModelLights.json'))
        renderer = Panda3dRenderer(scene, shadowing=True, mode='offscreen', modelLightsInfo=modelLightsInfo)
        renderer.showRoomLayout(showCeilings=False)
        # Hard-coded camera pose (row-major 4x4) for the agent.
        mat = np.array([0.999992, 0.00394238, 0, 0,
                        -0.00295702, 0.750104, -0.661314, 0,
                        -0.00260737, 0.661308, 0.75011, 0,
                        43.621, -55.7499, 12.9722, 1])
        scene.agents[0].setMat(LMatrix4f(*mat.ravel()))
        renderer.step(dt=0.1)
        image = renderer.getRgbImages()['agent-0']
        depth = renderer.getDepthImages(mode='distance')['agent-0']
        # Depth values must lie inside the renderer's clip range.
        self.assertTrue(np.min(depth) >= renderer.zNear)
        self.assertTrue(np.max(depth) <= renderer.zFar)
        fig = plt.figure(figsize=(16,8))
        plt.axis("off")
        ax = plt.subplot(121)
        ax.imshow(image)
        ax = plt.subplot(122)
        ax.imshow(depth/np.max(depth), cmap='binary')
        plt.show(block=False)
        time.sleep(1.0)
        plt.close(fig)
        renderer.destroy()
# Script entry point: configure logging and numpy error handling before the
# unittest runner (unittest.main() follows on the next line) takes over.
if __name__ == '__main__':
    logging.basicConfig(level=logging.WARN)
    # Turn numpy floating-point warnings into exceptions so silent numerical
    # issues fail the tests loudly.
    np.seterr(all='raise')
unittest.main() | 0.430387 | 0.322739 |
import decimal
import pytest
import json
import requests
from datetime import datetime
from mock import Mock
import pyticketswitch
from pyticketswitch.client import Client, POST, GET
from pyticketswitch import exceptions
from pyticketswitch.trolley import Trolley
from pyticketswitch.reservation import Reservation
from pyticketswitch.user import User
from pyticketswitch.customer import Customer
from pyticketswitch.payment_methods import CardDetails, RedirectionDetails
from pyticketswitch.status import Status
from pyticketswitch.callout import Callout
@pytest.fixture
def client():
    """A Client wired with dummy credentials and Decimal price handling."""
    return Client(user="bilbo", password="<PASSWORD>", use_decimal=True)
@pytest.fixture
def fake_func():
    """Factory fixture: builds a stub callable that ignores its arguments
    and always returns the given *return_value*."""
    def wrapper(return_value):
        return lambda *args, **kwargs: return_value
    return wrapper
@pytest.fixture
def mock_make_request(client, monkeypatch):
    """Patch client.make_request with a Mock returning an empty 'results' payload."""
    patched = Mock(return_value={'results': {}})
    monkeypatch.setattr(client, 'make_request', patched)
    return patched
@pytest.fixture
def mock_make_request_for_events(client, monkeypatch):
    """Patch client.make_request to return an empty 'events_by_id' payload."""
    patched = Mock(return_value={'events_by_id': {}})
    monkeypatch.setattr(client, 'make_request', patched)
    return patched
@pytest.fixture
def mock_make_request_for_performances(client, monkeypatch):
    """Patch client.make_request to return an empty 'performances_by_id' payload."""
    patched = Mock(return_value={'performances_by_id': {}})
    monkeypatch.setattr(client, 'make_request', patched)
    return patched
@pytest.fixture
def mock_make_request_for_availability(client, monkeypatch):
    """Patch client.make_request to return an empty 'availability' payload."""
    patched = Mock(return_value={'availability': {}})
    monkeypatch.setattr(client, 'make_request', patched)
    return patched
@pytest.fixture
def mock_make_request_for_trolley(client, monkeypatch):
    """Patch client.make_request to return a canned trolley token."""
    patched = Mock(return_value={'trolley_token': 'ABC<PASSWORD>'})
    monkeypatch.setattr(client, 'make_request', patched)
    return patched
class FakeResponse(object):
    """Minimal stand-in for a requests.Response used by these tests.

    Exposes only the pieces of the Response API the client touches:
    ``status_code``, ``json()`` and ``content`` (the payload re-serialized
    as a JSON string).
    """

    def __init__(self, status_code=200, json=None):
        self.status_code = status_code
        self._payload = json

    def json(self, **kwargs):
        # Mirrors requests' signature; keyword arguments are accepted and ignored.
        return self._payload

    @property
    def content(self):
        return json.dumps(self._payload)
class FakeResponseRaisesValueError(FakeResponse):
    """FakeResponse variant simulating a non-JSON body: json() always raises."""
    def json(self, **kwargs):
        raise ValueError("ERROR")
class TestClient:
@pytest.mark.integration
def test_get_url(self, client):
url = client.get_url('events.v1')
assert url == 'https://api.ticketswitch.com/f13/events.v1/'
@pytest.mark.integration
def test_make_request(self, client, monkeypatch):
fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
params = {
'foo': 'bar',
}
client.language='en-GB'
response = client.make_request('events.v1', params)
assert response == {'lol': 'beans'}
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'bilbo', b'baggins'),
params={
'foo': 'bar',
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
@pytest.mark.integration
def test_make_request_with_timeout(self, client, monkeypatch):
fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
params = {
'foo': 'bar',
}
client.language='en-GB'
response = client.make_request('events.v1', params, timeout=15)
assert response == {'lol': 'beans'}
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'bilbo', b'baggins'),
params={
'foo': 'bar',
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=15
)
@pytest.mark.integration
def test_make_request_with_post(self, client, monkeypatch):
fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
fake_post = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.post = fake_post
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
params = {
'foo': 'bar',
}
client.language='en-GB'
response = client.make_request('events.v1', params, method=POST)
assert response == {'lol': 'beans'}
fake_post.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'bilbo', b'baggins'),
data={
'foo': 'bar',
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
def test_make_request_with_subuser(self, monkeypatch):
client = Client(user="beatles", password="<PASSWORD>",
sub_user="ringo", use_decimal=True)
fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
params = {
'foo': 'bar',
}
client.language='en-GB'
response = client.make_request('events.v1', params)
assert response == {'lol': 'beans'}
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'beatles', b'lovemedo'),
params={
'foo': 'bar',
'sub_id': 'ringo',
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
def test_make_request_with_tracking_id(self, monkeypatch):
client = Client(user="user", password="<PASSWORD>",
tracking_id="xyz", use_decimal=True)
fake_response = FakeResponse(status_code=200, json={"depro": "fundis"})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
client.language='en-GB'
response = client.make_request('events.v1', {})
assert response
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'user', b'pass'),
params={
'tsw_session_track_id': 'xyz'
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
def test_make_request_when_using_per_request_tracking_id(self, monkeypatch):
client = Client(user="user", password="<PASSWORD>",
tracking_id="xyz", use_decimal=True)
fake_response = FakeResponse(status_code=200, json={"depro": "fundis"})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
client.language='en-GB'
params = {}
client.add_optional_kwargs(params, tracking_id="123")
response = client.make_request('events.v1', params)
assert response
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'user', b'<PASSWORD>'),
params={
'tsw_session_track_id': '123'
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
client.add_optional_kwargs(params, tracking_id="456")
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=(b'user', b'<PASSWORD>'),
params={
'tsw_session_track_id': '456'
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
def test_make_request_bad_response_with_auth_error(self, client, monkeypatch):
fake_response = FakeResponse(status_code=400, json={
'error_code': 3,
'error_desc': 'User authorisation failure',
})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
with pytest.raises(exceptions.APIError) as excinfo:
client.make_request('test.v1', {})
assert excinfo.value.msg == 'User authorisation failure'
assert excinfo.value.code == 3
assert excinfo.value.response is fake_response
def test_make_request_bad_response_with_error(self, client, monkeypatch):
fake_response = FakeResponse(status_code=400, json={
'error_code': 8,
'error_desc': 'price_band_code needs /pool or /alloc suffix',
})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
with pytest.raises(exceptions.APIError) as excinfo:
client.make_request('trolley.v1', {})
assert excinfo.value.msg == 'price_band_code needs /pool or /alloc suffix'
assert excinfo.value.code == 8
assert excinfo.value.response is fake_response
def test_make_request_bad_response_without_error(self, client, monkeypatch):
    """A 400 response with no error payload raises InvalidResponseError."""
    bad_response = FakeResponse(status_code=400, json={})
    http_session = Mock(spec=requests.Session)
    http_session.get = Mock(return_value=bad_response)
    monkeypatch.setattr(client, 'get_session', Mock(return_value=http_session))
    with pytest.raises(exceptions.InvalidResponseError):
        client.make_request('trolley.v1', {})
def test_make_request_410_gone_response(self, client, monkeypatch):
    """HTTP 410 on a callback endpoint raises CallbackGoneError."""
    gone_response = FakeResponse(
        status_code=410,
        json={'error_code': 8, 'error_desc': 'transaction failed'},
    )
    http_session = Mock(spec=requests.Session)
    http_session.get = Mock(return_value=gone_response)
    monkeypatch.setattr(client, 'get_session', Mock(return_value=http_session))
    with pytest.raises(exceptions.CallbackGoneError):
        client.make_request('callback.v1', {})
def test_make_request_no_contents_raises(self, client, monkeypatch):
    """An unparseable JSON body raises InvalidResponseError."""
    broken_response = FakeResponseRaisesValueError(
        status_code=200, json={'data': 'some data'})
    http_session = Mock(spec=requests.Session)
    http_session.get = Mock(return_value=broken_response)
    monkeypatch.setattr(client, 'get_session', Mock(return_value=http_session))
    with pytest.raises(exceptions.InvalidResponseError):
        client.make_request('test.v1', {})
def test_add_optional_kwargs_extra_info(self, client):
    """extra_info=True maps onto the req_extra_info flag."""
    query = {}
    client.add_optional_kwargs(query, extra_info=True)
    assert query == {'req_extra_info': True}
def test_add_optional_kwargs_reviews(self, client):
    """reviews=True maps onto the req_reviews flag."""
    query = {}
    client.add_optional_kwargs(query, reviews=True)
    assert query == {'req_reviews': True}
def test_add_optional_kwargs_media(self, client):
    """media=True requests every media asset flavour at once."""
    query = {}
    client.add_optional_kwargs(query, media=True)
    expected = {
        'req_media_triplet_one': True,
        'req_media_triplet_two': True,
        'req_media_triplet_three': True,
        'req_media_triplet_four': True,
        'req_media_triplet_five': True,
        'req_media_seating_plan': True,
        'req_media_square': True,
        'req_media_landscape': True,
        'req_media_marquee': True,
        'req_video_iframe': True,
    }
    assert query == expected
def test_add_optional_kwargs_cost_range(self, client):
    """cost_range=True maps onto the req_cost_range flag."""
    query = {}
    client.add_optional_kwargs(query, cost_range=True)
    assert query == {'req_cost_range': True}
def test_add_optional_kwargs_best_value_offer(self, client):
    """best_value_offer implies the cost range plus its specific flag."""
    query = {}
    client.add_optional_kwargs(query, best_value_offer=True)
    assert query == {
        'req_cost_range': True,
        'req_cost_range_best_value_offer': True,
    }
def test_add_optional_kwargs_max_saving_offer(self, client):
    """max_saving_offer implies the cost range plus its specific flag."""
    query = {}
    client.add_optional_kwargs(query, max_saving_offer=True)
    assert query == {
        'req_cost_range': True,
        'req_cost_range_max_saving_offer': True,
    }
def test_add_optional_kwargs_min_cost_offer(self, client):
    """min_cost_offer implies the cost range plus its specific flag."""
    query = {}
    client.add_optional_kwargs(query, min_cost_offer=True)
    assert query == {
        'req_cost_range': True,
        'req_cost_range_min_cost_offer': True,
    }
def test_add_optional_kwargs_top_price_offer(self, client):
    """top_price_offer implies the cost range plus its specific flag.

    Fixed: the comparison was missing ``assert``, so its result was
    silently discarded and this test could never fail.
    """
    params = {}
    client.add_optional_kwargs(params, top_price_offer=True)
    assert params == {
        'req_cost_range': True,
        'req_cost_range_top_price_offer': True,
    }
def test_add_optional_kwargs_no_singles_data(self, client):
    """no_singles_data implies the cost range plus its specific flag."""
    query = {}
    client.add_optional_kwargs(query, no_singles_data=True)
    assert query == {
        'req_cost_range': True,
        'req_cost_range_no_singles_data': True,
    }
def test_add_optional_kwargs_cost_range_details(self, client):
    """cost_range_details maps onto the req_cost_range_details flag."""
    query = {}
    client.add_optional_kwargs(query, cost_range_details=True)
    assert query == {'req_cost_range_details': True}
def test_add_optional_kwargs_avail_details(self, client):
    """availability=True maps onto the req_avail_details flag.

    Fixed: the comparison was missing ``assert``, so its result was
    silently discarded and this test could never fail. The expected
    mapping matches test_list_performances_availability below.
    """
    params = {}
    client.add_optional_kwargs(params, availability=True)
    assert params == {
        'req_avail_details': True,
    }
def test_add_optional_kwargs_avail_details_with_perfs(self, client):
    """availability_with_performances maps onto req_avail_details_with_perfs.

    Fixed: the comparison was missing ``assert``, so its result was
    silently discarded and this test could never fail.
    """
    params = {}
    client.add_optional_kwargs(params, availability_with_performances=True)
    assert params == {
        'req_avail_details_with_perfs': True,
    }
def test_add_optional_kwargs_source_info(self, client):
    """source_info=True maps onto the req_src_info flag.

    Fixed: the comparison was missing ``assert``, so its result was
    silently discarded and this test could never fail.
    """
    params = {}
    client.add_optional_kwargs(params, source_info=True)
    assert params == {
        'req_src_info': True,
    }
def test_list_events(self, client, monkeypatch):
    """list_events parses events, paging meta and currency details."""
    payload = {
        'results': {
            'event': [{'event_id': 'ABC123'}, {'event_id': 'DEF456'}],
            'paging_status': {'total_unpaged_results': 10},
        },
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    events, meta = client.list_events()
    fake_request.assert_called_with('events.v1', {})
    assert [event.id for event in events] == ['ABC123', 'DEF456']
    assert meta.total_results == 10
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_list_events_with_keywords(self, client, mock_make_request):
    """Keyword lists are joined into a comma separated string."""
    client.list_events(keywords=['awesome', 'stuff'])
    expected = {'keywords': 'awesome,stuff'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_with_start_date(self, client, mock_make_request):
    """A lone start date produces an open-ended date_range."""
    client.list_events(start_date=datetime(2016, 7, 23, 0, 7, 25))
    expected = {'date_range': '20160723:'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_with_end_date(self, client, mock_make_request):
    """A lone end date produces an open-started date_range."""
    client.list_events(end_date=datetime(2016, 7, 23, 0, 7, 25))
    expected = {'date_range': ':20160723'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_with_start_and_end_date(self, client, mock_make_request):
    """Both dates combine into a closed date_range."""
    client.list_events(
        start_date=datetime(2015, 3, 11, 0, 9, 45),
        end_date=datetime(2016, 7, 23, 0, 7, 25),
    )
    expected = {'date_range': '20150311:20160723'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_country_code(self, client, mock_make_request):
    """country_code is forwarded unchanged."""
    client.list_events(country_code='fj')
    expected = {'country_code': 'fj'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_city_code(self, client, mock_make_request):
    """city_code is forwarded unchanged."""
    client.list_events(city_code='london-uk')
    expected = {'city_code': 'london-uk'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_geolocation(self, client, mock_make_request):
    """latitude, longitude and radius collapse into a circle param."""
    client.list_events(
        latitude=51.52961137,
        longitude=-0.10601562,
        radius=10,
    )
    expected = {'circle': '51.52961137:-0.10601562:10'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_invalid_geolocation(self, client):
    """Any incomplete latitude/longitude/radius combination is rejected."""
    incomplete_combos = [
        {'longitude': -0.10601562, 'radius': 10},
        {'latitude': 51.52961137, 'radius': 10},
        {'latitude': 51.52961137, 'longitude': -0.10601562},
        {'radius': 10},
    ]
    for kwargs in incomplete_combos:
        with pytest.raises(exceptions.InvalidGeoParameters):
            client.list_events(**kwargs)
def test_list_events_include_dead(self, client, mock_make_request):
    """include_dead is forwarded unchanged."""
    client.list_events(include_dead=True)
    expected = {'include_dead': True}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_sort_order(self, client, mock_make_request):
    """sort_order is forwarded unchanged."""
    client.list_events(sort_order='foobar')
    expected = {'sort_order': 'foobar'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_pagination(self, client, mock_make_request):
    """page/page_length map onto page_no/page_len."""
    client.list_events(page=2, page_length=50)
    expected = {'page_no': 2, 'page_len': 50}
    mock_make_request.assert_called_with('events.v1', expected)
def test_list_events_no_results(self, client, monkeypatch, fake_func):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', fake_func({}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.list_events()
def test_list_events_misc_kwargs(self, client, mock_make_request):
    """Unknown keyword arguments are passed straight through."""
    client.list_events(foobar='lolbeans')
    expected = {'foobar': 'lolbeans'}
    mock_make_request.assert_called_with('events.v1', expected)
def test_get_events(self, client, monkeypatch):
    """get_events keys the returned events by their IDs."""
    payload = {
        'events_by_id': {
            'ABC123': {'event': {'event_id': 'ABC123'}},
            'DEF456': {'event': {'event_id': 'DEF456'}},
        },
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    events, meta = client.get_events(['ABC123', 'DEF456'])
    fake_request.assert_called_with(
        'events_by_id.v1',
        {'event_id_list': 'ABC123,DEF456'},
    )
    assert events['ABC123'].id == 'ABC123'
    assert events['DEF456'].id == 'DEF456'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_events_event_list(self, client, mock_make_request_for_events):
    """Event IDs are joined into a comma separated list."""
    client.get_events(['6IF', '25DR', '3ENO'])
    expected = {'event_id_list': '6IF,25DR,3ENO'}
    mock_make_request_for_events.assert_called_with('events_by_id.v1', expected)
def test_get_events_no_results(self, client, monkeypatch, fake_func):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', fake_func({}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.get_events(['6IF', '25DR'])
def test_get_events_misc_kwargs(self, client, mock_make_request_for_events):
    """Unknown keyword arguments are passed straight through."""
    client.get_events([], foobar='lolbeans')
    expected = {'foobar': 'lolbeans'}
    mock_make_request_for_events.assert_called_with('events_by_id.v1', expected)
def test_get_events_with_upsell(self, client, mock_make_request_for_events):
    """with_upsells adds the add_upsells flag."""
    client.get_events(['6IF'], with_upsells=True)
    expected = {'event_id_list': '6IF', 'add_upsells': True}
    mock_make_request_for_events.assert_called_with('events_by_id.v1', expected)
def test_get_events_with_addons(self, client, mock_make_request_for_events):
    """with_addons adds the add_add_ons flag."""
    client.get_events(['ABC123'], with_addons=True)
    expected = {'event_id_list': 'ABC123', 'add_add_ons': True}
    mock_make_request_for_events.assert_called_with('events_by_id.v1', expected)
def test_get_event(self, client, monkeypatch):
    """get_event returns the single requested event plus meta."""
    payload = {
        'events_by_id': {
            'ABC123': {'event': {'event_id': 'ABC123'}},
        },
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    event, meta = client.get_event('ABC123')
    fake_request.assert_called_with(
        'events_by_id.v1',
        {'event_id_list': 'ABC123'},
    )
    assert event.id == 'ABC123'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_months(self, client, monkeypatch):
    """get_months converts month names into numeric month/year pairs."""
    payload = {
        'results': {
            'month': [
                {'month': 'dec', 'year': 2016},
                {'month': 'jan', 'year': 2017},
                {'month': 'feb', 'year': 2017},
            ]
        }
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    months = client.get_months('ABC123')
    fake_request.assert_called_with(
        'months.v1',
        {'event_id': 'ABC123'},
    )
    as_pairs = [(m.month, m.year) for m in months]
    assert as_pairs == [(12, 2016), (1, 2017), (2, 2017)]
def test_get_months_no_results(self, client, monkeypatch, fake_func):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', fake_func({}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.get_months('6IF')
def test_get_months_misc_kwargs(self, client, mock_make_request):
    """Unknown keyword arguments are passed straight through."""
    client.get_months('6IF', foobar='lolbeans')
    expected = {'event_id': '6IF', 'foobar': 'lolbeans'}
    mock_make_request.assert_called_with('months.v1', expected)
def test_list_performances_no_results(self, client, monkeypatch, fake_func):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', fake_func({}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.list_performances('6IF')
def test_list_performances(self, client, monkeypatch):
    """list_performances parses performances, paging meta and currencies."""
    payload = {
        'results': {
            'has_perf_names': False,
            'events_by_id': {
                'ABC123': {'event': {'event_id': 'ABC123'}},
            },
            'performance': [
                {'perf_id': 'ABC123-1', 'event_id': 'ABC123'},
                {'perf_id': 'ABC123-2', 'event_id': 'ABC123'},
                {'perf_id': 'ABC123-3', 'event_id': 'ABC123'},
            ],
            'paging_status': {'total_unpaged_results': 10},
        },
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    performances, meta = client.list_performances('ABC123')
    fake_request.assert_called_with('performances.v1', {
        'event_id': 'ABC123',
    })
    assert [p.id for p in performances] == ['ABC123-1', 'ABC123-2', 'ABC123-3']
    assert all(p.event_id == 'ABC123' for p in performances)
    assert meta.has_names is False
    assert meta.total_results == 10
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_list_performances_cost_range(self, client, mock_make_request):
    """cost_range=True adds the req_cost_range flag."""
    client.list_performances('ABC123', cost_range=True)
    expected = {'event_id': 'ABC123', 'req_cost_range': True}
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_best_value_offer(self, client, mock_make_request):
    """best_value_offer implies the cost range plus its specific flag."""
    client.list_performances('ABC123', best_value_offer=True)
    expected = {
        'event_id': 'ABC123',
        'req_cost_range': True,
        'req_cost_range_best_value_offer': True,
    }
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_max_saving_offer(self, client, mock_make_request):
    """max_saving_offer implies the cost range plus its specific flag."""
    client.list_performances('ABC123', max_saving_offer=True)
    expected = {
        'event_id': 'ABC123',
        'req_cost_range': True,
        'req_cost_range_max_saving_offer': True,
    }
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_min_cost_offer(self, client, mock_make_request):
    """min_cost_offer implies the cost range plus its specific flag."""
    client.list_performances('ABC123', min_cost_offer=True)
    expected = {
        'event_id': 'ABC123',
        'req_cost_range': True,
        'req_cost_range_min_cost_offer': True,
    }
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_top_price_offer(self, client, mock_make_request):
    """top_price_offer implies the cost range plus its specific flag."""
    client.list_performances('ABC123', top_price_offer=True)
    expected = {
        'event_id': 'ABC123',
        'req_cost_range': True,
        'req_cost_range_top_price_offer': True,
    }
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_no_singles_data(self, client, mock_make_request):
    """no_singles_data implies the cost range plus its specific flag."""
    client.list_performances('ABC123', no_singles_data=True)
    expected = {
        'event_id': 'ABC123',
        'req_cost_range': True,
        'req_cost_range_no_singles_data': True,
    }
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_availability(self, client, mock_make_request):
    """availability=True adds the req_avail_details flag."""
    client.list_performances('ABC123', availability=True)
    expected = {'event_id': 'ABC123', 'req_avail_details': True}
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_pagination(self, client, mock_make_request):
    """page/page_length map onto page_no/page_len alongside other flags."""
    client.list_performances(
        'ABC123',
        availability=True,
        page=3,
        page_length=20,
    )
    expected = {
        'event_id': 'ABC123',
        'req_avail_details': True,
        'page_no': 3,
        'page_len': 20,
    }
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_with_start_date(self, client, mock_make_request):
    """A lone start date produces an open-ended date_range."""
    client.list_performances(
        'ABC123',
        start_date=datetime(2016, 7, 23, 0, 7, 25),
    )
    expected = {'event_id': 'ABC123', 'date_range': '20160723:'}
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performancess_with_end_date(self, client, mock_make_request):
    """A lone end date produces an open-started date_range."""
    client.list_performances(
        'ABC123',
        end_date=datetime(2016, 7, 23, 0, 7, 25),
    )
    expected = {'event_id': 'ABC123', 'date_range': ':20160723'}
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_with_start_and_end_date(self, client, mock_make_request):
    """Both dates combine into a closed date_range."""
    client.list_performances(
        'ABC123',
        start_date=datetime(2015, 3, 11, 0, 9, 45),
        end_date=datetime(2016, 7, 23, 0, 7, 25),
    )
    expected = {'event_id': 'ABC123', 'date_range': '20150311:20160723'}
    mock_make_request.assert_called_with('performances.v1', expected)
def test_list_performances_misc_kwargs(self, client, mock_make_request):
    """Unknown keyword arguments are passed straight through."""
    client.list_performances('ABC123', foobar='lolbeans')
    expected = {'event_id': 'ABC123', 'foobar': 'lolbeans'}
    mock_make_request.assert_called_with('performances.v1', expected)
def test_get_performances(self, client, monkeypatch):
    """get_performances keys the returned performances by their IDs."""
    payload = {
        'performances_by_id': {
            'ABC123-1': {'perf_id': 'ABC123-1', 'event_id': 'ABC123'},
            'DEF456-2': {'perf_id': 'DEF456-2', 'event_id': 'DEF456'},
        },
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    performances, meta = client.get_performances(['ABC123-1', 'DEF456-2'])
    fake_request.assert_called_with('performances_by_id.v1', {
        'perf_id_list': 'ABC123-1,DEF456-2',
    })
    assert performances['ABC123-1'].id == 'ABC123-1'
    assert performances['ABC123-1'].event_id == 'ABC123'
    assert performances['DEF456-2'].id == 'DEF456-2'
    assert performances['DEF456-2'].event_id == 'DEF456'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_performances_no_performances(self, client, monkeypatch, fake_func):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', fake_func({}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.get_performances(['6IF-1', '6IF-2'])
def test_get_performances_misc_kwargs(self, client, mock_make_request_for_performances):
    """Unknown keyword arguments are passed straight through."""
    client.get_performances(['6IF-1', '25DR-2'], foobar='lolbeans')
    expected = {
        'perf_id_list': '6IF-1,25DR-2',
        'foobar': 'lolbeans',
    }
    mock_make_request_for_performances.assert_called_with(
        'performances_by_id.v1', expected)
def test_get_performance(self, client, monkeypatch):
    """get_performance returns the single requested performance plus meta."""
    payload = {
        'performances_by_id': {
            'ABC123-1': {'perf_id': 'ABC123-1', 'event_id': 'ABC123'},
        },
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    performance, meta = client.get_performance('ABC123-1')
    fake_request.assert_called_with(
        'performances_by_id.v1',
        {'perf_id_list': 'ABC123-1'},
    )
    assert performance.id == 'ABC123-1'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_availability(self, client, monkeypatch):
    """get_availability parses ticket types, price bands and meta flags."""
    payload = {
        'availability': {
            'ticket_type': [
                {
                    'ticket_type_code': 'CIRCLE',
                    'price_band': [
                        {'price_band_code': 'A'},
                        {'price_band_code': 'B',
                         'allows_leaving_single_seats': 'if_necessary'},
                    ],
                },
                {
                    'ticket_type_code': 'STALLS',
                    'price_band': [
                        {'price_band_code': 'C',
                         'allows_leaving_single_seats': 'always'},
                        {'price_band_code': 'D',
                         'allows_leaving_single_seats': 'never'},
                    ],
                },
            ]
        },
        'backend_is_broken': False,
        'backend_is_down': False,
        'backend_throttle_failed': False,
        'contiguous_seat_selection_only': True,
        'must_select_whole_seat_block': True,
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
        'valid_quantities': [2, 3, 4, 5, 6, 7],
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    availability, meta = client.get_availability('ABC123-1')
    fake_request.assert_called_with('availability.v1', {
        'perf_id': 'ABC123-1',
    })
    assert meta.contiguous_seat_selection_only is True
    assert meta.must_select_whole_seat_block is True
    assert meta.default_currency_code == 'gbp'
    assert meta.valid_quantities == [2, 3, 4, 5, 6, 7]
    assert len(availability) == 2
    circle, stalls = availability
    assert circle.code == 'CIRCLE'
    assert [band.code for band in circle.price_bands] == ['A', 'B']
    assert circle.price_bands[1].allows_leaving_single_seats == 'if_necessary'
    assert stalls.code == 'STALLS'
    assert [band.code for band in stalls.price_bands] == ['C', 'D']
    assert stalls.price_bands[0].allows_leaving_single_seats == 'always'
    assert stalls.price_bands[1].allows_leaving_single_seats == 'never'
def test_get_availability_with_number_of_seats(self, client, mock_make_request_for_availability):
    """number_of_seats maps onto no_of_seats."""
    client.get_availability('6IF-1', number_of_seats=2)
    expected = {'perf_id': '6IF-1', 'no_of_seats': 2}
    mock_make_request_for_availability.assert_called_with(
        'availability.v1', expected)
def test_get_availability_with_discounts(self, client, mock_make_request_for_availability):
    """discounts=True adds the add_discounts flag."""
    client.get_availability('6IF-1', discounts=True)
    expected = {'perf_id': '6IF-1', 'add_discounts': True}
    mock_make_request_for_availability.assert_called_with(
        'availability.v1', expected)
def test_get_availability_with_example_seats(self, client, mock_make_request_for_availability):
    """example_seats=True adds the add_example_seats flag."""
    client.get_availability('6IF-1', example_seats=True)
    expected = {'perf_id': '6IF-1', 'add_example_seats': True}
    mock_make_request_for_availability.assert_called_with(
        'availability.v1', expected)
def test_get_availability_with_seat_blocks(self, client, mock_make_request_for_availability):
    """seat_blocks=True adds the add_seat_blocks flag."""
    client.get_availability('6IF-1', seat_blocks=True)
    expected = {'perf_id': '6IF-1', 'add_seat_blocks': True}
    mock_make_request_for_availability.assert_called_with(
        'availability.v1', expected)
def test_get_availability_with_user_commission(self, client, mock_make_request_for_availability):
    """user_commission=True adds the req_predicted_commission flag."""
    client.get_availability('6IF-1', user_commission=True)
    expected = {'perf_id': '6IF-1', 'req_predicted_commission': True}
    mock_make_request_for_availability.assert_called_with(
        'availability.v1', expected)
def test_get_availability_no_availability(self, client, monkeypatch):
    """A payload without an availability key raises InvalidResponseError."""
    payload = {
        'backend_is_broken': False,
        'backend_is_down': False,
        'backend_throttle_failed': False,
    }
    monkeypatch.setattr(client, 'make_request', Mock(return_value=payload))
    with pytest.raises(exceptions.InvalidResponseError):
        client.get_availability('ABC123-1')
def test_get_send_methods(self, client, monkeypatch):
    """get_send_methods parses the send method list and currency meta."""
    payload = {
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
        'send_methods': {
            'send_method': [
                {'send_code': 'COBO'},
                {'send_code': 'POST'},
            ]
        },
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    send_methods, meta = client.get_send_methods('ABC123-1')
    fake_request.assert_called_with('send_methods.v1', {
        'perf_id': 'ABC123-1',
    })
    assert [method.code for method in send_methods] == ['COBO', 'POST']
    assert meta.get_currency().code == 'gbp'
def test_get_send_methods_bad_data(self, client, monkeypatch):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', Mock(return_value={}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.get_send_methods('ABC123-1')
def test_get_discounts(self, client, monkeypatch):
    """get_discounts parses the discount list and forwards extra kwargs."""
    payload = {
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
        'discounts': {
            'discount': [
                {'discount_code': 'ADULT'},
                {'discount_code': 'CHILD'},
            ]
        },
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    discounts, meta = client.get_discounts(
        'ABC123-1', 'STALLS', 'A/pool', an_optional_kwarg='kwarg_value')
    fake_request.assert_called_with('discounts.v1', {
        'perf_id': 'ABC123-1',
        'ticket_type_code': 'STALLS',
        'price_band_code': 'A/pool',
        'req_predicted_commission': False,
        'an_optional_kwarg': 'kwarg_value',
    })
    assert [discount.code for discount in discounts] == ['ADULT', 'CHILD']
    assert meta.get_currency().code == 'gbp'
def test_get_discounts_bad_data(self, client, monkeypatch):
    """An empty API payload raises InvalidResponseError."""
    monkeypatch.setattr(client, 'make_request', Mock(return_value={}))
    with pytest.raises(exceptions.InvalidResponseError):
        client.get_discounts('ABC123-1', 'STALLS', 'A/pool')
def test_trolley_params_with_trolley_token(self, client):
    """A token maps onto the trolley_token parameter."""
    built = client._trolley_params(token='DEF456')
    assert built == {'trolley_token': 'DEF456'}
def test_trolley_params_with_performance_id(self, client):
    """A performance ID maps onto the perf_id parameter."""
    built = client._trolley_params(performance_id='6IF-A8B')
    assert built == {'perf_id': '6IF-A8B'}
def test_trolley_params_with_number_of_seats(self, client):
    """number_of_seats maps onto the no_of_seats parameter."""
    built = client._trolley_params(number_of_seats=3)
    assert built == {'no_of_seats': 3}
def test_trolley_params_with_ticket_type_code(self, client):
    """ticket_type_code is forwarded unchanged."""
    built = client._trolley_params(ticket_type_code='STALLS')
    assert built == {'ticket_type_code': 'STALLS'}
def test_trolley_params_with_price_band_code(self, client):
    """price_band_code is forwarded unchanged."""
    built = client._trolley_params(price_band_code='A')
    assert built == {'price_band_code': 'A'}
def test_trolley_params_with_item_numbers_to_remove(self, client):
    """Item numbers are joined into a comma separated removal list."""
    built = client._trolley_params(
        item_numbers_to_remove=[1, 2, 3], token='ABC123')
    assert built == {
        'trolley_token': 'ABC123',
        'remove_items_list': '1,2,3',
    }
def test_trolley_params_with_item_numbers_to_remove_with_no_token(self, client):
    """Removing items without a trolley token is an error."""
    with pytest.raises(exceptions.InvalidParametersError):
        client._trolley_params(item_numbers_to_remove=[1, 2, 3])
def test_trolley_params_with_seats(self, client):
    """Seat IDs are spread across numbered seatN parameters."""
    built = client._trolley_params(seats=['A12', 'B13', 'C14'])
    assert built == {
        'seat0': 'A12',
        'seat1': 'B13',
        'seat2': 'C14',
    }
def test_trolley_params_with_discounts(self, client):
    """Discount codes are spread across numbered discN parameters."""
    built = client._trolley_params(discounts=['ADULT', 'CHILD', 'SENIOR'])
    assert built == {
        'disc0': 'ADULT',
        'disc1': 'CHILD',
        'disc2': 'SENIOR',
    }
def test_trolley_params_with_send_codes(self, client):
    """Per-backend send codes become <backend>_send_code parameters."""
    built = client._trolley_params(send_codes={'nimax': 'POST', 'see': 'COBO'})
    assert built == {
        'nimax_send_code': 'POST',
        'see_send_code': 'COBO',
    }
def test_trolley_params_with_invalid_send_codes(self, client):
    """send_codes must be a mapping, not a plain list."""
    with pytest.raises(exceptions.InvalidParametersError):
        client._trolley_params(send_codes=['POST', 'COBO'])
def test_get_trolley(self, client, monkeypatch):
    """get_trolley returns a Trolley plus currency meta."""
    payload = {
        'trolley_contents': {},
        'trolley_token': 'DEF456',
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    trolley, meta = client.get_trolley()
    fake_request.assert_called_with('trolley.v1', {})
    assert isinstance(trolley, Trolley)
    assert trolley.token == 'DEF456'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_trolley_with_unavailable_order(self, client, monkeypatch):
    """An unavailable order only raises when explicitly requested.

    ``raise_on_unavailable_order`` defaults to tolerating unavailable
    orders; setting it to true escalates them to OrderUnavailableError.
    """
    payload = {
        'trolley_contents': {},
        'trolley_token': 'DEF456',
        'currency_code': 'gbp',
        'input_contained_unavailable_order': True,
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    monkeypatch.setattr(client, 'make_request', Mock(return_value=payload))
    # the default must tolerate the unavailable order
    client.get_trolley()
    # opting in must escalate it
    with pytest.raises(exceptions.OrderUnavailableError):
        client.get_trolley(raise_on_unavailable_order=True)
def test_get_upsells(self, client, monkeypatch):
    """get_upsells fetches upsell events for a trolley token."""
    payload = {
        'results': {
            'event': [{'event_id': 'GHI789'}, {'event_id': 'JKL012'}],
            'paging_status': {'total_unpaged_results': 2},
        },
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    upsell_events, upsell_meta = client.get_upsells(token="foobar")
    fake_request.assert_called_with('upsells.v1', {
        'trolley_token': 'foobar',
    })
    assert [event.id for event in upsell_events] == ['GHI789', 'JKL012']
    assert upsell_meta.total_results == 2
def test_get_addons(self, client, monkeypatch):
    """get_addons fetches add-on events for a trolley token."""
    payload = {
        'results': {
            'event': [{'event_id': 'ABC123'}, {'event_id': 'DEF456'}],
            'paging_status': {'total_unpaged_results': 10},
        },
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    addon_events, addon_meta = client.get_addons(token="foobar")
    fake_request.assert_called_with('add_ons.v1', {
        'trolley_token': 'foobar',
    })
    assert [event.id for event in addon_events] == ['ABC123', 'DEF456']
    assert addon_meta.total_results == 10
def test_make_reservation(self, client, monkeypatch):
    """make_reservation POSTs to reserve.v1 and parses the Reservation."""
    payload = {
        'reserved_trolley': {'transaction_uuid': 'DEF456'},
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    reservation, meta = client.make_reservation()
    fake_request.assert_called_with('reserve.v1', {}, method=POST)
    assert isinstance(reservation, Reservation)
    assert reservation.trolley.transaction_uuid == 'DEF456'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_reservation(self, client, monkeypatch):
    """get_reservation GETs the archived reserve page for a transaction."""
    transaction_uuid = 'DEF456'
    payload = {
        'reserved_trolley': {'transaction_uuid': transaction_uuid},
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    fake_request = Mock(return_value=payload)
    monkeypatch.setattr(client, 'make_request', fake_request)
    reservation, meta = client.get_reservation(transaction_uuid)
    fake_request.assert_called_with('reserve_page_archive.v1', {
        "transaction_uuid": transaction_uuid
    }, method=GET)
    assert isinstance(reservation, Reservation)
    assert reservation.trolley.transaction_uuid == transaction_uuid
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_make_reservation_with_unavailable_order(self, client, monkeypatch):
    """An unavailable order only raises when explicitly requested.

    ``raise_on_unavailable_order`` defaults to tolerating unavailable
    orders; setting it to true escalates them to OrderUnavailableError.
    """
    payload = {
        "input_contained_unavailable_order": True,
        "unreserved_orders": [],
    }
    monkeypatch.setattr(client, 'make_request', Mock(return_value=payload))
    # the default must tolerate the unavailable order
    client.make_reservation()
    # opting in must escalate it
    with pytest.raises(exceptions.OrderUnavailableError):
        client.make_reservation(raise_on_unavailable_order=True)
def test_make_reservation_with_unavailable_order_but_successfull_reservation(self, client, monkeypatch):
    """A partial reservation survives the OrderUnavailableError.

    When one order is unavailable but the rest of the trolley reserved
    successfully, the raised exception must still carry the reservation
    (and its transaction_uuid) plus the response meta.
    """
    payload = {
        "input_contained_unavailable_order": True,
        'reserved_trolley': {'transaction_uuid': 'DEF456'},
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    monkeypatch.setattr(client, 'make_request', Mock(return_value=payload))
    with pytest.raises(exceptions.OrderUnavailableError) as excinfo:
        client.make_reservation(raise_on_unavailable_order=True)
    raised = excinfo.value
    assert raised.reservation
    assert raised.reservation.trolley.transaction_uuid == 'DEF456'
    assert raised.meta.default_currency_code == 'gbp'
def test_get_reservation_with_unavailable_order_but_successful_reservation(self, client, monkeypatch):
    """A partial reservation survives the OrderUnavailableError.

    When one order is unavailable but the rest of the trolley reserved
    successfully, the raised exception must still carry the reservation
    (and its transaction_uuid) plus the response meta.
    """
    transaction_uuid = 'DEF456'
    payload = {
        "input_contained_unavailable_order": True,
        'reserved_trolley': {'transaction_uuid': transaction_uuid},
        'currency_code': 'gbp',
        'currency_details': {'gbp': {'currency_code': 'gbp'}},
    }
    monkeypatch.setattr(client, 'make_request', Mock(return_value=payload))
    with pytest.raises(exceptions.OrderUnavailableError) as excinfo:
        client.get_reservation(transaction_uuid, raise_on_unavailable_order=True)
    raised = excinfo.value
    assert raised.reservation
    assert raised.reservation.trolley.transaction_uuid == transaction_uuid
    assert raised.meta.default_currency_code == 'gbp'
def test_get_status(self, client, monkeypatch):
    """status.v1 is called with the uuid and parsed into Status + meta."""
    response = {
        'trolley_contents': {
            'transaction_uuid': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    status, meta = client.get_status(
        transaction_uuid='DEF456',
        customer=True,
        external_sale_page=True,
    )
    # customer/external_sale_page kwargs map to add_* request params
    mock_make_request.assert_called_with('status.v1', {
        'transaction_uuid': 'DEF456',
        'add_customer': True,
        'add_external_sale_page': True,
    })
    assert isinstance(status, Status)
    assert status.trolley.transaction_uuid == 'DEF456'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_status_with_trans(self, client, monkeypatch):
    """Passing transaction_id (not uuid) routes to trans_id_status.v1."""
    response = {
        'trolley_contents': {
            'transaction_id': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    status, meta = client.get_status(
        transaction_id='DEF456',
        customer=True,
        external_sale_page=True,
    )
    # note the different endpoint and parameter name vs test_get_status
    mock_make_request.assert_called_with('trans_id_status.v1', {
        'transaction_id': 'DEF456',
        'add_customer': True,
        'add_external_sale_page': True,
    })
    assert isinstance(status, Status)
    assert status.trolley.transaction_id == 'DEF456'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_test(self, client, monkeypatch):
    """The test.v1 endpoint response should be parsed into a User."""
    fake_request = Mock(return_value={'user_id': 'foobar'})
    monkeypatch.setattr(client, 'make_request', fake_request)
    result = client.test()
    fake_request.assert_called_with('test.v1', {})
    assert isinstance(result, User)
    assert result.id == 'foobar'
def test_release_reservation(self, client, monkeypatch):
    """release.v1 is POSTed with the uuid; the released flag is returned."""
    fake_request = Mock(return_value={'released_ok': True})
    monkeypatch.setattr(client, 'make_request', fake_request)
    result = client.release_reservation('abc123')
    expected_params = {'transaction_uuid': 'abc123'}
    fake_request.assert_called_with('release.v1', expected_params, method=POST)
    assert result is True
def test_make_purchase_card_details(self, client, monkeypatch):
    """Purchasing with CardDetails posts card params and returns a Status."""
    response = {
        'transaction_status': 'purchased',
        'trolley_contents': {
            'transaction_uuid': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
    card_details = CardDetails(
        '4111 1111 1111 1111',
        expiry_year=17,
        expiry_month=3,
    )
    status, callout, meta = client.make_purchase(
        'abc123',
        customer,
        payment_method=card_details
    )
    expected_params = {
        'transaction_uuid': 'abc123',
        'first_name': 'fred',
        'last_name': 'flintstone',
        'address_line_one': '301 cobblestone way',
        'country_code': 'us',
        'card_number': '4111 1111 1111 1111',
        # expiry is serialised as MMYY
        'expiry_date': '0317',
        'supplier_can_use_customer_data': False,
        'user_can_use_customer_data': False,
        'world_can_use_customer_data': False,
        'send_confirmation_email': True,
    }
    mock_make_request.assert_called_with(
        'purchase.v1',
        expected_params,
        method=POST
    )
    # a direct card purchase completes immediately: no callout
    assert callout is None
    assert isinstance(status, Status)
    assert status.trolley.transaction_uuid == 'DEF456'
    assert status.status == 'purchased'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_make_purchase_redirection(self, client, monkeypatch):
    """Purchasing with RedirectionDetails yields a Callout, not a Status."""
    response = {
        "callout": {
            "bundle_source_code": "ext_test0",
        },
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
    redirection_details = RedirectionDetails(
        token='abc123',
        url='https://myticketingco.biz/confirmation/abc123',
        user_agent='Mozilla/5.0',
        accept='text/html,text/plain,application/json',
        remote_site='myticketingco.biz',
    )
    status, callout, meta = client.make_purchase(
        'abc123',
        customer,
        payment_method=redirection_details
    )
    expected_params = {
        'transaction_uuid': 'abc123',
        'first_name': 'fred',
        'last_name': 'flintstone',
        'address_line_one': '301 cobblestone way',
        'country_code': 'us',
        # fixed: the expected token must match the one given to
        # RedirectionDetails above (was a '<PASSWORD>' redaction artifact)
        'return_token': 'abc123',
        'return_url': 'https://myticketingco.biz/confirmation/abc123',
        'client_http_user_agent': 'Mozilla/5.0',
        'client_http_accept': 'text/html,text/plain,application/json',
        'remote_site': 'myticketingco.biz',
        'supplier_can_use_customer_data': False,
        'user_can_use_customer_data': False,
        'world_can_use_customer_data': False,
        'send_confirmation_email': True,
    }
    mock_make_request.assert_called_with(
        'purchase.v1',
        expected_params,
        method=POST
    )
    # a redirect payment has no final status yet; the callout carries on
    assert status is None
    assert isinstance(callout, Callout)
    assert callout.code == 'ext_test0'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code is None
def test_make_purchase_credit(self, client, monkeypatch):
    """Purchasing with no payment_method (on-credit) posts only customer data."""
    response = {
        'transaction_status': 'purchased',
        'trolley_contents': {
            'transaction_uuid': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
    status, callout, meta = client.make_purchase('abc123', customer)
    # no card/redirect params expected when buying on credit
    expected_params = {
        'transaction_uuid': 'abc123',
        'first_name': 'fred',
        'last_name': 'flintstone',
        'address_line_one': '301 cobblestone way',
        'country_code': 'us',
        'supplier_can_use_customer_data': False,
        'user_can_use_customer_data': False,
        'world_can_use_customer_data': False,
        'send_confirmation_email': True,
    }
    mock_make_request.assert_called_with(
        'purchase.v1',
        expected_params,
        method=POST
    )
    assert callout is None
    assert isinstance(status, Status)
    assert status.trolley.transaction_uuid == 'DEF456'
    assert status.status == 'purchased'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_make_purchase_opting_out_of_confirmation_email(self, client, monkeypatch):
    """send_confirmation_email=False omits the flag from the request."""
    response = {
        'transaction_status': 'purchased',
        'trolley_contents': {
            'transaction_uuid': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
    status, callout, meta = client.make_purchase(
        'abc123',
        customer,
        send_confirmation_email=False
    )
    # note: no 'send_confirmation_email' key at all when opting out
    expected_params = {
        'transaction_uuid': 'abc123',
        'first_name': 'fred',
        'last_name': 'flintstone',
        'address_line_one': '301 cobblestone way',
        'country_code': 'us',
        'supplier_can_use_customer_data': False,
        'user_can_use_customer_data': False,
        'world_can_use_customer_data': False,
    }
    mock_make_request.assert_called_with(
        'purchase.v1',
        expected_params,
        method=POST
    )
    assert callout is None
    assert isinstance(status, Status)
    assert status.trolley.transaction_uuid == 'DEF456'
    assert status.status == 'purchased'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_get_purchase(self, client, monkeypatch):
    """get_purchase GETs purchase_page_archive.v1 and parses a Status."""
    response = {
        'transaction_status': 'purchased',
        'trolley_contents': {
            'transaction_uuid': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    status, callout, meta = client.get_purchase('abc123')
    expected_params = {
        'transaction_uuid': 'abc123',
    }
    # unlike purchase.v1, this is a read-only GET
    mock_make_request.assert_called_with(
        'purchase_page_archive.v1',
        expected_params,
        method=GET
    )
    assert callout is None
    assert isinstance(status, Status)
    assert status.trolley.transaction_uuid == 'DEF456'
    assert status.status == 'purchased'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_next_callout(self, client, monkeypatch):
    """next_callout POSTs returned + kwargs params to the callback path."""
    response = {
        'transaction_status': 'purchased',
        'trolley_contents': {
            'transaction_uuid': 'DEF456'
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    status, callout, meta = client.next_callout(
        'abc123',
        'def456',
        {'foo': 'bar'},
        lol='beans',
    )
    # returned params and extra kwargs are merged into one payload
    expected_params = {
        'foo': 'bar',
        'lol': 'beans',
    }
    # the two tokens are embedded in the endpoint path itself
    mock_make_request.assert_called_with(
        'callback.v1/this.abc123/next.def456',
        expected_params,
        method=POST
    )
    assert callout is None
    assert isinstance(status, Status)
    assert status.trolley.transaction_uuid == 'DEF456'
    assert status.status == 'purchased'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_next_callout_with_additional_callout(self, client, monkeypatch):
    """When the callback returns another callout, no Status is produced."""
    response = {
        "callout": {
            "bundle_source_code": "ext_test0",
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    status, callout, meta = client.next_callout(
        'abc123',
        'def456',
        {'foo': 'bar'},
        lol='beans',
    )
    expected_params = {
        'foo': 'bar',
        'lol': 'beans',
    }
    mock_make_request.assert_called_with(
        'callback.v1/this.abc123/next.def456',
        expected_params,
        method=POST
    )
    # the chain continues: a Callout is returned instead of a Status
    assert status is None
    assert isinstance(callout, Callout)
    assert callout.code == 'ext_test0'
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_auth_can_be_overridden_with_subclass(self, monkeypatch):
    """Test that we can override authentication behavior in subclasses
    Clients should be able to override the get_auth_params and make
    requests without basic authentication, if they can authenticate in
    another secure way.
    Since get_auth_params() has been deprecated, this should raise a
    DeprecationWarning, but still work (for legacy client support).
    """
    # state
    class MyClient(Client):
        def __init__(self, user, auth_key, **kwargs):
            # fixed: 'password=<PASSWORD>' was a redaction artifact and a
            # syntax error. This subclass authenticates via auth_key query
            # params instead of basic auth (auth=None is asserted below),
            # so no password is supplied — TODO confirm against upstream.
            super(MyClient, self).__init__(user, password=None, **kwargs)
            self.auth_key = auth_key

        def get_auth_params(self):
            return {
                'user_id': self.user,
                'auth_key': self.auth_key,
            }
    client = MyClient('gandalf', auth_key='speakfriendandenter',
                      use_decimal=True)
    params = {
        'foo': 'bar',
    }
    client.language = 'en-GB'
    # fakes
    fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    # action: the deprecated hook should warn but still be honoured
    with pytest.warns(DeprecationWarning) as warning_info:
        response = client.make_request('events.v1', params)
    # results: no basic auth, credentials travel as query params instead
    assert response == {'lol': 'beans'}
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=None,
        params={
            'foo': 'bar',
            'user_id': 'gandalf',
            'auth_key': 'speakfriendandenter',
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
    assert warning_info[0].message.args[0] == (
        'Function get_auth_params() is deprecated and should not be used')
def test_extra_params_can_be_overriden_by_subclass(self, monkeypatch):
    """Test that we can override extra parameters in subclass
    Clients should be able to pass in extra parameters by overriding this
    method.
    """
    # state
    class MyClient(Client):
        def __init__(self, user, myfoo, **kwargs):
            # fixed: 'password=<PASSWORD>' was a redaction artifact and a
            # syntax error. The password value is irrelevant here (auth is
            # asserted as None below) — TODO confirm against upstream.
            super(MyClient, self).__init__(user, password=None, **kwargs)
            self.myfoo = myfoo

        def get_extra_params(self):
            # extend (not replace) the base extra params
            params = super(MyClient, self).get_extra_params()
            params.update(myfoo=self.myfoo)
            return params
    client = MyClient('batman', 'batmanfoo',
                      sub_user='robin', use_decimal=True)
    params = {'fruit': 'apple'}
    # fakes
    fake_response = FakeResponse(status_code=200, json={'a': 'b'})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    # action
    response = client.make_request('events.v1', params)
    # results: base sub_id, subclass myfoo, and call params all merged
    assert response == {'a': 'b'}
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=None,
        params={
            'sub_id': 'robin',
            'myfoo': 'batmanfoo',
            'fruit': 'apple',
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None,
    )
def test_get_auth_params_raises_deprecation_warning(self, client):
    """Calling the deprecated get_auth_params should warn and return nothing."""
    with pytest.warns(DeprecationWarning) as caught:
        result = client.get_auth_params()
    assert not result
    expected_message = 'Call to deprecated function get_auth_params'
    assert caught[0].message.args[0] == expected_message
def test_make_request_using_decimal_parsing(self, client, monkeypatch):
    """With use_decimal=True, JSON floats are parsed as decimal.Decimal."""
    # fakes: a real requests.Response so the client's own JSON decoding runs
    response_json = {'amount': 1.0}
    fake_response = requests.models.Response()
    fake_response._content = json.dumps(response_json).encode('utf-8')
    fake_response.status_code = 200
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    # action
    result = client.make_request('test.v1', {})
    # results: the client fixture is built with use_decimal=True
    assert 'amount' in result
    assert type(result['amount']) == decimal.Decimal
    assert result['amount'] == decimal.Decimal('1.0')
def test_make_request_using_float_parsing(self, monkeypatch):
    """Without use_decimal, JSON floats stay plain Python floats."""
    # state: deliberately NOT using the fixture, which sets use_decimal=True
    client = Client('bilbo', 'baggins')
    # fakes
    response_json = {'amount': 1.0}
    fake_response = requests.models.Response()
    fake_response._content = json.dumps(response_json).encode('utf-8')
    fake_response.status_code = 200
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    # action
    result = client.make_request('test.v1', {})
    # results
    assert 'amount' in result
    assert type(result['amount']) == float
    assert result['amount'] == 1.0
def test_make_purchase_with_agent_reference(self, client, monkeypatch):
    """An agent_reference kwarg is forwarded verbatim to purchase.v1."""
    # state
    response = {
        "callout": {
            "bundle_source_code": "ext_test0",
        },
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
    redirection_details = RedirectionDetails(
        token='abc123',
        url='https://myticketingco.biz/confirmation/abc123',
        user_agent='Mozilla/5.0',
        accept='text/html,text/plain,application/json',
        remote_site='myticketingco.biz',
    )
    client.make_purchase(
        'abc123',
        customer,
        payment_method=redirection_details,
        agent_reference='myticketingco_ff01'
    )
    expected_params = {
        'transaction_uuid': 'abc123',
        'first_name': 'fred',
        'last_name': 'flintstone',
        'address_line_one': '301 cobblestone way',
        'country_code': 'us',
        # fixed: the expected token must match the one given to
        # RedirectionDetails above (was a '<PASSWORD>' redaction artifact)
        'return_token': 'abc123',
        'return_url': 'https://myticketingco.biz/confirmation/abc123',
        'client_http_user_agent': 'Mozilla/5.0',
        'client_http_accept': 'text/html,text/plain,application/json',
        'remote_site': 'myticketingco.biz',
        'supplier_can_use_customer_data': False,
        'user_can_use_customer_data': False,
        'world_can_use_customer_data': False,
        'send_confirmation_email': True,
        'agent_reference': 'myticketingco_ff01',
    }
    mock_make_request.assert_called_with(
        'purchase.v1',
        expected_params,
        method=POST
    )
def test_cancel_purchase(self, client, monkeypatch):
    """cancel_purchase parses the canned successful-cancellation fixture."""
    # state: response payload comes from a JSON fixture file on disk
    with open("test_data/successful_cancellation.json", 'r') as file_handle:
        response = json.load(file_handle)
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    cancellation_result, meta = client.cancel_purchase('abc123')
    assert cancellation_result.is_fully_cancelled()
    assert cancellation_result.cancelled_item_numbers == [1]
assert 'gbp' in meta.currencies | tests/test_client.py | import decimal
import pytest
import json
import requests
from datetime import datetime
from mock import Mock
import pyticketswitch
from pyticketswitch.client import Client, POST, GET
from pyticketswitch import exceptions
from pyticketswitch.trolley import Trolley
from pyticketswitch.reservation import Reservation
from pyticketswitch.user import User
from pyticketswitch.customer import Customer
from pyticketswitch.payment_methods import CardDetails, RedirectionDetails
from pyticketswitch.status import Status
from pyticketswitch.callout import Callout
@pytest.fixture
def client():
    """A standard test client with decimal parsing enabled.

    The password is 'baggins': the make_request tests assert
    auth=(b'bilbo', b'baggins'); the '<PASSWORD>' placeholder was
    redaction damage that broke those assertions.
    """
    client = Client(user="bilbo", password="baggins", use_decimal=True)
    return client
@pytest.fixture
def fake_func():
    """Factory fixture: build a stub callable that returns a fixed value."""
    def make_stub(return_value):
        def stub(*args, **kwargs):
            return return_value
        return stub
    return make_stub
@pytest.fixture
def mock_make_request(client, monkeypatch):
    """Patch client.make_request with an empty 'results' payload."""
    response = {'results': {}}
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    return mock_make_request
@pytest.fixture
def mock_make_request_for_events(client, monkeypatch):
    """Patch client.make_request with an empty 'events_by_id' payload."""
    response = {'events_by_id': {}}
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    return mock_make_request
@pytest.fixture
def mock_make_request_for_performances(client, monkeypatch):
    """Patch client.make_request with an empty 'performances_by_id' payload."""
    response = {'performances_by_id': {}}
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    return mock_make_request
@pytest.fixture
def mock_make_request_for_availability(client, monkeypatch):
    """Patch client.make_request with an empty 'availability' payload."""
    response = {'availability': {}}
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    return mock_make_request
@pytest.fixture
def mock_make_request_for_trolley(client, monkeypatch):
    """Patch client.make_request with a minimal trolley-token payload."""
    # NOTE(review): token restored to 'ABC123'; the dump contained the
    # redaction artifact 'ABC<PASSWORD>' — confirm against upstream.
    response = {'trolley_token': 'ABC123'}
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    return mock_make_request
class FakeResponse(object):
    """Minimal stand-in for requests.Response used by the client tests."""

    def __init__(self, status_code=200, json=None):
        """Store the HTTP status and the canned JSON payload."""
        self.status_code = status_code
        self._json = json

    def json(self, **kwargs):
        """Return the canned payload; decoder kwargs are ignored."""
        return self._json

    @property
    def content(self):
        """Serialised payload, mirroring requests.Response.content."""
        serialised = json.dumps(self._json)
        return serialised
class FakeResponseRaisesValueError(FakeResponse):
    """FakeResponse variant whose body cannot be decoded as JSON."""
    def json(self, **kwargs):
        # mimics requests raising when the body is not valid JSON
        raise ValueError("ERROR")
class TestClient:
@pytest.mark.integration
def test_get_url(self, client):
    """get_url expands an endpoint name into the full API URL."""
    expected = 'https://api.ticketswitch.com/f13/events.v1/'
    assert client.get_url('events.v1') == expected
@pytest.mark.integration
def test_make_request(self, client, monkeypatch):
    """A plain GET carries basic auth, params and the standard headers."""
    fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    params = {
        'foo': 'bar',
    }
    client.language='en-GB'
    response = client.make_request('events.v1', params)
    assert response == {'lol': 'beans'}
    # credentials come from the client fixture ('bilbo'/'baggins')
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'bilbo', b'baggins'),
        params={
            'foo': 'bar',
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
@pytest.mark.integration
def test_make_request_with_timeout(self, client, monkeypatch):
    """An explicit timeout kwarg is forwarded to the session call."""
    fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    params = {
        'foo': 'bar',
    }
    client.language='en-GB'
    response = client.make_request('events.v1', params, timeout=15)
    assert response == {'lol': 'beans'}
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'bilbo', b'baggins'),
        params={
            'foo': 'bar',
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=15
    )
@pytest.mark.integration
def test_make_request_with_post(self, client, monkeypatch):
    """method=POST routes through session.post with params as form data."""
    fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
    fake_post = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.post = fake_post
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    params = {
        'foo': 'bar',
    }
    client.language='en-GB'
    response = client.make_request('events.v1', params, method=POST)
    assert response == {'lol': 'beans'}
    # note: POST sends 'data=', not 'params='
    fake_post.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'bilbo', b'baggins'),
        data={
            'foo': 'bar',
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
def test_make_request_with_subuser(self, monkeypatch):
    """A configured sub_user is sent as the sub_id request parameter."""
    # fixed: password restored to 'lovemedo' — the assertion below expects
    # auth=(b'beatles', b'lovemedo'); '<PASSWORD>' was redaction damage
    client = Client(user="beatles", password="lovemedo",
                    sub_user="ringo", use_decimal=True)
    fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    params = {
        'foo': 'bar',
    }
    client.language = 'en-GB'
    response = client.make_request('events.v1', params)
    assert response == {'lol': 'beans'}
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'beatles', b'lovemedo'),
        params={
            'foo': 'bar',
            'sub_id': 'ringo',
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
def test_make_request_with_tracking_id(self, monkeypatch):
    """A client-level tracking_id becomes the tsw_session_track_id param."""
    # fixed: password restored to 'pass' — the assertion below expects
    # auth=(b'user', b'pass'); '<PASSWORD>' was redaction damage
    client = Client(user="user", password="pass",
                    tracking_id="xyz", use_decimal=True)
    fake_response = FakeResponse(status_code=200, json={"depro": "fundis"})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    client.language = 'en-GB'
    response = client.make_request('events.v1', {})
    assert response
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'user', b'pass'),
        params={
            'tsw_session_track_id': 'xyz'
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
def test_make_request_when_using_per_request_tracking_id(self, monkeypatch):
    """A per-request tracking_id overrides the client-level default."""
    # fixed: password restored to 'pass', matching the sibling
    # test_make_request_with_tracking_id; '<PASSWORD>' was redaction damage
    client = Client(user="user", password="pass",
                    tracking_id="xyz", use_decimal=True)
    fake_response = FakeResponse(status_code=200, json={"depro": "fundis"})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    client.language = 'en-GB'
    params = {}
    client.add_optional_kwargs(params, tracking_id="123")
    response = client.make_request('events.v1', params)
    assert response
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'user', b'pass'),
        params={
            'tsw_session_track_id': '123'
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
    # NOTE(review): there is no second make_request call before the next
    # assertion — it only passes if Mock records the params dict by
    # reference and add_optional_kwargs mutates it in place. Confirm
    # whether a second client.make_request('events.v1', params) call was
    # intended here.
    client.add_optional_kwargs(params, tracking_id="456")
    fake_get.assert_called_with(
        'https://api.ticketswitch.com/f13/events.v1/',
        auth=(b'user', b'pass'),
        params={
            'tsw_session_track_id': '456'
        },
        headers={
            'Accept-Language': 'en-GB',
            'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
        },
        timeout=None
    )
def test_make_request_bad_response_with_auth_error(self, client, monkeypatch):
    """A 400 with an auth error body raises APIError carrying code/msg."""
    fake_response = FakeResponse(status_code=400, json={
        'error_code': 3,
        'error_desc': 'User authorisation failure',
    })
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    with pytest.raises(exceptions.APIError) as excinfo:
        client.make_request('test.v1', {})
    # the API's error fields and the raw response are preserved
    assert excinfo.value.msg == 'User authorisation failure'
    assert excinfo.value.code == 3
    assert excinfo.value.response is fake_response
def test_make_request_bad_response_with_error(self, client, monkeypatch):
    """A 400 with a generic API error body raises APIError with details."""
    fake_response = FakeResponse(status_code=400, json={
        'error_code': 8,
        'error_desc': 'price_band_code needs /pool or /alloc suffix',
    })
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    with pytest.raises(exceptions.APIError) as excinfo:
        client.make_request('trolley.v1', {})
    assert excinfo.value.msg == 'price_band_code needs /pool or /alloc suffix'
    assert excinfo.value.code == 8
    assert excinfo.value.response is fake_response
def test_make_request_bad_response_without_error(self, client, monkeypatch):
    """A 400 with no error fields raises InvalidResponseError instead."""
    fake_response = FakeResponse(status_code=400, json={})
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    with pytest.raises(exceptions.InvalidResponseError):
        client.make_request('trolley.v1', {})
def test_make_request_410_gone_response(self, client, monkeypatch):
    """HTTP 410 on a callback endpoint raises CallbackGoneError."""
    response_json = {'error_code': 8, 'error_desc': 'transaction failed'}
    fake_response = FakeResponse(status_code=410, json=response_json)
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    with pytest.raises(exceptions.CallbackGoneError):
        client.make_request('callback.v1', {})
def test_make_request_no_contents_raises(self, client, monkeypatch):
    """An undecodable JSON body raises InvalidResponseError."""
    response_json = {'data': 'some data'}
    # this fake's .json() always raises ValueError, like requests does
    fake_response = FakeResponseRaisesValueError(status_code=200, json=response_json)
    fake_get = Mock(return_value=fake_response)
    session = Mock(spec=requests.Session)
    session.get = fake_get
    monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
    with pytest.raises(exceptions.InvalidResponseError):
        client.make_request('test.v1', {})
def test_add_optional_kwargs_extra_info(self, client):
    """extra_info maps to the req_extra_info request flag."""
    params = {}
    client.add_optional_kwargs(params, extra_info=True)
    assert params == {'req_extra_info': True}
def test_add_optional_kwargs_reviews(self, client):
    """reviews maps to the req_reviews request flag."""
    params = {}
    client.add_optional_kwargs(params, reviews=True)
    assert params == {'req_reviews': True}
def test_add_optional_kwargs_media(self, client):
    """media=True expands into the full set of media request flags."""
    params = {}
    client.add_optional_kwargs(params, media=True)
    assert params == {
        'req_media_triplet_one': True,
        'req_media_triplet_two': True,
        'req_media_triplet_three': True,
        'req_media_triplet_four': True,
        'req_media_triplet_five': True,
        'req_media_seating_plan': True,
        'req_media_square': True,
        'req_media_landscape': True,
        'req_media_marquee': True,
        'req_video_iframe': True,
    }
def test_add_optional_kwargs_cost_range(self, client):
    """cost_range maps to the req_cost_range request flag."""
    params = {}
    client.add_optional_kwargs(params, cost_range=True)
    assert params == {'req_cost_range': True}
def test_add_optional_kwargs_best_value_offer(self, client):
    """best_value_offer implies req_cost_range plus its specific flag."""
    params = {}
    client.add_optional_kwargs(params, best_value_offer=True)
    assert params == {
        'req_cost_range': True,
        'req_cost_range_best_value_offer': True,
    }
def test_add_optional_kwargs_max_saving_offer(self, client):
    """max_saving_offer implies req_cost_range plus its specific flag."""
    params = {}
    client.add_optional_kwargs(params, max_saving_offer=True)
    assert params == {
        'req_cost_range': True,
        'req_cost_range_max_saving_offer': True,
    }
def test_add_optional_kwargs_min_cost_offer(self, client):
    """min_cost_offer implies req_cost_range plus its specific flag."""
    params = {}
    client.add_optional_kwargs(params, min_cost_offer=True)
    assert params == {
        'req_cost_range': True,
        'req_cost_range_min_cost_offer': True,
    }
def test_add_optional_kwargs_top_price_offer(self, client):
    """top_price_offer implies req_cost_range plus its specific flag."""
    params = {}
    client.add_optional_kwargs(params, top_price_offer=True)
    # fixed: was a bare `params == {...}` expression — a no-op that never
    # checked anything (flake8 B015); the comparison is now asserted
    assert params == {
        'req_cost_range': True,
        'req_cost_range_top_price_offer': True,
    }
def test_add_optional_kwargs_no_singles_data(self, client):
    """no_singles_data implies req_cost_range plus its specific flag."""
    params = {}
    client.add_optional_kwargs(params, no_singles_data=True)
    assert params == {
        'req_cost_range': True,
        'req_cost_range_no_singles_data': True,
    }
def test_add_optional_kwargs_cost_range_details(self, client):
    """cost_range_details maps to the req_cost_range_details flag."""
    params = {}
    client.add_optional_kwargs(params, cost_range_details=True)
    assert params == {
        'req_cost_range_details': True,
    }
def test_add_optional_kwargs_avail_details(self, client):
    """availability maps to the req_avail_details request flag."""
    params = {}
    client.add_optional_kwargs(params, availability=True)
    # fixed: was a bare `params == {...}` expression — a no-op that never
    # checked anything; expected value taken from the original comparison
    # (TODO confirm the flag name against add_optional_kwargs)
    assert params == {
        'req_avail_details': True,
    }
def test_add_optional_kwargs_avail_details_with_perfs(self, client):
    """availability_with_performances maps to req_avail_details_with_perfs."""
    params = {}
    client.add_optional_kwargs(params, availability_with_performances=True)
    # fixed: was a bare `params == {...}` expression — a no-op that never
    # checked anything; expected value taken from the original comparison
    # (TODO confirm the flag name against add_optional_kwargs)
    assert params == {
        'req_avail_details_with_perfs': True,
    }
def test_add_optional_kwargs_source_info(self, client):
    """source_info maps to the req_src_info request flag."""
    params = {}
    client.add_optional_kwargs(params, source_info=True)
    # fixed: was a bare `params == {...}` expression — a no-op that never
    # checked anything; expected value taken from the original comparison
    # (TODO confirm the flag name against add_optional_kwargs)
    assert params == {
        'req_src_info': True,
    }
def test_list_events(self, client, monkeypatch):
    """events.v1 results are parsed into Event objects plus paging meta."""
    response = {
        'results': {
            'event': [
                {'event_id': 'ABC123'},
                {'event_id': 'DEF456'},
            ],
            'paging_status': {
                'total_unpaged_results': 10,
            },
        },
        'currency_code': 'gbp',
        'currency_details': {
            'gbp': {
                'currency_code': 'gbp',
            }
        }
    }
    mock_make_request = Mock(return_value=response)
    monkeypatch.setattr(client, 'make_request', mock_make_request)
    events, meta = client.list_events()
    mock_make_request.assert_called_with('events.v1', {})
    assert len(events) == 2
    event_one, event_two = events
    assert event_one.id =='ABC123'
    assert event_two.id == 'DEF456'
    # paging info is surfaced via the meta object
    assert meta.total_results == 10
    assert 'gbp' in meta.currencies
    assert meta.default_currency_code == 'gbp'
def test_list_events_with_keywords(self, client, mock_make_request):
    """Keyword lists are joined into a comma-separated request value."""
    client.list_events(keywords=['awesome', 'stuff'])
    mock_make_request.assert_called_with('events.v1', {
        'keywords': 'awesome,stuff',
    })
def test_list_events_with_start_date(self, client, mock_make_request):
    """A start date alone produces an open-ended 'YYYYMMDD:' range."""
    client.list_events(start_date=datetime(2016, 7, 23, 0, 7, 25))
    mock_make_request.assert_called_with('events.v1', {
        'date_range': '20160723:',
    })
def test_list_events_with_end_date(self, client, mock_make_request):
    """An end date alone produces an open-start ':YYYYMMDD' range."""
    client.list_events(end_date=datetime(2016, 7, 23, 0, 7, 25))
    mock_make_request.assert_called_with('events.v1', {
        'date_range': ':20160723',
    })
def test_list_events_with_start_and_end_date(self, client, mock_make_request):
    """Both dates together produce a closed 'YYYYMMDD:YYYYMMDD' range."""
    client.list_events(
        start_date=datetime(2015, 3, 11, 0, 9, 45),
        end_date=datetime(2016, 7, 23, 0, 7, 25)
    )
    mock_make_request.assert_called_with('events.v1', {
        'date_range': '20150311:20160723',
    })
def test_list_events_country_code(self, client, mock_make_request):
    """country_code is passed through to the request unchanged."""
    client.list_events(country_code='fj')
    mock_make_request.assert_called_with('events.v1', {
        'country_code': 'fj',
    })
def test_list_events_city_code(self, client, mock_make_request):
    """city_code is passed through to the request unchanged."""
    client.list_events(city_code='london-uk')
    mock_make_request.assert_called_with('events.v1', {
        'city_code': 'london-uk',
    })
def test_list_events_geolocation(self, client, mock_make_request):
    """lat/long/radius are serialised as a 'lat:long:radius' circle."""
    client.list_events(
        latitude=51.52961137,
        longitude=-0.10601562,
        radius=10
    )
    mock_make_request.assert_called_with('events.v1', {
        'circle': '51.52961137:-0.10601562:10',
    })
def test_list_events_invalid_geolocation(self, client):
    """Any incomplete lat/long/radius combination must raise."""
    # missing latitude
    with pytest.raises(exceptions.InvalidGeoParameters):
        client.list_events(
            longitude=-0.10601562,
            radius=10
        )
    # missing longitude
    with pytest.raises(exceptions.InvalidGeoParameters):
        client.list_events(
            latitude=51.52961137,
            radius=10
        )
    # missing radius
    with pytest.raises(exceptions.InvalidGeoParameters):
        client.list_events(
            latitude=51.52961137,
            longitude=-0.10601562,
        )
    # radius alone
    with pytest.raises(exceptions.InvalidGeoParameters):
        client.list_events(
            radius=10
        )
def test_list_events_include_dead(self, client, mock_make_request):
    """include_dead is passed through to the request unchanged."""
    client.list_events(include_dead=True)
    mock_make_request.assert_called_with('events.v1', {
        'include_dead': True,
    })
def test_list_events_sort_order(self, client, mock_make_request):
    """sort_order is passed through to the request unchanged."""
    client.list_events(sort_order='foobar')
    mock_make_request.assert_called_with('events.v1', {
        'sort_order': 'foobar',
    })
def test_list_events_pagination(self, client, mock_make_request):
    """page/page_length kwargs map to the page_no/page_len params."""
    client.list_events(page=2, page_length=50)
    mock_make_request.assert_called_with('events.v1', {
        'page_no': 2,
        'page_len': 50,
    })
def test_list_events_no_results(self, client, monkeypatch, fake_func):
    """A response missing the 'results' key raises InvalidResponseError."""
    response = {}
    monkeypatch.setattr(client, 'make_request', fake_func(response))
    with pytest.raises(exceptions.InvalidResponseError):
        client.list_events()
def test_list_events_misc_kwargs(self, client, mock_make_request):
client.list_events(foobar='lolbeans')
mock_make_request.assert_called_with('events.v1', {
'foobar': 'lolbeans'
})
    def test_get_events(self, client, monkeypatch):
        """get_events should call events_by_id.v1 and key results by event id."""
        # canned API response covering two events plus currency metadata
        response = {
            'events_by_id': {
                'ABC123': {
                    'event': {'event_id': 'ABC123'},
                },
                'DEF456': {
                    'event': {'event_id': 'DEF456'},
                }
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        events, meta = client.get_events(['ABC123', 'DEF456'])
        # the ids must be joined into a comma separated list parameter
        mock_make_request.assert_called_with(
            'events_by_id.v1',
            {'event_id_list': 'ABC123,DEF456'},
        )
        event_one = events['ABC123']
        event_two = events['DEF456']
        assert event_one.id == 'ABC123'
        assert event_two.id == 'DEF456'
        # currency metadata should be surfaced on the meta object
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
def test_get_events_event_list(self, client, mock_make_request_for_events):
client.get_events(['6IF', '25DR', '3ENO'])
mock_make_request_for_events.assert_called_with('events_by_id.v1', {
'event_id_list': '6IF,25DR,3ENO',
})
    def test_get_events_no_results(self, client, monkeypatch, fake_func):
        """An empty API response should raise InvalidResponseError."""
        response = {}
        monkeypatch.setattr(client, 'make_request', fake_func(response))
        with pytest.raises(exceptions.InvalidResponseError):
            client.get_events(['6IF', '25DR'])
def test_get_events_misc_kwargs(self, client, mock_make_request_for_events):
client.get_events([], foobar='lolbeans')
mock_make_request_for_events.assert_called_with('events_by_id.v1', {
'foobar': 'lolbeans',
})
def test_get_events_with_upsell(self, client, mock_make_request_for_events):
client.get_events(['6IF'], with_upsells=True)
mock_make_request_for_events.assert_called_with('events_by_id.v1', {
'event_id_list': '6IF', 'add_upsells': True,
})
def test_get_events_with_addons(self, client, mock_make_request_for_events):
client.get_events(['ABC123'], with_addons=True)
mock_make_request_for_events.assert_called_with('events_by_id.v1', {
'event_id_list': 'ABC123', 'add_add_ons': True,
})
def test_get_event(self, client, monkeypatch):
response = {
'events_by_id': {
'ABC123': {
'event': {'event_id': 'ABC123'},
},
},
'currency_code': 'gbp',
'currency_details': {
'gbp': {
'currency_code': 'gbp',
}
}
}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
event, meta = client.get_event('ABC123')
mock_make_request.assert_called_with(
'events_by_id.v1',
{'event_id_list': 'ABC123'},
)
assert event.id =='ABC123'
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
    def test_get_months(self, client, monkeypatch):
        """get_months should call months.v1 and convert month names to numbers."""
        response = {
            'results': {
                'month': [
                    {'month': 'dec', 'year': 2016},
                    {'month': 'jan', 'year': 2017},
                    {'month': 'feb', 'year': 2017},
                ]
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        months = client.get_months('ABC123')
        mock_make_request.assert_called_with(
            'months.v1',
            {'event_id': 'ABC123'},
        )
        assert len(months) == 3
        # three-letter month names become 1-based month numbers
        assert months[0].month == 12
        assert months[0].year == 2016
        assert months[1].month == 1
        assert months[1].year == 2017
        assert months[2].month == 2
        assert months[2].year == 2017
    def test_get_months_no_results(self, client, monkeypatch, fake_func):
        """An empty API response should raise InvalidResponseError."""
        response = {}
        monkeypatch.setattr(client, 'make_request', fake_func(response))
        with pytest.raises(exceptions.InvalidResponseError):
            client.get_months('6IF')
def test_get_months_misc_kwargs(self, client, mock_make_request):
client.get_months('6IF', foobar='lolbeans')
mock_make_request.assert_called_with('months.v1', {
'event_id': '6IF',
'foobar': 'lolbeans'
})
    def test_list_performances_no_results(self, client, monkeypatch, fake_func):
        """An empty API response should raise InvalidResponseError."""
        response = {}
        monkeypatch.setattr(client, 'make_request', fake_func(response))
        with pytest.raises(exceptions.InvalidResponseError):
            client.list_performances('6IF')
    def test_list_performances(self, client, monkeypatch):
        """list_performances should return Performance objects plus paging meta."""
        # canned performances.v1 response: three performances for one event
        response = {
            'results': {
                'has_perf_names': False,
                'events_by_id': {
                    'ABC123': {'event': {'event_id': 'ABC123'}},
                },
                'performance': [
                    {'perf_id': 'ABC123-1', 'event_id': 'ABC123'},
                    {'perf_id': 'ABC123-2', 'event_id': 'ABC123'},
                    {'perf_id': 'ABC123-3', 'event_id': 'ABC123'},
                ],
                'paging_status': {
                    'total_unpaged_results': 10,
                },
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        performances, meta = client.list_performances('ABC123')
        mock_make_request.assert_called_with('performances.v1', {
            'event_id': 'ABC123',
        })
        assert len(performances) == 3
        performance_one, performance_two, performance_three = performances
        assert performance_one.id == 'ABC123-1'
        assert performance_two.id == 'ABC123-2'
        assert performance_three.id == 'ABC123-3'
        # every performance keeps a back-reference to its event
        assert performance_one.event_id == 'ABC123'
        assert performance_two.event_id == 'ABC123'
        assert performance_three.event_id == 'ABC123'
        # paging/currency metadata comes through on the meta object
        assert meta.has_names is False
        assert meta.total_results == 10
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
def test_list_performances_cost_range(self, client, mock_make_request):
client.list_performances('ABC123', cost_range=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_cost_range': True
})
def test_list_performances_best_value_offer(self, client, mock_make_request):
client.list_performances('ABC123', best_value_offer=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_cost_range': True,
'req_cost_range_best_value_offer': True
})
def test_list_performances_max_saving_offer(self, client, mock_make_request):
client.list_performances('ABC123', max_saving_offer=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_cost_range': True,
'req_cost_range_max_saving_offer': True
})
def test_list_performances_min_cost_offer(self, client, mock_make_request):
client.list_performances('ABC123', min_cost_offer=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_cost_range': True,
'req_cost_range_min_cost_offer': True
})
def test_list_performances_top_price_offer(self, client, mock_make_request):
client.list_performances('ABC123', top_price_offer=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_cost_range': True,
'req_cost_range_top_price_offer': True
})
def test_list_performances_no_singles_data(self, client, mock_make_request):
client.list_performances('ABC123', no_singles_data=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_cost_range': True,
'req_cost_range_no_singles_data': True
})
def test_list_performances_availability(self, client, mock_make_request):
client.list_performances('ABC123', availability=True)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_avail_details': True
})
def test_list_performances_pagination(self, client, mock_make_request):
client.list_performances(
'ABC123',
availability=True,
page=3,
page_length=20,
)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'req_avail_details': True,
'page_no': 3,
'page_len': 20,
})
def test_list_performances_with_start_date(self, client, mock_make_request):
client.list_performances(
'ABC123',
start_date=datetime(2016, 7, 23, 0, 7, 25)
)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'date_range': '20160723:',
})
    # NOTE(review): method name has a typo ("performancess"); left unchanged
    # here so the pytest test id stays stable, but worth renaming in a
    # dedicated commit.
    def test_list_performancess_with_end_date(self, client, mock_make_request):
        """A lone end date should produce an open-started date range."""
        client.list_performances(
            'ABC123',
            end_date=datetime(2016, 7, 23, 0, 7, 25)
        )
        mock_make_request.assert_called_with('performances.v1', {
            'event_id': 'ABC123',
            'date_range': ':20160723',
        })
def test_list_performances_with_start_and_end_date(self, client, mock_make_request):
client.list_performances(
'ABC123',
start_date=datetime(2015, 3, 11, 0, 9, 45),
end_date=datetime(2016, 7, 23, 0, 7, 25)
)
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'date_range': '20150311:20160723',
})
def test_list_performances_misc_kwargs(self, client, mock_make_request):
client.list_performances('ABC123', foobar='lolbeans')
mock_make_request.assert_called_with('performances.v1', {
'event_id': 'ABC123',
'foobar': 'lolbeans',
})
    def test_get_performances(self, client, monkeypatch):
        """get_performances should call performances_by_id.v1 keyed by perf id."""
        response = {
            'performances_by_id': {
                'ABC123-1': {
                    'perf_id': 'ABC123-1',
                    'event_id': 'ABC123',
                },
                'DEF456-2': {
                    'perf_id': 'DEF456-2',
                    'event_id': 'DEF456',
                }
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        performances, meta = client.get_performances(['ABC123-1', 'DEF456-2'])
        # perf ids must be joined into a comma separated list parameter
        mock_make_request.assert_called_with('performances_by_id.v1', {
            'perf_id_list': 'ABC123-1,DEF456-2',
        })
        performance_one = performances['ABC123-1']
        performance_two = performances['DEF456-2']
        assert performance_one.id == 'ABC123-1'
        assert performance_two.id == 'DEF456-2'
        assert performance_one.event_id == 'ABC123'
        assert performance_two.event_id == 'DEF456'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_get_performances_no_performances(self, client, monkeypatch, fake_func):
        """An empty API response should raise InvalidResponseError."""
        response = {}
        monkeypatch.setattr(client, 'make_request', fake_func(response))
        with pytest.raises(exceptions.InvalidResponseError):
            client.get_performances(['6IF-1', '6IF-2'])
def test_get_performances_misc_kwargs(self, client, mock_make_request_for_performances):
client.get_performances(['6IF-1', '25DR-2'], foobar='lolbeans')
mock_make_request_for_performances.assert_called_with('performances_by_id.v1', {
'perf_id_list': '6IF-1,25DR-2',
'foobar': 'lolbeans',
})
    def test_get_performance(self, client, monkeypatch):
        """A single perf id should be fetched via performances_by_id.v1."""
        response = {
            'performances_by_id': {
                'ABC123-1': {
                    'perf_id': 'ABC123-1',
                    'event_id': 'ABC123',
                },
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        performance, meta = client.get_performance('ABC123-1')
        mock_make_request.assert_called_with(
            'performances_by_id.v1',
            {'perf_id_list': 'ABC123-1'},
        )
        assert performance.id =='ABC123-1'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_get_availability(self, client, monkeypatch):
        """get_availability should parse ticket types, price bands and meta.

        The canned response exercises two ticket types with two price bands
        each, including the optional ``allows_leaving_single_seats`` values.
        """
        response = {
            'availability': {
                'ticket_type': [
                    {
                        'ticket_type_code': 'CIRCLE',
                        'price_band': [
                            {
                                'price_band_code': 'A',
                            },
                            {
                                'price_band_code': 'B',
                                'allows_leaving_single_seats': 'if_necessary',
                            },
                        ]
                    },
                    {
                        'ticket_type_code': 'STALLS',
                        'price_band': [
                            {
                                'price_band_code': 'C',
                                'allows_leaving_single_seats': 'always',
                            },
                            {
                                'price_band_code': 'D',
                                'allows_leaving_single_seats': 'never',
                            },
                        ]
                    }
                ]
            },
            'backend_is_broken': False,
            'backend_is_down': False,
            'backend_throttle_failed': False,
            'contiguous_seat_selection_only': True,
            'must_select_whole_seat_block': True,
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            },
            'valid_quantities': [2, 3, 4, 5, 6, 7],
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        availability, meta = client.get_availability('ABC123-1')
        mock_make_request.assert_called_with('availability.v1', {
            'perf_id': 'ABC123-1',
        })
        # seat-selection constraints and quantities surface on the meta object
        assert meta.contiguous_seat_selection_only is True
        assert meta.must_select_whole_seat_block is True
        assert meta.default_currency_code == 'gbp'
        assert meta.valid_quantities == [2, 3, 4, 5, 6, 7]
        assert len(availability) == 2
        ticket_type_one = availability[0]
        assert ticket_type_one.code == 'CIRCLE'
        assert len(ticket_type_one.price_bands) == 2
        price_band_one = ticket_type_one.price_bands[0]
        assert price_band_one.code == 'A'
        price_band_two = ticket_type_one.price_bands[1]
        assert price_band_two.code == 'B'
        assert price_band_two.allows_leaving_single_seats == 'if_necessary'
        ticket_type_two = availability[1]
        assert ticket_type_two.code == 'STALLS'
        assert len(ticket_type_two.price_bands) == 2
        price_band_three = ticket_type_two.price_bands[0]
        assert price_band_three.code == 'C'
        assert price_band_three.allows_leaving_single_seats == 'always'
        price_band_four = ticket_type_two.price_bands[1]
        assert price_band_four.code == 'D'
        assert price_band_four.allows_leaving_single_seats == 'never'
def test_get_availability_with_number_of_seats(self, client, mock_make_request_for_availability):
client.get_availability('6IF-1', number_of_seats=2)
mock_make_request_for_availability.assert_called_with('availability.v1', {
'perf_id': '6IF-1',
'no_of_seats': 2,
})
def test_get_availability_with_discounts(self, client, mock_make_request_for_availability):
client.get_availability('6IF-1', discounts=True)
mock_make_request_for_availability.assert_called_with('availability.v1', {
'perf_id': '6IF-1',
'add_discounts': True
})
def test_get_availability_with_example_seats(self, client, mock_make_request_for_availability):
client.get_availability('6IF-1', example_seats=True)
mock_make_request_for_availability.assert_called_with('availability.v1', {
'perf_id': '6IF-1',
'add_example_seats': True
})
def test_get_availability_with_seat_blocks(self, client, mock_make_request_for_availability):
client.get_availability('6IF-1', seat_blocks=True)
mock_make_request_for_availability.assert_called_with('availability.v1', {
'perf_id': '6IF-1',
'add_seat_blocks': True
})
def test_get_availability_with_user_commission(self, client, mock_make_request_for_availability):
client.get_availability('6IF-1', user_commission=True)
mock_make_request_for_availability.assert_called_with('availability.v1', {
'perf_id': '6IF-1',
'req_predicted_commission': True,
})
    def test_get_availability_no_availability(self, client, monkeypatch):
        """A response missing the ``availability`` key should raise."""
        # backend health flags alone are not a usable availability payload
        response = {
            'backend_is_broken': False,
            'backend_is_down': False,
            'backend_throttle_failed': False,
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        with pytest.raises(exceptions.InvalidResponseError):
            _, _ = client.get_availability('ABC123-1')
    def test_get_send_methods(self, client, monkeypatch):
        """get_send_methods should call send_methods.v1 and parse the codes."""
        response = {
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            },
            'send_methods': {
                'send_method': [
                    {'send_code': 'COBO'},
                    {'send_code': 'POST'}
                ]
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        send_methods, meta = client.get_send_methods('ABC123-1')
        mock_make_request.assert_called_with('send_methods.v1', {
            'perf_id': 'ABC123-1',
        })
        assert len(send_methods) == 2
        assert send_methods[0].code == 'COBO'
        assert send_methods[1].code == 'POST'
        assert meta.get_currency().code == 'gbp'
def test_get_send_methods_bad_data(self, client, monkeypatch):
mock_make_request = Mock(return_value={})
monkeypatch.setattr(client, 'make_request', mock_make_request)
with pytest.raises(exceptions.InvalidResponseError):
client.get_send_methods('ABC123-1')
    def test_get_discounts(self, client, monkeypatch):
        """get_discounts should call discounts.v1 with band/type and kwargs."""
        response = {
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            },
            'discounts': {
                'discount': [
                    {'discount_code': 'ADULT'},
                    {'discount_code': 'CHILD'}
                ]
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        discounts, meta = client.get_discounts('ABC123-1', 'STALLS', 'A/pool',
                                               an_optional_kwarg='kwarg_value')
        # predicted commission defaults to off; extra kwargs pass through
        mock_make_request.assert_called_with('discounts.v1', {
            'perf_id': 'ABC123-1',
            'ticket_type_code': 'STALLS',
            'price_band_code': 'A/pool',
            'req_predicted_commission': False,
            'an_optional_kwarg': 'kwarg_value',
        })
        assert len(discounts) == 2
        assert discounts[0].code == 'ADULT'
        assert discounts[1].code == 'CHILD'
        assert meta.get_currency().code == 'gbp'
def test_get_discounts_bad_data(self, client, monkeypatch):
mock_make_request = Mock(return_value={})
monkeypatch.setattr(client, 'make_request', mock_make_request)
with pytest.raises(exceptions.InvalidResponseError):
client.get_discounts('ABC123-1', 'STALLS', 'A/pool')
def test_trolley_params_with_trolley_token(self, client):
params = client._trolley_params(token='DEF456')
assert params == {'trolley_token': 'DEF456'}
def test_trolley_params_with_performance_id(self, client):
params = client._trolley_params(performance_id='6IF-A8B')
assert params == {'perf_id': '6IF-A8B'}
def test_trolley_params_with_number_of_seats(self, client):
params = client._trolley_params(number_of_seats=3)
assert params == {'no_of_seats': 3}
def test_trolley_params_with_ticket_type_code(self, client):
params = client._trolley_params(ticket_type_code='STALLS')
assert params == {'ticket_type_code': 'STALLS'}
def test_trolley_params_with_price_band_code(self, client):
params = client._trolley_params(price_band_code='A')
assert params == {
'price_band_code': 'A'
}
def test_trolley_params_with_item_numbers_to_remove(self, client):
params = client._trolley_params(item_numbers_to_remove=[1, 2, 3], token='ABC123')
assert params == {
'trolley_token': 'ABC123',
'remove_items_list': '1,2,3'
}
def test_trolley_params_with_item_numbers_to_remove_with_no_token(self, client):
with pytest.raises(exceptions.InvalidParametersError):
client._trolley_params(item_numbers_to_remove=[1, 2, 3])
def test_trolley_params_with_seats(self, client):
params = client._trolley_params(seats=['A12', 'B13', 'C14'])
assert params == {
'seat0': 'A12',
'seat1': 'B13',
'seat2': 'C14',
}
def test_trolley_params_with_discounts(self, client):
params = client._trolley_params(discounts=['ADULT', 'CHILD', 'SENIOR'])
assert params == {
'disc0': 'ADULT',
'disc1': 'CHILD',
'disc2': 'SENIOR',
}
def test_trolley_params_with_send_codes(self, client):
params = client._trolley_params(send_codes={'nimax': 'POST', 'see': 'COBO'})
assert params == {
'nimax_send_code': 'POST',
'see_send_code': 'COBO'
}
def test_trolley_params_with_invalid_send_codes(self, client):
with pytest.raises(exceptions.InvalidParametersError):
client._trolley_params(send_codes=['POST', 'COBO'])
    def test_get_trolley(self, client, monkeypatch):
        """get_trolley should call trolley.v1 and return a Trolley with meta."""
        response = {
            'trolley_contents': {},
            'trolley_token': 'DEF456',
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        trolley, meta = client.get_trolley()
        # with no arguments the call carries an empty parameter dict
        mock_make_request.assert_called_with('trolley.v1', {})
        assert isinstance(trolley, Trolley)
        assert trolley.token == 'DEF456'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_get_trolley_with_unavailable_order(self, client, monkeypatch):
        """
        This test is to check that an unavailable order doesn't raise
        any exceptions unless `raise_on_unavailable_order` is set to true.
        """
        # response flags that the input contained an unavailable order
        response = {
            'trolley_contents': {},
            'trolley_token': 'DEF456',
            'currency_code': 'gbp',
            'input_contained_unavailable_order': True,
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        # this should not raise any exceptions
        client.get_trolley()
        # but this should
        with pytest.raises(exceptions.OrderUnavailableError):
            client.get_trolley(raise_on_unavailable_order=True)
    def test_get_upsells(self, client, monkeypatch):
        """get_upsells should call upsells.v1 with the trolley token."""
        # fakes
        response = {
            'results': {
                'event': [
                    {'event_id': 'GHI789'},
                    {'event_id': 'JKL012'},
                ],
                'paging_status': {
                    'total_unpaged_results': 2,
                },
            },
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        # action
        (upsell_events, upsell_meta) = client.get_upsells(token="foobar")
        # results
        mock_make_request.assert_called_with('upsells.v1', {
            'trolley_token': 'foobar',
        })
        assert len(upsell_events) == 2
        event_one, event_two = upsell_events
        assert event_one.id == 'GHI789'
        assert event_two.id == 'JKL012'
        assert upsell_meta.total_results == 2
    def test_get_addons(self, client, monkeypatch):
        """get_addons should call add_ons.v1 with the trolley token."""
        # fakes
        response = {
            'results': {
                'event': [
                    {'event_id': 'ABC123'},
                    {'event_id': 'DEF456'},
                ],
                'paging_status': {
                    'total_unpaged_results': 10,
                },
            },
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        # action
        addon_events, addon_meta = client.get_addons(token="foobar")
        # results
        mock_make_request.assert_called_with('add_ons.v1', {
            'trolley_token': 'foobar',
        })
        assert len(addon_events) == 2
        event_one, event_two = addon_events
        assert event_one.id =='ABC123'
        assert event_two.id == 'DEF456'
        assert addon_meta.total_results == 10
    def test_make_reservation(self, client, monkeypatch):
        """make_reservation should POST reserve.v1 and return a Reservation."""
        response = {
            'reserved_trolley': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        reservation, meta = client.make_reservation()
        # reservations must be POSTed, not fetched
        mock_make_request.assert_called_with('reserve.v1', {}, method=POST)
        assert isinstance(reservation, Reservation)
        assert reservation.trolley.transaction_uuid == 'DEF456'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_get_reservation(self, client, monkeypatch):
        """get_reservation should GET the archived reserve page by uuid."""
        transaction_uuid = 'DEF456'
        response = {
            'reserved_trolley': {
                'transaction_uuid': transaction_uuid
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        reservation, meta = client.get_reservation(transaction_uuid)
        # archived reservations are retrieved with GET, not POST
        mock_make_request.assert_called_with('reserve_page_archive.v1', {
            "transaction_uuid": transaction_uuid
        }, method=GET)
        assert isinstance(reservation, Reservation)
        assert reservation.trolley.transaction_uuid == transaction_uuid
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_make_reservation_with_unavailable_order(self, client, monkeypatch):
        """
        This test is to check that an unavailable order doesn't raise
        any exceptions unless `raise_on_unavailable_order` is set to true.
        """
        # nothing was reserved; the API only reports the unavailable input
        data = {
            "input_contained_unavailable_order": True,
            "unreserved_orders": [],
        }
        mock_make_request = Mock(return_value=data)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        # this should not raise any exceptions
        client.make_reservation()
        # but this should
        with pytest.raises(exceptions.OrderUnavailableError):
            client.make_reservation(raise_on_unavailable_order=True)
    def test_make_reservation_with_unavailable_order_but_successfull_reservation(self, client, monkeypatch):
        """
        This checks that when we raise an exception on unavailable order, but
        other parts of the trolley are successfully reserved, that we don't
        lose the transaction_uuid.

        The raised exception must carry the partial reservation and its meta.
        """
        data = {
            "input_contained_unavailable_order": True,
            'reserved_trolley': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=data)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        # but this should
        with pytest.raises(exceptions.OrderUnavailableError) as excinfo:
            client.make_reservation(raise_on_unavailable_order=True)
        exception = excinfo.value
        # the partial reservation survives on the exception object
        assert exception.reservation
        assert exception.reservation.trolley.transaction_uuid == 'DEF456'
        assert exception.meta.default_currency_code == 'gbp'
    def test_get_reservation_with_unavailable_order_but_successful_reservation(self, client, monkeypatch):
        """
        This checks that when we raise an exception on unavailable order, but
        other parts of the trolley are successfully reserved, that we don't
        lose the transaction_uuid.

        Mirrors the make_reservation variant above but for get_reservation.
        """
        transaction_uuid = 'DEF456'
        data = {
            "input_contained_unavailable_order": True,
            'reserved_trolley': {
                'transaction_uuid': transaction_uuid
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=data)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        # but this should
        with pytest.raises(exceptions.OrderUnavailableError) as excinfo:
            client.get_reservation(transaction_uuid, raise_on_unavailable_order=True)
        exception = excinfo.value
        # the partial reservation survives on the exception object
        assert exception.reservation
        assert exception.reservation.trolley.transaction_uuid == transaction_uuid
        assert exception.meta.default_currency_code == 'gbp'
    def test_get_status(self, client, monkeypatch):
        """get_status by uuid should call status.v1 with the add_* flags."""
        response = {
            'trolley_contents': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        status, meta = client.get_status(
            transaction_uuid='DEF456',
            customer=True,
            external_sale_page=True,
        )
        # customer/external_sale_page toggles map to add_* request flags
        mock_make_request.assert_called_with('status.v1', {
            'transaction_uuid': 'DEF456',
            'add_customer': True,
            'add_external_sale_page': True,
        })
        assert isinstance(status, Status)
        assert status.trolley.transaction_uuid == 'DEF456'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_get_status_with_trans(self, client, monkeypatch):
        """get_status by transaction id should use the trans_id_status.v1 endpoint."""
        response = {
            'trolley_contents': {
                'transaction_id': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        status, meta = client.get_status(
            transaction_id='DEF456',
            customer=True,
            external_sale_page=True,
        )
        # a transaction_id (rather than uuid) selects a different endpoint
        mock_make_request.assert_called_with('trans_id_status.v1', {
            'transaction_id': 'DEF456',
            'add_customer': True,
            'add_external_sale_page': True,
        })
        assert isinstance(status, Status)
        assert status.trolley.transaction_id == 'DEF456'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
def test_test(self, client, monkeypatch):
response = {'user_id': 'foobar'}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
user = client.test()
mock_make_request.assert_called_with('test.v1', {})
assert isinstance(user, User)
assert user.id == 'foobar'
def test_release_reservation(self, client, monkeypatch):
response = {'released_ok': True}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
released = client.release_reservation('abc123')
mock_make_request.assert_called_with('release.v1', {
'transaction_uuid': 'abc123',
}, method=POST)
assert released is True
    def test_make_purchase_card_details(self, client, monkeypatch):
        """Paying with card details should flatten customer + card into params."""
        response = {
            'transaction_status': 'purchased',
            'trolley_contents': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
        card_details = CardDetails(
            '4111 1111 1111 1111',
            expiry_year=17,
            expiry_month=3,
        )
        status, callout, meta = client.make_purchase(
            'abc123',
            customer,
            payment_method=card_details
        )
        # expiry month/year are packed as MMYY; data-use flags default off
        expected_params = {
            'transaction_uuid': 'abc123',
            'first_name': 'fred',
            'last_name': 'flintstone',
            'address_line_one': '301 cobblestone way',
            'country_code': 'us',
            'card_number': '4111 1111 1111 1111',
            'expiry_date': '0317',
            'supplier_can_use_customer_data': False,
            'user_can_use_customer_data': False,
            'world_can_use_customer_data': False,
            'send_confirmation_email': True,
        }
        mock_make_request.assert_called_with(
            'purchase.v1',
            expected_params,
            method=POST
        )
        # a card purchase completes in one step, so no callout is returned
        assert callout is None
        assert isinstance(status, Status)
        assert status.trolley.transaction_uuid == 'DEF456'
        assert status.status == 'purchased'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
def test_make_purchase_redirection(self, client, monkeypatch):
response = {
"callout": {
"bundle_source_code": "ext_test0",
},
'currency_details': {
'gbp': {
'currency_code': 'gbp',
}
}
}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
redirection_details = RedirectionDetails(
token='abc123',
url='https://myticketingco.biz/confirmation/abc123',
user_agent='Mozilla/5.0',
accept='text/html,text/plain,application/json',
remote_site='myticketingco.biz',
)
status, callout, meta = client.make_purchase(
'abc123',
customer,
payment_method=redirection_details
)
expected_params = {
'transaction_uuid': 'abc123',
'first_name': 'fred',
'last_name': 'flintstone',
'address_line_one': '301 cobblestone way',
'country_code': 'us',
'return_token': '<PASSWORD>',
'return_url': 'https://myticketingco.biz/confirmation/abc123',
'client_http_user_agent': 'Mozilla/5.0',
'client_http_accept': 'text/html,text/plain,application/json',
'remote_site': 'myticketingco.biz',
'supplier_can_use_customer_data': False,
'user_can_use_customer_data': False,
'world_can_use_customer_data': False,
'send_confirmation_email': True,
}
mock_make_request.assert_called_with(
'purchase.v1',
expected_params,
method=POST
)
assert status is None
assert isinstance(callout, Callout)
assert callout.code == 'ext_test0'
assert 'gbp' in meta.currencies
assert meta.default_currency_code is None
    def test_make_purchase_credit(self, client, monkeypatch):
        """Purchasing on credit (no payment_method) should omit card params."""
        response = {
            'transaction_status': 'purchased',
            'trolley_contents': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
        status, callout, meta = client.make_purchase('abc123', customer)
        # only customer data and the default flags are sent on credit account
        expected_params = {
            'transaction_uuid': 'abc123',
            'first_name': 'fred',
            'last_name': 'flintstone',
            'address_line_one': '301 cobblestone way',
            'country_code': 'us',
            'supplier_can_use_customer_data': False,
            'user_can_use_customer_data': False,
            'world_can_use_customer_data': False,
            'send_confirmation_email': True,
        }
        mock_make_request.assert_called_with(
            'purchase.v1',
            expected_params,
            method=POST
        )
        assert callout is None
        assert isinstance(status, Status)
        assert status.trolley.transaction_uuid == 'DEF456'
        assert status.status == 'purchased'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_make_purchase_opting_out_of_confirmation_email(self, client, monkeypatch):
        """send_confirmation_email=False should drop the email param entirely."""
        response = {
            'transaction_status': 'purchased',
            'trolley_contents': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
        status, callout, meta = client.make_purchase(
            'abc123',
            customer,
            send_confirmation_email=False
        )
        # note: no 'send_confirmation_email' key at all when opted out
        expected_params = {
            'transaction_uuid': 'abc123',
            'first_name': 'fred',
            'last_name': 'flintstone',
            'address_line_one': '301 cobblestone way',
            'country_code': 'us',
            'supplier_can_use_customer_data': False,
            'user_can_use_customer_data': False,
            'world_can_use_customer_data': False,
        }
        mock_make_request.assert_called_with(
            'purchase.v1',
            expected_params,
            method=POST
        )
        assert callout is None
        assert isinstance(status, Status)
        assert status.trolley.transaction_uuid == 'DEF456'
        assert status.status == 'purchased'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
    def test_get_purchase(self, client, monkeypatch):
        """get_purchase should GET the archived purchase page by uuid."""
        response = {
            'transaction_status': 'purchased',
            'trolley_contents': {
                'transaction_uuid': 'DEF456'
            },
            'currency_code': 'gbp',
            'currency_details': {
                'gbp': {
                    'currency_code': 'gbp',
                }
            }
        }
        mock_make_request = Mock(return_value=response)
        monkeypatch.setattr(client, 'make_request', mock_make_request)
        status, callout, meta = client.get_purchase('abc123')
        expected_params = {
            'transaction_uuid': 'abc123',
        }
        # archived purchases are retrieved with GET, not POST
        mock_make_request.assert_called_with(
            'purchase_page_archive.v1',
            expected_params,
            method=GET
        )
        assert callout is None
        assert isinstance(status, Status)
        assert status.trolley.transaction_uuid == 'DEF456'
        assert status.status == 'purchased'
        assert 'gbp' in meta.currencies
        assert meta.default_currency_code == 'gbp'
def test_next_callout(self, client, monkeypatch):
response = {
'transaction_status': 'purchased',
'trolley_contents': {
'transaction_uuid': 'DEF456'
},
'currency_code': 'gbp',
'currency_details': {
'gbp': {
'currency_code': 'gbp',
}
}
}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
status, callout, meta = client.next_callout(
'abc123',
'def456',
{'foo': 'bar'},
lol='beans',
)
expected_params = {
'foo': 'bar',
'lol': 'beans',
}
mock_make_request.assert_called_with(
'callback.v1/this.abc123/next.def456',
expected_params,
method=POST
)
assert callout is None
assert isinstance(status, Status)
assert status.trolley.transaction_uuid == 'DEF456'
assert status.status == 'purchased'
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
def test_next_callout_with_additional_callout(self, client, monkeypatch):
response = {
"callout": {
"bundle_source_code": "ext_test0",
},
'currency_code': 'gbp',
'currency_details': {
'gbp': {
'currency_code': 'gbp',
}
}
}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
status, callout, meta = client.next_callout(
'abc123',
'def456',
{'foo': 'bar'},
lol='beans',
)
expected_params = {
'foo': 'bar',
'lol': 'beans',
}
mock_make_request.assert_called_with(
'callback.v1/this.abc123/next.def456',
expected_params,
method=POST
)
assert status is None
assert isinstance(callout, Callout)
assert callout.code == 'ext_test0'
assert 'gbp' in meta.currencies
assert meta.default_currency_code == 'gbp'
def test_auth_can_be_overridden_with_subclass(self, monkeypatch):
"""Test that we can override authentication behavior in subclasses
Clients should be able to override the get_auth_params and make
requests without basic authentication, if they can authenticate in
another secure way.
Since get_auth_params() has been deprecated, this should raise a
DeprecationWarning, but still work (for legacy client support).
"""
# state
class MyClient(Client):
def __init__(self, user, auth_key, **kwargs):
super(MyClient, self).__init__(user, password=<PASSWORD>, **kwargs)
self.auth_key = auth_key
def get_auth_params(self):
return {
'user_id': self.user,
'auth_key': self.auth_key,
}
client = MyClient('gandalf', auth_key='speakfriendandenter',
use_decimal=True)
params = {
'foo': 'bar',
}
client.language='en-GB'
# fakes
fake_response = FakeResponse(status_code=200, json={"lol": "beans"})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
# action
with pytest.warns(DeprecationWarning) as warning_info:
response = client.make_request('events.v1', params)
# results
assert response == {'lol': 'beans'}
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=None,
params={
'foo': 'bar',
'user_id': 'gandalf',
'auth_key': 'speakfriendandenter',
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None
)
assert warning_info[0].message.args[0] == (
'Function get_auth_params() is deprecated and should not be used')
def test_extra_params_can_be_overriden_by_subclass(self, monkeypatch):
"""Test that we can override extra parameters in subclass
Clients should be able to pass in extra parameters by overriding this
method.
"""
# state
class MyClient(Client):
def __init__(self, user, myfoo, **kwargs):
super(MyClient, self).__init__(user, password=<PASSWORD>, **kwargs)
self.myfoo = myfoo
def get_extra_params(self):
params = super(MyClient, self).get_extra_params()
params.update(myfoo=self.myfoo)
return params
client = MyClient('batman', 'batmanfoo',
sub_user='robin', use_decimal=True)
params = {'fruit': 'apple'}
# fakes
fake_response = FakeResponse(status_code=200, json={'a': 'b'})
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
# action
response = client.make_request('events.v1', params)
# results
assert response == {'a': 'b'}
fake_get.assert_called_with(
'https://api.ticketswitch.com/f13/events.v1/',
auth=None,
params={
'sub_id': 'robin',
'myfoo': 'batmanfoo',
'fruit': 'apple',
},
headers={
'Accept-Language': 'en-GB',
'User-Agent': 'pyticketswitch {}'.format(pyticketswitch.__version__),
},
timeout=None,
)
def test_get_auth_params_raises_deprecation_warning(self, client):
"""Tests that get_auth_params raises deprecation warning"""
with pytest.warns(DeprecationWarning) as warning_list:
params = client.get_auth_params()
assert not params
assert warning_list[0].message.args[0] == (
'Call to deprecated function get_auth_params'
)
def test_make_request_using_decimal_parsing(self, client, monkeypatch):
# fakes
response_json = {'amount': 1.0}
fake_response = requests.models.Response()
fake_response._content = json.dumps(response_json).encode('utf-8')
fake_response.status_code = 200
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
# action
result = client.make_request('test.v1', {})
# results
assert 'amount' in result
assert type(result['amount']) == decimal.Decimal
assert result['amount'] == decimal.Decimal('1.0')
def test_make_request_using_float_parsing(self, monkeypatch):
# state
client = Client('bilbo', 'baggins')
# fakes
response_json = {'amount': 1.0}
fake_response = requests.models.Response()
fake_response._content = json.dumps(response_json).encode('utf-8')
fake_response.status_code = 200
fake_get = Mock(return_value=fake_response)
session = Mock(spec=requests.Session)
session.get = fake_get
monkeypatch.setattr(client, 'get_session', Mock(return_value=session))
# action
result = client.make_request('test.v1', {})
# results
assert 'amount' in result
assert type(result['amount']) == float
assert result['amount'] == 1.0
def test_make_purchase_with_agent_reference(self, client, monkeypatch):
# state
response = {
"callout": {
"bundle_source_code": "ext_test0",
},
'currency_details': {
'gbp': {
'currency_code': 'gbp',
}
}
}
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
customer = Customer('fred', 'flintstone', ['301 cobblestone way'], 'us')
redirection_details = RedirectionDetails(
token='abc123',
url='https://myticketingco.biz/confirmation/abc123',
user_agent='Mozilla/5.0',
accept='text/html,text/plain,application/json',
remote_site='myticketingco.biz',
)
client.make_purchase(
'abc123',
customer,
payment_method=redirection_details,
agent_reference='myticketingco_ff01'
)
expected_params = {
'transaction_uuid': 'abc123',
'first_name': 'fred',
'last_name': 'flintstone',
'address_line_one': '301 cobblestone way',
'country_code': 'us',
'return_token': '<PASSWORD>',
'return_url': 'https://myticketingco.biz/confirmation/abc123',
'client_http_user_agent': 'Mozilla/5.0',
'client_http_accept': 'text/html,text/plain,application/json',
'remote_site': 'myticketingco.biz',
'supplier_can_use_customer_data': False,
'user_can_use_customer_data': False,
'world_can_use_customer_data': False,
'send_confirmation_email': True,
'agent_reference': 'myticketingco_ff01',
}
mock_make_request.assert_called_with(
'purchase.v1',
expected_params,
method=POST
)
def test_cancel_purchase(self, client, monkeypatch):
# state
with open("test_data/successful_cancellation.json", 'r') as file_handle:
response = json.load(file_handle)
mock_make_request = Mock(return_value=response)
monkeypatch.setattr(client, 'make_request', mock_make_request)
cancellation_result, meta = client.cancel_purchase('abc123')
assert cancellation_result.is_fully_cancelled()
assert cancellation_result.cancelled_item_numbers == [1]
assert 'gbp' in meta.currencies | 0.561936 | 0.30702 |
import pytest
from app.api.services import (
save_token,
login_user,
logout_user,
get_users,
get_user,
save_new_user,
get_playlists,
get_playlist,
put_playlist,
save_new_playlist,
)
from app.api.models import BlacklistToken
from app.api.errors import BadRequest
class TestAuthService:
"""Testing auth service"""
def test_save_token_pass(self, db) -> None:
"""It saves the token to the blacklist table"""
token = "my_token"
res = save_token(token)
assert res is True
assert BlacklistToken.check_blacklist(token) is True
def test_login_user_pass(self, db) -> None:
"""It finds the existing user by email and returns a JWT Token"""
auth_data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
token = login_user(auth_data)
assert token is not None
def test_login_user_fail(self, db) -> None:
"""It raises an BadRequest exception when credentials don't match"""
auth_data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
with pytest.raises(BadRequest):
login_user(auth_data)
def test_logout_user(self, db) -> None:
"""It calls save_token if payload is an int or raises BadRequest"""
token = "my_other_token"
payload = 4
res = logout_user(token, payload)
assert res is True
class TestUserService:
"""Testing user service"""
new_user = {
"email": "<EMAIL>",
"username": "testUser",
"password": "<PASSWORD>",
}
def test_get_users(self, db) -> None:
"""It returns all the registered users"""
users = get_users()
assert len(users) >= 1
def test_get_user_pass(self, db) -> None:
"""It returns the user by public_id"""
user = get_user("<PASSWORD>-1683-4cb4-aa15-10728dd83ac9")
assert user is not None
def test_get_user_fail(self, db) -> None:
"""It raises an exception if user doesn't exist"""
with pytest.raises(BadRequest):
get_user("<EMAIL>")
def test_save_new_user_pass(self, db) -> None:
"""It creates a new user"""
user = save_new_user(self.new_user)
assert user is not None
def test_save_new_user_fail(self, db) -> None:
"""It raises a BadRequest if user already exists"""
with pytest.raises(BadRequest):
save_new_user(
{
"email": "<EMAIL>",
"username": "<EMAIL>",
"password": "<PASSWORD>",
}
)
class TestPlaylistService:
"""Testing playlist service"""
new_playlist = {
"datasource": "reddit",
"screen_name": "new name",
"playlist_link": "https://somelink12.com",
"playlist_description": "This is a description",
"created_by": 1,
}
def test_get_playlists(self, db) -> None:
"""It returns all the spotify playlists"""
playlists = get_playlists()
assert len(playlists) >= 3
def test_get_playlist_pass(self, db) -> None:
"""It gets a spotify playlist by id"""
playlist = get_playlist("a600f327-a53c-4d2d-8d31-41f5a8b81121")
assert playlist is not None
def test_get_playlist_fail(self, db) -> None:
"""It raises a BadRequest when id does not exist"""
with pytest.raises(BadRequest):
get_playlist("a600f327-a53c-4d2d-8d31-41f5a8b81122")
with pytest.raises(BadRequest):
get_playlist("12")
def test_put_playlist_pass(self, db) -> None:
"""It adds or edits a spotify playlist by id"""
playlist = put_playlist(
"87569b1d-8975-4522-97a4-039346c53512", self.new_playlist, 1
)
assert playlist is not None
def test_put_playlist_fail(self, db) -> None:
"""It raises a BadRequest when id does not exist"""
with pytest.raises(BadRequest):
put_playlist("12", self.new_playlist, 1)
def test_save_new_playlist_pass(self, db) -> None:
payload = self.new_playlist
payload["playlist_link"] = "somelink"
playlist = save_new_playlist(self.new_playlist, 1)
assert playlist is not None
def test_save_new_playlist_fail(self, db) -> None:
with pytest.raises(BadRequest):
save_new_playlist(self.new_playlist, 1) | tests/test_services.py | import pytest
from app.api.services import (
save_token,
login_user,
logout_user,
get_users,
get_user,
save_new_user,
get_playlists,
get_playlist,
put_playlist,
save_new_playlist,
)
from app.api.models import BlacklistToken
from app.api.errors import BadRequest
class TestAuthService:
"""Testing auth service"""
def test_save_token_pass(self, db) -> None:
"""It saves the token to the blacklist table"""
token = "my_token"
res = save_token(token)
assert res is True
assert BlacklistToken.check_blacklist(token) is True
def test_login_user_pass(self, db) -> None:
"""It finds the existing user by email and returns a JWT Token"""
auth_data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
token = login_user(auth_data)
assert token is not None
def test_login_user_fail(self, db) -> None:
"""It raises an BadRequest exception when credentials don't match"""
auth_data = {"email": "<EMAIL>", "password": "<PASSWORD>"}
with pytest.raises(BadRequest):
login_user(auth_data)
def test_logout_user(self, db) -> None:
"""It calls save_token if payload is an int or raises BadRequest"""
token = "my_other_token"
payload = 4
res = logout_user(token, payload)
assert res is True
class TestUserService:
"""Testing user service"""
new_user = {
"email": "<EMAIL>",
"username": "testUser",
"password": "<PASSWORD>",
}
def test_get_users(self, db) -> None:
"""It returns all the registered users"""
users = get_users()
assert len(users) >= 1
def test_get_user_pass(self, db) -> None:
"""It returns the user by public_id"""
user = get_user("<PASSWORD>-1683-4cb4-aa15-10728dd83ac9")
assert user is not None
def test_get_user_fail(self, db) -> None:
"""It raises an exception if user doesn't exist"""
with pytest.raises(BadRequest):
get_user("<EMAIL>")
def test_save_new_user_pass(self, db) -> None:
"""It creates a new user"""
user = save_new_user(self.new_user)
assert user is not None
def test_save_new_user_fail(self, db) -> None:
"""It raises a BadRequest if user already exists"""
with pytest.raises(BadRequest):
save_new_user(
{
"email": "<EMAIL>",
"username": "<EMAIL>",
"password": "<PASSWORD>",
}
)
class TestPlaylistService:
"""Testing playlist service"""
new_playlist = {
"datasource": "reddit",
"screen_name": "new name",
"playlist_link": "https://somelink12.com",
"playlist_description": "This is a description",
"created_by": 1,
}
def test_get_playlists(self, db) -> None:
"""It returns all the spotify playlists"""
playlists = get_playlists()
assert len(playlists) >= 3
def test_get_playlist_pass(self, db) -> None:
"""It gets a spotify playlist by id"""
playlist = get_playlist("a600f327-a53c-4d2d-8d31-41f5a8b81121")
assert playlist is not None
def test_get_playlist_fail(self, db) -> None:
"""It raises a BadRequest when id does not exist"""
with pytest.raises(BadRequest):
get_playlist("a600f327-a53c-4d2d-8d31-41f5a8b81122")
with pytest.raises(BadRequest):
get_playlist("12")
def test_put_playlist_pass(self, db) -> None:
"""It adds or edits a spotify playlist by id"""
playlist = put_playlist(
"87569b1d-8975-4522-97a4-039346c53512", self.new_playlist, 1
)
assert playlist is not None
def test_put_playlist_fail(self, db) -> None:
"""It raises a BadRequest when id does not exist"""
with pytest.raises(BadRequest):
put_playlist("12", self.new_playlist, 1)
def test_save_new_playlist_pass(self, db) -> None:
payload = self.new_playlist
payload["playlist_link"] = "somelink"
playlist = save_new_playlist(self.new_playlist, 1)
assert playlist is not None
def test_save_new_playlist_fail(self, db) -> None:
with pytest.raises(BadRequest):
save_new_playlist(self.new_playlist, 1) | 0.469277 | 0.278714 |
import os
import bz2
import MySQLdb
import pandas as pd
import numpy as np
from collections import defaultdict
''' Connect to DB '''
db = MySQLdb.connect(host=os.environ.get("DATAVIVA_DB_HOST", "localhost"), user=os.environ[
"DATAVIVA_DB_USER"], passwd=os.environ["DATAVIVA_DB_PW"], db=os.environ["DATAVIVA_DB_NAME"])
cursor = db.cursor()
missing = {
"bra_id": defaultdict(int),
"school_id": defaultdict(int),
"course_sc_id": defaultdict(int)
}
cursor.execute(
"select id_ibge, id from attrs_bra where id_ibge is not null and length(id) = 9;")
bra_lookup = {str(r[0]): r[1] for r in cursor.fetchall()}
cursor.execute("select id from attrs_school;")
school_lookup = {str(r[0]): str(r[0]) for r in cursor.fetchall()}
cursor.execute("select id from attrs_course_sc;")
course_lookup = {str(r[0]): str(r[0]) for r in cursor.fetchall()}
BASIC_EDU_CODE = 'xx'
proper_age_map = {
"xx002": 6 + 2,
"xx003": 7 + 2,
"xx004": 8 + 2,
"xx005": 9 + 2,
"xx006": 10 + 2,
"xx007": 11 + 2,
"xx008": 12 + 2,
"xx009": 13 + 2,
"xx010": 14 + 2,
"xx011": 15 + 2,
"xx012": 16 + 2,
"xx013": 17 + 2,
"xx014": 18 + 2,
"xx016": 15 + 2,
"xx017": 16 + 2,
"xx018": 17 + 2,
"xx019": 18 + 2,
}
def floatvert(x):
x = x.replace(',', '.')
try:
return float(x)
except:
return np.nan
def bra_replace(raw):
try:
return bra_lookup[str(raw).strip()]
except:
missing["bra_id"][raw] += 1
return None
def school_replace(raw):
try:
return school_lookup[str(raw).strip()]
except:
missing["school_id"][raw] += 1
return None
def course_replace(raw):
try:
return course_lookup[str(raw).strip().zfill(5) if len(raw) > 0 else str(raw)]
except:
return BASIC_EDU_CODE # -- if missing give BASIC edu code
def edu_level_replace(raw):
return str(raw).zfill(3)
def to_df(file_path, indexes=None):
if "bz2" in file_path:
input_file = bz2.BZ2File(file_path)
else:
input_file = open(file_path, "rU")
if indexes:
converters = {"course_hedu_id": str, "school_id": str}
df = pd.read_csv(
input_file, sep="\t", converters=converters, engine='python')
df = df.set_index(indexes)
else:
cols = ["year", "enroll_id", "student_id", "age", "gender", "color", "edu_mode",
"edu_level", "edu_level_new", "edu", "class_id", "course_sc_id", "school_id",
"bra_id_lives", "location_lives", "bra_id", "loc", "school_type"]
delim = ";"
coerce_cols = {"bra_id": bra_replace, "bra_id_lives": bra_replace, "school_id": school_replace,
"course_sc_id": course_replace, "edu_level_new": edu_level_replace}
df = pd.read_csv(
input_file, header=0, sep=delim, names=cols, converters=coerce_cols)
df = df[["year", "enroll_id", "edu_level_new", "school_id",
"course_sc_id", "class_id", "bra_id", "age", "bra_id_lives"]]
print "Calculating Course IDs for basic education..."
df.loc[df['course_sc_id'] == BASIC_EDU_CODE, 'course_sc_id'] = df['course_sc_id'] + df.edu_level_new
df['course_sc_id'] = df['course_sc_id'].str.replace(' ', '0')
print "Calculating proper age..."
df["distorted_age"] = df.course_sc_id.map(proper_age_map)
df.loc[df['distorted_age'].notnull(), 'distorted_age'] = (df.age >= df.distorted_age).astype(int)
for col, missings in missing.items():
if not len(missings):
continue
num_rows = df.shape[0]
print
print "[WARNING]"
print "The following {0} IDs are not in the DB. Total: ".format(col, num_rows)
print list(missings)
return df | scripts/sc/_to_df.py | import os
import bz2
import MySQLdb
import pandas as pd
import numpy as np
from collections import defaultdict
''' Connect to DB '''
db = MySQLdb.connect(host=os.environ.get("DATAVIVA_DB_HOST", "localhost"), user=os.environ[
"DATAVIVA_DB_USER"], passwd=os.environ["DATAVIVA_DB_PW"], db=os.environ["DATAVIVA_DB_NAME"])
cursor = db.cursor()
missing = {
"bra_id": defaultdict(int),
"school_id": defaultdict(int),
"course_sc_id": defaultdict(int)
}
cursor.execute(
"select id_ibge, id from attrs_bra where id_ibge is not null and length(id) = 9;")
bra_lookup = {str(r[0]): r[1] for r in cursor.fetchall()}
cursor.execute("select id from attrs_school;")
school_lookup = {str(r[0]): str(r[0]) for r in cursor.fetchall()}
cursor.execute("select id from attrs_course_sc;")
course_lookup = {str(r[0]): str(r[0]) for r in cursor.fetchall()}
BASIC_EDU_CODE = 'xx'
proper_age_map = {
"xx002": 6 + 2,
"xx003": 7 + 2,
"xx004": 8 + 2,
"xx005": 9 + 2,
"xx006": 10 + 2,
"xx007": 11 + 2,
"xx008": 12 + 2,
"xx009": 13 + 2,
"xx010": 14 + 2,
"xx011": 15 + 2,
"xx012": 16 + 2,
"xx013": 17 + 2,
"xx014": 18 + 2,
"xx016": 15 + 2,
"xx017": 16 + 2,
"xx018": 17 + 2,
"xx019": 18 + 2,
}
def floatvert(x):
x = x.replace(',', '.')
try:
return float(x)
except:
return np.nan
def bra_replace(raw):
try:
return bra_lookup[str(raw).strip()]
except:
missing["bra_id"][raw] += 1
return None
def school_replace(raw):
try:
return school_lookup[str(raw).strip()]
except:
missing["school_id"][raw] += 1
return None
def course_replace(raw):
try:
return course_lookup[str(raw).strip().zfill(5) if len(raw) > 0 else str(raw)]
except:
return BASIC_EDU_CODE # -- if missing give BASIC edu code
def edu_level_replace(raw):
return str(raw).zfill(3)
def to_df(file_path, indexes=None):
if "bz2" in file_path:
input_file = bz2.BZ2File(file_path)
else:
input_file = open(file_path, "rU")
if indexes:
converters = {"course_hedu_id": str, "school_id": str}
df = pd.read_csv(
input_file, sep="\t", converters=converters, engine='python')
df = df.set_index(indexes)
else:
cols = ["year", "enroll_id", "student_id", "age", "gender", "color", "edu_mode",
"edu_level", "edu_level_new", "edu", "class_id", "course_sc_id", "school_id",
"bra_id_lives", "location_lives", "bra_id", "loc", "school_type"]
delim = ";"
coerce_cols = {"bra_id": bra_replace, "bra_id_lives": bra_replace, "school_id": school_replace,
"course_sc_id": course_replace, "edu_level_new": edu_level_replace}
df = pd.read_csv(
input_file, header=0, sep=delim, names=cols, converters=coerce_cols)
df = df[["year", "enroll_id", "edu_level_new", "school_id",
"course_sc_id", "class_id", "bra_id", "age", "bra_id_lives"]]
print "Calculating Course IDs for basic education..."
df.loc[df['course_sc_id'] == BASIC_EDU_CODE, 'course_sc_id'] = df['course_sc_id'] + df.edu_level_new
df['course_sc_id'] = df['course_sc_id'].str.replace(' ', '0')
print "Calculating proper age..."
df["distorted_age"] = df.course_sc_id.map(proper_age_map)
df.loc[df['distorted_age'].notnull(), 'distorted_age'] = (df.age >= df.distorted_age).astype(int)
for col, missings in missing.items():
if not len(missings):
continue
num_rows = df.shape[0]
print
print "[WARNING]"
print "The following {0} IDs are not in the DB. Total: ".format(col, num_rows)
print list(missings)
return df | 0.196363 | 0.201165 |
import cvmfs
import sys
class MerkleCatalogTreeIterator(cvmfs.CatalogTreeIterator):
def __init__(self, repository, root_catalog, visited_hashes = set()):
cvmfs.CatalogTreeIterator.__init__(self, repository, root_catalog)
self.visited_hashes = visited_hashes
def next(self):
catalog = cvmfs.CatalogTreeIterator.next(self)
self.visited_hashes.add(catalog.hash)
return catalog
def _push_catalog_wrapper(self, catalog):
if not catalog.catalog_reference or \
catalog.catalog_reference.hash not in self.visited_hashes:
cvmfs.CatalogTreeIterator._push_catalog_wrapper(self, catalog)
def usage():
print sys.argv[0] + "<repo url/path> <download destination> [<history depth>]"
print "Downloads the whole catalog graph of a given repository."
print "The optional <history depth> puts a threshold on how many historic"
print "catalog tree revisions should be downloaded (default: all)"
if len(sys.argv) < 3 or len(sys.argv) > 4:
usage()
sys.exit(1)
dest = sys.argv[2]
repo = cvmfs.open_repository(sys.argv[1])
depth = sys.argv[3] if len(sys.argv) == 4 else 0
try:
depth = int(depth)
except ValueError, e:
usage()
print
print "<history depth> needs to be an integer"
sys.exit(1)
if depth == 0:
print "Downloading entire catalog tree from " + repo.manifest.repository_name
else:
print "Downloading last" , depth , "catalog revisions from " + repo.manifest.repository_name
root_clg = repo.retrieve_root_catalog()
visited_hashes = set()
while True:
next_root_clg = root_clg.get_predecessor()
for catalog in MerkleCatalogTreeIterator(repo, root_clg, visited_hashes):
if catalog.is_root():
print "Downloading revision" , catalog.revision , "..."
catalog.save_to(dest + "/" + catalog.hash + "C")
repo.close_catalog(catalog)
if depth > 0:
depth -= 1
if depth == 0:
print "all requested catalog tree revisions downloaded"
break
if next_root_clg != None:
try:
root_clg = next_root_clg.retrieve_from(repo)
except cvmfs.repository.FileNotFoundInRepository, e:
print "next root catalog not found (garbage collected?)"
break
else:
print "reached the end of the catalog chain"
break
print "Done (downloaded" , len(visited_hashes) , "catalogs)" | add-ons/tools/download_catalog_graph.py |
import cvmfs
import sys
class MerkleCatalogTreeIterator(cvmfs.CatalogTreeIterator):
def __init__(self, repository, root_catalog, visited_hashes = set()):
cvmfs.CatalogTreeIterator.__init__(self, repository, root_catalog)
self.visited_hashes = visited_hashes
def next(self):
catalog = cvmfs.CatalogTreeIterator.next(self)
self.visited_hashes.add(catalog.hash)
return catalog
def _push_catalog_wrapper(self, catalog):
if not catalog.catalog_reference or \
catalog.catalog_reference.hash not in self.visited_hashes:
cvmfs.CatalogTreeIterator._push_catalog_wrapper(self, catalog)
def usage():
print sys.argv[0] + "<repo url/path> <download destination> [<history depth>]"
print "Downloads the whole catalog graph of a given repository."
print "The optional <history depth> puts a threshold on how many historic"
print "catalog tree revisions should be downloaded (default: all)"
if len(sys.argv) < 3 or len(sys.argv) > 4:
usage()
sys.exit(1)
dest = sys.argv[2]
repo = cvmfs.open_repository(sys.argv[1])
depth = sys.argv[3] if len(sys.argv) == 4 else 0
try:
depth = int(depth)
except ValueError, e:
usage()
print
print "<history depth> needs to be an integer"
sys.exit(1)
if depth == 0:
print "Downloading entire catalog tree from " + repo.manifest.repository_name
else:
print "Downloading last" , depth , "catalog revisions from " + repo.manifest.repository_name
root_clg = repo.retrieve_root_catalog()
visited_hashes = set()
while True:
next_root_clg = root_clg.get_predecessor()
for catalog in MerkleCatalogTreeIterator(repo, root_clg, visited_hashes):
if catalog.is_root():
print "Downloading revision" , catalog.revision , "..."
catalog.save_to(dest + "/" + catalog.hash + "C")
repo.close_catalog(catalog)
if depth > 0:
depth -= 1
if depth == 0:
print "all requested catalog tree revisions downloaded"
break
if next_root_clg != None:
try:
root_clg = next_root_clg.retrieve_from(repo)
except cvmfs.repository.FileNotFoundInRepository, e:
print "next root catalog not found (garbage collected?)"
break
else:
print "reached the end of the catalog chain"
break
print "Done (downloaded" , len(visited_hashes) , "catalogs)" | 0.277473 | 0.181844 |
from __future__ import annotations
import toolcli
from toolcli.command_utils import help_utils
def get_cd_help(parse_spec: toolcli.ParseSpec) -> str:
program_name = parse_spec.get('config', {}).get('base_command', 'PROGRAM')
return 'change working directory to ' + program_name + '-related location'
def get_command_spec() -> toolcli.CommandSpec:
return {
'f': cd_command,
'help': get_cd_help,
'args': [
{'name': 'dirname', 'help': 'name of directory'},
],
'extra_data': ['cd_destination_tempfile', 'parse_spec'],
}
cd_snippet_template = """function {program_name} {
local tempfile="$(mktemp -t tmp.XXXXXX)"
command {program_name} "$@" --cd-destination-tempfile "$tempfile"
if [[ -s "$tempfile" ]]; then
cd "$(realpath $(cat "$tempfile"))"
fi
rm -f "$tempfile" 2>/dev/null
}"""
def cd_command(
dirname: str,
cd_destination_tempfile: str,
parse_spec: toolcli.ParseSpec,
) -> None:
if cd_destination_tempfile is None:
print('using the cd subcommand requires special configuration')
print()
print(
'add the following snippet to your shell config (e.g. ~/.profile):'
)
default_name = '<PROGRAM_NAME>'
program_name = parse_spec.get('config', {}).get(
'base_command', default_name
)
cd_snippet = cd_snippet_template.replace('{program_name}', program_name)
print()
print(cd_snippet)
if program_name == default_name:
print()
print('where', default_name, 'is the name of the root command')
return
# get path
getter = parse_spec['config'].get('cd_dir_getter')
if getter is None:
raise Exception('must specify path getter')
try:
path = getter(dirname)
except Exception:
print('could not find path')
print()
help_utils.print_cd_dirs(parse_spec=parse_spec)
return
# change pwd to path
with open(cd_destination_tempfile, 'w') as f:
f.write(path) | toolcli/command_utils/standard_subcommands/cd_command.py | from __future__ import annotations
import toolcli
from toolcli.command_utils import help_utils
def get_cd_help(parse_spec: toolcli.ParseSpec) -> str:
program_name = parse_spec.get('config', {}).get('base_command', 'PROGRAM')
return 'change working directory to ' + program_name + '-related location'
def get_command_spec() -> toolcli.CommandSpec:
return {
'f': cd_command,
'help': get_cd_help,
'args': [
{'name': 'dirname', 'help': 'name of directory'},
],
'extra_data': ['cd_destination_tempfile', 'parse_spec'],
}
cd_snippet_template = """function {program_name} {
local tempfile="$(mktemp -t tmp.XXXXXX)"
command {program_name} "$@" --cd-destination-tempfile "$tempfile"
if [[ -s "$tempfile" ]]; then
cd "$(realpath $(cat "$tempfile"))"
fi
rm -f "$tempfile" 2>/dev/null
}"""
def cd_command(
dirname: str,
cd_destination_tempfile: str,
parse_spec: toolcli.ParseSpec,
) -> None:
if cd_destination_tempfile is None:
print('using the cd subcommand requires special configuration')
print()
print(
'add the following snippet to your shell config (e.g. ~/.profile):'
)
default_name = '<PROGRAM_NAME>'
program_name = parse_spec.get('config', {}).get(
'base_command', default_name
)
cd_snippet = cd_snippet_template.replace('{program_name}', program_name)
print()
print(cd_snippet)
if program_name == default_name:
print()
print('where', default_name, 'is the name of the root command')
return
# get path
getter = parse_spec['config'].get('cd_dir_getter')
if getter is None:
raise Exception('must specify path getter')
try:
path = getter(dirname)
except Exception:
print('could not find path')
print()
help_utils.print_cd_dirs(parse_spec=parse_spec)
return
# change pwd to path
with open(cd_destination_tempfile, 'w') as f:
f.write(path) | 0.414662 | 0.070816 |
import logging
from app import app
from app.models.sequence import Sequence
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.exc import MultipleResultsFound
import pdb
from .logger import ProjectLogger
LEMMA = "lemma"
WORD = "word"
class SequenceProcessor(object):
"""Process given input into Sequences.
"""
def __init__(self, project):
"""Set up local variables for the SequenceProcessor.
"""
self.project = project
self.previously_indexed = []
self.logger = logging.getLogger(__name__)
self.project_logger = ProjectLogger(self.logger, project)
def remove_stops(self, words):
"""Remove every sort of stop from the sentences.
:param list words: A list of WordInSentence objects.
:return list: The list without stops.
"""
without_stops = []
for word in words:
if word.word.lemma not in app.config["STOPWORDS"]:
without_stops.append(word)
return without_stops
def process(self, sentence, sequence_dict=None, sequence_length=4):
"""Iterate and record every sequence with length <= `sequence_length.
The method records using the ReaderWriter a list of sequences present
in the given sentence.
:param Sentence sentence: The sentence to process,
:return list: A list of Sequence objects, representing the results
of processing. These sequences are also sent to the ReaderWriter.
"""
sequences = [] # a list of Sequences
for i in range(0, len(sentence.words)):
# Iterate through every word
self.previously_indexed = []
for j in range(i+1, len(sentence.words) + 1):
# Check every word after the one at i
if j - i <= sequence_length:
# If this word is no more than `sequence_length` words away from i,
# create a new Sequence
sequences.extend(self.get_sequence(sentence, i, j))
# Write the sequences to the database using duplication check
if isinstance(sequence_dict, dict):
for sequence in sequences:
sequence_text = sequence["sequence"]
lemmatized = sequence["is_lemmatized"]
has_function_words = sequence["has_function_words"]
all_function_words = sequence["all_function_words"]
length = len(sequence["words"])
position = sequence["start_position"]
words = sequence["words"]
key = sequence_text
if key in sequence_dict.keys():
sequence = sequence_dict[key]
else:
try:
sequence = Sequence.query.\
filter_by(sequence = sequence_text,
project=self.project).one()
except(MultipleResultsFound):
self.project_logger.error("Duplicate records found "
"for: %s", str(key))
except(NoResultFound):
sequence = Sequence(
sequence = sequence_text,
lemmatized = lemmatized,
has_function_words = has_function_words,
all_function_words = all_function_words,
length = length,
project=self.project,
words = words
)
sequence.save(False)
sequence_dict[key] = sequence
sentence.add_sequence(
sequence = sequence,
position = position,
project = self.project,
force = False
)
return sequences
def get_sequence(self, sentence, i, j):
    """Handle the main processing part in the process() loop.

    Builds up to four sequence dicts for the word span [i, j) of `sentence`:
    the surface phrase, the surface phrase without stopwords, the lemmatized
    phrase, and the lemmatized phrase without stopwords.
    `self.previously_indexed` (reset per starting position by process())
    suppresses duplicate phrases.

    :param Sentence sentence: A sentence object to create sequences from.
    :param int i: The index to start the sequence from, inclusive.
    :param int j: The index to stop the sequence from, exclusive.
    :return list: A list of dicts representing sequences.
    """
    sequences = []
    rel_list = sentence.word_in_sentence[i:j]  # all the words in the span
    word_list = [rel.word for rel in rel_list]
    surface_phrase = join_words(rel_list, WORD)
    if surface_phrase in self.previously_indexed:
        # If we've already seen this phrase, don't bother
        return sequences
    lemmatized_phrase = join_words(rel_list, LEMMA)
    rel_list_nostops = self.remove_stops(rel_list)
    word_list_nostops = [rel.word for rel in rel_list_nostops]
    lemmatized_phrase_nostops = join_words(rel_list_nostops, LEMMA)
    surface_phrase_nostops = join_words(rel_list_nostops, WORD)
    # TODO: Aditi says it's possible to remove these checks, should
    # see if that's doable after the unit test is written
    has_stops = len(rel_list_nostops) < len(rel_list)
    # NOTE(review): this compares *string* lengths of the joined phrases,
    # not word counts (unlike has_stops above) -- confirm that is intended.
    lemmatized_has_stops = (len(lemmatized_phrase_nostops) <
                            len(lemmatized_phrase))
    all_stop_words = len(rel_list_nostops) == 0
    lemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0
    # Definitely make a Sequence of the surface_phrase
    sequences.append({"start_position": i,
                      "sentence_id": sentence.id,
                      "document_id": sentence.document_id,
                      "sequence": surface_phrase,
                      "is_lemmatized": False,
                      "has_function_words": has_stops,
                      "all_function_words": all_stop_words,
                      "words": word_list})
    self.previously_indexed.append(surface_phrase)
    # If it's not just stops, has stops, and the first word isn't a stop,
    # and it hasn't been indexed, then make a Sequence from the nostop SP
    if (has_stops and not  # Should have stops to avoid duplicate
            all_stop_words and
            rel_list_nostops[0] == rel_list[0] and not
            surface_phrase_nostops in self.previously_indexed):
        sequences.append({"start_position": i,
                          "sentence_id": sentence.id,
                          "document_id": sentence.document_id,
                          "sequence": surface_phrase_nostops,
                          "is_lemmatized": False,
                          "has_function_words": False,
                          "all_function_words": False,
                          "words": word_list_nostops})
        self.previously_indexed.append(surface_phrase_nostops)
    # Definitely make a Sequence of the lemmatized_phrase
    sequences.append({"start_position": i,
                      "sentence_id": sentence.id,
                      "document_id": sentence.document_id,
                      "sequence": lemmatized_phrase,
                      "is_lemmatized": True,
                      "has_function_words": lemmatized_has_stops,
                      "all_function_words": lemmatized_all_stop_words,
                      "words": word_list})
    self.previously_indexed.append(lemmatized_phrase)
    # Maybe make a sequence of the lemmatized_phrase_nostop
    if (lemmatized_has_stops and not
            lemmatized_all_stop_words and
            rel_list_nostops[0] == rel_list[0] and not
            lemmatized_phrase_nostops in self.previously_indexed):
        # We don't add this one to previously_indexed
        sequences.append({"start_position": i,
                          "sentence_id": sentence.id,
                          "document_id": sentence.document_id,
                          "sequence": lemmatized_phrase_nostops,
                          "is_lemmatized": True,
                          "has_function_words": False,
                          "all_function_words": False,
                          "words": word_list_nostops})
    return sequences
def join_words(words, attr):
    """Join either lemmas or surface words from a list of `WordInSentence`
    objects.

    :param list words: A list of WordInSentence objects.
    :param str attr: Either sequenceprocessor.LEMMA to combine lemmas or
        sequenceprocessor.WORD to combine words.
    :return str: The combined phrase, space-separated.
    :raises ValueError: If `attr` is neither LEMMA nor WORD.  (The original
        silently returned "" here, which hid caller bugs.)
    """
    if attr == LEMMA:
        tokens = [word.word.lemma for word in words]
    elif attr == WORD:
        tokens = [word.surface for word in words]
    else:
        raise ValueError("attr must be LEMMA or WORD, got %r" % (attr,))
    return " ".join(tokens)
from app import app
from app.models.sequence import Sequence
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy.orm.exc import MultipleResultsFound
import pdb
from .logger import ProjectLogger
LEMMA = "lemma"
WORD = "word"
class SequenceProcessor(object):
    """Extract word Sequences (n-grams up to a fixed length) from sentences
    and persist them for the current project.
    """

    def __init__(self, project):
        """Set up local variables for the SequenceProcessor.

        :param project: The project whose sentences are being processed.
        """
        self.project = project
        self.previously_indexed = []
        self.logger = logging.getLogger(__name__)
        self.project_logger = ProjectLogger(self.logger, project)

    def remove_stops(self, words):
        """Remove every sort of stop from the sentences.

        :param list words: A list of WordInSentence objects.
        :return list: The list without stops.
        """
        # Stopword membership is tested on the lemma, per app configuration.
        return [word for word in words
                if word.word.lemma not in app.config["STOPWORDS"]]

    def process(self, sentence, sequence_dict=None, sequence_length=4):
        """Iterate and record every sequence with length <= `sequence_length`.

        :param Sentence sentence: The sentence to process.
        :param dict sequence_dict: Optional cache mapping sequence text to an
            existing Sequence record; consulted before the database and
            updated with fetched or newly created records.
        :param int sequence_length: Maximum number of words per sequence.
        :return list: A list of dicts describing the extracted sequences.
        """
        sequences = []  # a list of sequence description dicts
        for i in range(len(sentence.words)):
            # previously_indexed tracks duplicates per starting position
            self.previously_indexed = []
            for j in range(i + 1, len(sentence.words) + 1):
                if j - i <= sequence_length:
                    sequences.extend(self.get_sequence(sentence, i, j))
        # Write the sequences to the database using duplication check
        if isinstance(sequence_dict, dict):
            # NOTE: distinct loop-variable name so the Sequence record never
            # gets shadowed by the raw dict (the original reused `sequence`
            # for both, so MultipleResultsFound fell through with the dict).
            for seq_info in sequences:
                sequence_text = seq_info["sequence"]
                key = sequence_text
                if key in sequence_dict:
                    sequence = sequence_dict[key]
                else:
                    try:
                        sequence = Sequence.query.\
                            filter_by(sequence=sequence_text,
                                      project=self.project).one()
                    except MultipleResultsFound:
                        # Data-integrity problem: log and skip rather than
                        # attaching an ambiguous record.
                        self.project_logger.error("Duplicate records found "
                                                  "for: %s", str(key))
                        continue
                    except NoResultFound:
                        sequence = Sequence(
                            sequence=sequence_text,
                            lemmatized=seq_info["is_lemmatized"],
                            has_function_words=seq_info["has_function_words"],
                            all_function_words=seq_info["all_function_words"],
                            length=len(seq_info["words"]),
                            project=self.project,
                            words=seq_info["words"]
                        )
                        sequence.save(False)
                    sequence_dict[key] = sequence
                sentence.add_sequence(
                    sequence=sequence,
                    position=seq_info["start_position"],
                    project=self.project,
                    force=False
                )
        return sequences

    def get_sequence(self, sentence, i, j):
        """Handle the main processing part in the process() loop.

        Builds up to four sequence dicts for the word span [i, j):
        surface phrase, surface phrase without stopwords, lemmatized
        phrase, and lemmatized phrase without stopwords.

        :param Sentence sentence: A sentence object to create sequences from.
        :param int i: The index to start the sequence from, inclusive.
        :param int j: The index to stop the sequence from, exclusive.
        :return list: A list of dicts representing sequences.
        """
        sequences = []
        rel_list = sentence.word_in_sentence[i:j]  # all the words in the span
        word_list = [rel.word for rel in rel_list]
        surface_phrase = join_words(rel_list, WORD)
        if surface_phrase in self.previously_indexed:
            # If we've already seen this phrase, don't bother
            return sequences
        lemmatized_phrase = join_words(rel_list, LEMMA)
        rel_list_nostops = self.remove_stops(rel_list)
        word_list_nostops = [rel.word for rel in rel_list_nostops]
        lemmatized_phrase_nostops = join_words(rel_list_nostops, LEMMA)
        surface_phrase_nostops = join_words(rel_list_nostops, WORD)
        # TODO: Aditi says it's possible to remove these checks, should
        # see if that's doable after the unit test is written
        has_stops = len(rel_list_nostops) < len(rel_list)
        # NOTE(review): this compares *string* lengths of the joined phrases,
        # not word counts -- confirm that is intended.
        lemmatized_has_stops = (len(lemmatized_phrase_nostops) <
                                len(lemmatized_phrase))
        all_stop_words = len(rel_list_nostops) == 0
        lemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0
        # Definitely make a Sequence of the surface_phrase
        sequences.append({"start_position": i,
                          "sentence_id": sentence.id,
                          "document_id": sentence.document_id,
                          "sequence": surface_phrase,
                          "is_lemmatized": False,
                          "has_function_words": has_stops,
                          "all_function_words": all_stop_words,
                          "words": word_list})
        self.previously_indexed.append(surface_phrase)
        # If it's not just stops, has stops, and the first word isn't a stop,
        # and it hasn't been indexed, then make a Sequence from the nostop SP
        if (has_stops and not  # Should have stops to avoid duplicate
                all_stop_words and
                rel_list_nostops[0] == rel_list[0] and not
                surface_phrase_nostops in self.previously_indexed):
            sequences.append({"start_position": i,
                              "sentence_id": sentence.id,
                              "document_id": sentence.document_id,
                              "sequence": surface_phrase_nostops,
                              "is_lemmatized": False,
                              "has_function_words": False,
                              "all_function_words": False,
                              "words": word_list_nostops})
            self.previously_indexed.append(surface_phrase_nostops)
        # Definitely make a Sequence of the lemmatized_phrase
        sequences.append({"start_position": i,
                          "sentence_id": sentence.id,
                          "document_id": sentence.document_id,
                          "sequence": lemmatized_phrase,
                          "is_lemmatized": True,
                          "has_function_words": lemmatized_has_stops,
                          "all_function_words": lemmatized_all_stop_words,
                          "words": word_list})
        self.previously_indexed.append(lemmatized_phrase)
        # Maybe make a sequence of the lemmatized_phrase_nostop
        if (lemmatized_has_stops and not
                lemmatized_all_stop_words and
                rel_list_nostops[0] == rel_list[0] and not
                lemmatized_phrase_nostops in self.previously_indexed):
            # We don't add this one to previously_indexed
            sequences.append({"start_position": i,
                              "sentence_id": sentence.id,
                              "document_id": sentence.document_id,
                              "sequence": lemmatized_phrase_nostops,
                              "is_lemmatized": True,
                              "has_function_words": False,
                              "all_function_words": False,
                              "words": word_list_nostops})
        return sequences
def join_words(words, attr):
    """Join either lemmas or surface words from a list of `WordInSentence`
    objects.

    :param list words: A list of WordInSentence objects.
    :param str attr: Either sequenceprocessor.LEMMA to combine lemmas or
        sequenceprocessor.WORD to combine surface words.
    :return str: The combined sentence (empty for an unrecognized `attr`).
    """
    if attr == LEMMA:
        parts = [item.word.lemma for item in words]
    elif attr == WORD:
        parts = [item.surface for item in words]
    else:
        parts = []
    return " ".join(parts)
from pathlib import Path
from timeit import default_timer as timer
import h5py
import numpy as np
import torch
from methods.utils.data_utilities import (_segment_index, load_dcase_format,
to_metrics2020_format)
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from utils.common import int16_samples_to_float32
class UserDataset(Dataset):
    """User defined dataset.

    Splits each fixed-length clip into (possibly overlapping) segments and
    serves waveform segments plus, for train/valid, SED and DOA labels.
    Segments are either preloaded into memory (`args.read_into_mem`) or read
    from HDF5 on every __getitem__.
    """

    def __init__(self, args, cfg, dataset, dataset_type='train', overlap=''):
        """
        Args:
            args: input args
            cfg: configurations
            dataset: dataset used
            dataset_type: 'train' | 'valid' | 'dev_test' | 'eval_test'
            overlap: '1' | '2'
        """
        super().__init__()
        self.dataset_type = dataset_type
        self.read_into_mem = args.read_into_mem
        self.sample_rate = cfg['data']['sample_rate']
        self.clip_length = dataset.clip_length
        self.label_resolution = dataset.label_resolution
        self.frame_length = int(self.clip_length / self.label_resolution)
        self.label_interp_ratio = int(self.label_resolution * self.sample_rate / cfg['data']['hop_length'])

        # Chunklen and hoplen and segmentation. Since all of the clips have
        # the same length, it only needs to be segmented once here.
        data = np.zeros((1, self.clip_length * self.sample_rate))
        if 'train' in self.dataset_type:
            chunklen = int(cfg['data']['train_chunklen_sec'] * self.sample_rate)
            hoplen = int(cfg['data']['train_hoplen_sec'] * self.sample_rate)
            self.segmented_indexes, self.segmented_pad_width = _segment_index(data, chunklen, hoplen)
        elif self.dataset_type in ['valid', 'dev_test', 'eval_test']:
            chunklen = int(cfg['data']['test_chunklen_sec'] * self.sample_rate)
            hoplen = int(cfg['data']['test_hoplen_sec'] * self.sample_rate)
            self.segmented_indexes, self.segmented_pad_width = _segment_index(
                data, chunklen, hoplen, last_frame_always_paddding=True)
        self.num_segments = len(self.segmented_indexes)

        # Data and meta paths
        fold_str_idx = dataset.fold_str_index
        ov_str_idx = dataset.ov_str_index
        data_sr_folder_name = '{}fs'.format(self.sample_rate)
        main_data_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('data').joinpath(data_sr_folder_name)
        dev_data_dir = main_data_dir.joinpath('dev').joinpath(cfg['data']['type'])
        eval_data_dir = main_data_dir.joinpath('eval').joinpath(cfg['data']['type'])
        main_meta_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('meta')
        dev_meta_dir = main_meta_dir.joinpath('dev')
        eval_meta_dir = main_meta_dir.joinpath('eval')

        # BUGFIX: `ov_set` was never assigned on the 'eval_test' path, so the
        # logging print below raised NameError when read_into_mem was set.
        ov_set = ''
        if self.dataset_type == 'train':
            data_dirs = [dev_data_dir]
            self.meta_dir = dev_meta_dir
            train_fold = [int(fold.strip()) for fold in str(cfg['training']['train_fold']).split(',')]
            ov_set = str(cfg['training']['overlap']) if not overlap else overlap
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                               if int(path.stem[fold_str_idx]) in train_fold
                               and path.stem[ov_str_idx] in ov_set
                               and not path.name.startswith('.')]
        elif self.dataset_type == 'valid':
            if cfg['training']['valid_fold'] != 'eval':
                data_dirs = [dev_data_dir]
                self.meta_dir = dev_meta_dir
                valid_fold = [int(fold.strip()) for fold in str(cfg['training']['valid_fold']).split(',')]
                ov_set = str(cfg['training']['overlap']) if not overlap else overlap
                self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                                   if int(path.stem[fold_str_idx]) in valid_fold
                                   and path.stem[ov_str_idx] in ov_set
                                   and not path.name.startswith('.')]
                ori_meta_dir = Path(cfg['dataset_dir']).joinpath('metadata_dev')
            else:
                data_dirs = [eval_data_dir]
                self.meta_dir = eval_meta_dir
                ov_set = str(cfg['training']['overlap']) if not overlap else overlap
                self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                                   if not path.name.startswith('.')]
                ori_meta_dir = Path(cfg['dataset_dir']).joinpath('metadata_eval')
            # Pre-compute ground truth in the formats the metric code expects.
            frame_begin_index = 0
            self.valid_gt_sed_metrics2019 = []
            self.valid_gt_doa_metrics2019 = []
            self.valid_gt_dcaseformat = {}
            for path in self.paths_list:
                ori_meta_path = ori_meta_dir.joinpath(path.stem + '.csv')
                output_dict, sed_metrics2019, doa_metrics2019 = \
                    load_dcase_format(ori_meta_path, frame_begin_index=frame_begin_index,
                                      frame_length=self.frame_length, num_classes=len(dataset.label_set))
                self.valid_gt_dcaseformat.update(output_dict)
                self.valid_gt_sed_metrics2019.append(sed_metrics2019)
                self.valid_gt_doa_metrics2019.append(doa_metrics2019)
                frame_begin_index += self.frame_length
            self.valid_gt_sed_metrics2019 = np.concatenate(self.valid_gt_sed_metrics2019, axis=0)
            self.valid_gt_doa_metrics2019 = np.concatenate(self.valid_gt_doa_metrics2019, axis=0)
            self.gt_metrics2020_dict = to_metrics2020_format(
                self.valid_gt_dcaseformat, self.valid_gt_sed_metrics2019.shape[0],
                label_resolution=self.label_resolution)
        elif self.dataset_type == 'dev_test':
            data_dirs = [dev_data_dir]
            self.meta_dir = dev_meta_dir
            dev_test_fold = [int(fold.strip()) for fold in str(cfg['inference']['test_fold']).split(',')]
            ov_set = str(cfg['inference']['overlap']) if not overlap else overlap
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                               if int(path.stem[fold_str_idx]) in dev_test_fold
                               and path.stem[ov_str_idx] in ov_set
                               and not path.name.startswith('.')]
        elif self.dataset_type == 'eval_test':
            data_dirs = [eval_data_dir]
            self.meta_dir = eval_meta_dir
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                               if not path.name.startswith('.')]

        # Expand each clip path into one pseudo-path per segment: 'name.h5%n'.
        self.paths_list = [Path(str(path) + '%' + str(n))
                           for path in self.paths_list for n in range(self.num_segments)]

        # Optionally preload every segment into memory.
        if self.read_into_mem:
            load_begin_time = timer()
            print('Start to load dataset: {}, ov={}......\n'.format(self.dataset_type + ' set', ov_set))
            iterator = tqdm(self.paths_list, total=len(self.paths_list), unit='clips')
            self.dataset_list = [self._load_segment(path) for path in iterator]
            iterator.close()
            print('Loading dataset time: {:.3f}\n'.format(timer() - load_begin_time))

    def _load_segment(self, path):
        """Load one segment (and labels for non-test sets) from HDF5.

        :param path: pseudo-path of the form '<clip>.h5%<segment_index>'.
        :return dict: sample with keys 'filename', 'n_segment', 'waveform'
            plus 'ov', 'sed_label', 'doa_label' for train/valid sets.
        """
        fn, n_segment = path.stem, int(path.name.split('%')[1])
        data_path = Path(str(path).split('%')[0])
        index_begin = self.segmented_indexes[n_segment][0]
        index_end = self.segmented_indexes[n_segment][1]
        pad_width_before = self.segmented_pad_width[n_segment][0]
        pad_width_after = self.segmented_pad_width[n_segment][1]
        with h5py.File(data_path, 'r') as hf:
            x = int16_samples_to_float32(hf['waveform'][:, index_begin: index_end])
        pad_width = ((0, 0), (pad_width_before, pad_width_after))
        x = np.pad(x, pad_width, mode='constant')
        if 'test' in self.dataset_type:
            return {'filename': fn, 'n_segment': n_segment, 'waveform': x}
        # Train/valid: also fetch the label frames for this segment.
        ov = fn[-1]  # overlap tag is the last character of the clip name
        index_begin_label = int(index_begin / (self.sample_rate * self.label_resolution))
        index_end_label = int(index_end / (self.sample_rate * self.label_resolution))
        pad_width_after_label = int(pad_width_after / (self.sample_rate * self.label_resolution))
        meta_path = self.meta_dir.joinpath(fn + '.h5')
        with h5py.File(meta_path, 'r') as hf:
            sed_label = hf['sed_label'][index_begin_label: index_end_label, ...]
            doa_label = hf['doa_label'][index_begin_label: index_end_label, ...]  # NOTE: Cartesian coordinates
        if pad_width_after_label != 0:
            # Zero-pad labels to the full segment length (2 tracks; 14
            # classes for SED, 3 coordinates for DOA).
            sed_label = np.concatenate(
                (sed_label, np.zeros((pad_width_after_label, 2, 14))), axis=0)
            doa_label = np.concatenate(
                (doa_label, np.zeros((pad_width_after_label, 2, 3))), axis=0)
        return {'filename': fn, 'n_segment': n_segment, 'ov': ov,
                'waveform': x, 'sed_label': sed_label, 'doa_label': doa_label}

    def __len__(self):
        """Get length of the dataset (number of segments)."""
        return len(self.paths_list)

    def __getitem__(self, idx):
        """Read features (and labels for non-test sets) for segment `idx`."""
        if self.read_into_mem:
            # Shallow copy so callers get a fresh dict each call, matching
            # the original behavior (values still share the cached arrays).
            return dict(self.dataset_list[idx])
        return self._load_segment(self.paths_list[idx])
class UserBatchSampler(Sampler):
    """Batch sampler for the train set: yields shuffled index batches
    indefinitely, with checkpointable state for resumable training.
    """

    def __init__(self, clip_num, batch_size, seed=2020):
        self.clip_num = clip_num
        self.batch_size = batch_size
        self.random_state = np.random.RandomState(seed)
        self.indexes = np.arange(self.clip_num)
        self.random_state.shuffle(self.indexes)
        self.pointer = 0

    def get_state(self):
        """Snapshot RNG state, index permutation, and cursor position."""
        return {
            'random': self.random_state.get_state(),
            'indexes': self.indexes,
            'pointer': self.pointer,
        }

    def set_state(self, sampler_state):
        """Restore a snapshot previously produced by get_state()."""
        self.random_state.set_state(sampler_state['random'])
        self.indexes = sampler_state['indexes']
        self.pointer = sampler_state['pointer']

    def __iter__(self):
        """Yield batches of clip indexes forever, reshuffling per epoch.

        Return:
            batch_indexes (ndarray): indexes of the next batch
        """
        while True:
            if self.pointer >= self.clip_num:
                # Epoch boundary: rewind and reshuffle in place.
                self.pointer = 0
                self.random_state.shuffle(self.indexes)
            start = self.pointer
            self.pointer = start + self.batch_size
            yield self.indexes[start: start + self.batch_size]

    def __len__(self):
        # Ceiling division: number of batches per epoch.
        full_batches, remainder = divmod(self.clip_num, self.batch_size)
        return full_batches + bool(remainder)
class PinMemCustomBatch:
    """Collated training mini-batch (waveform + SED/DOA labels) that
    supports pinned-memory transfer for the DataLoader.
    """

    def __init__(self, batch_dict):
        """
        Args:
            batch_dict: list of sample dicts with keys 'filename',
                'n_segment', 'ov', 'waveform', 'sed_label', 'doa_label'.
        """
        batch_fn = [sample['filename'] for sample in batch_dict]
        batch_n_segment = [sample['n_segment'] for sample in batch_dict]
        batch_ov = [sample['ov'] for sample in batch_dict]
        # Stack into one ndarray first: calling torch.tensor on a Python
        # list of ndarrays is a known slow path (element-wise copy).
        batch_x = np.stack([sample['waveform'] for sample in batch_dict])
        batch_sed_label = np.stack([sample['sed_label'] for sample in batch_dict])
        batch_doa_label = np.stack([sample['doa_label'] for sample in batch_dict])
        self.batch_out_dict = {
            'filename': batch_fn,
            'n_segment': batch_n_segment,
            'ov': batch_ov,
            'waveform': torch.as_tensor(batch_x, dtype=torch.float32),
            'sed_label': torch.as_tensor(batch_sed_label, dtype=torch.float32),
            'doa_label': torch.as_tensor(batch_doa_label, dtype=torch.float32),
        }

    def pin_memory(self):
        """Pin the tensor fields and return the batch dict (DataLoader hook)."""
        self.batch_out_dict['waveform'] = self.batch_out_dict['waveform'].pin_memory()
        self.batch_out_dict['sed_label'] = self.batch_out_dict['sed_label'].pin_memory()
        self.batch_out_dict['doa_label'] = self.batch_out_dict['doa_label'].pin_memory()
        return self.batch_out_dict
def collate_fn(batch_dict):
    """Merge a list of training samples into a mini-batch.

    Used as the DataLoader ``collate_fn`` so the resulting batch object
    supports pinned memory.
    """
    return PinMemCustomBatch(batch_dict)
class PinMemCustomBatchTest:
    """Collated test mini-batch (waveform only, no labels) that supports
    pinned-memory transfer for the DataLoader.
    """

    def __init__(self, batch_dict):
        self.batch_out_dict = {
            'filename': [sample['filename'] for sample in batch_dict],
            'n_segment': [sample['n_segment'] for sample in batch_dict],
            'waveform': torch.tensor([sample['waveform'] for sample in batch_dict],
                                     dtype=torch.float32),
        }

    def pin_memory(self):
        """Pin the waveform tensor and return the batch dict (DataLoader hook)."""
        self.batch_out_dict['waveform'] = self.batch_out_dict['waveform'].pin_memory()
        return self.batch_out_dict
def collate_fn_test(batch_dict):
    """Merge a list of test samples (no labels) into a mini-batch.

    Used as the DataLoader ``collate_fn`` so the resulting batch object
    supports pinned memory.
    """
    return PinMemCustomBatchTest(batch_dict)
from timeit import default_timer as timer
import h5py
import numpy as np
import torch
from methods.utils.data_utilities import (_segment_index, load_dcase_format,
to_metrics2020_format)
from torch.utils.data import Dataset, Sampler
from tqdm import tqdm
from utils.common import int16_samples_to_float32
class UserDataset(Dataset):
    """User defined dataset.

    Splits each fixed-length clip into (possibly overlapping) segments and
    serves waveform segments plus, for train/valid, SED and DOA labels.
    Segments are either preloaded into memory (`args.read_into_mem`) or read
    from HDF5 on every __getitem__.
    """

    def __init__(self, args, cfg, dataset, dataset_type='train', overlap=''):
        """
        Args:
            args: input args
            cfg: configurations
            dataset: dataset used
            dataset_type: 'train' | 'valid' | 'dev_test' | 'eval_test'
            overlap: '1' | '2'
        """
        super().__init__()
        self.dataset_type = dataset_type
        self.read_into_mem = args.read_into_mem
        self.sample_rate = cfg['data']['sample_rate']
        self.clip_length = dataset.clip_length
        self.label_resolution = dataset.label_resolution
        self.frame_length = int(self.clip_length / self.label_resolution)
        self.label_interp_ratio = int(self.label_resolution * self.sample_rate / cfg['data']['hop_length'])

        # Chunklen and hoplen and segmentation. Since all of the clips have
        # the same length, it only needs to be segmented once here.
        data = np.zeros((1, self.clip_length * self.sample_rate))
        if 'train' in self.dataset_type:
            chunklen = int(cfg['data']['train_chunklen_sec'] * self.sample_rate)
            hoplen = int(cfg['data']['train_hoplen_sec'] * self.sample_rate)
            self.segmented_indexes, self.segmented_pad_width = _segment_index(data, chunklen, hoplen)
        elif self.dataset_type in ['valid', 'dev_test', 'eval_test']:
            chunklen = int(cfg['data']['test_chunklen_sec'] * self.sample_rate)
            hoplen = int(cfg['data']['test_hoplen_sec'] * self.sample_rate)
            self.segmented_indexes, self.segmented_pad_width = _segment_index(
                data, chunklen, hoplen, last_frame_always_paddding=True)
        self.num_segments = len(self.segmented_indexes)

        # Data and meta paths
        fold_str_idx = dataset.fold_str_index
        ov_str_idx = dataset.ov_str_index
        data_sr_folder_name = '{}fs'.format(self.sample_rate)
        main_data_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('data').joinpath(data_sr_folder_name)
        dev_data_dir = main_data_dir.joinpath('dev').joinpath(cfg['data']['type'])
        eval_data_dir = main_data_dir.joinpath('eval').joinpath(cfg['data']['type'])
        main_meta_dir = Path(cfg['hdf5_dir']).joinpath(cfg['dataset']).joinpath('meta')
        dev_meta_dir = main_meta_dir.joinpath('dev')
        eval_meta_dir = main_meta_dir.joinpath('eval')

        # BUGFIX: `ov_set` was never assigned on the 'eval_test' path, so the
        # logging print below raised NameError when read_into_mem was set.
        ov_set = ''
        if self.dataset_type == 'train':
            data_dirs = [dev_data_dir]
            self.meta_dir = dev_meta_dir
            train_fold = [int(fold.strip()) for fold in str(cfg['training']['train_fold']).split(',')]
            ov_set = str(cfg['training']['overlap']) if not overlap else overlap
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                               if int(path.stem[fold_str_idx]) in train_fold
                               and path.stem[ov_str_idx] in ov_set
                               and not path.name.startswith('.')]
        elif self.dataset_type == 'valid':
            if cfg['training']['valid_fold'] != 'eval':
                data_dirs = [dev_data_dir]
                self.meta_dir = dev_meta_dir
                valid_fold = [int(fold.strip()) for fold in str(cfg['training']['valid_fold']).split(',')]
                ov_set = str(cfg['training']['overlap']) if not overlap else overlap
                self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                                   if int(path.stem[fold_str_idx]) in valid_fold
                                   and path.stem[ov_str_idx] in ov_set
                                   and not path.name.startswith('.')]
                ori_meta_dir = Path(cfg['dataset_dir']).joinpath('metadata_dev')
            else:
                data_dirs = [eval_data_dir]
                self.meta_dir = eval_meta_dir
                ov_set = str(cfg['training']['overlap']) if not overlap else overlap
                self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                                   if not path.name.startswith('.')]
                ori_meta_dir = Path(cfg['dataset_dir']).joinpath('metadata_eval')
            # Pre-compute ground truth in the formats the metric code expects.
            frame_begin_index = 0
            self.valid_gt_sed_metrics2019 = []
            self.valid_gt_doa_metrics2019 = []
            self.valid_gt_dcaseformat = {}
            for path in self.paths_list:
                ori_meta_path = ori_meta_dir.joinpath(path.stem + '.csv')
                output_dict, sed_metrics2019, doa_metrics2019 = \
                    load_dcase_format(ori_meta_path, frame_begin_index=frame_begin_index,
                                      frame_length=self.frame_length, num_classes=len(dataset.label_set))
                self.valid_gt_dcaseformat.update(output_dict)
                self.valid_gt_sed_metrics2019.append(sed_metrics2019)
                self.valid_gt_doa_metrics2019.append(doa_metrics2019)
                frame_begin_index += self.frame_length
            self.valid_gt_sed_metrics2019 = np.concatenate(self.valid_gt_sed_metrics2019, axis=0)
            self.valid_gt_doa_metrics2019 = np.concatenate(self.valid_gt_doa_metrics2019, axis=0)
            self.gt_metrics2020_dict = to_metrics2020_format(
                self.valid_gt_dcaseformat, self.valid_gt_sed_metrics2019.shape[0],
                label_resolution=self.label_resolution)
        elif self.dataset_type == 'dev_test':
            data_dirs = [dev_data_dir]
            self.meta_dir = dev_meta_dir
            dev_test_fold = [int(fold.strip()) for fold in str(cfg['inference']['test_fold']).split(',')]
            ov_set = str(cfg['inference']['overlap']) if not overlap else overlap
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                               if int(path.stem[fold_str_idx]) in dev_test_fold
                               and path.stem[ov_str_idx] in ov_set
                               and not path.name.startswith('.')]
        elif self.dataset_type == 'eval_test':
            data_dirs = [eval_data_dir]
            self.meta_dir = eval_meta_dir
            self.paths_list = [path for data_dir in data_dirs for path in sorted(data_dir.glob('*.h5'))
                               if not path.name.startswith('.')]

        # Expand each clip path into one pseudo-path per segment: 'name.h5%n'.
        self.paths_list = [Path(str(path) + '%' + str(n))
                           for path in self.paths_list for n in range(self.num_segments)]

        # Optionally preload every segment into memory.
        if self.read_into_mem:
            load_begin_time = timer()
            print('Start to load dataset: {}, ov={}......\n'.format(self.dataset_type + ' set', ov_set))
            iterator = tqdm(self.paths_list, total=len(self.paths_list), unit='clips')
            self.dataset_list = [self._load_segment(path) for path in iterator]
            iterator.close()
            print('Loading dataset time: {:.3f}\n'.format(timer() - load_begin_time))

    def _load_segment(self, path):
        """Load one segment (and labels for non-test sets) from HDF5.

        :param path: pseudo-path of the form '<clip>.h5%<segment_index>'.
        :return dict: sample with keys 'filename', 'n_segment', 'waveform'
            plus 'ov', 'sed_label', 'doa_label' for train/valid sets.
        """
        fn, n_segment = path.stem, int(path.name.split('%')[1])
        data_path = Path(str(path).split('%')[0])
        index_begin = self.segmented_indexes[n_segment][0]
        index_end = self.segmented_indexes[n_segment][1]
        pad_width_before = self.segmented_pad_width[n_segment][0]
        pad_width_after = self.segmented_pad_width[n_segment][1]
        with h5py.File(data_path, 'r') as hf:
            x = int16_samples_to_float32(hf['waveform'][:, index_begin: index_end])
        pad_width = ((0, 0), (pad_width_before, pad_width_after))
        x = np.pad(x, pad_width, mode='constant')
        if 'test' in self.dataset_type:
            return {'filename': fn, 'n_segment': n_segment, 'waveform': x}
        # Train/valid: also fetch the label frames for this segment.
        ov = fn[-1]  # overlap tag is the last character of the clip name
        index_begin_label = int(index_begin / (self.sample_rate * self.label_resolution))
        index_end_label = int(index_end / (self.sample_rate * self.label_resolution))
        pad_width_after_label = int(pad_width_after / (self.sample_rate * self.label_resolution))
        meta_path = self.meta_dir.joinpath(fn + '.h5')
        with h5py.File(meta_path, 'r') as hf:
            sed_label = hf['sed_label'][index_begin_label: index_end_label, ...]
            doa_label = hf['doa_label'][index_begin_label: index_end_label, ...]  # NOTE: Cartesian coordinates
        if pad_width_after_label != 0:
            # Zero-pad labels to the full segment length (2 tracks; 14
            # classes for SED, 3 coordinates for DOA).
            sed_label = np.concatenate(
                (sed_label, np.zeros((pad_width_after_label, 2, 14))), axis=0)
            doa_label = np.concatenate(
                (doa_label, np.zeros((pad_width_after_label, 2, 3))), axis=0)
        return {'filename': fn, 'n_segment': n_segment, 'ov': ov,
                'waveform': x, 'sed_label': sed_label, 'doa_label': doa_label}

    def __len__(self):
        """Get length of the dataset (number of segments)."""
        return len(self.paths_list)

    def __getitem__(self, idx):
        """Read features (and labels for non-test sets) for segment `idx`."""
        if self.read_into_mem:
            # Shallow copy so callers get a fresh dict each call, matching
            # the original behavior (values still share the cached arrays).
            return dict(self.dataset_list[idx])
        return self._load_segment(self.paths_list[idx])
class UserBatchSampler(Sampler):
"""User defined batch sampler. Only for train set.
"""
def __init__(self, clip_num, batch_size, seed=2020):
self.clip_num = clip_num
self.batch_size = batch_size
self.random_state = np.random.RandomState(seed)
self.indexes = np.arange(self.clip_num)
self.random_state.shuffle(self.indexes)
self.pointer = 0
def get_state(self):
sampler_state = {
'random': self.random_state.get_state(),
'indexes': self.indexes,
'pointer': self.pointer
}
return sampler_state
def set_state(self, sampler_state):
self.random_state.set_state(sampler_state['random'])
self.indexes = sampler_state['indexes']
self.pointer = sampler_state['pointer']
def __iter__(self):
"""
Return:
batch_indexes (int): indexes of batch
"""
while True:
if self.pointer >= self.clip_num:
self.pointer = 0
self.random_state.shuffle(self.indexes)
batch_indexes = self.indexes[self.pointer: self.pointer + self.batch_size]
self.pointer += self.batch_size
yield batch_indexes
def __len__(self):
return (self.clip_num + self.batch_size - 1) // self.batch_size
class PinMemCustomBatch:
    """Collated training mini-batch with custom pin-memory support.

    Stacks the per-sample fields produced by the dataset into batched
    tensors. Metadata (filename, segment number, overlap tag) stays as
    plain Python lists.
    """

    def __init__(self, batch_dict):
        batch_fn = []
        batch_n_segment = []
        batch_ov = []
        batch_x = []
        batch_sed_label = []
        batch_doa_label = []
        for sample in batch_dict:
            batch_fn.append(sample['filename'])
            batch_n_segment.append(sample['n_segment'])
            batch_ov.append(sample['ov'])
            batch_x.append(sample['waveform'])
            batch_sed_label.append(sample['sed_label'])
            batch_doa_label.append(sample['doa_label'])
        # np.stack first: building a tensor directly from a list of
        # ndarrays is extremely slow (PyTorch copies element by element
        # and warns about it); stacking gives one contiguous buffer.
        self.batch_out_dict = {
            'filename': batch_fn,
            'n_segment': batch_n_segment,
            'ov': batch_ov,
            'waveform': torch.tensor(np.stack(batch_x), dtype=torch.float32),
            'sed_label': torch.tensor(np.stack(batch_sed_label), dtype=torch.float32),
            'doa_label': torch.tensor(np.stack(batch_doa_label), dtype=torch.float32),
        }

    def pin_memory(self):
        """Pin the tensor fields to page-locked memory for fast H2D copies."""
        for key in ('waveform', 'sed_label', 'doa_label'):
            self.batch_out_dict[key] = self.batch_out_dict[key].pin_memory()
        return self.batch_out_dict
def collate_fn(batch_dict):
    """Collate a list of training samples into a pinnable mini-batch.

    Wraps the samples in PinMemCustomBatch so the DataLoader can pin
    memory through the custom batch object.
    """
    return PinMemCustomBatch(batch_dict)
class PinMemCustomBatchTest:
    """Collated test mini-batch with custom pin-memory support.

    Like PinMemCustomBatch but without labels, since test samples only
    carry the waveform and identifying metadata.
    """

    def __init__(self, batch_dict):
        batch_fn = []
        batch_n_segment = []
        batch_x = []
        for sample in batch_dict:
            batch_fn.append(sample['filename'])
            batch_n_segment.append(sample['n_segment'])
            batch_x.append(sample['waveform'])
        # np.stack first: building a tensor directly from a list of
        # ndarrays is extremely slow (PyTorch copies element by element).
        self.batch_out_dict = {
            'filename': batch_fn,
            'n_segment': batch_n_segment,
            'waveform': torch.tensor(np.stack(batch_x), dtype=torch.float32)
        }

    def pin_memory(self):
        """Pin the waveform tensor to page-locked memory."""
        self.batch_out_dict['waveform'] = self.batch_out_dict['waveform'].pin_memory()
        return self.batch_out_dict
def collate_fn_test(batch_dict):
    """Collate a list of test samples into a pinnable mini-batch.

    Wraps the samples in PinMemCustomBatchTest so the DataLoader can pin
    memory through the custom batch object.
    """
    return PinMemCustomBatchTest(batch_dict)
import os
import shutil
import socket
from omegaconf import OmegaConf
class WandbUrls:  # pylint: disable=too-few-public-methods
    """Convenience holder for the various wandb.ai URLs of one run."""

    def __init__(self, url):
        # URL layout: https://app.wandb.ai/<entity>/<project>/runs/<hash>
        parts = url.split("/")
        url_hash, entity, project = parts[-1], parts[-4], parts[-3]
        run_base = "https://app.wandb.ai/{}/{}/runs/{}".format(
            entity, project, url_hash
        )
        self.weight_url = url
        self.log_url = run_base + "/logs"
        self.chart_url = run_base
        self.overview_url = run_base + "/overview"
        self.hydra_config_url = run_base + "/files/hydra-config.yaml"
        self.overrides_url = run_base + "/files/overrides.yaml"

    # pylint: disable=line-too-long
    def __repr__(self):
        msg = "=================================================== WANDB URLS ===================================================================\n"  # noqa: E501
        for key, value in self.__dict__.items():
            msg += "{}: {}\n".format(key.upper(), value)
        msg += "=================================================================================================================================\n"  # noqa: E501
        return msg

    def to_dict(self):
        """Return the URLs keyed by upper-cased attribute name."""
        return {key.upper(): value for key, value in self.__dict__.items()}
def log_jam(run):
    """Upload the jammy repo's pending diff to wandb; return its commit sha.

    Returns None when jammy is not importable.
    """
    try:
        from jammy import get_jam_repo_git
    except ImportError:
        return None
    sha, diff = get_jam_repo_git()
    patch_file = "jam_change.patch"
    with open(patch_file, "w") as patch:
        patch.write(diff)
    run.save(patch_file)
    return sha
def log_proj(run, proj_path):
    """Upload the project repo's pending diff to wandb; return its commit sha.

    Returns None when jammy's git utilities are not importable.
    """
    try:
        from jammy.utils import git
    except ImportError:
        return None
    sha, diff = git.log_repo(proj_path)
    patch_file = "proj_change.patch"
    with open(patch_file, "w") as patch:
        patch.write(diff)
    run.save(patch_file)
    return sha
def log_hydra(run):
    """Copy the hydra config to a wandb-friendly name and upload both files."""
    cwd = os.getcwd()
    src = os.path.join(cwd, ".hydra/config.yaml")
    dst = os.path.join(cwd, ".hydra/hydra-config.yaml")
    shutil.copyfile(src, dst)
    run.save(dst)
    run.save(os.path.join(cwd, ".hydra/overrides.yaml"))
class JamWandb:
    """Thin static wrapper around a single active wandb run.

    Holds the global hydra config (``g_cfg``) and the active wandb run
    (``run``) as class attributes so any module can log through one place.
    """

    g_cfg = None  # global OmegaConf config, set once at startup
    run = None    # active wandb run, set by the launcher

    @property
    def cfg(self):
        """Instance-level view of the shared global config."""
        return JamWandb.g_cfg

    @cfg.setter
    def cfg(self, g_cfg):
        JamWandb.g_cfg = g_cfg

    @staticmethod
    def prep_cfg(dump_meta=True):
        """Collect run metadata (git shas, host, urls, config) and log it.

        Uploads the hydra config/overrides and, when ``dump_meta`` is
        True, dumps the merged metadata to ``meta.yaml`` in the run dir.

        Raises:
            RuntimeError: if ``g_cfg`` or ``run`` has not been set yet.
        """
        if JamWandb.g_cfg is None:
            raise RuntimeError("Set JamWandb g_cfg firstly")
        if JamWandb.run is None:
            raise RuntimeError("Set JamWandb run")
        g_cfg = JamWandb.g_cfg
        run = JamWandb.run
        jam_sha = log_jam(run)
        proj_sha = log_proj(run, g_cfg.work_dir)
        log_hydra(run)
        cfg = {
            "proj_path": g_cfg.work_dir,
            "run_path": os.getcwd(),
            "host": socket.gethostname(),
            "jam_sha": jam_sha,
            "proj_sha": proj_sha,
            **(WandbUrls(run.url).to_dict()),
            "z": OmegaConf.to_container(g_cfg, resolve=True),
        }
        if dump_meta:
            with open("meta.yaml", "w") as fp:
                OmegaConf.save(config=OmegaConf.create(cfg), f=fp.name)
        return cfg

    @staticmethod
    def log(*args, **kwargs):
        """Forward to ``run.log``.

        Raises:
            RuntimeError: if no wandb run is active.
        """
        # BUG FIX: the guard was inverted — it raised "inactive" when a run
        # WAS active, and crashed with AttributeError when it was not.
        if JamWandb.run is None:
            raise RuntimeError("wandb is inactive, please launch first.")
        JamWandb.run.log(*args, **kwargs)

    @staticmethod
    def finish():
        """Upload the jam log (if present), close the run and clear it."""
        if JamWandb.run is None:
            return
        if os.path.exists("jam_.log"):
            JamWandb.run.save("jam_.log")
        JamWandb.run.finish()
        JamWandb.run = None
import shutil
import socket
from omegaconf import OmegaConf
class WandbUrls:  # pylint: disable=too-few-public-methods
    """Convenience holder for the various wandb.ai URLs of one run."""

    def __init__(self, url):
        # URL layout: https://app.wandb.ai/<entity>/<project>/runs/<hash>
        parts = url.split("/")
        url_hash, entity, project = parts[-1], parts[-4], parts[-3]
        run_base = "https://app.wandb.ai/{}/{}/runs/{}".format(
            entity, project, url_hash
        )
        self.weight_url = url
        self.log_url = run_base + "/logs"
        self.chart_url = run_base
        self.overview_url = run_base + "/overview"
        self.hydra_config_url = run_base + "/files/hydra-config.yaml"
        self.overrides_url = run_base + "/files/overrides.yaml"

    # pylint: disable=line-too-long
    def __repr__(self):
        msg = "=================================================== WANDB URLS ===================================================================\n"  # noqa: E501
        for key, value in self.__dict__.items():
            msg += "{}: {}\n".format(key.upper(), value)
        msg += "=================================================================================================================================\n"  # noqa: E501
        return msg

    def to_dict(self):
        """Return the URLs keyed by upper-cased attribute name."""
        return {key.upper(): value for key, value in self.__dict__.items()}
def log_jam(run):
    """Upload the jammy repo's pending diff to wandb; return its commit sha.

    Returns None when jammy is not importable.
    """
    try:
        from jammy import get_jam_repo_git
    except ImportError:
        return None
    sha, diff = get_jam_repo_git()
    patch_file = "jam_change.patch"
    with open(patch_file, "w") as patch:
        patch.write(diff)
    run.save(patch_file)
    return sha
def log_proj(run, proj_path):
    """Upload the project repo's pending diff to wandb; return its commit sha.

    Returns None when jammy's git utilities are not importable.
    """
    try:
        from jammy.utils import git
    except ImportError:
        return None
    sha, diff = git.log_repo(proj_path)
    patch_file = "proj_change.patch"
    with open(patch_file, "w") as patch:
        patch.write(diff)
    run.save(patch_file)
    return sha
def log_hydra(run):
    """Copy the hydra config to a wandb-friendly name and upload both files."""
    cwd = os.getcwd()
    src = os.path.join(cwd, ".hydra/config.yaml")
    dst = os.path.join(cwd, ".hydra/hydra-config.yaml")
    shutil.copyfile(src, dst)
    run.save(dst)
    run.save(os.path.join(cwd, ".hydra/overrides.yaml"))
class JamWandb:
    """Thin static wrapper around a single active wandb run.

    Holds the global hydra config (``g_cfg``) and the active wandb run
    (``run``) as class attributes so any module can log through one place.
    """

    g_cfg = None  # global OmegaConf config, set once at startup
    run = None    # active wandb run, set by the launcher

    @property
    def cfg(self):
        """Instance-level view of the shared global config."""
        return JamWandb.g_cfg

    @cfg.setter
    def cfg(self, g_cfg):
        JamWandb.g_cfg = g_cfg

    @staticmethod
    def prep_cfg(dump_meta=True):
        """Collect run metadata (git shas, host, urls, config) and log it.

        Uploads the hydra config/overrides and, when ``dump_meta`` is
        True, dumps the merged metadata to ``meta.yaml`` in the run dir.

        Raises:
            RuntimeError: if ``g_cfg`` or ``run`` has not been set yet.
        """
        if JamWandb.g_cfg is None:
            raise RuntimeError("Set JamWandb g_cfg firstly")
        if JamWandb.run is None:
            raise RuntimeError("Set JamWandb run")
        g_cfg = JamWandb.g_cfg
        run = JamWandb.run
        jam_sha = log_jam(run)
        proj_sha = log_proj(run, g_cfg.work_dir)
        log_hydra(run)
        cfg = {
            "proj_path": g_cfg.work_dir,
            "run_path": os.getcwd(),
            "host": socket.gethostname(),
            "jam_sha": jam_sha,
            "proj_sha": proj_sha,
            **(WandbUrls(run.url).to_dict()),
            "z": OmegaConf.to_container(g_cfg, resolve=True),
        }
        if dump_meta:
            with open("meta.yaml", "w") as fp:
                OmegaConf.save(config=OmegaConf.create(cfg), f=fp.name)
        return cfg

    @staticmethod
    def log(*args, **kwargs):
        """Forward to ``run.log``.

        Raises:
            RuntimeError: if no wandb run is active.
        """
        # BUG FIX: the guard was inverted — it raised "inactive" when a run
        # WAS active, and crashed with AttributeError when it was not.
        if JamWandb.run is None:
            raise RuntimeError("wandb is inactive, please launch first.")
        JamWandb.run.log(*args, **kwargs)

    @staticmethod
    def finish():
        """Upload the jam log (if present), close the run and clear it."""
        if JamWandb.run is None:
            return
        if os.path.exists("jam_.log"):
            JamWandb.run.save("jam_.log")
        JamWandb.run.finish()
        JamWandb.run = None
import os
import time
from dbt.adapters.factory import get_adapter
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.contracts.graph.parsed import ParsedNode
from dbt.contracts.graph.manifest import CompileResultNode
from dbt.contracts.results import ExecutionResult
import dbt.clients.jinja
import dbt.compilation
import dbt.exceptions
import dbt.linker
import dbt.tracking
import dbt.model
import dbt.ui.printer
import dbt.utils
from dbt.clients.system import write_json
import dbt.graph.selector
from multiprocessing.dummy import Pool as ThreadPool
# Filename (written under the project's target path) of the JSON artifact
# that RunManager.write_results() produces after a run.
RESULT_FILE_NAME = 'run_results.json'
class RunManager(object):
    """Compiles the project graph and drives node runners over it in parallel."""

    def __init__(self, config):
        self.config = config

    def deserialize_graph(self):
        """Load the compiled dependency graph file from the target path."""
        logger.info("Loading dependency graph file.")
        base_target_path = self.config.target_path
        graph_file = os.path.join(
            base_target_path,
            dbt.compilation.graph_file_name
        )
        return dbt.linker.from_file(graph_file)

    def get_dependent(self, linker, node_id):
        """Yield the unique ids of every node that depends on ``node_id``."""
        dependent_nodes = linker.get_dependent_nodes(node_id)
        for node_id in dependent_nodes:
            yield node_id

    def get_runners(self, Runner, adapter, node_dependency_list):
        """Build a {unique_id: runner} map for all nodes in the dep list.

        Ephemeral models are numbered 0/0 so they stay out of the progress
        counter; real nodes are numbered 1..num_nodes.
        """
        all_nodes = dbt.utils.flatten_nodes(node_dependency_list)
        num_nodes = len([
            n for n in all_nodes if not Runner.is_ephemeral_model(n)
        ])
        node_runners = {}
        i = 0
        for node in all_nodes:
            uid = node.get('unique_id')
            if Runner.is_ephemeral_model(node):
                runner = Runner(self.config, adapter, node, 0, 0)
            else:
                i += 1
                runner = Runner(self.config, adapter, node, i, num_nodes)
            node_runners[uid] = runner
        return node_runners

    def call_runner(self, data):
        """Worker entry point: execute one node's runner and return its result.

        Raises RuntimeException when the result errored and the runner is
        configured to fail fast.
        """
        runner = data['runner']
        manifest = data['manifest']
        if runner.skip:
            return runner.on_skip()
        # no before/after printing for ephemeral models
        if not runner.is_ephemeral_model(runner.node):
            runner.before_execute()
        result = runner.safe_run(manifest)
        if not runner.is_ephemeral_model(runner.node):
            runner.after_execute(result)
        if result.errored and runner.raise_on_first_error():
            raise dbt.exceptions.RuntimeException(result.error)
        return result

    def get_relevant_runners(self, node_runners, node_subset):
        """Return the runners for the nodes in ``node_subset``, in order."""
        runners = []
        for node in node_subset:
            unique_id = node.get('unique_id')
            if unique_id in node_runners:
                runners.append(node_runners[unique_id])
        return runners

    def execute_nodes(self, linker, Runner, manifest, node_dependency_list):
        """Execute each dependency level of the graph on a thread pool.

        Results of non-ephemeral nodes are collected and returned; when a
        node fails, all of its dependents are marked skipped. Ctrl-C
        cancels open connections (when the adapter supports it) before
        re-raising.
        """
        adapter = get_adapter(self.config)
        num_threads = self.config.threads
        target_name = self.config.target_name
        text = "Concurrency: {} threads (target='{}')"
        concurrency_line = text.format(num_threads, target_name)
        dbt.ui.printer.print_timestamped_line(concurrency_line)
        dbt.ui.printer.print_timestamped_line("")
        # NOTE(review): 'schemas' is unused below — confirm whether
        # get_model_schemas has required side effects before removing.
        schemas = list(Runner.get_model_schemas(manifest))
        node_runners = self.get_runners(Runner, adapter, node_dependency_list)
        pool = ThreadPool(num_threads)
        node_results = []
        for node_list in node_dependency_list:
            runners = self.get_relevant_runners(node_runners, node_list)
            args_list = []
            for runner in runners:
                args_list.append({
                    'manifest': manifest,
                    'runner': runner
                })
            try:
                for result in pool.imap_unordered(self.call_runner, args_list):
                    is_ephemeral = Runner.is_ephemeral_model(result.node)
                    if not is_ephemeral:
                        node_results.append(result)
                    node = CompileResultNode(**result.node)
                    node_id = node.unique_id
                    manifest.nodes[node_id] = node
                    if result.errored:
                        dependents = self.get_dependent(linker, node_id)
                        self._mark_dependent_errors(node_runners, dependents,
                                                    result, is_ephemeral)
            except KeyboardInterrupt:
                pool.close()
                pool.terminate()
                adapter = get_adapter(self.config)
                if not adapter.is_cancelable():
                    msg = ("The {} adapter does not support query "
                           "cancellation. Some queries may still be "
                           "running!".format(adapter.type()))
                    yellow = dbt.ui.printer.COLOR_FG_YELLOW
                    dbt.ui.printer.print_timestamped_line(msg, yellow)
                    raise
                for conn_name in adapter.cancel_open_connections():
                    dbt.ui.printer.print_cancel_line(conn_name)
                dbt.ui.printer.print_run_end_messages(node_results,
                                                      early_exit=True)
                pool.join()
                raise
        pool.close()
        pool.join()
        return node_results

    @staticmethod
    def _mark_dependent_errors(node_runners, dependents, result, is_ephemeral):
        """Mark every dependent runner as skipped after a failure.

        An ephemeral failure carries the failing result as the skip cause;
        a non-ephemeral failure skips dependents without a cause.
        """
        for dep_node_id in dependents:
            runner = node_runners.get(dep_node_id)
            if not runner:
                continue
            if is_ephemeral:
                cause = result
            else:
                cause = None
            # BUG FIX: previously passed cause=result unconditionally,
            # which made the is_ephemeral branch above dead code.
            runner.do_skip(cause=cause)

    def write_results(self, execution_result):
        """Serialize the execution result to target/run_results.json."""
        filepath = os.path.join(self.config.target_path, RESULT_FILE_NAME)
        write_json(filepath, execution_result.serialize())

    def compile(self, config):
        """Compile the project, returning (manifest, linker)."""
        compiler = dbt.compilation.Compiler(config)
        compiler.initialize()
        return compiler.compile()

    def run_from_graph(self, Selector, Runner, query):
        """
        Run dbt for the query, based on the graph.

        Selector is a type (not instance!) derived from
            dbt.graph.selector.NodeSelector
        Runner is a type (not instance!) derived from
            dbt.node_runners.BaseRunner
        """
        manifest, linker = self.compile(self.config)
        selector = Selector(linker, manifest)
        selected_nodes = selector.select(query)
        dep_list = selector.as_node_list(selected_nodes)
        adapter = get_adapter(self.config)
        flat_nodes = dbt.utils.flatten_nodes(dep_list)
        if len(flat_nodes) == 0:
            logger.info("WARNING: Nothing to do. Try checking your model "
                        "configs and model specification args")
            return []
        elif Runner.print_header:
            stat_line = dbt.ui.printer.get_counts(flat_nodes)
            logger.info("")
            dbt.ui.printer.print_timestamped_line(stat_line)
            dbt.ui.printer.print_timestamped_line("")
        else:
            logger.info("")
        try:
            Runner.before_hooks(self.config, adapter, manifest)
            started = time.time()
            Runner.before_run(self.config, adapter, manifest)
            res = self.execute_nodes(linker, Runner, manifest, dep_list)
            Runner.after_run(self.config, adapter, res, manifest)
            elapsed = time.time() - started
            Runner.after_hooks(self.config, adapter, res, manifest, elapsed)
        finally:
            adapter.cleanup_connections()
        result = ExecutionResult(
            results=res,
            elapsed_time=elapsed,
            generated_at=dbt.utils.timestring(),
        )
        self.write_results(result)
        return res

    # ------------------------------------

    def run(self, query, Runner):
        """Run using the standard graph-aware NodeSelector."""
        Selector = dbt.graph.selector.NodeSelector
        return self.run_from_graph(Selector, Runner, query)

    def run_flat(self, query, Runner):
        """Run using the FlatNodeSelector (no graph ordering)."""
        Selector = dbt.graph.selector.FlatNodeSelector
        return self.run_from_graph(Selector, Runner, query)
import time
from dbt.adapters.factory import get_adapter
from dbt.logger import GLOBAL_LOGGER as logger
from dbt.contracts.graph.parsed import ParsedNode
from dbt.contracts.graph.manifest import CompileResultNode
from dbt.contracts.results import ExecutionResult
import dbt.clients.jinja
import dbt.compilation
import dbt.exceptions
import dbt.linker
import dbt.tracking
import dbt.model
import dbt.ui.printer
import dbt.utils
from dbt.clients.system import write_json
import dbt.graph.selector
from multiprocessing.dummy import Pool as ThreadPool
# Filename (written under the project's target path) of the JSON artifact
# that RunManager.write_results() produces after a run.
RESULT_FILE_NAME = 'run_results.json'
class RunManager(object):
    """Compiles the project graph and drives node runners over it in parallel."""

    def __init__(self, config):
        self.config = config

    def deserialize_graph(self):
        """Load the compiled dependency graph file from the target path."""
        logger.info("Loading dependency graph file.")
        base_target_path = self.config.target_path
        graph_file = os.path.join(
            base_target_path,
            dbt.compilation.graph_file_name
        )
        return dbt.linker.from_file(graph_file)

    def get_dependent(self, linker, node_id):
        """Yield the unique ids of every node that depends on ``node_id``."""
        dependent_nodes = linker.get_dependent_nodes(node_id)
        for node_id in dependent_nodes:
            yield node_id

    def get_runners(self, Runner, adapter, node_dependency_list):
        """Build a {unique_id: runner} map for all nodes in the dep list.

        Ephemeral models are numbered 0/0 so they stay out of the progress
        counter; real nodes are numbered 1..num_nodes.
        """
        all_nodes = dbt.utils.flatten_nodes(node_dependency_list)
        num_nodes = len([
            n for n in all_nodes if not Runner.is_ephemeral_model(n)
        ])
        node_runners = {}
        i = 0
        for node in all_nodes:
            uid = node.get('unique_id')
            if Runner.is_ephemeral_model(node):
                runner = Runner(self.config, adapter, node, 0, 0)
            else:
                i += 1
                runner = Runner(self.config, adapter, node, i, num_nodes)
            node_runners[uid] = runner
        return node_runners

    def call_runner(self, data):
        """Worker entry point: execute one node's runner and return its result.

        Raises RuntimeException when the result errored and the runner is
        configured to fail fast.
        """
        runner = data['runner']
        manifest = data['manifest']
        if runner.skip:
            return runner.on_skip()
        # no before/after printing for ephemeral models
        if not runner.is_ephemeral_model(runner.node):
            runner.before_execute()
        result = runner.safe_run(manifest)
        if not runner.is_ephemeral_model(runner.node):
            runner.after_execute(result)
        if result.errored and runner.raise_on_first_error():
            raise dbt.exceptions.RuntimeException(result.error)
        return result

    def get_relevant_runners(self, node_runners, node_subset):
        """Return the runners for the nodes in ``node_subset``, in order."""
        runners = []
        for node in node_subset:
            unique_id = node.get('unique_id')
            if unique_id in node_runners:
                runners.append(node_runners[unique_id])
        return runners

    def execute_nodes(self, linker, Runner, manifest, node_dependency_list):
        """Execute each dependency level of the graph on a thread pool.

        Results of non-ephemeral nodes are collected and returned; when a
        node fails, all of its dependents are marked skipped. Ctrl-C
        cancels open connections (when the adapter supports it) before
        re-raising.
        """
        adapter = get_adapter(self.config)
        num_threads = self.config.threads
        target_name = self.config.target_name
        text = "Concurrency: {} threads (target='{}')"
        concurrency_line = text.format(num_threads, target_name)
        dbt.ui.printer.print_timestamped_line(concurrency_line)
        dbt.ui.printer.print_timestamped_line("")
        # NOTE(review): 'schemas' is unused below — confirm whether
        # get_model_schemas has required side effects before removing.
        schemas = list(Runner.get_model_schemas(manifest))
        node_runners = self.get_runners(Runner, adapter, node_dependency_list)
        pool = ThreadPool(num_threads)
        node_results = []
        for node_list in node_dependency_list:
            runners = self.get_relevant_runners(node_runners, node_list)
            args_list = []
            for runner in runners:
                args_list.append({
                    'manifest': manifest,
                    'runner': runner
                })
            try:
                for result in pool.imap_unordered(self.call_runner, args_list):
                    is_ephemeral = Runner.is_ephemeral_model(result.node)
                    if not is_ephemeral:
                        node_results.append(result)
                    node = CompileResultNode(**result.node)
                    node_id = node.unique_id
                    manifest.nodes[node_id] = node
                    if result.errored:
                        dependents = self.get_dependent(linker, node_id)
                        self._mark_dependent_errors(node_runners, dependents,
                                                    result, is_ephemeral)
            except KeyboardInterrupt:
                pool.close()
                pool.terminate()
                adapter = get_adapter(self.config)
                if not adapter.is_cancelable():
                    msg = ("The {} adapter does not support query "
                           "cancellation. Some queries may still be "
                           "running!".format(adapter.type()))
                    yellow = dbt.ui.printer.COLOR_FG_YELLOW
                    dbt.ui.printer.print_timestamped_line(msg, yellow)
                    raise
                for conn_name in adapter.cancel_open_connections():
                    dbt.ui.printer.print_cancel_line(conn_name)
                dbt.ui.printer.print_run_end_messages(node_results,
                                                      early_exit=True)
                pool.join()
                raise
        pool.close()
        pool.join()
        return node_results

    @staticmethod
    def _mark_dependent_errors(node_runners, dependents, result, is_ephemeral):
        """Mark every dependent runner as skipped after a failure.

        An ephemeral failure carries the failing result as the skip cause;
        a non-ephemeral failure skips dependents without a cause.
        """
        for dep_node_id in dependents:
            runner = node_runners.get(dep_node_id)
            if not runner:
                continue
            if is_ephemeral:
                cause = result
            else:
                cause = None
            # BUG FIX: previously passed cause=result unconditionally,
            # which made the is_ephemeral branch above dead code.
            runner.do_skip(cause=cause)

    def write_results(self, execution_result):
        """Serialize the execution result to target/run_results.json."""
        filepath = os.path.join(self.config.target_path, RESULT_FILE_NAME)
        write_json(filepath, execution_result.serialize())

    def compile(self, config):
        """Compile the project, returning (manifest, linker)."""
        compiler = dbt.compilation.Compiler(config)
        compiler.initialize()
        return compiler.compile()

    def run_from_graph(self, Selector, Runner, query):
        """
        Run dbt for the query, based on the graph.

        Selector is a type (not instance!) derived from
            dbt.graph.selector.NodeSelector
        Runner is a type (not instance!) derived from
            dbt.node_runners.BaseRunner
        """
        manifest, linker = self.compile(self.config)
        selector = Selector(linker, manifest)
        selected_nodes = selector.select(query)
        dep_list = selector.as_node_list(selected_nodes)
        adapter = get_adapter(self.config)
        flat_nodes = dbt.utils.flatten_nodes(dep_list)
        if len(flat_nodes) == 0:
            logger.info("WARNING: Nothing to do. Try checking your model "
                        "configs and model specification args")
            return []
        elif Runner.print_header:
            stat_line = dbt.ui.printer.get_counts(flat_nodes)
            logger.info("")
            dbt.ui.printer.print_timestamped_line(stat_line)
            dbt.ui.printer.print_timestamped_line("")
        else:
            logger.info("")
        try:
            Runner.before_hooks(self.config, adapter, manifest)
            started = time.time()
            Runner.before_run(self.config, adapter, manifest)
            res = self.execute_nodes(linker, Runner, manifest, dep_list)
            Runner.after_run(self.config, adapter, res, manifest)
            elapsed = time.time() - started
            Runner.after_hooks(self.config, adapter, res, manifest, elapsed)
        finally:
            adapter.cleanup_connections()
        result = ExecutionResult(
            results=res,
            elapsed_time=elapsed,
            generated_at=dbt.utils.timestring(),
        )
        self.write_results(result)
        return res

    # ------------------------------------

    def run(self, query, Runner):
        """Run using the standard graph-aware NodeSelector."""
        Selector = dbt.graph.selector.NodeSelector
        return self.run_from_graph(Selector, Runner, query)

    def run_flat(self, query, Runner):
        """Run using the FlatNodeSelector (no graph ordering)."""
        Selector = dbt.graph.selector.FlatNodeSelector
        return self.run_from_graph(Selector, Runner, query)
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration for the forums app.

    Creates the Forum, Thread and Post tables and carries the frozen ORM
    state of every model this app touches (auth, contenttypes, forums,
    tidings).
    """

    def forwards(self, orm):
        """Create the forums_forum, forums_thread and forums_post tables."""
        # Adding model 'Forum'
        db.create_table('forums_forum', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True)),
            ('last_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_post_in_forum', null=True, on_delete=models.SET_NULL, to=orm['forums.Post'])),
            ('display_order', self.gf('django.db.models.fields.IntegerField')(default=1, db_index=True)),
            ('is_listed', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
        ))
        db.send_create_signal('forums', ['Forum'])

        # Adding model 'Thread'
        db.create_table('forums_thread', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('forum', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Forum'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('last_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_post_in', null=True, on_delete=models.SET_NULL, to=orm['forums.Post'])),
            ('replies', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('is_locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_sticky', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
        ))
        db.send_create_signal('forums', ['Thread'])

        # Adding model 'Post'
        db.create_table('forums_post', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('thread', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Thread'])),
            ('content', self.gf('django.db.models.fields.TextField')()),
            ('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
            ('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='post_last_updated_by', null=True, to=orm['auth.User'])),
        ))
        db.send_create_signal('forums', ['Post'])

    def backwards(self, orm):
        """Drop the three tables created by forwards()."""
        # Deleting model 'Forum'
        db.delete_table('forums_forum')

        # Deleting model 'Thread'
        db.delete_table('forums_thread')

        # Deleting model 'Post'
        db.delete_table('forums_post')

    # Frozen ORM definitions South uses to build the fake orm passed to
    # forwards()/backwards().
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            # BUG FIX: the field path had been mangled to '<PASSWORD>...'
            # (redaction artifact); restored the standard Django CharField.
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'forums.forum': {
            'Meta': {'ordering': "['display_order', 'id']", 'object_name': 'Forum'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
            'display_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_listed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'last_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_post_in_forum'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['forums.Post']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        'forums.post': {
            'Meta': {'ordering': "['created']", 'object_name': 'Post'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Thread']"}),
            'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_last_updated_by'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'forums.thread': {
            'Meta': {'ordering': "['-is_sticky', '-last_post__created']", 'object_name': 'Thread'},
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Forum']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'last_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_post_in'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['forums.Post']"}),
            'replies': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'tidings.watch': {
            'Meta': {'object_name': 'Watch'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['forums']
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Forum'
db.create_table('forums_forum', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
('description', self.gf('django.db.models.fields.TextField')(null=True)),
('last_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_post_in_forum', null=True, on_delete=models.SET_NULL, to=orm['forums.Post'])),
('display_order', self.gf('django.db.models.fields.IntegerField')(default=1, db_index=True)),
('is_listed', self.gf('django.db.models.fields.BooleanField')(default=True, db_index=True)),
))
db.send_create_signal('forums', ['Forum'])
# Adding model 'Thread'
db.create_table('forums_thread', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=255)),
('forum', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Forum'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('last_post', self.gf('django.db.models.fields.related.ForeignKey')(related_name='last_post_in', null=True, on_delete=models.SET_NULL, to=orm['forums.Post'])),
('replies', self.gf('django.db.models.fields.IntegerField')(default=0)),
('is_locked', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_sticky', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)),
))
db.send_create_signal('forums', ['Thread'])
# Adding model 'Post'
db.create_table('forums_post', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('thread', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['forums.Thread'])),
('content', self.gf('django.db.models.fields.TextField')()),
('author', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
('updated_by', self.gf('django.db.models.fields.related.ForeignKey')(related_name='post_last_updated_by', null=True, to=orm['auth.User'])),
))
db.send_create_signal('forums', ['Post'])
def backwards(self, orm):
# Deleting model 'Forum'
db.delete_table('forums_forum')
# Deleting model 'Thread'
db.delete_table('forums_thread')
# Deleting model 'Post'
db.delete_table('forums_post')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('<PASSWORD>.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'forums.forum': {
'Meta': {'ordering': "['display_order', 'id']", 'object_name': 'Forum'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'display_order': ('django.db.models.fields.IntegerField', [], {'default': '1', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_listed': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'last_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_post_in_forum'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['forums.Post']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'forums.post': {
'Meta': {'ordering': "['created']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Thread']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'updated_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'post_last_updated_by'", 'null': 'True', 'to': "orm['auth.User']"})
},
'forums.thread': {
'Meta': {'ordering': "['-is_sticky', '-last_post__created']", 'object_name': 'Thread'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'forum': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['forums.Forum']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_post': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'last_post_in'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['forums.Post']"}),
'replies': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'tidings.watch': {
'Meta': {'object_name': 'Watch'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['forums'] | 0.432063 | 0.098469 |
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
from decimal import Decimal
import logging
class CaldaiemuraliItSpider(BaseSpider):
name = "caldaiemurali.it"
allowed_domains = ["caldaiemurali.it"]
start_urls = (
'http://www.caldaiemurali.it/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select("//ul[@id='nav']//a/@href").extract()
for category in categories:
yield Request(category, callback=self.parse)
pages = hxs.select("//div[@class='pages']/ol/li/a/@href").extract()
for page in pages:
yield Request(page, callback=self.parse)
items = hxs.select("//div[@class='product-list-block']//a[@class='product-image']/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
def parse_item(self, response):
url = response.url
hxs = HtmlXPathSelector(response)
name = hxs.select("//div[@class='product-shop']/div[@class='product-name']/h2/text()").extract()
if not name:
logging.error("NO NAME! %s" % url)
return
name = name[0]
# adding product
price = hxs.select("//div[@class='product-shop']/div[@class='price-box']//span[@class='price']/text()").extract()
if not price:
logging.error("NO PRICE! %s" % url)
return
price = price[0].replace(".", "").replace(",", ".")
# price_delivery = hxs.select("//div[@class='product-shop']//table[@id='product-attribute-specs-table']/tr/td[(preceding::th[text()='Spese Spedizione'])]/text()").extract()
# if not price_delivery:
# logging.error("NO PRICE DELIVERY! %s" % url)
# return
# price_delivery = price_delivery[0]
# price = Decimal(price) + Decimal(price_delivery)
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item() | portfolio/Python/scrapy/rosarioweb/caldaiemurali_it.py | from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request
from product_spiders.items import Product, ProductLoader
from decimal import Decimal
import logging
class CaldaiemuraliItSpider(BaseSpider):
name = "caldaiemurali.it"
allowed_domains = ["caldaiemurali.it"]
start_urls = (
'http://www.caldaiemurali.it/',
)
def parse(self, response):
hxs = HtmlXPathSelector(response)
categories = hxs.select("//ul[@id='nav']//a/@href").extract()
for category in categories:
yield Request(category, callback=self.parse)
pages = hxs.select("//div[@class='pages']/ol/li/a/@href").extract()
for page in pages:
yield Request(page, callback=self.parse)
items = hxs.select("//div[@class='product-list-block']//a[@class='product-image']/@href").extract()
for item in items:
yield Request(item, callback=self.parse_item)
def parse_item(self, response):
url = response.url
hxs = HtmlXPathSelector(response)
name = hxs.select("//div[@class='product-shop']/div[@class='product-name']/h2/text()").extract()
if not name:
logging.error("NO NAME! %s" % url)
return
name = name[0]
# adding product
price = hxs.select("//div[@class='product-shop']/div[@class='price-box']//span[@class='price']/text()").extract()
if not price:
logging.error("NO PRICE! %s" % url)
return
price = price[0].replace(".", "").replace(",", ".")
# price_delivery = hxs.select("//div[@class='product-shop']//table[@id='product-attribute-specs-table']/tr/td[(preceding::th[text()='Spese Spedizione'])]/text()").extract()
# if not price_delivery:
# logging.error("NO PRICE DELIVERY! %s" % url)
# return
# price_delivery = price_delivery[0]
# price = Decimal(price) + Decimal(price_delivery)
l = ProductLoader(item=Product(), response=response)
l.add_value('identifier', str(name))
l.add_value('name', name)
l.add_value('url', url)
l.add_value('price', price)
yield l.load_item() | 0.372391 | 0.079531 |
import math
from .searcher import Searcher
from pychemia import pcm_log
class ParticleSwarm(Searcher):
def __init__(self, population, params=None, generation_size=32, stabilization_limit=10):
"""
Implementation fo the Firefly algorithm for global minimization
This searcher uses a metric to compute the attractiveness and the vector displacement
to move one firefly in the direction of another one
:param population:
:param params: (dict) Parameters to setup the Searcher
:param generation_size: (int)
:param stabilization_limit: (int)
:return:
"""
# Mandatory objects
self.population = population
# Parameters
self.gamma = None
self.elites = None
self.set_params(params)
# Constrains
self.generation_size = generation_size
self.stabilization_limit = stabilization_limit
# Initializing objects
Searcher.__init__(self, self.population, generation_size, stabilization_limit)
def set_params(self, params):
if params is None:
self.gamma = 0.1
self.elites = 3
else:
assert ('gamma' in params)
assert (params['gamma'] >= 0.0)
self.gamma = params['gamma']
if 'elites' in params:
self.elites = params['elites']
def get_params(self):
return {'gamma': self.gamma, 'elites': self.elites}
def run_one(self):
# Get a static selection of the values in the generation that are relaxed
selection = self.population.ids_sorted(self.actives_in_generation)
# Minus sign because we are searching for minima
intensity = self.population.get_values(selection)
for entry_id in intensity:
intensity[entry_id] *= -1
moves = {}
new_selection = {}
for entry_id in selection:
new_selection[entry_id] = None
# Move all the fireflies (Except the most brightness)
# as the selection is sorted it means that the first one will no move
pcm_log.debug('No Moving %d %s. Intensity: %7.3f' % (0, str(selection[0]), intensity[selection[0]]))
# The best
elites = selection[:self.elites]
for i in range(self.elites, len(selection)):
entry_id = selection[i]
pcm_log.debug('Moving %d %s. Intensity: %7.3f' % (i, str(entry_id), intensity[entry_id]))
distances = [self.population.distance(entry_id, entry_jd) for entry_jd in elites]
target = elites[distances.index(min(distances))]
distance = min(distances)
atractiveness = math.exp(-self.gamma * distance) * intensity[target]
pcm_log.debug('[%s] Distance: %7.3f. Intensity: %7.3f. Atractiveness: %7.3f' % (str(target),
distance,
intensity[target],
atractiveness))
if intensity[entry_id] < atractiveness:
new_selection[entry_id] = self.population.move(entry_id, target, in_place=False)
for entry_id in selection:
pcm_log.debug('Deciding fate for firefly: %s' % str(entry_id))
if new_selection[entry_id] is not None:
pcm_log.debug('Moved to a new location %s ' % str(entry_id))
self.replace_by_other(entry_id, new_selection[entry_id], reason=None)
else:
pcm_log.debug('Promoted to new generation ')
self.pass_to_new_generation(entry_id, reason='No other firefly is more attractive') | pychemia/searcher/swarm2.py | import math
from .searcher import Searcher
from pychemia import pcm_log
class ParticleSwarm(Searcher):
def __init__(self, population, params=None, generation_size=32, stabilization_limit=10):
"""
Implementation fo the Firefly algorithm for global minimization
This searcher uses a metric to compute the attractiveness and the vector displacement
to move one firefly in the direction of another one
:param population:
:param params: (dict) Parameters to setup the Searcher
:param generation_size: (int)
:param stabilization_limit: (int)
:return:
"""
# Mandatory objects
self.population = population
# Parameters
self.gamma = None
self.elites = None
self.set_params(params)
# Constrains
self.generation_size = generation_size
self.stabilization_limit = stabilization_limit
# Initializing objects
Searcher.__init__(self, self.population, generation_size, stabilization_limit)
def set_params(self, params):
if params is None:
self.gamma = 0.1
self.elites = 3
else:
assert ('gamma' in params)
assert (params['gamma'] >= 0.0)
self.gamma = params['gamma']
if 'elites' in params:
self.elites = params['elites']
def get_params(self):
return {'gamma': self.gamma, 'elites': self.elites}
def run_one(self):
# Get a static selection of the values in the generation that are relaxed
selection = self.population.ids_sorted(self.actives_in_generation)
# Minus sign because we are searching for minima
intensity = self.population.get_values(selection)
for entry_id in intensity:
intensity[entry_id] *= -1
moves = {}
new_selection = {}
for entry_id in selection:
new_selection[entry_id] = None
# Move all the fireflies (Except the most brightness)
# as the selection is sorted it means that the first one will no move
pcm_log.debug('No Moving %d %s. Intensity: %7.3f' % (0, str(selection[0]), intensity[selection[0]]))
# The best
elites = selection[:self.elites]
for i in range(self.elites, len(selection)):
entry_id = selection[i]
pcm_log.debug('Moving %d %s. Intensity: %7.3f' % (i, str(entry_id), intensity[entry_id]))
distances = [self.population.distance(entry_id, entry_jd) for entry_jd in elites]
target = elites[distances.index(min(distances))]
distance = min(distances)
atractiveness = math.exp(-self.gamma * distance) * intensity[target]
pcm_log.debug('[%s] Distance: %7.3f. Intensity: %7.3f. Atractiveness: %7.3f' % (str(target),
distance,
intensity[target],
atractiveness))
if intensity[entry_id] < atractiveness:
new_selection[entry_id] = self.population.move(entry_id, target, in_place=False)
for entry_id in selection:
pcm_log.debug('Deciding fate for firefly: %s' % str(entry_id))
if new_selection[entry_id] is not None:
pcm_log.debug('Moved to a new location %s ' % str(entry_id))
self.replace_by_other(entry_id, new_selection[entry_id], reason=None)
else:
pcm_log.debug('Promoted to new generation ')
self.pass_to_new_generation(entry_id, reason='No other firefly is more attractive') | 0.737442 | 0.472075 |
import scipy.stats as scs
import numpy as np
from .quaternion import symplectic
def ge(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian ensemble
"""
return scs.norm().rvs(size=[int(n), int(n)])
def goe(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian orthogonal ensemble
"""
return 0.5*(ge(n)+ge(n).T)
def gse(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian symplectic ensemble
"""
return symplectic(ge(n), ge(n), ge(n), ge(n))
def gue(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian unitary ensemble
"""
ge_C = ge(n) + 1j * ge(n)
return 0.5*(ge_C+ge_C.T.conj())
def ginibre(n, complex=True):
"""
This is a common non-Hermitian ensemble
:param n: size of random matrix
:param complex: if true, complex Ginibre ensemble, otherwise real
:return: random n by n matrix, drawn from the standard Gaussian ginibre ensemble
"""
if complex:
return ge(n) * np.sqrt(1 / (2 * n)) + 1j * ge(n) * np.sqrt(1 / (2 * n))
else:
return ge(n) * np.sqrt(1 /n)
def le(n, alpha, beta=0):
"""
Draw from the Levy-stable ensemble
:param n: size of random matrix
:param alpha: parameter controlling the asymptotic power law of the distribution tails, between 0 and 2
:param beta: skew of the distribution, between -1 and 1
:return: random n by n matrix, drawn from the Levy ensemble
"""
rv = scs.levy_stable(alpha, beta)
return rv.rvs(size=[int(n), int(n)])
def disorder(random_matrix, rv, seed=None):
"""
Adds (quenched) disorder drawn from a positive distribution to the random matrix.
See PHYSICAL REVIEW E 77, 011122 (2008)
:param random_matrix: random matrix drawn from any ensemble
:param rv: positive disorder random variable
:param seed: setting this to not None quenches the disorder
:return: disordered random matrix
"""
return random_matrix/np.sqrt(rv.rvs(random_state=seed)/rv.mean()) | rmt/ensembles.py | import scipy.stats as scs
import numpy as np
from .quaternion import symplectic
def ge(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian ensemble
"""
return scs.norm().rvs(size=[int(n), int(n)])
def goe(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian orthogonal ensemble
"""
return 0.5*(ge(n)+ge(n).T)
def gse(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian symplectic ensemble
"""
return symplectic(ge(n), ge(n), ge(n), ge(n))
def gue(n):
"""
:param n: size of random matrix
:return: random n by n matrix, drawn from the standard Gaussian unitary ensemble
"""
ge_C = ge(n) + 1j * ge(n)
return 0.5*(ge_C+ge_C.T.conj())
def ginibre(n, complex=True):
"""
This is a common non-Hermitian ensemble
:param n: size of random matrix
:param complex: if true, complex Ginibre ensemble, otherwise real
:return: random n by n matrix, drawn from the standard Gaussian ginibre ensemble
"""
if complex:
return ge(n) * np.sqrt(1 / (2 * n)) + 1j * ge(n) * np.sqrt(1 / (2 * n))
else:
return ge(n) * np.sqrt(1 /n)
def le(n, alpha, beta=0):
"""
Draw from the Levy-stable ensemble
:param n: size of random matrix
:param alpha: parameter controlling the asymptotic power law of the distribution tails, between 0 and 2
:param beta: skew of the distribution, between -1 and 1
:return: random n by n matrix, drawn from the Levy ensemble
"""
rv = scs.levy_stable(alpha, beta)
return rv.rvs(size=[int(n), int(n)])
def disorder(random_matrix, rv, seed=None):
"""
Adds (quenched) disorder drawn from a positive distribution to the random matrix.
See PHYSICAL REVIEW E 77, 011122 (2008)
:param random_matrix: random matrix drawn from any ensemble
:param rv: positive disorder random variable
:param seed: setting this to not None quenches the disorder
:return: disordered random matrix
"""
return random_matrix/np.sqrt(rv.rvs(random_state=seed)/rv.mean()) | 0.765681 | 0.771198 |
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
from .middleware import *
from .uimodules import *
from .options import *
import traceback
import base64
import pprint
import linecache
class DebugBreakException(Exception):
"""Raise this to break into the debugger during an HTTP request"""
pass
def debug():
"""Used to create debug breakpoints in code"""
raise DebugBreakException()
class ErrorFrame(object):
"""Holds information about a function call in a traceback"""
def __init__(self, tback, filename, function, lineno, vars, id, pre_context, context_line, post_context, pre_context_lineno):
self.tback = tback
self.filename = filename
self.function = function
self.lineno = lineno
self.vars = vars
self.id = id
self.pre_context = pre_context
self.context_line = context_line
self.post_context = post_context
self.pre_context_lineno = pre_context_lineno
def get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
def get_lines(start, end):
return [linecache.getline(filename, l).rstrip() for l in range(start, end)]
lower_bound = max(1, lineno - context_lines)
upper_bound = lineno + context_lines
linecache.checkcache(filename)
pre_context = get_lines(lower_bound, lineno)
context_line = linecache.getline(filename, lineno).rstrip()
post_context = get_lines(lineno + 1, upper_bound)
return lower_bound, pre_context, context_line, post_context
def get_frames(tback, is_breakpoint):
"""Builds a list of ErrorFrame objects from a traceback"""
frames = []
while tback is not None:
if tback.tb_next is None and is_breakpoint:
break
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
context = tback.tb_frame.f_locals
lineno = tback.tb_lineno - 1
tback_id = id(tback)
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7)
frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno))
tback = tback.tb_next
return frames
def prettify_object(obj):
"""Makes a pretty string for an object for nice output"""
try:
return pprint.pformat(str(obj))
except UnicodeDecodeError as e:
raise
except Exception as e:
return "[could not display: <%s: %s>]" % (e.__class__.__name__, str(e)) | oz/error_pages/__init__.py |
from __future__ import absolute_import, division, print_function, with_statement, unicode_literals
from .middleware import *
from .uimodules import *
from .options import *
import traceback
import base64
import pprint
import linecache
class DebugBreakException(Exception):
"""Raise this to break into the debugger during an HTTP request"""
pass
def debug():
"""Used to create debug breakpoints in code"""
raise DebugBreakException()
class ErrorFrame(object):
"""Holds information about a function call in a traceback"""
def __init__(self, tback, filename, function, lineno, vars, id, pre_context, context_line, post_context, pre_context_lineno):
self.tback = tback
self.filename = filename
self.function = function
self.lineno = lineno
self.vars = vars
self.id = id
self.pre_context = pre_context
self.context_line = context_line
self.post_context = post_context
self.pre_context_lineno = pre_context_lineno
def get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
def get_lines(start, end):
return [linecache.getline(filename, l).rstrip() for l in range(start, end)]
lower_bound = max(1, lineno - context_lines)
upper_bound = lineno + context_lines
linecache.checkcache(filename)
pre_context = get_lines(lower_bound, lineno)
context_line = linecache.getline(filename, lineno).rstrip()
post_context = get_lines(lineno + 1, upper_bound)
return lower_bound, pre_context, context_line, post_context
def get_frames(tback, is_breakpoint):
"""Builds a list of ErrorFrame objects from a traceback"""
frames = []
while tback is not None:
if tback.tb_next is None and is_breakpoint:
break
filename = tback.tb_frame.f_code.co_filename
function = tback.tb_frame.f_code.co_name
context = tback.tb_frame.f_locals
lineno = tback.tb_lineno - 1
tback_id = id(tback)
pre_context_lineno, pre_context, context_line, post_context = get_lines_from_file(filename, lineno + 1, 7)
frames.append(ErrorFrame(tback, filename, function, lineno, context, tback_id, pre_context, context_line, post_context, pre_context_lineno))
tback = tback.tb_next
return frames
def prettify_object(obj):
"""Makes a pretty string for an object for nice output"""
try:
return pprint.pformat(str(obj))
except UnicodeDecodeError as e:
raise
except Exception as e:
return "[could not display: <%s: %s>]" % (e.__class__.__name__, str(e)) | 0.656438 | 0.048722 |
'''Sorry for this name, but i didnt know how to call it so i went random word generator,
love appeared, one thing led to another and here we are now.'''
import discord
from discord.ext import commands
import random
import json
class love:
def __init__(self, client):
self.client = client
######### SHIP #########
def pairStrenght(self, luf1, luf2):
Aww1 = luf1.id
Aww2 = luf2.id
destiny = Aww1 + Aww2
fate = hash(destiny)
return (fate%10)
def lovez(self, p1, p2, love):
if (love != 0):
if (love > 10):
if (love > 20):
if (love > 30):
if (love > 40):
if (love > 50):
if (love > 60):
if (love > 70):
if (love > 80):
if (love > 90):
if (love > 99):
if (love == 100):
lmao = "I can see your ship would be 100/100. That is a 1/100 chance so you both really are lucky. As a compensation, lemme teach you what is love: Love encompasses a variety of strong and positive emotional and mental states, ranging from the most sublime virtue or good habit, the deepest interpersonal affection and to the simplest pleasure. An example of this range of meanings is that the love of a mother differs from the love of a spouse, which differs from the love of food. Most commonly, love refers to a feeling of strong attraction and emotional attachment. Love can also be a virtue representing human kindness, compassion, and affection, as 'the unselfish loyal and benevolent concern for the good of another'."
return lmao
else:
return "Your love is inmensurable"
else:
return "`>%s love` \n Wow, thats a super duper hight score... Uhhh, I dont have anything prepared for this situations... Not yet till I learn the `pls marry` command anyway." % (love)
else:
return "`>%s love`\n Most of the people reading this would feel frustrated and jealous. Too bad for the cuse the love %s and %s share is unbreakable" % (love, p1, p2)
else:
return "`>%s love`\n As %s would say: 'Roses are red, Tulips are black. %s'd look great with a knife in their back.'" % (love, p2, p1)
else:
return "`>%s love` \n Thats a beautiful number, but more beautiful is %s, you should forget about %s and come with me :kissing_smiling_eyes: " % (love, p2, p1)
else:
return "`>%s love` \nAight, if this was a 'love exam', you would have passed, too bad it aint, so lemme do this: \n `>49.99 love` \n %s tell %s to stop crying." % (love, p1, p2)
else:
return "`>%s love` \nHeeey, that was close to 50! Maybe we can work something out, you two would look so cute together." % (love)
else:
return "`>%s love` \n Some things should never be together, be it pizza and pinneaple, be it a priest and a child, be it %s and %s." % (love, p1, p2)
else:
return "`>%s love` \n I just asked %s wife's what they thought bout %s... you better watch your back at night." % (love, p1, p2)
else:
return "`>%s love` \n This is the story of how %s and %s died alone. Sad." % (love, p1, p2)
else:
return "`>%s love`\n Thats super low and super sad, %s will watch %s as they leave with another, even better, person." % (love, p1, p2)
else:
return "`>%s love` \n Both %s and %s are really effed." % (love, p1, p2)
@commands.command(pass_context=True)
async def fakeship(self, ctx, user1 : str, user2 : str):
if (user1 == user2):
await self.client.say("That is just same as masturbating... Sad.")
else:
love = random.randint(0, 100)
msg = self.lovez(user1, user2, love)
await self.client.say(msg)
@commands.command(pass_context=True)
async def ship(self, ctx, user1 : discord.Member, user2 : discord.Member):
if (user1 == user2):
await self.client.say("That is just same as masturbating... Sad.")
else:
love = self.pairStrenght(user1, user2)
msg = self.lovez(user1, user2, love)
await self.client.say(msg)
def setup(client):
client.add_cog(love(client)) | love.py | '''Sorry for this name, but i didnt know how to call it so i went random word generator,
love appeared, one thing led to another and here we are now.'''
import discord
from discord.ext import commands
import random
import json
class love:
    """Discord cog with 'ship' commands that rate the compatibility of two users."""
    def __init__(self, client):
        # The bot instance; replies are sent through client.say().
        self.client = client
    ######### SHIP #########
    def pairStrenght(self, luf1, luf2):
        # Derive a 0-9 "love" score from the two member ids.
        # NOTE(review): lovez() maps scores up to 100, so ship() can only ever
        # hit the lowest buckets with %10 -- confirm whether %101 was intended.
        # NOTE(review): discord.py 0.x ids are strings, so hash() here is
        # randomised per interpreter run (PYTHONHASHSEED) -- verify determinism
        # actually matters to callers.
        Aww1 = luf1.id
        Aww2 = luf2.id
        destiny = Aww1 + Aww2
        fate = hash(destiny)
        return (fate%10)
    def lovez(self, p1, p2, love):
        # Map a love score onto a canned message; each nested `if` peels off
        # one bucket: 0, 1-10, 11-20, ..., 91-99, exactly 100, and >100.
        if (love != 0):
            if (love > 10):
                if (love > 20):
                    if (love > 30):
                        if (love > 40):
                            if (love > 50):
                                if (love > 60):
                                    if (love > 70):
                                        if (love > 80):
                                            if (love > 90):
                                                if (love > 99):
                                                    if (love == 100):
                                                        lmao = "I can see your ship would be 100/100. That is a 1/100 chance so you both really are lucky. As a compensation, lemme teach you what is love: Love encompasses a variety of strong and positive emotional and mental states, ranging from the most sublime virtue or good habit, the deepest interpersonal affection and to the simplest pleasure. An example of this range of meanings is that the love of a mother differs from the love of a spouse, which differs from the love of food. Most commonly, love refers to a feeling of strong attraction and emotional attachment. Love can also be a virtue representing human kindness, compassion, and affection, as 'the unselfish loyal and benevolent concern for the good of another'."
                                                        return lmao
                                                    else:
                                                        # Only reachable for scores above 100.
                                                        return "Your love is inmensurable"
                                                else:
                                                    return "`>%s love` \n Wow, thats a super duper hight score... Uhhh, I dont have anything prepared for this situations... Not yet till I learn the `pls marry` command anyway." % (love)
                                            else:
                                                return "`>%s love`\n Most of the people reading this would feel frustrated and jealous. Too bad for the cuse the love %s and %s share is unbreakable" % (love, p1, p2)
                                        else:
                                            return "`>%s love`\n As %s would say: 'Roses are red, Tulips are black. %s'd look great with a knife in their back.'" % (love, p2, p1)
                                    else:
                                        return "`>%s love` \n Thats a beautiful number, but more beautiful is %s, you should forget about %s and come with me :kissing_smiling_eyes: " % (love, p2, p1)
                                else:
                                    return "`>%s love` \nAight, if this was a 'love exam', you would have passed, too bad it aint, so lemme do this: \n `>49.99 love` \n %s tell %s to stop crying." % (love, p1, p2)
                            else:
                                return "`>%s love` \nHeeey, that was close to 50! Maybe we can work something out, you two would look so cute together." % (love)
                        else:
                            return "`>%s love` \n Some things should never be together, be it pizza and pinneaple, be it a priest and a child, be it %s and %s." % (love, p1, p2)
                    else:
                        return "`>%s love` \n I just asked %s wife's what they thought bout %s... you better watch your back at night." % (love, p1, p2)
                else:
                    return "`>%s love` \n This is the story of how %s and %s died alone. Sad." % (love, p1, p2)
            else:
                return "`>%s love`\n Thats super low and super sad, %s will watch %s as they leave with another, even better, person." % (love, p1, p2)
        else:
            return "`>%s love` \n Both %s and %s are really effed." % (love, p1, p2)
    @commands.command(pass_context=True)
    async def fakeship(self, ctx, user1 : str, user2 : str):
        # Ship two arbitrary strings with a random 0-100 score.
        if (user1 == user2):
            await self.client.say("That is just same as masturbating... Sad.")
        else:
            love = random.randint(0, 100)
            msg = self.lovez(user1, user2, love)
            await self.client.say(msg)
    @commands.command(pass_context=True)
    async def ship(self, ctx, user1 : discord.Member, user2 : discord.Member):
        # Ship two members using the id-derived score from pairStrenght().
        if (user1 == user2):
            await self.client.say("That is just same as masturbating... Sad.")
        else:
            love = self.pairStrenght(user1, user2)
            msg = self.lovez(user1, user2, love)
            await self.client.say(msg)
def setup(client):
client.add_cog(love(client)) | 0.138171 | 0.137446 |
import sys
sys.path.append("../svg")
from geometry import GeometryLoss
import numpy as np
import pygame as pg
import torch
import pydiffvg
import tkinter as tk
from tkinter import filedialog
def box_kernel(val):
    """Hard-edged brush falloff: 1 inside the unit radius, 0 at and beyond it."""
    # heaviside(1 - val, 0) is 1 for val < 1 and 0 for val >= 1.
    return np.heaviside(1 - val, 0)
def cone_kernel(val):
    """Linear brush falloff: 1 at the centre, fading to 0 at the unit radius."""
    ramp = 1 - val
    return np.maximum(ramp, 0)
def nptosurf(arr):
    """Convert a float array in [0, 1] (H x W x 1 or H x W x 3) to a pygame surface."""
    rows, cols, channels = arr.shape
    if channels == 1:
        # Greyscale: replicate the single channel across the three RGB planes.
        arr = np.broadcast_to(arr, (rows, cols, 3))
    return pg.surfarray.make_surface(arr * 255)
def brush_tensor(screen_size, coords, radius, kernel):
    """Evaluate a radial brush kernel over a screen_size grid centred at coords.

    Returns a float32 torch tensor holding kernel(distance / radius) per pixel.
    """
    cx, cy = coords
    # Broadcastable row/column coordinate vectors for the pixel grid.
    rows = np.linspace(0, screen_size[0] - 1, screen_size[0])[:, None]
    cols = np.linspace(0, screen_size[1] - 1, screen_size[1])[None, :]
    dist = np.sqrt((rows - cx) ** 2 + (cols - cy) ** 2)
    weights = kernel(dist / radius)
    return torch.tensor(weights, requires_grad=False, dtype=torch.float32)
def checkerboard(shape, square_size=2):
    """Build a white/grey checkerboard of shape[0] x shape[1] x 3 as float32 torch tensor."""
    # Parity of (row square index + column square index) picks the colour.
    row_idx = np.arange(shape[0]) // square_size
    col_idx = np.arange(shape[1]) // square_size
    parity = (np.add.outer(row_idx, col_idx) % 2)[:, :, None]
    # parity 1 -> white (1.0), parity 0 -> light grey (0.75), per channel.
    board = parity * np.ones(3) + (1 - parity) * np.full(3, 0.75)
    return torch.tensor(board, requires_grad=False, dtype=torch.float32)
def render(optim, viewport):
    """Rasterise the optimiser's current scene at viewport = (width, height)."""
    scene_args = pydiffvg.RenderFunction.serialize_scene(*optim.build_scene())
    # Bind the autograd render op under a non-shadowing name.
    render_fn = pydiffvg.RenderFunction.apply
    return render_fn(viewport[0],   # width
                     viewport[1],   # height
                     2,             # num_samples_x
                     2,             # num_samples_y
                     0,             # seed
                     None,          # background image
                     *scene_args)
def optimize(optim, viewport, brush_kernel, increase=True, strength=0.1):
    """Take one optimisation step pushing alpha coverage up (or down) under the brush.

    Relies on the module-level geometryLosses list populated in main().
    Returns a fresh render of the scene after the step.
    """
    optim.zero_grad()
    # Geometry regularisation: one loss term per shape in the scene.
    geom_loss = torch.tensor(0.)
    for shape, shape_loss in zip(optim.scene[2], geometryLosses):
        geom_loss = geom_loss + shape_loss.compute(shape)
    alpha = render(optim, viewport)[:, :, 3]
    weighted = alpha * brush_kernel
    # Raising coverage minimises (1 - alpha) under the brush; erasing minimises alpha.
    brush_loss = (1 - weighted).mean() if increase else weighted.mean()
    total = brush_loss * strength + geom_loss
    total.backward()
    optim.step()
    return render(optim, viewport)
def get_infile():
    """Pop a file dialog and return the chosen SVG path (empty string if cancelled)."""
    pydiffvg.set_use_gpu(False)
    root = tk.Tk()
    #root.withdraw()
    file_types = (("SVG files", "*.svg"), ("all files", "*.*"))
    chosen = filedialog.askopenfilename(initialdir=".",
                                        title="Select graphic to optimize",
                                        filetypes=file_types)
    root.destroy()
    return chosen
def compositebg(img):
    """Alpha-composite an RGBA tensor over a checkerboard backdrop; returns RGB."""
    backdrop = checkerboard(img.shape, 2)
    rgb = img[:, :, 0:3]
    a = img[:, :, 3].unsqueeze(2)
    # Standard "over" blend: a * foreground + (1 - a) * background.
    return a * rgb + (1 - a) * backdrop
def main():
    """Interactive brush: left-drag grows alpha coverage, right-drag erases,
    mouse wheel resizes the brush."""
    infile=get_infile()
    # Only shape geometry is optimised; colour and transforms stay fixed.
    settings=pydiffvg.SvgOptimizationSettings()
    settings.global_override(["optimize_color"],False)
    settings.global_override(["transforms","optimize_transforms"], False)
    settings.global_override(["optimizer"], "SGD")
    settings.global_override(["paths","shape_lr"], 1e-1)
    optim=pydiffvg.OptimizableSvg(infile,settings)
    # One geometry regulariser per shape; optimize() reads this global.
    global geometryLosses
    geometryLosses = []
    for shape in optim.build_scene()[2]:
        geometryLosses.append(GeometryLoss(shape))
    scaling=1
    brush_radius=100
    graphic_size=optim.canvas
    # pygame window size is (width, height) = (canvas cols, canvas rows).
    screen_size=(graphic_size[1]*scaling, graphic_size[0]*scaling)
    pg.init()
    screen=pg.display.set_mode(screen_size)
    screen.fill((255,255,255))
    img=render(optim,graphic_size)
    print(img.max())
    # permute(1,0,2): the tensor is row-major, pygame surfaces index (x, y).
    npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size)
    screen.blit(npsurf,(0,0))
    pg.display.update()
    clock=pg.time.Clock()
    z=0    # 1 while a paint button is held down
    btn=0  # button that started the drag: 1 = grow coverage, 3 = erase
    while True:
        clock.tick(60)
        for event in pg.event.get():
            if event.type==pg.QUIT:
                pg.quit()
                sys.exit()
            # Swap to (row, col): x indexes tensor rows, y tensor columns.
            y, x = pg.mouse.get_pos()
            if event.type == pg.MOUSEBUTTONDOWN:
                if event.button in [1,3]:
                    z=1
                    btn=event.button
                elif event.button == 4:
                    # wheel up: enlarge the brush
                    brush_radius*=1.1
                elif event.button == 5:
                    # wheel down: shrink the brush, clamped at 5 px
                    brush_radius/=1.1
                    brush_radius=max(brush_radius,5)
            elif event.type == pg.MOUSEBUTTONUP:
                if event.button in [1,3]:
                    z=0
        if z==1:
            # One optimisation step per frame while a button is held.
            brush=brush_tensor((graphic_size[0],graphic_size[1]), (x/scaling, y/scaling), brush_radius, box_kernel)
            img=optimize(optim,graphic_size,brush,btn==1)
            npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size)
            screen.blit(npsurf,(0,0))
            pg.draw.circle(screen, (255,255,255), (y,x), int(brush_radius*scaling), 1)
        pg.display.update()
if __name__ == '__main__':
main() | apps/svg_brush.py | import sys
sys.path.append("../svg")
from geometry import GeometryLoss
import numpy as np
import pygame as pg
import torch
import pydiffvg
import tkinter as tk
from tkinter import filedialog
def box_kernel(val):
return np.heaviside(-val+1,0)
def cone_kernel(val):
return np.maximum(0,1-val)
def nptosurf(arr):
if arr.shape[2]==1:
#greyscale
shape=arr.shape
shape=(shape[0],shape[1],3)
arr=np.broadcast_to(arr,shape)
return pg.surfarray.make_surface(arr*255)
def brush_tensor(screen_size,coords,radius,kernel):
coordarr=np.stack(np.meshgrid(np.linspace(0,screen_size[0]-1,screen_size[0]),np.linspace(0,screen_size[1]-1,screen_size[1]),indexing='ij'),axis=2)
ctrarr = np.reshape(np.array(coords), [1, 1, 2])
distarr=np.sqrt(np.sum(np.power(coordarr-ctrarr,2),axis=2))
valarr=kernel(distarr/radius)
return torch.tensor(valarr,requires_grad=False,dtype=torch.float32)
def checkerboard(shape, square_size=2):
xv,yv=np.meshgrid(np.floor(np.linspace(0,shape[1]-1,shape[1])/square_size),np.floor(np.linspace(0,shape[0]-1,shape[0])/square_size))
bin=np.expand_dims(((xv+yv)%2),axis=2)
res=bin*np.array([[[1., 1., 1.,]]])+(1-bin)*np.array([[[.75, .75, .75,]]])
return torch.tensor(res,requires_grad=False,dtype=torch.float32)
def render(optim, viewport):
scene_args = pydiffvg.RenderFunction.serialize_scene(*optim.build_scene())
render = pydiffvg.RenderFunction.apply
img = render(viewport[0], # width
viewport[1], # height
2, # num_samples_x
2, # num_samples_y
0, # seed
None,
*scene_args)
return img
def optimize(optim, viewport, brush_kernel, increase=True, strength=0.1):
optim.zero_grad()
geomLoss=torch.tensor(0.)
for shape, gloss in zip(optim.scene[2],geometryLosses):
geomLoss+=gloss.compute(shape)
img=render(optim,viewport)
imalpha=img[:,:,3]
multiplied=imalpha*brush_kernel
loss=((1-multiplied).mean() if increase else multiplied.mean())*strength
loss+=geomLoss
loss.backward()
optim.step()
return render(optim,viewport)
def get_infile():
pydiffvg.set_use_gpu(False)
root = tk.Tk()
#root.withdraw()
file_path = filedialog.askopenfilename(initialdir = ".",title = "Select graphic to optimize",filetypes = (("SVG files","*.svg"),("all files","*.*")))
root.destroy()
return file_path
def compositebg(img):
bg=checkerboard(img.shape,2)
color=img[:,:,0:3]
alpha=img[:,:,3]
composite=alpha.unsqueeze(2)*color+(1-alpha).unsqueeze(2)*bg
return composite
def main():
infile=get_infile()
settings=pydiffvg.SvgOptimizationSettings()
settings.global_override(["optimize_color"],False)
settings.global_override(["transforms","optimize_transforms"], False)
settings.global_override(["optimizer"], "SGD")
settings.global_override(["paths","shape_lr"], 1e-1)
optim=pydiffvg.OptimizableSvg(infile,settings)
global geometryLosses
geometryLosses = []
for shape in optim.build_scene()[2]:
geometryLosses.append(GeometryLoss(shape))
scaling=1
brush_radius=100
graphic_size=optim.canvas
screen_size=(graphic_size[1]*scaling, graphic_size[0]*scaling)
pg.init()
screen=pg.display.set_mode(screen_size)
screen.fill((255,255,255))
img=render(optim,graphic_size)
print(img.max())
npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size)
screen.blit(npsurf,(0,0))
pg.display.update()
clock=pg.time.Clock()
z=0
btn=0
while True:
clock.tick(60)
for event in pg.event.get():
if event.type==pg.QUIT:
pg.quit()
sys.exit()
y, x = pg.mouse.get_pos()
if event.type == pg.MOUSEBUTTONDOWN:
if event.button in [1,3]:
z=1
btn=event.button
elif event.button == 4:
brush_radius*=1.1
elif event.button == 5:
brush_radius/=1.1
brush_radius=max(brush_radius,5)
elif event.type == pg.MOUSEBUTTONUP:
if event.button in [1,3]:
z=0
if z==1:
brush=brush_tensor((graphic_size[0],graphic_size[1]), (x/scaling, y/scaling), brush_radius, box_kernel)
img=optimize(optim,graphic_size,brush,btn==1)
npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size)
screen.blit(npsurf,(0,0))
pg.draw.circle(screen, (255,255,255), (y,x), int(brush_radius*scaling), 1)
pg.display.update()
if __name__ == '__main__':
main() | 0.325521 | 0.311348 |
'''Advent of Code 2018 Day 6 solution'''
from typing import Tuple, List
import numpy
Coords = Tuple[int, ...]
def taxicabdistance(a: Coords, b: Coords) -> int:
    '''Calculate Taxi Cab (Manhattan) distance between two pairs of coordinates'''
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return abs(dx) + abs(dy)
def runsolution(inputs: List[Coords], threshold: int) -> Tuple[int, int]:
    '''Solve both parts of AoC 2018 day 6.

    Part 1: size of the largest finite region of grid cells closest to a
    single input coordinate (ties count for nobody; regions reaching the
    border are infinite and excluded).
    Part 2: number of cells whose summed distance to all inputs is below
    `threshold`.

    Returns (largest finite area, size of the safe region).
    '''
    minx = min(z[0] for z in inputs)
    miny = min(z[1] for z in inputs)
    maxx = max(z[0] for z in inputs)
    maxy = max(z[1] for z in inputs)
    total = numpy.zeros(len(inputs), dtype=int)
    totalsafe = 0
    # Loop through the bounding box of the inputs.  The Manhattan distance is
    # inlined here: it runs once per (cell, point) pair, so the call overhead
    # of a helper is measurable.
    for x in range(minx, maxx+1):
        for y in range(miny, maxy+1):
            distances = [abs(z[0] - x) + abs(z[1] - y) for z in inputs]
            closest = min(distances)
            # Count the cell only when exactly one point is nearest.
            # (min + count replaces the old sorted()[0] != sorted()[1] check:
            # O(n) instead of O(n log n) per cell, and it no longer raises
            # IndexError for a single-point input.)
            if distances.count(closest) == 1:
                total[distances.index(closest)] += 1
            # Part 2: cells whose total distance stays under the threshold.
            if sum(distances) < threshold:
                totalsafe += 1
    # Sample a ring well outside the bounding box; any point that is closest
    # somewhere out there owns an infinite region and must be discarded.
    infinites = set()
    for x in range(minx - 25, maxx + 25):
        for yedge in (miny - 25, maxy + 25):
            distances = [abs(z[0] - x) + abs(z[1] - yedge) for z in inputs]
            infinites.add(distances.index(min(distances)))
    for y in range(miny - 25, maxy + 25):
        for xedge in (minx - 25, maxx + 25):
            distances = [abs(z[0] - xedge) + abs(z[1] - y) for z in inputs]
            infinites.add(distances.index(min(distances)))
    # Strip out the infinite regions from the result.
    for i in infinites:
        total[i] = 0
    # Return coordinate with highest score, and size of safe area.
    return (max(total), totalsafe)
def run() -> Tuple[int, int]:
    '''Main'''
    # Parse one "x, y" coordinate pair per input line.
    with open('inputs/day06.txt', 'r') as f:
        inputs: List[Coords] = [
            tuple(int(part) for part in line.rstrip("\n").split(', '))
            for line in f
        ]
    # Solve the problem
    return runsolution(inputs, 10000)
if __name__ == '__main__':
print(run()) | aoc2018/day06.py | '''Advent of Code 2018 Day 6 solution'''
from typing import Tuple, List
import numpy
Coords = Tuple[int, ...]
def taxicabdistance(a: Coords, b: Coords) -> int:
'''Calculate Taxi Cab (Manhattan) distance between two pairs of coordinates'''
return abs(a[0] - b[0]) + abs(a[1] - b[1])
def runsolution(inputs: List[Coords], threshold: int) -> Tuple[int, int]:
'''Solve both parts'''
minx = min([z[0] for z in inputs])
miny = min([z[1] for z in inputs])
maxx = max([z[0] for z in inputs])
maxy = max([z[1] for z in inputs])
total = numpy.zeros(len(inputs), dtype=int)
totalsafe = 0
# Loop through the grid
for x in range(minx, maxx+1):
for y in range(miny, maxy+1):
# Get distances to all other points
distances = [taxicabdistance(z, (x, y)) for z in inputs]
d = sorted(distances)
# If there isn't a tie for the closest point, add one to the count for the closest
if d[0] != d[1]:
total[distances.index(d[0])] += 1
# Keep track of the number of points satisfying part 2. (Sum of distances below
# threshold)
if sum(distances) < threshold:
totalsafe += 1
# Go round the edge of the grid, any closest points we find have infinite coverage and should
# be ignored
infinites = set()
for x in range(minx - 25, maxx + 25):
distances = [taxicabdistance(z, (x, miny-25)) for z in inputs]
infinites.add(distances.index(min(distances)))
distances = [taxicabdistance(z, (x, maxy+25)) for z in inputs]
infinites.add(distances.index(min(distances)))
for y in range(miny - 25, maxy + 25):
distances = [taxicabdistance(z, (minx-25, y)) for z in inputs]
infinites.add(distances.index(min(distances)))
distances = [taxicabdistance(z, (maxx+25, y)) for z in inputs]
infinites.add(distances.index(min(distances)))
# Strip out the infinite coordinates from the result
for i in infinites:
total[i] = 0
# Return coordinate with highest score, and size of safe area.
return (max(total), totalsafe)
def run() -> Tuple[int, int]:
'''Main'''
# Read input data
with open('inputs/day06.txt', 'r') as f:
inputs: List[Coords] = [tuple(map(int, line.rstrip("\n").split(', '))) for line in f]
# Solve the problem
return runsolution(inputs, 10000)
if __name__ == '__main__':
print(run()) | 0.746509 | 0.656335 |
import numpy as np
from functools import partial
import multiprocessing as mp
from multiprocessing import Pool, Value, Array
class Object3D:
    """Base class for voxelisable 3-D primitives.

    Subclasses provide point_in_object(x, y, z), evaluated in the shape's
    local (unrotated, centred) frame, and set up:
      * self.Object_Parameter -- dict with "radius_dim" (half-extents),
        "pos", "color" and "rotation" (z-x-z Euler angles in degrees);
      * self.box -- a scratch voxel volume sized via calc_dims().
    """
    # Running count of live instances across all subclasses.
    Object_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1

    def __init__(self):
        Object3D.Object_Counter += 1

    def trilinear_int(self, x, y, z):
        """Return the 8 trilinear interpolation weights for point (x, y, z).

        The weights sum to 1 and are ordered to pair with the corner list from
        points(): the first weight belongs to corner (xp1, yp1, zp1).
        """
        # Fractional distances to the lower/upper integer grid planes.
        x1 = x - int(x)
        x2 = int(x) + 1 - x
        if x < 0:
            # int() truncates toward zero, so rebuild the fractions for x < 0.
            x2 = -1 * x1
            x1 = 1 + x1
        y1 = y - int(y)
        y2 = int(y) + 1 - y
        if y < 0:
            y2 = -1 * y1
            y1 = 1 + y1
        z1 = z - int(z)
        z2 = int(z) + 1 - z
        if z < 0:
            z2 = -1 * z1
            z1 = 1 + z1
        px1y1z1 = x1 * y1 * z1
        px1y2z1 = x1 * y2 * z1
        px1y1z2 = x1 * y1 * z2
        px1y2z2 = x1 * y2 * z2
        px2y1z1 = x2 * y1 * z1
        px2y2z1 = x2 * y2 * z1
        px2y1z2 = x2 * y1 * z2
        px2y2z2 = x2 * y2 * z2
        return px2y2z2, px2y1z2, px2y2z1, px2y1z1, px1y2z2, px1y1z2, px1y2z1, px1y1z1

    def points(self, x, y, z):
        """Return the 8 integer grid corners of the cell containing (x, y, z)."""
        xp1 = int(x)
        xp2 = int(x + 1)
        if x < 0:
            # Compensate for int() truncating toward zero on negatives.
            xp1 = int(x - 1)
            xp2 = int(x)
        yp1 = int(y)
        yp2 = int(y + 1)
        if y < 0:
            yp1 = int(y - 1)
            yp2 = int(y)
        zp1 = int(z)
        zp2 = int(z + 1)
        if z < 0:
            zp1 = int(z - 1)
            zp2 = int(z)
        p1 = np.array([xp1, yp1, zp1])
        p2 = np.array([xp1, yp2, zp1])
        p3 = np.array([xp1, yp1, zp2])
        p4 = np.array([xp1, yp2, zp2])
        p5 = np.array([xp2, yp1, zp1])
        p6 = np.array([xp2, yp2, zp1])
        p7 = np.array([xp2, yp1, zp2])
        p8 = np.array([xp2, yp2, zp2])
        return [p1, p2, p3, p4, p5, p6, p7, p8]

    def rotation(self, x, y, z):
        """Apply the inverse of the configured z-x-z Euler rotation to (x, y, z)."""
        tz, tx, tz2 = np.deg2rad(self.Object_Parameter["rotation"])
        Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])
        Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
        Rz2 = np.array([[np.cos(tz2), -np.sin(tz2), 0], [np.sin(tz2), np.cos(tz2), 0], [0, 0, 1]])
        rot_mat = np.linalg.inv(np.dot(np.dot(Rz2, Rx), Rz))
        x, y, z = np.dot(rot_mat, [x, y, z])
        return x, y, z

    def rotation_inv(self, x, y, z):
        """Apply the configured z-x-z Euler rotation Rz2 @ Rx @ Rz to (x, y, z)."""
        tz, tx, tz2 = np.deg2rad(self.Object_Parameter["rotation"])
        Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0, 0, 1]])
        Rx = np.array([[1, 0, 0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
        Rz2 = np.array([[np.cos(tz2), -np.sin(tz2), 0], [np.sin(tz2), np.cos(tz2), 0], [0, 0, 1]])
        rot_mat = np.dot(np.dot(Rz2, Rx), Rz)
        x, y, z = np.dot(rot_mat, [x, y, z])
        return x, y, z

    def volume_old(self):
        """Legacy rasteriser splatting point_in_object hits into self.box with
        trilinear weights.  Superseded by volumepar()."""
        for index in np.ndenumerate(self.box):
            x, y, z = self.rotation_inv(index[0][0], index[0][1], index[0][2])
            # BUG FIX: these three calls used self.point(), which does not
            # exist; the corner helper is named points().
            point_list = self.points(x, y, z)
            for pos in point_list:
                x1, y1, z1 = pos
                if self.point_in_object(x1, y1, z1):
                    x1r, y1r, z1r = self.rotation(x1, y1, z1)
                    point_listr = self.points(x1r, y1r, z1r)
                    intpols = self.trilinear_int(x1r, y1r, z1r)
                    for posr, intpol in zip(point_listr, intpols):
                        # NOTE(review): posr is an np.array, so self.box[posr]
                        # indexes axis 0 three times rather than one voxel --
                        # tuple(posr) looks intended; preserved as-is since
                        # this method is unused.
                        self.box[posr] = self.box[posr] + self.Object_Parameter["color"] * intpol

    def vol_parallel(self, ind, v):
        """Pool worker: return the voxel index if it lies inside the object.

        ind is an np.ndenumerate item; v is self.box.shape.  The voxel is
        re-centred on the box middle and rotated into the local frame before
        the inside test.  Returns None for outside voxels.
        """
        x1, y1, z1 = ind[0][0] - v[0] / 2, ind[0][1] - v[1] / 2, ind[0][2] - v[2] / 2
        x, y, z = self.rotation_inv(x1, y1, z1)
        if self.point_in_object(x, y, z):
            return [ind[0][0], ind[0][1], ind[0][2]]

    def volumepar(self):
        """Fill self.box in parallel: every inside voxel gets the object colour.

        Returns the filled box.
        """
        self.vol_parallelp = partial(self.vol_parallel, v=self.box.shape)
        with mp.Pool(processes=30) as pool:
            L = pool.map(self.vol_parallelp, [index for index in np.ndenumerate(self.box)])
        # Drop the None entries produced by voxels outside the object.
        L1 = [i for i in list(L) if i is not None]
        for i in L1:
            self.box[i[0], i[1], i[2]] = self.Object_Parameter["color"]
        return self.box

    def place_object_involume(self, volume, overwrite=False):
        """Stamp this object into `volume`, centred on Object_Parameter["pos"].

        Only acts when overwrite is True; always returns `volume`.
        """
        if overwrite is True:
            self.vol_parallelp = partial(self.vol_parallel, v=self.box.shape)
            with mp.Pool(processes=30) as pool:
                L = pool.map(self.vol_parallelp, [index for index in np.ndenumerate(self.box)])
            L1 = [i for i in list(L) if i is not None]
            for i in L1:
                # Translate box-local indices into the target volume.
                self.pos1 = i[0] + self.Object_Parameter["pos"][0] - int(self.box.shape[0] / 2)
                self.pos2 = i[1] + self.Object_Parameter["pos"][1] - int(self.box.shape[1] / 2)
                self.pos3 = i[2] + self.Object_Parameter["pos"][2] - int(self.box.shape[2] / 2)
                volume[self.pos1, self.pos2, self.pos3] = self.Object_Parameter["color"]
        return volume

    def calc_dims(self):
        """Compute bounding dimensions for the rotated object.

        Returns [tight_dims, loose_dims]: tight_dims is twice the maximum
        rotated corner coordinate per axis; loose_dims is a cube sized by the
        corner diagonal length.
        """
        rd = self.Object_Parameter["radius_dim"]
        # NOTE(review): the doubled x term in this diagonal looks asymmetric;
        # preserved as-is -- confirm whether sqrt(rd0^2+rd1^2+rd2^2) was meant.
        l_max = np.sqrt((2 * rd[0]) ** 2 + rd[1] ** 2 + rd[2] ** 2)
        # All 8 corners of the unrotated half-extent box, then rotate each.
        corners = [np.array([sx * rd[0], sy * rd[1], sz * rd[2]])
                   for sx in (1, -1) for sy in (1, -1) for sz in (1, -1)]
        rotated = [self.rotation(*corner) for corner in corners]
        max_x = max(c[0] for c in rotated) + 1
        # BUG FIX: the y maximum previously read the x component of the last
        # corner (e8r[0]) instead of its y component.
        max_y = max(c[1] for c in rotated) + 1
        max_z = max(c[2] for c in rotated) + 1
        return [(int(max_x) * 2, int(max_y) * 2, int(max_z) * 2),
                (int(l_max * 2), int(l_max * 2), int(l_max * 2))]
class Sphere(Object3D):
    """Axis-scaled sphere primitive (radius_dim gives the semi-axes)."""
    # Running count of live Sphere instances.
    Sphere_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        Sphere.Sphere_Counter -= 1

    def __init__(self, name, radius_dim=(1,1,1), pos=(0,0,0), color=100, rotation=(0,0,0)):
        self.name = name
        Object3D.Object_Counter += 1
        Sphere.Sphere_Counter += 1
        self.Object_Parameter = {"objecttype":"sphere", "radius_dim": radius_dim, "pos":pos, "color": color, "rotation":rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """True when (x, y, z) satisfies x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1."""
        a, b, c = self.Object_Parameter["radius_dim"]
        return x ** 2 / a ** 2 + y ** 2 / b ** 2 + z ** 2 / c ** 2 <= 1
class Ellipsoid(Object3D):
    """Ellipsoid primitive with semi-axes given by radius_dim."""
    # Running count of live Ellipsoid instances.
    Ellipsoid_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        # BUG FIX: was Sphere.Ellipsoid_Counter, an attribute Sphere never had.
        Ellipsoid.Ellipsoid_Counter -= 1

    def __init__(self, name, radius_dim=(1,1,1), pos=(0,0,0), color=100, rotation=(0,0,0)):
        self.name = name
        Object3D.Object_Counter += 1
        # BUG FIX: incrementing Sphere.Ellipsoid_Counter raised AttributeError
        # on every construction; use this class's own counter.
        Ellipsoid.Ellipsoid_Counter += 1
        # NOTE(review): objecttype is still labelled "sphere" as in the
        # original; confirm downstream expectations before renaming it.
        self.Object_Parameter = {"objecttype":"sphere", "radius_dim": radius_dim, "pos":pos, "color": color, "rotation":rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """True when (x, y, z) satisfies x^2/a^2 + y^2/b^2 + z^2/c^2 <= 1."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        x_prim = (x)**2/(xoff**2)
        y_prim = (y)**2/(yoff**2)
        z_prim = (z)**2/(zoff**2)
        return x_prim + y_prim + z_prim <= 1
class Square(Object3D):
    """Axis-aligned box primitive; radius_dim holds the half-extents."""
    # Running count of live Square instances.
    Square_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        Square.Square_Counter -= 1

    def __init__(self, name, radius_dim=(10,10,10), pos=(0,0,0), color=100, rotation=(0,0,0)):
        self.name = name
        Object3D.Object_Counter += 1
        Square.Square_Counter += 1
        self.Object_Parameter = {"objecttype":"square", "radius_dim": radius_dim, "pos":pos, "color": color, "rotation":rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """True when each squared coordinate is within the squared half-extent."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        return x ** 2 <= xoff ** 2 and y ** 2 <= yoff ** 2 and z ** 2 <= zoff ** 2
class Box(Object3D):
    """Axis-aligned box primitive; radius_dim holds the half-extents."""
    # Running count of live Box instances.
    Box_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        Box.Box_Counter -= 1

    def __init__(self, name, radius_dim=(10,10,10), pos=(0,0,0), color=100, rotation=(0,0,0)):
        self.name = name
        Object3D.Object_Counter += 1
        Box.Box_Counter += 1
        self.Object_Parameter = {"objecttype":"box", "radius_dim": radius_dim, "pos":pos, "color": color, "rotation":rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """True when each squared coordinate is within the squared half-extent."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        return x ** 2 <= xoff ** 2 and y ** 2 <= yoff ** 2 and z ** 2 <= zoff ** 2
class Tubus(Object3D):
    """Elliptical cylinder along the local z axis.

    radius_dim = (x radius, y radius, half-length along z).
    """
    # Running count of live Tubus instances.
    Tubus_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        Tubus.Tubus_Counter -= 1

    def __init__(self, name, radius_dim=(10,10,10), pos=(0,0,0), color=100, rotation=(0,0,0)):
        self.name = name
        Object3D.Object_Counter += 1
        # BUG FIX: this previously incremented Box.Box_Counter, so the Tubus
        # counter was never raised while __del__ still decremented it (and
        # Box's count was corrupted).
        Tubus.Tubus_Counter += 1
        self.Object_Parameter = {"objecttype":"tubus", "radius_dim": radius_dim, "pos":pos, "color": color, "rotation":rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """Inside when (x, y) lies within the elliptical cross-section and
        |z| is within the half-length."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        x_prim = (x)**2/(xoff**2)
        y_prim = (y)**2/(yoff**2)
        z_prim = (z)**2
        return x_prim + y_prim <= 1 and z_prim <= zoff**2
class Helix(Object3D):
    # TODO: placeholder -- needs an __init__ and point_in_object implementation.
    pass
class Cone(Object3D):
    # TODO: placeholder -- not implemented yet.
    pass
class Pyramide3(Object3D):
    # TODO: placeholder for a three-sided pyramid -- not implemented yet.
    pass
class Pyramide4(Object3D):
    # TODO: placeholder for a four-sided pyramid -- not implemented yet.
    pass
if __name__=='__main__':
volume = np.zeros((150,150,150))
tb1 = Tubus('tubus1', radius_dim = (7,7,20), pos=(75,75,20), color = 255, rotation = (0,0,0))
tb2 = Tubus('tubus2', radius_dim = (7,7,20), pos=(65,75,55),color = 255, rotation = (90,135,0))
tb3 = Tubus('tubus3', radius_dim = (7,7,20), pos=(85,75,55),color = 255, rotation = (90,225,0))
tb4 = Tubus('tubus1', radius_dim = (7,7,20), pos=(50,75,85), color = 255, rotation = (0,0,0))
tb5 = Tubus('tubus2', radius_dim = (7,7,20), pos=(40,75,120),color = 255, rotation = (90,135,0))
tb6 = Tubus('tubus3', radius_dim = (7,7,20), pos=(60,75,120),color = 255, rotation = (90,225,0))
tb7 = Tubus('tubus1', radius_dim = (7,7,20), pos=(100,75,85), color = 255, rotation = (0,0,0))
tb8 = Tubus('tubus2', radius_dim = (7,7,20), pos=(90,75,120),color = 255, rotation = (90,135,0))
tb9 = Tubus('tubus3', radius_dim = (7,7,20), pos=(110,75,120),color = 255, rotation = (90,225,0))
"""
tb1i = Tubus('tubus1', radius_dim = (3,3,20), pos=(75,75,20), color = 0, rotation = (0,0,0))
tb2i = Tubus('tubus2', radius_dim = (3,3,20), pos=(60,75,55),color = 0, rotation = (90,135,0))
tb3i = Tubus('tubus3', radius_dim = (3,3,20), pos=(90,75,55),color = 0, rotation = (90,225,0))
tb4i = Tubus('tubus1', radius_dim = (3,3,20), pos=(45,75,85), color = 0, rotation = (0,0,0))
tb5i = Tubus('tubus2', radius_dim = (3,3,20), pos=(30,75,120),color = 0, rotation = (90,135,0))
tb6i = Tubus('tubus3', radius_dim = (3,3,20), pos=(60,75,120),color = 0, rotation = (90,225,0))
tb7i = Tubus('tubus1', radius_dim = (3,3,20), pos=(105,75,85), color = 0, rotation = (0,0,0))
tb8i = Tubus('tubus2', radius_dim = (3,3,20), pos=(90,75,120),color = 0, rotation = (90,135,0))
tb9i = Tubus('tubus3', radius_dim = (3,3,20), pos=(120,75,120),color = 0, rotation = (90,225,0))
"""
volume = tb1.place_object_involume(volume, overwrite = True)
volume = tb2.place_object_involume(volume, overwrite = True)
volume = tb3.place_object_involume(volume, overwrite = True)
volume = tb4.place_object_involume(volume, overwrite = True)
volume = tb5.place_object_involume(volume, overwrite = True)
volume = tb6.place_object_involume(volume, overwrite = True)
volume = tb7.place_object_involume(volume, overwrite = True)
volume = tb8.place_object_involume(volume, overwrite = True)
volume = tb9.place_object_involume(volume, overwrite = True)
"""
volume = tb1i.place_object_involume(volume, overwrite = True)
volume = tb2i.place_object_involume(volume, overwrite = True)
volume = tb3i.place_object_involume(volume, overwrite = True)
volume = tb4i.place_object_involume(volume, overwrite = True)
volume = tb5i.place_object_involume(volume, overwrite = True)
volume = tb6i.place_object_involume(volume, overwrite = True)
volume = tb7i.place_object_involume(volume, overwrite = True)
volume = tb8i.place_object_involume(volume, overwrite = True)
volume = tb9i.place_object_involume(volume, overwrite = True)
"""
import tifffile as tiff
image = volume
#image = np.uint8(volume)
tiff.imsave('volume_test_tuben.tif',np.float32(image)) | Object3D_Class.py | import numpy as np
from functools import partial
import multiprocessing as mp
from multiprocessing import Pool, Value, Array
class Object3D:
Object_Counter = 0
def __del__(self):
Object3D.Object_Counter -= 1
def __init__(self):
Object3D.Object_Counter +=1
def trilinear_int(self,x,y,z):
x1 = x - int(x)
x2 = int(x) + 1 -x
if x < 0:
x2 = -1*x1
x1 = 1 + x1
y1 = y - int(y)
y2 = int(y) + 1 -y
if y < 0:
y2 = -1 * y1
y1 = 1 + y1
z1 = z - int(z)
z2= int(z) +1 -z
if z < 0:
z2 = -1 * z1
z1 = 1 + z1
px1y1z1 = x1*y1*z1
px1y2z1 = x1*y2*z1
px1y1z2 = x1*y1*z2
px1y2z2 = x1*y2*z2
px2y1z1 = x2*y1*z1
px2y2z1 = x2*y2*z1
px2y1z2 = x2*y1*z2
px2y2z2 = x2*y2*z2
return px2y2z2, px2y1z2, px2y2z1 ,px2y1z1, px1y2z2, px1y1z2, px1y2z1, px1y1z1
def points(self,x,y,z):
xp1 = int(x)
xp2 = int(x+1)
if x < 0:
xp1 = int(x-1)
xp2 = int(x)
yp1 = int(y)
yp2 = int(y+1)
if y < 0:
yp1 = int(y-1)
yp2 = int(y)
zp1 = int(z)
zp2 = int(z+1)
if z < 0:
zp1 = int(z-1)
zp2 = int(z)
p1 = np.array([xp1, yp1, zp1])
p2 = np.array([xp1, yp2, zp1])
p3 = np.array([xp1, yp1, zp2])
p4 = np.array([xp1, yp2, zp2])
p5 = np.array([xp2, yp1, zp1])
p6 = np.array([xp2, yp2, zp1])
p7 = np.array([xp2, yp1, zp2])
p8 = np.array([xp2, yp2, zp2])
return [p1,p2,p3,p4,p5,p6,p7,p8]
def rotation(self,x,y,z):
tz,tx,tz2 = np.deg2rad(self.Object_Parameter["rotation"])
Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0,0,1]])
Rx = np.array([[1,0,0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
Rz2 = np.array([[np.cos(tz2), -np.sin(tz2), 0], [np.sin(tz2), np.cos(tz2), 0], [0,0,1]])
rot_mat = np.linalg.inv(np.dot(np.dot(Rz2,Rx), Rz))
x,y,z = np.dot(rot_mat,[x,y,z])
return x, y, z
def rotation_inv(self,x,y,z):
tz,tx,tz2 = np.deg2rad(self.Object_Parameter["rotation"])
Rz = np.array([[np.cos(tz), -np.sin(tz), 0], [np.sin(tz), np.cos(tz), 0], [0,0,1]])
Rx = np.array([[1,0,0], [0, np.cos(tx), -np.sin(tx)], [0, np.sin(tx), np.cos(tx)]])
Rz2 = np.array([[np.cos(tz2), -np.sin(tz2), 0], [np.sin(tz2), np.cos(tz2), 0], [0,0,1]])
rot_mat = np.dot(np.dot(Rz2,Rx), Rz)
x,y,z = np.dot(rot_mat,[x,y,z])
return x, y, z
def volume_old(self):
for index in np.ndenumerate(self.box):
x, y, z = self.rotation_inv(index[0][0],index[0][1],index[0][2])
point_list = self.point(x,y,z)
for pos in point_list:
x1, y1, z1 = pos
if self.point_in_object(x1,y1,z1):
x1r, y1r, z1r = self.rotation(x1,y1,z1)
point_listr = self.point(x1r, y1r, z1r)
intpols = self.trilinear_int(x1r, y1r, z1r)
for posr, intpol in zip(point_listr,intpols):
self.box[posr]=self.box[posr]+self.Object_Parameter["color"] * intpol
def vol_parallel(self,ind,v):
x1, y1,z1 = ind[0][0]-v[0]/2,ind[0][1]-v[1]/2,ind[0][2]-v[2]/2
x, y, z = self.rotation_inv(x1,y1,z1)
if self.point_in_object(x,y,z):
return [ind[0][0],ind[0][1],ind[0][2]]
def volumepar(self):
self.vol_parallelp = partial(self.vol_parallel,v=self.box.shape)
with mp.Pool(processes = 30) as pool:
L = pool.map(self.vol_parallelp, [index for index in np.ndenumerate(self.box)])
L1 =[i for i in list(L) if i != None]
for i in L1:
self.box[i[0],i[1],i[2]] = self.Object_Parameter["color"]
return self.box
def place_object_involume(self,volume, overwrite = False):
if overwrite is True:
self.vol_parallelp = partial(self.vol_parallel,v=self.box.shape)
with mp.Pool(processes = 30) as pool:
L = pool.map(self.vol_parallelp, [index for index in np.ndenumerate(self.box)])
L1 =[i for i in list(L) if i != None]
for i in L1:
self.pos1 = i[0] + self.Object_Parameter["pos"][0] - int(self.box.shape[0]/2)
self.pos2 = i[1] + self.Object_Parameter["pos"][1] - int(self.box.shape[1]/2)
self.pos3 = i[2] + self.Object_Parameter["pos"][2] - int(self.box.shape[2]/2)
volume[self.pos1,self.pos2,self.pos3]=self.Object_Parameter["color"]
return volume
def calc_dims(self):
    """Compute bounding-box shapes for the rotated object.

    Rotates the eight corners of the object's half-extent box and
    returns two candidate shapes:
      index 0 — ``(2*max_x, 2*max_y, 2*max_z)`` from the rotated corners,
      index 1 — a cube of edge ``2*l_max`` (diagonal-based upper bound).

    Bug fix: the original computed ``max_y`` using corner e8's *x*
    component (copy-paste error ``e8r[0]``); the y component is used now.
    """
    rx = self.Object_Parameter["radius_dim"][0]
    ry = self.Object_Parameter["radius_dim"][1]
    rz = self.Object_Parameter["radius_dim"][2]
    # NOTE(review): the 2*rx factor makes the diagonal bound asymmetric;
    # kept as in the original -- confirm it is intentional.
    l_max = np.sqrt((2 * rx) ** 2 + ry ** 2 + rz ** 2)
    # All eight sign combinations of the half extents (the box corners).
    corners = [np.array([sx * rx, sy * ry, sz * rz])
               for sx in (1, -1) for sy in (1, -1) for sz in (1, -1)]
    rotated = [self.rotation(*corner) for corner in corners]
    max_x = max(c[0] for c in rotated) + 1
    max_y = max(c[1] for c in rotated) + 1
    max_z = max(c[2] for c in rotated) + 1
    return [(int(max_x) * 2, int(max_y) * 2, int(max_z) * 2),
            (int(l_max * 2), int(l_max * 2), int(l_max * 2))]
class Sphere(Object3D):
    """Sphere primitive (per-axis half extents, so effectively an ellipsoid).

    Keeps a class-level instance count in ``Sphere_Counter`` and the
    global count in ``Object3D.Object_Counter``.
    """
    Sphere_Counter = 0

    def __init__(self, name, radius_dim=(1, 1, 1), pos=(0, 0, 0), color=100, rotation=(0, 0, 0)):
        self.name = name
        Object3D.Object_Counter += 1
        Sphere.Sphere_Counter += 1
        self.Object_Parameter = {
            "objecttype": "sphere",
            "radius_dim": radius_dim,
            "pos": pos,
            "color": color,
            "rotation": rotation,
        }
        # Shape candidates for the rotated bounding box; index 0 is used.
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def __del__(self):
        Object3D.Object_Counter -= 1
        Sphere.Sphere_Counter -= 1

    def point_in_object(self, x, y, z):
        """True when (x, y, z) satisfies the ellipsoid inequality <= 1."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        return x ** 2 / (xoff ** 2) + y ** 2 / (yoff ** 2) + z ** 2 / (zoff ** 2) <= 1
class Ellipsoid(Object3D):
    """Ellipsoid primitive defined by per-axis half extents.

    Bug fix: ``__init__`` and ``__del__`` updated
    ``Sphere.Ellipsoid_Counter`` — an attribute that does not exist on
    ``Sphere`` — so instantiating an Ellipsoid raised ``AttributeError``.
    Both now use ``Ellipsoid.Ellipsoid_Counter``.
    """
    Ellipsoid_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        Ellipsoid.Ellipsoid_Counter -= 1

    def __init__(self, name, radius_dim=(1, 1, 1), pos=(0, 0, 0), color=100, rotation=(0, 0, 0)):
        self.name = name
        Object3D.Object_Counter += 1
        Ellipsoid.Ellipsoid_Counter += 1
        # NOTE(review): "objecttype" kept as "sphere" to avoid breaking
        # consumers that branch on it, although "ellipsoid" looks intended
        # (copy-paste from Sphere) -- confirm and rename.
        self.Object_Parameter = {"objecttype": "sphere", "radius_dim": radius_dim,
                                 "pos": pos, "color": color, "rotation": rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """True when (x, y, z) satisfies the ellipsoid inequality <= 1."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        return x ** 2 / (xoff ** 2) + y ** 2 / (yoff ** 2) + z ** 2 / (zoff ** 2) <= 1
class Square(Object3D):
    """Axis-aligned box primitive ("square"); half extents come from radius_dim."""
    Square_Counter = 0

    def __init__(self, name, radius_dim=(10, 10, 10), pos=(0, 0, 0), color=100, rotation=(0, 0, 0)):
        self.name = name
        Object3D.Object_Counter += 1
        Square.Square_Counter += 1
        self.Object_Parameter = {
            "objecttype": "square",
            "radius_dim": radius_dim,
            "pos": pos,
            "color": color,
            "rotation": rotation,
        }
        # Shape candidates for the rotated bounding box; index 0 is used.
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def __del__(self):
        Object3D.Object_Counter -= 1
        Square.Square_Counter -= 1

    def point_in_object(self, x, y, z):
        """True when |x|, |y| and |z| are all within the half extents."""
        half_x, half_y, half_z = self.Object_Parameter["radius_dim"]
        return x ** 2 <= half_x ** 2 and y ** 2 <= half_y ** 2 and z ** 2 <= half_z ** 2
class Box(Object3D):
    """Axis-aligned box primitive; geometrically identical to ``Square``."""
    Box_Counter = 0

    def __init__(self, name, radius_dim=(10, 10, 10), pos=(0, 0, 0), color=100, rotation=(0, 0, 0)):
        self.name = name
        Object3D.Object_Counter += 1
        Box.Box_Counter += 1
        self.Object_Parameter = {
            "objecttype": "box",
            "radius_dim": radius_dim,
            "pos": pos,
            "color": color,
            "rotation": rotation,
        }
        # Shape candidates for the rotated bounding box; index 0 is used.
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def __del__(self):
        Object3D.Object_Counter -= 1
        Box.Box_Counter -= 1

    def point_in_object(self, x, y, z):
        """True when |x|, |y| and |z| are all within the half extents."""
        half_x, half_y, half_z = self.Object_Parameter["radius_dim"]
        return x ** 2 <= half_x ** 2 and y ** 2 <= half_y ** 2 and z ** 2 <= half_z ** 2
class Tubus(Object3D):
    """Cylinder ("tubus"): elliptical cross-section in x/y, half length in z.

    Bug fix: ``__init__`` incremented ``Box.Box_Counter`` (copy-paste
    from ``Box``) while ``__del__`` decremented ``Tubus.Tubus_Counter``,
    so the tubus count drifted negative and the box count inflated.
    ``__init__`` now increments ``Tubus.Tubus_Counter``.
    """
    Tubus_Counter = 0

    def __del__(self):
        Object3D.Object_Counter -= 1
        Tubus.Tubus_Counter -= 1

    def __init__(self, name, radius_dim=(10, 10, 10), pos=(0, 0, 0), color=100, rotation=(0, 0, 0)):
        self.name = name
        Object3D.Object_Counter += 1
        Tubus.Tubus_Counter += 1
        self.Object_Parameter = {"objecttype": "tubus", "radius_dim": radius_dim,
                                 "pos": pos, "color": color, "rotation": rotation}
        self.rotated_dims = self.calc_dims()
        self.box = np.zeros(self.rotated_dims[0])

    def point_in_object(self, x, y, z):
        """Inside test: (x, y) within the ellipse and |z| within the half length."""
        xoff, yoff, zoff = self.Object_Parameter["radius_dim"]
        return x ** 2 / (xoff ** 2) + y ** 2 / (yoff ** 2) <= 1 and z ** 2 <= zoff ** 2
# Placeholder primitives: declared but not yet implemented (no
# point_in_object / __init__ overrides), so they only inherit
# Object3D behaviour.
class Helix(Object3D):
    pass
class Cone(Object3D):
    pass
class Pyramide3(Object3D):
    pass
class Pyramide4(Object3D):
    pass
if __name__=='__main__':
    # Demo: rasterise nine tubes (three Y-shaped junctions stacked into a
    # branching tree) into a 150^3 volume and save it as a float32 TIFF.
    volume = np.zeros((150,150,150))
    # Bottom trunk + two 135/225-degree branches.
    tb1 = Tubus('tubus1', radius_dim = (7,7,20), pos=(75,75,20), color = 255, rotation = (0,0,0))
    tb2 = Tubus('tubus2', radius_dim = (7,7,20), pos=(65,75,55),color = 255, rotation = (90,135,0))
    tb3 = Tubus('tubus3', radius_dim = (7,7,20), pos=(85,75,55),color = 255, rotation = (90,225,0))
    # Left sub-tree.
    tb4 = Tubus('tubus1', radius_dim = (7,7,20), pos=(50,75,85), color = 255, rotation = (0,0,0))
    tb5 = Tubus('tubus2', radius_dim = (7,7,20), pos=(40,75,120),color = 255, rotation = (90,135,0))
    tb6 = Tubus('tubus3', radius_dim = (7,7,20), pos=(60,75,120),color = 255, rotation = (90,225,0))
    # Right sub-tree.
    tb7 = Tubus('tubus1', radius_dim = (7,7,20), pos=(100,75,85), color = 255, rotation = (0,0,0))
    tb8 = Tubus('tubus2', radius_dim = (7,7,20), pos=(90,75,120),color = 255, rotation = (90,135,0))
    tb9 = Tubus('tubus3', radius_dim = (7,7,20), pos=(110,75,120),color = 255, rotation = (90,225,0))
    # The disabled block below would carve hollow cores (color 0) out of
    # the tubes; kept as an inert string literal, as in the original.
    """
tb1i = Tubus('tubus1', radius_dim = (3,3,20), pos=(75,75,20), color = 0, rotation = (0,0,0))
tb2i = Tubus('tubus2', radius_dim = (3,3,20), pos=(60,75,55),color = 0, rotation = (90,135,0))
tb3i = Tubus('tubus3', radius_dim = (3,3,20), pos=(90,75,55),color = 0, rotation = (90,225,0))
tb4i = Tubus('tubus1', radius_dim = (3,3,20), pos=(45,75,85), color = 0, rotation = (0,0,0))
tb5i = Tubus('tubus2', radius_dim = (3,3,20), pos=(30,75,120),color = 0, rotation = (90,135,0))
tb6i = Tubus('tubus3', radius_dim = (3,3,20), pos=(60,75,120),color = 0, rotation = (90,225,0))
tb7i = Tubus('tubus1', radius_dim = (3,3,20), pos=(105,75,85), color = 0, rotation = (0,0,0))
tb8i = Tubus('tubus2', radius_dim = (3,3,20), pos=(90,75,120),color = 0, rotation = (90,135,0))
tb9i = Tubus('tubus3', radius_dim = (3,3,20), pos=(120,75,120),color = 0, rotation = (90,225,0))
"""
    # Stamp every tube into the shared volume (overwrite=True writes voxels).
    volume = tb1.place_object_involume(volume, overwrite = True)
    volume = tb2.place_object_involume(volume, overwrite = True)
    volume = tb3.place_object_involume(volume, overwrite = True)
    volume = tb4.place_object_involume(volume, overwrite = True)
    volume = tb5.place_object_involume(volume, overwrite = True)
    volume = tb6.place_object_involume(volume, overwrite = True)
    volume = tb7.place_object_involume(volume, overwrite = True)
    volume = tb8.place_object_involume(volume, overwrite = True)
    volume = tb9.place_object_involume(volume, overwrite = True)
    """
volume = tb1i.place_object_involume(volume, overwrite = True)
volume = tb2i.place_object_involume(volume, overwrite = True)
volume = tb3i.place_object_involume(volume, overwrite = True)
volume = tb4i.place_object_involume(volume, overwrite = True)
volume = tb5i.place_object_involume(volume, overwrite = True)
volume = tb6i.place_object_involume(volume, overwrite = True)
volume = tb7i.place_object_involume(volume, overwrite = True)
volume = tb8i.place_object_involume(volume, overwrite = True)
volume = tb9i.place_object_involume(volume, overwrite = True)
"""
    import tifffile as tiff
    image = volume
    #image = np.uint8(volume)
    # NOTE(review): the trailing "| 0.311951 | 0.432723" on the next line
    # is dataset-dump residue, not Python -- restore the original line.
    tiff.imsave('volume_test_tuben.tif',np.float32(image)) | 0.311951 | 0.432723
from collections import defaultdict
import os
import re
import unicodedata
class WordList(object):
    """Accumulating, de-duplicating word list built from text files.

    Words can be lower-cased, ASCII-folded, stripped of non-alphabetic
    characters, length-filtered (min/max), rejected by predicates and
    expanded by transform functions.

    Fixes relative to the original:
      * ``open(..., 'U')`` — the ``'U'`` mode was removed in Python 3.11;
        plain ``'r'`` already gives universal-newline behaviour.
      * rejected words were still added: the ``reject`` loop only broke
        out of the predicate loop, never skipped the word. Rejected
        words are now skipped entirely.
      * mutable default arguments (``transforms=[]``, ``reject=[]``)
        replaced with ``None`` sentinels.
      * removed a leftover debug ``print`` in ``_transform``.
    """

    def __init__(self, lower=False, strip_nonalpha=False, echo=True, min=None, max=None, transforms=None):
        # NOTE: ``min``/``max`` shadow builtins; parameter names kept for
        # interface compatibility with existing callers.
        self._lower = lower
        self._echo = echo
        self._strip_nonalpha = strip_nonalpha
        self._words = set()
        # NOTE(review): the original stored the *class* ``defaultdict``
        # here (not an instance); kept for compatibility, but it looks unused.
        self.sets = defaultdict
        self.min = min
        self.max = max
        self.transforms = [] if transforms is None else transforms

    def _transform(self, word, fns, min=None, max=None):
        """Apply transform function(s) to *word* and add word + results.

        Each fn must return a list of words. Returns the number of words
        actually added (new and within bounds).
        """
        if not isinstance(fns, list):
            fns = [fns]
        results = [word]
        for fn in fns:
            results += fn(word)
        return self._add_words(results, min=min, max=max)

    def _add_word(self, word, min=None, max=None):
        """Add *word* if new and within length bounds; return 1 if added, else 0."""
        word_length = len(word)
        # Fall back to the instance-level bounds. Note a passed 0 is
        # falsy and therefore ignored (original behaviour, kept).
        min = min if min else self.min
        max = max if max else self.max
        if min and word_length < min:
            return 0
        if max and word_length > max:
            return 0
        if word not in self._words:
            self._words.add(word)
            return 1
        return 0

    def _add_words(self, words, min=None, max=None):
        """Add every word in *words*; return how many were new."""
        return sum(self._add_word(word, min=min, max=max) for word in words)

    @property
    def words(self):
        """The set of accepted words."""
        return self._words

    def add_file(self, filename, split_further=None, min=None, max=None, reject=None, transforms=None):
        """Read *filename* (latin-9) line by line and add its words.

        Args:
            filename: path to the word-list file.
            split_further: optional separator to split each line on;
                ``None`` treats the whole line as one word.
            min/max: per-call length bounds (override instance bounds).
            reject: predicates; a word is skipped if any returns truthy.
            transforms: functions ``word -> list[str]`` applied to each
                newly-added word.
        """
        reject = [] if reject is None else reject
        transforms = [] if transforms is None else transforms
        count_possible = 0
        count_transformed = 0
        count_added = 0
        # 'r' gives universal newlines; the old 'U' flag was removed in 3.11.
        with open(filename, 'r', encoding='iso-8859-15') as f:  # can also try cp437 (so:16528468)
            for row in f:
                words = [row] if split_further is None else row.split(split_further)
                for word in words:
                    word = word.strip('\n').strip('\r')
                    if self._lower:
                        word = word.lower()
                    # Fold accented characters to their ASCII base form.
                    word = unicodedata.normalize('NFKD', word).encode('ascii','ignore').decode("utf-8")
                    if self._strip_nonalpha:
                        word = re.sub('[^a-zA-Z]', '', word)
                    # Bug fix: rejected words are skipped entirely (the
                    # original only broke the predicate loop and then
                    # added the word anyway).
                    if any(fn(word) for fn in reject):
                        continue
                    number_words_transformed = 0
                    number_words_added = self._add_word(word, min=min, max=max)
                    if transforms and number_words_added > 0:
                        number_words_transformed = self._transform(word, transforms, min=min, max=max)
                    count_possible += 1
                    count_transformed += number_words_transformed
                    count_added += number_words_added
        if self._echo:
            print('Dictionary: {}, Possible: {}, Words added: {}, Transformed added: {}, Total: {}'.format(
                os.path.basename(filename), count_possible, count_added, count_transformed, len(self._words)))

    def dict_by_length(self):
        """Return ``{length: set of words of that length}``."""
        out = defaultdict(set)
        for word in self._words:
            out[len(word)].add(word)
        return out
return out | wordlist.py | from collections import defaultdict
import os
import re
import unicodedata
class WordList(object):
def __init__(self, lower=False, strip_nonalpha=False, echo=True, min=None, max=None, transforms=[]):
self._lower = lower
self._echo = echo
self._strip_nonalpha = strip_nonalpha
self._words = set()
self.sets = defaultdict
self.min = min
self.max = max
self.transforms = transforms
def _transform(self, word, fns, min=None, max=None):
if not isinstance(fns, list):
fns = [fns]
results = [word]
for fn in fns:
results += fn(word)
print(results)
return self._add_words(results, min=min, max=max)
def _add_word(self, word, min=None, max=None):
word_length = len(word)
min = min if min else self.min
max = max if max else self.max
if min and word_length < min:
return 0
if max and word_length > max:
return 0
if word not in self._words:
self._words.add(word)
return 1
return 0
def _add_words(self, words, min=None, max=None):
count_added = 0
for word in words:
count_added += self._add_word(word, min=min, max=max)
return count_added
@property
def words(self):
return self._words
def add_file(self, filename, split_further=None, min=None, max=None, reject=[], transforms=[]):
count_possible = 0
count_transformed = 0
count_added = 0
with open(filename, 'U', encoding='iso-8859-15') as f: # can also try cp437 (so:16528468)
for row in f:
if split_further is None:
words = [row]
else:
words = row.split(split_further)
for word in words:
word = word.strip('\n').strip('\r')
if self._lower:
word = word.lower()
word = unicodedata.normalize('NFKD', word).encode('ascii','ignore').decode("utf-8")
if self._strip_nonalpha:
word = re.sub('[^a-zA-Z]', '', word)
do_continue = True
for fn in reject:
if fn(word):
do_continue = False
if not do_continue:
break
number_words_transformed = 0
number_words_added = self._add_word(word, min=min, max=max)
if transforms and number_words_added > 0:
number_words_transformed = self._transform(word, transforms, min=min, max=max)
count_possible += 1
count_transformed += number_words_transformed
count_added += number_words_added
if self._echo:
print('Dictionary: {}, Possible: {}, Words added: {}, Transformed added: {}, Total: {}'.format(
os.path.basename(filename), count_possible, count_added, count_transformed, len(self._words)))
def dict_by_length(self):
out = defaultdict(set)
for word in self._words:
out[len(word)].add(word)
return out | 0.749454 | 0.095097 |
from datetime import timedelta
import os,json,logging,subprocess
from airflow.models import DAG,Variable
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.operators.ssh_operator import SSHOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.contrib.hooks.ssh_hook import SSHHook
from airflow.operators.dummy_operator import DummyOperator
from igf_airflow.logging.upload_log_msg import send_log_to_channels,log_success,log_failure,log_sleep
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import get_ongoing_seqrun_list
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import copy_seqrun_manifest_file
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import reset_manifest_file
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import get_seqrun_chunks
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import copy_seqrun_chunk
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import run_interop_dump
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import generate_interop_report_func
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import check_progress_for_run_func
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import samplesheet_validation_and_branch_func
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import run_tile_demult_list_func
## DEFAULT ARGS
# Applied to every task in this DAG; provide_context exposes the Airflow
# context to the python callables.
default_args = {
    'owner': 'airflow',
    'depends_on_past': False,
    'start_date': days_ago(2),
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
    'provide_context': True,
}
## SSH HOOKS
# Hook for running commands on the sequencer-attached "orwell" host;
# credentials come from Airflow Variables.
orwell_ssh_hook = \
    SSHHook(
        key_file=Variable.get('hpc_ssh_key_file'),
        username=Variable.get('hpc_user'),
        remote_host=Variable.get('orwell_server_hostname'))
## DAG
# Runs every 2 hours; max_active_runs=1 so overlapping runs of this DAG
# cannot copy the same sequencing run concurrently.
dag = \
    DAG(
        dag_id='dag8_copy_ongoing_seqrun',
        catchup=False,
        schedule_interval="0 */2 * * *",
        max_active_runs=1,
        tags=['hpc'],
        default_args=default_args,
        orientation='LR')
with dag:
    ## TASK
    # Branch operator: pushes the list of ongoing sequencing runs to XCom
    # and chooses either the per-run branches or no_ongoing_seqrun.
    generate_seqrun_list = \
        BranchPythonOperator(
            task_id='generate_seqrun_list',
            dag=dag,
            queue='hpc_4G',
            python_callable=get_ongoing_seqrun_list)
    ## TASK
    # Terminal no-op taken when no run is ongoing; logs and sleeps.
    no_ongoing_seqrun = \
        DummyOperator(
            task_id='no_ongoing_seqrun',
            dag=dag,
            queue='hpc_4G',
            on_success_callback=log_sleep)
    ## TASK
    # NOTE(review): ``tasks`` is assigned but never used below -- candidate
    # for removal.
    tasks = list()
    # Up to five ongoing runs are handled in parallel, one branch each.
    for i in range(5):
        # List the run's files on the remote host; manifest path goes to XCom.
        generate_seqrun_file_list = \
            SSHOperator(
                task_id='generate_seqrun_file_list_{0}'.format(i),
                dag=dag,
                pool='orwell_exe_pool',
                ssh_hook=orwell_ssh_hook,
                do_xcom_push=True,
                queue='hpc_4G',
                params={'source_task_id':'generate_seqrun_list',
                        'pull_key':'ongoing_seqruns',
                        'index_number':i},
                command="""
source /home/igf/igf_code/airflow/env.sh; \
python /home/igf/igf_code/airflow/data-management-python/scripts/seqrun_processing/create_file_list_for_ongoing_seqrun.py \
--seqrun_base_dir /home/igf/seqrun/illumina \
--output_path /home/igf/ongoing_run_tracking \
--seqrun_id {{ ti.xcom_pull(key=params.pull_key,task_ids=params.source_task_id)[ params.index_number ] }}
""")
        ## TASK
        # Fetch the manifest file produced on the remote host.
        copy_seqrun_file_list = \
            PythonOperator(
                task_id='copy_seqrun_file_list_{0}'.format(i),
                dag=dag,
                pool='orwell_scp_pool',
                queue='hpc_4G',
                params={'xcom_pull_task_ids':'generate_seqrun_file_list_{0}'.format(i)},
                python_callable=copy_seqrun_manifest_file)
        ## TASK
        # Drop already-copied entries from the manifest.
        compare_seqrun_files = \
            PythonOperator(
                task_id='compare_seqrun_files_{0}'.format(i),
                dag=dag,
                queue='hpc_4G',
                params={'xcom_pull_task_ids':'copy_seqrun_file_list_{0}'.format(i),
                        'seqrun_id_pull_key':'ongoing_seqruns',
                        'run_index_number':i,
                        'seqrun_id_pull_task_ids':'generate_seqrun_list',
                        'local_seqrun_path':Variable.get('hpc_seqrun_path')},
                python_callable=reset_manifest_file)
        ## TASK
        # Split the manifest into up to 10 chunks and branch to the
        # matching copy_file_run_{i}_chunk_* tasks (or the no_work dummy).
        decide_copy_branch = \
            BranchPythonOperator(
                task_id='decide_copy_branch_{0}'.format(i),
                dag=dag,
                queue='hpc_4G',
                params={'xcom_pull_task_ids':'copy_seqrun_file_list_{0}'.format(i),
                        'worker_size':10,
                        'seqrun_chunk_size_key':'seqrun_chunk_size',
                        'child_task_prefix':'copy_file_run_{0}_chunk'.format(i)},
                python_callable=get_seqrun_chunks)
        ## TASK
        no_copy_seqrun = \
            DummyOperator(
                task_id='copy_file_run_{0}_chunk_{1}'.format(i,'no_work'),
                dag=dag,
                queue='hpc_4G',
                on_success_callback=log_sleep)
        ## TASK
        # One copy worker per manifest chunk.
        copy_seqrun_files = list()
        for j in range(10):
            copy_file_chunk = \
                PythonOperator(
                    task_id='copy_file_run_{0}_chunk_{1}'.format(i,j),
                    dag=dag,
                    queue='hpc_4G',
                    pool='orwell_scp_pool',
                    params={'file_path_task_ids':'copy_seqrun_file_list_{0}'.format(i),
                            'seqrun_chunk_size_key':'seqrun_chunk_size',
                            'seqrun_chunk_size_task_ids':'decide_copy_branch_{0}'.format(i),
                            'run_index_number':i,
                            'chunk_index_number':j,
                            'seqrun_id_pull_key':'ongoing_seqruns',
                            'seqrun_id_pull_task_ids':'generate_seqrun_list',
                            'local_seqrun_path':Variable.get('hpc_seqrun_path')},
                    python_callable=copy_seqrun_chunk)
            copy_seqrun_files.append(copy_file_chunk)
        ## PIPELINE
        generate_seqrun_list >> generate_seqrun_file_list >> copy_seqrun_file_list >> compare_seqrun_files >> decide_copy_branch
        decide_copy_branch >> no_copy_seqrun
        decide_copy_branch >> copy_seqrun_files
        ## TASK
        # Join point after the (possibly skipped) chunk copies.
        wait_for_copy_chunk = \
            DummyOperator(
                task_id='wait_for_copy_chunk_run_{0}'.format(i),
                dag=dag,
                trigger_rule='none_failed_or_skipped',
                queue='hpc_4G')
        ## PIPELINE
        copy_seqrun_files >> wait_for_copy_chunk
        ## TASK
        # Dump Illumina InterOp metrics for the run.
        create_interop_dump = \
            PythonOperator(
                task_id='create_interop_dump_run_{0}'.format(i),
                dag=dag,
                queue='hpc_4G',
                params={'run_index_number':i,
                        'seqrun_id_pull_key':'ongoing_seqruns',
                        'seqrun_id_pull_task_ids':'generate_seqrun_list'},
                python_callable=run_interop_dump)
        ## PIPELINE
        wait_for_copy_chunk >> create_interop_dump
        ## TASK
        # Render the InterOp report notebook from the dump.
        generate_interop_report = \
            PythonOperator(
                task_id='generate_interop_report_run_{0}'.format(i),
                dag=dag,
                queue='hpc_4G',
                params={'run_index_number':i,
                        'seqrun_id_pull_key':'ongoing_seqruns',
                        'seqrun_id_pull_task_ids':'generate_seqrun_list',
                        'runInfo_xml_file_name':'RunInfo.xml',
                        'interop_dump_pull_task':'create_interop_dump_run_{0}'.format(i),
                        'timeout':1200,
                        'kernel_name':'python3',
                        'output_notebook_key':'interop_notebook'},
                python_callable=generate_interop_report_func)
        ## PIPELINE
        create_interop_dump >> generate_interop_report
        ## TASK
        # Decide whether the run has progressed enough to validate the
        # samplesheet, or skip checking for now.
        check_progress_for_run = \
            BranchPythonOperator(
                task_id='check_progress_for_run_{0}'.format(i),
                dag=dag,
                queue='hpc_4G',
                params={'run_index_number':i,
                        'seqrun_id_pull_key':'ongoing_seqruns',
                        'seqrun_id_pull_task_ids':'generate_seqrun_list',
                        'samplesheet_validation_job_prefix':'samplesheet_validation',
                        'tile_demult_job_prefix':'tile_demultiplexing',
                        'no_job_prefix':'no_seqrun_checking',
                        'next_job_prefix':'samplesheet_validation',
                        'runParameters_xml_file_name':'runParameters.xml',
                        'samplesheet_file_name':'SampleSheet.csv',
                        'interop_dump_pull_task':'create_interop_dump_run_{0}'.format(i)},
                python_callable=check_progress_for_run_func)
        ## PIPELINE
        create_interop_dump >> check_progress_for_run
        ## TASK
        no_seqrun_checking = \
            DummyOperator(
                task_id='no_seqrun_checking_{0}'.format(i),
                dag=dag,
                queue='hpc_4G')
        ## PIPELINE
        check_progress_for_run >> no_seqrun_checking
        ## TASK
        # Validate the samplesheet and branch to per-lane tile
        # demultiplexing (lanes 1-8) or to no_seqrun_checking.
        samplesheet_validation_and_branch = \
            BranchPythonOperator(
                task_id='samplesheet_validation_{0}'.format(i),
                dag=dag,
                queue='hpc_4G',
                params={'run_index_number':i,
                        'seqrun_id_pull_key':'ongoing_seqruns',
                        'seqrun_id_pull_task_ids':'generate_seqrun_list',
                        'samplesheet_file_name':'SampleSheet.csv',
                        'runParameters_xml_file_name':'runParameters.xml',
                        'no_job_prefix':'no_seqrun_checking',
                        'next_job_prefix':'tile_demultiplexing',
                        # NOTE(review): the comprehension variable shadows the
                        # outer loop's ``i`` (harmless in py3 comprehension
                        # scope, but confusing) -- list(range(1,9)) is clearer.
                        'next_job_range':[i for i in range(1,9)]},
                python_callable=samplesheet_validation_and_branch_func)
        ## PIPELINE
        check_progress_for_run >> samplesheet_validation_and_branch
        ## TASK
        # One single-tile demultiplexing smoke test per lane (1-8).
        run_tile_demult_list = list()
        for j in range(1,9):
            run_tile_demult_per_lane = \
                PythonOperator(
                    task_id='tile_demultiplexing_{0}_{1}'.format(i,j),
                    dag=dag,
                    queue='hpc_4G',
                    params={'run_index_number':i,
                            'lane_id':j,
                            'seqrun_id_pull_key':'ongoing_seqruns',
                            'seqrun_id_pull_task_ids':'generate_seqrun_list',
                            'samplesheet_file_name':'SampleSheet.csv',
                            'runinfo_xml_file_name':'RunInfo.xml',
                            'runParameters_xml_file_name':'runParameters.xml',
                            'tile_list':[1101,],
                            'threads':1},
                    python_callable=run_tile_demult_list_func)
            run_tile_demult_list.\
                append(run_tile_demult_per_lane)
        ## PIPELINE
        samplesheet_validation_and_branch >> run_tile_demult_list
        samplesheet_validation_and_branch >> no_seqrun_checking
    ## PIPELINE
    # NOTE(review): the trailing "| dags/dag8_copy_ongoing_seqrun.py | ..."
    # on the next line is dataset-dump residue, not Python -- restore it.
    generate_seqrun_list >> no_ongoing_seqrun | dags/dag8_copy_ongoing_seqrun.py | from datetime import timedelta
import os,json,logging,subprocess
from airflow.models import DAG,Variable
from airflow.utils.dates import days_ago
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.operators.ssh_operator import SSHOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
from airflow.contrib.hooks.ssh_hook import SSHHook
from airflow.operators.dummy_operator import DummyOperator
from igf_airflow.logging.upload_log_msg import send_log_to_channels,log_success,log_failure,log_sleep
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import get_ongoing_seqrun_list
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import copy_seqrun_manifest_file
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import reset_manifest_file
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import get_seqrun_chunks
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import copy_seqrun_chunk
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import run_interop_dump
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import generate_interop_report_func
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import check_progress_for_run_func
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import samplesheet_validation_and_branch_func
from igf_airflow.utils.dag8_copy_ongoing_seqrun_utils import run_tile_demult_list_func
## DEFAULT ARGS
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': days_ago(2),
'email_on_failure': False,
'email_on_retry': False,
'retries': 1,
'retry_delay': timedelta(minutes=5),
'provide_context': True,
}
## SSH HOOKS
orwell_ssh_hook = \
SSHHook(
key_file=Variable.get('hpc_ssh_key_file'),
username=Variable.get('hpc_user'),
remote_host=Variable.get('orwell_server_hostname'))
## DAG
dag = \
DAG(
dag_id='dag8_copy_ongoing_seqrun',
catchup=False,
schedule_interval="0 */2 * * *",
max_active_runs=1,
tags=['hpc'],
default_args=default_args,
orientation='LR')
with dag:
## TASK
generate_seqrun_list = \
BranchPythonOperator(
task_id='generate_seqrun_list',
dag=dag,
queue='hpc_4G',
python_callable=get_ongoing_seqrun_list)
## TASK
no_ongoing_seqrun = \
DummyOperator(
task_id='no_ongoing_seqrun',
dag=dag,
queue='hpc_4G',
on_success_callback=log_sleep)
## TASK
tasks = list()
for i in range(5):
generate_seqrun_file_list = \
SSHOperator(
task_id='generate_seqrun_file_list_{0}'.format(i),
dag=dag,
pool='orwell_exe_pool',
ssh_hook=orwell_ssh_hook,
do_xcom_push=True,
queue='hpc_4G',
params={'source_task_id':'generate_seqrun_list',
'pull_key':'ongoing_seqruns',
'index_number':i},
command="""
source /home/igf/igf_code/airflow/env.sh; \
python /home/igf/igf_code/airflow/data-management-python/scripts/seqrun_processing/create_file_list_for_ongoing_seqrun.py \
--seqrun_base_dir /home/igf/seqrun/illumina \
--output_path /home/igf/ongoing_run_tracking \
--seqrun_id {{ ti.xcom_pull(key=params.pull_key,task_ids=params.source_task_id)[ params.index_number ] }}
""")
## TASK
copy_seqrun_file_list = \
PythonOperator(
task_id='copy_seqrun_file_list_{0}'.format(i),
dag=dag,
pool='orwell_scp_pool',
queue='hpc_4G',
params={'xcom_pull_task_ids':'generate_seqrun_file_list_{0}'.format(i)},
python_callable=copy_seqrun_manifest_file)
## TASK
compare_seqrun_files = \
PythonOperator(
task_id='compare_seqrun_files_{0}'.format(i),
dag=dag,
queue='hpc_4G',
params={'xcom_pull_task_ids':'copy_seqrun_file_list_{0}'.format(i),
'seqrun_id_pull_key':'ongoing_seqruns',
'run_index_number':i,
'seqrun_id_pull_task_ids':'generate_seqrun_list',
'local_seqrun_path':Variable.get('hpc_seqrun_path')},
python_callable=reset_manifest_file)
## TASK
decide_copy_branch = \
BranchPythonOperator(
task_id='decide_copy_branch_{0}'.format(i),
dag=dag,
queue='hpc_4G',
params={'xcom_pull_task_ids':'copy_seqrun_file_list_{0}'.format(i),
'worker_size':10,
'seqrun_chunk_size_key':'seqrun_chunk_size',
'child_task_prefix':'copy_file_run_{0}_chunk'.format(i)},
python_callable=get_seqrun_chunks)
## TASK
no_copy_seqrun = \
DummyOperator(
task_id='copy_file_run_{0}_chunk_{1}'.format(i,'no_work'),
dag=dag,
queue='hpc_4G',
on_success_callback=log_sleep)
## TASK
copy_seqrun_files = list()
for j in range(10):
copy_file_chunk = \
PythonOperator(
task_id='copy_file_run_{0}_chunk_{1}'.format(i,j),
dag=dag,
queue='hpc_4G',
pool='orwell_scp_pool',
params={'file_path_task_ids':'copy_seqrun_file_list_{0}'.format(i),
'seqrun_chunk_size_key':'seqrun_chunk_size',
'seqrun_chunk_size_task_ids':'decide_copy_branch_{0}'.format(i),
'run_index_number':i,
'chunk_index_number':j,
'seqrun_id_pull_key':'ongoing_seqruns',
'seqrun_id_pull_task_ids':'generate_seqrun_list',
'local_seqrun_path':Variable.get('hpc_seqrun_path')},
python_callable=copy_seqrun_chunk)
copy_seqrun_files.append(copy_file_chunk)
## PIPELINE
generate_seqrun_list >> generate_seqrun_file_list >> copy_seqrun_file_list >> compare_seqrun_files >> decide_copy_branch
decide_copy_branch >> no_copy_seqrun
decide_copy_branch >> copy_seqrun_files
## TASK
wait_for_copy_chunk = \
DummyOperator(
task_id='wait_for_copy_chunk_run_{0}'.format(i),
dag=dag,
trigger_rule='none_failed_or_skipped',
queue='hpc_4G')
## PIPELINE
copy_seqrun_files >> wait_for_copy_chunk
## TASK
create_interop_dump = \
PythonOperator(
task_id='create_interop_dump_run_{0}'.format(i),
dag=dag,
queue='hpc_4G',
params={'run_index_number':i,
'seqrun_id_pull_key':'ongoing_seqruns',
'seqrun_id_pull_task_ids':'generate_seqrun_list'},
python_callable=run_interop_dump)
## PIPELINE
wait_for_copy_chunk >> create_interop_dump
## TASK
generate_interop_report = \
PythonOperator(
task_id='generate_interop_report_run_{0}'.format(i),
dag=dag,
queue='hpc_4G',
params={'run_index_number':i,
'seqrun_id_pull_key':'ongoing_seqruns',
'seqrun_id_pull_task_ids':'generate_seqrun_list',
'runInfo_xml_file_name':'RunInfo.xml',
'interop_dump_pull_task':'create_interop_dump_run_{0}'.format(i),
'timeout':1200,
'kernel_name':'python3',
'output_notebook_key':'interop_notebook'},
python_callable=generate_interop_report_func)
## PIPELINE
create_interop_dump >> generate_interop_report
## TASK
check_progress_for_run = \
BranchPythonOperator(
task_id='check_progress_for_run_{0}'.format(i),
dag=dag,
queue='hpc_4G',
params={'run_index_number':i,
'seqrun_id_pull_key':'ongoing_seqruns',
'seqrun_id_pull_task_ids':'generate_seqrun_list',
'samplesheet_validation_job_prefix':'samplesheet_validation',
'tile_demult_job_prefix':'tile_demultiplexing',
'no_job_prefix':'no_seqrun_checking',
'next_job_prefix':'samplesheet_validation',
'runParameters_xml_file_name':'runParameters.xml',
'samplesheet_file_name':'SampleSheet.csv',
'interop_dump_pull_task':'create_interop_dump_run_{0}'.format(i)},
python_callable=check_progress_for_run_func)
## PIPELINE
create_interop_dump >> check_progress_for_run
## TASK
no_seqrun_checking = \
DummyOperator(
task_id='no_seqrun_checking_{0}'.format(i),
dag=dag,
queue='hpc_4G')
## PIPELINE
check_progress_for_run >> no_seqrun_checking
## TASK
samplesheet_validation_and_branch = \
BranchPythonOperator(
task_id='samplesheet_validation_{0}'.format(i),
dag=dag,
queue='hpc_4G',
params={'run_index_number':i,
'seqrun_id_pull_key':'ongoing_seqruns',
'seqrun_id_pull_task_ids':'generate_seqrun_list',
'samplesheet_file_name':'SampleSheet.csv',
'runParameters_xml_file_name':'runParameters.xml',
'no_job_prefix':'no_seqrun_checking',
'next_job_prefix':'tile_demultiplexing',
'next_job_range':[i for i in range(1,9)]},
python_callable=samplesheet_validation_and_branch_func)
## PIPELINE
check_progress_for_run >> samplesheet_validation_and_branch
## TASK
run_tile_demult_list = list()
for j in range(1,9):
run_tile_demult_per_lane = \
PythonOperator(
task_id='tile_demultiplexing_{0}_{1}'.format(i,j),
dag=dag,
queue='hpc_4G',
params={'run_index_number':i,
'lane_id':j,
'seqrun_id_pull_key':'ongoing_seqruns',
'seqrun_id_pull_task_ids':'generate_seqrun_list',
'samplesheet_file_name':'SampleSheet.csv',
'runinfo_xml_file_name':'RunInfo.xml',
'runParameters_xml_file_name':'runParameters.xml',
'tile_list':[1101,],
'threads':1},
python_callable=run_tile_demult_list_func)
run_tile_demult_list.\
append(run_tile_demult_per_lane)
## PIPELINE
samplesheet_validation_and_branch >> run_tile_demult_list
samplesheet_validation_and_branch >> no_seqrun_checking
## PIPELINE
generate_seqrun_list >> no_ongoing_seqrun | 0.314366 | 0.106691 |
import os
import sys
import subprocess
import threading
from typing import List, Tuple
# Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed.
METEOR_JAR = 'meteor-1.5.jar'
class Meteor:
    """Wrapper around the METEOR 1.5 scorer (Java subprocess, stdio protocol).

    Spawns ``meteor-1.5.jar`` once and keeps it alive; scoring requests
    are written line-by-line to its stdin and replies read from stdout,
    guarded by a lock so concurrent callers cannot interleave the protocol.
    """
    def __init__(self) -> None:
        self.env = os.environ
        # NOTE(review): 'en_US.UTF_8' (underscore) looks like a typo for
        # 'en_US.UTF-8' -- confirm against the upstream coco-caption code.
        self.env['LC_ALL'] = 'en_US.UTF_8'
        self.meteor_cmd = [
            'java', '-jar', '-Xmx2G', METEOR_JAR,
            '-', '-', '-stdio', '-l', 'en', '-norm'
        ]
        # NOTE(review): stderr is piped but never drained; a chatty scorer
        # could fill the pipe buffer and deadlock -- consider DEVNULL.
        self.meteor_p = subprocess.Popen(
            self.meteor_cmd,
            cwd = os.path.dirname(os.path.abspath(__file__)),
            stdin = subprocess.PIPE,
            stdout = subprocess.PIPE,
            stderr = subprocess.PIPE,
            env = self.env,
            universal_newlines = True,
            bufsize = 1
        )
        # Used to guarantee thread safety
        self.lock = threading.Lock()
    def compute_score(
        self, reference: List[List[str]], hypothesis: List[List[str]]
    ) -> Tuple[float, List[float]]:
        """Score hypothesis[i][0] against reference[i] for every segment.

        Returns (corpus-level score, per-segment scores).

        NOTE(review): if any read/parse below raises, the lock is never
        released -- consider ``with self.lock:``.
        """
        assert len(reference) == len(hypothesis)
        scores = []
        eval_line = 'EVAL'
        self.lock.acquire()
        for i, hypo in enumerate(hypothesis):
            hypo = hypo
            ref = reference[i]
            # sanity check
            assert(type(hypo) is list)
            assert(len(hypo) >= 1)
            assert(type(ref) is list)
            assert(len(ref) > 0)
            # One SCORE round-trip per segment; its statistics are
            # accumulated into a single EVAL request line.
            stat = self._stat(hypo[0], ref)
            eval_line += ' ||| {}'.format(stat)
        # Send to METEOR
        self.meteor_p.stdin.write(eval_line + '\n')
        # Collect segment scores
        for i in range(0, len(hypothesis)):
            score = float(self.meteor_p.stdout.readline().strip())
            scores.append(score)
        # Final score
        final_score = float(self.meteor_p.stdout.readline().strip())
        self.lock.release()
        return final_score, scores
    def method(self) -> str:
        """Name of this metric."""
        return "METEOR"
    def _stat(self, hypothesis_str, reference_list):
        """Request per-segment statistics over the stdio protocol."""
        # SCORE ||| reference 1 words ||| reference n words ||| hypothesis words
        # '|||' is the protocol delimiter, so it must not appear in the text.
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write(score_line+'\n')
        return self.meteor_p.stdout.readline().strip()
    def __del__(self) -> None:
        # Shut the scorer down; kill() after closing stdin to be sure.
        self.lock.acquire()
        self.meteor_p.stdin.close()
        self.meteor_p.kill()
        self.meteor_p.wait()
        # NOTE(review): trailing "| metrics/meteor/meteor.py |" below is
        # dataset-dump residue fused onto this line -- restore original.
        self.lock.release() | metrics/meteor/meteor.py |
import os
import sys
import subprocess
import threading
from typing import List, Tuple
# Path to the METEOR scorer jar, resolved relative to this file's directory
# (see the Popen cwd in Meteor.__init__). Change as needed.
METEOR_JAR = 'meteor-1.5.jar'
class Meteor:
    """Thread-safe wrapper around the METEOR 1.5 scorer jar.

    Spawns a long-lived ``java -jar meteor-1.5.jar ... -stdio`` subprocess
    and talks to it over stdin/stdout using METEOR's SCORE/EVAL line protocol.
    """

    def __init__(self) -> None:
        # Copy the environment instead of mutating os.environ in place, so
        # setting LC_ALL does not leak into the rest of the process.
        self.env = os.environ.copy()
        # NOTE(review): upstream spells the locale 'en_US.UTF_8' (underscore);
        # kept as-is for behavioural parity although 'en_US.UTF-8' is the
        # conventional name -- confirm before changing.
        self.env['LC_ALL'] = 'en_US.UTF_8'
        self.meteor_cmd = [
            'java', '-jar', '-Xmx2G', METEOR_JAR,
            '-', '-', '-stdio', '-l', 'en', '-norm'
        ]
        self.meteor_p = subprocess.Popen(
            self.meteor_cmd,
            # The jar path is relative, so run from this file's directory.
            cwd=os.path.dirname(os.path.abspath(__file__)),
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=self.env,
            universal_newlines=True,
            bufsize=1,  # line-buffered: each protocol line is flushed on write
        )
        # Serializes all access to the subprocess pipes.
        self.lock = threading.Lock()

    def compute_score(
        self, reference: List[List[str]], hypothesis: List[List[str]]
    ) -> Tuple[float, List[float]]:
        """Score each hypothesis against its references.

        Arguments:
        - reference - one list of reference strings per sample.
        - hypothesis - one single-element list (the hypothesis string) per sample.

        Returns (corpus-level METEOR score, list of per-segment scores).
        """
        assert len(reference) == len(hypothesis)
        scores = []
        eval_line = 'EVAL'
        with self.lock:
            for i, hypo in enumerate(hypothesis):
                ref = reference[i]
                # sanity check: hypothesis is a non-empty list, references non-empty
                assert type(hypo) is list
                assert len(hypo) >= 1
                assert type(ref) is list
                assert len(ref) > 0
                stat = self._stat(hypo[0], ref)
                eval_line += ' ||| {}'.format(stat)
            # Ask METEOR to evaluate all collected segment statistics at once.
            self.meteor_p.stdin.write(eval_line + '\n')
            # Collect one score line per segment ...
            for _ in range(len(hypothesis)):
                scores.append(float(self.meteor_p.stdout.readline().strip()))
            # ... followed by the final corpus-level score.
            final_score = float(self.meteor_p.stdout.readline().strip())
        return final_score, scores

    def method(self) -> str:
        """Name of this metric."""
        return "METEOR"

    def _stat(self, hypothesis_str, reference_list):
        """Send one SCORE request and return METEOR's raw statistics line."""
        # Protocol: SCORE ||| reference 1 words ||| ... ||| hypothesis words.
        # '|||' is the protocol delimiter, so strip it from the text; collapse
        # double spaces (the original no-op replace(' ', ' ') was a typo).
        hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ')
        score_line = ' ||| '.join(('SCORE', ' ||| '.join(reference_list), hypothesis_str))
        self.meteor_p.stdin.write(score_line + '\n')
        return self.meteor_p.stdout.readline().strip()

    def __del__(self) -> None:
        # Shut the subprocess down; guard against a partially-built instance
        # (e.g. Popen raised in __init__ before meteor_p/lock were set).
        proc = getattr(self, 'meteor_p', None)
        if proc is None:
            return
        with self.lock:
            proc.stdin.close()
            proc.kill()
            proc.wait()
import ckit
from ckit.ckit_const import *
## @addtogroup widget
## @{
#--------------------------------------------------------------------
## タブバーウィジェット
#
class TabBarWidget(ckit.Widget):
    """Tab bar widget.

    Draws a horizontal row of clickable tabs, scrolling horizontally so the
    selected tab is always kept visible.
    """

    # Maximum label width of a single tab, in character cells.
    MAX_ITEM_WIDTH = 30

    def __init__( self, window, x, y, width, height, selchange_handler ):
        ckit.Widget.__init__( self, window, x, y, width, height )
        self.plane0 = None
        self.createThemePlane()
        self.items = []                 # tab items; item[0] is the display name
        self.selection = None           # index of the selected tab, or None
        self.scroll_pos = 0             # horizontal scroll offset in cells
        self.selchange_handler = selchange_handler
        self.paint()

    def destroy(self):
        self.destroyThemePlane()

    def show(self,visible):
        ckit.Widget.show(self,visible)
        self.plane0.show(visible)

    def charToTabIndex( self, char_x, char_y ):
        """Return the index of the tab at character cell (char_x, char_y), or None."""
        x = -self.scroll_pos
        if 0 <= (char_y - self.y) < self.height:
            for i, item in enumerate(self.items):
                name = item[0]
                # +2 accounts for the one-cell separators drawn around each label
                item_width = min( self.window.getStringWidth(name), TabBarWidget.MAX_ITEM_WIDTH ) + 2
                if x <= (char_x - self.x) < x + item_width:
                    return i
                x += item_width
        return None

    def onLeftButtonDown( self, char_x, char_y, mod ):
        index = self.charToTabIndex( char_x, char_y )
        if index is None : return
        self.selection = index
        if self.selchange_handler:
            self.selchange_handler( self.selection, self.items[self.selection] )

    def onLeftButtonUp( self, char_x, char_y, mod ):
        pass

    def createThemePlane(self):
        if not self.plane0:
            self.plane0 = ckit.ThemePlane3x3( self.window, 'tabbar0.png' )

    def destroyThemePlane(self):
        if self.plane0:
            self.plane0.destroy()
            self.plane0 = None

    def setItems( self, items ):
        self.items = items
        self.paint()

    def setSelection( self, selection ):
        self.selection = selection
        self.paint()

    def makeVisible( self, index ):
        """Adjust scroll_pos so that the tab at `index` is fully visible."""
        tabs_width = 0
        for i, item in enumerate(self.items):
            name = item[0]
            item_width = min( self.window.getStringWidth(name), TabBarWidget.MAX_ITEM_WIDTH ) + 2
            if i==index:
                if self.scroll_pos > tabs_width:
                    self.scroll_pos = tabs_width
                elif self.scroll_pos + self.width < tabs_width + item_width:
                    self.scroll_pos = tabs_width + item_width - self.width
            tabs_width += item_width
            if i==len(self.items)-1:
                # Don't leave blank space on the right when all tabs fit.
                if tabs_width < self.scroll_pos + self.width:
                    self.scroll_pos = max( tabs_width - self.width, 0 )

    def paint(self):
        if self.selection is not None:
            self.makeVisible(self.selection)

        client_rect = self.window.getClientRect()
        offset_x, offset_y = self.window.charToClient( 0, 0 )
        char_w, char_h = self.window.getCharSize()

        # Snap the background image to the window edges where the widget
        # touches them.
        offset_x2 = 0
        if self.x==0 : offset_x2 = offset_x
        offset_x3 = 0
        if self.x+self.width==self.window.width() : offset_x3 = offset_x
        offset_y2 = 0
        if self.y==0 : offset_y2 = offset_y
        offset_y3 = 0
        if self.y+self.height==self.window.height() : offset_y3 = offset_y

        # Background image
        self.plane0.setPosSize( self.x*char_w+offset_x-offset_x2, self.y*char_h+offset_y-offset_y2, self.width*char_w+offset_x2+offset_x3, self.height*char_h+offset_y2+offset_y3 )

        line_color = (120,120,120)
        active_bg_color = (240,240,240)
        inactive_bg_color = None

        fg = ckit.getColor("bar_fg")
        attr = ckit.Attribute( fg=fg )
        # (active?, position) -> attribute; position 0/1/2 = left edge /
        # middle / right edge of a tab.
        attribute_table = {}
        attribute_table[ True, 0 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( LINE_LEFT, line_color ) )
        attribute_table[ True, 1 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( 0, line_color ) )
        attribute_table[ True, 2 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( LINE_RIGHT, line_color ) )
        attribute_table[ False, 0 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( LINE_LEFT, line_color ) )
        attribute_table[ False, 1 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( 0, line_color ) )
        attribute_table[ False, 2 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( LINE_RIGHT, line_color ) )

        # Clear the text row
        self.window.putString( self.x, self.y, self.width, 1, attr, " " * self.width )

        # Tab items
        x = self.x
        y = self.y
        width = self.width
        height = self.height
        offset = -self.scroll_pos
        for i, item in enumerate(self.items):
            active = i==self.selection
            name = item[0]
            item_width = self.window.getStringWidth(name)
            if item_width>TabBarWidget.MAX_ITEM_WIDTH:
                # Truncate long labels with an ellipsis on the right.
                name = ckit.adjustStringWidth( self.window, name, TabBarWidget.MAX_ITEM_WIDTH, align=ckit.ALIGN_LEFT, ellipsis=ckit.ELLIPSIS_RIGHT )
                item_width = TabBarWidget.MAX_ITEM_WIDTH
            self.window.putString( x, y, width, height, attribute_table[active,0], " ", offset=offset )
            offset += 1
            self.window.putString( x, y, width-1, height, attribute_table[active,1], name, offset=offset )
            offset += item_width
            if i<len(self.items)-1:
                self.window.putString( x, y, width, height, attribute_table[active,1], " ", offset=offset )
            else:
                self.window.putString( x, y, width, height, attribute_table[active,2], " ", offset=offset )
            offset += 1
## @} widget
import ckit
from ckit.ckit_const import *
## @addtogroup widget
## @{
#--------------------------------------------------------------------
## タブバーウィジェット
#
class TabBarWidget(ckit.Widget):
    """Tab bar widget.

    Draws a horizontal row of clickable tabs, scrolling horizontally so the
    selected tab is always kept visible.
    """

    # Maximum label width of a single tab, in character cells.
    MAX_ITEM_WIDTH = 30

    def __init__( self, window, x, y, width, height, selchange_handler ):
        ckit.Widget.__init__( self, window, x, y, width, height )
        self.plane0 = None
        self.createThemePlane()
        self.items = []                 # tab items; item[0] is the display name
        self.selection = None           # index of the selected tab, or None
        self.scroll_pos = 0             # horizontal scroll offset in cells
        self.selchange_handler = selchange_handler
        self.paint()

    def destroy(self):
        self.destroyThemePlane()

    def show(self,visible):
        ckit.Widget.show(self,visible)
        self.plane0.show(visible)

    def charToTabIndex( self, char_x, char_y ):
        """Return the index of the tab at character cell (char_x, char_y), or None."""
        x = -self.scroll_pos
        if 0 <= (char_y - self.y) < self.height:
            for i, item in enumerate(self.items):
                name = item[0]
                # +2 accounts for the one-cell separators drawn around each label
                item_width = min( self.window.getStringWidth(name), TabBarWidget.MAX_ITEM_WIDTH ) + 2
                if x <= (char_x - self.x) < x + item_width:
                    return i
                x += item_width
        return None

    def onLeftButtonDown( self, char_x, char_y, mod ):
        index = self.charToTabIndex( char_x, char_y )
        if index is None : return
        self.selection = index
        if self.selchange_handler:
            self.selchange_handler( self.selection, self.items[self.selection] )

    def onLeftButtonUp( self, char_x, char_y, mod ):
        pass

    def createThemePlane(self):
        if not self.plane0:
            self.plane0 = ckit.ThemePlane3x3( self.window, 'tabbar0.png' )

    def destroyThemePlane(self):
        if self.plane0:
            self.plane0.destroy()
            self.plane0 = None

    def setItems( self, items ):
        self.items = items
        self.paint()

    def setSelection( self, selection ):
        self.selection = selection
        self.paint()

    def makeVisible( self, index ):
        """Adjust scroll_pos so that the tab at `index` is fully visible."""
        tabs_width = 0
        for i, item in enumerate(self.items):
            name = item[0]
            item_width = min( self.window.getStringWidth(name), TabBarWidget.MAX_ITEM_WIDTH ) + 2
            if i==index:
                if self.scroll_pos > tabs_width:
                    self.scroll_pos = tabs_width
                elif self.scroll_pos + self.width < tabs_width + item_width:
                    self.scroll_pos = tabs_width + item_width - self.width
            tabs_width += item_width
            if i==len(self.items)-1:
                # Don't leave blank space on the right when all tabs fit.
                if tabs_width < self.scroll_pos + self.width:
                    self.scroll_pos = max( tabs_width - self.width, 0 )

    def paint(self):
        if self.selection is not None:
            self.makeVisible(self.selection)

        client_rect = self.window.getClientRect()
        offset_x, offset_y = self.window.charToClient( 0, 0 )
        char_w, char_h = self.window.getCharSize()

        # Snap the background image to the window edges where the widget
        # touches them.
        offset_x2 = 0
        if self.x==0 : offset_x2 = offset_x
        offset_x3 = 0
        if self.x+self.width==self.window.width() : offset_x3 = offset_x
        offset_y2 = 0
        if self.y==0 : offset_y2 = offset_y
        offset_y3 = 0
        if self.y+self.height==self.window.height() : offset_y3 = offset_y

        # Background image
        self.plane0.setPosSize( self.x*char_w+offset_x-offset_x2, self.y*char_h+offset_y-offset_y2, self.width*char_w+offset_x2+offset_x3, self.height*char_h+offset_y2+offset_y3 )

        line_color = (120,120,120)
        active_bg_color = (240,240,240)
        inactive_bg_color = None

        fg = ckit.getColor("bar_fg")
        attr = ckit.Attribute( fg=fg )
        # (active?, position) -> attribute; position 0/1/2 = left edge /
        # middle / right edge of a tab.
        attribute_table = {}
        attribute_table[ True, 0 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( LINE_LEFT, line_color ) )
        attribute_table[ True, 1 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( 0, line_color ) )
        attribute_table[ True, 2 ] = ckit.Attribute( fg=fg, bg=active_bg_color, line0=( LINE_RIGHT, line_color ) )
        attribute_table[ False, 0 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( LINE_LEFT, line_color ) )
        attribute_table[ False, 1 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( 0, line_color ) )
        attribute_table[ False, 2 ] = ckit.Attribute( fg=fg, bg=inactive_bg_color, line0=( LINE_RIGHT, line_color ) )

        # Clear the text row
        self.window.putString( self.x, self.y, self.width, 1, attr, " " * self.width )

        # Tab items
        x = self.x
        y = self.y
        width = self.width
        height = self.height
        offset = -self.scroll_pos
        for i, item in enumerate(self.items):
            active = i==self.selection
            name = item[0]
            item_width = self.window.getStringWidth(name)
            if item_width>TabBarWidget.MAX_ITEM_WIDTH:
                # Truncate long labels with an ellipsis on the right.
                name = ckit.adjustStringWidth( self.window, name, TabBarWidget.MAX_ITEM_WIDTH, align=ckit.ALIGN_LEFT, ellipsis=ckit.ELLIPSIS_RIGHT )
                item_width = TabBarWidget.MAX_ITEM_WIDTH
            self.window.putString( x, y, width, height, attribute_table[active,0], " ", offset=offset )
            offset += 1
            self.window.putString( x, y, width-1, height, attribute_table[active,1], name, offset=offset )
            offset += item_width
            if i<len(self.items)-1:
                self.window.putString( x, y, width, height, attribute_table[active,1], " ", offset=offset )
            else:
                self.window.putString( x, y, width, height, attribute_table[active,2], " ", offset=offset )
            offset += 1
## @} widget
import threading
from xml.etree import ElementTree
try:
from .st_helper import running_in_st, is_st3
from . import colors
from .color_highlighter import ColorHighlighter
except ValueError:
from st_helper import running_in_st, is_st3
import colors
from color_highlighter import ColorHighlighter
if running_in_st():
import sublime # pylint: disable=import-error
else:
from . import sublime
class ColorSchemeBuilder(object):
    """A class for building a color scheme."""

    _scope_name_template = "CH_color_%s"
    _color_scope_template = """
<dict>
    <key>name</key>
    <string>CH_color</string>
    <key>scope</key>
    <string>CH_color_%s</string>
    <key>settings</key>
    <dict>
        <key>background</key>
        <string>%s</string>
        <key>foreground</key>
        <string>%s</string>
        <key>caret</key>
        <string>%s</string>
    </dict>
</dict>
"""
    _text_scope_name_template = "CH_text_color_%s"
    _text_color_scope_template = """
<dict>
    <key>scope</key>
    <string>CH_text_color_%s</string>
    <key>settings</key>
    <dict>
        <key>background</key>
        <string>%s</string>
        <key>foreground</key>
        <string>%s</string>
        <key>caret</key>
        <string>%s</string>
    </dict>
</dict>
"""

    def __init__(self, color_scheme_data, color_scheme_writer, async_update):
        """
        Init the ColorSchemeBuilder.

        Arguments:
        - color_scheme_data - a ColorSchemeData instance for a color scheme.
        - color_scheme_writer - a ColorSchemeWriter instance for a color scheme.
        - async_update - whether to update the color scheme asynchronously or not.
        """
        self._color_scheme_data = color_scheme_data
        self._color_scheme_writer = color_scheme_writer
        self._async_update = async_update
        # Protects the color scheme data/writer against concurrent updates.
        self._lock = threading.Lock()

    def get_scopes(self, for_colors, for_text_coloring):
        """
        Get scope names for a list of colors.

        Also schedules (or performs) the color scheme update that defines
        those scopes.

        Arguments:
        - for_colors - a list of colors.
        - for_text_coloring - whether or not to return text highlighting scope names.
        Returns a list of scope names, one for each color.
        """
        scope_names = []
        # The scheme's background color is loop-invariant; read it once.
        background_color = self._color_scheme_data.background_color
        for color in for_colors:
            fixed_color = colors.background_color_for_text_workaround(color, background_color)
            color_name = fixed_color[1:]
            scope_names.append(self._get_color_name(for_text_coloring, color_name))
        if self._async_update:
            sublime.set_timeout_async(lambda: self._update_schema(for_colors), 0)
        else:
            self._update_schema(for_colors)
        return scope_names

    def _update_schema(self, for_colors):
        # Add scheme scopes for any colors not already present.
        with self._lock:
            existing_colors = self._color_scheme_data.existing_colors
            # Both of these are loop-invariant; compute once.
            background_color = self._color_scheme_data.background_color
            fixed_background_color = colors.background_color_for_text_workaround(background_color, background_color)
            scopes = []
            for color in for_colors:
                if color in existing_colors:
                    continue
                opposite_color = colors.complementary_color(color)
                fixed_color = colors.background_color_for_text_workaround(color, background_color)
                color_name = fixed_color[1:]
                scope = ElementTree.fromstring(
                    self._color_scope_template % (color_name, fixed_color, opposite_color, opposite_color))
                scopes.append(scope)
                text_scope = ElementTree.fromstring(
                    self._text_color_scope_template % (color_name, fixed_background_color, fixed_color, opposite_color))
                scopes.append(text_scope)
                existing_colors[color] = color_name
            if scopes:
                self._color_scheme_writer.add_scopes(scopes)

    def _get_color_name(self, for_text_coloring, color_name):
        # Pick the text-coloring or background-coloring scope name variant.
        if for_text_coloring:
            return self._text_scope_name_template % color_name
        return self._scope_name_template % color_name
class ColorSchemeColorHighlighter(ColorHighlighter):
    """A color highlighter that uses color scheme scopes to highlight colors."""

    region_name_template = "CH_color_%s_%d_%d"

    if is_st3():
        _region_style_flags = {
            "filled": sublime.DRAW_NO_OUTLINE,
            "text": sublime.DRAW_NO_OUTLINE,
            "outlined": sublime.DRAW_NO_FILL,
            "underlined_solid": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE,
            "underlined_strippled": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE,
            "underlined_squiggly": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE,
        }
    else:
        # ST2 supports only a reduced set of region styles.
        _region_style_flags = {
            "filled": 0,
            "text": 0,
            "outlined": sublime.DRAW_OUTLINED,
        }

    def __init__(self, view, style, color_scheme_builder, name, debug):  # pylint: disable=too-many-arguments
        """
        Init a ColorSchemeColorHighlighter.

        Arguments:
        - view - a view to highlight colors in.
        - style - the style of color highlighting; must be a key of
          _region_style_flags.
        - color_scheme_builder - the color scheme builder to build regions for colors.
        - name - the name of the color highlighter.
        - debug - whether to enable debug mode.
        """
        assert style in ColorSchemeColorHighlighter._region_style_flags
        self._view = view
        self._color_scheme_builder = color_scheme_builder
        self._text_coloring = style == "text"
        self._flags = ColorSchemeColorHighlighter._region_style_flags[style]
        self._name = name
        self._debug = debug

    def highlight_region(self, context, value):
        """
        Record a region to highlight.

        The actual highlighting is deferred to highlight_regions_done, which
        batches all recorded regions into one color scheme update.

        Arguments:
        - context - a dict with color highlighter run data.
        - value - tuple (region to highlight, its color).
        """
        if "values" not in context:
            context["values"] = []
        context["values"].append(value)

    def highlight_regions_done(self, context):  # noqa: D401
        """
        Called after all calls to highlight_region and unhighlight_region from highlight_regions have been made.

        Arguments:
        - context - a dict with color highlighter run data.
        """
        values = context.get("values", None)
        if not values:
            return
        colors_to_highlight = []
        for (_, color) in values:
            colors_to_highlight.append(color)
        # One batched request per run keeps color scheme rewrites to a minimum.
        scopes = self._color_scheme_builder.get_scopes(colors_to_highlight, self._text_coloring)
        for index, value in enumerate(values):
            (region, color) = value
            region_key = ColorSchemeColorHighlighter.region_name_template % (self._name, region.a, region.b)
            if self._debug:
                print("ColorHighlighter: action=highlight highlighter=ColorSchemeColorHighlighter region=%s color=%s"
                      % (region, color))
            self._view.add_regions(region_key, [region.region()], scopes[index], "", self._flags)

    def unhighlight_region(self, context, value):
        """
        Unhighlight a region.

        Arguments:
        - context - a dict with color highlighter run data.
        - value - tuple (region to unhighlight, its color).
        """
        (region, _) = value
        region_key = ColorSchemeColorHighlighter.region_name_template % (self._name, region.a, region.b)
        self._view.erase_regions(region_key)
import threading
from xml.etree import ElementTree
try:
from .st_helper import running_in_st, is_st3
from . import colors
from .color_highlighter import ColorHighlighter
except ValueError:
from st_helper import running_in_st, is_st3
import colors
from color_highlighter import ColorHighlighter
if running_in_st():
import sublime # pylint: disable=import-error
else:
from . import sublime
class ColorSchemeBuilder(object):
    """A class for building a color scheme."""

    _scope_name_template = "CH_color_%s"
    _color_scope_template = """
<dict>
    <key>name</key>
    <string>CH_color</string>
    <key>scope</key>
    <string>CH_color_%s</string>
    <key>settings</key>
    <dict>
        <key>background</key>
        <string>%s</string>
        <key>foreground</key>
        <string>%s</string>
        <key>caret</key>
        <string>%s</string>
    </dict>
</dict>
"""
    _text_scope_name_template = "CH_text_color_%s"
    _text_color_scope_template = """
<dict>
    <key>scope</key>
    <string>CH_text_color_%s</string>
    <key>settings</key>
    <dict>
        <key>background</key>
        <string>%s</string>
        <key>foreground</key>
        <string>%s</string>
        <key>caret</key>
        <string>%s</string>
    </dict>
</dict>
"""

    def __init__(self, color_scheme_data, color_scheme_writer, async_update):
        """
        Init the ColorSchemeBuilder.

        Arguments:
        - color_scheme_data - a ColorSchemeData instance for a color scheme.
        - color_scheme_writer - a ColorSchemeWriter instance for a color scheme.
        - async_update - whether to update the color scheme asynchronously or not.
        """
        self._color_scheme_data = color_scheme_data
        self._color_scheme_writer = color_scheme_writer
        self._async_update = async_update
        # Protects the color scheme data/writer against concurrent updates.
        self._lock = threading.Lock()

    def get_scopes(self, for_colors, for_text_coloring):
        """
        Get scope names for a list of colors.

        Also schedules (or performs) the color scheme update that defines
        those scopes.

        Arguments:
        - for_colors - a list of colors.
        - for_text_coloring - whether or not to return text highlighting scope names.
        Returns a list of scope names, one for each color.
        """
        scope_names = []
        # The scheme's background color is loop-invariant; read it once.
        background_color = self._color_scheme_data.background_color
        for color in for_colors:
            fixed_color = colors.background_color_for_text_workaround(color, background_color)
            color_name = fixed_color[1:]
            scope_names.append(self._get_color_name(for_text_coloring, color_name))
        if self._async_update:
            sublime.set_timeout_async(lambda: self._update_schema(for_colors), 0)
        else:
            self._update_schema(for_colors)
        return scope_names

    def _update_schema(self, for_colors):
        # Add scheme scopes for any colors not already present.
        with self._lock:
            existing_colors = self._color_scheme_data.existing_colors
            # Both of these are loop-invariant; compute once.
            background_color = self._color_scheme_data.background_color
            fixed_background_color = colors.background_color_for_text_workaround(background_color, background_color)
            scopes = []
            for color in for_colors:
                if color in existing_colors:
                    continue
                opposite_color = colors.complementary_color(color)
                fixed_color = colors.background_color_for_text_workaround(color, background_color)
                color_name = fixed_color[1:]
                scope = ElementTree.fromstring(
                    self._color_scope_template % (color_name, fixed_color, opposite_color, opposite_color))
                scopes.append(scope)
                text_scope = ElementTree.fromstring(
                    self._text_color_scope_template % (color_name, fixed_background_color, fixed_color, opposite_color))
                scopes.append(text_scope)
                existing_colors[color] = color_name
            if scopes:
                self._color_scheme_writer.add_scopes(scopes)

    def _get_color_name(self, for_text_coloring, color_name):
        # Pick the text-coloring or background-coloring scope name variant.
        if for_text_coloring:
            return self._text_scope_name_template % color_name
        return self._scope_name_template % color_name
class ColorSchemeColorHighlighter(ColorHighlighter):
    """A color highlighter that uses color scheme scopes to highlight colors."""

    region_name_template = "CH_color_%s_%d_%d"

    if is_st3():
        _region_style_flags = {
            "filled": sublime.DRAW_NO_OUTLINE,
            "text": sublime.DRAW_NO_OUTLINE,
            "outlined": sublime.DRAW_NO_FILL,
            "underlined_solid": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SOLID_UNDERLINE,
            "underlined_strippled": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_STIPPLED_UNDERLINE,
            "underlined_squiggly": sublime.DRAW_NO_FILL | sublime.DRAW_NO_OUTLINE | sublime.DRAW_SQUIGGLY_UNDERLINE,
        }
    else:
        # ST2 supports only a reduced set of region styles.
        _region_style_flags = {
            "filled": 0,
            "text": 0,
            "outlined": sublime.DRAW_OUTLINED,
        }

    def __init__(self, view, style, color_scheme_builder, name, debug):  # pylint: disable=too-many-arguments
        """
        Init a ColorSchemeColorHighlighter.

        Arguments:
        - view - a view to highlight colors in.
        - style - the style of color highlighting; must be a key of
          _region_style_flags.
        - color_scheme_builder - the color scheme builder to build regions for colors.
        - name - the name of the color highlighter.
        - debug - whether to enable debug mode.
        """
        assert style in ColorSchemeColorHighlighter._region_style_flags
        self._view = view
        self._color_scheme_builder = color_scheme_builder
        self._text_coloring = style == "text"
        self._flags = ColorSchemeColorHighlighter._region_style_flags[style]
        self._name = name
        self._debug = debug

    def highlight_region(self, context, value):
        """
        Record a region to highlight.

        The actual highlighting is deferred to highlight_regions_done, which
        batches all recorded regions into one color scheme update.

        Arguments:
        - context - a dict with color highlighter run data.
        - value - tuple (region to highlight, its color).
        """
        if "values" not in context:
            context["values"] = []
        context["values"].append(value)

    def highlight_regions_done(self, context):  # noqa: D401
        """
        Called after all calls to highlight_region and unhighlight_region from highlight_regions have been made.

        Arguments:
        - context - a dict with color highlighter run data.
        """
        values = context.get("values", None)
        if not values:
            return
        colors_to_highlight = []
        for (_, color) in values:
            colors_to_highlight.append(color)
        # One batched request per run keeps color scheme rewrites to a minimum.
        scopes = self._color_scheme_builder.get_scopes(colors_to_highlight, self._text_coloring)
        for index, value in enumerate(values):
            (region, color) = value
            region_key = ColorSchemeColorHighlighter.region_name_template % (self._name, region.a, region.b)
            if self._debug:
                print("ColorHighlighter: action=highlight highlighter=ColorSchemeColorHighlighter region=%s color=%s"
                      % (region, color))
            self._view.add_regions(region_key, [region.region()], scopes[index], "", self._flags)

    def unhighlight_region(self, context, value):
        """
        Unhighlight a region.

        Arguments:
        - context - a dict with color highlighter run data.
        - value - tuple (region to unhighlight, its color).
        """
        (region, _) = value
        region_key = ColorSchemeColorHighlighter.region_name_template % (self._name, region.a, region.b)
        self._view.erase_regions(region_key)
import GestureAgentsTUIO.tuio as tuio
from GestureAgents.Events import Event
from GestureAgents.Agent import Agent
class TuioCursorEvents:
    # Global event fired whenever a new cursor agent appears.
    newAgent = Event()
class CursorAgent(Agent):
    # Per-cursor lifecycle events: cursor created, moved/changed, removed.
    eventnames = ('newCursor', 'updateCursor', 'removeCursor')
class TuioAgentGenerator:
    """Polls a TUIO tracker and maps TUIO cursors onto CursorAgent events."""

    def __init__(self, screensize, inverse_x=False, inverse_y=False):
        self.tracking = tuio.Tracking(host='0.0.0.0')
        self.cursors = {}   # sessionid -> last seen cursor state dict
        self.agents = {}    # sessionid -> CursorAgent
        self.screensize = screensize
        self.inverse_x = inverse_x
        self.inverse_y = inverse_y

    def update(self):
        """Poll the tracker once and emit new/update/remove cursor events."""
        self.tracking.update()
        cursors = {}
        for cur in self.tracking.cursors():
            cursors[cur.sessionid] = self._genCurDict(cur)
        # Cursors gone since the last poll: send removeCursor. Iterate over a
        # copy because entries are deleted during the loop.
        for c in dict(self.cursors):
            if c not in cursors:
                del self.cursors[c]
                a = self.agents[c]
                a.removeCursor.call(a)
                a.finish()
                del self.agents[c]
        # New or changed cursors: send newCursor / updateCursor.
        for c, content in cursors.iteritems():
            if c not in self.cursors:
                # New cursor: announce the agent first (ontable stays False
                # while listeners register), then flag it on-table and fire
                # newCursor -- this ordering is relied upon by listeners.
                a = self.makeCursorAgent()
                self._updateAgent(a, content)
                a.ontable = False
                self.cursors[c] = content
                self.agents[c] = a
                TuioCursorEvents.newAgent.call(a)
                a.ontable = True
                a.newCursor.call(a)
            elif content != self.cursors[c]:
                a = self.agents[c]
                self._updateAgent(a, content)
                self.cursors[c] = content
                a.updateCursor.call(a)

    def __del__(self):
        self.tracking.stop()

    @staticmethod
    def _genCurDict(cur):
        """Snapshot the relevant cursor attributes into a plain dict."""
        d = dict()
        for member in ("sessionid", "xpos", "ypos", "xmot", "ymot", "mot_accel"):
            d[member] = getattr(cur, member)
        return d

    def _updateAgent(self, agent, dcur):
        """Copy cursor state onto the agent and derive its screen position."""
        for member, value in dcur.iteritems():
            setattr(agent, member, value)
        # pos is legacy, kept for the mouse-emulator interface
        if self.inverse_x:
            agent.xpos = 1 - agent.xpos
        if self.inverse_y:
            agent.ypos = 1 - agent.ypos
        agent.pos = (
            agent.xpos * self.screensize[0], agent.ypos * self.screensize[1])

    @staticmethod
    def makeCursorAgent():
        return CursorAgent(TuioCursorEvents)
import GestureAgentsTUIO.tuio as tuio
from GestureAgents.Events import Event
from GestureAgents.Agent import Agent
class TuioCursorEvents:
    # Global event fired whenever a new cursor agent appears.
    newAgent = Event()
class CursorAgent(Agent):
    # Per-cursor lifecycle events: cursor created, moved/changed, removed.
    eventnames = ('newCursor', 'updateCursor', 'removeCursor')
class TuioAgentGenerator:
    """Polls a TUIO tracker and maps TUIO cursors onto CursorAgent events."""

    def __init__(self, screensize, inverse_x=False, inverse_y=False):
        self.tracking = tuio.Tracking(host='0.0.0.0')
        self.cursors = {}   # sessionid -> last seen cursor state dict
        self.agents = {}    # sessionid -> CursorAgent
        self.screensize = screensize
        self.inverse_x = inverse_x
        self.inverse_y = inverse_y

    def update(self):
        """Poll the tracker once and emit new/update/remove cursor events."""
        self.tracking.update()
        cursors = {}
        for cur in self.tracking.cursors():
            cursors[cur.sessionid] = self._genCurDict(cur)
        # Cursors gone since the last poll: send removeCursor. Iterate over a
        # copy because entries are deleted during the loop.
        for c in dict(self.cursors):
            if c not in cursors:
                del self.cursors[c]
                a = self.agents[c]
                a.removeCursor.call(a)
                a.finish()
                del self.agents[c]
        # New or changed cursors: send newCursor / updateCursor.
        for c, content in cursors.iteritems():
            if c not in self.cursors:
                # New cursor: announce the agent first (ontable stays False
                # while listeners register), then flag it on-table and fire
                # newCursor -- this ordering is relied upon by listeners.
                a = self.makeCursorAgent()
                self._updateAgent(a, content)
                a.ontable = False
                self.cursors[c] = content
                self.agents[c] = a
                TuioCursorEvents.newAgent.call(a)
                a.ontable = True
                a.newCursor.call(a)
            elif content != self.cursors[c]:
                a = self.agents[c]
                self._updateAgent(a, content)
                self.cursors[c] = content
                a.updateCursor.call(a)

    def __del__(self):
        self.tracking.stop()

    @staticmethod
    def _genCurDict(cur):
        """Snapshot the relevant cursor attributes into a plain dict."""
        d = dict()
        for member in ("sessionid", "xpos", "ypos", "xmot", "ymot", "mot_accel"):
            d[member] = getattr(cur, member)
        return d

    def _updateAgent(self, agent, dcur):
        """Copy cursor state onto the agent and derive its screen position."""
        for member, value in dcur.iteritems():
            setattr(agent, member, value)
        # pos is legacy, kept for the mouse-emulator interface
        if self.inverse_x:
            agent.xpos = 1 - agent.xpos
        if self.inverse_y:
            agent.ypos = 1 - agent.ypos
        agent.pos = (
            agent.xpos * self.screensize[0], agent.ypos * self.screensize[1])

    @staticmethod
    def makeCursorAgent():
        return CursorAgent(TuioCursorEvents)
import torch
from torch.quantization.observer import MovingAverageMinMaxObserver
from torch.quantization.fake_quantize import FakeQuantize
from torch.quantization.quantization_mappings import *
from torch.quantization.quantize import swap_module
import src.utils as utils
import copy
import torch.nn.intrinsic as nni
from src.models.stochastic.bbb.conv import Conv2d as Conv2dBBB
from src.models.stochastic.bbb.conv import ConvReLU2d as ConvReLU2dBBB
from src.models.stochastic.bbb.conv import ConvBn2d as ConvBn2dBBB
from src.models.stochastic.bbb.conv import ConvBnReLU2d as ConvBnReLU2dBBB
from src.models.stochastic.bbb.quantized.conv_q import Conv2d as Conv2dBBB_Q
from src.models.stochastic.bbb.quantized.conv_q import ConvReLU2d as ConvReLU2dBBB_Q
from src.models.stochastic.bbb.quantized.conv_qat import Conv2d as Conv2dBBB_QAT
from src.models.stochastic.bbb.quantized.conv_qat import ConvReLU2d as ConvReLU2dBBB_QAT
from src.models.stochastic.bbb.quantized.conv_qat import ConvBn2d as ConvBn2dBBB_QAT
from src.models.stochastic.bbb.quantized.conv_qat import ConvBnReLU2d as ConvBnReLU2dBBB_QAT
from src.models.stochastic.bbb.linear import Linear as LinearBBB
from src.models.stochastic.bbb.linear import LinearReLU as LinearReLUBBB
from src.models.stochastic.bbb.quantized.linear_q import Linear as LinearBBB_Q
from src.models.stochastic.bbb.quantized.linear_q import LinearReLU as LinearReLUBBB_Q
from src.models.stochastic.bbb.quantized.linear_qat import Linear as LinearBBB_QAT
from src.models.stochastic.bbb.quantized.linear_qat import LinearReLU as LinearReLUBBB_QAT
# Let the BBB (Bayes-by-Backprop) layers participate in qconfig propagation
# alongside torch's default whitelist.
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST = get_qconfig_propagation_list()
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(LinearBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(LinearReLUBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(Conv2dBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(ConvReLU2dBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(ConvBn2dBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(ConvBnReLU2dBBB)
# Float BBB modules -> their quantization-aware-training (QAT) counterparts,
# used when preparing a model for QAT.
QAT_MODULE_MAPPINGS[LinearBBB] = LinearBBB_QAT
QAT_MODULE_MAPPINGS[LinearReLUBBB] = LinearReLUBBB_QAT
QAT_MODULE_MAPPINGS[Conv2dBBB] = Conv2dBBB_QAT
QAT_MODULE_MAPPINGS[ConvReLU2dBBB] = ConvReLU2dBBB_QAT
QAT_MODULE_MAPPINGS[ConvBn2dBBB] = ConvBn2dBBB_QAT
QAT_MODULE_MAPPINGS[ConvBnReLU2dBBB] = ConvBnReLU2dBBB_QAT
# Float/QAT modules -> statically quantized implementations used by convert().
STATIC_QUANT_MODULE_MAPPINGS[LinearBBB] = LinearBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[LinearBBB_QAT] = LinearBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[LinearReLUBBB] = LinearReLUBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[LinearReLUBBB_QAT] = LinearReLUBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[Conv2dBBB] = Conv2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[Conv2dBBB_QAT] = Conv2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvReLU2dBBB] = ConvReLU2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvReLU2dBBB_QAT] = ConvReLU2dBBB_Q
# Conv+BN (and Conv+BN+ReLU) fusions fold the BN at convert time, so they
# map onto the plain quantized conv variants.
STATIC_QUANT_MODULE_MAPPINGS[ConvBn2dBBB_QAT] = Conv2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvBnReLU2dBBB_QAT] = ConvReLU2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[nni.ConvBn2d] = torch.nn.quantized.Conv2d
STATIC_QUANT_MODULE_MAPPINGS[torch.nn.intrinsic.qat.modules.conv_fused.ConvBn2d] = torch.nn.quantized.Conv2d
STATIC_QUANT_MODULE_MAPPINGS[nni.ConvBnReLU2d] = torch.nn.intrinsic.quantized.ConvReLU2d
STATIC_QUANT_MODULE_MAPPINGS[torch.nn.intrinsic.qat.modules.conv_fused.ConvBnReLU2d] = torch.nn.intrinsic.quantized.ConvReLU2d
def convert(model, mapping=None, inplace=True):
    """Recursively swap supported float/QAT modules for quantized ones.

    Arguments:
    - model - the module tree to convert.
    - mapping - float-module-class -> quantized-module-class mapping;
      defaults to STATIC_QUANT_MODULE_MAPPINGS.
    - inplace - if False, the model is deep-copied before conversion.

    Returns the converted module tree.
    """
    # Resolve the default once (the original checked it both here and in the
    # inner helper on every recursion).
    if mapping is None:
        mapping = STATIC_QUANT_MODULE_MAPPINGS
    # Fused/intrinsic modules are swapped as a whole unit; do not recurse
    # into their children. Built once instead of per recursive call.
    swappable_modules = (nni.ConvBn2d,
                         nni.ConvBnReLU2d,
                         torch.nn.intrinsic.qat.modules.conv_fused.ConvBnReLU2d,
                         torch.nn.intrinsic.qat.modules.conv_fused.ConvBn2d,
                         nni.LinearReLU,
                         nni.BNReLU2d,
                         nni.BNReLU3d,
                         nni.ConvBn1d,
                         nni.ConvReLU1d,
                         nni.ConvBnReLU1d,
                         nni.ConvReLU2d,
                         nni.ConvReLU3d,
                         LinearReLUBBB,
                         ConvReLU2dBBB,
                         ConvBn2dBBB,
                         ConvBnReLU2dBBB)

    def _convert(module):
        # Depth-first: convert children, then swap each child if mapped.
        reassign = {}
        for name, mod in module.named_children():
            if type(mod) not in swappable_modules:
                _convert(mod)
            reassign[name] = swap_module(mod, mapping)
        for key, value in reassign.items():
            module._modules[key] = value
        return module

    if not inplace:
        model = copy.deepcopy(model)
    return _convert(model)
def postprocess_model(model, args, q=None, at=None, special_info=""):
    """Reload the trained weights and statically quantize the model.

    Runs only for quantized (``q``) and alternately-trained (``at``) models
    that are not SGLD variants; the converted model is saved back to disk.

    :param model: network to convert (moved to CPU for static quantization).
    :param args: experiment namespace; reads ``q``, ``at``, ``model``, ``save``.
    :param q: override for ``args.q`` (defaults to it when None).
    :param at: override for ``args.at`` (defaults to it when None).
    :param special_info: suffix distinguishing the checkpoint file.
    """
    q = args.q if q is None else q
    at = args.at if at is None else at
    # Guard clause: nothing to do unless both flags are set and the model
    # is not an SGLD variant.
    if not (q and at) or 'sgld' in args.model:
        return
    model = model.cpu()
    utils.load_model(model, args.save + "/weights{}.pt".format(special_info))
    convert(model)
    utils.save_model(model, args, special_info)
def prepare_model(model, args, q=None, at=None):
    """Attach a reduced-precision fake-quantization qconfig to ``model`` and
    insert observers / fake-quant modules for quantization-aware training.

    :param model: network to prepare (modified in place).
    :param args: experiment namespace; reads ``activation_precision``,
        ``weight_precision`` and ``model``.
    :param q: override for ``args.q`` (defaults to it when None).
    :param at: override for ``args.at`` (defaults to it when None).
    """
    if q is None:
        q = args.q
    if at is None:
        at = args.at
    # NOTE(review): q/at are resolved here but never used below — either dead
    # code or a missing early-return guard (compare postprocess_model); confirm.
    torch.backends.quantized.engine = 'fbgemm'
    # Sub-8-bit ranges: activations use unsigned 2..7-bit ranges, weights
    # signed 2..8-bit ranges, looked up from precomputed bound tables.
    assert 2 <= args.activation_precision and args.activation_precision <= 7
    assert 2 <= args.weight_precision and args.weight_precision <= 8
    activation_precision = utils.UINT_BOUNDS[args.activation_precision]
    weight_precision = utils.INT_BOUNDS[args.weight_precision]
    # Fuse conv+bn(+relu) pairs if the model provides its own fusion routine.
    if hasattr(model, 'fuse_model'):
        model.fuse_model()
    # Per-tensor affine fake quantization for both activations (quint8 range)
    # and weights (qint8 range), clamped to the reduced-precision bounds.
    model.qconfig = torch.quantization.QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                                 dtype=torch.quint8,
                                                                                 quant_min=activation_precision[0],
                                                                                 quant_max=activation_precision[1],
                                                                                 qscheme=torch.per_tensor_affine),
                                               weight=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
                                                                             quant_min=weight_precision[0],
                                                                             quant_max=weight_precision[1],
                                                                             dtype=torch.qint8,
                                                                             qscheme=torch.per_tensor_affine))
    if not 'bbb' in args.model:
        torch.quantization.prepare_qat(model, inplace=True)
    else:
        # NOTE(review): prepare() is invoked twice for BBB models; only the
        # second call passes observer_non_leaf_module_list so observers are
        # also placed inside LinearBBB/Conv2dBBB. Verify the first call is
        # needed — double-preparing may attach duplicate observers.
        torch.quantization.prepare(
            model, allow_list=DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST, inplace=True)
        torch.quantization.prepare(
            model, allow_list=DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST, inplace=True, observer_non_leaf_module_list=[LinearBBB, Conv2dBBB])
convert(model, mapping=QAT_MODULE_MAPPINGS) | src/quant_utils.py | import torch
from torch.quantization.observer import MovingAverageMinMaxObserver
from torch.quantization.fake_quantize import FakeQuantize
from torch.quantization.quantization_mappings import *
from torch.quantization.quantize import swap_module
import src.utils as utils
import copy
import torch.nn.intrinsic as nni
from src.models.stochastic.bbb.conv import Conv2d as Conv2dBBB
from src.models.stochastic.bbb.conv import ConvReLU2d as ConvReLU2dBBB
from src.models.stochastic.bbb.conv import ConvBn2d as ConvBn2dBBB
from src.models.stochastic.bbb.conv import ConvBnReLU2d as ConvBnReLU2dBBB
from src.models.stochastic.bbb.quantized.conv_q import Conv2d as Conv2dBBB_Q
from src.models.stochastic.bbb.quantized.conv_q import ConvReLU2d as ConvReLU2dBBB_Q
from src.models.stochastic.bbb.quantized.conv_qat import Conv2d as Conv2dBBB_QAT
from src.models.stochastic.bbb.quantized.conv_qat import ConvReLU2d as ConvReLU2dBBB_QAT
from src.models.stochastic.bbb.quantized.conv_qat import ConvBn2d as ConvBn2dBBB_QAT
from src.models.stochastic.bbb.quantized.conv_qat import ConvBnReLU2d as ConvBnReLU2dBBB_QAT
from src.models.stochastic.bbb.linear import Linear as LinearBBB
from src.models.stochastic.bbb.linear import LinearReLU as LinearReLUBBB
from src.models.stochastic.bbb.quantized.linear_q import Linear as LinearBBB_Q
from src.models.stochastic.bbb.quantized.linear_q import LinearReLU as LinearReLUBBB_Q
from src.models.stochastic.bbb.quantized.linear_qat import Linear as LinearBBB_QAT
from src.models.stochastic.bbb.quantized.linear_qat import LinearReLU as LinearReLUBBB_QAT
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST = get_qconfig_propagation_list()
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(LinearBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(LinearReLUBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(Conv2dBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(ConvReLU2dBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(ConvBn2dBBB)
DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST.add(ConvBnReLU2dBBB)
QAT_MODULE_MAPPINGS[LinearBBB] = LinearBBB_QAT
QAT_MODULE_MAPPINGS[LinearReLUBBB] = LinearReLUBBB_QAT
QAT_MODULE_MAPPINGS[Conv2dBBB] = Conv2dBBB_QAT
QAT_MODULE_MAPPINGS[ConvReLU2dBBB] = ConvReLU2dBBB_QAT
QAT_MODULE_MAPPINGS[ConvBn2dBBB] = ConvBn2dBBB_QAT
QAT_MODULE_MAPPINGS[ConvBnReLU2dBBB] = ConvBnReLU2dBBB_QAT
STATIC_QUANT_MODULE_MAPPINGS[LinearBBB] = LinearBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[LinearBBB_QAT] = LinearBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[LinearReLUBBB] = LinearReLUBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[LinearReLUBBB_QAT] = LinearReLUBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[Conv2dBBB] = Conv2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[Conv2dBBB_QAT] = Conv2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvReLU2dBBB] = ConvReLU2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvReLU2dBBB_QAT] = ConvReLU2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvBn2dBBB_QAT] = Conv2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[ConvBnReLU2dBBB_QAT] = ConvReLU2dBBB_Q
STATIC_QUANT_MODULE_MAPPINGS[nni.ConvBn2d] = torch.nn.quantized.Conv2d
STATIC_QUANT_MODULE_MAPPINGS[torch.nn.intrinsic.qat.modules.conv_fused.ConvBn2d] = torch.nn.quantized.Conv2d
STATIC_QUANT_MODULE_MAPPINGS[nni.ConvBnReLU2d] = torch.nn.intrinsic.quantized.ConvReLU2d
STATIC_QUANT_MODULE_MAPPINGS[torch.nn.intrinsic.qat.modules.conv_fused.ConvBnReLU2d] = torch.nn.intrinsic.quantized.ConvReLU2d
def convert(model, mapping=None, inplace=True):
def _convert(module, mapping=None, inplace=True):
if mapping is None:
mapping = STATIC_QUANT_MODULE_MAPPINGS
if not inplace:
module = copy.deepcopy(module)
reassign = {}
SWAPPABLE_MODULES = (nni.ConvBn2d,
nni.ConvBnReLU2d,
torch.nn.intrinsic.qat.modules.conv_fused.ConvBnReLU2d,
torch.nn.intrinsic.qat.modules.conv_fused.ConvBn2d,
nni.LinearReLU,
nni.BNReLU2d,
nni.BNReLU3d,
nni.ConvBn1d,
nni.ConvReLU1d,
nni.ConvBnReLU1d,
nni.ConvReLU2d,
nni.ConvReLU3d,
LinearReLUBBB,
ConvReLU2dBBB,
ConvBn2dBBB,
ConvBnReLU2dBBB)
for name, mod in module.named_children():
if type(mod) not in SWAPPABLE_MODULES:
_convert(mod, mapping, inplace=True)
swap = swap_module(mod, mapping)
reassign[name] = swap
for key, value in reassign.items():
module._modules[key] = value
return module
if mapping is None:
mapping = STATIC_QUANT_MODULE_MAPPINGS
model = _convert(model, mapping=mapping, inplace=inplace)
return model
def postprocess_model(model, args, q=None, at=None, special_info=""):
if q is None:
q = args.q
if at is None:
at = args.at
if q and at and 'sgld' not in args.model:
model = model.cpu()
utils.load_model(model, args.save+"/weights{}.pt".format(special_info))
convert(model)
utils.save_model(model, args, special_info)
def prepare_model(model, args, q=None, at=None):
if q is None:
q = args.q
if at is None:
at = args.at
torch.backends.quantized.engine = 'fbgemm'
assert 2 <= args.activation_precision and args.activation_precision <= 7
assert 2 <= args.weight_precision and args.weight_precision <= 8
activation_precision = utils.UINT_BOUNDS[args.activation_precision]
weight_precision = utils.INT_BOUNDS[args.weight_precision]
if hasattr(model, 'fuse_model'):
model.fuse_model()
model.qconfig = torch.quantization.QConfig(activation=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
dtype=torch.quint8,
quant_min=activation_precision[0],
quant_max=activation_precision[1],
qscheme=torch.per_tensor_affine),
weight=FakeQuantize.with_args(observer=MovingAverageMinMaxObserver,
quant_min=weight_precision[0],
quant_max=weight_precision[1],
dtype=torch.qint8,
qscheme=torch.per_tensor_affine))
if not 'bbb' in args.model:
torch.quantization.prepare_qat(model, inplace=True)
else:
torch.quantization.prepare(
model, allow_list=DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST, inplace=True)
torch.quantization.prepare(
model, allow_list=DEFAULT_QCONFIG_PROPAGATE_WHITE_LIST, inplace=True, observer_non_leaf_module_list=[LinearBBB, Conv2dBBB])
convert(model, mapping=QAT_MODULE_MAPPINGS) | 0.702224 | 0.783575 |
def intriga(n):
    """Find where the 'intrigue' queen minimizes the friendly queens.

    Tries the intriguing queen on every square of an n x n board; for each
    placement, rainha() computes the maximum number of mutually non-attacking
    queens that can still coexist (the intriguer included).

    :param n: board size.
    :return: (best, cells) — the smallest achievable maximum and every
        0-based (row, col) placement of the intriguer that achieves it.
    """
    # One entry per row: the column of the queen on that row, or None.
    tabuleiro = [None for _ in range(n)]
    # Upper bound on friendly queens: one per row.
    best = len(tabuleiro)
    best_cells = []
    for row in range(len(tabuleiro)):
        for col in range(len(tabuleiro)):
            # Place the intriguer and see how many queens fit around it.
            tabuleiro[row] = col
            score = rainha(tabuleiro, 1)
            if score < best:
                # Strictly better: discard previous candidates.
                best = score
                best_cells = [(row, col)]
            elif score == best:
                # Ties accumulate.
                best_cells.append((row, col))
            # Clear the square before trying the next placement.
            tabuleiro[row] = None
    return best, best_cells
# Greedily explores queen placements to maximize the number of queens.
def rainha(tabuleiro, k, i = 0):
    """Return the maximum number of queens placeable from row ``i`` down.

    :param tabuleiro: board as a column-per-row list (None = empty row);
        restored to its input state before returning.
    :param k: queens already placed on rows above ``i``.
    :param i: row currently being considered.
    """
    # Past the last row: no more queens can be added.
    if i >= len(tabuleiro):
        return k
    # Row already occupied (e.g. by the intriguer): move on.
    if tabuleiro[i] != None:
        return rainha(tabuleiro, k, i + 1)
    best = k
    for col in range(len(tabuleiro)):
        if safe(tabuleiro, i, col):
            # Tentatively place a queen and recurse on the remaining rows.
            tabuleiro[i] = col
            best = max(best, rainha(tabuleiro, k + 1, i + 1))
            # Backtrack.
            tabuleiro[i] = None
    # Also consider leaving this row empty entirely.
    best = max(best, rainha(tabuleiro, k, i + 1))
    return best
# Checks whether a square is attacked by any queen already on the board.
def safe(tabuleiro, i, j):
    """Return True if a queen at row ``i``, column ``j`` would attack
    (or be attacked by) no queen currently placed on ``tabuleiro``."""
    for row, col in enumerate(tabuleiro):
        # Skip the square's own row and empty rows.
        if row == i or col is None:
            continue
        # Same column, or same diagonal (equal row/column distance).
        if col == j or abs(row - i) == abs(col - j):
            return False
    return True
def main():
    # Read the board size from stdin.
    n = int(input())
    # Smallest achievable count of friendly queens and every intriguer
    # placement that achieves it.
    num_rainhas, posicoes = intriga(n)
    # Convert to 1-based output coordinates.
    # NOTE(review): the count is also incremented — presumably to include the
    # intriguer itself or match the expected output convention; confirm.
    num_rainhas += 1
    posicoes = [(i + 1, j + 1) for (i, j) in posicoes]
    # Print only the positions (the count line is intentionally disabled).
    #print('Uma rainha em qualquer uma das seguintes posições minimiza o número de rainhas amigas no tabuleiro para {}:'.format(num_rainhas))
    print(*posicoes, sep=' ')
main() | Simulado 03/RainhasAmigasComUmaIntriga/src/__main__.py | def intriga(n):
# Inicializa um tabuleiro sem rainhas
tabuleiro = [None for _ in range(n)]
# Inicializa o numero minimo de rainhas com o máximo de rainhas possivel (numero de linhas)
minimum = len(tabuleiro)
minimum_pos = list()
# Testa a rainha causadora de intrigas em cada posição
# Testa cada linha
for i in range(len(tabuleiro)):
# Testa cada coluna
for j in range(len(tabuleiro)):
tabuleiro[i] = j
# Para cada posicao, verifica o numero maximo de rainhas amigas possivel
cur_min = rainha(tabuleiro, 1)
# Caso seja menor do que o menor obtido até agora, descarta os resultados anteriores
if cur_min < minimum:
minimum = cur_min
minimum_pos = [(i, j)]
# Caso seja igual, concatena a posição atual aos resultados anteriores
elif cur_min == minimum:
minimum_pos.append((i, j))
# Reseta a posição atual
tabuleiro[i] = None
# Retorna o menor numero de rainhas amigas possivel encontrado e as posições correspondentes
# para a rainha causadora de intrigas
return minimum, minimum_pos
# Tenta posicionar as demais rainhas de forma a maximizar o número de rainhas
def rainha(tabuleiro, k, i = 0):
maximum = k
# Caso a posição esteja fora do tabuleiro, retorne o numero de rainhas até agora
if i >= len(tabuleiro):
return maximum
# Caso ja tenha alguma rainha na linha atual, teste a proxima posição
if tabuleiro[i] != None:
return rainha(tabuleiro, k, i + 1)
# Para cada coluna possivel
for j in range(len(tabuleiro)):
# Se a posição for livre de intrigas
if safe(tabuleiro, i, j):
# Posicione uma rainha nela
tabuleiro[i] = j
# Obtenha o maximo possivel com essa rainha posicionada
cur_max = rainha(tabuleiro, k + 1, i + 1)
# Se for maior que o maximo obtido anteriormente, substitua
if cur_max > maximum:
maximum = cur_max
# Retira a rainha da posição
tabuleiro[i] = None
# Teste uma ultima vez, com a linha vazia
cur_max = rainha(tabuleiro, k, i + 1)
# Se for maior que o maximo obtido anteriormente, substitua
if cur_max > maximum:
maximum = cur_max
# Retorne o valor máximo obtido
return maximum
# Verifica se há uma intriga com uma posição do tabuleiro
def safe(tabuleiro, i, j):
for linha in range(len(tabuleiro)):
if (linha != i) and (tabuleiro[linha] != None):
if (tabuleiro[linha] == j) or (abs(linha - i) == abs(tabuleiro[linha] - j)):
return False
return True
def main():
# Obtem o tamanho do tabuleiro
n = int(input())
# Obtem os resultados
num_rainhas, posicoes = intriga(n)
# Normaliza as posições para iniciarem em 1
num_rainhas += 1
posicoes = [(i + 1, j + 1) for (i, j) in posicoes]
# Imprime resultado
#print('Uma rainha em qualquer uma das seguintes posições minimiza o número de rainhas amigas no tabuleiro para {}:'.format(num_rainhas))
print(*posicoes, sep=' ')
main() | 0.326701 | 0.606382 |
from typing import List, Union, Dict, Tuple, Set, Any, Literal
from functools import reduce
from json import dumps
from random import random
def to_terminal(
        line_type: Union[Literal['warning', 'error', 'info', ''], None],
        msg_type: Union[Literal['WARNING', 'ERROR', 'INFO', ''], str, None],
        *message: Union[str, Tuple[Any], List[Any]]):
    """
    prints to terminal, use this instead of print statement if you want to examine output on Genetic Py terminal.
    :param line_type: this is used to determine the color of the message:
    * 'error' is red.
    * 'warning' is yellow.
    * 'info' is blue.
    * <empty string> or anything else is white.
    :param msg_type: the header of message, usually you find 'INFO', 'WARNING', 'ERROR, but you can use any string.
    * ex: passing "ERROR" to header is going to be: "ERROR: ", passing empty string or None results in no header.
    :param message: message to show after the header; parts are joined with '<br>'.
    """
    # Bug fix: the previous code evaluated message[0] when *no* message parts
    # were passed (IndexError) and reduce() would raise TypeError on an empty
    # sequence — an empty call now emits an empty message string instead.
    if not message:
        joined = ''
    else:
        # reduce() on a single part simply returns it unchanged.
        joined = reduce(
            lambda accum_lines, line: accum_lines + '<br>' + line, message
        )
    # Flush immediately so the GUI terminal picks the line up without delay.
    print(dumps({
        "terminal": True,
        "line-type": line_type,
        "msg-type": msg_type,
        "message": joined
    }), flush=True)
# Hidden target bit-string; created lazily on the first fitness evaluation.
solution = None
def init_solution(genes_num: int):
    """Create a uniformly random 0/1 target solution of the given length."""
    global solution
    # random() >= .5 is a fair coin; int() maps the bool to 1/0.
    solution = [int(random() >= .5) for _ in range(genes_num)]
def get_fitness(genes: List[int], data: Union[Dict, List, Tuple, Set]) -> Union[float, int]:
    """
    Fitness Function Template, wrap return statement with int() or float() to indicate the intended type
    so the simulator can detect its type (useful for the graph).

    Counts how many genes match the hidden random target solution, which is
    lazily initialized to the individual's length on first call.
    :param genes: genes of a given individual/chromosome which consists of a list of 0s and 1s.
    :param data: genes data loaded of the given path inside GA Control Panel (unused here).
    """
    global solution
    # Idiom fix: compare against None with 'is', not isinstance(..., type(None)).
    if solution is None:
        init_solution(len(genes))
return sum(1 for ind_gene, solution_gene in zip(genes, solution) if int(ind_gene) == solution_gene) | build/examples/random-solution/random-solution.py | from typing import List, Union, Dict, Tuple, Set, Any, Literal
from functools import reduce
from json import dumps
from random import random
def to_terminal(
line_type: Union[Literal['warning', 'error', 'info', ''], None],
msg_type: Union[Literal['WARNING', 'ERROR', 'INFO', ''], str, None],
*message: Union[str, Tuple[Any], List[Any]]):
"""
prints to terminal, use this instead of print statement if you want to examine output on Genetic Py terminal.
:param line_type: this is used to determine the color of the message:
* 'error' is red.
* 'warning' is yellow.
* 'info' is blue.
* <empty string> or anything else is white.
:param msg_type: the header of message, usually you find 'INFO', 'WARNING', 'ERROR, but you can use any string.
* ex: passing "ERROR" to header is going to be: "ERROR: ", passing empty string or None results in no header.
:param message: message to show after the header.
"""
print(dumps({
"terminal": True,
"line-type": line_type,
"msg-type": msg_type,
"message": message[0] if len(message) == 0 else reduce(
lambda accum_lines, line: accum_lines + '<br>' + line, message
)
}), flush=True)
solution = None
def init_solution(genes_num: int):
# initialize solution
global solution
solution = [
1 if random() >= .5 else 0 for _ in range(genes_num)
]
def get_fitness(genes: List[int], data: Union[Dict, List, Tuple, Set]) -> Union[float, int]:
"""
Fitness Function Template, wrap return statement with int() or float() to indicate the intended type
so the simulator can detect its type (useful for the graph).
:param genes: genes of a given individual/chromosome which consists of a list of 0s and 1s.
:param data: genes data loaded of the given path inside GA Control Panel.
"""
global solution
if isinstance(solution, type(None)):
init_solution(len(genes))
return sum(1 for ind_gene, solution_gene in zip(genes, solution) if int(ind_gene) == solution_gene) | 0.86053 | 0.362377 |
from riscv.EnvRISCV import EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
class MainSequence(Sequence):
    """Exercise different combinations of values for the parameters for the genPA instruction.
    Focus in this test is to try values of the Size, Align and CanAlias parameters.
    Type is always 'D'; Bank is always '0'.
    """
    def generate(self, **kargs):
        # Candidate load/store opcodes grouped by access width. Only the byte
        # ops are used when generating instructions below; the wider groups
        # are kept for reference/extension.
        ldstr_byte_ops = ['LB##RISCV', 'SB##RISCV']
        ldstr_half_ops = ['LH##RISCV', 'SH##RISCV']
        ldstr_word_ops = ['LW##RISCV', 'SW##RISCV']
        ldstr_double_ops = ['LD##RISCV', 'SD##RISCV']
        # Fixed parameters: data access, memory bank 0, no aliasing, and one
        # address generated per (Size, Align) combination.
        theType = 'D'
        theBank = 0
        theCanAlias = 0
        loopCount = 1
        # Every PA returned so far; duplicates are reported as errors.
        set_of_PAs = set()
        # Iterate through Size and Align values. Force requires Align to be a power of 2.
        # This 1st block tests smaller values of size - 1 byte to 32 bytes.
        for theSize in [2 ** x for x in range(0, 5)]:
            for theAlign in [2 ** x for x in range(0, 16)]:
                # Alignment must be at least the access size.
                if theAlign < theSize: continue
                for _ in range(loopCount):
                    rand_PA = self.genPA(Size=theSize, Align=theAlign, Type=theType, Bank=theBank, CanAlias=theCanAlias)
                    if rand_PA in set_of_PAs:
                        self.error(">>>>>>>>> Error -- Received a duplicate PA from self.genPA.")
                    else:
                        set_of_PAs.add(rand_PA)
                    # self.notice(">>>>>> set_of_PAs: {}".format(set_of_PAs))
                    rand_VA = self.genVAforPA(PA=rand_PA, Bank=theBank, FlatMap=0, Type=theType, Size=theSize)
                    self.notice(">>>>>> Requested Alignment: {:6d} Requested Size: {:6d} PA target= {:16X} VA target= {:16X}".format(theAlign, theSize, rand_PA, rand_VA))
                    instr_id = self.genInstruction(self.choice(ldstr_byte_ops), {'LSTarget':rand_VA})
        # Iterate through Size and Align values. Force requires Align to be a power of 2.
        # This 2nd block tests larger values of size - 32K to 8M.
        # NOTE(review): unlike the 1st block, duplicate PAs are not checked
        # here and genVAforPA is asked for CanAlias=0/ForceNewAddress=1 —
        # confirm the asymmetry with the 1st block is intentional.
        for theSize in [2 ** x for x in range(15, 18)]:
            for theAlign in [2 ** x for x in range(15, 25)]:
                if theAlign < theSize: continue
                for _ in range(loopCount):
                    rand_PA = self.genPA(Size=theSize, Align=theAlign, Type=theType, Bank=theBank, CanAlias=theCanAlias)
                    rand_VA = self.genVAforPA(PA=rand_PA, Bank=theBank, FlatMap=0, CanAlias=0, ForceNewAddress=1, Type=theType, Size=theSize)
                    self.notice(">>>>>> Requested Alignment: {:6d} Requested Size: {:6d} PA target= {:16X} VA target= {:16X}".format(theAlign, theSize, rand_PA, rand_VA))
                    instr_id = self.genInstruction(self.choice(ldstr_byte_ops), {'LSTarget':rand_VA})
# Entry points picked up by the Force test harness.
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV
from riscv.GenThreadRISCV import GenThreadRISCV
from base.Sequence import Sequence
class MainSequence(Sequence):
"""Exercise different combinations of values for the parameters for the genPA instruction.
Focus in this test is to try values of the Size, Align and CanAlias parameters.
Type is always 'D'; Bank is always '0'.
"""
def generate(self, **kargs):
ldstr_byte_ops = ['LB##RISCV', 'SB##RISCV']
ldstr_half_ops = ['LH##RISCV', 'SH##RISCV']
ldstr_word_ops = ['LW##RISCV', 'SW##RISCV']
ldstr_double_ops = ['LD##RISCV', 'SD##RISCV']
theType = 'D'
theBank = 0
theCanAlias = 0
loopCount = 1
set_of_PAs = set()
# Iterate through Size and Align values. Force requires Align to be a power of 2.
# This 1st block tests smaller values of size - 1 byte to 32 bytes.
for theSize in [2 ** x for x in range(0, 5)]:
for theAlign in [2 ** x for x in range(0, 16)]:
if theAlign < theSize: continue
for _ in range(loopCount):
rand_PA = self.genPA(Size=theSize, Align=theAlign, Type=theType, Bank=theBank, CanAlias=theCanAlias)
if rand_PA in set_of_PAs:
self.error(">>>>>>>>> Error -- Received a duplicate PA from self.genPA.")
else:
set_of_PAs.add(rand_PA)
# self.notice(">>>>>> set_of_PAs: {}".format(set_of_PAs))
rand_VA = self.genVAforPA(PA=rand_PA, Bank=theBank, FlatMap=0, Type=theType, Size=theSize)
self.notice(">>>>>> Requested Alignment: {:6d} Requested Size: {:6d} PA target= {:16X} VA target= {:16X}".format(theAlign, theSize, rand_PA, rand_VA))
instr_id = self.genInstruction(self.choice(ldstr_byte_ops), {'LSTarget':rand_VA})
# Iterate through Size and Align values. Force requires Align to be a power of 2.
# This 2nd block tests larger values of size - 32K to 8M.
for theSize in [2 ** x for x in range(15, 18)]:
for theAlign in [2 ** x for x in range(15, 25)]:
if theAlign < theSize: continue
for _ in range(loopCount):
rand_PA = self.genPA(Size=theSize, Align=theAlign, Type=theType, Bank=theBank, CanAlias=theCanAlias)
rand_VA = self.genVAforPA(PA=rand_PA, Bank=theBank, FlatMap=0, CanAlias=0, ForceNewAddress=1, Type=theType, Size=theSize)
self.notice(">>>>>> Requested Alignment: {:6d} Requested Size: {:6d} PA target= {:16X} VA target= {:16X}".format(theAlign, theSize, rand_PA, rand_VA))
instr_id = self.genInstruction(self.choice(ldstr_byte_ops), {'LSTarget':rand_VA})
MainSequenceClass = MainSequence
GenThreadClass = GenThreadRISCV
EnvClass = EnvRISCV | 0.355327 | 0.509642 |
# Third Party
from django.urls import path
# Project
from orp_apps.orp_api import views
# URL routes for the ORP API. Route names below are referenced by reverse()
# and serializers elsewhere in the project, so they must stay stable.
urlpatterns = [
    # API index / entry point.
    path('', views.APIRoot.as_view(), name='api-root'),
    # Taxonomies and their nested categories.
    path('taxonomies/', views.TaxonomyListView.as_view(), name='taxonomy-list'),
    path('taxonomies/<int:id>/', views.TaxonomyDetailView.as_view(), name='taxonomy-detail'),
    path(
        'taxonomies/<int:id>/categories/',
        views.CategoryListView.as_view(),
        name='categories-list'
    ),
    path(
        'taxonomies/<int:id>/categories/<int:category_id>/',
        views.CategoryDetailView.as_view(),
        name='category-detail'
    ),
    # Documents, including feedback-status filtered listings.
    path('documents/', views.DocumentListView.as_view(), name='document-list'),
    path(
        'documents_with_outstanding_feedback/',
        views.DocumentOutstandingFeedbackList.as_view(),
        name='document-outstanding-feedback-list'
    ),
    path(
        'documents_with_completed_feedback/',
        views.DocumentCompletedFeedbackList.as_view(),
        name='document-completed-feedback-list'
    ),
    path('documents/<int:id>/', views.DocumentDetailView.as_view(), name='document-detail'),
    # Revision-event subscriptions for a single document.
    path(
        'documents/<int:id>/<str:event_type>/',
        views.RevisionSubscriptionView.as_view(),
        name='document-detail-subscriptions'
    ),
    # Bulk lookup by a delimited list of document ids.
    path(
        'documents/search/<str:id_list>/',
        views.DocumentListSearchView.as_view(),
        name='document-search-list'
    ),
    # Entities.
    path('entities/', views.EntityListView.as_view(), name='entity-list'),
    path('entities/<int:id>/', views.EntityDetailView.as_view(), name='entity-detail'),
    # NOTE(review): this route reuses EntityDetailView rather than a
    # dedicated entity-documents list view — confirm this is intentional.
    path(
        'entities/<int:id>/documents/',
        views.EntityDetailView.as_view(),
        name='entity-documents'
    ),
    # Free-text and graph search over documents.
    path(
        'search/',
        views.DocumentSearch.as_view(),
        name='search-documents'
    ),
    path(
        'graph/',
        views.DocumentGraphSearch.as_view(),
        name='graph-documents'
    ),
    # Enumerates the subscription event types supported by the API.
    path(
        'subscription_event_types/',
        views.SubscriptionEventTypesView.as_view(),
        name='subscription-event-list'
    ),
]
# Third Party
from django.urls import path
# Project
from orp_apps.orp_api import views
urlpatterns = [
path('', views.APIRoot.as_view(), name='api-root'),
path('taxonomies/', views.TaxonomyListView.as_view(), name='taxonomy-list'),
path('taxonomies/<int:id>/', views.TaxonomyDetailView.as_view(), name='taxonomy-detail'),
path(
'taxonomies/<int:id>/categories/',
views.CategoryListView.as_view(),
name='categories-list'
),
path(
'taxonomies/<int:id>/categories/<int:category_id>/',
views.CategoryDetailView.as_view(),
name='category-detail'
),
path('documents/', views.DocumentListView.as_view(), name='document-list'),
path(
'documents_with_outstanding_feedback/',
views.DocumentOutstandingFeedbackList.as_view(),
name='document-outstanding-feedback-list'
),
path(
'documents_with_completed_feedback/',
views.DocumentCompletedFeedbackList.as_view(),
name='document-completed-feedback-list'
),
path('documents/<int:id>/', views.DocumentDetailView.as_view(), name='document-detail'),
path(
'documents/<int:id>/<str:event_type>/',
views.RevisionSubscriptionView.as_view(),
name='document-detail-subscriptions'
),
path(
'documents/search/<str:id_list>/',
views.DocumentListSearchView.as_view(),
name='document-search-list'
),
path('entities/', views.EntityListView.as_view(), name='entity-list'),
path('entities/<int:id>/', views.EntityDetailView.as_view(), name='entity-detail'),
path(
'entities/<int:id>/documents/',
views.EntityDetailView.as_view(),
name='entity-documents'
),
path(
'search/',
views.DocumentSearch.as_view(),
name='search-documents'
),
path(
'graph/',
views.DocumentGraphSearch.as_view(),
name='graph-documents'
),
path(
'subscription_event_types/',
views.SubscriptionEventTypesView.as_view(),
name='subscription-event-list'
),
] | 0.420005 | 0.080973 |
from functools import singledispatch
from operator import attrgetter
from typing import Collection, Union
from antlr4 import FileStream, ParserRuleContext
from knitscript.astnodes import Block, Call, Document, \
ExpandingStitchRepeat, FixedBlockRepeat, FixedStitchRepeat, Get, \
NaturalLit, Node, PatternDef, Pattern, Row, RowRepeat, Side, Source, \
StitchLit, StringLit, Using
# noinspection PyProtectedMember
from knitscript._parser.KnitScriptParser import KnitScriptParser
from knitscript.stitch import Stitch
@singledispatch
def build_ast(ctx: ParserRuleContext) -> Node:
    """
    Builds an AST from a parse tree generated by ANTLR.
    :param ctx: a parse tree context node
    :return: the AST corresponding to the parse tree
    """
    # Fallback for parse contexts with no registered handler below.
    raise TypeError(f"unsupported parser context {type(ctx).__name__}")
@build_ast.register
def _(document: KnitScriptParser.DocumentContext) -> Node:
    """Build the root Document node from the top-level parse context."""
    return Document(stmts=list(map(build_ast, document.stmts)),
                    sources=[_get_source(document)])
@build_ast.register
def _(stmt: KnitScriptParser.StmtContext) -> Node:
    """Dispatch a statement to whichever alternative the parser matched."""
    return build_ast(stmt.using() or stmt.patternDef() or stmt.call())
@build_ast.register
def _(using: KnitScriptParser.UsingContext) -> Node:
    """Build a Using (import) node listing the imported names and module."""
    return Using(names=list(map(lambda name: name.text, using.names)),
                 module=using.module.text,
                 sources=[_get_source(using)])
@build_ast.register
def _(pattern: KnitScriptParser.PatternDefContext) -> Node:
    """Build a PatternDef wrapping a Pattern with its (possibly empty)
    parameter list.

    consumes/produces/env are left as None — presumably filled in by a later
    analysis pass; confirm against the interpreter.
    """
    params = (list(map(attrgetter("text"), pattern.paramList().params))
              if pattern.paramList()
              else [])
    return PatternDef(
        name=pattern.ID().getText(),
        pattern=Pattern(rows=list(map(build_ast, pattern.items)),
                        params=params, env=None,
                        consumes=None, produces=None,
                        sources=[_get_source(pattern)]),
        sources=[_get_source(pattern)]
    )
@build_ast.register
def _(item: KnitScriptParser.ItemContext) -> Node:
    """Dispatch a pattern item (row, block, or row repeat)."""
    return build_ast(item.row() or item.block() or item.rowRepeat())
@build_ast.register
def _(block: KnitScriptParser.BlockContext) -> Node:
    """Build a Block node from a list of side-by-side patterns."""
    return Block(patterns=list(map(build_ast, block.patternList().patterns)),
                 consumes=None, produces=None,
                 sources=[_get_source(block)])
@build_ast.register
def _(repeat: KnitScriptParser.PatternRepeatContext) -> Node:
    """Dispatch a pattern repeat (fixed repeat or pattern call)."""
    return build_ast(repeat.fixedPatternRepeat() or repeat.call())
@build_ast.register
def _(repeat: KnitScriptParser.FixedPatternRepeatContext) -> Node:
    """Build a FixedBlockRepeat: a block of one or more patterns repeated
    a fixed number of times."""
    return FixedBlockRepeat(
        block=Block(
            patterns=(
                # A single pattern or an explicit pattern list, depending on
                # which grammar alternative matched.
                [build_ast(repeat.pattern)]
                if repeat.pattern is not None
                else list(map(build_ast, repeat.patternList().patterns))
            ),
            consumes=None, produces=None,
            sources=[_get_source(repeat)]
        ),
        times=build_ast(repeat.times),
        consumes=None, produces=None,
        sources=[_get_source(repeat)]
    )
@build_ast.register
def _(call: KnitScriptParser.CallContext) -> Node:
    """Build a Call node targeting a named pattern, with optional arguments."""
    return Call(target=Get(name=call.ID().getText(),
                           sources=[_get_source(call)]),
                args=list(map(build_ast, call.args) if call.args else []),
                sources=[_get_source(call)])
@build_ast.register
def _(repeat: KnitScriptParser.RowRepeatContext) -> Node:
    """Build a RowRepeat: a group of rows knit a fixed number of times."""
    return RowRepeat(rows=list(map(build_ast, repeat.items)),
                     times=build_ast(repeat.times),
                     consumes=None, produces=None,
                     sources=[_get_source(repeat)])
@build_ast.register
def _(row: KnitScriptParser.RowContext) -> Node:
    """Build a Row node from its stitches and optional side marker."""
    return Row(
        stitches=list(map(build_ast, (row.stitchList().stitches
                                      if row.stitchList() is not None
                                      else []))),
        side=Side(row.side().getText()) if row.side() is not None else None,
        # Side was written explicitly (or absent), not inferred by analysis.
        inferred=False,
        consumes=None, produces=None,
        sources=[_get_source(row)]
    )
@build_ast.register
def _(repeat: KnitScriptParser.StitchRepeatContext) -> Node:
    """Dispatch a stitch repeat (fixed, expanding, or a single stitch)."""
    return build_ast(repeat.fixedStitchRepeat() or
                     repeat.expandingStitchRepeat() or
                     repeat.stitch())
@build_ast.register
def _(fixed: KnitScriptParser.FixedStitchRepeatContext) -> Node:
    """Build a FixedStitchRepeat: stitches repeated an exact number of times."""
    return FixedStitchRepeat(
        stitches=list(map(build_ast, _get_stitches(fixed))),
        times=build_ast(fixed.times),
        consumes=None, produces=None,
        sources=[_get_source(fixed)]
    )
@build_ast.register
def _(expanding: KnitScriptParser.ExpandingStitchRepeatContext) -> Node:
    """Build an ExpandingStitchRepeat: stitches repeated until ``to_last``
    stitches remain in the row (0 when no 'to last' clause was written)."""
    return ExpandingStitchRepeat(
        stitches=list(map(build_ast, _get_stitches(expanding))),
        to_last=(build_ast(expanding.toLast)
                 if expanding.toLast
                 else NaturalLit.of(0)),
        consumes=None, produces=None,
        sources=[_get_source(expanding)]
    )
@build_ast.register
def _(stitch: KnitScriptParser.StitchContext) -> Node:
    """Build a StitchLit; its consumes/produces counts come straight from
    the Stitch definition looked up by symbol."""
    value = Stitch.from_symbol(stitch.ID().getText())
    return StitchLit(value=value,
                     consumes=value.consumes, produces=value.produces,
                     sources=[_get_source(stitch)])
@build_ast.register
def _(expr: KnitScriptParser.ExprContext) -> Node:
    """Dispatch an expression to whichever alternative the parser matched."""
    return build_ast(expr.call() or
                     expr.variable() or
                     expr.natural() or
                     expr.string())
@build_ast.register
def _(variable: KnitScriptParser.VariableContext) -> Node:
    """Build a Get node referencing a variable by name."""
    return Get(name=variable.ID().getText(),
               sources=[_get_source(variable)])
@build_ast.register
def _(natural: KnitScriptParser.NaturalContext) -> Node:
    """Build a NaturalLit from a natural-number literal."""
    return NaturalLit(value=int(natural.getText()),
                      sources=[_get_source(natural)])
@build_ast.register
def _(string: KnitScriptParser.StringContext) -> Node:
    """Build a StringLit, stripping the surrounding quote characters."""
    return StringLit(value=string.getText()[1:-1],
                     sources=[_get_source(string)])
def _get_stitches(ctx: Union[KnitScriptParser.FixedStitchRepeatContext,
                             KnitScriptParser.ExpandingStitchRepeatContext]) \
        -> Collection[ParserRuleContext]:
    """Return a repeat's stitch contexts, whether a single stitch or a list."""
    return [ctx.stitch()] if ctx.stitch() else ctx.stitchList().stitches
def _get_source(ctx: ParserRuleContext) -> Source:
file = (ctx.start.source[1].fileName
if isinstance(ctx.start.source[1], FileStream)
else ctx.start.source[1].name)
return Source(line=ctx.start.line, column=ctx.start.column, file=file) | knitscript/_astgen.py | from functools import singledispatch
from operator import attrgetter
from typing import Collection, Union
from antlr4 import FileStream, ParserRuleContext
from knitscript.astnodes import Block, Call, Document, \
ExpandingStitchRepeat, FixedBlockRepeat, FixedStitchRepeat, Get, \
NaturalLit, Node, PatternDef, Pattern, Row, RowRepeat, Side, Source, \
StitchLit, StringLit, Using
# noinspection PyProtectedMember
from knitscript._parser.KnitScriptParser import KnitScriptParser
from knitscript.stitch import Stitch
@singledispatch
def build_ast(ctx: ParserRuleContext) -> Node:
    """
    Builds an AST from a parse tree generated by ANTLR.

    This is the fallback overload; concrete context types are handled by the
    ``@build_ast.register`` overloads below.

    :param ctx: a parse tree context node
    :return: the AST corresponding to the parse tree
    :raises TypeError: if no overload is registered for ``type(ctx)``
    """
    raise TypeError(f"unsupported parser context {type(ctx).__name__}")
@build_ast.register
def _(document: KnitScriptParser.DocumentContext) -> Node:
    """Convert a document context into a :class:`Document` AST node."""
    stmts = [build_ast(stmt) for stmt in document.stmts]
    return Document(stmts=stmts, sources=[_get_source(document)])
@build_ast.register
def _(stmt: KnitScriptParser.StmtContext) -> Node:
    """Dispatch a statement context to its concrete alternative."""
    child = stmt.using() or stmt.patternDef() or stmt.call()
    return build_ast(child)
@build_ast.register
def _(using: KnitScriptParser.UsingContext) -> Node:
    """Convert a using (import) context into a :class:`Using` AST node."""
    imported = [name.text for name in using.names]
    return Using(names=imported,
                 module=using.module.text,
                 sources=[_get_source(using)])
@build_ast.register
def _(pattern: KnitScriptParser.PatternDefContext) -> Node:
    """Convert a pattern definition context into a :class:`PatternDef` node.

    The parameter list is optional; when absent the pattern takes no
    parameters. ``env``, ``consumes`` and ``produces`` are set to ``None``
    here — presumably filled in by a later analysis pass (TODO confirm).
    """
    params = (list(map(attrgetter("text"), pattern.paramList().params))
              if pattern.paramList()
              else [])
    return PatternDef(
        name=pattern.ID().getText(),
        pattern=Pattern(rows=list(map(build_ast, pattern.items)),
                        params=params, env=None,
                        consumes=None, produces=None,
                        sources=[_get_source(pattern)]),
        sources=[_get_source(pattern)]
    )
@build_ast.register
def _(item: KnitScriptParser.ItemContext) -> Node:
    """Dispatch a pattern item to its concrete alternative."""
    child = item.row() or item.block() or item.rowRepeat()
    return build_ast(child)
@build_ast.register
def _(block: KnitScriptParser.BlockContext) -> Node:
    """Convert a block context into a :class:`Block` of its patterns."""
    return Block(patterns=list(map(build_ast, block.patternList().patterns)),
                 consumes=None, produces=None,
                 sources=[_get_source(block)])
@build_ast.register
def _(repeat: KnitScriptParser.PatternRepeatContext) -> Node:
    """Dispatch a pattern repeat to its concrete alternative."""
    child = repeat.fixedPatternRepeat() or repeat.call()
    return build_ast(child)
@build_ast.register
def _(repeat: KnitScriptParser.FixedPatternRepeatContext) -> Node:
    """Convert a fixed pattern repeat into a :class:`FixedBlockRepeat`.

    The grammar allows either a single pattern or a pattern list; both forms
    are normalized into one :class:`Block` repeated ``times`` times.
    """
    return FixedBlockRepeat(
        block=Block(
            patterns=(
                # single-pattern form vs. pattern-list form
                [build_ast(repeat.pattern)]
                if repeat.pattern is not None
                else list(map(build_ast, repeat.patternList().patterns))
            ),
            consumes=None, produces=None,
            sources=[_get_source(repeat)]
        ),
        times=build_ast(repeat.times),
        consumes=None, produces=None,
        sources=[_get_source(repeat)]
    )
@build_ast.register
def _(call: KnitScriptParser.CallContext) -> Node:
    """Convert a call context into a :class:`Call` targeting a :class:`Get`.

    NOTE(review): the conditional sits *inside* ``list(...)`` —
    ``list(map(...) if call.args else [])`` — which is correct but easy to
    misread; consider extra parentheses for clarity.
    """
    return Call(target=Get(name=call.ID().getText(),
                           sources=[_get_source(call)]),
                args=list(map(build_ast, call.args) if call.args else []),
                sources=[_get_source(call)])
@build_ast.register
def _(repeat: KnitScriptParser.RowRepeatContext) -> Node:
    """Convert a row repeat context into a :class:`RowRepeat` AST node."""
    return RowRepeat(rows=list(map(build_ast, repeat.items)),
                     times=build_ast(repeat.times),
                     consumes=None, produces=None,
                     sources=[_get_source(repeat)])
@build_ast.register
def _(row: KnitScriptParser.RowContext) -> Node:
    """Convert a row context into a :class:`Row` AST node.

    Both the stitch list and the side annotation are optional in the
    grammar; a missing stitch list yields an empty row, a missing side
    yields ``side=None``. ``inferred=False`` marks the row as written by
    the user rather than derived.
    """
    return Row(
        stitches=list(map(build_ast, (row.stitchList().stitches
                                      if row.stitchList() is not None
                                      else []))),
        side=Side(row.side().getText()) if row.side() is not None else None,
        inferred=False,
        consumes=None, produces=None,
        sources=[_get_source(row)]
    )
@build_ast.register
def _(repeat: KnitScriptParser.StitchRepeatContext) -> Node:
    """Dispatch a stitch repeat to its concrete alternative."""
    child = (repeat.fixedStitchRepeat()
             or repeat.expandingStitchRepeat()
             or repeat.stitch())
    return build_ast(child)
@build_ast.register
def _(fixed: KnitScriptParser.FixedStitchRepeatContext) -> Node:
    """Convert a fixed stitch repeat into a :class:`FixedStitchRepeat`."""
    return FixedStitchRepeat(
        stitches=list(map(build_ast, _get_stitches(fixed))),
        times=build_ast(fixed.times),
        consumes=None, produces=None,
        sources=[_get_source(fixed)]
    )
@build_ast.register
def _(expanding: KnitScriptParser.ExpandingStitchRepeatContext) -> Node:
    """Convert an expanding stitch repeat ("repeat to last N") node.

    When the ``toLast`` clause is absent the repeat runs to the end of the
    row, i.e. ``to_last`` defaults to the literal 0.
    """
    return ExpandingStitchRepeat(
        stitches=list(map(build_ast, _get_stitches(expanding))),
        to_last=(build_ast(expanding.toLast)
                 if expanding.toLast
                 else NaturalLit.of(0)),
        consumes=None, produces=None,
        sources=[_get_source(expanding)]
    )
@build_ast.register
def _(stitch: KnitScriptParser.StitchContext) -> Node:
    """Convert a stitch symbol into a :class:`StitchLit` AST node."""
    symbol = stitch.ID().getText()
    value = Stitch.from_symbol(symbol)
    return StitchLit(value=value,
                     consumes=value.consumes,
                     produces=value.produces,
                     sources=[_get_source(stitch)])
@build_ast.register
def _(expr: KnitScriptParser.ExprContext) -> Node:
    """Dispatch an expression context to its concrete alternative."""
    return build_ast(expr.call() or
                     expr.variable() or
                     expr.natural() or
                     expr.string())
@build_ast.register
def _(variable: KnitScriptParser.VariableContext) -> Node:
    """Convert a variable reference into a :class:`Get` AST node."""
    ident = variable.ID().getText()
    return Get(name=ident, sources=[_get_source(variable)])
@build_ast.register
def _(natural: KnitScriptParser.NaturalContext) -> Node:
    """Convert a natural-number literal into a :class:`NaturalLit` node."""
    digits = natural.getText()
    return NaturalLit(value=int(digits), sources=[_get_source(natural)])
@build_ast.register
def _(string: KnitScriptParser.StringContext) -> Node:
    """Convert a string literal into a :class:`StringLit` node.

    The token text includes the surrounding quote characters; they are
    stripped before constructing the literal.
    """
    text = string.getText()
    return StringLit(value=text[1:-1], sources=[_get_source(string)])
def _get_stitches(ctx: Union[KnitScriptParser.FixedStitchRepeatContext,
                             KnitScriptParser.ExpandingStitchRepeatContext]) \
        -> Collection[ParserRuleContext]:
    """Return the stitch contexts of a repeat.

    A repeat holds either a single stitch or a stitch list; the single
    stitch is wrapped in a one-element list so callers see one shape.
    """
    single = ctx.stitch()
    if single:
        return [single]
    return ctx.stitchList().stitches
def _get_source(ctx: ParserRuleContext) -> Source:
file = (ctx.start.source[1].fileName
if isinstance(ctx.start.source[1], FileStream)
else ctx.start.source[1].name)
return Source(line=ctx.start.line, column=ctx.start.column, file=file) | 0.789477 | 0.180323 |
import tempfile
from pathlib import Path
from yt.data_objects.time_series import DatasetSeries
from yt.testing import assert_raises
from yt.utilities.exceptions import YTUnidentifiedDataType
def test_pattern_expansion():
    """Glob-pattern expansion must work for str-like and Path inputs alike."""
    names = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)]
    with tempfile.TemporaryDirectory() as tmpdir:
        tmp_path = Path(tmpdir)
        for name in names:
            (tmp_path / name).touch()
        pattern = tmp_path / "fake_data_file_*"
        expected = [str(tmp_path / name) for name in names]
        assert DatasetSeries._get_filenames_from_glob_pattern(pattern) == expected
        assert DatasetSeries._get_filenames_from_glob_pattern(Path(pattern)) == expected
def test_no_match_pattern():
    """A glob pattern matching no files must raise FileNotFoundError."""
    with tempfile.TemporaryDirectory() as tmpdir:
        pattern = Path(tmpdir) / "fake_data_file_*"
        assert_raises(
            FileNotFoundError, DatasetSeries._get_filenames_from_glob_pattern, pattern
        )
def test_init_fake_dataseries():
file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)]
with tempfile.TemporaryDirectory() as tmpdir:
pfile_list = [Path(tmpdir) / file for file in file_list]
sfile_list = [str(file) for file in pfile_list]
for file in pfile_list:
file.touch()
pattern = Path(tmpdir) / "fake_data_file_*"
# init from str pattern
ts = DatasetSeries(pattern)
assert ts._pre_outputs == sfile_list
# init from Path pattern
ppattern = Path(pattern)
ts = DatasetSeries(ppattern)
assert ts._pre_outputs == sfile_list
# init form str list
ts = DatasetSeries(sfile_list)
assert ts._pre_outputs == sfile_list
# init form Path list
ts = DatasetSeries(pfile_list)
assert ts._pre_outputs == pfile_list
# rejected input type (str repr of a list) "[file1, file2, ...]"
assert_raises(FileNotFoundError, DatasetSeries, str(file_list))
# finally, check that ts[0] fails to actually load
assert_raises(YTUnidentifiedDataType, ts.__getitem__, 0) | yt/data_objects/tests/test_time_series.py | import tempfile
from pathlib import Path
from yt.data_objects.time_series import DatasetSeries
from yt.testing import assert_raises
from yt.utilities.exceptions import YTUnidentifiedDataType
def test_pattern_expansion():
file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)]
with tempfile.TemporaryDirectory() as tmpdir:
tmp_path = Path(tmpdir)
for file in file_list:
(tmp_path / file).touch()
pattern = tmp_path / "fake_data_file_*"
expected = [str(tmp_path / file) for file in file_list]
found = DatasetSeries._get_filenames_from_glob_pattern(pattern)
assert found == expected
found2 = DatasetSeries._get_filenames_from_glob_pattern(Path(pattern))
assert found2 == expected
def test_no_match_pattern():
with tempfile.TemporaryDirectory() as tmpdir:
pattern = Path(tmpdir).joinpath("fake_data_file_*")
assert_raises(
FileNotFoundError, DatasetSeries._get_filenames_from_glob_pattern, pattern
)
def test_init_fake_dataseries():
file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)]
with tempfile.TemporaryDirectory() as tmpdir:
pfile_list = [Path(tmpdir) / file for file in file_list]
sfile_list = [str(file) for file in pfile_list]
for file in pfile_list:
file.touch()
pattern = Path(tmpdir) / "fake_data_file_*"
# init from str pattern
ts = DatasetSeries(pattern)
assert ts._pre_outputs == sfile_list
# init from Path pattern
ppattern = Path(pattern)
ts = DatasetSeries(ppattern)
assert ts._pre_outputs == sfile_list
# init form str list
ts = DatasetSeries(sfile_list)
assert ts._pre_outputs == sfile_list
# init form Path list
ts = DatasetSeries(pfile_list)
assert ts._pre_outputs == pfile_list
# rejected input type (str repr of a list) "[file1, file2, ...]"
assert_raises(FileNotFoundError, DatasetSeries, str(file_list))
# finally, check that ts[0] fails to actually load
assert_raises(YTUnidentifiedDataType, ts.__getitem__, 0) | 0.427516 | 0.455925 |
import numpy as np
import adafdr.method as md
import adafdr.data_loader as dl
def test_method_init():
    """ test for md.method_init

    Fits the initial mixture model on simulated 2d bump-slope data and
    checks (a) rejection count / false-discovery proportion of the implied
    threshold and (b) that the estimated bump centers are near the ground
    truth (0.25, 0.25) / (0.75, 0.75) in either component order.
    """
    p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
    a, mu, sigma, w = md.method_init(p, x, 2, alpha=0.1, n_full=n_full, h=h,
                                     random_state=0, fold_number=0)
    t = md.f_all(x, a, mu, sigma, w)
    # rescale the threshold so the mirror estimate of FDR is <= alpha
    gamma = md.rescale_mirror(t, p, 0.1)
    t = t*gamma
    n_rej = np.sum(p < t)
    # FDP = rejected nulls / all rejections (h == 0 marks true nulls)
    FDP = np.sum((p < t)*(h == 0))/n_rej
    print('n_rej:', n_rej)
    assert n_rej > 700
    assert FDP < 0.15
    mu_ref1 = np.array([[0.25, 0.25], [0.75, 0.75]], dtype=float)
    mu_ref2 = np.array([[0.75, 0.75], [0.25, 0.25]], dtype=float)
    # component order is arbitrary, so compare against both orderings
    error = np.min([np.linalg.norm(mu-mu_ref1),
                    np.linalg.norm(mu-mu_ref2)])
    print('error for estimating mu = %0.8f'%error)
    assert error < 0.05
def test_reparametrize():
    """ test for md.reparametrize

    Reparametrizing the mixture must leave the threshold function
    unchanged: t computed from the new parameters equals t computed from
    the originals (elementwise, to 1e-8).
    """
    w_init = np.array([0.4, 0.3, 0.3], dtype=float)
    a_init = np.array([2, 0.1], dtype=float)
    mu_init = np.array([[0.2, 0.2], [0.7, 0.7]], dtype=float)
    sigma_init = np.array([[0.1, 0.2], [0.1, 0.1]], dtype=float)
    d = 2
    x_test = np.array([[0.1, 0.2], [0.3, 0.5]], dtype=float)
    a, b, w, mu, sigma = md.reparametrize(a_init, mu_init, sigma_init, w_init, d)
    t_init = md.f_all(x_test, a_init, mu_init, sigma_init, w_init)
    t = md.t_cal(x_test, a, b, w, mu, sigma)
    print('t_init:', t_init)
    print('t', t)
    assert all(np.absolute(t_init-t) < 1e-8)
def test_rescale_mirror():
    """ test for md.rescale_mirror

    Recomputes the mirror-estimate scaling by brute-force grid search
    (largest gamma whose mirror FDR estimate stays below alpha) and
    checks rescale_mirror agrees to 1e-4.
    """
    p, x, _, _, _ = dl.load_2d_bump_slope(n_sample=2000)
    alpha = 0.1
    t = np.ones([x.shape[0]], dtype=float)
    gamma_grid = np.linspace(1e-4, 0.01, 100)
    alpha_hat = np.zeros([gamma_grid.shape[0]], dtype=float)
    for i in range(gamma_grid.shape[0]):
        # mirror estimate: (#p above 1 - gamma*t) / (#p below gamma*t)
        alpha_hat[i] = np.sum(p > 1-t*gamma_grid[i])/np.sum(p < t*gamma_grid[i])
    gamma = np.max(gamma_grid[alpha_hat < alpha])
    gamma_test = md.rescale_mirror(t, p, alpha)
    print('gamma_GT', gamma)
    print('gamma_test', gamma_test)
    assert np.absolute(gamma-gamma_test) < 1e-4
def test_method_single_fold():
    """ test for md.method_single_fold

    Runs one adaptive-threshold fold on simulated 2d bump-slope data and
    checks the rejection count and false-discovery proportion (FDP).
    """
    p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
    n_rej, t, _ = md.method_single_fold(p, x, 2, alpha=0.1, n_full=n_full,
                                        n_itr=100, h=h, fold_number=0, random_state=0)
    # FDP = rejected nulls / all rejections (h == 0 marks true nulls)
    FDP = np.sum((p < t)*(h == 0))/n_rej
    print('n_rej:', n_rej)
    assert n_rej > 800
    print('FDP:', FDP)  # fix: previously printed n_rej under the 'FDP:' label
    assert FDP < 0.15
def test_preprocess_two_fold():
    """ Test for preprocess_two_fold

    Builds a discrete covariate where category 0 is fully enriched for
    small p-values, category 1 is 2/3 enriched, category 2 is 1/3, and
    category 3 not at all, then checks that the re-ranked covariate
    values fall into the corresponding quartile bands (most enriched ->
    highest value).
    """
    np.random.seed(0)
    x_test_1 = np.random.choice([0, 1, 2, 3], size=300)
    x_test_2 = np.array([0, 1, 2, 3]).reshape([-1, 1])
    temp = np.arange(300)
    p_test = np.ones([300], dtype=float)
    p_test[x_test_1 == 0] = 0.001
    p_test[(x_test_1 == 1)*(temp < 200)] = 0.001
    p_test[(x_test_1 == 2)*(temp < 100)] = 0.001
    _, x_test_new_2 = md.preprocess_two_fold(p_test,
                                             x_test_1.reshape([-1, 1]),
                                             x_test_2,
                                             300, None, np.ones([1], dtype=bool))
    print('x_test_2', x_test_2)
    print('x_test_new_2', x_test_new_2)
    assert x_test_new_2[0] > 0.75
    assert (x_test_new_2[1] > 0.5) and (x_test_new_2[1] < 0.75)
    assert (x_test_new_2[2] > 0.25) and (x_test_new_2[2] < 0.5)
    assert x_test_new_2[3] < 0.25
def test_adafdr_test():
    """ Test for adafdr_test

    End-to-end run (fast_mode off, single core) on simulated 2d
    bump-slope data; checks rejection count and FDP at alpha = 0.1.
    """
    p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
    res = md.adafdr_test(p, x, K=2, alpha=0.1, h=None, n_full=n_full,\
                         n_itr=50, verbose=False, random_state=0,\
                         fast_mode = False, single_core=True)
    t = res['threshold']
    # FDP = rejected nulls / all rejections (h == 0 marks true nulls)
    FDP = np.sum((p < t)*(h == 0))/np.sum(p < t)
    n_rej = np.sum(p < t)
    print('n_rej', n_rej)
    assert n_rej > 700
    print('FDP', FDP)
    assert FDP < 0.12
def test_adafdr_retest():
""" Test for adafdr_retest
"""
p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
res = md.adafdr_test(p, x, alpha=0.1, single_core=True)
res_temp = md.adafdr_test(p, x, alpha=0.02, single_core=True)
res_retest = md.adafdr_retest(res, alpha=0.02)
print('adafdr_test discoveries at alpha=0.02:',
np.sum(res_temp['decision']))
print('adafdr_retest discoveries at alpha=0.02:',
np.sum(res_retest['decision']))
print('# diff', np.sum(res_temp['decision'] != res_retest['decision']))
assert np.sum(res_temp['decision'] != res_retest['decision'])<10 | test/test_method.py | import numpy as np
import adafdr.method as md
import adafdr.data_loader as dl
def test_method_init():
""" test for md.method_init
"""
p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
a, mu, sigma, w = md.method_init(p, x, 2, alpha=0.1, n_full=n_full, h=h,
random_state=0, fold_number=0)
t = md.f_all(x, a, mu, sigma, w)
gamma = md.rescale_mirror(t, p, 0.1)
t = t*gamma
n_rej = np.sum(p < t)
FDP = np.sum((p < t)*(h == 0))/n_rej
print('n_rej:', n_rej)
assert n_rej > 700
assert FDP < 0.15
mu_ref1 = np.array([[0.25, 0.25], [0.75, 0.75]], dtype=float)
mu_ref2 = np.array([[0.75, 0.75], [0.25, 0.25]], dtype=float)
error = np.min([np.linalg.norm(mu-mu_ref1),
np.linalg.norm(mu-mu_ref2)])
print('error for estimating mu = %0.8f'%error)
assert error < 0.05
def test_reparametrize():
""" test for md.reparametrize
"""
w_init = np.array([0.4, 0.3, 0.3], dtype=float)
a_init = np.array([2, 0.1], dtype=float)
mu_init = np.array([[0.2, 0.2], [0.7, 0.7]], dtype=float)
sigma_init = np.array([[0.1, 0.2], [0.1, 0.1]], dtype=float)
d = 2
x_test = np.array([[0.1, 0.2], [0.3, 0.5]], dtype=float)
a, b, w, mu, sigma = md.reparametrize(a_init, mu_init, sigma_init, w_init, d)
t_init = md.f_all(x_test, a_init, mu_init, sigma_init, w_init)
t = md.t_cal(x_test, a, b, w, mu, sigma)
print('t_init:', t_init)
print('t', t)
assert all(np.absolute(t_init-t) < 1e-8)
def test_rescale_mirror():
""" test for md.rescale_mirror
"""
p, x, _, _, _ = dl.load_2d_bump_slope(n_sample=2000)
alpha = 0.1
t = np.ones([x.shape[0]], dtype=float)
gamma_grid = np.linspace(1e-4, 0.01, 100)
alpha_hat = np.zeros([gamma_grid.shape[0]], dtype=float)
for i in range(gamma_grid.shape[0]):
alpha_hat[i] = np.sum(p > 1-t*gamma_grid[i])/np.sum(p < t*gamma_grid[i])
gamma = np.max(gamma_grid[alpha_hat < alpha])
gamma_test = md.rescale_mirror(t, p, alpha)
print('gamma_GT', gamma)
print('gamma_test', gamma_test)
assert np.absolute(gamma-gamma_test) < 1e-4
def test_method_single_fold():
    """ test for md.method_single_fold

    Runs one adaptive-threshold fold on simulated 2d bump-slope data and
    checks the rejection count and false-discovery proportion (FDP).
    """
    p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
    n_rej, t, _ = md.method_single_fold(p, x, 2, alpha=0.1, n_full=n_full,
                                        n_itr=100, h=h, fold_number=0, random_state=0)
    # FDP = rejected nulls / all rejections (h == 0 marks true nulls)
    FDP = np.sum((p < t)*(h == 0))/n_rej
    print('n_rej:', n_rej)
    assert n_rej > 800
    print('FDP:', FDP)  # fix: previously printed n_rej under the 'FDP:' label
    assert FDP < 0.15
def test_preprocess_two_fold():
""" Test for preprocess_two_fold
"""
np.random.seed(0)
x_test_1 = np.random.choice([0, 1, 2, 3], size=300)
x_test_2 = np.array([0, 1, 2, 3]).reshape([-1, 1])
temp = np.arange(300)
p_test = np.ones([300], dtype=float)
p_test[x_test_1 == 0] = 0.001
p_test[(x_test_1 == 1)*(temp < 200)] = 0.001
p_test[(x_test_1 == 2)*(temp < 100)] = 0.001
_, x_test_new_2 = md.preprocess_two_fold(p_test,
x_test_1.reshape([-1, 1]),
x_test_2,
300, None, np.ones([1], dtype=bool))
print('x_test_2', x_test_2)
print('x_test_new_2', x_test_new_2)
assert x_test_new_2[0] > 0.75
assert (x_test_new_2[1] > 0.5) and (x_test_new_2[1] < 0.75)
assert (x_test_new_2[2] > 0.25) and (x_test_new_2[2] < 0.5)
assert x_test_new_2[3] < 0.25
def test_adafdr_test():
""" Test for adafdr_test
"""
p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
res = md.adafdr_test(p, x, K=2, alpha=0.1, h=None, n_full=n_full,\
n_itr=50, verbose=False, random_state=0,\
fast_mode = False, single_core=True)
t = res['threshold']
FDP = np.sum((p < t)*(h == 0))/np.sum(p < t)
n_rej = np.sum(p < t)
print('n_rej', n_rej)
assert n_rej > 700
print('FDP', FDP)
assert FDP < 0.12
def test_adafdr_retest():
""" Test for adafdr_retest
"""
p, x, h, n_full, _ = dl.load_2d_bump_slope(n_sample=20000)
res = md.adafdr_test(p, x, alpha=0.1, single_core=True)
res_temp = md.adafdr_test(p, x, alpha=0.02, single_core=True)
res_retest = md.adafdr_retest(res, alpha=0.02)
print('adafdr_test discoveries at alpha=0.02:',
np.sum(res_temp['decision']))
print('adafdr_retest discoveries at alpha=0.02:',
np.sum(res_retest['decision']))
print('# diff', np.sum(res_temp['decision'] != res_retest['decision']))
assert np.sum(res_temp['decision'] != res_retest['decision'])<10 | 0.431345 | 0.661281 |
import numpy as np
import subprocess
from insar.unwrapping import quality_maps
from itertools import product
import os
# Parameter sweep over stripe widths: for each (azimuth, range) pair, run
# phase-ramp compensation, then quality maps, then unwrapping as subprocesses.
cwd = "/home/stepan/zpt/interferometry"
py_interp = "./env/bin/python"
folder_path = "./processing_results/150522_11-13-26/tests_26_08/hamming/"
file = "compl.npy"
compl = np.load(folder_path + file)
shifting_axis = "range"
# "single": run the full pipeline sequentially per parameter pair;
# otherwise: launch compensation for all range widths in parallel.
mode = "single"
counter = 0
for az_stripe_width in [1000, 2000]:
    proc_list = []
    for rng_stripe_width in [50, 100, 200]:
        # phase-ramp compensation per stripe
        folder = "width_az_" + str(az_stripe_width) + "_range_" + str(rng_stripe_width)
        print("\n"+folder)
        script = "./processing_scripts/compensating.py"
        script_args = folder + \
                      " --file " + file + \
                      " --folder_path " + folder_path + \
                      " --az_stripe_width " + str(az_stripe_width) + \
                      " --rng_stripe_width " + str(rng_stripe_width) + \
                      " --shifting_axis " + shifting_axis + \
                      " --mode " + mode
        if mode == "single":
            strip_proc = subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd)
            strip_proc.wait()
            # quality maps
            script = "./processing_scripts/make_quality_maps.py"
            script_args = folder + \
                          " --file strip_phase.npy" + \
                          " --folder_path " + folder_path
            maps_proc = subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd)
            maps_proc.wait()
            # phase unwrapping
            script = "./processing_scripts/unwrapping.py"
            script_args = folder + \
                          " --file strip_phase.npy" +\
                          " --folder_path " + folder_path + \
                          " --algorithm relnp"
            unw_proc = subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd)
            unw_proc.wait()
        else:
            proc_list.append(subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd))
for p in proc_list:
p.wait() | processing_scripts/params_test.py | import numpy as np
import subprocess
from insar.unwrapping import quality_maps
from itertools import product
import os
cwd = "/home/stepan/zpt/interferometry"
py_interp = "./env/bin/python"
folder_path = "./processing_results/150522_11-13-26/tests_26_08/hamming/"
file = "compl.npy"
compl = np.load(folder_path + file)
shifting_axis = "range"
mode = "single"
counter = 0
for az_stripe_width in [1000, 2000]:
proc_list = []
for rng_stripe_width in [50, 100, 200]:
# компенсация набега фазы по полосам
folder = "width_az_" + str(az_stripe_width) + "_range_" + str(rng_stripe_width)
print("\n"+folder)
script = "./processing_scripts/compensating.py"
script_args = folder + \
" --file " + file + \
" --folder_path " + folder_path + \
" --az_stripe_width " + str(az_stripe_width) + \
" --rng_stripe_width " + str(rng_stripe_width) + \
" --shifting_axis " + shifting_axis + \
" --mode " + mode
if mode == "single":
strip_proc = subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd)
strip_proc.wait()
# Карты качества
script = "./processing_scripts/make_quality_maps.py"
script_args = folder + \
" --file strip_phase.npy" + \
" --folder_path " + folder_path
maps_proc = subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd)
maps_proc.wait()
# Развертка
script = "./processing_scripts/unwrapping.py"
script_args = folder + \
" --file strip_phase.npy" +\
" --folder_path " + folder_path + \
" --algorithm relnp"
unw_proc = subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd)
unw_proc.wait()
else:
proc_list.append(subprocess.Popen([py_interp, script, *(script_args.split(" "))], cwd=cwd))
for p in proc_list:
p.wait() | 0.132374 | 0.056652 |
from PyQt5.QtGui import QImage, QColor
from Common.color import Palette, Color
from threading import Thread
import numpy as np
class Image(QImage):
def __init__(self, image_path: str, width: int = 0, height: int = 0):
img = QImage(image_path)
if (width > 0) and (height > 0):
img = img.scaled(width, height)
super().__init__(img)
pass
    def quantized(self, pal: Palette, thread_count: int = 1):
        """Return a copy of this image quantized to palette *pal*.

        The image is split into horizontal stripes (one per worker thread),
        each stripe is quantized concurrently, and the stripes are stacked
        back together. Returns ``self.copy()`` unchanged when *pal* is None
        or the partition is empty.

        :param pal: target palette
        :param thread_count: number of worker threads (minimum 1)
        """
        count = max(1, thread_count)
        threads = []
        partition = Image.calc_height_partition(count, self.height())
        dst_vectors = []
        src_vectors = []
        y_areas = []
        src_vector = Image.get_matrix(self)
        result = None
        if (pal is None) or (len(partition) == 0):
            return self.copy()
        for n in range(len(partition)):
            x_start = 0
            x_end = self.width() - 1
            y_start = sum(partition[:n])
            y_end = y_start + partition[n] - 1
            # NOTE(review): both slices index ROWS of the row-major matrix;
            # the second slice does not select columns as the names suggest.
            # It is harmless only while x_end + 1 >= stripe height — confirm.
            dst_vector = src_vector[y_start:(y_end + 1)][x_start:(x_end + 1)]
            src_vectors.append(dst_vector)
            # NOTE(review): list.copy() is shallow — the inner row lists are
            # shared between src and dst, so workers write through to src too.
            dst_vectors.append(dst_vector.copy())
            y_areas.append((y_start, y_end))
            thread = Thread(target=Image.__quantized, args=(src_vectors[-1], dst_vectors[-1],
                                                            Palette(colors=pal.colors),
                                                            0, 0,
                                                            len(dst_vector[0]) - 1,
                                                            len(dst_vector) - 1,))
            threads.append(thread)
            thread.start()
        # join in launch order and stack the quantized stripes top-to-bottom
        for thread, dst_matrix, y_area in zip(threads, dst_vectors, y_areas):
            thread.join()
            if result is None:
                result = dst_matrix.copy()
            else:
                result = np.vstack((result, dst_matrix))
        return Image.get_image(result, self.format(), 0, len(result) - 1)
@staticmethod
def __quantized(src: list, dst: list, pal: Palette, xs: int, ys: int, xe: int, ye: int):
for y in range(ys, ye + 1):
for x in range(xs, xe + 1):
pixel = src[y][x]
dst[y][x] = Color.quantize(pixel, pal)
pass
@staticmethod
def get_matrix(img: QImage):
matrix = list()
for y in range(img.height()):
matrix.append(list())
for x in range(img.width()):
vec = Color.get_vector(QColor(img.pixel(x, y)))
matrix[y].append(vec)
return matrix
@staticmethod
def get_image(matrix, fmt, ys, ye):
img = QImage(len(matrix[0]), len(matrix), fmt)
for y in range(ys, ye + 1):
for x in range(len(matrix[y])):
color_vector = matrix[y][x]
rgb = QColor(color_vector[0], color_vector[1], color_vector[2]).rgb()
img.setPixel(x, y, rgb)
return img
@staticmethod
def calc_height_partition(counts: int, height: int):
if (counts == 1) or (counts == 0):
return [height]
if height == 0:
return []
partitions = []
best_partition = None
count = min(counts, height)
best_max = 1e9
for divider in range(2, count + 1):
parts = height // divider
remain = height % divider
partition = [parts] * divider
if remain > 0:
if len(partition) < count:
partition.append(remain)
else:
partition[-1] += remain
partitions.append(partition)
for partition in partitions:
part_max = max(partition)
if part_max < best_max:
best_max = part_max
best_partition = partition
return best_partition | src/Common/image.py | from PyQt5.QtGui import QImage, QColor
from Common.color import Palette, Color
from threading import Thread
import numpy as np
class Image(QImage):
def __init__(self, image_path: str, width: int = 0, height: int = 0):
img = QImage(image_path)
if (width > 0) and (height > 0):
img = img.scaled(width, height)
super().__init__(img)
pass
def quantized(self, pal: Palette, thread_count: int = 1):
count = max(1, thread_count)
threads = []
partition = Image.calc_height_partition(count, self.height())
dst_vectors = []
src_vectors = []
y_areas = []
src_vector = Image.get_matrix(self)
result = None
if (pal is None) or (len(partition) == 0):
return self.copy()
for n in range(len(partition)):
x_start = 0
x_end = self.width() - 1
y_start = sum(partition[:n])
y_end = y_start + partition[n] - 1
dst_vector = src_vector[y_start:(y_end + 1)][x_start:(x_end + 1)]
src_vectors.append(dst_vector)
dst_vectors.append(dst_vector.copy())
y_areas.append((y_start, y_end))
thread = Thread(target=Image.__quantized, args=(src_vectors[-1], dst_vectors[-1],
Palette(colors=pal.colors),
0, 0,
len(dst_vector[0]) - 1,
len(dst_vector) - 1,))
threads.append(thread)
thread.start()
for thread, dst_matrix, y_area in zip(threads, dst_vectors, y_areas):
thread.join()
if result is None:
result = dst_matrix.copy()
else:
result = np.vstack((result, dst_matrix))
return Image.get_image(result, self.format(), 0, len(result) - 1)
@staticmethod
def __quantized(src: list, dst: list, pal: Palette, xs: int, ys: int, xe: int, ye: int):
for y in range(ys, ye + 1):
for x in range(xs, xe + 1):
pixel = src[y][x]
dst[y][x] = Color.quantize(pixel, pal)
pass
@staticmethod
def get_matrix(img: QImage):
matrix = list()
for y in range(img.height()):
matrix.append(list())
for x in range(img.width()):
vec = Color.get_vector(QColor(img.pixel(x, y)))
matrix[y].append(vec)
return matrix
@staticmethod
def get_image(matrix, fmt, ys, ye):
img = QImage(len(matrix[0]), len(matrix), fmt)
for y in range(ys, ye + 1):
for x in range(len(matrix[y])):
color_vector = matrix[y][x]
rgb = QColor(color_vector[0], color_vector[1], color_vector[2]).rgb()
img.setPixel(x, y, rgb)
return img
@staticmethod
def calc_height_partition(counts: int, height: int):
if (counts == 1) or (counts == 0):
return [height]
if height == 0:
return []
partitions = []
best_partition = None
count = min(counts, height)
best_max = 1e9
for divider in range(2, count + 1):
parts = height // divider
remain = height % divider
partition = [parts] * divider
if remain > 0:
if len(partition) < count:
partition.append(remain)
else:
partition[-1] += remain
partitions.append(partition)
for partition in partitions:
part_max = max(partition)
if part_max < best_max:
best_max = part_max
best_partition = partition
return best_partition | 0.544559 | 0.352648 |
import os
import pandas as pd
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
from rpy2.robjects import r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
utils = rpackages.importr('utils')
utils.chooseCRANmirror(ind=1)
packnames = ['PKNCA', 'colorRamps']
names_to_install = []
# names_to_install = [x for packnames if not rpackages.isinstalled(x)]
for x in packnames:
if (rpackages.isinstalled(x) == False):
names_to_install.append(x)
if len(names_to_install) > 0:
utils.install_packages(StrVector(names_to_install))
rpkcna = importr('PKNCA')
raucx = r['pk.calc.auc']
rc = r['c']
# <><><> DEFINE FUNCTIONS <><><>
def calc_auc_percentile( input_df ):
    """
    calculate each AUC percentile of decay curve

    For each percentage 1..99, finds the smallest prefix of the
    metric-sorted OTUs whose partial AUC exceeds that fraction of the
    total AUC (computed via PKNCA's pk.calc.auc in R).

    NOTE(review): the while loop recomputes the partial AUC from index 1
    on every iteration — O(n^2) R calls; pre-computing cumulative areas
    would be linear. Also, end_range is incremented once more after the
    threshold is crossed, so the recorded index may be one past the
    crossing point — confirm intended semantics before changing.

    :param input_df: Important features and OTUs that have been passed on to be ordered
    :return: ordered AUC percentiles of which OTU is most influential for percentile
    """
    input_df = input_df.sort_values("metric", axis=0, ascending=False)
    input_df.index = range(1, len(input_df) + 1)
    input_auc = raucx(input_df["metric"], input_df.index, interval=rc(1, len(input_df["metric"])))
    result_df = pd.DataFrame(columns=['auc', 'otu.num'])
    parameter_df = pd.DataFrame(columns=['x', 'y'])
    for factor in np.arange(0.01, 1.00, 0.01):
        area = 0.0
        end_range = 2
        # 1. calculate the area of each trapezoid
        while (area <= round(factor, 2) * input_auc):
            area = raucx(input_df["metric"], input_df.index, interval=rc(1, end_range))
            end_range += 1
        print( f"The point at which we reach {str(round(factor * 100, 2))}% of the AUC is = {str(end_range)}" )
        #2. sum trapezoid areas to get AUC
        result_df.loc[int(round(factor * 100, 2))] = ["auc" + str(int(round(factor * 100, 2)))] + [end_range]
    result_df.loc[100] = ["auc100"] + [len(input_df["metric"])]
    parameter_df['x'] = input_df.index - 1
    parameter_df['y'] = input_df["metric"]
    parameter_df.loc[len(input_df)] = [len(input_df)] + [parameter_df.iloc[len(input_df) - 1, 1]]
    return result_df, parameter_df.iloc[1:, :]
# <><><> DEFINE EXECUTION FUNCTION <><><>
def main( input_df, name, detailed=False ):
"""
Each OTU is now ordered by centrality and the AUC of each is calculated.
:param input_df: Important features and OTUs that have been passed on to be ordered
:param name: name attached to all detailed output
:param detailed: Output helper tables
:return: ordered AUC percentiles of which OTU is most influential for percentile
"""
out_dir = f"{os.path.dirname(os.path.realpath(__file__))}/output"
# allows for cleaner execution and use of relative paths
if( detailed ):
out_file = f"{out_dir}/{name}_auc_result.csv"
parameter_file = f"{out_dir}/{name}_auc_parameter.csv"
# Create new files for output
out_file = open( out_file, "w+", encoding="utf-8")
parameter_file = open( parameter_file, "w+", encoding="utf-8" )
print(f"Processing Input dataFrame: {name}")
result, param = calc_auc_percentile(input_df )
print(f"Output is written in file: {out_file}")
print(f"Parameters are written in file: {parameter_file}")
# Write to CSV since this is detailed
result.to_csv(out_file)
param.to_csv(parameter_file)
out_file.close()
parameter_file.close()
else:
print(f"Processing Input dataFrame: {name}")
result, param = calc_auc_percentile( input_df )
# Return results dataframe along with the parameters dataframe
return result, param | q2_winnowing/step4_5/decay_curve.py |
import os
import pandas as pd
import numpy as np
from rpy2.robjects.packages import importr
import rpy2.robjects.packages as rpackages
from rpy2.robjects.vectors import StrVector
from rpy2.robjects import r
from rpy2.robjects import pandas2ri
pandas2ri.activate()
utils = rpackages.importr('utils')
utils.chooseCRANmirror(ind=1)
packnames = ['PKNCA', 'colorRamps']
names_to_install = []
# names_to_install = [x for packnames if not rpackages.isinstalled(x)]
for x in packnames:
if (rpackages.isinstalled(x) == False):
names_to_install.append(x)
if len(names_to_install) > 0:
utils.install_packages(StrVector(names_to_install))
rpkcna = importr('PKNCA')
raucx = r['pk.calc.auc']
rc = r['c']
# <><><> DEFINE FUNCTIONS <><><>
def calc_auc_percentile( input_df ):
"""
calculate each AUC percentile of decay curve
:param input_df: Important features and OTUs that have been passed on to be ordered
:return: ordered AUC percentiles of which OTU is most influential for percentile
"""
input_df = input_df.sort_values("metric", axis=0, ascending=False)
input_df.index = range(1, len(input_df) + 1)
input_auc = raucx(input_df["metric"], input_df.index, interval=rc(1, len(input_df["metric"])))
result_df = pd.DataFrame(columns=['auc', 'otu.num'])
parameter_df = pd.DataFrame(columns=['x', 'y'])
for factor in np.arange(0.01, 1.00, 0.01):
area = 0.0
end_range = 2
# 1. calculate the area of each trapezoid
while (area <= round(factor, 2) * input_auc):
area = raucx(input_df["metric"], input_df.index, interval=rc(1, end_range))
end_range += 1
print( f"The point at which we reach {str(round(factor * 100, 2))}% of the AUC is = {str(end_range)}" )
#2. sum trapezoid areas to get AUC
result_df.loc[int(round(factor * 100, 2))] = ["auc" + str(int(round(factor * 100, 2)))] + [end_range]
result_df.loc[100] = ["auc100"] + [len(input_df["metric"])]
parameter_df['x'] = input_df.index - 1
parameter_df['y'] = input_df["metric"]
parameter_df.loc[len(input_df)] = [len(input_df)] + [parameter_df.iloc[len(input_df) - 1, 1]]
return result_df, parameter_df.iloc[1:, :]
# <><><> DEFINE EXECUTION FUNCTION <><><>
def main( input_df, name, detailed=False ):
"""
Each OTU is now ordered by centrality and the AUC of each is calculated.
:param input_df: Important features and OTUs that have been passed on to be ordered
:param name: name attached to all detailed output
:param detailed: Output helper tables
:return: ordered AUC percentiles of which OTU is most influential for percentile
"""
out_dir = f"{os.path.dirname(os.path.realpath(__file__))}/output"
# allows for cleaner execution and use of relative paths
if( detailed ):
out_file = f"{out_dir}/{name}_auc_result.csv"
parameter_file = f"{out_dir}/{name}_auc_parameter.csv"
# Create new files for output
out_file = open( out_file, "w+", encoding="utf-8")
parameter_file = open( parameter_file, "w+", encoding="utf-8" )
print(f"Processing Input dataFrame: {name}")
result, param = calc_auc_percentile(input_df )
print(f"Output is written in file: {out_file}")
print(f"Parameters are written in file: {parameter_file}")
# Write to CSV since this is detailed
result.to_csv(out_file)
param.to_csv(parameter_file)
out_file.close()
parameter_file.close()
else:
print(f"Processing Input dataFrame: {name}")
result, param = calc_auc_percentile( input_df )
# Return results dataframe along with the parameters dataframe
return result, param | 0.436142 | 0.512693 |
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TagUpsizeRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'upsize_tag_name': 'TagName',
'upsize_tag_creator': 'TagCreator'
}
attribute_map = {
'upsize_tag_name': 'upsizeTagName',
'upsize_tag_creator': 'upsizeTagCreator'
}
def __init__(self, upsize_tag_name=None, upsize_tag_creator=None, _configuration=None): # noqa: E501
"""TagUpsizeRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._upsize_tag_name = None
self._upsize_tag_creator = None
self.discriminator = None
self.upsize_tag_name = upsize_tag_name
self.upsize_tag_creator = upsize_tag_creator
@property
def upsize_tag_name(self):
"""Gets the upsize_tag_name of this TagUpsizeRequest. # noqa: E501
:return: The upsize_tag_name of this TagUpsizeRequest. # noqa: E501
:rtype: TagName
"""
return self._upsize_tag_name
@upsize_tag_name.setter
def upsize_tag_name(self, upsize_tag_name):
"""Sets the upsize_tag_name of this TagUpsizeRequest.
:param upsize_tag_name: The upsize_tag_name of this TagUpsizeRequest. # noqa: E501
:type: TagName
"""
if self._configuration.client_side_validation and upsize_tag_name is None:
raise ValueError("Invalid value for `upsize_tag_name`, must not be `None`") # noqa: E501
self._upsize_tag_name = upsize_tag_name
@property
def upsize_tag_creator(self):
"""Gets the upsize_tag_creator of this TagUpsizeRequest. # noqa: E501
:return: The upsize_tag_creator of this TagUpsizeRequest. # noqa: E501
:rtype: TagCreator
"""
return self._upsize_tag_creator
@upsize_tag_creator.setter
def upsize_tag_creator(self, upsize_tag_creator):
"""Sets the upsize_tag_creator of this TagUpsizeRequest.
:param upsize_tag_creator: The upsize_tag_creator of this TagUpsizeRequest. # noqa: E501
:type: TagCreator
"""
if self._configuration.client_side_validation and upsize_tag_creator is None:
raise ValueError("Invalid value for `upsize_tag_creator`, must not be `None`") # noqa: E501
self._upsize_tag_creator = upsize_tag_creator
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TagUpsizeRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TagUpsizeRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TagUpsizeRequest):
return True
return self.to_dict() != other.to_dict() | lightly/openapi_generated/swagger_client/models/tag_upsize_request.py | import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class TagUpsizeRequest(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'upsize_tag_name': 'TagName',
'upsize_tag_creator': 'TagCreator'
}
attribute_map = {
'upsize_tag_name': 'upsizeTagName',
'upsize_tag_creator': 'upsizeTagCreator'
}
def __init__(self, upsize_tag_name=None, upsize_tag_creator=None, _configuration=None): # noqa: E501
"""TagUpsizeRequest - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._upsize_tag_name = None
self._upsize_tag_creator = None
self.discriminator = None
self.upsize_tag_name = upsize_tag_name
self.upsize_tag_creator = upsize_tag_creator
@property
def upsize_tag_name(self):
"""Gets the upsize_tag_name of this TagUpsizeRequest. # noqa: E501
:return: The upsize_tag_name of this TagUpsizeRequest. # noqa: E501
:rtype: TagName
"""
return self._upsize_tag_name
@upsize_tag_name.setter
def upsize_tag_name(self, upsize_tag_name):
"""Sets the upsize_tag_name of this TagUpsizeRequest.
:param upsize_tag_name: The upsize_tag_name of this TagUpsizeRequest. # noqa: E501
:type: TagName
"""
if self._configuration.client_side_validation and upsize_tag_name is None:
raise ValueError("Invalid value for `upsize_tag_name`, must not be `None`") # noqa: E501
self._upsize_tag_name = upsize_tag_name
@property
def upsize_tag_creator(self):
"""Gets the upsize_tag_creator of this TagUpsizeRequest. # noqa: E501
:return: The upsize_tag_creator of this TagUpsizeRequest. # noqa: E501
:rtype: TagCreator
"""
return self._upsize_tag_creator
@upsize_tag_creator.setter
def upsize_tag_creator(self, upsize_tag_creator):
"""Sets the upsize_tag_creator of this TagUpsizeRequest.
:param upsize_tag_creator: The upsize_tag_creator of this TagUpsizeRequest. # noqa: E501
:type: TagCreator
"""
if self._configuration.client_side_validation and upsize_tag_creator is None:
raise ValueError("Invalid value for `upsize_tag_creator`, must not be `None`") # noqa: E501
self._upsize_tag_creator = upsize_tag_creator
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TagUpsizeRequest, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TagUpsizeRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, TagUpsizeRequest):
return True
return self.to_dict() != other.to_dict() | 0.642096 | 0.153708 |
import datetime
from google.appengine.ext import ndb
from components import auth
from components import utils
from testing_utils import testing
from test import test_util
from test.test_util import future
from go.chromium.org.luci.buildbucket.proto import common_pb2
import expiration
import model
class ExpireBuildTests(testing.AppengineTestCase):
def setUp(self):
super(ExpireBuildTests, self).setUp()
self.now = datetime.datetime(2015, 1, 1)
self.patch('components.utils.utcnow', side_effect=lambda: self.now)
self.patch('tq.enqueue_async', autospec=True, return_value=future(None))
def test_reschedule_builds_with_expired_leases(self):
build = test_util.build()
build.lease_expiration_date = utils.utcnow()
build.lease_key = 1
build.leasee = auth.Anonymous
build.put()
expiration.expire_build_leases()
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.SCHEDULED)
self.assertIsNone(build.lease_key)
self.assertIsNone(build.leasee)
def test_completed_builds_are_not_reset(self):
build = test_util.build(status=common_pb2.SUCCESS)
build.put()
expiration.expire_build_leases()
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.SUCCESS)
def test_expire_builds(self):
build_time = utils.utcnow() - datetime.timedelta(days=365)
build = test_util.build(create_time=test_util.dt2ts(build_time))
build.put()
expiration.expire_builds()
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.INFRA_FAILURE)
self.assertTrue(build.proto.status_details.HasField('timeout'))
self.assertIsNone(build.lease_key)
def test_delete_builds(self):
old_build_time = utils.utcnow() - model.BUILD_STORAGE_DURATION * 2
old_build = test_util.build(create_time=test_util.dt2ts(old_build_time))
old_build_steps = model.BuildSteps(
key=model.BuildSteps.key_for(old_build.key),
step_container_bytes='',
)
new_build_time = utils.utcnow() - model.BUILD_STORAGE_DURATION / 2
new_build = test_util.build(create_time=test_util.dt2ts(new_build_time))
ndb.put_multi([old_build, old_build_steps, new_build])
expiration.delete_builds()
self.assertIsNone(old_build.key.get())
self.assertIsNone(old_build_steps.key.get())
self.assertIsNotNone(new_build.key.get()) | appengine/cr-buildbucket/test/expiration_test.py |
import datetime
from google.appengine.ext import ndb
from components import auth
from components import utils
from testing_utils import testing
from test import test_util
from test.test_util import future
from go.chromium.org.luci.buildbucket.proto import common_pb2
import expiration
import model
class ExpireBuildTests(testing.AppengineTestCase):
def setUp(self):
super(ExpireBuildTests, self).setUp()
self.now = datetime.datetime(2015, 1, 1)
self.patch('components.utils.utcnow', side_effect=lambda: self.now)
self.patch('tq.enqueue_async', autospec=True, return_value=future(None))
def test_reschedule_builds_with_expired_leases(self):
build = test_util.build()
build.lease_expiration_date = utils.utcnow()
build.lease_key = 1
build.leasee = auth.Anonymous
build.put()
expiration.expire_build_leases()
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.SCHEDULED)
self.assertIsNone(build.lease_key)
self.assertIsNone(build.leasee)
def test_completed_builds_are_not_reset(self):
build = test_util.build(status=common_pb2.SUCCESS)
build.put()
expiration.expire_build_leases()
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.SUCCESS)
def test_expire_builds(self):
build_time = utils.utcnow() - datetime.timedelta(days=365)
build = test_util.build(create_time=test_util.dt2ts(build_time))
build.put()
expiration.expire_builds()
build = build.key.get()
self.assertEqual(build.proto.status, common_pb2.INFRA_FAILURE)
self.assertTrue(build.proto.status_details.HasField('timeout'))
self.assertIsNone(build.lease_key)
def test_delete_builds(self):
old_build_time = utils.utcnow() - model.BUILD_STORAGE_DURATION * 2
old_build = test_util.build(create_time=test_util.dt2ts(old_build_time))
old_build_steps = model.BuildSteps(
key=model.BuildSteps.key_for(old_build.key),
step_container_bytes='',
)
new_build_time = utils.utcnow() - model.BUILD_STORAGE_DURATION / 2
new_build = test_util.build(create_time=test_util.dt2ts(new_build_time))
ndb.put_multi([old_build, old_build_steps, new_build])
expiration.delete_builds()
self.assertIsNone(old_build.key.get())
self.assertIsNone(old_build_steps.key.get())
self.assertIsNotNone(new_build.key.get()) | 0.421909 | 0.277785 |
import random
from chaoscf.actions import terminate_app_instance, \
terminate_some_random_instance
from chaoscf.api import get_apps_for_org, get_app_instances
from chaoslib import Configuration, Secrets
from chaoslib.exceptions import FailedActivity
__all__ = ['get_app_states_by_org', 'terminate_random_app_instance',
'terminate_some_random_instances']
def get_app_states_by_org(org_name: str, configuration: Configuration,
secrets: Secrets):
apps = get_apps_for_org(org_name, configuration, secrets)['resources']
if not apps:
raise FailedActivity(
"no app was found under org: '{o}'.".format(o=org_name))
result = []
for app in apps:
result.append({
'name': app['entity']['name'],
'state': app['entity']['state']
})
return result
def terminate_random_app_instance(org_name: str, configuration: Configuration,
secrets: Secrets):
"""
Terminate a random instance under a randomly picked app for a specified
org name.
"""
apps = get_apps_for_org(org_name, configuration, secrets)
app_names = [app['entity']['name'] for app in apps['resources']]
app_name = random.choice(app_names)
terminate_some_random_instance(app_name, configuration, secrets, org_name)
def terminate_some_random_instances(app_name: str,
configuration: Configuration,
secrets: Secrets, count: int = 0,
percentage: int = 0, org_name: str = None,
space_name: str = None):
"""
Terminate random instances under a specified app.
The number of instances to terminate can be specified by count or
percentage. When both of count and percentage are specified, percentage
overrides the count. When the number of instances to terminate is bigger
than the one of existing instances, all instances will be terminated.
"""
instances = get_app_instances(
app_name, configuration, secrets, org_name=org_name,
space_name=space_name)
indices = [idx for idx in instances.keys()]
instance_count = len(indices)
if percentage > 0:
count = int(instance_count * percentage / 100)
indices_to_terminate = random.sample(indices, min(count, instance_count))
for idx in indices_to_terminate:
terminate_app_instance(
app_name, idx, configuration, secrets, org_name, space_name) | kallisticore/modules/cloud_foundry/actions.py | import random
from chaoscf.actions import terminate_app_instance, \
terminate_some_random_instance
from chaoscf.api import get_apps_for_org, get_app_instances
from chaoslib import Configuration, Secrets
from chaoslib.exceptions import FailedActivity
__all__ = ['get_app_states_by_org', 'terminate_random_app_instance',
'terminate_some_random_instances']
def get_app_states_by_org(org_name: str, configuration: Configuration,
secrets: Secrets):
apps = get_apps_for_org(org_name, configuration, secrets)['resources']
if not apps:
raise FailedActivity(
"no app was found under org: '{o}'.".format(o=org_name))
result = []
for app in apps:
result.append({
'name': app['entity']['name'],
'state': app['entity']['state']
})
return result
def terminate_random_app_instance(org_name: str, configuration: Configuration,
secrets: Secrets):
"""
Terminate a random instance under a randomly picked app for a specified
org name.
"""
apps = get_apps_for_org(org_name, configuration, secrets)
app_names = [app['entity']['name'] for app in apps['resources']]
app_name = random.choice(app_names)
terminate_some_random_instance(app_name, configuration, secrets, org_name)
def terminate_some_random_instances(app_name: str,
configuration: Configuration,
secrets: Secrets, count: int = 0,
percentage: int = 0, org_name: str = None,
space_name: str = None):
"""
Terminate random instances under a specified app.
The number of instances to terminate can be specified by count or
percentage. When both of count and percentage are specified, percentage
overrides the count. When the number of instances to terminate is bigger
than the one of existing instances, all instances will be terminated.
"""
instances = get_app_instances(
app_name, configuration, secrets, org_name=org_name,
space_name=space_name)
indices = [idx for idx in instances.keys()]
instance_count = len(indices)
if percentage > 0:
count = int(instance_count * percentage / 100)
indices_to_terminate = random.sample(indices, min(count, instance_count))
for idx in indices_to_terminate:
terminate_app_instance(
app_name, idx, configuration, secrets, org_name, space_name) | 0.592431 | 0.153391 |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from scanner.background.DBmanager import DBmanager, check
from scanner.background import send_message
from .forms import IDAndItem
from django.contrib.auth import logout, authenticate, login
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
def homepage(request):
if request.user.is_authenticated:
#DBmanager.reload()
if request.method == 'POST':
form = IDAndItem(request.POST)
if form.is_valid():
DBmanager.process_form(form.cleaned_data["student_id"], form.cleaned_data["item"])
for message in check.messageList:
send_message.make_toast(request, message[0], message[1], message[2])
check.messageList.clear()
return render(request, "scanner/home.html", {"form": IDAndItem, "itemList": check.itemList})
else:
return redirect("scanner:login")
def overview(request):
if request.user.is_authenticated:
missingDic = {"missing": DBmanager.create_overview()}
return render(request, "scanner/overview.html", {"missingDic": missingDic})
else:
return redirect("scanner:login")
def history(request):
if request.user.is_authenticated:
entriesDic = {"missing": DBmanager.create_history()}
return render(request, "scanner/history.html", {"missingDic": entriesDic})
else:
return redirect("scanner:login")
def logout_request(request):
logout(request)
messages.info(request, "Logged out successfully!")
return redirect("scanner:login")
def login_request(request):
if request.method == 'POST':
form = AuthenticationForm(request=request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}")
return redirect('/')
else:
messages.error(request, "Invalid username or password.")
else:
messages.error(request, "Invalid username or password.")
form = AuthenticationForm()
return render(request = request,
template_name = "scanner/login.html",
context={"form":form}) | gamesnstuff/scanner/views.py | from django.shortcuts import render, redirect
from django.http import HttpResponse
from scanner.background.DBmanager import DBmanager, check
from scanner.background import send_message
from .forms import IDAndItem
from django.contrib.auth import logout, authenticate, login
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
def homepage(request):
if request.user.is_authenticated:
#DBmanager.reload()
if request.method == 'POST':
form = IDAndItem(request.POST)
if form.is_valid():
DBmanager.process_form(form.cleaned_data["student_id"], form.cleaned_data["item"])
for message in check.messageList:
send_message.make_toast(request, message[0], message[1], message[2])
check.messageList.clear()
return render(request, "scanner/home.html", {"form": IDAndItem, "itemList": check.itemList})
else:
return redirect("scanner:login")
def overview(request):
if request.user.is_authenticated:
missingDic = {"missing": DBmanager.create_overview()}
return render(request, "scanner/overview.html", {"missingDic": missingDic})
else:
return redirect("scanner:login")
def history(request):
if request.user.is_authenticated:
entriesDic = {"missing": DBmanager.create_history()}
return render(request, "scanner/history.html", {"missingDic": entriesDic})
else:
return redirect("scanner:login")
def logout_request(request):
logout(request)
messages.info(request, "Logged out successfully!")
return redirect("scanner:login")
def login_request(request):
if request.method == 'POST':
form = AuthenticationForm(request=request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}")
return redirect('/')
else:
messages.error(request, "Invalid username or password.")
else:
messages.error(request, "Invalid username or password.")
form = AuthenticationForm()
return render(request = request,
template_name = "scanner/login.html",
context={"form":form}) | 0.387459 | 0.092729 |
# # S_ProjectionVGSub [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ProjectionVGSub&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-subordinated-brownian-motion).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
from numpy import arange, array, zeros, diff, abs, log, exp, sqrt, tile, r_, atleast_2d, newaxis
from numpy import sum as npsum, min as npmin, max as npmax
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, subplots, ylabel, \
xlabel, title, xticks
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import struct_to_dict, datenum, save_plot
from intersect_matlab import intersect
from EffectiveScenarios import EffectiveScenarios
from ConditionalFP import ConditionalFP
from MMFP import MMFP
from VG import VG
from ShiftedVGMoments import ShiftedVGMoments
# -
# ## Upload databases
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_OptionStrategy'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_OptionStrategy'), squeeze_me=True)
OptionStrategy = struct_to_dict(db['OptionStrategy'])
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_VIX'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_VIX'), squeeze_me=True)
VIX = struct_to_dict(db['VIX'])
# -
# ## Merge data
# +
# invariants (daily P&L)
pnl = OptionStrategy.cumPL
epsi = diff(pnl)
dates_x = array([datenum(i) for i in OptionStrategy.Dates])
dates_x = dates_x[1:]
# conditioning variable (VIX)
z = VIX.value
dates_z = VIX.Date
# merging datasets
[dates, i_epsi, i_z] = intersect(dates_x, dates_z)
pnl = pnl[i_epsi + 1]
epsi = epsi[i_epsi]
z = z[i_z]
t_ = len(epsi)
# -
# ## Compute the Flexible Probabilities conditioned via Entropy Pooling
# +
# prior
lam = log(2) / 1800 # half life 5y
prior = exp(-lam*abs(arange(t_, 1 + -1, -1))).reshape(1,-1)
prior = prior / npsum(prior)
# conditioner
VIX = namedtuple('VIX', 'Series TargetValue Leeway')
VIX.Series = z.reshape(1,-1)
VIX.TargetValue = atleast_2d(z[-1])
VIX.Leeway = 0.35
# flexible probabilities conditioned via EP
p = ConditionalFP(VIX, prior)
# effective number of scenarios
typ = namedtuple('type','Entropy')
typ.Entropy = 'Exp'
ens = EffectiveScenarios(p, typ)
# -
# ## Estimation of shifted-VG model
# +
# initial guess on parameters
shift0 = 0
theta0 = 0
sigma0 = 0.01
nu0 = 1
par0 = [shift0, theta0, sigma0, nu0]
# calibration
HFP = namedtuple('HFP', ['FlexProbs','Scenarios'])
HFP.FlexProbs = p
HFP.Scenarios = epsi
par = MMFP(HFP, 'SVG', par0)
shift = par.c
theta = par.theta
sigma = par.sigma
nu = par.nu
# #changing parameterization from {theta,sigma, nu} to {c,m,g}
# [c, m, g] = ParamChangeVG(theta,sigma,nu)
# -
# ## Initialize projection variables
tau = 15 # investment horizon
dt = 1 / 75 # infinitesimal step for simulations
t_j = arange(0,tau+dt,dt) # time vector for simulations
j_ = 2 # number of simulations
# +
# ## Simulate VG paths
[X, T] = VG(theta, sigma, nu, t_j, j_) # VG paths
X = X + tile(shift*t_j[newaxis,...], (j_, 1)) # shifted-VG path
X = pnl[t_-1] + X # centered path
dT = r_['-1',zeros((j_, 1)), diff(T, 1, 1)]
# -
# ## Projection to horizon
# moments
mu_tau, sigma2_tau, _, _ = ShiftedVGMoments(0, theta, sigma, nu, tau)
expectation = pnl[t_-1] + shift*tau + mu_tau # shift and center mean
sigma_tau = sqrt(sigma2_tau)
# ## Generate the figure
s_ = 2
# +
f, ax = subplots(3,1)
# figure settings
dgrey = [0.5, 0.5, 0.5]
color = {}
color [0]= 'b'
color [1]= [.9, .35, 0]
color [2]= 'm'
color [3]= 'g'
color [4]= 'c'
color [5]= 'y'
t = r_[arange(-s_,1),t_j[1:]]
plt.sca(ax[0])
m = min([npmin(X)*0.91, npmin(pnl[t_ - s_:])*0.91, pnl[-1] - 3*sigma_tau / 2])
M = max([npmax(X)*1.1, npmax(pnl[t_ - s_:])*1.1, expectation + 1.2*sigma_tau])
plt.axis([-s_, tau, m, M])
xlabel('time (days)')
ylabel('Risk driver')
xticks(arange(-s_,tau+1))
plt.grid(False)
title('Variance Gamma process (subordinated Brownian motion)')
for j in range(j_):
plot(t_j, X[j,:], color= color[j], lw=2)
for s in range(s_):
plot([s-s_, s-s_+1], [pnl[t_+s-s_-1], pnl[t_+s-s_]], color=dgrey, lw=2)
plot(s-s_, pnl[t_+s-s_-1], color=dgrey, linestyle='none', marker='.',markersize=15) # observation (dots)
plot(0, pnl[t_-1], color=dgrey, linestyle='none', marker='.',markersize=15)
plt.sca(ax[1])
M_v = npmax(dT)*1.1
m_v = -M_v*0.08
plt.axis([-s_, tau, m_v, M_v])
xlabel('time (days)')
ylabel('Stoch. time increment')
xticks(arange(-s_,tau+1))
plt.grid(False)
title('Gamma process')
for j in range(j_):
plot(t_j, dT[j,:], color= color[j], lw=2)
plot([-s_, 0], [0,0], color=dgrey, lw=2)
plt.sca(ax[2])
M_T = npmax(T[:,-1])*1.1
m_T = -M_T*0.08
plt.axis([-s_, tau, m_T, M_T])
xlabel('time (days)')
ylabel('Stoch. time')
xticks(arange(-s_,tau+1))
plt.grid(False)
title('Integrated Gamma process')
for j in range(j_):
plot(t_j, T[j,:], color= color[j], lw=2)
plot([-s_, 0], [0,0], color=dgrey, lw=2)
plt.tight_layout();
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]); | scripts/sources/S_ProjectionVGSub.py |
# # S_ProjectionVGSub [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=S_ProjectionVGSub&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=eb-subordinated-brownian-motion).
# ## Prepare the environment
# +
import os
import os.path as path
import sys
sys.path.append(path.abspath('../../functions-legacy'))
from collections import namedtuple
from numpy import arange, array, zeros, diff, abs, log, exp, sqrt, tile, r_, atleast_2d, newaxis
from numpy import sum as npsum, min as npmin, max as npmax
from scipy.io import loadmat
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot, subplots, ylabel, \
xlabel, title, xticks
plt.style.use('seaborn')
from CONFIG import GLOBAL_DB, TEMPORARY_DB
from ARPM_utils import struct_to_dict, datenum, save_plot
from intersect_matlab import intersect
from EffectiveScenarios import EffectiveScenarios
from ConditionalFP import ConditionalFP
from MMFP import MMFP
from VG import VG
from ShiftedVGMoments import ShiftedVGMoments
# -
# ## Upload databases
# +
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_OptionStrategy'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_OptionStrategy'), squeeze_me=True)
OptionStrategy = struct_to_dict(db['OptionStrategy'])
try:
db = loadmat(os.path.join(GLOBAL_DB, 'db_VIX'), squeeze_me=True)
except FileNotFoundError:
db = loadmat(os.path.join(TEMPORARY_DB, 'db_VIX'), squeeze_me=True)
VIX = struct_to_dict(db['VIX'])
# -
# ## Merge data
# +
# invariants (daily P&L)
pnl = OptionStrategy.cumPL
epsi = diff(pnl)
dates_x = array([datenum(i) for i in OptionStrategy.Dates])
dates_x = dates_x[1:]
# conditioning variable (VIX)
z = VIX.value
dates_z = VIX.Date
# merging datasets
[dates, i_epsi, i_z] = intersect(dates_x, dates_z)
pnl = pnl[i_epsi + 1]
epsi = epsi[i_epsi]
z = z[i_z]
t_ = len(epsi)
# -
# ## Compute the Flexible Probabilities conditioned via Entropy Pooling
# +
# prior
lam = log(2) / 1800 # half life 5y
prior = exp(-lam*abs(arange(t_, 1 + -1, -1))).reshape(1,-1)
prior = prior / npsum(prior)
# conditioner
VIX = namedtuple('VIX', 'Series TargetValue Leeway')
VIX.Series = z.reshape(1,-1)
VIX.TargetValue = atleast_2d(z[-1])
VIX.Leeway = 0.35
# flexible probabilities conditioned via EP
p = ConditionalFP(VIX, prior)
# effective number of scenarios
typ = namedtuple('type','Entropy')
typ.Entropy = 'Exp'
ens = EffectiveScenarios(p, typ)
# -
# ## Estimation of shifted-VG model
# +
# initial guess on parameters
shift0 = 0
theta0 = 0
sigma0 = 0.01
nu0 = 1
par0 = [shift0, theta0, sigma0, nu0]
# calibration
HFP = namedtuple('HFP', ['FlexProbs','Scenarios'])
HFP.FlexProbs = p
HFP.Scenarios = epsi
par = MMFP(HFP, 'SVG', par0)
shift = par.c
theta = par.theta
sigma = par.sigma
nu = par.nu
# #changing parameterization from {theta,sigma, nu} to {c,m,g}
# [c, m, g] = ParamChangeVG(theta,sigma,nu)
# -
# ## Initialize projection variables
tau = 15 # investment horizon
dt = 1 / 75 # infinitesimal step for simulations
t_j = arange(0,tau+dt,dt) # time vector for simulations
j_ = 2 # number of simulations
# +
# ## Simulate VG paths
[X, T] = VG(theta, sigma, nu, t_j, j_) # VG paths
X = X + tile(shift*t_j[newaxis,...], (j_, 1)) # shifted-VG path
X = pnl[t_-1] + X # centered path
dT = r_['-1',zeros((j_, 1)), diff(T, 1, 1)]
# -
# ## Projection to horizon
# moments
mu_tau, sigma2_tau, _, _ = ShiftedVGMoments(0, theta, sigma, nu, tau)
expectation = pnl[t_-1] + shift*tau + mu_tau # shift and center mean
sigma_tau = sqrt(sigma2_tau)
# ## Generate the figure
s_ = 2
# +
f, ax = subplots(3,1)
# figure settings
dgrey = [0.5, 0.5, 0.5]
color = {}
color [0]= 'b'
color [1]= [.9, .35, 0]
color [2]= 'm'
color [3]= 'g'
color [4]= 'c'
color [5]= 'y'
t = r_[arange(-s_,1),t_j[1:]]
plt.sca(ax[0])
m = min([npmin(X)*0.91, npmin(pnl[t_ - s_:])*0.91, pnl[-1] - 3*sigma_tau / 2])
M = max([npmax(X)*1.1, npmax(pnl[t_ - s_:])*1.1, expectation + 1.2*sigma_tau])
plt.axis([-s_, tau, m, M])
xlabel('time (days)')
ylabel('Risk driver')
xticks(arange(-s_,tau+1))
plt.grid(False)
title('Variance Gamma process (subordinated Brownian motion)')
for j in range(j_):
plot(t_j, X[j,:], color= color[j], lw=2)
for s in range(s_):
plot([s-s_, s-s_+1], [pnl[t_+s-s_-1], pnl[t_+s-s_]], color=dgrey, lw=2)
plot(s-s_, pnl[t_+s-s_-1], color=dgrey, linestyle='none', marker='.',markersize=15) # observation (dots)
plot(0, pnl[t_-1], color=dgrey, linestyle='none', marker='.',markersize=15)
plt.sca(ax[1])
M_v = npmax(dT)*1.1
m_v = -M_v*0.08
plt.axis([-s_, tau, m_v, M_v])
xlabel('time (days)')
ylabel('Stoch. time increment')
xticks(arange(-s_,tau+1))
plt.grid(False)
title('Gamma process')
for j in range(j_):
plot(t_j, dT[j,:], color= color[j], lw=2)
plot([-s_, 0], [0,0], color=dgrey, lw=2)
plt.sca(ax[2])
M_T = npmax(T[:,-1])*1.1
m_T = -M_T*0.08
plt.axis([-s_, tau, m_T, M_T])
xlabel('time (days)')
ylabel('Stoch. time')
xticks(arange(-s_,tau+1))
plt.grid(False)
title('Integrated Gamma process')
for j in range(j_):
plot(t_j, T[j,:], color= color[j], lw=2)
plot([-s_, 0], [0,0], color=dgrey, lw=2)
plt.tight_layout();
# save_plot(ax=plt.gca(), extension='png', scriptname=os.path.basename('.')[:-3], count=plt.get_fignums()[-1]); | 0.523908 | 0.473231 |
""" """
# Standard library modules.
# Third party modules.
import pytest
from qtpy import QtCore, QtGui
# Local modules.
from pymontecarlo_gui.options.material import (
FormulaValidator,
MaterialPureWidget,
MaterialFormulaWidget,
MaterialAdvancedWidget,
MaterialListWidget,
)
from pymontecarlo_gui.util.testutil import checkbox_click
from pymontecarlo.options.material import Material
from pymontecarlo.options.composition import generate_name, calculate_density_kg_per_m3
# Globals and constants variables.
@pytest.fixture
def formula_validator(qtbot):
return FormulaValidator()
def test_formula_validate_acceptable(qtbot, formula_validator):
state, text, pos = formula_validator.validate("Al2O3", 5)
assert state == QtGui.QValidator.Acceptable
assert text == "Al2O3"
assert pos == 5
def test_formula_validate_intermediate(qtbot, formula_validator):
state, text, pos = formula_validator.validate("A", 1)
assert state == QtGui.QValidator.Intermediate
assert text == "A"
assert pos == 1
def test_formula_validate_invalid(qtbot, formula_validator):
state, text, pos = formula_validator.validate("-", 1)
assert state == QtGui.QValidator.Invalid
assert text == "-"
assert pos == 1
@pytest.fixture
def material_pure_widget(qtbot):
return MaterialPureWidget()
def test_material_pure_widget(qtbot, material_pure_widget):
button = material_pure_widget.wdg_periodic_table._group.button(13)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
button = material_pure_widget.wdg_periodic_table._group.button(29)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
materials = material_pure_widget.materials()
assert len(materials) == 2
assert Material.pure(13) in materials
assert Material.pure(29) in materials
def test_material_pure_widget2(qtbot, material_pure_widget):
button = material_pure_widget.wdg_periodic_table._group.button(13)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
button = material_pure_widget.wdg_periodic_table._group.button(13)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
materials = material_pure_widget.materials()
assert not materials
@pytest.fixture
def material_formula_widget(qtbot):
return MaterialFormulaWidget()
def test_material_formula_widget_nomaterials(qtbot, material_formula_widget):
widget = material_formula_widget.field_formula.widget()
qtbot.keyClicks(widget, "A")
materials = material_formula_widget.materials()
assert not materials
def test_material_formula_widget_auto_density(qtbot, material_formula_widget):
widget = material_formula_widget.field_formula.widget()
qtbot.keyClicks(widget, "Al")
materials = material_formula_widget.materials()
assert len(materials) == 1
assert materials[0].density_kg_per_m3 == pytest.approx(
Material.pure(13).density_kg_per_m3, abs=1e-4
)
def test_material_formula_widget_user_density(qtbot, material_formula_widget):
widget = material_formula_widget.field_formula.widget()
qtbot.keyClicks(widget, "Al")
widget = material_formula_widget.field_density.suffixWidget()
widget.click()
widget = material_formula_widget.field_density.widget()
widget.clear()
qtbot.keyClicks(widget.lineedit, "9")
materials = material_formula_widget.materials()
assert len(materials) == 1
assert materials[0].density_kg_per_m3 == pytest.approx(9000, abs=1e-4)
@pytest.fixture
def material_advanced_widget(qtbot):
return MaterialAdvancedWidget()
def test_material_advanced_widget_nomaterials(qtbot, material_advanced_widget):
materials = material_advanced_widget.materials()
assert not materials
def test_material_advanced_widget_auto(qtbot, material_advanced_widget):
material_advanced_widget.tbl_composition.setComposition({13: 1.0})
materials = material_advanced_widget.materials()
assert len(materials) == 1
material = materials[0]
assert material.name == generate_name({13: 1.0})
assert material.composition == {13: 1.0}
assert material.density_kg_per_m3 == pytest.approx(
calculate_density_kg_per_m3({13: 1.0}), abs=1e-4
)
def test_material_advanced_widget_user(qtbot, material_advanced_widget):
widget = material_advanced_widget.field_name.suffixWidget()
widget.click()
widget = material_advanced_widget.field_name.widget()
widget.clear()
qtbot.keyClicks(widget, "foo")
material_advanced_widget.tbl_composition.setComposition({13: 1.0})
widget = material_advanced_widget.field_density.suffixWidget()
widget.click()
widget = material_advanced_widget.field_density.widget()
widget.clear()
qtbot.keyClicks(widget.lineedit, "9")
materials = material_advanced_widget.materials()
assert len(materials) == 1
material = materials[0]
assert material.name == "foo"
assert material.composition == {13: 1.0}
assert material.density_kg_per_m3 == pytest.approx(9000, abs=1e-4)
def test_material_advanced_widget_setMaterial(qtbot, material_advanced_widget):
material = Material("foo", {13: 1.0}, 9000)
material_advanced_widget.setMaterial(material)
widget = material_advanced_widget.field_name.suffixWidget()
assert not widget.isChecked()
widget = material_advanced_widget.field_name.widget()
assert widget.text() == material.name
widget = material_advanced_widget.field_density.suffixWidget()
assert widget.isChecked()
widget = material_advanced_widget.field_density.widget()
assert widget.value() == pytest.approx(material.density_g_per_cm3, abs=1e-4)
composition = material_advanced_widget.tbl_composition.composition()
assert composition == material.composition
materials = material_advanced_widget.materials()
assert len(materials) == 1
assert materials[0] == material
@pytest.fixture
def material_list_widget(qtbot, materials):
widget = MaterialListWidget()
widget.setMaterials(materials)
return widget
def test_material_list_widget_selectedMaterials(qtbot, material_list_widget):
assert not material_list_widget.selectedMaterials()
def test_material_list_widget_selectedMaterials_single(qtbot, material_list_widget):
material = material_list_widget.material(0)
material_list_widget.setSelectedMaterials([material])
selected_materials = material_list_widget.selectedMaterials()
assert len(selected_materials) == 1
assert material in selected_materials
def test_material_list_widget_selectedMaterials_remove(qtbot, material_list_widget):
material = material_list_widget.material(0)
material_list_widget.setSelectedMaterials([material])
material_list_widget.removeMaterial(material)
assert len(material_list_widget.materials()) == 2
assert not material_list_widget.selectedMaterials()
def test_material_list_widget_selectedMaterials_add(qtbot, material_list_widget):
material = material_list_widget.material(0)
material_list_widget.setSelectedMaterials([material])
newmaterial = Material.pure(28)
material_list_widget.addMaterial(newmaterial)
assert newmaterial in material_list_widget.materials()
selected_materials = material_list_widget.selectedMaterials()
assert len(selected_materials) == 1
assert material in selected_materials | pymontecarlo_gui/options/test_material.py | """ """
# Standard library modules.
# Third party modules.
import pytest
from qtpy import QtCore, QtGui
# Local modules.
from pymontecarlo_gui.options.material import (
FormulaValidator,
MaterialPureWidget,
MaterialFormulaWidget,
MaterialAdvancedWidget,
MaterialListWidget,
)
from pymontecarlo_gui.util.testutil import checkbox_click
from pymontecarlo.options.material import Material
from pymontecarlo.options.composition import generate_name, calculate_density_kg_per_m3
# Globals and constants variables.
@pytest.fixture
def formula_validator(qtbot):
return FormulaValidator()
def test_formula_validate_acceptable(qtbot, formula_validator):
state, text, pos = formula_validator.validate("Al2O3", 5)
assert state == QtGui.QValidator.Acceptable
assert text == "Al2O3"
assert pos == 5
def test_formula_validate_intermediate(qtbot, formula_validator):
state, text, pos = formula_validator.validate("A", 1)
assert state == QtGui.QValidator.Intermediate
assert text == "A"
assert pos == 1
def test_formula_validate_invalid(qtbot, formula_validator):
state, text, pos = formula_validator.validate("-", 1)
assert state == QtGui.QValidator.Invalid
assert text == "-"
assert pos == 1
@pytest.fixture
def material_pure_widget(qtbot):
return MaterialPureWidget()
def test_material_pure_widget(qtbot, material_pure_widget):
button = material_pure_widget.wdg_periodic_table._group.button(13)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
button = material_pure_widget.wdg_periodic_table._group.button(29)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
materials = material_pure_widget.materials()
assert len(materials) == 2
assert Material.pure(13) in materials
assert Material.pure(29) in materials
def test_material_pure_widget2(qtbot, material_pure_widget):
button = material_pure_widget.wdg_periodic_table._group.button(13)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
button = material_pure_widget.wdg_periodic_table._group.button(13)
qtbot.mouseClick(button, QtCore.Qt.LeftButton)
materials = material_pure_widget.materials()
assert not materials
@pytest.fixture
def material_formula_widget(qtbot):
return MaterialFormulaWidget()
def test_material_formula_widget_nomaterials(qtbot, material_formula_widget):
widget = material_formula_widget.field_formula.widget()
qtbot.keyClicks(widget, "A")
materials = material_formula_widget.materials()
assert not materials
def test_material_formula_widget_auto_density(qtbot, material_formula_widget):
widget = material_formula_widget.field_formula.widget()
qtbot.keyClicks(widget, "Al")
materials = material_formula_widget.materials()
assert len(materials) == 1
assert materials[0].density_kg_per_m3 == pytest.approx(
Material.pure(13).density_kg_per_m3, abs=1e-4
)
def test_material_formula_widget_user_density(qtbot, material_formula_widget):
widget = material_formula_widget.field_formula.widget()
qtbot.keyClicks(widget, "Al")
widget = material_formula_widget.field_density.suffixWidget()
widget.click()
widget = material_formula_widget.field_density.widget()
widget.clear()
qtbot.keyClicks(widget.lineedit, "9")
materials = material_formula_widget.materials()
assert len(materials) == 1
assert materials[0].density_kg_per_m3 == pytest.approx(9000, abs=1e-4)
@pytest.fixture
def material_advanced_widget(qtbot):
return MaterialAdvancedWidget()
def test_material_advanced_widget_nomaterials(qtbot, material_advanced_widget):
materials = material_advanced_widget.materials()
assert not materials
def test_material_advanced_widget_auto(qtbot, material_advanced_widget):
material_advanced_widget.tbl_composition.setComposition({13: 1.0})
materials = material_advanced_widget.materials()
assert len(materials) == 1
material = materials[0]
assert material.name == generate_name({13: 1.0})
assert material.composition == {13: 1.0}
assert material.density_kg_per_m3 == pytest.approx(
calculate_density_kg_per_m3({13: 1.0}), abs=1e-4
)
def test_material_advanced_widget_user(qtbot, material_advanced_widget):
widget = material_advanced_widget.field_name.suffixWidget()
widget.click()
widget = material_advanced_widget.field_name.widget()
widget.clear()
qtbot.keyClicks(widget, "foo")
material_advanced_widget.tbl_composition.setComposition({13: 1.0})
widget = material_advanced_widget.field_density.suffixWidget()
widget.click()
widget = material_advanced_widget.field_density.widget()
widget.clear()
qtbot.keyClicks(widget.lineedit, "9")
materials = material_advanced_widget.materials()
assert len(materials) == 1
material = materials[0]
assert material.name == "foo"
assert material.composition == {13: 1.0}
assert material.density_kg_per_m3 == pytest.approx(9000, abs=1e-4)
def test_material_advanced_widget_setMaterial(qtbot, material_advanced_widget):
material = Material("foo", {13: 1.0}, 9000)
material_advanced_widget.setMaterial(material)
widget = material_advanced_widget.field_name.suffixWidget()
assert not widget.isChecked()
widget = material_advanced_widget.field_name.widget()
assert widget.text() == material.name
widget = material_advanced_widget.field_density.suffixWidget()
assert widget.isChecked()
widget = material_advanced_widget.field_density.widget()
assert widget.value() == pytest.approx(material.density_g_per_cm3, abs=1e-4)
composition = material_advanced_widget.tbl_composition.composition()
assert composition == material.composition
materials = material_advanced_widget.materials()
assert len(materials) == 1
assert materials[0] == material
@pytest.fixture
def material_list_widget(qtbot, materials):
widget = MaterialListWidget()
widget.setMaterials(materials)
return widget
def test_material_list_widget_selectedMaterials(qtbot, material_list_widget):
assert not material_list_widget.selectedMaterials()
def test_material_list_widget_selectedMaterials_single(qtbot, material_list_widget):
material = material_list_widget.material(0)
material_list_widget.setSelectedMaterials([material])
selected_materials = material_list_widget.selectedMaterials()
assert len(selected_materials) == 1
assert material in selected_materials
def test_material_list_widget_selectedMaterials_remove(qtbot, material_list_widget):
material = material_list_widget.material(0)
material_list_widget.setSelectedMaterials([material])
material_list_widget.removeMaterial(material)
assert len(material_list_widget.materials()) == 2
assert not material_list_widget.selectedMaterials()
def test_material_list_widget_selectedMaterials_add(qtbot, material_list_widget):
material = material_list_widget.material(0)
material_list_widget.setSelectedMaterials([material])
newmaterial = Material.pure(28)
material_list_widget.addMaterial(newmaterial)
assert newmaterial in material_list_widget.materials()
selected_materials = material_list_widget.selectedMaterials()
assert len(selected_materials) == 1
assert material in selected_materials | 0.743727 | 0.462959 |
import os
from ..schema.utils.engine import DataBase
__all__ = ['db', 'GenericService', 'PermissionsMixin']
db_path = os.getenv('DATABASE_URI')
db = DataBase(db_path)
def make_key(**opts):
key = '-'.join([
f'{k}={v}'
for k, v in dict(opts).items()
])
return key
# Caching is currently disabled; too buggy.
# If this ends up being inefficient
# we should return to caching and fix it
class ServiceMetaClass(type):
@property
def db(cls):
return db
def cache(cls, key, value):
try:
cls.__cache
except AttributeError:
cls.__cache = {}
cls.__cache[key] = value
def cached(cls, key):
try:
cls.__cache
except AttributeError:
cls.__cache = {}
# Using 'get' instead of 'pop' here would enable caching
return cls.__cache.pop(key, None)
def clear_cache(cls):
cls.__cache = {}
class GenericService(metaclass=ServiceMetaClass):
__model__ = None
__unique_on__ = []
auto_commit = True
@classmethod
def commit(cls):
cls.clear_cache()
cls.db.session.commit()
@classmethod
def rollback(cls):
cls.clear_cache()
cls.db.session.rollback()
@classmethod
def create(cls, *, no_commit=False, check_unique=True, **kwargs):
opts = dict(kwargs)
for k in list(opts):
opt = opts.pop(k)
if hasattr(opt, 'id') and hasattr(cls.__model__, f'{k}_id'):
opts[f'{k}_id'] = opt.id
else:
opts[k] = opt
if check_unique and cls.__unique_on__:
check = {k: opts.get(k) for k in cls.__unique_on__}
existing_term = cls.get(**check)
if existing_term:
for k, v in kwargs.items():
if hasattr(existing_term, k):
setattr(existing_term, k, v)
cls.db.session.add(existing_term)
if not no_commit and cls.auto_commit:
cls.commit()
return existing_term
with cls.db.session.no_autoflush:
model = cls.__model__(**opts)
cls.db.session.add(model)
cls.cache(make_key(**opts), model)
if not no_commit and cls.auto_commit:
cls.commit()
return model
@classmethod
def get_all(cls, **kwargs):
key = make_key(**kwargs)
if not cls.cached(key):
with cls.db.session.no_autoflush:
model_query = cls.db.session.query(cls.__model__)
if kwargs:
model_query = model_query.filter_by(**kwargs)
models = model_query.all()
cls.cache(key, models)
ret = cls.cached(key) or []
if isinstance(ret, cls.__model__):
ret = [ret]
with cls.db.session.no_autoflush:
ret = [
x if (x in cls.db.session) else cls.db.session.merge(x)
for x in ret
]
return ret
@classmethod
def get(cls, model_id=None, **kwargs):
ret = None
query_options = kwargs.pop('query_options', None)
if model_id is not None:
if not isinstance(model_id, int):
raise TypeError(
'"model_id" must be of type int,'
f' not {model_id.__class__.__name__}'
)
if model_id > 0:
if not cls.cached(model_id):
with cls.db.session.no_autoflush:
q = cls.db.session.query(cls.__model__)
if query_options is not None:
q = q.options(query_options)
model = q.get(
model_id
)
cls.cache(model_id, model)
ret = cls.cached(model_id)
elif kwargs:
models = cls.get_all(**kwargs)
models.append(None)
ret = models[0]
if ret:
with cls.db.session.no_autoflush:
if ret not in cls.db.session:
ret = cls.db.session.merge(ret)
return ret
@classmethod
def get_or_create(cls, model_id=None, no_commit=False, **kwargs):
model = cls.get(model_id=model_id, **kwargs)
if model is None:
check_unique = kwargs.pop('check_unique', False)
model = cls.create(
no_commit=no_commit, check_unique=check_unique, **kwargs
)
with cls.db.session.no_autoflush:
if model not in cls.db.session:
model = cls.db.session.merge(model)
return model
@classmethod
def update(cls, model):
if isinstance(model, list):
models = model
else:
models = [model]
for model in models:
if not isinstance(model, cls.__model__):
raise TypeError(
'"model" must be of type'
f' {cls.__model__.__class__.__name__},'
f' not {model.__class__.__name__}'
)
if model in cls.db.session:
cls.db.session.expunge(model)
cls.db.session.add_all(models)
cls.commit()
@classmethod
def delete(cls, model_or_id, no_commit=False):
if isinstance(model_or_id, int):
model = cls.get(model_or_id)
elif isinstance(model_or_id, cls.__model__):
model = model_or_id
if model not in cls.db.session:
model = cls.db.session.merge(model)
else:
raise TypeError(
'"model_or_id" must be of type int'
f' or {cls.__model__.__class__.__name__},'
f' not {model_or_id.__class__.__name__}'
)
cls.db.session.delete(model)
if not no_commit and cls.auto_commit:
cls.commit()
def __init__(self, model, *args, **kwargs):
self.__instance = model
class PermissionsMixin:
@classmethod
def grant(cls, model, user, permission, no_commit=False):
if not hasattr(model, 'grant'):
raise TypeError(
f'{model.__class__.__qualname__} does not support permissions'
)
model.grant(user, permission)
cls.db.session.add(model)
if not no_commit and cls.auto_commit:
cls.commit()
@classmethod
def clear_permissions(cls, model, except_for=[]):
model._permissions = [ # Clear existing permissions
x for x in model._permissions if x.user_id in except_for
] | src/db/services/generic.py | import os
from ..schema.utils.engine import DataBase
__all__ = ['db', 'GenericService', 'PermissionsMixin']
db_path = os.getenv('DATABASE_URI')
db = DataBase(db_path)
def make_key(**opts):
key = '-'.join([
f'{k}={v}'
for k, v in dict(opts).items()
])
return key
# Caching is currently disabled; too buggy.
# If this ends up being inefficient
# we should return to caching and fix it
class ServiceMetaClass(type):
@property
def db(cls):
return db
def cache(cls, key, value):
try:
cls.__cache
except AttributeError:
cls.__cache = {}
cls.__cache[key] = value
def cached(cls, key):
try:
cls.__cache
except AttributeError:
cls.__cache = {}
# Using 'get' instead of 'pop' here would enable caching
return cls.__cache.pop(key, None)
def clear_cache(cls):
cls.__cache = {}
class GenericService(metaclass=ServiceMetaClass):
__model__ = None
__unique_on__ = []
auto_commit = True
@classmethod
def commit(cls):
cls.clear_cache()
cls.db.session.commit()
@classmethod
def rollback(cls):
cls.clear_cache()
cls.db.session.rollback()
@classmethod
def create(cls, *, no_commit=False, check_unique=True, **kwargs):
opts = dict(kwargs)
for k in list(opts):
opt = opts.pop(k)
if hasattr(opt, 'id') and hasattr(cls.__model__, f'{k}_id'):
opts[f'{k}_id'] = opt.id
else:
opts[k] = opt
if check_unique and cls.__unique_on__:
check = {k: opts.get(k) for k in cls.__unique_on__}
existing_term = cls.get(**check)
if existing_term:
for k, v in kwargs.items():
if hasattr(existing_term, k):
setattr(existing_term, k, v)
cls.db.session.add(existing_term)
if not no_commit and cls.auto_commit:
cls.commit()
return existing_term
with cls.db.session.no_autoflush:
model = cls.__model__(**opts)
cls.db.session.add(model)
cls.cache(make_key(**opts), model)
if not no_commit and cls.auto_commit:
cls.commit()
return model
@classmethod
def get_all(cls, **kwargs):
key = make_key(**kwargs)
if not cls.cached(key):
with cls.db.session.no_autoflush:
model_query = cls.db.session.query(cls.__model__)
if kwargs:
model_query = model_query.filter_by(**kwargs)
models = model_query.all()
cls.cache(key, models)
ret = cls.cached(key) or []
if isinstance(ret, cls.__model__):
ret = [ret]
with cls.db.session.no_autoflush:
ret = [
x if (x in cls.db.session) else cls.db.session.merge(x)
for x in ret
]
return ret
@classmethod
def get(cls, model_id=None, **kwargs):
ret = None
query_options = kwargs.pop('query_options', None)
if model_id is not None:
if not isinstance(model_id, int):
raise TypeError(
'"model_id" must be of type int,'
f' not {model_id.__class__.__name__}'
)
if model_id > 0:
if not cls.cached(model_id):
with cls.db.session.no_autoflush:
q = cls.db.session.query(cls.__model__)
if query_options is not None:
q = q.options(query_options)
model = q.get(
model_id
)
cls.cache(model_id, model)
ret = cls.cached(model_id)
elif kwargs:
models = cls.get_all(**kwargs)
models.append(None)
ret = models[0]
if ret:
with cls.db.session.no_autoflush:
if ret not in cls.db.session:
ret = cls.db.session.merge(ret)
return ret
@classmethod
def get_or_create(cls, model_id=None, no_commit=False, **kwargs):
model = cls.get(model_id=model_id, **kwargs)
if model is None:
check_unique = kwargs.pop('check_unique', False)
model = cls.create(
no_commit=no_commit, check_unique=check_unique, **kwargs
)
with cls.db.session.no_autoflush:
if model not in cls.db.session:
model = cls.db.session.merge(model)
return model
@classmethod
def update(cls, model):
if isinstance(model, list):
models = model
else:
models = [model]
for model in models:
if not isinstance(model, cls.__model__):
raise TypeError(
'"model" must be of type'
f' {cls.__model__.__class__.__name__},'
f' not {model.__class__.__name__}'
)
if model in cls.db.session:
cls.db.session.expunge(model)
cls.db.session.add_all(models)
cls.commit()
@classmethod
def delete(cls, model_or_id, no_commit=False):
if isinstance(model_or_id, int):
model = cls.get(model_or_id)
elif isinstance(model_or_id, cls.__model__):
model = model_or_id
if model not in cls.db.session:
model = cls.db.session.merge(model)
else:
raise TypeError(
'"model_or_id" must be of type int'
f' or {cls.__model__.__class__.__name__},'
f' not {model_or_id.__class__.__name__}'
)
cls.db.session.delete(model)
if not no_commit and cls.auto_commit:
cls.commit()
def __init__(self, model, *args, **kwargs):
self.__instance = model
class PermissionsMixin:
@classmethod
def grant(cls, model, user, permission, no_commit=False):
if not hasattr(model, 'grant'):
raise TypeError(
f'{model.__class__.__qualname__} does not support permissions'
)
model.grant(user, permission)
cls.db.session.add(model)
if not no_commit and cls.auto_commit:
cls.commit()
@classmethod
def clear_permissions(cls, model, except_for=[]):
model._permissions = [ # Clear existing permissions
x for x in model._permissions if x.user_id in except_for
] | 0.502686 | 0.099821 |
import search
from math import(cos, pi)
sumner_map = search.UndirectedGraph(dict(
#Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
#Cottontown=dict(Portland=18),
#Fairfield=dict(Mitchellville=21, Portland=17),
#Mitchellville=dict(Portland=7, Fairfield=21),
#cost is in estimated minutes of drive time from
#https://distancefrom.co.uk
#https://distancefrom.co.uk/from-machynlleth-to-dolgellau for example
Newtown=dict(Machynlleth=46, Dolgellau=61, Conwy=113, Bangor=131, Caernarnfon=123, Betws_y_coed=110,
Pwllheli=117, Llangollen=63, Welshpool=22, Aberystwyth=70),
Machynlleth=dict(Newtown=46, Dolgellau=27, Conwy=100, Bangor=103, Caernarnfon=88, Betws_y_coed=74, Wrexham= 93,
Llangollen = 81, Welshpool= 57, Aberystwyth= 33),
Dolgellau=dict(Newtown=61, Machynlleth=27, Conwy=77, Bangor=81, Caernarnfon=65, Betws_y_coed=52, Wrexham=78,
Llangollen=63, Welshpool=57, Aberystwyth=60),
Conwy=dict(Newtown= 113, Machynlleth= 100, Dolgellau= 77, Bangor=24, Caernarnfon=31, Betws_y_coed=31, Wrexham=60,
Llangollen=72, Welshpool=96, Aberystwyth=133),
Bangor=dict(Newtown= 131, Machynlleth= 103, Dolgellau= 81, Conwy=24, Caernarnfon=18, Betws_y_coed=37, Wrexham=77,
Llangollen=86, Welshpool=113, Aberystwyth=136),
Caernarnfon=dict(Newtown= 123, Machynlleth= 88, Dolgellau= 65, Conwy=31, Bangor=18, Betws_y_coed=44, Wrexham=86,
Pwllheli=34, Llangollen=93, Welshpool=117, Aberystwyth=121),
Betws_y_coed=dict(Newtown= 110, Machynlleth= 74, Dolgellau= 52, Conwy=31, Bangor=37, Caernarnfon=44, Wrexham=67,
Pwllheli=61, Llangollen=51, Welshpool=89, Aberystwyth=108),
Wrexham=dict(Machynlleth= 93, Dolgellau= 78, Conwy=60, Bangor=77, Caernarnfon=86, Betws_y_coed=67,
Pwllheli=113, Llangollen=22, Aberystwyth=126),
Pwllheli=dict(Newtown= 117, Caernarnfon=34, Betws_y_coed=61,
Wrexham=113, Llangollen=96, Welshpool=111, Aberystwyth=114),
Llangollen=dict(Newtown= 63, Machynlleth= 81, Dolgellau= 63, Conwy=72, Bangor=86, Caernarnfon=93, Betws_y_coed=51,
Wrexham=22, Pwllheli=96, Welshpool=45, Aberystwyth=114),
Welshpool=dict(Newtown= 22, Machynlleth= 57, Dolgellau= 57, Conwy=96, Bangor=113, Caernarnfon=117, Betws_y_coed=89,
Pwllheli=111, Llangollen=45, Aberystwyth=90),
Aberystwyth=dict(Newtown= 70, Machynlleth= 33, Dolgellau= 60, Conwy=133, Bangor=136, Caernarnfon=121, Betws_y_coed=108,
Wrexham=126, Pwllheli=114, Llangollen=114, Welshpool=90)))
sumner_map.locations = dict(Newtown=(525121,33131), Machynlleth=(525903,38535),
Dolgellau=(527421,38844), Conwy=(532829,38295),
Bangor=(532274,41293), Caernarnfon=(531396,42739),
Betws_y_coed=(530931, 38010), Wrexham=(530430, 29925),
Pwllheli=(528888,44176), Llangollen=(529692,31717),
Welshpool=(526603,31464), Aberystwyth=(524153,40829))
#all instances run BestFS and A*
#sumner_puzzle yields better solution for BestFS than DFS, and BFS better than BestFS
sumner_puzzle = search.GraphProblem('Pwllheli', 'Conwy', sumner_map)
#sumner1_puzzle adds nothing new
sumner1_puzzle = search.GraphProblem('Pwllheli', 'Newtown', sumner_map)
#sumner2_puzzle yields same solution with UCS and A*, but A* expands fewer nodes
sumner2_puzzle = search.GraphProblem('Newtown', 'Wrexham', sumner_map)
sumner_puzzle.description = '''
An abbreviated map of Sumner County, TN.
This map is unique, to the best of my knowledge.
'''
#cannot for the life of me remember how to get the table to print every problem instance.
myPuzzles = [
sumner_puzzle,
sumner1_puzzle,
sumner2_puzzle
] | submissions/Johnson/puzzles.py | import search
from math import(cos, pi)
sumner_map = search.UndirectedGraph(dict(
#Portland=dict(Mitchellville=7, Fairfield=17, Cottontown=18),
#Cottontown=dict(Portland=18),
#Fairfield=dict(Mitchellville=21, Portland=17),
#Mitchellville=dict(Portland=7, Fairfield=21),
#cost is in estimated minutes of drive time from
#https://distancefrom.co.uk
#https://distancefrom.co.uk/from-machynlleth-to-dolgellau for example
Newtown=dict(Machynlleth=46, Dolgellau=61, Conwy=113, Bangor=131, Caernarnfon=123, Betws_y_coed=110,
Pwllheli=117, Llangollen=63, Welshpool=22, Aberystwyth=70),
Machynlleth=dict(Newtown=46, Dolgellau=27, Conwy=100, Bangor=103, Caernarnfon=88, Betws_y_coed=74, Wrexham= 93,
Llangollen = 81, Welshpool= 57, Aberystwyth= 33),
Dolgellau=dict(Newtown=61, Machynlleth=27, Conwy=77, Bangor=81, Caernarnfon=65, Betws_y_coed=52, Wrexham=78,
Llangollen=63, Welshpool=57, Aberystwyth=60),
Conwy=dict(Newtown= 113, Machynlleth= 100, Dolgellau= 77, Bangor=24, Caernarnfon=31, Betws_y_coed=31, Wrexham=60,
Llangollen=72, Welshpool=96, Aberystwyth=133),
Bangor=dict(Newtown= 131, Machynlleth= 103, Dolgellau= 81, Conwy=24, Caernarnfon=18, Betws_y_coed=37, Wrexham=77,
Llangollen=86, Welshpool=113, Aberystwyth=136),
Caernarnfon=dict(Newtown= 123, Machynlleth= 88, Dolgellau= 65, Conwy=31, Bangor=18, Betws_y_coed=44, Wrexham=86,
Pwllheli=34, Llangollen=93, Welshpool=117, Aberystwyth=121),
Betws_y_coed=dict(Newtown= 110, Machynlleth= 74, Dolgellau= 52, Conwy=31, Bangor=37, Caernarnfon=44, Wrexham=67,
Pwllheli=61, Llangollen=51, Welshpool=89, Aberystwyth=108),
Wrexham=dict(Machynlleth= 93, Dolgellau= 78, Conwy=60, Bangor=77, Caernarnfon=86, Betws_y_coed=67,
Pwllheli=113, Llangollen=22, Aberystwyth=126),
Pwllheli=dict(Newtown= 117, Caernarnfon=34, Betws_y_coed=61,
Wrexham=113, Llangollen=96, Welshpool=111, Aberystwyth=114),
Llangollen=dict(Newtown= 63, Machynlleth= 81, Dolgellau= 63, Conwy=72, Bangor=86, Caernarnfon=93, Betws_y_coed=51,
Wrexham=22, Pwllheli=96, Welshpool=45, Aberystwyth=114),
Welshpool=dict(Newtown= 22, Machynlleth= 57, Dolgellau= 57, Conwy=96, Bangor=113, Caernarnfon=117, Betws_y_coed=89,
Pwllheli=111, Llangollen=45, Aberystwyth=90),
Aberystwyth=dict(Newtown= 70, Machynlleth= 33, Dolgellau= 60, Conwy=133, Bangor=136, Caernarnfon=121, Betws_y_coed=108,
Wrexham=126, Pwllheli=114, Llangollen=114, Welshpool=90)))
sumner_map.locations = dict(Newtown=(525121,33131), Machynlleth=(525903,38535),
Dolgellau=(527421,38844), Conwy=(532829,38295),
Bangor=(532274,41293), Caernarnfon=(531396,42739),
Betws_y_coed=(530931, 38010), Wrexham=(530430, 29925),
Pwllheli=(528888,44176), Llangollen=(529692,31717),
Welshpool=(526603,31464), Aberystwyth=(524153,40829))
#all instances run BestFS and A*
#sumner_puzzle yields better solution for BestFS than DFS, and BFS better than BestFS
sumner_puzzle = search.GraphProblem('Pwllheli', 'Conwy', sumner_map)
#sumner1_puzzle adds nothing new
sumner1_puzzle = search.GraphProblem('Pwllheli', 'Newtown', sumner_map)
#sumner2_puzzle yields same solution with UCS and A*, but A* expands fewer nodes
sumner2_puzzle = search.GraphProblem('Newtown', 'Wrexham', sumner_map)
sumner_puzzle.description = '''
An abbreviated map of Sumner County, TN.
This map is unique, to the best of my knowledge.
'''
#cannot for the life of me remember how to get the table to print every problem instance.
myPuzzles = [
sumner_puzzle,
sumner1_puzzle,
sumner2_puzzle
] | 0.295636 | 0.156491 |
import datetime
import os
import sys
import yaml
import shutil
import argparse
import string

# This script appends the current number of commits given as an arg
# (presumably since some past base tag), and the git hash arg, for a final
# version like: 0.1.189-3f73a592
VERSION_BASE = "0.1"

parser = argparse.ArgumentParser()
parser.add_argument("-o", "--operator-name", type=str, help="Name of the operator", required=True)
parser.add_argument("-d", "--output-dir", type=str, help="Directory for the CSV generation", required=True)
parser.add_argument("-p", "--previous-version", type=str, help="Previous operator version, set in the CSV 'replaces' field", required=True)
parser.add_argument("-n", "--commit-number", type=str, help="Number of commits in the project (used for version generation)", required=True)
parser.add_argument("-c", "--commit-hash", type=str, help="Current commit hash (used for version generation)", required=True)
parser.add_argument("-i", "--operator-image", type=str, help="Operator container image to deploy", required=True)
args = parser.parse_args()

operator_name = args.operator_name
outdir = args.output_dir
prev_version = args.previous_version
git_num_commits = args.commit_number
git_hash = args.commit_hash
operator_image = args.operator_image

full_version = "%s.%s-%s" % (VERSION_BASE, git_num_commits, git_hash)
print("Generating CSV for version: %s" % full_version)

# Create the per-version output dir; makedirs creates the parent outdir too
# and exist_ok avoids the check-then-create race of exists()+mkdir().
version_dir = os.path.join(outdir, full_version)
os.makedirs(version_dir, exist_ok=True)

# Load the CSV template. safe_load is required since PyYAML 6 dropped
# Loader-less yaml.load, and it avoids arbitrary object construction.
with open('config/templates/csv-template.yaml', 'r') as stream:
    csv = yaml.safe_load(stream)

csv['spec']['customresourcedefinitions']['owned'] = []
# Copy all CRD files over to the bundle output dir:
crd_files = [f for f in os.listdir('deploy/crds') if f.endswith('_crd.yaml')]
for file_name in crd_files:
    full_path = os.path.join('deploy/crds', file_name)
    if not os.path.isfile(full_path):
        # Skip directories/specials entirely (the original only skipped the
        # copy and would then fail trying to open them).
        continue
    shutil.copy(full_path, os.path.join(version_dir, file_name))
    # Load the CRD so we can use attributes from it.
    with open(full_path, "r") as stream:
        crd = yaml.safe_load(stream)
    # Update the CSV template's customresourcedefinitions key.
    csv['spec']['customresourcedefinitions']['owned'].append(
        {
            "name": crd["metadata"]["name"],
            "description": crd["spec"]["names"]["kind"],
            "displayName": crd["spec"]["names"]["kind"],
            "kind": crd["spec"]["names"]["kind"],
            "version": crd["spec"]["version"],
        }
    )

csv['spec']['install']['spec']['clusterPermissions'] = []
# Add the operator role to the CSV:
with open('deploy/role.yaml', 'r') as stream:
    operator_role = yaml.safe_load(stream)
csv['spec']['install']['spec']['clusterPermissions'].append(
    {
        'rules': operator_role['rules'],
        'serviceAccountName': operator_name,
    })

# Add our deployment spec for the operator:
with open('deploy/operator.yaml', 'r') as stream:
    # There is only one YAML document in the operator deployment.
    operator_deployment = list(yaml.safe_load_all(stream))[0]
csv['spec']['install']['spec']['deployments'][0]['spec'] = operator_deployment['spec']
# Update the deployment to use the defined image:
csv['spec']['install']['spec']['deployments'][0]['spec']['template']['spec']['containers'][0]['image'] = operator_image

# Update the versions to include the git hash:
csv['metadata']['name'] = "{}.v{}".format(operator_name, full_version)
csv['spec']['version'] = full_version
csv['spec']['replaces'] = "{}.v{}".format(operator_name, prev_version)

# Set the CSV createdAt annotation. The trailing 'Z' means UTC, so take an
# aware UTC timestamp (the original formatted local time with a 'Z').
now = datetime.datetime.now(datetime.timezone.utc)
csv['metadata']['annotations']['createdAt'] = now.strftime("%Y-%m-%dT%H:%M:%SZ")

# Write the CSV to disk:
csv_filename = "{}.v{}.clusterserviceversion.yaml".format(operator_name, full_version)
csv_file = os.path.join(version_dir, csv_filename)
with open(csv_file, 'w') as outfile:
    yaml.dump(csv, outfile, default_flow_style=False)
print("Wrote ClusterServiceVersion: %s" % csv_file)
import datetime
import os
import sys
import yaml
import shutil
import argparse
import string

# This script appends the current number of commits given as an arg
# (presumably since some past base tag), and the git hash arg, for a final
# version like: 0.1.189-3f73a592
VERSION_BASE = "0.1"

parser = argparse.ArgumentParser()
parser.add_argument("-o", "--operator-name", type=str, help="Name of the operator", required=True)
parser.add_argument("-d", "--output-dir", type=str, help="Directory for the CSV generation", required=True)
parser.add_argument("-p", "--previous-version", type=str, help="Previous operator version, set in the CSV 'replaces' field", required=True)
parser.add_argument("-n", "--commit-number", type=str, help="Number of commits in the project (used for version generation)", required=True)
parser.add_argument("-c", "--commit-hash", type=str, help="Current commit hash (used for version generation)", required=True)
parser.add_argument("-i", "--operator-image", type=str, help="Operator container image to deploy", required=True)
args = parser.parse_args()

operator_name = args.operator_name
outdir = args.output_dir
prev_version = args.previous_version
git_num_commits = args.commit_number
git_hash = args.commit_hash
operator_image = args.operator_image

full_version = "%s.%s-%s" % (VERSION_BASE, git_num_commits, git_hash)
print("Generating CSV for version: %s" % full_version)

# Create the per-version output dir; makedirs creates the parent outdir too
# and exist_ok avoids the check-then-create race of exists()+mkdir().
version_dir = os.path.join(outdir, full_version)
os.makedirs(version_dir, exist_ok=True)

# Load the CSV template. safe_load is required since PyYAML 6 dropped
# Loader-less yaml.load, and it avoids arbitrary object construction.
with open('config/templates/csv-template.yaml', 'r') as stream:
    csv = yaml.safe_load(stream)

csv['spec']['customresourcedefinitions']['owned'] = []
# Copy all CRD files over to the bundle output dir:
crd_files = [f for f in os.listdir('deploy/crds') if f.endswith('_crd.yaml')]
for file_name in crd_files:
    full_path = os.path.join('deploy/crds', file_name)
    if not os.path.isfile(full_path):
        # Skip directories/specials entirely (the original only skipped the
        # copy and would then fail trying to open them).
        continue
    shutil.copy(full_path, os.path.join(version_dir, file_name))
    # Load the CRD so we can use attributes from it.
    with open(full_path, "r") as stream:
        crd = yaml.safe_load(stream)
    # Update the CSV template's customresourcedefinitions key.
    csv['spec']['customresourcedefinitions']['owned'].append(
        {
            "name": crd["metadata"]["name"],
            "description": crd["spec"]["names"]["kind"],
            "displayName": crd["spec"]["names"]["kind"],
            "kind": crd["spec"]["names"]["kind"],
            "version": crd["spec"]["version"],
        }
    )

csv['spec']['install']['spec']['clusterPermissions'] = []
# Add the operator role to the CSV:
with open('deploy/role.yaml', 'r') as stream:
    operator_role = yaml.safe_load(stream)
csv['spec']['install']['spec']['clusterPermissions'].append(
    {
        'rules': operator_role['rules'],
        'serviceAccountName': operator_name,
    })

# Add our deployment spec for the operator:
with open('deploy/operator.yaml', 'r') as stream:
    # There is only one YAML document in the operator deployment.
    operator_deployment = list(yaml.safe_load_all(stream))[0]
csv['spec']['install']['spec']['deployments'][0]['spec'] = operator_deployment['spec']
# Update the deployment to use the defined image:
csv['spec']['install']['spec']['deployments'][0]['spec']['template']['spec']['containers'][0]['image'] = operator_image

# Update the versions to include the git hash:
csv['metadata']['name'] = "{}.v{}".format(operator_name, full_version)
csv['spec']['version'] = full_version
csv['spec']['replaces'] = "{}.v{}".format(operator_name, prev_version)

# Set the CSV createdAt annotation. The trailing 'Z' means UTC, so take an
# aware UTC timestamp (the original formatted local time with a 'Z').
now = datetime.datetime.now(datetime.timezone.utc)
csv['metadata']['annotations']['createdAt'] = now.strftime("%Y-%m-%dT%H:%M:%SZ")

# Write the CSV to disk:
csv_filename = "{}.v{}.clusterserviceversion.yaml".format(operator_name, full_version)
csv_file = os.path.join(version_dir, csv_filename)
with open(csv_file, 'w') as outfile:
    yaml.dump(csv, outfile, default_flow_style=False)
print("Wrote ClusterServiceVersion: %s" % csv_file)
import torch
from .base_model import BaseModel
from . import networks
import pdb
class Pix2PixModel(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# pdb.set_trace()
# (Pdb) pp opt
# Namespace(batch_size=16, beta1=0.5, checkpoints_dir='./checkpoints', continue_train=False,
# crop_size=256, dataroot='dataset', dataset_mode='colorization', direction='AtoB',
# display_env='main', display_freq=400, display_id=1, display_ncols=4, display_port=8097,
# display_server='http://localhost', display_winsize=256, epoch='1000', epoch_count=200,
# gan_mode='vanilla', gpu_ids=[0], init_gain=0.02, init_type='normal', input_nc=1, isTrain=True,
# lambda_L1=100.0, load_iter=0, load_size=286, lr=0.0002, lr_decay_iters=50, lr_policy='linear',
# max_dataset_size=inf, model='colorization', n_layers_D=3, name='experiment_name', ndf=64,
# netD='basic',
# netG='unet_256', ngf=64, niter=500, niter_decay=500, no_dropout=False, no_flip=False, no_html=True,
# norm='batch', num_threads=4, output_nc=2, phase='train', pool_size=0, preprocess='resize_and_crop',
# print_freq=100, save_by_iter=False, save_epoch_freq=5, save_latest_freq=5000, serial_batches=False,
# suffix='', update_html_freq=1000, verbose=False)
# specify the training losses you want to print out
# The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display.
# The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk.
# The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type,
opt.init_gain, self.gpu_ids)
if self.isTrain:
# define a discriminator; conditional GANs need to take both input and output images;
# Therefore, channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type,
opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
# (Pdb) pp self.netG
# DataParallel(
# (module): UnetGenerator(
# (model): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): Conv2d(1, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (1): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): ReLU(inplace=True)
# (3): ConvTranspose2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): Dropout(p=0.5, inplace=False)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): Dropout(p=0.5, inplace=False)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): Dropout(p=0.5, inplace=False)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(512, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(256, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (2): ReLU(inplace=True)
# (3): ConvTranspose2d(128, 2, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
# (4): Tanh()
# )
# )
# )
# )
self.fake_B = self.netG(self.real_A) # G(A)
# pdb.set_trace()
# (Pdb) pp self.fake_B.size()
# torch.Size([10, 2, 256, 256])
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# (Pdb) pp self.netD
# DataParallel(
# (module): NLayerDiscriminator(
# (model): Sequential(
# (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
# (1): LeakyReLU(negative_slope=0.2, inplace=True)
# (2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (4): LeakyReLU(negative_slope=0.2, inplace=True)
# (5): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): LeakyReLU(negative_slope=0.2, inplace=True)
# (8): Conv2d(256, 512, kernel_size=(4, 4), stride=(1, 1), padding=(1, 1), bias=False)
# (9): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (10): LeakyReLU(negative_slope=0.2, inplace=True)
# (11): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), padding=(1, 1))
# )
# )
# )
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
# Fake; stop backprop to the generator by detaching fake_B
fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
# (Pdb) fake_AB.size()
# torch.Size([10, 3, 256, 256])
pred_fake = self.netD(fake_AB.detach())
# (Pdb) pred_fake.size()
# torch.Size([10, 1, 30, 30])
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# (Pdb) self.criterionGAN
# GANLoss(
# (loss): BCEWithLogitsLoss()
# )
# (Pdb) self.loss_D_fake
# tensor(0.7503, device='cuda:0', grad_fn=<BinaryCrossEntropyWithLogitsBackward>)
# Real
real_AB = torch.cat((self.real_A, self.real_B), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
self.optimizer_D.step() # update D's weights
# (Pdb) self.optimizer_D
# Adam (
# Parameter Group 0
# amsgrad: False
# betas: (0.5, 0.999)
# eps: 1e-08
# initial_lr: 0.0002
# lr: 0.0002
# weight_decay: 0
# )
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# (Pdb) pp pred_fake.size()
# torch.Size([10, 1, 30, 30])
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# (Pdb) pp self.opt.lambda_L1
# 100.0
# (Pdb) self.criterionL1
# L1Loss()
# (Pdb) self.loss_G_L1
# tensor(17.2437, device='cuda:0', grad_fn=<MulBackward0>)
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
self.optimizer_G.step() # udpate G's weights
# pdb.set_trace()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
# # self.set_requires_grad(self.netD, True) # enable backprop for D
# # self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
# # self.optimizer_D.step() # update D's weights
# update G(Pdb) self.loss_G_L1
# tensor(17.2437, device='cuda:0', grad_fn=<MulBackward0>)
# # self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
# # self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G() # calculate graidents for G
# # self.optimizer_G.step() # udpate G's weights | models/pix2pix_model.py | import torch
from .base_model import BaseModel
from . import networks
import pdb
class Pix2PixModel(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# pdb.set_trace()
# (Pdb) pp opt
# Namespace(batch_size=16, beta1=0.5, checkpoints_dir='./checkpoints', continue_train=False,
# crop_size=256, dataroot='dataset', dataset_mode='colorization', direction='AtoB',
# display_env='main', display_freq=400, display_id=1, display_ncols=4, display_port=8097,
# display_server='http://localhost', display_winsize=256, epoch='1000', epoch_count=200,
# gan_mode='vanilla', gpu_ids=[0], init_gain=0.02, init_type='normal', input_nc=1, isTrain=True,
# lambda_L1=100.0, load_iter=0, load_size=286, lr=0.0002, lr_decay_iters=50, lr_policy='linear',
# max_dataset_size=inf, model='colorization', n_layers_D=3, name='experiment_name', ndf=64,
# netD='basic',
# netG='unet_256', ngf=64, niter=500, niter_decay=500, no_dropout=False, no_flip=False, no_html=True,
# norm='batch', num_threads=4, output_nc=2, phase='train', pool_size=0, preprocess='resize_and_crop',
# print_freq=100, save_by_iter=False, save_epoch_freq=5, save_latest_freq=5000, serial_batches=False,
# suffix='', update_html_freq=1000, verbose=False)
# specify the training losses you want to print out
# The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display.
# The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B']
# specify the models you want to save to the disk.
# The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
self.model_names = ['G', 'D']
else: # during test time, only load G
self.model_names = ['G']
# define networks (both generator and discriminator)
self.netG = networks.define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.norm, not opt.no_dropout, opt.init_type,
opt.init_gain, self.gpu_ids)
if self.isTrain:
# define a discriminator; conditional GANs need to take both input and output images;
# Therefore, channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type,
opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
# (Pdb) pp self.netG
# DataParallel(
# (module): UnetGenerator(
# (model): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): Conv2d(1, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (1): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(256, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (3): UnetSkipConnectionBlock(
# (model): Sequential(
# (0): LeakyReLU(negative_slope=0.2, inplace=True)
# (1): Conv2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (2): ReLU(inplace=True)
# (3): ConvTranspose2d(512, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (4): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): Dropout(p=0.5, inplace=False)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): Dropout(p=0.5, inplace=False)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 512, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): Dropout(p=0.5, inplace=False)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(1024, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(512, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (4): ReLU(inplace=True)
# (5): ConvTranspose2d(256, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# )
# )
# (2): ReLU(inplace=True)
# (3): ConvTranspose2d(128, 2, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
# (4): Tanh()
# )
# )
# )
# )
self.fake_B = self.netG(self.real_A) # G(A)
# pdb.set_trace()
# (Pdb) pp self.fake_B.size()
# torch.Size([10, 2, 256, 256])
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# (Pdb) pp self.netD
# DataParallel(
# (module): NLayerDiscriminator(
# (model): Sequential(
# (0): Conv2d(3, 64, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1))
# (1): LeakyReLU(negative_slope=0.2, inplace=True)
# (2): Conv2d(64, 128, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (3): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (4): LeakyReLU(negative_slope=0.2, inplace=True)
# (5): Conv2d(128, 256, kernel_size=(4, 4), stride=(2, 2), padding=(1, 1), bias=False)
# (6): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (7): LeakyReLU(negative_slope=0.2, inplace=True)
# (8): Conv2d(256, 512, kernel_size=(4, 4), stride=(1, 1), padding=(1, 1), bias=False)
# (9): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
# (10): LeakyReLU(negative_slope=0.2, inplace=True)
# (11): Conv2d(512, 1, kernel_size=(4, 4), stride=(1, 1), padding=(1, 1))
# )
# )
# )
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
# Fake; stop backprop to the generator by detaching fake_B
fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
# (Pdb) fake_AB.size()
# torch.Size([10, 3, 256, 256])
pred_fake = self.netD(fake_AB.detach())
# (Pdb) pred_fake.size()
# torch.Size([10, 1, 30, 30])
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# (Pdb) self.criterionGAN
# GANLoss(
# (loss): BCEWithLogitsLoss()
# )
# (Pdb) self.loss_D_fake
# tensor(0.7503, device='cuda:0', grad_fn=<BinaryCrossEntropyWithLogitsBackward>)
# Real
real_AB = torch.cat((self.real_A, self.real_B), 1)
pred_real = self.netD(real_AB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
self.optimizer_D.step() # update D's weights
# (Pdb) self.optimizer_D
# Adam (
# Parameter Group 0
# amsgrad: False
# betas: (0.5, 0.999)
# eps: 1e-08
# initial_lr: 0.0002
# lr: 0.0002
# weight_decay: 0
# )
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
self.optimizer_G.zero_grad() # set G's gradients to zero
# First, G(A) should fake the discriminator
fake_AB = torch.cat((self.real_A, self.fake_B), 1)
pred_fake = self.netD(fake_AB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# (Pdb) pp pred_fake.size()
# torch.Size([10, 1, 30, 30])
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# (Pdb) pp self.opt.lambda_L1
# 100.0
# (Pdb) self.criterionL1
# L1Loss()
# (Pdb) self.loss_G_L1
# tensor(17.2437, device='cuda:0', grad_fn=<MulBackward0>)
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
self.optimizer_G.step() # udpate G's weights
# pdb.set_trace()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
# # self.set_requires_grad(self.netD, True) # enable backprop for D
# # self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
# # self.optimizer_D.step() # update D's weights
# update G(Pdb) self.loss_G_L1
# tensor(17.2437, device='cuda:0', grad_fn=<MulBackward0>)
# # self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
# # self.optimizer_G.zero_grad() # set G's gradients to zero
self.backward_G() # calculate graidents for G
# # self.optimizer_G.step() # udpate G's weights | 0.905119 | 0.375392 |
from numpy import *
import numpy as np
import os
import torch
import torch.nn as nn
from sklearn.metrics import f1_score, precision_score, recall_score
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from tqdm import tqdm
import logging # 引入logging模块
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import warnings
warnings.filterwarnings("ignore")
from dataset import Model10DataSet
from Model.DPCN import DPCN_vanilla
from params import Args
import matplotlib.pyplot as plt
from numpy import *
from time import strftime, localtime
class AverageMeter(object):
    """Running statistics for a streaming scalar (e.g. per-batch loss).

    Tracks the most recent value (`val`), the weighted total (`sum`),
    the number of observations (`count`) and their mean (`avg`).
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero every statistic, ready for a fresh epoch."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in `val`, weighted as `n` observations, and refresh the mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def train():
    """Train DPCN_vanilla on Model10, evaluating and checkpointing periodically.

    Side effects: creates ./weights for checkpoints, writes TensorBoard logs
    to ./summary, saves a checkpoint every Args.save_freq epochs and dumps
    the metric histories to text files at the end of the run.
    """
    pwd = os.getcwd()
    weights_dir = os.path.join(pwd, 'weights')
    if not os.path.exists(weights_dir):
        os.makedirs(weights_dir)
    logging.info('Loading Dataset...')
    train_dataset = Model10DataSet(train=True)
    test_dataset = Model10DataSet(train=False)
    logging.info('train_dataset: {}'.format(len(train_dataset)))
    logging.info('test_dataset: {}'.format(len(test_dataset)))
    logging.info('Done...\n')
    logging.info('Creating DataLoader...')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=Args.batch_size, shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=Args.batch_size, shuffle=False, num_workers=2)
    logging.info('Done...\n')
    logging.info('Checking gpu...')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.is_available():
        logging.info('gpu available: {}'.format(torch.cuda.device_count()))
        logging.info('current gpu: {}'.format(torch.cuda.get_device_name(0)))
        logging.info('gpu capability: {}'.format(torch.cuda.get_device_capability(0)))
    else:
        logging.info('gpu not available, running on cpu instead.')
    logging.info('Done...\n')
    logging.info('Create SummaryWriter in ./summary')
    summary_writer = SummaryWriter(comment='DPCN', log_dir='summary')
    logging.info('Done...\n')
    logging.info('Creating Model...')
    # BUG FIX: the model used to be moved to Args.device while every batch is
    # moved to the locally detected `device`; keep both on the same device so
    # the forward pass cannot hit a cpu/cuda mismatch.
    model = DPCN_vanilla(num_classes=10).to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    schedular = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    logging.info('Done...\n')
    logging.info('Start training...')
    # Metric histories (epoch_losses was initialized twice before; once is enough).
    epoch_losses = []
    epoch_acc = []
    epoch_ma_f1 = []
    epoch_precision = []
    epoch_recall = []
    for epoch in range(1, Args.num_epochs+1):
        logging.info("--------Epoch {}--------".format(epoch))
        tqdm_batch = tqdm(train_loader, desc='Epoch-{} training'.format(epoch))
        # train
        print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
        model.train()
        loss_tracker = AverageMeter()
        for batch_idx, (data, label) in enumerate(tqdm_batch):
            data, label = data.to(device), label.to(device)
            out = model(data)
            loss = criterion(out, label.view(-1).long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_tracker.update(loss.item(), label.size(0))
        tqdm_batch.close()
        # BUG FIX: since PyTorch 1.1 the LR scheduler must be stepped AFTER the
        # epoch's optimizer updates; stepping first skipped the initial LR phase.
        schedular.step()
        logging.info('Loss: {:.4f} ({:.4f})'.format(loss_tracker.val, loss_tracker.avg))
        summary_writer.add_scalar('loss', loss_tracker.avg, epoch)
        epoch_losses.append(loss_tracker.avg)
        if epoch % Args.test_freq == 0:
            tqdm_batch = tqdm(test_loader, desc='Epoch-{} testing'.format(epoch))
            model.eval()
            test_pred, test_label = [], []
            correct_cnt = 0
            total_cnt = 0
            with torch.no_grad():  # inference only: no autograd bookkeeping
                for batch_idx, (data, label) in enumerate(tqdm_batch):
                    data, label = data.to(device), label.to(device)
                    out = model(data)
                    pred_choice = out.max(1)[1]
                    label = label.long()
                    correct_cnt += pred_choice.eq(label.view(-1)).sum().item()
                    total_cnt += label.size(0)
                    pred = torch.max(out, 1)[1].view(-1)
                    test_pred += pred.detach().cpu().numpy().tolist()
                    test_label += label.cpu().numpy().tolist()
            print('correct_cnt: {}, total_cnt: {}'.format(correct_cnt, total_cnt))
            acc = correct_cnt / total_cnt
            logging.info('Accuracy: {:.4f}'.format(acc))
            epoch_acc.append(acc)
            summary_writer.add_scalar('acc', acc, epoch)
            # Macro-averaged metrics over all classes.
            precision = precision_score(test_label, test_pred, average='macro')
            recall = recall_score(test_label, test_pred, average='macro')
            ma_f1 = f1_score(test_label, test_pred, average='macro')
            epoch_ma_f1.append(ma_f1)
            epoch_precision.append(precision)
            epoch_recall.append(recall)
            print('precision: {:.4f}'.format(precision))
            print('recall: {:.4f}'.format(recall))
            print('ma_f1: {:.4f}'.format(ma_f1))
            tqdm_batch.close()
        if epoch % Args.save_freq == 0:
            ckpt_name = os.path.join(weights_dir, 'DPCN_{0}.pth'.format(epoch))
            torch.save(model.state_dict(), ckpt_name)
            logging.info('model saved in {}'.format(ckpt_name))
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
    summary_writer.close()
    # TODO(review): hard-coded Windows output paths; parameterize before
    # running anywhere other than the original author's machine.
    np.savetxt(r'F:\DPCN\loss.txt',epoch_losses,fmt='%.4f')
    np.savetxt(r'F:\DPCN\acc.txt', epoch_acc,fmt='%.4f')
    np.savetxt(r'F:\DPCN\precision.txt', epoch_precision,fmt='%.4f')
    np.savetxt(r'F:\DPCN\recall.txt', epoch_recall,fmt='%.4f')
    np.savetxt(r'F:\DPCN\ma_f1.txt', epoch_ma_f1,fmt='%.4f')
if __name__ == '__main__':
    # Kick off a full training run when executed as a script.
    train()
import numpy as np
import os
import torch
import torch.nn as nn
from sklearn.metrics import f1_score, precision_score, recall_score
from torch.optim import lr_scheduler
from tensorboardX import SummaryWriter
from tqdm import tqdm
import logging # 引入logging模块
logging.basicConfig(level = logging.INFO,format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
import warnings
warnings.filterwarnings("ignore")
from dataset import Model10DataSet
from Model.DPCN import DPCN_vanilla
from params import Args
import matplotlib.pyplot as plt
from numpy import *
from time import strftime, localtime
class AverageMeter(object):
    """Tracks the latest value and the weighted running average of a scalar."""
    def __init__(self):
        self.reset()
    def reset(self):
        # Clear all accumulated statistics.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0
    def update(self, val, n=1):
        # Record `val` observed `n` times; `avg` is the weighted mean so far.
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def train():
    """Train DPCN_vanilla on Model10, with periodic testing and checkpointing.

    Creates ./weights for checkpoints and ./summary for TensorBoard logs;
    dumps metric curves to hard-coded F:\\DPCN\\*.txt paths at the end.
    """
    pwd = os.getcwd()
    weights_dir = os.path.join(pwd, 'weights')
    if not os.path.exists(weights_dir):
        os.makedirs(weights_dir)
    logging.info('Loading Dataset...')
    train_dataset = Model10DataSet(train=True)
    test_dataset = Model10DataSet(train=False)
    logging.info('train_dataset: {}'.format(len(train_dataset)))
    logging.info('test_dataset: {}'.format(len(test_dataset)))
    logging.info('Done...\n')
    logging.info('Creating DataLoader...')
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=Args.batch_size, shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=Args.batch_size, shuffle=False, num_workers=2)
    logging.info('Done...\n')
    logging.info('Checking gpu...')
    # Pick cuda when available, else fall back to cpu.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if torch.cuda.is_available():
        logging.info('gpu available: {}'.format(torch.cuda.device_count()))
        logging.info('current gpu: {}'.format(torch.cuda.get_device_name(0)))
        logging.info('gpu capability: {}'.format(torch.cuda.get_device_capability(0)))
    else:
        logging.info('gpu not available, running on cpu instead.')
    logging.info('Done...\n')
    logging.info('Create SummaryWriter in ./summary')
    summary_writer = SummaryWriter(comment='DPCN', log_dir='summary')
    logging.info('Done...\n')
    logging.info('Creating Model...')
    # NOTE(review): the model goes to Args.device while batches go to the
    # locally detected `device` below -- confirm these always agree.
    model = DPCN_vanilla(num_classes=10).to(Args.device)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    schedular = lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
    logging.info('Done...\n')
    epoch_losses = []
    epoch_acc = []
    logging.info('Start training...')
    # NOTE(review): epoch_losses is re-bound here; the binding above is dead.
    epoch_losses = []
    epoch_ma_f1 = []
    epoch_precision = []
    epoch_recall = []
    for epoch in range(1, Args.num_epochs+1):
        logging.info("--------Epoch {}--------".format(epoch))
        # NOTE(review): stepping the scheduler before the epoch's optimizer
        # updates predates the PyTorch 1.1 ordering convention -- verify.
        schedular.step()
        tqdm_batch = tqdm(train_loader, desc='Epoch-{} training'.format(epoch))
        # train
        print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
        model.train()
        loss_tracker = AverageMeter()
        for batch_idx, (data, label) in enumerate(tqdm_batch):
            data, label = data.to(device), label.to(device)
            out = model(data)
            loss = criterion(out, label.view(-1).long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_tracker.update(loss.item(), label.size(0))
        tqdm_batch.close()
        logging.info('Loss: {:.4f} ({:.4f})'.format(loss_tracker.val, loss_tracker.avg))
        summary_writer.add_scalar('loss', loss_tracker.avg, epoch)
        epoch_losses.append(loss_tracker.avg)
        if epoch % Args.test_freq == 0:
            # Evaluation pass: accuracy plus macro precision/recall/F1.
            tqdm_batch = tqdm(test_loader, desc='Epoch-{} testing'.format(epoch))
            model.eval()
            test_pred, test_label = [], []
            correct_cnt = 0
            total_cnt = 0
            with torch.no_grad():
                for batch_idx, (data, label) in enumerate(tqdm_batch):
                    data, label = data.to(device), label.to(device)
                    out = model(data)
                    pred_choice = out.max(1)[1]
                    label = label.long()
                    correct_cnt += pred_choice.eq(label.view(-1)).sum().item()
                    total_cnt += label.size(0)
                    pred = torch.max(out, 1)[1].view(-1)
                    test_pred += pred.detach().cpu().numpy().tolist()
                    test_label += label.cpu().numpy().tolist()
            print('correct_cnt: {}, total_cnt: {}'.format(correct_cnt, total_cnt))
            acc = correct_cnt / total_cnt
            logging.info('Accuracy: {:.4f}'.format(acc))
            epoch_acc.append(acc)
            summary_writer.add_scalar('acc', acc, epoch)
            precision = precision_score(test_label, test_pred, average='macro')
            recall = recall_score(test_label, test_pred, average='macro')
            ma_f1 = f1_score(test_label, test_pred, average='macro')
            epoch_ma_f1.append(ma_f1)
            epoch_precision.append(precision)
            epoch_recall.append(recall)
            print('precision: {:.4f}'.format(precision))
            print('recall: {:.4f}'.format(recall))
            print('ma_f1: {:.4f}'.format(ma_f1))
            tqdm_batch.close()
        if epoch % Args.save_freq == 0:
            # Periodic checkpoint of the raw state_dict.
            ckpt_name = os.path.join(weights_dir, 'DPCN_{0}.pth'.format(epoch))
            torch.save(model.state_dict(), ckpt_name)
            logging.info('model saved in {}'.format(ckpt_name))
    print(strftime("%Y-%m-%d %H:%M:%S", localtime()))
    summary_writer.close()
    # NOTE(review): hard-coded Windows output paths below.
    np.savetxt(r'F:\DPCN\loss.txt',epoch_losses,fmt='%.4f')
    np.savetxt(r'F:\DPCN\acc.txt', epoch_acc,fmt='%.4f')
    np.savetxt(r'F:\DPCN\precision.txt', epoch_precision,fmt='%.4f')
    np.savetxt(r'F:\DPCN\recall.txt', epoch_recall,fmt='%.4f')
    np.savetxt(r'F:\DPCN\ma_f1.txt', epoch_ma_f1,fmt='%.4f')
if __name__ == '__main__':
    # Script entry point: run a full training session.
    train()
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdSHTConf(object):
    """
    unix command line handler

    Parses the SHT configuration options: one or both I2C addresses may be
    set, or the whole configuration may be deleted -- but not both at once.
    """

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def __addr_str(cls, addr):
        """Format an I2C address as '0xNN'; None passes through unchanged."""
        return None if addr is None else "0x%02x" % addr

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self):
        """Declare the command-line interface and consume sys.argv immediately."""
        self.__parser = optparse.OptionParser(usage="%prog [{ [-i INT_ADDR] [-e EXT_ADDR] | -d }] [-v]",
                                              version="%prog 1.0")

        # optional...
        self.__parser.add_option("--int-addr", "-i", type="int", nargs=1, action="store", dest="int_addr",
                                 help="set I2C address of SHT in A4 package")

        self.__parser.add_option("--ext-addr", "-e", type="int", nargs=1, action="store", dest="ext_addr",
                                 help="set I2C address of SHT exposed to air")

        self.__parser.add_option("--delete", "-d", action="store_true", dest="delete", default=False,
                                 help="delete the SHT configuration")

        self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
                                 help="report narrative to stderr")

        self.__opts, self.__args = self.__parser.parse_args()

    # ----------------------------------------------------------------------------------------------------------------

    def is_valid(self):
        """Setting addresses and deleting the configuration are mutually exclusive."""
        return not (self.set() and self.delete)

    def is_complete(self):
        """True when both the internal and external addresses were supplied."""
        return self.int_addr is not None and self.ext_addr is not None

    def set(self):
        """True when at least one address option was supplied."""
        return self.int_addr is not None or self.ext_addr is not None

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def int_addr(self):
        return self.__opts.int_addr

    @property
    def ext_addr(self):
        return self.__opts.ext_addr

    @property
    def delete(self):
        return self.__opts.delete

    @property
    def verbose(self):
        return self.__opts.verbose

    # ----------------------------------------------------------------------------------------------------------------

    def print_help(self, file):
        self.__parser.print_help(file)

    def __str__(self, *args, **kwargs):
        return "CmdSHTConf:{int_addr:%s, ext_addr:%s, delete:%s, verbose:%s}" % \
               (CmdSHTConf.__addr_str(self.int_addr), CmdSHTConf.__addr_str(self.ext_addr),
                self.delete, self.verbose)
# --------------------------------------------------------------------------------------------------------------------
class CmdSHTConf(object):
    """
    unix command line handler
    """

    # ----------------------------------------------------------------------------------------------------------------

    @classmethod
    def __addr_str(cls, addr):
        # hex-format the I2C address when present
        if addr is None:
            return None
        return "0x%02x" % addr

    # ----------------------------------------------------------------------------------------------------------------

    def __init__(self):
        """Build the optparse parser and parse sys.argv right away."""
        parser = optparse.OptionParser(usage="%prog [{ [-i INT_ADDR] [-e EXT_ADDR] | -d }] [-v]",
                                       version="%prog 1.0")

        # optional...
        parser.add_option("--int-addr", "-i", type="int", nargs=1, action="store", dest="int_addr",
                          help="set I2C address of SHT in A4 package")
        parser.add_option("--ext-addr", "-e", type="int", nargs=1, action="store", dest="ext_addr",
                          help="set I2C address of SHT exposed to air")
        parser.add_option("--delete", "-d", action="store_true", dest="delete", default=False,
                          help="delete the SHT configuration")
        parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
                          help="report narrative to stderr")

        self.__parser = parser
        self.__opts, self.__args = parser.parse_args()

    # ----------------------------------------------------------------------------------------------------------------

    def is_valid(self):
        # address options and --delete are mutually exclusive
        if self.set() and self.delete:
            return False
        return True

    def is_complete(self):
        # both addresses are required for a complete configuration
        if self.int_addr is None or self.ext_addr is None:
            return False
        return True

    def set(self):
        # at least one address option was supplied
        return self.int_addr is not None or self.ext_addr is not None

    # ----------------------------------------------------------------------------------------------------------------

    @property
    def int_addr(self):
        return self.__opts.int_addr

    @property
    def ext_addr(self):
        return self.__opts.ext_addr

    @property
    def delete(self):
        return self.__opts.delete

    @property
    def verbose(self):
        return self.__opts.verbose

    # ----------------------------------------------------------------------------------------------------------------

    def print_help(self, file):
        self.__parser.print_help(file)

    def __str__(self, *args, **kwargs):
        return "CmdSHTConf:{int_addr:%s, ext_addr:%s, delete:%s, verbose:%s}" % \
               (CmdSHTConf.__addr_str(self.int_addr), CmdSHTConf.__addr_str(self.ext_addr),
                self.delete, self.verbose)
import os
import pickle
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix0(y_true, y_pred,
                           classes=None,
                           normalize=False,
                           title=None,
                           save_to=None):
    """Draw a confusion-matrix heatmap with per-cell value annotations.

    Parameters:
        y_true, y_pred: label sequences as accepted by sklearn.
        classes: optional display names; when given it must be indexable by a
            label array (e.g. np.ndarray), since only labels present in the
            data are kept.
        normalize: when True, each row is scaled to fractions of its true class.
        title: plot title; a default is chosen based on `normalize`.
        save_to: when set, the figure is written to this path and closed.

    Returns the matplotlib Axes the matrix was drawn on.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    if classes is None:
        classes = unique_labels(y_true, y_pred)
    else:
        classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # BUG FIX: rows whose true class never occurs sum to zero; the plain
        # division filled them with NaNs.  Masked divide with an explicit
        # `out` keeps those rows at 0 (without `out`, skipped entries would
        # be uninitialized memory).
        cm = cm.astype('float')
        row_sums = cm.sum(axis=1)[:, np.newaxis]
        cm = np.divide(cm, row_sums, out=np.zeros_like(cm), where=row_sums != 0)
    fig, axes = plt.subplots(figsize=(32, 32), dpi=200)
    axes.imshow(cm, interpolation='nearest', cmap=plt.cm.viridis)  # Spectral)
    axes.set_title(title)
    tick_marks = np.arange(len(classes))
    axes.set_xticks(tick_marks)
    axes.set_yticks(tick_marks)
    axes.set_xticklabels(classes, rotation=90)
    axes.set_yticklabels(classes)
    # Annotate every cell; dark text on bright cells, white text on dark ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        axes.text(j, i, '{:.02f}'.format(cm[i, j]),
                  fontsize='x-small',
                  horizontalalignment="center",
                  verticalalignment="center",
                  color="xkcd:midnight" if cm[i, j] > thresh else "white")
        if i == j:
            # Outline the diagonal (correct predictions).
            axes.add_patch(Rectangle((i - .5, j - .5), 1, 1, fill=False, edgecolor='black', lw=2))
    axes.set_ylabel('True label')
    axes.set_xlabel('Predicted label')
    # Nudge the y-limits; works around matplotlib 3.1.x clipping the
    # first/last matrix rows -- TODO confirm still needed on the pinned version.
    bottom, top = axes.get_ylim()
    axes.set_ylim(bottom + 0.5, top - 0.5)
    if save_to:
        plt.savefig(save_to)
        plt.close()
    return axes
def plot_confusion_matrix(y_true, y_pred,
                          classes=None,
                          normalize=False,
                          title=None,
                          save_to=None,
                          cmap=plt.cm.viridis):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Returns the matplotlib Axes; when `save_to` is set the figure is also
    written to that path and closed.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    if classes is None:
        classes = unique_labels(y_true, y_pred)
    # else:
    #     classes = classes[unique_labels(y_true, y_pred)]
    #     classes = [c.split('__')[1] for c in classes]
    if normalize:
        # NOTE(review): np.divide with `where=` but no `out=` leaves the
        # skipped (zero) entries uninitialized per the numpy docs -- verify
        # zero rows cannot occur here, or add out=np.zeros_like(cm).
        cm = cm.astype('float')
        cm = np.divide(cm, cm.sum(axis=1)[:, np.newaxis], where=cm != 0)
    fig, ax = plt.subplots(figsize=(32, 32), dpi=200)
    # Global font bump; note this mutates matplotlib state for later plots too.
    plt.rcParams.update({'font.size': 36})
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes,
           yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # Dark text on bright cells, white text on dark cells.
            ax.text(j, i, format(cm[i, j], fmt),
                    fontsize='x-small',
                    horizontalalignment="center",
                    verticalalignment="center",
                    color="xkcd:midnight" if cm[i, j] > thresh else "white")
            if i == j:
                # Outline the diagonal (correct predictions).
                ax.add_patch(Rectangle((i - .5, j - .5), 1, 1, fill=False, edgecolor='black', lw=1))
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    # Nudge the y-limits; matplotlib 3.1.x clips the first/last matrix rows.
    bottom, top = ax.get_ylim()
    ax.set_ylim(bottom + 0.5, top - 0.5)
    fig.tight_layout()
    if save_to:
        plt.savefig(save_to)
        plt.close()
    return ax
if __name__ == '__main__':
    # Smoke test: render a random 10-class confusion matrix to disk.
    y_true = np.random.randint(0, 10, size=1000)
    y_pred = np.random.randint(0, 10, size=1000)
    plot_confusion_matrix(y_true, y_pred, save_to='/workspace/cm.png')
import pickle
import itertools
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Rectangle
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
def plot_confusion_matrix0(y_true, y_pred,
                           classes=None,
                           normalize=False,
                           title=None,
                           save_to=None):
    """Draw a confusion-matrix heatmap with per-cell annotations.

    Returns the matplotlib Axes; when `save_to` is set the figure is also
    written to that path and closed.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    if classes is None:
        classes = unique_labels(y_true, y_pred)
    else:
        # NOTE(review): requires `classes` to be indexable by a label array
        # (np.ndarray), not a plain list -- confirm against callers.
        classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        # NOTE(review): rows whose true class never occurs sum to 0 and this
        # division fills them with NaNs; the sibling plot_confusion_matrix
        # guards against it.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    fig, axes = plt.subplots(figsize=(32, 32), dpi=200)
    axes.imshow(cm, interpolation='nearest', cmap=plt.cm.viridis)  # Spectral)
    axes.set_title(title)
    tick_marks = np.arange(len(classes))
    axes.set_xticks(tick_marks)
    axes.set_yticks(tick_marks)
    axes.set_xticklabels(classes, rotation=90)
    axes.set_yticklabels(classes)
    # Annotate every cell; dark text on bright cells, white text on dark ones.
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        axes.text(j, i, '{:.02f}'.format(cm[i, j]),
                  fontsize='x-small',
                  horizontalalignment="center",
                  verticalalignment="center",
                  color="xkcd:midnight" if cm[i, j] > thresh else "white")
        if i == j:
            # Outline the diagonal (correct predictions).
            axes.add_patch(Rectangle((i - .5, j - .5), 1, 1, fill=False, edgecolor='black', lw=2))
    axes.set_ylabel('True label')
    axes.set_xlabel('Predicted label')
    # Nudge the y-limits; matplotlib 3.1.x clips the first/last matrix rows.
    bottom, top = axes.get_ylim()
    axes.set_ylim(bottom + 0.5, top - 0.5)
    if save_to:
        plt.savefig(save_to)
        plt.close()
    return axes
def plot_confusion_matrix(y_true, y_pred,
                          classes=None,
                          normalize=False,
                          title=None,
                          save_to=None,
                          cmap=plt.cm.viridis):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Returns the matplotlib Axes; when `save_to` is set the figure is also
    written to that path and closed.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    if classes is None:
        classes = unique_labels(y_true, y_pred)
    # else:
    #     classes = classes[unique_labels(y_true, y_pred)]
    #     classes = [c.split('__')[1] for c in classes]
    if normalize:
        # NOTE(review): np.divide with `where=` but no `out=` leaves the
        # skipped (zero) entries uninitialized per the numpy docs -- verify.
        cm = cm.astype('float')
        cm = np.divide(cm, cm.sum(axis=1)[:, np.newaxis], where=cm != 0)
    fig, ax = plt.subplots(figsize=(32, 32), dpi=200)
    # Global font bump; mutates matplotlib state for subsequent plots too.
    plt.rcParams.update({'font.size': 36})
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    # ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes,
           yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            # Dark text on bright cells, white text on dark cells.
            ax.text(j, i, format(cm[i, j], fmt),
                    fontsize='x-small',
                    horizontalalignment="center",
                    verticalalignment="center",
                    color="xkcd:midnight" if cm[i, j] > thresh else "white")
            if i == j:
                # Outline the diagonal (correct predictions).
                ax.add_patch(Rectangle((i - .5, j - .5), 1, 1, fill=False, edgecolor='black', lw=1))
    ax.set_ylabel('True label')
    ax.set_xlabel('Predicted label')
    # Nudge the y-limits; matplotlib 3.1.x clips the first/last matrix rows.
    bottom, top = ax.get_ylim()
    ax.set_ylim(bottom + 0.5, top - 0.5)
    fig.tight_layout()
    if save_to:
        plt.savefig(save_to)
        plt.close()
    return ax
if __name__ == '__main__':
    # Smoke test: plot a random 10-class confusion matrix.
    truth = np.random.randint(0, 10, size=1000)
    preds = np.random.randint(0, 10, size=1000)
    plot_confusion_matrix(truth, preds, save_to='/workspace/cm.png')
from flask import Blueprint, request, jsonify, current_app
from main import db
from main.models.stock import IndexComponentSH50, IndexComponentHS300, Overview
from crawler.eastmoney.stock.index_component import get_sh50_component, get_hs300_component
from crawler.eastmoney.stock.data import get_code_hxtc
from main.stock_overview import upsert_overview
bp = Blueprint('stock_index_component', __name__)
def get_pagination(model, page, page_size):
    """Return one page of `model` rows joined with their stock overview.

    Each item carries id/code/sync_time from the component row plus
    name/plate/business_scope from the outer-joined Overview (None when no
    overview exists yet).  The dict also reports page/pages/per_page/total.
    """
    data = {}
    # Pagination attributes:
    # https://flask-sqlalchemy.palletsprojects.com/en/2.x/api/#flask_sqlalchemy.Pagination
    pagination = db.session.query(model, Overview).outerjoin(
        Overview, model.code == Overview.code).order_by(
        model.id).paginate(page=page, per_page=page_size, error_out=False)
    if pagination:
        items = []
        # Each row is a (component, overview-or-None) tuple from the outer join.
        for component, overview in pagination.items:
            items.append({
                'id': component.id,
                'code': component.code,
                'name': overview.name if overview else None,
                'plate': overview.plate if overview else None,
                'business_scope': overview.business_scope if overview else None,
                'sync_time': component.sync_time.strftime('%Y-%m-%d %H:%M:%S'),
            })
        data['items'] = items
        data['page'] = pagination.page
        data['pages'] = pagination.pages
        data['per_page'] = pagination.per_page
        data['total'] = pagination.total
    return data
def update_components(components, model):
    """Replace every row of `model` with the freshly crawled `components`.

    Returns a (json, status) tuple: 201 on success, 500 when the crawl
    produced nothing (the table is then left untouched).
    """
    if not components:
        return jsonify({'msg': 'Synchronization failed', 'status_code': 500}), 500
    current_app.logger.info(f'更新数据表:{model.__tablename__}')
    # Full refresh: wipe the table, then bulk-insert with 1-based ids.
    db.session.execute(f'truncate table {model.__tablename__}')
    rows = [model(id=idx + 1, code=i['code']) for idx, i in enumerate(components)]
    db.session.add_all(rows)
    db.session.commit()
    update_components_overview(components)
    return jsonify({'msg': 'Synchronization succeeded', 'status_code': 201}), 201
def update_components_overview(components):
    """Crawl each component's core-theme page and upsert it into Overview."""
    for component in components:
        # CHROME_DRIVER config is handed to the crawler (presumably a
        # chromedriver path -- confirm against crawler setup).
        hxtc = get_code_hxtc(current_app.config['CHROME_DRIVER'], component['code'])
        upsert_overview(hxtc)
    current_app.logger.info(f'本次共更新 {len(components)} 只股票的核心题材到数据库')
@bp.route('/sh50', methods=['GET'])
def get_sh50():
    """GET /sh50 -- paginated SH50 index component listing."""
    page = request.args.get('page', 1, type=int)
    page_size = request.args.get('page_size', 15, type=int)
    return jsonify(get_pagination(IndexComponentSH50, page, page_size))
@bp.route('/sh50', methods=['PUT', 'POST'])
def put_or_post_sh50():
    """PUT/POST /sh50 -- re-crawl SH50 membership and rebuild its table."""
    components = get_sh50_component()
    return update_components(components, IndexComponentSH50)
@bp.route('/hs300', methods=['GET'])
def get_hs300():
    """GET /hs300 -- paginated HS300 index component listing."""
    page = request.args.get('page', 1, type=int)
    page_size = request.args.get('page_size', 15, type=int)
    return jsonify(get_pagination(IndexComponentHS300, page, page_size))
@bp.route('/hs300', methods=['PUT', 'POST'])
def put_or_post_hs300():
    """PUT/POST /hs300 -- re-crawl HS300 membership and rebuild its table."""
    components = get_hs300_component()
    return update_components(components, IndexComponentHS300)
from main import db
from main.models.stock import IndexComponentSH50, IndexComponentHS300, Overview
from crawler.eastmoney.stock.index_component import get_sh50_component, get_hs300_component
from crawler.eastmoney.stock.data import get_code_hxtc
from main.stock_overview import upsert_overview
bp = Blueprint('stock_index_component', __name__)
def get_pagination(model, page, page_size):
    """Return one page of `model` rows left-joined to Overview as a JSON-ready dict.

    Keys: items (per-component dicts), page, pages, per_page, total.
    """
    # NOTE(review): this initial None is immediately overwritten below.
    pagination = None
    data = {}
    # Pagination attributes: https://flask-sqlalchemy.palletsprojects.com/en/2.x/api/#flask_sqlalchemy.Pagination
    pagination = db.session.query(model, Overview).outerjoin(
        Overview, model.code == Overview.code).order_by(
        model.id).paginate(page=page, per_page=page_size, error_out=False)
    if pagination:
        items = []
        # Each i is a (component, Overview-or-None) tuple from the outer join.
        for i in pagination.items:
            item = {
                'id': i[0].id,
                'code': i[0].code,
                'name': None,
                'plate': None,
                'business_scope': None,
                'sync_time': i[0].sync_time.strftime('%Y-%m-%d %H:%M:%S')
            }
            if i[1]:
                # Overview row exists: fill in the descriptive fields.
                item['name'] = i[1].name
                item['plate'] = i[1].plate
                item['business_scope'] = i[1].business_scope
            items.append(item)
        # data['items'] = [{'code': i[0].code, 'sync_time': i[0].sync_time.strftime('%Y-%m-%d %H:%M:%S')}
        #                  for i in pagination.items]
        data['items'] = items
        data['page'] = pagination.page
        data['pages'] = pagination.pages
        data['per_page'] = pagination.per_page
        data['total'] = pagination.total
    return data
def update_components(components, model):
    """Replace every row of `model` with the freshly crawled `components`.

    Returns (json, status): 201 on success, 500 when the crawl produced
    nothing (the table is then left untouched).
    """
    if components:
        current_app.logger.info(f'更新数据表:{model.__tablename__}')
        # Full refresh: wipe the table, then re-insert with 1-based ids.
        db.session.execute(f'truncate table {model.__tablename__}')
        db.session.add_all([model(id=idx + 1, code=i['code']) for idx, i in enumerate(components)])
        db.session.commit()
        update_components_overview(components)
        return jsonify({'msg': 'Synchronization succeeded', 'status_code': 201}), 201
    else:
        return jsonify({'msg': 'Synchronization failed', 'status_code': 500}), 500
def update_components_overview(components):
    """Crawl each component's core-theme page and upsert it into Overview."""
    for i in components:
        # CHROME_DRIVER config is handed to the crawler (presumably a
        # chromedriver path -- confirm against crawler setup).
        hxtc = get_code_hxtc(current_app.config['CHROME_DRIVER'], i['code'])
        upsert_overview(hxtc)
    current_app.logger.info(f'本次共更新 {len(components)} 只股票的核心题材到数据库')
@bp.route('/sh50', methods=['GET'])
def get_sh50():
    """GET /sh50 -- paginated SH50 index component listing."""
    page = request.args.get('page', 1, type=int)
    page_size = request.args.get('page_size', 15, type=int)
    data = get_pagination(IndexComponentSH50, page, page_size)
    return jsonify(data)
@bp.route('/sh50', methods=['PUT', 'POST'])
def put_or_post_sh50():
    """PUT/POST /sh50 -- re-crawl SH50 membership and rebuild its table."""
    return update_components(get_sh50_component(), IndexComponentSH50)
@bp.route('/hs300', methods=['GET'])
def get_hs300():
    """GET /hs300 -- paginated HS300 index component listing."""
    page = request.args.get('page', 1, type=int)
    page_size = request.args.get('page_size', 15, type=int)
    data = get_pagination(IndexComponentHS300, page, page_size)
    return jsonify(data)
@bp.route('/hs300', methods=['PUT', 'POST'])
def put_or_post_hs300():
    """PUT/POST /hs300 -- re-crawl HS300 membership and rebuild its table."""
    hs300 = get_hs300_component()
    return update_components(hs300, IndexComponentHS300)
import os
import sys
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from typing import Any, Callable, Optional
from watchopticalmc.internal.generatemc.generatemc import GenerateMCConfig, generatemc
from watchopticalmc.internal.generatemc.makeratdb import makeratdb
from watchopticalmc.internal.generatemc.runwatchmakers import WatchMakersConfig
from watchopticalmc.internal.stringconstants import StringConstants
from watchopticalutils.client import ClientType, client
from watchopticalutils.filepathutils import expandpath
def _parsecml() -> Namespace:
    """Parse the generate-MC command line and return the argparse Namespace."""
    parser = ArgumentParser(description="Generate WATCHMAN MC files")
    parser.add_argument(
        "--directory",
        "-d",
        type=str,
        default=os.getcwd(),
        help="Directory to store generated files. "
        "It will be created if it does not exist.",
    )
    parser.add_argument(
        "--client",
        "-c",
        type=ClientType,
        choices=list(ClientType),
        default=ClientType.CLUSTER,
        help="Where to run jobs.",
    )
    # Sample selection flags; see _filenamefilterfromargs for how they are
    # interpreted when both are left unset.
    parser.add_argument("--signal-only", action="store_true")
    parser.add_argument("--background-only", action="store_true")
    parser.add_argument(
        "--num-events-per-job",
        "-n",
        type=int,
        default=10000,
        help="Number of events per sub-job to generate for each source of "
        "signal/background type.",
    )
    parser.add_argument(
        "--num-jobs",
        "-j",
        type=int,
        default=100,
        help="Number of sub-jobs to generate for each source of signal/background "
        "type.",
    )
    # Bonsai paths default to ${BONSAIDIR}-relative locations; they are
    # expanded and checked later in _validatearguments.
    parser.add_argument(
        "--bonsai",
        help="Path to the bonsai executable. Environment variable ${BONSAIDIR}/bonsai "
        "is used if not set.",
        default="${BONSAIDIR}/bonsai",
    )
    parser.add_argument(
        "--bonsai-likelihood",
        help="Path to the bonsai likelihood. Environment variable "
        "${BONSAIDIR}/like.bin is used if not set.",
        default="${BONSAIDIR}/like.bin",
    )
    parser.add_argument(
        "--attenuation", help="Set attenuation length.", type=float, default=None
    )
    parser.add_argument(
        "--scattering", help="Set scattering length.", type=float, default=None
    )
    return parser.parse_args()
def _validatearguments(args):
    """Exit with status 1 if the bonsai executable or likelihood file is missing.

    BUG FIX: diagnostics now go to stderr; stdout is reserved for normal
    program output.
    """
    if not os.path.exists(expandpath(args.bonsai)):
        print(f"Cannot find bonsai executable {args.bonsai}", file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(expandpath(args.bonsai_likelihood)):
        print(f"Cannot find bonsai likelihood {args.bonsai_likelihood}", file=sys.stderr)
        sys.exit(1)
    return
def _wrapindict(key: str, value: Any):
if value is not None:
return OrderedDict({key: value})
else:
return None
def _getconfigdir(args: Namespace) -> str:
suffix = ""
if args.attenuation is not None:
suffix += f"_attenuation{args.attenuation:.5e}"
if args.scattering is not None:
suffix += f"_scattering{args.scattering:.5e}"
if suffix == "":
suffix = "_nominal"
return "watchmanmc" + suffix
def _filenamefilterfromargs(args: Namespace) -> Optional[Callable[[str], bool]]:
if args.signal_only:
return lambda f: StringConstants.WATCHMAKERS_SIGNAL_PATTERN in f
elif args.background_only:
return lambda f: StringConstants.WATCHMAKERS_SIGNAL_PATTERN not in f
else:
return None
def _run(args):
    """Create the output directory and generate the MC sample on the chosen client."""
    directory = args.directory + os.sep + _getconfigdir(args)
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
    filenamefilter = _filenamefilterfromargs(args)
    # RATDB override for the requested optical properties; None when neither
    # override produces content (presumably nominal inside makeratdb -- TODO confirm).
    injectratdb = _wrapindict(
        f"attenuation_{args.attenuation}_scattering_{args.scattering}",
        makeratdb(attenuation=args.attenuation, scattering=args.scattering),
    )
    config = GenerateMCConfig(
        WatchMakersConfig(directory=directory, numevents=args.num_events_per_job),
        numjobs=args.num_jobs,
        bonsaiexecutable=expandpath(args.bonsai),
        bonsailikelihood=expandpath(args.bonsai_likelihood),
        injectratdb=injectratdb,
        filenamefilter=filenamefilter,
    )
    # .compute() suggests a lazy (dask-style) graph evaluated on the client -- confirm.
    with client(args.client):
        generatemc(config).compute()
def main():
    """Command-line entry point: parse, validate, then run."""
    arguments = _parsecml()
    _validatearguments(arguments)
    _run(arguments)
if __name__ == "__main__":
main() | lib/watchopticalmc/watchopticalmc/scripts/generatemc.py | import os
import sys
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from typing import Any, Callable, Optional
from watchopticalmc.internal.generatemc.generatemc import GenerateMCConfig, generatemc
from watchopticalmc.internal.generatemc.makeratdb import makeratdb
from watchopticalmc.internal.generatemc.runwatchmakers import WatchMakersConfig
from watchopticalmc.internal.stringconstants import StringConstants
from watchopticalutils.client import ClientType, client
from watchopticalutils.filepathutils import expandpath
def _parsecml() -> Namespace:
    """Build and parse the command-line interface for WATCHMAN MC generation."""
    parser = ArgumentParser(description="Generate WATCHMAN MC files")
    parser.add_argument(
        "--directory",
        "-d",
        type=str,
        default=os.getcwd(),
        help="Directory to store generated files. "
        "It will be created if it does not exist.",
    )
    parser.add_argument(
        "--client",
        "-c",
        type=ClientType,
        choices=list(ClientType),
        default=ClientType.CLUSTER,
        help="Where to run jobs.",
    )
    # Exclusive in intent; presumably not enforced as a mutually exclusive
    # group -- TODO confirm both flags together is meaningful.
    parser.add_argument("--signal-only", action="store_true")
    parser.add_argument("--background-only", action="store_true")
    parser.add_argument(
        "--num-events-per-job",
        "-n",
        type=int,
        default=10000,
        help="Number of events per sub-job to generate for each source of "
        "signal/background type.",
    )
    parser.add_argument(
        "--num-jobs",
        "-j",
        type=int,
        default=100,
        help="Number of sub-jobs to generate for each source of signal/background "
        "type.",
    )
    parser.add_argument(
        "--bonsai",
        help="Path to the bonsai executable. Environment variable ${BONSAIDIR}/bonsai "
        "is used if not set.",
        default="${BONSAIDIR}/bonsai",
    )
    parser.add_argument(
        "--bonsai-likelihood",
        help="Path to the bonsai likelihood. Environment variable "
        "${BONSAIDIR}/like.bin is used if not set.",
        default="${BONSAIDIR}/like.bin",
    )
    parser.add_argument(
        "--attenuation", help="Set attenuation length.", type=float, default=None
    )
    parser.add_argument(
        "--scattering", help="Set scattering length.", type=float, default=None
    )
    return parser.parse_args()
def _validatearguments(args):
    """Exit with status 1 when the bonsai executable or likelihood file is missing."""
    if not os.path.exists(expandpath(args.bonsai)):
        # Error messages go to stderr so they survive stdout redirection.
        print(f"Cannot find bonsai executable {args.bonsai}", file=sys.stderr)
        sys.exit(1)
    if not os.path.exists(expandpath(args.bonsai_likelihood)):
        print(f"Cannot find bonsai likelihood {args.bonsai_likelihood}", file=sys.stderr)
        sys.exit(1)
def _wrapindict(key: str, value: Any):
if value is not None:
return OrderedDict({key: value})
else:
return None
def _getconfigdir(args: Namespace) -> str:
suffix = ""
if args.attenuation is not None:
suffix += f"_attenuation{args.attenuation:.5e}"
if args.scattering is not None:
suffix += f"_scattering{args.scattering:.5e}"
if suffix == "":
suffix = "_nominal"
return "watchmanmc" + suffix
def _filenamefilterfromargs(args: Namespace) -> Optional[Callable[[str], bool]]:
if args.signal_only:
return lambda f: StringConstants.WATCHMAKERS_SIGNAL_PATTERN in f
elif args.background_only:
return lambda f: StringConstants.WATCHMAKERS_SIGNAL_PATTERN not in f
else:
return None
def _run(args):
    """Create the output directory and generate the MC sample on the chosen client."""
    directory = args.directory + os.sep + _getconfigdir(args)
    if not os.path.exists(directory):
        os.makedirs(directory, exist_ok=True)
    filenamefilter = _filenamefilterfromargs(args)
    # RATDB override for the requested optical properties; None when neither
    # override produces content (presumably nominal inside makeratdb -- TODO confirm).
    injectratdb = _wrapindict(
        f"attenuation_{args.attenuation}_scattering_{args.scattering}",
        makeratdb(attenuation=args.attenuation, scattering=args.scattering),
    )
    config = GenerateMCConfig(
        WatchMakersConfig(directory=directory, numevents=args.num_events_per_job),
        numjobs=args.num_jobs,
        bonsaiexecutable=expandpath(args.bonsai),
        bonsailikelihood=expandpath(args.bonsai_likelihood),
        injectratdb=injectratdb,
        filenamefilter=filenamefilter,
    )
    # .compute() suggests a lazy (dask-style) graph evaluated on the client -- confirm.
    with client(args.client):
        generatemc(config).compute()
def main():
    """Command-line entry point: parse, validate, then run."""
    arguments = _parsecml()
    _validatearguments(arguments)
    _run(arguments)
if __name__ == "__main__":
main() | 0.598664 | 0.09886 |
import numpy as np
from abmarl.sim.modules import GridResources
def test_builder():
    """A default build() must expose the documented default parameters."""
    sim = GridResources.build()
    expected = {
        'region': 10,
        'max_value': 1.,
        'min_value': 0.1,
        'revive_rate': 0.04,
        'coverage': 0.75,
    }
    for attr, value in expected.items():
        assert getattr(sim, attr) == value
def test_builder_custom():
    """build() must honor every overridden parameter."""
    overrides = {
        'region': 5,
        'max_value': 2.,
        'min_value': 0.01,
        'revive_rate': 0.5,
        'coverage': 0.4,
    }
    sim = GridResources.build(overrides)
    for attr, value in overrides.items():
        assert getattr(sim, attr) == value
def test_reset():
    """reset() must leave every cell inside [0, max_value]."""
    np.random.seed(24)
    sim = GridResources.build({'region': 5})
    sim.reset()
    grid = sim.resources
    assert ((0. <= grid) & (grid <= sim.max_value)).all()
def test_harvest_and_regrow():
    """Harvest bookkeeping: replenish, depletion, no revival of depleted cells, max cap."""
    np.random.seed(24)  # the cell values asserted below depend on this exact RNG stream
    sim = GridResources.build()
    sim.reset()
    # Normal action with harvest and replenish
    value_before = {
        (4,5) : sim.resources[(4,5)],
        (3,3) : sim.resources[(3,3)]
    }
    assert sim.harvest((4,5), 0.7) == 0.7
    assert sim.harvest((3,3), 0.1) == 0.1
    sim.regrow()
    assert sim.resources[(4,5)] == value_before[(4,5)] - 0.7 + 0.04
    assert sim.resources[(3,3)] == value_before[(3,3)] - 0.1 + 0.04
    # action that has depleted one of the resources: harvest returns only what was left
    value_before = {
        (4,5) : sim.resources[(4,5)],
        (2,1) : sim.resources[(2,1)]
    }
    assert sim.harvest((4,5), 0.7) == value_before[(4,5)]
    assert sim.harvest((2,1), 0.15) == 0.15
    sim.regrow()
    assert sim.resources[(4,5)] == 0.
    assert sim.resources[(2,1)] == value_before[(2,1)] - 0.15 + 0.04
    # Check that the depleted resources do not restore
    value_before = {
        (2,1) : sim.resources[(2,1)]
    }
    sim.regrow()
    assert sim.resources[(4,5)] == 0.
    assert sim.resources[(2,1)] == value_before[(2,1)] + 0.04
    # Check that nothing is above maximum value
    for _ in range(25):
        sim.regrow()
assert (sim.resources <= sim.max_value).all() | tests/test_grid_resources.py | import numpy as np
from abmarl.sim.modules import GridResources
def test_builder():
    """A default build() must expose the documented default parameters."""
    sim = GridResources.build()
    expected = {
        'region': 10,
        'max_value': 1.,
        'min_value': 0.1,
        'revive_rate': 0.04,
        'coverage': 0.75,
    }
    for attr, value in expected.items():
        assert getattr(sim, attr) == value
def test_builder_custom():
    """build() must honor every overridden parameter."""
    overrides = {
        'region': 5,
        'max_value': 2.,
        'min_value': 0.01,
        'revive_rate': 0.5,
        'coverage': 0.4,
    }
    sim = GridResources.build(overrides)
    for attr, value in overrides.items():
        assert getattr(sim, attr) == value
def test_reset():
    """reset() must leave every cell inside [0, max_value]."""
    np.random.seed(24)
    sim = GridResources.build({'region': 5})
    sim.reset()
    grid = sim.resources
    assert ((0. <= grid) & (grid <= sim.max_value)).all()
def test_harvest_and_regrow():
    """Harvest bookkeeping: replenish, depletion, no revival of depleted cells, max cap."""
    np.random.seed(24)  # the cell values asserted below depend on this exact RNG stream
    sim = GridResources.build()
    sim.reset()
    # Normal action with harvest and replenish
    value_before = {
        (4,5) : sim.resources[(4,5)],
        (3,3) : sim.resources[(3,3)]
    }
    assert sim.harvest((4,5), 0.7) == 0.7
    assert sim.harvest((3,3), 0.1) == 0.1
    sim.regrow()
    assert sim.resources[(4,5)] == value_before[(4,5)] - 0.7 + 0.04
    assert sim.resources[(3,3)] == value_before[(3,3)] - 0.1 + 0.04
    # action that has depleted one of the resources: harvest returns only what was left
    value_before = {
        (4,5) : sim.resources[(4,5)],
        (2,1) : sim.resources[(2,1)]
    }
    assert sim.harvest((4,5), 0.7) == value_before[(4,5)]
    assert sim.harvest((2,1), 0.15) == 0.15
    sim.regrow()
    assert sim.resources[(4,5)] == 0.
    assert sim.resources[(2,1)] == value_before[(2,1)] - 0.15 + 0.04
    # Check that the depleted resources do not restore
    value_before = {
        (2,1) : sim.resources[(2,1)]
    }
    sim.regrow()
    assert sim.resources[(4,5)] == 0.
    assert sim.resources[(2,1)] == value_before[(2,1)] + 0.04
    # Check that nothing is above maximum value
    for _ in range(25):
        sim.regrow()
assert (sim.resources <= sim.max_value).all() | 0.68742 | 0.781664 |
import numpy as np
import click
import os
from PIL import Image
from lmnet.nnlib import NNLib as NNLib
from lmnet.common import Tasks
from lmnet.utils.output import JsonOutput, ImageFromJson
from lmnet.utils.config import (
load_yaml,
build_pre_process,
build_post_process,
)
def _pre_process(raw_image, pre_processor, data_format):
    """Run the configured pre-processing chain; transpose HWC->CHW for NCHW."""
    processed = build_pre_process(pre_processor)(image=raw_image)['image']
    if data_format == 'NCHW':
        processed = np.transpose(processed, [2, 0, 1])
    return processed
def _post_process(output, post_processor):
    """Run the configured post-processing chain on the raw network output."""
    return build_post_process(post_processor)(outputs=output)['outputs']
def _save_json(output_dir, json_obj):
output_file_name = os.path.join(output_dir, "output.json")
dirname = os.path.dirname(output_file_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(output_file_name, "w") as json_file:
json_file.write(json_obj)
print("save json: {}".format(output_file_name))
def _save_images(output_dir, filename_images):
for filename, image in filename_images:
base_name = os.path.basename(filename)
output_file_name = os.path.join(output_dir, "images", base_name)
dirname = os.path.dirname(output_file_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
image.save(output_file_name)
print("save image: {}".format(output_file_name))
def main_test(input_image, library, config_file, max_percent_incorrect_values=0.1):
    """Run one image through the compiled network and save JSON + image outputs.

    NOTE(review): max_percent_incorrect_values is never used in this body --
    confirm whether it can be dropped from the signature.
    """
    if not input_image or not library or not config_file:
        print('Please check usage with --help option')
        exit(1)
    config = load_yaml(config_file)
    # load and initialize the generated shared library
    nn = NNLib()
    nn.load(library)
    nn.init()
    # load the image
    img = Image.open(input_image).convert("RGB")
    # convert into numpy array
    data = np.asarray(img)
    raw_image = data
    # pre process for image
    data = _pre_process(data, config.PRE_PROCESSOR, config.DATA_FORMAT)
    # add the batch dimension
    data = np.expand_dims(data, axis=0)
    # run the graph
    output = nn.run(data)
    print('Output: (before post process)')
    print(output)
    # post process for output
    output = _post_process(output, config.POST_PROCESSOR)
    print('Output: ')
    print(output)
    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
    )
    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )
    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]
    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)
    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
    "-i",
    "--input_image",
    type=click.Path(exists=True),
    help="Input image filename",
)
@click.option(
    "-l",
    "--library",
    type=click.Path(exists=True),
    help="Shared library filename",
)
@click.option(
    "-c",
    "--config_file",
    type=click.Path(exists=True),
    help="Config file Path",
)
def run_test(input_image, library, config_file):
    """CLI wrapper around main_test()."""
    main_test(input_image, library, config_file)
if __name__ == "__main__":
run_test() | output_template/python/run.py | import numpy as np
import click
import os
from PIL import Image
from lmnet.nnlib import NNLib as NNLib
from lmnet.common import Tasks
from lmnet.utils.output import JsonOutput, ImageFromJson
from lmnet.utils.config import (
load_yaml,
build_pre_process,
build_post_process,
)
def _pre_process(raw_image, pre_processor, data_format):
    """Run the configured pre-processing chain; transpose HWC->CHW for NCHW."""
    processed = build_pre_process(pre_processor)(image=raw_image)['image']
    if data_format == 'NCHW':
        processed = np.transpose(processed, [2, 0, 1])
    return processed
def _post_process(output, post_processor):
    """Run the configured post-processing chain on the raw network output."""
    return build_post_process(post_processor)(outputs=output)['outputs']
def _save_json(output_dir, json_obj):
output_file_name = os.path.join(output_dir, "output.json")
dirname = os.path.dirname(output_file_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open(output_file_name, "w") as json_file:
json_file.write(json_obj)
print("save json: {}".format(output_file_name))
def _save_images(output_dir, filename_images):
for filename, image in filename_images:
base_name = os.path.basename(filename)
output_file_name = os.path.join(output_dir, "images", base_name)
dirname = os.path.dirname(output_file_name)
if not os.path.exists(dirname):
os.makedirs(dirname)
image.save(output_file_name)
print("save image: {}".format(output_file_name))
def main_test(input_image, library, config_file, max_percent_incorrect_values=0.1):
    """Run one image through the compiled network and save JSON + image outputs.

    NOTE(review): max_percent_incorrect_values is never used in this body --
    confirm whether it can be dropped from the signature.
    """
    if not input_image or not library or not config_file:
        print('Please check usage with --help option')
        exit(1)
    config = load_yaml(config_file)
    # load and initialize the generated shared library
    nn = NNLib()
    nn.load(library)
    nn.init()
    # load the image
    img = Image.open(input_image).convert("RGB")
    # convert into numpy array
    data = np.asarray(img)
    raw_image = data
    # pre process for image
    data = _pre_process(data, config.PRE_PROCESSOR, config.DATA_FORMAT)
    # add the batch dimension
    data = np.expand_dims(data, axis=0)
    # run the graph
    output = nn.run(data)
    print('Output: (before post process)')
    print(output)
    # post process for output
    output = _post_process(output, config.POST_PROCESSOR)
    print('Output: ')
    print(output)
    # json output
    json_output = JsonOutput(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
        data_format=config.DATA_FORMAT,
    )
    image_from_json = ImageFromJson(
        task=Tasks(config.TASK),
        classes=config.CLASSES,
        image_size=config.IMAGE_SIZE,
    )
    output_dir = "output"
    outputs = output
    raw_images = [raw_image]
    image_files = [input_image]
    json_obj = json_output(outputs, raw_images, image_files)
    _save_json(output_dir, json_obj)
    filename_images = image_from_json(json_obj, raw_images, image_files)
    _save_images(output_dir, filename_images)
@click.command(context_settings=dict(help_option_names=['-h', '--help']))
@click.option(
    "-i",
    "--input_image",
    type=click.Path(exists=True),
    help="Input image filename",
)
@click.option(
    "-l",
    "--library",
    type=click.Path(exists=True),
    help="Shared library filename",
)
@click.option(
    "-c",
    "--config_file",
    type=click.Path(exists=True),
    help="Config file Path",
)
def run_test(input_image, library, config_file):
    """CLI wrapper around main_test()."""
    main_test(input_image, library, config_file)
if __name__ == "__main__":
run_test() | 0.378919 | 0.13452 |
import logging
import spotipy
from spotmover.providers.spotify.util import obtain_token_localhost
from spotmover.providers.base import Provider, ProviderAuthError
from spotmover.dump import Dump
from spotmover.cache import DiskCache
logger = logging.getLogger(__name__)
def confirm(msg):
    """Prompt with *msg* and return True iff the answer is "y" or "yes" (any case)."""
    return input(msg + " ").lower() in ("y", "yes")
class NotFoundError(Exception):
    """Raised when a Spotify search yields no (exact) match."""
    pass
class SpotifyProvider(Provider):
    """Music provider backed by the Spotify Web API (via spotipy).

    Lookups are memoized in ``self._cache``; a cached value may be a result
    or a NotFoundError instance, so negative lookups are cached too.
    """

    def __init__(self):
        self.token = None
        self.api = None
        self._cache = self.init_cache()
        self.username = None

    def init_cache(self):
        """Return the cache backing store; overridden by CachedSpotifyProvider."""
        return {}

    def authenticate(self, username: str, client_id: str, client_secret: str, redirect_uri: str):  # pylint: disable=W0221
        """Obtain an OAuth token for *username* or raise ProviderAuthError."""
        scope = 'user-library-modify playlist-modify-private playlist-modify-public playlist-read-private playlist-read-collaborative'
        token = obtain_token_localhost(username, client_id, client_secret, redirect_uri, scope=scope)
        if not token:
            raise ProviderAuthError("Unable to authenticate user {}".format(username))
        self.token = token
        self.api = spotipy.Spotify(auth=token)
        self.username = username

    def is_authenticated(self):
        """Return True once authenticate() has succeeded."""
        return self.api is not None

    def need_authentication(self):
        """Raise ProviderAuthError unless authenticate() has been called."""
        if not self.is_authenticated():
            raise ProviderAuthError("User is not authenticated")

    def get_album(self, artist, album):
        """Return the album object whose name matches *album* (case-insensitive).

        Raises NotFoundError when nothing matches; both outcomes are memoized.
        """
        album_cache = self._cache.get("albums")
        if album_cache is None:
            self._cache["albums"] = {}
            album_cache = self._cache.get("albums")
        cache_key = (artist, album)
        if cache_key in album_cache:
            cache_value = album_cache[cache_key]
            if isinstance(cache_value, Exception):
                raise cache_value
            else:
                return album_cache[cache_key]
        self.need_authentication()
        result = self.api.search("artist:{} album:{}".format(artist, album), type="album")
        album_l = album.lower()
        if len(result["albums"]["items"]) == 0:
            exc = NotFoundError("No such album: {}".format(album))
            album_cache[cache_key] = exc
            self._cache["albums"] = album_cache
            raise exc
        retval = None
        for album in result["albums"]["items"]:
            if album["name"].lower() == album_l:
                retval = album
                break
        if not retval:
            exc = NotFoundError("No exact match for the album: {}".format(album))
            album_cache[cache_key] = exc
            self._cache["albums"] = album_cache
            raise exc
        album_cache[cache_key] = retval
        self._cache["albums"] = album_cache
        return retval

    def fetch_all(self, results, items_key="items", limit=50):
        """Follow pagination "next" links and return the accumulated item list."""
        retval = results[items_key]
        while results["next"]:
            results = self.api.next(results)
            retval.extend(results[items_key])
        return retval

    def iter_current_user_saved_albums(self):
        """Yield (artist_name, album_name) for every saved album of the user."""
        self.need_authentication()
        saved_items = self.fetch_all(self.api.current_user_saved_albums())
        for album in saved_items:
            album_name = album["album"]["name"]
            for artist in album["album"]["artists"]:
                artist_name = artist["name"]
                yield (artist_name, album_name)

    def load_songs(self, data: Dump):
        """Save every album from *data* to the user's library, skipping duplicates."""
        self.need_authentication()
        album_ids = []
        not_found = []
        # Case-insensitive snapshot of the albums already in the library.
        current_albums = {(x[0].lower(), x[1].lower()) for x in self.iter_current_user_saved_albums()}
        for src_album in data.albums:
            src_album_artist = (src_album["artist"], src_album["album"])
            src_album_artist_lower = (src_album["artist"].lower(), src_album["album"].lower())
            if src_album_artist_lower in current_albums:
                logger.info("Already added; {}: {}".format(*src_album_artist))
                continue
            try:
                album = self.get_album(*src_album_artist)
            except NotFoundError:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning("Not found; {}: {}".format(*src_album_artist))
                not_found.append(src_album_artist)
            else:
                logger.info("Album found; {}: {}".format(*src_album_artist))
                album_ids.append(album["id"])
        logger.info("Albums not found in spotify:")
        for album in not_found:
            logger.info(" {}: {}".format(*album))
        logger.info("Found {} albums, saving...".format(len(album_ids)))
        # The albums endpoint accepts at most 50 ids per call.
        for start_idx in range(0, len(album_ids), 50):
            self.api.current_user_saved_albums_add(albums=album_ids[start_idx: start_idx + 50])
        logger.info("Done.")

    def find_song(self, artist, album, song):
        """Return the track id exactly matching (artist, album, song).

        Raises NotFoundError when nothing matches; both outcomes are memoized.
        """
        if "find_song" not in self._cache:
            cache_obj = self._cache["find_song"] = {}
        else:
            cache_obj = self._cache["find_song"]
        cache_key = (artist, album, song)
        if cache_key in cache_obj:
            cache_value = cache_obj[cache_key]
            if isinstance(cache_value, Exception):
                raise cache_value
            else:
                return cache_value
        result = self.api.search("artist:{} album:{} track:{}".format(artist, album, song), type="track")
        items = result["tracks"]["items"]
        if len(items) == 0:
            logger.warning("find_song {}/{} {}: NOT FOUND".format(artist, album, song))
            exc = NotFoundError("Song not found: {}".format(song))
            cache_obj[cache_key] = exc
            self._cache["find_song"] = cache_obj
            raise exc
        if len(items) == 1:
            # NOTE(review): single-result hits are not cached -- confirm intended.
            logger.info("find_song {}/{} {}: FOUND".format(artist, album, song))
            return items[0]["id"]
        for item in items:
            item_album_name = item["album"]["name"]
            item_track_name = item["name"]
            for artist_result in item["artists"]:
                item_artist_name = artist_result["name"]
                if item_album_name.lower() == album.lower() and \
                        item_artist_name.lower() == artist.lower() and \
                        item_track_name.lower() == song.lower():
                    logger.info("find_song {}/{} {}: FOUND".format(artist, album, song))
                    cache_obj[cache_key] = item["id"]
                    self._cache["find_song"] = cache_obj
                    return item["id"]
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning("find_song {}/{} {}: NOT FOUND".format(artist, album, song))
        exc = NotFoundError("No exact match for song: {}".format(song))
        cache_obj[cache_key] = exc
        self._cache["find_song"] = cache_obj
        raise exc

    def get_track_ids_for_songs(self, songs):
        """Resolve each song dict to a track id; return (track_ids, not_found)."""
        track_ids = []
        not_found = []
        for song in songs:
            artist = song["artist"]
            album = song["album"]
            title = song["title"]
            try:
                track_id = self.find_song(artist, album, title)
            except NotFoundError:
                not_found.append(song)
                logger.warning("Not found: {}/{}".format(artist, title))
                continue
            track_ids.append(track_id)
        return (track_ids, not_found)

    def create_playlist(self, name, track_ids):
        """Create a private playlist *name* and add the given tracks to it."""
        logger.info("Creating playlist '{}' with {} tracks".format(name, len(track_ids)))
        playlist = self.api.user_playlist_create(self.username, name, public=False)
        playlist_id = playlist["id"]
        # The playlist endpoint accepts at most 100 track ids per call.
        for start_idx in range(0, len(track_ids), 100):
            self.api.user_playlist_add_tracks(self.username, playlist_id, track_ids[start_idx:start_idx + 100])

    def load_playlist(self, playlist, force: bool):
        """Create one playlist from a dump entry, confirming partial matches unless *force*."""
        name = playlist["name"]
        songs = playlist["tracks"]
        track_ids, not_found = self.get_track_ids_for_songs(songs)
        if len(track_ids) == 0:
            logger.error("No songs found")
            return
        if len(track_ids) != len(songs):
            logger.warning("Some songs were not found")
            if not force:
                for song in not_found:
                    logger.info("- {artist}/{album}: {title}".format(**song))
                if not confirm("Are you sure to create the playlist? (y/n)"):
                    logger.info("Skipping...")
                    return
        self.create_playlist(name, track_ids)

    def load_playlists(self, data: Dump, force: bool, force_create: bool):
        """Interactively import playlists from *data*; skip existing ones unless *force_create*."""
        self.need_authentication()
        current_playlists = {x["name"]: x for x in self.fetch_all(self.api.current_user_playlists())}
        for playlist in data.playlists:
            name = playlist["name"]
            if not confirm("Do you want to import playlist '{}'? (y/n)".format(name)):
                logger.info("Skipping...")
                continue
            if name in current_playlists and not force_create:
                logger.info("Playlist {} already exists, skipping".format(name))
                continue
            self.load_playlist(playlist, force)
class CachedSpotifyProvider(SpotifyProvider):
def init_cache(self):
return DiskCache("spotmover-spotify") | spotmover/providers/spotify/spotify.py | import logging
import spotipy
from spotmover.providers.spotify.util import obtain_token_localhost
from spotmover.providers.base import Provider, ProviderAuthError
from spotmover.dump import Dump
from spotmover.cache import DiskCache
logger = logging.getLogger(__name__)
def confirm(msg):
    """Prompt with *msg* and return True iff the answer is "y" or "yes" (any case)."""
    return input(msg + " ").lower() in ("y", "yes")
class NotFoundError(Exception):
    """Raised when a Spotify search yields no (exact) match."""
    pass
class SpotifyProvider(Provider):
    """Music provider backed by the Spotify Web API (via spotipy).

    Lookups are memoized in ``self._cache``; a cached value may be a result
    or a NotFoundError instance, so negative lookups are cached too.
    """

    def __init__(self):
        self.token = None
        self.api = None
        self._cache = self.init_cache()
        self.username = None

    def init_cache(self):
        """Return the cache backing store; overridden by CachedSpotifyProvider."""
        return {}

    def authenticate(self, username: str, client_id: str, client_secret: str, redirect_uri: str):  # pylint: disable=W0221
        """Obtain an OAuth token for *username* or raise ProviderAuthError."""
        scope = 'user-library-modify playlist-modify-private playlist-modify-public playlist-read-private playlist-read-collaborative'
        token = obtain_token_localhost(username, client_id, client_secret, redirect_uri, scope=scope)
        if not token:
            raise ProviderAuthError("Unable to authenticate user {}".format(username))
        self.token = token
        self.api = spotipy.Spotify(auth=token)
        self.username = username

    def is_authenticated(self):
        """Return True once authenticate() has succeeded."""
        return self.api is not None

    def need_authentication(self):
        """Raise ProviderAuthError unless authenticate() has been called."""
        if not self.is_authenticated():
            raise ProviderAuthError("User is not authenticated")

    def get_album(self, artist, album):
        """Return the album object whose name matches *album* (case-insensitive).

        Raises NotFoundError when nothing matches; both outcomes are memoized.
        """
        album_cache = self._cache.get("albums")
        if album_cache is None:
            self._cache["albums"] = {}
            album_cache = self._cache.get("albums")
        cache_key = (artist, album)
        if cache_key in album_cache:
            cache_value = album_cache[cache_key]
            if isinstance(cache_value, Exception):
                raise cache_value
            else:
                return album_cache[cache_key]
        self.need_authentication()
        result = self.api.search("artist:{} album:{}".format(artist, album), type="album")
        album_l = album.lower()
        if len(result["albums"]["items"]) == 0:
            exc = NotFoundError("No such album: {}".format(album))
            album_cache[cache_key] = exc
            self._cache["albums"] = album_cache
            raise exc
        retval = None
        for album in result["albums"]["items"]:
            if album["name"].lower() == album_l:
                retval = album
                break
        if not retval:
            exc = NotFoundError("No exact match for the album: {}".format(album))
            album_cache[cache_key] = exc
            self._cache["albums"] = album_cache
            raise exc
        album_cache[cache_key] = retval
        self._cache["albums"] = album_cache
        return retval

    def fetch_all(self, results, items_key="items", limit=50):
        """Follow pagination "next" links and return the accumulated item list."""
        retval = results[items_key]
        while results["next"]:
            results = self.api.next(results)
            retval.extend(results[items_key])
        return retval

    def iter_current_user_saved_albums(self):
        """Yield (artist_name, album_name) for every saved album of the user."""
        self.need_authentication()
        saved_items = self.fetch_all(self.api.current_user_saved_albums())
        for album in saved_items:
            album_name = album["album"]["name"]
            for artist in album["album"]["artists"]:
                artist_name = artist["name"]
                yield (artist_name, album_name)

    def load_songs(self, data: Dump):
        """Save every album from *data* to the user's library, skipping duplicates."""
        self.need_authentication()
        album_ids = []
        not_found = []
        # Case-insensitive snapshot of the albums already in the library.
        current_albums = {(x[0].lower(), x[1].lower()) for x in self.iter_current_user_saved_albums()}
        for src_album in data.albums:
            src_album_artist = (src_album["artist"], src_album["album"])
            src_album_artist_lower = (src_album["artist"].lower(), src_album["album"].lower())
            if src_album_artist_lower in current_albums:
                logger.info("Already added; {}: {}".format(*src_album_artist))
                continue
            try:
                album = self.get_album(*src_album_artist)
            except NotFoundError:
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning("Not found; {}: {}".format(*src_album_artist))
                not_found.append(src_album_artist)
            else:
                logger.info("Album found; {}: {}".format(*src_album_artist))
                album_ids.append(album["id"])
        logger.info("Albums not found in spotify:")
        for album in not_found:
            logger.info(" {}: {}".format(*album))
        logger.info("Found {} albums, saving...".format(len(album_ids)))
        # The albums endpoint accepts at most 50 ids per call.
        for start_idx in range(0, len(album_ids), 50):
            self.api.current_user_saved_albums_add(albums=album_ids[start_idx: start_idx + 50])
        logger.info("Done.")

    def find_song(self, artist, album, song):
        """Return the track id exactly matching (artist, album, song).

        Raises NotFoundError when nothing matches; both outcomes are memoized.
        """
        if "find_song" not in self._cache:
            cache_obj = self._cache["find_song"] = {}
        else:
            cache_obj = self._cache["find_song"]
        cache_key = (artist, album, song)
        if cache_key in cache_obj:
            cache_value = cache_obj[cache_key]
            if isinstance(cache_value, Exception):
                raise cache_value
            else:
                return cache_value
        result = self.api.search("artist:{} album:{} track:{}".format(artist, album, song), type="track")
        items = result["tracks"]["items"]
        if len(items) == 0:
            logger.warning("find_song {}/{} {}: NOT FOUND".format(artist, album, song))
            exc = NotFoundError("Song not found: {}".format(song))
            cache_obj[cache_key] = exc
            self._cache["find_song"] = cache_obj
            raise exc
        if len(items) == 1:
            # NOTE(review): single-result hits are not cached -- confirm intended.
            logger.info("find_song {}/{} {}: FOUND".format(artist, album, song))
            return items[0]["id"]
        for item in items:
            item_album_name = item["album"]["name"]
            item_track_name = item["name"]
            for artist_result in item["artists"]:
                item_artist_name = artist_result["name"]
                if item_album_name.lower() == album.lower() and \
                        item_artist_name.lower() == artist.lower() and \
                        item_track_name.lower() == song.lower():
                    logger.info("find_song {}/{} {}: FOUND".format(artist, album, song))
                    cache_obj[cache_key] = item["id"]
                    self._cache["find_song"] = cache_obj
                    return item["id"]
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning("find_song {}/{} {}: NOT FOUND".format(artist, album, song))
        exc = NotFoundError("No exact match for song: {}".format(song))
        cache_obj[cache_key] = exc
        self._cache["find_song"] = cache_obj
        raise exc

    def get_track_ids_for_songs(self, songs):
        """Resolve each song dict to a track id; return (track_ids, not_found)."""
        track_ids = []
        not_found = []
        for song in songs:
            artist = song["artist"]
            album = song["album"]
            title = song["title"]
            try:
                track_id = self.find_song(artist, album, title)
            except NotFoundError:
                not_found.append(song)
                logger.warning("Not found: {}/{}".format(artist, title))
                continue
            track_ids.append(track_id)
        return (track_ids, not_found)

    def create_playlist(self, name, track_ids):
        """Create a private playlist *name* and add the given tracks to it."""
        logger.info("Creating playlist '{}' with {} tracks".format(name, len(track_ids)))
        playlist = self.api.user_playlist_create(self.username, name, public=False)
        playlist_id = playlist["id"]
        # The playlist endpoint accepts at most 100 track ids per call.
        for start_idx in range(0, len(track_ids), 100):
            self.api.user_playlist_add_tracks(self.username, playlist_id, track_ids[start_idx:start_idx + 100])

    def load_playlist(self, playlist, force: bool):
        """Create one playlist from a dump entry, confirming partial matches unless *force*."""
        name = playlist["name"]
        songs = playlist["tracks"]
        track_ids, not_found = self.get_track_ids_for_songs(songs)
        if len(track_ids) == 0:
            logger.error("No songs found")
            return
        if len(track_ids) != len(songs):
            logger.warning("Some songs were not found")
            if not force:
                for song in not_found:
                    logger.info("- {artist}/{album}: {title}".format(**song))
                if not confirm("Are you sure to create the playlist? (y/n)"):
                    logger.info("Skipping...")
                    return
        self.create_playlist(name, track_ids)

    def load_playlists(self, data: Dump, force: bool, force_create: bool):
        """Interactively import playlists from *data*; skip existing ones unless *force_create*."""
        self.need_authentication()
        current_playlists = {x["name"]: x for x in self.fetch_all(self.api.current_user_playlists())}
        for playlist in data.playlists:
            name = playlist["name"]
            if not confirm("Do you want to import playlist '{}'? (y/n)".format(name)):
                logger.info("Skipping...")
                continue
            if name in current_playlists and not force_create:
                logger.info("Playlist {} already exists, skipping".format(name))
                continue
            self.load_playlist(playlist, force)
class CachedSpotifyProvider(SpotifyProvider):
def init_cache(self):
return DiskCache("spotmover-spotify") | 0.411939 | 0.076304 |
"""Version bumper on upload."""
import subprocess
from datetime import datetime
import configparser
import v0tools_doc
DTFMT = "%Y-%m-%d %H:%M UTC"
def commits(start, end):
    """Return the commit hashes in the git range ``start...end``, newest first."""
    rev_list = f"git rev-list {start}...{end}".split()
    return subprocess.check_output(rev_list, encoding="utf-8").splitlines()
def commit_details(commit):
    """Return (utc-timestamp-string, subject, body) for *commit*."""
    # %ct:::%s:::%b = commit epoch, subject, body, with an unambiguous separator.
    show = ["git", "show", "--quiet", commit, "--pretty=%ct:::%s:::%b"]
    epoch, subject, body = subprocess.check_output(show, encoding="utf-8").split(":::")
    stamp = datetime.utcfromtimestamp(int(epoch)).strftime(DTFMT)
    return stamp, subject.strip(), body.strip()
def get_repo_url():
    """Return the 'Source Code' URL from the setup.cfg [metadata] project_urls."""
    config = configparser.ConfigParser()
    config.read(v0tools_doc.SETUP_CFG)
    # project_urls lines look like "Name = URL"; split each into [name, url].
    # NOTE(review): a URL containing '=' would be split further -- confirm none do.
    vals = [
        list(map(str.strip, i.strip().split("=")))
        for i in config.get("metadata", "project_urls").splitlines()
        if i.strip()
    ]
    urls = {i[0]: i[1] for i in vals}
    return urls["Source Code"]
def _sort_tags(tag):
arr = map(int, tag.replace("v", "").split("."))
return tuple(arr)
def get_changlog():
    """Accumulate a Markdown changelog (newest tag first) into ``content``."""
    cmd = "git rev-list --max-parents=0 HEAD".split()
    initial_commit = subprocess.check_output(cmd, encoding="utf-8").strip()
    tags = subprocess.check_output(
        ["git", "tag", "-l"],
        encoding="utf-8",
    ).splitlines()
    # Newest version first.
    tags = sorted(tags, key=_sort_tags)[::-1]
    content = []
    url = get_repo_url()
    first = "uninit"
    for i in range(len(tags)):
        try:
            end, start = tags[i], tags[i + 1]
        except IndexError:
            # Oldest tag: diff against the repository's initial commit.
            end, start = tags[i], initial_commit
        commit_list = commits(start, end)
        if not commit_list:
            continue
        content.append(f"# {end}")
        for com in commit_list:
            dt, msg, desc = commit_details(com)
            if first == "uninit":
                # The very first (most recent) entry links to HEAD.
                link = f"[HEAD]({url}/commit/HEAD)"
                first = "init"
            else:
                link = f"[{com[:7]}]({url}/commit/{com})"
            content.append(f"{msg}")
            # BUG FIX: removed the stray ")" that unbalanced the Markdown link.
            content.append(f"> {dt} {link}")
            content.append("")
            if desc:
                content.append("```")
                content.append(desc)
                content.append("```")
        content.append("---")
return "\n".join(content) | src/v0tools_doc/changelog.py | """Version bumper on upload."""
import subprocess
from datetime import datetime
import configparser
import v0tools_doc
DTFMT = "%Y-%m-%d %H:%M UTC"
def commits(start, end):
cmd = f"git rev-list {start}...{end}".split()
return subprocess.check_output(cmd, encoding="utf-8").splitlines()
def commit_details(commit):
cmd = ["git", "show", "--quiet", commit, "--pretty=%ct:::%s:::%b"]
epoch, msg, desc = subprocess.check_output(cmd, encoding="utf-8").split(":::")
dto = datetime.utcfromtimestamp(int(epoch))
return dto.strftime(DTFMT), msg.strip(), desc.strip()
def get_repo_url():
config = configparser.ConfigParser()
config.read(v0tools_doc.SETUP_CFG)
vals = [
list(map(str.strip, i.strip().split("=")))
for i in config.get("metadata", "project_urls").splitlines()
if i.strip()
]
urls = {i[0]: i[1] for i in vals}
return urls["Source Code"]
def _sort_tags(tag):
arr = map(int, tag.replace("v", "").split("."))
return tuple(arr)
def get_changlog():
cmd = "git rev-list --max-parents=0 HEAD".split()
initial_commit = subprocess.check_output(cmd, encoding="utf-8").strip()
tags = subprocess.check_output(
["git", "tag", "-l"],
encoding="utf-8",
).splitlines()
tags = sorted(tags, key=_sort_tags)[::-1]
# tags.insert(0, "HEAD")
content = []
url = get_repo_url()
first = "uninit"
for i in range(len(tags)):
try:
end, start = tags[i], tags[i + 1]
except IndexError:
end, start = tags[i], initial_commit
commit_list = commits(start, end)
if not commit_list:
continue
content.append(f"# {end}")
for _, com in enumerate(commit_list):
dt, msg, desc = commit_details(com)
if first == "uninit":
link = f"[HEAD]({url}/commit/HEAD)"
first = "init"
else:
link = f"[{com[:7]}]({url}/commit/{com})"
content.append(f"{msg}")
content.append(f"> {dt} {link})")
content.append("")
if desc:
content.append(f"```")
content.append(desc)
content.append(f"```")
content.append("---")
return "\n".join(content) | 0.405802 | 0.14069 |
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MyMainGUI(QDialog):
    """Dialog holding one text area and four action buttons, stacked
    vertically."""
    def __init__(self, parent=None):
        super().__init__(parent)
        self.qtxt1 = QTextEdit(self)
        self.btn1 = QPushButton("Start", self)
        self.btn2 = QPushButton("Stop", self)
        self.btn3 = QPushButton("add 100", self)
        self.btn4 = QPushButton("send instance", self)
        layout = QVBoxLayout()
        # Same stacking order as before: text box on top, buttons below.
        for widget in (self.qtxt1, self.btn1, self.btn2, self.btn3,
                       self.btn4):
            layout.addWidget(widget)
        self.setLayout(layout)
        self.setGeometry(100, 50, 300, 300)
class Test:
    """Simple payload object passed across threads via a Qt signal."""
    def __init__(self):
        # Bug fix: the original assigned a *local* variable `name`, so a
        # fresh Test() had no `name` attribute until a caller set it
        # externally. Bind it to the instance instead.
        self.name = ""
class MyMain(MyMainGUI):
    """Main window: connects the GUI buttons to a background Worker thread
    using custom Qt signals in both directions."""
    # main -> worker: ask the worker to add 100 seconds to its counter.
    add_sec_signal = pyqtSignal()
    # main -> worker: carries an arbitrary Python object ("PyQt_PyObject").
    # NOTE(review): "singal" is a typo, kept because it is part of the
    # public attribute name.
    send_instance_singal = pyqtSignal("PyQt_PyObject")
    def __init__(self, parent=None):
        """Build the GUI, create the worker thread and wire all signals."""
        super().__init__(parent)
        self.btn1.clicked.connect(self.time_start)
        self.btn2.clicked.connect(self.time_stop)
        self.btn3.clicked.connect(self.add_sec)
        self.btn4.clicked.connect(self.send_instance)
        self.th = Worker(parent=self)
        self.th.sec_changed.connect(self.time_update) # custom signal from worker thread to main thread
        self.add_sec_signal.connect(self.th.add_sec) # custom signal from main thread to worker thread
        self.send_instance_singal.connect(self.th.recive_instance_singal)
        self.show()
    @pyqtSlot()
    def time_start(self):
        """Start the worker thread and (re)enable its loop flag."""
        self.th.start()
        self.th.working = True
    @pyqtSlot()
    def time_stop(self):
        """Ask the worker loop to stop after its current iteration."""
        self.th.working = False
    @pyqtSlot()
    def add_sec(self):
        """Forward the button click to the worker via add_sec_signal."""
        print(".... add singal emit....")
        self.add_sec_signal.emit()
    @pyqtSlot(str)
    def time_update(self, msg):
        """Append a progress message received from the worker thread."""
        self.qtxt1.append(msg)
    @pyqtSlot()
    def send_instance(self):
        """Send a freshly built Test instance to the worker thread."""
        t1 = Test()
        t1.name = "SuperPower!!!"
        self.send_instance_singal.emit(t1)
class Worker(QThread):
    """Background thread that emits an elapsed-seconds message once per
    second while ``working`` is True."""
    # worker -> main: formatted elapsed-time message.
    sec_changed = pyqtSignal(str)
    def __init__(self, sec=0, parent=None):
        """:param sec: starting second count; :param parent: owning window."""
        super().__init__()
        self.main = parent
        # NOTE(review): `working` is read/written from two threads without
        # a lock — common for a simple stop flag, but worth confirming.
        self.working = True
        self.sec = sec
        # self.main.add_sec_signal.connect(self.add_sec) # This also works. # custom signal from main thread to worker thread
    def __del__(self):
        # NOTE(review): __del__ on a QThread is not a reliable shutdown
        # hook; it may run late or not at all during interpreter teardown.
        print(".... end thread.....")
        self.wait()
    def run(self):
        """Thread body: emit the counter once per second until stopped."""
        while self.working:
            self.sec_changed.emit('time (secs):{}'.format(self.sec))
            self.sleep(1)
            self.sec += 1
    @pyqtSlot()
    def add_sec(self):
        """Slot for add_sec_signal: jump the counter forward by 100."""
        print("add_sec....")
        self.sec += 100
    @pyqtSlot("PyQt_PyObject")
    def recive_instance_singal(self, inst):
        """Slot for send_instance_singal: print the received object's name."""
        print(inst.name)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
form = MyMain()
app.exec_() | test.py | from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class MyMainGUI(QDialog):
def __init__(self, parent=None):
super().__init__(parent)
self.qtxt1 = QTextEdit(self)
self.btn1 = QPushButton("Start", self)
self.btn2 = QPushButton("Stop", self)
self.btn3 = QPushButton("add 100", self)
self.btn4 = QPushButton("send instance", self)
vbox = QVBoxLayout()
vbox.addWidget(self.qtxt1)
vbox.addWidget(self.btn1)
vbox.addWidget(self.btn2)
vbox.addWidget(self.btn3)
vbox.addWidget(self.btn4)
self.setLayout(vbox)
self.setGeometry(100, 50, 300, 300)
class Test:
def __init__(self):
name = ""
class MyMain(MyMainGUI):
add_sec_signal = pyqtSignal()
send_instance_singal = pyqtSignal("PyQt_PyObject")
def __init__(self, parent=None):
super().__init__(parent)
self.btn1.clicked.connect(self.time_start)
self.btn2.clicked.connect(self.time_stop)
self.btn3.clicked.connect(self.add_sec)
self.btn4.clicked.connect(self.send_instance)
self.th = Worker(parent=self)
self.th.sec_changed.connect(self.time_update) # custom signal from worker thread to main thread
self.add_sec_signal.connect(self.th.add_sec) # custom signal from main thread to worker thread
self.send_instance_singal.connect(self.th.recive_instance_singal)
self.show()
@pyqtSlot()
def time_start(self):
self.th.start()
self.th.working = True
@pyqtSlot()
def time_stop(self):
self.th.working = False
@pyqtSlot()
def add_sec(self):
print(".... add singal emit....")
self.add_sec_signal.emit()
@pyqtSlot(str)
def time_update(self, msg):
self.qtxt1.append(msg)
@pyqtSlot()
def send_instance(self):
t1 = Test()
t1.name = "SuperPower!!!"
self.send_instance_singal.emit(t1)
class Worker(QThread):
sec_changed = pyqtSignal(str)
def __init__(self, sec=0, parent=None):
super().__init__()
self.main = parent
self.working = True
self.sec = sec
# self.main.add_sec_signal.connect(self.add_sec) # 이것도 작동함. # custom signal from main thread to worker thread
def __del__(self):
print(".... end thread.....")
self.wait()
def run(self):
while self.working:
self.sec_changed.emit('time (secs):{}'.format(self.sec))
self.sleep(1)
self.sec += 1
@pyqtSlot()
def add_sec(self):
print("add_sec....")
self.sec += 100
@pyqtSlot("PyQt_PyObject")
def recive_instance_singal(self, inst):
print(inst.name)
if __name__ == "__main__":
import sys
app = QApplication(sys.argv)
form = MyMain()
app.exec_() | 0.429669 | 0.069226 |
import numbers
import numpy
import autofile.info
from autofile.system._util import utc_time as _utc_time
def conformer_trunk(nsamp, tors_ranges):
    """ conformer trunk information
    :param nsamp: the number of samples
    :type nsamp: int
    :param tors_ranges: sampling ranges [(start, end)] for each torsional
        coordinate, by z-matrix coordinate name; given in radians,
        stored in degrees
    :type tors_ranges: dict[str: (float, float)]
    """
    rad2deg = 180. / numpy.pi
    # Convert each (start, end) pair from radians to degrees.
    tors_range_dct = {
        name: (rng[0] * rad2deg, rng[1] * rad2deg)
        for name, rng in dict(tors_ranges).items()
    }
    # Keys must be strings; values must be pairs of real numbers.
    assert all(isinstance(name, str) and len(rng) == 2
               and all(isinstance(x, numbers.Real) for x in rng)
               for name, rng in tors_range_dct.items())
    tors_ranges = autofile.info.Info(**tors_range_dct)
    assert isinstance(nsamp, numbers.Integral)
    inf_obj = autofile.info.Info(nsamp=nsamp, tors_ranges=tors_ranges)
    assert autofile.info.matches_function_signature(inf_obj, conformer_trunk)
    return inf_obj
def tau_trunk(nsamp, tors_ranges):
    """ tau trunk information
    :param nsamp: the number of samples
    :type nsamp: int
    :param tors_ranges: sampling ranges [(start, end)] for each torsional
        coordinate, by z-matrix coordinate name
    :type tors_ranges: dict[str: (float, float)]
    """
    tors_range_dct = dict(tors_ranges)
    # Convert each (start, end) pair from radians to degrees in place.
    for key, rng in tors_range_dct.items():
        tors_range_dct[key] = (rng[0]*180./numpy.pi, rng[1]*180./numpy.pi)
    # Validated after conversion: string keys, pairs of real numbers.
    assert all(isinstance(key, str) and len(rng) == 2
               and all(isinstance(x, numbers.Real) for x in rng)
               for key, rng in tors_range_dct.items())
    tors_ranges = autofile.info.Info(**tors_range_dct)
    assert isinstance(nsamp, numbers.Integral)
    inf_obj = autofile.info.Info(nsamp=nsamp, tors_ranges=tors_ranges)
    # NOTE(review): identical to conformer_trunk except for the signature
    # check below; a shared helper could remove the duplication.
    assert autofile.info.matches_function_signature(inf_obj, tau_trunk)
    return inf_obj
def scan_branch(grids):
    """ scan trunk information
    :param grids: sampling grids, [val1, val2, ...], for each coordinate,
        by coordinate name
    :type grids: dict[str: list[float]]
    """
    grid_dct = dict(grids)
    # note:renormalization of angle ranges needs to be updated for 2D grids.
    # Coordinates whose name lacks an 'R' are treated as angles and
    # converted from radians to degrees; presumably the grid values are
    # numpy arrays (a plain list * float would raise) — TODO confirm.
    for key, rng in grid_dct.items():
        if 'R' not in key:
            grid_dct[key] = rng*180./numpy.pi
    # Keys must be strings; each grid must be 1-D with real entries.
    assert all(isinstance(key, str) and numpy.ndim(vals) == 1
               and all(isinstance(x, numbers.Real) for x in vals)
               for key, vals in grid_dct.items())
    grids = autofile.info.Info(**grid_dct)
    inf_obj = autofile.info.Info(grids=grids)
    assert autofile.info.matches_function_signature(inf_obj, scan_branch)
    return inf_obj
def vpt2_trunk(fermi):
    """ vpt2 trunk information
    :param fermi: description of fermi resonance treatment
    :type fermi: str
    """
    assert isinstance(fermi, str)
    # Wrap the description and verify the Info fields mirror this
    # function's signature.
    info_obj = autofile.info.Info(fermi=fermi)
    assert autofile.info.matches_function_signature(info_obj, vpt2_trunk)
    return info_obj
def lennard_jones(potential, nsamp,
                  method, basis, program, version):
    """ energy transfer trunk

    Bundles Lennard-Jones run metadata (potential form, sample count, and
    the method/basis/program/version used) into an Info object whose
    fields must mirror this function's signature.
    """
    inf_obj = autofile.info.Info(potential=potential, nsamp=nsamp,
                                 method=method, basis=basis,
                                 program=program, version=version)
    assert autofile.info.matches_function_signature(
        inf_obj, lennard_jones)
    return inf_obj
class RunStatus():
    """ run statuses

    String constants recorded in run Info objects (see :func:`run`).
    """
    # Run has started and not yet finished.
    RUNNING = "running"
    # Run finished normally.
    SUCCESS = "succeeded"
    # Run finished abnormally.
    FAILURE = "failed"
def run(job, prog, version, method, basis, status, utc_start_time=None,
        utc_end_time=None):
    """ run information

    Bundles the metadata of a single electronic-structure run into an
    Info object whose fields mirror this function's signature.
    """
    fields = dict(
        job=job,
        prog=prog,
        version=version,
        method=method,
        basis=basis,
        status=status,
        utc_start_time=utc_start_time,
        utc_end_time=utc_end_time,
    )
    inf_obj = autofile.info.Info(**fields)
    assert autofile.info.matches_function_signature(inf_obj, run)
    return inf_obj
def utc_time():
""" current run time
"""
return _utc_time() | autofile/system/info.py | import numbers
import numpy
import autofile.info
from autofile.system._util import utc_time as _utc_time
def conformer_trunk(nsamp, tors_ranges):
""" conformer trunk information
:param nsamp: the number of samples
:type nsamp: int
:param tors_ranges: sampling ranges [(start, end)] for each torsional
coordinate, by z-matrix coordinate name
:type tors_ranges: dict[str: (float, float)]
"""
tors_range_dct = dict(tors_ranges)
for key, rng in tors_range_dct.items():
tors_range_dct[key] = (rng[0]*180./numpy.pi, rng[1]*180./numpy.pi)
assert all(isinstance(key, str) and len(rng) == 2
and all(isinstance(x, numbers.Real) for x in rng)
for key, rng in tors_range_dct.items())
tors_ranges = autofile.info.Info(**tors_range_dct)
assert isinstance(nsamp, numbers.Integral)
inf_obj = autofile.info.Info(nsamp=nsamp, tors_ranges=tors_ranges)
assert autofile.info.matches_function_signature(inf_obj, conformer_trunk)
return inf_obj
def tau_trunk(nsamp, tors_ranges):
""" tau trunk information
:param nsamp: the number of samples
:type nsamp: int
:param tors_ranges: sampling ranges [(start, end)] for each torsional
coordinate, by z-matrix coordinate name
:type tors_ranges: dict[str: (float, float)]
"""
tors_range_dct = dict(tors_ranges)
for key, rng in tors_range_dct.items():
tors_range_dct[key] = (rng[0]*180./numpy.pi, rng[1]*180./numpy.pi)
assert all(isinstance(key, str) and len(rng) == 2
and all(isinstance(x, numbers.Real) for x in rng)
for key, rng in tors_range_dct.items())
tors_ranges = autofile.info.Info(**tors_range_dct)
assert isinstance(nsamp, numbers.Integral)
inf_obj = autofile.info.Info(nsamp=nsamp, tors_ranges=tors_ranges)
assert autofile.info.matches_function_signature(inf_obj, tau_trunk)
return inf_obj
def scan_branch(grids):
""" scan trunk information
:param grids: sampling grids, [val1, val2, ...], for each coordinate,
by coordinate name
:type grids: dict[str: list[float]]
"""
grid_dct = dict(grids)
# note:renormalization of angle ranges needs to be updated for 2D grids.
for key, rng in grid_dct.items():
if 'R' not in key:
grid_dct[key] = rng*180./numpy.pi
assert all(isinstance(key, str) and numpy.ndim(vals) == 1
and all(isinstance(x, numbers.Real) for x in vals)
for key, vals in grid_dct.items())
grids = autofile.info.Info(**grid_dct)
inf_obj = autofile.info.Info(grids=grids)
assert autofile.info.matches_function_signature(inf_obj, scan_branch)
return inf_obj
def vpt2_trunk(fermi):
""" vpt2 trunk information
:param fermi: description of fermi resonance treatment
:type fermi: str
"""
assert isinstance(fermi, str)
inf_obj = autofile.info.Info(fermi=fermi)
assert autofile.info.matches_function_signature(inf_obj, vpt2_trunk)
return inf_obj
def lennard_jones(potential, nsamp,
method, basis, program, version):
""" energy transfer trunk """
inf_obj = autofile.info.Info(potential=potential, nsamp=nsamp,
method=method, basis=basis,
program=program, version=version)
assert autofile.info.matches_function_signature(
inf_obj, lennard_jones)
return inf_obj
class RunStatus():
""" run statuses """
RUNNING = "running"
SUCCESS = "succeeded"
FAILURE = "failed"
def run(job, prog, version, method, basis, status, utc_start_time=None,
utc_end_time=None):
""" run information
"""
inf_obj = autofile.info.Info(
job=job,
prog=prog,
version=version,
method=method,
basis=basis,
status=status,
utc_start_time=utc_start_time,
utc_end_time=utc_end_time,
)
assert autofile.info.matches_function_signature(inf_obj, run)
return inf_obj
def utc_time():
""" current run time
"""
return _utc_time() | 0.741206 | 0.731059 |
# Build an index of every conv layer: (layer index, global filter-id range).
# `params_num` ends up as the total number of conv filters in the model.
params_num = 0
allWeights = []
for i in range(len(model.layers)):
    if "conv" in model.layers[i].name:
        weights = model.layers[i].get_weights()[0]
        # Assumes a Keras-style kernel with the filter count on axis 3 —
        # TODO confirm the layout with the model definition.
        params = weights.shape[3]
        allWeights.append((i,np.arange(params_num,params_num+params)))
        params_num += params
# First mutation pass: cyclically swap a random fraction of each conv
# layer's filters, keeping mutants that stay accurate enough.
# NOTE(review): the accumulator initialisation below is commented out, so
# this block raises NameError unless NS_mutation_weights already exists in
# the session (this reads like an exported notebook cell).
#NS_mutation_weights = []
for i in range(100):
    print(i, end = "\r")
    # Restore the pristine trained weights before each mutation attempt.
    model.load_weights('cifar10resnet_weights.h5')
    gamma = 0.07 # portion of weights to be changed
    # NOTE(review): the inner loop reuses `i`, shadowing the attempt
    # counter; the progress printout above is clobbered as a result.
    for i in range(len(allWeights)):
        weights, bias = model.layers[allWeights[i][0]].get_weights()
        params = weights.shape[3]
        # Pick a random gamma-fraction of this layer's filters...
        rnd = np.arange(params)
        np.random.shuffle(rnd)
        rnd = rnd[:int(params*gamma)]
        if len(rnd) >= 2:
            # ...and rotate their kernels one position (cyclic swap).
            firstW = weights[:,:,:,rnd[0]]
            for j in range(len(rnd)-1):
                weights[:,:,:,rnd[j]] = weights[:,:,:,rnd[j+1]]
            weights[:,:,:,rnd[-1]] = firstW
        model.layers[allWeights[i][0]].set_weights([weights, bias])
    res = model.predict(x_test)
    acc = np.argmax(res, axis = 1) == np.argmax(y_test, axis = 1)
    acc = np.mean(acc)
    if acc > 0.73:
        print(acc)
        NS_mutation_weights.append(model.get_weights())
    # NOTE(review): dumped every iteration (rewrites the whole pickle) and
    # the open() handle is never closed — consider a `with` block after
    # the loop instead.
    pickle.dump(NS_mutation_weights, open('NS_mutation_weights.p', 'wb'))
# Baseline accuracy of the (last mutated) model on a training subset,
# used as the acceptance threshold reference below.
train_pred = np.argmax(model.predict(x_train[:10000]), axis = 1)
train_label = np.argmax(y_train[:10000], axis = 1)
train_acc = np.mean(train_pred == train_label)
#NAI
# Second mutation pass ("NAI"): negate a random 1% of all conv filters,
# chosen over the global filter index built above.
ok_model = []
for i in range(100):
    print(i, end = "\r")
    model.load_weights('cifar10resnet_weights.h5')
    gamma = 0.01
    rnd = np.arange(params_num)
    np.random.shuffle(rnd)
    rnd = rnd[:int(params_num*gamma)]
    rnd = sorted(rnd)
    # NOTE(review): `i` is shadowed here as well.
    for i in range(len(allWeights)):
        for num in rnd:
            if num in allWeights[i][1]:
                # Map the global filter id back to this layer's local index.
                index = np.argwhere(allWeights[i][1] == num).item()
                w = model.layers[allWeights[i][0]].get_weights()[0]
                b = model.layers[allWeights[i][0]].get_weights()[1]
                w[:,:,:,index] = -1*w[:,:,:,index]
                model.layers[allWeights[i][0]].set_weights([w,b])
    res = model.predict(x_test)
    acc = np.argmax(res, axis = 1) == np.argmax(y_test, axis = 1)
    acc = np.mean(acc)
    # Accept mutants within 90% of the baseline accuracy.
    if acc > 0.9*train_acc:
        print(acc)
        ok_model.append(model.get_weights())
    pickle.dump(ok_model, open('ok_model.p', 'wb'))
ok_model = pickle.load(open('ok_model.p', 'rb'))
len(ok_model)
pickle.dump(ok_model, open('ok_model_train.p', 'wb'))
ok_model = pickle.load(open('ok_model_train.p', 'rb'))
len(ok_model) | cifar10/scripts/model_mutation.py |
params_num = 0
allWeights = []
for i in range(len(model.layers)):
if "conv" in model.layers[i].name:
weights = model.layers[i].get_weights()[0]
params = weights.shape[3]
allWeights.append((i,np.arange(params_num,params_num+params)))
params_num += params
#NS_mutation_weights = []
for i in range(100):
print(i, end = "\r")
model.load_weights('cifar10resnet_weights.h5')
gamma = 0.07 # portion of weights to be changed
for i in range(len(allWeights)):
weights, bias = model.layers[allWeights[i][0]].get_weights()
params = weights.shape[3]
rnd = np.arange(params)
np.random.shuffle(rnd)
rnd = rnd[:int(params*gamma)]
if len(rnd) >= 2:
firstW = weights[:,:,:,rnd[0]]
for j in range(len(rnd)-1):
weights[:,:,:,rnd[j]] = weights[:,:,:,rnd[j+1]]
weights[:,:,:,rnd[-1]] = firstW
model.layers[allWeights[i][0]].set_weights([weights, bias])
res = model.predict(x_test)
acc = np.argmax(res, axis = 1) == np.argmax(y_test, axis = 1)
acc = np.mean(acc)
if acc > 0.73:
print(acc)
NS_mutation_weights.append(model.get_weights())
pickle.dump(NS_mutation_weights, open('NS_mutation_weights.p', 'wb'))
train_pred = np.argmax(model.predict(x_train[:10000]), axis = 1)
train_label = np.argmax(y_train[:10000], axis = 1)
train_acc = np.mean(train_pred == train_label)
#NAI
ok_model = []
for i in range(100):
print(i, end = "\r")
model.load_weights('cifar10resnet_weights.h5')
gamma = 0.01
rnd = np.arange(params_num)
np.random.shuffle(rnd)
rnd = rnd[:int(params_num*gamma)]
rnd = sorted(rnd)
for i in range(len(allWeights)):
for num in rnd:
if num in allWeights[i][1]:
index = np.argwhere(allWeights[i][1] == num).item()
w = model.layers[allWeights[i][0]].get_weights()[0]
b = model.layers[allWeights[i][0]].get_weights()[1]
w[:,:,:,index] = -1*w[:,:,:,index]
model.layers[allWeights[i][0]].set_weights([w,b])
res = model.predict(x_test)
acc = np.argmax(res, axis = 1) == np.argmax(y_test, axis = 1)
acc = np.mean(acc)
if acc > 0.9*train_acc:
print(acc)
ok_model.append(model.get_weights())
pickle.dump(ok_model, open('ok_model.p', 'wb'))
ok_model = pickle.load(open('ok_model.p', 'rb'))
len(ok_model)
pickle.dump(ok_model, open('ok_model_train.p', 'wb'))
ok_model = pickle.load(open('ok_model_train.p', 'rb'))
len(ok_model) | 0.282691 | 0.339116 |
from __future__ import (print_function, unicode_literals, division,
absolute_import)
import io
import os
import re
import subprocess
from common import vprint, exe, ff, wf, check, get_latest_driver_version
ETC = "/etc/glance/glance-api.conf"
PACKAGE_INSTALL = "/usr/lib/python2.7/dist-packages/glance_store"
PACKAGE_INSTALL_2 = "/usr/local/lib/python2.7/dist-packages/glance_store"
SITE_PACKAGE_INSTALL = "/usr/lib/python2.7/site-packages/glance_store"
SITE_PACKAGE_INSTALL_2 = "/usr/local/lib/python2.7/site-packages/glance_store"
DEVSTACK_INSTALL = "/usr/local/lib/python2.7/site-packages/glance_store"
TAGS = "https://api.github.com/repos/Datera/glance-driver/tags"
VERSION_RE = re.compile(r"^\s+VERSION = ['\"]v([\d\.]+)['\"]\s*$")
ETC_DEFAULT_RE = re.compile(r"^\[DEFAULT\]\s*$")
ETC_SECTION_RE = re.compile(r"^\[glance_store\]\s*$")
LOCATIONS = [PACKAGE_INSTALL, PACKAGE_INSTALL_2, SITE_PACKAGE_INSTALL,
SITE_PACKAGE_INSTALL_2, DEVSTACK_INSTALL]
def detect_glance_install():
    """Return the glance_store install directory, or None if not found.

    Tries the known package locations first, then falls back to a
    filesystem search for the driver file.
    """
    for candidate in LOCATIONS:
        if os.path.isdir(candidate):
            return candidate
    # None of the standard locations exist: search the filesystem.
    try:
        vprint("Normal cinder install not found, searching for driver")
        found = exe("sudo find / -name datera.py")
        # An empty result, or a hit inside the source checkout itself,
        # means no installed driver.
        if not found or found.isspace() or "glance-driver" in found:
            return None
        return found.strip().replace(
            "/_drivers/datera.py", "")
    except (subprocess.CalledProcessError, ValueError):
        return None
def find_entry_points_file():
    """Locate glance_store's entry_points.txt, or return None if absent."""
    output = exe("find /usr/ -name 'entry_points.txt' | grep glance_store")
    return output.strip() if output else None
@check("Glance", "driver", "plugin", "image", "local")
def check_glance_driver(config):
    """Verify the Datera Glance driver is installed, current and registered.

    Checks, in order: the glance_store install location, the driver file,
    its VERSION constant against the latest release tag, the
    entry_points.txt registration, and the 'datera' reference in
    backend.py.

    :param config: unused here; part of the ``check`` plugin signature
    """
    version = get_latest_driver_version(TAGS)
    need_version = version.strip("v")
    loc = detect_glance_install()
    if not loc:
        return ff("Could not detect Glance install location", "6515ADB8")
    dfile = os.path.join(loc, "_drivers/datera.py")
    if not os.path.exists(dfile):
        errloc = os.path.join(loc, "_drivers")
        return ff("Couldn't detect Datera Glance driver install at "
                  "{}".format(errloc), "DD51CEC9")
    # `version` is re-used: from here on it holds the locally installed
    # version parsed out of the driver file.
    version = None
    with io.open(dfile, 'r') as f:
        for line in f:
            version = VERSION_RE.match(line)
            if version:
                version = version.group(1)
                break
    if not version:
        return ff("No version detected for Datera Glance driver at "
                  "{}".format(dfile), "75A8A315")
    if version != need_version:
        return ff("Glance Driver version mismatch, have: {}, want: "
                  "{}".format(version, need_version), "B65FD598")
    # The datera store must also be registered as an entry point.
    entry = find_entry_points_file()
    if not entry:
        return ff("Could not find entry_points.txt file for glance_store",
                  "842A4DB1")
    efound = None
    with io.open(entry) as f:
        for line in f:
            if 'datera' in line:
                efound = line
                break
    if not efound:
        return ff("Could not find 'datera' entry in {}".format(entry),
                  "22DC6275")
    # NOTE(review): split("=") raises ValueError if the matched line
    # contains more than one '=' — confirm the entry format is always
    # 'name = value'.
    k, v = efound.split("=")
    if k.strip() != 'datera':
        return ff("entry_points.txt entry malformed", "3F9F67BF")
    if v.strip() != 'glance_store._drivers.datera:Store':
        return ff("entry_points.txt entry malformed", "3F9F67BF")
    # backend.py must mention 'datera' before the Indexable class
    # definition (i.e. in the default_store option choices).
    backend = os.path.join(loc, "backend.py")
    bfound = False
    with io.open(backend) as f:
        for line in f:
            if 'datera' in line:
                bfound = True
                break
            if 'class Indexable' in line:
                break
    if not bfound:
        ff("'datera' has not been added to the 'default_store' StrOpt's "
           "'choices' parameter", "C521E039")
@check("Glance Conf", "driver", "plugin", "config", "image", "local")
def check_glance_conf(config):
    """Validate the Datera entries in glance-api.conf.

    Scans the [glance_store] section for the datera store registration
    and the san_ip/login/password values, reporting mismatches or
    missing keys via ff/wf.

    :param config: dict with 'mgmt_ip', 'username' and 'password' keys
    """
    # Fixes: dropped the stray leading `pass`; `default` is now
    # initialized so an empty config file no longer raises NameError.
    default = None
    section = None
    with io.open(ETC, 'r') as f:
        # Scan forward to the [DEFAULT] section header.
        for line in f:
            default = ETC_DEFAULT_RE.match(line)
            if default:
                break
        if not default:
            ff("[DEFAULT] section missing from {}".format(ETC), "228241A8")
        # The file iterator keeps its position: continue scanning for the
        # [glance_store] section header.
        for line in f:
            section = ETC_SECTION_RE.match(line)
            if section:
                break
        if not section:
            return ff("[glance_store] section missing from {}".format(ETC),
                      "AFCBBDD7")
        # Collect the section body up to the next section header.
        dsection = []
        section_match = re.compile(r"^\[.*\]")
        for line in f:
            if section_match.match(line):
                break
            dsection.append(line)
    ip = config['mgmt_ip']
    user = config['username']
    passwd = config['password']
    # Presence flags for the required keys.
    san_check = False
    user_check = False
    pass_check = False
    stores_check = False
    default_check = False
    for line in dsection:
        if line.startswith("stores"):
            stores_check = True
            if "datera" not in line:
                ff("datera is not set under 'stores' in {}".format(ETC),
                   "0D862946")
        if line.startswith("default_store"):
            default_check = True
            if "datera" not in line:
                # Only a warning: another default store is legal.
                wf("datera is not set as default_store in {}".format(ETC),
                   "B74CEBC3")
        if line.startswith("datera_san_ip"):
            san_check = True
            if line.split("=")[-1].strip() != ip:
                ff("datera_san_ip doesn't match mgmt ip", "2330CACB")
        if line.startswith("datera_san_login"):
            user_check = True
            if line.split("=")[-1].strip() != user:
                ff("datera_san_login doesn't match username",
                   "E9F02293")
        if line.startswith("datera_san_password"):
            pass_check = True
            if line.split("=")[-1].strip() != passwd:
                ff("datera_san_password doesn't match password",
                   "<PASSWORD>")
    if not stores_check:
        ff("'stores' entry not found under [glance_store]", "11F30DCF")
    if not default_check:
        ff("'default_store' entry not found under [glance_store]",
           "540C3008")
    if not san_check:
        ff("'datera_san_ip' entry not found under [glance_store]",
           "42481C71")
    if not user_check:
        ff("'datera_san_login' entry not found under [glance_store]",
           "6E281004")
    if not pass_check:
        ff("'datera_san_password' entry not found under [glance_store]",
           "<PASSWORD>")
def load_checks():
return [check_glance_driver,
check_glance_conf] | src/plugins/check_glance.py | from __future__ import (print_function, unicode_literals, division,
absolute_import)
import io
import os
import re
import subprocess
from common import vprint, exe, ff, wf, check, get_latest_driver_version
ETC = "/etc/glance/glance-api.conf"
PACKAGE_INSTALL = "/usr/lib/python2.7/dist-packages/glance_store"
PACKAGE_INSTALL_2 = "/usr/local/lib/python2.7/dist-packages/glance_store"
SITE_PACKAGE_INSTALL = "/usr/lib/python2.7/site-packages/glance_store"
SITE_PACKAGE_INSTALL_2 = "/usr/local/lib/python2.7/site-packages/glance_store"
DEVSTACK_INSTALL = "/usr/local/lib/python2.7/site-packages/glance_store"
TAGS = "https://api.github.com/repos/Datera/glance-driver/tags"
VERSION_RE = re.compile(r"^\s+VERSION = ['\"]v([\d\.]+)['\"]\s*$")
ETC_DEFAULT_RE = re.compile(r"^\[DEFAULT\]\s*$")
ETC_SECTION_RE = re.compile(r"^\[glance_store\]\s*$")
LOCATIONS = [PACKAGE_INSTALL, PACKAGE_INSTALL_2, SITE_PACKAGE_INSTALL,
SITE_PACKAGE_INSTALL_2, DEVSTACK_INSTALL]
def detect_glance_install():
for path in LOCATIONS:
if os.path.isdir(path):
return path
else:
result = None
try:
vprint("Normal cinder install not found, searching for driver")
result = exe("sudo find / -name datera.py")
if not result or result.isspace() or "glance-driver" in result:
return None
return result.strip().replace(
"/_drivers/datera.py", "")
except (subprocess.CalledProcessError, ValueError):
return None
def find_entry_points_file():
result = exe("find /usr/ -name 'entry_points.txt' | grep glance_store")
if not result:
return None
return result.strip()
@check("Glance", "driver", "plugin", "image", "local")
def check_glance_driver(config):
version = get_latest_driver_version(TAGS)
need_version = version.strip("v")
loc = detect_glance_install()
if not loc:
return ff("Could not detect Glance install location", "6515ADB8")
dfile = os.path.join(loc, "_drivers/datera.py")
if not os.path.exists(dfile):
errloc = os.path.join(loc, "_drivers")
return ff("Couldn't detect Datera Glance driver install at "
"{}".format(errloc), "DD51CEC9")
version = None
with io.open(dfile, 'r') as f:
for line in f:
version = VERSION_RE.match(line)
if version:
version = version.group(1)
break
if not version:
return ff("No version detected for Datera Glance driver at "
"{}".format(dfile), "75A8A315")
if version != need_version:
return ff("Glance Driver version mismatch, have: {}, want: "
"{}".format(version, need_version), "B65FD598")
entry = find_entry_points_file()
if not entry:
return ff("Could not find entry_points.txt file for glance_store",
"842A4DB1")
efound = None
with io.open(entry) as f:
for line in f:
if 'datera' in line:
efound = line
break
if not efound:
return ff("Could not find 'datera' entry in {}".format(entry),
"22DC6275")
k, v = efound.split("=")
if k.strip() != 'datera':
return ff("entry_points.txt entry malformed", "3F9F67BF")
if v.strip() != 'glance_store._drivers.datera:Store':
return ff("entry_points.txt entry malformed", "3F9F67BF")
backend = os.path.join(loc, "backend.py")
bfound = False
with io.open(backend) as f:
for line in f:
if 'datera' in line:
bfound = True
break
if 'class Indexable' in line:
break
if not bfound:
ff("'datera' has not been added to the 'default_store' StrOpt's "
"'choices' parameter", "C521E039")
@check("Glance Conf", "driver", "plugin", "config", "image", "local")
def check_glance_conf(config):
pass
section = None
with io.open(ETC, 'r') as f:
for line in f:
default = ETC_DEFAULT_RE.match(line)
if default:
break
if not default:
ff("[DEFAULT] section missing from {}".format(ETC), "228241A8")
for line in f:
section = ETC_SECTION_RE.match(line)
if section:
break
if not section:
return ff("[glance_store] section missing from {}".format(ETC),
"AFCBBDD7")
dsection = []
section_match = re.compile(r"^\[.*\]")
for line in f:
if section_match.match(line):
break
dsection.append(line)
ip = config['mgmt_ip']
user = config['username']
passwd = config['password']
san_check = False
user_check = False
pass_check = False
stores_check = False
default_check = False
for line in dsection:
if line.startswith("stores"):
stores_check = True
if "datera" not in line:
ff("datera is not set under 'stores' in {}".format(ETC),
"0D862946")
if line.startswith("default_store"):
default_check = True
if "datera" not in line:
wf("datera is not set as default_store in {}".format(ETC),
"B74CEBC3")
if line.startswith("datera_san_ip"):
san_check = True
if line.split("=")[-1].strip() != ip:
ff("datera_san_ip doesn't match mgmt ip", "2330CACB")
if line.startswith("datera_san_login"):
user_check = True
if line.split("=")[-1].strip() != user:
ff("datera_san_login doesn't match username",
"E9F02293")
if line.startswith("datera_san_password"):
pass_check = True
if line.split("=")[-1].strip() != passwd:
ff("datera_san_password doesn't match password",
"<PASSWORD>")
if not stores_check:
ff("'stores' entry not found under [glance_store]", "11F30DCF")
if not default_check:
ff("'default_store' entry not found under [glance_store]",
"540C3008")
if not san_check:
ff("'datera_san_ip' entry not found under [glance_store]",
"42481C71")
if not user_check:
ff("'datera_san_login' entry not found under [glance_store]",
"6E281004")
if not pass_check:
ff("'datera_san_password' entry not found under [glance_store]",
"<PASSWORD>")
def load_checks():
return [check_glance_driver,
check_glance_conf] | 0.28897 | 0.047603 |
from pathlib import Path
from typing import Any
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.base import ModelBase
from django_analyses.models import help_text
from django_analyses.models.input.definitions import messages
from django_analyses.models.input.input import Input
from django_analyses.models.managers.input_definition import (
InputDefinitionManager,
)
class InputDefinition(models.Model):
"""
Represents a single input definition in the database. Instances are used to
as the building blocks for
:class:`~django_analyses.models.input.input_specification.InputSpecification`
instances.
"""
#: Input key used when passing inputs to run some analysis.
key = models.CharField(max_length=50)
#: A description of this input definition.
description = models.TextField(blank=True, null=True)
#: Whether this input is required for the execution of the analysis.
required = models.BooleanField(default=False)
#: Child models may allow setting a default value using the appropriate
#: :class:`~django.db.models.Field` subclass.
default = None
#: Whether this input definition is a configuration of the analysis
#: parameters or, e.g., a definition of the input or output of it.
is_configuration = models.BooleanField(
default=True, help_text=help_text.IS_CONFIGURATION
)
#: If the actual input to the analysis class is meant to be some attribute
#: of given input, the attribute name may be set here.
value_attribute = models.CharField(
max_length=255,
blank=True,
null=True,
help_text=help_text.VALUE_ATTRIBUTE,
)
#: If values passed as inputs matching this input definition should be
#: extracted from some object, this field specifies the name of the
#: attribute which will be called using :func:`get_db_value`.
db_value_preprocessing = models.CharField(
max_length=255,
blank=True,
null=True,
help_text=help_text.DB_VALUE_PREPROCESSING,
verbose_name="DB Value Preprocessing",
)
#: Whether the created inputs instances should be passed to interface's
#: class at initialization (False) or upon calling the run method (True).
run_method_input = models.BooleanField(
default=False, help_text=help_text.RUN_METHOD_INPUT
)
#: Each definition should override this class attribute in order to allow
#: for Input instances creation.
input_class = None
objects = InputDefinitionManager()
class Meta:
ordering = ("key",)
def __str__(self) -> str:
"""
Returns the string representation of this instance.
Returns
-------
str
String representation of this instance
"""
try:
input_type = self.input_class.__name__.replace("Input", "")
except AttributeError:
return self.key
else:
return f"{self.key:<25}\t{input_type:<15}"
def extract_nested_value(self, obj: Any, location: str) -> Any:
"""
Extract some nested attribute within an object.
Parameters
----------
obj : Any
The object containing the nested value
location : str
Address of nested attribute within object
Returns
-------
Any
Nested attribute value
"""
parts = location.split(".")
for part in parts:
obj = getattr(obj, part)
return obj() if callable(obj) else obj
def check_input_class_definition(self) -> None:
    """
    Checks the validity of the assigned :attr:`input_class`.

    Raises
    ------
    ValidationError
        Invalid :attr:`input_class` definition
    """
    base_name = f"{Input.__module__}.{Input.__name__}"
    is_model = isinstance(self.input_class, ModelBase)
    # Only direct subclasses of Input are accepted.
    is_direct_subclass = getattr(self.input_class, "__base__", None) is Input
    if not (self.input_class and is_model and is_direct_subclass):
        message = messages.INVALID_INPUT_CLASS.format(base_name=base_name)
        raise ValidationError(message)
def get_db_value(self, value: Any) -> Any:
    """
    Returns the appropriate DB value for inputs in which
    :attr:`db_value_preprocessing` is defined.

    Parameters
    ----------
    value : Any
        The raw input value (or a list of such values)

    Returns
    -------
    Any
        Preprocessed value ready for persistence

    Raises
    ------
    ValueError
        Value extraction failure for path-type preprocessing
    """
    if not (value and self.db_value_preprocessing):
        return value
    # BUG FIX: the "path" special case below was previously dead code — it
    # lived in an `elif` after a branch testing the (always truthy, when
    # equal to "path") db_value_preprocessing value, so it could never run.
    # Handle it before the generic attribute extraction.
    if self.db_value_preprocessing == "path":
        if isinstance(value, Path):
            return str(value)
        if isinstance(value, str) and Path(value).is_file():
            return value
        try:
            return str(value.path)
        except AttributeError:
            raise ValueError(
                f"Failed to infer path from {value} for {self.key}!"
            )
    location = self.db_value_preprocessing
    if isinstance(value, list):
        return [
            self.extract_nested_value(element, location)
            for element in value
        ]
    return self.extract_nested_value(value, location)
def get_or_create_input_instance(self, **kwargs) -> Input:
    """
    Creates an instance of the appropriate
    :class:`django_analyses.models.input.input.Input` subclass.

    Returns
    -------
    Input
        Matched or created instance
    """
    raw_value = kwargs.get("value")
    kwargs["value"] = self.get_db_value(raw_value)
    try:
        return self.input_class.objects.get_or_create(
            definition=self, **kwargs
        )
    except AttributeError:
        # Raise a descriptive ValidationError when input_class is invalid;
        # otherwise re-raise the original error.
        self.check_input_class_definition()
        raise
def validate(self) -> None:
    """
    Validates input definition instances before calling :func:`save`.
    This method should be overridden by subclasses that require some kind
    of custom validation.
    """
    # Intentionally a no-op in the base class; subclasses hook in here.
    pass
def save(self, *args, **kwargs):
    """
    Overrides the model's :meth:`~django.db.models.Model.save` method to
    run :meth:`validate` before persisting.

    For more information, see Django's documentation on `overriding model
    methods`_.

    .. _overriding model methods:
       https://docs.djangoproject.com/en/3.0/topics/db/models/#overriding-model-methods
    """
    # Subclass validation hook runs before the actual DB write.
    self.validate()
    # NOTE(review): the trailing "| ... |" text below is a dataset/extraction
    # artifact fused onto this line, not part of the original module.
    super().save(*args, **kwargs) | django_analyses/models/input/definitions/input_definition.py | from pathlib import Path
from typing import Any
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.base import ModelBase
from django_analyses.models import help_text
from django_analyses.models.input.definitions import messages
from django_analyses.models.input.input import Input
from django_analyses.models.managers.input_definition import (
InputDefinitionManager,
)
class InputDefinition(models.Model):
    """
    Represents a single input definition in the database. Instances are used
    as the building blocks for
    :class:`~django_analyses.models.input.input_specification.InputSpecification`
    instances.
    """

    #: Input key used when passing inputs to run some analysis.
    key = models.CharField(max_length=50)

    #: A description of this input definition.
    description = models.TextField(blank=True, null=True)

    #: Whether this input is required for the execution of the analysis.
    required = models.BooleanField(default=False)

    #: Child models may allow setting a default value using the appropriate
    #: :class:`~django.db.models.Field` subclass.
    default = None

    #: Whether this input definition is a configuration of the analysis
    #: parameters or, e.g., a definition of the input or output of it.
    is_configuration = models.BooleanField(
        default=True, help_text=help_text.IS_CONFIGURATION
    )

    #: If the actual input to the analysis class is meant to be some attribute
    #: of a given input, the attribute name may be set here.
    value_attribute = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        help_text=help_text.VALUE_ATTRIBUTE,
    )

    #: If values passed as inputs matching this input definition should be
    #: extracted from some object, this field specifies the name of the
    #: attribute which will be read using :func:`get_db_value`.
    db_value_preprocessing = models.CharField(
        max_length=255,
        blank=True,
        null=True,
        help_text=help_text.DB_VALUE_PREPROCESSING,
        verbose_name="DB Value Preprocessing",
    )

    #: Whether the created input instances should be passed to the interface's
    #: class at initialization (False) or upon calling the run method (True).
    run_method_input = models.BooleanField(
        default=False, help_text=help_text.RUN_METHOD_INPUT
    )

    #: Each definition should override this class attribute in order to allow
    #: for Input instances creation.
    input_class = None

    # Custom manager providing definition-specific queryset helpers.
    objects = InputDefinitionManager()

    class Meta:
        # Default queryset ordering: alphabetical by input key.
        ordering = ("key",)

    def __str__(self) -> str:
        """
        Returns the string representation of this instance.

        Returns
        -------
        str
            String representation of this instance
        """
        try:
            input_type = self.input_class.__name__.replace("Input", "")
        except AttributeError:
            # No (valid) input class assigned; fall back to the bare key.
            return self.key
        else:
            return f"{self.key:<25}\t{input_type:<15}"

    def extract_nested_value(self, obj: Any, location: str) -> Any:
        """
        Extract some nested attribute within an object.

        Parameters
        ----------
        obj : Any
            The object containing the nested value
        location : str
            Dot-separated address of the nested attribute within *obj*

        Returns
        -------
        Any
            Nested attribute value (called with no arguments if callable)
        """
        parts = location.split(".")
        for part in parts:
            obj = getattr(obj, part)
        return obj() if callable(obj) else obj

    def check_input_class_definition(self) -> None:
        """
        Checks the validity of the assigned :attr:`input_class`.

        Raises
        ------
        ValidationError
            Invalid :attr:`input_class` definition
        """
        input_base_name = f"{Input.__module__}.{Input.__name__}"
        not_model = not isinstance(self.input_class, ModelBase)
        base = getattr(self.input_class, "__base__", None)
        # Only direct subclasses of Input are accepted.
        not_input_subclass = base is not Input
        invalid_input_class = (
            not self.input_class or not_model or not_input_subclass
        )
        if invalid_input_class:
            message = messages.INVALID_INPUT_CLASS.format(
                base_name=input_base_name
            )
            raise ValidationError(message)

    def get_db_value(self, value: Any) -> Any:
        """
        Returns the appropriate DB value for inputs in which
        :attr:`db_value_preprocessing` is defined.

        Parameters
        ----------
        value : Any
            The raw input value (or a list of such values)

        Returns
        -------
        Any
            Preprocessed value ready for persistence

        Raises
        ------
        ValueError
            Value extraction failure for path-type preprocessing
        """
        if not (value and self.db_value_preprocessing):
            return value
        # BUG FIX: the "path" special case was previously dead code — it
        # lived in an `elif` after a branch testing the (always truthy,
        # when equal to "path") db_value_preprocessing value, so it could
        # never run. Handle it before generic attribute extraction.
        if self.db_value_preprocessing == "path":
            if isinstance(value, Path):
                return str(value)
            if isinstance(value, str) and Path(value).is_file():
                return value
            try:
                return str(value.path)
            except AttributeError:
                raise ValueError(
                    f"Failed to infer path from {value} for {self.key}!"
                )
        location = self.db_value_preprocessing
        if isinstance(value, list):
            return [
                self.extract_nested_value(element, location)
                for element in value
            ]
        return self.extract_nested_value(value, location)

    def get_or_create_input_instance(self, **kwargs) -> Input:
        """
        Creates an instance of the appropriate
        :class:`django_analyses.models.input.input.Input` subclass.

        Returns
        -------
        Input
            Matched or created instance
        """
        kwargs["value"] = self.get_db_value(kwargs.get("value"))
        try:
            return self.input_class.objects.get_or_create(
                definition=self, **kwargs
            )
        except AttributeError:
            # Raise a descriptive ValidationError when input_class is bad,
            # otherwise re-raise the original error.
            self.check_input_class_definition()
            raise

    def validate(self) -> None:
        """
        Validates input definition instances before calling :func:`save`.
        This method should be overridden by subclasses that require some kind
        of custom validation.
        """
        pass

    def save(self, *args, **kwargs):
        """
        Overrides the model's :meth:`~django.db.models.Model.save` method to
        run :meth:`validate` before persisting.

        For more information, see Django's documentation on `overriding model
        methods`_.

        .. _overriding model methods:
           https://docs.djangoproject.com/en/3.0/topics/db/models/#overriding-model-methods
        """
        self.validate()
        super().save(*args, **kwargs)
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, ClusterMixin
from itertools import repeat
class HistogramTransform(BaseEstimator, TransformerMixin):
    """Apply a histogram transform to the data."""

    def __init__(self, edges):
        """
        Parameters:
        -----------
        edges : dict, shape (n_channels, )
                Dictionary mapping 'channel_id' to an array containing the
                edges of the bins along that particular channel.
        """
        self.edges = edges

    def fit(self, X, y=None):
        """No parameters to estimate."""
        return self

    def transform(self, X, y=None):
        """Create a histogram according to the given bins.

        Parameters:
        -----------
        X : FCMeasurement,
            Contains the flow cytometer data.

        Returns a DataFrame with one row per multidimensional bin: the bin
        center along each channel plus a 'counts' column.
        """
        #extract the volume from the meta data (volume scaling disabled)
        #V = float(X.get_meta()['$VOL'])
        #extract only the columns of interest
        X_ = X[list(self.edges.keys())]
        sorted_keys = [c for c in list(X_.columns) if c in list(self.edges.keys())]
        #construct the multidimensional histogram
        H, edges = np.histogramdd(X_.values, bins=[self.edges[key] for key in sorted_keys])
        edges = np.array(edges)
        # NOTE(review): the 2-D slicing below requires every channel to have
        # the same number of bin edges; ragged edge arrays would make
        # np.array(edges) an object array and break this — confirm inputs.
        #get the bin centers (edge midpoints, assuming uniform bin width)
        centers = edges[:, 0:-1] + (edges[:, 1] - edges[:, 0]).reshape(-1, 1) / 2
        hist = pd.DataFrame(columns=list(self.edges.keys()) + ['counts'])
        bin_sizes = [len(e) for e in centers]
        # Repeat/copy factors that enumerate the cartesian grid of bin
        # centers in the same (C) order as the flattened histogram.
        nb_copies = np.cumprod([1] + bin_sizes[0:-1])
        nb_repeat = np.cumprod([1] + bin_sizes[-1:0:-1])[::-1]
        for (c, name) in enumerate(X_.columns):
            hist[name] = np.array(list(map(lambda e: [e] * nb_repeat[c], centers[c])) * nb_copies[c]).flatten()
        #hist['counts'] = np.array(H).flatten() * (9E+4 / V)
        hist['counts'] = np.array(H).flatten()
        return hist
class DTClassifier(BaseEstimator, TransformerMixin, ClusterMixin):
    """Cluster a dataset in clusters with same cardinality by recursively
    splitting the dataset along the axis of maximal variance. The splits are
    done using the median value so that each split has the same number of
    samples.

    NOTE(review): median ties are broken with np.random without a seed, so
    results are not reproducible across runs unless the global RNG is seeded.
    """

    def __init__(self, max_depth=3, columns=None, normalized = False, weight_decay=None):
        """
        Parameters:
        -----------
        max_depth : int, defaults to 3.
                    Maximal depth of the recurrence
        columns : list, defaults to None.
                  Apply the clustering along the specified columns only.
        normalized : boolean. Determines whether the final bin count is
                     normalized or not.
        weight_decay : float, [0; 1], default to None.
                       If None the decision tree classifier is fit on the
                       data X.
                       If not None the DC_classifier must follow a
                       Histogram_transform in the pipeline and the decision
                       tree classifier is fit on the exponentially weighted
                       moving average (ewma). Note that this requires that
                       the ewma is initialized before calling fit on the
                       pipeline. Larger weight decay discards contributions
                       of old FCS faster and a weight decay of zero
                       corresponds to a constant mean histogram fixed to the
                       initialized values.
        """
        self.max_depth = max_depth
        self.columns = columns
        self.normalized = normalized
        self.weight_decay = weight_decay

    def fit(self, X, y=None):
        """Build the decision tree.

        Parameters:
        -----------
        X : pandas DataFrame, shape (n_events, n_channels)
            List of (n_channels)-dimensional data points. Each row
            corresponds to a single event.

        Returns:
        --------
        self : this estimator (to be compatible with sklearn API).
        """
        # NOTE(review): the mutable default arguments below are safe only
        # because recursive_fit is re-defined (and its defaults re-created)
        # on every call to fit.
        def recursive_fit(x, depth=0, branch=None, queue=[], tree=[]):
            """Recursive clustering of the data.

            Parameters:
            -----------
            x : pandas DataFrame, shape (n_events, n_channels)
                Input data at current node.
            depth : int
                    depth of the node from which the current branch is
                    leaving.
            branch : tuple, shape (3,)
                     branch leading to the current node in the decision
                     tree. (splitting_variable, median, result) with
                     'splitting_variable' the column with maximal variance,
                     'median' the value of the median along this column,
                     'result' the result of the > operator.
            queue : list, shape (depth, )
                    concatenation of all the branches leading to the
                    current state.
            tree : list, shape (2^max_depth, max_depth)
                   list of all the branches from initial node to leaf node.

            Returns:
            tree : see above.
            """
            if branch:
                queue.append(branch)
            if depth < self.max_depth:
                #compute the splitting variable (maximal, count-weighted
                #when the input is a histogram, variance)
                if 'counts' in list(x.columns):
                    means = x[self.columns].mean(axis=0)
                    variances = np.square((x[self.columns] - means)).multiply(x['counts'], axis=0).sum()
                    splitting_variable = variances.idxmax()
                else:
                    splitting_variable = x[self.columns].var(axis=0).idxmax()
                #median along the splitting variable (count-weighted if binned)
                if 'counts' in list(x.columns):
                    cumsum = x[[splitting_variable, 'counts']].groupby(by=splitting_variable, sort=False).sum().cumsum()
                    median = (cumsum >= cumsum.iloc[-1]/2).idxmax()[0]
                else:
                    median = x[splitting_variable].median(axis=0)
                mask = (x[splitting_variable] > median).values
                #handle the case where the values are equal to the median
                #(e.g. projection on one axis): randomly flip just enough
                #tied entries to balance the two halves
                idx_switch = np.random.permutation(np.where((x[splitting_variable] == median)))
                idx_switch = idx_switch[:, :np.max([0, int(np.floor(0.5 * mask.size - np.sum(mask)))])].squeeze()
                mask[idx_switch] = np.logical_not(mask[idx_switch])
                #recursion
                recursive_fit(x.loc[mask, :],
                              depth+1,
                              (splitting_variable, median, True),
                              queue.copy(),
                              tree)
                recursive_fit(x.loc[np.logical_not(mask), :],
                              depth+1,
                              (splitting_variable, median, False),
                              queue.copy(),
                              tree)
            else:
                #stopping condition: record the full branch to this leaf
                tree.append(queue)
                return tree
            return tree
        Xt = X
        if self.weight_decay is not None:
            #fit the decision tree on the ewma of the previous step
            Xt = self.ewma
            if self.weight_decay > 0:
                #update the ewma with the new histogram
                self.ewma.loc[:, 'counts'] = self.weight_decay * X['counts'].values + (1 - self.weight_decay) * self.ewma['counts'].values
        if self.columns:
            self.tree_ = pd.DataFrame(recursive_fit(Xt[self.columns + ['counts'] if 'counts' in list(Xt.columns) else self.columns]))
        else:
            self.tree_ = pd.DataFrame(recursive_fit(Xt))
        return self

    def predict(self, X, y=None):
        """Cluster the data using the fitted decision tree.

        Parameters:
        -----------
        X : pandas DataFrame, shape (n_events, n_channels)
            list of (n_channels)-dimensional data points. Each row
            corresponds to a single event.

        Returns:
        --------
        labels_ : array containing the cluster index for each event.

        Side effect: adds/overwrites a 'cluster_ID' column on X.
        """
        # NOTE(review): default arguments (X, self.tree_.copy()) are bound
        # when this inner function is defined, i.e. once per predict call.
        def recursive_predict(x=X, tree=self.tree_.copy(), label_cursor=1):
            """Recursive clustering of the data.

            Parameters:
            -----------
            x : pandas DataFrame, shape (n_events, n_channels)
                list of (n_channels)-dimensional data points. Each row
                corresponds to a single event.
            tree : pandas DataFrame, shape (2^max_depth, max_depth)
                   list of all the branches from initial node to leaf node.
            label_cursor : counter giving the current cluster index.

            Returns:
            --------
            label_cursor : see above.
            """
            if tree.shape[1]:
                #get the 2 truncated trees (trees after the 2
                #branches leaving the current node)
                grp = tree.groupby(by=list(tree.columns)[0], sort=False)
                branches = list(grp.groups.keys())
                mask = ((x[branches[0][0]] > branches[0][1]) == branches[0][2]).values
                #handle the case where the value is equal to the median (e.g. projection)
                idx_switch = np.random.permutation(np.where((x[branches[0][0]] == branches[0][1])))
                idx_switch = idx_switch[:, :np.max([0, int(np.floor(0.5 * mask.size - np.sum(mask)))])].squeeze()
                mask[idx_switch] = np.logical_not(mask[idx_switch])
                #recursion
                label_cursor = recursive_predict(x.loc[mask, :],
                                                 grp.get_group(branches[0]).drop(list(tree.columns)[0], axis=1),
                                                 label_cursor)
                label_cursor = recursive_predict(x.loc[np.logical_not(mask), :],
                                                 grp.get_group(branches[1]).drop(list(tree.columns)[0], axis=1),
                                                 label_cursor)
            else:
                #leaf: assign the next cluster index to the remaining events
                X.loc[x.index, 'cluster_ID'] = label_cursor
                label_cursor += 1
                return label_cursor
            return label_cursor
        X['cluster_ID'] = 0
        recursive_predict()
        self.labels_ = X['cluster_ID'].values
        return self.labels_

    def transform(self, X, y=None):
        """Given a dataset return the count per bin.

        Parameters:
        -----------
        X : pandas DataFrame, shape (n_events, n_channels)
            list of (n_channels)-dimensional data points. Each row
            corresponds to a single event.

        Returns:
        --------
        Number of counts per bin (optionally normalized to frequencies).
        """
        self.predict(X)
        if 'counts' in list(X.columns):
            #histogram input: sum the pre-binned counts per assigned cluster
            df = pd.DataFrame({'counts':X['counts'], 'labels':self.labels_})
            output = np.atleast_1d(df.groupby(by='labels').sum().values.squeeze())
            if self.normalized:
                return np.nan_to_num(output / sum(output))
            else:
                return output
        else:
            #raw events: count events per cluster label
            output = np.histogram(self.labels_, len(np.unique(self.labels_)))[0]
            if self.normalized:
                return np.nan_to_num(output / sum(output))
            else:
                return output

    def initialize_ewma(self, fcms, preprocessing, edges):
        """Initialize the exponentially weighted moving average histogram
        with the mean over multiple FCM.

        Parameters:
        -----------
        fcms : iterable,
               Iterable pointing toward FCM files.
        preprocessing : FCTFunction,
                        Lambda function applying the FlowCytometryTools
                        preprocessing transform and gating to the FCM.
        edges : dict, shape (n_channels, )
                Dictionary mapping 'channel_id' to an array containing the
                edges of the bins along that particular channel.

        NOTE(review): the first FCM is transformed without *preprocessing*
        while the others are preprocessed first — confirm this asymmetry is
        intentional.
        """
        #instantiate the histogram transformer
        hist = HistogramTransform(edges)
        N = len(fcms)
        #initialize the mean histogram
        self.ewma = hist.transform(fcms[0])
        for i in np.arange(1, len(fcms)):
            self.ewma['counts'] += hist.transform(preprocessing.transform(fcms[i]))['counts']
        # NOTE(review): the trailing "| ... |" text below is a dataset
        # extraction artifact fused onto this line.
        self.ewma['counts'] /= N | bactoml/decision_tree_classifier.py | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin, ClusterMixin
from itertools import repeat
class HistogramTransform(BaseEstimator, TransformerMixin):
    """Apply a histogram transform to the data."""

    def __init__(self, edges):
        """
        Parameters:
        -----------
        edges : dict, shape (n_channels, )
                Dictionary mapping 'channel_id' to an array containing the
                edges of the bins along that particular channel.
        """
        self.edges = edges

    def fit(self, X, y=None):
        """No parameters to estimate."""
        return self

    def transform(self, X, y=None):
        """Create a histogram according to the given bins.

        Parameters:
        -----------
        X : FCMeasurement,
            Contains the flow cytometer data.

        Returns a DataFrame with one row per multidimensional bin: the bin
        center along each channel plus a 'counts' column.
        """
        #extract the volume from the meta data (volume scaling disabled)
        #V = float(X.get_meta()['$VOL'])
        #extract only the columns of interest
        X_ = X[list(self.edges.keys())]
        sorted_keys = [c for c in list(X_.columns) if c in list(self.edges.keys())]
        #construct the multidimensional histogram
        H, edges = np.histogramdd(X_.values, bins=[self.edges[key] for key in sorted_keys])
        edges = np.array(edges)
        # NOTE(review): the 2-D slicing below requires every channel to have
        # the same number of bin edges; ragged edge arrays would make
        # np.array(edges) an object array and break this — confirm inputs.
        #get the bin centers (edge midpoints, assuming uniform bin width)
        centers = edges[:, 0:-1] + (edges[:, 1] - edges[:, 0]).reshape(-1, 1) / 2
        hist = pd.DataFrame(columns=list(self.edges.keys()) + ['counts'])
        bin_sizes = [len(e) for e in centers]
        # Repeat/copy factors that enumerate the cartesian grid of bin
        # centers in the same (C) order as the flattened histogram.
        nb_copies = np.cumprod([1] + bin_sizes[0:-1])
        nb_repeat = np.cumprod([1] + bin_sizes[-1:0:-1])[::-1]
        for (c, name) in enumerate(X_.columns):
            hist[name] = np.array(list(map(lambda e: [e] * nb_repeat[c], centers[c])) * nb_copies[c]).flatten()
        #hist['counts'] = np.array(H).flatten() * (9E+4 / V)
        hist['counts'] = np.array(H).flatten()
        return hist
class DTClassifier(BaseEstimator, TransformerMixin, ClusterMixin):
    """Cluster a dataset in clusters with same cardinality by recursively
    splitting the dataset along the axis of maximal variance. The splits are
    done using the median value so that each split has the same number of
    samples.

    NOTE(review): median ties are broken with np.random without a seed, so
    results are not reproducible across runs unless the global RNG is seeded.
    """

    def __init__(self, max_depth=3, columns=None, normalized = False, weight_decay=None):
        """
        Parameters:
        -----------
        max_depth : int, defaults to 3.
                    Maximal depth of the recurrence
        columns : list, defaults to None.
                  Apply the clustering along the specified columns only.
        normalized : boolean. Determines whether the final bin count is
                     normalized or not.
        weight_decay : float, [0; 1], default to None.
                       If None the decision tree classifier is fit on the
                       data X.
                       If not None the DC_classifier must follow a
                       Histogram_transform in the pipeline and the decision
                       tree classifier is fit on the exponentially weighted
                       moving average (ewma). Note that this requires that
                       the ewma is initialized before calling fit on the
                       pipeline. Larger weight decay discards contributions
                       of old FCS faster and a weight decay of zero
                       corresponds to a constant mean histogram fixed to the
                       initialized values.
        """
        self.max_depth = max_depth
        self.columns = columns
        self.normalized = normalized
        self.weight_decay = weight_decay

    def fit(self, X, y=None):
        """Build the decision tree.

        Parameters:
        -----------
        X : pandas DataFrame, shape (n_events, n_channels)
            List of (n_channels)-dimensional data points. Each row
            corresponds to a single event.

        Returns:
        --------
        self : this estimator (to be compatible with sklearn API).
        """
        # NOTE(review): the mutable default arguments below are safe only
        # because recursive_fit is re-defined (and its defaults re-created)
        # on every call to fit.
        def recursive_fit(x, depth=0, branch=None, queue=[], tree=[]):
            """Recursive clustering of the data.

            Parameters:
            -----------
            x : pandas DataFrame, shape (n_events, n_channels)
                Input data at current node.
            depth : int
                    depth of the node from which the current branch is
                    leaving.
            branch : tuple, shape (3,)
                     branch leading to the current node in the decision
                     tree. (splitting_variable, median, result) with
                     'splitting_variable' the column with maximal variance,
                     'median' the value of the median along this column,
                     'result' the result of the > operator.
            queue : list, shape (depth, )
                    concatenation of all the branches leading to the
                    current state.
            tree : list, shape (2^max_depth, max_depth)
                   list of all the branches from initial node to leaf node.

            Returns:
            tree : see above.
            """
            if branch:
                queue.append(branch)
            if depth < self.max_depth:
                #compute the splitting variable (maximal, count-weighted
                #when the input is a histogram, variance)
                if 'counts' in list(x.columns):
                    means = x[self.columns].mean(axis=0)
                    variances = np.square((x[self.columns] - means)).multiply(x['counts'], axis=0).sum()
                    splitting_variable = variances.idxmax()
                else:
                    splitting_variable = x[self.columns].var(axis=0).idxmax()
                #median along the splitting variable (count-weighted if binned)
                if 'counts' in list(x.columns):
                    cumsum = x[[splitting_variable, 'counts']].groupby(by=splitting_variable, sort=False).sum().cumsum()
                    median = (cumsum >= cumsum.iloc[-1]/2).idxmax()[0]
                else:
                    median = x[splitting_variable].median(axis=0)
                mask = (x[splitting_variable] > median).values
                #handle the case where the values are equal to the median
                #(e.g. projection on one axis): randomly flip just enough
                #tied entries to balance the two halves
                idx_switch = np.random.permutation(np.where((x[splitting_variable] == median)))
                idx_switch = idx_switch[:, :np.max([0, int(np.floor(0.5 * mask.size - np.sum(mask)))])].squeeze()
                mask[idx_switch] = np.logical_not(mask[idx_switch])
                #recursion
                recursive_fit(x.loc[mask, :],
                              depth+1,
                              (splitting_variable, median, True),
                              queue.copy(),
                              tree)
                recursive_fit(x.loc[np.logical_not(mask), :],
                              depth+1,
                              (splitting_variable, median, False),
                              queue.copy(),
                              tree)
            else:
                #stopping condition: record the full branch to this leaf
                tree.append(queue)
                return tree
            return tree
        Xt = X
        if self.weight_decay is not None:
            #fit the decision tree on the ewma of the previous step
            Xt = self.ewma
            if self.weight_decay > 0:
                #update the ewma with the new histogram
                self.ewma.loc[:, 'counts'] = self.weight_decay * X['counts'].values + (1 - self.weight_decay) * self.ewma['counts'].values
        if self.columns:
            self.tree_ = pd.DataFrame(recursive_fit(Xt[self.columns + ['counts'] if 'counts' in list(Xt.columns) else self.columns]))
        else:
            self.tree_ = pd.DataFrame(recursive_fit(Xt))
        return self

    def predict(self, X, y=None):
        """Cluster the data using the fitted decision tree.

        Parameters:
        -----------
        X : pandas DataFrame, shape (n_events, n_channels)
            list of (n_channels)-dimensional data points. Each row
            corresponds to a single event.

        Returns:
        --------
        labels_ : array containing the cluster index for each event.

        Side effect: adds/overwrites a 'cluster_ID' column on X.
        """
        # NOTE(review): default arguments (X, self.tree_.copy()) are bound
        # when this inner function is defined, i.e. once per predict call.
        def recursive_predict(x=X, tree=self.tree_.copy(), label_cursor=1):
            """Recursive clustering of the data.

            Parameters:
            -----------
            x : pandas DataFrame, shape (n_events, n_channels)
                list of (n_channels)-dimensional data points. Each row
                corresponds to a single event.
            tree : pandas DataFrame, shape (2^max_depth, max_depth)
                   list of all the branches from initial node to leaf node.
            label_cursor : counter giving the current cluster index.

            Returns:
            --------
            label_cursor : see above.
            """
            if tree.shape[1]:
                #get the 2 truncated trees (trees after the 2
                #branches leaving the current node)
                grp = tree.groupby(by=list(tree.columns)[0], sort=False)
                branches = list(grp.groups.keys())
                mask = ((x[branches[0][0]] > branches[0][1]) == branches[0][2]).values
                #handle the case where the value is equal to the median (e.g. projection)
                idx_switch = np.random.permutation(np.where((x[branches[0][0]] == branches[0][1])))
                idx_switch = idx_switch[:, :np.max([0, int(np.floor(0.5 * mask.size - np.sum(mask)))])].squeeze()
                mask[idx_switch] = np.logical_not(mask[idx_switch])
                #recursion
                label_cursor = recursive_predict(x.loc[mask, :],
                                                 grp.get_group(branches[0]).drop(list(tree.columns)[0], axis=1),
                                                 label_cursor)
                label_cursor = recursive_predict(x.loc[np.logical_not(mask), :],
                                                 grp.get_group(branches[1]).drop(list(tree.columns)[0], axis=1),
                                                 label_cursor)
            else:
                #leaf: assign the next cluster index to the remaining events
                X.loc[x.index, 'cluster_ID'] = label_cursor
                label_cursor += 1
                return label_cursor
            return label_cursor
        X['cluster_ID'] = 0
        recursive_predict()
        self.labels_ = X['cluster_ID'].values
        return self.labels_

    def transform(self, X, y=None):
        """Given a dataset return the count per bin.

        Parameters:
        -----------
        X : pandas DataFrame, shape (n_events, n_channels)
            list of (n_channels)-dimensional data points. Each row
            corresponds to a single event.

        Returns:
        --------
        Number of counts per bin (optionally normalized to frequencies).
        """
        self.predict(X)
        if 'counts' in list(X.columns):
            #histogram input: sum the pre-binned counts per assigned cluster
            df = pd.DataFrame({'counts':X['counts'], 'labels':self.labels_})
            output = np.atleast_1d(df.groupby(by='labels').sum().values.squeeze())
            if self.normalized:
                return np.nan_to_num(output / sum(output))
            else:
                return output
        else:
            #raw events: count events per cluster label
            output = np.histogram(self.labels_, len(np.unique(self.labels_)))[0]
            if self.normalized:
                return np.nan_to_num(output / sum(output))
            else:
                return output

    def initialize_ewma(self, fcms, preprocessing, edges):
        """Initialize the exponentially weighted moving average histogram
        with the mean over multiple FCM.

        Parameters:
        -----------
        fcms : iterable,
               Iterable pointing toward FCM files.
        preprocessing : FCTFunction,
                        Lambda function applying the FlowCytometryTools
                        preprocessing transform and gating to the FCM.
        edges : dict, shape (n_channels, )
                Dictionary mapping 'channel_id' to an array containing the
                edges of the bins along that particular channel.

        NOTE(review): the first FCM is transformed without *preprocessing*
        while the others are preprocessed first — confirm this asymmetry is
        intentional.
        """
        #instantiate the histogram transformer
        hist = HistogramTransform(edges)
        N = len(fcms)
        #initialize the mean histogram
        self.ewma = hist.transform(fcms[0])
        for i in np.arange(1, len(fcms)):
            self.ewma['counts'] += hist.transform(preprocessing.transform(fcms[i]))['counts']
        # NOTE(review): the trailing "| ... |" text below is a dataset
        # extraction artifact fused onto this line.
        self.ewma['counts'] /= N | 0.910426 | 0.608216
import os
import time
from datetime import date
from ftplib import FTP
import pandas as pd
from sqlalchemy import create_engine
# FTP server address (was: 服务器地址)
FTP_SERVER = '172.16.17.32'
USER = 'yxh'
PWD = '<PASSWORD>'  # NOTE(review): placeholder left by credential scrubbing
FTP_PATH = '/'
# Local directory where downloaded result files are stored.
local_root = 'E:\\projects\\python\\beestock\\data\\result'
# Today's date as YYYYMMDD.
DATE = time.strftime('%Y%m%d', time.localtime(time.time()))
def isDir(filename):
    """
    Check that *filename* exists, creating the fallback data directory when
    it does not. Returns True on success and False on any error.

    NOTE(review): on the "not exists" branch the original creates
    ``local_root`` rather than *filename*'s directory; preserved here —
    confirm the intent with the caller.
    """
    try:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, so '/' was never converted to '\\'.
        path = filename.replace('/', '\\')
        if os.path.exists(path):
            print('---file exists--')
        else:
            print('file not exists ', local_root)
            # BUG FIX: os.mkdirs does not exist (AttributeError); the bare
            # except made this function always return False on this branch.
            os.makedirs(local_root)
        return True
    except Exception:
        # Kept broad (was a bare except) so invalid inputs such as None
        # still yield False instead of raising.
        return False
def ftpconnect():
    """Open an authenticated connection to the configured FTP server."""
    connection = FTP()
    connection.set_debuglevel(2)  # verbose protocol tracing
    connection.connect(FTP_SERVER, 21)
    connection.login(USER, PWD)
    return connection
def downloadfile():
    """Download the first file listed in the FTP root to a local path."""
    ftp = ftpconnect()
    ftp.cwd(FTP_PATH)
    ftp.set_pasv(0)  # active mode
    li = ftp.nlst()
    print('ftp: ', li)
    i = 0
    for eachfile in li:
        i += 1
        if i > 1:
            # Only the first listed file is downloaded.
            break
        localpath = 'e:' + eachfile
        print('-- open localpath --', localpath)
        bufsize = 1024
        isDir(localpath)
        fp = open(localpath, 'wb+')
        code = ftp.retrbinary('RETR ' + eachfile, fp.write, bufsize)
        print('+++++++++++++:', code)
        fp.flush()
    ftp.set_debuglevel(0)  # disable debugging (was: 关闭调试)
    # fp.close()
    ftp.quit()  # leave the FTP server (was: 退出ftp服务器)
def synchronize_result_file():
    """
    Refresh both TDX watchlist block files from the database tables.
    :return:
    """
    update_zixuan('Y_ZIXUAN.blk', 1000)
    update_top_up('Y_UP.blk', 1000)
def synchronize_result_file_():
    """
    FTP-based synchronization of the two daily result CSV files.
    NOTE(review): trailing-underscore variant; appears to be an alternative
    to synchronize_result_file — confirm which one is current.
    :return:
    """
    today = date.today()
    if today.isoweekday() > 5:  # skip Saturday/Sunday (was: 周六周日不执行)
        return
    ftp = ftpconnect()
    ftp.cwd(FTP_PATH)
    ftp.set_pasv(0)
    date_str = date.today().strftime('%Y-%m-%d')
    hope_file = 'hope_stock_' + date_str + '.csv'
    update_result_file(hope_file, local_root, ftp, 'Y_ZIXUAN.blk')
    hope_file = 'hope_stock_' + date_str + '_limitup.csv'
    update_result_file(hope_file, local_root, ftp, 'Y_UP.blk')
def update_zixuan(tdx_group, size):
    """
    Export the hushen_hope_daily table into the given TDX group file.

    :param tdx_group: target TDX block-file name
    :param size: maximum number of rows to export
    :return:
    """
    engine = create_engine('mysql+pymysql://root:yangxh@172.16.17.32:3306/quant_bee?charset=utf8')
    frame = pd.read_sql('select * from hushen_hope_daily', engine, index_col='id')
    update_tdx_group(frame, tdx_group, size)
    print('update zi xuan successfully')
def update_top_up(tdx_group, size):
    """
    Export the hushen_hope_daily_top_up table into the given TDX group file.

    :param tdx_group: target TDX block-file name
    :param size: maximum number of rows to export
    :return:
    """
    engine = create_engine('mysql+pymysql://root:yangxh@172.16.17.32:3306/quant_bee?charset=utf8')
    frame = pd.read_sql('select * from hushen_hope_daily_top_up', engine, index_col='id')
    update_tdx_group(frame, tdx_group, size)
    print('update top up successfully')
def update_result_file(filename, local_path, ftp, tdx_group):
    """
    Ensure *filename* is present locally (downloading via *ftp* when
    missing) and push its codes into the given TDX group file.

    :param filename: remote/local CSV file name
    :param local_path: local directory holding downloaded files
    :param ftp: an open ftplib.FTP connection
    :param tdx_group: target TDX block-file name
    :return:
    """
    abs_file = local_path + '/' + filename
    if os.path.exists(abs_file):
        # Already downloaded: update directly from the local copy.
        print(abs_file + ' exists!')
        df = pd.read_csv(abs_file, encoding='utf8', dtype={'code': str})
        update_tdx_group(df, tdx_group, 600)
        print(abs_file + ' update tdx successfully!')
        return
    else:
        fp = open(abs_file, 'wb')
        try:
            code = ftp.retrbinary('RETR ' + filename, fp.write, 1024)
            fp.flush()
        except Exception:
            # Download failed: drop the partial file.
            fp.close()
            os.remove(abs_file)
        else:
            if code.startswith('226'):  # transfer complete: update the TDX watchlist (was: 同步成功,更新通达信自选股)
                df = pd.read_csv(abs_file, encoding='utf8', dtype={'code': str})
                update_tdx_group(df, tdx_group, 600)
            fp.close()
def update_tdx_group(data, tdx_group, size):
    """
    Update a TDX custom stock group file (was: 更新通达信的自定义品种).

    :param data: DataFrame with a string 'code' column
    :param tdx_group: block-file name written under the TDX blocknew dir
    :param size: maximum number of codes to write
    :return:
    """
    tdx_path = 'D:\\Program Files\\new_txd\T0002\\blocknew\\'
    # BUG FIX: use a context manager so the file is flushed and closed even
    # if iteration raises; the original leaked the handle on error.
    with open(tdx_path + tdx_group, 'w') as f:
        written = 0
        for _, item in data.iterrows():
            # Shanghai codes ('60...') get market prefix '1', others '0'.
            prefix = '1' if item['code'].startswith('60') else '0'
            f.write(prefix + item['code'])
            f.write('\n')
            written += 1
            if written >= size:
                break
if __name__ == "__main__":
    # downloadfile()
    # f=open(local_root+'/hope_stock_2018-09-21_limitup.csv','r')
    # data=pd.read_csv(f,encoding='utf8',dtype={'code':str})
    # update_tdx_group(data,'Y_UP.blk',600)
    # Entry point: refresh both TDX watchlists from the database. The
    # trailing "| ... |" text below is a dataset extraction artifact.
    synchronize_result_file() | easytrader/assistant/hope_file_downloader.py |
import os
import time
from datetime import date
from ftplib import FTP
import pandas as pd
from sqlalchemy import create_engine
# FTP server address (was: 服务器地址)
FTP_SERVER = '172.16.17.32'
USER = 'yxh'
PWD = '<PASSWORD>'  # NOTE(review): placeholder left by credential scrubbing
FTP_PATH = '/'
# Local directory where downloaded result files are stored.
local_root = 'E:\\projects\\python\\beestock\\data\\result'
# Today's date as YYYYMMDD.
DATE = time.strftime('%Y%m%d', time.localtime(time.time()))
def isDir(filename):
    """
    Check that *filename* exists, creating the fallback data directory when
    it does not. Returns True on success and False on any error.

    NOTE(review): on the "not exists" branch the original creates
    ``local_root`` rather than *filename*'s directory; preserved here —
    confirm the intent with the caller.
    """
    try:
        # BUG FIX: str.replace returns a new string; the original discarded
        # the result, so '/' was never converted to '\\'.
        path = filename.replace('/', '\\')
        if os.path.exists(path):
            print('---file exists--')
        else:
            print('file not exists ', local_root)
            # BUG FIX: os.mkdirs does not exist (AttributeError); the bare
            # except made this function always return False on this branch.
            os.makedirs(local_root)
        return True
    except Exception:
        # Kept broad (was a bare except) so invalid inputs such as None
        # still yield False instead of raising.
        return False
def ftpconnect():
    """Open an authenticated connection to the configured FTP server."""
    connection = FTP()
    connection.set_debuglevel(2)  # verbose protocol tracing
    connection.connect(FTP_SERVER, 21)
    connection.login(USER, PWD)
    return connection
def downloadfile():
    """Download the first file listed in the FTP root to a local path."""
    ftp = ftpconnect()
    ftp.cwd(FTP_PATH)
    ftp.set_pasv(0)  # active mode
    li = ftp.nlst()
    print('ftp: ', li)
    i = 0
    for eachfile in li:
        i += 1
        if i > 1:
            # Only the first listed file is downloaded.
            break
        localpath = 'e:' + eachfile
        print('-- open localpath --', localpath)
        bufsize = 1024
        isDir(localpath)
        fp = open(localpath, 'wb+')
        code = ftp.retrbinary('RETR ' + eachfile, fp.write, bufsize)
        print('+++++++++++++:', code)
        fp.flush()
    ftp.set_debuglevel(0)  # disable debugging (was: 关闭调试)
    # fp.close()
    ftp.quit()  # leave the FTP server (was: 退出ftp服务器)
def synchronize_result_file():
    """
    Refresh both TDX watchlist block files from the database tables.
    :return:
    """
    update_zixuan('Y_ZIXUAN.blk', 1000)
    update_top_up('Y_UP.blk', 1000)
def synchronize_result_file_():
    """
    FTP-based synchronization of the two daily result CSV files.
    NOTE(review): trailing-underscore variant; appears to be an alternative
    to synchronize_result_file — confirm which one is current.
    :return:
    """
    today = date.today()
    if today.isoweekday() > 5:  # skip Saturday/Sunday (was: 周六周日不执行)
        return
    ftp = ftpconnect()
    ftp.cwd(FTP_PATH)
    ftp.set_pasv(0)
    date_str = date.today().strftime('%Y-%m-%d')
    hope_file = 'hope_stock_' + date_str + '.csv'
    update_result_file(hope_file, local_root, ftp, 'Y_ZIXUAN.blk')
    hope_file = 'hope_stock_' + date_str + '_limitup.csv'
    update_result_file(hope_file, local_root, ftp, 'Y_UP.blk')
def update_zixuan(tdx_group, size):
    """
    Export the hushen_hope_daily table into the given TDX group file.

    :param tdx_group: target TDX block-file name
    :param size: maximum number of rows to export
    :return:
    """
    engine = create_engine('mysql+pymysql://root:yangxh@172.16.17.32:3306/quant_bee?charset=utf8')
    frame = pd.read_sql('select * from hushen_hope_daily', engine, index_col='id')
    update_tdx_group(frame, tdx_group, size)
    print('update zi xuan successfully')
def update_top_up(tdx_group, size):
    """Refresh the TDX limit-up block file from hushen_hope_daily_top_up.

    :param tdx_group: TDX block-file name to write (e.g. 'Y_UP.blk')
    :param size: maximum number of codes to export
    :return: None
    """
    # NOTE(review): database credentials are hard-coded; move to configuration.
    engine = create_engine('mysql+pymysql://root:yangxh@172.16.17.32:3306/quant_bee?charset=utf8')
    frame = pd.read_sql('select * from hushen_hope_daily_top_up', engine, index_col='id')
    update_tdx_group(frame, tdx_group, size)
    print('update top up successfully')
def update_result_file(filename, local_path, ftp, tdx_group):
    """
    Load *filename* into the given TDX group, downloading it from *ftp*
    first when it is not already cached under *local_path*.
    :param filename: CSV result file name (expects a 'code' column)
    :param local_path: local cache directory
    :param ftp: an authenticated ftplib.FTP connection
    :param tdx_group: TDX block-file name to refresh
    :return: None
    """
    abs_file = local_path + '/' + filename
    if os.path.exists(abs_file):
        print(abs_file + ' exists!')
        df = pd.read_csv(abs_file, encoding='utf8', dtype={'code': str})
        update_tdx_group(df, tdx_group, 600)
        print(abs_file + ' update tdx successfully!')
        return
    else:
        fp = open(abs_file, 'wb')
        try:
            code = ftp.retrbinary('RETR ' + filename, fp.write, 1024)
            fp.flush()
        except Exception:
            # Download failed: drop the partial file so the next run retries.
            fp.close()
            os.remove(abs_file)
        else:
            if code.startswith('226'): # transfer succeeded: refresh the TDX watch list
                df = pd.read_csv(abs_file, encoding='utf8', dtype={'code': str})
                update_tdx_group(df, tdx_group, 600)
            fp.close()
def update_tdx_group(data, tdx_group, size):
    """
    Write at most *size* stock codes from *data* into a TDX custom-group file.
    Codes starting with '60' (Shanghai board) are prefixed with '1', all
    others with '0', one code per line — the TDX blocknew file format.
    :param data: DataFrame with a 'code' column of stock-code strings
    :param tdx_group: TDX block-file name (e.g. 'Y_ZIXUAN.blk')
    :param size: maximum number of codes to write
    :return: None
    """
    # NOTE(review): '\T' is a literal backslash + T (not an escape), so the
    # path is usable, but '\\T0002' would be clearer — confirm the target dir.
    tdx_path = 'D:\\Program Files\\new_txd\T0002\\blocknew\\'
    # Bug fix: use a context manager so the file is flushed and closed even
    # when iteration raises (the original leaked the handle on error).
    with open(tdx_path + tdx_group, 'w') as f:
        written = 0
        for _, item in data.iterrows():
            market_prefix = '1' if item['code'].startswith('60') else '0'
            f.write(market_prefix + item['code'] + '\n')
            written += 1
            if written >= size:
                break
if __name__ == "__main__":
    # Manual test drivers kept for reference:
    # downloadfile()
    # f=open(local_root+'/hope_stock_2018-09-21_limitup.csv','r')
    # data=pd.read_csv(f,encoding='utf8',dtype={'code':str})
    # update_tdx_group(data,'Y_UP.blk',600)
    synchronize_result_file() | 0.217254 | 0.087916 |
class Node(object):
    """Singly-linked-list node holding a value and a pointer to its successor."""
    def __init__(self, val):
        # A freshly built node is detached: `next` is linked in later.
        self.val = val
        self.next = None
class MyLinkedList:
    def __init__(self):
        # An empty list is represented by head == None.
        self.head = None
def get(self, index):
"""
Get the value of the index-th node in the linked list. If the index is invalid, return -1.
"""
current = self.head
if not current:
return -1
ind = 0
while ind != index:
ind += 1
if not current.next:
return -1
current = current.next
return current.val
def addAtHead(self, val):
"""
Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.
"""
node = Node(val)
if self.head:
current = self.head
node.next = current
self.head = node
else:
self.head = node
def addAtTail(self, val):
"""
Append a node of value val to the last element of the linked list.
"""
node = Node(val)
current = self.head
if self.head:
while current.next:
current = current.next
current.next = node
def addAtIndex(self, index, val):
"""
Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.
"""
ind = 0
if ind == index:
self.addAtHead(val)
return
element = Node(val)
current = self.head
while ind != index - 1:
ind += 1
if not current.next:
return
current = current.next
element.next = current.next
current.next = element
    def deleteAtIndex(self, index):
        """
        Delete the index-th node in the linked list, if the index is valid.
        """
        current = self.head
        # Nothing to do for a negative index or an empty list.
        if index < 0 or not current:
            return
        ind = 0
        # Deleting the head is a simple relink.
        if ind == index:
            self.head = self.head.next
            return
        # Walk to the node just before position *index*; bail out when the
        # list is shorter than that.
        while ind != index-1:
            ind += 1
            if not current.next:
                return
            current = current.next
        # Unlink the target node (no-op when *index* equals the list length).
        if current and current.next:
            current.next = current.next.next | MyLinkedList.py | class Node(object):
def __init__(self, val):
self.val = val
self.next = None
class MyLinkedList:
def __init__(self):
self.head = None
def get(self, index):
"""
Get the value of the index-th node in the linked list. If the index is invalid, return -1.
"""
current = self.head
if not current:
return -1
ind = 0
while ind != index:
ind += 1
if not current.next:
return -1
current = current.next
return current.val
def addAtHead(self, val):
"""
Add a node of value val before the first element of the linked list. After the insertion, the new node will be the first node of the linked list.
"""
node = Node(val)
if self.head:
current = self.head
node.next = current
self.head = node
else:
self.head = node
def addAtTail(self, val):
"""
Append a node of value val to the last element of the linked list.
"""
node = Node(val)
current = self.head
if self.head:
while current.next:
current = current.next
current.next = node
def addAtIndex(self, index, val):
"""
Add a node of value val before the index-th node in the linked list. If index equals to the length of linked list, the node will be appended to the end of linked list. If index is greater than the length, the node will not be inserted.
"""
ind = 0
if ind == index:
self.addAtHead(val)
return
element = Node(val)
current = self.head
while ind != index - 1:
ind += 1
if not current.next:
return
current = current.next
element.next = current.next
current.next = element
def deleteAtIndex(self, index):
"""
Delete the index-th node in the linked list, if the index is valid.
"""
current = self.head
if index < 0 or not current:
return
ind = 0
if ind == index:
self.head = self.head.next
return
while ind != index-1:
ind += 1
if not current.next:
return
current = current.next
if current and current.next:
current.next = current.next.next | 0.579638 | 0.214671 |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TranscriptionNormalization(object):
    """
    Information to Normalize generated transcript.
    """
    # NOTE(review): this follows the auto-generated OCI SDK model pattern
    # (swagger_types / attribute_map); prefer regenerating over hand edits.
    def __init__(self, **kwargs):
        """
        Initializes a new TranscriptionNormalization object with values from keyword arguments.
        The following keyword arguments are supported (corresponding to the getters/setters of this class):
        :param is_punctuation_enabled:
            The value to assign to the is_punctuation_enabled property of this TranscriptionNormalization.
        :type is_punctuation_enabled: bool
        :param filters:
            The value to assign to the filters property of this TranscriptionNormalization.
        :type filters: list[oci.ai_speech.models.TranscriptionFilter]
        """
        # Declared attribute types, consumed by the SDK (de)serializer.
        self.swagger_types = {
            'is_punctuation_enabled': 'bool',
            'filters': 'list[TranscriptionFilter]'
        }
        # Python attribute name -> wire (JSON) field name.
        self.attribute_map = {
            'is_punctuation_enabled': 'isPunctuationEnabled',
            'filters': 'filters'
        }
        self._is_punctuation_enabled = None
        self._filters = None
    @property
    def is_punctuation_enabled(self):
        """
        Gets the is_punctuation_enabled of this TranscriptionNormalization.
        Whether to add punctuation in generated transcription. By default it is enabled.
        :return: The is_punctuation_enabled of this TranscriptionNormalization.
        :rtype: bool
        """
        return self._is_punctuation_enabled
    @is_punctuation_enabled.setter
    def is_punctuation_enabled(self, is_punctuation_enabled):
        """
        Sets the is_punctuation_enabled of this TranscriptionNormalization.
        Whether to add punctuation in generated transcription. By default it is enabled.
        :param is_punctuation_enabled: The is_punctuation_enabled of this TranscriptionNormalization.
        :type: bool
        """
        self._is_punctuation_enabled = is_punctuation_enabled
    @property
    def filters(self):
        """
        Gets the filters of this TranscriptionNormalization.
        List of filters.
        :return: The filters of this TranscriptionNormalization.
        :rtype: list[oci.ai_speech.models.TranscriptionFilter]
        """
        return self._filters
    @filters.setter
    def filters(self, filters):
        """
        Sets the filters of this TranscriptionNormalization.
        List of filters.
        :param filters: The filters of this TranscriptionNormalization.
        :type: list[oci.ai_speech.models.TranscriptionFilter]
        """
        self._filters = filters
    def __repr__(self):
        return formatted_flat_dict(self)
    def __eq__(self, other):
        # Equality is attribute-dict comparison; note that defining __eq__
        # makes instances unhashable (Python sets __hash__ to None here).
        if other is None:
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        return not self == other | src/oci/ai_speech/models/transcription_normalization.py |
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class TranscriptionNormalization(object):
"""
Information to Normalize generated transcript.
"""
def __init__(self, **kwargs):
"""
Initializes a new TranscriptionNormalization object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param is_punctuation_enabled:
The value to assign to the is_punctuation_enabled property of this TranscriptionNormalization.
:type is_punctuation_enabled: bool
:param filters:
The value to assign to the filters property of this TranscriptionNormalization.
:type filters: list[oci.ai_speech.models.TranscriptionFilter]
"""
self.swagger_types = {
'is_punctuation_enabled': 'bool',
'filters': 'list[TranscriptionFilter]'
}
self.attribute_map = {
'is_punctuation_enabled': 'isPunctuationEnabled',
'filters': 'filters'
}
self._is_punctuation_enabled = None
self._filters = None
@property
def is_punctuation_enabled(self):
"""
Gets the is_punctuation_enabled of this TranscriptionNormalization.
Whether to add punctuation in generated transcription. By default it is enabled.
:return: The is_punctuation_enabled of this TranscriptionNormalization.
:rtype: bool
"""
return self._is_punctuation_enabled
@is_punctuation_enabled.setter
def is_punctuation_enabled(self, is_punctuation_enabled):
"""
Sets the is_punctuation_enabled of this TranscriptionNormalization.
Whether to add punctuation in generated transcription. By default it is enabled.
:param is_punctuation_enabled: The is_punctuation_enabled of this TranscriptionNormalization.
:type: bool
"""
self._is_punctuation_enabled = is_punctuation_enabled
@property
def filters(self):
"""
Gets the filters of this TranscriptionNormalization.
List of filters.
:return: The filters of this TranscriptionNormalization.
:rtype: list[oci.ai_speech.models.TranscriptionFilter]
"""
return self._filters
@filters.setter
def filters(self, filters):
"""
Sets the filters of this TranscriptionNormalization.
List of filters.
:param filters: The filters of this TranscriptionNormalization.
:type: list[oci.ai_speech.models.TranscriptionFilter]
"""
self._filters = filters
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other | 0.790692 | 0.251717 |
import pytest
from collections import OrderedDict
import numpy as np
import pypospack.potential as potential
def test__import__pypospack_potential():
    # Smoke test: MorsePotential must be importable from the aggregate module.
    from pypospack.potential import MorsePotential
def test__import__pypospack_potentials_morse():
    # Smoke test: MorsePotential must also be importable from its home module.
    from pypospack.potentials.morse import MorsePotential
def test__1element____init__():
    """Constructing a one-element MorsePotential populates symbols, symbol
    pairs and parameter names, with every parameter initialized to None."""
    symbols = ['Ni']
    symbol_pairs = [['Ni','Ni']]
    parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0']
    try:
        morse = potential.MorsePotential(symbols=symbols)
    except Exception as exc:
        # Bug fix: the original bare `except:` also caught SystemExit and
        # KeyboardInterrupt and reported nothing about the actual failure.
        pytest.fail('MorsePotential(symbols={}) raised {!r}'.format(symbols, exc))
    #<-- test attribute symbols
    assert type(morse.symbols) is list
    assert len(morse.symbols) == len(symbols)
    assert morse.symbols == symbols
    #<--- test attribute symbol_pairs
    assert type(morse.symbol_pairs) is list
    assert len(morse.symbol_pairs) == len(symbol_pairs)
    assert morse.symbol_pairs == symbol_pairs
    #<-- test attribute parameter_names
    assert type(morse.parameter_names) is list
    assert len(morse.parameter_names) == len(parameter_names)
    assert morse.parameter_names == parameter_names
    #<-- test attribute parameters
    assert type(morse.parameters) is OrderedDict
    for name,value in morse.parameters.items():
        assert value is None
    assert len(morse.parameters) == len(morse.parameter_names)
    for name in morse.parameter_names:
        assert name in morse.parameters
def test__1element__evaluate():
    """evaluate() fills morse.potential with one ndarray per symbol pair,
    each defined on the same radial grid as the input."""
    symbols = ['Ni']
    parameters = OrderedDict()
    parameters['NiNi_D0'] = 0.001114
    parameters['NiNi_a'] = 3.429506
    parameters['NiNi_r0'] = 2.6813
    r_max = 11.
    N_r = 500
    r = r_max * np.linspace(1,100,N_r)/100
    try:
        morse = potential.MorsePotential(symbols=symbols)
        morse.evaluate(r,parameters)
    except Exception as exc:
        # Bug fix: narrow the bare `except:` and surface the actual error.
        pytest.fail('MorsePotential.evaluate raised {!r}'.format(exc))
    assert isinstance(morse.potential, OrderedDict)
    for pair_key,pot in morse.potential.items():
        assert isinstance(pot,np.ndarray)
        assert pot.shape == r.shape
def test__2element___init__():
    """A two-element potential generates all ordered symbol pairs and the
    corresponding per-pair parameter names."""
    symbols = ['Ni','Al']
    symbol_pairs = [['Ni','Ni'],['Ni','Al'],['Al','Al']]
    parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0',
                       'NiAl_D0','NiAl_a','NiAl_r0',
                       'AlAl_D0','AlAl_a','AlAl_r0']
    try:
        morse = potential.MorsePotential(symbols=symbols)
    except Exception as exc:
        # Bug fix: do not swallow the construction error silently.
        pytest.fail('MorsePotential(symbols={}) raised {!r}'.format(symbols, exc))
    assert type(morse.symbols) is list
    assert type(morse.symbol_pairs) is list
    assert morse.symbol_pairs == symbol_pairs
    assert type(morse.parameter_names) is list
    assert morse.parameter_names == parameter_names
if __name__ == "__main__":
    # Ad-hoc manual driver: build a one-element Morse potential, evaluate it
    # on a radial grid, and dump the parameters when evaluation fails.
    symbols = ['Ni']
    symbol_pairs = [['Ni','Ni']]
    parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0']
    parameters = OrderedDict()
    parameters['NiNi_D0'] = 0.001114
    parameters['NiNi_a'] = 3.429506
    parameters['NiNi_r0'] = 2.6813
    r_max = 11.
    N_r = 500
    r = r_max * np.linspace(1,100,N_r)/100
    try:
        morse = potential.MorsePotential(symbols=symbols)
        morse.evaluate(r,parameters)
    except:
        print(morse.parameters) | tests/potential/BornMayerPotential/test_MorsePotential.py | import pytest
from collections import OrderedDict
import numpy as np
import pypospack.potential as potential
def test__import__pypospack_potential():
from pypospack.potential import MorsePotential
def test__import__pypospack_potentials_morse():
from pypospack.potentials.morse import MorsePotential
def test__1element____init__():
symbols = ['Ni']
symbol_pairs = [['Ni','Ni']]
parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0']
parameters = OrderedDict()
parameters['NiNi_D0'] = 0.001114
parameters['NiNi_a'] = 3.429506
parameters['NiNi_r0'] = 2.6813
r_max = 11.
N_r = 500
try:
morse = potential.MorsePotential(symbols=symbols)
except:
pytest.fail()
#<-- test attribute symbols
assert type(morse.symbols) is list
assert len(morse.symbols) == len(symbols)
assert morse.symbols == symbols
#<--- test attribute symbol_pairs
assert type(morse.symbol_pairs) is list
assert len(morse.symbol_pairs) == len(symbol_pairs)
assert morse.symbol_pairs == symbol_pairs
#<-- test attribute parameter_names
assert type(morse.parameter_names) is list
assert len(morse.parameter_names) == len(parameter_names)
assert morse.parameter_names == parameter_names
#<-- test attribute parameters
assert type(morse.parameters) is OrderedDict
for name,value in morse.parameters.items():
assert value is None
assert len(morse.parameters) == len(morse.parameter_names)
for name in morse.parameter_names:
assert name in morse.parameters
def test__1element__evaluate():
symbols = ['Ni']
symbol_pairs = [['Ni','Ni']]
parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0']
parameters = OrderedDict()
parameters['NiNi_D0'] = 0.001114
parameters['NiNi_a'] = 3.429506
parameters['NiNi_r0'] = 2.6813
r_max = 11.
N_r = 500
r = r_max * np.linspace(1,100,N_r)/100
try:
morse = potential.MorsePotential(symbols=symbols)
morse.evaluate(r,parameters)
except:
pytest.fail()
assert isinstance(morse.potential, OrderedDict)
for pair_key,pot in morse.potential.items():
assert isinstance(pot,np.ndarray)
assert pot.shape == r.shape
def test__2element___init__():
symbols = ['Ni','Al']
parameters= OrderedDict()
parameters['NiNi_D0'] = 0.001114
parameters['NiNi_a'] = 3.429506
parameters['NiNi_r0'] = 2.6813
parameters['NiAl_D0'] = 0.001114
parameters['NiAl_a'] = 3.429506
parameters['NiAl_r0'] = 2.6813
parameters['AlAl_D0'] = 0.001114
parameters['AlAl_a'] = 3.429506
parameters['AlAl_r0'] = 2.6813
symbol_pairs = [['Ni','Ni'],['Ni','Al'],['Al','Al']]
parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0',
'NiAl_D0','NiAl_a','NiAl_r0',
'AlAl_D0','AlAl_a','AlAl_r0']
try:
morse = potential.MorsePotential(symbols=symbols)
except:
pytest.fail()
assert type(morse.symbols) is list
assert type(morse.symbol_pairs) is list
assert morse.symbol_pairs == symbol_pairs
assert type(morse.parameter_names) is list
assert morse.parameter_names == parameter_names
if __name__ == "__main__":
symbols = ['Ni']
symbol_pairs = [['Ni','Ni']]
parameter_names = ['NiNi_D0','NiNi_a','NiNi_r0']
parameters = OrderedDict()
parameters['NiNi_D0'] = 0.001114
parameters['NiNi_a'] = 3.429506
parameters['NiNi_r0'] = 2.6813
r_max = 11.
N_r = 500
r = r_max * np.linspace(1,100,N_r)/100
try:
morse = potential.MorsePotential(symbols=symbols)
morse.evaluate(r,parameters)
except:
print(morse.parameters) | 0.411229 | 0.583648 |
import numpy as np
from astropy.wcs import WCS
from ._det_spatial import get_shadowed_pix_mask_for_urddata, DL, F, multiply_photons
from .time import get_gti, GTI, tGTI, emptyGTI, deadtime_correction
from .atthist import hist_orientation_for_attdata
from .planwcs import make_wcs_for_attdata
from .caldb import get_energycal, get_shadowmask, get_energycal_by_urd, get_shadowmask_by_urd, urdbkgsc, OPAXOFFSET
from .energy import get_events_energy
from .telescope import URDNS
from .orientation import get_photons_sky_coord, read_gyro_fits, read_bokz_fits, AttDATA, define_required_correction, pol_to_vec, get_photons_vectors
from .lightcurve import make_overall_bkglc
from .vignetting import load_raw_wignetting_function
from astropy.io import fits
from math import pi, cos, sin
from multiprocessing import Pool, cpu_count, Queue, Process, Pipe
from threading import Thread
import copy
import time
import matplotlib.pyplot as plt
import os, sys
from .expmap import make_expmap_for_wcs
from .background import make_bkgmap_for_wcs
from scipy.interpolate import interp1d
from matplotlib.colors import LogNorm
from functools import reduce
from collections import namedtuple
import pickle
eband = namedtuple("eband", ["emin", "emax"])
class NoDATA(Exception):
pass
def constscale(const, func):
    """Return a callable that evaluates *func* and scales its result by *const*."""
    def scaled(val):
        return const * func(val)
    return scaled
def make_events_mask(minenergy=4., maxenergy=12., minflag=-1, ):
    """Build a boolean-mask callable selecting events inside an energy window
    and away from the detector borders.

    NOTE(review): the inner function references mingrade, maxgrade, minrawx,
    maxrawx, minrawy and maxrawy, which are defined nowhere in this module —
    calling the returned mask will raise NameError.  `minflag` is accepted
    but never used.  Confirm the intended grade/raw-coordinate bounds.
    """
    def mask_events(urddata, grade, energy):
        eventsmask = np.all([grade > mingrade, grade < maxgrade,
                    urddata["RAW_X"] > minrawx, urddata["RAW_X"] < maxrawx,
                    urddata["RAW_Y"] > minrawy, urddata["RAW_Y"] < maxrawy,
                    energy > minenergy, energy < maxenergy], axis=0)
        return eventsmask
    return mask_events
standard_events_mask = make_events_mask(minenergy=4., maxenergy=12.)
def make_energies_flags_and_grades(urddata, urdhk, urdn):
    """Compute per-event energies, grades and quality flags for one URD.

    Flag values assigned here: 0 = good, 2 = event in a shadowed (masked)
    pixel, 3 = event on the detector edge (RAW_X/RAW_Y equal to 0 or 47);
    the edge flag overrides the shadow flag.

    :param urddata: URD event records with RAW_X / RAW_Y columns
    :param urdhk: housekeeping data used by the energy calibration
    :param urdn: URD identifier used to look up CALDB entries
    :return: (energy, grade, flag) arrays aligned with urddata
    """
    flag = np.zeros(urddata.size, np.uint8)
    shadow = get_shadowmask_by_urd(urdn)
    caldbfile = get_energycal_by_urd(urdn)
    maskshadow = get_shadowed_pix_mask_for_urddata(urddata, shadow)
    flag[np.logical_not(maskshadow)] = 2
    flag[np.any([urddata["RAW_X"] == 0, urddata["RAW_X"] == 47, \
                 urddata["RAW_Y"] == 0, urddata["RAW_Y"] == 47], axis=0)] = 3
    energy, xc, yc, grade = get_events_energy(urddata, urdhk, caldbfile)
    return energy, grade, flag
def make_vignetting_weighted_phot_images(urddata, urdn, energy, attdata, locwcs, photsplitside=1):
    """Histogram photons onto the sky WCS grid, weighting each (split) photon
    by the raw vignetting function at its energy and detector position.

    :param photsplitside: each event is split into photsplitside**2 sub-samples
    :return: 2D image (y, x) matching the locwcs pixel grid
    """
    rawvignfun = load_raw_wignetting_function()
    x, y = multiply_photons(urddata, photsplitside)
    # Vignetting weight per sub-sample: (energy, det_x, det_y) lookup.
    weights = rawvignfun(np.array([np.repeat(energy, photsplitside*photsplitside), x, y]).T)
    r, d = get_photons_sky_coord(urddata, urdn, attdata, photsplitside)
    # Sky coordinates (radians) -> WCS pixel coordinates (degrees in, 1-based).
    x, y = locwcs.all_world2pix(np.array([r*180./pi, d*180./pi]).T, 1.).T
    img = np.histogram2d(x, y, [np.arange(locwcs.wcs.crpix[0]*2 + 2) + 0.5,
                                np.arange(locwcs.wcs.crpix[1]*2 + 2) + 0.5],
                         weights=weights)[0].T
    return img
def make_sky_image(urddata, urdn, attdata, locwcs, photsplitside=10, weight_with_vignetting=False):
    """Histogram photons onto the sky WCS grid without vignetting weights.

    Each event is split into photsplitside**2 sub-samples; the final division
    renormalizes so the image still integrates to the event count.
    NOTE(review): weight_with_vignetting is accepted but unused here.
    """
    r, d = get_photons_sky_coord(urddata, urdn, attdata, photsplitside)
    x, y = locwcs.all_world2pix(np.array([r*180./pi, d*180./pi]).T, 1.).T
    img = np.histogram2d(x, y, [np.arange(locwcs.wcs.crpix[0]*2 + 2) + 0.5,
                                np.arange(locwcs.wcs.crpix[1]*2 + 2) + 0.5])[0].T
    return img/photsplitside/photsplitside
def get_attdata(fname):
    """Read an attitude (orientation) FITS file and apply the known clock shift.

    Gyro files are shifted by 0.97 s and get the required correction applied;
    all other (BOKZ star-tracker) files are shifted by 1.55 s.

    :param fname: path to a FITS file with an ORIENTATION extension
    :return: AttDATA with shifted time stamps and GTI
    """
    is_gyro = "gyro" in fname
    time_shift = 0.97 if is_gyro else 1.55
    # Bug fix: close the FITS file once the attitude data has been read
    # (the original leaked the file handle).  All processing happens inside
    # the `with` block — assumes read_*_fits copies data out of the HDU;
    # TODO(review): confirm no lazy/memmapped arrays escape.
    with fits.open(fname) as ffile:
        attdata = read_gyro_fits(ffile["ORIENTATION"]) if is_gyro else read_bokz_fits(ffile["ORIENTATION"])
        attdata.times = attdata.times - time_shift
        attdata.gti.arr = attdata.gti.arr - time_shift
        if is_gyro:
            attdata = define_required_correction(attdata)
    return attdata
def make_mosaic_for_urdset_by_gti(urdflist, attflist, gti,
                                  outctsname, outbkgname, outexpmapname,
                                  urdbti={}, ebands={"soft": eband(4, 12), "hard": eband(8, 16)},
                                  photsplitnside=1, pixsize=20/3600., usedtcorr=True,
                                  weightphotons=False, locwcs=None):
    """
    given two sets with paths to the urdfiles and corresponding attfiles,
    and gti as a dictionary, each key contains gti for particular urd
    the program produces overall count map and exposition map for this urdfiles set
    the wcs is produced automatically to cover nonzero exposition area with some margin

    Outputs: one counts image per energy band (<band><outctsname>), an
    exposure map (outexpmapname) and a background map (outbkgname), plus a
    pickle of background diagnostics.
    NOTE(review): urdbti and ebands are mutable default arguments — they are
    shared across calls; confirm they are never mutated.
    """
    attdata = AttDATA.concatenate([get_attdata(fname) for fname in set(attflist)])
    #attdata usually has data points stored each 3 seconds so try here to obtaind attitude information for slightly longer time span
    attdata = attdata.apply_gti(gti + [-30, 30])
    gti = attdata.gti & gti
    if locwcs is None: locwcs = make_wcs_for_attdata(attdata, gti, pixsize) #produce wcs for accumulated atitude information
    xsize, ysize = int(locwcs.wcs.crpix[0]*2 + 1), int(locwcs.wcs.crpix[1]*2 + 1)
    imgdata = {name: np.zeros((ysize, xsize), np.double) for name in ebands}
    urdgti = {URDN:emptyGTI for URDN in URDNS}
    urdhk = {}
    urdbkg = {}
    urdbkge = {}
    bkggti = {}
    urdevt = []
    # --- accumulate counts, housekeeping and background events per URD file ---
    for urdfname in urdflist[:]:
        # NOTE(review): urdfile is never closed — handles accumulate over the loop.
        urdfile = fits.open(urdfname)
        urdn = urdfile["EVENTS"].header["URDN"]
        tchk = (urdfile["HK"].data["TIME"][1:] + urdfile['HK'].data["TIME"][:-1])/2.
        print("processing:", urdfname)
        locgti = (get_gti(urdfile, "STDGTI") if "STDGTI" in urdfile else get_gti(urdfile)) & gti & -urdgti.get(urdn, emptyGTI) # & -urdbti.get(urdn, emptyGTI)
        locgti.merge_joint()
        locbgti = (get_gti(urdfile, "STDGTI") if "STDGTI" in urdfile else get_gti(urdfile)) & (gti + [-200, 200]) & -bkggti.get(urdn, emptyGTI)
        print("exposure in GTI:", locgti.exposure)
        locgti = locgti & -urdbti.get(urdn, emptyGTI)
        print("exposure after excluding BTI", locgti.exposure)
        if locgti.exposure == 0.:
            continue
        print("Tstart, Tstop:", locgti.arr[[0, -1], [0, 1]])
        urdgti[urdn] = urdgti.get(urdn, emptyGTI) | locgti
        bkggti[urdn] = bkggti.get(urdn, emptyGTI) | locbgti
        urddata = np.copy(urdfile["EVENTS"].data) #hint: do not apply bool mask to a fitsrec - it's a stright way to the memory leak :)
        urddata = urddata[(locgti + [-200, 200]).mask_outofgti_times(urddata["TIME"])]
        hkdata = np.copy(urdfile["HK"].data)
        hkdata = hkdata[(locgti + [-30, 30]).mask_outofgti_times(hkdata["TIME"])]
        urdhk[urdn] = urdhk.get(urdn, []) + [hkdata,]
        energy, grade, flag = make_energies_flags_and_grades(urddata, hkdata, urdn)
        timemask = locgti.mask_outofgti_times(urddata["TIME"])
        # Per-band photon selection and sky-image accumulation.
        for bandname, band in ebands.items():
            pickimg = np.all([energy > band.emin, energy < band.emax, grade > -1, grade < 10,
                              flag == 0, locgti.mask_outofgti_times(urddata["TIME"])], axis=0)
            if np.any(pickimg):
                urdloc = urddata[pickimg]
                # NOTE(review): hard-coded sky position (263.894, -32.258 deg)
                # with a 100 arcsec cone — looks like a debugging source
                # selection; confirm it should stay.
                vec1 = pol_to_vec(263.8940535*pi/180., -32.2583163*pi/180.)
                urdloc = get_photons_vectors(urdloc, urdn, attdata)
                masklast = np.arccos(np.sum(urdloc*vec1, axis=1)) < 100./3600.*pi/180.
                urdevt.append(urdloc[masklast])
                if weightphotons:
                    timg = make_vignetting_weighted_phot_images(urddata[pickimg], urdn, energy[pickimg], attdata, locwcs, photsplitnside)
                else:
                    timg = make_sky_image(urddata[pickimg], urdn, attdata, locwcs, photsplitnside)
                print("total photon on img", timg.sum(), "selected events", pickimg.sum())
                imgdata[bandname] += timg
        # 40-100 keV events sample the particle background.
        pickbkg = np.all([energy > 40., energy < 100., grade > -1, grade < 10, flag < 3], axis=0)
        bkgevts = urddata["TIME"][pickbkg]
        urdbkge[urdn] = urdbkge.get(urdn, []) + [bkgevts,]
    # --- write per-band count maps ---
    for bandname, img in imgdata.items():
        img = fits.PrimaryHDU(header=locwcs.to_header(), data=img)
        img.writeto(bandname + outctsname, overwrite=True)
    urdhk = {urdn:np.unique(np.concatenate(hklist)) for urdn, hklist in urdhk.items()}
    urddtc = {urdn: deadtime_correction(hk) for urdn, hk in urdhk.items()}
    # --- build a background light curve from the pooled high-energy events ---
    tevts = np.sort(np.concatenate([np.concatenate(e) for e in urdbkge.values()]))
    tgti = reduce(lambda a, b: a & b, urdgti.values())
    te = np.concatenate([np.linspace(s, e, int((e-s)//100.) + 2) for s, e in tgti.arr])
    # NOTE(review): np.bool is a deprecated/removed alias — use bool or np.bool_.
    mgaps = np.ones(te.size - 1, np.bool)
    if tgti.arr.size > 2:
        mgaps[np.cumsum([(int((e-s)//100.) + 2) for s, e in tgti.arr[:-1]]) - 1] = False
        mgaps[te[1:] - te[:-1] < 10] = False
    tevts = np.sort(np.concatenate([np.concatenate(e) for e in urdbkge.values()]))
    rate = tevts.searchsorted(te)
    rate = (rate[1:] - rate[:-1])[mgaps]/(te[1:] - te[:-1])[mgaps]
    tc = (te[1:] + te[:-1])[mgaps]/2.
    tm = np.sum(tgti.mask_outofgti_times(tevts))/tgti.exposure
    # NOTE(review): this urdbkg assignment is overwritten unconditionally a few
    # lines below by the constscale version — dead code; confirm which is intended.
    if tc.size == 0:
        urdbkg = {urdn: lambda x: np.ones(x.size)*tm*urdbkgsc[urdn]/7.62 for urdn in urdbkgsc}
    else:
        urdbkg = {urdn: interp1d(tc, rate*urdbkgsc[urdn]/7.61, bounds_error=False, fill_value=tm*urdbkgsc[urdn]/7.62) for urdn in urdbkgsc}
    tebkg, mgapsbkg, cratebkg, crerrbkg, bkgrate = make_overall_bkglc(tevts, bkggti, 25.)
    # NOTE(review): dump file name "backgroud.pickle" is misspelled — confirm
    # before renaming, downstream tools may depend on it.
    pickle.dump([tevts, bkggti, urdevt, urdgti, attdata], open("backgroud.pickle", "wb"))
    urdbkg = {urdn: constscale(urdbkgsc[urdn], bkgrate) for urdn in urdbkgsc}
    # --- exposure and background maps ---
    if usedtcorr:
        emap = make_expmap_for_wcs(locwcs, attdata, urdgti, dtcorr=urddtc) #, flatprofile=True)
    else:
        emap = make_expmap_for_wcs(locwcs, attdata, urdgti)
    emap = fits.PrimaryHDU(data=emap, header=locwcs.to_header())
    emap.writeto(outexpmapname, overwrite=True)
    bmap = make_bkgmap_for_wcs(locwcs, attdata, urdgti, time_corr=urdbkg)
    bmap = fits.PrimaryHDU(data=bmap, header=locwcs.to_header())
    bmap.writeto(outbkgname, overwrite=True)
if __name__ == "__main__":
pass
#pass, r, d - quasi cartesian coordinates of the vecteces
#it should be noted that convex hull is expected to be alongated along equator after quaternion rotation | arttools/plot.py | import numpy as np
from astropy.wcs import WCS
from ._det_spatial import get_shadowed_pix_mask_for_urddata, DL, F, multiply_photons
from .time import get_gti, GTI, tGTI, emptyGTI, deadtime_correction
from .atthist import hist_orientation_for_attdata
from .planwcs import make_wcs_for_attdata
from .caldb import get_energycal, get_shadowmask, get_energycal_by_urd, get_shadowmask_by_urd, urdbkgsc, OPAXOFFSET
from .energy import get_events_energy
from .telescope import URDNS
from .orientation import get_photons_sky_coord, read_gyro_fits, read_bokz_fits, AttDATA, define_required_correction, pol_to_vec, get_photons_vectors
from .lightcurve import make_overall_bkglc
from .vignetting import load_raw_wignetting_function
from astropy.io import fits
from math import pi, cos, sin
from multiprocessing import Pool, cpu_count, Queue, Process, Pipe
from threading import Thread
import copy
import time
import matplotlib.pyplot as plt
import os, sys
from .expmap import make_expmap_for_wcs
from .background import make_bkgmap_for_wcs
from scipy.interpolate import interp1d
from matplotlib.colors import LogNorm
from functools import reduce
from collections import namedtuple
import pickle
eband = namedtuple("eband", ["emin", "emax"])
class NoDATA(Exception):
pass
def constscale(const, func):
def newfunc(val):
return func(val)*const
return newfunc
def make_events_mask(minenergy=4., maxenergy=12., minflag=-1, ):
def mask_events(urddata, grade, energy):
eventsmask = np.all([grade > mingrade, grade < maxgrade,
urddata["RAW_X"] > minrawx, urddata["RAW_X"] < maxrawx,
urddata["RAW_Y"] > minrawy, urddata["RAW_Y"] < maxrawy,
energy > minenergy, energy < maxenergy], axis=0)
return eventsmask
return mask_events
standard_events_mask = make_events_mask(minenergy=4., maxenergy=12.)
def make_energies_flags_and_grades(urddata, urdhk, urdn):
flag = np.zeros(urddata.size, np.uint8)
shadow = get_shadowmask_by_urd(urdn)
caldbfile = get_energycal_by_urd(urdn)
maskshadow = get_shadowed_pix_mask_for_urddata(urddata, shadow)
flag[np.logical_not(maskshadow)] = 2
flag[np.any([urddata["RAW_X"] == 0, urddata["RAW_X"] == 47, \
urddata["RAW_Y"] == 0, urddata["RAW_Y"] == 47], axis=0)] = 3
energy, xc, yc, grade = get_events_energy(urddata, urdhk, caldbfile)
return energy, grade, flag
def make_vignetting_weighted_phot_images(urddata, urdn, energy, attdata, locwcs, photsplitside=1):
rawvignfun = load_raw_wignetting_function()
x, y = multiply_photons(urddata, photsplitside)
weights = rawvignfun(np.array([np.repeat(energy, photsplitside*photsplitside), x, y]).T)
r, d = get_photons_sky_coord(urddata, urdn, attdata, photsplitside)
x, y = locwcs.all_world2pix(np.array([r*180./pi, d*180./pi]).T, 1.).T
img = np.histogram2d(x, y, [np.arange(locwcs.wcs.crpix[0]*2 + 2) + 0.5,
np.arange(locwcs.wcs.crpix[1]*2 + 2) + 0.5],
weights=weights)[0].T
return img
def make_sky_image(urddata, urdn, attdata, locwcs, photsplitside=10, weight_with_vignetting=False):
r, d = get_photons_sky_coord(urddata, urdn, attdata, photsplitside)
x, y = locwcs.all_world2pix(np.array([r*180./pi, d*180./pi]).T, 1.).T
img = np.histogram2d(x, y, [np.arange(locwcs.wcs.crpix[0]*2 + 2) + 0.5,
np.arange(locwcs.wcs.crpix[1]*2 + 2) + 0.5])[0].T
return img/photsplitside/photsplitside
def get_attdata(fname):
ffile = fits.open(fname)
attdata = read_gyro_fits(ffile["ORIENTATION"]) if "gyro" in fname else read_bokz_fits(ffile["ORIENTATION"])
attdata.times = attdata.times - (0.97 if "gyro" in fname else 1.55)
attdata.gti.arr = attdata.gti.arr - (0.97 if "gyro" in fname else 1.55)
if "gyro" in fname:
attdata = define_required_correction(attdata)
return attdata
def make_mosaic_for_urdset_by_gti(urdflist, attflist, gti,
                                  outctsname, outbkgname, outexpmapname,
                                  urdbti=None, ebands=None,
                                  photsplitnside=1, pixsize=20/3600., usedtcorr=True,
                                  weightphotons=False, locwcs=None):
    """
    given two sets with paths to the urdfiles and corresponding attfiles,
    and gti as a dictionary, each key contains gti for particular urd
    the program produces overall count map and exposition map for this urdfiles set
    the wcs is produced automatically to cover nonzero exposition area with some margin

    Parameters
    ----------
    urdflist, attflist : paths to URD event files and attitude files
    gti : good-time-interval object combined per-URD below
    outctsname, outbkgname, outexpmapname : output FITS file names
    urdbti : optional dict of bad time intervals per URD
    ebands : dict name -> eband; defaults to soft (4-12) and hard (8-16)
    photsplitnside : photon splitting factor passed to the imaging routines
    usedtcorr : apply deadtime correction to the exposure map
    weightphotons : weight photons with the raw vignetting function
    locwcs : optional precomputed WCS; built from the attitude data if None

    Writes <band><outctsname> count maps, the exposure map and the background map.
    """
    # avoid mutable default arguments: materialize the defaults per call
    if urdbti is None:
        urdbti = {}
    if ebands is None:
        ebands = {"soft": eband(4, 12), "hard": eband(8, 16)}
    attdata = AttDATA.concatenate([get_attdata(fname) for fname in set(attflist)])
    # attdata usually has data points stored each 3 seconds, so extend the gti
    # slightly to obtain attitude information for a somewhat longer time span
    attdata = attdata.apply_gti(gti + [-30, 30])
    gti = attdata.gti & gti
    if locwcs is None: locwcs = make_wcs_for_attdata(attdata, gti, pixsize)  # produce wcs for accumulated attitude information
    xsize, ysize = int(locwcs.wcs.crpix[0]*2 + 1), int(locwcs.wcs.crpix[1]*2 + 1)
    imgdata = {name: np.zeros((ysize, xsize), np.double) for name in ebands}
    urdgti = {URDN: emptyGTI for URDN in URDNS}
    urdhk = {}
    urdbkg = {}
    urdbkge = {}
    bkggti = {}
    urdevt = []
    for urdfname in urdflist[:]:
        urdfile = fits.open(urdfname)
        urdn = urdfile["EVENTS"].header["URDN"]
        print("processing:", urdfname)
        locgti = (get_gti(urdfile, "STDGTI") if "STDGTI" in urdfile else get_gti(urdfile)) & gti & -urdgti.get(urdn, emptyGTI)  # & -urdbti.get(urdn, emptyGTI)
        locgti.merge_joint()
        # background GTI uses a widened window and its own accumulated exclusion
        locbgti = (get_gti(urdfile, "STDGTI") if "STDGTI" in urdfile else get_gti(urdfile)) & (gti + [-200, 200]) & -bkggti.get(urdn, emptyGTI)
        print("exposure in GTI:", locgti.exposure)
        locgti = locgti & -urdbti.get(urdn, emptyGTI)
        print("exposure after excluding BTI", locgti.exposure)
        if locgti.exposure == 0.:
            continue
        print("Tstart, Tstop:", locgti.arr[[0, -1], [0, 1]])
        urdgti[urdn] = urdgti.get(urdn, emptyGTI) | locgti
        bkggti[urdn] = bkggti.get(urdn, emptyGTI) | locbgti
        urddata = np.copy(urdfile["EVENTS"].data)  # hint: do not apply bool mask to a fitsrec - it's a straight way to a memory leak
        urddata = urddata[(locgti + [-200, 200]).mask_outofgti_times(urddata["TIME"])]
        hkdata = np.copy(urdfile["HK"].data)
        hkdata = hkdata[(locgti + [-30, 30]).mask_outofgti_times(hkdata["TIME"])]
        urdhk[urdn] = urdhk.get(urdn, []) + [hkdata,]
        energy, grade, flag = make_energies_flags_and_grades(urddata, hkdata, urdn)
        for bandname, band in ebands.items():
            # events in the band, with acceptable grade and flag, inside the strict GTI
            pickimg = np.all([energy > band.emin, energy < band.emax, grade > -1, grade < 10,
                              flag == 0, locgti.mask_outofgti_times(urddata["TIME"])], axis=0)
            if np.any(pickimg):
                urdloc = urddata[pickimg]
                # NOTE(review): hard-coded direction (263.8940535, -32.2583163) deg and a
                # 100-arcsec cone look like a source-specific debugging selection - confirm
                vec1 = pol_to_vec(263.8940535*pi/180., -32.2583163*pi/180.)
                urdloc = get_photons_vectors(urdloc, urdn, attdata)
                masklast = np.arccos(np.sum(urdloc*vec1, axis=1)) < 100./3600.*pi/180.
                urdevt.append(urdloc[masklast])
                if weightphotons:
                    timg = make_vignetting_weighted_phot_images(urddata[pickimg], urdn, energy[pickimg], attdata, locwcs, photsplitnside)
                else:
                    timg = make_sky_image(urddata[pickimg], urdn, attdata, locwcs, photsplitnside)
                print("total photon on img", timg.sum(), "selected events", pickimg.sum())
                imgdata[bandname] += timg
        # 40-100 keV events serve as a particle-background tracer
        pickbkg = np.all([energy > 40., energy < 100., grade > -1, grade < 10, flag < 3], axis=0)
        bkgevts = urddata["TIME"][pickbkg]
        urdbkge[urdn] = urdbkge.get(urdn, []) + [bkgevts,]
    for bandname, img in imgdata.items():
        img = fits.PrimaryHDU(header=locwcs.to_header(), data=img)
        img.writeto(bandname + outctsname, overwrite=True)
    urdhk = {urdn: np.unique(np.concatenate(hklist)) for urdn, hklist in urdhk.items()}
    urddtc = {urdn: deadtime_correction(hk) for urdn, hk in urdhk.items()}
    tevts = np.sort(np.concatenate([np.concatenate(e) for e in urdbkge.values()]))
    tgti = reduce(lambda a, b: a & b, urdgti.values())
    te = np.concatenate([np.linspace(s, e, int((e-s)//100.) + 2) for s, e in tgti.arr])
    mgaps = np.ones(te.size - 1, bool)  # np.bool alias was removed in numpy >= 1.24
    if tgti.arr.size > 2:
        # drop the bins that bridge separate GTI intervals and the very short ones
        mgaps[np.cumsum([(int((e-s)//100.) + 2) for s, e in tgti.arr[:-1]]) - 1] = False
        mgaps[te[1:] - te[:-1] < 10] = False
    rate = tevts.searchsorted(te)
    rate = (rate[1:] - rate[:-1])[mgaps]/(te[1:] - te[:-1])[mgaps]
    tc = (te[1:] + te[:-1])[mgaps]/2.
    tm = np.sum(tgti.mask_outofgti_times(tevts))/tgti.exposure
    # NOTE(review): this urdbkg is unconditionally overwritten by the constscale
    # version below, so the branch is effectively dead code - confirm before removal.
    # The 7.61 vs 7.62 scale difference between the branches also looks accidental.
    # urdn is bound as a default argument to avoid the late-binding closure bug
    # (otherwise every lambda would see the last urdn of the comprehension).
    if tc.size == 0:
        urdbkg = {urdn: (lambda x, urdn=urdn: np.ones(x.size)*tm*urdbkgsc[urdn]/7.62) for urdn in urdbkgsc}
    else:
        urdbkg = {urdn: interp1d(tc, rate*urdbkgsc[urdn]/7.61, bounds_error=False, fill_value=tm*urdbkgsc[urdn]/7.62) for urdn in urdbkgsc}
    tebkg, mgapsbkg, cratebkg, crerrbkg, bkgrate = make_overall_bkglc(tevts, bkggti, 25.)
    # NOTE(review): debugging dump; the "backgroud" typo is kept because downstream
    # tooling may already expect this exact file name
    with open("backgroud.pickle", "wb") as fbkg:
        pickle.dump([tevts, bkggti, urdevt, urdgti, attdata], fbkg)
    urdbkg = {urdn: constscale(urdbkgsc[urdn], bkgrate) for urdn in urdbkgsc}
    if usedtcorr:
        emap = make_expmap_for_wcs(locwcs, attdata, urdgti, dtcorr=urddtc)  # , flatprofile=True)
    else:
        emap = make_expmap_for_wcs(locwcs, attdata, urdgti)
    emap = fits.PrimaryHDU(data=emap, header=locwcs.to_header())
    emap.writeto(outexpmapname, overwrite=True)
    bmap = make_bkgmap_for_wcs(locwcs, attdata, urdgti, time_corr=urdbkg)
    bmap = fits.PrimaryHDU(data=bmap, header=locwcs.to_header())
    bmap.writeto(outbkgname, overwrite=True)
if __name__ == "__main__":
    pass
# pass, r, d - quasi cartesian coordinates of the vertices
# it should be noted that the convex hull is expected to be elongated along the equator after quaternion rotation
from ops import *
from utils import *
from glob import glob
import time
import shutil
from tensorflow.contrib.data import prefetch_to_device, shuffle_and_repeat, map_and_batch
import numpy as np
class UGATIT(object) :
    """U-GAT-IT image-to-image translation model (TensorFlow 1.x, graph mode).

    Wraps graph construction (build_model), training (train), testing (test)
    and SavedModel export (export) for unpaired translation between the image
    domains stored under ./dataset/<name>/trainA and ./dataset/<name>/trainB.
    """

    def __init__(self, sess, args):
        """Store the session and all hyper-parameters taken from the argparse namespace."""
        self.light = args.light              # light variant: MLP operates on pooled features
        self.args_dict = vars(args)          # kept to reproduce the launch command in html reports

        if self.light :
            self.model_name = 'UGATIT_light'
        else :
            self.model_name = 'UGATIT'

        self.print_heatmap = args.print_heatmap
        self.sess = sess                     # tf.Session owned by the caller
        self.phase = args.phase              # 'train' / 'test' / 'export'
        self.dataset_name = args.dataset
        self.augment_flag = args.augment_flag

        self.epoch = args.epoch
        self.iteration = args.iteration      # iterations per epoch
        self.decay_flag = args.decay_flag
        self.decay_epoch = args.decay_epoch  # epoch after which the lr decays linearly

        self.gan_type = args.gan_type
        self.batch_size = args.batch_size
        self.print_freq = args.print_freq    # sample-image dump period (iterations)
        self.save_freq = args.save_freq      # checkpoint period (iterations)

        self.init_lr = args.lr
        self.ch = args.ch                    # base channel count of the conv stacks

        """ Weight """
        self.adv_weight = args.adv_weight
        self.cycle_weight = args.cycle_weight
        self.identity_weight = args.identity_weight
        self.cam_weight = args.cam_weight
        self.ld = args.GP_ld                 # gradient-penalty coefficient
        self.smoothing = args.smoothing

        """ Generator """
        self.n_res = args.n_res              # residual blocks in the generator bottleneck

        """ Discriminator """
        self.n_dis = args.n_dis              # discriminator depth
        self.n_critic = args.n_critic        # D updates per G update
        self.sn = args.sn                    # spectral normalization toggle

        self.img_size = args.img_size
        self.img_ch = args.img_ch

        """ working on dir params """
        # raw directory options; resolved to concrete paths in check_and_mkdirs()
        self.train_log_root = args.train_log_root
        self.checkpoint_dir = args.checkpoint_dir
        self.result_dir = args.result_dir
        self.log_dir = args.log_dir
        self.sample_dir = args.sample_dir
        self.model_dir = args.model_dir

        # self.trainA, self.trainB = prepare_data(dataset_name=self.dataset_name, size=self.img_size
        self.trainA_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainA'))
        self.trainB_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainB'))
        self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset))

        print()
        print("##### Information #####")
        print("# light : ", self.light)
        print("# gan type : ", self.gan_type)
        print("# dataset : ", self.dataset_name)
        print("# max dataset number : ", self.dataset_num)
        print("# batch_size : ", self.batch_size)
        print("# epoch : ", self.epoch)
        print("# iteration per epoch : ", self.iteration)
        print("# smoothing : ", self.smoothing)

        print()
        print("##### Generator #####")
        print("# residual blocks : ", self.n_res)

        print()
        print("##### Discriminator #####")
        print("# discriminator layer : ", self.n_dis)
        print("# the number of critic : ", self.n_critic)
        print("# spectral normalization : ", self.sn)

        print()
        print("##### Weight #####")
        print("# adv_weight : ", self.adv_weight)
        print("# cycle_weight : ", self.cycle_weight)
        print("# identity_weight : ", self.identity_weight)
        print("# cam_weight : ", self.cam_weight)
##################################################################################
# Generator
##################################################################################
@property
def default_model_dir(self):
n_res = str(self.n_res) + 'resblock'
n_dis = str(self.n_dis) + 'dis'
if self.smoothing:
smoothing = '_smoothing'
else:
smoothing = ''
if self.sn:
sn = '_sn'
else:
sn = ''
return "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}{}{}".format(self.model_name, self.dataset_name,
self.gan_type, n_res, n_dis,
self.n_critic,
self.adv_weight, self.cycle_weight,
self.identity_weight, self.cam_weight, sn,
smoothing)
def check_and_mkdirs(self):
from datetime import datetime
# check and make folders
if self.model_dir == '':
self.model_dir = self.default_model_dir
# current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
if self.checkpoint_dir == "":
self.checkpoint_dir = os.path.join(self.train_log_root, self.model_dir)
elif '/' not in self.checkpoint_dir:
self.checkpoint_dir = os.path.join(self.train_log_root, self.checkpoint_dir, self.model_dir)
if self.log_dir == "":
self.log_dir = os.path.join(self.train_log_root, self.model_dir, "log")
elif '/' not in self.log_dir:
self.log_dir = os.path.join(self.train_log_root, self.log_dir, self.model_dir)
if self.sample_dir == "":
self.sample_dir = os.path.join(self.train_log_root, self.model_dir, "samples")
elif '/' not in self.sample_dir:
self.sample_dir = os.path.join(self.train_log_root, self.sample_dir, self.model_dir)
if self.result_dir == "":
self.result_dir = os.path.join(self.train_log_root, self.model_dir, "result")
elif '/' not in self.result_dir:
self.result_dir = os.path.join(self.train_log_root, self.result_dir, self.model_dir)
if self.phase in ('train',):
check_folder(self.checkpoint_dir)
check_folder(self.log_dir)
if self.phase in ('train', 'test'):
check_folder(os.path.join(self.sample_dir, "imgs"))
if self.phase in ('test', 'export'):
check_folder(os.path.join(self.result_dir))
def write_args_to_html(self):
body = ""
for k, v in self.args_dict.items():
body = body + "--" + str(k) + " " + str(v) + " \\<br>"
with open(self.total_sample_path, 'a') as t_html:
t_html.write("python3 main.py \\<br>")
t_html.write(body)
def write_to_html(self, html_path, epoch, idx, img_id):
names = ['source', 'output', 'real']
body = ""
for name in names:
image_name = '{}_{:02d}_{:06d}_{:02d}.jpg'.format(name, epoch, idx, img_id)
body = body + str("<img src=\"" + os.path.join('imgs', image_name) + "\">")
body = body + str("<br>")
with open(html_path, 'a') as v_html:
v_html.write(body)
with open(self.total_sample_path, 'a') as t_html:
t_html.write(body)
    def generator(self, x_init, reuse=False, scope="generator"):
        """Build the U-GAT-IT generator graph.

        Returns (translated image in [-1, 1] via tanh, CAM logits, CAM heatmap).
        Variable names are fixed by the scope= arguments, so set reuse=True when
        rebuilding the same generator on another input.
        """
        channel = self.ch
        with tf.variable_scope(scope, reuse=reuse) :
            x = conv(x_init, channel, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv')
            x = instance_norm(x, scope='ins_norm')
            x = relu(x)

            # Down-Sampling: two stride-2 convs, doubling channels each time
            for i in range(2) :
                x = conv(x, channel*2, kernel=3, stride=2, pad=1, pad_type='reflect', scope='conv_'+str(i))
                x = instance_norm(x, scope='ins_norm_'+str(i))
                x = relu(x)
                channel = channel * 2

            # Down-Sampling Bottleneck
            for i in range(self.n_res):
                x = resblock(x, channel, scope='resblock_' + str(i))

            # Class Activation Map: the 'CAM_logit' fc layer is shared (reuse=True)
            # between the average-pooled and max-pooled branches
            cam_x = global_avg_pooling(x)
            cam_gap_logit, cam_x_weight = fully_connected_with_w(cam_x, scope='CAM_logit')
            x_gap = tf.multiply(x, cam_x_weight)

            cam_x = global_max_pooling(x)
            cam_gmp_logit, cam_x_weight = fully_connected_with_w(cam_x, reuse=True, scope='CAM_logit')
            x_gmp = tf.multiply(x, cam_x_weight)

            cam_logit = tf.concat([cam_gap_logit, cam_gmp_logit], axis=-1)
            x = tf.concat([x_gap, x_gmp], axis=-1)

            # fuse the doubled (gap+gmp) feature stack back to `channel` channels
            x = conv(x, channel, kernel=1, stride=1, scope='conv_1x1')
            x = relu(x)

            # attention heatmap: per-pixel sum over channels
            heatmap = tf.squeeze(tf.reduce_sum(x, axis=-1))

            # Gamma, Beta block: AdaLIN parameters predicted from the features
            gamma, beta = self.MLP(x, reuse=reuse)

            # Up-Sampling Bottleneck: residual blocks with adaptive instance-layer norm
            for i in range(self.n_res):
                x = adaptive_ins_layer_resblock(x, channel, gamma, beta, smoothing=self.smoothing, scope='adaptive_resblock' + str(i))

            # Up-Sampling: mirror of the down-sampling path
            for i in range(2) :
                x = up_sample(x, scale_factor=2)
                x = conv(x, channel//2, kernel=3, stride=1, pad=1, pad_type='reflect', scope='up_conv_'+str(i))
                x = layer_instance_norm(x, scope='layer_ins_norm_'+str(i))
                x = relu(x)
                channel = channel // 2

            x = conv(x, channels=3, kernel=7, stride=1, pad=3, pad_type='reflect', scope='G_logit')
            x = tanh(x)

            return x, cam_logit, heatmap
    def MLP(self, x, use_bias=True, reuse=False, scope='MLP'):
        """Predict per-channel (gamma, beta) for the AdaLIN up-sampling blocks.

        In the light variant the input features are globally average-pooled
        first, which drastically reduces the fc-layer parameter count.
        """
        # NOTE(review): output width is ch * n_res here - confirm this matches
        # the bottleneck channel count expected by adaptive_ins_layer_resblock
        channel = self.ch * self.n_res
        if self.light :
            x = global_avg_pooling(x)
        with tf.variable_scope(scope, reuse=reuse):
            for i in range(2) :
                x = fully_connected(x, channel, use_bias, scope='linear_' + str(i))
                x = relu(x)
            gamma = fully_connected(x, channel, use_bias, scope='gamma')
            beta = fully_connected(x, channel, use_bias, scope='beta')
            # broadcastable shape: one (gamma, beta) pair per channel per sample
            gamma = tf.reshape(gamma, shape=[self.batch_size, 1, 1, channel])
            beta = tf.reshape(beta, shape=[self.batch_size, 1, 1, channel])
            return gamma, beta
##################################################################################
# Discriminator
##################################################################################
def discriminator(self, x_init, reuse=False, scope="discriminator"):
D_logit = []
D_CAM_logit = []
with tf.variable_scope(scope, reuse=reuse) :
local_x, local_cam, local_heatmap = self.discriminator_local(x_init, reuse=reuse, scope='local')
global_x, global_cam, global_heatmap = self.discriminator_global(x_init, reuse=reuse, scope='global')
D_logit.extend([local_x, global_x])
D_CAM_logit.extend([local_cam, global_cam])
return D_logit, D_CAM_logit, local_heatmap, global_heatmap
    def discriminator_global(self, x_init, reuse=False, scope='discriminator_global'):
        """PatchGAN-style discriminator over the full receptive field, with CAM.

        Returns (patch logits, CAM logits, CAM heatmap).
        """
        with tf.variable_scope(scope, reuse=reuse):
            channel = self.ch
            x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_0')
            x = lrelu(x, 0.2)

            # stride-2 downsampling stack, doubling channels each layer
            for i in range(1, self.n_dis - 1):
                x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i))
                x = lrelu(x, 0.2)
                channel = channel * 2

            # final stride-1 conv before the CAM block
            x = conv(x, channel * 2, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='conv_last')
            x = lrelu(x, 0.2)
            channel = channel * 2

            # CAM block: shared 'CAM_logit' fc layer over avg- and max-pooled features
            cam_x = global_avg_pooling(x)
            cam_gap_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, scope='CAM_logit')
            x_gap = tf.multiply(x, cam_x_weight)

            cam_x = global_max_pooling(x)
            cam_gmp_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, reuse=True, scope='CAM_logit')
            x_gmp = tf.multiply(x, cam_x_weight)

            cam_logit = tf.concat([cam_gap_logit, cam_gmp_logit], axis=-1)
            x = tf.concat([x_gap, x_gmp], axis=-1)

            # fuse the doubled (gap+gmp) stack back to `channel` channels
            x = conv(x, channel, kernel=1, stride=1, scope='conv_1x1')
            x = lrelu(x, 0.2)

            heatmap = tf.squeeze(tf.reduce_sum(x, axis=-1))

            x = conv(x, channels=1, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='D_logit')

            return x, cam_logit, heatmap
    def discriminator_local(self, x_init, reuse=False, scope='discriminator_local'):
        """Shallower discriminator with a smaller receptive field ("local"), with CAM.

        Same structure as discriminator_global but with two fewer stride-2
        layers. Returns (patch logits, CAM logits, CAM heatmap).
        """
        with tf.variable_scope(scope, reuse=reuse) :
            channel = self.ch
            x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_0')
            x = lrelu(x, 0.2)

            # two layers fewer than the global discriminator
            for i in range(1, self.n_dis - 2 - 1):
                x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i))
                x = lrelu(x, 0.2)
                channel = channel * 2

            x = conv(x, channel * 2, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='conv_last')
            x = lrelu(x, 0.2)
            channel = channel * 2

            # CAM block: shared 'CAM_logit' fc layer over avg- and max-pooled features
            cam_x = global_avg_pooling(x)
            cam_gap_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, scope='CAM_logit')
            x_gap = tf.multiply(x, cam_x_weight)

            cam_x = global_max_pooling(x)
            cam_gmp_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, reuse=True, scope='CAM_logit')
            x_gmp = tf.multiply(x, cam_x_weight)

            cam_logit = tf.concat([cam_gap_logit, cam_gmp_logit], axis=-1)
            x = tf.concat([x_gap, x_gmp], axis=-1)

            x = conv(x, channel, kernel=1, stride=1, scope='conv_1x1')
            x = lrelu(x, 0.2)

            heatmap = tf.squeeze(tf.reduce_sum(x, axis=-1))

            x = conv(x, channels=1, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='D_logit')

            return x, cam_logit, heatmap
##################################################################################
# Model
##################################################################################
def generate_a2b(self, x_A, reuse=False):
out, cam, heatmap = self.generator(x_A, reuse=reuse, scope="generator_B")
return out, cam, heatmap
def generate_b2a(self, x_B, reuse=False):
out, cam, heatmap = self.generator(x_B, reuse=reuse, scope="generator_A")
return out, cam, heatmap
def discriminate_real(self, x_A, x_B):
real_A_logit, real_A_cam_logit, _, _ = self.discriminator(x_A, scope="discriminator_A")
real_B_logit, real_B_cam_logit, _, _ = self.discriminator(x_B, scope="discriminator_B")
return real_A_logit, real_A_cam_logit, real_B_logit, real_B_cam_logit
def discriminate_fake(self, x_ba, x_ab):
fake_A_logit, fake_A_cam_logit, _, _ = self.discriminator(x_ba, reuse=True, scope="discriminator_A")
fake_B_logit, fake_B_cam_logit, dis_ab_local_heatmap, dis_ab_global_heatmap = self.discriminator(x_ab, reuse=True, scope="discriminator_B")
return fake_A_logit, fake_A_cam_logit, fake_B_logit, fake_B_cam_logit, dis_ab_local_heatmap, dis_ab_global_heatmap
    def gradient_panalty(self, real, fake, scope="discriminator_A"):
        """Gradient penalty for WGAN-GP / WGAN-LP / DRAGAN discriminators.

        Returns (sum of penalties over the two patch logits, sum over the two
        CAM logits), each scaled by self.ld.
        NOTE(review): method name is misspelled ("panalty") but kept as-is
        because build_model calls it by this name.
        """
        if self.gan_type.__contains__('dragan'):
            # DRAGAN perturbs the real samples instead of using generator output
            eps = tf.random_uniform(shape=tf.shape(real), minval=0., maxval=1.)
            _, x_var = tf.nn.moments(real, axes=[0, 1, 2, 3])
            x_std = tf.sqrt(x_var)  # magnitude of noise decides the size of local region
            fake = real + 0.5 * x_std * eps

        # random point on the line between real and fake, one alpha per sample
        alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0., maxval=1.)
        interpolated = real + alpha * (fake - real)

        logit, cam_logit, _, _ = self.discriminator(interpolated, reuse=True, scope=scope)

        GP = []
        cam_GP = []

        # two entries each: [local, global] discriminator outputs
        for i in range(2) :
            grad = tf.gradients(logit[i], interpolated)[0]  # gradient of D(interpolated)
            grad_norm = tf.norm(flatten(grad), axis=1)  # l2 norm

            # WGAN - LP: one-sided penalty; GP/DRAGAN: two-sided penalty
            if self.gan_type == 'wgan-lp' :
                GP.append(self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, grad_norm - 1.))))
            elif self.gan_type == 'wgan-gp' or self.gan_type == 'dragan':
                GP.append(self.ld * tf.reduce_mean(tf.square(grad_norm - 1.)))

        # same penalty applied to the CAM logits
        for i in range(2) :
            grad = tf.gradients(cam_logit[i], interpolated)[0]  # gradient of D(interpolated)
            grad_norm = tf.norm(flatten(grad), axis=1)  # l2 norm

            # WGAN - LP
            if self.gan_type == 'wgan-lp' :
                cam_GP.append(self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, grad_norm - 1.))))
            elif self.gan_type == 'wgan-gp' or self.gan_type == 'dragan':
                cam_GP.append(self.ld * tf.reduce_mean(tf.square(grad_norm - 1.)))

        return sum(GP), sum(cam_GP)
    def build_model(self):
        """Construct the TF1 graph for the current phase.

        'train'  : input pipelines, both generators/discriminators, losses,
                   optimizers and summaries.
        'export' : a single A->B generator fed from a placeholder, for serving.
        otherwise: test-mode placeholders and both translation directions.
        """
        if self.phase == 'train' :
            self.lr = tf.placeholder(tf.float32, name='learning_rate')

            """ Input Image"""
            Image_Data_Class = ImageData(self.img_size, self.img_ch, self.augment_flag)

            trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
            trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)

            # shuffle buffer covers the whole (larger) dataset; batches are prefetched to GPU
            gpu_device = '/gpu:0'
            trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, None))
            trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, None))

            trainA_iterator = trainA.make_one_shot_iterator()
            trainB_iterator = trainB.make_one_shot_iterator()

            self.domain_A = trainA_iterator.get_next()
            self.domain_B = trainB_iterator.get_next()

            """ Define Generator, Discriminator """
            # first calls create the generator variables; all later calls reuse them
            x_ab, cam_ab, heatmap_g_a2b = self.generate_a2b(self.domain_A) # real a
            x_ba, cam_ba, heatmap_g_b2a = self.generate_b2a(self.domain_B) # real b

            # cycle reconstructions
            x_aba, _, _ = self.generate_b2a(x_ab, reuse=True) # real b
            x_bab, _, _ = self.generate_a2b(x_ba, reuse=True) # real a

            # identity mappings (domain fed into its own-domain generator)
            x_aa, cam_aa, _ = self.generate_b2a(self.domain_A, reuse=True) # fake b
            x_bb, cam_bb, _ = self.generate_a2b(self.domain_B, reuse=True) # fake a

            real_A_logit, real_A_cam_logit, real_B_logit, real_B_cam_logit = self.discriminate_real(self.domain_A, self.domain_B)
            fake_A_logit, fake_A_cam_logit, fake_B_logit, fake_B_cam_logit, dis_ab_local_heatmap, dis_ab_global_heatmap = self.discriminate_fake(x_ba, x_ab)

            """ Define Loss """
            # gradient penalty only for the WGAN family / DRAGAN
            if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan' :
                GP_A, GP_CAM_A = self.gradient_panalty(real=self.domain_A, fake=x_ba, scope="discriminator_A")
                GP_B, GP_CAM_B = self.gradient_panalty(real=self.domain_B, fake=x_ab, scope="discriminator_B")
            else :
                GP_A, GP_CAM_A = 0, 0
                GP_B, GP_CAM_B = 0, 0

            G_ad_loss_A = (generator_loss(self.gan_type, fake_A_logit) + generator_loss(self.gan_type, fake_A_cam_logit))
            G_ad_loss_B = (generator_loss(self.gan_type, fake_B_logit) + generator_loss(self.gan_type, fake_B_cam_logit))

            D_ad_loss_A = (discriminator_loss(self.gan_type, real_A_logit, fake_A_logit) + discriminator_loss(self.gan_type, real_A_cam_logit, fake_A_cam_logit) + GP_A + GP_CAM_A)
            D_ad_loss_B = (discriminator_loss(self.gan_type, real_B_logit, fake_B_logit) + discriminator_loss(self.gan_type, real_B_cam_logit, fake_B_cam_logit) + GP_B + GP_CAM_B)

            reconstruction_A = L1_loss(x_aba, self.domain_A) # reconstruction
            reconstruction_B = L1_loss(x_bab, self.domain_B) # reconstruction

            identity_A = L1_loss(x_aa, self.domain_A)
            identity_B = L1_loss(x_bb, self.domain_B)

            # CAM loss pushes the attention logits apart between translated and identity paths
            cam_A = cam_loss(source=cam_ba, non_source=cam_aa)
            cam_B = cam_loss(source=cam_ab, non_source=cam_bb)

            Generator_A_gan = self.adv_weight * G_ad_loss_A
            Generator_A_cycle = self.cycle_weight * reconstruction_B
            Generator_A_identity = self.identity_weight * identity_A
            Generator_A_cam = self.cam_weight * cam_A

            Generator_B_gan = self.adv_weight * G_ad_loss_B
            Generator_B_cycle = self.cycle_weight * reconstruction_A
            Generator_B_identity = self.identity_weight * identity_B
            Generator_B_cam = self.cam_weight * cam_B

            Generator_A_loss = Generator_A_gan + Generator_A_cycle + Generator_A_identity + Generator_A_cam
            Generator_B_loss = Generator_B_gan + Generator_B_cycle + Generator_B_identity + Generator_B_cam

            Discriminator_A_loss = self.adv_weight * D_ad_loss_A
            Discriminator_B_loss = self.adv_weight * D_ad_loss_B

            self.Generator_loss = Generator_A_loss + Generator_B_loss + regularization_loss('generator')
            self.Discriminator_loss = Discriminator_A_loss + Discriminator_B_loss + regularization_loss('discriminator')

            """ Result Image """
            self.fake_A = x_ba
            self.fake_B = x_ab

            self.real_A = self.domain_A
            self.real_B = self.domain_B

            """ Training """
            # split trainables by scope-name substring into G and D parameter sets
            t_vars = tf.trainable_variables()
            G_vars = [var for var in t_vars if 'generator' in var.name]
            D_vars = [var for var in t_vars if 'discriminator' in var.name]

            self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars)
            self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars)

            """" Summary """
            self.all_G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss)
            self.all_D_loss = tf.summary.scalar("Discriminator_loss", self.Discriminator_loss)

            self.G_A_loss = tf.summary.scalar("G_A_loss", Generator_A_loss)
            self.G_A_gan = tf.summary.scalar("G_A_gan", Generator_A_gan)
            self.G_A_cycle = tf.summary.scalar("G_A_cycle", Generator_A_cycle)
            self.G_A_identity = tf.summary.scalar("G_A_identity", Generator_A_identity)
            self.G_A_cam = tf.summary.scalar("G_A_cam", Generator_A_cam)

            self.G_B_loss = tf.summary.scalar("G_B_loss", Generator_B_loss)
            self.G_B_gan = tf.summary.scalar("G_B_gan", Generator_B_gan)
            self.G_B_cycle = tf.summary.scalar("G_B_cycle", Generator_B_cycle)
            self.G_B_identity = tf.summary.scalar("G_B_identity", Generator_B_identity)
            self.G_B_cam = tf.summary.scalar("G_B_cam", Generator_B_cam)

            self.D_A_loss = tf.summary.scalar("D_A_loss", Discriminator_A_loss)
            self.D_B_loss = tf.summary.scalar("D_B_loss", Discriminator_B_loss)

            # track the learnable rho parameters of (Ada)LIN layers
            self.rho_var = []
            for var in tf.trainable_variables():
                if 'rho' in var.name:
                    self.rho_var.append(tf.summary.histogram(var.name, var))
                    self.rho_var.append(tf.summary.scalar(var.name + "_min", tf.reduce_min(var)))
                    self.rho_var.append(tf.summary.scalar(var.name + "_max", tf.reduce_max(var)))
                    self.rho_var.append(tf.summary.scalar(var.name + "_mean", tf.reduce_mean(var)))

            g_summary_list = [self.G_A_loss, self.G_A_gan, self.G_A_cycle, self.G_A_identity, self.G_A_cam,
                              self.G_B_loss, self.G_B_gan, self.G_B_cycle, self.G_B_identity, self.G_B_cam,
                              self.all_G_loss]
            g_summary_list.extend(self.rho_var)
            d_summary_list = [self.D_A_loss, self.D_B_loss, self.all_D_loss]

            self.G_loss = tf.summary.merge(g_summary_list)
            self.D_loss = tf.summary.merge(d_summary_list)

        elif self.phase == 'export':
            """ Export a serving model of domainA to domainB"""
            self.input_domain_A = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='input_domain_A')
            self.predict_domain_B, _, _ = self.generate_a2b(self.input_domain_A)
            # stable output tensor name for the serving signature
            self.predict_result = tf.identity(self.predict_domain_B, name="predict_result")

        else :
            """ Test """
            self.test_domain_A = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_domain_A')
            self.test_domain_B = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_domain_B')

            self.test_fake_B, _, self.test_heatmap_a2b = self.generate_a2b(self.test_domain_A)
            self.test_fake_A, _, self.test_heatmap_b2a = self.generate_b2a(self.test_domain_B)

            if self.print_heatmap:
                # NOTE(review): fresh scope "test_discriminator_B" means these heatmaps
                # come from an untrained discriminator copy - confirm this is intended
                _, _, self.test_heatmap_local_dis_ab, self.test_heatmap_global_dis_ab = self.discriminator(self.test_fake_B, scope="test_discriminator_B")
    def train(self):
        """Run the training loop: alternate D and (every n_critic steps) G updates,
        periodically dumping sample images and checkpoints."""
        self.check_and_mkdirs()
        self.total_sample_path = os.path.join(os.path.join(self.sample_dir, "_total_samples.html"))
        self.write_args_to_html()

        # initialize all variables
        tf.global_variables_initializer().run()

        # saver to save model
        self.saver = tf.train.Saver()

        # summary writer
        self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)

        # restore check-point if it exists; counter resumes epoch/batch position
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            start_epoch = (int)(checkpoint_counter / self.iteration)
            start_batch_id = checkpoint_counter - start_epoch * self.iteration
            counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            start_epoch = 0
            start_batch_id = 0
            counter = 1
            print(" [!] Load failed...")

        # loop for epoch
        start_time = time.time()
        past_g_loss = -1.
        lr = self.init_lr
        for epoch in range(start_epoch, self.epoch):
            # lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)
            if self.decay_flag :
                #lr = self.init_lr * pow(0.5, epoch // self.decay_epoch)
                # linear decay to zero after decay_epoch
                lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)

            for idx in range(start_batch_id, self.iteration):
                train_feed_dict = {
                    self.lr : lr
                }

                # Update D
                _, d_loss, summary_str = self.sess.run([self.D_optim,
                                                        self.Discriminator_loss, self.D_loss], feed_dict = train_feed_dict)
                self.writer.add_summary(summary_str, counter)

                # Update G (only every n_critic-th step)
                g_loss = None
                if (counter - 1) % self.n_critic == 0 :
                    batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str = self.sess.run([self.real_A, self.real_B,
                                                                                                            self.fake_A, self.fake_B,
                                                                                                            self.G_optim,
                                                                                                            self.Generator_loss, self.G_loss], feed_dict = train_feed_dict)
                    self.writer.add_summary(summary_str, counter)
                    past_g_loss = g_loss

                # display training status
                counter += 1
                # NOTE(review): `== None` should be `is None`; behavior is the same
                # for float/None but identity comparison is the correct idiom
                if g_loss == None :
                    g_loss = past_g_loss
                print("Epoch: [%2d] [%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f" % (epoch, idx, self.iteration, time.time() - start_time, d_loss, g_loss))

                # NOTE(review): if n_critic > 1, batch_A_images/fake_B may be unbound
                # here when no G update has happened yet in this process - confirm
                if np.mod(idx+1, self.print_freq) == 0 :
                    save_images(batch_A_images, [self.batch_size, 1],
                                './{}/real_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                    # save_images(batch_B_images, [self.batch_size, 1],
                    #             './{}/real_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))

                    # save_images(fake_A, [self.batch_size, 1],
                    #             './{}/fake_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                    save_images(fake_B, [self.batch_size, 1],
                                './{}/fake_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))

                if np.mod(idx + 1, self.save_freq) == 0:
                    self.save(self.checkpoint_dir, counter)

            # After an epoch, start_batch_id is set to zero
            # non-zero value is only for the first epoch after loading pre-trained model
            start_batch_id = 0

        # save model for final step
        self.save(self.checkpoint_dir, counter)
def save(self, checkpoint_dir, step):
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
self.saver.save(self.sess, os.path.join(checkpoint_dir, self.model_name + '.model'), global_step=step)
def load(self, checkpoint_dir):
print(" [*] Reading checkpoints...")
ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
if ckpt and ckpt.model_checkpoint_path:
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
counter = int(ckpt_name.split('-')[-1])
print(" [*] Success to read {}".format(ckpt_name))
return True, counter
else:
print(" [*] Failed to find a checkpoint")
return False, 0
    def test(self):
        """Translate all test images and emit an index.html for visual comparison.

        With print_heatmap the html additionally shows the generator CAM heatmap
        and the local/global discriminator heatmaps superimposed on the images.
        """
        self.check_and_mkdirs()
        tf.global_variables_initializer().run()
        test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA'))
        test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB'))

        self.saver = tf.train.Saver()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)

        if could_load :
            print(" [*] Load SUCCESS")
        else :
            print(" [!] Load failed...")

        # write html for visual comparison
        index_path = os.path.join(self.result_dir, 'index.html')
        img_dir = os.path.join(self.result_dir, 'imgs')
        if not os.path.exists(img_dir):
            os.makedirs(img_dir)
        np_dir = os.path.join(os.path.join(self.result_dir, 'npys'))
        if not os.path.exists(np_dir):
            os.makedirs(np_dir)
        index = open(index_path, 'w')
        index.write("<html><body><table><tr>")

        if self.print_heatmap:
            # heatmap mode: 6-column rows, A->B direction only
            index.write("<th>name</th><th>input</th><th>output</th> <th>heatmap_G</th> <th>heatmap_D_local</th> <th>heatmap_D_global</th> </tr>")
            for source_path in test_A_files:
                print('Processing A image: ' + source_path)
                filename = os.path.basename(source_path)
                input_filename = 'Source_A_' + filename
                output_filename = 'Target_B_' + filename
                heatmap_G_filename = 'heatmap_G_' + filename
                heatmap_D_local_filename = 'heatmap_D_local_' + filename
                heatmap_D_global_filename = 'heatmap_D_global_' + filename
                shutil.copy(source_path, os.path.join(self.result_dir, 'imgs', input_filename))
                image = np.asarray(load_test_data(source_path, size=self.img_size))
                fake_image, heatmap_G, heatmap_D_local, heatmap_D_global = self.sess.run(
                    [self.test_fake_B, self.test_heatmap_a2b, self.test_heatmap_local_dis_ab, self.test_heatmap_global_dis_ab],
                    feed_dict={self.test_domain_A: image})

                # NOTE(review): output_filename is referenced in the html below but the
                # translated image itself is never saved in this branch - confirm
                composed_heatmap_G = superimpose(inverse_transform(image), heatmap_G)
                save_images(composed_heatmap_G, [1, 1],
                            os.path.join(self.result_dir, 'imgs', heatmap_G_filename),
                            inverse=False)
                composed_heatmap_D_local = superimpose(inverse_transform(fake_image), heatmap_D_local)
                save_images(composed_heatmap_D_local, [1, 1],
                            os.path.join(self.result_dir, 'imgs', heatmap_D_local_filename),
                            inverse=False)
                composed_heatmap_D_global = superimpose(inverse_transform(fake_image), heatmap_D_global)
                save_images(composed_heatmap_D_global, [1, 1],
                            os.path.join(self.result_dir, 'imgs', heatmap_D_global_filename),
                            inverse=False)

                index.write("<td>%s</td>" % filename)
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + input_filename, self.img_size, self.img_size))
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + output_filename, self.img_size, self.img_size))
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + heatmap_G_filename, self.img_size, self.img_size))
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + heatmap_D_local_filename, self.img_size, self.img_size))
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + heatmap_D_global_filename, self.img_size, self.img_size))
                index.write("</tr>")
        else:
            # plain mode: 3-column rows, both directions
            # NOTE(review): the B->A loop sits in this branch, so heatmap mode
            # never processes testB files - confirm this is intended
            index.write("<th>name</th><th>input</th><th>output</th></tr>")
            for source_path in test_A_files:
                print('Processing A image: ' + source_path)
                filename = os.path.basename(source_path)
                input_filename = 'Source_A_' + filename
                output_filename = 'Target_B_' + filename
                shutil.copy(source_path, os.path.join(self.result_dir, 'imgs', input_filename))
                image = np.asarray(load_test_data(source_path, size=self.img_size))
                fake_image = self.sess.run(self.test_fake_B, feed_dict={self.test_domain_A : image})
                save_images(fake_image, [1, 1], os.path.join(self.result_dir, 'imgs', output_filename))

                index.write("<td>%s</td>" % filename)
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + input_filename, self.img_size, self.img_size))
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + output_filename, self.img_size, self.img_size))
                index.write("</tr>")

            for source_path in test_B_files:
                print('Processing B image: ' + source_path)
                filename = os.path.basename(source_path)
                input_filename = 'Source_B_' + filename
                output_filename = 'Target_A_' + filename
                shutil.copy(source_path, os.path.join(self.result_dir, 'imgs', input_filename))
                image = np.asarray(load_test_data(source_path, size=self.img_size))
                fake_image = self.sess.run(self.test_fake_A, feed_dict={self.test_domain_B: image})
                save_images(fake_image, [1, 1], os.path.join(self.result_dir, 'imgs', output_filename))

                index.write("<td>%s</td>" % filename)
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + input_filename, self.img_size, self.img_size))
                index.write(
                    "<td><img src='%s' width='%d' height='%d'></td>" % (
                        'imgs/' + output_filename, self.img_size, self.img_size))
                index.write("</tr>")
        index.close()
def export(self):
    """Export the A->B generator as a TF SavedModel for serving.

    Restores the latest checkpoint, then writes a SavedModel with a single
    'predict' signature (input_images -> predict_image) under
    result_dir/export/<unix-timestamp>/.
    """
    self.check_and_mkdirs()
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
    export_dir = os.path.join(self.result_dir, 'export', str(int(time.time())))
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)  # SavedModelBuilder refuses to write into an existing dir
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    model_signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={
            "input_images": tf.saved_model.utils.build_tensor_info(self.input_domain_A)
        },
        outputs={
            "predict_image": tf.saved_model.utils.build_tensor_info(self.predict_result)
        },
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
    builder.add_meta_graph_and_variables(
        self.sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
        },
        clear_devices=True
    )
    # FIX: the original last line was corrupted by dataset-join artifacts
    # ("builder.save() | UGATIT.py | from ops import *"); both the save call
    # and the following module's first import are restored below.
    builder.save()


from ops import *
from utils import *
from glob import glob
import time
import shutil
from tensorflow.contrib.data import prefetch_to_device, shuffle_and_repeat, map_and_batch
import numpy as np
class UGATIT(object) :
    """U-GAT-IT unpaired image-to-image translation model (TF 1.x graph mode)."""
    def __init__(self, sess, args):
        """Store all hyper-parameters from the argparse namespace and list the
        training images. No graph construction happens here (see build_model)."""
        # 'light' selects the memory-saving variant (MLP over pooled features).
        self.light = args.light
        self.args_dict = vars(args)  # kept so the launch command can be dumped to HTML
        if self.light :
            self.model_name = 'UGATIT_light'
        else :
            self.model_name = 'UGATIT'
        self.print_heatmap = args.print_heatmap
        self.sess = sess
        self.phase = args.phase  # 'train' / 'test' / 'export'
        self.dataset_name = args.dataset
        self.augment_flag = args.augment_flag
        self.epoch = args.epoch
        self.iteration = args.iteration  # iterations per epoch
        self.decay_flag = args.decay_flag
        self.decay_epoch = args.decay_epoch
        self.gan_type = args.gan_type
        self.batch_size = args.batch_size
        self.print_freq = args.print_freq
        self.save_freq = args.save_freq
        self.init_lr = args.lr
        self.ch = args.ch  # base channel count for G and D
        """ Weight """
        self.adv_weight = args.adv_weight
        self.cycle_weight = args.cycle_weight
        self.identity_weight = args.identity_weight
        self.cam_weight = args.cam_weight
        self.ld = args.GP_ld  # gradient-penalty coefficient
        self.smoothing = args.smoothing
        """ Generator """
        self.n_res = args.n_res
        """ Discriminator """
        self.n_dis = args.n_dis
        self.n_critic = args.n_critic  # D updates per G update
        self.sn = args.sn  # spectral normalization toggle
        self.img_size = args.img_size
        self.img_ch = args.img_ch
        """ working on dir params """
        self.train_log_root = args.train_log_root
        self.checkpoint_dir = args.checkpoint_dir
        self.result_dir = args.result_dir
        self.log_dir = args.log_dir
        self.sample_dir = args.sample_dir
        self.model_dir = args.model_dir
        # self.trainA, self.trainB = prepare_data(dataset_name=self.dataset_name, size=self.img_size
        self.trainA_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainA'))
        self.trainB_dataset = glob('./dataset/{}/*.*'.format(self.dataset_name + '/trainB'))
        # iterations are driven by the larger of the two (unpaired) domains
        self.dataset_num = max(len(self.trainA_dataset), len(self.trainB_dataset))
        print()
        print("##### Information #####")
        print("# light : ", self.light)
        print("# gan type : ", self.gan_type)
        print("# dataset : ", self.dataset_name)
        print("# max dataset number : ", self.dataset_num)
        print("# batch_size : ", self.batch_size)
        print("# epoch : ", self.epoch)
        print("# iteration per epoch : ", self.iteration)
        print("# smoothing : ", self.smoothing)
        print()
        print("##### Generator #####")
        print("# residual blocks : ", self.n_res)
        print()
        print("##### Discriminator #####")
        print("# discriminator layer : ", self.n_dis)
        print("# the number of critic : ", self.n_critic)
        print("# spectral normalization : ", self.sn)
        print()
        print("##### Weight #####")
        print("# adv_weight : ", self.adv_weight)
        print("# cycle_weight : ", self.cycle_weight)
        print("# identity_weight : ", self.identity_weight)
        print("# cam_weight : ", self.cam_weight)
##################################################################################
# Generator
##################################################################################
@property
def default_model_dir(self):
n_res = str(self.n_res) + 'resblock'
n_dis = str(self.n_dis) + 'dis'
if self.smoothing:
smoothing = '_smoothing'
else:
smoothing = ''
if self.sn:
sn = '_sn'
else:
sn = ''
return "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}{}{}".format(self.model_name, self.dataset_name,
self.gan_type, n_res, n_dis,
self.n_critic,
self.adv_weight, self.cycle_weight,
self.identity_weight, self.cam_weight, sn,
smoothing)
def check_and_mkdirs(self):
    """Resolve the working directories and create the ones the current phase needs.

    Resolution rule for each of checkpoint/log/sample/result dir:
      * ""                 -> a default location under train_log_root/model_dir
      * bare name (no '/') -> nested as train_log_root/<name>/model_dir
      * any other path     -> used as given
    Only the directories relevant to self.phase are actually created.
    """
    from datetime import datetime
    # check and make folders
    if self.model_dir == '':
        self.model_dir = self.default_model_dir
    # current_time = datetime.now().strftime("%Y%m%d_%H%M%S")
    if self.checkpoint_dir == "":
        self.checkpoint_dir = os.path.join(self.train_log_root, self.model_dir)
    elif '/' not in self.checkpoint_dir:
        self.checkpoint_dir = os.path.join(self.train_log_root, self.checkpoint_dir, self.model_dir)
    if self.log_dir == "":
        self.log_dir = os.path.join(self.train_log_root, self.model_dir, "log")
    elif '/' not in self.log_dir:
        self.log_dir = os.path.join(self.train_log_root, self.log_dir, self.model_dir)
    if self.sample_dir == "":
        self.sample_dir = os.path.join(self.train_log_root, self.model_dir, "samples")
    elif '/' not in self.sample_dir:
        self.sample_dir = os.path.join(self.train_log_root, self.sample_dir, self.model_dir)
    if self.result_dir == "":
        self.result_dir = os.path.join(self.train_log_root, self.model_dir, "result")
    elif '/' not in self.result_dir:
        self.result_dir = os.path.join(self.train_log_root, self.result_dir, self.model_dir)
    # check_folder (from utils) creates the directory if missing.
    if self.phase in ('train',):
        check_folder(self.checkpoint_dir)
        check_folder(self.log_dir)
    if self.phase in ('train', 'test'):
        check_folder(os.path.join(self.sample_dir, "imgs"))
    if self.phase in ('test', 'export'):
        check_folder(os.path.join(self.result_dir))
def write_args_to_html(self):
body = ""
for k, v in self.args_dict.items():
body = body + "--" + str(k) + " " + str(v) + " \\<br>"
with open(self.total_sample_path, 'a') as t_html:
t_html.write("python3 main.py \\<br>")
t_html.write(body)
def write_to_html(self, html_path, epoch, idx, img_id):
names = ['source', 'output', 'real']
body = ""
for name in names:
image_name = '{}_{:02d}_{:06d}_{:02d}.jpg'.format(name, epoch, idx, img_id)
body = body + str("<img src=\"" + os.path.join('imgs', image_name) + "\">")
body = body + str("<br>")
with open(html_path, 'a') as v_html:
v_html.write(body)
with open(self.total_sample_path, 'a') as t_html:
t_html.write(body)
def generator(self, x_init, reuse=False, scope="generator"):
    """Build the U-GAT-IT generator graph.

    Returns a tuple (output, cam_logit, heatmap):
      output    -- translated image, tanh-bounded to [-1, 1]
      cam_logit -- concatenated GAP/GMP class-activation logits (CAM loss)
      heatmap   -- channel-summed attention map for visualization
    """
    channel = self.ch
    with tf.variable_scope(scope, reuse=reuse) :
        x = conv(x_init, channel, kernel=7, stride=1, pad=3, pad_type='reflect', scope='conv')
        x = instance_norm(x, scope='ins_norm')
        x = relu(x)
        # Down-Sampling (two stride-2 convs; channel count doubles each time)
        for i in range(2) :
            x = conv(x, channel*2, kernel=3, stride=2, pad=1, pad_type='reflect', scope='conv_'+str(i))
            x = instance_norm(x, scope='ins_norm_'+str(i))
            x = relu(x)
            channel = channel * 2
        # Down-Sampling Bottleneck
        for i in range(self.n_res):
            x = resblock(x, channel, scope='resblock_' + str(i))
        # Class Activation Map: the same FC layer ('CAM_logit', reused) scores
        # GAP- and GMP-pooled features; its weights re-weight the feature maps.
        cam_x = global_avg_pooling(x)
        cam_gap_logit, cam_x_weight = fully_connected_with_w(cam_x, scope='CAM_logit')
        x_gap = tf.multiply(x, cam_x_weight)
        cam_x = global_max_pooling(x)
        cam_gmp_logit, cam_x_weight = fully_connected_with_w(cam_x, reuse=True, scope='CAM_logit')
        x_gmp = tf.multiply(x, cam_x_weight)
        cam_logit = tf.concat([cam_gap_logit, cam_gmp_logit], axis=-1)
        x = tf.concat([x_gap, x_gmp], axis=-1)
        # 1x1 conv fuses the concatenated GAP/GMP branches back to 'channel'
        x = conv(x, channel, kernel=1, stride=1, scope='conv_1x1')
        x = relu(x)
        heatmap = tf.squeeze(tf.reduce_sum(x, axis=-1))  # attention map (channel sum)
        # Gamma, Beta block: AdaLIN parameters predicted from the features
        gamma, beta = self.MLP(x, reuse=reuse)
        # Up-Sampling Bottleneck (adaptive instance-layer norm residual blocks)
        for i in range(self.n_res):
            x = adaptive_ins_layer_resblock(x, channel, gamma, beta, smoothing=self.smoothing, scope='adaptive_resblock' + str(i))
        # Up-Sampling (nearest-neighbour upsample + conv; channels halve)
        for i in range(2) :
            x = up_sample(x, scale_factor=2)
            x = conv(x, channel//2, kernel=3, stride=1, pad=1, pad_type='reflect', scope='up_conv_'+str(i))
            x = layer_instance_norm(x, scope='layer_ins_norm_'+str(i))
            x = relu(x)
            channel = channel // 2
        x = conv(x, channels=3, kernel=7, stride=1, pad=3, pad_type='reflect', scope='G_logit')
        x = tanh(x)
        return x, cam_logit, heatmap
def MLP(self, x, use_bias=True, reuse=False, scope='MLP'):
    """Predict per-channel gamma/beta for the AdaLIN up-sampling resblocks.

    In the 'light' model the features are global-average-pooled first, which
    shrinks the FC layers dramatically (the memory-saving variant).
    Returns (gamma, beta), each reshaped to [batch, 1, 1, channel] for
    broadcasting over spatial dimensions.
    """
    channel = self.ch * self.n_res
    if self.light :
        x = global_avg_pooling(x)
    with tf.variable_scope(scope, reuse=reuse):
        for i in range(2) :
            x = fully_connected(x, channel, use_bias, scope='linear_' + str(i))
            x = relu(x)
        gamma = fully_connected(x, channel, use_bias, scope='gamma')
        beta = fully_connected(x, channel, use_bias, scope='beta')
        gamma = tf.reshape(gamma, shape=[self.batch_size, 1, 1, channel])
        beta = tf.reshape(beta, shape=[self.batch_size, 1, 1, channel])
        return gamma, beta
##################################################################################
# Discriminator
##################################################################################
def discriminator(self, x_init, reuse=False, scope="discriminator"):
    """Multi-scale discriminator: runs the shallow ('local') and deep
    ('global') PatchGAN branches and returns
    ([local, global] logits, [local, global] CAM logits,
     local heatmap, global heatmap)."""
    with tf.variable_scope(scope, reuse=reuse) :
        local_logit, local_cam, local_heatmap = self.discriminator_local(
            x_init, reuse=reuse, scope='local')
        global_logit, global_cam, global_heatmap = self.discriminator_global(
            x_init, reuse=reuse, scope='global')
        return ([local_logit, global_logit],
                [local_cam, global_cam],
                local_heatmap,
                global_heatmap)
def discriminator_global(self, x_init, reuse=False, scope='discriminator_global'):
    """Deep PatchGAN discriminator branch with CAM attention.
    Returns (patch logits, CAM logits, attention heatmap)."""
    with tf.variable_scope(scope, reuse=reuse):
        channel = self.ch
        x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_0')
        x = lrelu(x, 0.2)
        # n_dis - 2 additional stride-2 layers; channels double each time
        for i in range(1, self.n_dis - 1):
            x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i))
            x = lrelu(x, 0.2)
            channel = channel * 2
        x = conv(x, channel * 2, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='conv_last')
        x = lrelu(x, 0.2)
        channel = channel * 2
        # CAM: shared FC ('CAM_logit', reused) scores GAP and GMP features and
        # its weights re-weight the maps (same scheme as in the generator).
        cam_x = global_avg_pooling(x)
        cam_gap_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, scope='CAM_logit')
        x_gap = tf.multiply(x, cam_x_weight)
        cam_x = global_max_pooling(x)
        cam_gmp_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, reuse=True, scope='CAM_logit')
        x_gmp = tf.multiply(x, cam_x_weight)
        cam_logit = tf.concat([cam_gap_logit, cam_gmp_logit], axis=-1)
        x = tf.concat([x_gap, x_gmp], axis=-1)
        x = conv(x, channel, kernel=1, stride=1, scope='conv_1x1')  # fuse GAP/GMP
        x = lrelu(x, 0.2)
        heatmap = tf.squeeze(tf.reduce_sum(x, axis=-1))  # channel-sum attention map
        x = conv(x, channels=1, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='D_logit')
        return x, cam_logit, heatmap
def discriminator_local(self, x_init, reuse=False, scope='discriminator_local'):
    """Shallow PatchGAN discriminator branch with CAM attention (two fewer
    stride-2 layers than the global branch, i.e. a smaller receptive field).
    Returns (patch logits, CAM logits, attention heatmap)."""
    with tf.variable_scope(scope, reuse=reuse) :
        channel = self.ch
        x = conv(x_init, channel, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_0')
        x = lrelu(x, 0.2)
        # note the '- 2 - 1' bound: two fewer downsampling layers than global
        for i in range(1, self.n_dis - 2 - 1):
            x = conv(x, channel * 2, kernel=4, stride=2, pad=1, pad_type='reflect', sn=self.sn, scope='conv_' + str(i))
            x = lrelu(x, 0.2)
            channel = channel * 2
        x = conv(x, channel * 2, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='conv_last')
        x = lrelu(x, 0.2)
        channel = channel * 2
        # CAM attention, identical structure to discriminator_global
        cam_x = global_avg_pooling(x)
        cam_gap_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, scope='CAM_logit')
        x_gap = tf.multiply(x, cam_x_weight)
        cam_x = global_max_pooling(x)
        cam_gmp_logit, cam_x_weight = fully_connected_with_w(cam_x, sn=self.sn, reuse=True, scope='CAM_logit')
        x_gmp = tf.multiply(x, cam_x_weight)
        cam_logit = tf.concat([cam_gap_logit, cam_gmp_logit], axis=-1)
        x = tf.concat([x_gap, x_gmp], axis=-1)
        x = conv(x, channel, kernel=1, stride=1, scope='conv_1x1')
        x = lrelu(x, 0.2)
        heatmap = tf.squeeze(tf.reduce_sum(x, axis=-1))
        x = conv(x, channels=1, kernel=4, stride=1, pad=1, pad_type='reflect', sn=self.sn, scope='D_logit')
        return x, cam_logit, heatmap
##################################################################################
# Model
##################################################################################
def generate_a2b(self, x_A, reuse=False):
    """Translate domain A -> B; returns (image, cam_logit, heatmap)."""
    return self.generator(x_A, reuse=reuse, scope="generator_B")
def generate_b2a(self, x_B, reuse=False):
    """Translate domain B -> A; returns (image, cam_logit, heatmap)."""
    return self.generator(x_B, reuse=reuse, scope="generator_A")
def discriminate_real(self, x_A, x_B):
    """Score real images of both domains; heatmaps are discarded here."""
    A_logit, A_cam_logit, _, _ = self.discriminator(x_A, scope="discriminator_A")
    B_logit, B_cam_logit, _, _ = self.discriminator(x_B, scope="discriminator_B")
    return A_logit, A_cam_logit, B_logit, B_cam_logit
def discriminate_fake(self, x_ba, x_ab):
    """Score generated images (reusing the discriminator variables); also
    returns D_B's local/global heatmaps for the A->B fakes."""
    A_logit, A_cam_logit, _, _ = self.discriminator(
        x_ba, reuse=True, scope="discriminator_A")
    B_logit, B_cam_logit, ab_local_heatmap, ab_global_heatmap = self.discriminator(
        x_ab, reuse=True, scope="discriminator_B")
    return (A_logit, A_cam_logit, B_logit, B_cam_logit,
            ab_local_heatmap, ab_global_heatmap)
def gradient_panalty(self, real, fake, scope="discriminator_A"):
    """Gradient penalty for WGAN-GP / WGAN-LP / DRAGAN.

    (Name keeps the historical typo 'panalty' — callers depend on it.)
    Returns (penalty over patch logits, penalty over CAM logits); both are 0
    when self.gan_type matches none of the penalized GAN types.
    """
    if self.gan_type.__contains__('dragan'):
        # DRAGAN perturbs the real sample instead of using the generator fake;
        # the noise magnitude is tied to the per-batch std of the real data.
        eps = tf.random_uniform(shape=tf.shape(real), minval=0., maxval=1.)
        _, x_var = tf.nn.moments(real, axes=[0, 1, 2, 3])
        x_std = tf.sqrt(x_var)  # magnitude of noise decides the size of local region
        fake = real + 0.5 * x_std * eps
    alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0., maxval=1.)
    interpolated = real + alpha * (fake - real)
    logit, cam_logit, _, _ = self.discriminator(interpolated, reuse=True, scope=scope)

    def _penalty(logits):
        # DRY: the original duplicated this loop for logit and cam_logit.
        terms = []
        for l in logits:  # one entry per branch (local, global)
            grad = tf.gradients(l, interpolated)[0]  # gradient of D(interpolated)
            grad_norm = tf.norm(flatten(grad), axis=1)  # l2 norm
            if self.gan_type == 'wgan-lp':
                # WGAN-LP: one-sided penalty
                terms.append(self.ld * tf.reduce_mean(tf.square(tf.maximum(0.0, grad_norm - 1.))))
            elif self.gan_type == 'wgan-gp' or self.gan_type == 'dragan':
                terms.append(self.ld * tf.reduce_mean(tf.square(grad_norm - 1.)))
        return sum(terms)  # sum([]) == 0, matching the original fallback

    return _penalty(logit), _penalty(cam_logit)
def build_model(self):
    """Construct the TF graph for the current phase.

    'train'  : input pipeline, both generators/discriminators, all losses,
               Adam optimizers, and the summary ops.
    'export' : a single A->B inference path with named input/output tensors.
    otherwise ('test'): placeholder-fed inference paths for both directions.
    """
    if self.phase == 'train' :
        self.lr = tf.placeholder(tf.float32, name='learning_rate')
        """ Input Image"""
        Image_Data_Class = ImageData(self.img_size, self.img_ch, self.augment_flag)
        trainA = tf.data.Dataset.from_tensor_slices(self.trainA_dataset)
        trainB = tf.data.Dataset.from_tensor_slices(self.trainB_dataset)
        gpu_device = '/gpu:0'
        # shuffle over the whole (larger) dataset, decode+batch in parallel,
        # and prefetch straight onto the GPU
        trainA = trainA.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, None))
        trainB = trainB.apply(shuffle_and_repeat(self.dataset_num)).apply(map_and_batch(Image_Data_Class.image_processing, self.batch_size, num_parallel_batches=16, drop_remainder=True)).apply(prefetch_to_device(gpu_device, None))
        trainA_iterator = trainA.make_one_shot_iterator()
        trainB_iterator = trainB.make_one_shot_iterator()
        self.domain_A = trainA_iterator.get_next()
        self.domain_B = trainB_iterator.get_next()
        """ Define Generator, Discriminator """
        # translations, cycle reconstructions and identity mappings
        x_ab, cam_ab, heatmap_g_a2b = self.generate_a2b(self.domain_A) # real a
        x_ba, cam_ba, heatmap_g_b2a = self.generate_b2a(self.domain_B) # real b
        x_aba, _, _ = self.generate_b2a(x_ab, reuse=True) # real b
        x_bab, _, _ = self.generate_a2b(x_ba, reuse=True) # real a
        x_aa, cam_aa, _ = self.generate_b2a(self.domain_A, reuse=True) # fake b
        x_bb, cam_bb, _ = self.generate_a2b(self.domain_B, reuse=True) # fake a
        real_A_logit, real_A_cam_logit, real_B_logit, real_B_cam_logit = self.discriminate_real(self.domain_A, self.domain_B)
        fake_A_logit, fake_A_cam_logit, fake_B_logit, fake_B_cam_logit, dis_ab_local_heatmap, dis_ab_global_heatmap = self.discriminate_fake(x_ba, x_ab)
        """ Define Loss """
        # gradient penalty only for WGAN variants / DRAGAN
        if self.gan_type.__contains__('wgan') or self.gan_type == 'dragan' :
            GP_A, GP_CAM_A = self.gradient_panalty(real=self.domain_A, fake=x_ba, scope="discriminator_A")
            GP_B, GP_CAM_B = self.gradient_panalty(real=self.domain_B, fake=x_ab, scope="discriminator_B")
        else :
            GP_A, GP_CAM_A = 0, 0
            GP_B, GP_CAM_B = 0, 0
        G_ad_loss_A = (generator_loss(self.gan_type, fake_A_logit) + generator_loss(self.gan_type, fake_A_cam_logit))
        G_ad_loss_B = (generator_loss(self.gan_type, fake_B_logit) + generator_loss(self.gan_type, fake_B_cam_logit))
        D_ad_loss_A = (discriminator_loss(self.gan_type, real_A_logit, fake_A_logit) + discriminator_loss(self.gan_type, real_A_cam_logit, fake_A_cam_logit) + GP_A + GP_CAM_A)
        D_ad_loss_B = (discriminator_loss(self.gan_type, real_B_logit, fake_B_logit) + discriminator_loss(self.gan_type, real_B_cam_logit, fake_B_cam_logit) + GP_B + GP_CAM_B)
        reconstruction_A = L1_loss(x_aba, self.domain_A) # reconstruction
        reconstruction_B = L1_loss(x_bab, self.domain_B) # reconstruction
        identity_A = L1_loss(x_aa, self.domain_A)
        identity_B = L1_loss(x_bb, self.domain_B)
        # CAM loss: the generator's attention classifier must separate
        # translated ('source') from identity ('non_source') inputs
        cam_A = cam_loss(source=cam_ba, non_source=cam_aa)
        cam_B = cam_loss(source=cam_ab, non_source=cam_bb)
        Generator_A_gan = self.adv_weight * G_ad_loss_A
        Generator_A_cycle = self.cycle_weight * reconstruction_B
        Generator_A_identity = self.identity_weight * identity_A
        Generator_A_cam = self.cam_weight * cam_A
        Generator_B_gan = self.adv_weight * G_ad_loss_B
        Generator_B_cycle = self.cycle_weight * reconstruction_A
        Generator_B_identity = self.identity_weight * identity_B
        Generator_B_cam = self.cam_weight * cam_B
        Generator_A_loss = Generator_A_gan + Generator_A_cycle + Generator_A_identity + Generator_A_cam
        Generator_B_loss = Generator_B_gan + Generator_B_cycle + Generator_B_identity + Generator_B_cam
        Discriminator_A_loss = self.adv_weight * D_ad_loss_A
        Discriminator_B_loss = self.adv_weight * D_ad_loss_B
        self.Generator_loss = Generator_A_loss + Generator_B_loss + regularization_loss('generator')
        self.Discriminator_loss = Discriminator_A_loss + Discriminator_B_loss + regularization_loss('discriminator')
        """ Result Image """
        self.fake_A = x_ba
        self.fake_B = x_ab
        self.real_A = self.domain_A
        self.real_B = self.domain_B
        """ Training """
        t_vars = tf.trainable_variables()
        G_vars = [var for var in t_vars if 'generator' in var.name]
        D_vars = [var for var in t_vars if 'discriminator' in var.name]
        self.G_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Generator_loss, var_list=G_vars)
        self.D_optim = tf.train.AdamOptimizer(self.lr, beta1=0.5, beta2=0.999).minimize(self.Discriminator_loss, var_list=D_vars)
        """" Summary """
        self.all_G_loss = tf.summary.scalar("Generator_loss", self.Generator_loss)
        self.all_D_loss = tf.summary.scalar("Discriminator_loss", self.Discriminator_loss)
        self.G_A_loss = tf.summary.scalar("G_A_loss", Generator_A_loss)
        self.G_A_gan = tf.summary.scalar("G_A_gan", Generator_A_gan)
        self.G_A_cycle = tf.summary.scalar("G_A_cycle", Generator_A_cycle)
        self.G_A_identity = tf.summary.scalar("G_A_identity", Generator_A_identity)
        self.G_A_cam = tf.summary.scalar("G_A_cam", Generator_A_cam)
        self.G_B_loss = tf.summary.scalar("G_B_loss", Generator_B_loss)
        self.G_B_gan = tf.summary.scalar("G_B_gan", Generator_B_gan)
        self.G_B_cycle = tf.summary.scalar("G_B_cycle", Generator_B_cycle)
        self.G_B_identity = tf.summary.scalar("G_B_identity", Generator_B_identity)
        self.G_B_cam = tf.summary.scalar("G_B_cam", Generator_B_cam)
        self.D_A_loss = tf.summary.scalar("D_A_loss", Discriminator_A_loss)
        self.D_B_loss = tf.summary.scalar("D_B_loss", Discriminator_B_loss)
        # track the learned AdaLIN 'rho' mixing variables
        self.rho_var = []
        for var in tf.trainable_variables():
            if 'rho' in var.name:
                self.rho_var.append(tf.summary.histogram(var.name, var))
                self.rho_var.append(tf.summary.scalar(var.name + "_min", tf.reduce_min(var)))
                self.rho_var.append(tf.summary.scalar(var.name + "_max", tf.reduce_max(var)))
                self.rho_var.append(tf.summary.scalar(var.name + "_mean", tf.reduce_mean(var)))
        g_summary_list = [self.G_A_loss, self.G_A_gan, self.G_A_cycle, self.G_A_identity, self.G_A_cam,
                          self.G_B_loss, self.G_B_gan, self.G_B_cycle, self.G_B_identity, self.G_B_cam,
                          self.all_G_loss]
        g_summary_list.extend(self.rho_var)
        d_summary_list = [self.D_A_loss, self.D_B_loss, self.all_D_loss]
        self.G_loss = tf.summary.merge(g_summary_list)
        self.D_loss = tf.summary.merge(d_summary_list)
    elif self.phase == 'export':
        """ Export a serving model of domainA to domainB"""
        self.input_domain_A = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='input_domain_A')
        self.predict_domain_B, _, _ = self.generate_a2b(self.input_domain_A)
        self.predict_result = tf.identity(self.predict_domain_B, name="predict_result")
    else :
        """ Test """
        self.test_domain_A = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_domain_A')
        self.test_domain_B = tf.placeholder(tf.float32, [1, self.img_size, self.img_size, self.img_ch], name='test_domain_B')
        self.test_fake_B, _, self.test_heatmap_a2b = self.generate_a2b(self.test_domain_A)
        self.test_fake_A, _, self.test_heatmap_b2a = self.generate_b2a(self.test_domain_B)
        if self.print_heatmap:
            # NOTE(review): fresh scope "test_discriminator_B" means these
            # discriminator variables are NOT restored from the checkpoint —
            # looks intentional for visualization only; confirm.
            _, _, self.test_heatmap_local_dis_ab, self.test_heatmap_global_dis_ab = self.discriminator(self.test_fake_B, scope="test_discriminator_B")
def train(self):
    """Full training loop: restore/init variables, alternate D and G updates
    (G every n_critic steps), write summaries, and periodically dump sample
    images and checkpoints."""
    self.check_and_mkdirs()
    # (dropped a redundant nested os.path.join from the original)
    self.total_sample_path = os.path.join(self.sample_dir, "_total_samples.html")
    self.write_args_to_html()
    # initialize all variables
    tf.global_variables_initializer().run()
    # saver to save model
    self.saver = tf.train.Saver()
    # summary writer
    self.writer = tf.summary.FileWriter(self.log_dir, self.sess.graph)
    # restore check-point if it exists
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        start_epoch = checkpoint_counter // self.iteration  # was (int)(a / b)
        start_batch_id = checkpoint_counter - start_epoch * self.iteration
        counter = checkpoint_counter
        print(" [*] Load SUCCESS")
    else:
        start_epoch = 0
        start_batch_id = 0
        counter = 1
        print(" [!] Load failed...")

    # loop for epoch
    start_time = time.time()
    past_g_loss = -1.
    lr = self.init_lr
    for epoch in range(start_epoch, self.epoch):
        if self.decay_flag:
            # linear decay to zero after decay_epoch
            lr = self.init_lr if epoch < self.decay_epoch else self.init_lr * (self.epoch - epoch) / (self.epoch - self.decay_epoch)
        for idx in range(start_batch_id, self.iteration):
            train_feed_dict = {
                self.lr: lr
            }
            # Update D
            _, d_loss, summary_str = self.sess.run(
                [self.D_optim, self.Discriminator_loss, self.D_loss],
                feed_dict=train_feed_dict)
            self.writer.add_summary(summary_str, counter)
            # Update G (every n_critic D steps)
            g_loss = None
            if (counter - 1) % self.n_critic == 0:
                batch_A_images, batch_B_images, fake_A, fake_B, _, g_loss, summary_str = self.sess.run(
                    [self.real_A, self.real_B,
                     self.fake_A, self.fake_B,
                     self.G_optim,
                     self.Generator_loss, self.G_loss],
                    feed_dict=train_feed_dict)
                self.writer.add_summary(summary_str, counter)
                past_g_loss = g_loss
            # display training status
            counter += 1
            if g_loss is None:  # FIX: was '== None'
                g_loss = past_g_loss
            print("Epoch: [%2d] [%5d/%5d] time: %4.4f d_loss: %.8f, g_loss: %.8f" % (
                epoch, idx, self.iteration, time.time() - start_time, d_loss, g_loss))
            # NOTE(review): with n_critic > 1 the sample tensors below may be
            # unbound if print_freq fires before the first G update — confirm
            # n_critic == 1 is the only supported configuration.
            if np.mod(idx + 1, self.print_freq) == 0:
                save_images(batch_A_images, [self.batch_size, 1],
                            './{}/real_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx + 1))
                # save_images(batch_B_images, [self.batch_size, 1],
                #             './{}/real_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                # save_images(fake_A, [self.batch_size, 1],
                #             './{}/fake_A_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx+1))
                save_images(fake_B, [self.batch_size, 1],
                            './{}/fake_B_{:03d}_{:05d}.png'.format(self.sample_dir, epoch, idx + 1))
            if np.mod(idx + 1, self.save_freq) == 0:
                self.save(self.checkpoint_dir, counter)
        # After an epoch, start_batch_id is set to zero;
        # a non-zero value only applies right after restoring a checkpoint.
        start_batch_id = 0
    # save model for final step
    self.save(self.checkpoint_dir, counter)
def save(self, checkpoint_dir, step):
    """Write a checkpoint named '<model_name>.model-<step>' under checkpoint_dir."""
    # FIX: exists()+makedirs was a TOCTOU race; exist_ok is atomic in intent
    os.makedirs(checkpoint_dir, exist_ok=True)
    self.saver.save(self.sess,
                    os.path.join(checkpoint_dir, self.model_name + '.model'),
                    global_step=step)
def load(self, checkpoint_dir):
    """Restore the latest checkpoint from checkpoint_dir.

    Returns (True, step) on success — step parsed from the checkpoint file
    name suffix — or (False, 0) when no checkpoint is found.
    """
    print(" [*] Reading checkpoints...")
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
        print(" [*] Failed to find a checkpoint")
        return False, 0
    ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
    self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
    step = int(ckpt_name.split('-')[-1])
    print(" [*] Success to read {}".format(ckpt_name))
    return True, step
def test(self):
    """Run inference over the test sets and build an index.html gallery.

    With print_heatmap: only A->B is processed and generator/discriminator
    attention overlays are saved alongside input/output.
    Without: both A->B and B->A are processed (input/output only).
    """
    self.check_and_mkdirs()
    tf.global_variables_initializer().run()
    test_A_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testA'))
    test_B_files = glob('./dataset/{}/*.*'.format(self.dataset_name + '/testB'))
    self.saver = tf.train.Saver()
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")

    # write html for visual comparison
    index_path = os.path.join(self.result_dir, 'index.html')
    img_dir = os.path.join(self.result_dir, 'imgs')
    if not os.path.exists(img_dir):
        os.makedirs(img_dir)
    np_dir = os.path.join(self.result_dir, 'npys')
    if not os.path.exists(np_dir):
        os.makedirs(np_dir)
    index = open(index_path, 'w')
    index.write("<html><body><table><tr>")

    def write_img_cell(image_filename):
        # one <td> per image, displayed at the network's native resolution
        index.write("<td><img src='%s' width='%d' height='%d'></td>" % (
            'imgs/' + image_filename, self.img_size, self.img_size))

    if self.print_heatmap:
        index.write("<th>name</th><th>input</th><th>output</th> <th>heatmap_G</th> <th>heatmap_D_local</th> <th>heatmap_D_global</th> </tr>")
        for source_path in test_A_files:
            print('Processing A image: ' + source_path)
            filename = os.path.basename(source_path)
            input_filename = 'Source_A_' + filename
            output_filename = 'Target_B_' + filename
            heatmap_G_filename = 'heatmap_G_' + filename
            heatmap_D_local_filename = 'heatmap_D_local_' + filename
            heatmap_D_global_filename = 'heatmap_D_global_' + filename
            shutil.copy(source_path, os.path.join(self.result_dir, 'imgs', input_filename))
            image = np.asarray(load_test_data(source_path, size=self.img_size))
            fake_image, heatmap_G, heatmap_D_local, heatmap_D_global = self.sess.run(
                [self.test_fake_B, self.test_heatmap_a2b,
                 self.test_heatmap_local_dis_ab, self.test_heatmap_global_dis_ab],
                feed_dict={self.test_domain_A: image})
            # BUG FIX: the original branch referenced output_filename in the
            # HTML below but never wrote the translated image to disk.
            save_images(fake_image, [1, 1],
                        os.path.join(self.result_dir, 'imgs', output_filename))
            # attention overlays: G's map over the input, D's maps over the fake
            for overlay, overlay_filename in (
                    (superimpose(inverse_transform(image), heatmap_G), heatmap_G_filename),
                    (superimpose(inverse_transform(fake_image), heatmap_D_local), heatmap_D_local_filename),
                    (superimpose(inverse_transform(fake_image), heatmap_D_global), heatmap_D_global_filename)):
                save_images(overlay, [1, 1],
                            os.path.join(self.result_dir, 'imgs', overlay_filename),
                            inverse=False)
            index.write("<td>%s</td>" % filename)
            for cell_filename in (input_filename, output_filename, heatmap_G_filename,
                                  heatmap_D_local_filename, heatmap_D_global_filename):
                write_img_cell(cell_filename)
            index.write("</tr>")
        # NOTE(review): testB is not processed in heatmap mode — presumably
        # intentional (heatmap tensors exist only for A->B); confirm.
    else:
        index.write("<th>name</th><th>input</th><th>output</th></tr>")

        def run_direction(files, domain_label, src_prefix, dst_prefix,
                          fake_tensor, placeholder):
            # translate one domain and append one gallery row per image
            for source_path in files:
                print('Processing ' + domain_label + ' image: ' + source_path)
                filename = os.path.basename(source_path)
                input_filename = src_prefix + filename
                output_filename = dst_prefix + filename
                shutil.copy(source_path, os.path.join(self.result_dir, 'imgs', input_filename))
                image = np.asarray(load_test_data(source_path, size=self.img_size))
                fake_image = self.sess.run(fake_tensor, feed_dict={placeholder: image})
                save_images(fake_image, [1, 1],
                            os.path.join(self.result_dir, 'imgs', output_filename))
                index.write("<td>%s</td>" % filename)
                write_img_cell(input_filename)
                write_img_cell(output_filename)
                index.write("</tr>")

        run_direction(test_A_files, 'A', 'Source_A_', 'Target_B_',
                      self.test_fake_B, self.test_domain_A)
        run_direction(test_B_files, 'B', 'Source_B_', 'Target_A_',
                      self.test_fake_A, self.test_domain_B)
    index.close()
def export(self):
    """Export the A->B generator as a TF SavedModel for serving.

    Restores the latest checkpoint, then writes a SavedModel with a single
    'predict' signature (input_images -> predict_image) under
    result_dir/export/<unix-timestamp>/.
    """
    self.check_and_mkdirs()
    tf.global_variables_initializer().run()
    self.saver = tf.train.Saver()
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
    export_dir = os.path.join(self.result_dir, 'export', str(int(time.time())))
    if os.path.exists(export_dir):
        shutil.rmtree(export_dir)  # SavedModelBuilder refuses to write into an existing dir
    builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
    model_signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs={
            "input_images": tf.saved_model.utils.build_tensor_info(self.input_domain_A)
        },
        outputs={
            "predict_image": tf.saved_model.utils.build_tensor_info(self.predict_result)
        },
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
    builder.add_meta_graph_and_variables(
        self.sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature,
        },
        clear_devices=True
    )
    # FIX: original last line carried dataset-join artifacts
    # ("builder.save() | 0.394318 | 0.088899 |"); restored to the plain call.
    builder.save()
# Imports
import numpy as np
import cv2
import pickle
# NOTE(review): machine-specific absolute paths — hoisted into one constant so
# they can be changed in a single place; consider CLI args or env config.
CASCADE_DIR = '/home/alejandro/Escritorio/Prototype FaceReconigtion/Cascades/data/haarcascades'
# frontalface_alt2 fires on frontal faces; the eye cascade refines each face ROI.
face_cascade = cv2.CascadeClassifier(CASCADE_DIR + '/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier(CASCADE_DIR + '/haarcascade_eye.xml')
# LBPH recognizer; weights produced by the offline training script.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("./recognizers/face-trainner.yml")
# Placeholder mapping; replaced further down by the inverted pickle ({id: name}).
labels = {"name": 1}
def stopProgram():
    """Release the webcam capture and tear down every OpenCV window."""
    videoCap.release()
    cv2.destroyAllWindows()
def captureFrame():
    """Grab one frame from the webcam; returns (ok_flag, frame) as cv2 does."""
    return videoCap.read()
def grayTransform():
    """Convert the current global frame to grayscale for detection/recognition."""
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return gray_frame
def cascadeDetect():
    """Detect faces in the global grayscale frame; returns (x, y, w, h) boxes."""
    return face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
def drawNameInRectangle(id_, x, y):
    """Draw the recognized person's name on the frame at the face corner.

    ROBUSTNESS FIX: labels[id_] raised KeyError for an id missing from the
    pickle; fall back to showing the raw id instead of crashing.
    """
    font = cv2.FONT_HERSHEY_SIMPLEX
    name = labels.get(id_, str(id_))
    color = (255, 255, 255)  # white, BGR
    stroke = 2
    cv2.putText(frame, name, (x, y), font, 1,
                color, stroke, cv2.LINE_AA)
def personalizeFaceRectangle(x, y, widht, height):
    """Draw a green frame around the detected face.

    Since haarcascade_frontalface_alt2.xml is used, detection fires when the
    face is frontal to the camera. NOTE: 'widht'/'height' are actually the
    right/bottom edge coordinates, not sizes (and 'widht' keeps the original
    typo — callers pass positionally).
    """
    cv2.rectangle(frame, (x, y), (widht, height), (87, 255, 51), 1)
def drawEyesRectangle(roi_gray, roi_color):
    """Detect eyes inside the face ROI and outline each one."""
    for (ex, ey, ew, eh) in eye_cascade.detectMultiScale(roi_gray):
        # Bottom-right corner of the eye box; color is BGR, thickness 1.
        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 204), 1)
def detectRegionOFInterestAndDrawRectangle():
    """For each detected face: recognize it, decorate the frame, and save
    the last face crop to Image.png."""
    for (x, y, w, h) in faces:
        # Region of interest: gray for the recognizer, color for drawing.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        widht = x+w
        height = y+h
        # Ask the LBPH recognizer who this face is.
        id_, conf = recognizer.predict(roi_gray)
        # NOTE(review): LBPH conf is a distance (lower = better match);
        # `conf >= 50` labels the weaker matches -- confirm the intended
        # threshold direction.
        if conf >= 50:
            drawNameInRectangle(id_, x, y)
        personalizeFaceRectangle(x, y, widht, height)
        drawEyesRectangle(roi_gray, roi_color)
        # Persist the most recent detected face as a PNG file.
        img_item = "Image.png"
        cv2.imwrite(img_item, roi_color)
def drawFrame():
    # Show the processed frame in a window named 'frame'.
    cv2.imshow('frame', frame)
# Invert the pickled {name: id} mapping so lookups go id -> name.
with open("Pickles/face-labels.pickle", 'rb') as f:
    readLabels = pickle.load(f)
    labels = {v: k for k, v in readLabels.items()}
""" El método VideoCapture de la API OpenCV, si le pasas el parámetro 0,
te captura el vídeo de la cámara por defecto del ordenador """
# (VideoCapture(0) opens the machine's default camera.)
videoCap = cv2.VideoCapture(0)
while(True):
    ret, frame = captureFrame()
    gray = grayTransform()
    faces = cascadeDetect()
    detectRegionOFInterestAndDrawRectangle()
    drawFrame()
    # The loop (and window) ends when the user presses 's'.
    if cv2.waitKey(20) & 0xFF == ord('s'):
        break
# After leaving the loop, release the camera and destroy all windows.
stopProgram() | Application.py |
# Imports
import numpy as np
import cv2
import pickle

# Haar cascades for frontal-face and eye detection (absolute local paths).
face_cascade = cv2.CascadeClassifier(
    '/home/alejandro/Escritorio/Prototype FaceReconigtion/Cascades/data/haarcascades/haarcascade_frontalface_alt2.xml')
eye_cascade = cv2.CascadeClassifier(
    '/home/alejandro/Escritorio/Prototype FaceReconigtion/Cascades/data/haarcascades/haarcascade_eye.xml')
# LBPH recognizer loaded with the weights written by the training script.
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read("./recognizers/face-trainner.yml")
# Placeholder; replaced below with the inverted pickled id -> name map.
labels = {"name": 1}

def stopProgram():
    """Release the webcam and close every OpenCV window."""
    videoCap.release()
    cv2.destroyAllWindows()

# Capture frame-by-frame
def captureFrame():
    # Returns the (ret, frame) pair from the global capture device.
    return videoCap.read()

def grayTransform():
    # BGR -> grayscale conversion of the current global frame.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

def cascadeDetect():
    # Detect faces in the global grayscale image as (x, y, w, h) boxes.
    return face_cascade.detectMultiScale(
        gray, scaleFactor=1.5, minNeighbors=5)

def drawNameInRectangle(id_, x, y):
    # Put the recognized person's name on the frame at (x, y).
    font = cv2.FONT_HERSHEY_SIMPLEX
    name = labels[id_]
    color = (255, 255, 255)
    stroke = 2
    cv2.putText(frame, name, (x, y), font, 1,
                color, stroke, cv2.LINE_AA)

def personalizeFaceRectangle(x, y, widht, height):
    # Frame around the face.
    """haarcascade_frontalface_alt2.xml only fires on near-frontal faces,
    so the box appears when the subject looks straight at the camera."""
    color = (87, 255, 51)
    stroke = 1
    cv2.rectangle(frame, (x, y), (widht, height), color, stroke)

def drawEyesRectangle(roi_gray, roi_color):
    # Boxes around the eyes inside the face ROI.
    eyes = eye_cascade.detectMultiScale(roi_gray)
    for (ex, ey, ew, eh) in eyes:
        widht = ex+ew
        height = ey+eh
        color = (0, 255, 204)  # BGR
        stroke = 1
        cv2.rectangle(roi_color, (ex, ey), (widht, height), color, stroke)

def detectRegionOFInterestAndDrawRectangle():
    """For each detected face: recognize it, decorate the frame, and save
    the last face crop to Image.png."""
    for (x, y, w, h) in faces:
        # Region of interest: gray for the recognizer, color for drawing.
        roi_gray = gray[y:y+h, x:x+w]
        roi_color = frame[y:y+h, x:x+w]
        widht = x+w
        height = y+h
        # Ask the LBPH recognizer who this face is.
        id_, conf = recognizer.predict(roi_gray)
        # NOTE(review): LBPH conf is a distance (lower = better match);
        # `conf >= 50` labels the weaker matches -- confirm the direction.
        if conf >= 50:
            drawNameInRectangle(id_, x, y)
        personalizeFaceRectangle(x, y, widht, height)
        drawEyesRectangle(roi_gray, roi_color)
        # Persist the most recent detected face as a PNG file.
        img_item = "Image.png"
        cv2.imwrite(img_item, roi_color)

def drawFrame():
    # Show the processed frame in a window named 'frame'.
    cv2.imshow('frame', frame)

# Invert the pickled {name: id} mapping so lookups go id -> name.
with open("Pickles/face-labels.pickle", 'rb') as f:
    readLabels = pickle.load(f)
    labels = {v: k for k, v in readLabels.items()}
""" El método VideoCapture de la API OpenCV, si le pasas el parámetro 0,
te captura el vídeo de la cámara por defecto del ordenador """
# (VideoCapture(0) opens the machine's default camera.)
videoCap = cv2.VideoCapture(0)
while(True):
    ret, frame = captureFrame()
    gray = grayTransform()
    faces = cascadeDetect()
    detectRegionOFInterestAndDrawRectangle()
    drawFrame()
    # The loop (and window) ends when the user presses 's'.
    if cv2.waitKey(20) & 0xFF == ord('s'):
        break
# After leaving the loop, release the camera and destroy all windows.
stopProgram() | 0.515132 | 0.262415 |
from typing import Optional, List, Set, Callable
from rdflib import URIRef, Graph, OWL, RDF, BNode
from rdflib.compare import graph_diff
from rdflib.term import Node
from fhirtordf.rdfsupport.namespaces import FHIR
from fhirtordf.rdfsupport.prettygraph import PrettyGraph
def subj_pred_idx_to_uri(s: URIRef, p: URIRef, idx: Optional[int] = None) -> URIRef:
    """Build the URI that stands in for a FHIR BNode.

    The result is ``<s>.<local name of p>`` with an optional ``_<idx>``
    suffix when the node is an entry in a list.

    :param s: Subject URI (e.g. "fhir:Patient/f201")
    :param p: Predicate URI (e.g. "fhir:Patient.identifier")
    :param idx: Relative position of the BNode if it sits in a list
    :return: URI that can replace the BNode
    """
    local_name = str(p).rsplit('/', 1)[1]
    suffix = '' if idx is None else '_{}'.format(idx)
    return URIRef(str(s) + '.' + local_name + suffix)
def map_node(s: Node, sk_s: URIRef, gin: Graph, gout: Graph) -> None:
    """
    Transform the BNode whose subject is s into its equivalent, replacing s
    with its 'skolemized' equivalent.
    :param s: Actual subject
    :param sk_s: Equivalent URI of subject in output graph
    :param gin: Input graph
    :param gout: Output graph
    """
    for p, o in gin.predicate_objects(s):
        if not isinstance(o, BNode):
            # Literals and URIs copy straight through.
            gout.add((sk_s, p, o))
        else:
            # Name the BNode after parent + predicate (+ fhir:index when
            # present) and recurse into its sub-structure.
            sk_o = subj_pred_idx_to_uri(sk_s, p, gin.value(o, FHIR.index))
            gout.add((sk_s, p, sk_o))
            map_node(o, sk_o, gin, gout)
def skolemize(gin: Graph) -> Graph:
    """
    Replace all of the blank nodes in graph gin with FHIR paths.
    :param gin: input graph
    :return: output graph
    """
    gout = Graph()
    # Emit any unreferenced subject BNodes (boxes).
    # BUGFIX: the old test `len([gin.subject_predicates(s)]) == 0` wrapped the
    # generator in a one-element list, so it was always False and root BNodes
    # were silently dropped; test for "no incoming arcs" directly instead.
    anon_subjs = [s for s in gin.subjects()
                  if isinstance(s, BNode) and next(gin.subject_predicates(s), None) is None]
    if anon_subjs:
        # A single root keeps the plain "treeRoot" name; several get indices.
        idx = None if len(anon_subjs) == 1 else 0
        for s in anon_subjs:
            map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout)
            if idx is not None:
                idx += 1
    # Cover all other non-bnode entries
    for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)):
        map_node(subj, subj, gin, gout)
    return gout
def complete_definition(subj: Node,
                        source_graph: Graph,
                        target_graph: Optional[Graph]=None) -> PrettyGraph:
    """Collect the transitive closure of *subj* into a graph.

    Every triple whose subject is *subj* is copied, and BNode objects are
    followed recursively so the whole nested structure comes along.

    :param subj: URI or BNode whose definition is wanted
    :param source_graph: graph holding the definition
    :param target_graph: accumulator used during recursion
    :return: the populated target graph
    """
    target = PrettyGraph() if target_graph is None else target_graph
    for pred, obj in source_graph.predicate_objects(subj):
        target.add((subj, pred, obj))
        if isinstance(obj, BNode):
            complete_definition(obj, source_graph, target)
    return target
def dump_nt_sorted(g: Graph) -> List[str]:
    """
    Dump graph g as sorted N-Triples lines.
    :param g: graph to dump
    :return: sorted, non-empty lines of the N-Triples rendering
    """
    nt = g.serialize(format='nt')
    # rdflib < 6 returns bytes, rdflib >= 6 returns str -- accept both
    # (the old unconditional .decode('ascii') crashes on newer rdflib).
    if isinstance(nt, bytes):
        return [l.decode('ascii') for l in sorted(nt.splitlines()) if l]
    return [l for l in sorted(nt.splitlines()) if l]
def rdf_compare(g1: Graph, g2: Graph, ignore_owl_version: bool=False, ignore_type_arcs: bool = False,
                compare_filter: Optional[Callable[[Graph, Graph, Graph], None]]=None) -> str:
    """
    Compare graph g1 and g2.
    :param g1: first graph
    :param g2: second graph
    :param ignore_owl_version: drop owl:versionIRI arcs before comparing
    :param ignore_type_arcs: drop rdf:type arcs on BNodes before comparing
    :param compare_filter: Final adjustment for graph difference. Used, for
        example, to deal with FHIR decimal problems.
    :return: Printable description of the differences, "" when they match
    """
    def graph_for_subject(g: Graph, subj: Node) -> Graph:
        # Isolate the subject's full definition, then strip the ignored arcs.
        subj_in_g = complete_definition(subj, g)
        if ignore_type_arcs:
            for ta_s, ta_o in subj_in_g.subject_objects(RDF.type):
                if isinstance(ta_s, BNode) and isinstance(ta_o, URIRef):
                    subj_in_g.remove((ta_s, RDF.type, ta_o))
        if ignore_owl_version:
            subj_in_g.remove((subj, OWL.versionIRI, subj_in_g.value(subj, OWL.versionIRI)))
        return subj_in_g

    def primary_subjects(g: Graph) -> Set[Node]:
        # BUGFIX: this previously tested `len([g.subject_predicates(s)]) == 0`
        # (always False -- the list wraps the generator) and iterated `g1`
        # instead of `g`, so graph 2's subjects came from graph 1.
        anon_subjs = set(anon_s for anon_s in g.subjects()
                         if isinstance(anon_s, BNode) and next(g.subject_predicates(anon_s), None) is None)
        return set(s_ for s_ in g.subjects() if isinstance(s_, URIRef)).union(anon_subjs)

    rval = ""
    # Step 1: Find any subjects in one graph that don't exist in the other
    g1_subjs = primary_subjects(g1)
    g2_subjs = primary_subjects(g2)
    for s in g1_subjs - g2_subjs:
        rval += "\n===== Subjects in Graph 1 but not Graph 2: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g1))
    for s in g2_subjs - g1_subjs:
        rval += "\n===== Subjects in Graph 2 but not Graph 1: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g2))
    # Step 2: Iterate over all of the remaining subjects comparing their contents
    for s in g1_subjs.intersection(g2_subjs):
        s_in_g1 = graph_for_subject(g1, s)
        s_in_g2 = graph_for_subject(g2, s)
        in_both, in_first, in_second = graph_diff(skolemize(s_in_g1), skolemize(s_in_g2))
        if compare_filter:
            compare_filter(in_both, in_first, in_second)
        if len(list(in_first)) or len(list(in_second)):
            rval += "\n\nSubject {} DIFFERENCE: ".format(s) + '=' * 30
            if len(in_first):
                rval += "\n\t----> First: \n" + '\n'.join(dump_nt_sorted(in_first))
            if len(in_second):
                rval += "\n\t----> Second: \n" + '\n'.join(dump_nt_sorted(in_second))
            rval += '-' * 40
    return rval
from rdflib import URIRef, Graph, OWL, RDF, BNode
from rdflib.compare import graph_diff
from rdflib.term import Node
from fhirtordf.rdfsupport.namespaces import FHIR
from fhirtordf.rdfsupport.prettygraph import PrettyGraph
def subj_pred_idx_to_uri(s: URIRef, p: URIRef, idx: Optional[int] = None) -> URIRef:
    """Build the URI that stands in for a FHIR BNode.

    The result is ``<s>.<local name of p>`` with an optional ``_<idx>``
    suffix when the node is an entry in a list.

    :param s: Subject URI (e.g. "fhir:Patient/f201")
    :param p: Predicate URI (e.g. "fhir:Patient.identifier")
    :param idx: Relative position of the BNode if it sits in a list
    :return: URI that can replace the BNode
    """
    local_name = str(p).rsplit('/', 1)[1]
    suffix = '' if idx is None else '_{}'.format(idx)
    return URIRef(str(s) + '.' + local_name + suffix)
def map_node(s: Node, sk_s: URIRef, gin: Graph, gout: Graph) -> None:
    """
    Transform the BNode whose subject is s into its equivalent, replacing s
    with its 'skolemized' equivalent.
    :param s: Actual subject
    :param sk_s: Equivalent URI of subject in output graph
    :param gin: Input graph
    :param gout: Output graph
    """
    for p, o in gin.predicate_objects(s):
        if not isinstance(o, BNode):
            # Literals and URIs copy straight through.
            gout.add((sk_s, p, o))
        else:
            # Name the BNode after parent + predicate (+ fhir:index when
            # present) and recurse into its sub-structure.
            sk_o = subj_pred_idx_to_uri(sk_s, p, gin.value(o, FHIR.index))
            gout.add((sk_s, p, sk_o))
            map_node(o, sk_o, gin, gout)
def skolemize(gin: Graph) -> Graph:
    """
    Replace all of the blank nodes in graph gin with FHIR paths.
    :param gin: input graph
    :return: output graph
    """
    gout = Graph()
    # Emit any unreferenced subject BNodes (boxes).
    # BUGFIX: the old test `len([gin.subject_predicates(s)]) == 0` wrapped the
    # generator in a one-element list, so it was always False and root BNodes
    # were silently dropped; test for "no incoming arcs" directly instead.
    anon_subjs = [s for s in gin.subjects()
                  if isinstance(s, BNode) and next(gin.subject_predicates(s), None) is None]
    if anon_subjs:
        # A single root keeps the plain "treeRoot" name; several get indices.
        idx = None if len(anon_subjs) == 1 else 0
        for s in anon_subjs:
            map_node(s, FHIR['treeRoot' + ('_{}'.format(idx) if idx is not None else '')], gin, gout)
            if idx is not None:
                idx += 1
    # Cover all other non-bnode entries
    for subj in set(s for s in gin.subjects() if isinstance(s, URIRef)):
        map_node(subj, subj, gin, gout)
    return gout
def complete_definition(subj: Node,
                        source_graph: Graph,
                        target_graph: Optional[Graph]=None) -> PrettyGraph:
    """Collect the transitive closure of *subj* into a graph.

    Every triple whose subject is *subj* is copied, and BNode objects are
    followed recursively so the whole nested structure comes along.

    :param subj: URI or BNode whose definition is wanted
    :param source_graph: graph holding the definition
    :param target_graph: accumulator used during recursion
    :return: the populated target graph
    """
    target = PrettyGraph() if target_graph is None else target_graph
    for pred, obj in source_graph.predicate_objects(subj):
        target.add((subj, pred, obj))
        if isinstance(obj, BNode):
            complete_definition(obj, source_graph, target)
    return target
def dump_nt_sorted(g: Graph) -> List[str]:
    """
    Dump graph g as sorted N-Triples lines.
    :param g: graph to dump
    :return: sorted, non-empty lines of the N-Triples rendering
    """
    nt = g.serialize(format='nt')
    # rdflib < 6 returns bytes, rdflib >= 6 returns str -- accept both
    # (the old unconditional .decode('ascii') crashes on newer rdflib).
    if isinstance(nt, bytes):
        return [l.decode('ascii') for l in sorted(nt.splitlines()) if l]
    return [l for l in sorted(nt.splitlines()) if l]
def rdf_compare(g1: Graph, g2: Graph, ignore_owl_version: bool=False, ignore_type_arcs: bool = False,
                compare_filter: Optional[Callable[[Graph, Graph, Graph], None]]=None) -> str:
    """
    Compare graph g1 and g2.
    :param g1: first graph
    :param g2: second graph
    :param ignore_owl_version: drop owl:versionIRI arcs before comparing
    :param ignore_type_arcs: drop rdf:type arcs on BNodes before comparing
    :param compare_filter: Final adjustment for graph difference. Used, for
        example, to deal with FHIR decimal problems.
    :return: Printable description of the differences, "" when they match
    """
    def graph_for_subject(g: Graph, subj: Node) -> Graph:
        # Isolate the subject's full definition, then strip the ignored arcs.
        subj_in_g = complete_definition(subj, g)
        if ignore_type_arcs:
            for ta_s, ta_o in subj_in_g.subject_objects(RDF.type):
                if isinstance(ta_s, BNode) and isinstance(ta_o, URIRef):
                    subj_in_g.remove((ta_s, RDF.type, ta_o))
        if ignore_owl_version:
            subj_in_g.remove((subj, OWL.versionIRI, subj_in_g.value(subj, OWL.versionIRI)))
        return subj_in_g

    def primary_subjects(g: Graph) -> Set[Node]:
        # BUGFIX: this previously tested `len([g.subject_predicates(s)]) == 0`
        # (always False -- the list wraps the generator) and iterated `g1`
        # instead of `g`, so graph 2's subjects came from graph 1.
        anon_subjs = set(anon_s for anon_s in g.subjects()
                         if isinstance(anon_s, BNode) and next(g.subject_predicates(anon_s), None) is None)
        return set(s_ for s_ in g.subjects() if isinstance(s_, URIRef)).union(anon_subjs)

    rval = ""
    # Step 1: Find any subjects in one graph that don't exist in the other
    g1_subjs = primary_subjects(g1)
    g2_subjs = primary_subjects(g2)
    for s in g1_subjs - g2_subjs:
        rval += "\n===== Subjects in Graph 1 but not Graph 2: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g1))
    for s in g2_subjs - g1_subjs:
        rval += "\n===== Subjects in Graph 2 but not Graph 1: "
        rval += PrettyGraph.strip_prefixes(complete_definition(s, g2))
    # Step 2: Iterate over all of the remaining subjects comparing their contents
    for s in g1_subjs.intersection(g2_subjs):
        s_in_g1 = graph_for_subject(g1, s)
        s_in_g2 = graph_for_subject(g2, s)
        in_both, in_first, in_second = graph_diff(skolemize(s_in_g1), skolemize(s_in_g2))
        if compare_filter:
            compare_filter(in_both, in_first, in_second)
        if len(list(in_first)) or len(list(in_second)):
            rval += "\n\nSubject {} DIFFERENCE: ".format(s) + '=' * 30
            if len(in_first):
                rval += "\n\t----> First: \n" + '\n'.join(dump_nt_sorted(in_first))
            if len(in_second):
                rval += "\n\t----> Second: \n" + '\n'.join(dump_nt_sorted(in_second))
            rval += '-' * 40
    return rval
from typing import Any, Dict, List, NoReturn, Optional, Type, Union
from sqlalchemy.exc import NoResultFound
from sqlalchemy.orm import Session
from tgbot.db.models import (
BaseModel,
TelegramUser,
Habit,
Event
)
class BaseRepo:
    """
    Database abstraction layer over one SQLAlchemy model.

    Subclasses set ``model`` and inherit these generic CRUD helpers.
    """

    model: Type[BaseModel]

    def __init__(self, session: Session) -> None:
        self.session = session

    def get(
        self, default: Optional[str] = None, **kwargs
    ) -> Union[BaseModel, NoReturn]:
        """
        Get one row from the table or raise.
        :param default: Name of a single field to restrict the lookup to;
            when given, only ``kwargs[default]`` is used as the filter.
        :param kwargs: Column filters for the search.
        :return: self.model type object
        :raises NoResultFound: when no row matches (``.one()`` also raises
            on multiple matches).
        """
        check_args = kwargs
        if default is not None:
            check_args = {default: kwargs[default]}
        return self.session.query(self.model).filter_by(**check_args).one()

    def filter(self, kwargs) -> List[BaseModel]:
        """
        Filter data.
        :param kwargs: iterable of SQLAlchemy filter expressions
        :return: Filtered rows
        """
        expression = self.session.query(self.model)
        for item in kwargs:
            # Plain attribute call; the old getattr(expression, "filter")
            # indirection was equivalent and only obscured the intent.
            expression = expression.filter(item)
        return expression.all()

    def create(self, instance: Optional[BaseModel] = None, **kwargs) -> BaseModel:
        """
        Persist *instance* (or a new ``model(**kwargs)``) and commit.
        :param instance: Instance of self.model type
        :param kwargs: Arguments for create instance
        :return: self.model type object
        """
        if instance is None:
            instance = self.model(**kwargs)
        self.session.add(instance)
        self.session.commit()
        return instance

    def update(self, instance: BaseModel, values: Dict[str, Any]) -> BaseModel:
        """
        Set each attribute in *values* on *instance* and commit.
        :param instance: Instance of self.model type
        :param values: Arguments for update instance
        :return: self.model type object
        """
        for key, item in values.items():
            setattr(instance, key, item)
        self.session.commit()
        return instance

    def list(self) -> List[BaseModel]:
        """
        Get all list of instances from table.
        :return: List of table records
        """
        return self.session.query(self.model).all()

    def get_or_create(self, default: Optional[str] = None, **kwargs) -> BaseModel:
        """
        Return the matching row, creating it first when absent.
        :param default: Specific lookup field
        :param kwargs: Arguments for create instance
        :return: self.model type object
        """
        try:
            instance = self.get(default=default, **kwargs)
        except NoResultFound:
            instance = self.create(**kwargs)
        return instance

    def update_or_create(
        self,
        instance: Optional[BaseModel] = None,
        default: Optional[str] = None,
        **kwargs
    ) -> BaseModel:
        """
        Update *instance* (or the row matching *kwargs*) with *kwargs*,
        creating the row when it does not exist.

        BUGFIX: ``get()`` raises NoResultFound instead of returning None, so
        the previous ``if instance is None`` fallback after the lookup was
        unreachable and a missing row crashed instead of being created.

        :param default: Specific lookup field
        :param instance: self.model type object
        :param kwargs: Values for create or update
        :return: self.model type object
        """
        if instance is None:
            try:
                instance = self.get(default=default, **kwargs)
            except NoResultFound:
                # create() already stores all of kwargs; nothing to update.
                return self.create(**kwargs)
        return self.update(instance=instance, values=kwargs)

    def delete(self, **kwargs):
        """Delete all rows matching the column filters and commit."""
        self.session.query(self.model).filter_by(**kwargs).delete()
        self.session.commit()

    def truncate(self) -> None:
        """Delete all data from table"""
        self.session.query(self.model).delete()
        self.session.commit()
class TelegramUserRepo(BaseRepo):
    """Repository for TelegramUser rows."""

    model = TelegramUser

    def get_habits(self, instance):
        """Return the habits related to *instance*."""
        return instance.habits

    def get_events(self, instance):
        """Map each of the user's habits to that habit's events."""
        return {habit: habit.events for habit in self.get_habits(instance)}
class HabitRepo(BaseRepo):
    """Repository for Habit rows."""

    model = Habit

    def get_events(self, instance):
        # `instance.events` exposes .all(), so it is presumably a query-like
        # (dynamic) relationship -- confirm against the Habit model.
        return instance.events.all()
class EventRepo(BaseRepo):
    """Repository for Event rows."""

    model = Event

    def is_today_events_completed(self, user):
        """True when every event tied to the user's habits is completed.

        NOTE(review): despite the name, no date filter is applied here --
        confirm whether "today" filtering happens elsewhere.
        """
        user_events = (
            self.session.query(self.model)
            .join(self.model.habit, aliased=True)
            .filter_by(user_telegram_id=user.telegram_id)
            .all()
        )
        return all(event.is_completed for event in user_events)

    def complete_event(self, instance):
        """Mark *instance* completed and persist the change."""
        instance.is_completed = True
        self.session.commit()
        return instance
from sqlalchemy.exc import NoResultFound
from sqlalchemy.orm import Session
from tgbot.db.models import (
BaseModel,
TelegramUser,
Habit,
Event
)
class BaseRepo:
    """
    Database abstraction layer over one SQLAlchemy model.

    Subclasses set ``model`` and inherit these generic CRUD helpers.
    """

    model: Type[BaseModel]

    def __init__(self, session: Session) -> None:
        self.session = session

    def get(
        self, default: Optional[str] = None, **kwargs
    ) -> Union[BaseModel, NoReturn]:
        """
        Get one row from the table or raise.
        :param default: Name of a single field to restrict the lookup to;
            when given, only ``kwargs[default]`` is used as the filter.
        :param kwargs: Column filters for the search.
        :return: self.model type object
        :raises NoResultFound: when no row matches (``.one()`` also raises
            on multiple matches).
        """
        check_args = kwargs
        if default is not None:
            check_args = {default: kwargs[default]}
        return self.session.query(self.model).filter_by(**check_args).one()

    def filter(self, kwargs) -> List[BaseModel]:
        """
        Filter data.
        :param kwargs: iterable of SQLAlchemy filter expressions
        :return: Filtered rows
        """
        expression = self.session.query(self.model)
        for item in kwargs:
            # Plain attribute call; the old getattr(expression, "filter")
            # indirection was equivalent and only obscured the intent.
            expression = expression.filter(item)
        return expression.all()

    def create(self, instance: Optional[BaseModel] = None, **kwargs) -> BaseModel:
        """
        Persist *instance* (or a new ``model(**kwargs)``) and commit.
        :param instance: Instance of self.model type
        :param kwargs: Arguments for create instance
        :return: self.model type object
        """
        if instance is None:
            instance = self.model(**kwargs)
        self.session.add(instance)
        self.session.commit()
        return instance

    def update(self, instance: BaseModel, values: Dict[str, Any]) -> BaseModel:
        """
        Set each attribute in *values* on *instance* and commit.
        :param instance: Instance of self.model type
        :param values: Arguments for update instance
        :return: self.model type object
        """
        for key, item in values.items():
            setattr(instance, key, item)
        self.session.commit()
        return instance

    def list(self) -> List[BaseModel]:
        """
        Get all list of instances from table.
        :return: List of table records
        """
        return self.session.query(self.model).all()

    def get_or_create(self, default: Optional[str] = None, **kwargs) -> BaseModel:
        """
        Return the matching row, creating it first when absent.
        :param default: Specific lookup field
        :param kwargs: Arguments for create instance
        :return: self.model type object
        """
        try:
            instance = self.get(default=default, **kwargs)
        except NoResultFound:
            instance = self.create(**kwargs)
        return instance

    def update_or_create(
        self,
        instance: Optional[BaseModel] = None,
        default: Optional[str] = None,
        **kwargs
    ) -> BaseModel:
        """
        Update *instance* (or the row matching *kwargs*) with *kwargs*,
        creating the row when it does not exist.

        BUGFIX: ``get()`` raises NoResultFound instead of returning None, so
        the previous ``if instance is None`` fallback after the lookup was
        unreachable and a missing row crashed instead of being created.

        :param default: Specific lookup field
        :param instance: self.model type object
        :param kwargs: Values for create or update
        :return: self.model type object
        """
        if instance is None:
            try:
                instance = self.get(default=default, **kwargs)
            except NoResultFound:
                # create() already stores all of kwargs; nothing to update.
                return self.create(**kwargs)
        return self.update(instance=instance, values=kwargs)

    def delete(self, **kwargs):
        """Delete all rows matching the column filters and commit."""
        self.session.query(self.model).filter_by(**kwargs).delete()
        self.session.commit()

    def truncate(self) -> None:
        """Delete all data from table"""
        self.session.query(self.model).delete()
        self.session.commit()
class TelegramUserRepo(BaseRepo):
    """Repository for TelegramUser rows."""

    model = TelegramUser

    def get_habits(self, instance):
        """Return the habits related to *instance*."""
        return instance.habits

    def get_events(self, instance):
        """Map each of the user's habits to that habit's events."""
        return {habit: habit.events for habit in self.get_habits(instance)}
class HabitRepo(BaseRepo):
    """Repository for Habit rows."""

    model = Habit

    def get_events(self, instance):
        # `instance.events` exposes .all(), so it is presumably a query-like
        # (dynamic) relationship -- confirm against the Habit model.
        return instance.events.all()
class EventRepo(BaseRepo):
    """Repository for Event rows."""

    model = Event

    def is_today_events_completed(self, user):
        """True when every event tied to the user's habits is completed.

        NOTE(review): despite the name, no date filter is applied here --
        confirm whether "today" filtering happens elsewhere.
        """
        user_events = (
            self.session.query(self.model)
            .join(self.model.habit, aliased=True)
            .filter_by(user_telegram_id=user.telegram_id)
            .all()
        )
        return all(event.is_completed for event in user_events)

    def complete_event(self, instance):
        """Mark *instance* completed and persist the change."""
        instance.is_completed = True
        self.session.commit()
        return instance
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.collections import LineCollection
def grid(ax):
    """Paint a hand-rolled background grid on *ax*.

    Reads the module-level xmin/xmax/ymin/ymax at call time, so it must run
    after those globals hold the wanted limits.  Minor lines are light grey
    ("0.75") and thin; major lines are darker ("0.50") and thicker.
    """
    segments, colors, linewidths = [], [], []
    specs = (
        (ax.xaxis.get_minorticklocs(), "0.75", 0.50, True),
        (ax.xaxis.get_majorticklocs(), "0.50", 0.75, True),
        (ax.yaxis.get_minorticklocs(), "0.75", 0.50, False),
        (ax.yaxis.get_majorticklocs(), "0.50", 0.75, False),
    )
    for locs, shade, width, vertical in specs:
        for t in locs:
            if vertical:
                segments.append([(t, ymin), (t, ymax)])
            else:
                segments.append([(xmin, t), (xmax, t)])
            colors.append(shade)
            linewidths.append(width)
    collection = LineCollection(segments, zorder=-10,
                                colors=colors, linewidths=linewidths)
    ax.add_collection(collection)
# Circle of radius 4999.9 about center 5000: every point stays >= 0.1, so
# the curve remains inside the positive limits used by the log panels below.
fig = plt.figure(figsize=(8,8))
xmin, xmax = 0,10000
ymin, ymax = 0,10000
T = np.linspace(0,2*np.pi,1000)
X = (xmax+xmin)/2 + 4999.9*np.cos(T)
Y = (ymax+ymin)/2 + 4999.9*np.sin(T)
# -----------------------------------------------------------------------------
# Panel 1: both axes linear.
ax = plt.subplot(2,2,1)
ax.ticklabel_format(axis="both", style="sci")
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# NOTE(review): set_*ticklabels without set_*ticks relies on the current
# tick locations -- confirm the labels still line up on newer matplotlib.
ax.set_xticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
ax.set_yticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
grid(ax)
ax.set_title("X linear, Y linear", size="medium")
# -----------------------------------------------------------------------------
# Panel 2: X logarithmic, Y linear (no title set here in the original).
ax = plt.subplot(2,2,2)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
# grid() reads these globals at call time: move xmin off zero for log x.
xmin, ymin = 0.1, 0.0
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xscale("log")
ax.set_yticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
grid(ax)
# -----------------------------------------------------------------------------
# Panel 3: X linear, Y logarithmic.
ax = plt.subplot(2,2,3)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
xmin, ymin = 0.0, 0.1
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_yscale("log")
ax.set_xticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
grid(ax)
ax.set_title("X linear, Y logarithmic", size="medium")
# -----------------------------------------------------------------------------
# Panel 4: both axes logarithmic.
ax = plt.subplot(2,2,4)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
xmin, ymin = 0.1, 0.1
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xscale("log")
ax.set_yscale("log")
grid(ax)
ax.set_title("X logarithmic, Y logarithmic", size="medium")
plt.savefig("scales.pdf")
plt.show() | reference-scales.py | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
from matplotlib.collections import LineCollection
def grid(ax):
    """Paint a hand-rolled background grid on *ax*.

    Reads the module-level xmin/xmax/ymin/ymax at call time, so it must run
    after those globals hold the wanted limits.  Minor lines are light grey
    ("0.75") and thin; major lines are darker ("0.50") and thicker.
    """
    segments, colors, linewidths = [], [], []
    specs = (
        (ax.xaxis.get_minorticklocs(), "0.75", 0.50, True),
        (ax.xaxis.get_majorticklocs(), "0.50", 0.75, True),
        (ax.yaxis.get_minorticklocs(), "0.75", 0.50, False),
        (ax.yaxis.get_majorticklocs(), "0.50", 0.75, False),
    )
    for locs, shade, width, vertical in specs:
        for t in locs:
            if vertical:
                segments.append([(t, ymin), (t, ymax)])
            else:
                segments.append([(xmin, t), (xmax, t)])
            colors.append(shade)
            linewidths.append(width)
    collection = LineCollection(segments, zorder=-10,
                                colors=colors, linewidths=linewidths)
    ax.add_collection(collection)
# Circle of radius 4999.9 about center 5000: every point stays >= 0.1, so
# the curve remains inside the positive limits used by the log panels below.
fig = plt.figure(figsize=(8,8))
xmin, xmax = 0,10000
ymin, ymax = 0,10000
T = np.linspace(0,2*np.pi,1000)
X = (xmax+xmin)/2 + 4999.9*np.cos(T)
Y = (ymax+ymin)/2 + 4999.9*np.sin(T)
# -----------------------------------------------------------------------------
# Panel 1: both axes linear.
ax = plt.subplot(2,2,1)
ax.ticklabel_format(axis="both", style="sci")
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# NOTE(review): set_*ticklabels without set_*ticks relies on the current
# tick locations -- confirm the labels still line up on newer matplotlib.
ax.set_xticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
ax.set_yticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
grid(ax)
ax.set_title("X linear, Y linear", size="medium")
# -----------------------------------------------------------------------------
# Panel 2: X logarithmic, Y linear (no title set here in the original).
ax = plt.subplot(2,2,2)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
# grid() reads these globals at call time: move xmin off zero for log x.
xmin, ymin = 0.1, 0.0
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xscale("log")
ax.set_yticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
grid(ax)
# -----------------------------------------------------------------------------
# Panel 3: X linear, Y logarithmic.
ax = plt.subplot(2,2,3)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
xmin, ymin = 0.0, 0.1
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_yscale("log")
ax.set_xticklabels(["0","2.10³","4.10³","6.10³","8.10³","10⁴"])
grid(ax)
ax.set_title("X linear, Y logarithmic", size="medium")
# -----------------------------------------------------------------------------
# Panel 4: both axes logarithmic.
ax = plt.subplot(2,2,4)
ax.xaxis.set_minor_locator(AutoMinorLocator(10))
ax.yaxis.set_minor_locator(AutoMinorLocator(10))
ax.plot(X, Y, color="black", linewidth=1.0)
xmin, ymin = 0.1, 0.1
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
ax.set_xscale("log")
ax.set_yscale("log")
grid(ax)
ax.set_title("X logarithmic, Y logarithmic", size="medium")
plt.savefig("scales.pdf")
plt.show() | 0.435661 | 0.446434 |
import os
import csv
import click
import random
import uwnet_datasets
def create_list(foldername, fulldir=True, suffix=".jpg"):
    """List the files in *foldername* that end with *suffix*, sorted by name.

    BUGFIX: *suffix* was documented as a filter but never applied; every
    directory entry used to be returned regardless of extension.

    :param foldername: The full path of the folder.
    :param fulldir: Whether to return the full path or not.
    :param suffix: Keep only filenames ending with this suffix.
    :return: Sorted list of (optionally fully-qualified) matching filenames.
    """
    names = sorted(item for item in os.listdir(foldername)
                   if item.endswith(suffix))
    if fulldir:
        return [os.path.join(foldername, item) for item in names]
    return names
@click.command()
@click.option('--image_path_a',
              type=click.STRING,
              default='/home/honey/honey/underwater/nyu/resized_hazy_smoothDepth',
              help='The path to folder containing the .npy RGBD images.')
@click.option('--image_path_b',
              type=click.STRING,
              default='/home/honey/honey/underwater/datasets/Berman_hazelines/final_rgb',
              help='The path to the folder containing the underwater images.')
@click.option('--dataset_name',
              type=click.STRING,
              default='hazelines',
              help='The name of the dataset in uwnet_dataset.')
@click.option('--do_shuffle',
              type=click.BOOL,
              default=False,
              help='Whether to shuffle images when creating the dataset.')
@click.option('--mode',
              type=click.STRING,
              default='test',
              help='Choose one among ["train","test"].')
def create_dataset(image_path_a, image_path_b,
                   dataset_name, do_shuffle, mode):
    """Write the CSV that feeds uwnet for *dataset_name*.

    train mode: each row pairs an A-folder image with a B-folder image,
    cycling over the shorter list until num_rows rows exist.
    test mode: one B-folder image path per row.

    BUGFIX: list_b was previously built only when mode == 'train', so the
    default mode ('test') crashed with NameError at `all_data_tuples = list_b`.
    """
    # The B-folder listing is needed by both modes; A only for train pairs.
    list_b = create_list(image_path_b, True,
                         uwnet_datasets.DATASET_TO_IMAGETYPE[dataset_name])
    if mode == 'train':
        list_a = create_list(image_path_a, True,
                             uwnet_datasets.DATASET_TO_IMAGETYPE[dataset_name])
    output_path = uwnet_datasets.PATH_TO_CSV[dataset_name]
    num_rows = uwnet_datasets.DATASET_TO_SIZES[dataset_name]
    all_data_tuples = []
    if mode == 'train':
        for i in range(num_rows):
            all_data_tuples.append((
                list_a[i % len(list_a)],
                list_b[i % len(list_b)]
            ))
    elif mode == 'test':
        all_data_tuples = list_b
    if do_shuffle is True:
        random.shuffle(all_data_tuples)
    # newline='' stops the csv module from writing blank rows on Windows.
    with open(output_path, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        if mode == 'train':
            # Old code iterated enumerate(...) and indexed [1]; iterate directly.
            for data_tuple in all_data_tuples:
                csv_writer.writerow(list(data_tuple))
        elif mode == 'test':
            for data_tuple in all_data_tuples:
                csv_writer.writerow((data_tuple,))
if __name__ == '__main__':
create_dataset() | create_uwnet_dataset.py | import os
import csv
import click
import random
import uwnet_datasets
def create_list(foldername, fulldir=True, suffix=".jpg"):
    """List the files in *foldername* that end with *suffix*, sorted by name.

    BUGFIX: *suffix* was documented as a filter but never applied; every
    directory entry used to be returned regardless of extension.

    :param foldername: The full path of the folder.
    :param fulldir: Whether to return the full path or not.
    :param suffix: Keep only filenames ending with this suffix.
    :return: Sorted list of (optionally fully-qualified) matching filenames.
    """
    names = sorted(item for item in os.listdir(foldername)
                   if item.endswith(suffix))
    if fulldir:
        return [os.path.join(foldername, item) for item in names]
    return names
@click.command()
@click.option('--image_path_a',
              type=click.STRING,
              default='/home/honey/honey/underwater/nyu/resized_hazy_smoothDepth',
              help='The path to folder containing the .npy RGBD images.')
@click.option('--image_path_b',
              type=click.STRING,
              default='/home/honey/honey/underwater/datasets/Berman_hazelines/final_rgb',
              help='The path to the folder containing the underwater images.')
@click.option('--dataset_name',
              type=click.STRING,
              default='hazelines',
              help='The name of the dataset in uwnet_dataset.')
@click.option('--do_shuffle',
              type=click.BOOL,
              default=False,
              help='Whether to shuffle images when creating the dataset.')
@click.option('--mode',
              type=click.STRING,
              default='test',
              help='Choose one among ["train","test"].')
def create_dataset(image_path_a, image_path_b,
                   dataset_name, do_shuffle, mode):
    """Write the CSV that feeds uwnet for *dataset_name*.

    train mode: each row pairs an A-folder image with a B-folder image,
    cycling over the shorter list until num_rows rows exist.
    test mode: one B-folder image path per row.

    BUGFIX: list_b was previously built only when mode == 'train', so the
    default mode ('test') crashed with NameError at `all_data_tuples = list_b`.
    """
    # The B-folder listing is needed by both modes; A only for train pairs.
    list_b = create_list(image_path_b, True,
                         uwnet_datasets.DATASET_TO_IMAGETYPE[dataset_name])
    if mode == 'train':
        list_a = create_list(image_path_a, True,
                             uwnet_datasets.DATASET_TO_IMAGETYPE[dataset_name])
    output_path = uwnet_datasets.PATH_TO_CSV[dataset_name]
    num_rows = uwnet_datasets.DATASET_TO_SIZES[dataset_name]
    all_data_tuples = []
    if mode == 'train':
        for i in range(num_rows):
            all_data_tuples.append((
                list_a[i % len(list_a)],
                list_b[i % len(list_b)]
            ))
    elif mode == 'test':
        all_data_tuples = list_b
    if do_shuffle is True:
        random.shuffle(all_data_tuples)
    # newline='' stops the csv module from writing blank rows on Windows.
    with open(output_path, 'w', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        if mode == 'train':
            # Old code iterated enumerate(...) and indexed [1]; iterate directly.
            for data_tuple in all_data_tuples:
                csv_writer.writerow(list(data_tuple))
        elif mode == 'test':
            for data_tuple in all_data_tuples:
                csv_writer.writerow((data_tuple,))
if __name__ == '__main__':
create_dataset() | 0.386185 | 0.173498 |
import os
import discord
from discord.ext import commands
import requests
import random
import numpy as np
from pydub import AudioSegment as audi
from moviepy.editor import *
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip as cutr
import youtube_dl
audi.converter = "C:\\ffmpeg\\bin\\ffmpeg.exe"
audi.ffmpeg = "C:\\ffmpeg\\bin\\ffmpeg.exe"
audi.ffprobe ="C:\\ffmpeg\\bin\\ffprobe.exe"
if os.getcwd().find("cogs") > -1 :
os.chdir("..")
path = os.getcwd()
path+="\\tempstore"
class AV(commands.Cog):
def __init__(self, bot):
self.client = bot
@commands.Cog.listener()
async def on_ready(self):
print("AV cog loaded")
async def hwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url.lower()
if url[-3:] == "jpg" or url[-3:] == "png" :
return url
if x.content[-3:].lower() == "jpg" or x.content[-3:].lower() == "png" :
return x.content
async def ghwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "gif":
return url
if x.content[-3:] == "gif" :
return x.content
async def ahwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "mp3" or url[-3:] == "wav":
return url
if x.content[-3:] == "wav" or x.content[-3:] == "mp3":
return x.content
async def mhwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "mp4" or url[-3:] == "mov" or url[-4:] == "webm" :
return url
if x.content[-3:] == "mp4" or x.content[-3:] == "mov" or x.content[-4:] == "webm":
return x.content
def dwn(url, fln):
r = requests.get(url)
f = open(fln,"wb")
f.write(r.content)
f.close
@commands.command()
async def play(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.ahwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
if form == 'mp3' :
clip = audi.from_mp3("base."+form)
else :
clip = audi.from_wav("base."+form)
query = "base."+form
chnnl= ctx.author.voice.channel
if chnnl == None :
await ctx.send("JOIN A VOICE CHAT DUMBASS")
return
if ctx.voice_client is not None :
await ctx.voice_client.move_to(chnnl)
else :
await chnnl.connect()
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
time.sleep(len(clip)/1000)
await ctx.voice_client.disconnect()
@commands.command()
async def gain(self,ctx,db=6):
os.chdir(path+"\\sounds")
url = await AV.ahwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
if form == 'mp3' :
clip = audi.from_mp3("base."+form)
else :
clip = audi.from_wav("base."+form)
clip = clip.apply_gain(db)
clip.export("amp.mp3", format="mp3")
await ctx.send(file=discord.File('amp.mp3'))
@commands.command()
async def ytdown(self,ctx,url, quality="worst"):
try :
quality = quality.lower()
except :
ctx.send("thats not a word")
if quality == "best" :
ydl_opts = {
'format': 'best',
'outtmpl': 'del',
'noplaylist' : True,
}
elif quality == "worst" :
ydl_opts = {
'format': 'worst',
'outtmpl': 'del',
'noplaylist' : True,
}
else :
ydl_opts = {
'format': 'worst',
'outtmpl': 'del',
'noplaylist' : True,
}
os.chdir(path+"\\sounds")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
files=os.listdir()
res = None
for x in files :
if x.find('del') > -1 :
res = x
try :
video = VideoFileClip(res)
video.write_videofile("base.mp4")
os.remove(res)
except :
await ctx.send("Error downloading the video")
try :
await ctx.send(file=discord.File('base.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def audiox(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("result.mp3")
try :
await ctx.send(file=discord.File('result.mp3'))
except:
await ctx.send("File to large")
@commands.command()
async def vamp(self,ctx, db=12):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = video.volumex(db/6)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pvamp(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
subs = []
for x in range(1, int(video.duration*10)):
pos1 = (x-1)/10
pos2 = x/10
if x == int(video.duration*10) :
sub = video.subclip(t_start=pos2, t_end=video.duration)
else :
sub = video.subclip(t_start=pos1, t_end=pos2)
sub = sub.volumex(pos2*1.1)
subs.append(sub)
fclip = concatenate_videoclips(subs)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def distort(self,ctx, ds=5, db=12):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = video.volumex(db/6)
video = vfx.colorx(video, int(ds))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pdistort(self,ctx, ds=5):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
seg = int(leng/10)
clips = []
for x in range(1,11) :
if x == 10 :
sub = video.subclip(t_start=(x-1)*seg, t_end=leng)
else :
sub = video.subclip(t_start=(x-1)*seg, t_end=seg*x)
sub = vfx.colorx(sub,x)
clips.append(sub)
fclip = concatenate_videoclips(clips)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def vshrink(self,ctx, ds=5):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
w,h = video.size
w = int(w/2)
h = int(h/2)
video = vfx.resize(video, (w,h))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def spedup(self,ctx, multi=12):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = vfx.speedx(video, multi)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def vdownscale(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("temp.mp3")
clip = audi.from_mp3("temp.mp3")
clip = clip.set_frame_rate(24000)
clip.export("temp.mp3", bitrate="16k", format="mp3")
audio = AudioFileClip("temp.mp3")
video = video.set_audio(audio)
w,h = video.size
w = int(w/16)
h = int(h/16)
video = vfx.resize(video, (w,h))
#audio = audio.fx(resize, 0.125, method='bilinear')
w = int(w*16)
h = int(h*16)
video = vfx.resize(video, (w,h))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def fhalf(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
mid = int(leng/2)
cutr("base."+form, 0, mid, targetname="res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pvdownscale(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("temp.mp3")
clip = audi.from_mp3("temp.mp3")
clip = clip.set_frame_rate(24000)
flag = True
bit = 32
seg = int(video.duration/6)
aclips = []
for x in range(1,7) :
clip.export("temp.mp3", bitrate=str(bit)+'k', format="mp3")
audio = AudioFileClip("temp.mp3")
if x == 6 :
taudio = audio.subclip((x)*seg, video.duration)
else :
taudio = audio.subclip((x-1)*seg, seg*x)
bit/=2
aclips.append(taudio)
clips = []
for x in range(1,7) :
if x == 6 :
print("fa")
tvideo = video.subclip((x)*seg, video.duration)
else :
tvideo = video.subclip((x-1)*seg, seg*x)
h,w=video.size
h /= int(2*x)
w /= int(2*x)
tvideo = vfx.resize(tvideo, (w,h))
h *= (2*x)
w *= (2*x)
tvideo = vfx.resize(tvideo, (w,h))
tvideo = tvideo.set_audio(aclips[x-1])
clips.append(tvideo)
fclip = concatenate_videoclips(clips)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def bhalf(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
mid = int(leng/2)
cutr("base."+form, mid, leng-1, targetname="res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def lframe(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
video.save_frame("res.png",t=leng-1,withmask=True)
try :
await ctx.send(file=discord.File('res.png'))
except:
await ctx.send("File to large")
@commands.command()
async def mp4gif(self,ctx, db=12):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video.write_gif("res.gif")
try :
await ctx.send(file=discord.File('res.gif'))
except:
await ctx.send("File to large")
@commands.command()
async def gifmp4(self,ctx) :
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.ghwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
url = await AV.ahwnt(ctx)
AV.dwn(url,"base.mp3")
audio = AudioFileClip("base.mp3")
clips = []
if video.duration > audio.duration :
clips.append(video.subclip(0, audio.duration))
else :
leng=audio.duration-video.duration
clips.append(video)
while leng >= video.duration :
clips.append(video)
leng -= video.duration
clips.append(video.subclip(0,leng))
video = concatenate_videoclips(clips)
video = video.set_audio(audio)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
def setup(bot):
bot.add_cog(AV(bot)) | cogs/AV.py | import os
import discord
from discord.ext import commands
import requests
import random
import numpy as np
from pydub import AudioSegment as audi
from moviepy.editor import *
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip as cutr
import youtube_dl
audi.converter = "C:\\ffmpeg\\bin\\ffmpeg.exe"
audi.ffmpeg = "C:\\ffmpeg\\bin\\ffmpeg.exe"
audi.ffprobe ="C:\\ffmpeg\\bin\\ffprobe.exe"
if os.getcwd().find("cogs") > -1 :
os.chdir("..")
path = os.getcwd()
path+="\\tempstore"
class AV(commands.Cog):
def __init__(self, bot):
self.client = bot
@commands.Cog.listener()
async def on_ready(self):
print("AV cog loaded")
async def hwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url.lower()
if url[-3:] == "jpg" or url[-3:] == "png" :
return url
if x.content[-3:].lower() == "jpg" or x.content[-3:].lower() == "png" :
return x.content
async def ghwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "gif":
return url
if x.content[-3:] == "gif" :
return x.content
async def ahwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "mp3" or url[-3:] == "wav":
return url
if x.content[-3:] == "wav" or x.content[-3:] == "mp3":
return x.content
async def mhwnt(ctx):
chnnl = ctx.message.channel
msgs = await chnnl.history(limit=10).flatten()
url = None
for x in msgs :
if len(x.attachments) > 0 :
url = x.attachments[0].url
if url[-3:] == "mp4" or url[-3:] == "mov" or url[-4:] == "webm" :
return url
if x.content[-3:] == "mp4" or x.content[-3:] == "mov" or x.content[-4:] == "webm":
return x.content
def dwn(url, fln):
r = requests.get(url)
f = open(fln,"wb")
f.write(r.content)
f.close
@commands.command()
async def play(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.ahwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
if form == 'mp3' :
clip = audi.from_mp3("base."+form)
else :
clip = audi.from_wav("base."+form)
query = "base."+form
chnnl= ctx.author.voice.channel
if chnnl == None :
await ctx.send("JOIN A VOICE CHAT DUMBASS")
return
if ctx.voice_client is not None :
await ctx.voice_client.move_to(chnnl)
else :
await chnnl.connect()
source = discord.PCMVolumeTransformer(discord.FFmpegPCMAudio(query))
ctx.voice_client.play(source, after=lambda e: print('Player error: %s' % e) if e else None)
time.sleep(len(clip)/1000)
await ctx.voice_client.disconnect()
@commands.command()
async def gain(self,ctx,db=6):
os.chdir(path+"\\sounds")
url = await AV.ahwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
if form == 'mp3' :
clip = audi.from_mp3("base."+form)
else :
clip = audi.from_wav("base."+form)
clip = clip.apply_gain(db)
clip.export("amp.mp3", format="mp3")
await ctx.send(file=discord.File('amp.mp3'))
@commands.command()
async def ytdown(self,ctx,url, quality="worst"):
try :
quality = quality.lower()
except :
ctx.send("thats not a word")
if quality == "best" :
ydl_opts = {
'format': 'best',
'outtmpl': 'del',
'noplaylist' : True,
}
elif quality == "worst" :
ydl_opts = {
'format': 'worst',
'outtmpl': 'del',
'noplaylist' : True,
}
else :
ydl_opts = {
'format': 'worst',
'outtmpl': 'del',
'noplaylist' : True,
}
os.chdir(path+"\\sounds")
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([url])
files=os.listdir()
res = None
for x in files :
if x.find('del') > -1 :
res = x
try :
video = VideoFileClip(res)
video.write_videofile("base.mp4")
os.remove(res)
except :
await ctx.send("Error downloading the video")
try :
await ctx.send(file=discord.File('base.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def audiox(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("result.mp3")
try :
await ctx.send(file=discord.File('result.mp3'))
except:
await ctx.send("File to large")
@commands.command()
async def vamp(self,ctx, db=12):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = video.volumex(db/6)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pvamp(self,ctx):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
subs = []
for x in range(1, int(video.duration*10)):
pos1 = (x-1)/10
pos2 = x/10
if x == int(video.duration*10) :
sub = video.subclip(t_start=pos2, t_end=video.duration)
else :
sub = video.subclip(t_start=pos1, t_end=pos2)
sub = sub.volumex(pos2*1.1)
subs.append(sub)
fclip = concatenate_videoclips(subs)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def distort(self,ctx, ds=5, db=12):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = video.volumex(db/6)
video = vfx.colorx(video, int(ds))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pdistort(self,ctx, ds=5):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
seg = int(leng/10)
clips = []
for x in range(1,11) :
if x == 10 :
sub = video.subclip(t_start=(x-1)*seg, t_end=leng)
else :
sub = video.subclip(t_start=(x-1)*seg, t_end=seg*x)
sub = vfx.colorx(sub,x)
clips.append(sub)
fclip = concatenate_videoclips(clips)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def vshrink(self,ctx, ds=5):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
w,h = video.size
w = int(w/2)
h = int(h/2)
video = vfx.resize(video, (w,h))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def spedup(self,ctx, multi=12):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video = vfx.speedx(video, multi)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def vdownscale(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("temp.mp3")
clip = audi.from_mp3("temp.mp3")
clip = clip.set_frame_rate(24000)
clip.export("temp.mp3", bitrate="16k", format="mp3")
audio = AudioFileClip("temp.mp3")
video = video.set_audio(audio)
w,h = video.size
w = int(w/16)
h = int(h/16)
video = vfx.resize(video, (w,h))
#audio = audio.fx(resize, 0.125, method='bilinear')
w = int(w*16)
h = int(h*16)
video = vfx.resize(video, (w,h))
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def fhalf(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
mid = int(leng/2)
cutr("base."+form, 0, mid, targetname="res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def pvdownscale(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
audio = video.audio
audio.write_audiofile("temp.mp3")
clip = audi.from_mp3("temp.mp3")
clip = clip.set_frame_rate(24000)
flag = True
bit = 32
seg = int(video.duration/6)
aclips = []
for x in range(1,7) :
clip.export("temp.mp3", bitrate=str(bit)+'k', format="mp3")
audio = AudioFileClip("temp.mp3")
if x == 6 :
taudio = audio.subclip((x)*seg, video.duration)
else :
taudio = audio.subclip((x-1)*seg, seg*x)
bit/=2
aclips.append(taudio)
clips = []
for x in range(1,7) :
if x == 6 :
print("fa")
tvideo = video.subclip((x)*seg, video.duration)
else :
tvideo = video.subclip((x-1)*seg, seg*x)
h,w=video.size
h /= int(2*x)
w /= int(2*x)
tvideo = vfx.resize(tvideo, (w,h))
h *= (2*x)
w *= (2*x)
tvideo = vfx.resize(tvideo, (w,h))
tvideo = tvideo.set_audio(aclips[x-1])
clips.append(tvideo)
fclip = concatenate_videoclips(clips)
fclip.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def bhalf(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
mid = int(leng/2)
cutr("base."+form, mid, leng-1, targetname="res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
@commands.command()
async def lframe(self,ctx):
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
leng = video.duration
video.save_frame("res.png",t=leng-1,withmask=True)
try :
await ctx.send(file=discord.File('res.png'))
except:
await ctx.send("File to large")
@commands.command()
async def mp4gif(self,ctx, db=12):
os.chdir(path+"\\sounds")
url = await AV.mhwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
video.write_gif("res.gif")
try :
await ctx.send(file=discord.File('res.gif'))
except:
await ctx.send("File to large")
@commands.command()
async def gifmp4(self,ctx) :
import moviepy.video.fx.all as vfx
os.chdir(path+"\\sounds")
url = await AV.ghwnt(ctx)
form = url[-3:]
AV.dwn(url,"base."+form)
video = VideoFileClip("base."+form)
url = await AV.ahwnt(ctx)
AV.dwn(url,"base.mp3")
audio = AudioFileClip("base.mp3")
clips = []
if video.duration > audio.duration :
clips.append(video.subclip(0, audio.duration))
else :
leng=audio.duration-video.duration
clips.append(video)
while leng >= video.duration :
clips.append(video)
leng -= video.duration
clips.append(video.subclip(0,leng))
video = concatenate_videoclips(clips)
video = video.set_audio(audio)
video.write_videofile("res.mp4")
try :
await ctx.send(file=discord.File('res.mp4'))
except:
await ctx.send("File to large")
def setup(bot):
bot.add_cog(AV(bot)) | 0.069065 | 0.081264 |
import os
import sys
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
config_folder = os.path.join(current_folder, "..", "..", "matrix", "python")
sys.path.append(config_folder)
from console_formatter import Console_Formatter
from tf_index_controler import INDEX_CONTROLER
from tf_dataset_retriever import DATASET_RETRIEVER
import numpy as np
import time
import shutil
import csv
class DATASET_LABEL_ENCODER:
#PUBLIC
current_folder = os.path.join(os.getcwd())
dataset_folder = os.path.join(current_folder, 'dataset_folder')
index_file = os.path.join(current_folder, "dataset_index.txt")
dataset_mark_name = ""
index_list = []
#FOR ENCODING INDEX LIST
index_path_list = []
index_label_list = []
index_code_list = []
#FOR ENCODING FUNC
label_list = []
code_list = []
use_gpu = False
#PRIVATE
program_name_ = __name__
consoler_ = Console_Formatter(program_name_)
retriever_ = DATASET_RETRIEVER()
count_ = 0
def __init__(self, use_gpu=False):
self.use_gpu = use_gpu
def __del__(self):
pass
def init(self):
self.retriever_.init()
def encoding_index_list(self):
status = True
path_label_list = []
while status:
img_path, label, status = self.retriever_.retrieve_data()
if not status:
break
code = self.encoding_list_search(label.strip())
self.index_path_list = np.append(self.index_path_list, img_path.strip())
self.index_label_list = np.append(self.index_label_list, label.strip())
self.index_code_list = np.append(self.index_code_list, code)
path_label_list = np.append(path_label_list, img_path.strip()+","+label.strip())
return path_label_list
def encoding_list_search(self, label):
if self.label_list == []:
self.encoding_function(label)
return self.code_list[0]
label_it = iter(self.label_list)
code_it = iter(self.code_list)
while True:
try:
label_ = next(label_it)
code_ = next(code_it)
if label_.strip() == label.strip():
return code_
except StopIteration:
self.encoding_function(label)
return self.code_list[0]
def encoding_function(self, label):
if self.label_list == []:
self.label_list = [label.strip()]
self.code_list = [self.count_]
#self.label_list.insert(0, label.strip())
#self.code_list.insert(0, self.count_)
self.count_ += 1
return True
self.label_list.insert(len(self.label_list), label.strip())
self.code_list.insert(len(self.code_list), self.count_)
self.count_ += 1
return True
def load_index_file(self, data_path=None):
self.index_list = self.retriever_.load_index_file(data_path)
return self.index_list
def set_dataset_folder(self, data_path=None):
self.dataset_folder = self.dataset_folder if data_path == None else data_path
self.retriever_.dataset_folder = self.dataset_folder
return self.check_path(self.dataset_folder)
def set_index_file(self, file_path=None):
self.index_file = self.index_file if file_path == None else file_path
self.retriever_.index_file = self.index_file
return self.check_path(self.index_file)
def check_path(self, path):
return os.path.exists(path)
def current_time(self):
return time.strftime("%Y%m%d%H%M%S", time.localtime()) #%Y-%m-%d %H:%M:%S
def current_time_stamp(self):
return time.time() | include/tf_nn_motor/models/tf_dataset_label_encoder.py | import os
import sys
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
config_folder = os.path.join(current_folder, "..", "..", "matrix", "python")
sys.path.append(config_folder)
from console_formatter import Console_Formatter
from tf_index_controler import INDEX_CONTROLER
from tf_dataset_retriever import DATASET_RETRIEVER
import numpy as np
import time
import shutil
import csv
class DATASET_LABEL_ENCODER:
#PUBLIC
current_folder = os.path.join(os.getcwd())
dataset_folder = os.path.join(current_folder, 'dataset_folder')
index_file = os.path.join(current_folder, "dataset_index.txt")
dataset_mark_name = ""
index_list = []
#FOR ENCODING INDEX LIST
index_path_list = []
index_label_list = []
index_code_list = []
#FOR ENCODING FUNC
label_list = []
code_list = []
use_gpu = False
#PRIVATE
program_name_ = __name__
consoler_ = Console_Formatter(program_name_)
retriever_ = DATASET_RETRIEVER()
count_ = 0
def __init__(self, use_gpu=False):
self.use_gpu = use_gpu
def __del__(self):
pass
def init(self):
self.retriever_.init()
def encoding_index_list(self):
status = True
path_label_list = []
while status:
img_path, label, status = self.retriever_.retrieve_data()
if not status:
break
code = self.encoding_list_search(label.strip())
self.index_path_list = np.append(self.index_path_list, img_path.strip())
self.index_label_list = np.append(self.index_label_list, label.strip())
self.index_code_list = np.append(self.index_code_list, code)
path_label_list = np.append(path_label_list, img_path.strip()+","+label.strip())
return path_label_list
def encoding_list_search(self, label):
if self.label_list == []:
self.encoding_function(label)
return self.code_list[0]
label_it = iter(self.label_list)
code_it = iter(self.code_list)
while True:
try:
label_ = next(label_it)
code_ = next(code_it)
if label_.strip() == label.strip():
return code_
except StopIteration:
self.encoding_function(label)
return self.code_list[0]
def encoding_function(self, label):
if self.label_list == []:
self.label_list = [label.strip()]
self.code_list = [self.count_]
#self.label_list.insert(0, label.strip())
#self.code_list.insert(0, self.count_)
self.count_ += 1
return True
self.label_list.insert(len(self.label_list), label.strip())
self.code_list.insert(len(self.code_list), self.count_)
self.count_ += 1
return True
def load_index_file(self, data_path=None):
self.index_list = self.retriever_.load_index_file(data_path)
return self.index_list
def set_dataset_folder(self, data_path=None):
self.dataset_folder = self.dataset_folder if data_path == None else data_path
self.retriever_.dataset_folder = self.dataset_folder
return self.check_path(self.dataset_folder)
def set_index_file(self, file_path=None):
self.index_file = self.index_file if file_path == None else file_path
self.retriever_.index_file = self.index_file
return self.check_path(self.index_file)
def check_path(self, path):
return os.path.exists(path)
def current_time(self):
return time.strftime("%Y%m%d%H%M%S", time.localtime()) #%Y-%m-%d %H:%M:%S
def current_time_stamp(self):
return time.time() | 0.096354 | 0.062245 |
import shutil
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import collections
from scipy.optimize import minimize_scalar
# Column names for the nine sampled formant frames (odd frames 3..19):
# F1_3..F1_19 for the first formant, F2_3..F2_19 for the second.
cols1 = [f'F1_{frame}' for frame in range(3, 20, 2)]
cols2 = [f'F2_{frame}' for frame in range(3, 20, 2)]
# Combined column list used when aggregating both formants at once.
kCols = cols1 + cols2
def PlotWithSlices(df, data_name, output_dir):
    """For each demographic grouping column, plot mean F1/F2 trajectories.

    For every group column the rows of *df* are averaged per group value, a
    4th-degree polynomial is fitted to each mean F1 and F2 trajectory, and the
    point where the fitted curve's 2nd derivative is maximal on [0, 8] is
    marked with a vertical black tick. All group values are stacked along the
    x-axis of one 3D figure, saved as '<data_name>@<group_name>.png'.

    Args:
        df: DataFrame containing the grouping columns and the kCols measurement
            columns. NOTE(review): assumes one row per token — confirm upstream.
        data_name: label prefix used in the figure title and file name.
        output_dir: pathlib.Path of the directory the PNGs are written to.
    """
    for group_name in ['Gender', 'AgeGroup', 'Family1', 'Family2', 'Family3', 'Family4', 'Education1', 'Career1', 'Career2', 'Language1', 'Word']:
        grouped_df = df.groupby([group_name])[kCols].mean()
        # grouped_df.to_csv(output_dir / (data_name + '_' + group_name + '_raw.csv'), index=True)
        full_group_name = '@'.join([data_name, group_name])
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        print(group_name)
        z_label = grouped_df.index.to_numpy().tolist()
        print(z_label)
        cmap = plt.get_cmap('viridis')
        colors = cmap(np.linspace(0, 1, len(z_label)))
        # Hoisted loop invariant: the nine sample positions are the same for
        # every slice.
        x = np.arange(0, 9)
        for z, key in enumerate(z_label):
            # z doubles as both the color index and the depth (zdir='x') slot,
            # replacing the original duplicated z_label.index(key) lookups.
            color = colors[z]
            mdf = grouped_df.loc[key]
            y1 = mdf[cols1].to_numpy(dtype='float')
            y2 = mdf[cols2].to_numpy(dtype='float')
            # Fit degree-4 polynomials; their 2nd derivatives are quadratics.
            line1 = np.poly1d(np.polyfit(x, y1, 4))
            line2 = np.poly1d(np.polyfit(x, y2, 4))
            line1dd = np.polyder(line1, 2)
            line2dd = np.polyder(line2, 2)
            # Maximize the 2nd derivative by minimizing its negation (np.poly1d
            # supports unary minus) over the sampled interval.
            line1dd_max = minimize_scalar(-line1dd,
                                          bounds=(0, 8), method='bounded')
            line2dd_max = minimize_scalar(-line2dd,
                                          bounds=(0, 8), method='bounded')
            inflection1 = line1dd_max.x
            inflection2 = line2dd_max.x
            inflection1y = line1(inflection1)
            inflection2y = line2(inflection2)
            # NOTE(review): 'F1'/'F2' labels are re-added every iteration, so
            # the legend repeats them per group value — kept as-is.
            ax.plot(x, y1, zs=z, zdir='x', c=color, label='F1', linewidth=3.0)
            ax.plot(x, y2, zs=z, zdir='x', c=color, label='F2')
            # Vertical tick (±100 around the fitted value) marking each break.
            ax.plot([inflection1, inflection1], [inflection1y-100, inflection1y+100], zs=z, zdir='x', c='black')
            ax.plot([inflection2, inflection2], [inflection2y-100, inflection2y+100], zs=z, zdir='x', c='black')
        ax.set(xticks=range(len(z_label)), xticklabels=z_label)
        plt.title(full_group_name)
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.savefig(output_dir / (full_group_name + '.png'),
                    bbox_inches="tight")
        # Fix: close the figure created above instead of merely clearing it.
        # The original plt.clf()/plt.cla() left one open figure per group,
        # accumulating figures (and memory) across the 11 groups.
        plt.close(fig)
def PlotNoSlice(df, full_group_name, output_dir):
    """Plot mean F1/F2 contours with quartic fits, then their curvatures.

    Two PNGs are written to ``output_dir``: ``<name>.fitted.png`` showing the
    raw points with the fitted curves, and ``<name>.break.png`` showing the
    second derivatives with the "break" (peak-curvature) points marked.

    Args:
        df: Series/row holding the mean F1_*/F2_* values.
        full_group_name: Plot title and output file-name stem.
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    sample_x = np.arange(0, 9)
    f1_vals = df[cols1].to_numpy(dtype='float')
    f2_vals = df[cols2].to_numpy(dtype='float')
    f1_fit = np.poly1d(np.polyfit(sample_x, f1_vals, 4))
    f2_fit = np.poly1d(np.polyfit(sample_x, f2_vals, 4))
    f1_curv = np.polyder(f1_fit, 2)
    f2_curv = np.polyder(f2_fit, 2)
    # "Break" point: where the curvature peaks within the sampled range.
    f1_break = minimize_scalar(-f1_curv,
                               bounds=(0, 8), method='bounded').x
    f2_break = minimize_scalar(-f2_curv,
                               bounds=(0, 8), method='bounded').x
    # First figure: raw values plus the fitted polynomials.
    plt.plot(sample_x, f1_vals, 'o')
    plt.plot(sample_x, f2_vals, 'x')
    plt.plot(sample_x, f1_fit(sample_x), label='F1 fitted')
    plt.plot(sample_x, f2_fit(sample_x), label='F2 fitted')
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(full_group_name)
    plt.savefig(output_dir / (full_group_name + '.fitted.png'),
                bbox_inches="tight")
    plt.clf()
    plt.cla()
    # Second figure: curvature curves with the break locations marked.
    plt.plot(sample_x, f1_curv(sample_x), label='F1 2nd deriv')
    plt.plot(sample_x, f2_curv(sample_x), label='F2 2nd deriv')
    plt.axvline(x=f1_break, linestyle=':', label='F1 break')
    plt.axvline(x=f2_break, linestyle='-.', label='F2 break')
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(full_group_name)
    plt.savefig(output_dir / (full_group_name + '.break.png'),
                bbox_inches="tight")
    plt.clf()
    plt.cla()
def SlicePlotDataS(df, output_dir):
    """Produce the per-position plots for the single-word ("S") data set.

    Rows are split on the ``IsSba2`` flag, averaged per ``Pos`` and plotted
    without slicing; additionally, every S-language row annotated 'a2' at
    position 'b' is collected and plotted with demographic slices.

    Args:
        df: DataFrame of the S data set (one row per recording).
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    # Fix: the function previously rebuilt cols1/cols2/kCols locally,
    # shadowing the identical module-level constants, and initialized
    # matched_rows twice; both redundancies are removed.
    sa_a1_sb_a1 = df[df['IsSba2'] == 'No']
    sa_a1_sb_a1_mean = sa_a1_sb_a1.groupby(['Pos'])[kCols].mean()
    PlotNoSlice(sa_a1_sb_a1_mean.iloc[0], 'sa_a1_sb_a1_a', output_dir)
    PlotNoSlice(sa_a1_sb_a1_mean.iloc[1], 'sa_a1_sb_a1_b', output_dir)
    sa_a1_sb_a2 = df[df['IsSba2'] == 'Yes']
    sa_a1_sb_a2_mean = sa_a1_sb_a2.groupby(['Pos'])[kCols].mean()
    PlotNoSlice(sa_a1_sb_a2_mean.iloc[0], 'sa_a1_sb_a2_a', output_dir)
    PlotNoSlice(sa_a1_sb_a2_mean.iloc[1], 'sa_a1_sb_a2_b', output_dir)
    # Collect every S-language row recorded at position 'b' with annotation
    # 'a2'.  Filename fields are '_'-separated: index 0 is the language code,
    # index 4 the position — presumably; verify against the data generator.
    matched_rows = []
    for _, row in df.iterrows():
        comps = row['Filename'].split('_')
        if comps[0] == 'S' and comps[4] == 'b' and row['Annotation'] == 'a2':
            matched_rows.append(row)
    PlotWithSlices(pd.DataFrame(matched_rows), 'all_s_sb_a2', output_dir)
def SlicePlotDataM(df, output_dir):
    """Produce sliced and averaged plots for the multi-word ("M") data set.

    The rows are partitioned on the ``IsSba2`` flag; each partition is
    plotted once per demographic slice and once as a single mean contour.

    Args:
        df: DataFrame of the M data set (one row per recording).
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    for flag, plot_name in (('No', 'm_sb_a1'), ('Yes', 'm_sb_a2')):
        subset = df[df['IsSba2'] == flag]
        PlotWithSlices(subset, plot_name, output_dir)
        # Grouping on the (now constant) flag column yields one mean row.
        subset_mean = subset.groupby(['IsSba2'])[kCols].mean()
        PlotNoSlice(subset_mean.iloc[0], plot_name, output_dir)
# --- Script entry point --------------------------------------------------
# Reads the raw plot-data CSVs (produced by an earlier analysis step, to
# judge from the paths) and writes break-point plots into a freshly
# recreated ./analysis/output/break/ directory.
input_base_dir = Path('./analysis/output/')
output_base_dir = Path('./analysis/output/break/')
shutil.rmtree(output_base_dir, ignore_errors=True)  # drop plots from previous runs
output_base_dir.mkdir(parents=True, exist_ok=True)
df = pd.read_csv(input_base_dir / 'S_all_plot_raw_data.csv')  # single-word ("S") set
SlicePlotDataS(df, output_base_dir)
df = pd.read_csv(input_base_dir / 'M_all_plot_raw_data.csv')  # multi-word ("M") set
SlicePlotDataM(df, output_base_dir) | analysis/plot_break.py | import shutil
from pathlib import Path
import itertools
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import collections
from scipy.optimize import minimize_scalar
# Formant measurement columns: F1_3..F1_19 and F2_3..F2_19 (odd indices only),
# giving nine sample points per formant track.  kCols is the combined list
# used for the groupby/mean aggregations below.
cols1 = ['F1_' + str(i) for i in range(3, 20, 2)]
cols2 = ['F2_' + str(i) for i in range(3, 20, 2)]
kCols = cols1 + cols2
def PlotWithSlices(df, data_name, output_dir):
    """Plot mean F1/F2 trajectories as 3D slices, one figure per grouping.

    For each demographic/word grouping column, the rows of ``df`` are averaged
    over the formant columns (``kCols``), a quartic polynomial is fitted to
    each group's F1 and F2 contour, and the contour's "break" point (the
    maximum of the fit's second derivative on [0, 8]) is marked with a short
    vertical bar.  One PNG per grouping is written to ``output_dir``.

    Args:
        df: DataFrame with the grouping columns and the F1_*/F2_* columns.
        data_name: Prefix used in the plot title and the output file name.
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    x = np.arange(0, 9)  # hoisted: one sample point per formant column
    for group_name in ['Gender', 'AgeGroup', 'Family1', 'Family2', 'Family3', 'Family4', 'Education1', 'Career1', 'Career2', 'Language1', 'Word']:
        grouped_df = df.groupby([group_name])[kCols].mean()
        full_group_name = '@'.join([data_name, group_name])
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        print(group_name)
        z_label = grouped_df.index.to_numpy().tolist()
        print(z_label)
        cmap = plt.get_cmap('viridis')
        colors = cmap(np.linspace(0, 1, len(z_label)))
        # enumerate() replaces the repeated O(n) z_label.index(key) lookups;
        # the groupby index is unique, so the position is equivalent.
        for z, key in enumerate(z_label):
            color = colors[z]
            mdf = grouped_df.loc[key]
            y1 = mdf[cols1].to_numpy(dtype='float')
            y2 = mdf[cols2].to_numpy(dtype='float')
            line1 = np.poly1d(np.polyfit(x, y1, 4))
            line2 = np.poly1d(np.polyfit(x, y2, 4))
            # Break point = maximum of the 2nd derivative, found by minimizing
            # its negation over the sampled interval [0, 8].
            line1dd = np.polyder(line1, 2)
            line2dd = np.polyder(line2, 2)
            inflection1 = minimize_scalar(-line1dd,
                                          bounds=(0, 8), method='bounded').x
            inflection2 = minimize_scalar(-line2dd,
                                          bounds=(0, 8), method='bounded').x
            inflection1y = line1(inflection1)
            inflection2y = line2(inflection2)
            ax.plot(x, y1, zs=z, zdir='x', c=color, label='F1', linewidth=3.0)
            ax.plot(x, y2, zs=z, zdir='x', c=color, label='F2')
            ax.plot([inflection1, inflection1], [inflection1y-100, inflection1y+100], zs=z, zdir='x', c='black')
            ax.plot([inflection2, inflection2], [inflection2y-100, inflection2y+100], zs=z, zdir='x', c='black')
        ax.set(xticks=range(len(z_label)), xticklabels=z_label)
        plt.title(full_group_name)
        plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
        plt.savefig(output_dir / (full_group_name + '.png'),
                    bbox_inches="tight")
        # Fix: clf()/cla() only cleared the current figure; every fig created
        # above stayed open, leaking memory across the 11 groupings.
        plt.close(fig)
def PlotNoSlice(df, full_group_name, output_dir):
    """Plot mean F1/F2 contours with quartic fits, then their curvatures.

    Two PNGs are written to ``output_dir``: ``<name>.fitted.png`` showing the
    raw points with the fitted curves, and ``<name>.break.png`` showing the
    second derivatives with the "break" (peak-curvature) points marked.

    Args:
        df: Series/row holding the mean F1_*/F2_* values.
        full_group_name: Plot title and output file-name stem.
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    sample_x = np.arange(0, 9)
    f1_vals = df[cols1].to_numpy(dtype='float')
    f2_vals = df[cols2].to_numpy(dtype='float')
    f1_fit = np.poly1d(np.polyfit(sample_x, f1_vals, 4))
    f2_fit = np.poly1d(np.polyfit(sample_x, f2_vals, 4))
    f1_curv = np.polyder(f1_fit, 2)
    f2_curv = np.polyder(f2_fit, 2)
    # "Break" point: where the curvature peaks within the sampled range.
    f1_break = minimize_scalar(-f1_curv,
                               bounds=(0, 8), method='bounded').x
    f2_break = minimize_scalar(-f2_curv,
                               bounds=(0, 8), method='bounded').x
    # First figure: raw values plus the fitted polynomials.
    plt.plot(sample_x, f1_vals, 'o')
    plt.plot(sample_x, f2_vals, 'x')
    plt.plot(sample_x, f1_fit(sample_x), label='F1 fitted')
    plt.plot(sample_x, f2_fit(sample_x), label='F2 fitted')
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(full_group_name)
    plt.savefig(output_dir / (full_group_name + '.fitted.png'),
                bbox_inches="tight")
    plt.clf()
    plt.cla()
    # Second figure: curvature curves with the break locations marked.
    plt.plot(sample_x, f1_curv(sample_x), label='F1 2nd deriv')
    plt.plot(sample_x, f2_curv(sample_x), label='F2 2nd deriv')
    plt.axvline(x=f1_break, linestyle=':', label='F1 break')
    plt.axvline(x=f2_break, linestyle='-.', label='F2 break')
    plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left")
    plt.title(full_group_name)
    plt.savefig(output_dir / (full_group_name + '.break.png'),
                bbox_inches="tight")
    plt.clf()
    plt.cla()
def SlicePlotDataS(df, output_dir):
    """Produce the per-position plots for the single-word ("S") data set.

    Rows are split on the ``IsSba2`` flag, averaged per ``Pos`` and plotted
    without slicing; additionally, every S-language row annotated 'a2' at
    position 'b' is collected and plotted with demographic slices.

    Args:
        df: DataFrame of the S data set (one row per recording).
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    # Fix: the function previously rebuilt cols1/cols2/kCols locally,
    # shadowing the identical module-level constants, and initialized
    # matched_rows twice; both redundancies are removed.
    sa_a1_sb_a1 = df[df['IsSba2'] == 'No']
    sa_a1_sb_a1_mean = sa_a1_sb_a1.groupby(['Pos'])[kCols].mean()
    PlotNoSlice(sa_a1_sb_a1_mean.iloc[0], 'sa_a1_sb_a1_a', output_dir)
    PlotNoSlice(sa_a1_sb_a1_mean.iloc[1], 'sa_a1_sb_a1_b', output_dir)
    sa_a1_sb_a2 = df[df['IsSba2'] == 'Yes']
    sa_a1_sb_a2_mean = sa_a1_sb_a2.groupby(['Pos'])[kCols].mean()
    PlotNoSlice(sa_a1_sb_a2_mean.iloc[0], 'sa_a1_sb_a2_a', output_dir)
    PlotNoSlice(sa_a1_sb_a2_mean.iloc[1], 'sa_a1_sb_a2_b', output_dir)
    # Collect every S-language row recorded at position 'b' with annotation
    # 'a2'.  Filename fields are '_'-separated: index 0 is the language code,
    # index 4 the position — presumably; verify against the data generator.
    matched_rows = []
    for _, row in df.iterrows():
        comps = row['Filename'].split('_')
        if comps[0] == 'S' and comps[4] == 'b' and row['Annotation'] == 'a2':
            matched_rows.append(row)
    PlotWithSlices(pd.DataFrame(matched_rows), 'all_s_sb_a2', output_dir)
def SlicePlotDataM(df, output_dir):
    """Produce sliced and averaged plots for the multi-word ("M") data set.

    The rows are partitioned on the ``IsSba2`` flag; each partition is
    plotted once per demographic slice and once as a single mean contour.

    Args:
        df: DataFrame of the M data set (one row per recording).
        output_dir: pathlib.Path of the directory receiving the PNG files.
    """
    for flag, plot_name in (('No', 'm_sb_a1'), ('Yes', 'm_sb_a2')):
        subset = df[df['IsSba2'] == flag]
        PlotWithSlices(subset, plot_name, output_dir)
        # Grouping on the (now constant) flag column yields one mean row.
        subset_mean = subset.groupby(['IsSba2'])[kCols].mean()
        PlotNoSlice(subset_mean.iloc[0], plot_name, output_dir)
# --- Script entry point --------------------------------------------------
# Reads the raw plot-data CSVs (produced by an earlier analysis step, to
# judge from the paths) and writes break-point plots into a freshly
# recreated ./analysis/output/break/ directory.
input_base_dir = Path('./analysis/output/')
output_base_dir = Path('./analysis/output/break/')
shutil.rmtree(output_base_dir, ignore_errors=True)  # drop plots from previous runs
output_base_dir.mkdir(parents=True, exist_ok=True)
df = pd.read_csv(input_base_dir / 'S_all_plot_raw_data.csv')  # single-word ("S") set
SlicePlotDataS(df, output_base_dir)
df = pd.read_csv(input_base_dir / 'M_all_plot_raw_data.csv')  # multi-word ("M") set
SlicePlotDataM(df, output_base_dir) | 0.545044 | 0.401923 |
import requests # We use Python "requests" module to do HTTP GET query
import json # Import JSON encoder and decode module
from operator import itemgetter
from apicem_config import * # APIC-EM IP is assigned in apicem_config.py
requests.packages.urllib3.disable_warnings() # Remove this line if not using Python 3
device_list = []
# Build the device list: [hostname, type, managementIpAddress, id] per device.
url = "https://"+apicem_ip+"/api/v0/network-device/count"  # total device count
resp = requests.get(url, verify=False)
response_json = resp.json()
count = response_json["response"]  # number of known network devices
if count > 0:
    url = "https://"+apicem_ip+"/api/v0/network-device/1/"+str(count)
    resp = requests.get(url, verify=False)  # GET /network-device/{startIndex}/{recordsToReturn}
    response_json = resp.json()
    for item in response_json["response"]:
        device_list.append([item["hostname"], item["type"], item["managementIpAddress"], item["id"]])
    device_list.sort()
else:
    print("No network device found !")
    # Fix: the script previously fell through after this message and crashed
    # with an IndexError on the empty list; stop cleanly instead.
    raise SystemExit(1)

# Assuming the first network device in list is used
# Fix: the code indexed device_list[3][3] (the FOURTH device) while every
# message claimed the first one was used, and crashed whenever fewer than
# four devices were registered.  Use the first device, as documented.
device_id = device_list[0][3]
print("Assuming the first network device in list is used:", device_id)
url = "https://"+apicem_ip+"/api/v0/network-device/"+device_id+"/location"
r = requests.get(url, verify=False)
response_json = r.json()
# Find out location detail if this network device has been assigned a location
if r.status_code == 200:
    l_id = response_json["response"]["location"]
    print("This is the location id of this network device:", l_id)
    print("Now we query the detail of this location by this id")
    url = "https://"+apicem_ip+"/api/v0/location/"+l_id
    r2 = requests.get(url, verify=False)
    r2_json = r2.json()
    print("Location by id query status: ", r2.status_code)
    print(json.dumps(r2_json["response"], indent=4))
else:
    print("Network-device-id-location query status:", r.status_code)
    print(response_json)
    print("No location has been assigned to this network device")
print ("No location has been assigned to this network device") | lab5-04-get-network-device-id-location.py | import requests # We use Python "requests" module to do HTTP GET query
import json # Import JSON encoder and decode module
from operator import itemgetter
from apicem_config import * # APIC-EM IP is assigned in apicem_config.py
requests.packages.urllib3.disable_warnings() # Remove this line if not using Python 3
device_list = []
# Build the device list: [hostname, type, managementIpAddress, id] per device.
url = "https://"+apicem_ip+"/api/v0/network-device/count"  # total device count
resp = requests.get(url, verify=False)
response_json = resp.json()
count = response_json["response"]  # number of known network devices
if count > 0:
    url = "https://"+apicem_ip+"/api/v0/network-device/1/"+str(count)
    resp = requests.get(url, verify=False)  # GET /network-device/{startIndex}/{recordsToReturn}
    response_json = resp.json()
    for item in response_json["response"]:
        device_list.append([item["hostname"], item["type"], item["managementIpAddress"], item["id"]])
    device_list.sort()
else:
    print("No network device found !")
    # Fix: the script previously fell through after this message and crashed
    # with an IndexError on the empty list; stop cleanly instead.
    raise SystemExit(1)

# Assuming the first network device in list is used
# Fix: the code indexed device_list[3][3] (the FOURTH device) while every
# message claimed the first one was used, and crashed whenever fewer than
# four devices were registered.  Use the first device, as documented.
device_id = device_list[0][3]
print("Assuming the first network device in list is used:", device_id)
url = "https://"+apicem_ip+"/api/v0/network-device/"+device_id+"/location"
r = requests.get(url, verify=False)
response_json = r.json()
# Find out location detail if this network device has been assigned a location
if r.status_code == 200:
    l_id = response_json["response"]["location"]
    print("This is the location id of this network device:", l_id)
    print("Now we query the detail of this location by this id")
    url = "https://"+apicem_ip+"/api/v0/location/"+l_id
    r2 = requests.get(url, verify=False)
    r2_json = r2.json()
    print("Location by id query status: ", r2.status_code)
    print(json.dumps(r2_json["response"], indent=4))
else:
    print("Network-device-id-location query status:", r.status_code)
    print(response_json)
    print("No location has been assigned to this network device")
print ("No location has been assigned to this network device") | 0.330579 | 0.064801 |
import os
import sys
import warnings
warnings.simplefilter("ignore")
import csv
import math
import tkinter
import tkinter.filedialog
import tkinter.messagebox
import numpy
from scipy.misc import imresize
from keras.models import load_model
import keras.backend
from matplotlib import pyplot as plt
from loadConfiguration import Configuration
from imagedObject import FileImagedObject
from getObjectHierarchyLabels import getObjectHierarchyLabels
def getLayerActivation(modelToUse, inputLayerName, outputLayerName, filterNumber, inputImageData):
    """Return the 2-D activation map of one filter of one model layer.

    Builds a Keras backend function from the named input layer to the named
    output layer, feeds a single image through it, and slices out the map of
    the requested filter.

    Args:
        modelToUse: Keras model to probe.
        inputLayerName: Name of the model's input layer.
        outputLayerName: Name of the layer whose activation is wanted.
        filterNumber: Index of the filter within that layer's output.
        inputImageData: A single image, without a batch axis.

    Returns:
        2-D numpy array holding the chosen filter's activation.
    """
    in_tensor = modelToUse.get_layer(name=inputLayerName, index=0).input
    out_tensor = modelToUse.get_layer(name=outputLayerName).output
    activation_fn = keras.backend.function([in_tensor], [out_tensor])
    # The model expects batched input, so wrap the single image in a batch of 1.
    batched_input = numpy.expand_dims(inputImageData, axis=0)
    # The extra 0 argument selects the model's validation mode (no dropout).
    all_outputs = numpy.array(activation_fn([batched_input, 0]))
    # Axis 0 indexes the (single) requested output tensor and axis 1 the
    # (single) batch entry; the trailing index picks the filter, leaving a
    # 2-D image.
    return all_outputs[0, 0, :, :, filterNumber]
def addLayerActivations(modelToUse, inputLayerName, outputLayerNames, filterNumbers, inputImageData, outputShape):
    """Sum several layer activations after resizing each to a common shape.

    Args:
        modelToUse: Keras model to probe.
        inputLayerName: Name of the model's input layer.
        outputLayerNames: Layer names whose activations are summed.
        filterNumbers: One filter index per entry of ``outputLayerNames``.
        inputImageData: A single image, without a batch axis.
        outputShape: (height, width) that every activation is resized to.

    Returns:
        2-D numpy array of shape ``outputShape`` holding the summed maps.

    Raises:
        ValueError: If the layer-name and filter lists differ in length.
    """
    # Fix: raise ValueError (a subclass of Exception, so existing callers
    # are unaffected) instead of the bare generic Exception.
    if len(outputLayerNames) != len(filterNumbers):
        raise ValueError("Each output layer needs an associated filter")
    currentSum = numpy.zeros(shape=outputShape)
    for layerName, filterNumber in zip(outputLayerNames, filterNumbers):
        activation = getLayerActivation(modelToUse, inputLayerName, layerName, filterNumber, inputImageData)
        # NOTE(review): scipy.misc.imresize is deprecated/removed in newer
        # scipy and also rescales intensities; kept for compatibility with
        # the original behaviour.
        currentSum = numpy.add(currentSum, imresize(activation, outputShape))
    return currentSum
def createObjectHierarchyLabelString(labels):
    """Render the hierarchy labels as an indented, human-readable string.

    Args:
        labels: Sequence of label lists, one list per hierarchy level.

    Returns:
        Multi-line string listing every label under its level heading.
    """
    parts = ["Labels in object hierarchy:"+"\n"]
    for levelIndex, levelLabels in enumerate(labels):
        parts.append("\n"+"Level: "+str(levelIndex))
        parts.extend("\n"+"   "+label for label in levelLabels)
    return "".join(parts)
def main():
    """Interactively classify a single imaged object with a trained B-CNN.

    The user is prompted (via tkinter file dialogs) for the model file, a
    matching inputConfiguration.txt, and one image file per channel.  The
    object's labels are predicted at every level of the type hierarchy, the
    per-class probabilities are written to predictedProbabilites.txt, and
    diagnostic plots (channel images, location heatmap, overlay) are saved
    to the current working directory.
    """
    gui=tkinter.Tk()
    gui.withdraw() #Hides the main window of the GUI as it is not needed.
    input("You will now select the B-CNN model file, press enter to continue")
    modelPath=tkinter.filedialog.askopenfilename()
    print("B-CNN model file "+modelPath+" selected")
    loadedModel=load_model(modelPath)
    print("\n")
    input("You will now select an appropiate inputConfiguration.txt file that is compatible with the model, press enter to continue")
    inputConfigurationFilePath=tkinter.filedialog.askopenfilename()
    print("inputConfiguration file "+inputConfigurationFilePath+" selected")
    inputConfiguration=Configuration(inputConfigurationFilePath,"=")
    allowedFileSuffixes=inputConfiguration.getConfigurationValue("useFileSuffix","raw")
    channelsPerImagedObject=1 if(type(allowedFileSuffixes)==str) else len(allowedFileSuffixes)
    desiredImageSize=inputConfiguration.getConfigurationValue("desiredImageSize","int")
    contigiousEqualAreaRejectionThreshold=inputConfiguration.getConfigurationValue("contigiousEqualAreaRejectionThreshold","int")
    objectTypeLabels=inputConfiguration.getConfigurationValue("allowedObjectType","raw")
    #A map between object types and their corresponding label lists is created.
    objectTypeLabelDictionary={i[0]:tuple(i[1:]) for i in objectTypeLabels}
    objectTypePossibleLabelSets,objectHierarchyDepth=getObjectHierarchyLabels(list(objectTypeLabelDictionary.values()))
    #The input configuration is printed below
    print("\n")
    print("Input configuration loaded:")
    print("  Original file suffixes:")
    for currentSuffix in allowedFileSuffixes:
        print("    "+currentSuffix)
    print("  Number of channels for input objects: "+str(channelsPerImagedObject))
    print("  Image size: "+str(desiredImageSize)+" pixels")
    print("  Contigious colour area rejection threshold: "+("Disabled" if(contigiousEqualAreaRejectionThreshold is None) else str(contigiousEqualAreaRejectionThreshold)))
    print("  Labels at each level in the object type heirarchy:")
    for i in range(0,objectHierarchyDepth):
        print("    Level "+str(i)+":")
        for j in objectTypePossibleLabelSets[i]:
            print("      "+j)
    #The input images are now selected, one file dialog per channel.
    imageFilePaths=["" for i in range(0,channelsPerImagedObject)]
    print("\n")
    print("\n")
    print("File selection for an input object will now occur")
    for i in range(0,channelsPerImagedObject):
        print("\n")
        input("Image "+str(i+1)+" will now be loaded; original training file suffix was "+str(allowedFileSuffixes[i])+". Press enter to continue")
        currentChosenFilePath=tkinter.filedialog.askopenfilename()
        print("File path "+currentChosenFilePath+" was chosen" if(currentChosenFilePath!="") else "No file chosen")
        imageFilePaths[i]=currentChosenFilePath
    print("\n")
    print("The following files were chosen:")
    for currentIndex,currentFilePath in enumerate(imageFilePaths):
        print("  "+allowedFileSuffixes[currentIndex]+" slot: "+currentFilePath)
    input("An ImagedObject will now be created from these files, press enter to continue")
    loadedImagedObject=FileImagedObject(imageFilePaths," ",None,desiredImageSize,contigiousEqualAreaRejectionThreshold)
    if(loadedImagedObject.nonBlankImageCount==0): #If no images in loadedImagedObject are usable the program will exit when the user is ready.
        print("None of the loaded images are usable due to the following reasons that may not be limited to only one: ")
        print("  Images being rejected due to defects such as a non square shape, being detected as being from the edge of a survey")
        print("  Too many channels did not have an image chosen")
        input("Press enter to exit")
        sys.exit()
    predictedImage=numpy.expand_dims(loadedImagedObject.imageData,axis=0)
    predictedProbabilities=loadedModel.predict(predictedImage)
    predictedClasses=[currentProbabilites.argmax() for currentProbabilites in predictedProbabilities] #Represents the most likely labels using integers.
    predictedClassesStrings=[objectTypePossibleLabelSets[i][predictedClasses[i]] for i in range(0,objectHierarchyDepth)]
    #Displays the predicted probabilities in the terminal and writes them to a file.
    #Fix: the report file is now opened via a context manager so the handle is
    #closed even if a write fails part-way through (it was open()/close() before).
    with open("predictedProbabilites.txt","w") as outputPredictedProbabilitiesFile:
        print("\n")
        print("Saving predicted probabilities at location "+os.getcwd()+"/predictedProbabilities.txt")
        print("Predicted label is: "+str(predictedClassesStrings)+", predicted label probabilites are: ")
        outputPredictedProbabilitiesFile.write("Predicted label is:"+str(predictedClassesStrings)+", predicted label probabilites are: ")
        for i in range(0,objectHierarchyDepth):
            print("Label level "+str(i)+":")
            outputPredictedProbabilitiesFile.write("\n"+"Label level "+str(i)+":")
            for j,currentLabel in enumerate(objectTypePossibleLabelSets[i]):
                currentProbability=(predictedProbabilities[i][0,j])*100.0
                print("  "+currentLabel+": "+str(currentProbability)+"%")
                outputPredictedProbabilitiesFile.write("\n"+"  "+currentLabel+": "+str(currentProbability)+"%")
    print("\n")
    print("Plots will now be created and saved")
    #Plots are created below
    numberOfImages=loadedImagedObject.imageData.shape[2]
    subplotDivision=math.ceil(math.sqrt(numberOfImages)) #Done so the images are arranged in a shape as close to a square as possible.
    #The image channels of loadedImagedObject are shown.
    imageDataFigure=plt.figure(figsize=(8*subplotDivision,8*subplotDivision))
    plt.suptitle("Plot of ImagedObject channels")
    for i in range(0,numberOfImages):
        #Fix: pass the grid position as three integers; the previous digit
        #string (e.g. "331") breaks once any dimension exceeds 9.
        currentimageDataAxes=imageDataFigure.add_subplot(subplotDivision,subplotDivision,i+1)
        currentimageDataAxes.set_title(allowedFileSuffixes[i]+" channel slot")
        currentimageDataAxes.imshow(loadedImagedObject.imageData[:,:,i],cmap="hot")
    print("Saving plot of ImagedObject channels at location "+os.getcwd()+"/imageDataFigure.png")
    imageDataFigure.savefig("imageDataFigure.png")
    outputLayerNames=["out"+str(i+1)+"LocationHeatmap" for i in range(0,objectHierarchyDepth)] #Each output location heatmap is labeled sequentially from the output location heatmap closest to the input layer.
    totalLocationHeatmap=addLayerActivations(loadedModel,"mainInput",outputLayerNames,predictedClasses,loadedImagedObject.imageData,(loadedImagedObject.imageData.shape[0],loadedImagedObject.imageData.shape[1]))
    #The previously created location heatmap is shown.
    heatmapFigure,heatmapAxes=plt.subplots(figsize=(8,8))
    heatmapAxes.set_title("Total location heatmap")
    heatmapAxes.imshow(totalLocationHeatmap)
    print("Saving plot of total location heatmap at location "+os.getcwd()+"/totalLocationHeatmap.png")
    heatmapFigure.savefig("totalLocationHeatmap.png")
    #The image channels of loadedImagedObject are shown with an overlay of the previously created location heatmap.
    imageDataLocationHeatmapFigure=plt.figure(figsize=(8*subplotDivision,8*subplotDivision))
    plt.suptitle("Plot of ImagedObject channels with total location heatmap overlay")
    for i in range(0,numberOfImages):
        #Fix: integer grid position here as well (see above).
        currentimageDataLocationHeatmapAxes=imageDataLocationHeatmapFigure.add_subplot(subplotDivision,subplotDivision,i+1)
        currentimageDataLocationHeatmapAxes.set_title(allowedFileSuffixes[i]+" channel slot")
        currentimageDataLocationHeatmapAxes.imshow(loadedImagedObject.imageData[:,:,i],cmap="hot")
        currentimageDataLocationHeatmapAxes.imshow(totalLocationHeatmap,alpha=0.4,cmap="winter")
    print("Saving plot of ImaghedObject channels with total location heatmap overlay at location "+os.getcwd()+"/totalLocationHeatmap.png")
    imageDataLocationHeatmapFigure.savefig("imageDataTotalLocationHeatmapFigure.png")
main() | bcnnSingleObjectClassifier.py | import os
import sys
import warnings
warnings.simplefilter("ignore")
import csv
import math
import tkinter
import tkinter.filedialog
import tkinter.messagebox
import numpy
from scipy.misc import imresize
from keras.models import load_model
import keras.backend
from matplotlib import pyplot as plt
from loadConfiguration import Configuration
from imagedObject import FileImagedObject
from getObjectHierarchyLabels import getObjectHierarchyLabels
def getLayerActivation(modelToUse, inputLayerName, outputLayerName, filterNumber, inputImageData):
    """Return the 2-D activation map of one filter of one model layer.

    Builds a Keras backend function from the named input layer to the named
    output layer, feeds a single image through it, and slices out the map of
    the requested filter.

    Args:
        modelToUse: Keras model to probe.
        inputLayerName: Name of the model's input layer.
        outputLayerName: Name of the layer whose activation is wanted.
        filterNumber: Index of the filter within that layer's output.
        inputImageData: A single image, without a batch axis.

    Returns:
        2-D numpy array holding the chosen filter's activation.
    """
    in_tensor = modelToUse.get_layer(name=inputLayerName, index=0).input
    out_tensor = modelToUse.get_layer(name=outputLayerName).output
    activation_fn = keras.backend.function([in_tensor], [out_tensor])
    # The model expects batched input, so wrap the single image in a batch of 1.
    batched_input = numpy.expand_dims(inputImageData, axis=0)
    # The extra 0 argument selects the model's validation mode (no dropout).
    all_outputs = numpy.array(activation_fn([batched_input, 0]))
    # Axis 0 indexes the (single) requested output tensor and axis 1 the
    # (single) batch entry; the trailing index picks the filter, leaving a
    # 2-D image.
    return all_outputs[0, 0, :, :, filterNumber]
def addLayerActivations(modelToUse, inputLayerName, outputLayerNames, filterNumbers, inputImageData, outputShape):
    """Sum several layer activations after resizing each to a common shape.

    Args:
        modelToUse: Keras model to probe.
        inputLayerName: Name of the model's input layer.
        outputLayerNames: Layer names whose activations are summed.
        filterNumbers: One filter index per entry of ``outputLayerNames``.
        inputImageData: A single image, without a batch axis.
        outputShape: (height, width) that every activation is resized to.

    Returns:
        2-D numpy array of shape ``outputShape`` holding the summed maps.

    Raises:
        ValueError: If the layer-name and filter lists differ in length.
    """
    # Fix: raise ValueError (a subclass of Exception, so existing callers
    # are unaffected) instead of the bare generic Exception.
    if len(outputLayerNames) != len(filterNumbers):
        raise ValueError("Each output layer needs an associated filter")
    currentSum = numpy.zeros(shape=outputShape)
    for layerName, filterNumber in zip(outputLayerNames, filterNumbers):
        activation = getLayerActivation(modelToUse, inputLayerName, layerName, filterNumber, inputImageData)
        # NOTE(review): scipy.misc.imresize is deprecated/removed in newer
        # scipy and also rescales intensities; kept for compatibility with
        # the original behaviour.
        currentSum = numpy.add(currentSum, imresize(activation, outputShape))
    return currentSum
def createObjectHierarchyLabelString(labels):
    """Render the hierarchy labels as an indented, human-readable string.

    Args:
        labels: Sequence of label lists, one list per hierarchy level.

    Returns:
        Multi-line string listing every label under its level heading.
    """
    parts = ["Labels in object hierarchy:"+"\n"]
    for levelIndex, levelLabels in enumerate(labels):
        parts.append("\n"+"Level: "+str(levelIndex))
        parts.extend("\n"+"   "+label for label in levelLabels)
    return "".join(parts)
def main():
    """Interactively classify a single imaged object with a trained B-CNN.

    The user is prompted (via tkinter file dialogs) for the model file, a
    matching inputConfiguration.txt, and one image file per channel.  The
    object's labels are predicted at every level of the type hierarchy, the
    per-class probabilities are written to predictedProbabilites.txt, and
    diagnostic plots (channel images, location heatmap, overlay) are saved
    to the current working directory.
    """
    gui=tkinter.Tk()
    gui.withdraw() #Hides the main window of the GUI as it is not needed.
    input("You will now select the B-CNN model file, press enter to continue")
    modelPath=tkinter.filedialog.askopenfilename()
    print("B-CNN model file "+modelPath+" selected")
    loadedModel=load_model(modelPath)
    print("\n")
    input("You will now select an appropiate inputConfiguration.txt file that is compatible with the model, press enter to continue")
    inputConfigurationFilePath=tkinter.filedialog.askopenfilename()
    print("inputConfiguration file "+inputConfigurationFilePath+" selected")
    inputConfiguration=Configuration(inputConfigurationFilePath,"=")
    allowedFileSuffixes=inputConfiguration.getConfigurationValue("useFileSuffix","raw")
    channelsPerImagedObject=1 if(type(allowedFileSuffixes)==str) else len(allowedFileSuffixes)
    desiredImageSize=inputConfiguration.getConfigurationValue("desiredImageSize","int")
    contigiousEqualAreaRejectionThreshold=inputConfiguration.getConfigurationValue("contigiousEqualAreaRejectionThreshold","int")
    objectTypeLabels=inputConfiguration.getConfigurationValue("allowedObjectType","raw")
    #A map between object types and their corresponding label lists is created.
    objectTypeLabelDictionary={i[0]:tuple(i[1:]) for i in objectTypeLabels}
    objectTypePossibleLabelSets,objectHierarchyDepth=getObjectHierarchyLabels(list(objectTypeLabelDictionary.values()))
    #The input configuration is printed below
    print("\n")
    print("Input configuration loaded:")
    print("  Original file suffixes:")
    for currentSuffix in allowedFileSuffixes:
        print("    "+currentSuffix)
    print("  Number of channels for input objects: "+str(channelsPerImagedObject))
    print("  Image size: "+str(desiredImageSize)+" pixels")
    print("  Contigious colour area rejection threshold: "+("Disabled" if(contigiousEqualAreaRejectionThreshold is None) else str(contigiousEqualAreaRejectionThreshold)))
    print("  Labels at each level in the object type heirarchy:")
    for i in range(0,objectHierarchyDepth):
        print("    Level "+str(i)+":")
        for j in objectTypePossibleLabelSets[i]:
            print("      "+j)
    #The input images are now selected, one file dialog per channel.
    imageFilePaths=["" for i in range(0,channelsPerImagedObject)]
    print("\n")
    print("\n")
    print("File selection for an input object will now occur")
    for i in range(0,channelsPerImagedObject):
        print("\n")
        input("Image "+str(i+1)+" will now be loaded; original training file suffix was "+str(allowedFileSuffixes[i])+". Press enter to continue")
        currentChosenFilePath=tkinter.filedialog.askopenfilename()
        print("File path "+currentChosenFilePath+" was chosen" if(currentChosenFilePath!="") else "No file chosen")
        imageFilePaths[i]=currentChosenFilePath
    print("\n")
    print("The following files were chosen:")
    for currentIndex,currentFilePath in enumerate(imageFilePaths):
        print("  "+allowedFileSuffixes[currentIndex]+" slot: "+currentFilePath)
    input("An ImagedObject will now be created from these files, press enter to continue")
    loadedImagedObject=FileImagedObject(imageFilePaths," ",None,desiredImageSize,contigiousEqualAreaRejectionThreshold)
    if(loadedImagedObject.nonBlankImageCount==0): #If no images in loadedImagedObject are usable the program will exit when the user is ready.
        print("None of the loaded images are usable due to the following reasons that may not be limited to only one: ")
        print("  Images being rejected due to defects such as a non square shape, being detected as being from the edge of a survey")
        print("  Too many channels did not have an image chosen")
        input("Press enter to exit")
        sys.exit()
    predictedImage=numpy.expand_dims(loadedImagedObject.imageData,axis=0)
    predictedProbabilities=loadedModel.predict(predictedImage)
    predictedClasses=[currentProbabilites.argmax() for currentProbabilites in predictedProbabilities] #Represents the most likely labels using integers.
    predictedClassesStrings=[objectTypePossibleLabelSets[i][predictedClasses[i]] for i in range(0,objectHierarchyDepth)]
    #Displays the predicted probabilities in the terminal and writes them to a file.
    #Fix: the report file is now opened via a context manager so the handle is
    #closed even if a write fails part-way through (it was open()/close() before).
    with open("predictedProbabilites.txt","w") as outputPredictedProbabilitiesFile:
        print("\n")
        print("Saving predicted probabilities at location "+os.getcwd()+"/predictedProbabilities.txt")
        print("Predicted label is: "+str(predictedClassesStrings)+", predicted label probabilites are: ")
        outputPredictedProbabilitiesFile.write("Predicted label is:"+str(predictedClassesStrings)+", predicted label probabilites are: ")
        for i in range(0,objectHierarchyDepth):
            print("Label level "+str(i)+":")
            outputPredictedProbabilitiesFile.write("\n"+"Label level "+str(i)+":")
            for j,currentLabel in enumerate(objectTypePossibleLabelSets[i]):
                currentProbability=(predictedProbabilities[i][0,j])*100.0
                print("  "+currentLabel+": "+str(currentProbability)+"%")
                outputPredictedProbabilitiesFile.write("\n"+"  "+currentLabel+": "+str(currentProbability)+"%")
    print("\n")
    print("Plots will now be created and saved")
    #Plots are created below
    numberOfImages=loadedImagedObject.imageData.shape[2]
    subplotDivision=math.ceil(math.sqrt(numberOfImages)) #Done so the images are arranged in a shape as close to a square as possible.
    #The image channels of loadedImagedObject are shown.
    imageDataFigure=plt.figure(figsize=(8*subplotDivision,8*subplotDivision))
    plt.suptitle("Plot of ImagedObject channels")
    for i in range(0,numberOfImages):
        #Fix: pass the grid position as three integers; the previous digit
        #string (e.g. "331") breaks once any dimension exceeds 9.
        currentimageDataAxes=imageDataFigure.add_subplot(subplotDivision,subplotDivision,i+1)
        currentimageDataAxes.set_title(allowedFileSuffixes[i]+" channel slot")
        currentimageDataAxes.imshow(loadedImagedObject.imageData[:,:,i],cmap="hot")
    print("Saving plot of ImagedObject channels at location "+os.getcwd()+"/imageDataFigure.png")
    imageDataFigure.savefig("imageDataFigure.png")
    outputLayerNames=["out"+str(i+1)+"LocationHeatmap" for i in range(0,objectHierarchyDepth)] #Each output location heatmap is labeled sequentially from the output location heatmap closest to the input layer.
    totalLocationHeatmap=addLayerActivations(loadedModel,"mainInput",outputLayerNames,predictedClasses,loadedImagedObject.imageData,(loadedImagedObject.imageData.shape[0],loadedImagedObject.imageData.shape[1]))
    #The previously created location heatmap is shown.
    heatmapFigure,heatmapAxes=plt.subplots(figsize=(8,8))
    heatmapAxes.set_title("Total location heatmap")
    heatmapAxes.imshow(totalLocationHeatmap)
    print("Saving plot of total location heatmap at location "+os.getcwd()+"/totalLocationHeatmap.png")
    heatmapFigure.savefig("totalLocationHeatmap.png")
    #The image channels of loadedImagedObject are shown with an overlay of the previously created location heatmap.
    imageDataLocationHeatmapFigure=plt.figure(figsize=(8*subplotDivision,8*subplotDivision))
    plt.suptitle("Plot of ImagedObject channels with total location heatmap overlay")
    for i in range(0,numberOfImages):
        #Fix: integer grid position here as well (see above).
        currentimageDataLocationHeatmapAxes=imageDataLocationHeatmapFigure.add_subplot(subplotDivision,subplotDivision,i+1)
        currentimageDataLocationHeatmapAxes.set_title(allowedFileSuffixes[i]+" channel slot")
        currentimageDataLocationHeatmapAxes.imshow(loadedImagedObject.imageData[:,:,i],cmap="hot")
        currentimageDataLocationHeatmapAxes.imshow(totalLocationHeatmap,alpha=0.4,cmap="winter")
    print("Saving plot of ImaghedObject channels with total location heatmap overlay at location "+os.getcwd()+"/totalLocationHeatmap.png")
    imageDataLocationHeatmapFigure.savefig("imageDataTotalLocationHeatmapFigure.png")
main() | 0.197677 | 0.301928 |
from PyQt4 import QtGui, QtCore
class MyDoubleSpinBox(QtGui.QDoubleSpinBox):
    """Double spin box that selects all of its text on focus-in.

    Use this widget instead of the usual QDoubleSpinBox for quick editing.
    """

    def __init__(self, parent):
        # Fixed: the parent argument was previously ignored; forward it so
        # Qt parenting/ownership works the way callers expect.
        super(MyDoubleSpinBox, self).__init__(parent)
        self.setDecimals(3)

    def focusInEvent(self, e):
        super(MyDoubleSpinBox, self).focusInEvent(e)
        # Defer the select-all slightly: selecting synchronously in
        # focusInEvent can be undone by the default mouse handling.
        QtCore.QTimer.singleShot(100, self.afterFocus)

    def afterFocus(self):
        """Select all text so the user can immediately type a new value."""
        self.selectAll()
class QNamedPushButton(QtGui.QPushButton):
    """Push button carrying a name identifier.

    Handy when several buttons share one slot: the slot receives the name
    of the button that was clicked instead of having to look it up.

    Signals:
        clicked_name(object) - Emitted whenever the user clicks the button;
            sends this button's name.
    """

    clicked_name = QtCore.pyqtSignal(object)

    def __init__(self, label, name, parent):
        super(QNamedPushButton, self).__init__(label, parent)
        self.name = name
        # Re-broadcast plain clicks through the name-carrying signal.
        self.clicked.connect(self.handleClicked)

    def handleClicked(self):
        """Forward clicked() as clicked_name(self.name)."""
        self.clicked_name.emit(self.name)
class QMultipleSpinBoxEdit(QtGui.QWidget):
    """Widget to edit a list of floats, one spin box per value.

    Signals:
        valueChanged(list) - Emits the full list of spin-box values
            whenever any single value is changed.
    """

    # Connected to every child spin box; see handleValueChanged.
    valueChanged = QtCore.pyqtSignal(list)
def __init__(self, attribute_names, parent, attribute_values=None):
    """Build one spin box per attribute name.

    Args:
        attribute_names: tooltip labels, one per spin box.
        parent: parent widget.
        attribute_values: optional initial values; defaults to all zeros.
    """
    super(QMultipleSpinBoxEdit, self).__init__(parent)
    self.vbox = QtGui.QVBoxLayout()
    self.setLayout(self.vbox)
    self.attribute_names = attribute_names
    if attribute_values is not None:
        self.attribute_values = attribute_values
    else:
        self.attribute_values = [0.0] * len(self.attribute_names)
    self.makeSpinBoxes()
def makeSpinBoxes(self):
    """Create one MyDoubleSpinBox per attribute and add it to the layout."""
    self.spin_boxes = []
    for name, value in zip(self.attribute_names, self.attribute_values):
        box = MyDoubleSpinBox(self)
        box.setToolTip(name)
        box.setDecimals(3)
        box.setMinimum(-10000.)
        box.setMaximum(10000.)
        box.setValue(value)
        box.valueChanged.connect(self.handleValueChanged)
        self.vbox.addWidget(box)
        self.spin_boxes.append(box)
def handleValueChanged(self):
    """Broadcast the current values of every spin box."""
    values = [box.value() for box in self.spin_boxes]
    self.valueChanged.emit(values)
# Replace the edited attribute set: tear down the current spin boxes, then
# store the new names and values (zeros when no values are given) before the
# spin boxes are rebuilt.
def editAttributes(self, new_attribute_names, new_attribute_values=None):
for sb in self.spin_boxes:
self.vbox.removeWidget(sb)
# Disconnect before scheduling deletion — presumably so a pending
# deleteLater cannot still trigger our slot; confirm against Qt docs.
sb.valueChanged.disconnect(self.handleValueChanged)
sb.deleteLater()
self.spin_boxes = []
self.attribute_names = new_attribute_names
if new_attribute_values is None:
self.attribute_values = [0.0]*len(self.attribute_names)
else:
self.attribute_values = new_attribute_values
self.makeSpinBoxes() | rampage/widgets/CommonWidgets.py | from PyQt4 import QtGui, QtCore
class MyDoubleSpinBox(QtGui.QDoubleSpinBox):
    """Double spin box that selects all of its text on focus-in.

    Use this widget instead of the usual QDoubleSpinBox for quick editing.
    """

    def __init__(self, parent):
        # Fixed: the parent argument was previously ignored; forward it so
        # Qt parenting/ownership works the way callers expect.
        super(MyDoubleSpinBox, self).__init__(parent)
        self.setDecimals(3)

    def focusInEvent(self, e):
        super(MyDoubleSpinBox, self).focusInEvent(e)
        # Defer the select-all slightly: selecting synchronously in
        # focusInEvent can be undone by the default mouse handling.
        QtCore.QTimer.singleShot(100, self.afterFocus)

    def afterFocus(self):
        """Select all text so the user can immediately type a new value."""
        self.selectAll()
class QNamedPushButton(QtGui.QPushButton):
    """Push button carrying a name identifier.

    Handy when several buttons share one slot: the slot receives the name
    of the button that was clicked instead of having to look it up.

    Signals:
        clicked_name(object) - Emitted whenever the user clicks the button;
            sends this button's name.
    """

    clicked_name = QtCore.pyqtSignal(object)

    def __init__(self, label, name, parent):
        super(QNamedPushButton, self).__init__(label, parent)
        self.name = name
        # Re-broadcast plain clicks through the name-carrying signal.
        self.clicked.connect(self.handleClicked)

    def handleClicked(self):
        """Forward clicked() as clicked_name(self.name)."""
        self.clicked_name.emit(self.name)
class QMultipleSpinBoxEdit(QtGui.QWidget):
    """Widget to edit a list of floats, one spin box per value.

    Signals:
        valueChanged(list) - Emits the full list of spin-box values
            whenever any single value is changed.
    """

    # Connected to every child spin box; see handleValueChanged.
    valueChanged = QtCore.pyqtSignal(list)
def __init__(self, attribute_names, parent, attribute_values=None):
    """Build one spin box per attribute name.

    Args:
        attribute_names: tooltip labels, one per spin box.
        parent: parent widget.
        attribute_values: optional initial values; defaults to all zeros.
    """
    super(QMultipleSpinBoxEdit, self).__init__(parent)
    self.vbox = QtGui.QVBoxLayout()
    self.setLayout(self.vbox)
    self.attribute_names = attribute_names
    if attribute_values is not None:
        self.attribute_values = attribute_values
    else:
        self.attribute_values = [0.0] * len(self.attribute_names)
    self.makeSpinBoxes()
def makeSpinBoxes(self):
    """Create one MyDoubleSpinBox per attribute and add it to the layout."""
    self.spin_boxes = []
    for name, value in zip(self.attribute_names, self.attribute_values):
        box = MyDoubleSpinBox(self)
        box.setToolTip(name)
        box.setDecimals(3)
        box.setMinimum(-10000.)
        box.setMaximum(10000.)
        box.setValue(value)
        box.valueChanged.connect(self.handleValueChanged)
        self.vbox.addWidget(box)
        self.spin_boxes.append(box)
def handleValueChanged(self):
    """Broadcast the current values of every spin box."""
    values = [box.value() for box in self.spin_boxes]
    self.valueChanged.emit(values)
# Replace the edited attribute set: tear down the current spin boxes, then
# store the new names and values (zeros when no values are given) before the
# spin boxes are rebuilt.
def editAttributes(self, new_attribute_names, new_attribute_values=None):
for sb in self.spin_boxes:
self.vbox.removeWidget(sb)
# Disconnect before scheduling deletion — presumably so a pending
# deleteLater cannot still trigger our slot; confirm against Qt docs.
sb.valueChanged.disconnect(self.handleValueChanged)
sb.deleteLater()
self.spin_boxes = []
self.attribute_names = new_attribute_names
if new_attribute_values is None:
self.attribute_values = [0.0]*len(self.attribute_names)
else:
self.attribute_values = new_attribute_values
self.makeSpinBoxes() | 0.653901 | 0.232713 |