# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class WebAppFirewallPolicySummary(object):
"""
Summary of the WebAppFirewallPolicy.
"""
def __init__(self, **kwargs):
"""
Initializes a new WebAppFirewallPolicySummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this WebAppFirewallPolicySummary.
:type id: str
:param display_name:
The value to assign to the display_name property of this WebAppFirewallPolicySummary.
:type display_name: str
:param compartment_id:
The value to assign to the compartment_id property of this WebAppFirewallPolicySummary.
:type compartment_id: str
:param time_created:
The value to assign to the time_created property of this WebAppFirewallPolicySummary.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this WebAppFirewallPolicySummary.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this WebAppFirewallPolicySummary.
:type lifecycle_state: str
:param lifecycle_details:
The value to assign to the lifecycle_details property of this WebAppFirewallPolicySummary.
:type lifecycle_details: str
:param freeform_tags:
The value to assign to the freeform_tags property of this WebAppFirewallPolicySummary.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this WebAppFirewallPolicySummary.
:type defined_tags: dict(str, dict(str, object))
:param system_tags:
The value to assign to the system_tags property of this WebAppFirewallPolicySummary.
:type system_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'display_name': 'str',
'compartment_id': 'str',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'lifecycle_details': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'system_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'display_name': 'displayName',
'compartment_id': 'compartmentId',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'lifecycle_details': 'lifecycleDetails',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'system_tags': 'systemTags'
}
self._id = None
self._display_name = None
self._compartment_id = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self._lifecycle_details = None
self._freeform_tags = None
self._defined_tags = None
self._system_tags = None
@property
def id(self):
"""
**[Required]** Gets the id of this WebAppFirewallPolicySummary.
The `OCID`__ of the WebAppFirewallPolicy.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this WebAppFirewallPolicySummary.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this WebAppFirewallPolicySummary.
The `OCID`__ of the WebAppFirewallPolicy.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this WebAppFirewallPolicySummary.
:type: str
"""
self._id = id
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this WebAppFirewallPolicySummary.
WebAppFirewallPolicy display name, can be renamed.
:return: The display_name of this WebAppFirewallPolicySummary.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this WebAppFirewallPolicySummary.
WebAppFirewallPolicy display name, can be renamed.
:param display_name: The display_name of this WebAppFirewallPolicySummary.
:type: str
"""
self._display_name = display_name
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this WebAppFirewallPolicySummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this WebAppFirewallPolicySummary.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this WebAppFirewallPolicySummary.
The `OCID`__ of the compartment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this WebAppFirewallPolicySummary.
:type: str
"""
self._compartment_id = compartment_id
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this WebAppFirewallPolicySummary.
The time the WebAppFirewallPolicy was created. An RFC3339 formatted datetime string.
:return: The time_created of this WebAppFirewallPolicySummary.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this WebAppFirewallPolicySummary.
The time the WebAppFirewallPolicy was created. An RFC3339 formatted datetime string.
:param time_created: The time_created of this WebAppFirewallPolicySummary.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this WebAppFirewallPolicySummary.
The time the WebAppFirewallPolicy was updated. An RFC3339 formatted datetime string.
:return: The time_updated of this WebAppFirewallPolicySummary.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this WebAppFirewallPolicySummary.
The time the WebAppFirewallPolicy was updated. An RFC3339 formatted datetime string.
:param time_updated: The time_updated of this WebAppFirewallPolicySummary.
:type: datetime
"""
self._time_updated = time_updated
@property
def lifecycle_state(self):
"""
**[Required]** Gets the lifecycle_state of this WebAppFirewallPolicySummary.
The current state of the WebAppFirewallPolicy.
:return: The lifecycle_state of this WebAppFirewallPolicySummary.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this WebAppFirewallPolicySummary.
The current state of the WebAppFirewallPolicy.
:param lifecycle_state: The lifecycle_state of this WebAppFirewallPolicySummary.
:type: str
"""
self._lifecycle_state = lifecycle_state
@property
def lifecycle_details(self):
"""
Gets the lifecycle_details of this WebAppFirewallPolicySummary.
A message describing the current state in more detail.
For example, can be used to provide actionable information for a resource in FAILED state.
:return: The lifecycle_details of this WebAppFirewallPolicySummary.
:rtype: str
"""
return self._lifecycle_details
@lifecycle_details.setter
def lifecycle_details(self, lifecycle_details):
"""
Sets the lifecycle_details of this WebAppFirewallPolicySummary.
A message describing the current state in more detail.
For example, can be used to provide actionable information for a resource in FAILED state.
:param lifecycle_details: The lifecycle_details of this WebAppFirewallPolicySummary.
:type: str
"""
self._lifecycle_details = lifecycle_details
@property
def freeform_tags(self):
"""
**[Required]** Gets the freeform_tags of this WebAppFirewallPolicySummary.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:return: The freeform_tags of this WebAppFirewallPolicySummary.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this WebAppFirewallPolicySummary.
Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only.
Example: `{\"bar-key\": \"value\"}`
:param freeform_tags: The freeform_tags of this WebAppFirewallPolicySummary.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
**[Required]** Gets the defined_tags of this WebAppFirewallPolicySummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:return: The defined_tags of this WebAppFirewallPolicySummary.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this WebAppFirewallPolicySummary.
Defined tags for this resource. Each key is predefined and scoped to a namespace.
Example: `{\"foo-namespace\": {\"bar-key\": \"value\"}}`
:param defined_tags: The defined_tags of this WebAppFirewallPolicySummary.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
@property
def system_tags(self):
"""
**[Required]** Gets the system_tags of this WebAppFirewallPolicySummary.
Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
:return: The system_tags of this WebAppFirewallPolicySummary.
:rtype: dict(str, dict(str, object))
"""
return self._system_tags
@system_tags.setter
def system_tags(self, system_tags):
"""
Sets the system_tags of this WebAppFirewallPolicySummary.
Usage of system tag keys. These predefined keys are scoped to namespaces.
Example: `{\"orcl-cloud\": {\"free-tier-retained\": \"true\"}}`
:param system_tags: The system_tags of this WebAppFirewallPolicySummary.
:type: dict(str, dict(str, object))
"""
self._system_tags = system_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
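# A minimal usage sketch (illustrative only; the OCID below is hypothetical).
# Instances are normally built by the OCI SDK from API responses, but the
# kwargs-based constructor also allows direct construction:
if __name__ == '__main__':
    summary = WebAppFirewallPolicySummary(
        id='ocid1.webappfirewallpolicy.oc1..exampleuniqueID',  # hypothetical OCID
        display_name='example-policy',
        lifecycle_state='ACTIVE',
        freeform_tags={'bar-key': 'value'})
    print(summary)  # __repr__ delegates to formatted_flat_dict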
|
import os,rootpath
rootpath.append(pattern='main.py') # add the directory of main.py to PATH
from kivy.app import App
from kivy.lang import Builder
from kivy.properties import DictProperty,StringProperty,ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.clock import Clock
from middleware.plugin_handler import PluginHandler
from middleware.widget_handler import WidgetHandler
class Hotkey(BoxLayout):
""" Add hotkeys for the App
Attributes:
keycode: list, e.g. ['lctrl']
modifiers: list, e.g. ['s']
"""
keycode=ListProperty()
modifiers=ListProperty()
def __init__(self, **kwargs):
super(Hotkey, self).__init__(**kwargs)
        # Defer the keyboard binding via Clock; binding directly in __init__
        # conflicts with startup and can leave the app unresponsive
        # ("System No Response").
        Clock.schedule_once(self.add_binding)
def add_binding(self,*args):
self._keyboard = Window.request_keyboard(
self._keyboard_closed, self, 'text')
self._keyboard.bind(on_key_down=self._on_keyboard_down,on_key_up=self._on_keyboard_up)
self.bind(keycode=self.trigger_hotkey)
self.bind(modifiers=self.trigger_hotkey)
def _keyboard_closed(self):
self.modifiers=[]
def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        # keycode is a (code, name) tuple from Kivy; track the key name only.
        if keycode[1] not in self.keycode:
self.keycode.append(keycode[1])
if 'lctrl' not in self.modifiers and keycode[1] in ["lctrl"]:
self.modifiers.append(keycode[1])
return True
else:
return True
def _on_keyboard_up(self, window=None, keycode=None):
self.keycode =[]
self.modifiers = []
def trigger_hotkey(self,*args):
if 'f1' in self.keycode:
app=App.get_running_app()
app.plugin_manager.open()
if 'f5' in self.keycode:
widget_handler=WidgetHandler()
plugin_handler=PluginHandler()
plugins=[]
'''reload current processing plugin'''
p1=widget_handler.processing_screens.current
plugin_handler.plugins[p1]['wrapper'].reload()
widget_handler.processing_screens.current=p1
'''reload current display plugin'''
p2=widget_handler.display_screens.current
plugin_handler.plugins[p2]['wrapper'].reload()
widget_handler.display_screens.current=p2
if 'f2' in self.keycode:
app=App.get_running_app()
app.plugin_manager.reload_all_plugins()
class Test(App):
"""docstring for Test."""
def __init__(self):
super(Test, self).__init__()
def build(self):
from main import MainWindow
from core.plugin_wrapper import PluginWrapper
plugin_wrapper = PluginWrapper('plugins.processing.hotkey')
hotkey=Hotkey()
app = MainWindow()
window = app.build()
app.plugins['hotkey']={
'type':'processing','disabled':False,'instance':hotkey,'wrapper':plugin_wrapper}
return window
if __name__ == '__main__':
Test().run()
|
#!/usr/bin/env python3
from argparse import ArgumentParser
import glob
import os
import cv2
import numpy as np
try:
from tqdm import tqdm
except ImportError:
# In case tqdm isn't installed we don't use a progressbar.
tqdm = lambda x, desc, smoothing, total: x
print('Tqdm not found, not showing progress.')
import utils
parser = ArgumentParser(
description='Downscale the cityscapes dataset by a provided factor.')
parser.add_argument(
'--downscale_factor', required=True, type=utils.positive_int,
help='Factor by which the images will be downscaled.')
parser.add_argument(
'--cityscapes_root', required=True, type=utils.readable_directory,
help='Path to the cityscapes dataset root.')
parser.add_argument(
'--target_root', required=True, type=utils.writeable_directory,
help='Location used to store the downscaled data.')
parser.add_argument(
'--label_threshold', default=0.75, type=utils.zero_one_float,
help='The threshold applied to the dominant label to decide which ambiguous'
' cases are mapped to the void label.')
parser.add_argument(
'--store_jpeg', action='store_true', default=False,
    help='Store the downscaled color images as JPEG (quality 75) instead of PNG.')
def main():
args = parser.parse_args()
# Get filenames
image_filenames = sorted(glob.glob(
os.path.join(args.cityscapes_root, 'leftImg8bit') + '/*/*/*.png'))
label_filenames = sorted(glob.glob(
os.path.join(args.cityscapes_root, 'gt') + '*/*/*/*labelIds.png'))
for image_filename, label_filename in tqdm(
zip(image_filenames, label_filenames),
desc='Resizing', smoothing=0.01, total=len(image_filenames)):
# Resize the color image.
image = cv2.imread(image_filename)
h, w, _ = image.shape
h = h // args.downscale_factor
w = w // args.downscale_factor
image = cv2.resize(image, (w, h))
target = image_filename.replace(args.cityscapes_root, args.target_root)
if args.store_jpeg:
target = target.replace('.png', '.jpg')
target_path = os.path.dirname(target)
if not os.path.exists(target_path):
os.makedirs(target_path)
if args.store_jpeg:
cv2.imwrite(target, image, [int(cv2.IMWRITE_JPEG_QUALITY), 75])
else:
cv2.imwrite(target, image)
# Resize the label image.
labels = cv2.imread(label_filename, cv2.IMREAD_GRAYSCALE)
h, w = labels.shape
h = h // args.downscale_factor
w = w // args.downscale_factor
labels = utils.soft_resize_labels(
labels, (w, h), args.label_threshold, void_label=0)
target = label_filename.replace(args.cityscapes_root, args.target_root)
target_path = os.path.dirname(target)
if not os.path.exists(target_path):
os.makedirs(target_path)
cv2.imwrite(target, labels)
if __name__ == '__main__':
main()
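# A hedged sketch of what `utils.soft_resize_labels` might do; the project's
# actual implementation is not shown here, so the name, signature, and the
# exact tie-breaking behavior below are assumptions for illustration only.
def soft_resize_labels_sketch(labels, new_size, threshold, void_label=0):
    """Downscale a label image by per-block majority vote.

    Each target pixel covers a block of source pixels. If the dominant label
    covers less than `threshold` of the block, the pixel becomes `void_label`.
    """
    w, h = new_size
    src_h, src_w = labels.shape
    fy, fx = src_h // h, src_w // w
    out = np.full((h, w), void_label, dtype=labels.dtype)
    for y in range(h):
        for x in range(w):
            block = labels[y * fy:(y + 1) * fy, x * fx:(x + 1) * fx]
            values, counts = np.unique(block, return_counts=True)
            best = counts.argmax()
            if counts[best] / block.size >= threshold:
                out[y, x] = values[best]
    return out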
|
"""
Create Wordclouds
Movie Example:
python word.py --path some_movie_count.json --movie YourMovieName --mask some_mask.jpg --pixels 1200
Disney Example:
python word.py --movie Coco --type tfidf --mask coco.jpg --pixels 1200
"""
import os
import argparse
from Reviewer.cloud import WordCloudGenerator
def parse_arguments() -> argparse.Namespace:
""" Parse command line inputs """
masks = tuple(os.listdir("images/masks"))
parser = argparse.ArgumentParser(description='Scraper')
parser.add_argument('--movie', help='Movie', default="Frozen")
    parser.add_argument('--type', help='Type of top words', choices=("tfidf", "relative"), default="tfidf")
parser.add_argument('--mask', help='Mask url', choices=masks, default="coco.jpg")
parser.add_argument('--pixels', help='Minimum number of pixels', default=500, type=int)
parser.add_argument('--path', help='Path to count or tfidf data', type=str)
args = parser.parse_args()
if args.type == "tfidf":
args.type = "TF-IDF"
else:
args.type = "TF-IDF-Relative"
return args
def main():
args = parse_arguments()
wc = WordCloudGenerator()
    wc.generate_image(movie=args.movie, word_type=args.type, mask=args.mask, pixels=args.pixels, save=True,
                      path="data/" + args.path if args.path else None)
if __name__ == "__main__":
main()
|
def extractLapinetlBlogspotCom(item):
'''
Parser for 'lapinetl.blogspot.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
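# Illustrative input/output for the parser above (hypothetical values):
#     item = {'title': 'Some Novel Vol. 2 Chapter 15', 'tags': ['PRC']}
# extractVolChapterFragmentPostfix is assumed to parse vol=2, chp=15 from the
# title; the 'PRC' tag then yields a 'translated' release message. Items whose
# title contains "preview", or that yield neither volume nor chapter, return None.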
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.forms.models import model_to_dict
from networkapi.admin_permission import AdminPermission
from networkapi.auth import has_perm
from networkapi.exception import InvalidValueError
from networkapi.infrastructure.xml_utils import dumps_networkapi
from networkapi.interface.models import Interface
from networkapi.rack.models import Rack
from networkapi.rack.models import RackError
from networkapi.rest import RestResource
from networkapi.rest import UserNotAuthorizedError
class RackGetByEquipResource(RestResource):
log = logging.getLogger('RackGetByEquipResource')
def handle_get(self, request, user, *args, **kwargs):
"""Handles GET requests to find all Racks
URLs: /rack/find/
"""
self.log.info('Get Rack by equipment id')
try:
# User permission
if not has_perm(user, AdminPermission.EQUIPMENT_MANAGEMENT, AdminPermission.READ_OPERATION):
self.log.error(
u'User does not have permission to perform the operation.')
return self.not_authorized()
# Get XML data
equip_id = kwargs.get('equip_id')
equip_id = int(equip_id)
rack_map = []
interface = Interface()
try:
racks = Rack.objects.filter(id_sw1__id=equip_id)
for cont in racks:
rack_map.append(model_to_dict(cont))
return self.response(dumps_networkapi({'rack': rack_map}))
except:
pass
try:
racks = Rack.objects.filter(id_sw2__id=equip_id)
for cont in racks:
rack_map.append(model_to_dict(cont))
return self.response(dumps_networkapi({'rack': rack_map}))
except:
pass
try:
racks = Rack.objects.filter(id_ilo__id=equip_id)
for cont in racks:
rack_map.append(model_to_dict(cont))
return self.response(dumps_networkapi({'rack': rack_map}))
except:
pass
try:
interfaces = interface.search(equip_id)
for interf in interfaces:
sw_router = interf.get_switch_and_router_interface_from_host_interface(
interf.protegida)
try:
racks = Rack.objects.filter(
id_sw1__id=sw_router.equipamento.id)
for cont in racks:
rack_map.append(model_to_dict(cont))
return self.response(dumps_networkapi({'rack': rack_map}))
except:
pass
try:
racks = Rack.objects.filter(
id_sw2__id=sw_router.equipamento.id)
for cont in racks:
rack_map.append(model_to_dict(cont))
return self.response(dumps_networkapi({'rack': rack_map}))
except:
pass
try:
racks = Rack.objects.filter(
id_ilo__id=sw_router.equipamento.id)
for cont in racks:
rack_map.append(model_to_dict(cont))
return self.response(dumps_networkapi({'rack': rack_map}))
except:
pass
except:
pass
return self.response(dumps_networkapi({'rack': rack_map}))
except UserNotAuthorizedError:
return self.not_authorized()
except RackError:
return self.response_error(1)
        except InvalidValueError as e:
            return self.response_error(269, e.param, e.value)
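# Illustrative exchange (hypothetical values; the Rack model's field names are
# not shown in this file): GET /rack/find/<equip_id> walks the sw1/sw2/ilo
# relations, then any switch/router reachable through the equipment's
# interfaces, and answers with something like
#     {'rack': [{'id': 1, ...}]}
# or {'rack': []} when nothing matches.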
|
from setuptools import setup, find_packages
version = "1.2.0"
setup(
name="holland.backup.mysqldump",
version=version,
description="MySQLDump Backup/Restore Plugins",
long_description="""\
Plugin support to provide backup and restore functionality
through mysqldump backups
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords="",
author="Rackspace",
author_email="holland-devel@googlegroups.com",
url="http://hollandbackup.org",
license="GNU GPLv2",
packages=find_packages(exclude=["ez_setup", "examples", "tests", "tests.*"]),
include_package_data=True,
zip_safe=True,
test_suite="tests",
tests_require=["holland >= 0.9.6"],
    install_requires=[
        # 'mock' is needed on Python 2; unittest.mock ships with the
        # standard library on Python >= 3.3.
    ],
extras_require={"mysql": "holland.lib.mysql", "common": "holland.lib.common"},
entry_points="""
[holland.backup]
mysqldump = holland.backup.mysqldump:Provider [mysql, common]
[holland.restore]
mysqldump = holland.restore.mysqldump:MySQLRestore
""",
namespace_packages=["holland", "holland.backup"],
)
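# A hedged discovery sketch (assumes setuptools' pkg_resources; holland's own
# plugin loader is not shown here). The entry points declared above become
# visible to any consumer of the 'holland.backup' group:
#
#     import pkg_resources
#     for ep in pkg_resources.iter_entry_points('holland.backup'):
#         print(ep.name, '->', ep.module_name)  # mysqldump -> holland.backup.mysqldump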
|
from .globals import request_context
try:
from django.utils.deprecation import MiddlewareMixin
except ImportError: # Django < 1.10
MiddlewareMixin = object
class RequestContextMiddleware(MiddlewareMixin):
def process_request(self, request):
request_context.init_by_request(request)
def process_response(self, request, response):
request_context.clear()
return response
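# A minimal wiring sketch (the dotted path is an assumption for illustration;
# adjust it to wherever this module lives). On Django >= 1.10 the class is
# registered through the MIDDLEWARE setting:
#
#     # settings.py
#     MIDDLEWARE = [
#         # ...
#         'myapp.middleware.RequestContextMiddleware',
#     ]
#
# On older versions the same path goes into MIDDLEWARE_CLASSES, which is why
# the MiddlewareMixin fallback above exists.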
|
# Copyright (c) 2016-2020 Renata Hodovan, Akos Kiss.
#
# Licensed under the BSD 3-Clause License
# <LICENSE.rst or https://opensource.org/licenses/BSD-3-Clause>.
# This file may not be copied, modified, or distributed except
# according to those terms.
from ..config import as_list
from . import CallableDecorator
class UniqueIdDecorator(CallableDecorator):
"""
Decorator for SUT calls to extend issues with ``'id'`` property.
**Mandatory parameter of the decorator:**
- ``properties``: array of issue property names, which are concatenated
(separated by a space) to form the new ``'id'`` property.
**Example configuration snippet:**
.. code-block:: ini
[sut.foo]
call=fuzzinator.call.StdinSubprocessCall
call.decorate(0)=fuzzinator.call.RegexFilter
call.decorate(1)=fuzzinator.call.UniqueIdDecorator
[sut.foo.call]
command=/home/alice/foo/bin/foo -
[sut.foo.call.decorate(0)]
stderr=[": (?P<file>[^:]+):(?P<line>[0-9]+): (?P<func>[^:]+): (?P<msg>Assertion `.*' failed)"]
[sut.foo.call.decorate(1)]
properties=["msg", "file", "func"]
"""
def decorator(self, properties, **kwargs):
properties = as_list(properties) if properties else None
def wrapper(fn):
def filter(*args, **kwargs):
issue = fn(*args, **kwargs)
if not issue:
return issue
prop_lst = [issue.get(x, '') for x in properties]
issue['id'] = ' '.join(prop.decode('utf-8', errors='ignore') if isinstance(prop, bytes) else prop for prop in prop_lst)
return issue
return filter
return wrapper
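# Illustrative effect (hypothetical issue dict): with properties=["msg",
# "file", "func"], an issue such as
#     {'msg': b"Assertion `x' failed", 'file': 'foo.c', 'func': 'bar'}
# gains 'id' == "Assertion `x' failed foo.c bar"; bytes values are decoded as
# UTF-8 (errors ignored) before being joined with spaces.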
|
"""
This file offers the methods to automatically retrieve the graph Synechococcus sp. WH 7803.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 18:02:44.896157
The undirected graph Synechococcus sp. WH 7803 has 2524 nodes and 146898
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04614 and has 18 connected components, where the component
with most nodes has 2468 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 98, the mean node degree is 116.40,
and the node degree mode is 3. The top 5 most central nodes are 32051.SynWH7803_1617
(degree 895), 32051.SynWH7803_0048 (degree 776), 32051.SynWH7803_1778 (degree
632), 32051.SynWH7803_0385 (degree 622) and 32051.SynWH7803_1737 (degree
593).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import SynechococcusSpWh7803
# Then load the graph
graph = SynechococcusSpWh7803()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
        # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
        # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def SynechococcusSpWh7803(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Synechococcus sp. WH 7803 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
        Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
    Instance of Synechococcus sp. WH 7803 graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 18:02:44.896157
The undirected graph Synechococcus sp. WH 7803 has 2524 nodes and 146898
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.04614 and has 18 connected components, where the component
with most nodes has 2468 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 98, the mean node degree is 116.40,
and the node degree mode is 3. The top 5 most central nodes are 32051.SynWH7803_1617
(degree 895), 32051.SynWH7803_0048 (degree 776), 32051.SynWH7803_1778 (degree
632), 32051.SynWH7803_0385 (degree 622) and 32051.SynWH7803_1737 (degree
593).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import SynechococcusSpWh7803
# Then load the graph
graph = SynechococcusSpWh7803()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
            # Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="SynechococcusSpWh7803",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
|
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class BuildScripts(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""BuildScripts - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BuildScripts):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# -*- coding: utf-8 -*-
# %%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import phd.stats
import phd.viz
import phd.mscl
colors, palette = phd.viz.phd_style()
def compute_mean_sem(df):
n_tot = len(df)
n_surv = np.sum(df['survival'].astype(int))
mean_chan = df['effective_channels'].mean()
prob = n_surv / n_tot
sem_chan = df['effective_channels'].std() / np.sqrt(len(df))
prob_err = n_surv * (n_tot - n_surv) / n_tot**3
out_dict = {'mean_chan': mean_chan, 'p_survival': prob, 'chan_err': sem_chan,
'prob_err': np.sqrt(prob_err)}
return pd.Series(out_dict)
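# Note on the propagated uncertainty above: with n_surv survivors out of
# n_tot cells, p = n_surv / n_tot, so the binomial variance of p is
#     Var(p) = p * (1 - p) / n_tot = n_surv * (n_tot - n_surv) / n_tot**3,
# which is exactly the `prob_err` term before the square root is taken.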
# Load the data with the three shock logistic regression.
data = pd.read_csv('../../data/ch9_mscl_si/mscl_survival_data_three_shock.csv')
# Load the samples and the stats.
samples = pd.read_csv('../../data/ch9_mscl_si/three_shock_complete_mcmc_traces.csv')
pooled_samples = pd.read_csv('../../data/ch9_mscl_si/pooled_complete_mcmc_traces.csv')
indiv_samples = pd.read_csv('../../data/ch9_mscl_si/complete_mcmc_shock_rate_idx.csv')
pooled_data = pd.read_csv('../../data/ch9_mscl_si/pooled_mscl_survival_data.csv')
idx = {0: 'slow', 1: 'medium', 2: 'fast'}
# %%
# Instantiate the complicated figure
fig = plt.figure(figsize=(6,5))
gs = gridspec.GridSpec(22, 30)
# Add panel labels.
fig.text(0.05, 0.9, '(A)', fontsize=8)
fig.text(0.5, 0.9, '(B)', fontsize=8)
fig.text(0.05, 0.48, '(C)', fontsize=8)
fig.text(0.5, 0.48, '(D)', fontsize=8)
# Top panels
surv_ax1 = fig.add_subplot(gs[0, 0:14])
surv_ax2 = fig.add_subplot(gs[0, 16:])
death_ax1 = fig.add_subplot(gs[8, 0:14])
death_ax2 = fig.add_subplot(gs[8, 16:])
surv_ax3 = fig.add_subplot(gs[12, 0:14])
surv_ax4 = fig.add_subplot(gs[12, 16:])
death_ax3 = fig.add_subplot(gs[21, 0:14])
death_ax4 = fig.add_subplot(gs[21, 16:])
points = [surv_ax1, surv_ax2, surv_ax3, surv_ax4,
death_ax1, death_ax2, death_ax3, death_ax4]
# Survival prob axes.
prob_ax1 = fig.add_subplot(gs[1:8, 0:14])
prob_ax2 = fig.add_subplot(gs[1:8, 16:])
prob_ax3 = fig.add_subplot(gs[13:21, 0:14])
prob_ax4 = fig.add_subplot(gs[13:21, 16:])
curves = [prob_ax1, prob_ax2, prob_ax3, prob_ax4]
# Format the various axes
for i, ax in enumerate(points):
# ax.set_xticks([])
if i <= 3:
ax.set_xticks([])
ax.spines['bottom'].set_visible(False)
ax.set_yticks([])
ax.set_facecolor('none')
ax.set_xlim([0, 1E3])
ax.tick_params(labelsize=8)
# ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
for i, ax in enumerate(curves):
ax.tick_params(labelsize=8)
ax.set_xlim([0, 1E3])
ax.set_ylim([-0.02, 1.02])
ax.set_xticklabels([])
ax.spines['bottom'].set_visible(False)
ax.set_xticks([])
if i % 2 == 1:
ax.set_ylabel('')
ax.set_yticklabels([])
else:
        ax.set_ylabel('survival probability', fontsize=8)
if i < 2:
ax.set_xticklabels([])
bottoms = [death_ax1, death_ax2, death_ax3, death_ax4]
for i, ax in enumerate(bottoms):
ax.set_xlabel('effective channel number', fontsize=8)
# Set the titles
tops = [surv_ax1, surv_ax2, surv_ax3, surv_ax4]
titles = ['slow shock ($< 0.5$ Hz)', 'intermediate shock (0.5 - 1.0 Hz)',
'fast shock ($>$ 1.0 Hz)', 'all shocks']
for i, ax in enumerate(tops):
ax.set_title(
titles[i], fontsize=8, y=1.1,
bbox={'boxstyle':'square', 'facecolor':'white'})
ax.set_xticklabels([])
# Plot the survival curves and credible regions.
chan_range = np.logspace(0, 3, 500)
for i in range(3):
# Set the credible regions.
cred_region = np.zeros((2, len(chan_range)))
for j, c in enumerate(chan_range):
logit = samples['beta_0__{}'.format(
i)] + samples['beta_1__{}'.format(i)] * np.log(c)
prob = (1 + np.exp(-logit))**-1
cred_region[:, j] = phd.stats.compute_hpd(prob, 0.95)
# Fill the shaded cred regions.
_ = curves[i].fill_between(
chan_range, cred_region[0, :], cred_region[1, :], color=colors['light_red'], alpha=0.5)
# Draw the outlines.
_ = curves[i].plot(chan_range, cred_region[0, :],
color=colors['red'], lw=0.5)
_ = curves[i].plot(chan_range, cred_region[1, :],
color=colors['red'], lw=0.5)
# Plot the data.
rates = ['slow', 'medium', 'fast']
for i, r in enumerate(rates):
_data = data[data['shock_class'] == r]
surv = _data[_data['survival'] == True]
death = _data[_data['survival'] == False]
ys = np.random.normal(0, 0.1, len(surv))
yd = np.random.normal(0, 0.1, len(death))
_ = tops[i].plot(surv['effective_channels'], ys, 'k.', ms=1, alpha=0.75)
_ = bottoms[i].plot(death['effective_channels'],
yd, 'k.', ms=1, alpha=0.75)
# Plot the binned data.
bin_width = 50
binned = phd.mscl.density_binning(
_data, bin_width=bin_width, groupby='shock_class', input_key='effective_channels', min_cells=20)
grouped = binned.groupby('bin_number').apply(compute_mean_sem)
_ = curves[i].errorbar(grouped['mean_chan'], grouped['p_survival'], xerr=grouped['chan_err'],
yerr=grouped['prob_err'], color=colors['black'], lw=0.75, linestyle='none',
marker='o', ms=4, label='{} channels/bin'.format(bin_width), zorder=100,
markeredgewidth=0.5, markeredgecolor='white')
# Plot the strain data.
grouped = _data.groupby(['rbs']).apply(compute_mean_sem)
_ = curves[i].errorbar(grouped['mean_chan'], grouped['p_survival'],
xerr=grouped['chan_err'], yerr=grouped['prob_err'],
color=colors['blue'], lw=0.75, ms=4, marker='o',
label='1 SD mutant / bin', linestyle='none', zorder=100,
markeredgecolor='white', markeredgewidth=0.5)
# Plot the pooled data.
surv = data[data['survival'] == True]
death = data[data['survival'] == False]
ys = np.random.normal(0, 0.1, len(surv))
yd = np.random.normal(0, 0.1, len(death))
surv_ax4.plot(surv['effective_channels'], ys, 'k.', ms=1, alpha=0.75)
death_ax4.plot(death['effective_channels'], yd, 'k.', ms=1, alpha=0.75)
# Plot the pooled data credible regions.
cred_region = np.zeros((2, len(chan_range)))
for i, c in enumerate(chan_range):
logit = pooled_samples['beta_0'] + pooled_samples['beta_1'] * np.log(c)
prob = (1 + np.exp(-logit))**-1
cred_region[:, i] = phd.stats.compute_hpd(prob, 0.95)
_ = prob_ax4.fill_between(
chan_range, cred_region[0, :], cred_region[1, :], color=colors['light_red'], alpha=0.5, label='__nolegend__')
_ = prob_ax4.plot(
chan_range, cred_region[0, :], color=colors['red'], lw=0.75, label='__nolegend__')
_ = prob_ax4.plot(
chan_range, cred_region[1, :], color=colors['red'], lw=0.75, label='__nolegend__')
# Plot the binned pooled data
pooled_data['idx'] = 1
binned = phd.mscl.density_binning(
pooled_data, bin_width=bin_width, groupby='idx', input_key='effective_channels', min_cells=20)
grouped = binned.groupby('bin_number').apply(compute_mean_sem)
_ = prob_ax4.errorbar(grouped['mean_chan'], grouped['p_survival'], xerr=grouped['chan_err'],
yerr=grouped['prob_err'], linestyle='none', lw=0.75, ms=4, marker='o', color=colors['black'],
label='{} channels / bin'.format(bin_width), zorder=100,
markeredgecolor='white', markeredgewidth=0.5)
# Plot the strain bins.
grouped = pooled_data.groupby(['rbs']).apply(compute_mean_sem)
_ = prob_ax4.errorbar(grouped['mean_chan'], grouped['p_survival'], xerr=grouped['chan_err'], yerr=grouped['prob_err'],
color=colors['blue'], lw=0.75, ms=4, marker='o',
label='1 SD mutant / bin', linestyle='none', zorder=100,
markeredgecolor='white', markeredgewidth=0.5)
_leg = prob_ax1.legend(fontsize=8, handlelength=0.75)
plt.subplots_adjust(hspace=0.8)
plt.savefig('../figs/ch9_figS10.pdf', bbox_inches='tight')
plt.savefig('../figs/ch9_figS10.png', bbox_inches='tight')
# %%
flow_rates = np.sort(data['flow_rate'].unique())
# Set up the complicated figure.
fig, ax = plt.subplots(3, 4, figsize=(8, 5), sharex=False, sharey=True)
phd.viz.despine(ax.ravel())
# Properly format and label.
for i in range(4):
ax[-1, i].set_xlabel('effective channel number', fontsize=8)
if i < 3:
ax[i, 0].set_ylabel('survival probability', fontsize=8)
ax = ax.ravel()
for i in range(len(ax) - 5):
ax[i].set_xticklabels([])
ax[7].set_xlabel('effective channel number', fontsize=8)
for a in ax:
a.tick_params(labelsize=8)
a.set_xlim([0, 1000])
a.set_ylim([-0.15, 1.15])
ax[-1].axis('off')
# Iterate through each shock group and plot the prediction, credible region, and bins.
for i, r in enumerate(flow_rates):
# Plot the credible regions.
cred_region = np.zeros((2, len(chan_range)))
for j, c in enumerate(chan_range):
logit = indiv_samples['beta_0__{}'.format(i)] + indiv_samples['beta_1__{}'.format(i)] * np.log(c)
prob = (1 + np.exp(-logit))**-1
cred_region[:, j] = phd.stats.compute_hpd(prob, 0.95)
_ = ax[i].fill_between(chan_range, cred_region[0, :], cred_region[1, :], color=colors['light_red'], alpha=0.5)
_ = ax[i].plot(chan_range, cred_region[0, :], color=colors['light_red'])
_ = ax[i].plot(chan_range, cred_region[1, :], color=colors['light_red'])
_ = ax[i].hlines(1.1, 0, 1E3, color='w', lw=11)
_ = ax[i].hlines(-0.1, 0, 1E3, color='w', lw=11)
# Add the title for the shock rate.
# Isolate the data to be plotted.
_data = data[data['flow_rate']==r]
surv = _data[_data['survival']==True]
death = _data[_data['survival']==False]
# Plot the points.
ys = np.random.normal(loc=1.1, scale=0.01, size=len(surv))
yd = np.random.normal(loc=-0.1, scale=0.01, size=len(death))
_ = ax[i].plot(surv['effective_channels'], ys, 'k.', ms=1, alpha=0.5, label='__nolegend__')
_ = ax[i].plot(death['effective_channels'], yd, 'k.', ms=1, alpha=0.5, label='__nolegend__')
rbs_binned = _data.groupby('rbs').apply(compute_mean_sem)
rbs_df = pd.DataFrame(rbs_binned).reset_index()
_ = ax[i].errorbar(rbs_df['mean_chan'], rbs_df['p_survival'], xerr=rbs_df['chan_err'], yerr=rbs_df['prob_err'],
ms=4, color=colors['blue'], lw=1, linestyle='none', fmt='o', zorder=1000, label='1 SD mutant / bin',
markeredgecolor='white', markeredgewidth=0.5)
if i == 0:
phd.viz.titlebox(ax[i], '{:.3f} Hz; N = {}'.format(r, len(_data)), size=8, color=colors['black'], bgcolor='white',
pad=0.05, boxsize='12%')
else:
phd.viz.titlebox(ax[i], '{:.2f} Hz; N = {}'.format(r, len(_data)), size=8, color=colors['black'], bgcolor='white',
pad=0.05, boxsize='12%')
_leg = ax[0].legend(fontsize=6)
plt.savefig('../figs/ch9_figS11.pdf', bbox_inches='tight')
plt.savefig('../figs/ch9_figS11.png', bbox_inches='tight')
# %%
|
from speakListen import hear
from speakListen import speak
import docx
import fitz
import time
from rich.console import Console # pip3 install Rich
from rich.table import Table
from colorama import Fore
def ms_word():
"""[Print and speak out a ms_word docx file as specified in the path]
"""
# TODO : Take location input from the user
try:
speak("Enter the document's location - ")
location = input("Enter the document's location - ")
file_loc = doubleslash(location)
doc = docx.Document(file_loc)
fullText = []
for para in doc.paragraphs:
fullText.append(para.text)
#print(fullText)
doc_file = '\n'.join(fullText)
print(doc_file)
speak(doc_file)
except Exception as exp:
#print(exp)
print(f"ERROR - {exp}")
print(Fore.YELLOW + "I could'nt locate the file!\nIf you didn't specify the extension of the file, please specify it.")
return "None"
def pdf_read():
"""[Print and speak out the pdf on specified path]
"""
try:
speak("Enter the document's location - ")
location = input("Enter the document's location - ")
path = doubleslash(location)
pdf = fitz.open(path)
details = pdf.metadata # Stores the meta-data which generally includes Author name and Title of book/document.
total_pages = pdf.pageCount # Stores the total number of pages
except Exception as exp:
print(f"ERROR - {exp}")
print(Fore.YELLOW + "I could'nt locate the file!\nIf you didn't specify the extension of the file, please specify it.")
return "None"
    try:
""" 1. Author
2. Creator
3. Producer
4. Title """
author = details["author"]
#print("Author : ",author)
title = details["title"]
#print("Title : ",title)
#print(details)
#print("Total Pages : ",total_pages)
book_details(author, title, total_pages)
speak(f" Title {title}")
speak(f" Author {author}")
speak(f" Total Pages {total_pages}")
# TODO : Deal with the Index
toc = pdf.get_toc()
print("Say 1 or \"ONLY PRINT INDEX\" - if you want me to print the book's index.\nSay 2 if you want me to print and make me speak out the book's index.\nSay any key if you don't want to print the index.'")
speak("Say 1 or only print index if you want me to print the book's index.\nSay 2 if you want me to print and make me speak out the book's index.\nSay any key if you don't want to print the index.'")
q = hear().lower()
if "only print" in q or "1" in q or "one" in q or "vone" in q or 'only' in q or "index only" in q or 'only' in q or "print only" in q:
print_index(toc)
time.sleep(15)
elif "speak" in q or "2" in q or 'two' in q:
print_n_speak_index(toc)
time.sleep(10)
elif q == "None":
print("I could'nt understand what you just said!")
speak("I could'nt understand what you just said!")
time.sleep(4)
else:
time.sleep(4)
pass
"""Allow the user to do the following
1. Read/speak a page
2. Read/speak a range of pages
3. Lesson
4. Read/speak a whole book
"""
#time.sleep(5)
print("____________________________________________________________________________________________________________")
print("1. Print/speak a single page\n2. Print/speak a range of pages\n3. Print/speak a Lesson\n4. Read/speak a whole book")
speak("1. Print/speak a single page\n2. Print/speak a range of pages\n3. Print/speak a Lesson\n4. Read/speak a whole book")
q = hear().lower()
if "single" in q or "one" in q or "vone" in q or "one page" in q or "vone page" in q or "1 page" in q:
try:
pgno = int(input("Page Number - "))
page = pdf.load_page(pgno - 1)
text = page.get_text('text')
print("\n\n")
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
except Exception:
print("Sorry, I could recognize what you entered. Please re-enter the Page Number.")
speak("Sorry, I could recognize what you entered. Please re-enter the Page Number.")
pgno = input("Page no. - ")
page = pdf.load_page(pgno - 1)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
elif 'range' in q or "multiple" in q:
try:
start_pg_no = int(input("Starting Page Number - "))
end_pg_no = int(input("End Page Number - "))
for i in range(start_pg_no - 1, end_pg_no):
page = pdf.load_page(i)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
except Exception:
print("Sorry, I could recognize what you entered. Please re-enter the Page Number.")
speak("Sorry, I could recognize what you entered. Please re-enter the Page Number.")
start_pg_no = int(input("Starting Page Number - "))
end_pg_no = int(input("End Page Number - "))
                for i in range(start_pg_no - 1, end_pg_no):
page = pdf.load_page(i)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
elif 'lesson' in q:
try:
key = input("Lesson name - ")
start_pg_no, end_pg_no = search_in_toc(toc, key, total_pages)
if start_pg_no != None and end_pg_no != None:
start_pg_no, end_pg_no = map(int,search_in_toc(toc, key, total_pages))
for i in range(start_pg_no - 1, end_pg_no):
page = pdf.load_page(i)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
else:
print("Try Again.")
speak("Try Again.")
speak("Lesson name")
key = input("Lesson name - ")
start_pg_no, end_pg_no = map(int,search_in_toc(toc, key, total_pages))
if start_pg_no != None and end_pg_no != None:
for i in range(start_pg_no - 1, end_pg_no):
page = pdf.load_page(i)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
except Exception:
print("Try Again! Lesson could not be found.")
speak("Try Again.Lesson could not be found")
speak("Lesson name")
key = input("Lesson name - ")
start_pg_no, end_pg_no = search_in_toc(toc, key, total_pages)
if start_pg_no != None and end_pg_no != None:
start_pg_no, end_pg_no = map(int,search_in_toc(toc, key, total_pages))
for i in range(start_pg_no - 1, end_pg_no):
page = pdf.load_page(i)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
else:
print("Sorry, I cannot find the perticular lesson.")
speak("Sorry, I cannot find the perticular lesson.")
elif "whole" in q or 'complete' in q:
for i in range(total_pages):
page = pdf.load_page(i)
text = page.get_text('text')
print(text.replace('\t',' '))
speak(text.replace('\t',' '))
elif q == "None":
print("I could'nt understand what you just said!")
speak("I could'nt understand what you just said!")
else:
print("You didn't say a valid command!")
time.sleep(5)
except Exception as e:
print(e)
pass
pdf.close()
def doubleslash(text):
"""Replaces / with //
Args:
text (str): location
Returns:
str: formatted location
"""
return text.replace('\\' , '\\\\')
def print_index(toc):
"""Prints out the index in proper format with title name and page number
Args:
toc (nested list): toc[1] - Topic name
toc[2] - Page number
"""
dash = "-"*(100 - 7)
space = " "*47
print(f"{space}INDEX")
print(f"\n\nName : {dash} PageNo.\n\n\n")
for topic in toc:
eq_dash = "-"*(100 - len(topic[1]))
print(f"{topic[1]} {eq_dash} {topic[2]}")
def print_n_speak_index(toc):
"""Along with printing, it speaks out the index too.
Args:
toc (nested list): toc[1] - Topic name
toc[2] - Page number
"""
dash = "-"*(100 - 7)
space = " "*47
print(f"{space}INDEX")
print(f"\n\nName : {dash} PageNo.\n\n\n\n")
for topic in toc:
eq_dash = "-"*(100 - len(topic[1]))
print(f"{topic[1]} {eq_dash} {topic[2]}")
speak(f"{topic[1]} {topic[2]}")
def search_in_toc(toc, key, totalpg):
"""Searches a particular lesson name provided as a parameter in toc and returns its starting and ending page numbers.
Args:
toc (nested list): toc[1] - Topic name
toc[2] - Page number
key (str): the key to be found
totalpg (int): total pages in book/document
Returns:
        int: starting and ending page numbers of the lesson found.
If not found then return None
"""
for i in range(len(toc) - 1):
topic = toc[i]
if i != len(toc) - 2:
if topic[1] == key:
nexttopic = toc[i + 1]
return (topic[2], nexttopic[2])
elif topic[1].lower() == key:
nexttopic = toc[i + 1]
return (topic[2], nexttopic[2])
else:
if topic[1] == key:
return (topic[2], totalpg)
elif topic[1].lower() == key:
return (topic[2], totalpg)
    return None, None
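# Illustrative lookup (hypothetical toc): with
#     toc = [[1, 'Intro', 1], [1, 'Basics', 10], [1, 'Index', 90]]
# search_in_toc(toc, 'intro', 100) returns (1, 10); for the last searchable
# entry the end page falls back to total_pages, and a miss returns (None, None).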
def book_details(author, title, total_pages):
"""Creates a table of book details like author name, title, and total pages.
Args:
author (str): Name of author
title (str): title of the book
total_pages (int): total pages in the book
"""
table = Table(title="\nBook Details :- ", show_lines = True)
table.add_column("Sr. No.", style="magenta", no_wrap=True)
table.add_column("Property", style="cyan")
table.add_column("Value", justify="left", style="green")
table.add_row("1", "Title", f"{title}")
table.add_row("2", "Author", f"{author}")
table.add_row("3", "Pages", f"{total_pages}")
console = Console()
console.print(table)
#ms_word()
#pdf_read()
#book_details("abc", "abcde", 12)
|
import requests
import json
import time
from collections import OrderedDict
from test_framework.test_framework import OpenBazaarTestFramework, TestFailure
class CompleteModeratedWithTimeout(OpenBazaarTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 3
def run_test(self):
alice = self.nodes[0]
bob = self.nodes[1]
charlie = self.nodes[2]
# generate some coins and send them to bob
time.sleep(4)
api_url = bob["gateway_url"] + "wallet/address"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
address = resp["address"]
elif r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Address endpoint not found")
else:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Unknown response")
self.send_bitcoin_cmd("sendtoaddress", address, 10)
time.sleep(20)
# create a profile for charlie
pro = {"name": "Charlie"}
api_url = charlie["gateway_url"] + "ob/profile"
r = requests.post(api_url, data=json.dumps(pro, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Profile post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Profile POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# make charlie a moderator
with open('testdata/moderation.json') as listing_file:
moderation_json = json.load(listing_file, object_pairs_hook=OrderedDict)
api_url = charlie["gateway_url"] + "ob/moderator"
r = requests.put(api_url, data=json.dumps(moderation_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Moderator post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Moderator POST failed. Reason: %s", resp["reason"])
moderatorId = charlie["peerId"]
time.sleep(4)
# post profile for alice
with open('testdata/profile.json') as profile_file:
profile_json = json.load(profile_file, object_pairs_hook=OrderedDict)
api_url = alice["gateway_url"] + "ob/profile"
requests.post(api_url, data=json.dumps(profile_json, indent=4))
# post listing to alice
with open('testdata/listing.json') as listing_file:
listing_json = json.load(listing_file, object_pairs_hook=OrderedDict)
if self.bitcoincash:
listing_json["metadata"]["pricingCurrency"] = "tbch"
slug = listing_json["slug"]
listing_json["moderators"] = [moderatorId]
listing_json["metadata"]["escrowTimeoutHours"] = 1000
api_url = alice["gateway_url"] + "ob/listing"
r = requests.post(api_url, data=json.dumps(listing_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Listing post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Listing POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
slug = resp["slug"]
time.sleep(4)
# get listing hash
api_url = alice["gateway_url"] + "ob/listings/" + alice["peerId"]
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't get listing index")
resp = json.loads(r.text)
listingId = resp[0]["hash"]
# bob send order
with open('testdata/order_direct.json') as order_file:
order_json = json.load(order_file, object_pairs_hook=OrderedDict)
order_json["items"][0]["listingHash"] = listingId
order_json["moderator"] = moderatorId
api_url = bob["gateway_url"] + "ob/purchase"
r = requests.post(api_url, data=json.dumps(order_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Purchase post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
self.print_logs(alice, "ob.log")
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Purchase POST failed. Reason: %s", resp["reason"])
resp = json.loads(r.text)
orderId = resp["orderId"]
payment_address = resp["paymentAddress"]
payment_amount = resp["amount"]
# check the purchase saved correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob incorrectly saved as funded")
# check the sale saved correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_PAYMENT":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice purchase saved in incorrect state")
if resp["funded"] == True:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice incorrectly saved as funded")
# fund order
spend = {
"address": payment_address,
"amount": payment_amount,
"feeLevel": "NORMAL"
}
api_url = bob["gateway_url"] + "wallet/spend"
r = requests.post(api_url, data=json.dumps(spend, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Spend post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Spend POST failed. Reason: %s", resp["reason"])
time.sleep(20)
# check bob detected payment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob failed to detect his payment")
if resp["funded"] == False:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob incorrectly saved as unfunded")
# check alice detected payment
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "AWAITING_FULFILLMENT":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to detect payment")
if resp["funded"] == False:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice incorrectly saved as unfunded")
# alice send order fulfillment
with open('testdata/fulfillment.json') as fulfillment_file:
fulfillment_json = json.load(fulfillment_file, object_pairs_hook=OrderedDict)
fulfillment_json["slug"] = slug
fulfillment_json["orderId"] = orderId
api_url = alice["gateway_url"] + "ob/orderfulfillment"
r = requests.post(api_url, data=json.dumps(fulfillment_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Fulfillment post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Fulfillment POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# check bob received fulfillment
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "FULFILLED":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob failed to detect order fulfillment")
# check alice set fulfillment correctly
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "FULFILLED":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to order fulfillment")
# bob send order completion
with open('testdata/completion.json') as completion_file:
completion_json = json.load(completion_file, object_pairs_hook=OrderedDict)
completion_json["orderId"] = orderId
completion_json["ratings"][0]["slug"] = slug
api_url = bob["gateway_url"] + "ob/ordercompletion"
r = requests.post(api_url, data=json.dumps(completion_json, indent=4))
if r.status_code == 404:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Completion post endpoint not found")
elif r.status_code != 200:
resp = json.loads(r.text)
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Completion POST failed. Reason: %s", resp["reason"])
time.sleep(4)
# check alice received completion
api_url = alice["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Alice")
resp = json.loads(r.text)
if resp["state"] != "COMPLETED":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to detect order completion")
# check bob set completion correctly
api_url = bob["gateway_url"] + "ob/order/" + orderId
r = requests.get(api_url)
if r.status_code != 200:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Couldn't load order from Bob")
resp = json.loads(r.text)
if resp["state"] != "COMPLETED":
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Bob failed to order completion")
self.send_bitcoin_cmd("generate", 1)
time.sleep(2)
# Check the funds moved into alice's wallet
api_url = alice["gateway_url"] + "wallet/balance"
r = requests.get(api_url)
if r.status_code == 200:
resp = json.loads(r.text)
confirmed = int(resp["confirmed"])
if confirmed <= 0:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Alice failed to receive the multisig payout")
else:
raise TestFailure("CompleteModeratedWithTimeout - FAIL: Failed to query Alice's balance")
print("CompleteModeratedWithTimeout - PASS")
if __name__ == '__main__':
print("Running CompleteModeratedWithTimeout")
CompleteModeratedWithTimeout().main(["--regtest", "--disableexchangerates"])
|
# Copyright 2016, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Error codes
These are the helper functions that will emit the error exit codes. They
can abstractly check conditions or values directly. The release of statement
temporaries from context is automatic.
Also formatting errors is done here, avoiding PyErr_Format as much as
possible.
And releasing of values, as this is what the error case commonly does.
"""
from nuitka import Options
from nuitka.PythonVersions import python_version
from .ExceptionCodes import getExceptionIdentifier
from .Indentation import getCommentCode, indented
from .LineNumberCodes import getErrorLineNumberUpdateCode
from .templates.CodeTemplatesExceptions import (
template_error_catch_exception,
template_error_catch_quick_exception,
template_error_format_string_exception
)
def getErrorExitReleaseCode(context):
temp_release = '\n'.join(
"Py_DECREF( %s );" % tmp_name
for tmp_name in
context.getCleanupTempnames()
)
keeper_variables = context.getExceptionKeeperVariables()
if keeper_variables[0] is not None:
temp_release += "\nPy_DECREF( %s );" % keeper_variables[0]
temp_release += "\nPy_XDECREF( %s );" % keeper_variables[1]
temp_release += "\nPy_XDECREF( %s );" % keeper_variables[2]
return temp_release
def getErrorExitBoolCode(condition, emit, context,
needs_check = True, quick_exception = None):
assert not condition.endswith(';')
if not needs_check:
getAssertionCode("!(%s)" % condition, emit)
return
context.markAsNeedsExceptionVariables()
if quick_exception:
emit(
indented(
template_error_catch_quick_exception % {
"condition" : condition,
"exception_exit" : context.getExceptionEscape(),
"quick_exception" : getExceptionIdentifier(quick_exception),
"release_temps" : indented(
getErrorExitReleaseCode(context)
),
"line_number_code" : indented(
getErrorLineNumberUpdateCode(context)
)
},
0
)
)
else:
emit(
indented(
template_error_catch_exception % {
"condition" : condition,
"exception_exit" : context.getExceptionEscape(),
"release_temps" : indented(
getErrorExitReleaseCode(context)
),
"line_number_code" : indented(
getErrorLineNumberUpdateCode(context)
)
},
0
)
)
def getErrorExitCode(check_name, emit, context, quick_exception = None, needs_check = True):
if needs_check:
getErrorExitBoolCode(
condition = "%s == NULL" % check_name,
quick_exception = quick_exception,
emit = emit,
context = context
)
else:
getAssertionCode("%s != NULL" % check_name, emit)
def getErrorFormatExitBoolCode(condition, exception, args, emit, context):
assert not condition.endswith(';')
context.markAsNeedsExceptionVariables()
if len(args) == 1 and type(args[0]) is str:
from .ConstantCodes import getModuleConstantCode
set_exception = [
"exception_type = %s;" % exception,
"Py_INCREF( exception_type );",
"exception_value = %s;" % getModuleConstantCode(
constant = args[0],
),
"exception_tb = NULL;"
]
else:
set_exception = [
"exception_type = %s;" % exception,
"Py_INCREF( exception_type );",
"exception_value = Py%s_FromFormat( %s );" % (
"String" if python_version < 300 else "Unicode",
", ".join( '"%s"' % arg for arg in args )
),
"exception_tb = NULL;"
]
if python_version >= 300:
keeper_vars = context.getExceptionKeeperVariables()
if keeper_vars[0] is not None:
set_exception.append(
"ADD_EXCEPTION_CONTEXT( &%s, &%s );" % (
keeper_vars[0],
keeper_vars[1]
)
)
else:
set_exception.append(
"NORMALIZE_EXCEPTION( &exception_type, &exception_value, &exception_tb );"
)
set_exception.append(
"CHAIN_EXCEPTION( exception_value );"
)
emit(
template_error_format_string_exception % {
"condition" : condition,
"exception_exit" : context.getExceptionEscape(),
"set_exception" : indented(set_exception),
"release_temps" : indented(
getErrorExitReleaseCode(context)
),
"line_number_code" : indented(
getErrorLineNumberUpdateCode(context)
)
}
)
def getErrorVariableDeclarations():
return (
"PyObject *exception_type = NULL, *exception_value = NULL;",
"PyTracebackObject *exception_tb = NULL;",
"NUITKA_MAY_BE_UNUSED int exception_lineno = -1;"
)
def getExceptionKeeperVariableNames(keeper_index):
# For finally handlers of Python3, which have conditions on assign and
# use.
debug = Options.isDebug() and python_version >= 300
if debug:
keeper_obj_init = " = NULL"
else:
keeper_obj_init = ""
return (
"PyObject *exception_keeper_type_%d%s;" % (
keeper_index,
keeper_obj_init
),
"PyObject *exception_keeper_value_%d%s;" % (
keeper_index,
keeper_obj_init
),
"PyTracebackObject *exception_keeper_tb_%d%s;" % (
keeper_index,
keeper_obj_init
),
"NUITKA_MAY_BE_UNUSED int exception_keeper_lineno_%d%s;" % (
keeper_index,
" = -1" if debug else ""
)
)
def getExceptionPreserverVariableNames(preserver_id):
# For finally handlers of Python3, which have conditions on assign and
# use.
debug = Options.isDebug() and python_version >= 300
if debug:
preserver_obj_init = " = NULL"
else:
preserver_obj_init = ""
return (
"PyObject *exception_preserved_type_%d%s;" % (
preserver_id,
preserver_obj_init
),
"PyObject *exception_preserved_value_%d%s;" % (
preserver_id,
preserver_obj_init
),
"PyTracebackObject *exception_preserved_tb_%d%s;" % (
preserver_id,
preserver_obj_init
),
)
def getErrorFormatExitCode(check_name, exception, args, emit, context):
getErrorFormatExitBoolCode(
condition = "%s == NULL" % check_name,
exception = exception,
args = args,
emit = emit,
context = context
)
def getReleaseCode(release_name, emit, context):
assert release_name is None or len(release_name) > 2
if context.needsCleanup(release_name):
emit("Py_DECREF( %s );" % release_name)
context.removeCleanupTempName(release_name)
def getReleaseCodes(release_names, emit, context):
for release_name in release_names:
getReleaseCode(
release_name = release_name,
emit = emit,
context = context
)
def getMustNotGetHereCode(reason, context, emit):
getCommentCode(reason, emit)
provider = context.getOwner()
emit(
"NUITKA_CANNOT_GET_HERE( %(function_identifier)s );" % {
"function_identifier" : provider.getCodeName()
}
)
if provider.isExpressionGeneratorObjectBody():
emit("return;")
elif provider.isExpressionCoroutineObjectBody():
emit("return;")
elif provider.isCompiledPythonModule():
emit("return MOD_RETURN_VALUE( NULL );")
else:
emit("return NULL;")
def getAssertionCode(check, emit):
emit("assert( %s );" % check)
|
import os
import simtk.openmm as mm
class CheckpointReporter():
    '''
    CheckpointReporter saves periodic checkpoints of a simulation.
    Checkpoints overwrite old files -- only the latest three are kept.
    State XML files can be saved alongside the checkpoints, in case the binary
    checkpoint files turn out to be unusable.
    Parameters
    ----------
    file : string
        The base name of the file to write to. The current step number is
        appended to the name, and only the latest three checkpoints are kept.
    reportInterval : int
        The interval (in time steps) at which to write checkpoints.
    xml : string, optional
        If provided, the state is also serialized to XML and saved next to the
        checkpoint. The step is appended to this name too, and only the latest
        three XML files are kept.
    '''
def __init__(self, file, reportInterval, xml=None):
self._reportInterval = reportInterval
self._file = file
self._xml = xml
        if not isinstance(file, str):
            raise TypeError('file should be a str')
def describeNextReport(self, simulation):
"""Get information about the next report this object will generate.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
Returns
-------
tuple
            A six element tuple. The first element is the number of steps
            until the next report. The next four elements specify whether
            that report will require positions, velocities, forces, and
            energies respectively. The final element specifies whether
            positions should be wrapped to the periodic box.
"""
steps = self._reportInterval - simulation.currentStep % self._reportInterval
return (steps, True, True, False, False, False)
def report(self, simulation, state):
"""Generate a report.
Parameters
----------
simulation : Simulation
The Simulation to generate a report for
state : State
The current state of the simulation
"""
filename = self._file + '_%i' % simulation.currentStep
with open(filename, 'wb') as out:
out.write(simulation.context.createCheckpoint())
file_prev3 = self._file + '_%i' % (simulation.currentStep - 3 * self._reportInterval)
if os.path.exists(file_prev3):
os.remove(file_prev3)
if self._xml is not None:
xml_name = self._xml + '_%i' % simulation.currentStep
xml = mm.XmlSerializer.serialize(state)
with open(xml_name, 'w') as f:
f.write(xml)
xml_prev3 = self._xml + '_%i' % (simulation.currentStep - 3 * self._reportInterval)
if os.path.exists(xml_prev3):
os.remove(xml_prev3)
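# --- Illustrative usage (a sketch, not part of the reporter itself) ---
# A minimal, self-contained OpenMM setup: a single free particle, just enough
# to show the reporter being attached and producing rolling checkpoint files.
# The file names and interval below are placeholders chosen for the demo.
if __name__ == '__main__':
    import simtk.unit as unit
    from simtk.openmm import app
    from simtk.openmm.app import element
    system = mm.System()
    system.addParticle(1.0 * unit.amu)
    topology = app.Topology()
    top_chain = topology.addChain()
    top_residue = topology.addResidue('X', top_chain)
    topology.addAtom('X', element.hydrogen, top_residue)
    integrator = mm.VerletIntegrator(1.0 * unit.femtoseconds)
    simulation = app.Simulation(topology, system, integrator)
    simulation.context.setPositions(unit.Quantity([mm.Vec3(0.0, 0.0, 0.0)], unit.nanometers))
    # Write a checkpoint (and a state XML) every 100 steps; only the latest
    # three files of each kind are kept on disk.
    simulation.reporters.append(CheckpointReporter('demo_checkpoint', 100, xml='demo_state'))
    simulation.step(500)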
|
from scipy.fft import fft
import numpy as np
def estimate_broadband_noise(signal, TR, low_freq_cutoff = 0.2):
    '''Estimate broadband noise of 1/f-like BOLD data
    This function calculates the Fourier transform of the input signals,
    and then estimates the average broadband power as the median magnitude
    of the spectral components above low_freq_cutoff. This simple scheme is
    based on the observations that (1) the classic 1/f component of the BOLD
    signal is largely gone above ~0.2 Hz, and (2) the median is used instead
    of the mean as a cheap way to suppress narrow-band noise components that
    can crop up from breathing or cardiac effects - a more principled
    approach would handle those explicitly.
    Parameters
    ----------
    signal : numpy.ndarray
        numpy array with shape <n_regions, n_timepoints>
    TR : float
        repetition time in seconds
    low_freq_cutoff : float, default = 0.2
        only use frequency content above
        this threshold (specified in Hz)
    Returns
    -------
    broadband_pow : numpy.ndarray
        the median magnitude of the FFT (the amplitude spectrum, not squared
        power) in the broadband range, one value per region
    '''
regions = signal.shape[0]
N = signal.shape[1]
broadband_pow = np.zeros(regions)
T = TR
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)
broadband_inds = np.where(xf >= low_freq_cutoff)[0]
for i in range(regions):
yf = np.abs(fft(signal[i,:]))
broadband_pow[i] = np.median(yf[broadband_inds])
return broadband_pow
def calc_tsnr(data_array, defined_timepoints = None):
'''Calculates temporal signal to noise ratio
Function to calculate the temporal signal to noise
ratio (mean/std) for a region or series of regions.
Unless you have a good reason for doing otherwise,
this should probably be done prior to any denoising.
Parameters
----------
data_array : numpy.ndarray
data array shape <num_regions, num_timepoints>
defined_timepoints : list, or None
        if None, all timepoints will be used; otherwise
        only the specified timepoints will be used
Returns
-------
tsnr : numpy.ndarray
numpy array with tsnr for each region
'''
    if defined_timepoints is not None:
means = np.mean(data_array[:,defined_timepoints],axis=1)
stds = np.std(data_array[:,defined_timepoints], axis=1)
else:
means = np.mean(data_array,axis=1)
stds = np.std(data_array, axis=1)
return means/stds
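# --- Illustrative usage (a sketch with synthetic data; the shapes, TR and
# seed below are made up for the demo) ---
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    n_regions, n_timepoints, tr = 5, 400, 0.8
    # White noise around a baseline of 100, standing in for regional BOLD data.
    data = rng.standard_normal((n_regions, n_timepoints)) + 100.0
    print('broadband noise per region:', estimate_broadband_noise(data, tr))
    print('tSNR per region:', calc_tsnr(data))
    print('tSNR, first half only:', calc_tsnr(data, defined_timepoints=list(range(200))))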
|
#
# This file is part of Brazil Data Cube Collection Builder.
# Copyright (C) 2019-2020 INPE.
#
# Brazil Data Cube Collection Builder is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
#
"""Describe Celery task handling for Landsat products."""
# Python Native
import logging
import os
import subprocess
from datetime import datetime
# 3rdparty
from botocore.exceptions import EndpointConnectionError
from sqlalchemy.exc import InvalidRequestError
from urllib3.exceptions import NewConnectionError, MaxRetryError
# Builder
from ...celery import celery_app
from ...config import Config
from ...db import db_aws
from ..base_task import RadcorTask
from ..utils import refresh_assets_view, remove_file, upload_file
from .download import download_landsat_images
from .google import download_from_google
from .harmonization import landsat_harmonize
from .publish import publish
from .utils import LandsatProduct, factory
def is_valid_tar_gz(file_path: str):
"""Check tar file integrity."""
try:
retcode = subprocess.call(['gunzip', '-t', file_path])
return retcode == 0
except BaseException:
return False
class LandsatTask(RadcorTask):
"""Define abstraction of Landsat-8 - DN and SR products."""
def get_tile_id(self, scene_id, **kwargs):
"""Retrieve tile from sceneid."""
fragments = scene_id.split('_')
return fragments[2]
def get_tile_date(self, scene_id, **kwargs):
"""Retrieve tile date from sceneid."""
fragments = scene_id.split('_')
return datetime.strptime(fragments[3], '%Y%m%d')
def download(self, scene):
"""Perform download landsat image from USGS.
Args:
scene (dict) - Scene containing activity
Returns:
dict Scene with landsat compressed file
"""
# Create/Update activity
activity_history = self.create_execution(scene)
try:
scene_id = scene['sceneid']
# Get Landsat collection handler
landsat_scene = factory.get_from_sceneid(scene_id, level=1)
activity_args = scene.get('args', {})
collection_item = self.get_collection_item(activity_history.activity)
# Output product dir
productdir = landsat_scene.compressed_file().parent
productdir.mkdir(parents=True, exist_ok=True)
digital_number_file = landsat_scene.compressed_file()
valid = False
# When file exists, check persistence
if digital_number_file.exists() and digital_number_file.is_file():
logging.info('File {} downloaded. Checking file integrity...'.format(str(digital_number_file)))
# Check Landsat 8 tar gz is valid
valid = is_valid_tar_gz(str(digital_number_file))
file = str(digital_number_file)
if not valid:
# Ensure file is removed since it may be corrupted
remove_file(str(digital_number_file))
try:
# Download from google
logging.info('Download Landsat {} -> e={} v={} from Google...'.format(
scene_id, digital_number_file.exists(), valid)
)
file, link = download_from_google(scene_id, str(productdir))
activity_args['provider'] = link
except BaseException:
logging.info('Download Landsat {} from USGS...'.format(scene_id))
file = download_landsat_images(activity_args['link'], productdir)
activity_args['provider'] = activity_args['link']
else:
logging.warning('File {} is valid. Skipping'.format(str(digital_number_file)))
collection_item.compressed_file = str(file).replace(Config.DATA_DIR, '')
cloud = activity_args.get('cloud')
if cloud:
collection_item.cloud_cover = cloud
activity_args['file'] = str(file)
except BaseException as e:
logging.error('An error occurred during task execution - {}'.format(activity_history.activity_id),
exc_info=True)
raise e
collection_item.save()
scene['args'] = activity_args
# Create new activity 'correctionLC8' to continue task chain
scene['activity_type'] = 'correctionLC8'
return scene
def publish(self, scene):
"""Publish and persist collection on database.
Args:
scene - Serialized Activity
"""
scene['activity_type'] = 'publishLC8'
# Create/Update activity
activity_history = self.create_execution(scene)
# Get collection level to publish. Default is l1
        # TODO: Check the database for scenes already published at level 2; those must be set to level 2.
collection_level = scene['args'].get('level') or 1
landsat_scene = factory.get_from_sceneid(scene['sceneid'], level=collection_level)
try:
assets = publish(self.get_collection_item(activity_history.activity), activity_history.activity)
except InvalidRequestError as e:
# Error related with Transaction on AWS
            # TODO: Does this also occur on a local instance?
logging.error("Transaction Error on activity - {}".format(activity_history.activity_id), exc_info=True)
db_aws.session.rollback()
raise e
except BaseException as e:
logging.error("An error occurred during task execution - {}".format(activity_history.activity_id),
exc_info=True)
raise e
scene['activity_type'] = 'uploadLC8'
scene['args']['assets'] = assets
# Refresh for everything except for L1
if landsat_scene.level > 1:
refresh_assets_view()
return scene
def upload(self, scene):
"""Upload collection to AWS.
        Make sure `AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY` and
        `AWS_REGION_NAME` are set in `bdc_collection_builder.config.Config`.
Args:
scene - Serialized Activity
"""
scene['activity_type'] = 'uploadLC8'
# Create/Update activity
self.create_execution(scene)
assets = scene['args']['assets']
for entry in assets.values():
file_without_prefix = entry['asset'].replace('{}/'.format(Config.AWS_BUCKET_NAME), '')
upload_file(entry['file'], Config.AWS_BUCKET_NAME, file_without_prefix)
@staticmethod
def espa_done(scene: LandsatProduct):
"""Check espa-science has executed successfully."""
fs = scene.get_files()
return len(fs) > 0
def correction(self, scene):
"""Apply atmospheric correction on collection.
Args:
scene - Serialized Activity
"""
import subprocess
import tarfile
scene['activity_type'] = 'correctionLC8'
scene_id = scene['sceneid']
# Get Resolver for Landsat scene level 2
landsat_scene = factory.get_from_sceneid(scene_id, level=2)
landsat_scene_level_1 = factory.get_from_sceneid(scene_id, level=1)
scene['collection_id'] = landsat_scene.id
# Create/Update activity
execution = self.create_execution(scene)
try:
params = dict(
app=scene['activity_type'],
sceneid=scene['sceneid'],
file=scene['args']['file']
)
output_path = landsat_scene.path()
output_path.mkdir(exist_ok=True, parents=True)
input_dir = landsat_scene_level_1.compressed_file().parent
with tarfile.open(scene['args']['file']) as compressed_file:
# Extracting to temp directory
compressed_file.extractall(landsat_scene_level_1.compressed_file().parent)
cmd = 'run_lasrc_ledaps_fmask.sh {}'.format(landsat_scene_level_1.scene_id)
logging.warning('cmd {}'.format(cmd))
env = dict(**os.environ, INDIR=str(input_dir), OUTDIR=str(output_path))
process = subprocess.Popen(cmd, shell=True, env=env, stdin=subprocess.PIPE)
process.wait()
assert process.returncode == 0
pathrow = landsat_scene.tile_id()
params['pathrow'] = pathrow
# Product dir
productdir = landsat_scene.path()
logging.info('Checking for the ESPA generated files in {}'.format(productdir))
if not LandsatTask.espa_done(landsat_scene):
raise RuntimeError('Error in atmospheric correction')
scene['args']['file'] = str(productdir)
except BaseException as e:
logging.error('Error at correction Landsat {}, id={} - {}'.format(scene_id, execution.activity_id, str(e)))
raise e
finally:
# Remove extracted files
for f in landsat_scene_level_1.compressed_file_bands():
if f.exists():
f.unlink()
scene['activity_type'] = 'publishLC8'
scene['args']['level'] = landsat_scene.level
return scene
def harmonize(self, scene):
"""Apply Harmonization on Landsat collection.
Args:
scene - Serialized Activity
"""
# Set Collection Level 3 - BDC
scene['args']['level'] = 3
landsat_scene = factory.get_from_sceneid(scene['sceneid'], level=scene['args']['level'])
# Set Collection to the Landsat NBAR (Nadir BRDF Adjusted Reflectance)
scene['collection_id'] = landsat_scene.id
scene['activity_type'] = 'harmonizeLC8'
# Create/Update activity
activity_history = self.create_execution(scene)
logging.debug('Starting Harmonization Landsat...')
activity_history.activity.activity_type = 'harmonizeLC8'
activity_history.start = datetime.utcnow()
activity_history.save()
try:
# Get ESPA output dir
harmonized_dir = landsat_harmonize(self.get_collection_item(activity_history.activity), activity_history.activity)
scene['args']['file'] = harmonized_dir
except BaseException as e:
logging.error('Error at Harmonize Landsat {}'.format(e))
raise e
scene['activity_type'] = 'publishLC8'
return scene
@celery_app.task(base=LandsatTask,
queue='download',
max_retries=72,
autoretry_for=(NewConnectionError, MaxRetryError),
default_retry_delay=Config.TASK_RETRY_DELAY)
def download_landsat(scene):
"""Represent a celery task definition for handling Landsat-8 Download files.
This celery tasks listen only for queues 'download'.
It also retries following errors occurs:
- NewConnectionError, MaxRetryError Internet Connection Problem
Args:
scene (dict): Radcor Activity
Returns:
Returns processed activity
"""
return download_landsat.download(scene)
@celery_app.task(base=LandsatTask, queue='atm-correction')
def atm_correction_landsat(scene):
"""Represent a celery task definition for handling Landsat Atmospheric correction - sen2cor.
This celery tasks listen only for queues 'atm-correction'.
Args:
scene (dict): Radcor Activity with "correctionLC8" app context
Returns:
Returns processed activity
"""
return atm_correction_landsat.correction(scene)
@celery_app.task(base=LandsatTask,
queue='publish',
max_retries=3,
autoretry_for=(InvalidRequestError,),
default_retry_delay=Config.TASK_RETRY_DELAY)
def publish_landsat(scene):
"""Represent a celery task definition for handling Landsat Publish TIFF files generation.
This celery tasks listen only for queues 'publish'.
It also retries following errors occurs:
- InvalidRequestError Error related with transaction error on multiple access to database.
Args:
scene (dict): Radcor Activity with "publishLC8" app context
Returns:
Returns processed activity
"""
return publish_landsat.publish(scene)
@celery_app.task(base=LandsatTask,
queue='upload',
max_retries=3,
                 autoretry_for=(EndpointConnectionError, NewConnectionError,),
default_retry_delay=Config.TASK_RETRY_DELAY)
def upload_landsat(scene):
"""Represent a celery task definition for handling Landsat8 Upload TIFF to AWS.
This celery tasks listen only for queues 'uploadLC8'.
Args:
scene (dict): Radcor Activity with "uploadLC8" app context
"""
upload_landsat.upload(scene)
@celery_app.task(base=LandsatTask, queue='harmonization')
def harmonization_landsat(scene):
"""Represent a celery task definition for harmonizing Landsat8.
This celery tasks listen only for queues 'harmonizeLC8'.
Args:
scene (dict): Radcor Activity with "harmonizeLC8" app context
"""
return harmonization_landsat.harmonize(scene)
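# --- Illustrative composition (a sketch, not part of the builder itself) ---
# Each task above returns the mutated scene dict, so the stages can be chained
# with Celery so that every step feeds the next. The scene payload below is a
# placeholder; real activities are created by the radcor dispatcher.
#
#   from celery import chain
#   chain(
#       download_landsat.s({'sceneid': 'LC08_L1TP_220069_20190101_20190130_01_T1',
#                           'args': {'link': '<usgs-download-url>'}}),
#       atm_correction_landsat.s(),
#       publish_landsat.s(),
#       upload_landsat.s(),
#   ).apply_async()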
|
import pytest
from tartiflette.language.ast import InterfaceTypeExtensionNode
def test_interfacetypeextensionnode__init__():
interface_type_extension_node = InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
)
assert interface_type_extension_node.name == "interfaceTypeExtensionName"
assert (
interface_type_extension_node.directives
== "interfaceTypeExtensionDirectives"
)
assert (
interface_type_extension_node.fields == "interfaceTypeExtensionFields"
)
assert (
interface_type_extension_node.location
== "interfaceTypeExtensionLocation"
)
@pytest.mark.parametrize(
"interface_type_extension_node,other,expected",
[
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
Ellipsis,
False,
),
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionNameBis",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
False,
),
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectivesBis",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
False,
),
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFieldsBis",
location="interfaceTypeExtensionLocation",
),
False,
),
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocationBis",
),
False,
),
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
True,
),
],
)
def test_interfacetypeextensionnode__eq__(
interface_type_extension_node, other, expected
):
assert (interface_type_extension_node == other) is expected
@pytest.mark.parametrize(
"interface_type_extension_node,expected",
[
(
InterfaceTypeExtensionNode(
name="interfaceTypeExtensionName",
directives="interfaceTypeExtensionDirectives",
fields="interfaceTypeExtensionFields",
location="interfaceTypeExtensionLocation",
),
"InterfaceTypeExtensionNode("
"name='interfaceTypeExtensionName', "
"directives='interfaceTypeExtensionDirectives', "
"fields='interfaceTypeExtensionFields', "
"location='interfaceTypeExtensionLocation')",
)
],
)
def test_interfacetypeextensionnode__repr__(
interface_type_extension_node, expected
):
    assert repr(interface_type_extension_node) == expected
|
from __future__ import absolute_import
import pytest as _pytest
from mock import patch as _patch, MagicMock as _MagicMock
from flytekit.common.exceptions import user as _user_exceptions
from flytekit.common.tasks import task as _task
from flytekit.models import task as _task_models
from flytekit.models.core import identifier as _identifier
@_patch("flytekit.engines.loader.get_engine")
def test_fetch_latest(mock_get_engine):
admin_task = _task_models.Task(
_identifier.Identifier(_identifier.ResourceType.TASK, "p1", "d1", "n1", "v1"),
_MagicMock(),
)
mock_engine = _MagicMock()
mock_engine.fetch_latest_task = _MagicMock(
return_value=admin_task
)
mock_get_engine.return_value = mock_engine
task = _task.SdkTask.fetch_latest("p1", "d1", "n1")
assert task.id == admin_task.id
@_patch("flytekit.engines.loader.get_engine")
def test_fetch_latest_not_exist(mock_get_engine):
mock_engine = _MagicMock()
mock_engine.fetch_latest_task = _MagicMock(
return_value=None
)
mock_get_engine.return_value = mock_engine
with _pytest.raises(_user_exceptions.FlyteEntityNotExistException):
_task.SdkTask.fetch_latest("p1", "d1", "n1")
|
# -*- coding: UTF-8 -*-
from ZUI_MDP_solution import *
from unittest import TestCase
import itertools as it
import warnings
import numpy as np
import numpy.testing as nptest
# Taken from http://www.neuraldump.net/2017/06/how-to-suppress-python-unittest-warnings/.
def ignore_warnings(test_func):
def do_test(self, *args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
test_func(self, *args, **kwargs)
return do_test
class TestGridWorld2x2(TestCase):
rtol = 1e-4 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('2x2')
def test_is_obstacle_at(self):
self.assertFalse(self.gw._is_obstacle([0, 0]))
self.assertFalse(self.gw._is_obstacle([0, 1]))
self.assertFalse(self.gw._is_obstacle([1, 0]))
self.assertFalse(self.gw._is_obstacle([1, 1]))
def test_is_on_grid_true(self):
self.assertTrue(self.gw._is_on_grid([0, 0]),msg='The point [{},{}] should be on the grid.'.format(0, 0))
self.assertTrue(self.gw._is_on_grid([1, 1]), msg='The point [{},{}] should be on the grid.'.format(1, 1))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [2,0], [0,2], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_2x2_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q_from_V_zeros(self):
V = np.zeros((self.gw.n_states + 1,))
desired_Q = np.array([[-0.04, -0.04, -0.04, -0.04],
[1., 1., 1., 1.],
[-0.04, -0.04, -0.04, -0.04],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q_from_V_ones(self):
V = np.ones((self.gw.n_states + 1,))
desired_Q = np.array([[0.96, 0.96, 0.96, 0.96],
[2., 2., 2., 2.],
[0.96, 0.96, 0.96, 0.96],
[0., 0., 0., 0.],
[1., 1., 1., 1.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q_from_V_init(self):
V = np.max(self.gw.rewards,axis=1)
desired_Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
nptest.assert_allclose(self.gw.Q_from_V(V=V), desired_Q, rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.array([0.752, 1., -0.08, -1., 0.])
Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.array([0.752, 1., -0.08, -1., 0.])
Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.array([0.9178081, 1., 0.66027364, -1., 0., ])
Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
policy = np.array([1, 0, 0, 0, 0], dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q,policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_policy = np.array([1, 0, 0, 0, 0], dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.array([[0.024, 0.752, 0.024, -0.08],
[1., 1., 1., 1.],
[-0.176, -0.848, -0.176, -0.08],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.array([[0.88602712, 0.9178081, 0.67999927, 0.85205443],
[1., 1., 1., 1.],
[0.66027364, -0.6821919, 0.45424578, 0.64602658],
[-1., -1., -1., -1.],
[0., 0., 0., 0.]])
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld3x3(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('3x3')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [3,0], [0,3], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_3x3_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_3x3_V_single_iter.npy')
Q = np.load('./test_data/test_gw_3x3_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_3x3_V.npy')
Q = np.load('./test_data/test_gw_3x3_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_3x3_V.npy')
Q = np.load('./test_data/test_gw_3x3_Q.npy')
policy = np.array([1, 1, 0, 0, 3, 0, 0, 3, 2, 0],dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_policy = np.array([1, 1, 0, 0, 3, 0, 0, 3, 2, 0],dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_3x3_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_3x3_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld3x4(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('3x4')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if i == 1 and j == 1:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
self.assertTrue(self.gw._is_obstacle([1, 1]), msg='An obstacle should be at [{},{}].'.format(1, 1))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [3,0], [0,4], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_3x4_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_3x4_V_single_iter.npy')
Q = np.load('./test_data/test_gw_3x4_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_3x4_V.npy')
Q = np.load('./test_data/test_gw_3x4_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_policy = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0],dtype=int)
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_3x4_V.npy')
Q = np.load('./test_data/test_gw_3x4_Q.npy')
policy = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0, 3, 3, 3, 0],dtype=int)
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_3x4_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_3x4_Q.npy')
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_3x4_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol)
class TestGridWorld4x4(TestCase):
rtol = 1e-3 # relative tolerance for comparing two floats
atol = 1e-08 # absolute tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('4x4')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if (i,j) in [(1,1),(2,2)]:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
        self.assertTrue(self.gw._is_obstacle([1, 1]), msg='An obstacle should be at [{},{}].'.format(1, 1))
        self.assertTrue(self.gw._is_obstacle([2, 2]), msg='An obstacle should be at [{},{}].'.format(2, 2))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [4,0], [0,4], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_4x4_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol, atol=self.atol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_4x4_V_single_iter.npy')
Q = np.load('./test_data/test_gw_4x4_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_4x4_V.npy')
Q = np.load('./test_data/test_gw_4x4_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_4x4_V.npy')
Q = np.load('./test_data/test_gw_4x4_Q.npy')
policy = np.load('./test_data/test_gw_4x4_policy.npy')
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_policy = np.load('./test_data/test_gw_4x4_policy.npy')
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_4x4_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_4x4_Q.npy')
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol, atol=self.atol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_4x4_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
class TestGridWorld5x5(TestCase):
rtol = 1e-4 # relative tolerance for comparing two floats
atol = 1e-08 # absolute tolerance for comparing two floats
def setUp(self):
self.gw = GridWorld.get_world('5x5')
def test_is_obstacle_at(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
if (i,j) in [(1,0), (1,1), (2,2)]:
continue
self.assertFalse(self.gw._is_obstacle([i, j]), msg='No obstacle should be at [{},{}].'.format(i,j))
        for i, j in [(1, 0), (1, 1), (2, 2)]:
            self.assertTrue(self.gw._is_obstacle([i, j]), msg='An obstacle should be at [{},{}].'.format(i, j))
def test_is_on_grid_true(self):
for i,j in it.product(range(self.gw.n_rows),range(self.gw.n_columns)):
self.assertTrue(self.gw._is_on_grid([i, j]),msg='The point [{},{}] should be on the grid.'.format(i, j))
def test_is_on_grid_false(self):
for point in ([-1,0], [-2,-2], [5,0], [0,5], [5,5], [0,-1]):
self.assertFalse(self.gw._is_on_grid(point),msg='The point [{}] should not be on the grid.'.format(point))
def test_transition_proba(self):
true_transition_proba = np.load('./test_data/test_gw_5x5_transition_proba.npy')
nptest.assert_allclose(self.gw.transition_proba,true_transition_proba,rtol=self.rtol, atol=self.atol)
def test_Q2V_single(self):
desired_V = np.load('./test_data/test_gw_5x5_V_single_iter.npy')
Q = np.load('./test_data/test_gw_5x5_Q_single_iter.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2V(self):
desired_V = np.load('./test_data/test_gw_5x5_V.npy')
Q = np.load('./test_data/test_gw_5x5_Q.npy')
actual_V = self.gw.Q2V(Q)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2V should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2Vbypolicy(self):
desired_V = np.load('./test_data/test_gw_5x5_V.npy')
Q = np.load('./test_data/test_gw_5x5_Q.npy')
policy = np.load('./test_data/test_gw_5x5_policy.npy')
actual_V = self.gw.Q2Vbypolicy(Q, policy)
self.assertEqual(actual_V.shape, (self.gw.n_states + 1,), msg='Q2Vbypolicy should return array V of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_V.shape))
nptest.assert_allclose(actual_V, desired_V, rtol=self.rtol, atol=self.atol)
def test_Q2policy(self):
Q = np.load('./test_data/test_gw_5x5_Q.npy')
desired_policy = np.load('./test_data/test_gw_5x5_policy.npy')
actual_policy = self.gw.Q2policy(Q)
self.assertEqual(actual_policy.shape, (self.gw.n_states + 1,), msg='Q2policy should return array policy of'
' shape {} but has returned V with shape {}.'.format(
self.gw.n_states + 1,
actual_policy.shape))
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol)
@ignore_warnings
def test_value_iteration_single_iter(self):
actual_Q = self.gw.value_iteration(max_iter=1)
desired_Q = np.load('./test_data/test_gw_5x5_Q_single_iter.npy')
desired_Q_shape = (self.gw.n_states + 1,self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Value_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_value_iteration(self):
desired_Q = np.load('./test_data/test_gw_5x5_Q.npy')
actual_Q = self.gw.value_iteration()
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
def test_policy_iteration_policy_only(self):
actual_policy = self.gw.Q2policy(self.gw.policy_iteration())
desired_Q = np.load('./test_data/test_gw_5x5_Q.npy')
desired_policy = self.gw.Q2policy(desired_Q)
nptest.assert_allclose(actual_policy, desired_policy, rtol=self.rtol, atol=self.atol)
def test_policy_iteration(self):
actual_Q = self.gw.policy_iteration()
desired_Q = np.load('./test_data/test_gw_5x5_Q.npy')
desired_Q_shape = (self.gw.n_states + 1, self.gw.n_actions)
self.assertEqual(actual_Q.shape, desired_Q_shape,
msg='Policy_iteration should return array Q of'
' shape {} but has returned V with shape {}.'.format(
desired_Q_shape,
actual_Q.shape))
nptest.assert_allclose(actual_Q, desired_Q, rtol=self.rtol, atol=self.atol)
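# --- Illustrative usage (a sketch, not part of the test suite) ---
# Solving one of the worlds exercised above directly; this assumes
# ZUI_MDP_solution exposes GridWorld.get_world, value_iteration, Q2V and
# Q2policy with the signatures used throughout the tests.
if __name__ == '__main__':
    gw = GridWorld.get_world('3x4')
    Q = gw.value_iteration()
    print('Optimal policy per state:', gw.Q2policy(Q))
    print('State values:', gw.Q2V(Q))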
|
from dso.client import Client
from jpype import *
from jpype import java
import lithops
import os
# DSO server address (e.g. "host:port"), taken from the environment.
dso = os.environ.get('DSO')
def my_function(x):
    # Runs inside the Lithops worker: connect to the shared DSO store and
    # atomically increment the distributed counter by x.
    client = Client(dso)
    d = client.getAtomicCounter("cnt")
    return d.increment(x)
if __name__ == '__main__':
    # Run my_function remotely, then read the counter locally before and
    # after collecting the result to observe the increment.
    fexec = lithops.FunctionExecutor(runtime='0track/lithops-dso:1.1')
    fexec.call_async(my_function, 3)
    client = Client(dso)
    c = client.getAtomicCounter("cnt")
    print("counter: " + str(c.tally()))
    print(fexec.get_result())
    print("counter: " + str(c.tally()))
|
from otp.otpbase.PythonUtil import Functor
from direct.gui.DirectGui import *
from panda3d.core import *
from direct.distributed import ClockDelta
from direct.fsm import StateData
from direct.task.Task import Task
import ClosetGlobals
from toontown.toonbase import TTLocalizer
from toontown.toonbase import ToontownGlobals
from toontown.toontowngui import TTDialog
from toontown.toon import ToonDNA
from toontown.makeatoon.MakeAToonGlobals import *
from toontown.makeatoon import ShuffleButton
class TrunkGUI(StateData.StateData):
notify = directNotify.newCategory('TrunkGUI')
def __init__(self, isOwner, doneEvent, cancelEvent, swapHatEvent, swapGlassesEvent, swapBackpackEvent, swapShoesEvent, deleteEvent, hatList=None, glassesList=None, backpackList=None, shoesList=None):
StateData.StateData.__init__(self, doneEvent)
self.toon = None
self.hatList = hatList
self.glassesList = glassesList
self.backpackList = backpackList
self.shoesList = shoesList
self.isOwner = isOwner
self.swapHatEvent = swapHatEvent
self.swapGlassesEvent = swapGlassesEvent
self.swapBackpackEvent = swapBackpackEvent
self.swapShoesEvent = swapShoesEvent
self.deleteEvent = deleteEvent
self.cancelEvent = cancelEvent
self.genderChange = 0
self.verify = None
return
def load(self):
self.gui = loader.loadModel('phase_3/models/gui/tt_m_gui_mat_mainGui')
guiRArrowUp = self.gui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowRollover = self.gui.find('**/tt_t_gui_mat_arrowUp')
guiRArrowDown = self.gui.find('**/tt_t_gui_mat_arrowDown')
guiRArrowDisabled = self.gui.find('**/tt_t_gui_mat_arrowDisabled')
guiArrowRotateUp = self.gui.find('**/tt_t_gui_mat_arrowRotateUp')
guiArrowRotateDown = self.gui.find('**/tt_t_gui_mat_arrowRotateDown')
shuffleFrame = self.gui.find('**/tt_t_gui_mat_shuffleFrame')
shuffleArrowUp = self.gui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDown = self.gui.find('**/tt_t_gui_mat_shuffleArrowDown')
shuffleArrowRollover = self.gui.find('**/tt_t_gui_mat_shuffleArrowUp')
shuffleArrowDisabled = self.gui.find('**/tt_t_gui_mat_shuffleArrowDisabled')
        self.parentFrame = DirectFrame(relief=DGG.RAISED, pos=(0.98, 0, 0.416), frameColor=(1, 0, 0, 0))
        def addFrame(posZ, text):
            return DirectFrame(parent=self.parentFrame, image=shuffleFrame, image_scale=halfButtonInvertScale,
                               relief=None, pos=(0, 0, posZ), hpr=(0, 0, 3), scale=1.2, frameColor=(1, 1, 1, 1),
                               text=text, text_scale=0.0575, text_pos=(-0.001, -0.015), text_fg=(1, 1, 1, 1))
        def addButton(parent, scale, hoverScale, posX, command, extraArg):
            return DirectButton(parent=parent, relief=None,
                                image=(shuffleArrowUp, shuffleArrowDown, shuffleArrowRollover, shuffleArrowDisabled),
                                image_scale=scale, image1_scale=hoverScale, image2_scale=hoverScale,
                                pos=(posX, 0, 0), command=command, extraArgs=[extraArg])
self.hatFrame = addFrame(0.1, TTLocalizer.TrunkHatGUI)
self.hatLButton = addButton(self.hatFrame, halfButtonScale, halfButtonHoverScale, -0.2, self.swapHat, -1)
self.hatRButton = addButton(self.hatFrame, halfButtonInvertScale, halfButtonInvertHoverScale, 0.2, self.swapHat, 1)
self.glassesFrame = addFrame(-0.15, TTLocalizer.TrunkGlassesGUI)
self.glassesLButton = addButton(self.glassesFrame, halfButtonScale, halfButtonHoverScale, -0.2, self.swapGlasses, -1)
self.glassesRButton = addButton(self.glassesFrame, halfButtonInvertScale, halfButtonInvertHoverScale, 0.2, self.swapGlasses, 1)
self.backpackFrame = addFrame(-0.4, TTLocalizer.TrunkBackpackGUI)
self.backpackLButton = addButton(self.backpackFrame, halfButtonScale, halfButtonHoverScale, -0.2, self.swapBackpack, -1)
self.backpackRButton = addButton(self.backpackFrame, halfButtonInvertScale, halfButtonInvertHoverScale, 0.2, self.swapBackpack, 1)
self.shoesFrame = addFrame(-0.65, TTLocalizer.TrunkShoesGUI)
self.shoesLButton = addButton(self.shoesFrame, halfButtonScale, halfButtonHoverScale, -0.2, self.swapShoes, -1)
self.shoesRButton = addButton(self.shoesFrame, halfButtonInvertScale, halfButtonInvertHoverScale, 0.2, self.swapShoes, 1)
self.parentFrame.hide()
self.shuffleFetchMsg = 'TrunkShuffle'
self.shuffleButton = ShuffleButton.ShuffleButton(self, self.shuffleFetchMsg)
self.gui = loader.loadModel('phase_3/models/gui/create_a_toon_gui')
        self.cancelButton = DirectButton(relief=None,
                                         image=(self.gui.find('**/CrtAtoon_Btn2_UP'),
                                                self.gui.find('**/CrtAtoon_Btn2_DOWN'),
                                                self.gui.find('**/CrtAtoon_Btn2_RLLVR')),
                                         pos=(0.15, 0, -0.85), command=self.__handleCancel,
                                         text=('', TTLocalizer.MakeAToonCancel, TTLocalizer.MakeAToonCancel),
                                         text_font=ToontownGlobals.getInterfaceFont(), text_scale=0.08,
                                         text_pos=(0, -0.03), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1))
self.cancelButton.hide()
        self.rotateL = DirectButton(relief=None, pos=(-0.15, 0, 0.85),
                                    image=(guiArrowRotateUp, guiArrowRotateDown, guiArrowRotateUp, guiArrowRotateDown),
                                    image_scale=(-0.7, 0.7, 0.7), image1_scale=(-0.8, 0.8, 0.8),
                                    image2_scale=(-0.8, 0.8, 0.8))
self.rotateL.hide()
self.rotateL.bind(DGG.B1PRESS, self.__rotateLDown)
self.rotateL.bind(DGG.B1RELEASE, self.__rotateLUp)
        self.rotateR = DirectButton(relief=None, pos=(0.15, 0, 0.85),
                                    image=(guiArrowRotateUp, guiArrowRotateDown, guiArrowRotateUp, guiArrowRotateDown),
                                    image_scale=(0.7, 0.7, 0.7), image1_scale=(0.8, 0.8, 0.8),
                                    image2_scale=(0.8, 0.8, 0.8))
self.rotateR.hide()
self.rotateR.bind(DGG.B1PRESS, self.__rotateRDown)
self.rotateR.bind(DGG.B1RELEASE, self.__rotateRUp)
if self.isOwner:
trashcanGui = loader.loadModel('phase_3/models/gui/trashcan_gui')
trashImage = (trashcanGui.find('**/TrashCan_CLSD'), trashcanGui.find('**/TrashCan_OPEN'), trashcanGui.find('**/TrashCan_RLVR'))
            self.trashPanel = DirectFrame(parent=aspect2d, image=DGG.getDefaultDialogGeom(),
                                          image_color=(1, 1, 0.75, 0.8), image_scale=(0.36, 0, 1.2),
                                          pos=(-0.86, 0, 0.1), relief=None)
            def addTrashButton(posZ, text, extraArg):
                return DirectButton(parent=self.trashPanel, image=trashImage, relief=None,
                                    pos=(-0.09, 0, posZ), command=self.__handleDelete, text=text,
                                    extraArgs=[extraArg], scale=(0.5, 0.5, 0.5),
                                    text_font=ToontownGlobals.getInterfaceFont(), text_scale=0.12,
                                    text_pos=(0.3, 0), text_fg=(0.8, 0.2, 0.2, 1),
                                    text_shadow=(0, 0, 0, 1), textMayChange=0)
self.hatTrashButton = addTrashButton(0.5, TTLocalizer.TrunkDeleteHat, ToonDNA.HAT)
self.glassesTrashButton = addTrashButton(0.2, TTLocalizer.TrunkDeleteGlasses, ToonDNA.GLASSES)
self.backpackTrashButton = addTrashButton(-0.1, TTLocalizer.TrunkDeleteBackpack, ToonDNA.BACKPACK)
self.shoesTrashButton = addTrashButton(-0.4, TTLocalizer.TrunkDeleteShoes, ToonDNA.SHOES)
            self.button = DirectButton(relief=None,
                                       image=(self.gui.find('**/CrtAtoon_Btn1_UP'),
                                              self.gui.find('**/CrtAtoon_Btn1_DOWN'),
                                              self.gui.find('**/CrtAtoon_Btn1_RLLVR')),
                                       pos=(-0.15, 0, -0.85), command=self.__handleButton,
                                       text=('', TTLocalizer.MakeAToonDone, TTLocalizer.MakeAToonDone),
                                       text_font=ToontownGlobals.getInterfaceFont(), text_scale=0.08,
                                       text_pos=(0, -0.03), text_fg=(1, 1, 1, 1), text_shadow=(0, 0, 0, 1))
trashcanGui.removeNode()
return
def unload(self):
taskMgr.remove(self.taskName('rotateL'))
taskMgr.remove(self.taskName('rotateR'))
self.ignore('verifyDone')
self.gui.removeNode()
del self.gui
self.parentFrame.destroy()
self.hatFrame.destroy()
self.glassesFrame.destroy()
self.backpackFrame.destroy()
self.shoesFrame.destroy()
self.hatLButton.destroy()
self.hatRButton.destroy()
self.glassesLButton.destroy()
self.glassesRButton.destroy()
self.backpackLButton.destroy()
self.backpackRButton.destroy()
self.shoesLButton.destroy()
self.shoesRButton.destroy()
del self.parentFrame
del self.hatFrame
del self.glassesFrame
del self.backpackFrame
del self.shoesFrame
del self.hatLButton
del self.hatRButton
del self.glassesLButton
del self.glassesRButton
del self.backpackLButton
del self.backpackRButton
del self.shoesLButton
del self.shoesRButton
self.shuffleButton.unload()
self.ignore('MAT-newToonCreated')
self.cancelButton.destroy()
del self.cancelButton
self.rotateL.destroy()
del self.rotateL
self.rotateR.destroy()
del self.rotateR
if self.isOwner:
self.hatTrashButton.destroy()
self.glassesTrashButton.destroy()
self.backpackTrashButton.destroy()
self.shoesTrashButton.destroy()
self.button.destroy()
del self.hatTrashButton
del self.glassesTrashButton
del self.backpackTrashButton
del self.shoesTrashButton
del self.button
self.trashPanel.destroy()
del self.trashPanel
if self.verify:
self.verify.cleanup()
del self.verify
def showButtons(self):
self.parentFrame.show()
self.cancelButton.show()
self.rotateL.show()
self.rotateR.show()
if self.isOwner:
self.hatTrashButton.show()
self.glassesTrashButton.show()
self.backpackTrashButton.show()
self.shoesTrashButton.show()
self.button.show()
def hideButtons(self):
self.parentFrame.hide()
self.cancelButton.hide()
self.rotateL.hide()
self.rotateR.hide()
if self.isOwner:
self.hatTrashButton.hide()
self.glassesTrashButton.hide()
self.backpackTrashButton.hide()
self.shoesTrashButton.hide()
self.button.hide()
def enter(self, toon):
self.notify.debug('enter')
base.disableMouse()
self.toon = toon
self.setupScrollInterface()
currHat = self.toon.hat
currHatIdx = self.hats.index(currHat)
self.swapHat(currHatIdx - self.hatChoice)
currGlasses = self.toon.glasses
currGlassesIdx = self.glasses.index(currGlasses)
self.swapGlasses(currGlassesIdx - self.glassesChoice)
currBackpack = self.toon.backpack
currBackpackIdx = self.backpacks.index(currBackpack)
self.swapBackpack(currBackpackIdx - self.backpackChoice)
currShoes = self.toon.shoes
currShoesIdx = self.shoes.index(currShoes)
self.swapShoes(currShoesIdx - self.shoesChoice)
choicePool = [self.hats,
self.glasses,
self.backpacks,
self.shoes]
self.shuffleButton.setChoicePool(choicePool)
self.accept(self.shuffleFetchMsg, self.changeAccessories)
self.acceptOnce('MAT-newToonCreated', self.shuffleButton.cleanHistory)
def exit(self):
        try:
            del self.toon
        except AttributeError:
            self.notify.warning('TrunkGUI: toon not found')
self.hideButtons()
self.ignore('enter')
self.ignore('next')
self.ignore('last')
self.ignore(self.shuffleFetchMsg)
def setupButtons(self):
self.acceptOnce('last', self.__handleBackward)
self.acceptOnce('next', self.__handleForward)
return
def setupScrollInterface(self):
self.notify.debug('setupScrollInterface')
        if self.hatList is None:
            self.hatList = self.toon.getHatList()
        if self.glassesList is None:
            self.glassesList = self.toon.getGlassesList()
        if self.backpackList is None:
            self.backpackList = self.toon.getBackpackList()
        if self.shoesList is None:
            self.shoesList = self.toon.getShoesList()
self.hats = []
self.glasses = []
self.backpacks = []
self.shoes = []
self.hats.append((self.toon.hat[0], self.toon.hat[1], self.toon.hat[2]))
self.glasses.append((self.toon.glasses[0], self.toon.glasses[1], self.toon.glasses[2]))
self.backpacks.append((self.toon.backpack[0], self.toon.backpack[1], self.toon.backpack[2]))
self.shoes.append((self.toon.shoes[0], self.toon.shoes[1], self.toon.shoes[2]))
        # The flat accessory lists store items as consecutive triples; regroup them.
        for i in range(0, len(self.hatList), 3):
            self.hats.append((self.hatList[i], self.hatList[i + 1], self.hatList[i + 2]))
        for i in range(0, len(self.glassesList), 3):
            self.glasses.append((self.glassesList[i], self.glassesList[i + 1], self.glassesList[i + 2]))
        for i in range(0, len(self.backpackList), 3):
            self.backpacks.append((self.backpackList[i], self.backpackList[i + 1], self.backpackList[i + 2]))
        for i in range(0, len(self.shoesList), 3):
            self.shoes.append((self.shoesList[i], self.shoesList[i + 1], self.shoesList[i + 2]))
self.hatChoice = 0
self.glassesChoice = 0
self.backpackChoice = 0
self.shoesChoice = 0
self.swapHat(0)
self.swapGlasses(0)
self.swapBackpack(0)
self.swapShoes(0)
self.updateTrashButtons()
self.setupButtons()
return
def updateTrashButtons(self):
if not self.isOwner:
return
if len(self.hats) < 2 or self.toon.hat[0] == 0:
self.hatTrashButton['state'] = DGG.DISABLED
else:
self.hatTrashButton['state'] = DGG.NORMAL
if len(self.glasses) < 2 or self.toon.glasses[0] == 0:
self.glassesTrashButton['state'] = DGG.DISABLED
else:
self.glassesTrashButton['state'] = DGG.NORMAL
if len(self.backpacks) < 2 or self.toon.backpack[0] == 0:
self.backpackTrashButton['state'] = DGG.DISABLED
else:
self.backpackTrashButton['state'] = DGG.NORMAL
if len(self.shoes) < 2 or self.toon.shoes[0] == 0:
self.shoesTrashButton['state'] = DGG.DISABLED
else:
self.shoesTrashButton['state'] = DGG.NORMAL
def rotateToonL(self, task):
self.toon.setH(self.toon.getH() - 4)
return Task.cont
def rotateToonR(self, task):
self.toon.setH(self.toon.getH() + 4)
return Task.cont
def __rotateLUp(self, event):
messenger.send('wakeup')
taskMgr.remove(self.taskName('rotateL'))
def __rotateLDown(self, event):
messenger.send('wakeup')
task = Task(self.rotateToonL)
taskMgr.add(task, self.taskName('rotateL'))
def __rotateRUp(self, event):
messenger.send('wakeup')
taskMgr.remove(self.taskName('rotateR'))
def __rotateRDown(self, event):
messenger.send('wakeup')
task = Task(self.rotateToonR)
taskMgr.add(task, self.taskName('rotateR'))
def setGender(self, gender):
self.ownerGender = gender
self.genderChange = 1
def swapHat(self, offset):
length = len(self.hats)
self.hatChoice += offset
if self.hatChoice <= 0:
self.hatChoice = 0
self.updateScrollButtons(self.hatChoice, length, 0, self.hatLButton, self.hatRButton)
if self.hatChoice < 0 or self.hatChoice >= len(self.hats) or len(self.hats[self.hatChoice]) != 3:
self.notify.warning('hatChoice index is out of range!')
return
hat = self.hats[self.hatChoice]
self.toon.setHat(hat[0], hat[1], hat[2])
        if self.swapHatEvent is not None:
messenger.send(self.swapHatEvent)
messenger.send('wakeup')
return
def swapGlasses(self, offset):
length = len(self.glasses)
self.glassesChoice += offset
if self.glassesChoice <= 0:
self.glassesChoice = 0
self.updateScrollButtons(self.glassesChoice, length, 0, self.glassesLButton, self.glassesRButton)
if self.glassesChoice < 0 or self.glassesChoice >= len(self.glasses) or len(self.glasses[self.glassesChoice]) != 3:
self.notify.warning('glassesChoice index is out of range!')
return
glasses = self.glasses[self.glassesChoice]
self.toon.setGlasses(glasses[0], glasses[1], glasses[2])
        if self.swapGlassesEvent is not None:
messenger.send(self.swapGlassesEvent)
messenger.send('wakeup')
return
def swapBackpack(self, offset):
length = len(self.backpacks)
self.backpackChoice += offset
if self.backpackChoice <= 0:
self.backpackChoice = 0
self.updateScrollButtons(self.backpackChoice, length, 0, self.backpackLButton, self.backpackRButton)
if self.backpackChoice < 0 or self.backpackChoice >= len(self.backpacks) or len(self.backpacks[self.backpackChoice]) != 3:
self.notify.warning('backpackChoice index is out of range!')
return
backpack = self.backpacks[self.backpackChoice]
self.toon.setBackpack(backpack[0], backpack[1], backpack[2])
        if self.swapBackpackEvent is not None:
messenger.send(self.swapBackpackEvent)
messenger.send('wakeup')
return
def swapShoes(self, offset):
length = len(self.shoes)
self.shoesChoice += offset
if self.shoesChoice <= 0:
self.shoesChoice = 0
self.updateScrollButtons(self.shoesChoice, length, 0, self.shoesLButton, self.shoesRButton)
if self.shoesChoice < 0 or self.shoesChoice >= len(self.shoes) or len(self.shoes[self.shoesChoice]) != 3:
self.notify.warning('shoesChoice index is out of range!')
return
shoes = self.shoes[self.shoesChoice]
self.toon.setShoes(shoes[0], shoes[1], shoes[2])
        if self.swapShoesEvent is not None:
messenger.send(self.swapShoesEvent)
messenger.send('wakeup')
return
def updateScrollButtons(self, choice, length, startTex, lButton, rButton):
if choice >= length - 1:
rButton['state'] = DGG.DISABLED
else:
rButton['state'] = DGG.NORMAL
if choice <= 0:
lButton['state'] = DGG.DISABLED
else:
lButton['state'] = DGG.NORMAL
def __handleForward(self):
self.doneStatus = 'next'
messenger.send(self.doneEvent)
def __handleBackward(self):
self.doneStatus = 'last'
messenger.send(self.doneEvent)
def resetClothes(self, style):
if self.toon:
oldHat = style[ToonDNA.HAT]
oldGlasses = style[ToonDNA.GLASSES]
oldBackpack = style[ToonDNA.BACKPACK]
oldShoes = style[ToonDNA.SHOES]
self.toon.setHat(oldHat[0], oldHat[1], oldHat[2])
self.toon.setGlasses(oldGlasses[0], oldGlasses[1], oldGlasses[2])
self.toon.setBackpack(oldBackpack[0], oldBackpack[1], oldBackpack[2])
self.toon.setShoes(oldShoes[0], oldShoes[1], oldShoes[2])
self.toon.loop('neutral', 0)
def changeAccessories(self):
self.notify.debug('Entering changeAccessories')
NoItem = (0, 0, 0)
newChoice = self.shuffleButton.getCurrChoice()
if newChoice[0] in self.hats:
newHatIndex = self.hats.index(newChoice[0])
else:
newHatIndex = self.hats.index(NoItem)
if newChoice[1] in self.glasses:
newGlassesIndex = self.glasses.index(newChoice[1])
else:
newGlassesIndex = self.glasses.index(NoItem)
if newChoice[2] in self.backpacks:
newBackpackIndex = self.backpacks.index(newChoice[2])
else:
newBackpackIndex = self.backpacks.index(NoItem)
if newChoice[3] in self.shoes:
newShoesIndex = self.shoes.index(newChoice[3])
else:
newShoesIndex = self.shoes.index(NoItem)
oldHatIndex = self.hatChoice
oldGlassesIndex = self.glassesChoice
oldBackpackIndex = self.backpackChoice
oldShoesIndex = self.shoesChoice
self.swapHat(newHatIndex - oldHatIndex)
self.swapGlasses(newGlassesIndex - oldGlassesIndex)
self.swapBackpack(newBackpackIndex - oldBackpackIndex)
self.swapShoes(newShoesIndex - oldShoesIndex)
def getCurrToonSetting(self):
return [
self.hats[self.hatChoice],
self.glasses[self.glassesChoice],
self.backpacks[self.backpackChoice],
self.shoes[self.shoesChoice]]
    def removeHat(self, index):
        if index < len(self.hats):
            del self.hats[index]
            if self.hatChoice > index:
                self.hatChoice -= 1
            elif self.hatChoice == index:
                self.hatChoice = 0
            return 1
        return 0
    def removeGlasses(self, index):
        if index < len(self.glasses):
            del self.glasses[index]
            if self.glassesChoice > index:
                self.glassesChoice -= 1
            elif self.glassesChoice == index:
                self.glassesChoice = 0
            return 1
        return 0
    def removeBackpack(self, index):
        if index < len(self.backpacks):
            del self.backpacks[index]
            if self.backpackChoice > index:
                self.backpackChoice -= 1
            elif self.backpackChoice == index:
                self.backpackChoice = 0
            return 1
        return 0
    def removeShoes(self, index):
        if index < len(self.shoes):
            del self.shoes[index]
            if self.shoesChoice > index:
                self.shoesChoice -= 1
            elif self.shoesChoice == index:
                self.shoesChoice = 0
            return 1
        return 0
def __handleButton(self):
self.doneStatus = 'next'
messenger.send(self.doneEvent)
messenger.send('wakeup')
def __handleCancel(self):
messenger.send(self.cancelEvent)
messenger.send('wakeup')
    def __handleDelete(self, which):
        abortDeletion = False
        if which == ToonDNA.HAT:
            item = TTLocalizer.TrunkHat
        elif which == ToonDNA.GLASSES:
            item = TTLocalizer.TrunkGlasses
        elif which == ToonDNA.BACKPACK:
            item = TTLocalizer.TrunkBackpack
        else:
            item = TTLocalizer.TrunkShoes
self.verify = TTDialog.TTGlobalDialog(doneEvent='verifyDone', message=TTLocalizer.ClosetVerifyDelete % item, style=TTDialog.TwoChoice)
self.verify.show()
self.accept('verifyDone', Functor(self.__handleVerifyDelete, which))
messenger.send('wakeup')
def __handleVerifyDelete(self, which):
status = self.verify.doneStatus
self.ignore('verifyDone')
self.verify.cleanup()
del self.verify
self.verify = None
if status == 'ok':
messenger.send(self.deleteEvent, [which])
messenger.send('wakeup')
return
def taskName(self, idString):
return idString + '-TrunkGUI'
|
import os
# import matplotlib.pyplot as plt
import numpy as np
from pascal_voc_writer import Writer
import json
import cv2
#%% src path
src_json_path = '/home/peyman/mnt/data/DATA/ouster_data/create_dataset/PixelAnnotationTool/data/anns/'
img_folder = '/home/peyman/mnt/data/DATA/ouster_data/create_dataset/PixelAnnotationTool/data/images/'
j_file_list = os.listdir(src_json_path)
print(len(j_file_list))
# file_name = os.path.basename()
# print (os.path.splitext('asdfasdf/asdfasf.json'))
#%% dst path
dst_path_xml = '/home/peyman/mnt/data/DATA/ouster_data/create_dataset/PixelAnnotationTool/data/labels/'
for f in j_file_list:
    j_file = src_json_path + f
    print(j_file)
    filename = os.path.basename(j_file)
    path_to_xml = dst_path_xml + os.path.splitext(filename)[0]
    xml_file = path_to_xml + '.xml'
    print(xml_file)
    # use a distinct name for the file handle so the loop variable f is not shadowed
    with open(j_file, 'r') as jf:
        data = json.load(jf)
    cnts = data['shapes']
    writer = Writer(data['imagePath'], data['imageWidth'], data['imageHeight'])
    for shape in cnts:
        name = shape['label']
        if name == 'other':
            continue
        # polygon -> tight axis-aligned bounding box
        pts = np.array(shape['points'], np.int32)
        x, y, w, h = cv2.boundingRect(pts)
        writer.addObject(name, x, y, x + w, y + h)
    writer.save(xml_file)
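# Optional sanity check (a sketch using only the standard library): parse one
# generated XML file and list the object names it contains.
#
#     import xml.etree.ElementTree as ET
#     root = ET.parse(xml_file).getroot()
#     print([obj.find('name').text for obj in root.iter('object')])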
|
# -*- coding: utf-8 -*-
"""
Javelin Web2Py Events Controller
"""
# metadata
__author__ = "Jeremy Jacobson"
__copyright__ = "(c) 2013, Jacobson and Varni, LLC"
__date__ = "7/22/2013"
__email__ = "jjacobson93@gmail.com"
__status__ = "Development"
__data__ = {'name' : 'events', 'label' : 'Events', 'description' : 'Manage and create events',
'icon' : 'calendar', 'u-icon' : u'\uf073', 'color':'red', 'required' : True}
from applications.javelin.ctr_data import ctr_enabled, get_ctr_data
from applications.javelin.private.utils import flattenDict
from gluon.contrib import simplejson as json
from gluon.tools import Service
service = Service(globals())
@auth.requires_login()
@auth.requires_membership('standard')
def index():
"""Loads the index page for the 'Events' controller
:returns: a dictionary to pass to the view with the list of modules_enabled and the active module ('events')
"""
return dict(ctr_enabled=ctr_enabled, ctr_data=get_ctr_data(), active_module='events')
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def data(start=None, end=None, _=None):
"""Loads the event data
:param start: the start time
:param end: the end time
:returns: a list of events between the start and end times
"""
if (start is not None) and (end is not None):
start = int(start)
end = int(end)
        total_weeks = (end - start)/604800  # 604800 seconds = one week
        event_list = db((db.events.recurring==False) &
                        (((db.events.start_time >= start) & (db.events.start_time < end)) |
                         ((db.events.end_time >= start) & (db.events.end_time < end)) |
                         ((db.events.start_time <= start) & (db.events.end_time >= end)))).select(db.events.ALL).as_list()
        recur_events = db((db.events.recurring==True) &
                          (db.events.start_time < end) &
                          ((db.events.end_recur==None) |
                           ((db.events.end_recur!=None) & (db.events.end_recur > start)))).select(db.events.ALL).as_list()
for e in recur_events:
if e['end_recur']:
if e['start_time'] > start: a = start
else: a = e['start_time']
if e['end_recur'] > end: b = end
else: b = e['end_recur']
weeks = (b - a)/604800
else:
weeks = (end - e['start_time'])/604800
if weeks > total_weeks: weeks = total_weeks
for i in range(weeks):
event_list.append(e)
e = e.copy()
e['start_time'] += 604800
e['end_time'] += 604800
        # expose start_time/end_time as start/end in the returned dicts
        event_list = [dict(('start', v) if k=='start_time' else (('end', v) if k=='end_time' else (k,v)) for k, v in d.items()) for d in event_list]
else:
event_list = db().select(db.events.ALL, orderby=db.events.start_time).as_list()
return event_list
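# Worked example of the recurrence expansion above (604800 seconds = one week):
# with start = 0 and end = 3 * 604800, total_weeks = 3, so a recurring event
# first starting at t = 0 is appended three times, shifted one week forward on
# each pass, yielding instances at t = 0, 604800 and 1209600.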
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def add_event(title, start, end, notes=None, allDay=False, recurring=False, end_recur=None):
"""Adds an event
:param title: the title for the event
:param start: start time for the event
:param end: end time for the event
:param notes: notes for the event
:returns: a dictionary with a response of success or failure
"""
select = db(db.events.title==title).select().as_list()
if len(select) == 0:
response = db.events.insert(title=title, start_time=start, end_time=end, allDay=allDay, notes=notes, recurring=recurring, end_recur=end_recur)
return dict(response=response)
else:
return dict(exists=True)
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def delete_event(id):
"""Deletes an event
:param id: the id of the event
:returns: a dictionary with a response of success or failure
"""
response = db(db.events.id==id).delete()
return dict(response=response)
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def attendance_data(event_id):
data = db((db.person.grade==9) | (db.person.leader==True)).select(db.person.id, db.person.student_id, db.person.last_name,
db.person.first_name, db.attendance.present, db.attendance.event_id,
db.events.title, db.person.grade, db.person.leader,
left=[db.attendance.on((db.person.id==db.attendance.person_id) & (db.attendance.event_id==event_id)),
db.events.on(db.events.id==db.attendance.event_id)],
orderby=db.person.last_name|db.person.first_name).as_list()
data = [dict(('_'.join(k),v) if k != ('person','id') else ('id',v) for k,v in flattenDict(d).items()) for d in data]
return data
@auth.requires_login()
@auth.requires_membership('standard')
@service.json
def quick_attendance(event_id, person_id=None, student_id=None, present=True):
if person_id:
response = db.attendance.update_or_insert((db.attendance.person_id==person_id) & (db.attendance.event_id==event_id),
person_id=person_id, event_id=event_id, present=present)
    elif student_id:
        person = db(db.person.student_id==student_id).select().first()
        if person:
            response = db.attendance.update_or_insert((db.attendance.person_id==person.id) & (db.attendance.event_id==event_id),
                                                      person_id=person.id, event_id=event_id, present=present)
        else:
            return dict(error=True)
    else:
        return dict(error=True)
    return dict(response=response)
@auth.requires_login()
@auth.requires_membership('standard')
def call():
"""Call function used when calling a function from an HTTP request"""
return service()
|
from flask import Flask, request, jsonify
from cloudevents.http import from_http
import logging, json
from vcenter import Session
from datetime import date
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(levelname)s %(name)s %(threadName)s : %(message)s')
app = Flask(__name__)
@app.route('/', methods=['GET','POST'])
def echo():
if request.method == 'GET':
sc = 200
msg = 'POST to this endpoint to apply custom attributes to a VM object within a cloudEvent message'
message = {
'status': sc,
'message': msg,
}
resp = jsonify(message)
resp.status_code = sc
return resp
if request.method == 'POST':
try:
event = from_http(request.headers, request.get_data(),None)
data = event.data
# hack to handle non JSON payload, e.g. xml
if not isinstance(data,dict):
data = str(event.data)
cevent = {
"attributes": event._attributes,
"data": data
}
app.logger.debug(f'"***cloud event*** {json.dumps(cevent)}')
# CloudEvent - simple validation
ref_vm = cevent['data']['Vm']['Vm']
ref_user = cevent['data']['UserName']
subject = cevent['attributes']['subject']
vc_s = Session()
attr_owner, attr_creation_date, attr_last_poweredon = vc_s.get_vm_attributes()
vm_obj = vc_s.get_vm(ref_vm['Value'])
if not vm_obj:
sc = 404
msg = f"could not find vm with moRef: {ref_vm['Value']}"
app.logger.error(msg)
message = {
'status': sc,
'error': msg,
}
resp = jsonify(message)
resp.status_code = sc
return resp
if subject in ["DrsVmPoweredOnEvent", "VmPoweredOnEvent"]:
app.logger.info(f"Apply attribute > {attr_last_poweredon.name}")
vc_s.set_custom_attr(
entity=vm_obj,
key=attr_last_poweredon.key,
value=date.today().strftime("%d/%m/%Y")
)
if subject in ["VmCreatedEvent", "VmClonedEvent", "VmRegisteredEvent"]:
app.logger.debug(f"Apply attribute > {attr_owner.name}")
vc_s.set_custom_attr(
entity=vm_obj,
key=attr_owner.key,
value=ref_user
)
app.logger.debug(f"Apply attribute > {attr_creation_date.name}")
vc_s.set_custom_attr(
entity=vm_obj,
key=attr_creation_date.key,
value=date.today().strftime("%d/%m/%Y")
)
vc_s.close()
app.logger.debug(f"End of event")
return {}, 204
except Exception as e:
sc = 500
msg = f'could not decode cloud event: {e}'
app.logger.error(msg)
message = {
'status': sc,
'error': msg,
}
resp = jsonify(message)
resp.status_code = sc
return resp
# hint: run with FLASK_ENV=development FLASK_APP=handler.py flask run
if __name__ == "__main__":
app.run()
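# Minimal client-side test (a sketch; the field values are illustrative and the
# cloudevents structured-mode helper is assumed to be available):
#
#     from cloudevents.http import CloudEvent, to_structured
#     import requests
#     attributes = {"type": "com.vmware.event.router/event",
#                   "source": "vcenter", "subject": "VmPoweredOnEvent"}
#     data = {"Vm": {"Vm": {"Value": "vm-123"}}, "UserName": "admin"}
#     headers, body = to_structured(CloudEvent(attributes, data))
#     requests.post("http://localhost:5000/", data=body, headers=headers)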
|
"""@package docstring
Documentation for this module.
More details.
"""
def func():
"""Documentation for a function.
More details.
"""
pass
class PyClass:
"""Documentation for a class.
More details.
"""
def __init__(self):
"""The constructor."""
        self._memVar = 0
def PyMethod(self):
"""Documentation for a method."""
pass
## @package pyexample
# Documentation for this module.
#
# More details.
## Documentation for a function.
#
# More details.
def func():
pass
## Documentation for a class.
#
# More details.
class PyClass:
## The constructor.
def __init__(self):
        self._memVar = 0
## Documentation for a method.
# @param self The object pointer.
def PyMethod(self):
pass
## A class variable.
    classVar = 0
## @var _memVar
# a member variable
|
import turtle
window = turtle.Screen()
turtle.reset()
turtle.shape("classic")
turtle.bgcolor("dark slate gray")
turtle.color("alice blue")
turtle.speed(4)
turtle.pensize(2)
for i in range(6):  # each pass turns 30 degrees, then draws a 40-unit square
turtle.left(30)
turtle.forward(40)
turtle.left(90)
turtle.forward(40)
turtle.left(90)
turtle.forward(40)
turtle.left(90)
turtle.forward(40)
turtle.left(90)
window.exitonclick()
|
from pathlib import Path
import numpy as np
if __name__ == '__main__':
with Path('vocab.words.txt').open(encoding='utf-8') as f:
word_to_idx = {line.strip(): idx for idx, line in enumerate(f)}
with Path('vocab.words.txt').open(encoding='utf-8') as f:
word_to_found = {line.strip(): False for line in f}
size_vocab = len(word_to_idx)
embeddings = np.zeros((size_vocab, 300))
found = 0
print('Reading W2V file (may take a while)')
with Path('../../sgns.zhihu.bigram').open(encoding='utf-8') as f:
for line_idx, line in enumerate(f):
if line_idx % 100000 == 0:
print('- At line {}'.format(line_idx))
line = line.strip().split()
if len(line) != 300 + 1:
continue
word = line[0]
embedding = line[1:]
if (word in word_to_idx) and (not word_to_found[word]):
word_to_found[word] = True
found += 1
word_idx = word_to_idx[word]
embeddings[word_idx] = embedding
print('- done. Found {} vectors for {} words'.format(found, size_vocab))
    # save the embedding matrix as a compressed NumPy archive
np.savez_compressed('w2v.npz', embeddings=embeddings)
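    # Optional sanity check: reload the archive and confirm the matrix shape.
    reloaded = np.load('w2v.npz')['embeddings']
    assert reloaded.shape == (size_vocab, 300)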
|
CLICKHOUSE_HOST = 'localhost'
VERTICA_CONNECTION_PARAMS = {
'host': '127.0.0.1',
'port': 5433,
'user': 'dbadmin',
'password': '',
'database': 'docker',
'autocommit': True,
}
UPLOAD_BATCH_SIZE = 10_000
NUMBER_OF_BATCHES = 1000
BENCHMARK_ITERATIONS = 10
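# Derived scale: UPLOAD_BATCH_SIZE * NUMBER_OF_BATCHES = 10_000 * 1000
# = 10,000,000 rows per full upload (assuming every batch is sent).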
|
# coding: utf-8
# -----------------------------------------------------------------------------------
# <copyright company="Aspose" file="get_table_cell_request.py">
# Copyright (c) 2021 Aspose.Words for Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# -----------------------------------------------------------------------------------
import json
from six.moves.urllib.parse import quote
from asposewordscloud import *
from asposewordscloud.models import *
from asposewordscloud.models.requests import *
from asposewordscloud.models.responses import *
class GetTableCellRequest(BaseRequestObject):
"""
Request model for get_table_cell operation.
Initializes a new instance.
    :param name: The filename of the input document.
    :param table_row_path: The path to the table row in the document tree.
    :param index: Object index.
    :param folder: Original document folder.
    :param storage: Original document storage.
    :param load_encoding: Encoding that will be used to load an HTML (or TXT) document if the encoding is not specified in HTML.
    :param password: Password for opening an encrypted document.
"""
def __init__(self, name, table_row_path, index, folder=None, storage=None, load_encoding=None, password=None):
self.name = name
self.table_row_path = table_row_path
self.index = index
self.folder = folder
self.storage = storage
self.load_encoding = load_encoding
self.password = password
def create_http_request(self, api_client):
# verify the required parameter 'name' is set
if self.name is None:
raise ValueError("Missing the required parameter `name` when calling `get_table_cell`") # noqa: E501
# verify the required parameter 'table_row_path' is set
if self.table_row_path is None:
raise ValueError("Missing the required parameter `table_row_path` when calling `get_table_cell`") # noqa: E501
# verify the required parameter 'index' is set
if self.index is None:
raise ValueError("Missing the required parameter `index` when calling `get_table_cell`") # noqa: E501
path = '/v4.0/words/{name}/{tableRowPath}/cells/{index}'
path_params = {}
if self.name is not None:
path_params['name'] = self.name # noqa: E501
else:
path_params['name'] = '' # noqa: E501
if self.table_row_path is not None:
path_params['tableRowPath'] = self.table_row_path # noqa: E501
else:
path_params['tableRowPath'] = '' # noqa: E501
if self.index is not None:
path_params['index'] = self.index # noqa: E501
else:
path_params['index'] = '' # noqa: E501
# path parameters
collection_formats = {}
if path_params:
path_params = api_client.sanitize_for_serialization(path_params)
path_params = api_client.parameters_to_tuples(path_params, collection_formats)
for k, v in path_params:
# specified safe chars, encode everything
path = path.replace(
'{%s}' % k,
quote(str(v), safe=api_client.configuration.safe_chars_for_path_param)
)
# remove optional path parameters
path = path.replace('//', '/')
query_params = []
if self.folder is not None:
query_params.append(('folder', self.folder)) # noqa: E501
if self.storage is not None:
query_params.append(('storage', self.storage)) # noqa: E501
if self.load_encoding is not None:
query_params.append(('loadEncoding', self.load_encoding)) # noqa: E501
if self.password is not None:
query_params.append(('password', self.password)) # noqa: E501
header_params = {}
form_params = []
body_params = None
return {
"method": "GET",
"path": path,
"query_params": query_params,
"header_params": header_params,
"form_params": form_params,
"body": body_params,
"collection_formats": collection_formats,
"response_type": 'TableCellResponse' # noqa: E501
}
def get_response_type(self):
return 'TableCellResponse' # noqa: E501
def deserialize_response(self, api_client, response):
return self.deserialize(response, TableCellResponse, api_client)
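# Example usage (a sketch; assumes a configured WordsApi client and a document
# already uploaded to cloud storage -- the names below are illustrative):
#
#     import asposewordscloud
#     words_api = asposewordscloud.WordsApi(client_id='...', client_secret='...')
#     request = GetTableCellRequest(name='sample.docx',
#                                   table_row_path='sections/0/tables/0/rows/0',
#                                   index=0)
#     cell = words_api.get_table_cell(request)  # returns a TableCellResponse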
|
import os
import sys
import six
try:
from urllib import urlencode, urlopen
from urllib2 import Request
except ImportError:
from urllib.parse import urlencode
from urllib.request import urlopen, Request
from pandaserver.taskbuffer.TaskBuffer import taskBuffer
from pandaserver.config import panda_config
taskBuffer.init(panda_config.dbhost, panda_config.dbpasswd, nDBConnection=1)
d = taskBuffer.queryDatasetWithMap({'name': sys.argv[1]})
node = {}
node['vuid'] = d.vuid
node['site'] = sys.argv[2]
try:
baseURLSSL = os.environ['PANDA_URL_SSL']
except KeyError:
baseURLSSL = 'https://localhost:25443/server/panda'
url = '{0}/datasetCompleted'.format(baseURLSSL)
rdata = six.b(urlencode(node))
req = Request(url)
fd = urlopen(req, rdata)
data = fd.read()
print(data)
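# Usage sketch (argument meanings inferred from the code above):
#     python <this_script>.py <dataset_name> <site>
# Marks the dataset complete via the PanDA server's /datasetCompleted endpoint
# and prints the raw server response.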
|
from controller import Control
import mongomock
from elasticmock import elasticmock
from datetime import datetime
from freezegun import freeze_time
from unittest import TestCase
from service import *
from projects.job import Job
import random
from sqlite3.dbapi2 import Error
class SQLiteToMongoTest(TestCase):
sqlite_engine = None
sql_create_projects_table = """ CREATE TABLE IF NOT EXISTS projects (
id integer PRIMARY KEY,
name text NOT NULL,
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
); """
def create_table(self, conn):
""" create a table from the create_table_sql statement
:param conn: Connection object
:param create_table_sql: a CREATE TABLE statement
:return:
"""
try:
c = conn.cursor()
c.execute(self.sql_create_projects_table)
except Error as e:
print(e)
def sqlite_populate_many(self, sqlite):
objects = []
conn = sqlite.conn
self.create_table(conn)
c = conn.cursor()
for i in range(100):
insert_stm = "INSERT INTO projects (name) VALUES('name_" + str(i) + "')"
c.execute(insert_stm)
conn.commit()
@mongomock.patch(servers=(('example.com', 27017),))
    def test_transfer_sqlite_to_mongo(self):
control = Control({'retryable': False})
sqlite = SQLiteDB()
self.sqlite_populate_many(sqlite)
mongodb = MongoDB(connection = 'example.com')
control = control.add_source(sqlite).add_destination(mongodb)
        assert control is not None
docs = control.run(Job(from_time=datetime(2020, 11, 11), to_time= datetime(2020, 11, 12)))
assert len(docs) == 500
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module for topics configuration management
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Init logging
import logging
import sys
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.StephenSorriaux.ansible_kafka_admin.plugins.module_utils.kafka_lib_topic import process_module_topics
from ansible_collections.StephenSorriaux.ansible_kafka_admin.plugins.module_utils.kafka_lib_commons import (
module_commons, module_zookeeper_commons, module_topic_commons,
DOCUMENTATION_COMMON
)
# Default logging
# TODO: refactor all this logging logic
log = logging.getLogger('kafka')
log.addHandler(logging.StreamHandler(sys.stdout))
log.setLevel(logging.INFO)
ANSIBLE_METADATA = {'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: kafka_topics
short_description: Manage Kafka topics
description:
- Configure Kafka topics.
  - Not compatible with Kafka versions < 0.11.0.
author:
- Stephen SORRIAUX
- ryarnyah
options:
topics:
description:
- Topics to create. @See kafka_topic for options
mark_others_as_absent:
description:
      - mark topics not listed as absent, thus triggering the deletion
      - of topics missing from the `topics` listing
zookeeper:
description:
- 'the zookeeper connection.'
zookeeper_auth_scheme:
description:
- 'when zookeeper is configured to use authentication, schema used to '
- 'connect to zookeeper.'
default: 'digest'
choices: [digest, sasl]
zookeeper_auth_value:
description:
- 'when zookeeper is configured to use authentication, value used to '
- 'connect.'
zookeeper_ssl_check_hostname:
description:
- 'when using ssl for zookeeper, check if certificate for hostname is '
- 'correct.'
default: True
zookeeper_ssl_cafile:
description:
- 'when using ssl for zookeeper, content of ca cert file or path to '
- 'ca cert file.'
zookeeper_ssl_certfile:
description:
- 'when using ssl for zookeeper, content of cert file or path to '
- 'server cert file.'
zookeeper_ssl_keyfile:
description:
- 'when using ssl for zookeeper, content of keyfile or path to '
- 'server cert key file.'
zookeeper_ssl_password:
description:
- 'when using ssl for zookeeper, password for ssl_keyfile.'
zookeeper_sleep_time:
description:
      - 'when updating the number of partitions and while checking for'
      - 'the ZK node, the time to sleep (in seconds) between'
      - 'each check.'
default: 5
zookeeper_max_retries:
description:
      - 'when updating the number of partitions and while checking for'
      - 'the ZK node, the maximum number of attempts before failing'
default: 5
kafka_sleep_time:
description:
      - 'when updating the number of partitions and while waiting for'
      - 'kafka to apply them, the time to sleep (in seconds) between'
      - 'each check.'
default: 5
kafka_max_retries:
description:
      - 'when updating the number of partitions and while waiting for'
      - 'kafka to apply them, the maximum number of attempts before failing'
default: 5
''' + DOCUMENTATION_COMMON
EXAMPLES = '''
# creates a topic 'test' with the provided configuration for plaintext
- name: create topics
kafka_topics:
topics:
- name: 'test'
partitions: 2
replica_factor: 1
options:
retention.ms: 574930
flush.ms: 12345
state: 'present'
zookeeper: >
"{{ hostvars['zk']['ansible_eth0']['ipv4']['address'] }}:2181"
bootstrap_servers: >
"{{ hostvars['kafka1']['ansible_eth0']['ipv4']['address'] }}:9092,
{{ hostvars['kafka2']['ansible_eth0']['ipv4']['address'] }}:9092"
# deletes a topic
- name: delete topic
  kafka_topics:
topics:
- name: 'test'
state: 'absent'
zookeeper: >
"{{ hostvars['zk']['ansible_eth0']['ipv4']['address'] }}:2181"
bootstrap_servers: >
"{{ hostvars['kafka1']['ansible_eth0']['ipv4']['address'] }}:9092,
{{ hostvars['kafka2']['ansible_eth0']['ipv4']['address'] }}:9092"
'''
def main():
"""
Module usage
"""
spec = dict(
mark_others_as_absent=dict(type='bool', default=False),
topics=dict(
type='list',
elements='dict',
required=True,
options=dict(
name=dict(type='str', required=True),
state=dict(choices=['present', 'absent'], default='present'),
**module_topic_commons
)
),
**module_commons
)
spec.update(module_zookeeper_commons)
module = AnsibleModule(
argument_spec=spec,
supports_check_mode=True
)
process_module_topics(module)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
# Copyright (c) 2018, SELCO Foundation and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestVertical(unittest.TestCase):
pass
|
#
# Cython/Python language types
#
from __future__ import absolute_import
import copy
import hashlib
import re
try:
reduce
except NameError:
from functools import reduce
from functools import partial
from Cython.Utils import cached_function
from .Code import UtilityCode, LazyUtilityCode, TempitaUtilityCode
from . import StringEncoding
from . import Naming
from .Errors import error, warning, CannotSpecialize
class BaseType(object):
#
# Base class for all Cython types including pseudo-types.
# List of attribute names of any subtypes
subtypes = []
_empty_declaration = None
_specialization_name = None
default_format_spec = None
def can_coerce_to_pyobject(self, env):
return False
def can_coerce_from_pyobject(self, env):
return False
def can_coerce_to_pystring(self, env, format_spec=None):
return False
def convert_to_pystring(self, cvalue, code, format_spec=None):
raise NotImplementedError("C types that support string formatting must override this method")
def cast_code(self, expr_code):
return "((%s)%s)" % (self.empty_declaration_code(), expr_code)
def empty_declaration_code(self, pyrex=False):
if pyrex:
return self.declaration_code('', pyrex=True)
if self._empty_declaration is None:
self._empty_declaration = self.declaration_code('')
return self._empty_declaration
def specialization_name(self):
if self._specialization_name is None:
# This is not entirely robust.
common_subs = (self.empty_declaration_code()
.replace("unsigned ", "unsigned_")
.replace("long long", "long_long")
.replace(" ", "__"))
self._specialization_name = re.sub(
'[^a-zA-Z0-9_]', lambda x: '_%x_' % ord(x.group(0)), common_subs)
return self._specialization_name
def base_declaration_code(self, base_code, entity_code):
if entity_code:
return "%s %s" % (base_code, entity_code)
else:
return base_code
def __deepcopy__(self, memo):
"""
        Types never need to be copied; if we do copy, Unfortunate Things
Will Happen!
"""
return self
def get_fused_types(self, result=None, seen=None, subtypes=None):
subtypes = subtypes or self.subtypes
if not subtypes:
return None
if result is None:
result = []
seen = set()
for attr in subtypes:
list_or_subtype = getattr(self, attr)
if list_or_subtype:
if isinstance(list_or_subtype, BaseType):
list_or_subtype.get_fused_types(result, seen)
else:
for subtype in list_or_subtype:
subtype.get_fused_types(result, seen)
return result
def specialize_fused(self, env):
if env.fused_to_specific:
return self.specialize(env.fused_to_specific)
return self
@property
def is_fused(self):
"""
Whether this type or any of its subtypes is a fused type
"""
# Add this indirection for the is_fused property to allow overriding
# get_fused_types in subclasses.
return self.get_fused_types()
def deduce_template_params(self, actual):
"""
Deduce any template params in this (argument) type given the actual
argument type.
https://en.cppreference.com/w/cpp/language/function_template#Template_argument_deduction
"""
return {}
def __lt__(self, other):
"""
For sorting. The sorting order should correspond to the preference of
conversion from Python types.
        Override to provide something sensible. This is only implemented so that
        Python 3 doesn't trip over unorderable types when sorting.
"""
return id(type(self)) < id(type(other))
def py_type_name(self):
"""
Return the name of the Python type that can coerce to this type.
"""
def typeof_name(self):
"""
Return the string with which fused python functions can be indexed.
"""
if self.is_builtin_type or self.py_type_name() == 'object':
index_name = self.py_type_name()
else:
index_name = str(self)
return index_name
def check_for_null_code(self, cname):
"""
Return the code for a NULL-check in case an UnboundLocalError should
be raised if an entry of this type is referenced before assignment.
Returns None if no check should be performed.
"""
return None
def invalid_value(self):
"""
Returns the most invalid value an object of this type can assume as a
C expression string. Returns None if no such value exists.
"""
class PyrexType(BaseType):
#
# Base class for all Cython types
#
# is_pyobject boolean Is a Python object type
# is_extension_type boolean Is a Python extension type
# is_final_type boolean Is a final extension type
# is_numeric boolean Is a C numeric type
# is_int boolean Is a C integer type
# is_float boolean Is a C floating point type
# is_complex boolean Is a C complex type
# is_void boolean Is the C void type
# is_array boolean Is a C array type
# is_ptr boolean Is a C pointer type
# is_null_ptr boolean Is the type of NULL
# is_reference boolean Is a C reference type
# is_rvalue_reference boolean Is a C++ rvalue reference type
# is_const boolean Is a C const type
# is_volatile boolean Is a C volatile type
# is_cv_qualified boolean Is a C const or volatile type
# is_cfunction boolean Is a C function type
# is_struct_or_union boolean Is a C struct or union type
# is_struct boolean Is a C struct type
# is_enum boolean Is a C enum type
# is_cpp_enum boolean Is a C++ scoped enum type
# is_typedef boolean Is a typedef type
# is_string boolean Is a C char * type
# is_pyunicode_ptr boolean Is a C PyUNICODE * type
# is_cpp_string boolean Is a C++ std::string type
# is_unicode_char boolean Is either Py_UCS4 or Py_UNICODE
# is_returncode boolean Is used only to signal exceptions
# is_error boolean Is the dummy error type
# is_buffer boolean Is buffer access type
# is_pythran_expr boolean Is Pythran expr
# is_numpy_buffer boolean Is Numpy array buffer
# has_attributes boolean Has C dot-selectable attributes
# needs_cpp_construction boolean Needs C++ constructor and destructor when used in a cdef class
# needs_refcounting boolean Needs code to be generated similar to incref/gotref/decref.
# Largely used internally.
# default_value string Initial value that can be assigned before first user assignment.
# declaration_value string The value statically assigned on declaration (if any).
# entry Entry The Entry for this type
#
# declaration_code(entity_code,
# for_display = 0, dll_linkage = None, pyrex = 0)
# Returns a code fragment for the declaration of an entity
# of this type, given a code fragment for the entity.
# * If for_display, this is for reading by a human in an error
# message; otherwise it must be valid C code.
# * If dll_linkage is not None, it must be 'DL_EXPORT' or
# 'DL_IMPORT', and will be added to the base type part of
# the declaration.
# * If pyrex = 1, this is for use in a 'cdef extern'
# statement of a Cython include file.
#
# assignable_from(src_type)
# Tests whether a variable of this type can be
# assigned a value of type src_type.
#
# same_as(other_type)
# Tests whether this type represents the same type
# as other_type.
#
# as_argument_type():
# Coerces array and C function types into pointer type for use as
# a formal argument type.
#
is_pyobject = 0
is_unspecified = 0
is_extension_type = 0
is_final_type = 0
is_builtin_type = 0
is_cython_builtin_type = 0
is_numeric = 0
is_int = 0
is_float = 0
is_complex = 0
is_void = 0
is_array = 0
is_ptr = 0
is_null_ptr = 0
is_reference = 0
is_rvalue_reference = 0
is_const = 0
is_volatile = 0
is_cv_qualified = 0
is_cfunction = 0
is_struct_or_union = 0
is_cpp_class = 0
is_cpp_string = 0
is_struct = 0
is_enum = 0
is_cpp_enum = False
is_typedef = 0
is_string = 0
is_pyunicode_ptr = 0
is_unicode_char = 0
is_returncode = 0
is_error = 0
is_buffer = 0
is_ctuple = 0
is_memoryviewslice = 0
is_pythran_expr = 0
is_numpy_buffer = 0
has_attributes = 0
needs_cpp_construction = 0
needs_refcounting = 0
default_value = ""
declaration_value = ""
def resolve(self):
# If a typedef, returns the base type.
return self
def specialize(self, values):
# Returns the concrete type if this is a fused type, or otherwise the type itself.
# May raise Errors.CannotSpecialize on failure
return self
def literal_code(self, value):
# Returns a C code fragment representing a literal
# value of this type.
return str(value)
def __str__(self):
return self.declaration_code("", for_display = 1).strip()
def same_as(self, other_type, **kwds):
return self.same_as_resolved_type(other_type.resolve(), **kwds)
def same_as_resolved_type(self, other_type):
return self == other_type or other_type is error_type
def subtype_of(self, other_type):
return self.subtype_of_resolved_type(other_type.resolve())
def subtype_of_resolved_type(self, other_type):
return self.same_as(other_type)
def assignable_from(self, src_type):
return self.assignable_from_resolved_type(src_type.resolve())
def assignable_from_resolved_type(self, src_type):
return self.same_as(src_type)
def as_argument_type(self):
return self
def is_complete(self):
# A type is incomplete if it is an unsized array,
# a struct whose attributes are not defined, etc.
return 1
def is_simple_buffer_dtype(self):
return (self.is_int or self.is_float or self.is_complex or self.is_pyobject or
self.is_extension_type or self.is_ptr)
def struct_nesting_depth(self):
# Returns the number levels of nested structs. This is
# used for constructing a stack for walking the run-time
# type information of the struct.
return 1
def global_init_code(self, entry, code):
# abstract
pass
def needs_nonecheck(self):
return 0
def _assign_from_py_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None, extra_args=None):
args = ', ' + ', '.join('%s' % arg for arg in extra_args) if extra_args else ''
convert_call = "%s(%s%s)" % (
from_py_function or self.from_py_function,
source_code,
args,
)
if self.is_enum:
convert_call = typecast(self, c_long_type, convert_call)
return '%s = %s; %s' % (
result_code,
convert_call,
code.error_goto_if(error_condition or self.error_condition(result_code), error_pos))
def _generate_dummy_refcounting(self, code, *ignored_args, **ignored_kwds):
if self.needs_refcounting:
raise NotImplementedError("Ref-counting operation not yet implemented for type %s" %
self)
def _generate_dummy_refcounting_assignment(self, code, cname, rhs_cname, *ignored_args, **ignored_kwds):
if self.needs_refcounting:
raise NotImplementedError("Ref-counting operation not yet implemented for type %s" %
self)
code.putln("%s = %s" % (cname, rhs_cname))
generate_incref = generate_xincref = generate_decref = generate_xdecref \
= generate_decref_clear = generate_xdecref_clear \
= generate_gotref = generate_xgotref = generate_giveref = generate_xgiveref \
= _generate_dummy_refcounting
generate_decref_set = generate_xdecref_set = _generate_dummy_refcounting_assignment
def nullcheck_string(self, code, cname):
if self.needs_refcounting:
raise NotImplementedError("Ref-counting operation not yet implemented for type %s" %
self)
code.putln("1")
def public_decl(base_code, dll_linkage):
if dll_linkage:
return "%s(%s)" % (dll_linkage, base_code.replace(',', ' __PYX_COMMA '))
else:
return base_code
def create_typedef_type(name, base_type, cname, is_external=0, namespace=None):
is_fused = base_type.is_fused
if base_type.is_complex or is_fused:
if is_external:
if is_fused:
msg = "Fused"
else:
msg = "Complex"
raise ValueError("%s external typedefs not supported" % msg)
return base_type
else:
return CTypedefType(name, base_type, cname, is_external, namespace)
class CTypedefType(BaseType):
#
# Pseudo-type defined with a ctypedef statement in a
# 'cdef extern from' block.
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# qualified_name string
# typedef_name string
# typedef_cname string
# typedef_base_type PyrexType
# typedef_is_external bool
is_typedef = 1
typedef_is_external = 0
to_py_utility_code = None
from_py_utility_code = None
subtypes = ['typedef_base_type']
def __init__(self, name, base_type, cname, is_external=0, namespace=None):
assert not base_type.is_complex
self.typedef_name = name
self.typedef_cname = cname
self.typedef_base_type = base_type
self.typedef_is_external = is_external
self.typedef_namespace = namespace
def invalid_value(self):
return self.typedef_base_type.invalid_value()
def resolve(self):
return self.typedef_base_type.resolve()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.typedef_name
else:
base_code = public_decl(self.typedef_cname, dll_linkage)
if self.typedef_namespace is not None and not pyrex:
base_code = "%s::%s" % (self.typedef_namespace.empty_declaration_code(), base_code)
return self.base_declaration_code(base_code, entity_code)
def as_argument_type(self):
return self
def cast_code(self, expr_code):
# If self is really an array (rather than pointer), we can't cast.
# For example, the gmp mpz_t.
if self.typedef_base_type.is_array:
base_type = self.typedef_base_type.base_type
return CPtrType(base_type).cast_code(expr_code)
else:
return BaseType.cast_code(self, expr_code)
def specialize(self, values):
base_type = self.typedef_base_type.specialize(values)
namespace = self.typedef_namespace.specialize(values) if self.typedef_namespace else None
if base_type is self.typedef_base_type and namespace is self.typedef_namespace:
return self
else:
return create_typedef_type(self.typedef_name, base_type, self.typedef_cname,
0, namespace)
def __repr__(self):
return "<CTypedefType %s>" % self.typedef_cname
def __str__(self):
return self.typedef_name
def _create_utility_code(self, template_utility_code,
template_function_name):
type_name = type_identifier(self.typedef_cname)
utility_code = template_utility_code.specialize(
type = self.typedef_cname,
TypeName = type_name)
function_name = template_function_name % type_name
return utility_code, function_name
def create_to_py_utility_code(self, env):
if self.typedef_is_external:
if not self.to_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"TO_PY_FUNCTION": self.to_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
                    pass  # XXX implement!
elif base_type.is_cpp_string:
cname = "__pyx_convert_PyObject_string_to_py_%s" % type_identifier(self)
context = {
'cname': cname,
'type': self.typedef_cname,
}
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
"string.to_py", "CppConvert.pyx", context=context))
self.to_py_function = cname
return True
if self.to_py_utility_code:
env.use_utility_code(self.to_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_to_py_utility_code(env)
def create_from_py_utility_code(self, env):
if self.typedef_is_external:
if not self.from_py_utility_code:
base_type = self.typedef_base_type
if type(base_type) is CIntType:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
elif base_type.is_float:
pass # XXX implement!
elif base_type.is_complex:
pass # XXX implement!
elif base_type.is_cpp_string:
cname = '__pyx_convert_string_from_py_%s' % type_identifier(self)
context = {
'cname': cname,
'type': self.typedef_cname,
}
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
"string.from_py", "CppConvert.pyx", context=context))
self.from_py_function = cname
return True
if self.from_py_utility_code:
env.use_utility_code(self.from_py_utility_code)
return True
# delegation
return self.typedef_base_type.create_from_py_utility_code(env)
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
if to_py_function is None:
to_py_function = self.to_py_function
return self.typedef_base_type.to_py_call_code(
source_code, result_code, result_type, to_py_function)
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
return self.typedef_base_type.from_py_call_code(
source_code, result_code, error_pos, code,
from_py_function or self.from_py_function,
error_condition or self.error_condition(result_code)
)
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.empty_declaration_code()
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load_cached(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load_cached(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load_cached(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def error_condition(self, result_code):
if self.typedef_is_external:
if self.exception_value:
condition = "(%s == %s)" % (
result_code, self.cast_code(self.exception_value))
if self.exception_check:
condition += " && PyErr_Occurred()"
return condition
# delegation
return self.typedef_base_type.error_condition(result_code)
def __getattr__(self, name):
return getattr(self.typedef_base_type, name)
def py_type_name(self):
return self.typedef_base_type.py_type_name()
def can_coerce_to_pyobject(self, env):
return self.typedef_base_type.can_coerce_to_pyobject(env)
def can_coerce_from_pyobject(self, env):
return self.typedef_base_type.can_coerce_from_pyobject(env)
class MemoryViewSliceType(PyrexType):
is_memoryviewslice = 1
default_value = "{ 0, 0, { 0 }, { 0 }, { 0 } }"
has_attributes = 1
needs_refcounting = 1 # Ideally, reference counting for memoryview and
# pyobject code could be generated in the same way. However, memoryviews
# are sufficiently specialized that this doesn't seem practical.
# Implement a limited version of it for now.
scope = None
# These are special cased in Defnode
from_py_function = None
to_py_function = None
exception_value = None
exception_check = True
subtypes = ['dtype']
def __init__(self, base_dtype, axes):
"""
MemoryViewSliceType(base, axes)
Base is the C base type; axes is a list of (access, packing) strings,
where access is one of 'full', 'direct' or 'ptr' and packing is one of
'contig', 'strided' or 'follow'. There is one (access, packing) tuple
for each dimension.
The access specifiers determine whether the array data contains
pointers that need to be dereferenced along that axis when
retrieving/setting:
'direct' -- No pointers stored in this dimension.
'ptr' -- Pointer stored in this dimension.
'full' -- Check along this dimension, don't assume either.
The packing specifiers specify how the array elements are laid out
in memory.
'contig' -- The data is contiguous in memory along this dimension.
At most one dimension may be specified as 'contig'.
'strided' -- The data isn't contiguous along this dimension.
'follow' -- Used for C/Fortran contiguous arrays; a 'follow' dimension
has its stride computed automatically from the extents of the other
dimensions to ensure C or Fortran memory layout.
C-contiguous memory has 'direct' as the access spec, 'contig' as the
*last* axis' packing spec and 'follow' for all other packing specs.
Fortran-contiguous memory has 'direct' as the access spec, 'contig' as
the *first* axis' packing spec and 'follow' for all other packing
specs.
"""
from . import Buffer, MemoryView
self.dtype = base_dtype
self.axes = axes
self.ndim = len(axes)
self.flags = MemoryView.get_buf_flags(self.axes)
self.is_c_contig, self.is_f_contig = MemoryView.is_cf_contig(self.axes)
assert not (self.is_c_contig and self.is_f_contig)
self.mode = MemoryView.get_mode(axes)
self.writable_needed = False
if not self.dtype.is_fused:
self.dtype_name = Buffer.mangle_dtype_name(self.dtype)
def __hash__(self):
return hash(self.__class__) ^ hash(self.dtype) ^ hash(tuple(self.axes))
def __eq__(self, other):
if isinstance(other, BaseType):
return self.same_as_resolved_type(other)
else:
return False
def __ne__(self, other):
# TODO drop when Python2 is dropped
return not (self == other)
def same_as_resolved_type(self, other_type):
return ((other_type.is_memoryviewslice and
#self.writable_needed == other_type.writable_needed and # FIXME: should be only uni-directional
self.dtype.same_as(other_type.dtype) and
self.axes == other_type.axes) or
other_type is error_type)
def needs_nonecheck(self):
return True
def is_complete(self):
# incomplete since the underlying struct doesn't have a cython.memoryview object.
return 0
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
# XXX: we put these guards in for now...
assert not dll_linkage
from . import MemoryView
base_code = StringEncoding.EncodedString(
str(self) if pyrex or for_display else MemoryView.memviewslice_cname)
return self.base_declaration_code(
base_code,
entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'mvs_class_'+self.specialization_suffix(),
None,
visibility='extern')
scope.parent_type = self
scope.directives = {}
scope.declare_var('_data', c_char_ptr_type, None,
cname='data', is_cdef=1)
return True
def declare_attribute(self, attribute, env, pos):
from . import MemoryView, Options
scope = self.scope
if attribute == 'shape':
scope.declare_var('shape',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='shape',
is_cdef=1)
elif attribute == 'strides':
scope.declare_var('strides',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='strides',
is_cdef=1)
elif attribute == 'suboffsets':
scope.declare_var('suboffsets',
c_array_type(c_py_ssize_t_type,
Options.buffer_max_dims),
pos,
cname='suboffsets',
is_cdef=1)
elif attribute in ("copy", "copy_fortran"):
ndim = len(self.axes)
follow_dim = [('direct', 'follow')]
contig_dim = [('direct', 'contig')]
to_axes_c = follow_dim * (ndim - 1) + contig_dim
to_axes_f = contig_dim + follow_dim * (ndim - 1)
dtype = self.dtype
if dtype.is_cv_qualified:
dtype = dtype.cv_base_type
to_memview_c = MemoryViewSliceType(dtype, to_axes_c)
to_memview_f = MemoryViewSliceType(dtype, to_axes_f)
for to_memview, cython_name in [(to_memview_c, "copy"),
(to_memview_f, "copy_fortran")]:
copy_func_type = CFuncType(
to_memview,
[CFuncTypeArg("memviewslice", self, None)])
copy_cname = MemoryView.copy_c_or_fortran_cname(to_memview)
entry = scope.declare_cfunction(
cython_name,
copy_func_type, pos=pos, defining=1,
cname=copy_cname)
utility = MemoryView.get_copy_new_utility(pos, self, to_memview)
env.use_utility_code(utility)
MemoryView.use_cython_array_utility_code(env)
elif attribute in ("is_c_contig", "is_f_contig"):
# is_c_contig and is_f_contig functions
for (c_or_f, cython_name) in (('C', 'is_c_contig'), ('F', 'is_f_contig')):
is_contig_name = MemoryView.get_is_contig_func_name(c_or_f, self.ndim)
cfunctype = CFuncType(
return_type=c_bint_type,
args=[CFuncTypeArg("memviewslice", self, None)],
exception_value="-1",
)
entry = scope.declare_cfunction(cython_name,
cfunctype,
pos=pos,
defining=1,
cname=is_contig_name)
entry.utility_code_definition = MemoryView.get_is_contig_utility(c_or_f, self.ndim)
return True
def get_entry(self, node, cname=None, type=None):
from . import MemoryView, Symtab
if cname is None:
assert node.is_simple() or node.is_temp or node.is_elemental
cname = node.result()
if type is None:
type = node.type
entry = Symtab.Entry(cname, cname, type, node.pos)
return MemoryView.MemoryViewSliceBufferEntry(entry)
def conforms_to(self, dst, broadcast=False, copying=False):
"""
Returns True if src conforms to dst, False otherwise.
Conformance requires matching dtypes, equal ndim (unless broadcasting), and a conformable spec for each axis.
Any packing/access spec is conformable to itself.
'direct' and 'ptr' are conformable to 'full'.
'contig' and 'follow' are conformable to 'strided'.
Any other combo is not conformable.
"""
from . import MemoryView
src = self
#if not copying and self.writable_needed and not dst.writable_needed:
# return False
src_dtype, dst_dtype = src.dtype, dst.dtype
# We can add but not remove const/volatile modifiers
# (except if we are copying by value, then anything is fine)
if not copying:
if src_dtype.is_const and not dst_dtype.is_const:
return False
if src_dtype.is_volatile and not dst_dtype.is_volatile:
return False
# const/volatile checks are done, remove those qualifiers
if src_dtype.is_cv_qualified:
src_dtype = src_dtype.cv_base_type
if dst_dtype.is_cv_qualified:
dst_dtype = dst_dtype.cv_base_type
if src_dtype != dst_dtype:
return False
if src.ndim != dst.ndim:
if broadcast:
src, dst = MemoryView.broadcast_types(src, dst)
else:
return False
for src_spec, dst_spec in zip(src.axes, dst.axes):
src_access, src_packing = src_spec
dst_access, dst_packing = dst_spec
if src_access != dst_access and dst_access != 'full':
return False
if src_packing != dst_packing and dst_packing != 'strided' and not copying:
return False
return True
def valid_dtype(self, dtype, i=0):
"""
Return whether type dtype can be used as the base type of a
memoryview slice.
We support structs, numeric types and objects
"""
if dtype.is_complex and dtype.real_type.is_int:
return False
if dtype.is_struct and dtype.kind == 'struct':
for member in dtype.scope.var_entries:
if not self.valid_dtype(member.type):
return False
return True
return (
dtype.is_error or
# Pointers are not valid (yet)
# (dtype.is_ptr and valid_memslice_dtype(dtype.base_type)) or
(dtype.is_array and i < 8 and self.valid_dtype(dtype.base_type, i + 1)) or
dtype.is_numeric or
dtype.is_pyobject or
dtype.is_fused or # accept this as it will be replaced by specializations later
(dtype.is_typedef and self.valid_dtype(dtype.typedef_base_type))
)
def validate_memslice_dtype(self, pos):
if not self.valid_dtype(self.dtype):
error(pos, "Invalid base type for memoryview slice: %s" % self.dtype)
def assert_direct_dims(self, pos):
for access, packing in self.axes:
if access != 'direct':
error(pos, "All dimensions must be direct")
return False
return True
def transpose(self, pos):
if not self.assert_direct_dims(pos):
return error_type
return MemoryViewSliceType(self.dtype, self.axes[::-1])
def specialization_name(self):
return '%s_%s' % (
super(MemoryViewSliceType, self).specialization_name(),
self.specialization_suffix())
def specialization_suffix(self):
return "%s_%s" % (self.axes_to_name(), self.dtype_name)
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
def check_for_null_code(self, cname):
return cname + '.memview'
def create_from_py_utility_code(self, env):
from . import MemoryView, Buffer
# We don't have 'code', so use a LazyUtilityCode with a callback.
def lazy_utility_callback(code):
context['dtype_typeinfo'] = Buffer.get_type_information_cname(code, self.dtype)
return TempitaUtilityCode.load(
"ObjectToMemviewSlice", "MemoryView_C.c", context=context)
env.use_utility_code(MemoryView.memviewslice_init_code)
env.use_utility_code(LazyUtilityCode(lazy_utility_callback))
if self.is_c_contig:
c_or_f_flag = "__Pyx_IS_C_CONTIG"
elif self.is_f_contig:
c_or_f_flag = "__Pyx_IS_F_CONTIG"
else:
c_or_f_flag = "0"
suffix = self.specialization_suffix()
funcname = "__Pyx_PyObject_to_MemoryviewSlice_" + suffix
context = dict(
MemoryView.context,
buf_flag = self.flags,
ndim = self.ndim,
axes_specs = ', '.join(self.axes_to_code()),
dtype_typedecl = self.dtype.empty_declaration_code(),
struct_nesting_depth = self.dtype.struct_nesting_depth(),
c_or_f_flag = c_or_f_flag,
funcname = funcname,
)
self.from_py_function = funcname
return True
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
# NOTE: auto-detection of readonly buffers is disabled:
# writable = self.writable_needed or not self.dtype.is_const
writable = not self.dtype.is_const
return self._assign_from_py_code(
source_code, result_code, error_pos, code, from_py_function, error_condition,
extra_args=['PyBUF_WRITABLE' if writable else '0'])
def create_to_py_utility_code(self, env):
self._dtype_to_py_func, self._dtype_from_py_func = self.dtype_object_conversion_funcs(env)
return True
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
assert self._dtype_to_py_func
assert self._dtype_from_py_func
to_py_func = "(PyObject *(*)(char *)) " + self._dtype_to_py_func
from_py_func = "(int (*)(char *, PyObject *)) " + self._dtype_from_py_func
tup = (result_code, source_code, self.ndim, to_py_func, from_py_func, self.dtype.is_pyobject)
return "%s = __pyx_memoryview_fromslice(%s, %s, %s, %s, %d);" % tup
def dtype_object_conversion_funcs(self, env):
get_function = "__pyx_memview_get_%s" % self.dtype_name
set_function = "__pyx_memview_set_%s" % self.dtype_name
context = dict(
get_function = get_function,
set_function = set_function,
)
if self.dtype.is_pyobject:
utility_name = "MemviewObjectToObject"
else:
self.dtype.create_to_py_utility_code(env)
to_py_function = self.dtype.to_py_function
from_py_function = None
if not self.dtype.is_const:
self.dtype.create_from_py_utility_code(env)
from_py_function = self.dtype.from_py_function
if not (to_py_function or from_py_function):
return "NULL", "NULL"
if not to_py_function:
get_function = "NULL"
if not from_py_function:
set_function = "NULL"
utility_name = "MemviewDtypeToObject"
error_condition = (self.dtype.error_condition('value') or
'PyErr_Occurred()')
context.update(
to_py_function=to_py_function,
from_py_function=from_py_function,
dtype=self.dtype.empty_declaration_code(),
error_condition=error_condition,
)
utility = TempitaUtilityCode.load_cached(
utility_name, "MemoryView_C.c", context=context)
env.use_utility_code(utility)
return get_function, set_function
def axes_to_code(self):
"""Return a list of code constants for each axis"""
from . import MemoryView
d = MemoryView._spec_to_const
return ["(%s | %s)" % (d[a], d[p]) for a, p in self.axes]
def axes_to_name(self):
"""Return an abbreviated name for our axes"""
from . import MemoryView
d = MemoryView._spec_to_abbrev
return "".join(["%s%s" % (d[a], d[p]) for a, p in self.axes])
def error_condition(self, result_code):
return "!%s.memview" % result_code
def __str__(self):
from . import MemoryView
axes_code_list = []
for idx, (access, packing) in enumerate(self.axes):
flag = MemoryView.get_memoryview_flag(access, packing)
if flag == "strided":
axes_code_list.append(":")
else:
if flag == 'contiguous':
have_follow = [p for a, p in self.axes[idx - 1:idx + 2]
if p == 'follow']
if have_follow or self.ndim == 1:
flag = '1'
axes_code_list.append("::" + flag)
if self.dtype.is_pyobject:
dtype_name = self.dtype.name
else:
dtype_name = self.dtype
return "%s[%s]" % (dtype_name, ", ".join(axes_code_list))
def specialize(self, values):
"""This does not validate the base type!!"""
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return MemoryViewSliceType(dtype, self.axes)
return self
def cast_code(self, expr_code):
return expr_code
# Increfing memoryviews currently seems to be heavily special-cased.
# Therefore, use our own function for now.
def generate_incref(self, code, name, **kwds):
pass
def generate_incref_memoryviewslice(self, code, slice_cname, have_gil):
# TODO ideally would be done separately
code.putln("__PYX_INC_MEMVIEW(&%s, %d);" % (slice_cname, int(have_gil)))
# Decref, however, does appear to apply uniformly to memoryview slices,
# with "have_gil" defaulting to True.
def generate_xdecref(self, code, cname, nanny, have_gil):
code.putln("__PYX_XCLEAR_MEMVIEW(&%s, %d);" % (cname, int(have_gil)))
def generate_decref(self, code, cname, nanny, have_gil):
# Fall back to xdecref since we don't care to have a separate decref version for this.
self.generate_xdecref(code, cname, nanny, have_gil)
def generate_xdecref_clear(self, code, cname, clear_before_decref, **kwds):
self.generate_xdecref(code, cname, **kwds)
code.putln("%s.memview = NULL; %s.data = NULL;" % (cname, cname))
def generate_decref_clear(self, code, cname, **kwds):
# memoryviews don't currently distinguish between xdecref and decref
self.generate_xdecref_clear(code, cname, **kwds)
# memoryviews don't participate in giveref/gotref
generate_gotref = generate_xgotref = generate_xgiveref = generate_giveref = lambda *args: None
class BufferType(BaseType):
#
# Delegates most attribute lookups to the base type.
# (Anything not defined here or in the BaseType is delegated.)
#
# dtype PyrexType
# ndim int
# mode str
# negative_indices bool
# cast bool
# is_buffer bool
# writable bool
is_buffer = 1
writable = True
subtypes = ['dtype']
def __init__(self, base, dtype, ndim, mode, negative_indices, cast):
self.base = base
self.dtype = dtype
self.ndim = ndim
self.buffer_ptr_type = CPtrType(dtype)
self.mode = mode
self.negative_indices = negative_indices
self.cast = cast
self.is_numpy_buffer = self.base.name == "ndarray"
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
def as_argument_type(self):
return self
def specialize(self, values):
dtype = self.dtype.specialize(values)
if dtype is not self.dtype:
return BufferType(self.base, dtype, self.ndim, self.mode,
self.negative_indices, self.cast)
return self
def get_entry(self, node):
from . import Buffer
assert node.is_name
return Buffer.BufferEntry(node.entry)
def __getattr__(self, name):
return getattr(self.base, name)
def __repr__(self):
return "<BufferType %r>" % self.base
def __str__(self):
# avoid ', ', as fused functions split the signature string on ', '
cast_str = ''
if self.cast:
cast_str = ',cast=True'
return "%s[%s,ndim=%d%s]" % (self.base, self.dtype, self.ndim,
cast_str)
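# For example, a 2D buffer of double over a numpy ndarray base would
# print roughly as "ndarray[double,ndim=2]", with ",cast=True" appended
# when the cast flag is set (the exact base name depends on the base
# type's own __str__).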
def assignable_from(self, other_type):
if other_type.is_buffer:
return (self.same_as(other_type, compare_base=False) and
self.base.assignable_from(other_type.base))
return self.base.assignable_from(other_type)
def same_as(self, other_type, compare_base=True):
if not other_type.is_buffer:
return other_type.same_as(self.base)
return (self.dtype.same_as(other_type.dtype) and
self.ndim == other_type.ndim and
self.mode == other_type.mode and
self.cast == other_type.cast and
(not compare_base or self.base.same_as(other_type.base)))
class PyObjectType(PyrexType):
#
# Base class for all Python object types (reference-counted).
#
# buffer_defaults dict or None Default options for buffer
name = "object"
is_pyobject = 1
default_value = "0"
declaration_value = "0"
buffer_defaults = None
is_extern = False
is_subclassed = False
is_gc_simple = False
builtin_trashcan = False # builtin type using trashcan
needs_refcounting = True
def __str__(self):
return "Python object"
def __repr__(self):
return "<PyObjectType>"
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
def default_coerced_ctype(self):
"""The default C type that this Python type coerces to, or None."""
return None
def assignable_from(self, src_type):
# except for pointers, conversion will be attempted
return not src_type.is_ptr or src_type.is_string or src_type.is_pyunicode_ptr
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "object"
else:
base_code = public_decl("PyObject", dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def as_pyobject(self, cname):
if (not self.is_complete()) or self.is_extension_type:
return "(PyObject *)" + cname
else:
return cname
def py_type_name(self):
return "object"
def __lt__(self, other):
"""
Make sure we sort highest, as instance checking on py_type_name
('object') is always true
"""
return False
def global_init_code(self, entry, code):
code.put_init_var_to_py_none(entry, nanny=False)
def check_for_null_code(self, cname):
return cname
def generate_incref(self, code, cname, nanny):
if nanny:
code.putln("__Pyx_INCREF(%s);" % self.as_pyobject(cname))
else:
code.putln("Py_INCREF(%s);" % self.as_pyobject(cname))
def generate_xincref(self, code, cname, nanny):
if nanny:
code.putln("__Pyx_XINCREF(%s);" % self.as_pyobject(cname))
else:
code.putln("Py_XINCREF(%s);" % self.as_pyobject(cname))
def generate_decref(self, code, cname, nanny, have_gil):
# have_gil is for the benefit of memoryviewslice - it's ignored here
assert have_gil
self._generate_decref(code, cname, nanny, null_check=False, clear=False)
def generate_xdecref(self, code, cname, nanny, have_gil):
# in this (and other) PyObjectType functions, have_gil is being
# passed to provide a common interface with MemoryviewSlice.
# It's ignored here
self._generate_decref(code, cname, nanny, null_check=True,
clear=False)
def generate_decref_clear(self, code, cname, clear_before_decref, nanny, have_gil):
self._generate_decref(code, cname, nanny, null_check=False,
clear=True, clear_before_decref=clear_before_decref)
def generate_xdecref_clear(self, code, cname, clear_before_decref=False, nanny=True, have_gil=None):
self._generate_decref(code, cname, nanny, null_check=True,
clear=True, clear_before_decref=clear_before_decref)
def generate_gotref(self, code, cname):
code.putln("__Pyx_GOTREF(%s);" % self.as_pyobject(cname))
def generate_xgotref(self, code, cname):
code.putln("__Pyx_XGOTREF(%s);" % self.as_pyobject(cname))
def generate_giveref(self, code, cname):
code.putln("__Pyx_GIVEREF(%s);" % self.as_pyobject(cname))
def generate_xgiveref(self, code, cname):
code.putln("__Pyx_XGIVEREF(%s);" % self.as_pyobject(cname))
def generate_decref_set(self, code, cname, rhs_cname):
code.putln("__Pyx_DECREF_SET(%s, %s);" % (cname, rhs_cname))
def generate_xdecref_set(self, code, cname, rhs_cname):
code.putln("__Pyx_XDECREF_SET(%s, %s);" % (cname, rhs_cname))
def _generate_decref(self, code, cname, nanny, null_check=False,
clear=False, clear_before_decref=False):
prefix = '__Pyx' if nanny else 'Py'
X = 'X' if null_check else ''
if clear:
if clear_before_decref:
if not nanny:
X = '' # CPython doesn't have a Py_XCLEAR()
code.putln("%s_%sCLEAR(%s);" % (prefix, X, cname))
else:
code.putln("%s_%sDECREF(%s); %s = 0;" % (
prefix, X, self.as_pyobject(cname), cname))
else:
code.putln("%s_%sDECREF(%s);" % (
prefix, X, self.as_pyobject(cname)))
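# For example, null_check=True with clear=True, a nanny, and no
# clear-before-decref emits code like "__Pyx_XDECREF(x); x = 0;", while
# the plain non-nanny decref path emits "Py_DECREF(x);" (with the cname
# cast to PyObject * where needed).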
def nullcheck_string(self, cname):
return cname
builtin_types_that_cannot_create_refcycles = set([
'object', 'bool', 'int', 'long', 'float', 'complex',
'bytearray', 'bytes', 'unicode', 'str', 'basestring'
])
builtin_types_with_trashcan = set([
'dict', 'list', 'set', 'frozenset', 'tuple', 'type',
])
class BuiltinObjectType(PyObjectType):
# objstruct_cname string Name of PyObject struct
is_builtin_type = 1
has_attributes = 1
base_type = None
module_name = '__builtin__'
require_exact = 1
# fields that let it look like an extension type
vtabslot_cname = None
vtabstruct_cname = None
vtabptr_cname = None
typedef_flag = True
is_external = True
decl_type = 'PyObject'
def __init__(self, name, cname, objstruct_cname=None):
self.name = name
self.cname = cname
self.typeptr_cname = "(&%s)" % cname
self.objstruct_cname = objstruct_cname
self.is_gc_simple = name in builtin_types_that_cannot_create_refcycles
self.builtin_trashcan = name in builtin_types_with_trashcan
if name == 'type':
# Special case the type type, as many C API calls (and other
# libraries) actually expect a PyTypeObject* for type arguments.
self.decl_type = objstruct_cname
if name == 'Exception':
self.require_exact = 0
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def __str__(self):
return "%s object" % self.name
def __repr__(self):
return "<%s>"% self.cname
def default_coerced_ctype(self):
if self.name in ('bytes', 'bytearray'):
return c_char_ptr_type
elif self.name == 'bool':
return c_bint_type
elif self.name == 'float':
return c_double_type
return None
def assignable_from(self, src_type):
if isinstance(src_type, BuiltinObjectType):
if self.name == 'basestring':
return src_type.name in ('str', 'unicode', 'basestring')
else:
return src_type.name == self.name
elif src_type.is_extension_type:
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (src_type.module_name == '__builtin__' and
src_type.name == self.name)
else:
return True
def typeobj_is_available(self):
return True
def attributes_known(self):
return True
def subtype_of(self, type):
return type.is_pyobject and type.assignable_from(self)
def type_check_function(self, exact=True):
type_name = self.name
if type_name == 'str':
type_check = 'PyString_Check'
elif type_name == 'basestring':
type_check = '__Pyx_PyBaseString_Check'
elif type_name == 'Exception':
type_check = '__Pyx_PyException_Check'
elif type_name == 'bytearray':
type_check = 'PyByteArray_Check'
elif type_name == 'frozenset':
type_check = 'PyFrozenSet_Check'
else:
type_check = 'Py%s_Check' % type_name.capitalize()
if exact and type_name not in ('bool', 'slice', 'Exception'):
type_check += 'Exact'
return type_check
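# For example, 'list' with exact=True maps to "PyList_CheckExact", while
# 'bool' stays "PyBool_Check" (presumably because bool cannot be
# subclassed, so there is no exact variant to prefer).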
def isinstance_code(self, arg):
return '%s(%s)' % (self.type_check_function(exact=False), arg)
def type_test_code(self, arg, notnone=False, exact=True):
type_check = self.type_check_function(exact=exact)
check = 'likely(%s(%s))' % (type_check, arg)
if not notnone:
check += '||((%s) == Py_None)' % arg
if self.name == 'basestring':
name = '(PY_MAJOR_VERSION < 3 ? "basestring" : "str")'
else:
name = '"%s"' % self.name
return check + ' || __Pyx_RaiseUnexpectedTypeError(%s, %s)' % (name, arg)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
base_code = public_decl(self.decl_type, dll_linkage)
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def as_pyobject(self, cname):
if self.decl_type == 'PyObject':
return cname
else:
return "(PyObject *)" + cname
def cast_code(self, expr_code, to_object_struct = False):
return "((%s*)%s)" % (
to_object_struct and self.objstruct_cname or self.decl_type, # self.objstruct_cname may be None
expr_code)
def py_type_name(self):
return self.name
class PyExtensionType(PyObjectType):
#
# A Python extension type.
#
# name string
# scope CClassScope Attribute namespace
# visibility string
# typedef_flag boolean
# base_type PyExtensionType or None
# module_name string or None Qualified name of defining module
# objstruct_cname string Name of PyObject struct
# objtypedef_cname string Name of PyObject struct typedef
# typeobj_cname string or None C code fragment referring to type object
# typeptr_cname string or None Name of pointer to external type object
# vtabslot_cname string Name of C method table member
# vtabstruct_cname string Name of C method table struct
# vtabptr_cname string Name of pointer to C method table
# vtable_cname string Name of C method table definition
# early_init boolean Whether to initialize early (as opposed to during module execution).
# defered_declarations [thunk] Used to declare class hierarchies in order
# check_size 'warn', 'error', 'ignore' What to do if tp_basicsize does not match
is_extension_type = 1
has_attributes = 1
early_init = 1
objtypedef_cname = None
def __init__(self, name, typedef_flag, base_type, is_external=0, check_size=None):
self.name = name
self.scope = None
self.typedef_flag = typedef_flag
if base_type is not None:
base_type.is_subclassed = True
self.base_type = base_type
self.module_name = None
self.objstruct_cname = None
self.typeobj_cname = None
self.typeptr_cname = None
self.vtabslot_cname = None
self.vtabstruct_cname = None
self.vtabptr_cname = None
self.vtable_cname = None
self.is_external = is_external
self.check_size = check_size or 'warn'
self.defered_declarations = []
def set_scope(self, scope):
self.scope = scope
if scope:
scope.parent_type = self
def needs_nonecheck(self):
return True
def subtype_of_resolved_type(self, other_type):
if other_type.is_extension_type or other_type.is_builtin_type:
return self is other_type or (
self.base_type and self.base_type.subtype_of(other_type))
else:
return other_type is py_object_type
def typeobj_is_available(self):
# Do we have a pointer to the type object?
return self.typeptr_cname
def typeobj_is_imported(self):
# If we don't know the C name of the type object but we do
# know which module it's defined in, it will be imported.
return self.typeobj_cname is None and self.module_name is not None
def assignable_from(self, src_type):
if self == src_type:
return True
if isinstance(src_type, PyExtensionType):
if src_type.base_type is not None:
return self.assignable_from(src_type.base_type)
if isinstance(src_type, BuiltinObjectType):
# FIXME: This is an ugly special case that we currently
# keep supporting. It allows users to specify builtin
# types as external extension types, while keeping them
# compatible with the real builtin types. We already
# generate a warning for it. Big TODO: remove!
return (self.module_name == '__builtin__' and
self.name == src_type.name)
return False
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0, deref = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
objstruct = self.objstruct_cname
else:
objstruct = "struct %s" % self.objstruct_cname
base_code = public_decl(objstruct, dll_linkage)
if deref:
assert not entity_code
else:
entity_code = "*%s" % entity_code
return self.base_declaration_code(base_code, entity_code)
def type_test_code(self, py_arg, notnone=False):
none_check = "((%s) == Py_None)" % py_arg
type_check = "likely(__Pyx_TypeTest(%s, %s))" % (
py_arg, self.typeptr_cname)
if notnone:
return type_check
else:
return "likely(%s || %s)" % (none_check, type_check)
def attributes_known(self):
return self.scope is not None
def __str__(self):
return self.name
def __repr__(self):
return "<PyExtensionType %s%s>" % (self.scope.class_name,
("", " typedef")[self.typedef_flag])
def py_type_name(self):
if not self.module_name:
return self.name
return "__import__(%r, None, None, ['']).%s" % (self.module_name,
self.name)
class CType(PyrexType):
#
# Base class for all C types (non-reference-counted).
#
# to_py_function string C function for converting to Python object
# from_py_function string C function for constructing from Python object
#
to_py_function = None
from_py_function = None
exception_value = None
exception_check = 1
def create_to_py_utility_code(self, env):
return self.to_py_function is not None
def create_from_py_utility_code(self, env):
return self.from_py_function is not None
def can_coerce_to_pyobject(self, env):
return self.create_to_py_utility_code(env)
def can_coerce_from_pyobject(self, env):
return self.create_from_py_utility_code(env)
def error_condition(self, result_code):
conds = []
if self.is_string or self.is_pyunicode_ptr:
conds.append("(!%s)" % result_code)
elif self.exception_value is not None:
conds.append("(%s == (%s)%s)" % (result_code, self.sign_and_name(), self.exception_value))
if self.exception_check:
conds.append("PyErr_Occurred()")
if len(conds) > 0:
return " && ".join(conds)
else:
return 0
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
func = self.to_py_function if to_py_function is None else to_py_function
assert func
if self.is_string or self.is_cpp_string:
if result_type.is_builtin_type:
result_type_name = result_type.name
if result_type_name in ('bytes', 'str', 'unicode'):
func = func.replace("Object", result_type_name.title(), 1)
elif result_type_name == 'bytearray':
func = func.replace("Object", "ByteArray", 1)
return '%s = %s(%s)' % (
result_code,
func,
source_code or 'NULL')
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
return self._assign_from_py_code(
source_code, result_code, error_pos, code, from_py_function, error_condition)
class PythranExpr(CType):
# Pythran object of a given type
to_py_function = "__Pyx_pythran_to_python"
is_pythran_expr = True
writable = True
has_attributes = 1
def __init__(self, pythran_type, org_buffer=None):
self.org_buffer = org_buffer
self.pythran_type = pythran_type
self.name = self.pythran_type
self.cname = self.pythran_type
self.from_py_function = "from_python<%s>" % (self.pythran_type)
self.scope = None
def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
assert not pyrex
return "%s %s" % (self.cname, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
# FIXME: fake C scope, might be better represented by a struct or C++ class scope
self.scope = scope = Symtab.CClassScope('', None, visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_var("ndim", c_long_type, pos=None, cname="value", is_cdef=True)
scope.declare_cproperty(
"shape", c_ptr_type(c_long_type), "__Pyx_PythranShapeAccessor",
doc="Pythran array shape",
visibility="extern",
nogil=True,
)
return True
def __eq__(self, other):
return isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type
def __ne__(self, other):
return not (isinstance(other, PythranExpr) and self.pythran_type == other.pythran_type)
def __hash__(self):
return hash(self.pythran_type)
class CConstOrVolatileType(BaseType):
"A C const or volatile type"
subtypes = ['cv_base_type']
is_cv_qualified = 1
def __init__(self, base_type, is_const=0, is_volatile=0):
self.cv_base_type = base_type
self.is_const = is_const
self.is_volatile = is_volatile
if base_type.has_attributes and base_type.scope is not None:
from .Symtab import CConstOrVolatileScope
self.scope = CConstOrVolatileScope(base_type.scope, is_const, is_volatile)
def cv_string(self):
cvstring = ""
if self.is_const:
cvstring = "const " + cvstring
if self.is_volatile:
cvstring = "volatile " + cvstring
return cvstring
def __repr__(self):
return "<CConstOrVolatileType %s%r>" % (self.cv_string(), self.cv_base_type)
def __str__(self):
return self.declaration_code("", for_display=1)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
cv = self.cv_string()
if for_display or pyrex:
return cv + self.cv_base_type.declaration_code(entity_code, for_display, dll_linkage, pyrex)
else:
return self.cv_base_type.declaration_code(cv + entity_code, for_display, dll_linkage, pyrex)
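# For example, for a const int entity "x" this displays as "const int x"
# in Pyrex/display mode, while the generated C declaration places the
# qualifier with the entity instead: "int const x".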
def specialize(self, values):
base_type = self.cv_base_type.specialize(values)
if base_type == self.cv_base_type:
return self
return CConstOrVolatileType(base_type,
self.is_const, self.is_volatile)
def deduce_template_params(self, actual):
return self.cv_base_type.deduce_template_params(actual)
def can_coerce_to_pyobject(self, env):
return self.cv_base_type.can_coerce_to_pyobject(env)
def can_coerce_from_pyobject(self, env):
return self.cv_base_type.can_coerce_from_pyobject(env)
def create_to_py_utility_code(self, env):
if self.cv_base_type.create_to_py_utility_code(env):
self.to_py_function = self.cv_base_type.to_py_function
return True
def same_as_resolved_type(self, other_type):
if other_type.is_cv_qualified:
return self.cv_base_type.same_as_resolved_type(other_type.cv_base_type)
# Accept cv LHS <- non-cv RHS.
return self.cv_base_type.same_as_resolved_type(other_type)
def __getattr__(self, name):
return getattr(self.cv_base_type, name)
def CConstType(base_type):
return CConstOrVolatileType(base_type, is_const=1)
class FusedType(CType):
"""
Represents a Fused Type. All it needs to do is keep track of the types
it aggregates, as it will be replaced with its specific version wherever
needed.
See http://wiki.cython.org/enhancements/fusedtypes
types [PyrexType] is the list of types to be fused
name str the name of the ctypedef
"""
is_fused = 1
exception_check = 0
def __init__(self, types, name=None):
# Use list rather than set to preserve order (list should be short).
flattened_types = []
for t in types:
if t.is_fused:
# recursively merge in subtypes
for subtype in t.types:
if subtype not in flattened_types:
flattened_types.append(subtype)
elif t not in flattened_types:
flattened_types.append(t)
self.types = flattened_types
self.name = name
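# For example, fusing [short, FusedType([int, float]), int] flattens,
# preserving first-seen order, to [short, int, float]: the nested fused
# type is merged in and the duplicate int is dropped.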
def declaration_code(self, entity_code, for_display = 0,
dll_linkage = None, pyrex = 0):
if pyrex or for_display:
return self.name
raise Exception("This should never happen, please report a bug")
def __repr__(self):
return 'FusedType(name=%r)' % self.name
def specialize(self, values):
if self in values:
return values[self]
else:
raise CannotSpecialize()
def get_fused_types(self, result=None, seen=None):
if result is None:
return [self]
if self not in seen:
result.append(self)
seen.add(self)
class CVoidType(CType):
#
# C "void" type
#
is_void = 1
to_py_function = "__Pyx_void_to_None"
def __repr__(self):
return "<CVoidType>"
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "void"
else:
base_code = public_decl("void", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def is_complete(self):
return 0
class InvisibleVoidType(CVoidType):
#
# For use with C++ constructor and destructor return types.
# Acts like void, but does not print out a declaration.
#
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = "[void]"
else:
base_code = public_decl("", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
class CNumericType(CType):
#
# Base class for all C numeric types.
#
# rank integer Relative size
# signed integer 0 = unsigned, 1 = unspecified, 2 = explicitly signed
#
is_numeric = 1
default_value = "0"
has_attributes = True
scope = None
sign_words = ("unsigned ", "", "signed ")
def __init__(self, rank, signed = 1):
self.rank = rank
if rank > 0 and signed == SIGNED:
# Signed is meaningless for anything but char, and complicates
# type promotion.
signed = 1
self.signed = signed
def sign_and_name(self):
s = self.sign_words[self.signed]
n = rank_to_type_name[self.rank]
return s + n
def __repr__(self):
return "<CNumericType %s>" % self.sign_and_name()
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
type_name = self.sign_and_name()
if pyrex or for_display:
base_code = type_name.replace('PY_LONG_LONG', 'long long')
else:
base_code = public_decl(type_name, dll_linkage)
base_code = StringEncoding.EncodedString(base_code)
return self.base_declaration_code(base_code, entity_code)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname=" ")
return True
def __lt__(self, other):
"""Sort based on rank, preferring signed over unsigned"""
if other.is_numeric:
return self.rank > other.rank and self.signed >= other.signed
# Prefer numeric types over others
return True
def py_type_name(self):
if self.rank <= 4:
return "(int, long)"
return "float"
class ForbidUseClass:
def __repr__(self):
raise RuntimeError()
def __str__(self):
raise RuntimeError()
ForbidUse = ForbidUseClass()
class CIntLike(object):
"""Mixin for shared behaviour of C integers and enums.
"""
to_py_function = None
from_py_function = None
to_pyunicode_utility = None
default_format_spec = 'd'
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
def create_to_py_utility_code(self, env):
if type(self).to_py_function is None:
self.to_py_function = "__Pyx_PyInt_From_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntToPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"TO_PY_FUNCTION": self.to_py_function}))
return True
def create_from_py_utility_code(self, env):
if type(self).from_py_function is None:
self.from_py_function = "__Pyx_PyInt_As_" + self.specialization_name()
env.use_utility_code(TempitaUtilityCode.load_cached(
"CIntFromPy", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"FROM_PY_FUNCTION": self.from_py_function}))
return True
@staticmethod
def _parse_format(format_spec):
padding = ' '
if not format_spec:
return ('d', 0, padding)
format_type = format_spec[-1]
if format_type in ('o', 'd', 'x', 'X'):
prefix = format_spec[:-1]
elif format_type.isdigit():
format_type = 'd'
prefix = format_spec
else:
return (None, 0, padding)
if not prefix:
return (format_type, 0, padding)
if prefix[0] == '-':
prefix = prefix[1:]
if prefix and prefix[0] == '0':
padding = '0'
prefix = prefix.lstrip('0')
if prefix.isdigit():
return (format_type, int(prefix), padding)
return (None, 0, padding)
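# A few illustrative results: _parse_format('05d') -> ('d', 5, '0'),
# _parse_format('10') -> ('d', 10, ' '), _parse_format('x') -> ('x', 0, ' '),
# and an unsupported spec such as 'f' -> (None, 0, ' ').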
def can_coerce_to_pystring(self, env, format_spec=None):
format_type, width, padding = self._parse_format(format_spec)
return format_type is not None and width <= 2**30
def convert_to_pystring(self, cvalue, code, format_spec=None):
if self.to_pyunicode_utility is None:
utility_code_name = "__Pyx_PyUnicode_From_" + self.specialization_name()
to_pyunicode_utility = TempitaUtilityCode.load_cached(
"CIntToPyUnicode", "TypeConversion.c",
context={"TYPE": self.empty_declaration_code(),
"TO_PY_FUNCTION": utility_code_name})
self.to_pyunicode_utility = (utility_code_name, to_pyunicode_utility)
else:
utility_code_name, to_pyunicode_utility = self.to_pyunicode_utility
code.globalstate.use_utility_code(to_pyunicode_utility)
format_type, width, padding_char = self._parse_format(format_spec)
return "%s(%s, %d, '%s', '%s')" % (utility_code_name, cvalue, width, padding_char, format_type)
class CIntType(CIntLike, CNumericType):
is_int = 1
typedef_flag = 0
exception_value = -1
def get_to_py_type_conversion(self):
if self.rank < list(rank_to_type_name).index('int'):
# This assumes sizeof(short) < sizeof(int)
return "PyInt_FromLong"
else:
# Py{Int|Long}_From[Unsigned]Long[Long]
Prefix = "Int"
SignWord = ""
TypeName = "Long"
if not self.signed:
Prefix = "Long"
SignWord = "Unsigned"
if self.rank >= list(rank_to_type_name).index('PY_LONG_LONG'):
Prefix = "Long"
TypeName = "LongLong"
return "Py%s_From%s%s" % (Prefix, SignWord, TypeName)
def assignable_from_resolved_type(self, src_type):
return src_type.is_int or src_type.is_enum or src_type is error_type
def invalid_value(self):
if rank_to_type_name[int(self.rank)] == 'char':
return "'?'"
else:
# We do not really know the size of the type, so return
# a 32-bit literal and rely on casting to final type. It will
# be negative for signed ints, which is good.
return "0xbad0bad0"
def overflow_check_binop(self, binop, env, const_rhs=False):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
type = self.empty_declaration_code()
name = self.specialization_name()
if binop == "lshift":
env.use_utility_code(TempitaUtilityCode.load_cached(
"LeftShift", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'SIGNED': self.signed}))
else:
if const_rhs:
binop += "_const"
if type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': name}))
elif type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': name}))
elif self.rank <= 1:
# sizeof(short) < sizeof(int)
return "__Pyx_%s_%s_no_overflow" % (binop, name)
else:
_load_overflow_base(env)
env.use_utility_code(TempitaUtilityCode.load_cached(
"SizeCheck", "Overflow.c",
context={'TYPE': type, 'NAME': name}))
env.use_utility_code(TempitaUtilityCode.load_cached(
"Binop", "Overflow.c",
context={'TYPE': type, 'NAME': name, 'BINOP': binop}))
return "__Pyx_%s_%s_checking_overflow" % (binop, name)
def _load_overflow_base(env):
env.use_utility_code(UtilityCode.load("Common", "Overflow.c"))
for type in ('int', 'long', 'long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseSigned", "Overflow.c",
context={'INT': type, 'NAME': type.replace(' ', '_')}))
for type in ('unsigned int', 'unsigned long', 'unsigned long long'):
env.use_utility_code(TempitaUtilityCode.load_cached(
"BaseCaseUnsigned", "Overflow.c",
context={'UINT': type, 'NAME': type.replace(' ', '_')}))
class CAnonEnumType(CIntType):
is_enum = 1
def sign_and_name(self):
return 'int'
class CReturnCodeType(CIntType):
to_py_function = "__Pyx_Owned_Py_None"
is_returncode = True
exception_check = False
default_format_spec = ''
def can_coerce_to_pystring(self, env, format_spec=None):
return not format_spec
def convert_to_pystring(self, cvalue, code, format_spec=None):
return "__Pyx_NewRef(%s)" % code.globalstate.get_py_string_const(StringEncoding.EncodedString("None")).cname
class CBIntType(CIntType):
to_py_function = "__Pyx_PyBool_FromLong"
from_py_function = "__Pyx_PyObject_IsTrue"
exception_check = 1 # for C++ bool
default_format_spec = ''
def can_coerce_to_pystring(self, env, format_spec=None):
return not format_spec or super(CBIntType, self).can_coerce_to_pystring(env, format_spec)
def convert_to_pystring(self, cvalue, code, format_spec=None):
if format_spec:
return super(CBIntType, self).convert_to_pystring(cvalue, code, format_spec)
# NOTE: no caching here as the string constant cnames depend on the current module
utility_code_name = "__Pyx_PyUnicode_FromBInt_" + self.specialization_name()
to_pyunicode_utility = TempitaUtilityCode.load_cached(
"CBIntToPyUnicode", "TypeConversion.c", context={
"TRUE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("True")).cname,
"FALSE_CONST": code.globalstate.get_py_string_const(StringEncoding.EncodedString("False")).cname,
"TO_PY_FUNCTION": utility_code_name,
})
code.globalstate.use_utility_code(to_pyunicode_utility)
return "%s(%s)" % (utility_code_name, cvalue)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if for_display:
base_code = 'bool'
elif pyrex:
base_code = 'bint'
else:
base_code = public_decl('int', dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __repr__(self):
return "<CNumericType bint>"
def __str__(self):
return 'bint'
def py_type_name(self):
return "bool"
class CPyUCS4IntType(CIntType):
# Py_UCS4
is_unicode_char = True
# Py_UCS4 coerces from and to single character unicode strings (or
# at most two characters on 16bit Unicode builds), but we also
# allow Python integers as input. The value range for Py_UCS4
# is 0..1114111, which is checked when converting from an integer
# value.
to_py_function = "PyUnicode_FromOrdinal"
from_py_function = "__Pyx_PyObject_AsPy_UCS4"
def can_coerce_to_pystring(self, env, format_spec=None):
return False # does the right thing anyway
def create_from_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached("ObjectAsUCS4", "TypeConversion.c"))
return True
def sign_and_name(self):
return "Py_UCS4"
class CPyUnicodeIntType(CIntType):
# Py_UNICODE
is_unicode_char = True
# Py_UNICODE coerces from and to single character unicode strings,
# but we also allow Python integers as input. The value range for
# Py_UNICODE is 0..1114111, which is checked when converting from
# an integer value.
to_py_function = "PyUnicode_FromOrdinal"
from_py_function = "__Pyx_PyObject_AsPy_UNICODE"
def can_coerce_to_pystring(self, env, format_spec=None):
return False # does the right thing anyway
def create_from_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached("ObjectAsPyUnicode", "TypeConversion.c"))
return True
def sign_and_name(self):
return "Py_UNICODE"
class CPyHashTType(CIntType):
to_py_function = "__Pyx_PyInt_FromHash_t"
from_py_function = "__Pyx_PyInt_AsHash_t"
def sign_and_name(self):
return "Py_hash_t"
class CPySSizeTType(CIntType):
to_py_function = "PyInt_FromSsize_t"
from_py_function = "__Pyx_PyIndex_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
class CSSizeTType(CIntType):
to_py_function = "PyInt_FromSsize_t"
from_py_function = "PyInt_AsSsize_t"
def sign_and_name(self):
return "Py_ssize_t"
class CSizeTType(CIntType):
to_py_function = "__Pyx_PyInt_FromSize_t"
def sign_and_name(self):
return "size_t"
class CPtrdiffTType(CIntType):
def sign_and_name(self):
return "ptrdiff_t"
class CFloatType(CNumericType):
is_float = 1
to_py_function = "PyFloat_FromDouble"
from_py_function = "__pyx_PyFloat_AsDouble"
exception_value = -1
def __init__(self, rank, math_h_modifier = ''):
CNumericType.__init__(self, rank, 1)
self.math_h_modifier = math_h_modifier
if rank == RANK_FLOAT:
self.from_py_function = "__pyx_PyFloat_AsFloat"
def assignable_from_resolved_type(self, src_type):
return (src_type.is_numeric and not src_type.is_complex) or src_type is error_type
def invalid_value(self):
return Naming.PYX_NAN
class CComplexType(CNumericType):
is_complex = 1
to_py_function = "__pyx_PyComplex_FromComplex"
has_attributes = 1
scope = None
def __init__(self, real_type):
while real_type.is_typedef and not real_type.typedef_is_external:
real_type = real_type.typedef_base_type
self.funcsuffix = "_%s" % real_type.specialization_name()
if real_type.is_float:
self.math_h_modifier = real_type.math_h_modifier
else:
self.math_h_modifier = "_UNUSED"
self.real_type = real_type
CNumericType.__init__(self, real_type.rank + 0.5, real_type.signed)
self.binops = {}
self.from_parts = "%s_from_parts" % self.specialization_name()
self.default_value = "%s(0, 0)" % self.from_parts
def __eq__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type == other.real_type
else:
return False
def __ne__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type != other.real_type
else:
return True
def __lt__(self, other):
if isinstance(self, CComplexType) and isinstance(other, CComplexType):
return self.real_type < other.real_type
else:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return ~hash(self.real_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
real_code = self.real_type.declaration_code("", for_display, dll_linkage, pyrex)
base_code = "%s complex" % real_code
else:
base_code = public_decl(self.sign_and_name(), dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def sign_and_name(self):
real_type_name = self.real_type.specialization_name()
real_type_name = real_type_name.replace('long__double','long_double')
real_type_name = real_type_name.replace('PY_LONG_LONG','long_long')
return Naming.type_prefix + real_type_name + "_complex"
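# For example, with a double real type this yields a name like
# "__pyx_t_double_complex" (assuming the usual "__pyx_t_" type prefix).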
def assignable_from(self, src_type):
# Temporary hack/feature disabling, see #441
if (not src_type.is_complex and src_type.is_numeric and src_type.is_typedef
and src_type.typedef_is_external):
return False
elif src_type.is_pyobject:
return True
else:
return super(CComplexType, self).assignable_from(src_type)
def assignable_from_resolved_type(self, src_type):
return (src_type.is_complex and self.real_type.assignable_from_resolved_type(src_type.real_type)
or src_type.is_numeric and self.real_type.assignable_from_resolved_type(src_type)
or src_type is error_type)
def attributes_known(self):
if self.scope is None:
from . import Symtab
self.scope = scope = Symtab.CClassScope(
'',
None,
visibility="extern")
scope.parent_type = self
scope.directives = {}
scope.declare_var("real", self.real_type, None, cname="real", is_cdef=True)
scope.declare_var("imag", self.real_type, None, cname="imag", is_cdef=True)
scope.declare_cfunction(
"conjugate",
CFuncType(self, [CFuncTypeArg("self", self, None)], nogil=True),
pos=None,
defining=1,
cname="__Pyx_c_conj%s" % self.funcsuffix)
return True
def _utility_code_context(self):
return {
'type': self.empty_declaration_code(),
'type_name': self.specialization_name(),
'real_type': self.real_type.empty_declaration_code(),
'func_suffix': self.funcsuffix,
'm': self.math_h_modifier,
'is_float': int(self.real_type.is_float)
}
def create_declaration_utility_code(self, env):
# This must always be run, because a single CComplexType instance can be shared
# across multiple compilations (the one created in the module scope)
env.use_utility_code(UtilityCode.load_cached('Header', 'Complex.c'))
env.use_utility_code(UtilityCode.load_cached('RealImag', 'Complex.c'))
env.use_utility_code(TempitaUtilityCode.load_cached(
'Declarations', 'Complex.c', self._utility_code_context()))
env.use_utility_code(TempitaUtilityCode.load_cached(
'Arithmetic', 'Complex.c', self._utility_code_context()))
return True
def can_coerce_to_pyobject(self, env):
return True
def can_coerce_from_pyobject(self, env):
return True
def create_to_py_utility_code(self, env):
env.use_utility_code(UtilityCode.load_cached('ToPy', 'Complex.c'))
return True
def create_from_py_utility_code(self, env):
env.use_utility_code(TempitaUtilityCode.load_cached(
'FromPy', 'Complex.c', self._utility_code_context()))
self.from_py_function = "__Pyx_PyComplex_As_" + self.specialization_name()
return True
def lookup_op(self, nargs, op):
try:
return self.binops[nargs, op]
except KeyError:
pass
try:
op_name = complex_ops[nargs, op]
self.binops[nargs, op] = func_name = "__Pyx_c_%s%s" % (op_name, self.funcsuffix)
return func_name
except KeyError:
return None
def unary_op(self, op):
return self.lookup_op(1, op)
def binary_op(self, op):
return self.lookup_op(2, op)
def py_type_name(self):
return "complex"
def cast_code(self, expr_code):
return expr_code
complex_ops = {
(1, '-'): 'neg',
(1, 'zero'): 'is_zero',
(2, '+'): 'sum',
(2, '-'): 'diff',
(2, '*'): 'prod',
(2, '/'): 'quot',
(2, '**'): 'pow',
(2, '=='): 'eq',
}
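# For example, for double complex, binary_op('+') looks up (2, '+') ->
# 'sum' in the table above and caches/returns "__Pyx_c_sum_double";
# an operator with no entry returns None.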
class CPyTSSTType(CType):
#
# PEP-539 "Py_tss_t" type
#
declaration_value = "Py_tss_NEEDS_INIT"
def __repr__(self):
return "<Py_tss_t>"
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
base_code = "Py_tss_t"
else:
base_code = public_decl("Py_tss_t", dll_linkage)
return self.base_declaration_code(base_code, entity_code)
class CPointerBaseType(CType):
# common base type for pointer/array types
#
# base_type CType Reference type
subtypes = ['base_type']
def __init__(self, base_type):
self.base_type = base_type
if base_type.is_cv_qualified:
base_type = base_type.cv_base_type
for char_type in (c_char_type, c_uchar_type, c_schar_type):
if base_type.same_as(char_type):
self.is_string = 1
break
else:
if base_type.same_as(c_py_unicode_type):
self.is_pyunicode_ptr = 1
if self.is_string and not base_type.is_error:
if base_type.signed == 2:
self.to_py_function = "__Pyx_PyObject_FromCString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_As%sSString"
elif base_type.signed:
self.to_py_function = "__Pyx_PyObject_FromString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_As%sString"
else:
self.to_py_function = "__Pyx_PyObject_FromCString"
if self.is_ptr:
self.from_py_function = "__Pyx_PyObject_As%sUString"
if self.is_ptr:
self.from_py_function %= '' if self.base_type.is_const else 'Writable'
self.exception_value = "NULL"
elif self.is_pyunicode_ptr and not base_type.is_error:
self.to_py_function = "__Pyx_PyUnicode_FromUnicode"
if self.is_ptr:
self.from_py_function = "__Pyx_PyUnicode_AsUnicode"
self.exception_value = "NULL"
def py_type_name(self):
if self.is_string:
return "bytes"
elif self.is_pyunicode_ptr:
return "unicode"
else:
return super(CPointerBaseType, self).py_type_name()
def literal_code(self, value):
if self.is_string:
assert isinstance(value, str)
return '"%s"' % StringEncoding.escape_byte_string(value)
return str(value)
class CArrayType(CPointerBaseType):
# base_type CType Element type
# size integer or None Number of elements
is_array = 1
to_tuple_function = None
def __init__(self, base_type, size):
super(CArrayType, self).__init__(base_type)
self.size = size
def __eq__(self, other):
if isinstance(other, CType) and other.is_array and self.size == other.size:
return self.base_type.same_as(other.base_type)
return False
def __hash__(self):
return hash(self.base_type) + 28 # arbitrarily chosen offset
def __repr__(self):
return "<CArrayType %s %s>" % (self.size, repr(self.base_type))
def same_as_resolved_type(self, other_type):
return ((other_type.is_array and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def assignable_from_resolved_type(self, src_type):
# C arrays are assigned by value, from either Python containers or C arrays/pointers
if src_type.is_pyobject:
return True
if src_type.is_ptr or src_type.is_array:
return self.base_type.assignable_from(src_type.base_type)
return False
def element_ptr_type(self):
return c_ptr_type(self.base_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if self.size is not None:
dimension_code = self.size
else:
dimension_code = ""
if entity_code.startswith("*"):
entity_code = "(%s)" % entity_code
return self.base_type.declaration_code(
"%s[%s]" % (entity_code, dimension_code),
for_display, dll_linkage, pyrex)
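# For example, declaring entity "x" with base type int and size 10 yields
# "int x[10]"; a pointer entity "*p" is parenthesised first, giving
# "int (*p)[10]".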
def as_argument_type(self):
return c_ptr_type(self.base_type)
def is_complete(self):
return self.size is not None
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CArrayType(base_type, self.size)
def deduce_template_params(self, actual):
if isinstance(actual, CArrayType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return {}
def can_coerce_to_pyobject(self, env):
return self.base_type.can_coerce_to_pyobject(env)
def can_coerce_from_pyobject(self, env):
return self.base_type.can_coerce_from_pyobject(env)
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return self.to_py_function
if not self.base_type.create_to_py_utility_code(env):
return False
safe_typename = self.base_type.specialization_name()
to_py_function = "__Pyx_carray_to_py_%s" % safe_typename
to_tuple_function = "__Pyx_carray_to_tuple_%s" % safe_typename
from .UtilityCode import CythonUtilityCode
context = {
'cname': to_py_function,
'to_tuple_cname': to_tuple_function,
'base_type': self.base_type,
}
env.use_utility_code(CythonUtilityCode.load(
"carray.to_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.to_tuple_function = to_tuple_function
self.to_py_function = to_py_function
return True
def to_py_call_code(self, source_code, result_code, result_type, to_py_function=None):
func = self.to_py_function if to_py_function is None else to_py_function
if self.is_string or self.is_pyunicode_ptr:
return '%s = %s(%s)' % (
result_code,
func,
source_code)
target_is_tuple = result_type.is_builtin_type and result_type.name == 'tuple'
return '%s = %s(%s, %s)' % (
result_code,
self.to_tuple_function if target_is_tuple else func,
source_code,
self.size)
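# A rough sketch of the C code this emits for a hypothetical 'int arr[4]'
# (assuming the base type's specialization_name() is "int"):
#   target = __Pyx_carray_to_py_int(arr, 4)       (generic object target)
#   target = __Pyx_carray_to_tuple_int(arr, 4)    (target known to be a tuple)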
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return self.from_py_function
if not self.base_type.create_from_py_utility_code(env):
return False
from_py_function = "__Pyx_carray_from_py_%s" % self.base_type.specialization_name()
from .UtilityCode import CythonUtilityCode
context = {
'cname': from_py_function,
'base_type': self.base_type,
}
env.use_utility_code(CythonUtilityCode.load(
"carray.from_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.from_py_function = from_py_function
return True
def from_py_call_code(self, source_code, result_code, error_pos, code,
from_py_function=None, error_condition=None):
assert not error_condition, '%s: %s' % (error_pos, error_condition)
call_code = "%s(%s, %s, %s)" % (
from_py_function or self.from_py_function,
source_code, result_code, self.size)
return code.error_goto_if_neg(call_code, error_pos)
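# The reverse direction for the same hypothetical 'int arr[4]' emits roughly:
#   if (__Pyx_carray_from_py_int(pyobj, arr, 4) < 0) goto error_label;
# where error_goto_if_neg() supplies the '< 0' test and the error label.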
class CPtrType(CPointerBaseType):
# base_type CType Referenced type (i.e. the type pointed to)
is_ptr = 1
default_value = "0"
def __hash__(self):
return hash(self.base_type) + 27 # arbitrarily chosen offset
def __eq__(self, other):
if isinstance(other, CType) and other.is_ptr:
return self.base_type.same_as(other.base_type)
return False
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<CPtrType %s>" % repr(self.base_type)
def same_as_resolved_type(self, other_type):
return ((other_type.is_ptr and
self.base_type.same_as(other_type.base_type))
or other_type is error_type)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CPtrType.declaration_code: pointer to", self.base_type ###
return self.base_type.declaration_code(
"*%s" % entity_code,
for_display, dll_linkage, pyrex)
def assignable_from_resolved_type(self, other_type):
if other_type is error_type:
return 1
if other_type.is_null_ptr:
return 1
if self.base_type.is_cv_qualified:
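# Strip the cv-qualifier and redo the check as a pointer-to-unqualified type.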
self = CPtrType(self.base_type.cv_base_type)
if self.base_type.is_cfunction:
if other_type.is_ptr:
other_type = other_type.base_type.resolve()
if other_type.is_cfunction:
return self.base_type.pointer_assignable_from_resolved_type(other_type)
else:
return 0
if (self.base_type.is_cpp_class and other_type.is_ptr
and other_type.base_type.is_cpp_class and other_type.base_type.is_subclass(self.base_type)):
return 1
if other_type.is_array or other_type.is_ptr:
return self.base_type.is_void or self.base_type.same_as(other_type.base_type)
return 0
def specialize(self, values):
base_type = self.base_type.specialize(values)
if base_type == self.base_type:
return self
else:
return CPtrType(base_type)
def deduce_template_params(self, actual):
if isinstance(actual, CPtrType):
return self.base_type.deduce_template_params(actual.base_type)
else:
return {}
def invalid_value(self):
return "1"
def find_cpp_operation_type(self, operator, operand_type=None):
if self.base_type.is_cpp_class:
return self.base_type.find_cpp_operation_type(operator, operand_type)
return None
class CNullPtrType(CPtrType):
is_null_ptr = 1
class CReferenceBaseType(BaseType):
is_fake_reference = 0
# Common base type for C reference and C++ rvalue reference types.
def __init__(self, base_type):
self.ref_base_type = base_type
def __repr__(self):
return "<%s %s>" % repr(self.__class__.__name__, self.ref_base_type)
def specialize(self, values):
base_type = self.ref_base_type.specialize(values)
if base_type == self.ref_base_type:
return self
else:
return type(self)(base_type)
def deduce_template_params(self, actual):
return self.ref_base_type.deduce_template_params(actual)
def __getattr__(self, name):
return getattr(self.ref_base_type, name)
class CReferenceType(CReferenceBaseType):
is_reference = 1
def __str__(self):
return "%s &" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return self.ref_base_type.declaration_code(
"&%s" % entity_code,
for_display, dll_linkage, pyrex)
class CFakeReferenceType(CReferenceType):
is_fake_reference = 1
def __str__(self):
return "%s [&]" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
#print "CReferenceType.declaration_code: pointer to", self.base_type ###
return "__Pyx_FakeReference<%s> %s" % (self.ref_base_type.empty_declaration_code(), entity_code)
class CppRvalueReferenceType(CReferenceBaseType):
is_rvalue_reference = 1
def __str__(self):
return "%s &&" % self.ref_base_type
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return self.ref_base_type.declaration_code(
"&&%s" % entity_code,
for_display, dll_linkage, pyrex)
class CFuncType(CType):
# return_type CType
# args [CFuncTypeArg]
# has_varargs boolean
# exception_value string
# exception_check boolean True if PyErr_Occurred check needed
# calling_convention string Function calling convention
# nogil boolean Can be called without gil
# with_gil boolean Acquire gil around function body
# templates [string] or None
# cached_specialized_types [CFuncType] cached specialized versions of the CFuncType if defined in a pxd
# from_fused boolean Indicates whether this is a specialized
# C function
# is_strict_signature boolean function refuses to accept coerced arguments
# (used for optimisation overrides)
# is_const_method boolean
# is_static_method boolean
is_cfunction = 1
original_sig = None
cached_specialized_types = None
from_fused = False
is_const_method = False
subtypes = ['return_type', 'args']
def __init__(self, return_type, args, has_varargs = 0,
exception_value = None, exception_check = 0, calling_convention = "",
nogil = 0, with_gil = 0, is_overridable = 0, optional_arg_count = 0,
is_const_method = False, is_static_method=False,
templates = None, is_strict_signature = False):
self.return_type = return_type
self.args = args
self.has_varargs = has_varargs
self.optional_arg_count = optional_arg_count
self.exception_value = exception_value
self.exception_check = exception_check
self.calling_convention = calling_convention
self.nogil = nogil
self.with_gil = with_gil
self.is_overridable = is_overridable
self.is_const_method = is_const_method
self.is_static_method = is_static_method
self.templates = templates
self.is_strict_signature = is_strict_signature
def __repr__(self):
arg_reprs = list(map(repr, self.args))
if self.has_varargs:
arg_reprs.append("...")
if self.exception_value:
except_clause = " %r" % self.exception_value
else:
except_clause = ""
if self.exception_check:
except_clause += "?"
return "<CFuncType %s %s[%s]%s>" % (
repr(self.return_type),
self.calling_convention_prefix(),
",".join(arg_reprs),
except_clause)
def with_with_gil(self, with_gil):
if with_gil == self.with_gil:
return self
else:
return CFuncType(
self.return_type, self.args, self.has_varargs,
self.exception_value, self.exception_check,
self.calling_convention, self.nogil,
with_gil,
self.is_overridable, self.optional_arg_count,
self.is_const_method, self.is_static_method,
self.templates, self.is_strict_signature)
def calling_convention_prefix(self):
cc = self.calling_convention
if cc:
return cc + " "
else:
return ""
def as_argument_type(self):
return c_ptr_type(self)
def same_c_signature_as(self, other_type, as_cmethod = 0):
return self.same_c_signature_as_resolved_type(
other_type.resolve(), as_cmethod)
def same_c_signature_as_resolved_type(self, other_type, as_cmethod=False, as_pxd_definition=False,
exact_semantics=True):
# If 'exact_semantics' is false, allow any equivalent C signatures
# if the Cython semantics are compatible, i.e. the same or wider for 'other_type'.
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if self.is_overridable != other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, nargs):
if not self.args[i].type.same_as(other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if as_pxd_definition:
# A narrowing of the return type declared in the pxd is allowed.
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
else:
if not self.return_type.same_as(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if exact_semantics:
if self.exception_check != other_type.exception_check:
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
elif not self._is_exception_compatible_with(other_type):
return 0
return 1
def _same_exception_value(self, other_exc_value):
if self.exception_value == other_exc_value:
return 1
if self.exception_check != '+':
return 0
if not self.exception_value or not other_exc_value:
return 0
if self.exception_value.type != other_exc_value.type:
return 0
if self.exception_value.entry and other_exc_value.entry:
if self.exception_value.entry.cname != other_exc_value.entry.cname:
return 0
if self.exception_value.name != other_exc_value.name:
return 0
return 1
def compatible_signature_with(self, other_type, as_cmethod = 0):
return self.compatible_signature_with_resolved_type(other_type.resolve(), as_cmethod)
def compatible_signature_with_resolved_type(self, other_type, as_cmethod):
#print "CFuncType.same_c_signature_as_resolved_type:", \
# self, other_type, "as_cmethod =", as_cmethod ###
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
if not self.is_overridable and other_type.is_overridable:
return 0
nargs = len(self.args)
if nargs - self.optional_arg_count != len(other_type.args) - other_type.optional_arg_count:
return 0
if self.optional_arg_count < other_type.optional_arg_count:
return 0
# When comparing C method signatures, the first argument
# is exempt from compatibility checking (the proper check
# is performed elsewhere).
for i in range(as_cmethod, len(other_type.args)):
if not self.args[i].type.same_as(
other_type.args[i].type):
return 0
if self.has_varargs != other_type.has_varargs:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.same_calling_convention_as(other_type):
return 0
if self.nogil != other_type.nogil:
return 0
if not self._is_exception_compatible_with(other_type):
return 0
self.original_sig = other_type.original_sig or other_type
return 1
def _is_exception_compatible_with(self, other_type):
# narrower exception checks are ok, but prevent mismatches
if self.exception_check == '+' and other_type.exception_check != '+':
# must catch C++ exceptions if we raise them
return 0
if not other_type.exception_check or other_type.exception_value is not None:
# if other does not *always* check exceptions, self must comply
if not self._same_exception_value(other_type.exception_value):
return 0
if self.exception_check and self.exception_check != other_type.exception_check:
# a redundant exception check doesn't make functions incompatible, but a missing one does
return 0
return 1
def narrower_c_signature_than(self, other_type, as_cmethod = 0):
return self.narrower_c_signature_than_resolved_type(other_type.resolve(), as_cmethod)
def narrower_c_signature_than_resolved_type(self, other_type, as_cmethod):
if other_type is error_type:
return 1
if not other_type.is_cfunction:
return 0
nargs = len(self.args)
if nargs != len(other_type.args):
return 0
for i in range(as_cmethod, nargs):
if not self.args[i].type.subtype_of_resolved_type(other_type.args[i].type):
return 0
else:
self.args[i].needs_type_test = other_type.args[i].needs_type_test \
or not self.args[i].type.same_as(other_type.args[i].type)
if self.has_varargs != other_type.has_varargs:
return 0
if self.optional_arg_count != other_type.optional_arg_count:
return 0
if not self.return_type.subtype_of_resolved_type(other_type.return_type):
return 0
if not self.exception_check and other_type.exception_check:
# a redundant exception check doesn't make functions incompatible, but a missing one does
return 0
if not self._same_exception_value(other_type.exception_value):
return 0
return 1
def same_calling_convention_as(self, other):
## XXX Under discussion ...
## callspec_words = ("__stdcall", "__cdecl", "__fastcall")
## cs1 = self.calling_convention
## cs2 = other.calling_convention
## if (cs1 in callspec_words or
## cs2 in callspec_words):
## return cs1 == cs2
## else:
## return True
sc1 = self.calling_convention == '__stdcall'
sc2 = other.calling_convention == '__stdcall'
return sc1 == sc2
def same_as_resolved_type(self, other_type, as_cmethod=False):
return self.same_c_signature_as_resolved_type(other_type, as_cmethod=as_cmethod) \
and self.nogil == other_type.nogil
def pointer_assignable_from_resolved_type(self, rhs_type):
# Accept compatible exception/nogil declarations for the RHS.
if rhs_type is error_type:
return 1
if not rhs_type.is_cfunction:
return 0
return rhs_type.same_c_signature_as_resolved_type(self, exact_semantics=False) \
and not (self.nogil and not rhs_type.nogil)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
with_calling_convention = 1):
arg_decl_list = []
for arg in self.args[:len(self.args)-self.optional_arg_count]:
arg_decl_list.append(
arg.type.declaration_code("", for_display, pyrex = pyrex))
if self.is_overridable:
arg_decl_list.append("int %s" % Naming.skip_dispatch_cname)
if self.optional_arg_count:
arg_decl_list.append(self.op_arg_struct.declaration_code(Naming.optional_args_cname))
if self.has_varargs:
arg_decl_list.append("...")
arg_decl_code = ", ".join(arg_decl_list)
if not arg_decl_code and not pyrex:
arg_decl_code = "void"
trailer = ""
if (pyrex or for_display) and not self.return_type.is_pyobject:
if self.exception_value and self.exception_check:
trailer = " except? %s" % self.exception_value
elif self.exception_value:
trailer = " except %s" % self.exception_value
elif self.exception_check == '+':
trailer = " except +"
elif self.exception_check and for_display:
# not spelled out by default, unless for human eyes
trailer = " except *"
if self.nogil:
trailer += " nogil"
if not with_calling_convention:
cc = ''
else:
cc = self.calling_convention_prefix()
if (not entity_code and cc) or entity_code.startswith("*"):
entity_code = "(%s%s)" % (cc, entity_code)
cc = ""
if self.is_const_method:
trailer += " const"
return self.return_type.declaration_code(
"%s%s(%s)%s" % (cc, entity_code, arg_decl_code, trailer),
for_display, dll_linkage, pyrex)
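# Illustrative: a function declared 'cdef int f(double x) except? -1 nogil'
# displays (pyrex/for_display) as roughly "int f(double) except? -1 nogil",
# while the plain C declaration drops the Cython-only trailer: "int f(double)".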
def function_header_code(self, func_name, arg_code):
if self.is_const_method:
trailer = " const"
else:
trailer = ""
return "%s%s(%s)%s" % (self.calling_convention_prefix(),
func_name, arg_code, trailer)
def signature_string(self):
s = self.empty_declaration_code()
return s
def signature_cast_string(self):
s = self.declaration_code("(*)", with_calling_convention=False)
return '(%s)' % s
def specialize(self, values):
result = CFuncType(self.return_type.specialize(values),
[arg.specialize(values) for arg in self.args],
has_varargs = self.has_varargs,
exception_value = self.exception_value,
exception_check = self.exception_check,
calling_convention = self.calling_convention,
nogil = self.nogil,
with_gil = self.with_gil,
is_overridable = self.is_overridable,
optional_arg_count = self.optional_arg_count,
is_const_method = self.is_const_method,
is_static_method = self.is_static_method,
templates = self.templates)
result.from_fused = self.is_fused
return result
def opt_arg_cname(self, arg_name):
return self.op_arg_struct.base_type.scope.lookup(arg_name).cname
# Methods that deal with Fused Types
# All but map_with_specific_entries should be called only on functions
# with fused types (and not on their corresponding specific versions).
def get_all_specialized_permutations(self, fused_types=None):
"""
Permute all the types. For every specific instance of a fused type, we
want all other specific instances of all other fused types.
It returns an iterable of two-tuples of the cname that should prefix
the cname of the function, and a dict mapping any fused types to their
respective specific types.
"""
assert self.is_fused
if fused_types is None:
fused_types = self.get_fused_types()
return get_all_specialized_permutations(fused_types)
def get_all_specialized_function_types(self):
"""
Get all the specific function types of this one.
"""
assert self.is_fused
if self.entry.fused_cfunction:
return [n.type for n in self.entry.fused_cfunction.nodes]
elif self.cached_specialized_types is not None:
return self.cached_specialized_types
result = []
permutations = self.get_all_specialized_permutations()
new_cfunc_entries = []
for cname, fused_to_specific in permutations:
new_func_type = self.entry.type.specialize(fused_to_specific)
if self.optional_arg_count:
# Remember, this method is set by CFuncDeclaratorNode
self.declare_opt_arg_struct(new_func_type, cname)
new_entry = copy.deepcopy(self.entry)
new_func_type.specialize_entry(new_entry, cname)
new_entry.type = new_func_type
new_func_type.entry = new_entry
result.append(new_func_type)
new_cfunc_entries.append(new_entry)
cfunc_entries = self.entry.scope.cfunc_entries
try:
cindex = cfunc_entries.index(self.entry)
except ValueError:
cfunc_entries.extend(new_cfunc_entries)
else:
cfunc_entries[cindex:cindex+1] = new_cfunc_entries
self.cached_specialized_types = result
return result
def get_fused_types(self, result=None, seen=None, subtypes=None):
"""Return fused types in the order they appear as parameter types"""
return super(CFuncType, self).get_fused_types(result, seen,
subtypes=['args'])
def specialize_entry(self, entry, cname):
assert not self.is_fused
specialize_entry(entry, cname)
def can_coerce_to_pyobject(self, env):
# duplicating the decisions from create_to_py_utility_code() here avoids writing out unused code
if self.has_varargs or self.optional_arg_count:
return False
if self.to_py_function is not None:
return self.to_py_function
for arg in self.args:
if not arg.type.is_pyobject and not arg.type.can_coerce_to_pyobject(env):
return False
if not self.return_type.is_pyobject and not self.return_type.can_coerce_to_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
# FIXME: it seems we're trying to coerce in more cases than we should
if self.to_py_function is not None:
return self.to_py_function
if not self.can_coerce_to_pyobject(env):
return False
from .UtilityCode import CythonUtilityCode
# include argument names into the c function name to ensure cname is unique
# between functions with identical types but different argument names
from .Symtab import punycodify_name
def arg_name_part(arg):
return "%s%s" % (len(arg.name), punycodify_name(arg.name)) if arg.name else "0"
arg_names = [ arg_name_part(arg) for arg in self.args ]
arg_names = "_".join(arg_names)
safe_typename = type_identifier(self, pyrex=True)
to_py_function = "__Pyx_CFunc_%s_to_py_%s" % (safe_typename, arg_names)
for arg in self.args:
if not arg.type.is_pyobject and not arg.type.create_from_py_utility_code(env):
return False
if not self.return_type.is_pyobject and not self.return_type.create_to_py_utility_code(env):
return False
def declared_type(ctype):
type_displayname = str(ctype.declaration_code("", for_display=True))
if ctype.is_pyobject:
arg_ctype = type_name = type_displayname
if ctype.is_builtin_type:
arg_ctype = ctype.name
elif not ctype.is_extension_type:
type_name = 'object'
type_displayname = None
else:
type_displayname = repr(type_displayname)
elif ctype is c_bint_type:
type_name = arg_ctype = 'bint'
else:
type_name = arg_ctype = type_displayname
if ctype is c_double_type:
type_displayname = 'float'
else:
type_displayname = repr(type_displayname)
return type_name, arg_ctype, type_displayname
class Arg(object):
def __init__(self, arg_name, arg_type):
self.name = arg_name
self.type = arg_type
self.type_cname, self.ctype, self.type_displayname = declared_type(arg_type)
if self.return_type.is_void:
except_clause = 'except *'
elif self.return_type.is_pyobject:
except_clause = ''
elif self.exception_value:
except_clause = ('except? %s' if self.exception_check else 'except %s') % self.exception_value
else:
except_clause = 'except *'
context = {
'cname': to_py_function,
'args': [Arg(arg.name or 'arg%s' % ix, arg.type) for ix, arg in enumerate(self.args)],
'return_type': Arg('return', self.return_type),
'except_clause': except_clause,
}
# FIXME: directives come from first defining environment and do not adapt for reuse
env.use_utility_code(CythonUtilityCode.load(
"cfunc.to_py", "CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context, compiler_directives=dict(env.global_scope().directives)))
self.to_py_function = to_py_function
return True
def specialize_entry(entry, cname):
"""
Specialize an entry of a copied fused function or method
"""
entry.is_fused_specialized = True
entry.name = get_fused_cname(cname, entry.name)
if entry.is_cmethod:
entry.cname = entry.name
if entry.is_inherited:
entry.cname = StringEncoding.EncodedString(
"%s.%s" % (Naming.obj_base_cname, entry.cname))
else:
entry.cname = get_fused_cname(cname, entry.cname)
if entry.func_cname:
entry.func_cname = get_fused_cname(cname, entry.func_cname)
def get_fused_cname(fused_cname, orig_cname):
"""
Given the fused cname id and an original cname, return a specialized cname
"""
assert fused_cname and orig_cname
return StringEncoding.EncodedString('%s%s%s' % (Naming.fused_func_prefix,
fused_cname, orig_cname))
def unique(somelist):
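"""Return the items of 'somelist' with duplicates removed, keeping the
first occurrence of each item (order-preserving, unlike a plain set)."""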
seen = set()
result = []
for obj in somelist:
if obj not in seen:
result.append(obj)
seen.add(obj)
return result
def get_all_specialized_permutations(fused_types):
return _get_all_specialized_permutations(unique(fused_types))
def _get_all_specialized_permutations(fused_types, id="", f2s=()):
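# Each entry of 'fused_types' must contain exactly one fused type; the
# one-element unpacking below doubles as an assertion of that.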
fused_type, = fused_types[0].get_fused_types()
result = []
for newid, specific_type in enumerate(fused_type.types):
# f2s = dict(f2s, **{ fused_type: specific_type })
f2s = dict(f2s)
f2s.update({ fused_type: specific_type })
if id:
cname = '%s_%s' % (id, newid)
else:
cname = str(newid)
if len(fused_types) > 1:
result.extend(_get_all_specialized_permutations(
fused_types[1:], cname, f2s))
else:
result.append((cname, f2s))
return result
def specialization_signature_string(fused_compound_type, fused_to_specific):
"""
Return the signature for a specialization of a fused type. e.g.
floating[:] ->
'float' or 'double'
cdef fused ft:
float[:]
double[:]
ft ->
'float[:]' or 'double[:]'
integral func(floating) ->
'int (*func)(float)' or ...
"""
fused_types = fused_compound_type.get_fused_types()
if len(fused_types) == 1:
fused_type = fused_types[0]
else:
fused_type = fused_compound_type
return fused_type.specialize(fused_to_specific).typeof_name()
def get_specialized_types(type):
"""
Return a list of specialized types in their declared order.
"""
assert type.is_fused
if isinstance(type, FusedType):
result = list(type.types)
for specialized_type in result:
specialized_type.specialization_string = specialized_type.typeof_name()
else:
result = []
for cname, f2s in get_all_specialized_permutations(type.get_fused_types()):
specialized_type = type.specialize(f2s)
specialized_type.specialization_string = (
specialization_signature_string(type, f2s))
result.append(specialized_type)
return result
class CFuncTypeArg(BaseType):
# name string
# cname string
# type PyrexType
# pos source file position
# FIXME: is this the right setup? should None be allowed here?
not_none = False
or_none = False
accept_none = True
accept_builtin_subtypes = False
annotation = None
subtypes = ['type']
def __init__(self, name, type, pos, cname=None, annotation=None):
self.name = name
if cname is not None:
self.cname = cname
else:
self.cname = Naming.var_prefix + name
if annotation is not None:
self.annotation = annotation
self.type = type
self.pos = pos
self.needs_type_test = False # TODO: should these defaults be set in analyse_types()?
def __repr__(self):
return "%s:%s" % (self.name, repr(self.type))
def declaration_code(self, for_display = 0):
return self.type.declaration_code(self.cname, for_display)
def specialize(self, values):
return CFuncTypeArg(self.name, self.type.specialize(values), self.pos, self.cname)
def is_forwarding_reference(self):
if self.type.is_rvalue_reference:
if (isinstance(self.type.ref_base_type, TemplatePlaceholderType)
and not self.type.ref_base_type.is_cv_qualified):
return True
return False
class ToPyStructUtilityCode(object):
requires = None
def __init__(self, type, forward_decl, env):
self.type = type
self.header = "static PyObject* %s(%s)" % (type.to_py_function,
type.declaration_code('s'))
self.forward_decl = forward_decl
self.env = env
def __eq__(self, other):
return isinstance(other, ToPyStructUtilityCode) and self.header == other.header
def __hash__(self):
return hash(self.header)
def get_tree(self, **kwargs):
pass
def put_code(self, output):
code = output['utility_code_def']
proto = output['utility_code_proto']
code.putln("%s {" % self.header)
code.putln("PyObject* res;")
code.putln("PyObject* member;")
code.putln("res = __Pyx_PyDict_NewPresized(%d); if (unlikely(!res)) return NULL;" %
len(self.type.scope.var_entries))
for member in self.type.scope.var_entries:
nameconst_cname = code.get_py_string_const(member.name, identifier=True)
code.putln("%s; if (unlikely(!member)) goto bad;" % (
member.type.to_py_call_code('s.%s' % member.cname, 'member', member.type)))
code.putln("if (unlikely(PyDict_SetItem(res, %s, member) < 0)) goto bad;" % nameconst_cname)
code.putln("Py_DECREF(member);")
code.putln("return res;")
code.putln("bad:")
code.putln("Py_XDECREF(member);")
code.putln("Py_DECREF(res);")
code.putln("return NULL;")
code.putln("}")
# This is a bit of a hack: we need a forward declaration
# because of the way things are ordered in the module...
if self.forward_decl:
proto.putln(self.type.empty_declaration_code() + ';')
proto.putln(self.header + ";")
def inject_tree_and_scope_into(self, module_node):
pass
class CStructOrUnionType(CType):
# name string
# cname string
# kind string "struct" or "union"
# scope StructOrUnionScope, or None if incomplete
# typedef_flag boolean
# packed boolean
# entry Entry
is_struct_or_union = 1
has_attributes = 1
exception_check = True
def __init__(self, name, kind, scope, typedef_flag, cname, packed=False, in_cpp=False):
self.name = name
self.cname = cname
self.kind = kind
self.scope = scope
self.typedef_flag = typedef_flag
self.is_struct = kind == 'struct'
self.to_py_function = "%s_to_py_%s" % (
Naming.convert_func_prefix, self.specialization_name())
self.from_py_function = "%s_from_py_%s" % (
Naming.convert_func_prefix, self.specialization_name())
self.exception_check = True
self._convert_to_py_code = None
self._convert_from_py_code = None
self.packed = packed
self.needs_cpp_construction = self.is_struct and in_cpp
def can_coerce_to_pyobject(self, env):
if self._convert_to_py_code is False:
return None # tri-state-ish
if env.outer_scope is None:
return False
if self._convert_to_py_code is None:
is_union = not self.is_struct
unsafe_union_types = set()
safe_union_types = set()
for member in self.scope.var_entries:
member_type = member.type
if not member_type.can_coerce_to_pyobject(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
if is_union:
if member_type.is_ptr or member_type.is_cpp_class:
unsafe_union_types.add(member_type)
else:
safe_union_types.add(member_type)
if unsafe_union_types and (safe_union_types or len(unsafe_union_types) > 1):
# unsafe mix of safe and unsafe to convert types
self.from_py_function = None
self._convert_from_py_code = False
return False
return True
def create_to_py_utility_code(self, env):
if not self.can_coerce_to_pyobject(env):
return False
if self._convert_to_py_code is None:
for member in self.scope.var_entries:
member.type.create_to_py_utility_code(env)
forward_decl = self.entry.visibility != 'extern' and not self.typedef_flag
self._convert_to_py_code = ToPyStructUtilityCode(self, forward_decl, env)
env.use_utility_code(self._convert_to_py_code)
return True
def can_coerce_from_pyobject(self, env):
if env.outer_scope is None or self._convert_from_py_code is False:
return False
for member in self.scope.var_entries:
if not member.type.can_coerce_from_pyobject(env):
return False
return True
def create_from_py_utility_code(self, env):
if env.outer_scope is None:
return False
if self._convert_from_py_code is False:
return None # tri-state-ish
if self._convert_from_py_code is None:
if not self.scope.var_entries:
# There are obviously missing fields; don't allow instantiation
# where absolutely no content is provided.
return False
for member in self.scope.var_entries:
if not member.type.create_from_py_utility_code(env):
self.from_py_function = None
self._convert_from_py_code = False
return False
context = dict(
struct_type=self,
var_entries=self.scope.var_entries,
funcname=self.from_py_function,
)
env.use_utility_code(UtilityCode.load_cached("RaiseUnexpectedTypeError", "ObjectHandling.c"))
from .UtilityCode import CythonUtilityCode
self._convert_from_py_code = CythonUtilityCode.load(
"FromPyStructUtility" if self.is_struct else "FromPyUnionUtility",
"CConvert.pyx",
outer_module_scope=env.global_scope(), # need access to types declared in module
context=context)
env.use_utility_code(self._convert_from_py_code)
return True
def __repr__(self):
return "<CStructOrUnionType %s %s%s>" % (
self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
base_code = self.name
else:
if self.typedef_flag:
base_code = self.cname
else:
base_code = "%s %s" % (self.kind, self.cname)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def __eq__(self, other):
try:
return (isinstance(other, CStructOrUnionType) and
self.name == other.name)
except AttributeError:
return False
def __lt__(self, other):
try:
return self.name < other.name
except AttributeError:
# this is arbitrary, but it makes sure we always have
# *some* kind of order
return False
def __hash__(self):
return hash(self.cname) ^ hash(self.kind)
def is_complete(self):
return self.scope is not None
def attributes_known(self):
return self.is_complete()
def can_be_complex(self):
# Does the struct consist of exactly two identical floats?
fields = self.scope.var_entries
if len(fields) != 2: return False
a, b = fields
return (a.type.is_float and b.type.is_float and
a.type.empty_declaration_code() ==
b.type.empty_declaration_code())
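# e.g. 'struct { double real; double imag; }' passes the check above, while a
# float/double mix or any member count other than two does not.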
def struct_nesting_depth(self):
child_depths = [x.type.struct_nesting_depth()
for x in self.scope.var_entries]
return max(child_depths) + 1
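# Sketch of the above, assuming the PyrexType default depth of 1 for scalar
# members: a struct of scalars has depth 2, and each nesting level adds 1.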
def cast_code(self, expr_code):
if self.is_struct:
return expr_code
return super(CStructOrUnionType, self).cast_code(expr_code)
cpp_string_conversions = ("std::string",)
builtin_cpp_conversions = {
# type name -> number of element template params
"std::pair": 2,
"std::vector": 1,
"std::list": 1,
"std::set": 1,
"std::unordered_set": 1,
"std::map": 2,
"std::unordered_map": 2,
"std::complex": 1,
}
class CppClassType(CType):
# name string
# cname string
# scope CppClassScope
# templates [string] or None
is_cpp_class = 1
has_attributes = 1
needs_cpp_construction = 1
exception_check = True
namespace = None
# For struct-like declaration.
kind = "struct"
packed = False
typedef_flag = False
subtypes = ['templates']
def __init__(self, name, scope, cname, base_classes, templates=None, template_type=None):
self.name = name
self.cname = cname
self.scope = scope
self.base_classes = base_classes
self.operators = []
self.templates = templates
self.template_type = template_type
self.num_optional_templates = sum(is_optional_template_param(T) for T in templates or ())
if templates:
self.specializations = {tuple(zip(templates, templates)): self}
else:
self.specializations = {}
self.is_cpp_string = cname in cpp_string_conversions
def use_conversion_utility(self, from_or_to):
pass
def maybe_unordered(self):
if 'unordered' in self.cname:
return 'unordered_'
else:
return ''
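# e.g. 'std::unordered_map' -> 'unordered_', 'std::map' -> ''; the string is
# passed into the CppConvert.pyx utility code via the template context below.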
def can_coerce_from_pyobject(self, env):
if self.cname in builtin_cpp_conversions:
template_count = builtin_cpp_conversions[self.cname]
for ix, T in enumerate(self.templates or []):
if ix >= template_count:
break
if T.is_pyobject or not T.can_coerce_from_pyobject(env):
return False
return True
elif self.cname in cpp_string_conversions:
return True
return False
def create_from_py_utility_code(self, env):
if self.from_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
context = {}
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if T.is_pyobject or not T.create_from_py_utility_code(env):
return False
tags.append(T.specialization_name())
context[X[ix]] = T
if self.cname in cpp_string_conversions:
cls = 'string'
tags = (type_identifier(self),)
else:
cls = self.cname[5:]
cname = '__pyx_convert_%s_from_py_%s' % (cls, '__and_'.join(tags))
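# e.g. a 'std::vector' of int gives cls='vector' and
# cname='__pyx_convert_vector_from_py_int'.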
context.update({
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
})
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
context=context, compiler_directives=env.directives))
self.from_py_function = cname
return True
def can_coerce_to_pyobject(self, env):
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if T.is_pyobject or not T.can_coerce_to_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.cname in builtin_cpp_conversions or self.cname in cpp_string_conversions:
X = "XYZABC"
tags = []
context = {}
for ix, T in enumerate(self.templates or []):
if ix >= builtin_cpp_conversions[self.cname]:
break
if not T.create_to_py_utility_code(env):
return False
tags.append(T.specialization_name())
context[X[ix]] = T
if self.cname in cpp_string_conversions:
cls = 'string'
prefix = 'PyObject_' # gets specialised by explicit type casts in CoerceToPyTypeNode
tags = (type_identifier(self),)
else:
cls = self.cname[5:]
prefix = ''
cname = "__pyx_convert_%s%s_to_py_%s" % (prefix, cls, "____".join(tags))
context.update({
'cname': cname,
'maybe_unordered': self.maybe_unordered(),
'type': self.cname,
})
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
cls.replace('unordered_', '') + ".to_py", "CppConvert.pyx",
context=context, compiler_directives=env.directives))
self.to_py_function = cname
return True
def is_template_type(self):
return self.templates is not None and self.template_type is None
def get_fused_types(self, result=None, seen=None):
if result is None:
result = []
seen = set()
if self.namespace:
self.namespace.get_fused_types(result, seen)
if self.templates:
for T in self.templates:
T.get_fused_types(result, seen)
return result
def specialize_here(self, pos, template_values=None):
if not self.is_template_type():
error(pos, "'%s' type is not a template" % self)
return error_type
if len(self.templates) - self.num_optional_templates <= len(template_values) < len(self.templates):
num_defaults = len(self.templates) - len(template_values)
partial_specialization = self.declaration_code('', template_params=template_values)
# Most of the time we don't need to declare anything typed to these
# default template arguments, but when we do there's no way in C++
# to reference this directly. However, it is common convention to
# provide a typedef in the template class that resolves to each
# template type. For now, allow the user to specify this name as
# the template parameter.
# TODO: Allow typedefs in cpp classes and search for it in this
# classes scope as a concrete name we could use.
template_values = template_values + [
TemplatePlaceholderType(
"%s::%s" % (partial_specialization, param.name), True)
for param in self.templates[-num_defaults:]]
if len(self.templates) != len(template_values):
error(pos, "%s templated type receives %d arguments, got %d" %
(self.name, len(self.templates), len(template_values)))
return error_type
has_object_template_param = False
for value in template_values:
if value.is_pyobject:
has_object_template_param = True
error(pos,
"Python object type '%s' cannot be used as a template argument" % value)
if has_object_template_param:
return error_type
return self.specialize(dict(zip(self.templates, template_values)))
def specialize(self, values):
if not self.templates and not self.namespace:
return self
if self.templates is None:
self.templates = []
key = tuple(values.items())
if key in self.specializations:
return self.specializations[key]
template_values = [t.specialize(values) for t in self.templates]
specialized = self.specializations[key] = \
CppClassType(self.name, None, self.cname, [], template_values, template_type=self)
# Need to do these *after* self.specializations[key] is set
# to avoid infinite recursion on circular references.
specialized.base_classes = [b.specialize(values) for b in self.base_classes]
if self.namespace is not None:
specialized.namespace = self.namespace.specialize(values)
specialized.scope = self.scope.specialize(values, specialized)
if self.cname == 'std::vector':
# vector<bool> is special cased in the C++ standard, and its
# accessors do not necessarily return references to the underlying
# elements (which may be bit-packed).
# http://www.cplusplus.com/reference/vector/vector-bool/
# Here we pretend that the various methods return bool values
# (as the actual returned values are coercible to such, and
# we don't support call expressions as lvalues).
T = values.get(self.templates[0], None)
if T and not T.is_fused and T.empty_declaration_code() == 'bool':
for bit_ref_returner in ('at', 'back', 'front'):
if bit_ref_returner in specialized.scope.entries:
specialized.scope.entries[bit_ref_returner].type.return_type = T
return specialized
def deduce_template_params(self, actual):
if actual.is_cv_qualified:
actual = actual.cv_base_type
if actual.is_reference:
actual = actual.ref_base_type
if self == actual:
return {}
elif actual.is_cpp_class:
self_template_type = self
while getattr(self_template_type, 'template_type', None):
self_template_type = self_template_type.template_type
def all_bases(cls):
yield cls
for parent in cls.base_classes:
for base in all_bases(parent):
yield base
for actual_base in all_bases(actual):
template_type = actual_base
while getattr(template_type, 'template_type', None):
template_type = template_type.template_type
if (self_template_type.empty_declaration_code()
== template_type.empty_declaration_code()):
return reduce(
merge_template_deductions,
[formal_param.deduce_template_params(actual_param)
for (formal_param, actual_param)
in zip(self.templates, actual_base.templates)],
{})
else:
return {}
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0,
template_params = None):
if template_params is None:
template_params = self.templates
if self.templates:
template_strings = [param.declaration_code('', for_display, None, pyrex)
for param in template_params
if not is_optional_template_param(param) and not param.is_fused]
if for_display:
brackets = "[%s]"
else:
brackets = "<%s> "
templates = brackets % ",".join(template_strings)
else:
templates = ""
if pyrex or for_display:
base_code = "%s%s" % (self.name, templates)
else:
base_code = "%s%s" % (self.cname, templates)
if self.namespace is not None:
base_code = "%s::%s" % (self.namespace.empty_declaration_code(), base_code)
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def is_subclass(self, other_type):
if self.same_as_resolved_type(other_type):
return 1
for base_class in self.base_classes:
if base_class.is_subclass(other_type):
return 1
return 0
def subclass_dist(self, super_type):
if self.same_as_resolved_type(super_type):
return 0
elif not self.base_classes:
return float('inf')
else:
return 1 + min(b.subclass_dist(super_type) for b in self.base_classes)
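# Sketch with hypothetical classes D(B) and B(A): D.subclass_dist(D) == 0,
# B.subclass_dist(A) == 1, D.subclass_dist(A) == 2, and A.subclass_dist(D)
# is float('inf') because A has no base classes.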
def same_as_resolved_type(self, other_type):
if other_type.is_cpp_class:
if self == other_type:
return 1
# This messy logic is needed due to GH Issue #1852.
elif (self.cname == other_type.cname and
(self.template_type and other_type.template_type
or self.templates
or other_type.templates)):
if self.templates == other_type.templates:
return 1
for t1, t2 in zip(self.templates, other_type.templates):
if is_optional_template_param(t1) and is_optional_template_param(t2):
break
if not t1.same_as_resolved_type(t2):
return 0
return 1
return 0
def assignable_from_resolved_type(self, other_type):
# TODO: handle operator=(...) here?
if other_type is error_type:
return True
elif other_type.is_cpp_class:
return other_type.is_subclass(self)
elif other_type.is_string and self.cname in cpp_string_conversions:
return True
def attributes_known(self):
return self.scope is not None
def find_cpp_operation_type(self, operator, operand_type=None):
operands = [self]
if operand_type is not None:
operands.append(operand_type)
# pos == None => no errors
operator_entry = self.scope.lookup_operator_for_types(None, operator, operands)
if not operator_entry:
return None
func_type = operator_entry.type
if func_type.is_ptr:
func_type = func_type.base_type
return func_type.return_type
def get_constructor(self, pos):
constructor = self.scope.lookup('<init>')
if constructor is not None:
return constructor
# Otherwise: automatically declare no-args default constructor.
# Make it "nogil" if the base classes allow it.
nogil = True
for base in self.base_classes:
base_constructor = base.scope.lookup('<init>')
if base_constructor and not base_constructor.type.nogil:
nogil = False
break
func_type = CFuncType(self, [], exception_check='+', nogil=nogil)
return self.scope.declare_cfunction(u'<init>', func_type, pos)
def check_nullary_constructor(self, pos, msg="stack allocated"):
constructor = self.scope.lookup(u'<init>')
if constructor is not None and best_match([], constructor.all_alternatives()) is None:
error(pos, "C++ class must have a nullary constructor to be %s" % msg)
class CppScopedEnumType(CType):
# name string
# doc string or None
# cname string
is_cpp_enum = True
def __init__(self, name, cname, underlying_type, namespace=None, doc=None):
self.name = name
self.doc = doc
self.cname = cname
self.values = []
self.underlying_type = underlying_type
self.namespace = namespace
def __str__(self):
return self.name
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
type_name = self.name
else:
if self.namespace:
type_name = "%s::%s" % (
self.namespace.empty_declaration_code(),
self.cname
)
else:
type_name = "__PYX_ENUM_CLASS_DECL %s" % self.cname
type_name = public_decl(type_name, dll_linkage)
return self.base_declaration_code(type_name, entity_code)
def create_from_py_utility_code(self, env):
if self.from_py_function:
return True
if self.underlying_type.create_from_py_utility_code(env):
self.from_py_function = '(%s)%s' % (
self.cname, self.underlying_type.from_py_function
)
return True
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.underlying_type.create_to_py_utility_code(env):
# Using a C++11 lambda here, which is fine since
# scoped enums are a C++11 feature
self.to_py_function = '[](const %s& x){return %s((%s)x);}' % (
self.cname,
self.underlying_type.to_py_function,
self.underlying_type.empty_declaration_code()
)
return True
def create_type_wrapper(self, env):
from .UtilityCode import CythonUtilityCode
rst = CythonUtilityCode.load(
"CppScopedEnumType", "CpdefEnums.pyx",
context={
"name": self.name,
"cname": self.cname.split("::")[-1],
"items": tuple(self.values),
"underlying_type": self.underlying_type.empty_declaration_code(),
"enum_doc": self.doc,
},
outer_module_scope=env.global_scope())
env.use_utility_code(rst)
class TemplatePlaceholderType(CType):
def __init__(self, name, optional=False):
self.name = name
self.optional = optional
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if entity_code:
return self.name + " " + entity_code
else:
return self.name
def specialize(self, values):
if self in values:
return values[self]
else:
return self
def deduce_template_params(self, actual):
return {self: actual}
def same_as_resolved_type(self, other_type):
if isinstance(other_type, TemplatePlaceholderType):
return self.name == other_type.name
else:
return 0
def __hash__(self):
return hash(self.name)
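# __cmp__ (and the 'cmp' builtin) exist only on Python 2; Python 3 ignores
# this method and uses __eq__/__hash__ above instead.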
def __cmp__(self, other):
if isinstance(other, TemplatePlaceholderType):
return cmp(self.name, other.name)
else:
return cmp(type(self), type(other))
def __eq__(self, other):
if isinstance(other, TemplatePlaceholderType):
return self.name == other.name
else:
return False
def is_optional_template_param(type):
return isinstance(type, TemplatePlaceholderType) and type.optional
class CEnumType(CIntLike, CType):
# name string
# doc string or None
# cname string or None
# typedef_flag boolean
# values [string], populated during declaration analysis
is_enum = 1
signed = 1
rank = -1 # Ranks below any integer type
def __init__(self, name, cname, typedef_flag, namespace=None, doc=None):
self.name = name
self.doc = doc
self.cname = cname
self.values = []
self.typedef_flag = typedef_flag
self.namespace = namespace
self.default_value = "(%s) 0" % self.empty_declaration_code()
def __str__(self):
return self.name
def __repr__(self):
return "<CEnumType %s %s%s>" % (self.name, self.cname,
("", " typedef")[self.typedef_flag])
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
base_code = self.name
else:
if self.namespace:
base_code = "%s::%s" % (
self.namespace.empty_declaration_code(), self.cname)
elif self.typedef_flag:
base_code = self.cname
else:
base_code = "enum %s" % self.cname
base_code = public_decl(base_code, dll_linkage)
return self.base_declaration_code(base_code, entity_code)
def specialize(self, values):
if self.namespace:
namespace = self.namespace.specialize(values)
if namespace != self.namespace:
return CEnumType(
self.name, self.cname, self.typedef_flag, namespace)
return self
def create_type_wrapper(self, env):
from .UtilityCode import CythonUtilityCode
env.use_utility_code(CythonUtilityCode.load(
"EnumType", "CpdefEnums.pyx",
context={"name": self.name,
"items": tuple(self.values),
"enum_doc": self.doc,
},
outer_module_scope=env.global_scope()))
class CTupleType(CType):
# components [PyrexType]
is_ctuple = True
def __init__(self, cname, components):
self.cname = cname
self.components = components
self.size = len(components)
self.to_py_function = "%s_to_py_%s" % (Naming.convert_func_prefix, self.cname)
self.from_py_function = "%s_from_py_%s" % (Naming.convert_func_prefix, self.cname)
self.exception_check = True
self._convert_to_py_code = None
self._convert_from_py_code = None
def __str__(self):
return "(%s)" % ", ".join(str(c) for c in self.components)
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
if pyrex or for_display:
return str(self)
else:
return self.base_declaration_code(self.cname, entity_code)
def can_coerce_to_pyobject(self, env):
for component in self.components:
if not component.can_coerce_to_pyobject(env):
return False
return True
def can_coerce_from_pyobject(self, env):
for component in self.components:
if not component.can_coerce_from_pyobject(env):
return False
return True
def create_to_py_utility_code(self, env):
if self._convert_to_py_code is False:
return None # tri-state-ish
if self._convert_to_py_code is None:
for component in self.components:
if not component.create_to_py_utility_code(env):
self.to_py_function = None
self._convert_to_py_code = False
return False
context = dict(
struct_type_decl=self.empty_declaration_code(),
components=self.components,
funcname=self.to_py_function,
size=len(self.components)
)
self._convert_to_py_code = TempitaUtilityCode.load(
"ToPyCTupleUtility", "TypeConversion.c", context=context)
env.use_utility_code(self._convert_to_py_code)
return True
def create_from_py_utility_code(self, env):
if self._convert_from_py_code is False:
return None # tri-state-ish
if self._convert_from_py_code is None:
for component in self.components:
if not component.create_from_py_utility_code(env):
self.from_py_function = None
self._convert_from_py_code = False
return False
context = dict(
struct_type_decl=self.empty_declaration_code(),
components=self.components,
funcname=self.from_py_function,
size=len(self.components)
)
self._convert_from_py_code = TempitaUtilityCode.load(
"FromPyCTupleUtility", "TypeConversion.c", context=context)
env.use_utility_code(self._convert_from_py_code)
return True
def cast_code(self, expr_code):
return expr_code
def c_tuple_type(components):
components = tuple(components)
cname = Naming.ctuple_type_prefix + type_list_identifier(components)
tuple_type = CTupleType(cname, components)
return tuple_type
class UnspecifiedType(PyrexType):
# Used as a placeholder until the type can be determined.
is_unspecified = 1
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<unspecified>"
def same_as_resolved_type(self, other_type):
return False
class ErrorType(PyrexType):
# Used to prevent propagation of error messages.
is_error = 1
exception_value = "0"
exception_check = 0
to_py_function = "dummy"
from_py_function = "dummy"
def create_to_py_utility_code(self, env):
return True
def create_from_py_utility_code(self, env):
return True
def declaration_code(self, entity_code,
for_display = 0, dll_linkage = None, pyrex = 0):
return "<error>"
def same_as_resolved_type(self, other_type):
return 1
def error_condition(self, result_code):
return "dummy"
rank_to_type_name = (
"char", # 0
"short", # 1
"int", # 2
"long", # 3
"PY_LONG_LONG", # 4
"float", # 5
"double", # 6
"long double", # 7
)
_rank_to_type_name = list(rank_to_type_name)
RANK_INT = _rank_to_type_name.index('int')
RANK_LONG = _rank_to_type_name.index('long')
RANK_FLOAT = _rank_to_type_name.index('float')
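# Signedness markers, matching the first element of the keys in
# modifiers_and_name_to_type below: 0 = unsigned, 1 = plain/default
# signedness (the CIntType default), 2 = explicitly signed.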
UNSIGNED = 0
SIGNED = 2
error_type = ErrorType()
unspecified_type = UnspecifiedType()
py_object_type = PyObjectType()
c_void_type = CVoidType()
c_uchar_type = CIntType(0, UNSIGNED)
c_ushort_type = CIntType(1, UNSIGNED)
c_uint_type = CIntType(2, UNSIGNED)
c_ulong_type = CIntType(3, UNSIGNED)
c_ulonglong_type = CIntType(4, UNSIGNED)
c_char_type = CIntType(0)
c_short_type = CIntType(1)
c_int_type = CIntType(2)
c_long_type = CIntType(3)
c_longlong_type = CIntType(4)
c_schar_type = CIntType(0, SIGNED)
c_sshort_type = CIntType(1, SIGNED)
c_sint_type = CIntType(2, SIGNED)
c_slong_type = CIntType(3, SIGNED)
c_slonglong_type = CIntType(4, SIGNED)
c_float_type = CFloatType(5, math_h_modifier='f')
c_double_type = CFloatType(6)
c_longdouble_type = CFloatType(7, math_h_modifier='l')
c_float_complex_type = CComplexType(c_float_type)
c_double_complex_type = CComplexType(c_double_type)
c_longdouble_complex_type = CComplexType(c_longdouble_type)
c_anon_enum_type = CAnonEnumType(-1)
c_returncode_type = CReturnCodeType(RANK_INT)
c_bint_type = CBIntType(RANK_INT)
c_py_unicode_type = CPyUnicodeIntType(RANK_INT-0.5, UNSIGNED)
c_py_ucs4_type = CPyUCS4IntType(RANK_LONG-0.5, UNSIGNED)
c_py_hash_t_type = CPyHashTType(RANK_LONG+0.5, SIGNED)
c_py_ssize_t_type = CPySSizeTType(RANK_LONG+0.5, SIGNED)
c_ssize_t_type = CSSizeTType(RANK_LONG+0.5, SIGNED)
c_size_t_type = CSizeTType(RANK_LONG+0.5, UNSIGNED)
c_ptrdiff_t_type = CPtrdiffTType(RANK_LONG+0.75, SIGNED)
c_null_ptr_type = CNullPtrType(c_void_type)
c_void_ptr_type = CPtrType(c_void_type)
c_void_ptr_ptr_type = CPtrType(c_void_ptr_type)
c_char_ptr_type = CPtrType(c_char_type)
c_const_char_ptr_type = CPtrType(CConstType(c_char_type))
c_uchar_ptr_type = CPtrType(c_uchar_type)
c_const_uchar_ptr_type = CPtrType(CConstType(c_uchar_type))
c_char_ptr_ptr_type = CPtrType(c_char_ptr_type)
c_int_ptr_type = CPtrType(c_int_type)
c_py_unicode_ptr_type = CPtrType(c_py_unicode_type)
c_const_py_unicode_ptr_type = CPtrType(CConstType(c_py_unicode_type))
c_py_ssize_t_ptr_type = CPtrType(c_py_ssize_t_type)
c_ssize_t_ptr_type = CPtrType(c_ssize_t_type)
c_size_t_ptr_type = CPtrType(c_size_t_type)
# GIL state
c_gilstate_type = CEnumType("PyGILState_STATE", "PyGILState_STATE", True)
c_threadstate_type = CStructOrUnionType("PyThreadState", "struct", None, 1, "PyThreadState")
c_threadstate_ptr_type = CPtrType(c_threadstate_type)
# PEP-539 "Py_tss_t" type
c_pytss_t_type = CPyTSSTType()
# the Py_buffer type is defined in Builtin.py
c_py_buffer_type = CStructOrUnionType("Py_buffer", "struct", None, 1, "Py_buffer")
c_py_buffer_ptr_type = CPtrType(c_py_buffer_type)
# Not sure whether the unsigned versions and 'long long' should be in there:
# 'long long' requires C99 and might be slow, and would always get preferred
# when specialization happens through calling and not indexing.
cy_integral_type = FusedType([c_short_type, c_int_type, c_long_type],
name="integral")
# Omitting long double as it might be slow
cy_floating_type = FusedType([c_float_type, c_double_type], name="floating")
cy_numeric_type = FusedType([c_short_type,
c_int_type,
c_long_type,
c_float_type,
c_double_type,
c_float_complex_type,
c_double_complex_type], name="numeric")
# buffer-related structs
c_buf_diminfo_type = CStructOrUnionType("__Pyx_Buf_DimInfo", "struct",
None, 1, "__Pyx_Buf_DimInfo")
c_pyx_buffer_type = CStructOrUnionType("__Pyx_Buffer", "struct", None, 1, "__Pyx_Buffer")
c_pyx_buffer_ptr_type = CPtrType(c_pyx_buffer_type)
c_pyx_buffer_nd_type = CStructOrUnionType("__Pyx_LocalBuf_ND", "struct",
None, 1, "__Pyx_LocalBuf_ND")
cython_memoryview_type = CStructOrUnionType("__pyx_memoryview_obj", "struct",
None, 0, "__pyx_memoryview_obj")
memoryviewslice_type = CStructOrUnionType("memoryviewslice", "struct",
None, 1, "__Pyx_memviewslice")
modifiers_and_name_to_type = {
#(signed, longness, name) : type
(0, 0, "char"): c_uchar_type,
(1, 0, "char"): c_char_type,
(2, 0, "char"): c_schar_type,
(0, -1, "int"): c_ushort_type,
(0, 0, "int"): c_uint_type,
(0, 1, "int"): c_ulong_type,
(0, 2, "int"): c_ulonglong_type,
(1, -1, "int"): c_short_type,
(1, 0, "int"): c_int_type,
(1, 1, "int"): c_long_type,
(1, 2, "int"): c_longlong_type,
(2, -1, "int"): c_sshort_type,
(2, 0, "int"): c_sint_type,
(2, 1, "int"): c_slong_type,
(2, 2, "int"): c_slonglong_type,
(1, 0, "float"): c_float_type,
(1, 0, "double"): c_double_type,
(1, 1, "double"): c_longdouble_type,
(1, 0, "complex"): c_double_complex_type, # C: float, Python: double => Python wins
(1, 0, "floatcomplex"): c_float_complex_type,
(1, 0, "doublecomplex"): c_double_complex_type,
(1, 1, "doublecomplex"): c_longdouble_complex_type,
#
(1, 0, "void"): c_void_type,
(1, 0, "Py_tss_t"): c_pytss_t_type,
(1, 0, "bint"): c_bint_type,
(0, 0, "Py_UNICODE"): c_py_unicode_type,
(0, 0, "Py_UCS4"): c_py_ucs4_type,
(2, 0, "Py_hash_t"): c_py_hash_t_type,
(2, 0, "Py_ssize_t"): c_py_ssize_t_type,
(2, 0, "ssize_t") : c_ssize_t_type,
(0, 0, "size_t") : c_size_t_type,
(2, 0, "ptrdiff_t") : c_ptrdiff_t_type,
(1, 0, "object"): py_object_type,
}
def is_promotion(src_type, dst_type):
# It's hard to find a precise definition of promotion, but empirical
# evidence suggests that the checks below cover all that's allowed.
if src_type.is_numeric:
if dst_type.same_as(c_int_type):
unsigned = (not src_type.signed)
return (src_type.is_enum or
(src_type.is_int and
unsigned + src_type.rank < dst_type.rank))
elif dst_type.same_as(c_double_type):
return src_type.is_float and src_type.rank <= dst_type.rank
return False
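# Illustrative (not executed): is_promotion(c_short_type, c_int_type) is True
# (classic integer promotion) and is_promotion(c_float_type, c_double_type) is
# True, while is_promotion(c_long_type, c_int_type) is False since long
# outranks int.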
def best_match(arg_types, functions, pos=None, env=None, args=None):
"""
Given a list args of arguments and a list of functions, choose one
to call which seems to be the "best" fit for this list of arguments.
This function is used, e.g., when deciding which overloaded method
to dispatch for C++ classes.
We first eliminate functions based on arity, and if only one
function has the correct arity, we return it. Otherwise, we weight
functions based on how much work must be done to convert the
arguments, with the following priorities:
* identical types or pointers to identical types
* promotions
* non-Python types
    That is, we prefer functions where no arguments need to be converted,
and failing that, functions where only promotions are required, and
so on.
If no function is deemed a good fit, or if two or more functions have
the same weight, we return None (as there is no best match). If pos
is not None, we also generate an error.
"""
# TODO: args should be a list of types, not a list of Nodes.
actual_nargs = len(arg_types)
candidates = []
errors = []
for func in functions:
error_mesg = ""
func_type = func.type
if func_type.is_ptr:
func_type = func_type.base_type
# Check function type
if not func_type.is_cfunction:
if not func_type.is_error and pos is not None:
error_mesg = "Calling non-function type '%s'" % func_type
errors.append((func, error_mesg))
continue
# Check no. of args
max_nargs = len(func_type.args)
min_nargs = max_nargs - func_type.optional_arg_count
if actual_nargs < min_nargs or (not func_type.has_varargs and actual_nargs > max_nargs):
if max_nargs == min_nargs and not func_type.has_varargs:
expectation = max_nargs
elif actual_nargs < min_nargs:
expectation = "at least %s" % min_nargs
else:
expectation = "at most %s" % max_nargs
error_mesg = "Call with wrong number of arguments (expected %s, got %s)" \
% (expectation, actual_nargs)
errors.append((func, error_mesg))
continue
if func_type.templates:
deductions = reduce(
merge_template_deductions,
[pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types)],
{})
if deductions is None:
errors.append((func, "Unable to deduce type parameters for %s given (%s)" % (func_type, ', '.join(map(str, arg_types)))))
elif len(deductions) < len(func_type.templates):
errors.append((func, "Unable to deduce type parameter %s" % (
", ".join([param.name for param in set(func_type.templates) - set(deductions.keys())]))))
else:
type_list = [deductions[param] for param in func_type.templates]
from .Symtab import Entry
specialization = Entry(
name = func.name + "[%s]" % ",".join([str(t) for t in type_list]),
cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]),
type = func_type.specialize(deductions),
pos = func.pos)
candidates.append((specialization, specialization.type))
else:
candidates.append((func, func_type))
# Optimize the most common case of no overloading...
if len(candidates) == 1:
return candidates[0][0]
elif len(candidates) == 0:
if pos is not None:
func, errmsg = errors[0]
            if len(errors) == 1 or all(e == errmsg for _, e in errors):  # all candidates failed for the same reason
error(pos, errmsg)
else:
error(pos, "no suitable method found")
return None
possibilities = []
bad_types = []
needed_coercions = {}
for index, (func, func_type) in enumerate(candidates):
score = [0,0,0,0,0,0,0]
for i in range(min(actual_nargs, len(func_type.args))):
src_type = arg_types[i]
dst_type = func_type.args[i].type
assignable = dst_type.assignable_from(src_type)
# Now take care of unprefixed string literals. So when you call a cdef
# function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions
if not assignable:
c_src_type = None
if src_type.is_pyobject:
if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
c_src_type = dst_type.resolve()
else:
c_src_type = src_type.default_coerced_ctype()
elif src_type.is_pythran_expr:
c_src_type = src_type.org_buffer
if c_src_type is not None:
assignable = dst_type.assignable_from(c_src_type)
if assignable:
src_type = c_src_type
needed_coercions[func] = (i, dst_type)
if assignable:
if src_type == dst_type or dst_type.same_as(src_type):
pass # score 0
elif func_type.is_strict_signature:
break # exact match requested but not found
elif is_promotion(src_type, dst_type):
score[2] += 1
elif ((src_type.is_int and dst_type.is_int) or
(src_type.is_float and dst_type.is_float)):
score[2] += abs(dst_type.rank + (not dst_type.signed) -
(src_type.rank + (not src_type.signed))) + 1
elif dst_type.is_ptr and src_type.is_ptr:
if dst_type.base_type == c_void_type:
score[4] += 1
elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type):
score[6] += src_type.base_type.subclass_dist(dst_type.base_type)
else:
score[5] += 1
elif not src_type.is_pyobject:
score[1] += 1
else:
score[0] += 1
else:
error_mesg = "Invalid conversion from '%s' to '%s'" % (src_type, dst_type)
bad_types.append((func, error_mesg))
break
else:
possibilities.append((score, index, func)) # so we can sort it
if possibilities:
possibilities.sort()
if len(possibilities) > 1:
score1 = possibilities[0][0]
score2 = possibilities[1][0]
if score1 == score2:
if pos is not None:
error(pos, "ambiguous overloaded method")
return None
function = possibilities[0][-1]
if function in needed_coercions and env:
arg_i, coerce_to_type = needed_coercions[function]
args[arg_i] = args[arg_i].coerce_to(coerce_to_type, env)
return function
if pos is not None:
if len(bad_types) == 1:
error(pos, bad_types[0][1])
else:
error(pos, "no suitable method found")
return None
def merge_template_deductions(a, b):
if a is None or b is None:
return None
    # accumulate deductions from b into a, failing on conflicts
    merged = a
    for param, value in b.items():
        if param in merged:
            if merged[param] != value:
                return None
        else:
            merged[param] = value
    return merged
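# For example: merging {T: int} with {T: int, U: float} yields
# {T: int, U: float}, while merging {T: int} with {T: float} yields None
# because the deductions for T conflict.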
def widest_numeric_type(type1, type2):
"""Given two numeric types, return the narrowest type encompassing both of them.
"""
if type1.is_reference:
type1 = type1.ref_base_type
if type2.is_reference:
type2 = type2.ref_base_type
if type1.is_cv_qualified:
type1 = type1.cv_base_type
if type2.is_cv_qualified:
type2 = type2.cv_base_type
if type1 == type2:
widest_type = type1
elif type1.is_complex or type2.is_complex:
def real_type(ntype):
if ntype.is_complex:
return ntype.real_type
return ntype
widest_type = CComplexType(
widest_numeric_type(
real_type(type1),
real_type(type2)))
elif type1.is_enum and type2.is_enum:
widest_type = c_int_type
elif type1.rank < type2.rank:
widest_type = type2
elif type1.rank > type2.rank:
widest_type = type1
elif type1.signed < type2.signed:
widest_type = type1
elif type1.signed > type2.signed:
widest_type = type2
elif type1.is_typedef > type2.is_typedef:
widest_type = type1
else:
widest_type = type2
return widest_type
def numeric_type_fits(small_type, large_type):
return widest_numeric_type(small_type, large_type) == large_type
def independent_spanning_type(type1, type2):
# Return a type assignable independently from both type1 and
# type2, but do not require any interoperability between the two.
# For example, in "True * 2", it is safe to assume an integer
# result type (so spanning_type() will do the right thing),
# whereas "x = True or 2" must evaluate to a type that can hold
# both a boolean value and an integer, so this function works
# better.
if type1.is_reference ^ type2.is_reference:
if type1.is_reference:
type1 = type1.ref_base_type
else:
type2 = type2.ref_base_type
if type1 == type2:
return type1
elif (type1 is c_bint_type or type2 is c_bint_type) and (type1.is_numeric and type2.is_numeric):
# special case: if one of the results is a bint and the other
# is another C integer, we must prevent returning a numeric
# type so that we do not lose the ability to coerce to a
# Python bool if we have to.
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return error_type
return span_type
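# For example, independent_spanning_type(c_bint_type, c_int_type) returns
# py_object_type so a later coercion to Python bool stays possible, whereas
# spanning_type() below would return a plain C integer type for the same pair.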
def spanning_type(type1, type2):
# Return a type assignable from both type1 and type2, or
# py_object_type if no better type is found. Assumes that the
# code that calls this will try a coercion afterwards, which will
# fail if the types cannot actually coerce to a py_object_type.
if type1 == type2:
return type1
elif type1 is py_object_type or type2 is py_object_type:
return py_object_type
elif type1 is c_py_unicode_type or type2 is c_py_unicode_type:
# Py_UNICODE behaves more like a string than an int
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return py_object_type
return span_type
def _spanning_type(type1, type2):
if type1.is_numeric and type2.is_numeric:
return widest_numeric_type(type1, type2)
elif type1.is_builtin_type and type1.name == 'float' and type2.is_numeric:
return widest_numeric_type(c_double_type, type2)
elif type2.is_builtin_type and type2.name == 'float' and type1.is_numeric:
return widest_numeric_type(type1, c_double_type)
elif type1.is_extension_type and type2.is_extension_type:
return widest_extension_type(type1, type2)
elif type1.is_pyobject or type2.is_pyobject:
return py_object_type
elif type1.assignable_from(type2):
if type1.is_extension_type and type1.typeobj_is_imported():
# external types are unsafe, so we use PyObject instead
return py_object_type
return type1
elif type2.assignable_from(type1):
if type2.is_extension_type and type2.typeobj_is_imported():
# external types are unsafe, so we use PyObject instead
return py_object_type
return type2
elif type1.is_ptr and type2.is_ptr:
if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class:
common_base = widest_cpp_type(type1.base_type, type2.base_type)
if common_base:
return CPtrType(common_base)
# incompatible pointers, void* will do as a result
return c_void_ptr_type
else:
return None
def widest_extension_type(type1, type2):
if type1.typeobj_is_imported() or type2.typeobj_is_imported():
return py_object_type
while True:
if type1.subtype_of(type2):
return type2
elif type2.subtype_of(type1):
return type1
type1, type2 = type1.base_type, type2.base_type
if type1 is None or type2 is None:
return py_object_type
def widest_cpp_type(type1, type2):
    @cached_function
    def bases(type):
        result = set()
        for base in type.base_classes:
            result.add(base)
            result.update(bases(base))
        return result
common_bases = bases(type1).intersection(bases(type2))
common_bases_bases = reduce(set.union, [bases(b) for b in common_bases], set())
candidates = [b for b in common_bases if b not in common_bases_bases]
if len(candidates) == 1:
return candidates[0]
else:
# Fall back to void* for now.
return None
def simple_c_type(signed, longness, name):
# Find type descriptor for simple type given name and modifiers.
# Returns None if arguments don't make sense.
return modifiers_and_name_to_type.get((signed, longness, name))
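# For example: simple_c_type(1, 0, "int") is c_int_type,
# simple_c_type(0, 1, "int") is c_ulong_type, and unknown combinations
# such as simple_c_type(1, 0, "nonsense") return None.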
def parse_basic_type(name):
base = None
if name.startswith('p_'):
base = parse_basic_type(name[2:])
elif name.startswith('p'):
base = parse_basic_type(name[1:])
elif name.endswith('*'):
base = parse_basic_type(name[:-1])
if base:
return CPtrType(base)
#
basic_type = simple_c_type(1, 0, name)
if basic_type:
return basic_type
#
signed = 1
longness = 0
if name == 'Py_UNICODE':
signed = 0
elif name == 'Py_UCS4':
signed = 0
elif name == 'Py_hash_t':
signed = 2
elif name == 'Py_ssize_t':
signed = 2
elif name == 'ssize_t':
signed = 2
elif name == 'size_t':
signed = 0
else:
if name.startswith('u'):
name = name[1:]
signed = 0
elif (name.startswith('s') and
not name.startswith('short')):
name = name[1:]
signed = 2
longness = 0
while name.startswith('short'):
name = name.replace('short', '', 1).strip()
longness -= 1
while name.startswith('long'):
name = name.replace('long', '', 1).strip()
longness += 1
if longness != 0 and not name:
name = 'int'
return simple_c_type(signed, longness, name)
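# For example: parse_basic_type("p_int") and parse_basic_type("int*") both
# yield a pointer to c_int_type, while parse_basic_type("ulonglong") strips
# the 'u' prefix, counts the two 'long's and resolves to c_ulonglong_type.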
def _construct_type_from_base(cls, base_type, *args):
if base_type is error_type:
return error_type
return cls(base_type, *args)
def c_array_type(base_type, size):
# Construct a C array type.
return _construct_type_from_base(CArrayType, base_type, size)
def c_ptr_type(base_type):
# Construct a C pointer type.
if base_type.is_reference:
base_type = base_type.ref_base_type
return _construct_type_from_base(CPtrType, base_type)
def c_ref_type(base_type):
# Construct a C reference type
return _construct_type_from_base(CReferenceType, base_type)
def cpp_rvalue_ref_type(base_type):
# Construct a C++ rvalue reference type
return _construct_type_from_base(CppRvalueReferenceType, base_type)
def c_const_type(base_type):
# Construct a C const type.
return _construct_type_from_base(CConstType, base_type)
def c_const_or_volatile_type(base_type, is_const, is_volatile):
# Construct a C const/volatile type.
return _construct_type_from_base(CConstOrVolatileType, base_type, is_const, is_volatile)
def same_type(type1, type2):
return type1.same_as(type2)
def assignable_from(type1, type2):
return type1.assignable_from(type2)
def typecast(to_type, from_type, expr_code):
# Return expr_code cast to a C type which can be
# assigned to to_type, assuming its existing C type
# is from_type.
if (to_type is from_type or
(not to_type.is_pyobject and assignable_from(to_type, from_type))):
return expr_code
elif (to_type is py_object_type and from_type and
from_type.is_builtin_type and from_type.name != 'type'):
# no cast needed, builtins are PyObject* already
return expr_code
else:
#print "typecast: to", to_type, "from", from_type ###
return to_type.cast_code(expr_code)
def type_list_identifier(types):
return cap_length('__and_'.join(type_identifier(type) for type in types))
_special_type_characters = {
'__': '__dunder',
'const ': '__const_',
' ': '__space_',
'*': '__ptr',
'&': '__ref',
'&&': '__fwref',
'[': '__lArr',
']': '__rArr',
'<': '__lAng',
'>': '__rAng',
'(': '__lParen',
')': '__rParen',
',': '__comma_',
'...': '__EL',
'::': '__in_',
':': '__D',
}
_escape_special_type_characters = partial(re.compile(
# join substrings in reverse order to put longer matches first, e.g. "::" before ":"
" ?(%s) ?" % "|".join(re.escape(s) for s in sorted(_special_type_characters, reverse=True))
).sub, lambda match: _special_type_characters[match.group(1)])
def type_identifier(type, pyrex=False):
decl = type.empty_declaration_code(pyrex=pyrex)
return type_identifier_from_declaration(decl)
_type_identifier_cache = {}
def type_identifier_from_declaration(decl):
safe = _type_identifier_cache.get(decl)
if safe is None:
safe = decl
safe = re.sub(' +', ' ', safe)
safe = re.sub(' ?([^a-zA-Z0-9_]) ?', r'\1', safe)
safe = _escape_special_type_characters(safe)
safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda x: '__%X' % ord(x.group(0)), safe))
_type_identifier_cache[decl] = safe
return safe
def cap_length(s, max_prefix=63, max_len=1024):
if len(s) <= max_prefix:
return s
hash_prefix = hashlib.sha256(s.encode('ascii')).hexdigest()[:6]
return '%s__%s__etc' % (hash_prefix, s[:max_len-17])
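# For example, type_identifier_from_declaration("std::vector<int> &") escapes
# the special characters to something like "std__in_vector__lAngint__rAng__ref";
# results longer than 63 characters get a sha256-based prefix via cap_length().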
|
r1 = float(input('Enter the length of the first line segment: '))
r2 = float(input('Enter the length of the second line segment: '))
r3 = float(input('Enter the length of the third line segment: '))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
    print('These three line segments form a triangle')
else:
    print('These line segments do not form a triangle!')
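# Triangle inequality, worked example: sides 3, 4, 5 satisfy
# 3 < 4 + 5, 4 < 3 + 5 and 5 < 3 + 4, so they form a triangle;
# sides 1, 2, 10 fail because 10 >= 1 + 2.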
|
import unittest
from lpminimk3.__init__ import ButtonEvent
from lpminimk3.match import ButtonMatch
from tests._vlpminimk3 import create_virtual_launchpad
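# Expected messages below are raw 3-byte MIDI triples: a status byte (0xb0,
# Control Change, is used here for panel/top-row buttons; 0x90, Note On, for
# grid pads), the button's MIDI id, and a value byte (0x7f on press, 0x0 on release).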
class TestButtonMatch(unittest.TestCase):
def setUp(self):
self.lp = create_virtual_launchpad()
def tearDown(self):
self.lp.close()
def test_panel_contains_match_press(self):
self.lp.open()
match = ButtonMatch(self.lp.panel.buttons('up'), ButtonEvent.PRESS)
self.assertTrue(match.contains([0xb0, 0x5b, 0x7f]), 'No button matched.') # noqa
match = ButtonMatch(self.lp.panel.buttons('up', 'down', 'left'), ButtonEvent.PRESS) # noqa
self.assertTrue(match.contains([0xb0, 0x5b, 0x7f]), 'No button matched.') # noqa
self.assertTrue(match.contains([0xb0, 0x5c, 0x7f]), 'No button matched.') # noqa
self.assertTrue(match.contains([0xb0, 0x5d, 0x7f]), 'No button matched.') # noqa
self.assertFalse(match.contains([0xb0, 0x5d, 0x0]), 'Button matched, though it shouldn\'t.') # noqa
self.assertFalse(match.contains([0xb0, 0x5e, 0x7f]), 'Button matched, though it shouldn\'t.') # noqa
def test_panel_contains_match_release(self):
self.lp.open()
match = ButtonMatch(self.lp.panel.buttons('up'), ButtonEvent.RELEASE)
self.assertTrue(match.contains([0xb0, 0x5b, 0x0]), 'No button matched.') # noqa
match = ButtonMatch(self.lp.panel.buttons('up', 'down', 'left'), ButtonEvent.RELEASE) # noqa
self.assertTrue(match.contains([0xb0, 0x5b, 0x0]), 'No button matched.') # noqa
self.assertTrue(match.contains([0xb0, 0x5c, 0x0]), 'No button matched.') # noqa
self.assertTrue(match.contains([0xb0, 0x5d, 0x0]), 'No button matched.') # noqa
self.assertFalse(match.contains([0xb0, 0x5d, 0x7f]), 'Button matched, though it shouldn\'t.') # noqa
self.assertFalse(match.contains([0xb0, 0x5e, 0x0]), 'Button matched, though it shouldn\'t.') # noqa
def test_grid_contains_match_press(self):
self.lp.open()
match = ButtonMatch(self.lp.grid.buttons('0x0'), ButtonEvent.PRESS)
self.assertTrue(match.contains([0x90, 0x51, 0x7f]), 'No button matched.') # noqa
match = ButtonMatch(self.lp.panel.buttons('0x0', '1x0', '2x0'), ButtonEvent.PRESS) # noqa
self.assertTrue(match.contains([0x90, 0x51, 0x7f]), 'No button matched.') # noqa
self.assertTrue(match.contains([0x90, 0x52, 0x7f]), 'No button matched.') # noqa
self.assertTrue(match.contains([0x90, 0x53, 0x7f]), 'No button matched.') # noqa
self.assertFalse(match.contains([0x90, 0x53, 0x0]), 'Button matched, though it shouldn\'t.') # noqa
self.assertFalse(match.contains([0x90, 0x54, 0x7f]), 'Button matched, though it shouldn\'t.') # noqa
def test_grid_contains_match_release(self):
self.lp.open()
match = ButtonMatch(self.lp.grid.buttons('0x0'), ButtonEvent.RELEASE)
self.assertTrue(match.contains([0x90, 0x51, 0x0]), 'No button matched.') # noqa
match = ButtonMatch(self.lp.panel.buttons('0x0', '1x0', '2x0'), ButtonEvent.RELEASE) # noqa
self.assertTrue(match.contains([0x90, 0x51, 0x0]), 'No button matched.') # noqa
self.assertTrue(match.contains([0x90, 0x52, 0x0]), 'No button matched.') # noqa
self.assertTrue(match.contains([0x90, 0x53, 0x0]), 'No button matched.') # noqa
self.assertFalse(match.contains([0x90, 0x53, 0x7f]), 'Button matched, though it shouldn\'t.') # noqa
self.assertFalse(match.contains([0x90, 0x54, 0x0]), 'Button matched, though it shouldn\'t.') # noqa
|
from selenium import webdriver
from time import sleep
# Note: find_element_by_xpath is the Selenium 3 API; Selenium 4 removed it
# in favour of driver.find_element(By.XPATH, ...), see the sketch below.
driver = webdriver.Chrome()
driver.get('https://web.whatsapp.com/')
name = input('Enter the name of the user or group: ')
filepath = input('Enter your file path (image/video): ')
input('Press Enter after scanning the QR code')
# Open the chat with the given name.
user = driver.find_element_by_xpath('//span[@title = "{}"]'.format(name))
user.click()
# Open the attachment menu and feed the file path to the hidden file input.
attachment_box = driver.find_element_by_xpath('//div[@title = "Attach"]')
attachment_box.click()
image_box = driver.find_element_by_xpath(
    '//input[@accept="image/*,video/mp4,video/3gpp,video/quicktime"]')
image_box.send_keys(filepath)
# Give WhatsApp a moment to build the media preview, then send.
sleep(3)
send_button = driver.find_element_by_xpath('//span[@data-icon="send-light"]')
send_button.click()
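# Selenium 4 sketch (an assumption: requires selenium>=4; XPaths unchanged):
#     from selenium.webdriver.common.by import By
#     user = driver.find_element(By.XPATH, '//span[@title = "{}"]'.format(name))
# ...and likewise for the other find_element_by_xpath calls above.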
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import random
from kaolin.metrics import render
from kaolin.utils.testing import FLOAT_TYPES
@pytest.mark.parametrize('device,dtype', FLOAT_TYPES)
class TestRender:
@pytest.fixture(autouse=True)
def lhs_mask(self, device, dtype):
return torch.tensor([[[0., 0.2, 0.1, 1.],
[0.5, 0.5, 0.9, 0.9],
[0., 1., 1., 0.9],
[0.8, 0.7, 0.2, 0.1]],
[[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.],
[1., 1., 1., 1.]]],
dtype=dtype, device=device)
@pytest.fixture(autouse=True)
def rhs_mask(self, device, dtype):
return torch.tensor([[[0.1, 0.3, 0.3, 0.9],
[0.5, 0.5, 1., 0.3],
[0., 0.9, 0.9, 0.8],
[1., 1., 0., 0.]],
[[0.3, 0.6, 0.7, 0.7],
[0.8, 0.9, 0.9, 1.],
[1., 0.9, 0.9, 0.5],
[0.8, 0.7, 0.8, 0.5]]],
dtype=dtype, device=device)
def test_mask_iou(self, lhs_mask, rhs_mask, device, dtype):
loss = render.mask_iou(lhs_mask, rhs_mask)
assert torch.allclose(loss, torch.tensor([0.3105],
dtype=dtype, device=device))
|
# import python libraries
import os
import pdb
import numpy as np
from copy import deepcopy
import pandas as pd
import pickle
# import functions
import optimizer as O # stochastic gradient descent optimizer
from vi import value_iteration
from maxent_irl import *
from assembly_tasks import *
from import_qualtrics import get_qualtrics_survey
# ----------------------------------------------- Load data ---------------------------------------------------- #
# download data from qualtrics
learning_survey_id = "SV_8eoX63z06ZhVZRA"
data_path = os.path.dirname(__file__) + "/data/"
# get_qualtrics_survey(dir_save_survey=data_path, survey_id=learning_survey_id)
# load user data
demo_path = data_path + "Human-Robot Assembly - Learning.csv"
df = pd.read_csv(demo_path)
# pre-process feature value
def process_val(x):
if x == "1 (No effort at all)":
x = 1.1
elif x == "7 (A lot of effort)":
x = 6.9
else:
x = float(x)
return x
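# For example: process_val("1 (No effort at all)") -> 1.1,
# process_val("7 (A lot of effort)") -> 6.9 and process_val("4") -> 4.0;
# the endpoints are nudged off 1 and 7, presumably to keep ratings strictly
# inside the 1-7 scale.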
# load user ratings
def load_features(data, user_idx, feature_idx, action_idx):
fea_mat = []
for j in action_idx:
fea_vec = []
for k in feature_idx:
fea_col = k + str(j)
fea_val = process_val(data[fea_col][user_idx])
fea_vec.append(fea_val)
fea_mat.append(fea_vec)
return fea_mat
# ----------------------------------------------- Optimization -------------------------------------------------- #
# choose our parameter initialization strategy:
# initialize parameters with constant
init = O.Constant(1.0)
# choose our optimization strategy:
# we select exponentiated stochastic gradient descent with linear learning-rate decay
optim = O.ExpSga(lr=O.linear_decay(lr0=0.6))
# --------------------------------------------- User information ------------------------------------------------ #
rank_features = False
scale_weights = False
user_id = input("Enter user id: ")
print("=======================")
print("Calculating preference for user:", user_id)
idx = df.index[df['Q1'] == user_id][0]
canonical_survey_actions = [0, 3, 1, 4, 2, 5]
preferred_order = [df[q][idx] for q in ['Q9_1', 'Q9_2', 'Q9_3', 'Q9_4', 'Q9_5', 'Q9_6']]
canonical_demo = [a for _, a in sorted(zip(preferred_order, canonical_survey_actions))]
# user ratings for features
canonical_q, complex_q = ["Q6_", "Q7_"], ["Q13_", "Q14_"]
canonical_features = load_features(df, idx, canonical_q, [2, 4, 6, 3, 5, 7])
complex_features = load_features(df, idx, complex_q, [3, 8, 15, 16, 4, 9, 10, 11])
# ---------------------------------------- Training: Learn weights ---------------------------------------------- #
# initialize canonical task
C = CanonicalTask(canonical_features)
C.set_end_state(canonical_demo)
C.enumerate_states()
C.set_terminal_idx()
if rank_features:
C.convert_to_rankings()
# demonstrations
canonical_user_demo = [canonical_demo]
canonical_trajectories = get_trajectories(C.states, canonical_user_demo, C.transition)
print("Training ...")
# using abstract features
abstract_features = np.array([C.get_features(state) for state in C.states])
norm_abstract_features = abstract_features / np.linalg.norm(abstract_features, axis=0)
canonical_rewards_abstract, canonical_weights_abstract = maxent_irl(C, norm_abstract_features,
canonical_trajectories,
optim, init)
print("Weights have been learned for the canonical task! Fingers X-ed.")
print("Weights -", canonical_weights_abstract)
# scale weights
if scale_weights:
canonical_weights_abstract /= max(canonical_weights_abstract)
# ----------------------------------------- Testing: Predict complex -------------------------------------------- #
sample_complex_demo = [1, 3, 5, 0, 2, 2, 2, 2, 4, 4, 4, 4, 6, 6, 6, 6, 7]
complex_survey_actions = [0, 4, 1, 5, 6, 7, 2, 3]
action_counts = [1, 1, 4, 1, 4, 1, 4, 1]
preferred_order = [df[q][idx] for q in ['Q15_1', 'Q15_2', 'Q15_3', 'Q15_4', 'Q15_5', 'Q15_6', 'Q15_7', 'Q15_8']]
complex_demo = []
for _, a in sorted(zip(preferred_order, complex_survey_actions)):
complex_demo += [a]*action_counts[a]
# initialize complex task
X = ComplexTask(complex_features)
X.set_end_state(sample_complex_demo)
X.enumerate_states()
X.set_terminal_idx()
if rank_features:
X.convert_to_rankings()
# using abstract features
complex_abstract_features = np.array([X.get_features(state) for state in X.states])
complex_abstract_features /= np.linalg.norm(complex_abstract_features, axis=0)
# transfer rewards to complex task
transfer_rewards_abstract = complex_abstract_features.dot(canonical_weights_abstract)
# compute q-values for each state based on learned weights
qf_transfer, _, _ = value_iteration(X.states, X.actions, X.transition, transfer_rewards_abstract, X.terminal_idx)
# score for predicting the action based on transferred rewards based on abstract features
predict_sequence, predict_score = predict_trajectory(qf_transfer, X.states, [complex_demo], X.transition,
sensitivity=0.0, consider_options=False)
print("canonical : ", canonical_demo)
print("preference: ", complex_demo)
print("prediction: ", predict_sequence)
# save_path = data_path + "learned_models/"
# pickle.dump(qf_transfer, open(save_path + "q_values_" + user_id + ".p", "wb"))
# pickle.dump(X.states, open(save_path + "states_" + user_id + ".p", "wb"))
# print("Q-values have been saved for user " + user_id + ".")
|
import logging
import os
from abc import ABC, abstractmethod
from typing import Optional
from checkov.terraform.module_loading.content import ModuleContent
from checkov.terraform.module_loading.registry import module_loader_registry
# ModuleContent allows access to a directory containing module file via the `path()`
# function. Instances may be used in a `with` context to ensure temporary directories
# are removed, if applicable.
class ModuleLoader(ABC):
def __init__(self) -> None:
module_loader_registry.register(self)
self.logger = logging.getLogger(__name__)
self.module_source: str = ""
self.current_dir: str = ""
self.dest_dir: str = ""
self.external_modules_folder_name: str = ""
self.version = "latest"
self.is_external = True
self.inner_module: Optional[str] = None
self.root_dir = "" # root dir for storing external modules
@abstractmethod
def discover(self):
"""
        Discover parameters from the execution context of checkov, usually from environment variables.
"""
pass
def load(
self,
root_dir: str,
current_dir: str,
source: str,
source_version: Optional[str],
dest_dir: str,
external_modules_folder_name: str,
inner_module: Optional[str] = None,
) -> ModuleContent:
"""
This function provides an opportunity for the loader to load a module's content if it chooses to do so.
There are three resulting states that can occur when calling this function:
1) the loader can't handle the source type, in which case a ModuleContent is returned for which
the `loaded()` method will return False.
2) the loader can handle the source type and loading is successful, in which case a ModuleContent
object is returned for which `loaded()` returns True and which provides the directory containing
the module files
        3) the loader tried to load the module content but an error occurred, in which case an exception
           is raised.
        :param root_dir: root directory for storing external modules
        :param current_dir: Directory containing the reference to the module.
        :param source: the raw source string from the module's `source` attribute (e.g.,
                       "hashicorp/consul/aws" or "git::https://example.com/vpc.git?ref=v1.2.0")
        :param source_version: contains content from the module's `version` attribute, if provided
        :param dest_dir: where to save the downloaded module
        :param external_modules_folder_name: name of the folder in which downloaded modules are kept
        :param inner_module: optional path of a submodule inside the loaded module source
        :return: A ModuleContent object which may or may not be loaded.
"""
self.root_dir = root_dir
self.module_source = source
self.current_dir = current_dir
self.version = str(source_version)
self.dest_dir = dest_dir
self.external_modules_folder_name = external_modules_folder_name
self.inner_module = inner_module
if not self._is_matching_loader():
return ModuleContent(dir=None)
module_path = self._find_module_path()
if os.path.exists(module_path):
return ModuleContent(dir=module_path)
self.logger.debug(f"Using {self.__class__.__name__} attempting to get module "
f"{self.module_source if '@' not in self.module_source else self.module_source.split('@')[1]} "
f"version: {self.version}")
return self._load_module()
@abstractmethod
def _is_matching_loader(self) -> bool:
raise NotImplementedError()
@abstractmethod
def _load_module(self) -> ModuleContent:
raise NotImplementedError()
@abstractmethod
def _find_module_path(self) -> str:
raise NotImplementedError()
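# A minimal illustrative subclass (an assumption, not part of checkov itself):
# a loader that only handles module sources written as local relative paths.
# All names below are hypothetical; real loaders live elsewhere in checkov.
class _LocalPathLoaderSketch(ModuleLoader):
    def discover(self):
        # nothing to read from the environment for local paths
        pass

    def _is_matching_loader(self) -> bool:
        # treat relative sources such as "./modules/vpc" as local paths
        return self.module_source.startswith(("./", "../"))

    def _find_module_path(self) -> str:
        return os.path.normpath(os.path.join(self.current_dir, self.module_source))

    def _load_module(self) -> ModuleContent:
        # local modules need no downloading; just expose the directory
        return ModuleContent(dir=self._find_module_path())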
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for storing the skill data models."""
from constants import constants
from core.platform import models
from google.appengine.ext import ndb
(base_models, user_models,) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.user])
class SkillSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a skill snapshot."""
pass
class SkillSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a skill snapshot."""
pass
class SkillModel(base_models.VersionedModel):
"""Model for storing Skills.
This class should only be imported by the skill services file
and the skill model test file.
"""
SNAPSHOT_METADATA_CLASS = SkillSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = SkillSnapshotContentModel
ALLOW_REVERT = False
# The description of the skill.
description = ndb.StringProperty(required=True, indexed=True)
# The schema version for each of the misconception dicts.
misconceptions_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# The schema version for each of the rubric dicts.
rubric_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# A list of misconceptions associated with the skill, in which each
# element is a dict.
misconceptions = ndb.JsonProperty(repeated=True, indexed=False)
# The rubrics for the skill that explain each difficulty level.
rubrics = ndb.JsonProperty(repeated=True, indexed=False)
# The ISO 639-1 code for the language this skill is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# The schema version for the skill_contents.
skill_contents_schema_version = ndb.IntegerProperty(
required=True, indexed=True)
# A dict representing the skill contents.
skill_contents = ndb.JsonProperty(indexed=False)
# The id to be used by the next misconception added.
next_misconception_id = ndb.IntegerProperty(required=True, indexed=False)
# The id that the skill is merged into, in case the skill has been
# marked as duplicate to another one and needs to be merged.
# This is an optional field.
superseding_skill_id = ndb.StringProperty(indexed=True)
# A flag indicating whether deduplication is complete for this skill.
# It will initially be False, and set to true only when there is a value
# for superseding_skill_id and the merge was completed.
all_questions_merged = ndb.BooleanProperty(indexed=True, required=True)
@classmethod
def get_merged_skills(cls):
"""Returns the skill models which have been merged.
        Returns:
            list(SkillModel). List of skill models which have been merged.
"""
return [skill for skill in cls.query() if (
skill.superseding_skill_id is not None and (
len(skill.superseding_skill_id) > 0))]
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(SkillModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
skill_rights = SkillRightsModel.get_by_id(self.id)
status = ''
if skill_rights.skill_is_private:
status = constants.ACTIVITY_STATUS_PRIVATE
else:
status = constants.ACTIVITY_STATUS_PUBLIC
skill_commit_log_entry = SkillCommitLogEntryModel.create(
self.id, self.version, committer_id, committer_username,
commit_type, commit_message, commit_cmds,
status, False
)
skill_commit_log_entry.skill_id = self.id
skill_commit_log_entry.put()
class SkillCommitLogEntryModel(base_models.BaseCommitLogEntryModel):
"""Log of commits to skills.
A new instance of this model is created and saved every time a commit to
SkillModel occurs.
The id for this model is of the form
'skill-{{SKILL_ID}}-{{SKILL_VERSION}}'.
"""
# The id of the skill being edited.
skill_id = ndb.StringProperty(indexed=True, required=True)
@classmethod
def _get_instance_id(cls, skill_id, version):
"""This function returns the generated id for the get_commit function
in the parent class.
Args:
skill_id: str. The id of the skill being edited.
version: int. The version number of the skill after the commit.
Returns:
str. The commit id with the skill id and version number.
"""
return 'skill-%s-%s' % (skill_id, version)
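# For example (hypothetical ids): SkillCommitLogEntryModel._get_instance_id('skill1', 2)
# returns 'skill-skill1-2', matching the id format documented on the class above.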
class SkillSummaryModel(base_models.BaseModel):
"""Summary model for an Oppia Skill.
This should be used whenever the content blob of the skill is not
needed (e.g. search results, etc).
A SkillSummaryModel instance stores the following information:
id, description, language_code, last_updated, created_on, version.
The key of each instance is the skill id.
"""
# The description of the skill.
description = ndb.StringProperty(required=True, indexed=True)
# The number of misconceptions associated with the skill.
misconception_count = ndb.IntegerProperty(required=True, indexed=True)
# The number of worked examples in the skill.
worked_examples_count = ndb.IntegerProperty(required=True, indexed=True)
# The ISO 639-1 code for the language this skill is written in.
language_code = ndb.StringProperty(required=True, indexed=True)
# Time when the skill model was last updated (not to be
# confused with last_updated, which is the time when the
# skill *summary* model was last updated).
skill_model_last_updated = ndb.DateTimeProperty(required=True, indexed=True)
# Time when the skill model was created (not to be confused
# with created_on, which is the time when the skill *summary*
# model was created).
skill_model_created_on = ndb.DateTimeProperty(required=True, indexed=True)
version = ndb.IntegerProperty(required=True)
class SkillRightsSnapshotMetadataModel(base_models.BaseSnapshotMetadataModel):
"""Storage model for the metadata for a skill rights snapshot."""
pass
class SkillRightsSnapshotContentModel(base_models.BaseSnapshotContentModel):
"""Storage model for the content of a skill rights snapshot."""
pass
class SkillRightsModel(base_models.VersionedModel):
"""Storage model for the rights related to a skill.
The id of each instance is the id of the corresponding skill.
"""
SNAPSHOT_METADATA_CLASS = SkillRightsSnapshotMetadataModel
SNAPSHOT_CONTENT_CLASS = SkillRightsSnapshotContentModel
ALLOW_REVERT = False
    # The user_id of the creator of this skill.
creator_id = ndb.StringProperty(indexed=True, required=True)
# Whether the skill is private.
skill_is_private = ndb.BooleanProperty(
indexed=True, required=True, default=True)
def _trusted_commit(
self, committer_id, commit_type, commit_message, commit_cmds):
"""Record the event to the commit log after the model commit.
Note that this extends the superclass method.
Args:
committer_id: str. The user_id of the user who committed the
change.
commit_type: str. The type of commit. Possible values are in
core.storage.base_models.COMMIT_TYPE_CHOICES.
commit_message: str. The commit description message.
commit_cmds: list(dict). A list of commands, describing changes
made in this model, which should give sufficient information to
reconstruct the commit. Each dict always contains:
cmd: str. Unique command.
and then additional arguments for that command.
"""
super(SkillRightsModel, self)._trusted_commit(
committer_id, commit_type, commit_message, commit_cmds)
committer_user_settings_model = (
user_models.UserSettingsModel.get_by_id(committer_id))
committer_username = (
committer_user_settings_model.username
if committer_user_settings_model else '')
skill_rights = SkillRightsModel.get_by_id(self.id)
status = ''
if skill_rights.skill_is_private:
status = constants.ACTIVITY_STATUS_PRIVATE
else:
status = constants.ACTIVITY_STATUS_PUBLIC
SkillCommitLogEntryModel(
id=('rights-%s-%s' % (self.id, self.version)),
user_id=committer_id,
username=committer_username,
skill_id=self.id,
commit_type=commit_type,
commit_message=commit_message,
commit_cmds=commit_cmds,
version=None,
post_commit_status=status,
post_commit_community_owned=False,
post_commit_is_private=skill_rights.skill_is_private
).put()
@classmethod
def get_unpublished_by_creator_id(cls, user_id):
"""This function returns all skill rights that correspond to skills
        that are private and were created by the user with the provided user ID.
Args:
user_id: str. The user ID of the user that created the skill rights
being fetched.
Returns:
list(SkillRightsModel). A list of skill rights models that are
private and were created by the user with the provided
user ID.
"""
return cls.query(
cls.creator_id == user_id,
cls.skill_is_private == True, # pylint: disable=singleton-comparison
cls.deleted == False) # pylint: disable=singleton-comparison
@classmethod
def get_unpublished(cls):
"""This function returns all skill rights that correspond to skills
that are private.
Returns:
list(SkillRightsModel). A list of skill rights models that are
private.
"""
return cls.query(cls.skill_is_private == True, cls.deleted == False) # pylint: disable=singleton-comparison
|
def swapFileData():
    """Swap the contents of two text files supplied by the user."""
    file1 = input("Enter the first file name: ")
    file2 = input("Enter the second file name: ")
with open(file1, 'r') as a:
data_a = a.read()
with open(file2, 'r') as b:
data_b = b.read()
with open(file1, 'w') as a:
a.write(data_b)
with open(file2, 'w') as b:
b.write(data_a)
swapFileData()
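# Example: if a.txt contains "foo" and b.txt contains "bar", entering
# "a.txt" and "b.txt" at the prompts leaves "bar" in a.txt and "foo" in b.txt.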
|
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_devops_deploy_artifact
short_description: Manage a DeployArtifact resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a DeployArtifact resource in Oracle Cloud Infrastructure
- For I(state=present), creates a new deployment artifact.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
description:
description:
- Optional description about the deployment artifact.
- This parameter is updatable.
type: str
display_name:
description:
- Deployment artifact display name. Avoid entering confidential information.
- Required for create, update, delete when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is set.
- This parameter is updatable when C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["name"]
deploy_artifact_type:
description:
- Type of the deployment artifact.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
deploy_artifact_source:
description:
- ""
- Required for create using I(state=present).
- This parameter is updatable.
type: dict
suboptions:
deploy_artifact_source_type:
description:
- Specifies types of artifact sources.
type: str
choices:
- "GENERIC_ARTIFACT"
- "OCIR"
- "INLINE"
required: true
repository_id:
description:
- The OCID of a repository
- Required when deploy_artifact_source_type is 'GENERIC_ARTIFACT'
type: str
deploy_artifact_path:
description:
- Specifies the artifact path in the repository.
- Required when deploy_artifact_source_type is 'GENERIC_ARTIFACT'
type: str
deploy_artifact_version:
description:
- Users can set this as a placeholder value that refers to a pipeline parameter, for example, ${appVersion}.
- Required when deploy_artifact_source_type is 'GENERIC_ARTIFACT'
type: str
image_uri:
description:
- "Specifies OCIR Image Path - optionally include tag."
- Required when deploy_artifact_source_type is 'OCIR'
type: str
image_digest:
description:
- Specifies image digest for the version of the image.
- Applicable when deploy_artifact_source_type is 'OCIR'
type: str
base64_encoded_content:
description:
- base64 Encoded String
- Required when deploy_artifact_source_type is 'INLINE'
type: str
argument_substitution_mode:
description:
- Mode for artifact parameter substitution.
- Required for create using I(state=present).
- This parameter is updatable.
type: str
project_id:
description:
- The OCID of a project.
- Required for create using I(state=present).
type: str
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"bar-key\\": \\"value\\"}`"
- This parameter is updatable.
type: dict
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
- This parameter is updatable.
type: dict
deploy_artifact_id:
description:
- Unique artifact identifier.
- Required for update using I(state=present) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
- Required for delete using I(state=absent) when environment variable C(OCI_USE_NAME_AS_IDENTIFIER) is not set.
type: str
aliases: ["id"]
state:
description:
- The state of the DeployArtifact.
- Use I(state=present) to create or update a DeployArtifact.
- Use I(state=absent) to delete a DeployArtifact.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create deploy_artifact
oci_devops_deploy_artifact:
# required
deploy_artifact_type: deploy_artifact_type_example
deploy_artifact_source:
# required
deploy_artifact_source_type: GENERIC_ARTIFACT
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
deploy_artifact_path: deploy_artifact_path_example
deploy_artifact_version: deploy_artifact_version_example
argument_substitution_mode: argument_substitution_mode_example
project_id: "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
display_name: display_name_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update deploy_artifact
oci_devops_deploy_artifact:
# required
deploy_artifact_id: "ocid1.deployartifact.oc1..xxxxxxEXAMPLExxxxxx"
# optional
description: description_example
display_name: display_name_example
deploy_artifact_type: deploy_artifact_type_example
deploy_artifact_source:
# required
deploy_artifact_source_type: GENERIC_ARTIFACT
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
deploy_artifact_path: deploy_artifact_path_example
deploy_artifact_version: deploy_artifact_version_example
argument_substitution_mode: argument_substitution_mode_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Update deploy_artifact using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_devops_deploy_artifact:
# required
display_name: display_name_example
# optional
description: description_example
deploy_artifact_type: deploy_artifact_type_example
deploy_artifact_source:
# required
deploy_artifact_source_type: GENERIC_ARTIFACT
repository_id: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
deploy_artifact_path: deploy_artifact_path_example
deploy_artifact_version: deploy_artifact_version_example
argument_substitution_mode: argument_substitution_mode_example
freeform_tags: {'Department': 'Finance'}
defined_tags: {'Operations': {'CostCenter': 'US'}}
- name: Delete deploy_artifact
oci_devops_deploy_artifact:
# required
deploy_artifact_id: "ocid1.deployartifact.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
- name: Delete deploy_artifact using name (when environment variable OCI_USE_NAME_AS_IDENTIFIER is set)
oci_devops_deploy_artifact:
# required
display_name: display_name_example
state: absent
"""
RETURN = """
deploy_artifact:
description:
- Details of the DeployArtifact resource acted upon by the current operation
returned: on success
type: complex
contains:
id:
description:
- Unique identifier that is immutable on creation.
returned: on success
type: str
sample: "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx"
description:
description:
- Optional description about the artifact to be deployed.
returned: on success
type: str
sample: description_example
display_name:
description:
- Deployment artifact identifier, which can be renamed and is not necessarily unique. Avoid entering confidential information.
returned: on success
type: str
sample: display_name_example
project_id:
description:
- The OCID of a project.
returned: on success
type: str
sample: "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx"
compartment_id:
description:
- The OCID of a compartment.
returned: on success
type: str
sample: "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx"
deploy_artifact_type:
description:
- Type of the deployment artifact.
returned: on success
type: str
sample: DEPLOYMENT_SPEC
argument_substitution_mode:
description:
- Mode for artifact parameter substitution.
returned: on success
type: str
sample: NONE
deploy_artifact_source:
description:
- ""
returned: on success
type: complex
contains:
deploy_artifact_source_type:
description:
- Specifies types of artifact sources.
returned: on success
type: str
sample: INLINE
repository_id:
description:
- The OCID of a repository
returned: on success
type: str
sample: "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx"
deploy_artifact_path:
description:
- Specifies the artifact path in the repository.
returned: on success
type: str
sample: deploy_artifact_path_example
deploy_artifact_version:
description:
- Users can set this as a placeholder value that refers to a pipeline parameter, for example, ${appVersion}.
returned: on success
type: str
sample: deploy_artifact_version_example
base64_encoded_content:
description:
- base64 Encoded String
returned: on success
type: str
sample: "null"
image_uri:
description:
- "Specifies OCIR Image Path - optionally include tag."
returned: on success
type: str
sample: image_uri_example
image_digest:
description:
- Specifies image digest for the version of the image.
returned: on success
type: str
sample: image_digest_example
time_created:
description:
- Time the deployment artifact was created. Format defined by L(RFC3339,https://datatracker.ietf.org/doc/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
time_updated:
description:
- Time the deployment artifact was updated. Format defined by L(RFC3339,https://datatracker.ietf.org/doc/html/rfc3339).
returned: on success
type: str
sample: "2013-10-20T19:20:30+01:00"
lifecycle_state:
description:
- Current state of the deployment artifact.
returned: on success
type: str
sample: CREATING
lifecycle_details:
description:
- A detailed message describing the current state. For example, can be used to provide actionable information for a resource in Failed state.
returned: on success
type: str
sample: lifecycle_details_example
freeform_tags:
description:
- "Simple key-value pair that is applied without any predefined name, type or scope. Exists for cross-compatibility only. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"bar-key\\": \\"value\\"}`"
returned: on success
type: dict
sample: {'Department': 'Finance'}
defined_tags:
description:
- "Defined tags for this resource. Each key is predefined and scoped to a namespace. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"foo-namespace\\": {\\"bar-key\\": \\"value\\"}}`"
returned: on success
type: dict
sample: {'Operations': {'CostCenter': 'US'}}
system_tags:
description:
- "Usage of system tag keys. These predefined keys are scoped to namespaces. See L(Resource
Tags,https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm). Example: `{\\"orcl-cloud\\": {\\"free-tier-retained\\":
\\"true\\"}}`"
returned: on success
type: dict
sample: {}
sample: {
"id": "ocid1.resource.oc1..xxxxxxEXAMPLExxxxxx",
"description": "description_example",
"display_name": "display_name_example",
"project_id": "ocid1.project.oc1..xxxxxxEXAMPLExxxxxx",
"compartment_id": "ocid1.compartment.oc1..xxxxxxEXAMPLExxxxxx",
"deploy_artifact_type": "DEPLOYMENT_SPEC",
"argument_substitution_mode": "NONE",
"deploy_artifact_source": {
"deploy_artifact_source_type": "INLINE",
"repository_id": "ocid1.repository.oc1..xxxxxxEXAMPLExxxxxx",
"deploy_artifact_path": "deploy_artifact_path_example",
"deploy_artifact_version": "deploy_artifact_version_example",
"base64_encoded_content": null,
"image_uri": "image_uri_example",
"image_digest": "image_digest_example"
},
"time_created": "2013-10-20T19:20:30+01:00",
"time_updated": "2013-10-20T19:20:30+01:00",
"lifecycle_state": "CREATING",
"lifecycle_details": "lifecycle_details_example",
"freeform_tags": {'Department': 'Finance'},
"defined_tags": {'Operations': {'CostCenter': 'US'}},
"system_tags": {}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
try:
from oci.devops import DevopsClient
from oci.devops.models import CreateDeployArtifactDetails
from oci.devops.models import UpdateDeployArtifactDetails
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class DeployArtifactHelperGen(OCIResourceHelperBase):
"""Supported operations: create, update, get, list and delete"""
def get_module_resource_id_param(self):
return "deploy_artifact_id"
def get_module_resource_id(self):
return self.module.params.get("deploy_artifact_id")
def get_get_fn(self):
return self.client.get_deploy_artifact
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_deploy_artifact,
deploy_artifact_id=self.module.params.get("deploy_artifact_id"),
)
def get_required_kwargs_for_list(self):
return dict()
def get_optional_kwargs_for_list(self):
optional_list_method_params = ["project_id", "display_name"]
return dict(
(param, self.module.params[param])
for param in optional_list_method_params
if self.module.params.get(param) is not None
and (
self._use_name_as_identifier()
or (
not self.module.params.get("key_by")
or param in self.module.params.get("key_by")
)
)
)
def list_resources(self):
required_kwargs = self.get_required_kwargs_for_list()
optional_kwargs = self.get_optional_kwargs_for_list()
kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
return oci_common_utils.list_all_resources(
self.client.list_deploy_artifacts, **kwargs
)
def get_create_model_class(self):
return CreateDeployArtifactDetails
def create_resource(self):
create_details = self.get_create_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.create_deploy_artifact,
call_fn_args=(),
call_fn_kwargs=dict(create_deploy_artifact_details=create_details,),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.CREATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def get_update_model_class(self):
return UpdateDeployArtifactDetails
def update_resource(self):
update_details = self.get_update_model()
return oci_wait_utils.call_and_wait(
call_fn=self.client.update_deploy_artifact,
call_fn_args=(),
call_fn_kwargs=dict(
deploy_artifact_id=self.module.params.get("deploy_artifact_id"),
update_deploy_artifact_details=update_details,
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.UPDATE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
def delete_resource(self):
return oci_wait_utils.call_and_wait(
call_fn=self.client.delete_deploy_artifact,
call_fn_args=(),
call_fn_kwargs=dict(
deploy_artifact_id=self.module.params.get("deploy_artifact_id"),
),
waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
operation=oci_common_utils.DELETE_OPERATION_KEY,
waiter_client=self.get_waiter_client(),
resource_helper=self,
wait_for_states=oci_common_utils.get_work_request_completed_states(),
)
DeployArtifactHelperCustom = get_custom_class("DeployArtifactHelperCustom")
class ResourceHelper(DeployArtifactHelperCustom, DeployArtifactHelperGen):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec(
supports_create=True, supports_wait=True
)
module_args.update(
dict(
description=dict(type="str"),
display_name=dict(aliases=["name"], type="str"),
deploy_artifact_type=dict(type="str"),
deploy_artifact_source=dict(
type="dict",
options=dict(
deploy_artifact_source_type=dict(
type="str",
required=True,
choices=["GENERIC_ARTIFACT", "OCIR", "INLINE"],
),
repository_id=dict(type="str"),
deploy_artifact_path=dict(type="str"),
deploy_artifact_version=dict(type="str"),
image_uri=dict(type="str"),
image_digest=dict(type="str"),
base64_encoded_content=dict(type="str"),
),
),
argument_substitution_mode=dict(type="str"),
project_id=dict(type="str"),
freeform_tags=dict(type="dict"),
defined_tags=dict(type="dict"),
deploy_artifact_id=dict(aliases=["id"], type="str"),
state=dict(type="str", default="present", choices=["present", "absent"]),
)
)
module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_helper = ResourceHelper(
module=module,
resource_type="deploy_artifact",
service_client_class=DevopsClient,
namespace="devops",
)
result = dict(changed=False)
if resource_helper.is_delete_using_name():
result = resource_helper.delete_using_name()
elif resource_helper.is_delete():
result = resource_helper.delete()
elif resource_helper.is_update_using_name():
result = resource_helper.update_using_name()
elif resource_helper.is_update():
result = resource_helper.update()
elif resource_helper.is_create():
result = resource_helper.create()
module.exit_json(**result)
if __name__ == "__main__":
main()
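# Hedged usage sketch (an Ansible task matching the argument spec above; the
# module name is assumed from the resource type, and the OCIDs and type/mode
# values are placeholders):
#
# - name: Create a deploy artifact
#   oci_devops_deploy_artifact:
#     project_id: "ocid1.devopsproject.oc1..<unique_id>"
#     display_name: "my_artifact"
#     deploy_artifact_type: "GENERIC_FILE"
#     argument_substitution_mode: "NONE"
#     deploy_artifact_source:
#       deploy_artifact_source_type: "INLINE"
#       base64_encoded_content: "<base64 payload>"
#     state: present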
|
import feedparser
import datetime
from app import db
from app.models.post import Post
from app.models.feed import Feed, APPROVED
def parseFeed(url):
return feedparser.parse(url)
def getFeed():
urls = Feed.query.filter_by(approved=APPROVED).all()
for url in urls:
feed = parseFeed(url.rss)
for item in feed.entries:
            # struct_time[:6] is (year, month, day, hour, minute, second);
            # slicing to 7 would pass tm_wday through as the microsecond field.
            pubtime = datetime.datetime(*(item.published_parsed[0:6]))
record = Post.query.filter_by(url=item.link).first()
if record is None:
post = Post(
title=item.title,
url=item.link,
date=pubtime,
owner=url.owner,
domain=url.html,
)
db.session.add(post)
db.session.commit()
print("News added from {}".format(item.link))
|
import time
from reinforcement_learning import gym
import numpy as np
import tensorflow as tf
from reinforcement_learning import logger
from reinforcement_learning.common import explained_variance, tf_util, ActorCriticRLModel, SetVerbosity, TensorboardWriter
from reinforcement_learning.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy
from reinforcement_learning.common.runners import AbstractEnvRunner
from reinforcement_learning.common.schedules import Scheduler
from reinforcement_learning.common.tf_util import mse, total_episode_reward_logger
from reinforcement_learning.common.math_util import safe_mean
def discount_with_dones(rewards, dones, gamma):
"""
Apply the discount value to the reward, where the environment is not done
:param rewards: ([float]) The rewards
:param dones: ([bool]) Whether an environment is done or not
:param gamma: (float) The discount value
:return: ([float]) The discounted rewards
"""
discounted = []
ret = 0 # Return: discounted reward
for reward, done in zip(rewards[::-1], dones[::-1]):
        ret = reward + gamma * ret * (1. - done)  # reset the running return at episode boundaries
discounted.append(ret)
return discounted[::-1]
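# Worked example (a sketch, not part of the original module): the running
# return is zeroed wherever dones is 1, so the terminal step keeps only its
# own reward:
#   discount_with_dones([1.0, 1.0, 1.0], [0, 0, 1], 0.99)
#   -> [2.9701, 1.99, 1.0]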
class A2C(ActorCriticRLModel):
"""
The A2C (Advantage Actor Critic) model class, https://arxiv.org/abs/1602.01783
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) Discount factor
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param vf_coef: (float) Value function coefficient for the loss calculation
:param ent_coef: (float) Entropy coefficient for the loss calculation
:param max_grad_norm: (float) The maximum value for the gradient clipping
:param learning_rate: (float) The learning rate
:param alpha: (float) RMSProp decay parameter (default: 0.99)
:param momentum: (float) RMSProp momentum parameter (default: 0.0)
:param epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)
(default: 1e-5)
:param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
'double_linear_con', 'middle_drop' or 'double_middle_drop')
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
(used only for loading)
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, n_steps=2048, vf_coef=0.5, ent_coef=0.001, max_grad_norm=0.5,
learning_rate=1e-3, alpha=0.99, momentum=0.0, epsilon=1e-5, lr_schedule='constant',
verbose=0, tensorboard_log=None, _init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=None):
self.n_steps = n_steps
self.gamma = gamma
self.vf_coef = vf_coef
self.ent_coef = ent_coef
self.max_grad_norm = max_grad_norm
self.alpha = alpha
self.momentum = momentum
self.epsilon = epsilon
self.lr_schedule = lr_schedule
self.learning_rate = learning_rate
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.learning_rate_ph = None
self.n_batch = None
self.actions_ph = None
self.advs_ph = None
self.rewards_ph = None
self.pg_loss = None
self.vf_loss = None
self.entropy = None
self.apply_backprop = None
self.train_model = None
self.step_model = None
self.proba_step = None
self.value = None
self.initial_state = None
self.learning_rate_schedule = None
self.summary = None
super(A2C, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
# if we are loading, it is possible the environment is not known, however the obs and action space are known
if _init_setup_model:
self.setup_model()
def _make_runner(self) -> AbstractEnvRunner:
return A2CRunner(self.env, self, n_steps=self.n_steps, gamma=self.gamma)
def _get_pretrain_placeholders(self):
policy = self.train_model
if isinstance(self.action_space, gym.spaces.Discrete):
return policy.obs_ph, self.actions_ph, policy.policy
return policy.obs_ph, self.actions_ph, policy.deterministic_action
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the A2C model must be an " \
"instance of common.policies.ActorCriticPolicy."
self.graph = tf.Graph()
with self.graph.as_default():
self.set_random_seed(self.seed)
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.n_batch = self.n_envs * self.n_steps
n_batch_step = None
n_batch_train = None
if issubclass(self.policy, RecurrentActorCriticPolicy):
n_batch_step = self.n_envs
n_batch_train = self.n_envs * self.n_steps
step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
with tf.compat.v1.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,
self.n_steps, n_batch_train, reuse=True, **self.policy_kwargs)
with tf.compat.v1.variable_scope("loss", reuse=False):
self.actions_ph = train_model.pdtype.sample_placeholder([None], name="action_ph")
self.advs_ph = tf.compat.v1.placeholder(tf.float32, [None], name="advs_ph")
self.rewards_ph = tf.compat.v1.placeholder(tf.float32, [None], name="rewards_ph")
self.learning_rate_ph = tf.compat.v1.placeholder(tf.float32, [], name="learning_rate_ph")
neglogpac = train_model.proba_distribution.neglogp(self.actions_ph)
self.entropy = tf.reduce_mean(input_tensor=train_model.proba_distribution.entropy())
self.pg_loss = tf.reduce_mean(input_tensor=self.advs_ph * neglogpac)
self.vf_loss = mse(tf.squeeze(train_model.value_flat), self.rewards_ph)
# https://arxiv.org/pdf/1708.04782.pdf#page=9, https://arxiv.org/pdf/1602.01783.pdf#page=4
# and https://github.com/dennybritz/reinforcement-learning/issues/34
# suggest to add an entropy component in order to improve exploration.
loss = self.pg_loss - self.entropy * self.ent_coef + self.vf_loss * self.vf_coef
tf.compat.v1.summary.scalar('entropy_loss', self.entropy)
tf.compat.v1.summary.scalar('policy_gradient_loss', self.pg_loss)
tf.compat.v1.summary.scalar('value_function_loss', self.vf_loss)
tf.compat.v1.summary.scalar('loss', loss)
self.params = tf_util.get_trainable_vars("model")
grads = tf.gradients(ys=loss, xs=self.params)
if self.max_grad_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
with tf.compat.v1.variable_scope("input_info", reuse=False):
tf.compat.v1.summary.scalar('discounted_rewards', tf.reduce_mean(input_tensor=self.rewards_ph))
tf.compat.v1.summary.scalar('learning_rate', tf.reduce_mean(input_tensor=self.learning_rate_ph))
tf.compat.v1.summary.scalar('advantage', tf.reduce_mean(input_tensor=self.advs_ph))
if self.full_tensorboard_log:
tf.compat.v1.summary.histogram('discounted_rewards', self.rewards_ph)
tf.compat.v1.summary.histogram('learning_rate', self.learning_rate_ph)
tf.compat.v1.summary.histogram('advantage', self.advs_ph)
if tf_util.is_image(self.observation_space):
tf.compat.v1.summary.image('observation', train_model.obs_ph)
else:
tf.compat.v1.summary.histogram('observation', train_model.obs_ph)
trainer = tf.compat.v1.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.alpha,
epsilon=self.epsilon, momentum=self.momentum)
self.apply_backprop = trainer.apply_gradients(grads)
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.proba_step = step_model.proba_step
self.value = step_model.value
self.initial_state = step_model.initial_state
tf.compat.v1.global_variables_initializer().run(session=self.sess)
self.summary = tf.compat.v1.summary.merge_all()
def _train_step(self, obs, states, rewards, masks, actions, values, update, writer=None):
"""
applies a training step to the model
:param obs: ([float]) The input observations
:param states: ([float]) The states (used for recurrent policies)
:param rewards: ([float]) The rewards from the environment
:param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)
:param actions: ([float]) The actions taken
:param values: ([float]) The logits values
:param update: (int) the current step iteration
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:return: (float, float, float) policy loss, value loss, policy entropy
"""
advs = rewards - values
        cur_lr = None
        # Step the learning-rate schedule once per sample in the batch.
        for _ in range(len(obs)):
            cur_lr = self.learning_rate_schedule.value()
        assert cur_lr is not None, "Error: the observation input array cannot be empty"
td_map = {self.train_model.obs_ph: obs, self.actions_ph: actions, self.advs_ph: advs,
self.rewards_ph: rewards, self.learning_rate_ph: cur_lr}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.dones_ph] = masks
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if self.full_tensorboard_log and (1 + update) % 10 == 0:
run_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)
run_metadata = tf.compat.v1.RunMetadata()
summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop],
td_map, options=run_options, run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % (update * self.n_batch))
else:
summary, policy_loss, value_loss, policy_entropy, _ = self.sess.run(
[self.summary, self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)
writer.add_summary(summary, update * self.n_batch)
else:
policy_loss, value_loss, policy_entropy, _ = self.sess.run(
[self.pg_loss, self.vf_loss, self.entropy, self.apply_backprop], td_map)
return policy_loss, value_loss, policy_entropy
def learn(self, total_timesteps, callback=None, log_interval=1, tb_log_name="A2C", reset_num_timesteps=True):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) as writer:
self._setup_learn()
self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps, schedule=self.lr_schedule)
t_start = time.time()
callback.on_training_start(locals(), globals())
for update in range(1, total_timesteps // self.n_batch + 1):
callback.on_rollout_start()
# true_reward is the reward without discount
rollout = self.runner.run(callback)
# unpack
obs, states, rewards, masks, actions, values, ep_infos, true_reward = rollout
callback.update_locals(locals())
callback.on_rollout_end()
# Early stopping due to the callback
if not self.runner.continue_training:
break
self.ep_info_buf.extend(ep_infos)
_, value_loss, policy_entropy = self._train_step(obs, states, rewards, masks, actions, values,
self.num_timesteps // self.n_batch, writer)
n_seconds = time.time() - t_start
fps = int((update * self.n_batch) / n_seconds)
if writer is not None:
total_episode_reward_logger(self.episode_reward,
true_reward.reshape((self.n_envs, self.n_steps)),
masks.reshape((self.n_envs, self.n_steps)),
writer, self.num_timesteps)
if self.verbose >= 1 and (update % log_interval == 0 or update == 1):
explained_var = explained_variance(values, rewards)
logger.record_tabular("nupdates", update)
logger.record_tabular("total_timesteps", self.num_timesteps)
logger.record_tabular("fps", fps)
logger.record_tabular("policy_entropy", float(policy_entropy))
logger.record_tabular("value_loss", float(value_loss))
logger.record_tabular("explained_variance", float(explained_var))
if len(self.ep_info_buf) > 0 and len(self.ep_info_buf[0]) > 0:
logger.logkv('ep_reward_mean', safe_mean([ep_info['r'] for ep_info in self.ep_info_buf]))
#logger.logkv('ep_len_mean', safe_mean([ep_info['l'] for ep_info in self.ep_info_buf]))
logger.dump_tabular()
callback.on_training_end()
return self
def save(self, save_path, cloudpickle=False):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"vf_coef": self.vf_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"alpha": self.alpha,
"epsilon": self.epsilon,
"lr_schedule": self.lr_schedule,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
"n_cpu_tf_sess": self.n_cpu_tf_sess,
"seed": self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
class A2CRunner(AbstractEnvRunner):
def __init__(self, env, model, n_steps=5, gamma=0.99):
"""
        A runner to learn the policy of an environment for an A2C model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) Discount factor
"""
super(A2CRunner, self).__init__(env=env, model=model, n_steps=n_steps)
self.gamma = gamma
def _run(self):
"""
Run a learning step of the model
:return: ([float], [float], [float], [bool], [float], [float])
observations, states, rewards, masks, actions, values
"""
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones = [], [], [], [], []
mb_states = self.states
ep_infos = []
for _ in range(self.n_steps):
actions, values, states, _ = self.model.step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_values.append(values)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, gym.spaces.Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
obs, rewards, dones, infos = self.env.step(clipped_actions)
self.model.num_timesteps += self.n_envs
if self.callback is not None:
# Abort training early
self.callback.update_locals(locals())
if self.callback.on_step() is False:
self.continue_training = False
# Return dummy values
return [None] * 8
            for info, reward in zip(infos, rewards):
                # The upstream implementation read info.get('episode') here,
                # which can be None; this fork records the raw step reward.
                ep_infos.append({'r': reward})
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
mb_dones.append(self.dones)
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=self.obs.dtype).swapaxes(1, 0).reshape(self.batch_ob_shape)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(0, 1)
mb_actions = np.asarray(mb_actions, dtype=self.env.action_space.dtype).swapaxes(0, 1)
mb_values = np.asarray(mb_values, dtype=np.float32).swapaxes(0, 1)
        mb_dones = np.asarray(mb_dones, dtype=bool).swapaxes(0, 1)  # np.bool was removed in NumPy 1.24
mb_masks = mb_dones[:, :-1]
mb_dones = mb_dones[:, 1:]
true_rewards = np.copy(mb_rewards)
last_values = self.model.value(self.obs, self.states, self.dones).tolist()
# discount/bootstrap off value fn
for n, (rewards, dones, value) in enumerate(zip(mb_rewards, mb_dones, last_values)):
rewards = rewards.tolist()
dones = dones.tolist()
if dones[-1] == 0:
rewards = discount_with_dones(rewards + [value], dones + [0], self.gamma)[:-1]
else:
rewards = discount_with_dones(rewards, dones, self.gamma)
mb_rewards[n] = rewards
# convert from [n_env, n_steps, ...] to [n_steps * n_env, ...]
mb_rewards = mb_rewards.reshape(-1, *mb_rewards.shape[2:])
mb_actions = mb_actions.reshape(-1, *mb_actions.shape[2:])
mb_values = mb_values.reshape(-1, *mb_values.shape[2:])
mb_masks = mb_masks.reshape(-1, *mb_masks.shape[2:])
true_rewards = true_rewards.reshape(-1, *true_rewards.shape[2:])
return mb_obs, mb_states, mb_rewards, mb_masks, mb_actions, mb_values, ep_infos, true_rewards
|
import pytest
from pathlib import Path
import numpy as np
from types import SimpleNamespace
from dask.delayed import Delayed
import dask
from datacube.testutils import (
mk_test_image,
gen_tiff_dataset,
)
from datacube.testutils.io import native_load, rio_slurp_xarray, rio_slurp
from datacube.utils.cog import write_cog, to_cog, _write_cog
def gen_test_data(prefix, dask=False):
w, h, dtype, nodata, ndw = 96, 64, 'int16', -999, 7
aa = mk_test_image(w, h, dtype, nodata, nodata_width=ndw)
ds, gbox = gen_tiff_dataset(
SimpleNamespace(name='aa', values=aa, nodata=nodata), prefix)
extras = {}
if dask:
extras.update(dask_chunks={'time': 1})
xx = native_load(ds, **extras)
return xx.aa.isel(time=0), ds
def test_cog_file(tmpdir):
pp = Path(str(tmpdir))
xx, ds = gen_test_data(pp)
# write to file
ff = write_cog(xx, pp / "cog.tif")
assert isinstance(ff, Path)
assert ff == pp / "cog.tif"
assert ff.exists()
yy = rio_slurp_xarray(pp / "cog.tif")
np.testing.assert_array_equal(yy.values, xx.values)
assert yy.geobox == xx.geobox
assert yy.nodata == xx.nodata
_write_cog(np.stack([xx.values, xx.values]),
xx.geobox,
pp / "cog-2-bands.tif",
overview_levels=[])
yy, mm = rio_slurp(pp / "cog-2-bands.tif")
assert mm.gbox == xx.geobox
assert yy.shape == (2, *xx.shape)
np.testing.assert_array_equal(yy[0], xx.values)
np.testing.assert_array_equal(yy[1], xx.values)
with pytest.raises(ValueError, match="Need 2d or 3d ndarray on input"):
_write_cog(xx.values.ravel(), xx.geobox, pp / "wontwrite.tif")
def test_cog_file_dask(tmpdir):
pp = Path(str(tmpdir))
xx, ds = gen_test_data(pp, dask=True)
assert dask.is_dask_collection(xx)
path = pp / "cog.tif"
ff = write_cog(xx, path)
assert isinstance(ff, Delayed)
assert path.exists() is False
assert ff.compute() == path
assert path.exists()
yy = rio_slurp_xarray(pp / "cog.tif")
np.testing.assert_array_equal(yy.values, xx.values)
assert yy.geobox == xx.geobox
assert yy.nodata == xx.nodata
def test_cog_mem(tmpdir):
pp = Path(str(tmpdir))
xx, ds = gen_test_data(pp)
# write to memory 1
bb = write_cog(xx, ":mem:")
assert isinstance(bb, bytes)
path = pp / "cog1.tiff"
with open(str(path), "wb") as f:
f.write(bb)
yy = rio_slurp_xarray(path)
np.testing.assert_array_equal(yy.values, xx.values)
assert yy.geobox == xx.geobox
assert yy.nodata == xx.nodata
# write to memory 2
bb = to_cog(xx)
assert isinstance(bb, bytes)
path = pp / "cog2.tiff"
with open(str(path), "wb") as f:
f.write(bb)
yy = rio_slurp_xarray(path)
np.testing.assert_array_equal(yy.values, xx.values)
assert yy.geobox == xx.geobox
assert yy.nodata == xx.nodata
def test_cog_mem_dask(tmpdir):
pp = Path(str(tmpdir))
xx, ds = gen_test_data(pp, dask=True)
# write to memory 1
bb = write_cog(xx, ":mem:")
assert isinstance(bb, Delayed)
bb = bb.compute()
assert isinstance(bb, bytes)
path = pp / "cog1.tiff"
with open(str(path), "wb") as f:
f.write(bb)
yy = rio_slurp_xarray(path)
np.testing.assert_array_equal(yy.values, xx.values)
assert yy.geobox == xx.geobox
assert yy.nodata == xx.nodata
# write to memory 2
bb = to_cog(xx)
assert isinstance(bb, Delayed)
bb = bb.compute()
assert isinstance(bb, bytes)
path = pp / "cog2.tiff"
with open(str(path), "wb") as f:
f.write(bb)
yy = rio_slurp_xarray(path)
np.testing.assert_array_equal(yy.values, xx.values)
assert yy.geobox == xx.geobox
assert yy.nodata == xx.nodata
@pytest.mark.parametrize("with_dask", [True, False])
def test_cog_no_crs(tmpdir, with_dask):
pp = Path(str(tmpdir))
xx, ds = gen_test_data(pp, dask=with_dask)
del xx.attrs['crs']
with pytest.raises(ValueError):
write_cog(xx, ":mem:")
with pytest.raises(ValueError):
to_cog(xx)
|
import argparse
import json
import logging
import os
import random
import time
from datasets.factory import DatasetFactory
from rankers.chain_ranker import set_up_experiment, ChainRanker
from tools import utils
logger = logging.getLogger()
class HyperparamSearch:
def __init__(self, config, tokenizer, num_eval):
self.num_evaluations = num_eval
self.config = config
self.tokenizer = tokenizer
def __call__(self, *args, **kwargs):
results = {}
sampler = self.sampler()
try:
for i in range(self.num_evaluations):
try:
logger.info('#### Hyperparam search iteration %s ####' % i)
logger.info('Output dir: %s' % self.config.output_dir)
hyperparams = next(sampler)
self.config.set_nearest_k_visible(hyperparams['nearest_k_visible'])
self.config.set_max_expl_length(hyperparams['max_expl_length'])
self.config.set_min_expl_length(hyperparams.get('min_expl_length', 1))
self.config.set_beam_size(hyperparams.get('beam_size', 1))
self.config.set_use_partial_expl_in_ranking(hyperparams.get('use_partial_expl_in_ranking', True))
self.config.set_beam_score_relative_size(hyperparams.get('beam_score_relative_size', False))
self.config.set_average_scores_over_beams(hyperparams.get('average_scores_over_beams', True))
self.config.set_average_scores_weighted(hyperparams.get('average_scores_weighted', False))
self.config.set_eval_use_logprobs(hyperparams.get('eval_use_logprobs', False))
self.config.set_beam_score_acc(hyperparams.get('beam_score_acc', 'last'))
self.config.set_beam_fact_selection(hyperparams.get('beam_fact_selection', 'greedy'))
self.config.set_beam_decode_top_p(hyperparams.get('beam_decode_top_p', 0.3))
self.config.set_average_scores_over_partials(hyperparams.get('average_scores_over_partials', False))
self.config.set_rank_rest(hyperparams.get('rank_rest', 'seq'))
self.config.set_unused_beams_second(hyperparams.get('facts_from_unused_beams_second', True))
self.config.set_rank_scored_but_unused_2nd(hyperparams.get('rank_scored_but_unused_2nd', True))
with open(os.path.join(self.config.output_dir, "training_args_{}.json".format(i)), 'w') as f:
json.dump(self.config.to_dict(), f, indent=4)
val_dataset = DatasetFactory.load_and_cache_dataset(self.config, self.tokenizer, self.config.val_qa_path,
valid=True)
ranker = ChainRanker(self.config, self.tokenizer)
logger.info('Evaluating hyperparam candidates: %s' % hyperparams)
it_result, _ = ranker.evaluate(val_dataset, epoch_step='debug', step=i)
logger.info('Results it %s: %s' % (i, it_result))
results[i] = {'hyperparams': hyperparams, 'results': it_result}
except KeyboardInterrupt:
time.sleep(5)
continue
except StopIteration:
break
except KeyboardInterrupt:
pass
self.save_results(results)
return results
@staticmethod
def _sample():
max_expl_length = random.randrange(3, 11)
min_expl_length = random.randrange(1, min(max_expl_length, 4))
yield {
'nearest_k_visible': 130,
'beam_size': random.choice((1, 3, 5, 10)),
'max_expl_length': max_expl_length,
'min_expl_length': min_expl_length,
'use_partial_expl_in_ranking': bool(random.randrange(0, 2)),
}
@staticmethod
def sampler():
# nearest_k = 290
params = [
]
for param in params:
yield param
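    # Hedged sketch: the grid above is empty, so the search currently yields
    # nothing. To draw random candidates instead, sampler() could delegate to
    # _sample(), e.g.:
    #
    #     @staticmethod
    #     def sampler():
    #         while True:
    #             for candidate in HyperparamSearch._sample():
    #                 yield candidate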
def save_results(self, results):
config = self.config
if not os.path.exists(config.output_dir):
os.makedirs(config.output_dir)
json_filename = 'hyperparams.json'
preds_eval_json = os.path.join(config.output_dir, json_filename)
logger.info('Saving results to %s' % preds_eval_json)
with open(preds_eval_json, 'w') as f:
json.dump(results, f, indent=4)
logger.info(
'Search results: %s'
% json.dumps(sorted([res for i, res in results.items()], key=lambda x: x['results']['map']), indent=4)
)
logger.info('Best results: %s' % json.dumps(self.find_best_hyperparams(results), indent=4))
@staticmethod
def find_best_hyperparams(results):
it_results, _ = max(
[(it_results, it_results['results']['map'])
for i, it_results in results.items()],
key=lambda x: x[1]
)
return it_results
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_path', type=str, default=None, )
parser.add_argument('--num_eval', type=int, default=20,)
args = parser.parse_args()
_, config, tokenizer = set_up_experiment(args.config_path)
utils.initialize_logging(config=config)
search = HyperparamSearch(config, tokenizer, args.num_eval)
res = search()
|
"""
Figure built manually in SVG.
Note: these require the svgwrite package (and optionally, the svglib package to convert to pdf).
"""
from __future__ import print_function
import subprocess
from svgwrite import Drawing
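# Optional PDF conversion, as mentioned in the module docstring (a hedged
# sketch; assumes svglib and reportlab are installed):
#   from svglib.svglib import svg2rlg
#   from reportlab.graphics import renderPDF
#   renderPDF.drawToFile(svg2rlg(filename), filename.replace('.svg', '.pdf'))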
filename = 'scaling_compute_totals_direct.svg'
color_phys = '#85C1E9'
color_scaled = '#EC7063'
main_font_size = 20
dwg = Drawing(filename, (2500, 2000), debug=True)
top_text = dwg.add(dwg.g(font_size=main_font_size, style="font-family: arial;"))
locs = ['NL Inputs',
'NL Outputs',
'NL Residuals',
'LN Inputs',
'LN Outputs',
'LN Residuals',
'Jacobian']
x = 650
y = 50
delta_x = 180
vertical_locs = []
for loc in locs:
top_text.add(dwg.text(loc, (x - len(loc)*4, y)))
vertical_locs.append(x)
x += delta_x
legend_text = dwg.add(dwg.g(font_size=main_font_size, style="font-family: arial;"))
legend_text.add(dwg.text('Phys', (x-1500, y-10), fill=color_phys))
legend_text.add(dwg.text('Scaled', (x-1500, y+20), fill=color_scaled))
v_lines = dwg.add(dwg.g(stroke_width=7.0, stroke=color_phys, fill='none'))
v_lines_scaled = dwg.add(dwg.g(stroke_width=7.0, stroke=color_scaled, fill='none'))
for loc in vertical_locs:
v_lines.add(dwg.line(start=(loc, y+15), end=(loc, 1200)))
extra_text = dwg.add(dwg.g(font_size=main_font_size - 3, style="font-family: arial;"))
extra_text.add(dwg.text('fwd', (vertical_locs[5] + 55, 820)))
locs = [('Problem.compute_totals()', None, []),
('_TotalJacInfo.compute_totals()', None, []),
('System.scaled_context_all()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['stagger']),
('Group._linearize()', None, []),
(' ExplicitComponent._linearize()', None, []),
(' ExplicitComponent._unscaled_context()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['stagger']),
(' Paraboloid.compute_partials()', ((4, 7), ), []),
(' ExplicitComponent._unscaled_context()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['italic', 'stagger']),
('AssembledJacobian._update()', None, []),
('System.scaled_context_all()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['italic', 'stagger']),
('DirectSolver._linearize()', None, []),
('scipy.sparse.linalg.splu', None, []),
('(Loop over right-hand-sides)', None, []),
(' _TotalJacInfo.single_input_setter()', [(0, 6)], []),
(' System.scaled_context_all()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['stagger']),
(' Group._solve_linear()', None, []),
(' DirectSolver.solve()', None, []),
(' Group._unscaled_context()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['stagger']),
(' scipy.sparse.linalg.splu.solve', ((6, 7), (7, 5)), ['stagger']),
(' Group._unscaled_context()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['italic', 'stagger']),
(' System.scaled_context_all()', ((0, 2), (0, 3), (0, 5), (0, 6)), ['italic', 'stagger']),
(' _TotalJacInfo.single_jac_setter()', [(5, 0)], []),
]
left_text = dwg.add(dwg.g(font_size=main_font_size, style="font-family: arial;"))
h_lines = dwg.add(dwg.g(stroke_width=0.7, stroke='black', fill='none'))
x = base_x = 40
y = base_y = 120
delta_y = 40
y_mids = []
for loc_tup in locs:
loc, arrows, form = loc_tup
offset = (len(loc) - len(loc.lstrip())) * 15
if 'italic' in form:
left_text.add(dwg.text(loc, (x + offset, y), style="font-style: italic;"))
else:
left_text.add(dwg.text(loc, (x + offset, y)))
y_mid = y - 5.0
y_mids.append(y_mid)
if arrows:
grid_pts = [x + 10 * len(loc)] + vertical_locs
num_arrow = len(arrows)
# Arrowheads
for i, arrow in enumerate(arrows):
start = grid_pts[arrow[0]]
end = grid_pts[arrow[1]]
if 'stagger' in form:
del_y = 3
y_use = y_mid - del_y + i*2*del_y/(num_arrow-1)
else:
y_use = y_mid
line = dwg.line(start=(start, y_use), end=(end, y_use))
h_lines.add(line)
# Arrowhead
if end > start:
ar_l = 10
else:
ar_l = -10
ar_h = 7.5
pts = ((end-ar_l, y_use+ar_h), (end, y_use), (end-ar_l, y_use-ar_h))
arrow = dwg.polyline(pts)
h_lines.add(arrow)
y += delta_y
# Phys vs scaling indicator
scaled_regions = [
(1, (2, 5)),
(1, (7, 9)),
(1, (14, 17)),
(1, (19, 20)),
(2, (2, 5)),
(2, (7, 9)),
(2, (14, 17)),
(2, (19, 20)),
(4, (2, 5)),
(4, (7, 9)),
(4, (14, 17)),
(4, (19, 20)),
(5, (2, 5)),
(5, (7, 9)),
(5, (14, 17)),
(5, (19, 20)),
]
for region in scaled_regions:
x_line = vertical_locs[region[0]]
start = base_y + region[1][0] * delta_y - 5
end = base_y + region[1][1] * delta_y - 5
line = dwg.line(start=(x_line, start), end=(x_line, end))
v_lines_scaled.add(line)
dwg.save()
print('done')
|
import collections
#from goodreads import apikey
from goodreads.client import GoodreadsClient
from goodreads.book import GoodreadsBook
from goodreads.author import GoodreadsAuthor
from goodreads.shelf import GoodreadsShelf
class TestBook():
@classmethod
def setup_class(cls):
client = GoodreadsClient('nTRaECtlyOjSmjJnLKRaiw', 'hCXp9GKlAe3sk1QIj0jXLF4UGLt9vfj54hDAfzHY')
#client.authenticate(apikey.oauth_access_token,
# apikey.oauth_access_token_secret)
cls.book = client.book('11870085')
def test_get_book(self):
assert isinstance(self.book, GoodreadsBook)
assert self.book.gid == '11870085'
assert repr(self.book) == 'The Fault in Our Stars'
def test_title(self):
assert self.book.title == 'The Fault in Our Stars'
def test_authors(self):
assert len(self.book.authors) == 1
assert isinstance(self.book.authors[0], GoodreadsAuthor)
def test_description(self):
assert self.book.description.startswith(
'"I fell in love the way you fall asleep: slowly, then all at once."')
def test_average_rating(self):
rating = float(self.book.average_rating)
assert rating >= 1.0
assert rating <= 5.0
def test_rating_dist(self):
assert self.book.rating_dist.startswith('5:')
def test_ratings_count(self):
assert self.book.ratings_count.isdigit()
def test_text_reviews_count(self):
assert self.book.text_reviews_count.isdigit()
def test_num_pages(self):
assert self.book.num_pages.isdigit()
def test_popular_shelves(self):
assert all(isinstance(shelf, GoodreadsShelf)
for shelf in self.book.popular_shelves)
def test_work(self):
assert type(self.book.work) == collections.OrderedDict
assert self.book.work['id']['#text'] == '16827462'
def test_series_works(self):
assert self.book.series_works is None
def test_publication_date(self):
assert self.book.publication_date == ('1', '10', '2012')
def test_publisher(self):
assert self.book.publisher == 'Dutton Books'
def test_language_code(self):
assert self.book.language_code == 'eng'
def test_edition_information(self):
assert self.book.edition_information is None
def test_image_url(self):
assert self.book.image_url == 'https://d2arxad8u2l0g7.cloudfront.net/books/1360206420m/11870085.jpg'
def test_small_image_url(self):
assert self.book.small_image_url == 'https://d2arxad8u2l0g7.cloudfront.net/books/1360206420s/11870085.jpg'
def test_is_ebook(self):
assert self.book.is_ebook == 'false'
def test_format(self):
assert self.book.format == 'Hardcover'
def test_isbn(self):
assert self.book.isbn == '0525478817'
def test_isbn13(self):
assert self.book.isbn13 == '9780525478812'
def test_link(self):
assert self.book.link == 'https://www.goodreads.com/book/show/11870085-the-fault-in-our-stars'
def test_reviews_widget(self):
assert self.book.reviews_widget.startswith('<style>')
assert self.book.reviews_widget.endswith('</div>')
def test_similar_books(self):
assert all(isinstance(b, GoodreadsBook)
for b in self.book.similar_books)
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Sequence, Tuple, Union
import torch
from pytorch_lightning.metrics.functional.precision_recall_curve import (
_binary_clf_curve,
_precision_recall_curve_update,
)
def _roc_update(
preds: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
pos_label: Optional[int] = None,
) -> Tuple[torch.Tensor, torch.Tensor, int, int]:
return _precision_recall_curve_update(preds, target, num_classes, pos_label)
def _roc_compute(
preds: torch.Tensor,
target: torch.Tensor,
num_classes: int,
pos_label: int,
sample_weights: Optional[Sequence] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[List[torch.Tensor], List[torch.Tensor],
List[torch.Tensor]]]:
if num_classes == 1:
fps, tps, thresholds = _binary_clf_curve(
preds=preds, target=target, sample_weights=sample_weights, pos_label=pos_label
)
# Add an extra threshold position
# to make sure that the curve starts at (0, 0)
tps = torch.cat([torch.zeros(1, dtype=tps.dtype, device=tps.device), tps])
fps = torch.cat([torch.zeros(1, dtype=fps.dtype, device=fps.device), fps])
thresholds = torch.cat([thresholds[0][None] + 1, thresholds])
if fps[-1] <= 0:
raise ValueError("No negative samples in targets, false positive value should be meaningless")
fpr = fps / fps[-1]
if tps[-1] <= 0:
raise ValueError("No positive samples in targets, true positive value should be meaningless")
tpr = tps / tps[-1]
return fpr, tpr, thresholds
# Recursively call per class
fpr, tpr, thresholds = [], [], []
for c in range(num_classes):
preds_c = preds[:, c]
res = roc(
preds=preds_c,
target=target,
num_classes=1,
pos_label=c,
sample_weights=sample_weights,
)
fpr.append(res[0])
tpr.append(res[1])
thresholds.append(res[2])
return fpr, tpr, thresholds
def roc(
preds: torch.Tensor,
target: torch.Tensor,
num_classes: Optional[int] = None,
pos_label: Optional[int] = None,
sample_weights: Optional[Sequence] = None,
) -> Union[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[List[torch.Tensor], List[torch.Tensor],
List[torch.Tensor]]]:
"""
Computes the Receiver Operating Characteristic (ROC).
Args:
preds: predictions from model (logits or probabilities)
target: ground truth values
        num_classes: integer with number of classes. Not necessary to provide
            for binary problems.
        pos_label: integer determining the positive class. Default is ``None``,
            which for binary problems translates to 1. For multiclass problems
            this argument should not be set, as it is iteratively changed over
            the range [0, num_classes-1]
sample_weights: sample weights for each data point
Returns:
3-element tuple containing
fpr:
tensor with false positive rates.
If multiclass, this is a list of such tensors, one for each class.
tpr:
tensor with true positive rates.
If multiclass, this is a list of such tensors, one for each class.
thresholds:
            thresholds used for computing false- and true positive rates
Example (binary case):
>>> from pytorch_lightning.metrics.functional import roc
>>> pred = torch.tensor([0, 1, 2, 3])
>>> target = torch.tensor([0, 1, 1, 1])
>>> fpr, tpr, thresholds = roc(pred, target, pos_label=1)
>>> fpr
tensor([0., 0., 0., 0., 1.])
>>> tpr
tensor([0.0000, 0.3333, 0.6667, 1.0000, 1.0000])
>>> thresholds
tensor([4, 3, 2, 1, 0])
Example (multiclass case):
>>> from pytorch_lightning.metrics.functional import roc
>>> pred = torch.tensor([[0.75, 0.05, 0.05, 0.05],
... [0.05, 0.75, 0.05, 0.05],
... [0.05, 0.05, 0.75, 0.05],
... [0.05, 0.05, 0.05, 0.75]])
>>> target = torch.tensor([0, 1, 3, 2])
>>> fpr, tpr, thresholds = roc(pred, target, num_classes=4)
>>> fpr
[tensor([0., 0., 1.]), tensor([0., 0., 1.]), tensor([0.0000, 0.3333, 1.0000]), tensor([0.0000, 0.3333, 1.0000])]
>>> tpr
[tensor([0., 1., 1.]), tensor([0., 1., 1.]), tensor([0., 0., 1.]), tensor([0., 0., 1.])]
>>> thresholds # doctest: +NORMALIZE_WHITESPACE
[tensor([1.7500, 0.7500, 0.0500]),
tensor([1.7500, 0.7500, 0.0500]),
tensor([1.7500, 0.7500, 0.0500]),
tensor([1.7500, 0.7500, 0.0500])]
"""
preds, target, num_classes, pos_label = _roc_update(preds, target, num_classes, pos_label)
return _roc_compute(preds, target, num_classes, pos_label, sample_weights)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*
"""UdPyBlog: Multi User Blog Module"""
import os
import jinja2
import hashlib
import hmac
import string
import random
import datetime
import webapp2
import re
import logging
import json
import sys
import cgi
import time
from paging import PagedQuery
from webapp2_extras import sessions
from google.appengine.ext import db
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import app_identity
from google.appengine.api import memcache
# Models
class UdPyBlogEmptyModel():
"""Empty model allows for a quick instantiation of an entity to serve thru jinja2"""
legit = False
def __init__( self, properties ):
for property in properties:
setattr( self, property, properties[property] )
def key( self ):
return ""
class UdPyBlogEntity( db.Model ):
"""Base class for all entites to allow for creation of empty objects emulating the real entity class."""
def nl_to_br(self, string):
return re.sub(
r"\r?\n",
"<br>",
string
)
@classmethod
def get_all( cls, deleted = False ):
return cls.all()
@classmethod
def get_from_id( cls, id, deleted = False ):
return cls.get_by_id( id )
@classmethod
def empty( cls ):
return UdPyBlogEmptyModel(
{
}
)
class UdPyBlogEntityDeletable( UdPyBlogEntity ):
"""Base class for entities that allow for soft deletion."""
@classmethod
def get_all( cls, deleted = False ):
if deleted:
return cls.all()
return cls.all().filter(
"deleted =",
None
)
@classmethod
def get_from_id( cls, id, deleted = False ):
if deleted:
return cls.get_by_id( id )
entity = cls.get_by_id( id )
if not entity.deleted:
return entity
return None
class UdPyBlogUser( UdPyBlogEntity ):
"""User entities. Created thru signup"""
legit = True
username = db.StringProperty( required = True )
password = db.StringProperty( required = True )
salt = db.StringProperty( required = True )
created = db.DateTimeProperty( auto_now_add = True )
deleted = db.DateTimeProperty()
lastlog = db.DateTimeProperty( auto_now_add = True )
def get_fancy_date( self ):
return self.created.strftime( UdPyBlog.get_config( "post_date_template" ) )
@classmethod
def empty( cls ):
return UdPyBlogEmptyModel(
{
"username": "",
"created": "",
"lastlog": ""
}
)
class UdPyBlogPost( UdPyBlogEntityDeletable ):
"""Post entities."""
legit = True
subject = db.StringProperty( required = True )
cover_image = db.ReferenceProperty( required = False )
summary = db.StringProperty( required = True, multiline = True )
content = db.TextProperty( required = True )
created = db.DateTimeProperty( auto_now_add = True )
deleted = db.DateTimeProperty()
user = db.ReferenceProperty( UdPyBlogUser, collection_name = "posts" )
def get_fancy_date( self ):
return self.created.strftime( UdPyBlog.get_config( "post_date_template" ) )
def get_likes_count( self ):
return self.users_who_like.count()
def get_comments_count( self ):
return self.comments.count()
def get_summary( self ):
return self.nl_to_br( self.summary )
def delete_post( self ):
"""
Soft delete: mark entities as deleted only ( default )
        Hard delete: actually delete entities ( not supported yet )
"""
deleted = datetime.datetime.now()
for like in self.users_who_like:
if not like.deleted:
like.deleted = deleted
like.put()
images = UdPyBlogImage.get_all().filter(
"post =",
self.key()
)
for image in images:
if not image.deleted:
image.deleted = deleted
image.put()
for comment in self.comments:
if not comment.deleted:
comment.deleted = deleted
comment.put()
self.deleted = deleted
self.put()
class UdPyBlogPostComment( UdPyBlogEntityDeletable ):
"""Comment entities."""
legit = True
subject = db.StringProperty( required = True )
note = db.TextProperty( required = True )
user = db.ReferenceProperty(
UdPyBlogUser,
collection_name = "comments"
)
post = db.ReferenceProperty(
UdPyBlogPost,
collection_name = "comments"
)
created = db.DateTimeProperty( auto_now_add = True )
deleted = db.DateTimeProperty()
@classmethod
def empty( cls, **attributes ):
defaults = {
"subject": "",
"note": "",
"created": "",
"categories": "",
"user": None,
"post": None
}
defaults.update( attributes )
return UdPyBlogEmptyModel( defaults )
def get_fancy_date( self ):
return self.created.strftime( UdPyBlog.get_config( "post_date_template" ) )
def get_comment( self ):
return self.nl_to_br( self.note )
class UdPyBlogPostLike( UdPyBlogEntityDeletable ):
"""Like Entities place collections in both posts and users. If a user removes a like, the entity gets removed."""
legit = True
post = db.ReferenceProperty(
UdPyBlogPost,
required = True,
collection_name = "users_who_like"
)
user = db.ReferenceProperty(
UdPyBlogUser,
required = True,
collection_name = "liked_posts"
)
created = db.DateTimeProperty( auto_now_add = True )
deleted = db.DateTimeProperty()
def get_fancy_date( self ):
return self.created.strftime( UdPyBlog.get_config( "post_date_template" ) )
class UdPyBlogImage( UdPyBlogEntityDeletable ):
"""Uploaded images are organized in this model."""
legit = True
session = db.StringProperty( required = True )
user = db.ReferenceProperty(
UdPyBlogUser,
required = True,
collection_name = "images"
)
post = db.ReferenceProperty( required = False )
blob_key = blobstore.BlobReferenceProperty()
deleted = db.DateTimeProperty()
created = db.DateTimeProperty( auto_now_add = True )
def get_fancy_date( self ):
return self.created.strftime( UdPyBlog.get_config( "post_date_template" ) )
# Handlers
class UdPyBlogHandler( webapp2.RequestHandler ):
"""Base handler. Supplying all the basic methods required by all subclasses. Especially authentication."""
signup = False
login = False
restricted = False
update = False
user = None
secret = "HmacSecret"
salt_length = 13
logout = False
request_override = {}
def set_cookie(self, name, value = "", path = "/" ):
self.response.headers.add_header(
"Set-Cookie",
"{}={}; path={}".format(
name,
value,
path
)
)
def delete_cookie(self, name):
self.set_cookie(name)
def get_request_var( self, var ):
if self.request_override:
if var in self.request_override:
return self.request_override[var]
return self.request.get( var )
def dispatch( self ):
self.request_override = {}
# Get a session store for this request.
self.session_store = sessions.get_store( request = self.request )
try:
# Dispatch the request.
webapp2.RequestHandler.dispatch( self )
finally:
# Save all sessions.
self.session_store.save_sessions( self.response )
@webapp2.cached_property
def session( self ):
# Returns a session using the default cookie key.
return self.session_store.get_session( backend = "memcache" )
def write( self, *a, **kw ):
self.response.out.write( *a, **kw )
def url_prefixed( self, fragment ):
return self.request.scheme + "://" + self.request.host + UdPyBlog.get_config( "blog_prefix" ) + fragment
def redirect_prefixed( self, fragment, code = None ):
self.redirect(
UdPyBlog.get_config( "blog_prefix" ) + fragment,
code = code
)
def render_str( self, template_file, **params ):
params = params or {}
params["stats"] = UdPyBlog.render_stats()
params["login_page"] = self.login
params["signup_page"] = self.signup
params["config"] = UdPyBlog.dump_config()
params["config"]["image_url_prefixed"] = self.url_prefixed(
UdPyBlog.get_config(
"image_view_url_part"
)
)
params["user"] = UdPyBlogUser.empty()
if self.user:
params["user"] = self.user
return UdPyBlog.render_template(
template_file,
**params
)
def render( self, template_file, **params ):
self.write(
self.render_str(
template_file,
**params
)
)
def render_json( self, payload ):
self.response.headers["Content-Type"] = "application/json"
self.response.out.write(
json.dumps(
payload
)
)
def add_redirection( self, redirect, append = False ):
if not redirect:
return
if redirect.find( "/login" ) > -1:
return
if redirect.find( "/signup" ) > -1:
return
redirects = self.session.get( "redirect" )
if redirects:
if not append:
return
else:
redirects = []
redirects.append( redirect )
self.session["redirect"] = redirects
return
def get_redirection( self, rightaway = True ):
if not "redirect" in self.session:
return
if not self.session.get( "redirect" ):
return
redirects = self.session.get( "redirect" )
redirect = redirects.pop( 0 )
self.session["redirect"] = redirects
if not rightaway:
return redirect
if redirect:
self.redirect( redirect )
return True
return False
def sanitize_post( self, content ):
"""Fix all blob references to actual image viewing urls. Replace/filter forbidden tags."""
quoteds = re.findall(
r"[\"\']([^\'\"]+)(?=[\"\'])",
content
)
for quoted in quoteds:
match = re.search(
r"^[\./]+" + UdPyBlog.get_config( "image_view_url_part" ) + r"(.+)$",
quoted
)
if match:
content = content.replace(
quoted,
self.url_prefixed(
UdPyBlog.get_config(
"image_view_url_part"
)
) + match.group( 1 )
)
for ( tag, replacement ) in UdPyBlog.get_config( "forbidden_tags" ):
if replacement:
replacer = ( "<" + tag, "<" + replacement ), ( "</" + tag + ">", "</" + replacement + ">" )
else:
replacer = ( "<" + tag, "" ), ( "</" + tag + ">", "" )
content = reduce( lambda a, kv: a.replace( *kv ), replacer, content )
return content
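    # Behaviour sketch for sanitize_post (hedged; depends on the configured
    # forbidden_tags). With ("b", "strong") the tag is swapped in place:
    #   '<b>hi</b>' -> '<strong>hi</strong>'
    # With ("script", "") only the tag name and closing tag are stripped, so
    # the opening tag's trailing '>' survives:
    #   '<script>x</script>' -> '>x'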
def auth( self ):
"""Authentication method. The user is authenticated on every request, extraction the info from the supplied "access" cookie."""
try:
if not self.session.get( "created" ):
self.session["created"] = time.time()
except:
logging.info( sys.exc_info() )
self.error( 500 )
# User object stored in session
if self.session.get( "user" ):
user = self.session.get( "user" )
if user.legit:
logging.info( "[auth] User is logged in from session" )
self.user = user
# No user in session? Try cookie
elif "access" in self.request.cookies:
if not self.request.cookies.get( "access" ) and not self.restricted:
return True
access = self.request.cookies.get( "access" ).split( "|" )
if len( access ) == 2:
logging.info( "[auth] Trying to login user from access cookie" )
user = UdPyBlogUser.get_from_id( int( access[1] ) )
if user and self.validate_user( user, access ):
self.user = user
self.session["user"] = self.user
        # Do not redirect if the override is a multidict - a pending post!
if self.user:
if not self.logout:
if self.request_override.__class__.__name__ != "UnicodeMultiDict":
if self.get_redirection():
return False
return True
# Non restricted pages allowed to continue processing
if not self.restricted:
return True
# Logout may proceed never mind the result of the current login
if self.logout:
return True
# store the original url in order to redirect on success!
redirects = [ self.request.url ]
if self.request.method == "POST":
# freeze vars for thaw in get
self.session["request_override"] = self.request.POST
redirects.append( self.request.referer )
self.session["redirect"] = redirects
self.redirect_prefixed( "login/auto" )
return False
def make_hash( self, message, salt = None ):
salt = salt or self.make_salt()
return "{}{}".format(
hmac.new(
UdPyBlog.get_config( "password_secret", True ),
                message + salt, hashlib.sha256
).hexdigest(),
salt
)
def make_salt( self ):
return "".join( random.choice( "abcdef" + string.digits ) for x in xrange( self.salt_length ) )
def validate_user( self, user, access ):
salt = access[0][( self.salt_length * -1 ):]
return access[0] == self.make_hash(
user.username,
salt
)
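    # Cookie layout sketch (derived from make_hash/validate_user above): the
    # "access" cookie carries "<sha256 hexdigest><salt>|<user_id>", i.e. a
    # 64-char digest followed by salt_length salt chars, then the datastore
    # id of the user.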
def no_cache( self ):
self.response.headers.add_header(
"Cache-Control",
"no-cache, no-store, must-revalidate, max-age=0"
)
self.response.headers.add_header(
"Expires",
"0"
)
def get_image_upload_url( self ):
"""
Image upload urls tend to expire quickly and can only be used once.
        This forces any upload to fetch a fresh url before uploading a file.
"""
self.no_cache()
bucket = app_identity.get_default_gcs_bucket_name()
return blobstore.create_upload_url(
"{}image/upload".format( UdPyBlog.get_config( "blog_prefix" ) ),
gs_bucket_name = bucket
)
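    # Client flow sketch (as wired up by the handlers further below): the
    # browser first fetches a fresh one-time upload url from
    # UdPyBlogImageUploadPrepareHandler, POSTs the file to it, and App Engine
    # then hands the stored blob to UdPyBlogImageUploadHandler.post().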
def process_images( self, post = None, expiry = None ):
"""
        This function deals with orphaned BLOBs in the system, keeping the
        database from being clotted with costly junk. It is called from
        * Cron Task
        * Logout
        * Login
        * Post Create
        * Post Update
        Purging orphaned images on post submission is a little rude: it assumes
        the user is not editing 2 posts with images at the same time. In a real
        world scenario I would reduce the cleanup to logout/login and cron, the
        only situations where it is safe to assume no contributor is harmed.
"""
if post:
logging.info( "[process_images] Checking for Post: {}".format( post.key() ) );
images_stored = self.user.images
images_check = []
for image in images_stored:
if post.cover_image and post.cover_image.blob_key.key() == image.blob_key.key():
logging.info( "[process_images] Skipping cover image..." );
continue
if not image.post or image.post.key() == post.key():
images_check.append( str( image.blob_key.key() ) )
images = []
quoteds = re.findall(
r"[\"\']([^\'\"]+)(?=[\"\'])",
post.content
)
for quoted in quoteds:
match = re.search(
UdPyBlog.get_config( "image_view_url_part" ) + r"(.+)$",
quoted
)
if match:
images.append( match.group( 1 ) )
if images:
logging.info( "[process_images] Post references {} images: {}".format( len( images ),images ) )
else:
images = []
logging.info( "[process_images] Images found that are related to this post or not yet to any: {}".format( images_check ) )
images_dropped = list( set( images_check ) - set( images ) )
logging.info( "[process_images] Purging {} unmapped images. ( {}... )".format( len( images_dropped ),images_dropped[0:3] ) )
else:
if self.user:
images_stored = self.user.images.filter(
"post =",
None
)
images_dropped = []
for image in images_stored:
images_dropped.append( str( image.blob_key.key() ) )
elif expiry:
logging.info( "[process_images] Purging expired images ( {} )".format( expiry ) );
images_stored = UdPyBlogImage.get_all().filter(
"post =", None
).filter(
"created <", expiry
)
images_dropped = []
for image in images_stored:
images_dropped.append( str( image.blob_key.key() ) )
for image in images_dropped:
for image_placed in UdPyBlogImage.get_all().filter(
"blob_key =",
image
):
logging.info( "[process_images] Purging {}".format( image_placed.blob_key.key() ) )
blob_info = blobstore.BlobInfo.get( image_placed.blob_key.key() )
if blob_info:
blob_info.delete()
image_placed.delete()
if post:
for blob_key in images:
try:
logging.info( "[process_images] Adding image " + blob_key )
image_placed = UdPyBlogImage.get_all().filter(
"blob_key =",
blob_key
).get()
image_placed.post = post.key()
image_placed.put()
except:
logging.info( sys.exc_info() )
self.error( 500 )
class UdPyBlogTaskHandler( UdPyBlogHandler ):
def auth( self ):
return self.request.headers.get( "X-AppEngine-Cron" ) == "true"
class UdPyBlogImageUploadPrepareHandler( blobstore_handlers.BlobstoreUploadHandler, UdPyBlogHandler ):
def get( self ):
self.no_cache()
self.render_json( {
"upload_url": self.get_image_upload_url()
} )
class UdPyBlogImageUploadHandler( blobstore_handlers.BlobstoreUploadHandler, UdPyBlogHandler ):
def post( self ):
if not self.auth():
return
try:
upload = self.get_uploads()[0]
uploaded_image = UdPyBlogImage(
session = self.request.cookies["session"],
user = self.user,
blob_key = upload.key()
)
uploaded_image.put()
self.render_json(
{
"location": self.url_prefixed(
"{}{}".format(
UdPyBlog.get_config( "image_view_url_part" ),
upload.key()
)
)
}
)
except:
logging.info( sys.exc_info() )
self.error( 500 )
class UdPyBlogImageViewHandler( blobstore_handlers.BlobstoreDownloadHandler, UdPyBlogHandler ):
def get( self, image_key ):
if not self.auth():
return
if not blobstore.get( image_key ):
self.error( 404 )
else:
self.send_blob( image_key )
class UdPyBlogPostViewHandler( UdPyBlogHandler ):
url = "post"
def get( self, post_id ):
if not self.auth():
return
if post_id.isdigit():
try:
                logging.info( "[view] Requested post id: {}".format( post_id ) )
post = UdPyBlogPost.get_from_id( int( post_id ) )
if not post:
self.abort(404)
return
likes_post = False
if self.user and self.user.liked_posts.filter(
"post =",
post.key()
).count() == 1:
likes_post = True
self.render(
"blog_post.html",
**{
"post": post,
"comment": UdPyBlogPostComment.empty()
}
)
return
except:
self.abort(404)
return
else:
self.redirect_prefixed( "" )
class UdPyBlogSignupSuccessHandler( UdPyBlogHandler ):
restricted = True
def get( self ):
if not self.auth():
return
self.render(
"blog_welcome.html",
**{
"redirect": self.get_redirection( False )
}
)
class UdPyBlogSignupHandlerLogout( UdPyBlogHandler ):
logout = True
restricted = False
def get( self ):
self.add_redirection( self.request.referer )
if not self.auth():
return
if self.user:
self.process_images()
redirect = self.get_redirection( False )
self.session.clear()
self.delete_cookie("session")
if "access" in self.request.cookies:
self.delete_cookie("access")
if self.user:
self.user = None
if redirect:
self.redirect( redirect )
return
self.redirect_prefixed( "" )
class UdPyBlogPostLikeHandler( UdPyBlogHandler ):
"""Register or unregister ( toggle ) likes for a specific post. Only
logged in users other than the author are allowed to like a post."""
restricted = True
def get( self, post_id ):
if self.session.get( "request_override" ).__class__.__name__ == "UnicodeMultiDict":
self.request_override = self.session.get( "request_override" )
self.session["request_override"] = None
self.post( post_id, thaw = True )
return
self.error( 403 )
return
def post( self, post_id, thaw = False ):
logging.info("LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL")
if not self.auth():
return
if not thaw:
self.add_redirection( self.request.referer, True )
if not self.user:
self.redirect_prefixed( "" )
return
post = UdPyBlogPost.get_from_id( int( post_id ) )
if not post:
logging.info( "Post <<{}>> doesn't exist".format( post_id ) )
self.abort( 404 )
if post.user.username == self.user.username:
logging.info( "User {} may not like his own post!".format( self.user.username ) )
if not thaw:
self.abort( 403 )
else:
if self.get_redirection():
return
self.redirect_prefixed( "" )
return
post_user_likes = post.users_who_like.filter(
"user =",
self.user.key()
).get()
logging.info( "post likes this post!" )
if post_user_likes:
post_user_likes.delete()
else:
post_like = UdPyBlogPostLike(
post = post,
user = self.user
)
post_like.put()
if self.get_redirection():
return
self.redirect_prefixed( "" )
return
class UdPyBlogMainHandler( UdPyBlogHandler ):
def get( self, page_id = None ):
if not self.auth():
return
posts = []
posts_query = PagedQuery(
UdPyBlogPost.all().filter(
"deleted =",
None
).order( "-created" ),
UdPyBlog.get_config( "posts_per_page" )
)
pages_total = posts_query.page_count()
if not page_id:
page_id = 1
else:
page_id = int( page_id )
posts = posts_query.fetch_page( page_id )
page_next = None
if pages_total > page_id:
page_next = ( page_id + 1 )
page_prev = None
if page_id > 1:
page_prev = ( page_id - 1 )
self.render(
"blog_main.html",
**{
"posts": posts,
"pages": pages_total,
"page_prev": page_prev,
"page_next": page_next
}
)
class UdPyBlogSignupHandler( UdPyBlogHandler ):
signup = True
fields = [
"username",
"password",
"verify",
"email"
]
required = [
"username",
"password"
]
scope = "signup"
errors = 0
args = {}
def get( self ):
# If the user chooses to log in or sign up, capture referrer!
# if the referrer is any of the login pages it will not be added!
self.add_redirection( self.request.referer )
if not self.auth():
return
for field in self.fields:
self.args[field], self.args["error_" + field] = "",""
self.render(
"signup.html",
**self.args
)
def post( self ):
if not self.auth():
return
self.args["jump"] = ""
for field in self.fields:
self.args[field],self.args["error_" + field] = "",""
self.args[field],self.args["error_" + field] = self.validate( field )
if not self.args["jump"] and self.args["error_" + field]:
self.args["jump"] = "{}-{}".format(
self.scope,
field
)
if self.errors > 0:
self.render(
"signup.html",
**self.args
)
else:
access_hash = self.make_hash( self.args["username"] )
salt = access_hash[( self.salt_length * -1 ):]
user = UdPyBlogUser(
username = self.args["username"],
password = self.make_hash(
self.args["password"],
salt
),
salt = salt
)
user.put()
self.session["user"] = user
self.set_cookie(
"access",
"{}|{}".format(
access_hash,
user.key().id()
)
)
blog_entity_context = {
"username": user.username
}
self.response.headers.add_header(
"Blog-Entity-Context",
json.dumps( blog_entity_context )
)
self.redirect_prefixed( "welcome" )
def validate( self,field ):
        # Check the entered data against the regex and length requirements.
        # Higher-level checks run only if no error is found here.
error = UdPyBlog.validate_input(
field,
self.get_request_var( field ),
field in self.required
)
if error != True:
self.errors += 1
return ( self.get_request_var( field ),error )
if field == "username":
if not self.login:
if UdPyBlogUser.get_all( True ).filter(
"username =",
self.get_request_var(
field
)
).count() > 0:
self.errors += 1
return (
self.get_request_var( field ),
"That user already exists"
)
return ( self.get_request_var( field ),"" )
if field == "subject":
return (
cgi.escape(
self.get_request_var( field )
),
""
)
if field == "summary":
return (
cgi.escape(
self.get_request_var( field )
),
""
)
if field == "verify":
input_verify = self.get_request_var( field )
if "password" in self.args and self.args["password"] != "":
if self.args["password"] != input_verify:
self.errors += 1
return ( "","Your passwords didn't match" )
return ( input_verify, "" )
return ( "","" )
if field == "email":
input_email = self.get_request_var( field )
if input_email == "":
return ( "","" )
if field == "post_id":
input_post_id = self.get_request_var( field )
if input_post_id.isdigit():
return ( input_post_id,"" )
else:
self.errors += 1
return ( "","Post id missing" )
return ( self.get_request_var( field ),"" )
class UdPyBlogPostHandler( UdPyBlogSignupHandler ):
restricted = True
fields = [
"subject",
"summary",
"content"
]
required = fields
scope = "post"
def post( self, post_id = None ):
if not self.auth():
return
self.args["update"] = self.update
self.args["jump"] = ""
for field in self.fields:
self.args[field],self.args["error_" + field] = "",""
self.args[field],self.args["error_" + field] = self.validate( field )
if not self.args["jump"] and self.args["error_" + field]:
self.args["jump"] = "{}-{}".format( self.scope,field )
self.args["update"] = self.update
self.args["cover_image"] = None
self.args["cover_image_url"] = None
if self.get_request_var( "cover_image_url" ):
self.args["cover_image_url"] = self.get_request_var( "cover_image_url" )
self.args["cover_image"] = UdPyBlogImage.get_all().filter(
"blob_key =",
os.path.basename(
self.get_request_var( "cover_image_url" )
)
).get()
if self.errors > 0:
self.args["upload_url_source"] = self.url_prefixed( "image/upload_url" )
self.render(
"blog_form.html",
**self.args
)
return
else:
if not self.update:
cover_image_key = None
if self.args["cover_image"]:
cover_image_key = self.args["cover_image"].key()
post = UdPyBlogPost(
subject = self.args["subject"],
summary = self.args["summary"],
content = "Your post content could not be processed. Please contact the administrator.",
cover_image = cover_image_key,
user = self.user
)
else:
post = UdPyBlogPost.get_from_id( int( post_id ) )
if not post or post.user.username != self.user.username:
self.redirect_prefixed( "post/{0}".format( self.args["post_id"] ) )
return
post.subject = self.args["subject"]
post.summary = self.args["summary"]
post.content = "Your post content could not be processed. Please contact the administrator."
if self.args["cover_image"]:
post.cover_image = self.args["cover_image"]
elif post.cover_image:
logging.info( "Deleteing previous cover" )
blobstore.delete( post.cover_image.blob_key.key() )
post.cover_image.delete()
post.cover_image = None
post.content = self.sanitize_post( self.args["content"] )
post.put()
if self.args["cover_image"]:
self.args["cover_image"].post = post.key()
self.args["cover_image"].put()
logging.info( "Processing contained and dropped images from the current post..." )
self.process_images( post = post )
blog_entity_context = {
"post_id": post.key().id(),
"username": self.user.username
}
self.response.headers.add_header(
"Blog-Entity-Context",
json.dumps( blog_entity_context )
)
self.redirect_prefixed( "post/{0}".format( post.key().id() ) )
return
self.render(
"blog_form.html",
**self.args
)
def get( self ):
if not self.auth():
return
self.render(
"blog_form.html",
**{
"subject": self.get_request_var( "subject" ),
"content": self.get_request_var( "content" ),
"post_id": None,
"update": self.update,
"upload_url": self.get_image_upload_url(),
"upload_url_source": self.url_prefixed( "image/upload_url" )
}
)
class UdPyBlogPostUpdateHandler( UdPyBlogPostHandler ):
update = True
def get( self, post_id ):
self.no_cache()
if not self.auth():
return
if post_id.isdigit():
post = UdPyBlogPost.get_from_id( int( post_id ) )
if post:
if post.user.key() != self.user.key():
self.redirect_prefixed( "post/{}".format( post.key().id() ) )
return
self.render(
"blog_form.html",
**{
"subject": post.subject,
"summary": post.summary,
"content": post.content,
"post_id": post_id,
"update": self.update,
"cover_image_url": post.cover_image and self.url_prefixed(
"{0}{1}".format(
UdPyBlog.get_config( "image_view_url_part" ),
post.cover_image.blob_key.key()
)
),
"upload_url": self.get_image_upload_url(),
"upload_url_source": self.url_prefixed( "image/upload_url" )
}
)
return
else:
logging.info( "ERROR" )
self.render( "blog_main.html",error = "ID not found ( " + str( post_id ) + " )" )
return
else:
self.redirect_prefixed( "" )
class UdPyBlogPostDeleteHandler( UdPyBlogPostHandler ):
delete = True
def post( self, post_id ):
if not self.auth():
return
if post_id.isdigit():
post = UdPyBlogPost.get_from_id( int( post_id ) )
if post:
if post.user.key() != self.user.key():
self.redirect_prefixed( "post/{}".format( post.key().id() ) )
return
post.delete_post()
self.redirect_prefixed( "" )
else:
logging.info( "ERROR" )
self.render( "blog_main.html",error = "ID not found ( " + str( post_id ) + " )" )
return
else:
self.redirect_prefixed( "" )
class UdPyBlogPostCommentHandler( UdPyBlogPostHandler ):
"""Handling comments posted on a post"""
fields = [ "subject", "note" ]
required = fields
restricted = True
scope = "comment"
def validate( self,field ):
        # Check the entered data against the regex and length requirements.
        # Higher-level checks run only if no error is found here.
error = UdPyBlog.validate_input(
field,
self.get_request_var( field ),
field in self.required
)
if error != True:
self.errors += 1
return ( self.get_request_var( field ),error )
if field == "subject":
return ( cgi.escape( self.get_request_var( field ) ),"" )
if field == "note":
return ( cgi.escape( self.get_request_var( field ) ),"" )
def get( self, post_id ):
        # Handling this via GET requires a previously frozen POST request!
if self.session.get( "request_override" ).__class__.__name__ == "UnicodeMultiDict":
self.request_override = self.session.get( "request_override" )
self.session["request_override"] = None
self.post(
post_id,
thaw = True
)
return
self.error( 403 )
return
def post( self, post_id, thaw = False ):
if not self.auth():
return
if self.update:
self.fields.append( "comment_id" )
post = UdPyBlogPost.get_from_id( int( post_id ) )
        if not post:
            self.redirect_prefixed( "" )
            return
self.args["jump"] = ""
for field in self.fields:
self.args[field],self.args["error_" + field] = "",""
self.args[field],self.args["error_" + field] = self.validate( field )
if not self.args["jump"] and self.args["error_" + field]:
self.args["jump"] = "{}-{}".format( self.scope,field )
if self.errors > 0:
self.args["post"] = post
self.args["comment"] = UdPyBlogPostComment.empty(
**{
"subject": self.args["subject"],
"note": self.args["note"]
}
)
self.render(
"blog_post.html",
**self.args
)
return
else:
if not self.update:
comment = UdPyBlogPostComment(
subject = self.args["subject"],
note = self.args["note"],
post = post,
user = self.user
)
else:
comment = UdPyBlogPostComment.get_from_id( int( self.args["comment_id"] ) )
if not post or post.user.username != self.user.username:
self.redirect_prefixed( "post/{0}".format( int( post_id ) ) )
return
comment.subject = self.args["subject"]
comment.note = self.args["note"]
comment.put()
blog_entity_context = {
"post_id": post.key().id(),
"comment_id": comment.key().id()
}
self.response.headers.add_header(
"Blog-Entity-Context",
json.dumps( blog_entity_context )
)
self.redirect_prefixed( "post/{0}".format( post.key().id() ) )
return
self.render(
"blog_post.html",**{
"error": error,
"comment": UdPyBlogPostComment.empty(
**{
"subject": self.args["subject"],
"note": self.args["note"]
}
),
"post": post
}
)
class UdPyBlogPostCommentDeleteHandler( UdPyBlogPostCommentHandler ):
"""Handling comment deletions on a post"""
restricted = True
def get( self, post_id, comment_id ):
if not self.auth():
return
comment = UdPyBlogPostComment.get_from_id( int( comment_id ) )
if not comment or comment.user.key() != self.user.key():
self.redirect_prefixed( "" )
return
comment.delete()
self.redirect_prefixed(
"post/{0}".format(
post_id
)
)
return
class UdPyBlogPostCommentEditHandler( UdPyBlogPostCommentHandler ):
"""Handling comment edits on a post"""
update = True
restricted = True
def get( self, post_id, comment_id ):
if not self.auth():
return
comment = UdPyBlogPostComment.get_from_id( int( comment_id ) )
        if not comment or comment.user.key() != self.user.key():
self.redirect_prefixed( "" )
return
post_id = comment.post.key().id()
if not comment.post or comment.user.username != self.user.username:
self.redirect_prefixed(
"post/{0}".format(
post_id
)
)
return
self.render(
"blog_post.html",
**{
"post": comment.post,
"comment": comment,
"update": self.update
}
)
def post( self, post_id, comment_id ):
if not self.auth():
return
post = UdPyBlogPost.get_from_id( int( post_id ) )
        if not post:
            self.redirect_prefixed( "" )
            return
for field in self.fields:
self.args[field],self.args["error_" + field] = "",""
self.args[field],self.args["error_" + field] = self.validate( field )
if self.errors > 0:
            self.render(
                "blog_post.html",
                **{
                    "user": self.user,
                    "post": post,
                    "subject": self.args["subject"],
                    # build an empty comment carrying the submitted values,
                    # mirroring UdPyBlogPostCommentHandler.post above
                    "comment": UdPyBlogPostComment.empty(
                        **{
                            "subject": self.args["subject"],
                            "note": self.args["note"]
                        }
                    ),
                    "update": self.update
                }
            )
return
else:
if not self.update:
comment = UdPyBlogPostComment(
subject = self.get_request_var( "subject" ),
note = self.get_request_var( "note" ),
post = post,
user = self.user
)
else:
comment = UdPyBlogPostComment.get_from_id( int( comment_id ) )
if not comment or comment.user.username != self.user.username:
self.redirect_prefixed(
"post/{0}".format(
post_id
)
)
return
comment.subject = self.args["subject"]
comment.note = self.args["note"]
comment.put()
blog_entity_context = {
"post_id": post.key().id(),
"comment_id": comment.key().id(),
"username": self.user.username
}
self.response.headers.add_header(
"Blog-Entity-Context",
json.dumps( blog_entity_context )
)
self.redirect_prefixed(
"post/{0}".format(
post.key().id()
)
)
return
self.render(
"blog_post.html",**{
"error": error,
"comment": None,
"subject": self.get_request_var( "subject" ),
"note": self.get_request_var( "note" ),
"created": self.get_request_var( "created" )
}
)
class UdPyBlogInitHandler( UdPyBlogSignupHandler ):
fields = [ "password" ]
def get( self ):
if not self.auth():
return
self.render( "init.html" )
def post( self ):
if not self.auth():
return
for field in self.fields:
self.args[field],self.args["error_" + field] = "",""
self.args[field],self.args["error_" + field] = self.validate( field )
if self.errors > 0:
self.render(
"init.html",
**self.args
)
return
else:
if self.args["password"] == udpyblog_init_pass:
udpyblog_init_blog()
return
else:
self.args["error"] = "invalid login"
self.render(
"init.html",
**self.args
)
return
class UdPyBlogPostCleanUpHandler( UdPyBlogTaskHandler ):
def get( self ):
if self.auth():
try:
logging.info( "Starting Clean Up" )
self.process_images(
expiry = (
datetime.datetime.today()
+
datetime.timedelta(
seconds = (
UdPyBlog.get_config( "blob_expiry_seconds" ) * -1
)
)
)
)
except:
logging.info( sys.exc_info() )
return
self.error( 403 )
return
class UdPyBlogSignupHandlerLogin( UdPyBlogSignupHandler ):
fields = [ "username","password" ]
required = fields
login = True
scope = "login"
def get( self ):
# If the user chooses to log in or sign up, capture referrer!
if self.request.url.find( "login/auto" ) == -1:
self.add_redirection( self.request.referer )
if not self.auth():
return
self.delete_cookie("access")
self.render( "login.html" )
def post( self ):
if not self.auth():
return
self.args["jump"] = ""
for field in self.fields:
self.args[field],self.args["error_" + field] = "",""
self.args[field],self.args["error_" + field] = self.validate( field )
if not self.args["jump"] and self.args["error_" + field]:
self.args["jump"] = "{}-{}".format(
self.scope,
field
)
            logging.info( "Login form validated with {} error(s)".format( self.errors ) )
if self.errors > 0:
self.render(
"login.html",
**self.args
)
return
else:
user = UdPyBlogUser.get_all().filter(
"username =",
self.args["username"]
).get()
if user:
logging.info( "User match!!!" )
if self.make_hash( self.args["password"], user.salt ) == user.password:
logging.info( "Password match!!!" )
self.set_cookie(
"access",
"{}|{}".format(
self.make_hash(
user.username,
user.salt
),
user.key().id()
)
)
blog_entity_context = {
"username": user.username
}
self.response.headers.add_header(
"Blog-Entity-Context",
json.dumps( blog_entity_context )
)
self.user = user
self.process_images()
if self.get_redirection():
return
self.redirect_prefixed( "" )
return
else:
self.args["error"] = "invalid login"
self.render(
"login.html",
**self.args
)
return
# Base class
class UdPyBlog():
"""This class serves as a configuration class. It populates all
nescessary variables given a dictionary from via the setup method"""
routes = [
( "", UdPyBlogMainHandler ),
( "page/([0-9]+)", UdPyBlogMainHandler ),
( "signup", UdPyBlogSignupHandler ),
( "logout", UdPyBlogSignupHandlerLogout ),
( "login", UdPyBlogSignupHandlerLogin ),
( "login/auto", UdPyBlogSignupHandlerLogin ),
( "welcome", UdPyBlogSignupSuccessHandler ),
( "post/([0-9]+)", UdPyBlogPostViewHandler ),
( "post/([0-9]+)/like", UdPyBlogPostLikeHandler ),
( "post/([0-9]+)/update", UdPyBlogPostUpdateHandler ),
( "post/([0-9]+)/delete", UdPyBlogPostDeleteHandler ),
( "post/([0-9]+)/comment", UdPyBlogPostCommentHandler ),
( "post/([0-9]+)/comment/([0-9]+)/edit", UdPyBlogPostCommentEditHandler ),
( "post/([0-9]+)/comment/([0-9]+)/delete", UdPyBlogPostCommentDeleteHandler ),
( "newpost", UdPyBlogPostHandler ),
( "image/upload_url",UdPyBlogImageUploadPrepareHandler ),
( "image/upload",UdPyBlogImageUploadHandler ),
( "_cleanup", UdPyBlogPostCleanUpHandler ) # cron task
]
__config = {
"template_folder": "dist/templates",
"blog_prefix": "/",
"blob_expiry_seconds": ( 5*24*3600 ),
"static_path_prefix": "",
"post_date_template": "%d, %b %Y, %I:%M%p",
"comment_date_template": "%d, %b %Y, %I:%M%p",
"posts_per_page": 4,
"input_requirements": {
"email": {
"min": 6,
"max": 250
},
"password": {
"min": 3,
"max": 20
},
"username": {
"min": 3,
"max": 20
},
"summary": {
"min": 10,
"max": 250
},
"subject": {
"min": 6,
"max": 80
},
"content": {
"min": 10,
"max": 100000000
},
"note": {
"min": 10,
"max": 500
}
}
}
regexp = {
"username": r"[a-zA-Z0-9_-]+",
"email": r"[\S]+@[\S]+\.[\S]",
"subject": r"[^\r\n\t]"
}
jinja_env = None
@classmethod
def prepare( cls, config = None ):
if config:
# Sensitive directives are prefixed with a "_" to mask them on dump
if "password_secret" in config:
cls.__config["_password_secret"] = config["password_secret"]
if "template_folder" in config:
cls.__config["template_folder"] = config["template_folder"]
if "blog_prefix" in config:
cls.__config["blog_prefix"] = config["blog_prefix"]
if "forbidden_tags" in config:
cls.__config["forbidden_tags"] = config["forbidden_tags"]
if "image_view_url_part" in config:
cls.__config["image_view_url_part"] = config["image_view_url_part"]
if "blob_expiry_seconds" in config:
cls.__config["blob_expiry_seconds"] = config["blob_expiry_seconds"]
if "post_date_template" in config:
cls.__config["post_date_template"] = config["post_date_template"]
if "comment_date_template" in config:
cls.__config["comment_date_template"] = config["comment_date_template"]
if "posts_per_page" in config:
cls.__config["posts_per_page"] = config["posts_per_page"]
if "input_requirements" in config:
cls.__config["input_requirements"] = cls.merge_dicts(
cls.__config["input_requirements"],
config["input_requirements"]
)
cls.template_dir = os.path.join(
os.path.dirname( __file__ ),
cls.__config["template_folder"]
)
cls.jinja_env = jinja2.Environment( loader = jinja2.FileSystemLoader( cls.template_dir ) )
@classmethod
def get_routes( cls ):
cls.routes.append(
(
UdPyBlog.get_config( "image_view_url_part" ) + "(.+)",
UdPyBlogImageViewHandler
)
)
if cls.__config["blog_prefix"]:
routes_prefixed = []
for route in cls.routes:
routes_prefixed.append(
(
cls.__config["blog_prefix"] + route[0],
route[1]
)
)
return routes_prefixed
else:
return cls.routes
@classmethod
def validate_input( cls, field, input, required ):
if not input:
if required:
return "Field empty"
else:
return True
if field in cls.__config["input_requirements"]:
if len( input ) < cls.__config["input_requirements"][field]["min"]:
return "Input too short"
if len( input ) > cls.__config["input_requirements"][field]["max"]:
return "Input too long"
if field in cls.regexp:
if not re.match(
cls.regexp[field],
input
):
return "Input contains illegal characters"
return True
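    # Examples against the defaults above (values illustrative): an empty
    # required field yields "Field empty"; a two-character username yields
    # "Input too short" (min 3); a username starting with a disallowed
    # character, e.g. "?user", fails the username regexp. Note that re.match
    # only anchors at the start of the string, so illegal characters later
    # in the value slip through this check.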
@classmethod
def error_handler( cls, request, response, exception ):
logging.info( sys.exc_info() )
try:
code = exception.code
except:
code = "000"
response.out.write(
cls.render_template(
"error_{}.html".format(code),
exception = exception,
response = response,
user = UdPyBlogUser.empty(),
config = cls.dump_config(),
stats = cls.render_stats()
)
)
@classmethod
def render_template( cls, template_file, **params ):
template = cls.jinja_env.get_template( template_file )
return template.render( **params )
@classmethod
def inject( cls, app ):
app.error_handlers[404] = cls.error_handler
app.error_handlers[403] = cls.error_handler
app.error_handlers[500] = cls.error_handler
@classmethod
def merge_dicts( cls, *dict_args ):
"""
Given any number of dicts, shallow copy and merge into a new dict,
precedence goes to key value pairs in latter dicts.
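        Example: merge_dicts({"a": 1}, {"a": 2, "b": 3}) -> {"a": 2, "b": 3}.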
"""
result = {}
for dictionary in dict_args:
result.update( dictionary )
return result
@classmethod
def get_config( cls, key, secure = False ):
if not secure:
if key in cls.__config:
return cls.__config[key]
else:
if "_" + key in cls.__config:
return cls.__config["_" + key]
return ""
@classmethod
def dump_config( cls ):
"""preventing sensitive config keys from being exposed"""
return {key: value for key, value in cls.__config.iteritems() if key[0] != "_"}
@classmethod
def render_stats( cls ):
images_deleted_count = UdPyBlogImage.get_all( True ).filter(
"deleted !=",
None
).count()
blobstore_count = blobstore.BlobInfo.all().count()
return {
"users_count": UdPyBlogUser.get_all().count(),
"posts_count": UdPyBlogPost.get_all().count(),
"comments_count": UdPyBlogPostComment.get_all().count(),
"likes_count": UdPyBlogPostLike.get_all().count(),
"images_count": UdPyBlogImage.get_all().count(),
"images_deleted_count": images_deleted_count,
"blobstore_count": (blobstore_count - images_deleted_count),
"blobstore_deleted_count": images_deleted_count
}
|
from rip import Rip
Rip().main()
|
"""
Technical Analysis Factors
--------------------------
"""
from __future__ import division
from numpy import (
abs,
average,
clip,
diff,
dstack,
inf,
)
from numexpr import evaluate
from zipline.pipeline.data import USEquityPricing, EquityPricing
from zipline.pipeline.factors import CustomFactor
from zipline.pipeline.mixins import SingleInputMixin
from zipline.utils.input_validation import expect_bounded
from zipline.utils.math_utils import (
nanargmax,
nanargmin,
nanmax,
nanmean,
nanstd,
nanmin,
)
from zipline.utils.numpy_utils import rolling_window
from .basic import exponential_weights
from .basic import ( # noqa reexport
# These are re-exported here for backwards compatibility with the old
# definition site.
LinearWeightedMovingAverage,
MaxDrawdown,
SimpleMovingAverage,
VWAP,
WeightedAverageValue
)
class RSI(CustomFactor, SingleInputMixin):
"""
Relative Strength Index
**Default Inputs**: [EquityPricing.close]
**Default Window Length**: 15
"""
window_length = 15
inputs = (EquityPricing.close,)
window_safe = True
def compute(self, today, assets, out, closes):
diffs = diff(closes, axis=0)
ups = nanmean(clip(diffs, 0, inf), axis=0)
downs = abs(nanmean(clip(diffs, -inf, 0), axis=0))
return evaluate(
"100 - (100 / (1 + (ups / downs)))",
local_dict={'ups': ups, 'downs': downs},
global_dict={},
out=out,
)
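# A minimal usage sketch (illustrative; assumes a standard zipline Pipeline
# setup -- `Pipeline` itself is not imported in this module):
#
#     from zipline.pipeline import Pipeline
#     pipe = Pipeline(columns={'rsi': RSI()})
#
# With the default window_length of 15, compute() sees 15 rows of closes,
# i.e. 14 day-over-day diffs; average gain and loss are plain nanmeans of
# the clipped diffs rather than Wilder's smoothed averages.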
class BollingerBands(CustomFactor):
"""
Bollinger Bands technical indicator.
https://en.wikipedia.org/wiki/Bollinger_Bands
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
inputs : length-1 iterable[BoundColumn]
The expression over which to compute bollinger bands.
window_length : int > 0
Length of the lookback window over which to compute the bollinger
bands.
k : float
The number of standard deviations to add or subtract to create the
upper and lower bands.
"""
params = ('k',)
inputs = (EquityPricing.close,)
outputs = 'lower', 'middle', 'upper'
def compute(self, today, assets, out, close, k):
difference = k * nanstd(close, axis=0)
out.middle = middle = nanmean(close, axis=0)
out.upper = middle + difference
out.lower = middle - difference
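# Usage sketch (illustrative): `k` is declared in `params`, so it is bound
# at construction time and each distinct k yields a distinct term:
#
#     bb = BollingerBands(window_length=20, k=2.0)
#     columns = {'upper': bb.upper, 'middle': bb.middle, 'lower': bb.lower}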
class Aroon(CustomFactor):
"""
Aroon technical indicator.
https://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/aroon-indicator # noqa
**Defaults Inputs:** EquityPricing.low, EquityPricing.high
Parameters
----------
window_length : int > 0
Length of the lookback window over which to compute the Aroon
indicator.
"""
inputs = (EquityPricing.low, EquityPricing.high)
outputs = ('down', 'up')
def compute(self, today, assets, out, lows, highs):
wl = self.window_length
high_date_index = nanargmax(highs, axis=0)
low_date_index = nanargmin(lows, axis=0)
evaluate(
'(100 * high_date_index) / (wl - 1)',
local_dict={
'high_date_index': high_date_index,
'wl': wl,
},
out=out.up,
)
evaluate(
'(100 * low_date_index) / (wl - 1)',
local_dict={
'low_date_index': low_date_index,
'wl': wl,
},
out=out.down,
)
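# Worked example of the formulas above (hypothetical numbers): with
# window_length = 25, a high on the most recent bar has index 24, so
# Aroon-up = 100 * 24 / 24 = 100; a high at the start of the window gives
# 100 * 0 / 24 = 0.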
class FastStochasticOscillator(CustomFactor):
"""
Fast Stochastic Oscillator Indicator [%K, Momentum Indicator]
https://wiki.timetotrade.eu/Stochastic
    This stochastic is considered volatile and fluctuates considerably when
    used in market analysis. The slow stochastic oscillator, or a moving
    average of %K (known as %D), is recommended instead.
**Default Inputs:** :data: `zipline.pipeline.data.EquityPricing.close`
:data: `zipline.pipeline.data.EquityPricing.low`
:data: `zipline.pipeline.data.EquityPricing.high`
**Default Window Length:** 14
Returns
-------
out: %K oscillator
"""
inputs = (EquityPricing.close, EquityPricing.low, EquityPricing.high)
window_safe = True
window_length = 14
def compute(self, today, assets, out, closes, lows, highs):
highest_highs = nanmax(highs, axis=0)
lowest_lows = nanmin(lows, axis=0)
today_closes = closes[-1]
evaluate(
'((tc - ll) / (hh - ll)) * 100',
local_dict={
'tc': today_closes,
'll': lowest_lows,
'hh': highest_highs,
},
global_dict={},
out=out,
)
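# Worked example (hypothetical numbers): a close of 105 against a 14-day
# low of 100 and high of 110 gives %K = ((105 - 100) / (110 - 100)) * 100
# = 50.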
class IchimokuKinkoHyo(CustomFactor):
"""Compute the various metrics for the Ichimoku Kinko Hyo (Ichimoku Cloud).
http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:ichimoku_cloud # noqa
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`
:data:`zipline.pipeline.data.EquityPricing.low`
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 52
Parameters
----------
window_length : int > 0
        The length of the window for the senkou span b.
tenkan_sen_length : int >= 0, <= window_length
The length of the window for the tenkan-sen.
kijun_sen_length : int >= 0, <= window_length
        The length of the window for the kijun-sen.
chikou_span_length : int >= 0, <= window_length
The lag for the chikou span.
"""
params = {
'tenkan_sen_length': 9,
'kijun_sen_length': 26,
'chikou_span_length': 26,
}
inputs = (EquityPricing.high, EquityPricing.low, EquityPricing.close)
outputs = (
'tenkan_sen',
'kijun_sen',
'senkou_span_a',
'senkou_span_b',
'chikou_span',
)
window_length = 52
def _validate(self):
super(IchimokuKinkoHyo, self)._validate()
for k, v in self.params.items():
if v > self.window_length:
raise ValueError(
'%s must be <= the window_length: %s > %s' % (
k, v, self.window_length,
),
)
def compute(self,
today,
assets,
out,
high,
low,
close,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length):
out.tenkan_sen = tenkan_sen = (
high[-tenkan_sen_length:].max(axis=0) +
low[-tenkan_sen_length:].min(axis=0)
) / 2
out.kijun_sen = kijun_sen = (
high[-kijun_sen_length:].max(axis=0) +
low[-kijun_sen_length:].min(axis=0)
) / 2
out.senkou_span_a = (tenkan_sen + kijun_sen) / 2
out.senkou_span_b = (high.max(axis=0) + low.min(axis=0)) / 2
out.chikou_span = close[chikou_span_length]
class RateOfChangePercentage(CustomFactor):
"""
Rate of change Percentage
ROC measures the percentage change in price from one period to the next.
The ROC calculation compares the current price with the price `n`
periods ago.
Formula for calculation: ((price - prevPrice) / prevPrice) * 100
price - the current price
prevPrice - the price n days ago, equals window length
"""
def compute(self, today, assets, out, close):
today_close = close[-1]
prev_close = close[0]
evaluate('((tc - pc) / pc) * 100',
local_dict={
'tc': today_close,
'pc': prev_close
},
global_dict={},
out=out,
)
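# Worked example (hypothetical numbers): close[0] = 100 at the start of the
# window and close[-1] = 105 today gives ((105 - 100) / 100) * 100 = 5.
# Note this factor declares no default inputs or window_length; both must
# be supplied at construction, e.g.
#     RateOfChangePercentage(inputs=[EquityPricing.close], window_length=20)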
class TrueRange(CustomFactor):
"""
True Range
A technical indicator originally developed by J. Welles Wilder, Jr.
Indicates the true degree of daily price change in an underlying.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.high`
:data:`zipline.pipeline.data.EquityPricing.low`
:data:`zipline.pipeline.data.EquityPricing.close`
**Default Window Length:** 2
"""
inputs = (
EquityPricing.high,
EquityPricing.low,
EquityPricing.close,
)
window_length = 2
def compute(self, today, assets, out, highs, lows, closes):
high_to_low = highs[1:] - lows[1:]
high_to_prev_close = abs(highs[1:] - closes[:-1])
low_to_prev_close = abs(lows[1:] - closes[:-1])
out[:] = nanmax(
dstack((
high_to_low,
high_to_prev_close,
low_to_prev_close,
)),
2
)
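# Worked example (hypothetical numbers): with today's high 102, low 99 and
# yesterday's close 103, the candidates are 102 - 99 = 3, |102 - 103| = 1
# and |99 - 103| = 4, so the true range is 4.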
class MovingAverageConvergenceDivergenceSignal(CustomFactor):
"""
Moving Average Convergence/Divergence (MACD) Signal line
https://en.wikipedia.org/wiki/MACD
A technical indicator originally developed by Gerald Appel in the late
1970's. MACD shows the relationship between two moving averages and
reveals changes in the strength, direction, momentum, and duration of a
trend in a stock's price.
**Default Inputs:** :data:`zipline.pipeline.data.EquityPricing.close`
Parameters
----------
fast_period : int > 0, optional
The window length for the "fast" EWMA. Default is 12.
slow_period : int > 0, > fast_period, optional
The window length for the "slow" EWMA. Default is 26.
signal_period : int > 0, < fast_period, optional
The window length for the signal line. Default is 9.
Notes
-----
Unlike most pipeline expressions, this factor does not accept a
``window_length`` parameter. ``window_length`` is inferred from
``slow_period`` and ``signal_period``.
"""
inputs = (EquityPricing.close,)
# We don't use the default form of `params` here because we want to
# dynamically calculate `window_length` from the period lengths in our
# __new__.
params = ('fast_period', 'slow_period', 'signal_period')
@expect_bounded(
__funcname='MACDSignal',
fast_period=(1, None), # These must all be >= 1.
slow_period=(1, None),
signal_period=(1, None),
)
def __new__(cls,
fast_period=12,
slow_period=26,
signal_period=9,
*args,
**kwargs):
if slow_period <= fast_period:
raise ValueError(
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period={slow}, fast_period={fast}".format(
slow=slow_period,
fast=fast_period,
)
)
return super(MovingAverageConvergenceDivergenceSignal, cls).__new__(
cls,
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
window_length=slow_period + signal_period - 1,
*args, **kwargs
)
def _ewma(self, data, length):
decay_rate = 1.0 - (2.0 / (1.0 + length))
return average(
data,
axis=1,
weights=exponential_weights(length, decay_rate)
)
def compute(self, today, assets, out, close, fast_period, slow_period,
signal_period):
slow_EWMA = self._ewma(
rolling_window(close, slow_period),
slow_period
)
fast_EWMA = self._ewma(
rolling_window(close, fast_period)[-signal_period:],
fast_period
)
macd = fast_EWMA - slow_EWMA
out[:] = self._ewma(macd.T, signal_period)
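# Sizing note: the defaults fast_period=12, slow_period=26, signal_period=9
# give window_length = 26 + 9 - 1 = 34, i.e. enough rows to form 9
# overlapping 26-day windows for the slow EWMA. In _ewma, decay_rate =
# 1 - 2/(1 + length) is the usual "span" parameterization of an EWMA.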
# Convenience aliases.
MACDSignal = MovingAverageConvergenceDivergenceSignal
|
"""
:type: nltk.WordNetCorpusReader
:Size: 10.283MB
Model files for wordnet in nltk.
`[page] <http://wordnet.princeton.edu/>`__
"""
from OpenAttack.utils import make_zip_downloader
NAME = "TProcess.NLTKWordNet"
URL = "https://cdn.data.thunlp.org/TAADToolbox/wordnet.zip"
DOWNLOAD = make_zip_downloader(URL)
class Lemmatizer:
def __init__(self, wnc):
self.__wnc = wnc
def __call__(self, word, pos):
pp = "n"
if pos in ["a", "r", "n", "v", "s"]:
pp = pos
else:
if pos[:2] == "JJ":
pp = "a"
elif pos[:2] == "VB":
pp = "v"
elif pos[:2] == "NN":
pp = "n"
elif pos[:2] == "RB":
pp = "r"
else:
pp = None
        if pp is None:  # unknown POS tag; skip lemmatization
return word
lemmas = self.__wnc._morphy(word, pp)
return min(lemmas, key=len) if len(lemmas) > 0 else word
def LOAD(path):
wnc = __import__("nltk").corpus.WordNetCorpusReader(path, None)
wnc.lemma = Lemmatizer(wnc)
return wnc
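# Usage sketch (hypothetical local path; requires nltk to be installed and
# the wordnet archive from URL above unpacked at that path):
#
#     wnc = LOAD("./data/TProcess.NLTKWordNet")
#     wnc.lemma("cars", "NNS")     # -> "car"  (noun lemmatization)
#     wnc.lemma("running", "VBG")  # -> "run"  (verb lemmatization)
#     wnc.lemma("fast", "IN")      # unknown tag -> word returned unchanged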
|
import json
from http import HTTPStatus
from typing import Any, List
from httpx import Client
from pydantic import BaseModel
from typing_extensions import Annotated
from hintapi import (
Header,
HintAPI,
HTMLResponse,
HttpRoute,
HttpView,
JSONResponse,
Path,
Routes,
required_method,
)
from hintapi.openapi.application import OpenAPI
def test_openapi_page():
app = HintAPI()
openapi = OpenAPI()
app.router << Routes("/docs" // openapi.routes, namespace="docs")
assert app.router.url_for("docs:json_docs") == "/docs/json"
@app.router.http.get("/hello")
def hello() -> Annotated[Any, JSONResponse[200, {}, List[str]]]:
"""
hello
"""
pass
class Username(BaseModel):
name: str
@app.router.http.get("/path/{name}")
def path(name: str = Path(...)):
pass
@app.router.http("/http-view")
class HTTPClass(HttpView):
def get(self) -> Annotated[Any, HTMLResponse[200]]:
"""
...
......
"""
def post(
self,
) -> Annotated[Any, JSONResponse[201, {}, Username]]:
"""
...
......
"""
def delete(
self,
) -> Annotated[Any, {"204": {"description": HTTPStatus(204).description}}]:
"""
...
......
"""
def just_middleware(endpoint):
def wrapper(
authorization: str = Header(..., description="JWT Token")
) -> Annotated[Any, {"401": {"description": HTTPStatus(401).description}}]:
return endpoint()
return wrapper
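    # just_middleware is a plain decorator: hintapi lifts the wrapper's own
    # annotations -- the required `authorization` header and the 401
    # response -- into the OpenAPI document of every route it wraps, which
    # is why both appear under each /middleware/* operation asserted below.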
middleware_routes = "/middleware" // Routes(
HttpRoute("/path/{name}", path),
HttpRoute("/http-view", HTTPClass),
http_middlewares=[just_middleware],
namespace="middleware",
)
app.router << middleware_routes
client = Client(app=app, base_url="http://localhost")
response = client.get("/docs", follow_redirects=False)
assert response.status_code == 307
assert response.headers["location"] == "http://localhost/docs/"
response = client.get("/docs/")
assert response.status_code == 200
response = client.get("/docs/json")
assert response.status_code == 200
assert len(response.headers["hash"]) == 32
# Check Auto Reload
assert response.headers.get("reload") == "true"
openapi.reload = False
assert client.get("/docs/json").headers["reload"] == "false"
openapi.reload = True
openapi_docs_text = response.text
assert json.loads(openapi_docs_text) == {
"openapi": "3.0.3",
"info": {"title": "hintapi API", "version": "1.0.0"},
"paths": {
"/hello": {
"get": {
"summary": "hello",
"responses": {
"200": {
"description": "Request fulfilled, document follows",
"content": {
"application/json": {
"schema": {
"type": "array",
"items": {"type": "string"},
}
}
},
}
},
}
},
"/http-view": {
"get": {
"summary": "...",
"description": "......",
"responses": {
"200": {
"description": "Request fulfilled, document follows",
"content": {"text/html": {"schema": {"type": "string"}}},
}
},
},
"post": {
"summary": "...",
"description": "......",
"responses": {
"201": {
"description": "Document created, URL follows",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"}
},
"required": ["name"],
}
}
},
}
},
},
"delete": {
"summary": "...",
"description": "......",
"responses": {
"204": {"description": "Request fulfilled, nothing follows"}
},
},
},
"/path/{name}": {
"get": {
"parameters": [
{
"in": "path",
"name": "name",
"description": "",
"required": True,
"schema": {"title": "Name", "type": "string"},
"deprecated": False,
}
],
"responses": {
"422": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/RequestValidationError"
}
}
},
"description": "Failed to verify request parameters",
}
},
}
},
"/middleware/http-view": {
"get": {
"parameters": [
{
"in": "header",
"name": "authorization",
"description": "JWT Token",
"required": True,
"schema": {"title": "Authorization", "type": "string"},
"deprecated": False,
}
],
"responses": {
"422": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/RequestValidationError"
}
}
},
"description": "Failed to verify request parameters",
},
"200": {
"description": "Request fulfilled, document follows",
"content": {"text/html": {"schema": {"type": "string"}}},
},
"401": {
"description": "No permission -- see authorization schemes"
},
},
},
"post": {
"parameters": [
{
"in": "header",
"name": "authorization",
"description": "JWT Token",
"required": True,
"schema": {"title": "Authorization", "type": "string"},
"deprecated": False,
}
],
"responses": {
"422": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/RequestValidationError"
}
}
},
"description": "Failed to verify request parameters",
},
"201": {
"description": "Document created, URL follows",
"content": {
"application/json": {
"schema": {
"type": "object",
"properties": {
"name": {"title": "Name", "type": "string"}
},
"required": ["name"],
}
}
},
},
"401": {
"description": "No permission -- see authorization schemes"
},
},
},
"delete": {
"parameters": [
{
"in": "header",
"name": "authorization",
"description": "JWT Token",
"required": True,
"schema": {"title": "Authorization", "type": "string"},
"deprecated": False,
}
],
"responses": {
"422": {
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/RequestValidationError"
}
}
},
"description": "Failed to verify request parameters",
},
"204": {"description": "Request fulfilled, nothing follows"},
"401": {
"description": "No permission -- see authorization schemes"
},
},
},
},
},
"tags": [],
"components": {
"schemas": {
"RequestValidationError": {
"type": "array",
"items": {
"type": "object",
"properties": {
"loc": {
"title": "Location",
"description": "error field",
"type": "array",
"items": {"type": "string"},
},
"type": {
"title": "Type",
"description": "error type",
"type": "string",
},
"msg": {
"title": "Message",
"description": "error message",
"type": "string",
},
"ctx": {
"title": "Context",
"description": "error context",
"type": "string",
},
"in": {
"title": "In",
"type": "string",
"enum": ["path", "query", "header", "cookie", "body"],
},
},
"required": ["loc", "type", "msg"],
},
}
}
},
"servers": [
{"url": "/", "description": "Current server"},
dict(
url="{scheme}://{address}/",
description="Custom API Server Host",
variables={
"scheme": {
"default": "http",
"enum": ["http", "https"],
"description": "http or https",
},
"address": {
"default": "localhost",
"description": "api server's host[:port]",
},
},
),
],
}, str(json.loads(openapi_docs_text))
def test_openapi_single_function_summary_and_description():
app = HintAPI()
openapi = OpenAPI()
app.router << "/docs" // openapi.routes
@app.router.http.get("/0", name=None, summary="Summary", description="Description")
def _():
return ""
@app.router.http.get("/1", name=None, summary="Summary")
def _():
return ""
@app.router.http.get("/2", name=None, summary="Summary")
def _():
"""
Description
"""
return ""
@app.router.http.get("/3", name=None)
def _():
"""
Summary
Description
"""
return ""
assert openapi._generate_path(app.router.search("http", "/0")[1], "/")[0] == {
"get": {"summary": "Summary", "description": "Description"}
}
assert openapi._generate_path(app.router.search("http", "/1")[1], "/")[0] == {
"get": {"summary": "Summary"}
}
assert openapi._generate_path(app.router.search("http", "/2")[1], "/")[0] == {
"get": {"summary": "Summary", "description": "Description"}
}
assert openapi._generate_path(app.router.search("http", "/3")[1], "/")[0] == {
"get": {"summary": "Summary", "description": "Description"}
}
def test_openapi_single_function_tags():
app = HintAPI()
openapi = OpenAPI()
app.router << "/docs" // openapi.routes
@app.router.http.get("/", name=None, tags=["tag0"])
def homepage():
return ""
assert openapi._generate_path(app.router.search("http", "/")[1], "/") == (
{"get": {"tags": ["tag0"]}},
{},
)
def test_openapi_routes_tags():
app = HintAPI()
openapi = OpenAPI()
app.router << "/docs" // openapi.routes
def homepage():
return ""
app.router << Routes(
HttpRoute("/", homepage) @ required_method("GET"), tags=["tag0"]
)
assert openapi._generate_path(app.router.search("http", "/")[1], "/") == (
{"get": {"tags": ["tag0"]}},
{},
)
|
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import os
import six
from bson.objectid import ObjectId
from .model_base import Model
from girder import events
from girder import logger
from girder.constants import AccessType
from girder.exceptions import ValidationException, GirderException
from girder.utility import acl_mixin
from girder.utility.model_importer import ModelImporter
class Item(acl_mixin.AccessControlMixin, Model):
"""
Items are leaves in the data hierarchy. They can contain 0 or more
files within them, and can also contain arbitrary metadata.
"""
def initialize(self):
self.name = 'item'
self.ensureIndices(('folderId', 'name', 'lowerName',
([('folderId', 1), ('name', 1)], {})))
self.ensureTextIndex({
'name': 10,
'description': 1
})
self.resourceColl = 'folder'
self.resourceParent = 'folderId'
self.exposeFields(level=AccessType.READ, fields=(
'_id', 'size', 'updated', 'description', 'created', 'meta',
'creatorId', 'folderId', 'name', 'baseParentType', 'baseParentId',
'copyOfItem'))
def _validateString(self, value):
"""
Make sure a value is a string and is stripped of whitespace.
:param value: the value to coerce into a string if it isn't already.
:returns: the string version of the value.
"""
if value is None:
value = ''
if not isinstance(value, six.string_types):
value = str(value)
return value.strip()
def validate(self, doc):
from .folder import Folder
doc['name'] = self._validateString(doc.get('name', ''))
doc['description'] = self._validateString(doc.get('description', ''))
if not doc['name']:
raise ValidationException('Item name must not be empty.', 'name')
# Ensure unique name among sibling items and folders. If the desired
# name collides with an existing item or folder, we will append (n)
# onto the end of the name, incrementing n until the name is unique.
name = doc['name']
# If the item already exists with the current name, don't check.
# Although we don't want duplicate names, they can occur when there are
# simultaneous uploads, and also because Mongo has no guaranteed
# multi-collection uniqueness constraints. If this occurs, and we are
# changing a non-name property, don't validate the name (since that may
# fail). If the name is being changed, validate that it is probably
# unique.
checkName = '_id' not in doc or not self.findOne({'_id': doc['_id'], 'name': name})
n = 0
while checkName:
q = {
'name': name,
'folderId': doc['folderId']
}
if '_id' in doc:
q['_id'] = {'$ne': doc['_id']}
dupItem = self.findOne(q, fields=['_id'])
q = {
'parentId': doc['folderId'],
'name': name,
'parentCollection': 'folder'
}
dupFolder = Folder().findOne(q, fields=['_id'])
if dupItem is None and dupFolder is None:
doc['name'] = name
checkName = False
else:
n += 1
name = '%s (%d)' % (doc['name'], n)
doc['lowerName'] = doc['name'].lower()
return doc
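    # Illustration of the de-duplication in validate() (names hypothetical):
    # saving a second item named "scan" in the same folder stores it as
    # "scan (1)", a third as "scan (2)", and so on until a free name is
    # found.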
def load(self, id, level=AccessType.ADMIN, user=None, objectId=True,
force=False, fields=None, exc=False):
"""
Calls AccessControlMixin.load while doing some auto-correction.
Takes the same parameters as
:py:func:`girder.models.model_base.AccessControlMixin.load`.
"""
# Ensure we include extra fields to do the migration below
extraFields = {'baseParentId', 'baseParentType', 'parentId', 'parentCollection',
'name', 'lowerName'}
loadFields = self._supplementFields(fields, extraFields)
doc = super(Item, self).load(
id=id, level=level, user=user, objectId=objectId, force=force, fields=loadFields,
exc=exc)
if doc is not None:
if 'baseParentType' not in doc:
pathFromRoot = self.parentsToRoot(doc, user=user, force=True)
baseParent = pathFromRoot[0]
doc['baseParentId'] = baseParent['object']['_id']
doc['baseParentType'] = baseParent['type']
self.update({'_id': doc['_id']}, {'$set': {
'baseParentId': doc['baseParentId'],
'baseParentType': doc['baseParentType']
}})
if 'lowerName' not in doc:
doc['lowerName'] = doc['name'].lower()
self.update({'_id': doc['_id']}, {'$set': {
'lowerName': doc['lowerName']
}})
if 'meta' not in doc:
doc['meta'] = {}
self.update({'_id': doc['_id']}, {'$set': {
'meta': {}
}})
self._removeSupplementalFields(doc, fields)
return doc
def move(self, item, folder):
"""
Move the given item from its current folder into another folder.
:param item: The item to move.
:type item: dict
:param folder: The folder to move the item into.
:type folder: dict.
"""
self.propagateSizeChange(item, -item['size'])
item['folderId'] = folder['_id']
item['baseParentType'] = folder['baseParentType']
item['baseParentId'] = folder['baseParentId']
self.propagateSizeChange(item, item['size'])
return self.save(item)
def propagateSizeChange(self, item, inc):
from .folder import Folder
Folder().increment(query={
'_id': item['folderId']
}, field='size', amount=inc, multi=False)
ModelImporter.model(item['baseParentType']).increment(query={
'_id': item['baseParentId']
}, field='size', amount=inc, multi=False)
def recalculateSize(self, item):
"""
Recalculate the item size based on the files that are in it. If this
is different than the recorded size, propagate the changes.
:param item: The item to recalculate the size of.
:returns: the recalculated size in bytes
"""
size = 0
for file in self.childFiles(item):
# We could add a recalculateSize to the file model, in which case
# this would be:
# size += File().recalculateSize(file)
size += file.get('size', 0)
delta = size - item.get('size', 0)
if delta:
logger.info('Item %s was wrong size: was %d, is %d' % (
item['_id'], item['size'], size))
item['size'] = size
self.update({'_id': item['_id']}, update={'$set': {'size': size}})
self.propagateSizeChange(item, delta)
return size
def childFiles(self, item, limit=0, offset=0, sort=None, **kwargs):
"""
Returns child files of the item. Passes any kwargs to the find
function.
:param item: The parent item.
:param limit: Result limit.
:param offset: Result offset.
:param sort: The sort structure to pass to pymongo.
"""
from .file import File
q = {
'itemId': item['_id']
}
return File().find(q, limit=limit, offset=offset, sort=sort, **kwargs)
def remove(self, item, **kwargs):
"""
Delete an item, and all references to it in the database.
:param item: The item document to delete.
:type item: dict
"""
from .file import File
from .upload import Upload
# Delete all files in this item
fileModel = File()
files = fileModel.find({
'itemId': item['_id']
})
for file in files:
fileKwargs = kwargs.copy()
fileKwargs.pop('updateItemSize', None)
fileModel.remove(file, updateItemSize=False, **fileKwargs)
# Delete pending uploads into this item
uploadModel = Upload()
uploads = uploadModel.find({
'parentId': item['_id'],
'parentType': 'item'
})
for upload in uploads:
uploadModel.remove(upload, **kwargs)
# Delete the item itself
Model.remove(self, item)
def createItem(self, name, creator, folder, description='',
reuseExisting=False):
"""
Create a new item. The creator will be given admin access to it.
:param name: The name of the item.
:type name: str
:param description: Description for the item.
:type description: str
:param folder: The parent folder of the item.
:param creator: User document representing the creator of the item.
:type creator: dict
:param reuseExisting: If an item with the given name already exists
under the given folder, return that item rather than creating a
new one.
:type reuseExisting: bool
:returns: The item document that was created.
"""
if reuseExisting:
existing = self.findOne({
'folderId': folder['_id'],
'name': name
})
if existing:
return existing
now = datetime.datetime.utcnow()
if not isinstance(creator, dict) or '_id' not in creator:
# Internal error -- this shouldn't be called without a user.
raise GirderException('Creator must be a user.',
'girder.models.item.creator-not-user')
if 'baseParentType' not in folder:
pathFromRoot = self.parentsToRoot({'folderId': folder['_id']},
creator, force=True)
folder['baseParentType'] = pathFromRoot[0]['type']
folder['baseParentId'] = pathFromRoot[0]['object']['_id']
return self.save({
'name': self._validateString(name),
'description': self._validateString(description),
'folderId': ObjectId(folder['_id']),
'creatorId': creator['_id'],
'baseParentType': folder['baseParentType'],
'baseParentId': folder['baseParentId'],
'created': now,
'updated': now,
'size': 0,
'meta': {}
})
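    # Usage sketch (illustrative; assumes already-loaded `folder` and
    # `user` documents):
    #
    #     item = Item().createItem(
    #         name='slide.tiff', creator=user, folder=folder,
    #         description='raw scan', reuseExisting=True)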
def updateItem(self, item):
"""
Updates an item.
:param item: The item document to update
:type item: dict
:returns: The item document that was edited.
"""
item['updated'] = datetime.datetime.utcnow()
# Validate and save the item
return self.save(item)
def filter(self, doc, user=None, additionalKeys=None):
"""
Overrides the parent ``filter`` method to add an empty meta field
(if it doesn't exist) to the returned folder.
"""
filteredDoc = super(Item, self).filter(doc, user, additionalKeys=additionalKeys)
if 'meta' not in filteredDoc:
filteredDoc['meta'] = {}
return filteredDoc
def setMetadata(self, item, metadata, allowNull=False):
"""
Set metadata on an item. A `ValidationException` is thrown in the
cases where the metadata JSON object is badly formed, or if any of the
metadata keys contains a period ('.').
:param item: The item to set the metadata on.
:type item: dict
:param metadata: A dictionary containing key-value pairs to add to
            the item's meta field
:type metadata: dict
:param allowNull: Whether to allow `null` values to be set in the item's
metadata. If set to `False` or omitted, a `null` value will cause that
metadata field to be deleted.
:returns: the item document
"""
if 'meta' not in item:
item['meta'] = {}
# Add new metadata to existing metadata
item['meta'].update(six.viewitems(metadata))
# Remove metadata fields that were set to null (use items in py3)
if not allowNull:
toDelete = [k for k, v in six.viewitems(metadata) if v is None]
for key in toDelete:
del item['meta'][key]
self.validateKeys(item['meta'])
item['updated'] = datetime.datetime.utcnow()
# Validate and save the item
return self.save(item)
def deleteMetadata(self, item, fields):
"""
Delete metadata on an item. A `ValidationException` is thrown if the
metadata field names contain a period ('.') or begin with a dollar sign
('$').
:param item: The item to delete metadata from.
:type item: dict
:param fields: An array containing the field names to delete from the
item's meta field
        :type fields: list
:returns: the item document
"""
self.validateKeys(fields)
if 'meta' not in item:
item['meta'] = {}
for field in fields:
item['meta'].pop(field, None)
item['updated'] = datetime.datetime.utcnow()
return self.save(item)
def parentsToRoot(self, item, user=None, force=False):
"""
Get the path to traverse to a root of the hierarchy.
:param item: The item whose root to find
:type item: dict
:param user: The user making the request (not required if force=True).
:type user: dict or None
:param force: Set to True to skip permission checking. If False, the
returned models will be filtered.
:type force: bool
:returns: an ordered list of dictionaries from root to the current item
"""
from .folder import Folder
folderModel = Folder()
curFolder = folderModel.load(
item['folderId'], user=user, level=AccessType.READ, force=force)
folderIdsToRoot = folderModel.parentsToRoot(
curFolder, user=user, level=AccessType.READ, force=force)
if force:
folderIdsToRoot.append({'type': 'folder', 'object': curFolder})
else:
filteredFolder = folderModel.filter(curFolder, user)
folderIdsToRoot.append({'type': 'folder', 'object': filteredFolder})
return folderIdsToRoot
def copyItem(self, srcItem, creator, name=None, folder=None, description=None):
"""
Copy an item, including duplicating files and metadata.
:param srcItem: the item to copy.
:type srcItem: dict
:param creator: the user who will own the copied item.
:param name: The name of the new item. None to copy the original name.
:type name: str
:param folder: The parent folder of the new item. None to store in the
same folder as the original item.
:param description: Description for the new item. None to copy the
original description.
:type description: str
:returns: the new item.
"""
from .file import File
from .folder import Folder
if name is None:
name = srcItem['name']
if folder is None:
folder = Folder().load(srcItem['folderId'], force=True)
if description is None:
description = srcItem['description']
newItem = self.createItem(
folder=folder, name=name, creator=creator, description=description)
# copy metadata and other extension values
if 'meta' in srcItem:
newItem['meta'] = copy.deepcopy(srcItem['meta'])
filteredItem = self.filter(newItem, creator)
for key in srcItem:
if key not in filteredItem and key not in newItem:
newItem[key] = copy.deepcopy(srcItem[key])
# add a reference to the original item
newItem['copyOfItem'] = srcItem['_id']
newItem = self.save(newItem, triggerEvents=False)
# Give listeners a chance to change things
events.trigger('model.item.copy.prepare', (srcItem, newItem))
# copy files
fileModel = File()
for file in self.childFiles(item=srcItem):
fileModel.copyFile(file, creator=creator, item=newItem)
# Reload to get updated size value
newItem = self.load(newItem['_id'], force=True)
events.trigger('model.item.copy.after', newItem)
return newItem
def fileList(self, doc, user=None, path='', includeMetadata=False,
subpath=True, mimeFilter=None, data=True):
"""
This function generates a list of 2-tuples whose first element is the
relative path to the file from the item's root and whose second
element depends on the value of the `data` flag. If `data=True`, the
second element will be a generator that will generate the bytes of the
file data as stored in the assetstore. If `data=False`, the second
element will be the file document itself.
:param doc: The item to list.
:param user: A user used to validate data that is returned. This isn't
used, but is present to be consistent across all model
implementations of fileList.
:param path: A path prefix to add to the results.
:type path: str
        :param includeMetadata: If True and there is any metadata, include a
            result which is the JSON string of the metadata. This is given
            the name girder-item-metadata.json; if a file in the item
            already has that name, the metadata entry is omitted.
:type includeMetadata: bool
:param subpath: If True and the item has more than one file, any
metadata, or the sole file is not named the same as the
item, then the returned paths include the item name.
:type subpath: bool
:param mimeFilter: Optional list of MIME types to filter by. Set to
None to include all files.
:type mimeFilter: `list or tuple`
:param data: If True return raw content of each file as stored in the
assetstore, otherwise return file document.
:type data: bool
:returns: Iterable over files in this item, where each element is a
tuple of (path name of the file, stream function with file
data or file object).
:rtype: generator(str, func)
"""
from .file import File
if subpath:
files = list(self.childFiles(item=doc, limit=2))
if (len(files) != 1 or files[0]['name'] != doc['name']
or (includeMetadata and doc.get('meta', {}))):
path = os.path.join(path, doc['name'])
metadataFile = 'girder-item-metadata.json'
fileModel = File()
# Eagerly evaluate this list, as the MongoDB cursor can time out on long requests
# Don't use a "filter" projection here, since returning the full file document is promised
# by this function, and file objects tend to not have large fields present
childFiles = list(self.childFiles(item=doc))
for file in childFiles:
if not self._mimeFilter(file, mimeFilter):
continue
if file['name'] == metadataFile:
metadataFile = None
if data:
val = fileModel.download(file, headers=False)
else:
val = file
yield (os.path.join(path, file['name']), val)
if includeMetadata and metadataFile and len(doc.get('meta', {})):
def stream():
yield json.dumps(doc['meta'], default=str)
yield (os.path.join(path, metadataFile), stream)
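    # Usage sketch (illustrative; `dest` is a hypothetical target
    # directory): stream every file of an item, plus its metadata as a
    # pseudo-file:
    #
    #     for relpath, stream in Item().fileList(item, includeMetadata=True):
    #         with open(os.path.join(dest, relpath), 'wb') as fh:
    #             for chunk in stream():
    #                 fh.write(chunk)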
def _mimeFilter(self, file, mimeFilter):
"""
Returns whether or not the given file should be passed through the given
MIME filter. If no MIME filter is specified, all files are allowed.
"""
if not mimeFilter:
return True
return file['mimeType'] in mimeFilter
def isOrphan(self, item):
"""
Returns True if this item is orphaned (its folder is missing).
:param item: The item to check.
:type item: dict
"""
from .folder import Folder
return not Folder().load(item.get('folderId'), force=True)
def updateSize(self, doc):
"""
Recomputes the size of this item and its underlying
files and fixes the sizes as needed.
:param doc: The item.
:type doc: dict
"""
from .file import File
# get correct size from child files
size = 0
fixes = 0
fileModel = File()
for file in self.childFiles(doc):
s, f = fileModel.updateSize(file)
size += s
fixes += f
# fix value if incorrect
if size != doc.get('size'):
self.update({'_id': doc['_id']}, update={'$set': {'size': size}})
fixes += 1
return size, fixes
|
# os.path is used below; import os explicitly rather than relying on the
# star import from common.
import os
import sys
from common import *
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import matplotlib as mpl
series_labels = [kOriginalLabel, kFppLabel, kFsharpLabel,kFoccStandardLabel, kFoccLatestLabel]
def main():
if 1 < len(sys.argv) :
diagram_path = sys.argv[1]
else:
diagram_path = ""
curDir = os.path.dirname(os.path.realpath(__file__))
data_path = os.path.join(curDir, "data", "blksize_abort_rate")
x_axis, series_names, series = parse(data_path)
f, (axis) = plt.subplots()
f.set_size_inches(6, 6)
xticks = range(len(x_axis))
for label in series_labels:
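        # Values below zero appear to act as missing-data sentinels in the
        # parsed series; skip them so lines only connect real measurements.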
pruned_ticks = []
pruned_series = []
for i in range(len(series[label])):
if series[label][i] >= 0:
pruned_ticks.append(xticks[i])
pruned_series.append(series[label][i])
axis.plot(pruned_ticks, pruned_series,fmt(label), label=label, **line_fmt(label))
# axis.plot(xticks, series[label],fmt(label))
axis.tick_params(axis='both', which='major', labelsize=18)
axis.set_title("Throughput")
axis.set_xlabel("# of txns per block")
axis.set_xticks(xticks)
# latency_ax.set_xticklabels(xlabels, fontsize=18)
axis.set_xticklabels(x_axis)
axis.set_yticks([0, .2, .4, .6, .8, 1.0])
axis.set_ylim([-.1, 1.2])
axis.set_ylabel("tps")
handles, labels = axis.get_legend_handles_labels()
f.legend(handles, labels,
loc='upper center', ncol=2, bbox_to_anchor=(0.53, 0.88))
if diagram_path == "":
plt.tight_layout()
plt.show()
else:
f.tight_layout()
f.savefig(diagram_path, bbox_inches='tight')
if __name__ == "__main__":
sys.exit(main())
|
import bluesky
from bluesky.tests.conftest import RE # noqa
from bluesky.plans import count
from bluesky.plan_stubs import trigger_and_read, configure
import event_model
from ophyd.tests.conftest import hw # noqa
import pytest
from .. import UnknownEventType
import warnings
# This line is used to ignore the deprecation warning for bulk_events in tests
warnings.filterwarnings("ignore", message="The document type 'bulk_events'*")
_md = {'reason': 'test', 'user': 'temp user', 'beamline': 'test_beamline'}
# Some useful plans for use in testing
def simple_plan(dets):
    '''A simple plan which runs count with num=5'''
md = {**_md, **{'test_plan_name': 'simple_plan'}}
yield from count(dets, num=5, md=md)
def multi_stream_one_descriptor_plan(dets):
    '''A plan that has two streams but one descriptor per stream'''
md = {**_md, **{'test_plan_name': 'multi_stream_one_descriptor_plan'}}
@bluesky.preprocessors.baseline_decorator(dets)
def _plan(dets):
yield from count(dets, md=md)
yield from _plan(dets)
def one_stream_multi_descriptors_plan(dets):
    '''A plan that has one stream but two descriptors for that stream'''
    md = {**_md, **{'test_plan_name': 'one_stream_multi_descriptors_plan'}}
@bluesky.preprocessors.run_decorator(md=md)
def _internal_plan(dets):
yield from trigger_and_read(dets)
for det in dets:
yield from configure(det, {})
yield from trigger_and_read(dets)
yield from _internal_plan(dets)
@pytest.fixture(params=['det', 'direct_img', 'direct_img_list',
'det direct_img direct_img_list'],
scope='function')
def detector_list(hw, request): # noqa
def _det_list_func(ignore):
if request.param in ignore:
pytest.skip()
dets = [getattr(hw, det_name) for det_name in request.param.split(' ')]
return dets
return _det_list_func
@pytest.fixture(params=['event', 'bulk_events', 'event_page'],
scope='function')
def event_type(request):
def _event_type_func(ignore):
if request.param in ignore:
pytest.skip()
return request.param
return _event_type_func
@pytest.fixture(params=[simple_plan, multi_stream_one_descriptor_plan,
one_stream_multi_descriptors_plan],
scope='function')
def plan_type(request):
def _plan_type_func(ignore):
if request.param in ignore:
pytest.skip()
return request.param
return _plan_type_func
@pytest.fixture()
def generate_data(RE, detector_list, event_type): # noqa
'''A fixture that returns event data for a number of test cases.
Returns a list of (name, doc) tuples for the plan passed in as an arg.
Parameters
----------
RE : object
pytest fixture object imported from `bluesky.test.conftest`
detector_list : list
pytest fixture defined in `suitcase.utils.conftest` which returns a
list of detectors
event_type : list
pytest fixture defined in `suitcase.utils.conftest` which returns a
list of 'event_types'.
'''
def _generate_data_func(plan, ignore=None):
'''Generates data to be used for testing of suitcase.*.export(..)
functions
        Parameters
        ----------
        plan : callable
            the plan to use to generate the test data
        ignore : list, optional
            list of the pytest.fixture parameter 'values' to ignore.
        Returns
        -------
        collector : list
            A list of (name, doc) tuple pairs generated by the run engine.
        '''
if ignore is None:
ignore = []
# define the output lists and an internal list.
collector = []
event_list = []
# define the collector function depending on the event_type
if event_type(ignore) == 'event':
def collect(name, doc):
collector.append((name, doc))
if name == 'event':
event_list.append(doc)
elif event_type(ignore) == 'event_page':
def collect(name, doc):
if name == 'event':
event_list.append(doc)
elif name == 'stop':
collector.append(('event_page',
event_model.pack_event_page(
*event_list)))
collector.append((name, doc))
else:
collector.append((name, doc))
elif event_type(ignore) == 'bulk_events':
def collect(name, doc):
if name == 'event':
event_list.append(doc)
elif name == 'stop':
collector.append(('bulk_events', {'primary': event_list}))
collector.append((name, doc))
else:
collector.append((name, doc))
else:
raise UnknownEventType('Unknown event_type kwarg passed to '
'suitcase.utils.events_data')
# collect the documents
RE.subscribe(collect)
RE(plan(detector_list(ignore)))
return collector
return _generate_data_func
@pytest.fixture
def example_data(generate_data, plan_type):
'''A fixture that returns event data for a number of test cases.
Returns a function that returns a list of (name, doc) tuples for each of
the plans in plan_type.
.. note::
It is recommended that you use this fixture for testing of
``suitcase-*`` export functions, for an example see
``suitcase-tiff.tests``. This will mean that future additions to the
test suite here will be automatically applied to all ``suitcase-*``
repos. Some important implementation notes:
1. These fixtures are imported into other suitcase libraries via those
libraries' ``conftest.py`` file. This is automatically set up by
suitcases-cookiecutter, and no additional action is required.
2. If any of the parameters from the fixtures above are not valid for
the suitcase you are designing and cause testing issues please skip
them internally by adding them to the ``ignore`` kwarg list via the
line ``collector = example_data(ignore=[param_to_ignore, ...])``.
Parameters
----------
    generate_data : callable
        pytest fixture defined in `suitcase.utils.conftest` which returns a
        function that accepts a plan as an argument and returns (name, doc)
        pairs
    plan_type : callable
        pytest fixture defined in `suitcase.utils.conftest` which returns a
        list of 'plans' to test against.
'''
    def _example_data_func(ignore=None):
        '''returns a list of (name, doc) tuples for a number of test cases
        ignore : list, optional
            list of the pytest.fixture parameter 'values' to ignore, this is
            also passed down to `generate_data`
        '''
        if ignore is None:
            ignore = []
        return generate_data(plan_type(ignore), ignore=ignore)
return _example_data_func
@pytest.fixture(params=['test-', 'scan_{uid}-'],
scope='function')
def file_prefix_list(request): # noqa
'''Returns a function that provides file_prefixes for testing.
'''
    def _file_prefix_list_func(ignore=None):
        if ignore is None:
            ignore = []
        if request.param in ignore:
            pytest.skip()
        return request.param
return _file_prefix_list_func
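# A minimal illustrative test (an assumption about typical downstream usage,
# not part of this module's API) showing how a suitcase-* repo could consume
# the fixtures above once they are imported through its conftest.py.
def test_example_data_yields_name_doc_pairs(example_data):
    documents = example_data(ignore=['bulk_events'])
    for name, doc in documents:
        assert isinstance(name, str)
        assert isinstance(doc, dict)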
|
'''Given two arrays, write a function to compute their intersection.
'''
class Solution(object):
def intersect(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: List[int]
"""
        l = []
        if len(nums1) >= len(nums2):
            for i in range(len(nums1)):
                if nums1[i] in nums2:
                    l.append(nums1[i])
                    nums2.remove(nums1[i])
        else:
            for i in range(len(nums2)):
                if nums2[i] in nums1:
                    l.append(nums2[i])
                    nums1.remove(nums2[i])
        return l
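# A hash-based alternative sketch (not part of the original solution): using
# collections.Counter yields the same multiset intersection in O(m + n) time
# instead of the O(m * n) membership scans above.
from collections import Counter
class SolutionCounter(object):
    def intersect(self, nums1, nums2):
        counts = Counter(nums1)
        result = []
        for num in nums2:
            if counts[num] > 0:
                result.append(num)
                counts[num] -= 1
        return result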
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import argparse
import os
import pkg_resources
from colorama import init
from .common_utils import print_error
from .launcher import create_experiment, resume_experiment, view_experiment
from .updater import update_searchspace, update_concurrency, update_duration, update_trialnum, import_data
from .nnictl_utils import stop_experiment, trial_ls, trial_kill, list_experiment, experiment_status,\
log_trial, experiment_clean, platform_clean, experiment_list, \
monitor_experiment, export_trials_data, trial_codegen, webui_url, \
get_config, log_stdout, log_stderr, search_space_auto_gen, webui_nas
from .package_management import package_install, package_show
from .constants import DEFAULT_REST_PORT
from .tensorboard_utils import start_tensorboard, stop_tensorboard
init(autoreset=True)
if os.environ.get('COVERAGE_PROCESS_START'):
import coverage
coverage.process_startup()
def nni_info(*args):
if args[0].version:
try:
print(pkg_resources.get_distribution('nni').version)
except pkg_resources.ResolutionError:
print_error('Get version failed, please use `pip3 list | grep nni` to check nni version!')
else:
print('please run "nnictl {positional argument} --help" to see nnictl guidance')
def parse_args():
    '''Define the arguments users need to follow and input'''
parser = argparse.ArgumentParser(prog='nnictl', description='use nnictl command to control nni experiments')
parser.add_argument('--version', '-v', action='store_true')
parser.set_defaults(func=nni_info)
# create subparsers for args with sub values
subparsers = parser.add_subparsers()
# parse the command of auto generating search space
parser_start = subparsers.add_parser('ss_gen', help='automatically generate search space file from trial code')
parser_start.add_argument('--trial_command', '-t', required=True, dest='trial_command', help='the command for running trial code')
parser_start.add_argument('--trial_dir', '-d', default='./', dest='trial_dir', help='the directory for running the command')
parser_start.add_argument('--file', '-f', default='nni_auto_gen_search_space.json', dest='file', help='the path of search space file')
parser_start.set_defaults(func=search_space_auto_gen)
# parse start command
parser_start = subparsers.add_parser('create', help='create a new experiment')
parser_start.add_argument('--config', '-c', required=True, dest='config', help='the path of yaml config file')
parser_start.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', help='the port of restful server')
    parser_start.add_argument('--debug', '-d', action='store_true', help='set debug mode')
    parser_start.add_argument('--foreground', '-f', action='store_true', help='set foreground mode, print log content to terminal')
parser_start.set_defaults(func=create_experiment)
# parse resume command
    parser_resume = subparsers.add_parser('resume', help='resume a stopped experiment')
parser_resume.add_argument('id', nargs='?', help='The id of the experiment you want to resume')
parser_resume.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', help='the port of restful server')
    parser_resume.add_argument('--debug', '-d', action='store_true', help='set debug mode')
    parser_resume.add_argument('--foreground', '-f', action='store_true', help='set foreground mode, print log content to terminal')
parser_resume.set_defaults(func=resume_experiment)
# parse view command
parser_view = subparsers.add_parser('view', help='view a stopped experiment')
parser_view.add_argument('id', nargs='?', help='The id of the experiment you want to view')
parser_view.add_argument('--port', '-p', default=DEFAULT_REST_PORT, dest='port', help='the port of restful server')
parser_view.set_defaults(func=view_experiment)
# parse update command
parser_updater = subparsers.add_parser('update', help='update the experiment')
#add subparsers for parser_updater
parser_updater_subparsers = parser_updater.add_subparsers()
parser_updater_searchspace = parser_updater_subparsers.add_parser('searchspace', help='update searchspace')
parser_updater_searchspace.add_argument('id', nargs='?', help='the id of experiment')
parser_updater_searchspace.add_argument('--filename', '-f', required=True)
parser_updater_searchspace.set_defaults(func=update_searchspace)
parser_updater_concurrency = parser_updater_subparsers.add_parser('concurrency', help='update concurrency')
parser_updater_concurrency.add_argument('id', nargs='?', help='the id of experiment')
parser_updater_concurrency.add_argument('--value', '-v', required=True)
parser_updater_concurrency.set_defaults(func=update_concurrency)
parser_updater_duration = parser_updater_subparsers.add_parser('duration', help='update duration')
parser_updater_duration.add_argument('id', nargs='?', help='the id of experiment')
    parser_updater_duration.add_argument('--value', '-v', required=True, help='the unit of time should be in {\'s\', \'m\', \'h\', \'d\'}')
parser_updater_duration.set_defaults(func=update_duration)
parser_updater_trialnum = parser_updater_subparsers.add_parser('trialnum', help='update maxtrialnum')
parser_updater_trialnum.add_argument('id', nargs='?', help='the id of experiment')
parser_updater_trialnum.add_argument('--value', '-v', required=True)
parser_updater_trialnum.set_defaults(func=update_trialnum)
#parse stop command
parser_stop = subparsers.add_parser('stop', help='stop the experiment')
parser_stop.add_argument('id', nargs='?', help='the id of experiment, use \'all\' to stop all running experiments')
parser_stop.add_argument('--port', '-p', dest='port', help='the port of restful server')
    parser_stop.add_argument('--all', '-a', action='store_true', help='stop all running experiments')
parser_stop.set_defaults(func=stop_experiment)
#parse trial command
parser_trial = subparsers.add_parser('trial', help='get trial information')
#add subparsers for parser_trial
parser_trial_subparsers = parser_trial.add_subparsers()
parser_trial_ls = parser_trial_subparsers.add_parser('ls', help='list trial jobs')
parser_trial_ls.add_argument('id', nargs='?', help='the id of experiment')
parser_trial_ls.set_defaults(func=trial_ls)
parser_trial_kill = parser_trial_subparsers.add_parser('kill', help='kill trial jobs')
parser_trial_kill.add_argument('id', nargs='?', help='the id of experiment')
parser_trial_kill.add_argument('--trial_id', '-T', required=True, dest='trial_id', help='the id of trial to be killed')
parser_trial_kill.set_defaults(func=trial_kill)
parser_trial_codegen = parser_trial_subparsers.add_parser('codegen', help='generate trial code for a specific trial')
parser_trial_codegen.add_argument('id', nargs='?', help='the id of experiment')
parser_trial_codegen.add_argument('--trial_id', '-T', required=True, dest='trial_id', help='the id of trial to do code generation')
parser_trial_codegen.set_defaults(func=trial_codegen)
#parse experiment command
parser_experiment = subparsers.add_parser('experiment', help='get experiment information')
#add subparsers for parser_experiment
parser_experiment_subparsers = parser_experiment.add_subparsers()
parser_experiment_show = parser_experiment_subparsers.add_parser('show', help='show the information of experiment')
parser_experiment_show.add_argument('id', nargs='?', help='the id of experiment')
parser_experiment_show.set_defaults(func=list_experiment)
parser_experiment_status = parser_experiment_subparsers.add_parser('status', help='show the status of experiment')
parser_experiment_status.add_argument('id', nargs='?', help='the id of experiment')
parser_experiment_status.set_defaults(func=experiment_status)
    parser_experiment_list = parser_experiment_subparsers.add_parser('list', help='list all running experiment ids')
    parser_experiment_list.add_argument('--all', action='store_true', default=False, help='list all experiments')
parser_experiment_list.set_defaults(func=experiment_list)
parser_experiment_clean = parser_experiment_subparsers.add_parser('delete', help='clean up the experiment data')
parser_experiment_clean.add_argument('id', nargs='?', help='the id of experiment')
    parser_experiment_clean.add_argument('--all', action='store_true', default=False, help='delete all experiments')
parser_experiment_clean.set_defaults(func=experiment_clean)
    #parse platform command
parser_platform = subparsers.add_parser('platform', help='get platform information')
    #add subparsers for parser_platform
parser_platform_subparsers = parser_platform.add_subparsers()
parser_platform_clean = parser_platform_subparsers.add_parser('clean', help='clean up the platform data')
parser_platform_clean.add_argument('--config', '-c', required=True, dest='config', help='the path of yaml config file')
parser_platform_clean.set_defaults(func=platform_clean)
#import tuning data
parser_import_data = parser_experiment_subparsers.add_parser('import', help='import additional data')
parser_import_data.add_argument('id', nargs='?', help='the id of experiment')
parser_import_data.add_argument('--filename', '-f', required=True)
parser_import_data.set_defaults(func=import_data)
#export trial data
parser_trial_export = parser_experiment_subparsers.add_parser('export', help='export trial job results to csv or json')
parser_trial_export.add_argument('id', nargs='?', help='the id of experiment')
parser_trial_export.add_argument('--type', '-t', choices=['json', 'csv'], required=True, dest='type', help='target file type')
parser_trial_export.add_argument('--filename', '-f', required=True, dest='path', help='target file path')
parser_trial_export.set_defaults(func=export_trials_data)
#TODO:finish webui function
#parse board command
parser_webui = subparsers.add_parser('webui', help='get web ui information')
    #add subparsers for parser_webui
parser_webui_subparsers = parser_webui.add_subparsers()
parser_webui_url = parser_webui_subparsers.add_parser('url', help='show the url of web ui')
parser_webui_url.add_argument('id', nargs='?', help='the id of experiment')
parser_webui_url.set_defaults(func=webui_url)
parser_webui_nas = parser_webui_subparsers.add_parser('nas', help='show nas ui')
parser_webui_nas.add_argument('--port', default=6060, type=int, help='port of nas ui')
parser_webui_nas.add_argument('--logdir', default='.', type=str, help='the logdir where nas ui will read data')
parser_webui_nas.set_defaults(func=webui_nas)
#parse config command
parser_config = subparsers.add_parser('config', help='get config information')
parser_config_subparsers = parser_config.add_subparsers()
parser_config_show = parser_config_subparsers.add_parser('show', help='show the information of config')
parser_config_show.add_argument('id', nargs='?', help='the id of experiment')
parser_config_show.set_defaults(func=get_config)
#parse log command
parser_log = subparsers.add_parser('log', help='get log information')
# add subparsers for parser_log
parser_log_subparsers = parser_log.add_subparsers()
parser_log_stdout = parser_log_subparsers.add_parser('stdout', help='get stdout information')
parser_log_stdout.add_argument('id', nargs='?', help='the id of experiment')
    parser_log_stdout.add_argument('--tail', '-T', dest='tail', type=int, help='show the last N lines of stdout')
    parser_log_stdout.add_argument('--head', '-H', dest='head', type=int, help='show the first N lines of stdout')
parser_log_stdout.add_argument('--path', action='store_true', default=False, help='get the path of stdout file')
parser_log_stdout.set_defaults(func=log_stdout)
parser_log_stderr = parser_log_subparsers.add_parser('stderr', help='get stderr information')
parser_log_stderr.add_argument('id', nargs='?', help='the id of experiment')
    parser_log_stderr.add_argument('--tail', '-T', dest='tail', type=int, help='show the last N lines of stderr')
    parser_log_stderr.add_argument('--head', '-H', dest='head', type=int, help='show the first N lines of stderr')
parser_log_stderr.add_argument('--path', action='store_true', default=False, help='get the path of stderr file')
parser_log_stderr.set_defaults(func=log_stderr)
parser_log_trial = parser_log_subparsers.add_parser('trial', help='get trial log path')
parser_log_trial.add_argument('id', nargs='?', help='the id of experiment')
parser_log_trial.add_argument('--trial_id', '-T', dest='trial_id', help='find trial log path by id')
parser_log_trial.set_defaults(func=log_trial)
#parse package command
parser_package = subparsers.add_parser('package', help='control nni tuner and assessor packages')
# add subparsers for parser_package
parser_package_subparsers = parser_package.add_subparsers()
parser_package_install = parser_package_subparsers.add_parser('install', help='install packages')
parser_package_install.add_argument('--name', '-n', dest='name', help='package name to be installed')
parser_package_install.set_defaults(func=package_install)
parser_package_show = parser_package_subparsers.add_parser('show', help='show the information of packages')
parser_package_show.set_defaults(func=package_show)
#parse tensorboard command
parser_tensorboard = subparsers.add_parser('tensorboard', help='manage tensorboard')
parser_tensorboard_subparsers = parser_tensorboard.add_subparsers()
parser_tensorboard_start = parser_tensorboard_subparsers.add_parser('start', help='start tensorboard')
parser_tensorboard_start.add_argument('id', nargs='?', help='the id of experiment')
parser_tensorboard_start.add_argument('--trial_id', '-T', dest='trial_id', help='the id of trial')
parser_tensorboard_start.add_argument('--port', dest='port', default=6006, help='the port to start tensorboard')
parser_tensorboard_start.set_defaults(func=start_tensorboard)
parser_tensorboard_stop = parser_tensorboard_subparsers.add_parser('stop', help='stop tensorboard')
parser_tensorboard_stop.add_argument('id', nargs='?', help='the id of experiment')
parser_tensorboard_stop.set_defaults(func=stop_tensorboard)
#parse top command
parser_top = subparsers.add_parser('top', help='monitor the experiment')
parser_top.add_argument('--time', '-t', dest='time', type=int, default=3, help='the time interval to update the experiment status, ' \
'the unit is second')
parser_top.set_defaults(func=monitor_experiment)
args = parser.parse_args()
args.func(args)
if __name__ == '__main__':
parse_args()
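# Illustrative invocations only (using flags defined by the parsers above):
#   nnictl create --config experiment.yml --port 8080
#   nnictl experiment list --all
#   nnictl log stdout <experiment_id> --tail 100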
|
# -*- coding: utf-8 -*-
'''
Tests for the salt fileclient
'''
# Import Python libs
from __future__ import absolute_import
import errno
import logging
import os
import shutil
# Import Salt Testing libs
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, LoaderModuleMockMixin
from tests.support.mock import patch, Mock, MagicMock, NO_MOCK, NO_MOCK_REASON
from tests.support.paths import TMP
from tests.support.unit import TestCase, skipIf
# Import Salt libs
import salt.utils.files
from salt.ext.six.moves import range
from salt import fileclient
from salt.ext import six
log = logging.getLogger(__name__)
class FileclientTestCase(TestCase):
'''
Fileclient test
'''
opts = {
'extension_modules': '',
'cachedir': '/__test__',
}
def _fake_makedir(self, num=errno.EEXIST):
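        # Returns a Mock whose side effect raises OSError(num), emulating an
        # os.makedirs call that fails with the given errno.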
def _side_effect(*args, **kwargs):
raise OSError(num, 'Errno {0}'.format(num))
return Mock(side_effect=_side_effect)
def test_cache_skips_makedirs_on_race_condition(self):
'''
        If the cache already contains the directory, do not raise an exception.
'''
with patch('os.path.isfile', lambda prm: False):
for exists in range(2):
with patch('os.makedirs', self._fake_makedir()):
with fileclient.Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == os.sep + os.sep.join(['__test__', 'files', 'base', 'testfile'])
def test_cache_raises_exception_on_non_eexist_ioerror(self):
'''
If makedirs raises other than EEXIST errno, an exception should be raised.
'''
with patch('os.path.isfile', lambda prm: False):
with patch('os.makedirs', self._fake_makedir(num=errno.EROFS)):
with self.assertRaises(OSError):
with fileclient.Client(self.opts)._cache_loc('testfile') as c_ref_itr:
assert c_ref_itr == '/__test__/files/base/testfile'
def test_extrn_path_with_long_filename(self):
safe_file_name = os.path.split(fileclient.Client(self.opts)._extrn_path('https://test.com/' + ('A' * 254), 'base'))[-1]
assert safe_file_name == 'A' * 254
oversized_file_name = os.path.split(fileclient.Client(self.opts)._extrn_path('https://test.com/' + ('A' * 255), 'base'))[-1]
assert len(oversized_file_name) < 256
assert oversized_file_name != 'A' * 255
oversized_file_with_query_params = os.path.split(fileclient.Client(self.opts)._extrn_path('https://test.com/file?' + ('A' * 255), 'base'))[-1]
assert len(oversized_file_with_query_params) < 256
SALTENVS = ('base', 'dev')
FS_ROOT = os.path.join(TMP, 'fileclient_fs_root')
CACHE_ROOT = os.path.join(TMP, 'fileclient_cache_root')
SUBDIR = 'subdir'
SUBDIR_FILES = ('foo.txt', 'bar.txt', 'baz.txt')
def _get_file_roots():
return dict(
[(x, [os.path.join(FS_ROOT, x)]) for x in SALTENVS]
)
MOCKED_OPTS = {
'file_roots': _get_file_roots(),
'fileserver_backend': ['roots'],
'cachedir': CACHE_ROOT,
'file_client': 'local',
}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class FileClientTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMixin):
def setup_loader_modules(self):
return {fileclient: {'__opts__': MOCKED_OPTS}}
def setUp(self):
self.file_client = fileclient.Client(self.master_opts)
def tearDown(self):
del self.file_client
def test_file_list_emptydirs(self):
'''
Ensure that the fileclient class won't allow a direct call to file_list_emptydirs()
'''
with self.assertRaises(NotImplementedError):
self.file_client.file_list_emptydirs()
def test_get_file(self):
'''
Ensure that the fileclient class won't allow a direct call to get_file()
'''
with self.assertRaises(NotImplementedError):
self.file_client.get_file(None)
def test_get_file_client(self):
minion_opts = self.get_temp_config('minion')
minion_opts['file_client'] = 'remote'
with patch('salt.fileclient.RemoteClient', MagicMock(return_value='remote_client')):
ret = fileclient.get_file_client(minion_opts)
self.assertEqual('remote_client', ret)
@skipIf(NO_MOCK, NO_MOCK_REASON)
class FileclientCacheTest(TestCase, AdaptedConfigurationTestCaseMixin, LoaderModuleMockMixin):
'''
Tests for the fileclient caching. The LocalClient is the only thing we can
test as it is the only way we can mock the fileclient (the tests run from
the minion process, so the master cannot be mocked from test code).
'''
def setup_loader_modules(self):
return {fileclient: {'__opts__': MOCKED_OPTS}}
def setUp(self):
'''
No need to add a dummy foo.txt to muddy up the github repo, just make
our own fileserver root on-the-fly.
'''
def _new_dir(path):
'''
Add a new dir at ``path`` using os.makedirs. If the directory
already exists, remove it recursively and then try to create it
again.
'''
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST:
# Just in case a previous test was interrupted, remove the
# directory and try adding it again.
shutil.rmtree(path)
os.makedirs(path)
else:
raise
        # Create the FS_ROOT
for saltenv in SALTENVS:
saltenv_root = os.path.join(FS_ROOT, saltenv)
# Make sure we have a fresh root dir for this saltenv
_new_dir(saltenv_root)
path = os.path.join(saltenv_root, 'foo.txt')
with salt.utils.files.fopen(path, 'w') as fp_:
fp_.write(
'This is a test file in the \'{0}\' saltenv.\n'
.format(saltenv)
)
subdir_abspath = os.path.join(saltenv_root, SUBDIR)
os.makedirs(subdir_abspath)
for subdir_file in SUBDIR_FILES:
path = os.path.join(subdir_abspath, subdir_file)
with salt.utils.files.fopen(path, 'w') as fp_:
fp_.write(
                        'This is file \'{0}\' in subdir \'{1}\' from saltenv '
'\'{2}\''.format(subdir_file, SUBDIR, saltenv)
)
# Create the CACHE_ROOT
_new_dir(CACHE_ROOT)
def tearDown(self):
'''
Remove the directories created for these tests
'''
shutil.rmtree(FS_ROOT)
shutil.rmtree(CACHE_ROOT)
def test_cache_dir(self):
'''
Ensure entire directory is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=None
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_dir_with_alternate_cachedir_and_relative_path(self):
'''
Ensure entire directory is cached to correct location when an alternate
cachedir is specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_dir(
'salt://{0}'.format(SUBDIR),
saltenv,
cachedir=alt_cachedir
)
)
for subdir_file in SUBDIR_FILES:
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
SUBDIR,
subdir_file)
# Double check that the content of the cached file
# identifies it as being from the correct saltenv. The
# setUp function creates the file with the name of the
# saltenv mentioned in the file, so a simple 'in' check is
# sufficient here. If opening the file raises an exception,
# this is a problem, so we are not catching the exception
# and letting it be raised so that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(subdir_file in content)
self.assertTrue(SUBDIR in content)
self.assertTrue(saltenv in content)
def test_cache_file(self):
'''
Ensure file is cached to correct location
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt', saltenv, cachedir=None)
)
cache_loc = os.path.join(
fileclient.__opts__['cachedir'], 'files', saltenv, 'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_absolute_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is an absolute path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = os.path.join(TMP, 'abs_cachedir')
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
def test_cache_file_with_alternate_cachedir_and_relative_path(self):
'''
Ensure file is cached to correct location when an alternate cachedir is
specified and that cachedir is a relative path
'''
patched_opts = dict((x, y) for x, y in six.iteritems(self.minion_opts))
patched_opts.update(MOCKED_OPTS)
alt_cachedir = 'foo'
with patch.dict(fileclient.__opts__, patched_opts):
client = fileclient.get_file_client(fileclient.__opts__, pillar=False)
for saltenv in SALTENVS:
self.assertTrue(
client.cache_file('salt://foo.txt',
saltenv,
cachedir=alt_cachedir)
)
cache_loc = os.path.join(fileclient.__opts__['cachedir'],
alt_cachedir,
'files',
saltenv,
'foo.txt')
# Double check that the content of the cached file identifies
# it as being from the correct saltenv. The setUp function
# creates the file with the name of the saltenv mentioned in
# the file, so a simple 'in' check is sufficient here. If
# opening the file raises an exception, this is a problem, so
# we are not catching the exception and letting it be raised so
# that the test fails.
with salt.utils.files.fopen(cache_loc) as fp_:
content = fp_.read()
log.debug('cache_loc = %s', cache_loc)
log.debug('content = %s', content)
self.assertTrue(saltenv in content)
|
"""
ORY Oathkeeper
ORY Oathkeeper is a reverse proxy that checks the HTTP Authorization for validity against a set of rules. This service uses Hydra to validate access tokens and policies. # noqa: E501
The version of the OpenAPI document: v0.38.25-beta.1
Contact: hi@ory.am
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import ory_oathkeeper_client
from ory_oathkeeper_client.model.json_web_key import JsonWebKey
class TestJsonWebKey(unittest.TestCase):
"""JsonWebKey unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testJsonWebKey(self):
"""Test JsonWebKey"""
# FIXME: construct object with mandatory attributes with example values
# model = JsonWebKey() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
import os
import time
import logging
import threading
from xml.etree import ElementTree
from galaxy import model
from galaxy.util import plugin_config
import galaxy.workflow.schedulers
log = logging.getLogger( __name__ )
DEFAULT_SCHEDULER_ID = "default" # well actually this should be called DEFAULT_DEFAULT_SCHEDULER_ID...
DEFAULT_SCHEDULER_PLUGIN_TYPE = "core"
EXCEPTION_MESSAGE_SHUTDOWN = "Exception raised while attempting to shutdown workflow scheduler."
EXCEPTION_MESSAGE_NO_SCHEDULERS = "Failed to defined workflow schedulers - no workflow schedulers defined."
EXCEPTION_MESSAGE_NO_DEFAULT_SCHEDULER = "Failed to defined workflow schedulers - no workflow scheduler found for default id '%s'."
EXCEPTION_MESSAGE_DUPLICATE_SCHEDULERS = "Failed to defined workflow schedulers - workflow scheduling plugin id '%s' duplicated."
class WorkflowSchedulingManager( object ):
""" A workflow scheduling manager based loosely on pattern established by
``galaxy.manager.JobManager``. Only schedules workflows on handler
processes.
"""
def __init__( self, app ):
self.app = app
self.__job_config = app.job_config
self.workflow_schedulers = {}
self.active_workflow_schedulers = {}
        # Passive workflow schedulers do not need to be monitored.
self.request_monitor = None
self.__plugin_classes = self.__plugins_dict()
self.__init_schedulers()
if self._is_workflow_handler():
log.debug("Starting workflow schedulers")
self.__start_schedulers()
if self.active_workflow_schedulers:
self.__start_request_monitor()
else:
# Process should not schedule workflows - do nothing.
pass
# Provide a handler config-like interface by delegating to job handler
# config. Perhaps it makes sense to let there be explicit workflow
# handlers?
def _is_workflow_handler( self ):
return self.app.is_job_handler()
def _get_handler( self ):
return self.__job_config.get_handler( None )
def shutdown( self ):
for workflow_scheduler in self.workflow_schedulers.itervalues():
try:
workflow_scheduler.shutdown()
except Exception:
log.exception( EXCEPTION_MESSAGE_SHUTDOWN )
if self.request_monitor:
try:
self.request_monitor.shutdown()
except Exception:
log.exception( "Failed to shutdown workflow request monitor." )
def queue( self, workflow_invocation, request_params ):
workflow_invocation.state = model.WorkflowInvocation.states.NEW
scheduler = request_params.get( "scheduler", None ) or self.default_scheduler_id
handler = self._get_handler()
log.info("Queueing workflow invocation for handler [%s]" % handler)
workflow_invocation.scheduler = scheduler
workflow_invocation.handler = handler
sa_session = self.app.model.context
sa_session.add( workflow_invocation )
sa_session.flush()
return workflow_invocation
def __start_schedulers( self ):
for workflow_scheduler in self.workflow_schedulers.itervalues():
workflow_scheduler.startup( self.app )
def __plugins_dict( self ):
return plugin_config.plugins_dict( galaxy.workflow.schedulers, 'plugin_type' )
def __init_schedulers( self ):
config_file = self.app.config.workflow_schedulers_config_file
use_default_scheduler = False
if not config_file:
log.info( "Not workflow schedulers plugin config file defined, using default scheduler." )
use_default_scheduler = True
elif not os.path.exists( config_file ):
log.info( "Cannot find workflow schedulers plugin config file '%s', using default scheduler." % config_file )
use_default_scheduler = True
if use_default_scheduler:
self.__init_default_scheduler()
else:
plugins_element = ElementTree.parse( config_file ).getroot()
self.__init_schedulers_for_element( plugins_element )
def __init_default_scheduler( self ):
self.default_scheduler_id = DEFAULT_SCHEDULER_ID
self.__init_plugin( DEFAULT_SCHEDULER_PLUGIN_TYPE )
def __init_schedulers_for_element( self, plugins_element ):
plugins_kwds = dict( plugins_element.items() )
self.default_scheduler_id = plugins_kwds.get( 'default', DEFAULT_SCHEDULER_ID )
for plugin_element in plugins_element:
plugin_type = plugin_element.tag
plugin_kwds = dict( plugin_element.items() )
workflow_scheduler_id = plugin_kwds.get( 'id', None )
self.__init_plugin( plugin_type, workflow_scheduler_id, **plugin_kwds )
if not self.workflow_schedulers:
raise Exception( EXCEPTION_MESSAGE_NO_SCHEDULERS )
if self.default_scheduler_id not in self.workflow_schedulers:
raise Exception( EXCEPTION_MESSAGE_NO_DEFAULT_SCHEDULER % self.default_scheduler_id )
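    # An illustrative sketch (an assumption, not taken from Galaxy's docs) of
    # the XML shape __init_schedulers_for_element can parse from
    # workflow_schedulers_config_file; child tag names select the plugin type
    # and element attributes become plugin kwargs:
    #   <workflow_schedulers default="core">
    #       <core id="core" />
    #   </workflow_schedulers>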
def __init_plugin( self, plugin_type, workflow_scheduler_id=None, **kwds ):
workflow_scheduler_id = workflow_scheduler_id or self.default_scheduler_id
if workflow_scheduler_id in self.workflow_schedulers:
raise Exception( EXCEPTION_MESSAGE_DUPLICATE_SCHEDULERS % workflow_scheduler_id )
workflow_scheduler = self.__plugin_classes[ plugin_type ]( **kwds )
self.workflow_schedulers[ workflow_scheduler_id ] = workflow_scheduler
if isinstance( workflow_scheduler, galaxy.workflow.schedulers.ActiveWorkflowSchedulingPlugin ):
self.active_workflow_schedulers[ workflow_scheduler_id ] = workflow_scheduler
def __start_request_monitor( self ):
self.request_monitor = WorkflowRequestMonitor( self.app, self )
class WorkflowRequestMonitor( object ):
def __init__( self, app, workflow_scheduling_manager ):
self.app = app
self.active = True
self.workflow_scheduling_manager = workflow_scheduling_manager
self.monitor_thread = threading.Thread( name="WorkflowRequestMonitor.monitor_thread", target=self.__monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
def __monitor( self ):
to_monitor = self.workflow_scheduling_manager.active_workflow_schedulers
while self.active:
for workflow_scheduler_id, workflow_scheduler in to_monitor.iteritems():
if not self.active:
return
self.__schedule( workflow_scheduler_id, workflow_scheduler )
# TODO: wake if stopped
time.sleep(1)
def __schedule( self, workflow_scheduler_id, workflow_scheduler ):
invocation_ids = self.__active_invocation_ids( workflow_scheduler_id )
for invocation_id in invocation_ids:
self.__attempt_schedule( invocation_id, workflow_scheduler )
if not self.active:
return
def __attempt_schedule( self, invocation_id, workflow_scheduler ):
sa_session = self.app.model.context
workflow_invocation = sa_session.query( model.WorkflowInvocation ).get( invocation_id )
if not workflow_invocation or not workflow_invocation.active:
return False
try:
# This ensures we're only ever working on the 'first' active
# workflow invocation in a given history, to force sequential
# activation.
if self.app.config.history_local_serial_workflow_scheduling:
for i in workflow_invocation.history.workflow_invocations:
if i.active and i.id < workflow_invocation.id:
return False
workflow_scheduler.schedule( workflow_invocation )
except Exception:
# TODO: eventually fail this - or fail it right away?
log.exception( "Exception raised while attempting to schedule workflow request." )
return False
# A workflow was obtained and scheduled...
return True
def __active_invocation_ids( self, scheduler_id ):
sa_session = self.app.model.context
handler = self.app.config.server_name
return model.WorkflowInvocation.poll_active_workflow_ids(
sa_session,
scheduler=scheduler_id,
handler=handler,
)
def shutdown( self ):
self.active = False
|
import tensorflow as tf
from tensorflow.contrib.framework.python.ops.variables import get_or_create_global_step
from tensorflow.python.platform import tf_logging as logging
from enet import ENet, ENet_arg_scope
from preprocessing import preprocess
from get_class_weights import ENet_weighing, median_frequency_balancing
import os
import time
import numpy as np
import matplotlib.pyplot as plt
slim = tf.contrib.slim
#==============INPUT ARGUMENTS==================
flags = tf.app.flags
#Directory arguments
flags.DEFINE_string('dataset_dir', './dataset', 'The dataset directory to find the train, validation and test images.')
flags.DEFINE_string('logdir', './log/original', 'The log directory to save your checkpoint and event files.')
flags.DEFINE_boolean('save_images', True, 'Whether or not to save your images.')
flags.DEFINE_boolean('combine_dataset', False, 'If True, combines the validation with the train dataset.')
#Training arguments
flags.DEFINE_integer('num_classes', 2, 'The number of classes to predict.')
flags.DEFINE_integer('batch_size', 1, 'The batch_size for training.')
flags.DEFINE_integer('eval_batch_size', 10, 'The batch size used for validation.')
flags.DEFINE_integer('image_height', 512, "The input height of the images.")
flags.DEFINE_integer('image_width', 512, "The input width of the images.")
flags.DEFINE_integer('num_epochs', 300, "The number of epochs to train your model.")
flags.DEFINE_integer('num_epochs_before_decay', 300, 'The number of epochs before decaying your learning rate.')
flags.DEFINE_float('weight_decay', 2e-5, "The weight decay for ENet convolution layers.")
flags.DEFINE_float('learning_rate_decay_factor', 1e-2, 'The learning rate decay factor.')
flags.DEFINE_float('initial_learning_rate', 5e-5, 'The initial learning rate for your training.')
flags.DEFINE_string('weighting', "MFB", 'Choice of Median Frequency Balancing or the custom ENet class weights.')
#Architectural changes
flags.DEFINE_integer('num_initial_blocks', 1, 'The number of initial blocks to use in ENet.')
flags.DEFINE_integer('stage_two_repeat', 5, 'The number of times to repeat stage two.')
flags.DEFINE_boolean('skip_connections', True, 'If True, perform skip connections from encoder to decoder.')
FLAGS = flags.FLAGS
#==========NAME HANDLING FOR CONVENIENCE==============
num_classes = FLAGS.num_classes
batch_size = FLAGS.batch_size
image_height = FLAGS.image_height
image_width = FLAGS.image_width
eval_batch_size = FLAGS.eval_batch_size #Can be larger than train_batch as no need to backpropagate gradients.
combine_dataset = FLAGS.combine_dataset
#Training parameters
initial_learning_rate = FLAGS.initial_learning_rate
num_epochs_before_decay = FLAGS.num_epochs_before_decay
num_epochs = FLAGS.num_epochs
learning_rate_decay_factor = FLAGS.learning_rate_decay_factor
weight_decay = FLAGS.weight_decay
epsilon = 1e-8
#Architectural changes
num_initial_blocks = FLAGS.num_initial_blocks
stage_two_repeat = FLAGS.stage_two_repeat
skip_connections = FLAGS.skip_connections
#Use median frequency balancing or not
weighting = FLAGS.weighting
#Visualization and where to save images
save_images = FLAGS.save_images
photo_dir = os.path.join(FLAGS.logdir, "images")
#Directories
dataset_dir = FLAGS.dataset_dir
logdir = FLAGS.logdir
#===============PREPARATION FOR TRAINING==================
#Get the images into a list
image_files = sorted([os.path.join(dataset_dir, 'train', file) for file in os.listdir(dataset_dir + "/train") if file.endswith('.png')])
annotation_files = sorted([os.path.join(dataset_dir, "trainannot", file) for file in os.listdir(dataset_dir + "/trainannot") if file.endswith('.png')])
image_val_files = sorted([os.path.join(dataset_dir, 'val', file) for file in os.listdir(dataset_dir + "/val") if file.endswith('.png')])
annotation_val_files = sorted([os.path.join(dataset_dir, "valannot", file) for file in os.listdir(dataset_dir + "/valannot") if file.endswith('.png')])
if combine_dataset:
image_files += image_val_files
annotation_files += annotation_val_files
#Compute the number of batches per epoch and the number of steps to take before decaying the learning rate
num_batches_per_epoch = len(image_files) // batch_size
num_steps_per_epoch = num_batches_per_epoch
decay_steps = int(num_epochs_before_decay * num_steps_per_epoch)
#=================CLASS WEIGHTS===============================
#Median frequency balancing class weights
if weighting == "MFB":
    class_weights = median_frequency_balancing(annotation_files, num_classes)
    print("========= Median Frequency Balancing Class Weights =========\n", class_weights)
#Inverse class-probability weighting (the custom ENet class weights)
elif weighting == "ENET":
    class_weights = ENet_weighing(annotation_files, num_classes)
    print("========= ENet Class Weights =========\n", class_weights)
#============= TRAINING =================
def weighted_cross_entropy(onehot_labels, logits, class_weights):
'''
A quick wrapper to compute weighted cross entropy.
------------------
Technical Details
------------------
    The class_weights list can be multiplied by onehot_labels directly because the last dimension
    of onehot_labels is num_classes and class_weights (of length num_classes) broadcasts across
    that dimension. We then sum over the last dimension to collapse it, producing a weight map of
    shape (batch_size, height, width) in which each pixel's value is its class weight. This map is
    passed as the `weights` argument to the softmax cross entropy loss, which applies it per pixel.
------------------
INPUTS:
- onehot_labels(Tensor): the one-hot encoded labels of shape (batch_size, height, width, num_classes)
- logits(Tensor): the logits output from the model that is of shape (batch_size, height, width, num_classes)
- class_weights(list): A list where each index is the class label and the value of the index is the class weight.
OUTPUTS:
- loss(Tensor): a scalar Tensor that is the weighted cross entropy loss output.
'''
weights = onehot_labels * class_weights
weights = tf.reduce_sum(weights, 3)
loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits, weights=weights)
return loss
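#A minimal NumPy-only sketch (illustrative shapes and values; this helper is
#never called during training) of the broadcasting weighted_cross_entropy does.
def _weighted_cross_entropy_shape_demo():
    onehot = np.zeros((1, 2, 2, 3), dtype=np.float32)  #(batch, height, width, num_classes)
    onehot[0, :, :, 1] = 1.0                           #every pixel belongs to class 1
    demo_class_weights = np.array([0.5, 2.0, 1.0], dtype=np.float32)
    weights = (onehot * demo_class_weights).sum(axis=3)  #(batch, height, width) per-pixel weights
    assert weights.shape == (1, 2, 2)
    assert (weights == 2.0).all()                      #class 1's weight everywhere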
def run():
with tf.Graph().as_default() as graph:
tf.logging.set_verbosity(tf.logging.INFO)
#===================TRAINING BRANCH=======================
#Load the files into one input queue
images = tf.convert_to_tensor(image_files)
annotations = tf.convert_to_tensor(annotation_files)
input_queue = tf.train.slice_input_producer([images, annotations]) #Slice_input producer shuffles the data by default.
#Decode the image and annotation raw content
image = tf.read_file(input_queue[0])
image = tf.image.decode_image(image, channels=3)
annotation = tf.read_file(input_queue[1])
annotation = tf.image.decode_image(annotation)
#preprocess and batch up the image and annotation
preprocessed_image, preprocessed_annotation = preprocess(image, image_height, image_width, annotation)
images, annotations = tf.train.batch([preprocessed_image, preprocessed_annotation], batch_size=batch_size, allow_smaller_final_batch=True)
print("-------------")
print(type(images))
print(images)
print("-----------sdfdsf--")
#Create the model inference
with slim.arg_scope(ENet_arg_scope(weight_decay=weight_decay)):
logits, probabilities = ENet(images,
num_classes,
batch_size=batch_size,
is_training=True,
reuse=None,
num_initial_blocks=num_initial_blocks,
stage_two_repeat=stage_two_repeat,
skip_connections=skip_connections)
#perform one-hot-encoding on the ground truth annotation to get same shape as the logits
annotations = tf.reshape(annotations, shape=[batch_size, image_height, image_width])
annotations_ohe = tf.one_hot(annotations, num_classes, axis=-1)
#Actually compute the loss
loss = weighted_cross_entropy(logits=logits, onehot_labels=annotations_ohe, class_weights=class_weights)
total_loss = tf.losses.get_total_loss()
#Create the global step for monitoring the learning_rate and training.
global_step = get_or_create_global_step()
#Define your exponentially decaying learning rate
lr = tf.train.exponential_decay(
learning_rate = initial_learning_rate,
global_step = global_step,
decay_steps = decay_steps,
decay_rate = learning_rate_decay_factor,
staircase = True)
#Now we can define the optimizer that takes on the learning rate
optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=epsilon)
#Create the train_op.
train_op = slim.learning.create_train_op(total_loss, optimizer)
        #State the metrics that you want to track. The predictions are not one-hot encoded.
predictions = tf.argmax(probabilities, -1)
accuracy, accuracy_update = tf.contrib.metrics.streaming_accuracy(predictions, annotations)
mean_IOU, mean_IOU_update = tf.contrib.metrics.streaming_mean_iou(predictions=predictions, labels=annotations, num_classes=num_classes)
metrics_op = tf.group(accuracy_update, mean_IOU_update)
#Now we need to create a training step function that runs both the train_op, metrics_op and updates the global_step concurrently.
def train_step(sess, train_op, global_step, metrics_op):
'''
Simply runs a session for the three arguments provided and gives a logging on the time elapsed for each global step
'''
#Check the time for each sess run
start_time = time.time()
total_loss, global_step_count, accuracy_val, mean_IOU_val, _ = sess.run([train_op, global_step, accuracy, mean_IOU, metrics_op])
time_elapsed = time.time() - start_time
#Run the logging to show some results
logging.info('global step %s: loss: %.4f (%.2f sec/step) Current Streaming Accuracy: %.4f Current Mean IOU: %.4f', global_step_count, total_loss, time_elapsed, accuracy_val, mean_IOU_val)
return total_loss, accuracy_val, mean_IOU_val
#================VALIDATION BRANCH========================
#Load the files into one input queue
images_val = tf.convert_to_tensor(image_val_files)
annotations_val = tf.convert_to_tensor(annotation_val_files)
input_queue_val = tf.train.slice_input_producer([images_val, annotations_val])
#Decode the image and annotation raw content
image_val = tf.read_file(input_queue_val[0])
image_val = tf.image.decode_jpeg(image_val, channels=3)
annotation_val = tf.read_file(input_queue_val[1])
annotation_val = tf.image.decode_png(annotation_val)
#preprocess and batch up the image and annotation
preprocessed_image_val, preprocessed_annotation_val = preprocess(image_val, image_height, image_width, annotation_val)
images_val, annotations_val = tf.train.batch([preprocessed_image_val, preprocessed_annotation_val], batch_size=eval_batch_size, allow_smaller_final_batch=True)
with slim.arg_scope(ENet_arg_scope(weight_decay=weight_decay)):
logits_val, probabilities_val = ENet(images_val,
num_classes,
batch_size=eval_batch_size,
is_training=True,
reuse=True,
num_initial_blocks=num_initial_blocks,
stage_two_repeat=stage_two_repeat,
skip_connections=skip_connections)
#perform one-hot-encoding on the ground truth annotation to get same shape as the logits
annotations_val = tf.reshape(annotations_val, shape=[eval_batch_size, image_height, image_width])
annotations_ohe_val = tf.one_hot(annotations_val, num_classes, axis=-1)
        #State the metrics that you want to track. The predictions are not one-hot encoded (should we use OHE instead?)
predictions_val = tf.argmax(probabilities_val, -1)
accuracy_val, accuracy_val_update = tf.contrib.metrics.streaming_accuracy(predictions_val, annotations_val)
mean_IOU_val, mean_IOU_val_update = tf.contrib.metrics.streaming_mean_iou(predictions=predictions_val, labels=annotations_val, num_classes=num_classes)
metrics_op_val = tf.group(accuracy_val_update, mean_IOU_val_update)
#Create an output for showing the segmentation output of validation images
segmentation_output_val = tf.cast(predictions_val, dtype=tf.float32)
segmentation_output_val = tf.reshape(segmentation_output_val, shape=[-1, image_height, image_width, 1])
segmentation_ground_truth_val = tf.cast(annotations_val, dtype=tf.float32)
segmentation_ground_truth_val = tf.reshape(segmentation_ground_truth_val, shape=[-1, image_height, image_width, 1])
def eval_step(sess, metrics_op):
'''
Simply takes in a session, runs the metrics op and some logging information.
'''
start_time = time.time()
_, accuracy_value, mean_IOU_value = sess.run([metrics_op, accuracy_val, mean_IOU_val])
time_elapsed = time.time() - start_time
#Log some information
logging.info('---VALIDATION--- Validation Accuracy: %.4f Validation Mean IOU: %.4f (%.2f sec/step)', accuracy_value, mean_IOU_value, time_elapsed)
return accuracy_value, mean_IOU_value
#=====================================================
#Now finally create all the summaries you need to monitor and group them into one summary op.
tf.summary.scalar('Monitor/Total_Loss', total_loss)
tf.summary.scalar('Monitor/validation_accuracy', accuracy_val)
tf.summary.scalar('Monitor/training_accuracy', accuracy)
tf.summary.scalar('Monitor/validation_mean_IOU', mean_IOU_val)
tf.summary.scalar('Monitor/training_mean_IOU', mean_IOU)
tf.summary.scalar('Monitor/learning_rate', lr)
tf.summary.image('Images/Validation_original_image', images_val, max_outputs=1)
tf.summary.image('Images/Validation_segmentation_output', segmentation_output_val, max_outputs=1)
tf.summary.image('Images/Validation_segmentation_ground_truth', segmentation_ground_truth_val, max_outputs=1)
my_summary_op = tf.summary.merge_all()
#Define your supervisor for running a managed session. Do not run the summary_op automatically or else it will consume too much memory
sv = tf.train.Supervisor(logdir=logdir, summary_op=None, init_fn=None)
# Run the managed session
with sv.managed_session() as sess:
for step in range(int(num_steps_per_epoch * num_epochs)):
#At the start of every epoch, show the vital information:
if step % num_batches_per_epoch == 0:
                    logging.info('Epoch %s/%s', step // num_batches_per_epoch + 1, num_epochs)
learning_rate_value = sess.run([lr])
logging.info('Current Learning Rate: %s', learning_rate_value)
                #Log the summaries every 10 steps or once per epoch, whichever interval is smaller.
if step % min(num_steps_per_epoch, 10) == 0:
loss, training_accuracy, training_mean_IOU = train_step(sess, train_op, sv.global_step, metrics_op=metrics_op)
#Check the validation data only at every third of an epoch
if step % (num_steps_per_epoch / 3) == 0:
for i in range(int(len(image_val_files) / eval_batch_size)):
validation_accuracy, validation_mean_IOU = eval_step(sess, metrics_op_val)
summaries = sess.run(my_summary_op)
sv.summary_computed(sess, summaries)
#If not, simply run the training step
else:
loss, training_accuracy,training_mean_IOU = train_step(sess, train_op, sv.global_step, metrics_op=metrics_op)
#We log the final training loss
logging.info('Final Loss: %s', loss)
logging.info('Final Training Accuracy: %s', training_accuracy)
logging.info('Final Training Mean IOU: %s', training_mean_IOU)
logging.info('Final Validation Accuracy: %s', validation_accuracy)
logging.info('Final Validation Mean IOU: %s', validation_mean_IOU)
#Once all the training has been done, save the log files and checkpoint model
logging.info('Finished training! Saving model to disk now.')
sv.saver.save(sess, sv.save_path, global_step = sv.global_step)
if save_images:
if not os.path.exists(photo_dir):
os.mkdir(photo_dir)
#Plot the predictions - check validation images only
logging.info('Saving the images now...')
predictions_value, annotations_value = sess.run([predictions_val, annotations_val])
for i in range(eval_batch_size):
predicted_annotation = predictions_value[i]
annotation = annotations_value[i]
plt.subplot(1,2,1)
plt.imshow(predicted_annotation)
plt.subplot(1,2,2)
plt.imshow(annotation)
plt.savefig(photo_dir+"/image_" + str(i))
if __name__ == '__main__':
run()
|
import copy
import onnx
import numpy as np
from onnx import numpy_helper
from onnx import TensorProto
from mqbench.utils.logger import logger
class ONNXGraph(object):
def __init__(self, onnx_model_path):
'''Describe an ONNX graph.
Attributes:
input_map[tensor_name] = list of nodes that consume tensor_name
output_map[tensor_name] = the node that produces tensor_name
'''
self.model = onnx.load(onnx_model_path)
self.graph = self.model.graph
self.initializer = {}
self.input_map = {}
self.output_map = {}
self.topologize_graph()
self.prepare_initializer()
def prepare_initializer(self):
self.initializer.clear()
for idx, init in enumerate(self.graph.initializer):
self.initializer[init.name] = (init, idx)
def get_constant(self, name):
for node in self.model.graph.node:
if node.op_type == 'Constant':
if node.output[0] == name:
return numpy_helper.to_array(node.attribute[0].t).tolist()
def get_initializer(self, initializer_name):
return numpy_helper.to_array(self.initializer[initializer_name][0])
def set_initializer(self, initializer_name, value_tensor, raw=True):
idx = None
if initializer_name in self.initializer:
idx = self.initializer[initializer_name][1]
if raw:
initializer = numpy_helper.from_array(value_tensor)
else:
if value_tensor.dtype == np.float32:
data_type = TensorProto.FLOAT
elif value_tensor.dtype == np.uint8:
data_type = TensorProto.UINT8
else:
raise TypeError('Unsupported initializer dtype: {}'.format(value_tensor.dtype))
initializer = onnx.helper.make_tensor(name=initializer_name,
data_type=data_type,
dims=[],
vals=value_tensor,
raw=False)
initializer.name = initializer_name
if idx is not None:
self.graph.initializer.remove(self.graph.initializer[idx])
self.graph.initializer.append(initializer)
self.prepare_initializer()
def topologize_graph(self):
self.input_map.clear()
self.output_map.clear()
for node in self.graph.node:
for output_name in node.output:
self.output_map[output_name] = node
for input_name in node.input:
if input_name not in self.input_map:
self.input_map[input_name] = []
self.input_map[input_name].append(node)
def get_tensor_producer(self, output_name):
if output_name not in self.output_map:
return 'INPUT_TOKEN'
return self.output_map[output_name]
def get_tensor_consumer(self, input_name):
if input_name not in self.input_map:
return ['OUTPUT_TOKEN']
return self.input_map[input_name]
def save_onnx_model(self, model_path):
onnx.save(self.model, model_path)
def remove_node_purely(self, node):
self.graph.node.remove(node)
def insert_node_purely(self, node, idx=0):
self.graph.node.insert(idx, node)
def del_initializer(self, initializer_name):
if initializer_name in self.initializer:
del self.initializer[initializer_name]
def optimize_model(self):
# Delete redundant nodes.
remove_node_list = []
for node in self.model.graph.node:
if len(node.input) == 0:
not_be_used = True
for output_name in node.output:
if output_name in self.input_map:
not_be_used = False
break
if not_be_used:
remove_node_list.append(node)
for node in remove_node_list:
self.remove_node_purely(node)
self.topologize_graph()
# Delete redundant initializers.
for initializer_name in list(self.initializer):
if initializer_name not in self.input_map:
self.del_initializer(initializer_name)
# Make node in topology order.
exist_input = [input_node.name for input_node in self.model.graph.input]
origin_node_num = len(self.model.graph.node)
finished_node_name = []
# O(n^2)
while len(finished_node_name) < origin_node_num:
node_detect = False
for i in range(origin_node_num):
node = self.model.graph.node[i]
all_inputs_exist = True
for input_name in node.input:
if input_name not in exist_input and input_name not in self.initializer:
all_inputs_exist = False
break
if all_inputs_exist:
if node.name not in finished_node_name:
node_detect = True
finished_node_name.append(node.name)
self.model.graph.node.append(node)
for output_name in node.output:
exist_input.append(output_name)
assert node_detect, "Graph is illegal, an error occurred!"
for i in range(origin_node_num):
self.model.graph.node.remove(self.model.graph.node[0])
def set_opset_version(self, domain, version):
opset_info = copy.deepcopy(self.model.opset_import[0])
opset_info.domain = domain
opset_info.version = version
self.model.opset_import.insert(0, opset_info)
class OnnxPreprocess(object):
def replace_resize_op_with_upsample(self, graph, out2node):
nodes_to_be_removed = []
idx = 0
while idx < len(graph.node):
node = graph.node[idx]
if node.op_type == 'Resize':
logger.info(f"Replace resize op: <{node.name}> with upsample.")
mode = 'nearest'
for attr in node.attribute:
if attr.name == 'mode':
mode = attr.s
upsample_node = onnx.helper.make_node('Upsample',
name=node.name,
inputs=[node.input[0], node.input[2]],
outputs=node.output,
mode=mode)
nodes_to_be_removed.append(node)
nodes_to_be_removed.extend(get_constant_inputs(node, out2node))
graph.node.insert(idx, upsample_node)
idx += 1
idx += 1
for node in nodes_to_be_removed:
graph.node.remove(node)
return
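# Note: for opset-11 Resize the inputs are (X, roi, scales, sizes), so
# input[0] is the data tensor and input[2] is `scales`, which matches
# Upsample's (X, scales) signature; the `mode` attribute carries over.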
def remove_fake_pad_op(self, graph, name2data, inp2node, out2node):
nodes_to_be_removed = []
for idx, node in enumerate(graph.node):
if node.op_type == 'Pad':
pads = name2data[node.input[1]]
if all([x == 0 for x in pads]):
logger.info(f"Remove pad op: <{node.name}>.")
next_nodes = inp2node[node.output[0]]
for next_node, idx in next_nodes:
next_node.input[idx] = node.input[0]
nodes_to_be_removed.append(node)
nodes_to_be_removed.extend(get_constant_inputs(node, out2node))
for node in nodes_to_be_removed:
graph.node.remove(node)
return
def update_inp2node_out2node(graph):
out2node = {}
inp2node = {}
for node in graph.node:
for out in node.output:
# suppose each node only has one output
out2node[out] = node
for idx, inp in enumerate(node.input):
# one node may have multiple inputs
if inp not in inp2node:
inp2node[inp] = []
inp2node[inp].append([node, idx])
return out2node, inp2node
def prepare_data(graph):
params = {}
for init in graph.initializer:
params[init.name] = numpy_helper.to_array(init)
for node in graph.node:
if node.op_type == "Constant":
for attr in node.attribute:
if attr.name == "value":
params[node.output[0]] = numpy_helper.to_array(attr.t)
return params
def prepare_initializer(graph):
named_initializer = {}
for init in graph.initializer:
named_initializer[init.name] = init
return named_initializer
def parse_attrs(node_attrs):
attrs = {}
for attr in node_attrs:
if attr.type == onnx.AttributeProto.AttributeType.INTS:
attrs[attr.name] = tuple(attr.ints)
elif attr.type == onnx.AttributeProto.AttributeType.INT:
attrs[attr.name] = attr.i
elif attr.type == onnx.AttributeProto.AttributeType.FLOATS:
attrs[attr.name] = tuple(attr.floats)
elif attr.type == onnx.AttributeProto.AttributeType.FLOAT:
attrs[attr.name] = attr.f
elif attr.type == onnx.AttributeProto.AttributeType.TENSOR:
attrs[attr.name] = numpy_helper.to_array(attr.t)
elif attr.type == onnx.AttributeProto.AttributeType.STRING:
attrs[attr.name] = str(attr.s)
elif attr.type == onnx.AttributeProto.AttributeType.STRINGS:
attrs[attr.name] = tuple([str(x) for x in attr.strings])
else:
raise Exception("ATTR Type [{}] Not Supported!".format(attr.type))
return attrs
def get_constant_inputs(node, out2node):
node_list = []
for inp in node.input:
if inp in out2node and out2node[inp].op_type == 'Constant':
node_list.append(out2node[inp])
return node_list
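# A minimal usage sketch, assuming an ONNX model at the hypothetical path
# 'model.onnx': load the graph, inspect producer/consumer relations, then
# write back an optimized, topologically sorted model.
if __name__ == '__main__':
    graph = ONNXGraph('model.onnx')
    first_node = graph.graph.node[0]
    # Producers of each input ('INPUT_TOKEN' for graph inputs/initializers).
    producers = [graph.get_tensor_producer(inp) for inp in first_node.input]
    # Nodes consuming the first output ('OUTPUT_TOKEN' if nothing consumes it).
    consumers = graph.get_tensor_consumer(first_node.output[0])
    graph.optimize_model()
    graph.save_onnx_model('model_optimized.onnx')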
|
"""
Compatibility module
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2013, yt Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from .grid_traversal import *
from .particle_mesh_operations import *
from .depth_first_octree import *
from .fortran_reader import *
from .interpolators import *
from .misc_utilities import *
from .basic_octree import *
from .image_utilities import *
from .points_in_volume import *
from .quad_tree import *
from .marching_cubes import *
from .write_array import *
from .mesh_utilities import *
from .contour_finding import *
from .line_integral_convolution import *
|
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Resolve interface dependencies, producing a merged IdlDefinitions object.
This library computes interface dependencies (partial interfaces and
includes), reads the dependency files, and merges them to the IdlDefinitions
for the main IDL file, producing an IdlDefinitions object representing the
entire interface.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler#TOC-Dependency-resolution
"""
import os.path
from utilities import idl_filename_to_component, is_valid_component_dependency, merge_dict_recursively
# The following extended attributes can be applied to a dependency interface,
# and are then applied to the individual members when merging.
# Note that this moves the extended attribute from the interface to the member,
# which changes the semantics and yields different code than the same extended
# attribute on the main interface.
DEPENDENCY_EXTENDED_ATTRIBUTES = frozenset([
'RuntimeEnabled',
'SecureContext',
])
class InterfaceDependencyResolver(object):
def __init__(self, interfaces_info, reader):
"""Initialize dependency resolver.
Args:
interfaces_info:
dict of interfaces information, from compute_dependencies.py
reader:
IdlReader, used for reading dependency files
"""
self.interfaces_info = interfaces_info
self.reader = reader
def resolve_dependencies(self, definitions, component):
"""Resolve dependencies, merging them into IDL definitions of main file.
Dependencies consist of 'partial interface' for the same interface as
in the main file, and mixins that this interface 'includes'.
These are merged into the main IdlInterface, as the main IdlInterface
implements all these members.
Partial interfaces and mixins are added to IdlDefinitions, but not
merged into the main IdlInterface, as these are only referenced (their
members are introspected, but not implemented in this interface).
Inherited extended attributes are also added to the main IdlInterface.
Modifies definitions in place by adding parsed dependencies.
Args:
definitions: IdlDefinitions object, modified in place
component:
string, describing where the above definitions are defined,
'core' or 'modules'. See KNOWN_COMPONENTS in utilities.py
Returns:
A dictionary whose key is component and value is IdlDefinitions
object whose dependency is resolved.
Raises:
Exception:
A given IdlDefinitions object doesn't have any interfaces,
or a given IdlDefinitions object has incorrect referenced
interfaces.
"""
# TODO(crbug.com/579896): we need to resolve dependency when we
# support partial dictionary.
if not definitions.interfaces:
raise Exception('No need to resolve any dependencies of '
'this definition: %s, because this should '
'have a dictionary.' % definitions.idl_name)
target_interface = next(iter(definitions.interfaces.values()))
interface_name = target_interface.name
interface_info = self.interfaces_info[interface_name]
if 'inherited_extended_attributes' in interface_info:
target_interface.extended_attributes.update(
interface_info['inherited_extended_attributes'])
resolved_definitions = merge_interface_dependencies(
definitions, component, target_interface,
interface_info['dependencies_full_paths'] +
interface_info['dependencies_other_component_full_paths'],
self.reader)
inherit_unforgeable_attributes(resolved_definitions,
self.interfaces_info)
for referenced_interface_name in \
interface_info['referenced_interfaces']:
referenced_definitions = self.reader.read_idl_definitions(
self.interfaces_info[referenced_interface_name]['full_path'])
for referenced_component in referenced_definitions:
if not is_valid_component_dependency(component,
referenced_component):
raise Exception('This definitions: %s is defined in %s '
'but reference interface:%s is defined '
'in %s' % (definitions.idl_name, component,
referenced_interface_name,
referenced_component))
resolved_definitions[component].update(
referenced_definitions[component])
return resolved_definitions
def merge_interface_dependencies(definitions, component, target_interface,
dependency_idl_filenames, reader):
"""Merge dependencies ('partial interface' and 'implements') in dependency_idl_filenames into target_interface.
Args:
definitions: IdlDefinitions object, modified in place
component:
string, describing where the above definitions are defined,
'core' or 'modules'. See KNOWN_COMPONENTS in utilities.py
target_interface: IdlInterface object, modified in place
dependency_idl_filenames:
Idl filenames which depend on the above definitions.
reader: IdlReader object.
Returns:
A dictionary whose key is component and value is IdlDefinitions
object whose dependency is resolved.
"""
resolved_definitions = {component: definitions}
# Sort so the order is consistent and output can be compared from run to run.
for dependency_idl_filename in sorted(dependency_idl_filenames):
dependency_definitions = reader.read_idl_file(dependency_idl_filename)
dependency_component = idl_filename_to_component(
dependency_idl_filename)
dependency_interface = next(
iter(dependency_definitions.interfaces.values()))
transfer_extended_attributes(dependency_interface,
dependency_idl_filename)
# We need to use different checkdeps here for partial interface and
# inheritance.
if dependency_interface.is_partial:
# Case: dependency_interface is a partial interface of
# target_interface.
# So,
# - A partial interface defined in modules can update
# the original interface defined in core.
# However,
# - A partial interface defined in core cannot update
# the original interface defined in modules.
if not is_valid_component_dependency(dependency_component,
component):
raise Exception(
'The partial interface:%s in %s cannot update '
'the original interface:%s in %s' %
(dependency_interface.name, dependency_component,
target_interface.name, component))
if dependency_component in resolved_definitions:
# When merging a new partial interface, we should not overwrite
# the ImplementedAs extended attribute already present in the merged
# partial interface.
# See also the comment on the "if 'ImplementedAs' not in ..." line
# below.
dependency_interface.extended_attributes.pop(
'ImplementedAs', None)
resolved_definitions[dependency_component].update(
dependency_definitions)
continue
dependency_interface.extended_attributes.update(
target_interface.extended_attributes)
assert target_interface == \
definitions.interfaces[dependency_interface.name]
# A partial interface should use its original interface's
# ImplementedAs. If the original interface doesn't have,
# remove ImplementedAs defined in the partial interface.
# Because partial interface needs the original interface's
# cpp class to obtain partial interface's cpp class.
# e.g. V8WindowPartial.cpp:
# DOMWindow* impl = V8Window::ToImpl(holder);
# DOMWindowQuota* cpp_value(DOMWindowQuota::webkitStorageInfo(impl));
# TODO(tasak): remove ImplementedAs extended attributes
# from all partial interfaces. Instead, rename all cpp/header
# files correctly. ImplementedAs should not be allowed in
# partial interfaces.
if 'ImplementedAs' not in target_interface.extended_attributes:
dependency_interface.extended_attributes.pop(
'ImplementedAs', None)
dependency_interface.original_interface = target_interface
target_interface.partial_interfaces.append(dependency_interface)
resolved_definitions[dependency_component] = dependency_definitions
else:
# Case: |target_interface| includes |dependency_interface| mixin.
# So,
# - An interface defined in modules can include any interface mixin
# defined in core.
# However,
# - An interface defined in core cannot include an interface mixin
# defined in modules.
if not dependency_interface.is_mixin:
raise Exception(
'The interface:%s cannot include '
'the non-mixin interface: %s.' %
(target_interface.name, dependency_interface.name))
if not is_valid_component_dependency(component,
dependency_component):
raise Exception(
'The interface:%s in %s cannot include '
'the interface mixin:%s in %s.' %
(target_interface.name, component,
dependency_interface.name, dependency_component))
# merges partial interfaces
resolved_definitions[component].update(dependency_definitions)
# Mixins are also merged into the target interface, so Code
# Generator can just iterate over one list (and not need to handle
# 'includes' itself).
target_interface.merge(dependency_interface)
return resolved_definitions
def transfer_extended_attributes(dependency_interface,
dependency_idl_filename):
"""Transfer extended attributes from dependency interface onto members.
Merging consists of storing certain interface-level data in extended
attributes of the *members* (because there is no separate dependency
interface post-merging).
The data storing consists of:
* moving certain extended attributes from the dependency interface
to its members (deleting the extended attribute from the interface)
* storing the C++ class of the implementation in an internal
extended attribute of each member, [PartialInterfaceImplementedAs]
No return: modifies dependency_interface in place.
"""
merged_extended_attributes = {}
for key in DEPENDENCY_EXTENDED_ATTRIBUTES:
if key not in dependency_interface.extended_attributes:
continue
merged_extended_attributes[key] = \
dependency_interface.extended_attributes[key]
# Remove the merged attributes from the original dependency interface.
# This ensures that if other dependency interfaces are merged onto this
# one, its extended_attributes do not leak through
# (https://crbug.com/603782).
del dependency_interface.extended_attributes[key]
# A partial interface's members are implemented as static member functions
# in a separate C++ class. This class name is stored in
# [PartialInterfaceImplementedAs] which is copied from [ImplementedAs] on
# the partial interface definition.
#
# Note that implemented interfaces do *not* need [ImplementedAs], since
# they are implemented on the C++ object |impl| itself, just like members of
# the main interface definition, so the bindings do not need to know in
# which class implemented interfaces are implemented.
#
# Currently [LegacyTreatAsPartialInterface] can be used to have partial
# interface behavior on mixins, but this is being removed as legacy cruft:
# http://crbug.com/360435
#
# Note that [ImplementedAs] is used with different meanings on interfaces
# and members:
# for Blink class name and function name (or constant name), respectively.
# Thus we do not want to copy this from the interface to the member, but
# instead extract it and handle it separately.
if dependency_interface.is_partial:
if 'ImplementedAs' not in dependency_interface.extended_attributes:
raise ValueError('Partial interface in %s must have ImplementedAs.'
% dependency_idl_filename)
merged_extended_attributes['PartialInterfaceImplementedAs'] = \
dependency_interface.extended_attributes.pop('ImplementedAs')
elif 'LegacyTreatAsPartialInterface' in \
dependency_interface.extended_attributes:
merged_extended_attributes['PartialInterfaceImplementedAs'] = (
dependency_interface.extended_attributes.pop(
'ImplementedAs', dependency_interface.name))
def update_attributes(attributes, extras):
for key, value in extras.items():
if key not in attributes:
attributes[key] = value
for attribute in dependency_interface.attributes:
update_attributes(attribute.extended_attributes,
merged_extended_attributes)
for constant in dependency_interface.constants:
update_attributes(constant.extended_attributes,
merged_extended_attributes)
for operation in dependency_interface.operations:
update_attributes(operation.extended_attributes,
merged_extended_attributes)
def inherit_unforgeable_attributes(resolved_definitions, interfaces_info):
"""Inherits [Unforgeable] attributes and updates the arguments accordingly.
For each interface in |resolved_definitions|, collects all [Unforgeable]
attributes in ancestor interfaces and adds them to the interface.
'referenced_interfaces' and 'cpp_includes' in |interfaces_info| are updated
accordingly.
"""
def collect_unforgeable_attributes_in_ancestors(interface_name, component):
if not interface_name:
# unforgeable_attributes, referenced_interfaces, cpp_includes
return [], [], set()
interface = interfaces_info[interface_name]
unforgeable_attributes, referenced_interfaces, cpp_includes = \
collect_unforgeable_attributes_in_ancestors(
interface.get('parent'), component)
this_unforgeable = interface.get('unforgeable_attributes', [])
for attr in this_unforgeable:
if attr.defined_in is None:
attr.defined_in = interface_name
unforgeable_attributes.extend(this_unforgeable)
this_referenced = [
attr.idl_type.base_type for attr in this_unforgeable
if attr.idl_type.base_type in interface.get(
'referenced_interfaces', [])
]
referenced_interfaces.extend(this_referenced)
cpp_includes.update(
interface.get('cpp_includes', {}).get(component, {}))
return unforgeable_attributes, referenced_interfaces, cpp_includes
for component, definitions in resolved_definitions.items():
for interface_name, interface in definitions.interfaces.items():
interface_info = interfaces_info[interface_name]
inherited_unforgeable_attributes, referenced_interfaces, cpp_includes = \
collect_unforgeable_attributes_in_ancestors(
interface_info.get('parent'), component)
# This loop may process the same interface many times, so it's
# possible that we're adding the same attributes twice or more.
# So check if there is a duplicate.
for attr in inherited_unforgeable_attributes:
if attr not in interface.attributes:
interface.attributes.append(attr)
referenced_interfaces.extend(
interface_info.get('referenced_interfaces', []))
interface_info['referenced_interfaces'] = sorted(
set(referenced_interfaces))
merge_dict_recursively(interface_info,
{'cpp_includes': {
component: cpp_includes
}})
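# A minimal sketch of the transfer semantics above, using plain dicts in place
# of real IdlInterface members (all names here are illustrative): interface-level
# extended attributes are copied onto each member, but a key the member already
# defines is never overwritten.
if __name__ == '__main__':
    interface_level = {'RuntimeEnabled': 'FeatureX',
                       'PartialInterfaceImplementedAs': 'DOMWindowQuota'}
    member_level = {'RuntimeEnabled': 'FeatureY'}
    for key, value in interface_level.items():
        if key not in member_level:
            member_level[key] = value
    # The member keeps its own RuntimeEnabled and gains the implementing class.
    assert member_level == {'RuntimeEnabled': 'FeatureY',
                            'PartialInterfaceImplementedAs': 'DOMWindowQuota'}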
|
import logging
import threading
import requests
from kube_hunter.conf import get_config
from kube_hunter.core.types import KubernetesCluster
from kube_hunter.core.types.vulnerabilities import (
GeneralSensitiveInformationTechnique,
ExposedSensitiveInterfacesTechnique,
MountServicePrincipalTechnique,
ListK8sSecretsTechnique,
AccessContainerServiceAccountTechnique,
AccessK8sApiServerTechnique,
AccessKubeletAPITechnique,
AccessK8sDashboardTechnique,
InstanceMetadataApiTechnique,
ExecIntoContainerTechnique,
SidecarInjectionTechnique,
NewContainerTechnique,
GeneralPersistenceTechnique,
HostPathMountPrivilegeEscalationTechnique,
PrivilegedContainerTechnique,
ClusterAdminBindingTechnique,
ARPPoisoningTechnique,
CoreDNSPoisoningTechnique,
DataDestructionTechnique,
GeneralDefenseEvasionTechnique,
ConnectFromProxyServerTechnique,
CVERemoteCodeExecutionCategory,
CVEPrivilegeEscalationCategory,
CVEDenialOfServiceTechnique,
)
logger = logging.getLogger(__name__)
class EventFilterBase:
def __init__(self, event):
self.event = event
# Returns self.event by default.
# If changes have been made, return the new, altered event.
# Return None to indicate the event should be discarded.
def execute(self):
return self.event
class Event:
def __init__(self):
self.previous = None
self.hunter = None
# newest attribute gets selected first
def __getattr__(self, name):
if name == "previous":
return None
for event in self.history:
if name in event.__dict__:
return event.__dict__[name]
# Event's logical location, used mainly for reports.
# If an event doesn't implement it, check the previous event.
# This is because events are composed (previous -> previous ...)
# rather than inherited.
def location(self):
location = None
if self.previous:
location = self.previous.location()
return location
# returns the event history ordered from newest to oldest
@property
def history(self):
previous, history = self.previous, list()
while previous:
history.append(previous)
previous = previous.previous
return history
class MultipleEventsContainer(Event):
"""
This is the class of the object a hunter receives when it is registered to multiple events.
"""
def __init__(self, events):
self.events = events
def get_by_class(self, event_class):
for event in self.events:
if event.__class__ == event_class:
return event
class Service:
def __init__(self, name, path="", secure=True):
self.name = name
self.secure = secure
self.path = path
self.role = "Node"
# If a service account token was specified, load it into the Service class.
# We load it here because, in general, all kubernetes services can be authenticated with the token.
config = get_config()
if config.service_account_token:
self.auth_token = config.service_account_token
def get_name(self):
return self.name
def get_path(self):
return "/" + self.path if self.path else ""
def explain(self):
return self.__doc__
class Vulnerability:
severity = dict(
{
GeneralSensitiveInformationTechnique: "low",
ExposedSensitiveInterfacesTechnique: "high",
MountServicePrincipalTechnique: "high",
ListK8sSecretsTechnique: "high",
AccessContainerServiceAccountTechnique: "low",
AccessK8sApiServerTechnique: "medium",
AccessKubeletAPITechnique: "medium",
AccessK8sDashboardTechnique: "medium",
InstanceMetadataApiTechnique: "high",
ExecIntoContainerTechnique: "high",
SidecarInjectionTechnique: "high",
NewContainerTechnique: "high",
GeneralPersistenceTechnique: "high",
HostPathMountPrivilegeEscalationTechnique: "high",
PrivilegedContainerTechnique: "high",
ClusterAdminBindingTechnique: "high",
ARPPoisoningTechnique: "medium",
CoreDNSPoisoningTechnique: "high",
DataDestructionTechnique: "high",
GeneralDefenseEvasionTechnique: "high",
ConnectFromProxyServerTechnique: "low",
CVERemoteCodeExecutionCategory: "high",
CVEPrivilegeEscalationCategory: "high",
CVEDenialOfServiceTechnique: "medium",
}
)
# TODO: make vid mandatory once migration is done
def __init__(self, component, name, category=None, vid="None"):
self.vid = vid
self.component = component
self.category = category
self.name = name
self.evidence = ""
self.role = "Node"
def get_vid(self):
return self.vid
def get_category(self):
if self.category:
return self.category.name
def get_name(self):
return self.name
def explain(self):
return self.__doc__
def get_severity(self):
return self.severity.get(self.category, "low")
event_id_count_lock = threading.Lock()
event_id_count = 0
class NewHostEvent(Event):
def __init__(self, host, cloud=None):
global event_id_count
self.host = host
self.cloud_type = cloud
with event_id_count_lock:
self.event_id = event_id_count
event_id_count += 1
@property
def cloud(self):
if not self.cloud_type:
self.cloud_type = self.get_cloud()
return self.cloud_type
def get_cloud(self):
config = get_config()
try:
logger.debug("Checking whether the cluster is deployed on azure's cloud")
# Leverage 3rd tool https://github.com/blrchen/AzureSpeed for Azure cloud ip detection
result = requests.get(
f"https://api.azurespeed.com/api/region?ipOrUrl={self.host}",
timeout=config.network_timeout,
).json()
return result["cloud"] or "NoCloud"
except requests.ConnectionError:
logger.info("Failed to connect cloud type service", exc_info=True)
except Exception:
logger.warning(f"Unable to check cloud of {self.host}", exc_info=True)
return "NoCloud"
def __str__(self):
return str(self.host)
# Event's logical location to be used mainly for reports.
def location(self):
return str(self.host)
class OpenPortEvent(Event):
def __init__(self, port):
self.port = port
def __str__(self):
return str(self.port)
# Event's logical location to be used mainly for reports.
def location(self):
if self.host:
location = str(self.host) + ":" + str(self.port)
else:
location = str(self.port)
return location
class HuntFinished(Event):
pass
class HuntStarted(Event):
pass
class ReportDispatched(Event):
pass
class K8sVersionDisclosure(Vulnerability, Event):
"""The kubernetes version could be obtained from the {} endpoint"""
def __init__(self, version, from_endpoint, extra_info="", category=None):
Vulnerability.__init__(
self,
KubernetesCluster,
"K8s Version Disclosure",
category=ExposedSensitiveInterfacesTechnique,
vid="KHV002",
)
self.version = version
self.from_endpoint = from_endpoint
self.extra_info = extra_info
self.evidence = version
# depending on where the version came from, we might also want to override the category
if category:
self.category = category
def explain(self):
return self.__doc__.format(self.from_endpoint) + self.extra_info
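# A minimal sketch of event composition (host and port values are made up):
# attributes missing from the newest event are resolved from its history via
# __getattr__, so location() can combine the host from a previous event with
# this event's port.
if __name__ == '__main__':
    host_event = NewHostEvent(host='10.0.0.1')
    port_event = OpenPortEvent(port=10250)
    port_event.previous = host_event
    print(port_event.location())  # -> '10.0.0.1:10250'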
|
from ..base.twilltestcase import common, ShedTwillTestCase
class BasicToolShedFeatures(ShedTwillTestCase):
'''Test installing a basic repository.'''
def test_0000_initiate_users(self):
"""Create necessary user accounts."""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert test_user_1 is not None, f'Problem retrieving user with email {common.test_user_1_email} from the database'
self.test_db_util.get_private_role(test_user_1)
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, f'Problem retrieving user with email {common.admin_email} from the database'
self.test_db_util.get_private_role(admin_user)
self.galaxy_login(email=common.admin_email, username=common.admin_username)
galaxy_admin_user = self.test_db_util.get_galaxy_user(common.admin_email)
assert galaxy_admin_user is not None, f'Problem retrieving user with email {common.admin_email} from the database'
self.test_db_util.get_galaxy_private_role(galaxy_admin_user)
def test_0005_ensure_repositories_and_categories_exist(self):
'''Create the 0000 category and upload the filtering repository to it, if necessary.'''
self.login(email=common.admin_email, username=common.admin_username)
category = self.create_category(name='Test 0000 Basic Repository Features 2', description='Test Description 0000 Basic Repository Features 2')
category = self.create_category(name='Test 0000 Basic Repository Features 1', description='Test Description 0000 Basic Repository Features 1')
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.get_or_create_repository(name='filtering_0000',
description="Galaxy's filtering tool",
long_description="Long description of Galaxy's filtering tool",
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id))
if self.repository_is_new(repository):
self.upload_file(repository,
filename='filtering/filtering_1.1.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 1.1.0 tarball.',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='filtering/filtering_0000.txt',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded readme for 1.1.0',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='filtering/filtering_2.2.0.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded filtering 2.2.0 tarball.',
strings_displayed=[],
strings_not_displayed=[])
self.upload_file(repository,
filename='readme.txt',
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded readme for 2.2.0',
strings_displayed=[],
strings_not_displayed=[])
def test_0010_browse_tool_sheds(self):
"""Browse the available tool sheds in this Galaxy instance."""
self.galaxy_login(email=common.admin_email, username=common.admin_username)
self.browse_tool_shed(url=self.url, strings_displayed=['Test 0000 Basic Repository Features 1', 'Test 0000 Basic Repository Features 2'])
def test_0015_browse_test_0000_category(self):
'''Browse the category created in test 0000. It should contain the filtering_0000 repository also created in that test.'''
category = self.test_db_util.get_category_by_name('Test 0000 Basic Repository Features 1')
self.browse_category(category, strings_displayed=['filtering_0000'])
def test_0020_preview_filtering_repository(self):
'''Load the preview page for the filtering_0000 repository in the tool shed.'''
self.preview_repository_in_tool_shed('filtering_0000', common.test_user_1_name, strings_displayed=['filtering_0000', 'Valid tools'])
def test_0025_install_filtering_repository(self):
self.install_repository('filtering_0000',
common.test_user_1_name,
'Test 0000 Basic Repository Features 1',
new_tool_panel_section_label='test_1000')
installed_repository = self.test_db_util.get_installed_repository_by_name_owner('filtering_0000', common.test_user_1_name)
strings_displayed = ['filtering_0000',
"Galaxy's filtering tool",
'user1',
self.url.replace('http://', ''),
str(installed_repository.installed_changeset_revision)]
self.display_galaxy_browse_repositories_page(strings_displayed=strings_displayed)
strings_displayed.extend(['Installed tool shed repository', 'Valid tools', 'Filter1'])
self.display_installed_repository_manage_page(installed_repository, strings_displayed=strings_displayed)
self.verify_tool_metadata_for_installed_repository(installed_repository)
def test_0030_install_filtering_repository_again(self):
'''Attempt to install the already installed filtering repository.'''
installed_repository = self.test_db_util.get_installed_repository_by_name_owner('filtering_0000', common.test_user_1_name)
# The page displayed after installation is the ajax "Monitor installing tool shed repositories" page. Since the filtering
# repository was already installed, nothing will be in the process of being installed, so the grid will not display 'filtering_0000'.
post_submit_strings_not_displayed = ['filtering_0000']
self.install_repository('filtering_0000',
common.test_user_1_name,
'Test 0000 Basic Repository Features 1',
post_submit_strings_not_displayed=post_submit_strings_not_displayed)
strings_displayed = ['filtering_0000',
"Galaxy's filtering tool",
'user1',
self.url.replace('http://', ''),
str(installed_repository.installed_changeset_revision)]
self.display_installed_repository_manage_page(installed_repository, strings_displayed=strings_displayed)
self.display_galaxy_browse_repositories_page(strings_displayed=strings_displayed)
def test_0035_verify_installed_repository_metadata(self):
'''Verify that resetting the metadata on an installed repository does not change the metadata.'''
self.verify_installed_repository_metadata_unchanged('filtering_0000', common.test_user_1_name)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from util import sample_and_group
from model.pct_module import *
"""
pct分割网络完整版,本文件的pct是分割版pct
"""
class Pct(nn.Module):
"""Pct网络
网络结构(从上到下,从左到右):
Input Embedding module: Neighborhood Embedding i.e. LBR-->SG
Attention module: four stacked offset-attention layer
Stack to 1024 channel
Classification: LBRD-->LBRD-->Linear-->Score
Returns:
Tensor: 提取到的特征
"""
def __init__(self, args, output_channels=40):
super(Pct, self).__init__()
self.args = args
# Input Embedding module, here is Neighborhood Embedding
## Point Embedding
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
## SG
self.gather_local_0 = SG_Layer(in_channels=128, out_channels=128)
self.gather_local_1 = SG_Layer(in_channels=256, out_channels=256)
# Attention module, here is Offset-Attention
self.pt_last = Offset_Attention_Position_Embedding(args)
# Stack to 1024 channel
self.conv_fuse = nn.Sequential(nn.Conv1d(1280, 1024, kernel_size=1, bias=False),
nn.BatchNorm1d(1024),
nn.LeakyReLU(negative_slope=0.2))
# Classification
self.linear1 = nn.Linear(1024, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=args.dropout)
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=args.dropout)
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
# B, D, N
x = F.relu(self.bn2(self.conv2(x)))
x = x.permute(0, 2, 1)
new_xyz, new_feature = sample_and_group(npoint=512, radius=0.15, nsample=32, xyz=xyz, points=x)
feature_0 = self.gather_local_0(new_feature)
feature = feature_0.permute(0, 2, 1)
new_xyz, new_feature = sample_and_group(npoint=256, radius=0.2, nsample=32, xyz=new_xyz, points=feature)
feature_1 = self.gather_local_1(new_feature)
x = self.pt_last(feature_1, new_xyz)
x = torch.cat([x, feature_1], dim=1)
x = self.conv_fuse(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
x = self.dp1(x)
x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
x = self.dp2(x)
x = self.linear3(x)
return x
class SPct(nn.Module):
"""SPct网络
网络结构(从上到下,从左到右):
Input Embedding module: no Neighborhood Embedding i.e. LBR
Attention module: four stacked offset-attention layer
Stack to 1024 channel
Classification: LBRD-->LBRD-->Linear-->Score
Returns:
Tensor: 提取到的特征
"""
def __init__(self, args, output_channels=40):
super(SPct, self).__init__()
self.args = args
# Input Embedding module
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
# Attention module
self.pt_last = Offset_Attention(args, channels=64)
# Stack to 1024 channel
self.conv_fuse = nn.Sequential(nn.Conv1d(256, 1024, kernel_size=1, bias=False),
nn.BatchNorm1d(1024),
nn.LeakyReLU(negative_slope=0.2))
# Classification
self.linear1 = nn.Linear(1024, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=args.dropout)
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=args.dropout)
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
batch_size, _, _ = x.size()
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
# B, D, N
x = F.relu(self.bn2(self.conv2(x)))
x = self.pt_last(x)
x = self.conv_fuse(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
x = self.dp1(x)
x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
x = self.dp2(x)
x = self.linear3(x)
return x
class NPct(nn.Module):
"""NPct网络
网络结构(从上到下,从左到右):
Input Embedding module: No Neighborhood Embedding i.e. LBR
Attention module: four stacked self-attention layer
Stack to 1024 channel
Classification: LBRD-->LBRD-->Linear-->Score
Returns:
Tensor: 提取到的特征
"""
def __init__(self, args, output_channels=40):
super(NPct, self).__init__()
self.args = args
# Input Embedding module, no Neighborhood Embedding, Point Embedding i.e. two LBR
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
# Attention module
self.pt_last = Self_Attention(args, channels=64)
# Stack to 1024 channel
self.conv_fuse = nn.Sequential(nn.Conv1d(256, 1024, kernel_size=1, bias=False),
nn.BatchNorm1d(1024),
nn.LeakyReLU(negative_slope=0.2))
# Classification
self.linear1 = nn.Linear(1024, 512, bias=False)
self.bn6 = nn.BatchNorm1d(512)
self.dp1 = nn.Dropout(p=args.dropout)
self.linear2 = nn.Linear(512, 256)
self.bn7 = nn.BatchNorm1d(256)
self.dp2 = nn.Dropout(p=args.dropout)
self.linear3 = nn.Linear(256, output_channels)
def forward(self, x):
batch_size, _, _ = x.size()
# B, D, N
x = F.relu(self.bn1(self.conv1(x)))
# B, D, N
x = F.relu(self.bn2(self.conv2(x)))
x = self.pt_last(x)
x = self.conv_fuse(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = F.leaky_relu(self.bn6(self.linear1(x)), negative_slope=0.2)
x = self.dp1(x)
x = F.leaky_relu(self.bn7(self.linear2(x)), negative_slope=0.2)
x = self.dp2(x)
x = self.linear3(x)
return x
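# A minimal smoke test (sketch only): `args` stands in for the real argparse
# namespace; `dropout` is the only attribute read directly here, though the
# attention modules from model.pct_module may read more. Input is
# (batch, 3, num_points); output is (batch, output_channels).
if __name__ == '__main__':
    import argparse
    args = argparse.Namespace(dropout=0.5)
    model = NPct(args, output_channels=40)
    points = torch.rand(2, 3, 1024)
    logits = model(points)
    print(logits.shape)  # expected: torch.Size([2, 40])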
|
#!/usr/bin/env python2.7
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
import os
import subprocess
import sys
TERM_COLOR_RED = '\033[91m'
TERM_COLOR_END = '\033[0m'
# Updates the path of the main target in the depfile to the relative path
# from base_path build_output_path
def fix_depfile(depfile_path, base_path, build_output_path):
with open(depfile_path, "r") as depfile:
content = depfile.read()
content_split = content.split(': ', 1)
target_path = os.path.relpath(build_output_path, start=base_path)
new_content = "%s: %s" % (target_path, content_split[1])
with open(depfile_path, "w") as depfile:
depfile.write(new_content)
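# Worked example (hypothetical paths): given a depfile containing
#   "/out/default/foo.rlib: ../../src/lib.rs ../../src/util.rs"
# with base_path "/out" and build_output_path "/out/default/foo.rlib",
# the target is rebased relative to base_path, producing
#   "default/foo.rlib: ../../src/lib.rs ../../src/util.rs"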
# Creates the directory containing the given file.
def create_base_directory(file):
path = os.path.dirname(file)
try:
os.makedirs(path)
except os.error:
# Already existed.
pass
# Starts the given command and returns the newly created job.
def start_command(args, env):
return subprocess.Popen(args, env=env, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
def main():
parser = argparse.ArgumentParser("Compiles a Rust crate")
parser.add_argument("--rustc",
help="Path to rustc",
required=True)
# This forces a recompile when the CIPD version changes. The value is unused.
parser.add_argument("--cipd-version",
help="CIPD version of Rust toolchain",
required=False)
parser.add_argument("--crate-root",
help="Path to source directory",
required=True)
parser.add_argument("--crate-type",
help="Type of crate to build",
required=True,
choices=["bin", "rlib", "staticlib", "proc-macro"])
parser.add_argument("--crate-name",
help="Name of crate to build",
required=True)
parser.add_argument("--edition",
help="Edition of rust to use when compiling the crate",
required=True,
choices=["2015", "2018"])
parser.add_argument("--opt-level",
help="Optimization level",
required=True,
choices=["0", "1", "2", "3", "s", "z"])
parser.add_argument("--lto",
help="Use LTO",
required=False,
choices=["thin", "fat"])
parser.add_argument("--output-file",
help="Path at which the output file should be stored",
required=True)
parser.add_argument("--depfile",
help="Path at which the output depfile should be stored",
required=True)
parser.add_argument("--test",
action="store_true",
help="Whether to build the target in test configuration",
default=False)
parser.add_argument("--root-out-dir",
help="Root output dir on which depfile paths should be rebased",
required=True)
parser.add_argument("--target",
help="Target for which this crate is being compiled",
required=True)
parser.add_argument("--cmake-dir",
help="Path to the directory containing cmake",
required=True)
parser.add_argument("--clang_prefix",
help="Path to the clang prefix",
required=True)
parser.add_argument("--clang-resource-dir",
help="Path to the clang resource dir",
required=True)
parser.add_argument("--sysroot",
help="Path to the sysroot",
required=True)
parser.add_argument("--lib-dir",
help="Link path for binary libraries",
action='append', default=[])
parser.add_argument("--lib-dir-file",
help="File of --lib-dir directory names, one per line")
parser.add_argument("--first-party-crate-root",
help="Path to directory containing the libs for first-party dependencies",
required=True)
parser.add_argument("--third-party-crate-root",
help="Path to directory containing the libs for third-party dependencies",
required=True)
parser.add_argument("--dep-data",
action="append",
help="Path to metadata from a crate dependency",
required=False)
parser.add_argument("--mmacosx-version-min",
help="Select macosx framework version",
required=False)
parser.add_argument("--symbol-level",
help="Symbols to include (0=none, 1=minimal, 2=full)",
choices=["0", "1", "2"],
required=True)
parser.add_argument("--cap-lints",
help="Maximum error promotion for lints",
choices=["deny", "allow", "warn"],
required=True)
parser.add_argument("--unstable-rust-feature",
help="Unstable Rust feature to allow",
action="append",
dest="unstable_rust_features",
required=False)
parser.add_argument("--feature",
help="Feature to enable",
action="append",
dest="features",
required=False)
parser.add_argument("--remap-path-prefix",
help="Remap source names in output",
action="append",
required=False)
parser.add_argument("--mac-host",
help="Whether or not the host is a Mac",
default=False,
action="store_true",
required=False)
args = parser.parse_args()
env = os.environ.copy()
env["CC"] = os.path.join(args.clang_prefix, "clang")
env["CXX"] = os.path.join(args.clang_prefix, "clang++")
env["AR"] = os.path.join(args.clang_prefix, "llvm-ar")
env["RANLIB"] = os.path.join(args.clang_prefix, "llvm-ranlib")
if args.cmake_dir:
env["PATH"] = "%s:%s" % (env["PATH"], args.cmake_dir)
env["RUST_BACKTRACE"] = "1"
create_base_directory(args.output_file)
if args.lib_dir_file:
with open(args.lib_dir_file) as f:
args.lib_dir += [line.strip() for line in f.readlines()]
call_args = [
args.rustc,
args.crate_root,
"-Dwarnings",
"--cap-lints",
args.cap_lints,
"--edition=%s" % args.edition,
"--crate-type=%s" % args.crate_type,
"--crate-name=%s" % args.crate_name,
"--target=%s" % args.target,
"-Copt-level=%s" % args.opt_level,
"-Cdebuginfo=%s" % args.symbol_level,
"--color=always",
"-Zallow-features=%s" % ",".join(args.unstable_rust_features or [])
]
call_args += ["-Lnative=%s" % dir for dir in args.lib_dir]
if args.test:
call_args += ["--test"]
if args.features:
for feature in args.features:
call_args += ["--cfg", "feature=\"%s\"" % feature]
if args.remap_path_prefix:
for path_prefix in args.remap_path_prefix:
call_args += ["--remap-path-prefix", path_prefix]
if args.target.endswith("fuchsia"):
call_args += [
"-L", os.path.join(args.sysroot, "lib"),
"-Clinker=%s" % os.path.join(args.clang_prefix, "lld"),
"-Clink-arg=--pack-dyn-relocs=relr",
"-Clink-arg=--sysroot=%s" % args.sysroot,
"-Clink-arg=-L%s" % os.path.join(args.sysroot, "lib"),
"-Clink-arg=-L%s" % os.path.join(args.clang_resource_dir, args.target, "lib"),
"-Clink-arg=--threads",
"-Clink-arg=-dynamic-linker=ld.so.1",
"-Clink-arg=--icf=all",
]
if args.target.startswith("aarch64"):
call_args += ["-Clink-arg=--fix-cortex-a53-843419"]
else:
call_args += [
"-Clinker=%s" % os.path.join(args.clang_prefix, "clang"),
]
if args.target.startswith("aarch64"):
call_args += ["-Clink-arg=-Wl,--fix-cortex-a53-843419"]
if args.target.endswith("linux-gnu"):
call_args += ["-Clink-arg=-Wl,--build-id"]
if not args.target.endswith("darwin"):
call_args += ["-Clink-arg=-Wl,--threads", "-Clink-arg=-Wl,--icf=all"]
if args.mmacosx_version_min:
call_args += [
"-Clink-arg=-mmacosx-version-min=%s" % args.mmacosx_version_min,
]
if args.lto:
call_args += ["-Clto=%s" % args.lto]
# calculate all the search paths we should use when looking for deps in cargo's output
search_path_suffixes = [
os.path.join("debug", "deps"),
os.path.join("release", "deps"),
]
targets = [
"x86_64-fuchsia",
"aarch64-fuchsia",
"x86_64-unknown-linux-gnu",
"x86_64-apple-darwin",
]
# add in e.g. x86_64-unknown-linux-gnu/release/deps for each target; iterate
# over a snapshot so already-added target paths aren't compounded
base_suffixes = list(search_path_suffixes)
for target in targets:
search_path_suffixes += [os.path.join(target, suffix) for suffix in base_suffixes]
search_paths = [
args.first_party_crate_root,
args.third_party_crate_root,
]
search_paths += [os.path.join(args.third_party_crate_root, suffix) for suffix in search_path_suffixes]
for path in search_paths:
call_args += ["-L", "dependency=%s" % path]
externs = []
# Collect externs
if args.dep_data:
for data_path in args.dep_data:
if not os.path.isfile(data_path):
print TERM_COLOR_RED
print "Missing Rust target data for dependency " + data_path
print "Did you accidentally depend on a non-Rust target?"
print TERM_COLOR_END
return -1
dep_data = json.load(open(data_path))
if dep_data["third_party"]:
package_name = dep_data["package_name"]
crate = dep_data["crate_name"]
crate_type = dep_data["crate_type"]
if crate_type == "lib":
ext = ".rlib"
elif crate_type == "staticlib":
ext = ".a"
elif crate_type == "proc-macro":
if args.mac_host:
ext = ".dylib"
else:
ext = ".so"
else:
print "Unrecognized crate type: " + crate_type
return -1
filename = "lib" + crate + "-" + package_name + ext
lib_path = os.path.join(args.third_party_crate_root, filename)
if not os.path.exists(lib_path):
print TERM_COLOR_RED
print "lib not found at path: " + lib_path
print "This is a bug. Please report this to the Fuchsia Toolchain team."
print TERM_COLOR_END
return -1
else:
crate = dep_data["crate_name"]
lib_path = dep_data["lib_path"]
crate_underscore = crate.replace("-", "_")
externs.append("%s=%s" % (crate_underscore, lib_path))
# add externs to arguments
for extern in externs:
call_args += ["--extern", extern]
# Build the depfile
depfile_args = call_args + [
"-o%s" % args.depfile,
"--emit=dep-info",
]
depfile_job = start_command(depfile_args, env)
# Build the desired output
build_args = call_args + ["-o%s" % args.output_file]
build_job = start_command(build_args, env)
# Wait for build jobs to complete
stdout, stderr = depfile_job.communicate()
if stdout or stderr:
print(stdout + stderr)
if depfile_job.returncode != 0:
return depfile_job.returncode
fix_depfile(args.depfile, os.getcwd(), args.output_file)
stdout, stderr = build_job.communicate()
if stdout or stderr:
print(stdout + stderr)
if build_job.returncode != 0:
return build_job.returncode
if __name__ == '__main__':
sys.exit(main())
|
# XXX like the Mac equivalent, this module is designed to be run independently
# of the rest of CCTBX if necessary, although it will use installed resources
# if found
from __future__ import division
try :
import libtbx.load_env
libtbx_env = libtbx.env
except ImportError, e :
libtbx_env = None
import optparse
import shutil
import stat
import os
import sys
def run (args, out=sys.stdout) :
if (sys.platform != "win32") :
print >> out, "This application will only run on Windows systems."
return 1
parser = optparse.OptionParser(
description="Utility for creating an iconified Windows launcher for the specified command, which must be present in %LIBTBX_BUILD%\\bin.")
bin_path = ico_path = None
if (libtbx_env is not None) :
bin_path = os.path.join(abs(libtbx_env.build_path), "bin")
ico_path = libtbx_env.find_in_repositories(
relative_path="gui_resources/icons/custom/WinPhenix.ico",
test=os.path.exists)
else :
bin_path = os.getcwd()
parser.add_option("--bin_dir", dest="bin_dir", action="store",
help="Directory containing target executable or batch script.",
default=bin_path)
parser.add_option("--exe_name", dest="exe_name", action="store",
help="Name of iconified program", default=None)
parser.add_option("--icon", dest="icon", action="store",
help="Path to .ico file", default=ico_path)
parser.add_option("--dest", dest="dest", action="store",
help="Destination path", default=os.getcwd())
parser.add_option("--bundle_all", dest="bundle_all", action="store_true",
help="Bundle Python interpreter, etc. into .exe", default=False)
options, args = parser.parse_args(args)
if (len(args) == 0) :
return parser.error("Executable name not specified.")
if (options.bin_dir is None) :
return parser.error("Executables directory not specified.")
program_name = args[-1]
bin_dir = options.bin_dir
bin_files = os.listdir(bin_dir)
program_cmd_file = None
for file_name in bin_files :
base, ext = os.path.splitext(file_name)
if (base == program_name) :
if (ext == ".bat") : # preferred
program_cmd_file = file_name
break
elif (ext == ".exe") :
program_cmd_file = file_name
if (program_cmd_file is None) :
print >> out, "No program named '%s' found in %s." % (program_name,
bin_dir)
return 1
exe_name = program_name
if (options.exe_name is not None) :
exe_name = options.exe_name
if (os.path.isdir("py2exe_tmp")) :
shutil.rmtree("py2exe_tmp")
os.mkdir("py2exe_tmp")
os.chdir("py2exe_tmp")
f = open("%s.py" % exe_name, "w")
# XXX for reasons unknown to me, the method used on Mac (os.spawnv) will
# not work for windows, but subprocess.call appears to do the job nicely,
# with the minor complaint that it leaves the phenix.exe command running
# (and visible in the taskbar) until the actual app closes.
f.write("""
import subprocess
subprocess.call(r"%s")
""" % os.path.join(bin_dir, program_cmd_file))
f.close()
bundle_files = 3
if (options.bundle_all) :
bundle_files = 1 # won't work on win64
zip_file = None
icon_rsrc = ""
if (options.icon is not None) :
icon_rsrc = "'icon_resources':[(0,r'%s')]," % options.icon
f2 = open("setup.py", "w")
f2.write("""
from distutils.core import setup
import py2exe
setup(
console=[
{
'script':'%s.py',
%s
},
],
zipfile=None,
options={
'py2exe': {
'includes': ['subprocess'],
'dll_excludes' : ['w9xpopen.exe'],
'bundle_files': %d,
},
})
""" % (exe_name, icon_rsrc, bundle_files))
f2.close()
# XXX use sys.executable to avoid -Qnew behavior (crashes py2exe)
import subprocess
rc = subprocess.call([sys.executable, "setup.py", "py2exe"])
if (rc != 0) :
return rc
dist_path = os.path.join(os.getcwd(), "dist")
dist_files = os.listdir(dist_path)
exe_file = os.path.join(dist_path, "%s.exe" % exe_name)
assert (os.path.isfile(exe_file))
os.chdir(options.dest)
print >> out, ""
for file_name in dist_files :
if os.path.exists(file_name) :
print >> out, "WARNING: %s already exists" % file_name
continue
# XXX Even for Windows, this is incredibly broken
#full_path = os.path.join(options.dest, file_name)
#print "removing %s" % file_name
#subprocess.call("del /F /Q %s" % os.path.join(os.getcwd(), file_name))
#os.chmod(full_path, stat.S_IWRITE)
#os.unlink(full_path)
print >> out, "moving %s..." % file_name
shutil.move(os.path.join(dist_path, file_name), os.getcwd())
os.chmod(file_name, stat.S_IWRITE)
return 0
if (__name__ == "__main__") :
sys.exit(run(sys.argv[1:]))
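# Example invocation (the script and program names are hypothetical), run so
# that %LIBTBX_BUILD%\bin is picked up by default:
#   python make_launcher.py --icon WinPhenix.ico --dest C:\launchers phenix
# which wraps bin\phenix.bat in an iconified phenix.exe built via py2exe.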
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Run pdb and capture args.
Oct 06, 2019:
Works as expected!!!
Start IPython and run this as a post-mortem debugger!
Unrelated, but here is something I was surprised about::
from bdb import foo, bar
Yes, those are real functions::
foo()
"""
import pdb
from pdb import Pdb
import sys
import traceback
from prompt_toolkit.completion import DynamicCompleter
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.keys import Keys
from prompt_toolkit.key_binding import KeyBindings, merge_key_bindings
from prompt_toolkit.key_binding.defaults import load_key_bindings
from prompt_toolkit.formatted_text import PygmentsTokens
from prompt_toolkit.shortcuts.prompt import PromptSession
from pygments.token import Token
from IPython.core.getipython import get_ipython
from IPython.terminal.embed import InteractiveShellEmbed
from IPython.terminal.interactiveshell import TerminalInteractiveShell
from IPython.terminal.ptutils import IPythonPTCompleter
from IPython.terminal.shortcuts import create_ipython_shortcuts
from default_profile.ipython_config import UsageError
class IPD(Pdb):
"""Set up the IPython Debugger.
Rewrote this largely to break up the :meth:`pt_init` from
`IPython.terminal.debugger.TerminalPdb` and decouple it from the rest
of the application.
"""
def __init__(self, shell=None, keys=None, context=None, *args, **kwargs):
"""Add everything to call signature.
The original only displays star args and star kwargs.
Parameters
----------
shell : |ip|
Global IPython
completer : optional
What do we use for completions?
prompt_toolkit_application : prompt_toolkit.PromptSession, optional
pt_init parameter
"""
self.shell = shell or get_ipython()
self.keys = self.initialize_keybindings(keys)
# DynamicCompleter expects a zero-argument callable that returns a
# Completer, so pass the bound method instead of calling it.
self.completer = DynamicCompleter(self.initialize_completer)
self.prompt = "Your Debugger: "
# pt_init reads self.keys and self.completer, so build the session last.
self.pt_session = self.pt_init()
self.context = context
super().__init__(*args, **kwargs)
def __repr__(self):
return f"{self.prompt}"
# TODO:
# def __call__(self):
# initialize()
def get_prompt_tokens(self):
"""Create the prompt."""
return [(Token.Prompt, self.prompt)]
def initialize_completer(self):
"""Create a completion instance for the debugger."""
return IPythonPTCompleter(
shell=self.shell,
# namespace=self.shell.user_ns,
# global_namespace=globals(),
# parent=self.shell,
)
    def initialize_keybindings(self, custom_keys=None):
        """Merge the default and IPython key bindings with any custom ones."""
        bindings = [
            load_key_bindings(),
            create_ipython_shortcuts(self.shell),
        ]
        if custom_keys is not None:
            bindings.append(custom_keys)
        return merge_key_bindings(bindings)
def pt_init(self):
"""Override the default initialization for prompt_toolkit."""
return PromptSession(
message=(lambda: PygmentsTokens(self.get_prompt_tokens())),
editing_mode=getattr(EditingMode, self.shell.editing_mode.upper()),
key_bindings=self.keys,
history=self.shell.debugger_history,
completer=self.completer,
enable_history_search=True,
mouse_support=self.shell.mouse_support,
complete_style=self.shell.pt_complete_style,
# style=self.shell.style,
# inputhook=self.shell.inputhook,
color_depth=self.shell.color_depth,
)
def initialize_ipython():
"""If IPython hasn't been started, then do so.
#) Build a terminal app in order to force IPython to load the configuration.
#) Set the trait 'interact' to `False`.
#) TerminalIPythonApp().initialize([])
.. todo:: allow for args and kwargs
"""
# ipapp = TerminalIPythonApp()
# # Avoid output (banner, prints)
# ipapp.interact = False
# # ipapp.initialize([args], kwargs)
# # I don't know if we can do it that way
# ip.initialize([])
# shell = ipapp.shell
# Unfortunately this doesn't work
# shell = start_ipython()
# This creates an IPython instance inside of our debugger
shell = TerminalInteractiveShell()
return shell
def is_embedded_shell(shell):
    """Determine if 'shell' is an instance of InteractiveShellEmbed."""
    if isinstance(shell, InteractiveShellEmbed):
        sys.stderr.write(
            "\nYou are currently in an embedded IPython shell,\n"
            "so the configuration will not be loaded.\n\n"
        )
        return True
    return False
def formatted_traceback():
"""Allow a little post mortem introspection."""
print("Traceback: Extracted stack\n" + repr(traceback.extract_stack()) + "\n")
print("Traceback: Formatted stack\n" + repr(traceback.format_stack()) + "\n")
def init_debugger():
"""Create an instance of our debugger."""
if hasattr(sys, "last_traceback"):
formatted_traceback()
# with patch_stdout.patch_stdout():
dynamic_debugger = IPD(shell=get_ipython())
return dynamic_debugger
if __name__ == "__main__":
debugger = init_debugger()
try:
debugger.pt_session.prompt()
except (KeyboardInterrupt, EOFError):
sys.exit("Bye!")
|
#!/usr/bin/env python
# license removed for brevity
from scapy.all import *
import sys
import os
import time
current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.append(current_folder)
main_folder = os.path.join(current_folder, "..")
sys.path.append(main_folder)
'''
utils_folder = os.path.join(current_folder, "..", "utils")
sys.path.append(utils_folder)
'''
from IPParser import IPParser
class Sniffer:
interface_ = None
count_ = -1
filter_ = None
store_ = 0
use_show = True
use_logger = True
folder_logger = os.path.join(current_folder, "sniffer_log")
logger_filename = "sniffer_record.txt"
logger_raw_filename = "sniffer_raw.txt"
parser_ = IPParser()
def __init__(self, interface=None, count=-1, filter=None, store=0, use_show=True, use_logger=True):#filter="tcp and ( port 80 or port 443 )"
self.interface_ = interface
self.count_ = count
self.filter_ = filter
self.store_ = store
self.use_show = use_show
self.use_logger = use_logger
def run(self):
pkts = sniff(iface=self.interface_, prn=self.callback, count=self.count_, filter=self.filter_, store=self.store_)
return pkts
    def sniff_analyze(self, pkts):
        def parsed(value):
            # Fall back to an empty string when a field cannot be parsed.
            return "" if value is None else value
        src_ether = parsed(self.parser_.parser_src_ether(pkts))
        src_ip = parsed(self.parser_.parser_src_ip(pkts))
        src_port = parsed(self.parser_.parser_src_port(pkts))
        dst_ether = parsed(self.parser_.parser_dst_ether(pkts))
        dst_ip = parsed(self.parser_.parser_dst_ip(pkts))
        dst_port = parsed(self.parser_.parser_dst_port(pkts))
        protocol = parsed(self.parser_.parser_proto(pkts))
result = "Ether = {} / IP = {:<15s}:{:<7s} >>>> Ether = {} / IP = {:<15s}:{:<5s} Proto:{:<7s}".format(src_ether, src_ip, src_port, dst_ether, dst_ip, dst_port, protocol)
if protocol == 'tcp':
flags = self.parser_.parser_flags(pkts)
result = "{} Flags:{}".format(result, flags)
tm = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
result = "{} {}".format(tm, result)
return result
def callback(self, pkts):
self.show(pkts)
self.logger(pkts)
def logger(self, pkts):
if not self.use_logger: return
if not os.path.exists(self.folder_logger):
os.makedirs(self.folder_logger)
with open(os.path.join(self.folder_logger, self.logger_raw_filename), 'a') as fid:
fid.write("{}\n".format(pkts))
with open(os.path.join(self.folder_logger, self.logger_filename), 'a') as fid:
fid.write("{}\n".format(self.sniff_analyze(pkts)))
def show(self, pkts):
if not self.use_show: return
print(self.sniff_analyze(pkts))
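# Usage sketch (illustrative: "eth0" and the BPF filter are placeholders, and
# live capture typically requires root privileges):
#
#     sniffer = Sniffer(interface="eth0",
#                       filter="tcp and ( port 80 or port 443 )",
#                       count=10)
#     sniffer.run()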
|
from data.model.filterword import FilterWord
from data.model.guild import Guild
from data.model.tag import Tag
from utils.config import cfg
class GuildService:
def get_guild(self) -> Guild:
"""Returns the state of the main guild from the database.
Returns
-------
Guild
The Guild document object that holds information about the main guild.
"""
return Guild.objects(_id=cfg.guild_id).first()
def add_tag(self, tag: Tag) -> None:
Guild.objects(_id=cfg.guild_id).update_one(push__tags=tag)
def remove_tag(self, _id: int):
return Guild.objects(_id=cfg.guild_id).update_one(pull__tags___id=Tag(_id=_id)._id)
def get_tag_by_name(self, name: str, args: bool):
g = Guild.objects(_id=cfg.guild_id).first()
for t in g.tags:
if t.name == name and t.args == args:
t.use_count += 1
g.save()
return t
return None
def get_tag(self, _id: int):
g = Guild.objects(_id=cfg.guild_id).first()
for t in g.tags:
if t._id == _id:
t.use_count += 1
g.save()
return t
return None
def add_meme(self, meme: Tag) -> None:
Guild.objects(_id=cfg.guild_id).update_one(push__memes=meme)
def remove_meme(self, meme: str):
return Guild.objects(_id=cfg.guild_id).update_one(pull__memes__name=Tag(name=meme).name)
def edit_meme(self, meme):
return Guild.objects(_id=cfg.guild_id, memes__name=meme.name).update_one(set__memes__S=meme)
def get_meme(self, name: str):
meme = Guild.objects.get(_id=cfg.guild_id).memes.filter(name=name).first()
if meme is None:
return
meme.use_count += 1
self.edit_meme(meme)
return meme
def inc_caseid(self) -> None:
"""Increments Guild.case_id, which keeps track of the next available ID to
use for a case.
"""
Guild.objects(_id=cfg.guild_id).update_one(inc__case_id=1)
def all_rero_mappings(self):
g = self.get_guild()
current = g.reaction_role_mapping
return current
def add_rero_mapping(self, mapping):
g = self.get_guild()
current = g.reaction_role_mapping
the_key = list(mapping.keys())[0]
current[str(the_key)] = mapping[the_key]
g.reaction_role_mapping = current
g.save()
def append_rero_mapping(self, message_id, mapping):
g = self.get_guild()
current = g.reaction_role_mapping
current[str(message_id)] = current[str(message_id)] | mapping
g.reaction_role_mapping = current
g.save()
def get_rero_mapping(self, id):
g = self.get_guild()
if id in g.reaction_role_mapping:
return g.reaction_role_mapping[id]
else:
return None
def delete_rero_mapping(self, id):
g = self.get_guild()
        if str(id) in g.reaction_role_mapping:
g.reaction_role_mapping.pop(str(id))
g.save()
def add_raid_phrase(self, phrase: str) -> bool:
existing = self.get_guild().raid_phrases.filter(word=phrase)
        if len(existing) > 0:
return False
Guild.objects(_id=cfg.guild_id).update_one(push__raid_phrases=FilterWord(word=phrase, bypass=5, notify=True))
return True
def remove_raid_phrase(self, phrase: str):
Guild.objects(_id=cfg.guild_id).update_one(pull__raid_phrases__word=FilterWord(word=phrase).word)
def set_spam_mode(self, mode) -> None:
Guild.objects(_id=cfg.guild_id).update_one(set__ban_today_spam_accounts=mode)
    def add_filtered_word(self, fw: FilterWord) -> bool:
        existing = self.get_guild().filter_words.filter(word=fw.word)
        if len(existing) > 0:
            return False
        Guild.objects(_id=cfg.guild_id).update_one(push__filter_words=fw)
        return True
def remove_filtered_word(self, word: str):
return Guild.objects(_id=cfg.guild_id).update_one(pull__filter_words__word=FilterWord(word=word).word)
def update_filtered_word(self, word: FilterWord):
return Guild.objects(_id=cfg.guild_id, filter_words__word=word.word).update_one(set__filter_words__S=word)
def add_whitelisted_guild(self, id: int):
g = Guild.objects(_id=cfg.guild_id)
g2 = g.first()
if id not in g2.filter_excluded_guilds:
g.update_one(push__filter_excluded_guilds=id)
return True
return False
def remove_whitelisted_guild(self, id: int):
g = Guild.objects(_id=cfg.guild_id)
g2 = g.first()
if id in g2.filter_excluded_guilds:
g.update_one(pull__filter_excluded_guilds=id)
return True
return False
def add_ignored_channel(self, id: int):
g = Guild.objects(_id=cfg.guild_id)
g2 = g.first()
if id not in g2.filter_excluded_channels:
g.update_one(push__filter_excluded_channels=id)
return True
return False
def remove_ignored_channel(self, id: int):
g = Guild.objects(_id=cfg.guild_id)
g2 = g.first()
if id in g2.filter_excluded_channels:
g.update_one(pull__filter_excluded_channels=id)
return True
return False
def get_locked_channels(self):
return self.get_guild().locked_channels
def add_locked_channels(self, channel):
Guild.objects(_id=cfg.guild_id).update_one(push__locked_channels=channel)
def remove_locked_channels(self, channel):
Guild.objects(_id=cfg.guild_id).update_one(pull__locked_channels=channel)
def set_nsa_mapping(self, channel_id, webhooks):
guild = Guild.objects(_id=cfg.guild_id).first()
guild.nsa_mapping[str(channel_id)] = webhooks
guild.save()
guild_service = GuildService()
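# Usage sketch (assumes an active MongoEngine connection and an existing
# Guild document whose _id matches cfg.guild_id):
#
#     guild = guild_service.get_guild()
#     if guild_service.add_raid_phrase("some raid phrase"):
#         print("raid phrase added")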
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .key_store_type_details import KeyStoreTypeDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class KeyStoreTypeFromOracleKeyVaultDetails(KeyStoreTypeDetails):
"""
Details for Oracle Key Vault
"""
def __init__(self, **kwargs):
"""
Initializes a new KeyStoreTypeFromOracleKeyVaultDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.database.models.KeyStoreTypeFromOracleKeyVaultDetails.type` attribute
of this class is ``ORACLE_KEY_VAULT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param type:
The value to assign to the type property of this KeyStoreTypeFromOracleKeyVaultDetails.
Allowed values for this property are: "ORACLE_KEY_VAULT"
:type type: str
:param connection_ips:
The value to assign to the connection_ips property of this KeyStoreTypeFromOracleKeyVaultDetails.
:type connection_ips: list[str]
:param admin_username:
The value to assign to the admin_username property of this KeyStoreTypeFromOracleKeyVaultDetails.
:type admin_username: str
:param vault_id:
The value to assign to the vault_id property of this KeyStoreTypeFromOracleKeyVaultDetails.
:type vault_id: str
:param secret_id:
The value to assign to the secret_id property of this KeyStoreTypeFromOracleKeyVaultDetails.
:type secret_id: str
"""
self.swagger_types = {
'type': 'str',
'connection_ips': 'list[str]',
'admin_username': 'str',
'vault_id': 'str',
'secret_id': 'str'
}
self.attribute_map = {
'type': 'type',
'connection_ips': 'connectionIps',
'admin_username': 'adminUsername',
'vault_id': 'vaultId',
'secret_id': 'secretId'
}
self._type = None
self._connection_ips = None
self._admin_username = None
self._vault_id = None
self._secret_id = None
self._type = 'ORACLE_KEY_VAULT'
@property
def connection_ips(self):
"""
**[Required]** Gets the connection_ips of this KeyStoreTypeFromOracleKeyVaultDetails.
The list of Oracle Key Vault connection IP addresses.
:return: The connection_ips of this KeyStoreTypeFromOracleKeyVaultDetails.
:rtype: list[str]
"""
return self._connection_ips
@connection_ips.setter
def connection_ips(self, connection_ips):
"""
Sets the connection_ips of this KeyStoreTypeFromOracleKeyVaultDetails.
The list of Oracle Key Vault connection IP addresses.
:param connection_ips: The connection_ips of this KeyStoreTypeFromOracleKeyVaultDetails.
:type: list[str]
"""
self._connection_ips = connection_ips
@property
def admin_username(self):
"""
**[Required]** Gets the admin_username of this KeyStoreTypeFromOracleKeyVaultDetails.
The administrator username to connect to Oracle Key Vault
:return: The admin_username of this KeyStoreTypeFromOracleKeyVaultDetails.
:rtype: str
"""
return self._admin_username
@admin_username.setter
def admin_username(self, admin_username):
"""
Sets the admin_username of this KeyStoreTypeFromOracleKeyVaultDetails.
The administrator username to connect to Oracle Key Vault
:param admin_username: The admin_username of this KeyStoreTypeFromOracleKeyVaultDetails.
:type: str
"""
self._admin_username = admin_username
@property
def vault_id(self):
"""
**[Required]** Gets the vault_id of this KeyStoreTypeFromOracleKeyVaultDetails.
The `OCID`__ of the Oracle Cloud Infrastructure `vault`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
__ https://docs.cloud.oracle.com/Content/KeyManagement/Concepts/keyoverview.htm#concepts
:return: The vault_id of this KeyStoreTypeFromOracleKeyVaultDetails.
:rtype: str
"""
return self._vault_id
@vault_id.setter
def vault_id(self, vault_id):
"""
Sets the vault_id of this KeyStoreTypeFromOracleKeyVaultDetails.
The `OCID`__ of the Oracle Cloud Infrastructure `vault`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
__ https://docs.cloud.oracle.com/Content/KeyManagement/Concepts/keyoverview.htm#concepts
:param vault_id: The vault_id of this KeyStoreTypeFromOracleKeyVaultDetails.
:type: str
"""
self._vault_id = vault_id
@property
def secret_id(self):
"""
**[Required]** Gets the secret_id of this KeyStoreTypeFromOracleKeyVaultDetails.
The `OCID`__ of the Oracle Cloud Infrastructure `secret`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
__ https://docs.cloud.oracle.com/Content/KeyManagement/Concepts/keyoverview.htm#concepts
:return: The secret_id of this KeyStoreTypeFromOracleKeyVaultDetails.
:rtype: str
"""
return self._secret_id
@secret_id.setter
def secret_id(self, secret_id):
"""
Sets the secret_id of this KeyStoreTypeFromOracleKeyVaultDetails.
The `OCID`__ of the Oracle Cloud Infrastructure `secret`__.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
__ https://docs.cloud.oracle.com/Content/KeyManagement/Concepts/keyoverview.htm#concepts
:param secret_id: The secret_id of this KeyStoreTypeFromOracleKeyVaultDetails.
:type: str
"""
self._secret_id = secret_id
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
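# Usage sketch (illustrative: the OCIDs below are placeholders, not real ids):
#
#     details = KeyStoreTypeFromOracleKeyVaultDetails(
#         connection_ips=["10.0.0.1"],
#         admin_username="okv_admin",
#         vault_id="ocid1.vault.oc1..exampleuniqueID",
#         secret_id="ocid1.vaultsecret.oc1..exampleuniqueID")
#     # 'type' is fixed to 'ORACLE_KEY_VAULT' by the constructor.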
|
#!/usr/bin/env python3
#
# Copyright (c) 2015-2018, Fabian Greif
# Copyright (c) 2018, Niklas Hauser
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import os
import sys
import enum
import shutil
import anytree
import logging
import textwrap
import colorful
import itertools
import lbuild.filter
import lbuild.option
import lbuild.utils
from lbuild.format import format_node, format_description, format_short_description
from .exception import LbuildException, LbuildAttributeException
LOGGER = logging.getLogger('lbuild.node')
def load_functions_from_file(repository, filename: str, required, optional=None, local=None):
    # Avoid a mutable default argument: each call gets a fresh dict.
    if local is None:
        local = {}
    try:
        localpath = os.path.dirname(os.path.realpath(filename))
        local.update({
            # The localpath(...) function can be used to create
            # a local path from the folder of the repository file.
'localpath': RelocatePath(localpath),
'repopath': RelocatePath(repository._filepath),
'FileReader': LocalFileReaderFactory(localpath),
'listify': lbuild.filter.listify,
# 'ignore_patterns': shutil.ignore_patterns,
'StringOption': lbuild.option.StringOption,
'BooleanOption': lbuild.option.BooleanOption,
'NumericOption': lbuild.option.NumericOption,
'EnumerationOption': lbuild.option.EnumerationOption,
'SetOption': lbuild.option.SetOption,
})
# LOGGER.debug("Parse filename '%s'", filename)
local = lbuild.utils.with_forward_exception(repository,
lambda: lbuild.utils.load_module_from_file(filename, local))
functions = lbuild.utils.get_global_functions(local, required, optional)
return functions
except FileNotFoundError as error:
raise LbuildException("Repository configuration file not found '{}'.".format(filename))
except KeyError as error:
raise LbuildException("Invalid repository configuration file '{}':\n"
" {}: {}".format(filename,
error.__class__.__name__,
error))
class RelocatePath:
def __init__(self, basepath):
self.basepath = basepath
def __call__(self, *args):
return os.path.join(self.basepath, *args)
class LocalFileReader:
def __init__(self, basepath, filename):
self.basepath = basepath
self.filename = filename
def __str__(self):
return self.read()
def read(self):
with open(os.path.join(self.basepath, self.filename)) as file:
return file.read()
class LocalFileReaderFactory:
def __init__(self, basepath):
self.basepath = basepath
def __call__(self, filename):
return LocalFileReader(self.basepath, filename)
class Renderer(anytree.RenderTree):
def __init__(self, node):
anytree.RenderTree.__init__(self, node,
style=anytree.ContRoundStyle(),
childiter=self.childsort)
def __str__(self):
lines = [pre + format_node(node, pre) for pre, _, node in self]
return "\n".join(lines)
@staticmethod
def childsort(items):
def sorting(item):
return (item._type != BaseNode.Type.OPTION, item.name)
return sorted(items, key=sorting)
class NameResolver:
"""
Name resolver for node.
"""
def __init__(self, node, nodetype, selected=True):
self._node = node
self._type = nodetype
self._str = nodetype.name.lower()
self._value_resolver = False
self._selected = selected
def __getitem__(self, key: str):
node = self._node._resolve_partial_max(key, max_results=1)[0]
if not node._available:
raise LbuildException("{} '{}' is not available!".format(self._str, node.fullname))
if self._selected and not node._selected:
raise LbuildException("{} '{}' is not selected!".format(self._str, node.fullname))
if node._type != self._type:
raise LbuildException("'{}' is of type '{}', but searching for '{}'!".format(
node.fullname, node._type.name.lower(), self._str))
if node._type == BaseNode.Type.OPTION and self._value_resolver:
return node.value
return node
def get(self, key, default=None):
try:
return self.__getitem__(key)
except:
return default
def __contains__(self, key):
try:
_ = self.__getitem__(key)
return True
except:
return False
def __len__(self):
return len(self._node._findall(self._type, selected=self._selected))
def __repr__(self):
return repr(self._node._findall(self._type, selected=self._selected))
class BaseNode(anytree.Node):
separator = ":"
resolver = anytree.Resolver()
class Type(enum.Enum):
PARSER = 1
REPOSITORY = 2
MODULE = 3
OPTION = 4
def __init__(self, name, node_type, repository=None):
anytree.Node.__init__(self, name)
self._type = node_type
self._functions = {}
self._fullname = name
self._filename = None
# Dependency management
self._repository = repository
self._dependency_module_names = []
self._dependencies_resolved = False
self._dependencies = []
self._description = ""
# All _update()-able traits: defaults
self._available_default = True
self._selected_default = True
self._format_description_default = format_description
self._format_short_description_default = format_short_description
self._context_default = None
# All _update()-able traits: defaults
self._available = (self._type != BaseNode.Type.MODULE)
self._selected = True
self._format_description = self._format_description_default
self._format_short_description = self._format_short_description_default
self._context = self._context_default
self._ignore_patterns = lbuild.utils.default_ignore_patterns
self._filters = lbuild.filter.default_filters
@property
def format_description(self):
return format_description
@property
def format_short_description(self):
return format_short_description
@property
def _filepath(self):
return os.path.dirname(self._filename)
@property
def fullname(self):
if self._fullname is None:
self._fullname = self.name
return self._fullname
@property
def type(self):
return self._type
@property
def options(self):
return self.all_options(depth=2)
@property
def submodules(self):
return self.all_modules(depth=2)
@property
def repository(self):
return self._repository
@property
def dependencies(self):
if not self._dependencies_resolved:
self._resolve_dependencies()
return self._dependencies + [d for o in self.all_options(depth=2) for d in o._dependencies if d != self]
@property
def description(self):
return self._format_description(self, str(self._description))
@property
def short_description(self):
return self._format_short_description(self, str(self._description))
@description.setter
def description(self, description):
self._description = description
@property
def option_value_resolver(self):
resolver = NameResolver(self, self.Type.OPTION)
resolver._value_resolver = True
return resolver
@property
def option_resolver(self):
return NameResolver(self, self.Type.OPTION)
@property
def module_resolver(self):
return NameResolver(self, self.Type.MODULE)
def render(self):
return Renderer(self)
    def add_dependencies(self, *dependencies):
        """
        Add new dependencies.
        The module names do not have to be fully qualified.
        """
        self._dependency_module_names += dependencies
def add_option(self, option):
"""
        Define a new option for this module.
The module options only influence the build process but not the
selection and dependencies of modules.
"""
if option.name in [c.name for c in self.children]:
raise LbuildException("Option name '{}' is already defined".format(option.name))
option._repository = self._repository
option.parent = self
option.add_dependencies(self.fullname)
option._fullname = self.fullname + ":" + option.name
def all_options(self, depth=None, selected=True):
return self._findall(self.Type.OPTION, depth, selected)
def all_modules(self, depth=None, selected=True):
return self._findall(self.Type.MODULE, depth, selected)
def _findall(self, node_type, depth=None, selected=True):
def _filter(n):
return (n._type == node_type and
n._available and
(n._selected or not selected) and
n is not self)
return anytree.search.findall(self, maxlevel=depth, filter_=_filter)
def _resolve_dependencies(self, ignore_failure=False):
"""
Update the internal list of dependencies.
Resolves the module names to the actual module objects.
"""
if self._dependencies_resolved:
return
dependencies = set()
# print(self.fullname, self._dependency_module_names)
dependency_names = set(n for n in self._dependency_module_names if ":" in n)
for dependency_name in dependency_names:
try:
dependencies.add(self.module_resolver[dependency_name])
            except LbuildException as error:
                if not ignore_failure:
                    raise LbuildException("Cannot resolve dependencies!\n" + str(error))
                LOGGER.debug("ignoring %s", dependency_name)
self._dependencies = list(dependencies)
self._dependencies_resolved = not ignore_failure
for child in self.children:
child._resolve_dependencies(ignore_failure)
def _resolve_partial_max(self, query, max_results=1):
nodes = self._resolve_partial(query, None)
if nodes is None:
raise LbuildException("Unknown '{}' in module '{}'!".format(query, self.fullname))
if len(nodes) > max_results:
raise LbuildException("Ambiguous '{}'! Found: '{}'".format(query, "', '".join([n.fullname for n in nodes])))
return nodes
def _resolve_partial(self, query, default=[]):
# Try if query result is unique
resolved1 = self._resolve(query, [])
if len(resolved1) == 1:
return resolved1
# no result or ambiguous? try to fill the partial name
query = ":".join(self._fill_partial_name(["" if p == "*" else p for p in query.split(":")]))
resolved2 = self._resolve(query, [])
if not (len(resolved2) or len(resolved1)):
return default # neither found anything
if not len(resolved2):
return resolved1
if not len(resolved1):
return resolved2
# return the less ambiguous one
return resolved2 if len(resolved2) < len(resolved1) else resolved1
def _resolve(self, query, default=[]):
# :* -> non-recursive
# :** -> recursive
query = ":".join(p if len(p) else "*" for p in query.strip().split(":"))
try:
qquery = ":" + query.replace(":**", "")
if self.root._type == self.Type.PARSER:
qquery = ":lbuild" + qquery
# print("\n\n\n", qquery)
found_modules = BaseNode.resolver.glob(self.root, qquery)
except (anytree.resolver.ChildResolverError, anytree.resolver.ResolverError):
return default
modules = found_modules
if query.endswith(":**"):
for module in found_modules:
modules.extend(module.descendants)
# print("\n\n\n", modules)
return modules if len(modules) else default
def _fill_partial_name(self, partial_name):
"""
Fill the array of the module name with the parts of the full name
of the current module.
Returns an array of the full name.
"""
module_fullname_parts = self.fullname.split(":")
# if partial_name is just leaf name, set scope to local node
if len(partial_name) == 1:
partial_name = module_fullname_parts + partial_name
# Limit length of the module name to the length of the requested name
depth = len(partial_name)
if len(module_fullname_parts) > depth:
module_fullname_parts = module_fullname_parts[:depth]
        # Using zip_longest restricts the name to the length of the full name
        # if it is shorter than the requested module name.
name = []
for part, fill in itertools.zip_longest(partial_name,
module_fullname_parts,
fillvalue=""):
name.append(fill if (part == "") else part)
return name
    def _update_attribute(self, attr):
        self_attr = getattr(self, attr, "unknown")
        parent_attr = getattr(self.parent, attr, "unknown")
        if self_attr == "unknown" or parent_attr == "unknown":
            raise LbuildException("Cannot update non-existent attribute '{}'!".format(attr))
if isinstance(self_attr, list):
self_attr = list(set(self_attr + parent_attr))
return
if isinstance(self_attr, dict):
self_attr.update(parent_attr)
return
default = getattr(self, attr + "_default")
if ((parent_attr is not default) and (self_attr is default)):
setattr(self, attr, parent_attr)
# print("Updating {}.{} = {} -> {}.".format(self.fullname, attr, self_attr, parent_attr))
def _update_format(self):
if self.parent:
self._update_attribute("_format_description")
self._update_attribute("_format_short_description")
for c in self.children:
c._update_format()
def _update(self):
if self.parent:
self._update_attribute("_format_description")
self._update_attribute("_format_short_description")
self._update_attribute("_available")
self._update_attribute("_selected")
self._update_attribute("_ignore_patterns")
self._update_attribute("_filters")
self._update_attribute("_context")
for c in self.children:
c._update()
def _relocate_relative_path(self, path):
"""
Relocate relative paths to the path of the repository
configuration file.
"""
if not os.path.isabs(path):
path = os.path.join(self._filepath, path)
return os.path.normpath(path)
|
# Copyright 2020 BlueCat Networks (USA) Inc. and its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Workflow form template
"""
import datetime
import re
import json
import os
from wtforms import StringField, PasswordField, FileField
from wtforms import BooleanField, DateTimeField, SubmitField, SelectField
from wtforms.validators import DataRequired, Email, MacAddress, URL, ValidationError
from bluecat.wtform_extensions import GatewayForm
from bluecat.wtform_fields import Configuration, CustomStringField, IP4Address
states = [
["AL", "Alabama"],
["AK", "Alaska"],
["AS", "American Samoa"],
["AZ", "Arizona"],
["AR", "Arkansas"],
["CA", "California"],
["CO", "Colorado"],
["CT", "Connecticut"],
["DE", "Delaware"],
["DC", "District Of Columbia"],
["FM", "Federated States Of Micronesia"],
["FL", "Florida"],
["GA", "Georgia"],
["GU", "Guam"],
["HI", "Hawaii"],
["ID", "Idaho"],
["IL", "Illinois"],
["IN", "Indiana"],
["IA", "Iowa"],
["KS", "Kansas"],
["KY", "Kentucky"],
["LA", "Louisiana"],
["ME", "Maine"],
["MH", "Marshall Islands"],
["MD", "Maryland"],
["MA", "Massachusetts"],
["MI", "Michigan"],
["MN", "Minnesota"],
["MS", "Mississippi"],
["MO", "Missouri"],
["MT", "Montana"],
["NE", "Nebraska"],
["NV", "Nevada"],
["NH", "New Hampshire"],
["NJ", "New Jersey"],
["NM", "New Mexico"],
["NY", "New York"],
["NC", "North Carolina"],
["ND", "North Dakota"],
["MP", "Northern Mariana Islands"],
["OH", "Ohio"],
["OK", "Oklahoma"],
["OR", "Oregon"],
["PW", "Palau"],
["PA", "Pennsylvania"],
["PR", "Puerto Rico"],
["RI", "Rhode Island"],
["SC", "South Carolina"],
["SD", "South Dakota"],
["TN", "Tennessee"],
["TX", "Texas"],
["UT", "Utah"],
["VT", "Vermont"],
["VI", "Virgin Islands"],
["VA", "Virginia"],
["WA", "Washington"],
["WV", "West Virginia"],
["WI", "Wisconsin"],
["WY", "Wyoming"]
]
state_choices = [(x[0], x[1]) for x in states]
default = [("-1","Please Select")]
state_choices = default + state_choices
def validate_name(form, field):
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(dir_path + '/rules.json') as f:
data = json.load(f)
s = data['network_name']['regex']
message = data['network_name']['message']
regex = re.compile(r'%s' % s)
    match = regex.match(field.data)
    if match is None:
        raise ValidationError(message)
def validate_dropdown(form, field):
if field.data == '-1':
raise ValidationError("You need to select an option")
class GenericFormTemplate(GatewayForm):
"""
Generic form Template
Note:
When updating the form, remember to make the corresponding changes to the workflow pages
"""
workflow_name = 'network_manager'
workflow_permission = 'network_manager_page'
network_name = StringField(
label='Name',
validators=[validate_name]
)
network_location = SelectField(
label='Location',
choices=state_choices,
validators=[validate_dropdown]
)
network_size = SelectField(
label="Size",
choices=[('-1','Please Select'), ('256','256'), ('512','512'), ('1024','1024')],
validators=[validate_dropdown]
)
submit = SubmitField(
label='Submit'
)
|
n1 = int(input('First value: '))
n2 = int(input('Second value: '))
n3 = int(input('Third value: '))
# find the smallest value
menor = n1
if n2 < n1 and n2 < n3:
    menor = n2
if n3 < n1 and n3 < n2:
    menor = n3
print('The smallest value entered was {}'.format(menor))
# find the largest value
maior = n1
if n2 > n1 and n2 > n3:
    maior = n2
if n3 > n1 and n3 > n2:
    maior = n3
print('The largest value entered was {}'.format(maior))
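# Worked example: inputs 5, 2, 9 -> smallest 2, largest 9.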
|
#!/usr/bin/python3
import sys
import os
import argparse
import backTCP
from utils import *
def parse_args():
parser = argparse.ArgumentParser(description="receive a file from backTCP", epilog="This program is created by iBug")
parser.add_argument('filename', metavar="file", help="the name to save received file as")
    parser.add_argument('-a', '-A', '--address', metavar="addr", help="address to listen on", default="0.0.0.0")
parser.add_argument('-p', '--port', metavar="port", type=int, help="port to listen on", default=6666)
parser.add_argument('-l', '--log-level', metavar="level", help="logging level", default=LOG_WARNING)
return parser.parse_args()
def main():
args = parse_args()
set_log_level(args.log_level)
with open(args.filename, "wb") as f:
f.write(backTCP.recv(args.address, args.port))
if __name__ == '__main__':
main()
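# Run sketch (illustrative invocation; the script name is a placeholder):
#
#     python backtcp_recv.py received.bin -a 0.0.0.0 -p 6666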
|
from django.contrib import admin
from django import forms
from system.models import Setting, Server, Template
from images.models import stage
import system.forms
serstage = stage.copy()
serstage[0] = 'edit original images'
serstage[7] = 'edit aligned images'
# Iterate over a snapshot of the keys: the dict is mutated inside the loop.
for i in list(serstage.keys()):
    if int(i) > 8:
        serstage.pop(i, None)
STAGE_CHOICES = serstage.items()
class ServerForm(forms.ModelForm):
run_stages = system.forms.CSIMultipleChoiceField(choices=STAGE_CHOICES)
class ServerAdmin(admin.ModelAdmin):
form = ServerForm
list_filter = ('active', )
class TemplateAdmin(admin.ModelAdmin):
readonly_fields = ('temp_image', )
TemplateAdmin.allow_tags = True
admin.site.register(Setting)
admin.site.register(Server, ServerAdmin)
admin.site.register(Template, TemplateAdmin)
|
"""Tests for sharpness_aware_minimization."""
import os
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.models import sharpness_aware_minimization
from keras.optimizers.optimizer_experimental import adam
from keras.testing_infra import test_utils
ds_combinations = tf.__internal__.distribute.combinations
STRATEGIES = [
ds_combinations.one_device_strategy,
ds_combinations.mirrored_strategy_with_two_gpus,
ds_combinations.tpu_strategy,
ds_combinations.parameter_server_strategy_3worker_2ps_1gpu,
ds_combinations.multi_worker_mirrored_2x1_cpu,
ds_combinations.multi_worker_mirrored_2x2_gpu,
ds_combinations.central_storage_strategy_with_two_gpus,
]
@test_utils.run_v2_only
class SharpnessAwareMinimizationTest(tf.test.TestCase, parameterized.TestCase):
def test_sam_model_call(self):
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([2, 2])
self.assertAllClose(model(data), sam_model(data))
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(strategy=STRATEGIES)
)
def test_sam_model_fit(self, strategy):
with strategy.scope():
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([2, 2])
label = data[:, 0] > 0.5
sam_model.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model.fit(data, label, steps_per_epoch=1)
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(strategy=STRATEGIES)
)
def test_sam_model_fit_with_sub_batch(self, strategy):
with strategy.scope():
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model, num_batch_splits=4
)
data = tf.random.uniform([48, 2])
label = data[:, 0] > 0.5
sam_model.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model.fit(data, label, steps_per_epoch=1)
def test_save_sam(self):
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([1, 2, 2])
label = data[:, 0] > 0.5
sam_model.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model.fit(data, label)
path = os.path.join(self.get_temp_dir(), "model")
sam_model.save(path)
loaded_sam_model = keras.models.load_model(path)
loaded_sam_model.load_weights(path)
self.assertAllClose(sam_model(data), loaded_sam_model(data))
def test_checkpoint_sam(self):
model = keras.Sequential(
[
keras.Input([2, 2]),
keras.layers.Dense(4),
keras.layers.Dense(1),
]
)
sam_model_1 = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
sam_model_2 = sharpness_aware_minimization.SharpnessAwareMinimization(
model
)
data = tf.random.uniform([1, 2, 2])
label = data[:, 0] > 0.5
sam_model_1.compile(
optimizer=adam.Adam(),
loss=keras.losses.BinaryCrossentropy(from_logits=True),
)
sam_model_1.fit(data, label)
checkpoint = tf.train.Checkpoint(sam_model_1)
checkpoint2 = tf.train.Checkpoint(sam_model_2)
temp_dir = self.get_temp_dir()
save_path = checkpoint.save(temp_dir)
checkpoint2.restore(save_path)
self.assertAllClose(sam_model_1(data), sam_model_2(data))
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main()
|
from typing import FrozenSet
from collections.abc import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
assert not name.startswith("_"), name
s = msat_declare_function(menv, name, c_type)
s = msat_make_constant(menv, s)
x_s = msat_declare_function(menv, name_next(name), c_type)
x_s = msat_make_constant(menv, x_s)
return s, x_s
def make_enum(menv, v_name: str, enum_size: int):
bool_type = msat_get_bool_type(menv)
num_bits = ceil(log(enum_size, 2))
b_vars = []
for idx in range(num_bits):
c_name = "{}{}".format(v_name, idx)
b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
vals = []
x_vals = []
for enum_val in range(enum_size):
bit_val = format(enum_val, '0{}b'.format(num_bits))
assert len(bit_val) == num_bits
assert all(c in {'0', '1'} for c in bit_val)
assign = [b_vars[idx] if c == '1' else
(msat_make_not(menv, b_vars[idx][0]),
msat_make_not(menv, b_vars[idx][1]))
for idx, c in enumerate(reversed(bit_val))]
pred = assign[0][0]
x_pred = assign[0][1]
for it in assign[1:]:
pred = msat_make_and(menv, pred, it[0])
x_pred = msat_make_and(menv, x_pred, it[1])
vals.append(pred)
x_vals.append(x_pred)
assert len(vals) == enum_size
assert len(x_vals) == enum_size
return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
m_one = msat_make_number(menv, "-1")
arg1 = msat_make_times(menv, arg1, m_one)
return msat_make_plus(menv, arg0, arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
real_type = msat_get_rational_type(menv)
delta = msat_declare_function(menv, delta_name, real_type)
delta = msat_make_constant(menv, delta)
return frozenset([delta])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
msat_term, msat_term):
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
r2s, x_r2s = decl_consts(menv, "r2s", int_type)
s2r, x_s2r = decl_consts(menv, "s2r", int_type)
delta, x_delta = decl_consts(menv, delta_name, real_type)
sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
for comp in [sender, receiver]:
for s, x_s in comp.symb2next.items():
curr2next[s] = x_s
zero = msat_make_number(menv, "0")
init = msat_make_and(menv, receiver.init, sender.init)
trans = msat_make_and(menv, receiver.trans, sender.trans)
# invar delta >= 0
init = msat_make_and(menv, init,
msat_make_geq(menv, delta, zero))
trans = msat_make_and(menv, trans,
msat_make_geq(menv, x_delta, zero))
# delta > 0 -> (r2s' = r2s & s2r' = s2r)
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_equal(menv, x_r2s, r2s),
msat_make_equal(menv, x_s2r, s2r))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, lhs, rhs))
# (G F !s.stutter) -> G (s.wait_ack -> F s.send)
lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
enc.make_F(sender.send)))
ltl = msat_make_impl(menv, lhs, rhs)
return TermMap(curr2next), init, trans, ltl
class Module:
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
*args, **kwargs):
self.name = name
self.menv = menv
self.enc = enc
self.symb2next = {}
true = msat_make_true(menv)
self.init = true
self.trans = true
def _symb(self, v_name, v_type):
v_name = "{}_{}".format(self.name, v_name)
return decl_consts(self.menv, v_name, v_type)
def _enum(self, v_name: str, enum_size: int):
c_name = "{}_{}".format(self.name, v_name)
return make_enum(self.menv, c_name, enum_size)
class Sender(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
int_type = msat_get_integer_type(menv)
real_type = msat_get_rational_type(menv)
loc, x_loc = self._symb("l", bool_type)
evt, x_evt = self._symb("evt", bool_type)
msg_id, x_msg_id = self._symb("msg_id", int_type)
timeout, x_timeout = self._symb("timeout", real_type)
c, x_c = self._symb("c", real_type)
self.move = evt
self.stutter = msat_make_not(menv, evt)
self.x_move = x_evt
self.x_stutter = msat_make_not(menv, x_evt)
self.send = loc
self.wait_ack = msat_make_not(menv, loc)
self.x_send = x_loc
self.x_wait_ack = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
timeout: x_timeout, c: x_c}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
base_timeout = one
# send & c = 0 & msg_id = 0
self.init = msat_make_and(menv,
msat_make_and(menv, self.send,
msat_make_equal(menv, c,
zero)),
msat_make_equal(menv, msg_id, zero))
# invar: wait_ack -> c <= timeout
self.init = msat_make_and(
menv, self.init,
msat_make_impl(menv, self.wait_ack,
msat_make_leq(menv, c, timeout)))
self.trans = msat_make_impl(menv, self.x_wait_ack,
msat_make_leq(menv, x_c, x_timeout))
# delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
# c' = c + delta & out_c' = out_c
lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_msg_id, msg_id)),
msat_make_and(menv,
msat_make_equal(menv, x_timeout, timeout),
msat_make_equal(menv, x_c,
msat_make_plus(menv, c, delta))))
rhs = msat_make_and(menv, rhs,
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
disc_t = msat_make_and(menv, self.move,
msat_make_equal(menv, delta, zero))
# (send & send') ->
# (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_send))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id, msg_id),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (send & wait_ack') ->
# (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.send, self.x_wait_ack))
rhs = msat_make_and(
menv,
msat_make_and(menv,
msat_make_equal(menv, x_msg_id,
msat_make_plus(menv, msg_id, one)),
msat_make_equal(menv, x_timeout, base_timeout)),
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c, out_c)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c >= timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs_iff = msat_make_and(menv,
msat_make_not(menv,
msat_make_equal(menv, in_c,
msg_id)),
msat_make_geq(menv, c, timeout))
rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
rhs = msat_make_and(menv,
msat_make_and(menv,
msat_make_equal(menv, x_c, zero),
msat_make_equal(menv, x_out_c,
out_c)),
rhs_iff)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & wait_ack') -> (timeout' > timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack,
self.x_wait_ack))
rhs = msat_make_gt(menv, x_timeout, timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
lhs = msat_make_and(menv, disc_t, self.wait_ack)
rhs = msat_make_iff(menv, self.x_send,
msat_make_and(menv,
msat_make_equal(menv, in_c, msg_id),
msat_make_lt(menv, c, timeout)))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait_ack & send') -> (timeout' = base_timeout)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait_ack, self.x_send))
rhs = msat_make_equal(menv, x_timeout, base_timeout)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
in_c, x_in_c, out_c, x_out_c, delta):
super().__init__(name, menv, enc)
bool_type = msat_get_bool_type(menv)
loc, x_loc = self._symb("l", bool_type)
self.wait = loc
self.work = msat_make_not(menv, loc)
self.x_wait = x_loc
self.x_work = msat_make_not(menv, x_loc)
self.symb2next = {loc: x_loc}
zero = msat_make_number(menv, "0")
# wait
self.init = self.wait
# delta > 0 -> loc' = loc & out_c' = out_c
lhs = msat_make_gt(menv, delta, zero)
rhs = msat_make_and(menv,
msat_make_iff(menv, x_loc, loc),
msat_make_equal(menv, x_out_c, out_c))
self.trans = msat_make_impl(menv, lhs, rhs)
disc_t = msat_make_equal(menv, delta, zero)
# wait -> (wait' <-> in_c = out_c)
lhs = msat_make_and(menv, disc_t, self.wait)
rhs = msat_make_iff(menv, self.x_wait,
msat_make_equal(menv, in_c, out_c))
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & wait') -> (out_c' = out_c)
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_wait))
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# (wait & work') -> out_c' = in_c
lhs = msat_make_and(menv, disc_t,
msat_make_and(menv, self.wait, self.x_work))
rhs = msat_make_equal(menv, x_out_c, in_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
# work -> out_c' = out_c
lhs = msat_make_and(menv, disc_t, self.work)
rhs = msat_make_equal(menv, x_out_c, out_c)
self.trans = msat_make_and(menv, self.trans,
msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
delta = mgr.Symbol(delta_name, types.REAL)
r2s = mgr.Symbol("r2s", types.INT)
s2r = mgr.Symbol("r2s", types.INT)
s_l = mgr.Symbol("s_l", types.BOOL)
s_evt = mgr.Symbol("s_evt", types.BOOL)
s_msg_id = mgr.Symbol("s_msg_id", types.INT)
s_timeout = mgr.Symbol("s_timeout", types.REAL)
s_c = mgr.Symbol("s_c", types.REAL)
r_l = mgr.Symbol("r_l", types.BOOL)
symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
r_l])
x_delta = symb_to_next(mgr, delta)
x_r2s = symb_to_next(mgr, r2s)
x_s2r = symb_to_next(mgr, s2r)
x_s_l = symb_to_next(mgr, s_l)
x_s_evt = symb_to_next(mgr, s_evt)
x_s_msg_id = symb_to_next(mgr, s_msg_id)
x_s_timeout = symb_to_next(mgr, s_timeout)
x_s_c = symb_to_next(mgr, s_c)
x_r_l = symb_to_next(mgr, r_l)
res = []
r0 = mgr.Real(0)
r1 = mgr.Real(1)
i0 = mgr.Int(0)
i1 = mgr.Int(1)
loc0 = Location(env, mgr.GE(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, mgr.Plus(s_msg_id, i1)))
hint = Hint("h_s_msg_id1", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_msg_id, i0))
loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, s_l)
loc0.set_progress(0, x_s_l)
hint = Hint("h_s_l0", env, frozenset([s_l]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s2r, i0))
loc0.set_progress(0, mgr.Equals(x_s2r, i0))
hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.Equals(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, r0))
hint = Hint("h_s_timeout0", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, mgr.GE(delta, r0))
loc0.set_progress(0, mgr.Equals(x_delta, r1))
hint = Hint("h_delta1", env, frozenset([delta]), symbs)
hint.set_locs([loc0])
res.append(hint)
loc0 = Location(env, r_l)
loc0.set_progress(1, mgr.Not(x_r_l))
loc1 = Location(env, mgr.Not(r_l))
loc1.set_progress(0, x_r_l)
hint = Hint("h_r_l1", env, frozenset([r_l]), symbs)
hint.set_locs([loc0, loc1])
res.append(hint)
loc0 = Location(env, mgr.GE(s_timeout, r0))
loc0.set_progress(0, mgr.Equals(x_s_timeout, mgr.Plus(s_timeout, r1)))
hint = Hint("h_s_timeout1", env, frozenset([s_timeout]), symbs)
hint.set_locs([loc0])
res.append(hint)
return frozenset(res)
|
#!/usr/bin/env python
# coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2019 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
"""This script is a simple example of how to add your own statistic to a
:class:`~silx.gui.plot.statsWidget.StatsWidget` from customs
:class:`~silx.gui.plot.stats.Stats` and display it.
On this example we will:
- show sum of values for each type
- compute curve integrals (only for 'curve').
- compute center of mass for all possible items
.. note:: stats are available for 1D and 2D at the time being
"""
__authors__ = ["H. Payno"]
__license__ = "MIT"
__date__ = "23/07/2019"
from silx.gui import qt
from silx.gui.colors import Colormap
from silx.gui.plot import Plot1D
from silx.gui.plot.stats.stats import StatBase
from silx.gui.utils import concurrent
import random
import threading
import argparse
import numpy
import time
class UpdateThread(threading.Thread):
"""Thread updating the curve of a :class:`~silx.gui.plot.Plot1D`
:param plot1d: The Plot1D to update."""
def __init__(self, plot1d):
self.plot1d = plot1d
self.running = False
super(UpdateThread, self).__init__()
def start(self):
"""Start the update thread"""
self.running = True
super(UpdateThread, self).start()
def run(self):
"""Method implementing thread loop that updates the plot"""
while self.running:
time.sleep(1)
# Run plot update asynchronously
concurrent.submitToQtMainThread(
self.plot1d.addCurve,
numpy.arange(1000),
numpy.random.random(1000),
resetzoom=False,
legend=random.choice(('mycurve0', 'mycurve1'))
)
def stop(self):
"""Stop the update thread"""
self.running = False
self.join(2)
class Integral(StatBase):
"""
Simple calculation of the line integral
"""
def __init__(self):
StatBase.__init__(self, name='integral', compatibleKinds=('curve',))
def calculate(self, context):
xData, yData = context.data
return numpy.trapz(x=xData, y=yData)
class COM(StatBase):
"""
Compute data center of mass
"""
def __init__(self):
StatBase.__init__(self, name='COM', description="Center of mass")
def calculate(self, context):
if context.kind in ('curve', 'histogram'):
xData, yData = context.data
deno = numpy.sum(yData).astype(numpy.float32)
if deno == 0.0:
return 0.0
else:
return numpy.sum(xData * yData).astype(numpy.float32) / deno
elif context.kind == 'scatter':
xData, yData, values = context.data
values = values.astype(numpy.float64)
deno = numpy.sum(values)
if deno == 0.0:
return float('inf'), float('inf')
else:
comX = numpy.sum(xData * values) / deno
comY = numpy.sum(yData * values) / deno
return comX, comY
def main(argv):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--update-mode',
default='auto',
help='update mode to display (manual or auto)')
options = parser.parse_args(argv[1:])
app = qt.QApplication([])
plot = Plot1D()
# Create the thread that calls submitToQtMainThread
updateThread = UpdateThread(plot)
updateThread.start() # Start updating the plot
plot.addScatter(x=[0, 2, 5, 5, 12, 20],
y=[2, 3, 4, 20, 15, 6],
value=[5, 6, 7, 10, 90, 20],
colormap=Colormap('viridis'),
legend='myScatter')
stats = [
('sum', numpy.sum),
Integral(),
(COM(), '{0:.2f}'),
]
plot.getStatsWidget().setStats(stats)
plot.getStatsWidget().setUpdateMode(options.update_mode)
plot.getStatsWidget().setDisplayOnlyActiveItem(False)
plot.getStatsWidget().parent().setVisible(True)
plot.show()
app.exec_()
updateThread.stop() # Stop updating the plot
if __name__ == '__main__':
import sys
main(sys.argv)
|
#!/usr/bin/env python3
import os
# os.path() features a lot of functionalities.
# 1. os.path.basename() prints the end leaf name
print(os.path.basename("/tmp/test.txt"))
# 2. os.path.dirname() prints the directory part
print(os.path.dirname("/tmp/test.txt"))
# 3. os.path.exists() check for the existence of paths
print(f'Checking /tmp/test.txt exists: {os.path.exists("/tmp/test.txt")}')
# 4. os.path.isdir() and os.path.isfile()
if os.path.isdir("/tmp/test.txt"):
    print("/tmp/test.txt is a folder")
elif os.path.isfile("/tmp/test.txt"):
    print("/tmp/test.txt is a file")
# 5. os.path.join() joins two paths.
# It inserts `/` on Unix and `\` on Windows.
# NOTE: This doesn't check if the path exists.
print(os.path.join("/tmp", "test.txt"))
# 6. os.path.split() splits a path to a dir and file
print(os.path.split("/tmp/test.txt"))
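# Expected output of the split above: ('/tmp', 'test.txt')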
|
# from __future__ import annotations
#
#
# class BaseInMemoryRepository:
# def __init__(self):
# self._data = {}
#
# def clear(self):
# self._data.clear()
#
# def is_empty(self):
# return len(self._data) == 0
from __future__ import annotations
|
#!/usr/bin/python2.7
import sys
import os
sys.path.insert(0, os.path.dirname(__file__))
if os.path.exists(os.path.join(os.path.dirname(__file__), 'vpy', 'bin', 'activate_this.py')):
activate_this = os.path.join(os.path.dirname(__file__), 'vpy', 'bin', 'activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
os.environ['SLICK_SETTINGS'] = os.path.join(os.path.dirname(__file__), 'prodserver.cfg')
os.chdir(os.path.dirname(__file__))
from slickqaweb.main import app as application
|
# coding:utf-8
from app.user.user import (
UserProfileHandler,
UserProfileEditHandler,
UserAvatarEditHandler,
UserNotificationHandler,
UserFollowerHandler,
UserOptHandler,
WebsocketChatHandler
)
urlprefix = r''
urlpattern = (
(r'/user/(\d+)', UserProfileHandler),
(r'/user/edit', UserProfileEditHandler),
(r'/user/notification', UserNotificationHandler),
(r'/user/chatwebsocket', WebsocketChatHandler),
(r'/user/avatar/edit', UserAvatarEditHandler),
(r'/useropt', UserOptHandler),
(r'/follower/(\d+)', UserFollowerHandler),
)
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import torch
import timm
from timm.models import create_model, safe_model_name, resume_checkpoint, load_checkpoint,\
convert_splitbn_model, model_parameters
import torch.onnx
from collections import OrderedDict
def proc_node_module(checkpoint, attr_name):
    # Strip the "module." prefix that torch.nn.DataParallel adds to
    # parameter names, e.g. "module.conv1.weight" -> "conv1.weight".
    new_state_dict = OrderedDict()
    for k, v in checkpoint[attr_name].items():
        name = k[7:] if k.startswith("module.") else k
        new_state_dict[name] = v
    return new_state_dict
def convert():
checkpoint = torch.load("./model_best.pth.tar", map_location='cpu')
checkpoint['state_dict'] = proc_node_module(checkpoint, 'state_dict')
    # The pretrained weights are only placeholders; they are immediately
    # replaced by the converted checkpoint.
    model = timm.create_model('spnasnet_100', pretrained=True)
    model.load_state_dict(checkpoint['state_dict'])
model.eval()
print(model)
input_names = ["actual_input_1"]
output_names = ["output1"]
dummy_input = torch.randn(16, 3, 224, 224)
torch.onnx.export(model, dummy_input, "spnasnet_100_npu_16.onnx", input_names=input_names, output_names=output_names,
opset_version=11)
if __name__ == "__main__":
convert()
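# A quick sanity check of the exported graph (a sketch, assuming the
# optional onnxruntime package is installed; not part of the conversion):
#   import numpy as np
#   import onnxruntime as ort
#   sess = ort.InferenceSession("spnasnet_100_npu_16.onnx")
#   out = sess.run(None, {"actual_input_1": np.random.randn(16, 3, 224, 224).astype(np.float32)})
#   print(out[0].shape)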
|
from flask import Flask
from string import Template
import requests
def is_url_ok(url):
    return requests.head(url).status_code == 200
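# A more defensive variant (a sketch, not wired in below): bound the request
# time and treat network failures as "not OK" instead of raising.
# def is_url_ok(url):
#     try:
#         return requests.head(url, timeout=5).status_code == 200
#     except requests.RequestException:
#         return False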
IFRAME_TEMPLATE = Template("""
<h2>
YouTube video link:
<a href="https://www.youtube.com/watch?v=${youtube_id}">
${youtube_id}
</a>
</h2>
<iframe src="https://www.youtube.com/embed/${youtube_id}" width="853" height="480" frameborder="0" allowfullscreen></iframe>""")
app = Flask(__name__)
@app.route('/')
def homepage():
vidhtml = IFRAME_TEMPLATE.substitute(youtube_id='YQHsXMglC9A')
return """<h1>Hello world!</h1>""" + vidhtml
@app.route('/videos/<vid>')
def videos(vid):
youtube_url = 'https://www.youtube.com/watch?v=' + vid
    if is_url_ok(youtube_url):
hed = """<h2><a href="{url}">YouTube video: {id}</a></h2>""".format(url=youtube_url, id=vid)
iframe = IFRAME_TEMPLATE.substitute(youtube_id=vid)
return hed + iframe
else:
# when the youtube video id is not found
hed = """<h2>Youtube video {id} <strong>does not exist</strong></h2>""".format(id=vid)
# note that we substitute a specific YouTube ID for the template
# iframe = IFRAME_TEMPLATE.substitute(youtube_id=vid)
return hed
# return hed + iframe
if __name__ == '__main__':
app.run(debug=True, use_reloader=True)
|
# -*- coding: utf-8 -*-
from pytrol.model.action.Action import Action
from pytrol.model.action.Actions import Actions
class SendingMessageAction(Action):
def __init__(self, _message: str, _agt_id: int):
r"""
Args:
_message (str):
_agt_id (int):
"""
Action.__init__(self, "sending_message", Actions.Sending_message)
self.message = _message
self.agt_id = _agt_id
|
from kutana import Plugin, t
plugin = Plugin(name=t("Echo"), description=t("Sends your messages back (.echo)"))
@plugin.on_commands(["echo"])
async def __(msg, ctx):
await ctx.reply("{}".format(ctx.body or '(/)'), attachments=msg.attachments, disable_mentions=0)
|
"""
Probabilistic Detectron Inference Script
"""
import json
import os
import sys
from shutil import copyfile
import torch
import tqdm
import core
# This is very ugly. Essential for now but should be fixed.
sys.path.append(os.path.join(core.top_dir(), "src", "detr"))
from detectron2.data import MetadataCatalog, build_detection_test_loader
# Detectron imports
from detectron2.engine import launch
# Project imports
from core.evaluation_tools.evaluation_utils import (
get_train_contiguous_id_to_test_thing_dataset_id_dict,
)
from core.setup import setup_arg_parser, setup_config
from offline_evaluation import (
compute_average_precision,
compute_calibration_errors,
compute_ood_probabilistic_metrics,
compute_probabilistic_metrics,
)
from probabilistic_inference.inference_utils import (
build_predictor,
get_inference_output_dir,
instances_to_json,
)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def main(args):
# Setup config
cfg = setup_config(args, random_seed=args.random_seed, is_testing=True)
# Make sure only 1 data point is processed at a time. This simulates
# deployment.
cfg.defrost()
cfg.DATALOADER.NUM_WORKERS = 32
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.MODEL.DEVICE = device.type
    # Set up number of CPU threads
torch.set_num_threads(cfg.DATALOADER.NUM_WORKERS)
# Create inference output directory and copy inference config file to keep
# track of experimental settings
if args.inference_dir == "":
inference_output_dir = get_inference_output_dir(
cfg["OUTPUT_DIR"],
args.test_dataset,
args.inference_config,
args.image_corruption_level,
)
else:
inference_output_dir = args.inference_dir
    os.makedirs(inference_output_dir, exist_ok=True)
copyfile(
args.inference_config,
os.path.join(inference_output_dir, os.path.split(args.inference_config)[-1]),
)
# Get category mapping dictionary:
train_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
cfg.DATASETS.TRAIN[0]
).thing_dataset_id_to_contiguous_id
test_thing_dataset_id_to_contiguous_id = MetadataCatalog.get(
args.test_dataset
).thing_dataset_id_to_contiguous_id
# If both dicts are equal or if we are performing out of distribution
# detection, just flip the test dict.
cat_mapping_dict = get_train_contiguous_id_to_test_thing_dataset_id_dict(
cfg,
args,
train_thing_dataset_id_to_contiguous_id,
test_thing_dataset_id_to_contiguous_id,
)
# Build predictor
predictor = build_predictor(cfg)
test_data_loader = build_detection_test_loader(cfg, dataset_name=args.test_dataset)
final_output_list = []
if not args.eval_only:
with torch.no_grad():
with tqdm.tqdm(total=len(test_data_loader)) as pbar:
for idx, input_im in enumerate(test_data_loader):
# Apply corruption
outputs = predictor(input_im)
# print(f'Image id {input_im[0]["image_id"]}')
# predictor.visualize_inference(input_im, outputs)
final_output_list.extend(
instances_to_json(
outputs, input_im[0]["image_id"], cat_mapping_dict
)
)
pbar.update(1)
with open(
os.path.join(inference_output_dir, "coco_instances_results.json"), "w"
) as fp:
json.dump(final_output_list, fp, indent=4, separators=(",", ": "))
if "ood" in args.test_dataset:
compute_ood_probabilistic_metrics.main(args, cfg)
else:
compute_average_precision.main(args, cfg, inference_output_dir)
compute_probabilistic_metrics.main(
args, cfg, inference_output_dir=inference_output_dir
)
compute_calibration_errors.main(
args, cfg, inference_output_dir=inference_output_dir
)
if __name__ == "__main__":
# Create arg parser
arg_parser = setup_arg_parser()
args = arg_parser.parse_args()
# Support single gpu inference only.
args.num_gpus = 1
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
import os
from collections import defaultdict
from itertools import chain
from operator import attrgetter
from flask import flash, request, session
from sqlalchemy.orm import selectinload
from werkzeug.exceptions import Forbidden
from werkzeug.utils import cached_property
from indico.modules.events.contributions import Contribution
from indico.modules.events.papers.controllers.base import RHJudgingAreaBase
from indico.modules.events.papers.forms import BulkPaperJudgmentForm
from indico.modules.events.papers.lists import PaperAssignmentListGenerator, PaperJudgingAreaListGeneratorDisplay
from indico.modules.events.papers.models.revisions import PaperRevisionState
from indico.modules.events.papers.operations import judge_paper, update_reviewing_roles
from indico.modules.events.papers.settings import PaperReviewingRole
from indico.modules.events.papers.views import WPDisplayJudgingArea, WPManagePapers
from indico.modules.events.util import ZipGeneratorMixin
from indico.util.fs import secure_filename
from indico.util.i18n import _, ngettext
from indico.web.flask.util import url_for
from indico.web.util import jsonify_data, jsonify_form, jsonify_template
CFP_ROLE_MAP = {
PaperReviewingRole.judge: attrgetter('judges'),
PaperReviewingRole.content_reviewer: attrgetter('content_reviewers'),
PaperReviewingRole.layout_reviewer: attrgetter('layout_reviewers'),
}
CONTRIB_ROLE_MAP = {
PaperReviewingRole.judge: attrgetter('paper_judges'),
PaperReviewingRole.content_reviewer: attrgetter('paper_content_reviewers'),
PaperReviewingRole.layout_reviewer: attrgetter('paper_layout_reviewers'),
}
class RHPapersListBase(RHJudgingAreaBase):
"""Base class for assignment/judging paper lists."""
@cached_property
def list_generator(self):
if self.management:
return PaperAssignmentListGenerator(event=self.event)
else:
return PaperJudgingAreaListGeneratorDisplay(event=self.event, user=session.user)
class RHPapersList(RHPapersListBase):
"""Display the paper list for assignment/judging."""
@cached_property
def view_class(self):
return WPManagePapers if self.management else WPDisplayJudgingArea
def _process(self):
return self.view_class.render_template(self.template, self.event, **self.list_generator.get_list_kwargs())
@cached_property
def template(self):
return 'management/assignment.html' if self.management else 'display/judging_area.html'
class RHCustomizePapersList(RHPapersListBase):
"""Filter options and columns to display for the paper list."""
ALLOW_LOCKED = True
def _process_GET(self):
list_config = self.list_generator.list_config
return jsonify_template('events/papers/paper_list_filter.html',
event=self.event,
static_items=self.list_generator.static_items,
filters=list_config['filters'],
visible_items=list_config['items'])
def _process_POST(self):
self.list_generator.store_configuration()
return jsonify_data(flash=False, **self.list_generator.render_list())
class RHPapersActionBase(RHPapersListBase):
"""Base class for actions on selected papers."""
def _get_contrib_query_options(self):
return ()
def _process_args(self):
RHPapersListBase._process_args(self)
ids = map(int, request.form.getlist('contribution_id'))
self.contributions = (self.list_generator._build_query()
.filter(Contribution.id.in_(ids))
.options(*self._get_contrib_query_options())
.all())
class RHDownloadPapers(ZipGeneratorMixin, RHPapersActionBase):
"""
Generate a ZIP file with paper files for a given list of contributions.
"""
ALLOW_LOCKED = True
def _prepare_folder_structure(self, item):
paper_title = secure_filename('{}_{}'.format(item.paper.contribution.friendly_id,
item.paper.contribution.title), 'paper')
file_name = secure_filename('{}_{}'.format(item.id, item.filename), 'paper')
return os.path.join(*self._adjust_path_length([paper_title, file_name]))
    def _iter_items(self, contributions):
        contributions_with_paper = [c for c in contributions if c.paper]
for contrib in contributions_with_paper:
for f in contrib.paper.last_revision.files:
yield f
def _process(self):
return self._generate_zip_file(self.contributions, name_prefix='paper-files', name_suffix=self.event.id)
class RHJudgePapers(RHPapersActionBase):
"""Bulk judgment of papers."""
def _process(self):
form = BulkPaperJudgmentForm(event=self.event, judgment=request.form.get('judgment'),
contribution_id=[c.id for c in self.contributions])
if form.validate_on_submit():
submitted_papers = [c.paper for c in self.contributions if
c.paper and c.paper.last_revision.state == PaperRevisionState.submitted]
for submitted_paper in submitted_papers:
judge_paper(submitted_paper, form.judgment.data, form.judgment_comment.data, judge=session.user)
num_submitted_papers = len(submitted_papers)
num_not_submitted_papers = len(self.contributions) - num_submitted_papers
if num_submitted_papers:
flash(ngettext("One paper has been judged.",
"{num} papers have been judged.",
num_submitted_papers).format(num=num_submitted_papers), 'success')
if num_not_submitted_papers:
flash(ngettext("One contribution has been skipped since it has no paper submitted yet or it is in "
"a final state.",
"{num} contributions have been skipped since they have no paper submitted yet or they "
"are in a final state.",
num_not_submitted_papers).format(num=num_not_submitted_papers), 'warning')
return jsonify_data(**self.list_generator.render_list())
return jsonify_form(form=form, submit=_('Judge'), disabled_until_change=False)
class RHAssignPapersBase(RHPapersActionBase):
"""Base class for assigning/unassigning paper reviewing roles."""
def _get_contrib_query_options(self):
return [selectinload('person_links')]
def _process_args(self):
RHPapersActionBase._process_args(self)
self.role = PaperReviewingRole[request.view_args['role']]
user_ids = map(int, request.form.getlist('user_id'))
self.users = {u for u in CFP_ROLE_MAP[self.role](self.event.cfp) if u.id in user_ids}
def _check_access(self):
RHPapersActionBase._check_access(self)
if not self.management and self.role == PaperReviewingRole.judge:
raise Forbidden
def _process_assignment(self, assign):
update_reviewing_roles(self.event, self.users, self.contributions, self.role, assign)
if assign:
flash(_("Paper reviewing roles have been assigned."), 'success')
else:
flash(_("Paper reviewing roles have been unassigned."), 'success')
return jsonify_data(**self.list_generator.render_list())
def _get_conflicts(self, users):
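        # A user is flagged as conflicting with a contribution when their
        # affiliation occurs as a substring of any contributor's affiliation.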
conflicts = defaultdict(list)
for user in users:
if not user.affiliation:
continue
for contribution in self.contributions:
conflicts[user].extend(
(
contribution.title,
url_for('contributions.display_contribution', contribution),
)
for person in contribution.person_links
if user.affiliation in person.affiliation
)
return conflicts
def _render_form(self, users, action):
conflicts = self._get_conflicts(users)
user_competences = self.event.cfp.user_competences
        competences = {'competences_{}'.format(user_id): competences.competences
                       for user_id, competences in user_competences.iteritems()}
return jsonify_template('events/papers/assign_role.html', event=self.event, role=self.role.name,
action=action, users=users, competences=competences,
contribs=self.contributions, conflicts=conflicts)
class RHAssignPapers(RHAssignPapersBase):
"""Render the user list to assign paper reviewing roles."""
def _process(self):
if self.users:
return self._process_assignment(True)
users = CFP_ROLE_MAP[self.role](self.event.cfp)
return self._render_form(users, 'assign')
class RHUnassignPapers(RHAssignPapersBase):
"""Render the user list to unassign paper reviewing roles."""
def _process(self):
if self.users:
return self._process_assignment(False)
_get_users = CONTRIB_ROLE_MAP[self.role]
users = set(chain.from_iterable(_get_users(c) for c in self.contributions))
return self._render_form(users, 'unassign')
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .multi_night_plots import *
from .aij_plots import *
|
"""This module contains the general information for InitiatorLunEp ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class InitiatorLunEpConsts():
BOOTABLE_FALSE = "false"
BOOTABLE_NO = "no"
BOOTABLE_TRUE = "true"
BOOTABLE_YES = "yes"
ID_UNSPECIFIED = "unspecified"
class InitiatorLunEp(ManagedObject):
"""This is InitiatorLunEp class."""
consts = InitiatorLunEpConsts()
naming_props = set([u'id'])
mo_meta = MoMeta("InitiatorLunEp", "initiatorLunEp", "lun-[id]", VersionMeta.Version211a, "InputOutput", 0x3f, [], ["read-only"], [u'storageIScsiTargetIf'], [], [None])
prop_meta = {
"bootable": MoPropertyMeta("bootable", "bootable", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["false", "no", "true", "yes"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version211a, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"ep_dn": MoPropertyMeta("ep_dn", "epDn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, None, 0, 256, None, [], []),
"id": MoPropertyMeta("id", "id", "string", VersionMeta.Version211a, MoPropertyMeta.NAMING, 0x8, None, None, None, ["unspecified"], ["0-4294967295"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version211a, MoPropertyMeta.READ_ONLY, 0x10, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version211a, MoPropertyMeta.READ_WRITE, 0x20, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"bootable": "bootable",
"childAction": "child_action",
"dn": "dn",
"epDn": "ep_dn",
"id": "id",
"rn": "rn",
"sacl": "sacl",
"status": "status",
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.bootable = None
self.child_action = None
self.ep_dn = None
self.sacl = None
self.status = None
ManagedObject.__init__(self, "InitiatorLunEp", parent_mo_or_dn, **kwargs)
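# Typical instantiation (a sketch; the parent DN below is hypothetical and,
# per mo_meta above, must refer to an existing storageIScsiTargetIf object):
#   lun_ep = InitiatorLunEp(parent_mo_or_dn="<iscsi-target-if-dn>", id="1")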
|
"""
Optimizations addressing the ops in nnet root directory
"""
import theano
from theano import compile, gof
from theano.compile import optdb
from theano.gof.opt import (
LocalMetaOptimizerSkipAssertionError,
copy_stack_trace,
local_optimizer,
)
from theano.tensor.nnet.abstract_conv import (
AbstractConv2d,
AbstractConv2d_gradInputs,
AbstractConv2d_gradWeights,
AbstractConv3d,
AbstractConv3d_gradInputs,
AbstractConv3d_gradWeights,
get_conv_output_shape,
)
from theano.tensor.nnet.blocksparse import (
SparseBlockGemv,
SparseBlockOuter,
sparse_block_gemv_inplace,
sparse_block_outer_inplace,
)
# Cpu implementation
from theano.tensor.nnet.conv import ConvOp, conv2d
from theano.tensor.nnet.corr import CorrMM, CorrMM_gradInputs, CorrMM_gradWeights
from theano.tensor.nnet.corr3d import Corr3dMM, Corr3dMMGradInputs, Corr3dMMGradWeights
from theano.tensor.opt import in2out, register_specialize_device
from theano.tensor.type import TensorType
@gof.local_optimizer([SparseBlockGemv], inplace=True)
def local_inplace_sparse_block_gemv(fgraph, node):
"""
SparseBlockGemv(inplace=False) -> SparseBlockGemv(inplace=True)
"""
if isinstance(node.op, SparseBlockGemv) and not node.op.inplace:
new_node = sparse_block_gemv_inplace(*node.inputs)
copy_stack_trace(node.outputs[0], new_node)
return [new_node]
return False
compile.optdb.register(
"local_inplace_sparse_block_gemv",
gof.TopoOptimizer(
local_inplace_sparse_block_gemv, failure_callback=gof.TopoOptimizer.warn_inplace
),
60,
"fast_run",
"inplace",
) # DEBUG
@gof.local_optimizer([SparseBlockOuter], inplace=True)
def local_inplace_sparse_block_outer(fgraph, node):
"""
SparseBlockOuter(inplace=False) -> SparseBlockOuter(inplace=True)
"""
if isinstance(node.op, SparseBlockOuter) and not node.op.inplace:
new_node = sparse_block_outer_inplace(*node.inputs)
copy_stack_trace(node.outputs[0], new_node)
return [new_node]
return False
compile.optdb.register(
"local_inplace_sparse_block_outer",
gof.TopoOptimizer(
local_inplace_sparse_block_outer,
failure_callback=gof.TopoOptimizer.warn_inplace,
),
60,
"fast_run",
"inplace",
) # DEBUG
# Conv opts
@local_optimizer([AbstractConv2d])
def local_abstractconv_gemm(fgraph, node):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
if theano.config.cxx == "" or node.inputs[0].dtype == "float16":
return
if not isinstance(node.op, AbstractConv2d):
return None
img, kern = node.inputs
if not isinstance(img.type, TensorType) or not isinstance(kern.type, TensorType):
return None
# need to flip the kernel if necessary
if node.op.filter_flip:
flip = (slice(None),) * (kern.ndim - 2) + (slice(None, None, -1),) * 2
kern = kern[flip]
rval = CorrMM(
border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation,
num_groups=node.op.num_groups,
unshared=node.op.unshared,
)(img, kern)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv3d])
def local_abstractconv3d_gemm(fgraph, node):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
if theano.config.cxx == "" or node.inputs[0].dtype == "float16":
return
if not isinstance(node.op, AbstractConv3d):
return None
img, kern = node.inputs
if not isinstance(img.type, TensorType) or not isinstance(kern.type, TensorType):
return None
# need to flip the kernel if necessary
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
rval = Corr3dMM(
border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation,
num_groups=node.op.num_groups,
)(img, kern)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv2d_gradWeights])
def local_abstractconv_gradweight_gemm(fgraph, node):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
if theano.config.cxx == "" or node.inputs[0].dtype == "float16":
return
if not isinstance(node.op, AbstractConv2d_gradWeights):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, TensorType) or not isinstance(topgrad.type, TensorType):
return None
rval = CorrMM_gradWeights(
border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation,
num_groups=node.op.num_groups,
unshared=node.op.unshared,
)(img, topgrad, shape)
copy_stack_trace(node.outputs[0], rval)
# need to flip the kernel if necessary
if node.op.filter_flip:
flip = (slice(None),) * (rval.ndim - 2) + (slice(None, None, -1),) * 2
rval = rval[flip]
rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv3d_gradWeights])
def local_abstractconv3d_gradweight_gemm(fgraph, node):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
if theano.config.cxx == "" or node.inputs[0].dtype == "float16":
return
if not isinstance(node.op, AbstractConv3d_gradWeights):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, TensorType) or not isinstance(topgrad.type, TensorType):
return None
rval = Corr3dMMGradWeights(
border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation,
num_groups=node.op.num_groups,
)(img, topgrad, shape)
copy_stack_trace(node.outputs[0], rval)
# need to flip the kernel if necessary
if node.op.filter_flip:
rval = rval[:, :, ::-1, ::-1, ::-1]
rval = theano.tensor.patternbroadcast(rval, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv2d_gradInputs])
def local_abstractconv_gradinputs_gemm(fgraph, node):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
if theano.config.cxx == "" or node.inputs[0].dtype == "float16":
return
if not isinstance(node.op, AbstractConv2d_gradInputs):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, TensorType) or not isinstance(
topgrad.type, TensorType
):
return None
# need to flip the kernel if necessary
if node.op.filter_flip:
flip = (slice(None),) * (kern.ndim - 2) + (slice(None, None, -1),) * 2
kern = kern[flip]
rval = CorrMM_gradInputs(
border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation,
num_groups=node.op.num_groups,
unshared=node.op.unshared,
)(kern, topgrad, shape)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv3d_gradInputs])
def local_abstractconv3d_gradinputs_gemm(fgraph, node):
# If theano.config.blas.ldflags is empty, Theano will use
# a NumPy C implementation of [sd]gemm_.
if theano.config.cxx == "" or node.inputs[0].dtype == "float16":
return
if not isinstance(node.op, AbstractConv3d_gradInputs):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, TensorType) or not isinstance(
topgrad.type, TensorType
):
return None
# need to flip the kernel if necessary
if node.op.filter_flip:
kern = kern[:, :, ::-1, ::-1, ::-1]
rval = Corr3dMMGradInputs(
border_mode=node.op.border_mode,
subsample=node.op.subsample,
filter_dilation=node.op.filter_dilation,
num_groups=node.op.num_groups,
)(kern, topgrad, shape)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv2d])
def local_conv2d_cpu(fgraph, node):
if not isinstance(node.op, AbstractConv2d) or node.inputs[0].dtype == "float16":
return None
img, kern = node.inputs
if not isinstance(img.type, TensorType) or not isinstance(kern.type, TensorType):
return None
if node.op.border_mode not in ["full", "valid"]:
return None
if not node.op.filter_flip:
# Not tested yet
return None
if node.op.num_groups > 1 or node.op.unshared:
return None
if node.op.filter_dilation != (1, 1):
return None
rval = conv2d(
img,
kern,
node.op.imshp,
node.op.kshp,
border_mode=node.op.border_mode,
subsample=node.op.subsample,
)
copy_stack_trace(node.outputs[0], rval)
return [rval]
@local_optimizer([AbstractConv2d_gradWeights])
def local_conv2d_gradweight_cpu(fgraph, node):
if (
not isinstance(node.op, AbstractConv2d_gradWeights)
or node.inputs[0].dtype == "float16"
):
return None
img, topgrad, shape = node.inputs
if not isinstance(img.type, TensorType) or not isinstance(topgrad.type, TensorType):
return None
if node.op.border_mode not in ["full", "valid"]:
return None
if not node.op.filter_flip:
# Not tested yet
return
if node.op.num_groups > 1 or node.op.unshared:
return None
if node.op.border_mode == "valid" and (node.op.subsample != (1, 1)):
return None
dx, dy = node.op.subsample
if dx not in (1, 2) or dy not in (1, 2):
# Not implemented in the gradient of ConvOp
return None
if node.op.imshp is None:
op_imshp = (None, None, None, None)
else:
op_imshp = node.op.imshp
if node.op.kshp is None:
op_kshp = (None, None, None, None)
else:
op_kshp = node.op.kshp
if None in op_imshp or None in op_kshp:
if (dx, dy) != (1, 1):
# We cannot infer the shapes
return None
# Determine gradient on kernels
assert len(op_imshp) == 4 and len(op_kshp) == 4
outshp = get_conv_output_shape(
op_imshp,
op_kshp,
node.op.border_mode,
node.op.subsample,
node.op.filter_dilation,
)[2:]
fulloutshp = get_conv_output_shape(op_imshp, op_kshp, node.op.border_mode, (1, 1))[
2:
]
newimg = img.dimshuffle((1, 0, 2, 3))
newtopgrad = topgrad.dimshuffle((1, 0, 2, 3))
if node.op.border_mode == "valid":
(img, filters) = (newimg, newtopgrad)
kshp_logical = fulloutshp
kshp_logical_top_aligned = False
imshp_logical = None
(bsize, nkern) = (op_imshp[1], op_kshp[0])
imshp = (op_imshp[0], op_imshp[2], op_imshp[3])
kshp = outshp
elif node.op.border_mode == "full":
(img, filters) = (newtopgrad, newimg)
kshp_logical = None
kshp_logical_top_aligned = True
imshp_logical = (op_imshp[0], fulloutshp[0], fulloutshp[1])
(bsize, nkern) = (op_kshp[0], op_imshp[1])
imshp = (op_imshp[0], outshp[0], outshp[1])
kshp = op_imshp[2:]
else:
raise NotImplementedError("Only [full,valid] modes are currently supported.")
# Flip the kernels
filters = filters[:, :, ::-1, ::-1]
dw = ConvOp(
imshp,
kshp,
nkern,
bsize,
1,
1,
output_mode="valid",
unroll_batch=None,
unroll_kern=None,
unroll_patch=None,
imshp_logical=imshp_logical,
kshp_logical=kshp_logical,
kshp_logical_top_aligned=kshp_logical_top_aligned,
direction_hint="bprop weights",
)
res = dw(img, filters)
copy_stack_trace(node.outputs[0], res)
if node.op.border_mode == "valid":
res = res.dimshuffle((1, 0, 2, 3))
res = res[:, :, ::-1, ::-1]
res = theano.tensor.patternbroadcast(res, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], res)
return [res]
@local_optimizer([AbstractConv2d_gradInputs])
def local_conv2d_gradinputs_cpu(fgraph, node):
if (
not isinstance(node.op, AbstractConv2d_gradInputs)
or node.inputs[0].dtype == "float16"
):
return None
kern, topgrad, shape = node.inputs
if not isinstance(kern.type, TensorType) or not isinstance(
topgrad.type, TensorType
):
return None
if node.op.border_mode not in ["full", "valid"]:
return None
if not node.op.filter_flip:
# Not tested yet
return None
if node.op.num_groups > 1 or node.op.unshared:
return None
# Conv 3d implementation, needed when subsample > 2
if node.op.border_mode == "valid" and node.op.subsample != (1, 1):
        # The op doesn't support that anymore.
return False
# Conv2d Implementation
dx, dy = node.op.subsample
if dx not in (1, 2) or dy not in (1, 2):
# Not implemented in the gradient of ConvOp
return None
if node.op.imshp is None:
op_imshp = (None, None, None, None)
else:
op_imshp = node.op.imshp
if node.op.kshp is None:
op_kshp = (None, None, None, None)
else:
op_kshp = node.op.kshp
if None in op_imshp or None in op_kshp:
if (dx, dy) != (1, 1):
return None
    # The input gradient of a "valid" convolution is computed with a "full"
    # convolution (and vice versa), hence the mode inversion here.
    mode = "valid"
    if node.op.border_mode != "full":
        mode = "full"
filters = kern.dimshuffle((1, 0, 2, 3))
filters = filters[:, :, ::-1, ::-1]
outshp = get_conv_output_shape(
op_imshp,
op_kshp,
node.op.border_mode,
node.op.subsample,
node.op.filter_dilation,
)[2:]
fulloutshp = get_conv_output_shape(op_imshp, op_kshp, node.op.border_mode, (1, 1))[
2:
]
nkern = op_imshp[1]
imshp = (op_kshp[0], outshp[0], outshp[1])
imshp_logical = (op_kshp[0], fulloutshp[0], fulloutshp[1])
din = ConvOp(
imshp,
op_kshp[2:],
nkern,
op_imshp[0],
1,
1,
output_mode=mode,
unroll_batch=None,
unroll_kern=None,
unroll_patch=None,
imshp_logical=imshp_logical,
kshp_logical=None,
version=-1,
direction_hint="bprop inputs",
)
din = din(topgrad, filters)
copy_stack_trace(node.outputs[0], din)
din = theano.tensor.patternbroadcast(din, node.outputs[0].broadcastable)
copy_stack_trace(node.outputs[0], din)
return [din]
# Register CPU optimizations
conv_groupopt = theano.gof.optdb.LocalGroupDB()
conv_groupopt.__name__ = "conv_opts"
register_specialize_device(conv_groupopt, "fast_compile", "fast_run")
# GEMM-based convolution
# It can be disabled by excluding 'conv_gemm'.
conv_groupopt.register(
"local_abstractconv_gemm",
local_abstractconv_gemm,
30,
"conv_gemm",
"fast_compile",
"fast_run",
)
conv_groupopt.register(
"local_abstractconv_gradweight_gemm",
local_abstractconv_gradweight_gemm,
30,
"conv_gemm",
"fast_compile",
"fast_run",
)
conv_groupopt.register(
"local_abstractconv_gradinputs_gemm",
local_abstractconv_gradinputs_gemm,
30,
"conv_gemm",
"fast_compile",
"fast_run",
)
conv_groupopt.register(
"local_abstractconv3d_gemm",
local_abstractconv3d_gemm,
30,
"conv_gemm",
"fast_compile",
"fast_run",
)
conv_groupopt.register(
"local_abstractconv3d_gradweight_gemm",
local_abstractconv3d_gradweight_gemm,
30,
"conv_gemm",
"fast_compile",
"fast_run",
)
conv_groupopt.register(
"local_abstractconv3d_gradinputs_gemm",
local_abstractconv3d_gradinputs_gemm,
30,
"conv_gemm",
"fast_compile",
"fast_run",
)
# Legacy convolution
conv_groupopt.register(
"local_conv2d_cpu", local_conv2d_cpu, 40, "fast_compile", "fast_run"
)
conv_groupopt.register(
"local_conv2d_gradweight_cpu",
local_conv2d_gradweight_cpu,
40,
"fast_compile",
"fast_run",
)
conv_groupopt.register(
"local_conv2d_gradinputs_cpu",
local_conv2d_gradinputs_cpu,
40,
"fast_compile",
"fast_run",
)
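# Disabling the GEMM-based path at compilation time is a matter of excluding
# its tag (a sketch, assuming the standard theano.compile Mode API):
#   from theano.compile import get_default_mode
#   f = theano.function([x], y, mode=get_default_mode().excluding("conv_gemm"))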
# Verify that no AbstractConv are present in the graph
@local_optimizer(
[
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs,
]
)
def local_abstractconv_check(fgraph, node):
if isinstance(
node.op,
(
AbstractConv2d,
AbstractConv2d_gradWeights,
AbstractConv2d_gradInputs,
AbstractConv3d,
AbstractConv3d_gradWeights,
AbstractConv3d_gradInputs,
),
):
raise LocalMetaOptimizerSkipAssertionError(
f"{node.op.__class__.__name__} Theano optimization failed: there is no implementation "
"available supporting the requested options. Did you exclude "
'both "conv_dnn" and "conv_gemm" from the optimizer? If on GPU, '
"is cuDNN available and does the GPU support it? If on CPU, "
"do you have a BLAS library installed Theano can link against? "
"On the CPU we do not support float16."
)
optdb.register(
"AbstractConvCheck",
in2out(local_abstractconv_check, name="AbstractConvCheck"),
48.7,
"fast_compile",
"fast_run",
)
|
from __future__ import unicode_literals, absolute_import, print_function
from django.contrib.syndication.views import Feed
from django.shortcuts import get_object_or_404
from biostar.apps.posts.models import Post
from biostar.apps.users.models import User
from biostar.apps.messages.models import Message
from biostar.apps.planet.models import BlogPost
from django.conf import settings
from django.contrib.sites.models import Site
from datetime import datetime, timedelta
import bleach
from biostar import const
SITE = Site.objects.get(id=settings.SITE_ID)
SITE_NAME = settings.SITE_NAME
FEED_COUNT = 25
def reduce_html(text):
if len(text) > 1500:
text = bleach.clean(text, strip=True)
text = text[:1500] + u' ... '
return text
def split(text):
text = ''.join(text.split())
rows = text.split('+')
return rows
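# e.g. split("job+blog") -> ['job', 'blog']; whitespace is stripped first.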
class PlanetFeed(Feed):
"Latest posts"
link = "/"
FEED_COUNT = 50
title = "%s Planet!" % SITE_NAME
description = "Latest 50 posts of the %s" % title
def item_title(self, item):
try:
title = u"%s (%s)" % (item.title, item.blog.title)
except Exception, exc:
title = item.title
return title
def item_description(self, item):
return item.content[:250]
def item_guid(self, obj):
return "%s" % obj.id
def items(self):
posts = BlogPost.objects.select_related("blog").order_by('-creation_date')
        return posts[:self.FEED_COUNT]
class PostBase(Feed):
"Forms the base class to any feed producing posts"
link = "/"
title = "title"
description = "description"
def item_title(self, item):
if item.type != Post.QUESTION:
return "%s: %s" % (item.get_type_display(), item.title)
else:
return item.title
def item_description(self, item):
return reduce_html(item.content)
def item_guid(self, obj):
return "%s" % obj.id
def item_pubdate(self, item):
return item.creation_date
class LatestFeed(PostBase):
"Latest posts"
title = "%s latest!" % SITE_NAME
description = "Latest 25 posts from the %s" % title
def items(self):
# Delay posts hours.
delay_time = const.now() - timedelta(hours=2)
posts = Post.objects.filter(type__in=Post.TOP_LEVEL, status=Post.OPEN, creation_date__lt=delay_time).exclude(type=Post.BLOG).order_by('-creation_date')
return posts[:FEED_COUNT]
class PostTypeFeed(PostBase):
TYPE_MAP = {
'job': Post.JOB, 'blog': Post.BLOG, 'question': Post.QUESTION,
'forum': Post.FORUM, 'page': Post.PAGE
}
def get_object(self, request, text):
words = split(text)
codes = [self.TYPE_MAP[word] for word in words if word in self.TYPE_MAP]
return codes, text
def description(self, obj):
code, text = obj
return "Activity on posts %s" % text
def title(self, obj):
return "Post Activity"
def items(self, obj):
codes, text = obj
posts = Post.objects.filter(type__in=codes).order_by('-creation_date')
return posts[:FEED_COUNT]
class PostFeed(PostBase):
def get_object(self, request, text):
return text
def description(self, obj):
return "Activity on posts %s" % obj
def title(self, obj):
return "Post Activity"
def items(self, text):
ids = split(text)
posts = Post.objects.filter(root_id__in=ids).order_by('-creation_date')
return posts[:FEED_COUNT]
class TagFeed(PostBase):
"Posts matching one or more tags"
def get_object(self, request, text):
elems = split(text)
return ",".join(elems)
def description(self, obj):
return "Posts that match %s" % obj
def title(self, obj):
return "Post Feed"
def items(self, obj):
posts = Post.objects.tag_search(obj)
return posts[:FEED_COUNT]
class UserFeed(PostBase):
def get_object(self, request, text):
return text
def description(self, obj):
return "Posts for users that match %s" % obj
def title(self, obj):
return "User Feed"
def items(self, text):
ids = split(text)
posts = Post.objects.filter(author__id__in=ids).order_by('-creation_date')
return posts[:FEED_COUNT]
|