| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import Axon
import Image
from Kamaelia.Chassis.Pipeline import Pipeline
from Kamaelia.File.UnixProcess import UnixProcess
class DirectoryWatcher(Axon.ThreadedComponent.threadedcomponent):
watch = "uploads"
def main(self):
S = None
while True:
N = os.stat(self.watch)
if S != N:
if S != None:
if S.st_mtime != N.st_mtime:
print "uploads changed, processing", S==N, list(S), list(N)
S = N
self.send(self.watch, "outbox")
else:
print "initialising, checking uploads", S==N, S, list(N)
S = N
self.send(self.watch, "outbox")
time.sleep(1)
class FileProcessor(Axon.Component.component):
Inboxes = {
"inbox": "-",
"control": "-",
"_unixprocessdone": "-",
}
def Inline(self, X, outbox="outbox", signal="signal", inbox="inbox", control="control"):
def Y(X, outbox,signal,inbox,control):
L1 = self.link((X, signal), (self, control))
L2 = self.link((X, outbox), (self, inbox))
X.activate()
yield 1
while not self.dataReady(control):
yield 1
self.recv(control)
self.unlink(L1)
self.unlink(L2)
del X
return Axon.Ipc.WaitComplete(Y(X,outbox,signal,inbox,control))
def system(self, command):
return self.Inline( UnixProcess(command+";sleep 0.2"), control="_unixprocessdone" )
def processfile(self, directory, filename):
print " ... processing:", filename
yield 1
def processfiles(self, directory):
print "Directory changed: ", directory
for filename in os.listdir(directory):
for i in self.processfile(directory, filename):
yield i
def main(self):
while True:
while not self.anyReady():
self.pause()
yield 1
for message in self.Inbox("inbox"):
for i in self.processfiles(message):
yield i
yield 1
class ImageTranscoder(FileProcessor):
destdir = "moderate"
sizes = {
"large" : 626,
"normal" : 466,
"medium" : 306,
"thumb" : 146,
"minithumb" : 66,
"microthumb" : 18,
}
def processfile(self, directory, filename):
thefile = filename[:filename.rfind(".")]
file_ending = filename[filename.rfind("."):]
print thefile
try:
os.makedirs( os.path.join( self.destdir , thefile ) )
except OSError:
return
sourcefile = os.path.join(directory, filename)
try:
X = Image.open(sourcefile)
size = X.size
X = None
side_size = min(*size)
except IOError:
return
for size in self.sizes:
if size == "microthumb":
dest_file1 = self.destdir + "/" + thefile + "/" + "nanothumb" + ".jpg"
dest_file2 = self.destdir + "/" + thefile + "/" + size + ".jpg"
print "convert %s -crop %dx%d+0+0 -resize 18x %s" % (sourcefile, side_size,side_size, dest_file1)
yield self.system( "convert %s -crop %dx%d+0+0 -resize 18x %s" % (sourcefile, side_size,side_size, dest_file1) )
print "convert %s -crop %dx%d+0+0 -resize 40x %s" % (sourcefile, side_size,side_size, dest_file2)
yield self.system( "convert %s -crop %dx%d+0+0 -resize 40x %s" % (sourcefile, side_size,side_size, dest_file2) )
else:
width = self.sizes[size]
                full_dest_filename = self.destdir + "/" + thefile + "/" + size + ".jpg"
resize_arg = "-resize %dx" % width
print "convert", sourcefile, resize_arg, full_dest_filename
yield self.system( " ".join( [ "convert", sourcefile, resize_arg, full_dest_filename ]) )
os.unlink(sourcefile)
class ImageMover(FileProcessor):
destdir = "/tmp"
def processfile(self, directory, filename):
extn = filename[filename.rfind("."):].lower()
if extn in [ ".jpg", ".jpeg", ".png", ".gif", ".bmp", ".ppm", ".pnm"]:
os.rename( os.path.join(directory, filename),
os.path.join(self.destdir, filename)
)
yield 1
class VideoTranscoder(FileProcessor):
destdir = "moderate"
conversion = "ffmpeg -i %(sourcefile)s %(deststem)s.flv"
template = "player-template.html"
def processfile(self, directory, filename):
thefile = filename[:filename.rfind(".")]
sourcefile = os.path.join(directory, filename)
command = self.conversion % {
"sourcefile" : sourcefile,
"deststem" : self.destdir + "/" + thefile,
}
yield self.system( command )
F = open(self.template)
t = F.read()
F.close()
X = t % {"videofile" : thefile + ".flv" }
F = open(self.destdir + "/" + thefile + ".html", "w")
F.write(X)
F.close()
os.unlink(sourcefile)
class VideoMover(FileProcessor):
destdir = "/tmp"
extensions = [ ".3gp", ".3gp2", ".3gpp", ".asf", ".asx", ".avi", ".dv",
".flv", ".m1v", ".m4e", ".m4u", ".m4v", ".mjp", ".moov",
".mov", ".movie", ".mp4", ".mpe", ".mpeg", ".mpg", ".qt",
".rm", ".swf", ".ts", ".wmv"]
def processfile(self, directory, filename):
extn = filename[filename.rfind("."):].lower()
if extn.lower() in self.extensions:
os.rename( os.path.join(directory, filename),
os.path.join(self.destdir, filename)
)
yield 1
def read_config(filename):
conf = {}
try:
F = open(filename)
for line in F:
            line = line.strip()
if line == "":
continue
if line[0] == "#":
continue
try:
key, value = line.split()
conf[key] = value
except:
print "BAD CONFIG LINE: ", repr(line)
F.close()
except:
print "General error parsing", filename
return conf
conf = {}
default_conf = {
"main_incoming_queue" : "/tmp/uploads",
"image_queue" : "/tmp/uploads/images",
"video_queue" : "/tmp/uploads/videos",
"image_moderation_queue" : "/tmp/moderate/images",
"video_moderation_queue" : "/tmp/moderate/videos",
}
local_def_conf = read_config("/etc/batch_converter.conf.dist")
local_conf = read_config("/etc/batch_converter.conf")
conf.update(default_conf)
conf.update(local_def_conf)
conf.update(local_conf)
Pipeline(
DirectoryWatcher(watch = conf["main_incoming_queue"]),
ImageMover(destdir = conf["image_queue"]),
).activate()
Pipeline(
DirectoryWatcher(watch = conf["image_queue"]),
ImageTranscoder(destdir = conf["image_moderation_queue"]),
).activate()
Pipeline(
DirectoryWatcher(watch = conf["main_incoming_queue"]),
VideoMover(destdir = conf["video_queue"]),
).activate()
Pipeline(
DirectoryWatcher(watch = conf["video_queue"]),
VideoTranscoder(destdir = conf["video_moderation_queue"]),
).run()
| sparkslabs/kamaelia_ | Code/Python/Apps/FileProcessor/App/BatchFileProcessor.py | Python | apache-2.0 | 8,474 |
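The DirectoryWatcher above detects changes by polling `os.stat().st_mtime` once a second and emitting the directory name when it moves. A minimal standalone sketch of that polling idea, assuming a plain callback in place of the Axon outbox (names here are hypothetical):

```python
import os
import time

def watch(path, on_change, interval=1.0):
    """Invoke on_change(path) whenever the directory's mtime changes."""
    last = None
    while True:
        st = os.stat(path)
        if last is None or st.st_mtime != last.st_mtime:
            on_change(path)   # the first pass counts as a change, as in the original
        last = st
        time.sleep(interval)

# watch("uploads", lambda p: print(p, "changed"))  # hypothetical usage
```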
from unittest import TestCase
import boto3
from botocore.exceptions import NoRegionError
from aq import BotoSqliteEngine
from aq.engines import get_resource_model_attributes
import os, tempfile
from nose.tools import eq_
class TestCommandLineArg(TestCase):
def setUp(self):
try:
del os.environ['AWS_PROFILE']
del os.environ['AWS_DEFAULT_REGION']
del os.environ['AWS_CONFIG_FILE']
del os.environ['AWS_SHARED_CREDENTIALS_FILE']
except:
pass
self.credential_file = tempfile.NamedTemporaryFile()
os.environ['AWS_SHARED_CREDENTIALS_FILE'] = self.credential_file.name
self.credential_file.write(
b'[profile_env]\n'
b'region=region-profile-env\n'
b'\n'
b'[profile_arg]\n'
b'region=region-profile-arg\n'
)
self.credential_file.flush()
self.config_file = tempfile.NamedTemporaryFile()
os.environ['AWS_CONFIG_FILE'] = self.config_file.name
self.config_file.write(
b'[default]\n'
b'region=region-config-default\n'
b'\n'
b'[config_env]\n'
b'region=region-config-env\n'
)
self.config_file.flush()
def test_command_line_arg_profile(self):
os.environ['AWS_PROFILE'] = 'profile_env'
os.environ['AWS_CONFIG_FILE'] = 'config_env'
os.environ['AWS_DEFAULT_REGION'] = 'region-env'
engine = BotoSqliteEngine({ '--profile': 'profile_arg' })
eq_(engine.boto3_session.profile_name, 'profile_arg')
def test_command_line_arg_region(self):
os.environ['AWS_PROFILE'] = 'profile_env'
os.environ['AWS_CONFIG_FILE'] = 'config_env'
os.environ['AWS_DEFAULT_REGION'] = 'region-env'
engine = BotoSqliteEngine({ '--region': 'region-arg' })
eq_(engine.boto3_session.region_name, 'region-arg')
def test_command_line_arg_none(self):
os.environ['AWS_PROFILE'] = 'profile_env'
os.environ['AWS_CONFIG_FILE'] = 'config_env'
os.environ['AWS_DEFAULT_REGION'] = 'region-env'
engine = BotoSqliteEngine({})
eq_(engine.boto3_session.profile_name, 'profile_env')
eq_(engine.boto3_session.region_name, 'region-env')
def test_command_line_arg_and_env_file_none(self):
del os.environ['AWS_CONFIG_FILE']
del os.environ['AWS_SHARED_CREDENTIALS_FILE']
engine = BotoSqliteEngine({})
eq_(engine.boto3_session.profile_name, 'default')
eq_(engine.boto3_session.region_name, 'us-east-1')
| lebinh/aq | tests/test_command_line_arg.py | Python | mit | 2,606 |
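These tests pin down boto3's precedence rules: an explicit constructor argument outranks `AWS_PROFILE` and `AWS_DEFAULT_REGION`. A minimal sketch of the same behaviour against boto3 directly (the profile names are placeholders that would normally exist in your AWS config files):

```python
import os
import boto3

os.environ['AWS_PROFILE'] = 'profile_env'
os.environ['AWS_DEFAULT_REGION'] = 'region-env'

# Explicit arguments win over the environment variables.
session = boto3.session.Session(profile_name='profile_arg',
                                region_name='region-arg')
assert session.profile_name == 'profile_arg'
assert session.region_name == 'region-arg'
```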
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_device_ntp
short_description: Manage NTP servers on a BIG-IP.
description:
- Manage NTP servers on a BIG-IP.
version_added: "2.2"
options:
ntp_servers:
description:
- A list of NTP servers to set on the device. At least one of C(ntp_servers)
or C(timezone) is required.
required: false
default: []
state:
description:
- The state of the NTP servers on the system. When C(present), guarantees
that the NTP servers are set on the system. When C(absent), removes the
specified NTP servers from the device configuration.
required: false
default: present
choices:
- absent
- present
timezone:
description:
- The timezone to set for NTP lookups. At least one of C(ntp_servers) or
C(timezone) is required.
default: UTC
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
authors:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = '''
- name: Set NTP server
bigip_device_ntp:
ntp_servers:
- "192.0.2.23"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: Set timezone
bigip_device_ntp:
password: "secret"
server: "lb.mydomain.com"
timezone: "America/Los_Angeles"
user: "admin"
validate_certs: "no"
delegate_to: localhost
'''
RETURN = '''
ntp_servers:
description: The NTP servers that were set on the device
returned: changed
type: list
sample: ["192.0.2.23", "192.0.2.42"]
timezone:
description: The timezone that was set on the device
returned: changed
type: string
sample: "true"
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'servers': 'ntp_servers'
}
api_attributes = [
'servers', 'timezone',
]
updatables = [
'ntp_servers', 'timezone'
]
returnables = [
'ntp_servers', 'timezone'
]
absentables = [
'ntp_servers'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self,
self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def _absent_changed_options(self):
changed = {}
for key in Parameters.absentables:
if getattr(self.want, key) is not None:
set_want = set(getattr(self.want, key))
set_have = set(getattr(self.have, key))
if set_want != set_have:
changed[key] = list(set_want)
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.update()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def should_absent(self):
result = self._absent_changed_options()
if result:
return True
return False
def absent(self):
self.have = self.read_current_from_device()
if not self.should_absent():
return False
if self.client.check_mode:
return True
self.absent_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
resource = self.client.api.tm.sys.ntp.load()
resource.update(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.ntp.load()
result = resource.attrs
return Parameters(result)
def absent_on_device(self):
params = self.changes.api_params()
resource = self.client.api.tm.sys.ntp.load()
resource.update(**params)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.argument_spec = dict(
ntp_servers=dict(
required=False,
default=None,
type='list',
),
timezone=dict(
required=False,
default=None,
)
)
self.required_one_of = [
['ntp_servers', 'timezone']
]
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name,
required_one_of=spec.required_one_of
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| mcgonagle/ansible_f5 | library_old/bigip_device_ntp.py | Python | apache-2.0 | 7,840 |
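The heart of the module above is `_update_changed_options`: diff the desired state ("want") against the device state ("have") and report changed only when a watched key differs. A standalone sketch of that pattern, with hypothetical names:

```python
def changed_options(want, have, updatables):
    """Collect the keys whose desired value differs from the current one."""
    changed = {}
    for key in updatables:
        if want.get(key) is not None and want.get(key) != have.get(key):
            changed[key] = want[key]
    return changed

# changed_options({'timezone': 'UTC', 'ntp_servers': None},
#                 {'timezone': 'EST'}, ['timezone', 'ntp_servers'])
# -> {'timezone': 'UTC'}   (None means "not managed", so it is skipped)
```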
#! /usr/bin/env python
#PyInvaders2 (c) 2018 by Karsten Lehmann
###############################################################################
# #
# This file is a part of PyInvaders2 #
# #
# PyInvaders2 is free software you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
"""
This software allows you to create level for PyInvaders2
"""
import pygame
import sys
import tkinter as tk
from tkinter import filedialog as tkFileDialog
from tkinter import messagebox as tkMessageBox
from os.path import dirname, abspath
import inspect
__author__ = "Karsten Lehmann"
__copyright__ = "Copyright 2018, Karsten Lehmann"
__license__ = "GPLv3"
__version__ = "2.1"
__maintainer__ = "Karsten Lehmann"
game_dir = dirname(
abspath(inspect.getfile(inspect.currentframe()))
)
def mouse_down(events):
for event in events:
if event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1:
return True
def mouse_over(x_vals, y_vals, mouse_pos):
if (x_vals[0] <= mouse_pos[0] <= x_vals[1] and
y_vals[0] <= mouse_pos[1] <= y_vals[1] and
pygame.mouse.get_focused()):
return True
class Button(object):
def __init__(self, image_p, image_a, position):
self.surface_p = pygame.image.load(image_p)
self.surface_p.convert_alpha()
self.surface_a = pygame.image.load(image_a)
self.surface_a.convert_alpha()
self.size = self.surface_a.get_size()
self.position = position
self.screen = pygame.display.get_surface()
self.range = ((self.position[0], self.position[0] + self.size[0]),
(self.position[1], self.position[1] + self.size[1]))
def add_text(self, text, font, colour):
font = font.render(text, 8, colour)
font_position = (int(self.size[0] * 0.5 - font.get_size()[0] / 2),
int(self.size[1] * 0.5 - font.get_size()[1] / 2))
self.surface_a.blit(font, font_position)
self.surface_p.blit(font, font_position)
def handle(self, events, mouse_pos):
if mouse_over(self.range[0], self.range[1], mouse_pos):
self.screen.blit(self.surface_a, self.position)
if mouse_down(events):
return True
else:
self.screen.blit(self.surface_p, self.position)
class LevelCreator(object):
def __init__(self):
pygame.init()
self.screen = pygame.display.set_mode((800, 400))
self.fps_clock = pygame.time.Clock()
self.fps = 30
self.font = pygame.font.Font(
game_dir + "/textures/game_font.ttf", 50
)
def main(self):
self.selection_screen()
def selection_screen(self):
pygame.mouse.set_visible(True)
button_create = Button(
game_dir + "/gfx/button_p.png",
game_dir + "/gfx/button_a.png",
(25, 25)
)
button_create.add_text("Create Level", self.font, (0, 0, 0))
button_load = Button(
game_dir + "/gfx/button_p.png",
game_dir + "/gfx/button_a.png",
(425, 25)
)
button_load.add_text("Load Level", self.font, (0, 0, 0))
while True:
mouse_position = pygame.mouse.get_pos()
event_list = pygame.event.get()
self.check_for_exit(event_list)
self.screen.fill((55, 55, 55))
if button_create.handle(event_list, mouse_position):
self.edit_screen()
if button_load.handle(event_list, mouse_position):
lines = self.open_file()
self.edit_screen(lines)
pygame.display.update()
self.fps_clock.tick(self.fps)
def edit_screen(self, lines=None):
button_back = Button(
game_dir + "/gfx/button_flat_p.png",
game_dir + "/gfx/button_flat_a.png",
(25, 275)
)
button_back.add_text("Back", self.font, (0, 0, 0))
button_save = Button(
game_dir + "/gfx/button_flat_p.png",
game_dir + "/gfx/button_flat_a.png",
(425, 275)
)
button_save.add_text("Save", self.font, (0, 0, 0))
surface_empty = pygame.image.load(game_dir + "/gfx/empty.png")
surface_invader = pygame.image.load(game_dir + "/gfx/invader.png")
if lines is None:
lines = [[], [], [], [], []]
for i in range(5):
for j in range(19):
lines[i].append(False)
while True:
mouse_position = pygame.mouse.get_pos()
event_list = pygame.event.get()
self.check_for_exit(event_list)
self.screen.fill((55, 55, 55))
for i in range(5):
for j in range(19):
position = 38 * j + 39, 38 * i + 55
if lines[i][j]:
self.screen.blit(surface_invader, position)
else:
self.screen.blit(surface_empty, position)
if mouse_down(event_list):
line = int(round((mouse_position[1] - 71) / 38.0))
number = int(round((mouse_position[0] - 55) / 38.0))
if number < 19 and line < 5:
lines[line][number] = not lines[line][number]
if button_back.handle(event_list, mouse_position):
break
if button_save.handle(event_list, mouse_position):
self.save_file(lines)
pygame.display.update()
self.fps_clock.tick(self.fps)
def check_for_exit(self, events):
"""test if the window gets closed and exit the game"""
for event in events:
if event.type == pygame.QUIT:
print('EXIT')
sys.exit()
def open_file(self):
window = tk.Tk() #setup main_window
window.wm_withdraw() #set main_window to invisible
level_file = tkFileDialog.askopenfilename()
window.destroy() #close main_window
if level_file == '':
return None
lines = [[], [], [], [], []]
num_lines = sum(1 for line in open(level_file))
if num_lines != 5:
self.messagebox("Invalid level_file")
sys.exit()
level_file = open(level_file, 'r')
for line, line_number in zip(level_file, range(5)):
for letter, number in zip(line, range(19)):
if letter == '#':
lines[line_number].append(True)
else:
lines[line_number].append(False)
level_file.close()
return lines
def save_file(self, lines):
window = tk.Tk() #setup main_window
window.wm_withdraw() #set main_window to invisible
file_name = tkFileDialog.asksaveasfilename()
window.destroy() #close main_window
if file_name == '':
return None
level_file = open(file_name, "w")
for line in lines:
for place in line:
if place:
level_file.write("#")
else:
level_file.write("0")
level_file.write('\n')
def messagebox(self, message):
"""Opens a simple window with the message"""
window = tk.Tk() #setup main_window
window.wm_withdraw() #set main_window to invisible
tkMessageBox.showinfo("Info", message)
window.destroy() #close main_window
| kalehmann/PyInvaders2 | pyinvaders2/level_creator.py | Python | gpl-3.0 | 8,701 |
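The level files written by `save_file` and read by `open_file` are five rows of nineteen characters, `#` for an invader and `0` for an empty slot. A self-contained sketch of that round trip:

```python
def dump_level(lines):
    """Serialize a 5x19 grid of booleans into the '#'/'0' text format."""
    return '\n'.join(''.join('#' if cell else '0' for cell in row)
                     for row in lines) + '\n'

def load_level(text):
    """Parse the text format back into a grid of booleans."""
    return [[ch == '#' for ch in row] for row in text.splitlines()]

grid = [[False] * 19 for _ in range(5)]
grid[0][0] = True
assert load_level(dump_level(grid)) == grid
```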
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
filament_watch_uc.py
Interface to microcontroller connected to encoder
"""
##############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Richard L. Lynch <rich@richlynch.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
##############################################################################
import logging
import serial
class ArduinoInterface(object):
'''Class to interface with Arduino running filament watch'''
def __init__(self, dev, baudrate, recent_length):
self.port = serial.Serial(dev, baudrate=baudrate, timeout=10.5)
self.recent_length = recent_length
self.recent_pos = None
self.offset = 0
self.logger = logging.getLogger(__name__)
def get_pos_change(self):
'''Get current absolute position and position change'''
rcv = self.port.readline().decode('utf-8', 'ignore')
lines = rcv.replace('\r', '').split('\n')
lines = [l.strip() for l in lines if l.strip() != '']
pos = None
if len(lines) > 0:
if lines[-1]:
try:
pos = int(lines[-1]) + self.offset
except ValueError:
self.logger.error('Invalid serial data: "%s"', lines[-1])
if pos != None:
if self.recent_pos is None:
self.recent_pos = [pos] * self.recent_length
change = pos - self.recent_pos[-1]
if change > 32768:
self.offset -= 65536
pos -= 65536
self.logger.debug('New offset is %d', self.offset)
if change < -32768:
self.offset += 65536
pos += 65536
self.logger.debug('New offset is %d', self.offset)
self.recent_pos.append(pos)
self.recent_pos.pop(0)
change = pos - self.recent_pos[0]
change = abs(change) / len(self.recent_pos)
return [pos, change]
return [None, None]
| rllynch/filament_watch | filament_watch/microcontroller_if.py | Python | mit | 3,107 |
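`get_pos_change` unwraps a 16-bit hardware counter: when successive readings jump by more than half the range, it folds the value back by 65536 and remembers the accumulated offset. The same logic as a standalone function:

```python
def unwrap(prev_pos, raw, offset):
    """Return (absolute position, new offset) for a 16-bit counter reading."""
    pos = raw + offset
    change = pos - prev_pos
    if change > 32768:        # counter wrapped downward
        offset -= 65536
        pos -= 65536
    elif change < -32768:     # counter wrapped upward
        offset += 65536
        pos += 65536
    return pos, offset

pos, off = unwrap(prev_pos=65530, raw=2, offset=0)
assert (pos, off) == (65538, 65536)   # 2 + 65536: the counter rolled over
```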
from django.shortcuts import render
from .forms import SearchForm, ClassifyForm
from whoosh.qparser import MultifieldParser, QueryParser
from whoosh import index as i
from whoosh import scoring
import whoosh.query as QRY
import time
import pandas as pd
from datetime import datetime
from indexing.crawl import crawl_and_update
from classification.classify import Classification
from sklearn.externals import joblib
from django.contrib.staticfiles.templatetags.staticfiles import static
INDEX_FILE = '/Users/noopurjain/Desktop/Index'
WRITE_FILE = '/Users/noopurjain/Desktop/Trial_2'
CLASSIFICATION_PATH = '/mnt/d/model_files_new_with_voting_with_weights/'
def show(request):
if request.method == 'POST':
overview = request.POST.get('overview')
title = request.POST.get('title')
poster_path = request.POST.get('poster_path')
id = request.POST.get('imdb_id')
print (id)
ix = i.open_dir(INDEX_FILE)
searcher = ix.searcher()
docnum = searcher.document_number(imdb_id=id)
recoms = searcher.more_like(docnum,'overview')
return render(request, 'frontend/show.html', {'overview': overview, 'title': title, 'poster_path': poster_path, 'recommendations': recoms})
def index(request):
if request.method == 'GET':
form = SearchForm(request.GET)
if form.is_valid():
search_field = form.cleaned_data['search_field']
query = form.cleaned_data['search_text']
rating = request.GET.get("rating")
year = request.GET.get("year")
query = query.replace('+', ' AND ').replace('-', ' NOT ')
filter_q = None
# TODO: Change Directory here
ix = i.open_dir(INDEX_FILE)
start_time = time.time()
if query is not None and query != u"":
parser = MultifieldParser(search_field, schema=ix.schema)
if year!=None and rating!=None:
date_q = QRY.DateRange("release_date", datetime.strptime(year.split(",")[0], "%Y"),\
datetime.strptime(year.split(",")[1], "%Y"))
rating_q = QRY.NumericRange("vote_average",int(rating.split(",")[0]), int(rating.split(",")[1]))
filter_q = QRY.Require(date_q, rating_q)
else:
year = "1970,2017"
rating = "2,8"
try:
qry = parser.parse(query)
except:
qry = None
return render(request, 'frontend/index.html', {'error': True, 'message':"Query is null!", 'form':form})
if qry is not None:
searcher = ix.searcher(weighting=scoring.TF_IDF())
corrected = searcher.correct_query(qry, query)
if corrected.query != qry:
return render(request, 'frontend/index.html', {'search_field': search_field, 'correction': True, 'suggested': corrected.string, 'form': form})
hits = searcher.search(qry,filter=filter_q,limit=None)
elapsed_time = time.time() - start_time
elapsed_time = "{0:.3f}".format(elapsed_time)
return render(request, 'frontend/index.html', {'search_field': search_field, 'search_text': form.cleaned_data['search_text'], \
'error': False, 'hits': hits, 'form':form, 'elapsed': elapsed_time,\
'number': len(hits), 'year': year, 'rating': rating})
else:
return render(request, 'frontend/index.html', {'error': True, 'message':"Sorry couldn't parse", 'form':form})
else:
return render(request, 'frontend/index.html', {'error': True, 'message':'oops', 'form':form})
else:
form = SearchForm()
return render(request, 'frontend/index.html', {'form': form})
def classification(request):
results_dict = Classification(CLASSIFICATION_PATH).get_classification_results()
results = pd.DataFrame(results_dict)
for column in ['romance','crime','horror']:
results[column] = results[column].apply(lambda x: str((int(x.split('/')[0]) * 100)/int(x.split('/')[1]))+" %")
results.columns = ['F(1) Score', 'F(W) Score', 'Recall', 'Accuracy', 'Crime', 'Horror', 'Model', 'Precision', 'Romance','Vectorizer']
results = results[['Model','Vectorizer', 'Crime', 'Horror', 'Romance', 'F(1) Score', 'F(W) Score', 'Recall', 'Accuracy', 'Precision']]
results = results.to_html
if request.method == "POST":
form = ClassifyForm(request.POST)
if form.is_valid():
plot = form.cleaned_data['classify_plot']
genre, time = Classification(CLASSIFICATION_PATH).Classify_Text(plot)
return render(request, 'frontend/classify.html', {'results': results, 'form': form, 'genre': genre[0], 'time': time})
else:
return render(request, 'frontend/classify.html', {'results': results, 'form': form})
else:
form = ClassifyForm()
return render(request, 'frontend/classify.html', {'results': results, 'form': form})
def crawl(request):
if request.method == "GET":
form = SearchForm(request.GET)
date_now = datetime.now()
search_field = request.GET.get('search_field')
query = request.GET.get('search_text')
ix = i.open_dir(INDEX_FILE)
parser = QueryParser("release_date", schema=ix.schema)
qry = parser.parse(date_now.strftime("%Y-%m-%d"))
searcher = ix.searcher()
hits = searcher.search(qry, limit=1)
print (len(hits))
if (len(hits)==0):
# send new records directory to the indexing function to add them to the index
total_records = crawl_and_update(date_now, WRITE_FILE, INDEX_FILE)
else:
total_records = "Already up-to-date"
return render(request, 'frontend/crawl.html', {'total_records': total_records, 'form': form})
| BhavyaLight/information-retrival-search-engine | informationRetrival/frontend/views.py | Python | mit | 6,155 |
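The views above lean on Whoosh for parsing, spell correction, and more-like-this. A minimal, self-contained Whoosh round trip in the same shape, using an in-memory index as a stand-in for INDEX_FILE:

```python
from whoosh.fields import Schema, TEXT, ID
from whoosh.filedb.filestore import RamStorage
from whoosh.qparser import MultifieldParser

schema = Schema(imdb_id=ID(stored=True), title=TEXT(stored=True),
                overview=TEXT(stored=True))
ix = RamStorage().create_index(schema)

writer = ix.writer()
writer.add_document(imdb_id=u'tt0000001', title=u'Example',
                    overview=u'a searchable plot summary')
writer.commit()

with ix.searcher() as searcher:
    qry = MultifieldParser(['title', 'overview'], schema=ix.schema).parse(u'plot')
    print([hit['title'] for hit in searcher.search(qry)])   # ['Example']
```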
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-06-16 12:12
from __future__ import unicode_literals
import autoslug.fields
from django.db import migrations
import webconference.models
class Migration(migrations.Migration):
dependencies = [
('webconference', '0003_auto_20170323_1535'),
]
operations = [
migrations.AddField(
model_name='webconference',
name='jitsi_slug',
field=autoslug.fields.AutoSlugField(editable=False, null=True, populate_from='name', slugify=webconference.models.slugify_jitsi, unique=True, verbose_name='Jitsi Slug'),
),
]
| amadeusproject/amadeuslms | webconference/migrations/0004_webconference_jitsi_slug.py | Python | gpl-2.0 | 641 |
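For context, the model declaration migration 0004 implies would look roughly like the sketch below, assuming django-autoslug; `slugify_jitsi` is the project's own function in `webconference.models` and is only stubbed here, and the class needs a configured Django app to import:

```python
from autoslug import AutoSlugField
from django.db import models

def slugify_jitsi(value):
    return value.lower().replace(' ', '-')   # placeholder implementation

class Webconference(models.Model):
    name = models.CharField(max_length=200)
    jitsi_slug = AutoSlugField(populate_from='name', slugify=slugify_jitsi,
                               unique=True, null=True, editable=False,
                               verbose_name='Jitsi Slug')
```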
# encoding: UTF-8
'''
GUI widgets for the risk management module
'''
from vnpy.trader.app.riskManager.language import text
from vnpy.trader.uiBasicWidget import QtWidgets
########################################################################
class RmSpinBox(QtWidgets.QSpinBox):
"""调整参数用的数值框"""
#----------------------------------------------------------------------
def __init__(self, value):
"""Constructor"""
super(RmSpinBox, self).__init__()
self.setMinimum(0)
self.setMaximum(1000000)
self.setValue(value)
########################################################################
class RmLine(QtWidgets.QFrame):
"""水平分割线"""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
super(RmLine, self).__init__()
self.setFrameShape(self.HLine)
self.setFrameShadow(self.Sunken)
########################################################################
class RmEngineManager(QtWidgets.QWidget):
"""风控引擎的管理组件"""
#----------------------------------------------------------------------
def __init__(self, rmEngine, eventEngine, parent=None):
"""Constructor"""
super(RmEngineManager, self).__init__(parent)
self.rmEngine = rmEngine
self.eventEngine = eventEngine
self.initUi()
self.updateEngineStatus()
#----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle(text.RISK_MANAGER)
        # set up the interface
self.buttonSwitchEngineStatus = QtWidgets.QPushButton(text.RISK_MANAGER_STOP)
self.spinOrderFlowLimit = RmSpinBox(self.rmEngine.orderFlowLimit)
self.spinOrderFlowClear = RmSpinBox(self.rmEngine.orderFlowClear)
self.spinOrderSizeLimit = RmSpinBox(self.rmEngine.orderSizeLimit)
self.spinTradeLimit = RmSpinBox(self.rmEngine.tradeLimit)
self.spinWorkingOrderLimit = RmSpinBox(self.rmEngine.workingOrderLimit)
self.spinOrderCancelLimit = RmSpinBox(self.rmEngine.orderCancelLimit)
buttonClearOrderFlowCount = QtWidgets.QPushButton(text.CLEAR_ORDER_FLOW_COUNT)
buttonClearTradeCount = QtWidgets.QPushButton(text.CLEAR_TOTAL_FILL_COUNT)
buttonSaveSetting = QtWidgets.QPushButton(text.SAVE_SETTING)
Label = QtWidgets.QLabel
grid = QtWidgets.QGridLayout()
grid.addWidget(Label(text.WORKING_STATUS), 0, 0)
grid.addWidget(self.buttonSwitchEngineStatus, 0, 1)
grid.addWidget(RmLine(), 1, 0, 1, 2)
grid.addWidget(Label(text.ORDER_FLOW_LIMIT), 2, 0)
grid.addWidget(self.spinOrderFlowLimit, 2, 1)
grid.addWidget(Label(text.ORDER_FLOW_CLEAR), 3, 0)
grid.addWidget(self.spinOrderFlowClear, 3, 1)
grid.addWidget(RmLine(), 4, 0, 1, 2)
grid.addWidget(Label(text.ORDER_SIZE_LIMIT), 5, 0)
grid.addWidget(self.spinOrderSizeLimit, 5, 1)
grid.addWidget(RmLine(), 6, 0, 1, 2)
grid.addWidget(Label(text.TOTAL_TRADE_LIMIT), 7, 0)
grid.addWidget(self.spinTradeLimit, 7, 1)
grid.addWidget(RmLine(), 8, 0, 1, 2)
grid.addWidget(Label(text.WORKING_ORDER_LIMIT), 9, 0)
grid.addWidget(self.spinWorkingOrderLimit, 9, 1)
grid.addWidget(RmLine(), 10, 0, 1, 2)
grid.addWidget(Label(text.CONTRACT_CANCEL_LIMIT), 11, 0)
grid.addWidget(self.spinOrderCancelLimit, 11, 1)
hbox = QtWidgets.QHBoxLayout()
hbox.addWidget(buttonClearOrderFlowCount)
hbox.addWidget(buttonClearTradeCount)
hbox.addStretch()
hbox.addWidget(buttonSaveSetting)
vbox = QtWidgets.QVBoxLayout()
vbox.addLayout(grid)
vbox.addLayout(hbox)
self.setLayout(vbox)
        # connect widget signals
self.spinOrderFlowLimit.valueChanged.connect(self.rmEngine.setOrderFlowLimit)
self.spinOrderFlowClear.valueChanged.connect(self.rmEngine.setOrderFlowClear)
self.spinOrderSizeLimit.valueChanged.connect(self.rmEngine.setOrderSizeLimit)
self.spinTradeLimit.valueChanged.connect(self.rmEngine.setTradeLimit)
self.spinWorkingOrderLimit.valueChanged.connect(self.rmEngine.setWorkingOrderLimit)
self.spinOrderCancelLimit.valueChanged.connect(self.rmEngine.setOrderCancelLimit)
        self.buttonSwitchEngineStatus.clicked.connect(self.switchEngineStatus)
buttonClearOrderFlowCount.clicked.connect(self.rmEngine.clearOrderFlowCount)
buttonClearTradeCount.clicked.connect(self.rmEngine.clearTradeCount)
buttonSaveSetting.clicked.connect(self.rmEngine.saveSetting)
        # set to a fixed size
self.setFixedSize(self.sizeHint())
#----------------------------------------------------------------------
    def switchEngineStatus(self):
        """Toggle the risk engine on/off"""
self.rmEngine.switchEngineStatus()
self.updateEngineStatus()
#----------------------------------------------------------------------
def updateEngineStatus(self):
"""更新引擎状态"""
if self.rmEngine.active:
self.buttonSwitchEngineStatus.setText(text.RISK_MANAGER_RUNNING)
else:
self.buttonSwitchEngineStatus.setText(text.RISK_MANAGER_STOP)
| cmbclh/vnpy1.7 | vnpy/trader/app/riskManager/uiRmWidget.py | Python | mit | 5,492 |
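The widget wires every spin box's `valueChanged` signal straight to a setter on the engine. The same pattern in plain PyQt5 (vnpy's `QtWidgets` wrapper is assumed to behave identically):

```python
import sys
from PyQt5 import QtWidgets

app = QtWidgets.QApplication(sys.argv)
spin = QtWidgets.QSpinBox()
spin.setRange(0, 1000000)
spin.setValue(50)
spin.valueChanged.connect(lambda v: print('limit set to', v))
spin.show()
# app.exec_()  # start the event loop to interact with the spin box
```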
import java_lang_System
import java_lang_Object
import java_lang_Class
import java_lang_Float
import java_lang_String
import java_lang_Throwable
import java_lang_Double
import java_lang_Thread
import java_lang_Runtime
import java_security_AccessController
import sun_misc_VM
import sun_misc_Unsafe
import sun_reflect_Reflection
classes_with_natives = {
'java/lang/Object': java_lang_Object,
'java/lang/Class': java_lang_Class,
'java/lang/System': java_lang_System,
'java/lang/Float': java_lang_Float,
'java/lang/Throwable': java_lang_Throwable,
'java/lang/Double': java_lang_Double,
'java/lang/Thread': java_lang_Thread,
'java/lang/String': java_lang_String,
'java/lang/Runtime': java_lang_Runtime,
'java/security/AccessController': java_security_AccessController,
'sun/misc/VM': sun_misc_VM,
'sun/misc/Unsafe': sun_misc_Unsafe,
'sun/reflect/Reflection': sun_reflect_Reflection,
}
primitive_classes = {}
| MrHamdulay/myjvm | klasses/__init__.py | Python | mit | 1,010 |
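A dict like `classes_with_natives` is a dispatch table: resolve the module by JVM class name, then the native implementation by method name. A sketch of how a consumer might look one up (the lookup convention here is an assumption, not myjvm's actual code):

```python
def resolve_native(classes_with_natives, class_name, method_name):
    """Find the Python function implementing a JVM native method."""
    module = classes_with_natives.get(class_name)
    if module is None:
        raise KeyError('no natives registered for %s' % class_name)
    return getattr(module, method_name)

# resolve_native(classes_with_natives, 'java/lang/System', 'arraycopy')  # hypothetical
```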
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import copy
import memcache
from keystone.common import utils
from keystone import config
from keystone import exception
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.openstack.common import timeutils
from keystone import token
CONF = config.CONF
LOG = log.getLogger(__name__)
class Token(token.Driver):
revocation_key = 'revocation-list'
def __init__(self, client=None):
self._memcache_client = client
@property
def client(self):
return self._memcache_client or self._get_memcache_client()
def _get_memcache_client(self):
memcache_servers = CONF.memcache.servers
# NOTE(morganfainberg): The memcache client library for python is NOT
# thread safe and should not be passed between threads. This is highly
# specific to the cas() (compare and set) methods and the caching of
# the previous value(s). It appears greenthread should ensure there is
# a single data structure per spawned greenthread.
self._memcache_client = memcache.Client(memcache_servers, debug=0,
cache_cas=True)
return self._memcache_client
def _prefix_token_id(self, token_id):
return 'token-%s' % token_id.encode('utf-8')
def _prefix_user_id(self, user_id):
return 'usertokens-%s' % user_id.encode('utf-8')
def get_token(self, token_id):
if token_id is None:
raise exception.TokenNotFound(token_id='')
ptk = self._prefix_token_id(token_id)
token_ref = self.client.get(ptk)
if token_ref is None:
raise exception.TokenNotFound(token_id=token_id)
return token_ref
def create_token(self, token_id, data):
data_copy = copy.deepcopy(data)
ptk = self._prefix_token_id(token_id)
if not data_copy.get('expires'):
data_copy['expires'] = token.default_expire_time()
if not data_copy.get('user_id'):
data_copy['user_id'] = data_copy['user']['id']
kwargs = {}
if data_copy['expires'] is not None:
expires_ts = utils.unixtime(data_copy['expires'])
kwargs['time'] = expires_ts
self.client.set(ptk, data_copy, **kwargs)
if 'id' in data['user']:
token_data = jsonutils.dumps(token_id)
user_id = data['user']['id']
user_key = self._prefix_user_id(user_id)
# Append the new token_id to the token-index-list stored in the
# user-key within memcache.
self._update_user_list_with_cas(user_key, token_data)
return copy.deepcopy(data_copy)
def _update_user_list_with_cas(self, user_key, token_id):
cas_retry = 0
max_cas_retry = CONF.memcache.max_compare_and_set_retry
current_time = timeutils.normalize_time(
timeutils.parse_isotime(timeutils.isotime()))
self.client.reset_cas()
while cas_retry <= max_cas_retry:
# NOTE(morganfainberg): cas or "compare and set" is a function of
# memcache. It will return false if the value has changed since the
# last call to client.gets(). This is the memcache supported method
# of avoiding race conditions on set(). Memcache is already atomic
# on the back-end and serializes operations.
#
# cas_retry is for tracking our iterations before we give up (in
# case memcache is down or something horrible happens we don't
# iterate forever trying to compare and set the new value.
cas_retry += 1
record = self.client.gets(user_key)
filtered_list = []
if record is not None:
token_list = jsonutils.loads('[%s]' % record)
for token_i in token_list:
ptk = self._prefix_token_id(token_i)
token_ref = self.client.get(ptk)
if not token_ref:
# skip tokens that do not exist in memcache
continue
if 'expires' in token_ref:
expires_at = timeutils.normalize_time(
token_ref['expires'])
if expires_at < current_time:
# skip tokens that are expired.
continue
# Add the still valid token_id to the list.
filtered_list.append(jsonutils.dumps(token_i))
# Add the new token_id to the list.
filtered_list.append(token_id)
# Use compare-and-set (cas) to set the new value for the
# token-index-list for the user-key. Cas is used to prevent race
# conditions from causing the loss of valid token ids from this
# list.
if self.client.cas(user_key, ','.join(filtered_list)):
msg = _('Successful set of token-index-list for user-key '
'"%(user_key)s", #%(count)d records')
LOG.debug(msg, {'user_key': user_key,
'count': len(filtered_list)})
return filtered_list
# The cas function will return true if it succeeded or false if it
# failed for any reason, including memcache server being down, cas
# id changed since gets() called (the data changed between when
# this loop started and this point, etc.
error_msg = _('Failed to set token-index-list for user-key '
'"%(user_key)s". Attempt %(cas_retry)d of '
'%(cas_retry_max)d')
LOG.debug(error_msg,
{'user_key': user_key,
'cas_retry': cas_retry,
'cas_retry_max': max_cas_retry})
# Exceeded the maximum retry attempts.
error_msg = _('Unable to add token user list')
raise exception.UnexpectedError(error_msg)
def _add_to_revocation_list(self, data):
data_json = jsonutils.dumps(data)
if not self.client.append(self.revocation_key, ',%s' % data_json):
if not self.client.add(self.revocation_key, data_json):
if not self.client.append(self.revocation_key,
',%s' % data_json):
msg = _('Unable to add token to revocation list.')
raise exception.UnexpectedError(msg)
def delete_token(self, token_id):
# Test for existence
data = self.get_token(token_id)
ptk = self._prefix_token_id(token_id)
result = self.client.delete(ptk)
self._add_to_revocation_list(data)
return result
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
return super(Token, self).delete_tokens(
user_id=user_id,
tenant_id=tenant_id,
trust_id=trust_id,
consumer_id=consumer_id,
)
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
tokens = []
user_key = self._prefix_user_id(user_id)
user_record = self.client.get(user_key) or ""
token_list = jsonutils.loads('[%s]' % user_record)
for token_id in token_list:
ptk = self._prefix_token_id(token_id)
token_ref = self.client.get(ptk)
if token_ref:
if tenant_id is not None:
tenant = token_ref.get('tenant')
if not tenant:
continue
if tenant.get('id') != tenant_id:
continue
if trust_id is not None:
trust = token_ref.get('trust_id')
if not trust:
continue
if trust != trust_id:
continue
if consumer_id is not None:
try:
oauth = token_ref['token_data']['token']['OS-OAUTH1']
if oauth.get('consumer_id') != consumer_id:
continue
except KeyError:
continue
tokens.append(token_id)
return tokens
def list_revoked_tokens(self):
list_json = self.client.get(self.revocation_key)
if list_json:
return jsonutils.loads('[%s]' % list_json)
return []
def flush_expired_tokens(self):
"""Archive or delete tokens that have expired.
"""
raise exception.NotImplemented()
| derekchiang/keystone | keystone/token/backends/memcache.py | Python | apache-2.0 | 9,447 |
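The backend's `_update_user_list_with_cas` is a classic python-memcached compare-and-set loop: `gets()` records a cas id, and `cas()` refuses to write if the value changed in between. A minimal sketch of that loop (requires a reachable memcached; the server address is a placeholder):

```python
import memcache

def append_with_cas(client, key, item, max_retry=5):
    """Append item to a comma-separated list value without losing writes."""
    for _ in range(max_retry):
        client.reset_cas()
        record = client.gets(key) or ''
        values = [v for v in record.split(',') if v]
        values.append(item)
        # cas() returns False if another writer touched the key since gets()
        if client.cas(key, ','.join(values)):
            return values
    raise RuntimeError('CAS update failed after %d attempts' % max_retry)

# client = memcache.Client(['127.0.0.1:11211'], cache_cas=True)
# append_with_cas(client, 'usertokens-abc', 'token-id')
```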
import requests
from requests_oauthlib import OAuth1
from . import __version__
from .compat import json, parse_qsl, urlencode, urlparse
from .exceptions import TumblpyAuthError, TumblpyError
from .helpers import _split_params_and_files
class Tumblpy(object):
def __init__(self, app_key=None, app_secret=None, oauth_token=None,
oauth_token_secret=None, headers=None, proxies=None):
# Define some API URLs real quick
self.base_api_url = 'https://api.tumblr.com'
self.api_version = 'v2'
self.api_url = '%s/%s/' % (self.base_api_url, self.api_version)
# Authentication URLs
self.request_token_url = 'https://www.tumblr.com/oauth/request_token'
self.access_token_url = 'https://www.tumblr.com/oauth/access_token'
self.authorize_url = 'https://www.tumblr.com/oauth/authorize'
self.authenticate_url = 'https://www.tumblr.com/oauth/authorize'
self.default_params = {'api_key': app_key}
req_headers = {'User-Agent': 'Tumblpy v' + __version__}
if headers:
req_headers.update(headers)
self.app_key = app_key
self.app_secret = app_secret
self.oauth_token = oauth_token
self.oauth_token_secret = oauth_token_secret
auth = None
if self.app_key and self.app_secret:
if not self.oauth_token and not self.oauth_token_secret:
auth = OAuth1(self.app_key, self.app_secret)
else:
auth = OAuth1(self.app_key, self.app_secret,
self.oauth_token, self.oauth_token_secret)
self.client = requests.Session()
self.client.proxies = proxies
self.client.headers = req_headers
self.client.auth = auth
def get_authentication_tokens(self, callback_url=None):
"""Returns a dict including an authorization URL (auth_url) to direct a user to
:param callback_url: (optional) Url the user is returned to after they authorize your app (web clients only)
"""
request_args = {}
if callback_url:
request_args['oauth_callback'] = callback_url
response = self.client.get(self.request_token_url, params=request_args)
if response.status_code != 200:
raise TumblpyAuthError('Seems something couldn\'t be verified with your OAuth junk. Error: %s, Message: %s' % (response.status_code, response.content))
res = response.content
if isinstance( response.content, bytes ):
res = res.decode()
request_tokens = dict(parse_qsl(res))
if not request_tokens:
raise TumblpyError('Unable to decode request tokens.')
auth_url_params = {
'oauth_token': request_tokens['oauth_token'],
}
if callback_url:
auth_url_params['oauth_callback'] = callback_url
request_tokens['auth_url'] = self.authenticate_url + '?' + urlencode(auth_url_params)
return request_tokens
def get_authorized_tokens(self, oauth_verifier):
"""Returns authorized tokens after they go through the auth_url phase.
"""
response = self.client.get(self.access_token_url,
params={'oauth_verifier': oauth_verifier})
res = response.content
if isinstance( response.content, bytes ):
res = res.decode()
authorized_tokens = dict(parse_qsl(res))
if not authorized_tokens:
raise TumblpyError('Unable to decode authorized tokens.')
return authorized_tokens
def request(self, endpoint, method='GET', blog_url=None,
extra_endpoints=None, params=None):
params = params or {}
method = method.lower()
if not method in ('get', 'post'):
raise TumblpyError('Method must be of GET or POST')
url = self.api_url # http://api.tumblr.com/v2/
if blog_url is not None:
# http://api.tumblr.com/v2/blog/blogname.tumblr.com/
blog_url = urlparse(blog_url)
url = '%sblog/%s/' % (
self.api_url,
blog_url.hostname if blog_url.hostname is not None else blog_url.path
)
url = '%s%s' % (url, endpoint)
if extra_endpoints is not None:
# In cases like:
# http://api.tumblr.com/v2/blog/blogname.tumblr.com/posts/type/
# 'type' is extra in the url & thought this was the best way
# Docs: http://www.tumblr.com/docs/en/api/v2#posts
url = '%s/%s' % (url, '/'.join(extra_endpoints))
params, files = _split_params_and_files(params)
params.update(self.default_params)
func = getattr(self.client, method)
try:
if method == 'get':
response = func(url, params=params, allow_redirects=False)
else:
kwargs = {'data': params, 'files': files, 'allow_redirects': False}
if files:
kwargs['params'] = params
response = func(url, **kwargs)
except requests.exceptions.RequestException:
raise TumblpyError('An unknown error occurred.')
if response.status_code == 401:
raise TumblpyAuthError('Error: %s, Message: %s' % (response.status_code, response.content))
content = response.content.decode('utf-8')
try:
if endpoint == 'avatar':
content = {
'response': {
'url': response.headers.get('location')
}
}
else:
content = json.loads(content)
except ValueError:
raise TumblpyError('Unable to parse response, invalid JSON.')
try:
content = content.get('response', {})
except AttributeError:
raise TumblpyError('Unable to parse response, invalid content returned: %s' % content)
if response.status_code < 200 or response.status_code > 301:
error_message = ''
if content and (content.get('errors') or content.get('error')):
if 'errors' in content:
for error in content['errors']:
                    error_message += '%s ' % error
elif 'error' in content:
error_message = content['error']
error_message = (error_message or
'There was an error making your request.')
raise TumblpyError(error_message, error_code=response.status_code)
return content
def get(self, endpoint, blog_url=None, extra_endpoints=None, params=None):
return self.request(endpoint, blog_url=blog_url,
extra_endpoints=extra_endpoints, params=params)
def post(self, endpoint, blog_url=None, extra_endpoints=None, params=None):
return self.request(endpoint, method='POST', blog_url=blog_url,
extra_endpoints=extra_endpoints, params=params)
def get_avatar_url(self, blog_url, size=64):
        # a non-empty list is always truthy, so the old "or ['64']" fallback
        # could never fire; the default comes from the size=64 parameter
        size = [str(size)]
return self.get('avatar', blog_url=blog_url, extra_endpoints=size)
def following(self, kwargs=None):
"""
Gets the blogs that the current user is following.
:param limit: an int, the number of likes you want returned
:param offset: an int, the blog you want to start at, for pagination.
# Start at the 20th blog and get 20 more blogs.
client.following({'offset': 20, 'limit': 20})
:returns: A dict created from the JSON response
"""
return self.get('user/following', params=kwargs)
def dashboard(self, kwargs=None):
"""
Gets the dashboard of the current user
example: dashboard = client.dashboard({'limit': '3'})
:param limit: an int, the number of posts you want returned
:param offset: an int, the posts you want to start at, for pagination.
:param type: the type of post you want to return
:param since_id: return only posts that have appeared after this ID
:param reblog_info: return reblog information about posts
:param notes_info: return notes information about the posts
:returns: A dict created from the JSON response
"""
return self.get('user/dashboard', params=kwargs)
def posts(self, blog_url, post_type=None, kwargs=None):
"""
Gets a list of posts from a particular blog
:param blog_url: a string, the blogname you want to look up posts
for. eg: codingjester.tumblr.com
:param post_type: the type of posts you want returned, e.g. video. If omitted returns all post types.
:param limit: an int, the number of likes you want returned
:param offset: an int, the blog you want to start at, for pagination.
:returns: A dict created from the JSON response
"""
url = 'posts'
if post_type:
url = '%s/%s' % (url, post_type)
return self.get(url, blog_url=blog_url, params=kwargs)
def __repr__(self):
return u'<TumblrAPI: %s>' % self.app_key
| michaelhelmick/python-tumblpy | tumblpy/api.py | Python | bsd-2-clause | 9,263 |
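Typical use of the class above, with placeholder credentials (the `from tumblpy import Tumblpy` path assumes the package's usual layout):

```python
from tumblpy import Tumblpy

t = Tumblpy(app_key='APP_KEY', app_secret='APP_SECRET',
            oauth_token='OAUTH_TOKEN', oauth_token_secret='OAUTH_TOKEN_SECRET')

# List a blog's posts: wraps GET /v2/blog/<blog>/posts
posts = t.posts('example.tumblr.com', kwargs={'limit': 5})

# Three-legged auth starts from get_authentication_tokens():
# tokens = t.get_authentication_tokens(callback_url='https://example.com/cb')
# the user visits tokens['auth_url'], then call get_authorized_tokens(verifier)
```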
# setTexture.py
#
# Copyright (c) 2009 Javier Romero
#
# Author: Javier Romero <jrgn@kth.se>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
import poser
def setTexture(texture):
scene = poser.Scene()
fig = scene.Figure("Figure 1")
materials = fig.Materials()
for material in materials:
#material.SetTextureMapFileName(texture)
#print material.TextureMapFileName()
name = material.Name()
#print material.Name()
if name == "skin":
material.SetTextureMapFileName(texture)
# print "skin!!!"
#else:
# print "no skin!!!"
#skin = fig.FindMaterialByName("skin")
#skin.SetTextureMapFileName("texture")
| libicocco/poser-hand-generator | setTexture.py | Python | gpl-2.0 | 1,394 |
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates new contacts.
To determine which contacts exist, run get_all_contacts.py.
Tags: ContactService.createContacts
"""
__author__ = 'Vincent Tsao'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Set the ID of the advertiser company this contact is associated with.
ADVERTISER_COMPANY_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
# Set the ID of the agency company this contact is associated with.
AGENCY_COMPANY_ID = 'INSERT_AGENCY_COMPANY_ID_HERE'
def main(client, advertiser_company_id, agency_company_id):
# Initialize appropriate service.
contact_service = client.GetService('ContactService', version='v201311')
# Create an advertiser contact.
advertiser_contact = {
'name': 'Mr. Advertiser #%s' % Utils.GetUniqueName(),
'email': 'advertiser@advertising.com',
'companyId': advertiser_company_id
}
# Create an agency contact.
agency_contact = {
'name': 'Ms. Agency #%s' % Utils.GetUniqueName(),
'email': 'agency@agencies.com',
'companyId': agency_company_id
}
# Create the contacts on the server.
contacts = contact_service.CreateContacts([advertiser_contact,
agency_contact])
# Display results.
for contact in contacts:
print ('Contact with ID \'%s\' name \'%s\' was created.'
% (contact['id'], contact['name']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, ADVERTISER_COMPANY_ID, AGENCY_COMPANY_ID)
| caioserra/apiAdwords | examples/adspygoogle/dfp/v201311/contact_service/create_contacts.py | Python | apache-2.0 | 2,467 |
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/scale.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def scale(X: Matrix,
center: bool,
scale: bool):
params_dict = {'X': X, 'center': center, 'scale': scale}
vX_0 = Matrix(X.sds_context, '')
vX_1 = Matrix(X.sds_context, '')
vX_2 = Matrix(X.sds_context, '')
output_nodes = [vX_0, vX_1, vX_2, ]
op = MultiReturn(X.sds_context, 'scale', output_nodes, named_input_nodes=params_dict)
vX_0._unnamed_input_nodes = [op]
vX_1._unnamed_input_nodes = [op]
vX_2._unnamed_input_nodes = [op]
return op
| apache/incubator-systemml | src/main/python/systemds/operator/algorithm/builtin/scale.py | Python | apache-2.0 | 1,766 |
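A hedged usage sketch for the generated wrapper above, assuming the SystemDS Python API (`SystemDSContext`, `from_numpy`, and `compute()` on the multi-return node):

```python
import numpy as np
from systemds.context import SystemDSContext
from systemds.operator.algorithm import scale

with SystemDSContext() as sds:
    X = sds.from_numpy(np.random.rand(100, 3))
    # scale() returns three matrices: scaled data, column means, column stds
    scaled, means, stds = scale(X, True, True).compute()
```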
from .data_asset import DataAsset
from .file_data_asset import FileDataAsset
| great-expectations/great_expectations | great_expectations/data_asset/__init__.py | Python | apache-2.0 | 77 |
{
'name' : 'Improvements for mass mailing',
'version' : '1.0.0',
'author' : 'IT-Projects LLC, Ivan Yelizariev',
'license': 'GPL-3',
'category' : 'Mail',
'website' : 'https://yelizariev.github.io',
'description': """
Module adds:
* partners info in mail.mail.statistics tree
* partners info in mail.mail.statistics form
Tested on 8.0 f8d5a6727d3e8d428d9bef93da7ba6b11f344284
""",
'depends' : ['mass_mailing'],
'data':[
'views.xml',
],
'installable': True
}
|
ufaks/addons-yelizariev
|
mass_mailing_extra/__openerp__.py
|
Python
|
lgpl-3.0
| 519
|
import time
import json
import sys
import yaml
import MySQLdb
import pickle
import dbcrawler_util as util
from collections import defaultdict
from bs4 import BeautifulSoup
from urllib2 import urlopen
from datetime import datetime
def main(argv):
'''
arg[1]: outDir
arg[2:]: seedPlantListFPaths, e.g. ijah_jamu_plants.lst
'''
assert len(argv)>=3
outDir = argv[1]
seedPlantListFPaths = argv[2:]
parseKnapsack(seedPlantListFPaths, outDir)
def parseKnapsack(seedPlantListFPaths, outDir):
seedPlantList = []
for fp in seedPlantListFPaths:
with open(fp) as infile:
idx = 0
for line in infile:
idx += 1
print 'parsing idx=', idx, 'of', fp
line = line.strip()
words = line.split()
                # placeholder branches: whether the name has 3 or 4 tokens,
                # the full name is rebuilt from all words below
                if len(words) == 3:
                    pass
                elif len(words) == 4:
                    pass
name = ' '.join(words)
seedPlantList.append(name)
# crawl knapsack
BASE_URL = 'http://kanaya.naist.jp/knapsack_jsp/result.jsp?sname=organism&word='
plantCompoundDict = defaultdict(list)
now = datetime.now()
for idx,p in enumerate(seedPlantList):
idx += 1
print 'crawling idx=', idx, 'of', len(seedPlantList)
        url = BASE_URL + p.replace(' ', '%20')  # percent-encode spaces in names
# print 'crawling url=', url
html = urlopen(url).read()
soup = BeautifulSoup(html, "lxml")
table = soup.find("table", "sortable d1")
table = table.find_all('tr')
compoundData = dict()
for i,row in enumerate(table):
datum = []
cols = row.find_all('td', 'd1')
for pos,col in enumerate(cols):
datum.append(str(col.get_text()))
if len(datum)==6:
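                # KNApSAcK result columns: 0 C_ID, 1 CAS ID, 2 compound name,
                # 3 molecular formula, 4 skipped (likely molecular weight),
                # 5 organism name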
comKnapsackId = datum[0]
comCasId = datum[1]
comName = datum[2]
comFormula = datum[3]
plantName = datum[5]
plantNameWords = plantName.split()
if len(plantNameWords)>1:
plantNameWords = plantNameWords[0:2]
plantName = ' '.join(plantNameWords)
plantName = plantName.capitalize()
compoundDatum = ( comKnapsackId, comCasId, comName, comFormula )
existingCom = [ c[0] for c in plantCompoundDict[plantName]]
if comKnapsackId not in existingCom:
plantCompoundDict[plantName].append( compoundDatum )
jsonFpath = outDir+'/knapsack_jsp_plant_vs_compound_'+str(now.date())+'_'+str(now.time())+'.json'
with open(jsonFpath, 'w') as f:
json.dump(plantCompoundDict, f, indent=2, sort_keys=True)
pklFpath = outDir+'/knapsack_jsp_plant_vs_compound_'+str(now.date())+'_'+str(now.time())+'.pkl'
with open(pklFpath, 'wb') as f:
pickle.dump(plantCompoundDict, f)
return plantCompoundDict
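# parseKnapsack returns a dict mapping 'Genus species' -> list of
# (knapsack_id, cas_id, compound_name, formula) tuples, mirrored to a JSON
# and a pickle file under outDir.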
if __name__ == '__main__':
start_time = time.time()
main(sys.argv)
print("--- %s seconds ---" % (time.time() - start_time))
|
tttor/csipb-jamu-prj
|
database/crawler/crawl_knapsack.py
|
Python
|
mit
| 3,107
|
__a, _A__a, _B__a, __b, _A__b, _B__b, __c, _A__c, _B__c = 0, 1, 2, 3, 4, 5, 6, 7, 8
print __a, _A__a, _B__a, __b, _A__b, _B__b, __c, _A__c, _B__c
class A(object):
__a = 9
print __a, _A__a, _B__a, __b, _A__b, _B__b, __c, _A__c, _B__c
def f(self):
print __a, _A__a, _B__a, __b, _A__b, _B__b, __c, _A__c, _B__c
class B(object):
__b = 10
print __a, _A__a, _B__a, __b, _A__b, _B__b, __c, _A__c, _B__c
def f(self):
print __a, _A__a, _B__a, __b, _A__b, _B__b, __c, _A__c, _B__c
A().f()
A().B().f()
|
csvoss/onelinerizer
|
tests/mangle.py
|
Python
|
mit
| 552
|
from sop_embed.da import DenoisingAutoencoder
from sop_embed.tools import NonLinearity
from sop_embed.tools import CostType
from sop_embed.tools import ModelMLP
from sop_embed.tools import train_one_epoch_chuncks
from sop_embed.tools import theano_fns
from sop_embed.tools import sharedX_value
from sop_embed.tools import collect_stats_epoch
from sop_embed.tools import plot_stats
from sop_embed.tools import split_data_to_minibatchs_eval
from sop_embed.tools import split_data_to_minibatchs_embed
from sop_embed.tools import evaluate_model
from sop_embed.tools import StaticAnnealedWeightRate
from sop_embed.tools import StaticExponentialDecayWeightRate
from sop_embed.tools import StaticExponentialDecayWeightRateSingle
from sop_embed.tools import StaticAnnealedWeightRateSingle
from sop_embed.tools import print_stats_train
from sop_embed.learning_rule import AdaDelta
from sop_embed.learning_rule import Momentum
import theano.tensor as T
import theano
import numpy as np
import cPickle as pkl
import datetime as DT
import os
import inspect
import sys
import shutil
from random import shuffle
# Alexnet: 9216
# VGG16: 25088
def standardize(data):
"""
Normalize the data with respect to finding the mean and the standard
deviation of it and dividing by mean and standard deviation.
"""
mu = np.mean(data, axis=0)
sigma = np.std(data, axis=0)
if sigma.nonzero()[0].shape[0] == 0:
raise Exception("Std dev should not be zero")
norm_data = (data - mu) / sigma
return norm_data
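# e.g. standardize(np.array([[1., 2.], [3., 4.]])) -> [[-1., -1.], [1., 1.]]
# (per-column mean [2., 3.], per-column std [1., 1.])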
if __name__ == "__main__":
type_mod = ""
if type_mod is "alexnet":
dim_in = 9216
if type_mod is "vgg_16":
dim_in = 25088
if type_mod is "vgg_19":
dim_in = 9216
if type_mod is "googlenet":
dim_in = 9216
faceset = "lfpw"
fd_data = "../../inout/data/face/" + faceset + "_data/"
path_valid = fd_data + type_mod + "valid.pkl"
w, h = 50, 50
    if type_mod is not None and type_mod != "":
        w, h = dim_in, 1
input = T.fmatrix("x_input")
output = T.fmatrix("y_output")
# Create mixed data
nbr_sup, nbr_xx, nbr_yy = 676, 0, 0
id_data = type_mod + "ch_tr_" + str(nbr_sup) + '_' + str(nbr_xx) + '_' +\
str(nbr_yy)
# List train chuncks
l_ch_tr = [
fd_data + id_data + "_" + str(i) + ".pkl" for i in range(0, 1)]
time_exp = DT.datetime.now().strftime('%m_%d_%Y_%H_%M_%s')
fold_exp = "../../exps/" + faceset + "_" + time_exp
if not os.path.exists(fold_exp):
os.makedirs(fold_exp)
nbr_layers = 4
init_w_path = "../../inout/init_weights/" + str(nbr_layers) + '_' +\
faceset + '_layers/'
if not os.path.exists(init_w_path):
os.makedirs(init_w_path)
rnd = np.random.RandomState(1231)
nhid_l0 = 1025
nhid_l1 = 512
nhid_l2 = 64
# Create the AE in 1
nvis, nhid = w*h, nhid_l0
path_ini_params_l0 = init_w_path + "dae_w_l0_init_" + str(nvis) + '_' +\
str(nhid) + ".pkl"
dae_l0 = DenoisingAutoencoder(input,
nvis=nvis,
nhid=nhid,
L1_reg=0.,
L2_reg=1e-2,
rnd=rnd,
nonlinearity=NonLinearity.SIGMOID,
cost_type=CostType.MeanSquared,
reverse=False,
corruption_level=0.2)
if not os.path.isfile(path_ini_params_l0):
dae_l0.save_params(path_ini_params_l0)
else:
dae_l0.set_params_vals(path_ini_params_l0)
# Create the AE in 2
nvis, nhid = nhid_l0, nhid_l1
path_ini_params_l1 = init_w_path + "dae_w_l1_init_" + str(nvis) + '_' +\
str(nhid) + ".pkl"
dae_l1 = DenoisingAutoencoder(dae_l0.encode((input)),
nvis=nhid_l0,
nhid=nhid_l1,
L1_reg=0.,
L2_reg=1e-2,
rnd=rnd,
nonlinearity=NonLinearity.SIGMOID,
cost_type=CostType.MeanSquared,
reverse=False,
corruption_level=0.01)
if not os.path.isfile(path_ini_params_l1):
dae_l1.save_params(path_ini_params_l1)
else:
dae_l1.set_params_vals(path_ini_params_l1)
# Create the AE in 3
nvis, nhid = nhid_l1, nhid_l2
path_ini_params_l2 = init_w_path + "dae_w_l2_init_" + str(nvis) + '_' +\
str(nhid) + ".pkl"
dae_l2 = DenoisingAutoencoder(dae_l1.encode(dae_l0.encode((input))),
nvis=nhid_l1,
nhid=nhid_l2,
L1_reg=0.,
L2_reg=0.,
rnd=rnd,
nonlinearity=NonLinearity.TANH,
cost_type=CostType.MeanSquared,
reverse=False)
if not os.path.isfile(path_ini_params_l2):
dae_l2.save_params(path_ini_params_l2)
else:
dae_l2.set_params_vals(path_ini_params_l2)
# Create the AE out
nvis, nhid = 68*2, nhid_l2
path_ini_params_l3 = init_w_path + "dae_w_l3_init_" + str(nvis) + '_' +\
str(nhid) + ".pkl"
dae_l3 = DenoisingAutoencoder(output,
L1_reg=0.,
L2_reg=1e-2,
nvis=nvis,
nhid=nhid,
rnd=rnd,
nonlinearity=NonLinearity.TANH,
cost_type=CostType.MeanSquared,
reverse=True)
if not os.path.isfile(path_ini_params_l3):
dae_l3.save_params(path_ini_params_l3)
else:
dae_l3.set_params_vals(path_ini_params_l3)
# Create the network
rng = np.random.RandomState(23455)
layer0 = {
"rng": rng,
"n_in": w*h,
"n_out": nhid_l0,
"W": dae_l0.hidden.W,
"b": dae_l0.hidden.b,
"activation": NonLinearity.SIGMOID
}
layer1 = {
"rng": rng,
"n_in": nhid_l0,
"n_out": nhid_l1,
"W": dae_l1.hidden.W,
"b": dae_l1.hidden.b,
"activation": NonLinearity.SIGMOID
}
layer2 = {
"rng": rng,
"n_in": nhid_l1,
"n_out": nhid_l2,
"W": dae_l2.hidden.W,
"b": dae_l2.hidden.b,
"activation": NonLinearity.TANH
}
layer3 = {
"rng": rng,
"n_in": nhid_l2,
"n_out": 68*2,
"W": dae_l3.hidden.W_prime,
"b": dae_l3.hidden.b_prime,
"activation": NonLinearity.TANH
}
layers = [layer0, layer1, layer2, layer3]
# dropout = [float(sys.argv[1]), float(sys.argv[2]), float(sys.argv[3]),
# float(sys.argv[4])]
dropout = [0.0, 0.0, 0.0, 0.0]
# number of the hidden layer just before the output ae. Default: None
id_code = None
model = ModelMLP(layers, input, l1_reg=0., l2_reg=0., reg_bias=False,
dropout=dropout, id_code=id_code)
aes_in = [dae_l0]
aes_out = [dae_l3]
if id_code is not None:
assert aes_out != []
# Train
# Data
tr_batch_size = 10
vl_batch_size = 8000
with open(path_valid, 'r') as f:
l_samples_vl = pkl.load(f)
list_minibatchs_vl = split_data_to_minibatchs_eval(
l_samples_vl, vl_batch_size)
max_epochs = int(1000)
lr_vl = 1e-3
lr = sharedX_value(lr_vl, name="lr")
# cost weights
separate = True
l_in = [sharedX_value(1., name="l_in"), sharedX_value(0.0, name="l_in2")]
l_out = [sharedX_value(1., name="l_out")]
l_sup = sharedX_value(1., name="l_sup")
l_code = sharedX_value(0.0, name="l_code")
    if not separate:
        # l_in and l_out are lists of shared variables; sum their leading terms
        assert l_sup.get_value() + l_in[0].get_value() + \
            l_out[0].get_value() == 1.
if l_in[0].get_value() != 0. and aes_in == []:
raise ValueError("You setup the l_in but no aes in found.")
if l_out[0].get_value() != 0. and aes_out == []:
raise ValueError("You setup the l_out but no aes out found.")
# Train criterion
cost_type = CostType.MeanSquared # CostType.MeanSquared
# Compile the functions
# Momentum(0.9, nesterov_momentum=False,
# imagenet=False, imagenetDecay=5e-4,
# max_colm_norm=False)
train_updates, eval_fn = theano_fns(
model, aes_in, aes_out, l_in, l_out, l_sup, l_code, lr,
cost_type,
updaters={
"all": Momentum(0.9, nesterov_momentum=False,
imagenet=False, imagenetDecay=5e-4,
max_colm_norm=False),
"in": Momentum(0.9, nesterov_momentum=False,
imagenet=False, imagenetDecay=5e-4,
max_colm_norm=False),
"out": Momentum(0.9, nesterov_momentum=False,
imagenet=False, imagenetDecay=5e-4,
max_colm_norm=False),
"code": None},
max_colm_norm=False, max_norm=15.0, eye=False)
# How to update the weight costs
updater_wc = StaticAnnealedWeightRate(anneal_end=500, anneal_start=0)
updater_wc_in = StaticAnnealedWeightRateSingle(anneal_end=500, down=True,
init_vl=1., end_vl=0.,
anneal_start=100)
updater_wc_in2 = StaticAnnealedWeightRateSingle(anneal_end=500, down=True,
init_vl=0.0001, end_vl=0.,
anneal_start=400)
updater_wc_out = StaticAnnealedWeightRateSingle(anneal_end=700, down=True,
init_vl=1., end_vl=0.,
anneal_start=100)
# how to update the weight code
# l_code_updater = StaticExponentialDecayWeightRateSingle(slop=20,
# anneal_start=0)
to_update = {"l_in": True, "l_out": True}
if aes_in == []:
to_update["l_in"] = False
if aes_out == []:
to_update["l_out"] = False
# Train
i = 0
# Stats
train_stats = {"in_cost": [], "out_cost": [],
"all_cost": [], "tr_pure_cost": [], "code_cost": [],
"in_cost_mb": [], "out_cost_mb": [], "all_cost_mb": [],
"tr_pure_cost_mb": [], "error_tr": [], "error_vl": [],
"error_tr_mb": [], "error_vl_mb": [], "code_cost_mb": [],
"best_epoch": 0, "best_mb": 0}
# tag
if aes_in == [] and aes_out == []:
tag = "sup"
elif aes_in != [] and aes_out == []:
tag = "sup + in"
elif aes_in == [] and aes_out != []:
tag = "sup + out"
elif aes_in != [] and aes_out != []:
tag = "sup + in + out"
tag += ", data: " + faceset + " " + id_data
# First evaluation on valid
error_mn, _ = evaluate_model(list_minibatchs_vl, eval_fn)
vl_error_begin = np.mean(error_mn)
shutil.copy(inspect.stack()[0][1], fold_exp)
l_ch_tr_vl = []
for ch in l_ch_tr:
with open(ch, 'r') as f:
l_samples = pkl.load(f)
l_ch_tr_vl.append(l_samples)
stop = False
while i < max_epochs:
stop = (i == max_epochs - 1)
stats = train_one_epoch_chuncks(
train_updates, eval_fn, l_ch_tr_vl,
l_in, l_out, l_sup, l_code, list_minibatchs_vl,
model, aes_in, aes_out, i, fold_exp, train_stats,
vl_error_begin, tag, tr_batch_size, stop=stop)
# Shuffle the minibatchs: to avoid periodic behavior.
# for ts in xrange(100):
# shuffle(l_ch_tr_vl)
# Collect stats
train_stats = collect_stats_epoch(stats, train_stats)
# Print train stats
# print_stats_train(train_stats, i, "", 0)
# reduce the frequency of the disc access, it costs too much!!!
if stop:
# Plot stats: epoch
plot_stats(train_stats, "epoch", fold_exp, tag)
# Save stats
with open(fold_exp + "/train_stats.pkl", 'w') as f_ts:
pkl.dump(train_stats, f_ts)
# Update the cost weights
if aes_in != [] or aes_out != []:
# updater_wc(l_sup, l_in, l_out, i, to_update)
updater_wc_in(l_in[0], i)
updater_wc_in2(l_in[1], i)
updater_wc_out(l_out[0], i)
print "\n", l_sup.get_value(), l_out[0].get_value()
for el in l_in:
print el.get_value(),
print ""
        if id_code is not None:
            # requires the StaticExponentialDecayWeightRateSingle updater
            # (commented out above) to be instantiated as l_code_updater
            l_code_updater(l_code, i)
            print "l_code:", l_code.get_value()
# Check the stopping criterion
# [TODO]
# Update lr
# if (i % 1 == 0):
# lr.set_value(np.cast[theano.config.floatX](lr.get_value()/1.0001))
# print "lr:", lr.get_value()
i += 1
del stats
# Eval
cmd = "python evaluate_face_new_data.py " + str(faceset) + " " +\
str(fold_exp) + " mlp"
# with open("./" + str(time_exp) + ".py", "w") as python_file:
# python_file.write("import os \n")
# cmd2 = 'os.system("' + cmd + '")'
# python_file.write(cmd2)
os.system(cmd)
# # std_data = standardize(x_data)
# std_data = np.asarray(x_data, dtype=theano.config.floatX)
#
# dae_l0.fit(learning_rate=9.96*1e-3,
# shuffle_data=True,
# data=std_data,
# weights_file=weights_file_l0,
# recons_img_file=None,
# corruption_level=0.095,
# batch_size=40,
# n_epochs=2)
#
# dae_l0_obj_out = open("dae_l0_obj.pkl", "wb")
# pkl.dump(dae_l0, dae_l0_obj_out, protocol=pkl.HIGHEST_PROTOCOL)
#
# dae_l0_out = dae_l0.encode((input))
# dae_l0_h = dae_l0.encode(std_data)
# dae_l0_h_fn = theano.function([], dae_l0_h)
# dae_l1_in = dae_l0_h_fn()
# dae_l1_in = np.asarray(dae_l1_in, dtype=theano.config.floatX)
#
# dae_l1 = DenoisingAutoencoder(dae_l0_out,
# L1_reg=1e-4,
# L2_reg=6*1e-4,
# nvis=nhid_l0,
# nhid=nhid_l1,
# rnd=rnd,
# reverse=True)
#
# dae_l1.fit(learning_rate=0.95*1e-2,
# data=dae_l1_in,
# shuffle_data=True,
# recons_img_file=None,
# weights_file=weights_file_l1,
# corruption_level=0.1,
# batch_size=25,
# n_epochs=2) # 1400
#
# dae_l1_obj_out = open("dae_l1_obj.pkl", "wb")
# pkl.dump(dae_l1, dae_l1_obj_out, protocol=pkl.HIGHEST_PROTOCOL)
|
sbelharbi/structured-output-ae
|
sop_embed/experiments/lfpw_4l_in_out.py
|
Python
|
lgpl-3.0
| 15,118
|
#!/usr/bin/env python
# encoding: utf-8
"""Implements a container for parsed snippets."""
class SnippetDictionary(object):
"""See module docstring."""
def __init__(self):
self._snippets = []
self._cleared = {}
self._clear_priority = float("-inf")
def add_snippet(self, snippet):
"""Add 'snippet' to this dictionary."""
self._snippets.append(snippet)
def get_matching_snippets(self, trigger, potentially):
"""Returns all snippets matching the given trigger.
If 'potentially' is true, returns all that could_match().
"""
all_snippets = self._snippets
if not potentially:
return [s for s in all_snippets if s.matches(trigger)]
else:
return [s for s in all_snippets if s.could_match(trigger)]
def clear_snippets(self, priority, triggers):
"""Clear the snippets by mark them as cleared.
If trigger is None, it updates the value of clear priority
instead.
"""
if not triggers:
if self._clear_priority is None or priority > self._clear_priority:
self._clear_priority = priority
else:
for trigger in triggers:
if (trigger not in self._cleared or
priority > self._cleared[trigger]):
self._cleared[trigger] = priority
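# Minimal usage sketch (with hypothetical snippet objects implementing
# matches() and could_match()):
#   snippets = SnippetDictionary()
#   snippets.add_snippet(snip)
#   exact = snippets.get_matching_snippets('trig', potentially=False)
#   partial = snippets.get_matching_snippets('tr', potentially=True)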
|
linkinpark342/ultisnips
|
pythonx/UltiSnips/snippet/source/_snippet_dictionary.py
|
Python
|
gpl-3.0
| 1,399
|
from unittest import TestCase, main
from skbio.util import get_data_path
from microprot.scripts.split_search import (mask_sequence,
parse_pdb_match,
_parse_hit_block)
class ParsersTests(TestCase):
def setUp(self):
self.file_hhsearch1 = get_data_path(
'test_split_search/GRAMNEG_T1D_5168.out')
self.file_fasta1 = get_data_path(
'test_split_search/GRAMNEG_T1D_5168.fasta')
self.file_hhsearch2 = get_data_path(
'test_split_search/GRAMNEG_T1D_3144_1-275.out')
self.file_fasta2 = get_data_path(
'test_split_search/GRAMNEG_T1D_3144_1-275.fasta')
def test_split_search_parseerror_1(self):
mask_sequence(self.file_hhsearch1,
self.file_fasta1,
min_prob=95.0,
min_fragment_length=40)
parse_pdb_match(self.file_hhsearch1)
def test_split_search_parseerror_2(self):
mask_sequence(self.file_hhsearch2,
self.file_fasta2,
min_prob=95.0,
min_fragment_length=40)
parse_pdb_match(self.file_hhsearch2)
def test_split_search_parseerror_block(self):
        with open(
                get_data_path('test_split_search/parsealignment_fail_example.txt'),
                'r') as f:
            errorblock = f.read()
_parse_hit_block(errorblock)
if __name__ == '__main__':
main()
|
biocore/microprot
|
microprot/tests/test_split_search_parseerror.py
|
Python
|
bsd-3-clause
| 1,539
|
#!/usr/bin/env python
import sys
import os
import itertools
from itertools import izip
#import matplotlib.pyplot as plt
from Bio import SeqIO
import scipy as np
from scipy import median, mean
from scipy.optimize import curve_fit
from scipy.stats.distributions import t
from struct import pack
from random import shuffle, seed
from itertools import combinations
try:
from sympy.mpmath import fac
except:
from math import factorial as fac
from random import sample
try:
from numba import jit
except:
jit = lambda x:x
try:
from numexpr import evaluate
except:
evaluate = eval
# find the core genes
# python this_script.py -i foo.pep.fsa -g foo.mcl [-l .05] [-u .95]
def manual_print():
    print 'Usage:'
    print '    python this_script.py -i foo.pep.fsa -g foo.mcl [-l .05] [-u .95]'
    print 'Parameters:'
    print '  -i: protein/gene fasta file. The header should look like xxxx|yyyy: xxxx is the taxon name and yyyy is a unique identifier'
    print '  -g: protein/gene groups file. The proteins of each row belong to the same protein/gene group'
    print '  -l: threshold for specific genes'
    print '  -u: threshold for core genes'
argv = sys.argv
# recommended parameters:
args = {'-i':'', '-g':'', '-l':.05, '-u':.95}
N = len(argv)
for i in xrange(1, N):
k = argv[i]
if k in args:
v = argv[i+1]
args[k] = v
elif k[:2] in args and len(k) > 2:
args[k[:2]] = k[2:]
else:
continue
if args['-i']=='' or args['-g']=='':
manual_print()
raise SystemExit()
try:
fas, mcl, ts, tc = args['-i'], args['-g'], float(args['-l']), float(args['-u'])
except:
manual_print()
raise SystemExit()
# get all the taxon
taxon_set = set()
f = open(fas, 'r')
for i in f:
if i.startswith('>'):
header = i[1:-1].split(' ')[0]
taxon = header.split('|')[0]
taxon_set.add(taxon)
f.close()
taxon_list = list(taxon_set)
taxon_dict = {j:i for i,j in enumerate(taxon_list)}
# build N by M matrix
# row is group name
# col is taxon name
# each cell stands for the genes count of the group for a specific taxon
#header = ['#family', 'type'] + taxon_list
_o0 = open('type.txt', 'wb')
_o1 = open('pan.npy', 'wb')
#print '\t'.join(header)
N = len(taxon_list)
Ts = ts < 1 and max(ts*N, 1) or ts
Tc = tc < 1 and tc * N or tc
#outputs = []
spec = shar = core = 0
visit = set()
flag = 0
f = open(mcl, 'r')
for i in f:
counts = [0] * N
#taxon_dict[elem.split('|')[0]] for elem in i[:-1].split('\t')]
group = i[:-1].split('\t')
for j in group:
tax = j.split('|')[0]
counts[taxon_dict[tax]]+=1
visit.add(j)
#thr = len([elem for elem in counts if elem>0]) * 1. / N
thr = len([elem for elem in counts if elem>0])
if thr <= Ts:
pan = 'Specific'
spec += 1
elif Ts< thr < Tc:
pan = 'Share'
shar += 1
else:
pan = 'Core'
core += 1
#output = [flag, pan]
output = ['group_%09d'%flag, pan]
output.extend(counts)
_o0.write('\t'.join(output[:2])+'\n')
_o1.write(''.join([pack('i', elem) for elem in counts]))
#print '\t'.join(map(str, output))
#outputs.append(output)
flag += 1
f.close()
for i in SeqIO.parse(fas, 'fasta'):
j = i.id
if j in visit:
continue
counts = [0] * N
tax = j.split('|')[0]
counts[taxon_dict[tax]]+=1
pan = 'Specific'
output = ['group_%09d'%flag, pan]
output.extend(counts)
_o0.write('\t'.join(output[:2])+'\n')
_o1.write(''.join([pack('i', elem) for elem in counts]))
#print '\t'.join(map(str, output))
#outputs.append(output)
flag += 1
spec += 1
_o0.close()
_o1.close()
print '#' * 80
print '# Statistics and profile of pan-genome:'
print '# The methods can be found in Hu X, et al. Trajectory and genomic determinants of fungal-pathogen speciation and host adaptation.'
print '#'
print '# statistic of core, shared and specific genes:'
print '\t'.join(['# Feature', 'core', 'shared', 'specific', 'taxon'])
print '\t'.join(map(str, ['# Number', core, shar, spec, N]))
#print flag, N
#_o.close()
# calculate the core, share and specific gene's profile
fp = np.memmap('pan.npy', mode='r+', shape=(flag, N), dtype='int32')
mat = np.asarray(fp, dtype='bool')
mat = np.asarray(mat, dtype='int8')
#mat[mat>0] = 1
#mat = np.asarray([elem[2:] for elem in outputs])
#print ts, tc, Ts, Tc
#print mat
def pan_feature0(x, ts=.05, tc=.95):
n, d = x.shape
idx = range(d)
index = []
cores = []
specs = []
panzs = []
for i in xrange(1, d+1):
j = i+1
Ts = ts < 1 and max(ts*j, 1) or ts
Tc = tc < 1 and tc * j or tc
#shuffle(idx)
flag = 0
for j in combinations(idx, i):
if flag > 100:
break
y = x[:,j].sum(1)
core = np.sum(y>Tc)
spec = np.sum(y<Ts)
panz = np.sum(y>0)
index.append(i)
cores.append(core)
specs.append(spec)
panzs.append(panz)
flag += 1
return index, cores, specs, panzs
def pan_feature1(x, size=100, ts=.05, tc=.95):
n, d = x.shape
idx = range(d)
index = []
cores = []
specs = []
panzs = []
for itr in xrange(size):
shuffle(idx)
y = mat[:, idx[0]]
for i in xrange(1, d-1):
j = i+1
Ts = ts < 1 and max(ts*j, 1) or ts
Tc = tc < 1 and tc * j or tc
y += mat[:, idx[i]]
core = np.sum(y>=Tc)
spec = np.sum(np.logical_and(y<=Ts, y>0))
panz = np.sum(y>0)
index.append(j)
cores.append(core)
specs.append(spec)
panzs.append(panz)
return index, cores, specs, panzs
def pan_feature(x, size=150, ts=.05, tc=.95):
n, d = x.shape
#size = min(size, d*(d-1)/2)
idx = range(d)
index = []
cores = []
specs = []
panzs = []
idxs = []
seed(42)
for i in xrange(size):
shuffle(idx)
idxs.append(idx[:])
ys = x[:, [elem[0] for elem in idxs]]
#for i in xrange(1, d-1):
for i in xrange(1, d):
j = i + 1
Ts = ts < 1 and max(ts*j, 1) or ts
Tc = tc < 1 and tc * j or tc
yn = x[:, [elem[i] for elem in idxs]]
sp = np.asarray(evaluate('(ys<=0) & (yn>0)'), dtype='int8')
#sp = np.asarray(evaluate('(ys<=Ts) & (yn>0)'), dtype='int8')
spec = evaluate('sum(sp, 0)')
ys = evaluate('ys+yn')
cr = np.asarray(evaluate('ys>=Tc'), dtype='int8')
core = evaluate('sum(cr, 0)')
#core = evaluate('sum(Ys>=Tc, 0)')
#core = np.sum(ys>=Tc, 0)
#spec = evaluate('sum((Ys<=Ts) & (Ys>0), 0)')
#spec = np.sum((ys<=Ts) & (ys>0), 0)
pa = np.asarray(evaluate('ys>0'), dtype='int8')
panz = evaluate('sum(pa, 0)')
#panz = np.sum(ys>0, 0)
cores.extend(core)
specs.extend(spec)
panzs.extend(panz)
index.extend([j] * size)
'''
if i < d-1:
cores.extend(core)
specs.extend(spec)
panzs.extend(panz)
index.extend([j] * size)
else:
cores.extend(core[:1])
specs.extend(spec[:1])
panzs.extend(panz[:1])
index.extend([j])
'''
#print 'ts tc ys'
#print Ts, Tc
#print ys
#print [elem[i] for elem in idxs]
#print 'core spec panz', len(core), len(spec), len(panz)
#print core, spec, panz
#print [j] * size
#print 'pan genome'
#print map(len, [index, cores, specs, panzs])
#print index
#print cores
#print specs
#print panzs
return index, cores, specs, panzs
index, cores, specs, panzs = pan_feature(mat)
for a, b in zip(index, specs):
print a, b
#raise SystemExit()
# compute the combine
def combs(N, M):
return fac(N) / fac(M) / fac(N - M)
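# e.g. combs(5, 2) == 120 / 2 / 6 == 10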
# plt.figure()
#plt.plot(x, y, label = '$sin(x)$', color = 'red', linewidth = 1)
# plt.show()
# estimate core gene size
def Fc(n, K_c, Tau_c, Omega):
return K_c * np.exp(-n / max(1e-10, Tau_c)) + Omega
# return K_c * np.exp(-n / Tau_c) + Omega
# estimate specific gene size
def Fs(n, K_s, Tau_s, TgTheta):
#return K_s * np.exp(-n / Tau_s) + TgTheta
return K_s * np.exp(-n / max(1e-10, Tau_s)) + min(10000, TgTheta)
# pan-genome open test
# alpah <= 1 is open
# alpha > 1 is close
#def pan_open(n, K, Alpha):
# return K * n ** (-Alpha)
# pan size
def fpan(n, D, tgTheta, K_s, Tau_s):
return D + tgTheta * (n - 1) + K_s * np.exp(-2. / Tau_s) * (1 - np.exp(-(n - 1.) / Tau_s)) / (1 - np.exp(-1. / Tau_s))
# estimate pan-genome gene size
# r > 0 : open
def pgene(n, K, r):
return K * n ** r
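# e.g. with hypothetical parameters K=2500, r=0.3: pgene(10, 2500, 0.3) ~= 4988,
# and any r > 0 implies the pan-genome keeps growing as genomes are added (open)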
def find_med(coreN):
med = {}
for i, j in coreN:
try:
med[i].append(j)
except:
med[i] = [j]
for i in med:
med[i] = np.median(med[i])
return np.asarray(med.items(), 'int64')
def fit_curve(f, X, Y, alpha=.05):
x, y = map(np.asarray, [X, Y])
pars, pcov = curve_fit(f, x, y)
n = len(y)
p = len(pars)
dof = max(0, n - p)
tval = t.ppf(1.0 - alpha / 2., dof)
conf = [tval * elem ** .5 for elem in np.diag(pcov)]
return pars, conf
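# fit_curve returns the fitted parameters together with their two-sided
# (1 - alpha) confidence half-widths, taken from the diagonal of the
# covariance matrix scaled by the Student-t quantile.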
#pm = '+/-'
pm = '\xc2\xb1'
#spcN = [elem for elem in coreN if elem[0] == 1] + spcN
# estimate the parameters
# the core parameter
# print 'the core N', coreN
#coreN = find_med(coreN)
# print 'the core N', coreN.tolist()
#popt, pcov = curve_fit(Fc, coreN[:, 0], coreN[:, 1])
#popt, conf = fit_curve(Fc, num, coreN[:, 1])\
print '#'
print '# \xcf\x89(core size of pan-genome) and 95% confidence interval:'
popt, conf = fit_curve(Fc, index, cores)
#print 'Kc\tTauc\tOmega', popt, conf
print '# \xce\xbac\t\xcf\x84c\t\xcf\x89'
#print pm
print '# '+'\t'.join([str(a)+pm+str(b) for a, b in zip(popt, conf)])
# the specific parameter
# print 'the spc N', spcN
#spcN = find_med(spcN)
#spcN = np.asarray(spcN, 'int64')
# print 'the spc N', spcN.tolist()
#popt, pcov = curve_fit(Fs, spcN[:, 0], spcN[:, 1])
#popt, conf = fit_curve(Fs, spcN[:, 0], spcN[:, 1])
print '#'
print '# \xce\xb8(new gene number for every new genome sequenced) and 95% confidence interval:'
popt, conf = fit_curve(Fs, index, specs)
#print '# Ks\tTaus\tTheta', popt, conf
print '# \xce\xbas\t\xcf\x84s\t\xce\xb8'
print '# '+'\t'.join([str(a)+pm+str(b) for a, b in zip(popt, conf)])
# the openness
#print '#'
#print '# \xce\xb1(parameter of openness test) and 95% confidence interval(open if \xce\xb1 <= 1 else close):'
#popt, conf = fit_curve(pan_open, index, specs)
#print '# K\tAlpah', popt, conf
#print '# \xce\xba\t\xce\xb1 '
#print '# '+'\t'.join([str(a)+pm+str(b) for a, b in zip(popt, conf)])
# the pan-genome size
#pan_size = np.asarray(pan_size, 'int64')
#popt, pcov = curve_fit(pgene, pan_size[:, 0], pan_size[:, 1])
#popt, conf = fit_curve(pgene, pan_size[:, 0], pan_size[:, 1])
print '#'
print '# \xce\xba(size and openness of pan-genome, open if \xce\xb3 > 0) and 95% confidence interval:'
popt, conf = fit_curve(pgene, index, panzs)
#print 'pan-size, k, gamma', popt, conf
print '# \xce\xba\t\xce\xb3'
print '# '+'\t'.join([str(a)+pm+str(b) for a, b in zip(popt, conf)])
print '#'
print '# Type and frequency of each gene group in different species:'
print '#'*80
header = ['#family', 'type'] + taxon_list
#_o = open('pan.npy', 'wb')
print '\t'.join(header)
#for i in outputs:
# print '\t'.join(map(str, i))
f = open('type.txt', 'r')
#mat = np.memmap('pan.npy', mode='r+', shape=(flag, N), dtype='int32')
for i, j in izip(f, fp):
out = i[:-1] + '\t' + '\t'.join(map(str, j))
print out
f.close()
os.system('rm pan.npy type.txt')
#print fp
|
Rinoahu/fastclust
|
deprecate/py27/scripts/deprecate/pangenome.py
|
Python
|
gpl-3.0
| 11,736
|
import calendar
from collections import OrderedDict
from datetime import datetime
import fauxfactory
import pytest
from humanfriendly import parse_size
from humanfriendly import tokenize
from cfme import test_requirements
from cfme.containers.provider import ContainersProvider
from cfme.intelligence.chargeback import assignments
from cfme.utils.blockers import GH
from cfme.utils.log import logger
from cfme.utils.units import CHARGEBACK_HEADER_NAMES
from cfme.utils.units import parse_number
obj_types = ['Image', 'Project']
fixed_rates = ['Fixed1', 'Fixed2', 'CpuCores', 'Memory', 'Network']
variable_rates = ['CpuCores', 'Memory', 'Network']
all_rates = set(fixed_rates + variable_rates)
intervals = ['Hourly', 'Daily', 'Weekly', 'Monthly']
rate_types = ['fixed', 'variable']
pytestmark = [
pytest.mark.meta(
server_roles='+ems_metrics_coordinator +ems_metrics_collector +ems_metrics_processor'),
pytest.mark.usefixtures('setup_provider_modscope'),
pytest.mark.parametrize('obj_type', obj_types, scope='module'),
pytest.mark.parametrize('rate_type', rate_types, scope='module'),
pytest.mark.parametrize('interval', intervals, scope='module'),
pytest.mark.long_running,
pytest.mark.provider([ContainersProvider], scope='module'),
pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:8798')]),
test_requirements.containers # This should eventually move to the chargeback req
]
# We cannot calculate the accurate value because the prices in the reports
# appears in a lower precision (floored). Hence we're using this accuracy coefficient:
TEST_MATCH_ACCURACY = 0.1
now = datetime.now()
hours_count_lut = OrderedDict([('Hourly', 1.), ('Daily', 24.), ('Weekly', 168.),
('Monthly', calendar.monthrange(now.year, now.month)[1] * 24.),
('Yearly', 8760)])
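# hours_count_lut maps a rate interval to its length in hours, e.g. 48 metered
# hours under a 'Daily' rate count as 48 / 24 = 2 charged intervals.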
def dump_args(**kwargs):
"""Return string of the arguments and their values.
E.g. dump_args(a=1, b=2) --> 'a=1, b=2;
'"""
out = ''
for key, val in kwargs.items():
out += '{}={}, '.format(key, val)
if out:
return out[:-2] + ';'
return kwargs
def gen_report_base(appliance, obj_type, provider, rate_desc, rate_interval):
"""Base function for report generation
Args:
:py:type:`str` obj_type: Object being tested; only 'Project' and 'Image' are supported
:py:class:`ContainersProvider` provider: The Containers Provider
:py:type:`str` rate_desc: The rate description as it appears in the report
:py:type:`str` rate_interval: The rate interval, (Hourly/Daily/Weekly/Monthly)
"""
title = 'report_{}_{}'.format(obj_type.lower(), rate_desc)
if obj_type == 'Project':
data = {
'menu_name': title,
'title': title,
'base_report_on': 'Chargeback for Projects',
'report_fields': ['Archived', 'Chargeback Rates', 'Fixed Compute Metric',
'Cpu Cores Used Cost', 'Cpu Cores Used Metric',
'Network I/O Used', 'Network I/O Used Cost',
'Fixed Compute Cost 1', 'Fixed Compute Cost 2',
'Memory Used', 'Memory Used Cost',
'Provider Name', 'Fixed Total Cost', 'Total Cost'],
'filter': {
'filter_show_costs': 'Container Project',
'filter_provider': provider.name,
'filter_project': 'All Container Projects'
}
}
elif obj_type == 'Image':
data = {
'base_report_on': 'Chargeback for Images',
'report_fields': ['Archived', 'Chargeback Rates', 'Fixed Compute Metric',
'Cpu Cores Used Cost', 'Cpu Cores Used Metric',
'Network I/O Used', 'Network I/O Used Cost',
'Fixed Compute Cost 1', 'Fixed Compute Cost 2',
'Memory Used', 'Memory Used Cost',
'Provider Name', 'Fixed Total Cost', 'Total Cost'],
'filter': {
'filter_show_costs': 'Container Image',
'filter_provider': provider.name,
}
}
else:
raise Exception("Unknown object type: {}".format(obj_type))
data['menu_name'] = title
data['title'] = title
if rate_interval == 'Hourly':
data['filter']['interval'] = 'Day'
data['filter']['interval_end'] = 'Yesterday'
data['filter']['interval_size'] = '1 Day'
    elif rate_interval == 'Daily':
        data['filter']['interval'] = 'Week'
        data['filter']['interval_end'] = 'Last Week'
        data['filter']['interval_size'] = '1 Week'
    elif rate_interval in ('Weekly', 'Monthly'):
        data['filter']['interval'] = 'Month'
        data['filter']['interval_end'] = 'Last Month'
        data['filter']['interval_size'] = '1 Month'
    else:
        raise Exception('Unsupported rate interval: "{}"; available options: '
                        '(Hourly/Daily/Weekly/Monthly)'.format(rate_interval))
report = appliance.collections.reports.create(is_candu=True, **data)
logger.info('QUEUING CUSTOM CHARGEBACK REPORT FOR CONTAINER {}'.format(obj_type.upper()))
report.queue(wait_for_finish=True)
return report
def assign_custom_compute_rate(obj_type, chargeback_rate, provider):
"""Assign custom Compute rate for Labeled Container Images
Args:
:py:type:`str` obj_type: Object being tested; only 'Project' and 'Image' are supported
:py:class:`ComputeRate` chargeback_rate: The chargeback rate object
:py:class:`ContainersProvider` provider: The containers provider
"""
if obj_type == 'Image':
compute_assign = assignments.ComputeAssign(
assign_to="Labeled Container Images",
docker_labels="architecture",
selections={
'x86_64': {'Rate': chargeback_rate.description}
})
logger.info('ASSIGNING COMPUTE RATE FOR LABELED CONTAINER IMAGES')
elif obj_type == 'Project':
compute_assign = assignments.ComputeAssign(
assign_to="Selected Providers",
selections={
provider.name: {'Rate': chargeback_rate.description}
})
logger.info('ASSIGNING CUSTOM COMPUTE RATE FOR PROJECT CHARGEBACK')
else:
raise Exception("Unknown object type: {}".format(obj_type))
compute_assign.assign()
logger.info('Rate - {}: {}'.format(chargeback_rate.description,
chargeback_rate.fields))
return chargeback_rate
@pytest.fixture(scope='module')
def compute_rate(appliance, rate_type, interval):
variable_rate = 1 if rate_type == 'variable' else 0
description = fauxfactory.gen_alphanumeric(20, start="custom_rate_")
data = {
'Used CPU Cores': {'per_time': interval,
'fixed_rate': 1,
'variable_rate': variable_rate},
'Fixed Compute Cost 1': {'per_time': interval,
'fixed_rate': 1},
'Fixed Compute Cost 2': {'per_time': interval,
'fixed_rate': 1},
'Used Memory': {'per_time': interval,
'fixed_rate': 1,
'variable_rate': variable_rate},
'Used Network I/O': {'per_time': interval,
'fixed_rate': 1,
'variable_rate': variable_rate}
}
ccb = appliance.collections.compute_rates.create(description, fields=data)
yield ccb
if ccb.exists:
ccb.delete()
@pytest.fixture(scope='module')
def assign_compute_rate(obj_type, compute_rate, provider):
assign_custom_compute_rate(obj_type, compute_rate, provider)
yield compute_rate
assignments.ComputeAssign(assign_to="<Nothing>").assign()
@pytest.fixture(scope='module')
def chargeback_report_data(appliance, obj_type, interval, assign_compute_rate, provider):
report = gen_report_base(appliance, obj_type, provider, assign_compute_rate.description,
interval)
yield report.saved_reports.all()[0].data
report.delete()
def abstract_test_chargeback_cost(
rate_key, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
"""This is an abstract test function for testing rate costs.
It's comparing the expected value that calculated by the rate
to the value in the chargeback report
Args:
:py:type:`str` rate_key: The rate key as it appear in the CHARGEBACK_HEADER_NAMES keys.
:py:type:`str` obj_type: Object being tested; only 'Project' and 'Image' are supported
:py:type:`str` interval: The rate interval, (Hourly/Daily/Weekly/Monthly)
:py:class:`Report` chargeback_report_data: The chargeback report data.
:py:class:`ComputeRate` compute_rate: The compute rate object.
:var soft_assert: soft_assert fixture.
"""
report_headers = CHARGEBACK_HEADER_NAMES[rate_key]
found_something_to_test = False
for row in chargeback_report_data.rows:
if row['Chargeback Rates'].lower() != compute_rate.description.lower():
continue
found_something_to_test = True
fixed_rate = float(compute_rate.fields[report_headers.rate_name]['fixed_rate'])
variable_rate = float(compute_rate.fields[report_headers.rate_name].get('variable_rate', 0))
# Calculate numerical metric
if rate_key == 'Memory':
size_, unit_ = tokenize(row[report_headers.metric_name].upper())
metric = round(parse_size(str(size_) + unit_, binary=True) / 1048576.0, 2)
else:
metric = parse_number(row[report_headers.metric_name])
# Calculate fixed product and cost
num_hours = parse_number(row[CHARGEBACK_HEADER_NAMES['Fixed1'].metric_name])
num_intervals = num_hours / hours_count_lut[interval]
fixed_cost = num_intervals * fixed_rate
variable_cost = num_intervals * metric * variable_rate
# Calculate expected cost
expected_cost = round(variable_cost + fixed_cost, 2)
found_cost = round(parse_number(row[report_headers.cost_name]), 2)
match_threshold = TEST_MATCH_ACCURACY * expected_cost
soft_assert(
abs(found_cost - expected_cost) <= match_threshold,
'Unexpected Chargeback: {}'.format(dump_args(
charge_for=obj_type, rate_key=rate_key, metric=metric, num_hours=num_hours,
num_intervals=num_intervals, fixed_rate=fixed_rate, variable_rate=variable_rate,
fixed_cost=fixed_cost, variable_cost=variable_cost,
expected_full_cost=expected_cost, found_full_cost=found_cost
))
)
assert found_something_to_test, \
'Could not find {} with the assigned rate: {}'.format(obj_type, compute_rate.description)
# Ideally, we would have a single test parametrized by two marks, one in module and the other in
# function scope; unfortunately, because of a bug in py.test [0], we are forced to do this
# [0] https://github.com/pytest-dev/pytest/issues/634
#
# Once resolved:
# @pytest.mark.uncollectif(
# lambda rate_type, rate:
# (rate_type == 'variable' and rate not in variable_rates) or
# (rate_type == 'fixed' and rate not in fixed_rates))
# @pytest.mark.parametrize('rate', all_rates)
# def test_chargeback_rate(
# rate, rate_type, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
# abstract_test_chargeback_cost(
# rate, obj_type, interval, chargeback_report_data, compute_rate, soft_assert)
#
#
# Workaround:
# TODO: fix this parametrization, its janky and can be restructured.
@pytest.mark.uncollectif(lambda rate_type:
rate_type == 'variable',
reason='Variable rate type not valid for fixed test')
def test_chargeback_rate_fixed_1(
rate_type, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
"""
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
abstract_test_chargeback_cost(
'Fixed1', obj_type, interval, chargeback_report_data, compute_rate, soft_assert)
@pytest.mark.uncollectif(lambda rate_type:
rate_type == 'variable',
reason='Variable rate type not valid for fixed test')
def test_chargeback_rate_fixed_2(
rate_type, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
"""
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
abstract_test_chargeback_cost(
'Fixed2', obj_type, interval, chargeback_report_data, compute_rate, soft_assert)
def test_chargeback_rate_cpu_cores(
rate_type, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
"""
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
abstract_test_chargeback_cost(
'CpuCores', obj_type, interval, chargeback_report_data, compute_rate, soft_assert)
def test_chargeback_rate_memory_used(
rate_type, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
"""
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
abstract_test_chargeback_cost(
'Memory', obj_type, interval, chargeback_report_data, compute_rate, soft_assert)
# Network variable rate tests are skipped until this bug is solved:
# https://github.com/ManageIQ/integration_tests/issues/5027
@pytest.mark.uncollectif(lambda rate_type:
rate_type == 'variable',
reason='Variable rate type not valid for network chargeback test')
def test_chargeback_rate_network_io(
rate_type, obj_type, interval, chargeback_report_data, compute_rate, soft_assert):
"""
Polarion:
assignee: juwatts
caseimportance: medium
casecomponent: Containers
initialEstimate: 1/6h
"""
abstract_test_chargeback_cost(
'Network', obj_type, interval, chargeback_report_data, compute_rate, soft_assert)
|
izapolsk/integration_tests
|
cfme/tests/containers/test_chargeback.py
|
Python
|
gpl-2.0
| 14,502
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteTrial
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_v1_generated_VizierService_DeleteTrial_sync]
from google.cloud import aiplatform_v1
def sample_delete_trial():
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.DeleteTrialRequest(
name="name_value",
)
# Make the request
client.delete_trial(request=request)
# [END aiplatform_v1_generated_VizierService_DeleteTrial_sync]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_v1_generated_vizier_service_delete_trial_sync.py
|
Python
|
apache-2.0
| 1,389
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import sys
class Root(CMakePackage):
"""ROOT is a data analysis framework."""
homepage = "https://root.cern.ch"
url = "https://root.cern.ch/download/root_v6.09.02.source.tar.gz"
# Development versions
version('6.09.02', '4188dfeafb72df339a3d688fe92f57ec')
# Production versions
version('6.08.06', 'bcf0be2df31a317d25694ad2736df268', preferred=True)
# Old versions
version('6.06.08', '6ef0fe9bd9f88f3ce8890e3651142ee4')
version('6.06.06', '4308449892210c8d36e36924261fea26')
version('6.06.04', '55a2f98dd4cea79c9c4e32407c2d6d17')
version('6.06.02', 'e9b8b86838f65b0a78d8d02c66c2ec55')
version('5.34.36', '6a1ad549b3b79b10bbb1f116b49067ee')
if sys.platform == 'darwin':
patch('math_uint.patch', when='@6.06.02')
patch('root6-60606-mathmore.patch', when='@6.06.06')
variant('graphviz', default=False, description='Enable graphviz support')
depends_on('cmake@3.4.3:', type='build')
depends_on('pkg-config', type='build')
depends_on('zlib')
# depends_on('unuran')
depends_on('freetype')
depends_on('pcre')
depends_on('xz')
depends_on('libsm')
depends_on('libice')
depends_on('libx11')
depends_on('libxext')
depends_on('libxpm')
depends_on('libxft')
# depends_on('gif')
depends_on('libpng')
depends_on('jpeg')
depends_on('gsl')
depends_on('python@2.7:')
# depends_on('opengl')
depends_on('graphviz', when='+graphviz')
# depends_on('kerberos')
depends_on('libxml2+python')
depends_on('openssl')
# depends_on('castor')
# depends_on('rfio')
# depends_on('mysql')
# depends_on('oracle')
# depends_on('odbc')
# depends_on('postgresql')
depends_on('sqlite')
# depends_on('pythia')
depends_on('fftw')
depends_on('cfitsio')
# depends_on('monalisa')
# depends_on('xrootd')
# depends_on('gfal')
# depends_on('dcap')
# depends_on('ldap')
# depends_on('chirp')
# depends_on('hdfs')
# depends_on('davix')
# I was unable to build root with any Intel compiler
# See https://sft.its.cern.ch/jira/browse/ROOT-7517
conflicts('%intel')
def cmake_args(self):
args = [
'-Dcocoa=OFF',
'-Dbonjour=OFF',
'-Dx11=ON',
]
if sys.platform == 'darwin':
args.extend([
'-Dcastor=OFF',
'-Drfio=OFF',
'-Ddcache=OFF',
])
return args
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
spack_env.set('ROOTSYS', self.prefix)
spack_env.set('ROOT_VERSION', 'v{0}'.format(self.version.up_to(1)))
spack_env.prepend_path('PYTHONPATH', self.prefix.lib)
|
TheTimmy/spack
|
var/spack/repos/builtin/packages/root/package.py
|
Python
|
lgpl-2.1
| 4,028
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from karbor.common import karbor_keystone_plugin
from karbor.tests import base
class KarborKeystonePluginTest(base.TestCase):
def setUp(self):
super(KarborKeystonePluginTest, self).setUp()
self.kc_plugin = karbor_keystone_plugin.KarborKeystonePlugin()
self.kc_plugin.client.services.list = mock.MagicMock()
self.kc_plugin.client.endpoints.list = mock.MagicMock()
self.kc_plugin.client.services.list.return_value = (
'http://192.168.1.2:8799')
def test_get_service_endpoint_with_slash_end(self):
self.kc_plugin._auth_uri = 'http://192.168.1.1/identity/v3/'
self.kc_plugin.get_service_endpoint(
'karbor', 'data-protect', 'fake_region_id', 'public')
self.kc_plugin.client.services.list.assert_called_once_with(
name='karbor',
service_type='data-protect',
base_url='http://192.168.1.1/identity/v3'
)
def test_get_service_endpoint_with_no_slash_end(self):
self.kc_plugin._auth_uri = 'http://192.168.1.1/identity/v3'
self.kc_plugin.get_service_endpoint(
'karbor', 'data-protect', 'fake_region_id', 'public')
self.kc_plugin.client.services.list.assert_called_once_with(
name='karbor',
service_type='data-protect',
base_url='http://192.168.1.1/identity/v3'
)
|
openstack/smaug
|
karbor/tests/unit/common/test_karbor_keystone_plugin.py
|
Python
|
apache-2.0
| 1,967
|
from flask import Flask
from flaskext.cache import Cache
from flaskext.mongokit import BSONObjectIdConverter
from werkzeug.routing import BaseConverter
import settings
app = Flask(__name__)
app.config.from_object('woerterbuch.settings')
app.secret_key = settings.SECRET_KEY
## Hook up custom URL converters.
class RegexConverter(BaseConverter):
"""Regex-powered url converter."""
def __init__(self, url_map, *items):
super(RegexConverter, self).__init__(url_map)
self.regex = items[0]
app.url_map.converters['regex'] = RegexConverter
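# Usage sketch: a route such as @app.route('/wort/<regex("[a-z]+"):slug>')
# would pass only lowercase-letter slugs to the view (hypothetical route).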
app.url_map.converters['ObjectId'] = BSONObjectIdConverter
# Caching
cache = Cache(app)
# Templates
import woerterbuch.context_processors
# Views
import woerterbuch.views
|
fwenzel/strassendeutsch
|
woerterbuch/__init__.py
|
Python
|
gpl-3.0
| 739
|
"""
Django settings for flocs project.
Generated by 'django-admin startproject' using Django 1.8.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import dj_database_url
import os
try:
import flocs.private_settings as private_settings
except ImportError:
import logging
logger = logging.getLogger(__name__)
logger.error('Using default private settings! Security is compromised ' +
'and OAuth logins are not available!')
import flocs.private_settings_template as private_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
EXPORTED_DATA_DIR = os.path.join(BASE_DIR, 'exported-data')
FRONTEND_DIR = os.path.join(BASE_DIR, 'frontend')
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = private_settings.SECRET_KEY
ON_STAGING = os.getenv('ON_STAGING', "False") == "True"
ON_PRODUCTION = os.getenv('ON_AL', "False") == "True" and not ON_STAGING
DEVELOPMENT = not ON_STAGING and not ON_PRODUCTION
DEBUG = (not ON_PRODUCTION) or (os.getenv('DJANGO_DEBUG', "False") == "True")
ALLOWED_HOSTS = [
'.robomise.cz'
]
if ON_PRODUCTION or ON_STAGING:
FRONTEND_BUILD_DIR = os.path.join(BASE_DIR, 'frontend', 'production-build')
else:
FRONTEND_BUILD_DIR = os.path.join(BASE_DIR, 'frontend',
'development-build')
# Application definition
INSTALLED_APPS = (
'django.contrib.contenttypes',
'modeltranslation', # must be before django.contrib.admin
'grappelli.dashboard', # must be before django.contrib.admin
'grappelli', # must be before django.contrib.admin
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'import_export',
'lazysignup',
'social.apps.django_app.default', # OAuth
# our apps
'common',
'blocks',
'concepts',
'tasks',
'feedback',
'practice',
'stats',
'flocs',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'flocs.aspects.localization.LocalizationMiddleware',
)
ROOT_URLCONF = 'flocs.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
# frontend home directory (where to search for index.html)
'DIRS': [FRONTEND_BUILD_DIR],
# allow for fallback index.html in flocs/templates/
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'flocs.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
"default": dj_database_url.config(default='sqlite:///' +
os.path.join(BASE_DIR, 'db.sqlite3'))
}
# Internationalization
USE_I18N = True
USE_L10N = True
USE_TZ = False
# TIME_ZONE = 'UTC'
LANGUAGES = [
('cs', 'Czech'),
('en', 'English')
]
if ON_PRODUCTION:
LANGUAGE_DOMAINS = {
'cs': 'robomise.cz',
'en': 'en.robomise.cz',
}
elif ON_STAGING:
LANGUAGE_DOMAINS = {
'cs': 'staging.robomise.cz',
'en': 'en.staging.robomise.cz',
}
else:
LANGUAGE_DOMAINS = {
'cs': 'localhost:8000',
'en': 'en.localhost:8000',
}
LANGUAGE_CODE = 'cs' # fallback language
MODELTRANSLATION_DEFAULT_LANGUAGE = 'en'
MODELTRANSLATION_TRANSLATION_FILES = (
'tasks.models.translation',
'blocks.models.translation',
'concepts.models.translation',
)
# Grappelli
GRAPPELLI_INDEX_DASHBOARD = 'flocs.dashboard.CustomIndexDashboard'
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(FRONTEND_BUILD_DIR, 'static'),
)
STATIC_ROOT = os.path.join(BASE_DIR, '../static')
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'lazysignup.backends.LazySignupBackend',
'social.backends.facebook.FacebookOAuth2',
'social.backends.google.GoogleOAuth2',
)
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = private_settings.SOCIAL_AUTH_GOOGLE_OAUTH2_KEY
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = private_settings.SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET
SOCIAL_AUTH_FACEBOOK_KEY = private_settings.SOCIAL_AUTH_FACEBOOK_KEY
SOCIAL_AUTH_FACEBOOK_SECRET = private_settings.SOCIAL_AUTH_FACEBOOK_SECRET
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'
# SOCIAL_AUTH_USER_MODEL = 'django.contrib.auth.models.User'
SOCIAL_AUTH_SANITIZE_REDIRECTS = True
# http://stackoverflow.com/questions/22005841/is-not-json-serializable-django-social-auth-facebook-login
# SECRET_KEY MUST remain secret
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.PickleSerializer'
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'user.pipeline.remove_current_user',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details',
'user.pipeline.manual_login',
)
# --------------------------------------------------------------------------
# Email
# --------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 25
EMAIL_SUBJECT_PREFIX = '[flocs]'
EMAIL_ADMINS = ['adaptive-programming@googlegroups.com']
SERVER_EMAIL = 'error-reporting@robomise.cz'
ADMINS = (('Errors', 'adaptive-programming-errors@googlegroups.com'),)
if DEVELOPMENT:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# --------------------------------------------------------------------------
# Logging
# --------------------------------------------------------------------------
LOGGING_DIR = os.getenv('LOGGING_DIR', "logs")
LOGGING = {
'version': 1,
'formatters': {
'simple': {
'format': '[%(asctime)s] %(levelname)s %(message)s'
},
'brief': {
'format': '%(message)s'
},
'long-messages': {
'format': '[%(asctime)s] %(message)s----------'
},
'verbose': {
'format': '[%(asctime)s] %(levelname)s %(module)s : ' +
'"%(message)s" in %(filename)s:%(lineno)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'practice-file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': LOGGING_DIR + '/practice.log',
'formatter': 'verbose'
},
'feedback-file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': LOGGING_DIR + '/feedback.log',
'formatter': 'long-messages'
},
'requests-file': {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': LOGGING_DIR + '/requests.log',
'formatter': 'simple'
},
'mail-admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'feedback': {
'handlers': ['feedback-file'],
'level': 'DEBUG',
'propagate': True,
},
'practice': {
'handlers': ['practice-file'],
'level': 'DEBUG',
'propagate': True
},
'django.request': {
'handlers': ['requests-file', 'mail-admins'],
'level': 'DEBUG',
'propagate': True
},
}
}
|
effa/flocs
|
flocs/settings.py
|
Python
|
gpl-2.0
| 8,803
|
# Copyright (C) 2014-2015 Kate Cook
#
# This file is part of rnascan.
#
# rnascan is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# rnascan is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with rnascan. If not, see <http://www.gnu.org/licenses/>.
import os,sys,csv
import tempfile
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import Seq
from rnascan.pfmutil import norm_pfm
import subprocess
import numpy as np
def struct_pfm_from_aligned(sequences):
#alphabet = ['B','E','H','I','L','M','R','T']
alphabet = ['B','E','H','L','M','R','T']
length = len(sequences[0])
counts = {}
for base in alphabet:
counts[base] = [0] * length
for seq in sequences:
for index, char in enumerate(seq):
if char != '-':
counts[char][index] = counts[char][index] + 1
return counts
def get_structure_probability_matrix_for_sequence(id,seq,frag_length,overlap):
aligned_annotated_sequences = []
for i in xrange(-frag_length/2,len(seq)-frag_length/2,frag_length-overlap):
temphandle = tempfile.NamedTemporaryFile(delete=False,mode='r+b') # for centroid structure
temphandle2 = tempfile.NamedTemporaryFile(delete=False,mode='r+b') # for output of structure parser
# set up sequence fragment record & format using SeqIO
realstart = i
if i<0:
realstart = 0
subseq = seq[realstart:(i + frag_length)]
#print >> sys.stderr, subseq
#print i,i+frag_length,subseq
frag_id = id+"_frag_"+str(i)
input_record = SeqRecord(subseq,id=frag_id,description="")
# call RNAfold and pipe in sequence fragment as fasta
rnafold_args = ["RNAfold","-p","--noPS"]
rnafold_proc = subprocess.Popen(rnafold_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
rnafold_output = rnafold_proc.communicate(input_record.format("fasta"))[0]
#print rnafold_output
# process output to get the actual centroid structure & write it to a temp file
os.system("rm -f *.ps") # remove ps files created by RNAfold because it is dumb
centroid_struct = get_centroid_from_RNAfold_output(rnafold_output)
#print >> sys.stderr, centroid_struct
temphandle.write(centroid_struct+'\n')
temphandle.close()
# translate the centroid structure into structural context alphabet
parse_args = ['parse_secondary_structure', temphandle.name, temphandle2.name]
#print parse_args
parse_structure_proc = subprocess.Popen(parse_args)
parse_structure_proc.wait()
annotated_struct = temphandle2.readline()
        annotated_struct = annotated_struct.rstrip()  # rstrip returns a new string
#print >> sys.stderr, annotated_struct
os.remove(temphandle.name) # remove centroid structure file
os.remove(temphandle2.name) # remove annotated structure file
# generate aligned fragment string with - for gaps
start_gap = "-"*i
end_gap = "-"*( len(seq) - (i + frag_length) )
aligned = start_gap + annotated_struct.rstrip() + end_gap
aligned_annotated_sequences.append(aligned)
# make count pfm & then normalize
counts = struct_pfm_from_aligned(aligned_annotated_sequences)
normalized = norm_pfm(counts)
return normalized
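# Tiling sketch for the fragment loop above (illustrative numbers): with
# frag_length=4 and overlap=2 on an 8 nt sequence, i runs over -2, 0, 2, 4
# and the clamped windows are seq[0:2], seq[0:4], seq[2:6] and seq[4:8], so
# every position is covered by overlapping folding windows.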
def get_structure_probability_matrix_from_probabilities(id,seq,frag_length):
input_record = SeqRecord(seq,id=id,description="")
alphabet = ['E','H','I','M']
programs = {'E':'E_RNAplfold_nolunp', 'H':'H_RNAplfold_nolunp', 'I':'I_RNAplfold_nolunp', 'M':'M_RNAplfold_nolunp'}
    plfold_args = ["-L", str(frag_length), "-W", str(frag_length), "-u", "1"]
probabilities = {}
for alph,p in programs.iteritems():
args = [p] + plfold_args
plfold_proc = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None)
#plfold_output = plfold_proc.communicate(input_record.format("fasta"))[0]
plfold_output = plfold_proc.communicate(str(seq))[0]
data = np.fromstring(plfold_output, dtype=float, sep="\t")
probabilities[alph] = data
    # calculate the paired probability as what remains after the four
    # unpaired structural contexts
    length = len(probabilities[alphabet[0]])
    total = np.zeros(length)
    for a in alphabet:
        total = total + probabilities[a]
    paired = np.subtract(np.ones(length), total)
probabilities['P'] = paired
for a in alphabet+['P']:
probabilities[a] = probabilities[a].tolist()
return probabilities
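# Numeric sketch of the paired-probability step above (illustrative values):
# if the unpaired contexts sum to E+H+I+M = [0.2, 0.7] at two positions, the
# derived paired track is P = [1 - 0.2, 1 - 0.7] = [0.8, 0.3].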
def get_centroid_from_RNAfold_output(rnafold_output):
lines = rnafold_output.split('\n')
#print >> sys.stderr, "::".join(lines)
centroid_line = lines[4]
structetc = centroid_line.split(' ')
return structetc[0]
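# Layout assumed by the lines[4] indexing above (a sketch of RNAfold -p
# output for one fasta record; structures and energies are placeholders):
#   lines[0]  >seq_1
#   lines[1]  GUACUC...
#   lines[2]  ((....)) ( -1.20)          MFE structure
#   lines[3]  ((....)) [ -1.45]          ensemble structure
#   lines[4]  ((....)) { -1.20 d=2.10}   centroid; split(' ')[0] keeps it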
if __name__ == "__main__":
seq = Seq('GUACUCGAAAAAAUGUCAUGGACCCCUUAAAAUUACUGAGGGGUUCAGAAAAUACCGUGCAAAAGACGAAAAAAGACGAAUUUCAUUUGAUUUAUAUUUUAUAAAUGACUGUUGCAUUAAACAAUAGACCAAUUAUUUCAAUUUAAUAUUCUUUGCAGGAAACUUUCACAAUGGAAUAACGCCACAUAUUCAUUGUAAAGAUGUUGCGUACUUCUCUUACUAAAGGGGCACGGCUAACUGGGACAAGAUUUGUUCAAACAAAGGCCCUUUCGAAGGCAACAUUGACAGAUCUGCCCGAAAGAUGGGAAAAUAUGCCAAACUUAGAACAGAAAGAGAUUGCAGAUAAUUUGACAGAACGUCAAAAGCUUCCAUGGAAAACUCUCAAUAACGAGGAAAUCAAAGCAGCUUGGUACAUAUCCUACGGCGAGUGGGGACCUAGAAGACCUGUACACGGAAAAGGCGAUGUUGCAUUUAUAACUAAAGGAGUAUUUUUAGGGUUAGGAAUCUCAUUUGGGCUCUUUGGUUUAGUGAGACUAUUAGCCAAUCCUGAAACUCCAAAGACUAUGAACAGGGAAUGGCAGUUGAAAUCAGACGAGUAUCUGAAGUCAAAAAAUGCCAAUCCUUGGGGAGGUUAUUCUCAAGUUCAAUCUAAAUAAGUAGACGAGGAAAAUAAAAUUGUUUCGUAUAUUCCGUGUUUGGGGUAUAAGUAGAUUGUUUUCAUAUAUACGCAUUUGGUCUUAGUUCAGUAGGUUGAUUACUUAGUUCCUUGUACCUUCUUCUGCAAAUAUCAUUCAUUGUUACUUCGAAGAAGAAAAAAAAUAAUCAUGGAAAAUUGGAAAAAAAAAAAGUCCAAUCU')
id = 'seq_1'
get_structure_probability_matrix_from_probabilities(id,seq,40)
|
cookkate/rnascan
|
rnascan/average_structure.py
|
Python
|
agpl-3.0
| 6,283
|
#!/usr/bin/env python
# vim:set softtabstop=4 shiftwidth=4 tabstop=4 expandtab:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import os
from test import prompty
from test import UnitTestWrapper
class MyFunctions(prompty.functionBase.PromptyFunctions):
def testFunc(self):
return "This Is A Test"
def _hiddenFunc(self):
return "This is secret"
class FunctionContainerTests(UnitTestWrapper):
def test_noname(self):
c = prompty.functionContainer.FunctionContainer()
self.assertRaises(TypeError, c._call)
def test_extendFunctionContainer(self):
c = prompty.functionContainer.FunctionContainer()
# Import this module
c.addFunctionsFromModule(sys.modules[__name__])
self.assertEqual(r"This Is A Test", c._call("testFunc"))
self.assertRaises(KeyError, c._call, "_hiddenFunc")
def test_extendFunctionContainerFromDir(self):
c = prompty.functionContainer.FunctionContainer()
# Import this directory
c.addFunctionsFromDir(os.path.dirname(sys.modules[__name__].__file__))
self.assertEqual(r"This Is A Test", c._call("testFunc"))
|
ltn100/prompty
|
test/test_functionContainer.py
|
Python
|
mit
| 1,265
|
# Copyright (c) 2014 Dell Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.volume.drivers.dell import dell_storagecenter_api
from cinder.volume.drivers.dell import dell_storagecenter_fc
# We patch these here as they are used by every test, to keep the tests
# from trying to contact a Dell Storage Center.
@mock.patch.object(dell_storagecenter_api.HttpClient,
'__init__',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'open_connection')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'close_connection')
class DellSCSanFCDriverTestCase(test.TestCase):
VOLUME = {u'instanceId': u'64702.4829',
u'scSerialNumber': 64702,
u'replicationSource': False,
u'liveVolume': False,
u'vpdId': 4831,
u'objectType': u'ScVolume',
u'index': 4829,
u'volumeFolderPath': u'dopnstktst/',
u'hostCacheEnabled': False,
u'usedByLegacyFluidFsNasVolume': False,
u'inRecycleBin': False,
u'volumeFolderIndex': 17,
u'instanceName': u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'statusMessage': u'',
u'status': u'Down',
u'storageType': {u'instanceId': u'64702.1',
u'instanceName': u'Assigned - Redundant - 2 MB',
u'objectType': u'ScStorageType'},
u'cmmDestination': False,
u'replicationDestination': False,
u'volumeFolder': {u'instanceId': u'64702.17',
u'instanceName': u'opnstktst',
u'objectType': u'ScVolumeFolder'},
u'deviceId': u'6000d31000fcbe0000000000000012df',
u'active': False,
u'portableVolumeDestination': False,
u'deleteAllowed': True,
u'name': u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'scName': u'Storage Center 64702',
u'secureDataUsed': False,
u'serialNumber': u'0000fcbe-000012df',
u'replayAllowed': False,
u'flashOptimized': False,
u'configuredSize': u'1.073741824E9 Bytes',
u'mapped': False,
u'cmmSource': False}
SCSERVER = {u'scName': u'Storage Center 64702',
u'volumeCount': 0,
u'removeHbasAllowed': True,
u'legacyFluidFs': False,
u'serverFolderIndex': 4,
u'alertOnConnectivity': True,
u'objectType': u'ScPhysicalServer',
u'instanceName': u'Server_21000024ff30441d',
u'instanceId': u'64702.47',
u'serverFolderPath': u'opnstktst/',
u'portType': [u'FibreChannel'],
u'type': u'Physical',
u'statusMessage': u'Only 5 of 6 expected paths are up',
u'status': u'Degraded',
u'scSerialNumber': 64702,
u'serverFolder': {u'instanceId': u'64702.4',
u'instanceName': u'opnstktst',
u'objectType': u'ScServerFolder'},
u'parentIndex': 0,
u'connectivity': u'Partial',
u'hostCacheIndex': 0,
u'deleteAllowed': True,
u'pathCount': 5,
u'name': u'Server_21000024ff30441d',
u'hbaPresent': True,
u'hbaCount': 2,
u'notes': u'Created by Dell Cinder Driver',
u'mapped': False,
u'operatingSystem': {u'instanceId': u'64702.38',
u'instanceName': u'Red Hat Linux 6.x',
u'objectType': u'ScServerOperatingSystem'}
}
MAPPING = {u'instanceId': u'64702.2183',
u'scName': u'Storage Center 64702',
u'scSerialNumber': 64702,
u'controller': {u'instanceId': u'64702.64702',
u'instanceName': u'SN 64702',
u'objectType': u'ScController'},
u'lunUsed': [1],
u'server': {u'instanceId': u'64702.47',
u'instanceName': u'Server_21000024ff30441d',
u'objectType': u'ScPhysicalServer'},
u'volume': {u'instanceId': u'64702.4829',
u'instanceName':
u'5729f1db-4c45-416c-bc15-c8ea13a4465d',
u'objectType': u'ScVolume'},
u'connectivity': u'Up',
u'readOnly': False,
u'objectType': u'ScMappingProfile',
u'hostCache': False,
u'mappedVia': u'Server',
u'mapCount': 2,
u'instanceName': u'4829-47',
u'lunRequested': u'N/A'
}
def setUp(self):
super(DellSCSanFCDriverTestCase, self).setUp()
        # configuration is a mock, and a mock is pretty much a blank
        # slate. Mocks created in setUp do not behave quite like
        # decorator-applied mocks, so we just do a few things like
        # driver config here.
self.configuration = mock.Mock()
self.configuration.san_is_local = False
self.configuration.san_ip = "192.168.0.1"
self.configuration.san_login = "admin"
self.configuration.san_password = "pwd"
self.configuration.dell_sc_ssn = 64702
self.configuration.dell_sc_server_folder = 'opnstktst'
self.configuration.dell_sc_volume_folder = 'opnstktst'
self.configuration.dell_sc_api_port = 3033
self._context = context.get_admin_context()
self.driver = dell_storagecenter_fc.DellStorageCenterFCDriver(
configuration=self.configuration)
self.driver.do_setup(None)
self.driver._stats = {'QoS_support': False,
'volume_backend_name': 'dell-1',
'free_capacity_gb': 12123,
'driver_version': '1.0.1',
'total_capacity_gb': 12388,
'reserved_percentage': 0,
'vendor_name': 'Dell',
'storage_protocol': 'FC'}
        # Start with none. Add in the specific tests later;
        # the mocked tests would otherwise mangle these values.
self.driver.backends = None
self.driver.replication_enabled = False
self.volid = '5729f1db-4c45-416c-bc15-c8ea13a4465d'
self.volume_name = "volume" + self.volid
self.connector = {'ip': '192.168.0.77',
'host': 'cinderfc-vm',
'wwnns': ['20000024ff30441c', '20000024ff30441d'],
'initiator': 'iqn.1993-08.org.debian:01:e1b1312f9e1',
'wwpns': ['21000024ff30441c', '21000024ff30441d']}
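    # Note on mock wiring (illustrative summary, not original code): stacked
    # @mock.patch.object decorators apply bottom-up, so each test receives
    # the mock for the decorator nearest its def first, and the three
    # class-level patches arrive last in bottom-up order -- which is why
    # every signature ends with mock_close_connection, mock_open_connection
    # and mock_init (the patched HttpClient.__init__).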
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
def test_initialize_connection(self,
mock_find_wwns,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn':
[u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
        # verify find_volume and get_volume were each called exactly once
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, False)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'_is_live_vol')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns')
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'initialize_secondary')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_live_volume')
def test_initialize_connection_live_vol(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102,
'secondaryRole': 'Secondary'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_initialize_secondary.return_value = (
1, [u'5000D31000FCBE3E', u'5000D31000FCBE36'],
{u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D'],
u'21000024FF30441E': [u'5000D31000FCBE36'],
u'21000024FF30441F': [u'5000D31000FCBE3E']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35',
u'5000D31000FCBE3E', u'5000D31000FCBE36']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
        # verify find_volume and get_volume were each called exactly once
mock_find_volume.assert_called_once_with(fake.VOLUME_ID, None, True)
mock_get_volume.assert_called_once_with(self.VOLUME[u'instanceId'])
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'_is_live_vol')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns')
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'initialize_secondary')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_live_volume')
def test_initialize_connection_live_vol_afo(self,
mock_get_live_volume,
mock_initialize_secondary,
mock_find_wwns,
mock_is_live_volume,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID, 'provider_id': '101.101'}
scvol = {'instanceId': '102.101'}
mock_find_volume.return_value = scvol
mock_get_volume.return_value = scvol
connector = self.connector
sclivevol = {'instanceId': '101.10001',
'primaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'primaryScSerialNumber': 102,
'secondaryVolume': {'instanceId': '101.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 101,
'secondaryRole': 'Activated'}
mock_is_live_volume.return_value = True
mock_find_wwns.return_value = (
1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_get_live_volume.return_value = sclivevol
res = self.driver.initialize_connection(volume, connector)
expected = {'data':
{'discard': True,
'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_discovered': True,
'target_lun': 1,
'target_wwn': [u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
        # verify initialize_secondary was skipped and that find_volume and
        # get_volume were each called exactly once
self.assertFalse(mock_initialize_secondary.called)
mock_find_volume.assert_called_once_with(
fake.VOLUME_ID, '101.101', True)
mock_get_volume.assert_called_once_with('102.101')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_no_wwns(self,
mock_find_wwns,
mock_map_volume,
mock_get_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'create_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_no_server(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_create_server,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=MAPPING)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_vol_not_found(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'map_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(None, [], {}))
def test_initialize_connection_map_vol_fail(self,
mock_find_wwns,
mock_map_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where map_volume returns None (no mappings)
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.initialize_connection,
volume,
connector)
def test_initialize_secondary(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
self.assertEqual(find_wwns_ret, ret)
def test_initialize_secondary_create_server(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=None)
mock_api.create_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
find_wwns_ret = (1, [u'5000D31000FCBE3D', u'5000D31000FCBE35'],
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']})
mock_api.find_wwns = mock.MagicMock(return_value=find_wwns_ret)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
self.assertEqual(find_wwns_ret, ret)
def test_initialize_secondary_no_server(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=None)
mock_api.create_server = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
def test_initialize_secondary_map_fail(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
def test_initialize_secondary_vol_not_found(self,
mock_close_connection,
mock_open_connection,
mock_init):
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.map_secondary_volume = mock.MagicMock(
return_value=self.VOLUME)
mock_api.get_volume = mock.MagicMock(return_value=None)
ret = self.driver.initialize_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=1)
def test_terminate_connection(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res, 'Unexpected return data')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=1)
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'_is_live_vol')
@mock.patch.object(dell_storagecenter_fc.DellStorageCenterFCDriver,
'terminate_secondary')
def test_terminate_connection_live_vol(self,
mock_terminate_secondary,
mock_is_live_vol,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
mock_terminate_secondary.return_value = (None, [], {})
mock_is_live_vol.return_value = True
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res, 'Unexpected return data')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_no_server(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=None)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_no_volume(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(None,
[],
{}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_no_wwns(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
expected = {'driver_volume_type': 'fibre_channel',
'data': {}}
self.assertEqual(expected, res, 'Unexpected return data')
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=False)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=1)
def test_terminate_connection_failure(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
volume = {'id': fake.VOLUME_ID}
connector = self.connector
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.terminate_connection,
volume,
connector)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_server',
return_value=SCSERVER)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_volume',
return_value=VOLUME)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'unmap_volume',
return_value=True)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'find_wwns',
return_value=(1,
[u'5000D31000FCBE3D',
u'5000D31000FCBE35'],
{u'21000024FF30441C':
[u'5000D31000FCBE35'],
u'21000024FF30441D':
[u'5000D31000FCBE3D']}))
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_volume_count',
return_value=0)
def test_terminate_connection_vol_count_zero(self,
mock_get_volume_count,
mock_find_wwns,
mock_unmap_volume,
mock_find_volume,
mock_find_server,
mock_close_connection,
mock_open_connection,
mock_init):
# Test case where get_volume_count is zero
volume = {'id': fake.VOLUME_ID}
connector = self.connector
res = self.driver.terminate_connection(volume, connector)
mock_unmap_volume.assert_called_once_with(self.VOLUME, self.SCSERVER)
expected = {'data':
{'initiator_target_map':
{u'21000024FF30441C': [u'5000D31000FCBE35'],
u'21000024FF30441D': [u'5000D31000FCBE3D']},
'target_wwn':
[u'5000D31000FCBE3D', u'5000D31000FCBE35']},
'driver_volume_type': 'fibre_channel'}
self.assertEqual(expected, res, 'Unexpected return data')
def test_terminate_secondary(self,
mock_close_connection,
mock_open_connection,
mock_init):
mock_api = mock.MagicMock()
mock_api.find_server = mock.MagicMock(return_value=self.SCSERVER)
mock_api.get_volume = mock.MagicMock(return_value=self.VOLUME)
mock_api.find_wwns = mock.MagicMock(return_value=(None, [], {}))
mock_api.unmap_volume = mock.MagicMock(return_value=True)
sclivevol = {'instanceId': '101.101',
'secondaryVolume': {'instanceId': '102.101',
'instanceName': fake.VOLUME_ID},
'secondaryScSerialNumber': 102}
ret = self.driver.terminate_secondary(mock_api, sclivevol,
['wwn1', 'wwn2'])
expected = (None, [], {})
self.assertEqual(expected, ret)
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_update_volume_stats_with_refresh(self,
mock_get_storage_usage,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(True)
self.assertEqual('FC', stats['storage_protocol'])
mock_get_storage_usage.assert_called_once_with()
@mock.patch.object(dell_storagecenter_api.StorageCenterApi,
'get_storage_usage',
return_value={'availableSpace': 100, 'freeSpace': 50})
def test_get_volume_stats_no_refresh(self,
mock_get_storage_usage,
mock_close_connection,
mock_open_connection,
mock_init):
stats = self.driver.get_volume_stats(False)
self.assertEqual('FC', stats['storage_protocol'])
mock_get_storage_usage.assert_not_called()
|
Hybrid-Cloud/cinder
|
cinder/tests/unit/volume/drivers/dell/test_dellfc.py
|
Python
|
apache-2.0
| 45,305
|
from django.contrib.auth.models import User
from django.test import TestCase
from ESSArch_Core.auth.models import Group, GroupType
class CurrentOrganizationTests(TestCase):
def setUp(self):
self.user = User.objects.create(username="admin")
self.member = self.user.essauth_member
self.org_group_type = GroupType.objects.create(codename='organization')
def test_set_when_added_to_organization_group(self):
group = Group.objects.create(name='organization', group_type=self.org_group_type)
group.add_member(self.member)
self.assertEqual(self.user.user_profile.current_organization, group)
group2 = Group.objects.create(name='organization 2', group_type=self.org_group_type)
group2.add_member(self.member)
self.assertEqual(self.user.user_profile.current_organization, group)
def test_set_when_added_to_organization_group_indirectly(self):
group = Group.objects.create(name='organization', group_type=self.org_group_type)
group2 = Group.objects.create(name='child_group', parent=group)
group2.add_member(self.member)
self.assertEqual(self.user.user_profile.current_organization, group)
def test_changed_when_membership_is_removed(self):
group = Group.objects.create(name='organization', group_type=self.org_group_type)
group2 = Group.objects.create(name='organization 2', group_type=self.org_group_type)
group.add_member(self.member)
group2.add_member(self.member)
group.remove_member(self.member)
self.user.user_profile.refresh_from_db()
self.assertEqual(self.user.user_profile.current_organization, group2)
group2.remove_member(self.member)
self.user.user_profile.refresh_from_db()
self.assertIsNone(self.user.user_profile.current_organization)
|
ESSolutions/ESSArch_Core
|
ESSArch_Core/auth/tests/test_signals.py
|
Python
|
gpl-3.0
| 1,852
|
from typing import List, Optional
from fastapi import FastAPI
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel
app = FastAPI()
class Item(BaseModel):
name: Optional[str] = None
description: Optional[str] = None
price: Optional[float] = None
tax: float = 10.5
tags: List[str] = []
items = {
"foo": {"name": "Foo", "price": 50.2},
"bar": {"name": "Bar", "description": "The bartenders", "price": 62, "tax": 20.2},
"baz": {"name": "Baz", "description": None, "price": 50.2, "tax": 10.5, "tags": []},
}
@app.get("/items/{item_id}", response_model=Item)
async def read_item(item_id: str):
return items[item_id]
@app.patch("/items/{item_id}", response_model=Item)
async def update_item(item_id: str, item: Item):
stored_item_data = items[item_id]
stored_item_model = Item(**stored_item_data)
update_data = item.dict(exclude_unset=True)
updated_item = stored_item_model.copy(update=update_data)
items[item_id] = jsonable_encoder(updated_item)
return updated_item
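# Illustrative exchange for the PATCH handler above (assumes the app is being
# served, e.g. with `uvicorn tutorial002:app`): sending only "name" leaves
# the stored price and the default tax untouched thanks to exclude_unset=True.
#   PATCH /items/foo  {"name": "New Foo"}
#   -> {"name": "New Foo", "description": null, "price": 50.2,
#       "tax": 10.5, "tags": []}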
|
tiangolo/fastapi
|
docs_src/body_updates/tutorial002.py
|
Python
|
mit
| 1,054
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cloudferry.cloud import cloud
from cloudferry.lib.base import migration
from cloudferry.lib.migration import observers
from cloudferry.lib.os.compute import nova_compute
from cloudferry.lib.os.identity import keystone
from cloudferry.lib.os.image import glance_image
from cloudferry.lib.os.network import neutron
from cloudferry.lib.os.storage import cinder_storage
from cloudferry.lib.scheduler import cursor
from cloudferry.lib.scheduler import namespace
from cloudferry.lib.scheduler import scheduler
from cloudferry.lib.utils import utils as utl
class OS2OSFerry(object):
def __init__(self, config, state_observer):
self.migration_observers = [
state_observer,
observers.LoggingMigrationObserver()
]
resources = {'identity': keystone.KeystoneIdentity,
'image': glance_image.GlanceImage,
'storage': cinder_storage.CinderStorage,
'network': neutron.NeutronNetwork,
'compute': nova_compute.NovaCompute}
self.config = config
self.src_cloud = cloud.Cloud(resources, cloud.SRC, config,
self.migration_observers)
self.dst_cloud = cloud.Cloud(resources, cloud.DST, config,
self.migration_observers)
self.src_cloud.migration = {
resource: migration.Migration(self.src_cloud, self.dst_cloud,
resource)
for resource in resources
}
self.dst_cloud.migration = {
resource: migration.Migration(self.src_cloud, self.dst_cloud,
resource)
for resource in resources
}
self.init = {
'src_cloud': self.src_cloud,
'dst_cloud': self.dst_cloud,
'cfg': self.config,
'migration_observers': self.migration_observers
}
self.scenario = None
def migrate(self, scenario=None):
self.scenario = scenario
namespace_scheduler = namespace.Namespace({
'__init_task__': self.init,
'info_result': {
utl.INSTANCES_TYPE: {}
}
})
# "process_migration" is dict with 3 keys:
# "preparation" - is cursor that points to tasks must be processed
# before migration i.e - taking snapshots,
# figuring out all services are up
# "migration" - is cursor that points to the first
# task in migration process
# "rollback" - is cursor that points to tasks must be processed
# in case of "migration" failure
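        # Illustrative shape of the resulting dict (task names here are
        # hypothetical; the real ones come from the loaded scenario):
        #   {'preparation': Cursor(check_services_task),
        #    'migration': Cursor(first_migration_task),
        #    'rollback': Cursor(rollback_task)}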
scenario.init_tasks(self.init)
scenario.load_scenario()
process_migration = {k: cursor.Cursor(v)
for k, v in scenario.get_net().items() if v}
scheduler_migr = scheduler.Scheduler(namespace=namespace_scheduler,
**process_migration)
scheduler_migr.start()
return scheduler_migr.status_error
|
SVilgelm/CloudFerry
|
cloudferry/cloud/os2os.py
|
Python
|
apache-2.0
| 3,746
|
#!/usr/bin/env python3
from paramecio.citoplasma.keyutils import create_key_encrypt, create_key_encrypt_256, create_key
from oslo_concurrency import lockutils
try:
from settings import config
except ImportError:
class config:
cookie_name='paramecio.session'
key_encrypt=create_key_encrypt_256(30)
session_opts={'session.data_dir': 'sessions', 'session.type': 'file', 'session.path': 'paramecio'}
from itsdangerous import JSONWebSignatureSerializer
from bottle import request, response
import os
import json
import fcntl
import errno
import time
import shutil
import uuid
#from diskcache import Cache
#from dogpile.cache import make_region
# Cookie session
# This saves the session in a cookie for maximum performance. A later version
# could use memcached or something similar for session storage.
# Later versions may use two secret_keys for more security.
class ParamecioSession:
def __init__(self, session_dict):
self.session=session_dict
def get(self, name, default_value):
if not name in self.session:
self.session[name]=default_value
return self.session[name]
def __getitem__(self, key):
return self.session[key]
def __setitem__(self, key, value):
self.session[key]=value
def __delitem__(self, key):
if key!='token':
del self.session[key]
    def __contains__(self, key):
        return key in self.session
    def __iter__(self):
        # iter() must return an iterator, not the underlying dict itself
        return iter(self.session)
def __str__(self):
return self.session.__str__()
def keys(self):
return self.session.keys()
def remove(self):
response.delete_cookie(config.cookie_name, path="/")
def delete(self):
self.remove()
def save(self):
# Here get the function for load session
save_session(self.session['token'], self.session)
def reset(self):
token=self.session['token']
self.session={'token': token}
self.save()
def get_session():
s={}
try:
if request.environ:
if not 'session' in request.environ:
cookie=None
if request.cookies.get(config.cookie_name):
cookie=request.get_cookie(config.cookie_name)
if not cookie:
if hasattr(request, 'app'):
s=generate_session()
else:
# Here get the function for load session
s=load_session(cookie)
request.environ['session']=s
else:
s=request.environ['session']
except RuntimeError:
pass
return ParamecioSession(s)
if config.session_opts['session.type']=='mysql':
pass
elif config.session_opts['session.type']=='redis':
import redis
def load_session(token):
s={}
r=redis.StrictRedis(host=config.session_opts['session.host'], port=config.session_opts['session.port'], db=config.session_opts['session.db'])
value=r.get(token)
if not value:
s={'token': token}
else:
try:
s=json.loads(value.decode('utf-8'))
except:
s={'token': token}
return s
def save_session(token, session):
r=redis.StrictRedis(host=config.session_opts['session.host'], port=config.session_opts['session.port'], db=config.session_opts['session.db'])
r.set(token, json.dumps(session))
def after_session():
pass
else:
    def generate_session(session=None):  # 'session' is currently unused
#secret=URLSafeSerializer(config.key_encrypt)
#session=secret.dumps(session)
token=create_key(30).replace('/', '#')
s={'token': token}
response.set_cookie(config.cookie_name, token, path=config.session_opts['session.path'])
request.environ['session']=s
file_session=config.session_opts['session.data_dir']+'/'+token+'_session'
save_session(token, s, True)
request.environ['session']=s
return s
def regenerate_session():
token=create_key(30).replace('/', '#')
s={'token': token}
response.set_cookie(config.cookie_name, token, path=config.session_opts['session.path'])
file_session=config.session_opts['session.data_dir']+'/'+token+'_session'
save_session(token, s, True)
request.environ['session']=s
return ParamecioSession(s)
def load_session(token):
file_session=config.session_opts['session.data_dir']+'/'+token+'_session'
if os.path.isfile(file_session):
with open(file_session) as f:
try:
s=json.loads(f.read())
os.utime(file_session)
except:
s={'token': token}
else:
return generate_session({'token': token})
return s
@lockutils.synchronized('not_thread_safe')
def save_session(token, session, create_file=False):
file_session=config.session_opts['session.data_dir']+'/'+token+'_session'
# Check if exists lock
if os.path.isfile(file_session) or create_file:
with open(file_session, 'w') as f:
#try:
json_session=json.dumps(session)
f.write(json_session)
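# Usage sketch (illustrative; the route and the 'visits' key are assumptions,
# not part of this module): inside a bottle handler, get_session() loads or
# creates the per-request session and save() persists it through whichever
# backend config.session_opts['session.type'] selects.
#   from bottle import route
#   @route('/visits')
#   def visits():
#       session = get_session()
#       session['visits'] = session.get('visits', 0) + 1
#       session.save()
#       return str(session['visits'])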
|
paramecio/parameciofm
|
paramecio/citoplasma/sessions.py
|
Python
|
gpl-3.0
| 6,082
|
#!/usr/bin/env python2
"""Script to do basic sanity checking for target_link_libraries() commands in
CMakeLists.txt files.
Scans C++ sources specified in add_library() commands for includes that look
like they are in the Quickstep source tree, then makes sure that the
corresponding libraries appear in the target_link_libraries() command for the
library.
TODO List / Known Issues & Limitations:
- Script skips over targets that are built conditionally (i.e. that have
multiple add_library() commands) and just prints a warning to the user.
- Script only validates libraries, not executables.
- Script only checks quickstep includes and libraries, so it will not
detect missing third party libraries.
"""
# Copyright 2011-2015 Quickstep Technologies LLC.
# Copyright 2015 Pivotal Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Don't scan these directories for quickstep modules.
EXCLUDED_TOP_LEVEL_DIRS = ["build", "third_party"]
# Explicitly ignored dependencies (special headers with no other quickstep
# dependencies).
IGNORED_DEPENDENCIES = frozenset(
["quickstep_threading_WinThreadsAPI",
"quickstep_utility_textbasedtest_TextBasedTest",
"quickstep_utility_textbasedtest_TextBasedTestDriver",
"quickstep_storage_bitweaving_BitWeavingHIndexSubBlock",
"quickstep_storage_bitweaving_BitWeavingIndexSubBlock",
"quickstep_storage_bitweaving_BitWeavingVIndexSubBlock"])
# States when scanning a CMakeLists.txt file.
CMAKE_SCANNING_NONE = 0
CMAKE_SCANNING_LIBRARY = 1
CMAKE_SCANNING_TARGET_LINK_LIBRARIES = 2
CMAKE_SCANNING_IGNORE = 3
def convert_path_to_targetname(include_path):
"""Convert an included header file's path to a quickstep library target in
cmake.
Args:
include_path (str): A header file path taken from a C++ include
statement.
Returns:
str: The target name in CMake that corresponds to the specified header.
"""
path_components = include_path.split("/")
for idx in range(len(path_components) - 1):
path_components[idx] = path_components[idx].replace("_", "")
if path_components[-1].endswith("_gen.hpp"):
# Generated header (e.g. parser or lexer).
path_components[-1] = path_components[-1][:-8]
elif path_components[-1].endswith(".hpp"):
# Regular header.
path_components[-1] = path_components[-1][:-4]
elif path_components[-1].endswith(".pb.h"):
# Generated protobuf header.
path_components[-1] = path_components[-1][:-5] + "_proto"
return "quickstep_" + "_".join(path_components)
def convert_proto_path_to_targetname(import_path):
"""Convert an imported proto's path to a quickstep library target in CMake.
Args:
import_path (str): A proto definition file path taken from a protobuf
import statement.
Returns:
str: The target name in CMake that corresponds to the specified proto
definition.
"""
path_components = import_path.split("/")
for idx in range(len(path_components) - 1):
path_components[idx] = path_components[idx].replace("_", "")
if path_components[-1].endswith(".proto"):
path_components[-1] = path_components[-1][:-6] + "_proto"
return "quickstep_" + "_".join(path_components)
def get_module_targetname_for_cmakelists(cmakelists_filename):
"""Determine what the name for the all-in-one module target should be based
on the CMakeLists.txt filename with path.
Args:
cmakelists_filename (str): CMakeLists.txt filename with path from
quickstep root.
Returns:
str: The target name in CMake that corresponds to the special
all-in-one library for the module described by the CMakeLists.txt
file.
"""
components = []
(head, tail) = os.path.split(cmakelists_filename)
while head != "":
(head, tail) = os.path.split(head)
if tail != ".":
components.append(tail.replace("_", ""))
components.append("quickstep")
components.reverse()
return "_".join(components)
def get_dependency_set_from_cpp_src(src_filename, qs_module_dirs):
"""Read the C++ source file at 'src_filename' and return a set of all
quickstep libraries it includes headers for.
Args:
src_filename (str): A path to a C++ source file (may be header or
implementation).
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules
Returns:
Set[str]: A set of CMake target names for the quickstep library targets
that the C++ file includes.
"""
dependency_set = set()
with open(src_filename, "r") as src_file:
for line in src_file:
if line.startswith("#include \""):
include_filename = line[len("#include \""):]
include_filename = (
include_filename[:include_filename.find("\"")])
# Skip over CMake-generated config headers and -inl companion
# headers.
if not (include_filename.endswith("Config.h")
or include_filename.endswith("-inl.hpp")):
for module_dir in qs_module_dirs:
if include_filename.startswith(module_dir):
dependency_set.add(
convert_path_to_targetname(include_filename))
break
return dependency_set
def get_dependency_set_from_proto_src(src_filename, qs_module_dirs):
"""Read the protobuf definition file at 'src_filename' and return a set of
all other Quickstep proto libraries it imports.
Args:
src_filename (str): A path to a proto definition file.
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules
Returns:
Set[str]: A set of CMake target names for the quickstep library targets
that the proto file imports.
"""
dependency_set = set()
with open(src_filename, "r") as src_file:
for line in src_file:
if line.startswith("import \""):
import_filename = line[len("import \""):]
import_filename = import_filename[:import_filename.find("\"")]
for module_dir in qs_module_dirs:
if import_filename.startswith(module_dir):
dependency_set.add(
convert_proto_path_to_targetname(import_filename))
break
return dependency_set
def process_add_library(qs_module_dirs,
directory,
add_library_args,
deps_from_includes,
skipped_targets,
generated_targets):
"""Process a CMake add_library() command while scanning a CMakeLists.txt
file.
Args:
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules
directory (str): The directory that the CMakeLists.txt file we are
currently scanning resides in.
add_library_args (str): The arguments to an add_library() command in
CMakeLists.txt
deps_from_includes (Map[str, Set[str]]): A map from a CMake target name
to the set of other CMake targets it depends on, deduced based on
what headers the C++/proto sources for the target include. A new
entry will be added to this map for the target specified by the
add_library() command.
skipped_targets (Set[str]): A set of CMake target names that have been
skipped for dependency checking because multiple add_library()
commands specified the same target name. This probably means that
the target in question is built differently depending on some
configuration options or platform checks.
generated_targets (Set[str]): A set of CMake target names that appear
to be built from dynamically-generated source code that we can't
scan. Note, however, that we can and do scan proto definitions and
flex/bison sources for dependencies. An entry will be added to this
            set if the given add_library() command references unscannable
generated sources.
"""
components = add_library_args.split()
if components[0].startswith("quickstep"):
if components[0] in deps_from_includes:
skipped_targets.add(components[0])
deps_from_includes[components[0]] = set()
return
deps = set()
for src_filename in components[1:]:
if src_filename.startswith("${"):
if (src_filename.endswith("proto_srcs}")
or src_filename.endswith("proto_hdrs}")):
# Scan protobuf definition instead of C++ source.
#
# src_filename has the form module_File_proto_srcs, so we
# split it by '_' and get the third-from-last part (i.e.
# the base filename without extension).
src_filename = src_filename.split("_")[-3] + ".proto"
full_src_filename = os.path.join(directory, src_filename)
deps.update(
get_dependency_set_from_proto_src(full_src_filename,
qs_module_dirs))
continue
elif src_filename.startswith("${BISON_"):
# Scan Bison parser source.
src_filename = (
src_filename[len("${BISON_"):-len("_OUTPUTS}")]
+ ".ypp")
elif src_filename.startswith("${FLEX_"):
# Scan Flex lexer source.
src_filename = (
src_filename[len("${FLEX_"):-len("_OUTPUTS}")]
+ ".lpp")
else:
generated_targets.add(components[0])
return
elif src_filename.startswith("\"${CMAKE_CURRENT_SOURCE_DIR}/"):
src_filename = src_filename[
len("\"${CMAKE_CURRENT_SOURCE_DIR}/"):-1]
full_src_filename = os.path.join(directory, src_filename)
deps.update(get_dependency_set_from_cpp_src(full_src_filename,
qs_module_dirs))
deps_from_includes[components[0]] = deps
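# Worked example of the proto-variable handling above (hypothetical name): a
# source argument like "${queryoptimizer_QueryPlan_proto_srcs}" splits on '_'
# into ['${queryoptimizer', 'QueryPlan', 'proto', 'srcs}'], so the
# third-from-last part recovers "QueryPlan" and "QueryPlan.proto" in the
# current directory is what gets scanned.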
def process_target_link_libraries(target_link_libraries_args,
deps_in_cmake):
"""Process a CMake target_link_libraries() while scanning a CMakeLists.txt
file.
Args:
target_link_libraries_args (str): The arguments to a
target_link_libraries() command in CMakeLists.txt
deps_in_cmake (Map[str, Set[str]]): A map of CMake target names to
their sets of dependencies (also CMake target names) specified by
target_link_libraries() commands. If the target being processed
already has an entry in the map, its set will be expanded with any
additional dependencies, otherwise a new entry will be created with
all the dependencies from the current target_link_libraries()
command. This way, if multiple target_link_libraries() commands are
processed for the same target, we will build up the union of all
dependencies for it (just like CMake does).
"""
components = target_link_libraries_args.split()
if components[0].startswith("quickstep"):
deps = set()
# Intentionally count the first part for self-includes
for component in components:
if component.startswith("quickstep"):
deps.add(component)
if components[0] in deps_in_cmake:
deps_in_cmake[components[0]].update(deps)
else:
deps_in_cmake[components[0]] = deps
def process_cmakelists_file(cmakelists_filename, qs_module_dirs):
"""Scan a CMakeLists.txt file and report any mistakes (missing or
superfluous dependencies in target_link_libraries() commands).
This function will deduce what other libraries a given library target
should depend on based on what headers are included in its source code. It
will then collect the set of link dependencies actually specified in
target_link_libraries() commands, and will print warnings about libraries
that appear in one set but not the other.
Args:
cmakelists_filename (str): The path to a CMakeLists.txt file to scan
and validate.
qs_module_dirs (List[str]): List of directories for top-level quickstep
modules.
Returns:
Tuple[Set[str], Set[str], Set[str]]: First element is the set of
targets that failed validation because they had missing and/or
superfluous dependencies. Second element is the set of targets
that were skipped over because they had multiple add_library()
commands (probably because they are built differently depending on
configuration options or platform checks). Third element is the
set of targets that were skipped because they appear to be built
from dynamically-generated source code (although proto definitions
            and flex/bison sources are detected and scanned for dependencies).
"""
directory = os.path.dirname(cmakelists_filename)
module_targetname = get_module_targetname_for_cmakelists(
cmakelists_filename)
deps_from_includes = {}
deps_in_cmake = {}
validation_failed_targets = set()
skipped_targets = set()
generated_targets = set()
scan_state = CMAKE_SCANNING_NONE
previous_state = CMAKE_SCANNING_NONE
stitched_string = ""
with open(cmakelists_filename, "r") as cmakelists_file:
for line in cmakelists_file:
if ("CMAKE_VALIDATE_IGNORE_BEGIN" in line and
scan_state != CMAKE_SCANNING_IGNORE):
previous_state = scan_state
scan_state = CMAKE_SCANNING_IGNORE
continue
if scan_state == CMAKE_SCANNING_IGNORE:
if "CMAKE_VALIDATE_IGNORE_END" in line:
scan_state = previous_state
elif "CMAKE_VALIDATE_IGNORE_BEGIN" in line:
print "Nested IGNORE_BEGIN directives found in: "\
+ cmakelists_filename + ", exiting"
exit(-1)
else:
continue
elif scan_state == CMAKE_SCANNING_NONE:
add_library_pos = line.find("add_library(")
if add_library_pos != -1:
scan_state = CMAKE_SCANNING_LIBRARY
stitched_string = (
line[add_library_pos + len("add_library("):])
closing_paren_pos = stitched_string.find(")")
if closing_paren_pos != -1:
stitched_string = stitched_string[:closing_paren_pos]
process_add_library(qs_module_dirs,
directory,
stitched_string,
deps_from_includes,
skipped_targets,
generated_targets)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
else:
target_link_libraries_pos = line.find(
"target_link_libraries(")
if target_link_libraries_pos != -1:
scan_state = CMAKE_SCANNING_TARGET_LINK_LIBRARIES
stitched_string = (
line[target_link_libraries_pos
+ len("target_link_libraries("):])
closing_paren_pos = stitched_string.find(")")
if closing_paren_pos != -1:
stitched_string = (
stitched_string[:closing_paren_pos])
process_target_link_libraries(stitched_string,
deps_in_cmake)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
elif scan_state == CMAKE_SCANNING_LIBRARY:
closing_paren_pos = line.find(")")
if closing_paren_pos == -1:
stitched_string += line
else:
stitched_string += line[:closing_paren_pos]
process_add_library(qs_module_dirs,
directory,
stitched_string,
deps_from_includes,
skipped_targets,
generated_targets)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
elif scan_state == CMAKE_SCANNING_TARGET_LINK_LIBRARIES:
closing_paren_pos = line.find(")")
if closing_paren_pos == -1:
stitched_string += line
else:
stitched_string += line[:closing_paren_pos]
process_target_link_libraries(stitched_string,
deps_in_cmake)
stitched_string = ""
scan_state = CMAKE_SCANNING_NONE
# After scanning, report any missing dependencies.
for target, include_deps in deps_from_includes.iteritems():
if target in skipped_targets:
pass
elif len(include_deps) != 0:
if target not in deps_in_cmake:
if not (target in include_deps and len(include_deps) == 1):
validation_failed_targets.add(target)
print "Missing target_link_libraries() for " + target + ":"
for dep in sorted(include_deps):
print "\t" + dep
else:
missing_deps = (include_deps
- deps_in_cmake[target]
- IGNORED_DEPENDENCIES)
if len(missing_deps) != 0:
validation_failed_targets.add(target)
print "Missing target_link_libraries() for " + target + ":"
for dep in sorted(missing_deps):
print "\t" + dep
elif target == module_targetname:
# Special case hack for module all-in-one library
missing_deps = (frozenset(deps_from_includes.keys())
- deps_in_cmake[target])
# Filter out test-only libraries.
true_missing_deps = set()
for dep in missing_deps:
if not dep.startswith(module_targetname + "_tests"):
true_missing_deps.add(dep)
if len(true_missing_deps) != 0:
validation_failed_targets.add(target)
print "Missing target_link_libraries() for " + target + ":"
for dep in sorted(true_missing_deps):
print "\t" + dep
# Also report possibly superfluous extra dependencies.
for target, cmake_deps in deps_in_cmake.iteritems():
if (target not in skipped_targets) and (target in deps_from_includes):
extra_deps = cmake_deps - deps_from_includes[target]
if target in extra_deps:
extra_deps.remove(target)
if len(extra_deps) != 0 and target != module_targetname:
validation_failed_targets.add(target)
print ("Possibly superfluous target_link_libraries() for "
+ target + ":")
for dep in sorted(extra_deps):
print "\t" + dep
return (validation_failed_targets, skipped_targets, generated_targets)
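# A hedged usage sketch (not part of the original script) showing how the
# three result sets returned by process_cmakelists_file() can be consumed;
# the CMakeLists.txt path and module list below are hypothetical.
def _example_single_file_report():
    failed, skipped, generated = process_cmakelists_file(
        "storage/CMakeLists.txt", ["storage", "utility", "types"])
    print "failed validation:", sorted(failed)
    print "skipped (check manually):", sorted(skipped)
    print "built from generated sources:", sorted(generated)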
def main(cmakelists_to_process):
"""Main function for script which scans and analyzes CMakeLists.txt files
and prints warnings about missing or superfluous dependencies, and about
targets that could not be automatically scanned and should be manually
checked.
Args:
cmakelists_to_process (List[str]): A list of relative paths of
CMakeLists.txt files to scan and report on. If empty, this function
will instead recursively walk the current working directory and
scan every CMakeLists.txt file that it finds.
Returns:
int: The total number of targets that failed validation because of
missing or superfluous dependencies.
"""
if not os.getcwd().endswith("quickstep"):
print ("WARNING: you don't appear to be running in the root quickstep "
"source directory. Don't blame me if something goes wrong.")
qs_module_dirs = []
for filename in os.listdir("."):
if (os.path.isdir(filename)
and not filename.startswith(".")
and filename not in EXCLUDED_TOP_LEVEL_DIRS):
qs_module_dirs.append(filename)
if len(cmakelists_to_process) == 0:
for (dirpath, dirnames, filenames) in os.walk('.'):
skip = False
for excluded_dir in EXCLUDED_TOP_LEVEL_DIRS:
if dirpath.startswith(excluded_dir):
skip = True
break
if not skip:
if "CMakeLists.txt" in filenames:
cmakelists_to_process.append(
os.path.join(dirpath, "CMakeLists.txt"))
global_validation_failed_targets = set()
global_skipped_targets = set()
global_generated_targets = set()
for cmakelists_filename in cmakelists_to_process:
(local_validation_failed_targets,
local_skipped_targets,
local_generated_targets) = (
process_cmakelists_file(cmakelists_filename, qs_module_dirs))
global_validation_failed_targets.update(
local_validation_failed_targets)
global_skipped_targets.update(local_skipped_targets)
global_generated_targets.update(local_generated_targets)
if len(global_skipped_targets) != 0:
print ("WARNING: The following targets had multiple add_library() "
+ "commands and were NOT checked by this script (they should "
+ "be manually checked):")
for target in sorted(global_skipped_targets):
print "\t" + target
if len(global_generated_targets) != 0:
print ("INFO: The add_library() commands for the following targets "
+ "appear to reference generated sources, so they were not "
+ "checked):")
for target in sorted(global_generated_targets):
print "\t" + target
return len(global_validation_failed_targets)
if __name__ == "__main__":
if main(sys.argv[1:]) > 0:
sys.exit(1)
else:
sys.exit(0)
|
pivotalsoftware/quickstep
|
validate_cmakelists.py
|
Python
|
apache-2.0
| 23,918
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Token persistence service."""
import abc
import copy
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
from keystone.common import cache
from keystone.common import dependency
from keystone.common import manager
from keystone import exception
from keystone.i18n import _LW
from keystone.token import utils
CONF = cfg.CONF
LOG = log.getLogger(__name__)
MEMOIZE = cache.get_memoization_decorator(section='token')
REVOCATION_MEMOIZE = cache.get_memoization_decorator(
section='token', expiration_section='revoke')
@dependency.requires('assignment_api', 'identity_api', 'resource_api',
'token_provider_api', 'trust_api')
class PersistenceManager(manager.Manager):
"""Default pivot point for the Token backend.
See :mod:`keystone.common.manager.Manager` for more details on how this
dynamically calls the backend.
"""
def __init__(self):
super(PersistenceManager, self).__init__(CONF.token.driver)
def _assert_valid(self, token_id, token_ref):
"""Raise TokenNotFound if the token is expired."""
current_time = timeutils.normalize_time(timeutils.utcnow())
expires = token_ref.get('expires')
if not expires or current_time > timeutils.normalize_time(expires):
raise exception.TokenNotFound(token_id=token_id)
def get_token(self, token_id):
if not token_id:
# NOTE(morganfainberg): There are cases when the
# context['token_id'] will in-fact be None. This also saves
# a round-trip to the backend if we don't have a token_id.
raise exception.TokenNotFound(token_id='')
unique_id = utils.generate_unique_id(token_id)
token_ref = self._get_token(unique_id)
# NOTE(morganfainberg): Lift expired checking to the manager, there is
# no reason to make the drivers implement this check. With caching,
# self._get_token could return an expired token. Make sure we behave
# as expected and raise TokenNotFound on those instances.
self._assert_valid(token_id, token_ref)
return token_ref
@MEMOIZE
def _get_token(self, token_id):
# Only ever use the "unique" id in the cache key.
return self.driver.get_token(token_id)
def create_token(self, token_id, data):
unique_id = utils.generate_unique_id(token_id)
data_copy = copy.deepcopy(data)
data_copy['id'] = unique_id
ret = self.driver.create_token(unique_id, data_copy)
if MEMOIZE.should_cache(ret):
# NOTE(morganfainberg): when doing a cache set, you must pass the
# same arguments through, the same as invalidate (this includes
# "self"). First argument is always the value to be cached
self._get_token.set(ret, self, unique_id)
return ret
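    # Hedged illustration (not in the original module) of the dogpile-style
    # cache-key convention the NOTE above relies on: for a memoized method,
    # `method.set(value, self, arg)` stores `value` under the same key that
    # calling `method(self, arg)` would compute, and
    # `method.invalidate(self, arg)` drops that entry; `self` participates
    # in key generation, which is why it is always passed explicitly.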
def delete_token(self, token_id):
if not CONF.token.revoke_by_id:
return
unique_id = utils.generate_unique_id(token_id)
self.driver.delete_token(unique_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
if not CONF.token.revoke_by_id:
return
token_list = self.driver.delete_tokens(user_id, tenant_id, trust_id,
consumer_id)
for token_id in token_list:
unique_id = utils.generate_unique_id(token_id)
self._invalidate_individual_token_cache(unique_id)
self.invalidate_revocation_list()
@REVOCATION_MEMOIZE
def list_revoked_tokens(self):
return self.driver.list_revoked_tokens()
def invalidate_revocation_list(self):
# NOTE(morganfainberg): Note that ``self`` needs to be passed to
# invalidate() because of the way the invalidation method works on
# determining cache-keys.
self.list_revoked_tokens.invalidate(self)
def delete_tokens_for_domain(self, domain_id):
"""Delete all tokens for a given domain.
It will delete all the project-scoped tokens for the projects
that are owned by the given domain, as well as any tokens issued
to users that are owned by this domain.
However, deletion of domain_scoped tokens will still need to be
implemented as stated in TODO below.
"""
if not CONF.token.revoke_by_id:
return
projects = self.resource_api.list_projects()
for project in projects:
if project['domain_id'] == domain_id:
for user_id in self.assignment_api.list_user_ids_for_project(
project['id']):
self.delete_tokens_for_user(user_id, project['id'])
# TODO(morganfainberg): implement deletion of domain_scoped tokens.
users = self.identity_api.list_users(domain_id)
user_ids = (user['id'] for user in users)
self.delete_tokens_for_users(user_ids)
def delete_tokens_for_user(self, user_id, project_id=None):
"""Delete all tokens for a given user or user-project combination.
This method adds in the extra logic for handling trust-scoped token
revocations in a single call instead of needing to explicitly handle
trusts in the caller's logic.
"""
if not CONF.token.revoke_by_id:
return
self.delete_tokens(user_id, tenant_id=project_id)
for trust in self.trust_api.list_trusts_for_trustee(user_id):
# Ensure we revoke tokens associated to the trust / project
# user_id combination.
self.delete_tokens(user_id, trust_id=trust['id'],
tenant_id=project_id)
for trust in self.trust_api.list_trusts_for_trustor(user_id):
# Ensure we revoke tokens associated to the trust / project /
# user_id combination where the user_id is the trustor.
# NOTE(morganfainberg): This revocation is a bit coarse, but it
# covers a number of cases such as disabling of the trustor user,
# deletion of the trustor user (for any number of reasons). It
# might make sense to refine this and be more surgical on the
# deletions (e.g. don't revoke tokens for the trusts when the
# trustor changes password). For now, to maintain previous
# functionality, this will continue to be a bit overzealous on
# revocations.
self.delete_tokens(trust['trustee_user_id'], trust_id=trust['id'],
tenant_id=project_id)
def delete_tokens_for_users(self, user_ids, project_id=None):
"""Delete all tokens for a list of user_ids.
:param user_ids: list of user identifiers
:param project_id: optional project identifier
"""
if not CONF.token.revoke_by_id:
return
for user_id in user_ids:
self.delete_tokens_for_user(user_id, project_id=project_id)
def _invalidate_individual_token_cache(self, token_id):
# NOTE(morganfainberg): invalidate takes the exact same arguments as
# the normal method, this means we need to pass "self" in (which gets
# stripped off).
# FIXME(morganfainberg): Does this cache actually need to be
# invalidated? We maintain a cached revocation list, which should be
# consulted before accepting a token as valid. For now we will
# do the explicit individual token invalidation.
self._get_token.invalidate(self, token_id)
self.token_provider_api.invalidate_individual_token_cache(token_id)
# NOTE(morganfainberg): @dependency.optional() is required here to ensure the
# class-level optional dependency control attribute is populated as empty.
# This is because of the override of .__getattr__ and ensures that if the
# optional dependency injector changes attributes, this class doesn't break.
@dependency.optional()
@dependency.requires('token_provider_api')
@dependency.provider('token_api')
class Manager(object):
"""The token_api provider.
This class is a proxy class to the token_provider_api's persistence
manager.
"""
def __init__(self):
# NOTE(morganfainberg): __init__ is required for dependency processing.
super(Manager, self).__init__()
def __getattr__(self, item):
"""Forward calls to the `token_provider_api` persistence manager."""
# NOTE(morganfainberg): Prevent infinite recursion, raise an
# AttributeError for 'token_provider_api' ensuring that the dep
# injection doesn't infinitely try and lookup self.token_provider_api
# on _process_dependencies. This doesn't need an exception string as
# it should only ever be hit on instantiation.
if item == 'token_provider_api':
raise AttributeError()
f = getattr(self.token_provider_api._persistence, item)
LOG.warning(_LW('`token_api.%s` is deprecated as of Juno in favor of '
'utilizing methods on `token_provider_api` and may be '
'removed in Kilo.'), item)
setattr(self, item, f)
return f
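# Hedged illustration (not in the original module) of the proxy above: the
# first access to an attribute such as `Manager().get_token` goes through
# __getattr__, logs the deprecation warning, and caches the bound method on
# the instance via setattr(), so later accesses bypass __getattr__:
#
#   token_api = Manager()
#   token_ref = token_api.get_token(token_id)  # warns, then forwards to
#                                              # token_provider_api._persistence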
@six.add_metaclass(abc.ABCMeta)
class Driver(object):
"""Interface description for a Token driver."""
@abc.abstractmethod
def get_token(self, token_id):
"""Get a token by id.
:param token_id: identity of the token
:type token_id: string
:returns: token_ref
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def create_token(self, token_id, data):
"""Create a token by id and data.
:param token_id: identity of the token
:type token_id: string
:param data: dictionary with additional reference information
::
{
expires=''
id=token_id,
user=user_ref,
tenant=tenant_ref,
metadata=metadata_ref
}
:type data: dict
:returns: token_ref or None.
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_token(self, token_id):
"""Deletes a token by id.
:param token_id: identity of the token
:type token_id: string
:returns: None.
:raises: keystone.exception.TokenNotFound
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def delete_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Deletes tokens by user.
If the tenant_id is not None, only delete the tokens by user id under
the specified tenant.
If the trust_id is not None, it will be used to query tokens and the
user_id will be ignored.
If the consumer_id is not None, only delete the tokens by consumer id
that match the specified consumer id.
:param user_id: identity of user
:type user_id: string
:param tenant_id: identity of the tenant
:type tenant_id: string
:param trust_id: identity of the trust
:type trust_id: string
:param consumer_id: identity of the consumer
:type consumer_id: string
:returns: The tokens that have been deleted.
:raises: keystone.exception.TokenNotFound
"""
if not CONF.token.revoke_by_id:
return
token_list = self._list_tokens(user_id,
tenant_id=tenant_id,
trust_id=trust_id,
consumer_id=consumer_id)
for token in token_list:
try:
self.delete_token(token)
except exception.NotFound:
pass
return token_list
@abc.abstractmethod
def _list_tokens(self, user_id, tenant_id=None, trust_id=None,
consumer_id=None):
"""Returns a list of current token_id's for a user
This is effectively a private method only used by the ``delete_tokens``
method and should not be called by anything outside of the
``token_api`` manager or the token driver itself.
:param user_id: identity of the user
:type user_id: string
:param tenant_id: identity of the tenant
:type tenant_id: string
:param trust_id: identity of the trust
:type trust_id: string
:param consumer_id: identity of the consumer
:type consumer_id: string
:returns: list of token_id's
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def list_revoked_tokens(self):
"""Returns a list of all revoked tokens
:returns: list of token_id's
"""
raise exception.NotImplemented() # pragma: no cover
@abc.abstractmethod
def flush_expired_tokens(self):
"""Archive or delete tokens that have expired.
"""
raise exception.NotImplemented() # pragma: no cover
|
jumpstarter-io/keystone
|
keystone/token/persistence/core.py
|
Python
|
apache-2.0
| 13,981
|
def quicksort(unsorted_list):
if len(unsorted_list) < 1:
return unsorted_list
else:
pivot = unsorted_list[-1]
        pivot_list = [pivot]  # grouping elements equal to the pivot avoids quicksort's worst case on inputs with many repeated values
left_partition = []
right_partition = []
for i in unsorted_list[:-1]:
if i < pivot:
left_partition.append(i)
elif i > pivot:
right_partition.append(i)
else:
pivot_list.append(i)
return quicksort(left_partition) + pivot_list + quicksort(right_partition)
def quicksort_inplace(array):
    if len(array) >= 2:
        _quicksort_inplace(array, 0, len(array) - 1)
    return array
def _quicksort_inplace(array, index1, index2):
if index2 - index1 > 0:
pivot, left, right = array[index1], index1, index2
while left <= right:
while array[left] < pivot:
left += 1
while array[right] > pivot:
right -= 1
if left <= right:
array[left], array[right] = array[right], array[left]
left += 1
right -= 1
_quicksort_inplace(array, index1, right)
_quicksort_inplace(array, left, index2)
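# A minimal usage sketch (not part of the original file): quicksort()
# returns a new sorted list, while quicksort_inplace() reorders its
# argument (and returns it for convenience).
def _demo():
    data = [5, 2, 9, 2, 7]
    assert quicksort(data) == [2, 2, 5, 7, 9]
    copy = list(data)
    quicksort_inplace(copy)
    assert copy == [2, 2, 5, 7, 9]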
|
markableidinger/sorting
|
quicksort.py
|
Python
|
mit
| 1,291
|
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Dag Wieers <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: mail
type: notification
short_description: Sends failure events via email
description:
- This callback will report failures via email
version_added: '2.0'
author:
- Dag Wieers (@dagwieers)
requirements:
- whitelisting in configuration
options:
mta:
description: Mail Transfer Agent, server that accepts SMTP
env:
- name: SMTPHOST
ini:
- section: callback_mail
key: smtphost
version_added: '2.5'
default: localhost
mtaport:
    description: Mail Transfer Agent Port, the port on which the SMTP server listens
ini:
- section: callback_mail
key: smtpport
version_added: '2.5'
default: 25
to:
description: Mail recipient
ini:
- section: callback_mail
key: to
version_added: '2.5'
default: root
sender:
description: Mail sender
ini:
- section: callback_mail
key: sender
version_added: '2.5'
cc:
description: CC'd recipient
ini:
- section: callback_mail
key: cc
version_added: '2.5'
bcc:
description: BCC'd recipient
ini:
- section: callback_mail
key: bcc
version_added: '2.5'
note:
- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
'''
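# A hedged example (not part of the original plugin) of an ansible.cfg
# stanza exercising the ini options documented above; the host and
# addresses are illustrative:
#
#   [callback_mail]
#   smtphost = smtp.example.com
#   smtpport = 25
#   to = ops@example.com
#   sender = ansible@example.com
#   cc = oncall@example.com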
import json
import os
import re
import smtplib
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
''' This Ansible callback plugin mails errors to interested parties. '''
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'notification'
CALLBACK_NAME = 'mail'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self, display=None):
super(CallbackModule, self).__init__(display=display)
self.sender = None
self.to = 'root'
self.smtphost = os.getenv('SMTPHOST', 'localhost')
self.smtpport = 25
self.cc = None
self.bcc = None
def set_options(self, task_keys=None, var_options=None, direct=None):
super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
self.sender = self.get_option('sender')
self.to = self.get_option('to')
self.smtphost = self.get_option('mta')
self.smtpport = int(self.get_option('mtaport'))
self.cc = self.get_option('cc')
self.bcc = self.get_option('bcc')
def mail(self, subject='Ansible error mail', body=None):
if body is None:
body = subject
smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
b_sender = to_bytes(self.sender)
b_to = to_bytes(self.to)
b_cc = to_bytes(self.cc)
b_bcc = to_bytes(self.bcc)
b_subject = to_bytes(subject)
b_body = to_bytes(body)
b_content = b'From: %s\n' % b_sender
b_content += b'To: %s\n' % b_to
if self.cc:
b_content += b'Cc: %s\n' % b_cc
b_content += b'Subject: %s\n\n' % b_subject
b_content += b_body
b_addresses = b_to.split(b',')
if self.cc:
b_addresses += b_cc.split(b',')
if self.bcc:
b_addresses += b_bcc.split(b',')
for b_address in b_addresses:
smtp.sendmail(b_sender, b_address, b_content)
smtp.quit()
def subject_msg(self, multiline, failtype, linenr):
return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
def indent(self, multiline, indent=8):
return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
def body_blob(self, multiline, texttype):
        ''' Turn some text output into a well-indented block for sending in a mail body '''
intro = 'with the following %s:\n\n' % texttype
blob = ''
for line in multiline.strip('\r\n').splitlines():
blob += '%s\n' % line
return intro + self.indent(blob) + '\n'
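    # Hedged illustration (not in the original plugin) of the helper above,
    # e.g. body_blob('line 1\nline 2', 'standard output') returns the intro,
    # a blank line, and each line indented by eight spaces:
    #
    #   with the following standard output:
    #
    #           line 1
    #           line 2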
def mail_result(self, result, failtype):
host = result._host.get_name()
if not self.sender:
self.sender = '"Ansible: %s" <root>' % host
# Add subject
if self.itembody:
subject = self.itemsubject
elif result._result.get('failed_when_result') is True:
subject = "Failed due to 'failed_when' condition"
elif result._result.get('msg'):
subject = self.subject_msg(result._result['msg'], failtype, 0)
elif result._result.get('stderr'):
subject = self.subject_msg(result._result['stderr'], failtype, -1)
elif result._result.get('stdout'):
subject = self.subject_msg(result._result['stdout'], failtype, -1)
elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
subject = self.subject_msg(result._result['exception'], failtype, -1)
else:
subject = '%s: %s' % (failtype, result._task.name or result._task.action)
# Make playbook name visible (e.g. in Outlook/Gmail condensed view)
body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
if result._task.name:
body += 'Task: %s\n' % result._task.name
body += 'Module: %s\n' % result._task.action
body += 'Host: %s\n' % host
body += '\n'
# Add task information (as much as possible)
body += 'The following task failed:\n\n'
if 'invocation' in result._result:
body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
elif result._task.name:
body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
else:
body += self.indent('%s\n' % result._task.action)
body += '\n'
# Add item / message
if self.itembody:
body += self.itembody
elif result._result.get('failed_when_result') is True:
body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
elif result._result.get('msg'):
body += self.body_blob(result._result['msg'], 'message')
# Add stdout / stderr / exception / warnings / deprecations
if result._result.get('stdout'):
body += self.body_blob(result._result['stdout'], 'standard output')
if result._result.get('stderr'):
body += self.body_blob(result._result['stderr'], 'error output')
if result._result.get('exception'): # Unrelated exceptions are added to output :-/
body += self.body_blob(result._result['exception'], 'exception')
if result._result.get('warnings'):
for i in range(len(result._result.get('warnings'))):
                body += self.body_blob(result._result['warnings'][i], 'warning %d' % (i + 1))
if result._result.get('deprecations'):
for i in range(len(result._result.get('deprecations'))):
                body += self.body_blob(result._result['deprecations'][i], 'deprecation %d' % (i + 1))
body += 'and a complete dump of the error:\n\n'
body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
self.mail(subject=subject, body=body)
def v2_playbook_on_start(self, playbook):
self.playbook = playbook
self.itembody = ''
def v2_runner_on_failed(self, result, ignore_errors=False):
if ignore_errors:
return
self.mail_result(result, 'Failed')
def v2_runner_on_unreachable(self, result):
self.mail_result(result, 'Unreachable')
def v2_runner_on_async_failed(self, result):
self.mail_result(result, 'Async failure')
def v2_runner_item_on_failed(self, result):
# Pass item information to task failure
self.itemsubject = result._result['msg']
self.itembody += self.body_blob(json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4), "failed item dump '%(item)s'" % result._result)
|
vmindru/ansible
|
lib/ansible/plugins/callback/mail.py
|
Python
|
gpl-3.0
| 8,479
|
__author__ = 'Dani'
from wikidata_exp.wdexp.communications.input.wikidata.interfaces import IdTracker
from decimal import *
class Json01IdsParser(IdTracker):
"""
FORMAT JSON01:
    Summary: It consists of a dict with a depth of 1. The keys are wikidata IDs
and the values of those keys are numbers (float).
    The strings of the JSON should be quoted with the char " and the JSON
    should be well formatted, since this module does not use a JSON library
    but a faster plain-text (structured) processor.
Example:
{
"Q4167836": 0.12757293940694778,
"Q4167410": 0.048518222496095886,
"Q5": 0.04803422148902256,
"Q16521": 0.04055021999515414,
"Q7432": 0.033999504701633765,
...
}
"""
def __init__(self, source_file, break_char):
self._in_file = source_file
self._break_char = break_char
def yield_entity_ids(self):
with open(self._in_file, "r") as in_stream:
previous_result = ""
while True:
data = in_stream.read(1024)
if not data:
break
last_index = 0
for i in range(0, len(data)):
if data[i] == self._break_char:
yield self._extract_id_from_substring(previous_result + data[last_index:i + 1])
previous_result = ""
last_index = i + 1
previous_result += data[last_index:]
@staticmethod
def _extract_id_from_substring(target_str):
first_index = None
last_index = None
colon_index = None
i = 0
for char in target_str:
if char == '"':
if not first_index:
first_index = i
else:
last_index = i
if char == ":":
colon_index = i
break
i += 1
return target_str[first_index + 1:last_index], str(Decimal(target_str[colon_index + 2:-1]))
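# A hedged usage sketch (not part of the original module); the file name is
# hypothetical and, as in the FORMAT JSON01 example above, every entry is
# assumed to be terminated by the break character ','. Each yielded item is
# an (entity_id, score_string) tuple such as ('Q5', '0.04803422148902256').
def _example():
    parser = Json01IdsParser("scores_json01.json", ",")
    return list(parser.yield_entity_ids())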
|
DaniFdezAlvarez/wikidataExplorer
|
wikidata_exp/wdexp/communications/input/wikidata/json01_ids_parser.py
|
Python
|
gpl-2.0
| 2,122
|
'''
This bottle-cql plugin allows Cassandra connection pools to be passed
around your application.
The plugin injects an argument into all route callbacks that require a
`cql` keyword.
Usage Example::
import bottle
from bottle import HTTPError
from bottle.ext import cql as bottle_cql
import cql
    conn = cql.connect('localhost', 9160, 'et', cql_version='3.0.0')
app = bottle.Bottle()
plugin = bottle_cql.Plugin(conn, keyword="cql")
app.install(plugin)
@app.get('/:query')
def show(query, cql):
        cursor = cql.cursor()
cursor.execute(query)
for row in cursor:
do_stuff()
cursor.close()
Copyright (c) 2013, David McNelis and Emerging Threats
License: MIT (see LICENSE for details)
'''
import inspect
import bottle
import cql
# PluginError is defined in bottle >= 0.10
if not hasattr(bottle, 'PluginError'):
class PluginError(bottle.BottleException):
pass
bottle.PluginError = PluginError
class CQLPlugin(object):
name = 'bottle_cql'
api = 2
def __init__(self, conn,
keyword='cql', use_kwargs=False):
'''
        :param conn: CQL Connection -- This should ultimately be a pool of connections
:param keyword: Keyword used to inject session database in a route
'''
self.conn = conn
self.keyword = keyword
self.use_kwargs = use_kwargs
def setup(self, app):
        ''' Make sure that other installed plugins don't claim the same
        keyword argument. '''
for other in app.plugins:
if not isinstance(other, CQLPlugin):
continue
if other.keyword == self.keyword:
raise bottle.PluginError("Found another SQLAlchemy plugin with "\
"conflicting settings (non-unique keyword).")
elif other.name == self.name:
self.name += '_%s' % self.keyword
def apply(self, callback, route):
# hack to support bottle v0.9.x
if bottle.__version__.startswith('0.9'):
allconfig = route['config']
_callback = route['callback']
else:
allconfig = route.config
_callback = route.callback
config = allconfig.get('cql', {})
keyword = config.get('keyword', self.keyword)
use_kwargs = config.get('use_kwargs', self.use_kwargs)
argspec = inspect.getargspec(_callback)
if not ((use_kwargs and argspec.keywords) or keyword in argspec.args):
return callback
def wrapper(*args, **kwargs):
kwargs[keyword] = self.conn
try:
rv = callback(*args, **kwargs)
except bottle.HTTPError:
raise
except bottle.HTTPResponse:
raise
return rv
return wrapper
Plugin = CQLPlugin
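# A hedged sketch (not in the original module) of the per-route override
# that apply() reads from the route config; `app` is assumed to be a
# bottle.Bottle with a CQLPlugin installed as in the module docstring, and
# the query is illustrative.
def _example_route_override(app):
    @app.get('/status', cql={'keyword': 'session'})
    def status(session):
        cursor = session.cursor()
        cursor.execute("SELECT now() FROM system.local")
        cursor.close()
        return 'ok'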
|
EmergingThreats/bottle-cql
|
bottle_cql.py
|
Python
|
mit
| 2,951
|
import os
from config import Package
from libXML2 import libXML2
from MPI import MPI
from pcu import pcu
class StGermain(Package):
def setup_dependencies(self):
self.mpi = self.add_dependency(MPI, required=True)
self.libxml2 = self.add_dependency(libXML2, required=True)
self.pcu = self.add_dependency(pcu, required=True)
def gen_locations(self):
yield ('/usr', [], [])
yield ('/usr/local', [], [])
def gen_envs(self, loc):
for env in Package.gen_envs(self, loc):
self.headers = [os.path.join('StGermain', 'StGermain.h')]
if self.find_libraries(loc[2], 'StGermain'):
env.PrependUnique(LIBS=['StGermain'])
yield env
|
geodynamics/gale
|
config/packages/StGermain.py
|
Python
|
gpl-2.0
| 735
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ParameterMapping'
db.create_table('lizard_wbconfiguration_parametermapping', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('parametercache', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_fewsnorm.ParameterCache'])),
('ident_wbconfiguration', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
))
db.send_create_signal('lizard_wbconfiguration', ['ParameterMapping'])
# Adding model 'WBConfigurationDBFMapping'
db.create_table('lizard_wbconfiguration_wbconfigurationdbfmapping', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('model_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('wbfield_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('dbffield_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('dbffield_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
('dbffield_length', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
('dbffield_decimals', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True)),
))
db.send_create_signal('lizard_wbconfiguration', ['WBConfigurationDBFMapping'])
# Adding model 'AreaGridConfiguration'
db.create_table('lizard_wbconfiguration_areagridconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('app_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('model_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('lizard_wbconfiguration', ['AreaGridConfiguration'])
# Adding model 'AreaField'
db.create_table('lizard_wbconfiguration_areafield', (
('code', self.gf('django.db.models.fields.CharField')(max_length=256, primary_key=True)),
('field_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
('app_name', self.gf('django.db.models.fields.CharField')(max_length=50)),
('model_name', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal('lizard_wbconfiguration', ['AreaField'])
# Adding model 'AreaGridFieldConfiguration'
db.create_table('lizard_wbconfiguration_areagridfieldconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('field_name', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_wbconfiguration.AreaField'], max_length=128)),
('display_name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('editable', self.gf('django.db.models.fields.BooleanField')(default=False)),
('visible', self.gf('django.db.models.fields.BooleanField')(default=False)),
('ts_parameter', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('field_type', self.gf('django.db.models.fields.CharField')(max_length=128)),
('grid', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_wbconfiguration.AreaGridConfiguration'])),
('sequence', self.gf('django.db.models.fields.IntegerField')()),
))
db.send_create_signal('lizard_wbconfiguration', ['AreaGridFieldConfiguration'])
# Adding model 'AreaConfiguration'
db.create_table('lizard_wbconfiguration_areaconfiguration', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('ident', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('area', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['lizard_area.Area'], unique=True)),
('start_dt', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('ts_precipitation', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_precipitation', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('ts_evaporation', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_evaporation', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('max_intake', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('max_outtake', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('ts_concentr_chloride_1', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_concentr_chloride_1', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('ts_concentr_chloride_2', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_concentr_chloride_2', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('surface', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=1, blank=True)),
('bottom_height', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('ts_water_level', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_water_level', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('kwel_is_ts', self.gf('django.db.models.fields.BooleanField')(default=False)),
('kwel', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('ts_kwel', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_kwel', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('wegz_is_ts', self.gf('django.db.models.fields.BooleanField')(default=False)),
('wegz', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('ts_wegz', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_wegz', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('peilh_issp', self.gf('django.db.models.fields.BooleanField')(default=False)),
('sp_is_ts', self.gf('django.db.models.fields.BooleanField')(default=False)),
('ts_sp', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='sp', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('winterp', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('lentep', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('zomerp', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('herfstp', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('start_wp', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('start_lp', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('start_zp', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('start_hp', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('marge_ond', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('marge_bov', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_min_1', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_inc_1', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_min_2', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_inc_2', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_min_3', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_inc_3', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_min_4', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('nutc_inc_4', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('ini_con_cl', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('init_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('concentr_chloride_precipitation', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('concentr_chloride_seepage', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_phosphate_precipitation', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_phopshate_seepage', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_phosphate_precipitation', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_phosphate_seepage', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_nitrogyn_precipitation', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_nitrogyn_seepage', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_nitrogyn_precipitation', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_nitrogyn_seepage', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
))
db.send_create_signal('lizard_wbconfiguration', ['AreaConfiguration'])
# Adding model 'Structure'
db.create_table('lizard_wbconfiguration_structure', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=128)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_wbconfiguration.AreaConfiguration'])),
('is_computed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('in_out', self.gf('django.db.models.fields.CharField')(max_length=3, null=True, blank=True)),
('deb_is_ts', self.gf('django.db.models.fields.BooleanField')(default=False)),
('ts_debiet', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_debiet', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('deb_zomer', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('deb_wint', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('concentr_chloride', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_phosphate', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_phosphate', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_nitrogen', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_nitrogen', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('lizard_wbconfiguration', ['Structure'])
# Adding model 'BucketsType'
db.create_table('lizard_wbconfiguration_bucketstype', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('bucket_type', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('description', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
))
db.send_create_signal('lizard_wbconfiguration', ['BucketsType'])
# Adding model 'Bucket'
db.create_table('lizard_wbconfiguration_bucket', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128)),
('code', self.gf('django.db.models.fields.CharField')(max_length=128)),
('bucket_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_wbconfiguration.BucketsType'], null=True, blank=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_wbconfiguration.AreaConfiguration'])),
('replace_impact_by_nutricalc', self.gf('django.db.models.fields.BooleanField')(default=False)),
('is_computed', self.gf('django.db.models.fields.BooleanField')(default=False)),
('ts_flowoff', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_flowoff_bucket', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('ts_drainageindraft', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_drainageindraf_bucket', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('ts_referenceoverflow', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_referenceoverflow_bucket', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('surface', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=1, blank=True)),
('kwelwegz_is_ts', self.gf('django.db.models.fields.BooleanField')(default=False)),
('kwelwegz', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('ts_kwelwegz', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='ts_kwelwegz_bucket', null=True, to=orm['lizard_fewsnorm.TimeSeriesCache'])),
('porosity', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('crop_evaporation_factor', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_crop_evaporation_factor', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('drainage_fraction', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('indraft_fraction', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('man_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_porosity', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_crop_evaporation_factor', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_min_crop_evaporation_factor', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_drainage_fraction', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_indraft_fraction', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_max_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_min_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('init_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('bottom_init_water_level', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('concentr_chloride_flow_off', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('concentr_chloride_drainage_indraft', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_phosphate_flow_off', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_phosphate_drainage_indraft', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_phosphate_flow_off', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_phosphate_drainage_indraft', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_nitrogen_flow_off', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('min_concentr_nitrogen_drainage_indraft', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_nitrogen_flow_off', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('incr_concentr_nitrogen_drainage_indraft', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('label_flow_off', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('label_drainaige_indraft', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=5, decimal_places=3, blank=True)),
('deleted', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('lizard_wbconfiguration', ['Bucket'])
def backwards(self, orm):
# Deleting model 'ParameterMapping'
db.delete_table('lizard_wbconfiguration_parametermapping')
# Deleting model 'WBConfigurationDBFMapping'
db.delete_table('lizard_wbconfiguration_wbconfigurationdbfmapping')
# Deleting model 'AreaGridConfiguration'
db.delete_table('lizard_wbconfiguration_areagridconfiguration')
# Deleting model 'AreaField'
db.delete_table('lizard_wbconfiguration_areafield')
# Deleting model 'AreaGridFieldConfiguration'
db.delete_table('lizard_wbconfiguration_areagridfieldconfiguration')
# Deleting model 'AreaConfiguration'
db.delete_table('lizard_wbconfiguration_areaconfiguration')
# Deleting model 'Structure'
db.delete_table('lizard_wbconfiguration_structure')
# Deleting model 'BucketsType'
db.delete_table('lizard_wbconfiguration_bucketstype')
# Deleting model 'Bucket'
db.delete_table('lizard_wbconfiguration_bucket')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.areacode': {
'Meta': {'object_name': 'AreaCode'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.areatype': {
'Meta': {'object_name': 'AreaType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.basin': {
'Meta': {'object_name': 'Basin'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'area_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.AreaType']", 'null': 'True', 'blank': 'True'}),
'basin': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Basin']", 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.AreaCode']", 'null': 'True', 'blank': 'True'}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'municipality': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Municipality']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'province': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Province']", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Status']", 'null': 'True', 'blank': 'True'}),
'watermanagementarea': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.WaterManagementArea']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.municipality': {
'Meta': {'object_name': 'Municipality'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.province': {
'Meta': {'object_name': 'Province'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.status': {
'Meta': {'object_name': 'Status'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_area.watermanagementarea': {
'Meta': {'object_name': 'WaterManagementArea'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_fewsnorm.fewsnormsource': {
'Meta': {'object_name': 'FewsNormSource'},
'database_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'lizard_fewsnorm.geolocationcache': {
'Meta': {'ordering': "('ident', 'name')", 'object_name': 'GeoLocationCache', '_ormbases': ['lizard_geo.GeoObject']},
'fews_norm_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.FewsNormSource']"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'icon': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'module': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_fewsnorm.ModuleCache']", 'null': 'True', 'through': "orm['lizard_fewsnorm.TimeSeriesCache']", 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'parameter': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_fewsnorm.ParameterCache']", 'null': 'True', 'through': "orm['lizard_fewsnorm.TimeSeriesCache']", 'blank': 'True'}),
'shortname': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'timestep': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lizard_fewsnorm.TimeStepCache']", 'null': 'True', 'through': "orm['lizard_fewsnorm.TimeSeriesCache']", 'blank': 'True'}),
'tooltip': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'lizard_fewsnorm.modulecache': {
'Meta': {'ordering': "('ident',)", 'object_name': 'ModuleCache'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'lizard_fewsnorm.parametercache': {
'Meta': {'ordering': "('ident',)", 'object_name': 'ParameterCache'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'lizard_fewsnorm.timeseriescache': {
'Meta': {'object_name': 'TimeSeriesCache'},
'geolocationcache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.GeoLocationCache']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modulecache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ModuleCache']"}),
'parametercache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ParameterCache']"}),
'timestepcache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.TimeStepCache']"})
},
'lizard_fewsnorm.timestepcache': {
'Meta': {'ordering': "('ident',)", 'object_name': 'TimeStepCache'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_wbconfiguration.areaconfiguration': {
'Meta': {'object_name': 'AreaConfiguration'},
'area': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Area']", 'unique': 'True'}),
'bottom_height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'concentr_chloride_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'concentr_chloride_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'herfstp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'incr_concentr_nitrogyn_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_nitrogyn_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_phosphate_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_phosphate_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'ini_con_cl': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'init_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'kwel': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'kwel_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lentep': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'marge_bov': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'marge_ond': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'max_intake': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'max_outtake': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_nitrogyn_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_nitrogyn_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_phopshate_seepage': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_phosphate_precipitation': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'nutc_inc_1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_inc_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_inc_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_inc_4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_min_1': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_min_2': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_min_3': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'nutc_min_4': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'peilh_issp': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sp_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_dt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_hp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_lp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_wp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_zp': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
'ts_concentr_chloride_1': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_concentr_chloride_1'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_concentr_chloride_2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_concentr_chloride_2'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_evaporation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_evaporation'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_kwel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_kwel'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_precipitation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_precipitation'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_sp': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'sp'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_water_level': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_water_level'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_wegz': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_wegz'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'wegz': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'wegz_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'winterp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'zomerp': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'})
},
'lizard_wbconfiguration.areafield': {
'Meta': {'object_name': 'AreaField'},
'app_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '256', 'primary_key': 'True'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_wbconfiguration.areagridconfiguration': {
'Meta': {'object_name': 'AreaGridConfiguration'},
'app_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_wbconfiguration.areagridfieldconfiguration': {
'Meta': {'object_name': 'AreaGridFieldConfiguration'},
'display_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'editable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'field_name': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaField']", 'max_length': '128'}),
'field_type': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'grid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaGridConfiguration']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sequence': ('django.db.models.fields.IntegerField', [], {}),
'ts_parameter': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'visible': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'lizard_wbconfiguration.bucket': {
'Meta': {'ordering': "['id']", 'object_name': 'Bucket'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaConfiguration']"}),
'bottom_crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_drainage_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_indraft_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_init_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_max_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_min_crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_min_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bottom_porosity': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'bucket_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.BucketsType']", 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'concentr_chloride_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'concentr_chloride_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'drainage_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'incr_concentr_nitrogen_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_nitrogen_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_phosphate_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_phosphate_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'indraft_fraction': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'init_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'is_computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'kwelwegz': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'kwelwegz_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'label_drainaige_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'label_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'man_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_nitrogen_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_nitrogen_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_phosphate_drainage_indraft': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_phosphate_flow_off': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_crop_evaporation_factor': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_water_level': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'porosity': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'replace_impact_by_nutricalc': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'surface': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '1', 'blank': 'True'}),
'ts_drainageindraft': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_drainageindraf_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_flowoff': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_flowoff_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_kwelwegz': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_kwelwegz_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"}),
'ts_referenceoverflow': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_referenceoverflow_bucket'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"})
},
'lizard_wbconfiguration.bucketstype': {
'Meta': {'object_name': 'BucketsType'},
'bucket_type': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'lizard_wbconfiguration.parametermapping': {
'Meta': {'object_name': 'ParameterMapping'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident_wbconfiguration': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'parametercache': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_fewsnorm.ParameterCache']"})
},
'lizard_wbconfiguration.structure': {
'Meta': {'ordering': "['id']", 'object_name': 'Structure'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_wbconfiguration.AreaConfiguration']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'concentr_chloride': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'deb_is_ts': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'deb_wint': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'deb_zomer': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_out': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}),
'incr_concentr_nitrogen': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'incr_concentr_phosphate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'is_computed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'min_concentr_nitrogen': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'min_concentr_phosphate': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '3', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'ts_debiet': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'ts_debiet'", 'null': 'True', 'to': "orm['lizard_fewsnorm.TimeSeriesCache']"})
},
'lizard_wbconfiguration.wbconfigurationdbfmapping': {
'Meta': {'ordering': "['id']", 'object_name': 'WBConfigurationDBFMapping'},
'dbffield_decimals': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dbffield_length': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dbffield_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'dbffield_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model_name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'wbfield_name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
}
}
complete_apps = ['lizard_wbconfiguration']
|
lizardsystem/lizard-wbconfiguration
|
lizard_wbconfiguration/migrations/0001_initial.py
|
Python
|
gpl-3.0
| 54,331
|
#!/usr/bin/env python
from itertools import count
from nodes import Node
#Modulo
#Every
#Indexes
class Mod(Node):
char = "%"
args = 2
results = 1
contents = ["Padding", 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
@Node.test_func([2,3], [2])
@Node.test_func([6,5], [1])
def modulo(self, a:Node.number,b:Node.number):
"""a%b"""
if b == 0: return 0
return a%b
@Node.test_func(["testy",2], ["tsy"])
@Node.test_func([[1,2,3,4,5,6],3], [[1,4]])
def every(self, seq:Node.indexable, b:int):
"""seq[::b]"""
return [seq[::b]]
@Node.test_func(["t","testy"], [[0,3]])
@Node.test_func([1,(1,0,1,2,1)], [[0,2,4]])
def indexes(self, a, seq:Node.indexable):
"""Return a list of indecies in seq that equal a"""
return [list(i for i,v in enumerate(seq) if v==a)]
def inf_every(self, inf: Node.infinite, every: int):
return inf.modify(inf.every, every, count())
def inf_not_every(self, every: int, inf: Node.infinite):
return inf.modify(inf.not_every, every, count())
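# Note: the @Node.test_func(inputs, outputs) decorators above presumably
# register doctest-style cases for the node, e.g. @Node.test_func([2,3], [2])
# records that modulo(2, 3) == 2 % 3 == 2.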
|
muddyfish/PYKE
|
node/mod.py
|
Python
|
mit
| 1,106
|
from switchboard.module import SwitchboardModule
@SwitchboardModule(
inputs=['client1.input.i'], # We have one input...
outputs={'client2.output.o':555}) # ...and one output that is set to 555 in case of error
def module(inp, out): # The 'inp' arg maps to 'input.i' while the 'out' arg maps to 'output.o'
out.set_value(inp.get_value() * 2)
|
josefschneider/switchboard
|
examples/simple_counters/test_module.py
|
Python
|
mit
| 394
|
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
from . import report
from . import wizard
|
OCA/account-financial-reporting
|
partner_statement/__init__.py
|
Python
|
agpl-3.0
| 113
|
"""Retrieve As information out of GeoIP (MaxMind) binding.
Beware: database location is hardcoded!"""
import GeoIP
import re
import numpy as np
import INDEX_VALUES
#WARNING: hard coded
GAS=GeoIP.open('/home/louis/streaming/flows/AS/GeoIPASNum.dat',
GeoIP.GEOIP_STANDARD)
#ugly but more efficient: compile only once
REGEXP = re.compile('(AS([0-9]+).*)')
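# Sketch of the expected format (an assumption about MaxMind org strings):
# GAS.org_by_addr() typically returns something like 'AS15169 Google Inc.', so
# REGEXP.match('AS15169 Google Inc.').group(2, 1)
# yields ('15169', 'AS15169 Google Inc.') -- the AS number, then the full label.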
def extend_fields_AS_down(d):
"Extend each line of array considered as list with src IP addresses."
fields = list(d)
src = GAS.org_by_addr(d['srcAddr'])
if src is not None:
fields.extend(list(REGEXP.match(src).group(2,1)))
else:
fields.extend([0, 'Not found'])
return tuple(fields)
def extend_array_AS_down(flows_array):
"Return a new array with AS information upstream."
return np.array([extend_fields_AS_down(d) for d in flows_array],
dtype=INDEX_VALUES.dtype_GVB_AS_down)
def extend_fields_AS(d):
"Extend each line of array considered as list with both IP addresses."
fields = list(d)
src = GAS.org_by_addr(d['srcAddr'])
if src is not None:
fields.extend(list(REGEXP.match(src).group(2,1)))
else:
fields.extend([0, 'Not found'])
dst = GAS.org_by_addr(d['dstAddr'])
if dst is not None:
fields.extend(list(REGEXP.match(dst).group(2,1)))
else:
fields.extend([0, 'Not found'])
return tuple(fields)
def extend_array_AS(flows_array):
"Return a new array with AS information on both sides."
return np.array([extend_fields_AS(d) for d in flows_array],
dtype=INDEX_VALUES.dtype_GVB_AS)
def extend_array_BGP_AS(flows_array):
"Return a new array with AS information on both sides."
return np.array([extend_fields_AS(d) for d in flows_array],
dtype=INDEX_VALUES.dtype_GVB_BGP_AS)
#test_flows=np.loadtxt('test/flows_ftth_nov.head',
#dtype=INDEX_VALUES.dtype_GVB,skiprows=1).view(np.recarray)
#np.array(zip(test_flows,[[GAS.org_by_addr(src),GAS.org_by_addr(dst)]
#for src,dst in zip(test_flows.srcAddr,test_flows.dstAddr)]))
#[(f, GAS.org_by_addr(f['srcAddr']), GAS.org_by_addr(f['dstAddr']))
#for f in test_flows]
|
LouisPlisso/analysis_tools
|
add_ASN_geoip.py
|
Python
|
gpl-3.0
| 2,116
|
# Copyright 2010-2011 Dusty Phillips
# This file is part of Prickle.
# Prickle is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
# Prickle is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General
# Public License along with Prickle. If not, see
# <http://www.gnu.org/licenses/>.
import logging
from decimal import Decimal
from pylons.decorators import validate
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from prickle.lib.base import BaseController, render
from prickle.model.timesheet import Timesheet, Project, ProjectType
from prickle.forms.timesheet import RateForm
log = logging.getLogger(__name__)
class ProjectsController(BaseController):
def list(self):
c.projects = Project.objects()
return render('/project/project_list.html')
def view(self, id):
c.project, created = Project.objects.get_or_create(name=id)
return render('/project/project_form.html')
@validate(schema=RateForm, form='view')
def edit(self, id):
project, created = Project.objects.get_or_create(name=id)
project.rate = self.form_result['rate']
project.save()
return redirect(url(controller="timesheet", action="index"))
@validate(schema=RateForm, form='view')
def type_rate(self, project, type):
project, created = Project.objects.get_or_create(name=project)
project_type, created = ProjectType.objects.get_or_create(
project=project, type=type)
project_type.rate = self.form_result['rate']
project_type.save()
return redirect(url(controller="timesheet", action="project",
id=project))
|
buchuki/prickle
|
prickle/controllers/projects.py
|
Python
|
agpl-3.0
| 2,138
|
import pandas
class StateCollectionFactory(object):
"""
Factory class that builds a StateCollection.
Attributes
----------
state_list : list
The states in the model.
state_id_list : list
The identifier strings for each state.
"""
def __init__(self):
super(StateCollectionFactory, self).__init__()
self.state_list = []
self.state_id_list = []
def add_state(self, state):
"""
Adds a state (in dict form) to the growing list of states.
Parameters
----------
state : State
"""
self.state_id_list.append( state.get_id() )
self.state_list.append( state.as_dict() )
def make_state_collection(self):
"""
Creates a StateCollection from the states that have been
added to the factory via `add_state`.
Returns
-------
state_collection : StateCollection
"""
state_collection = StateCollection()
state_collection.data_frame = pandas.DataFrame(
self.state_list,
index=self.state_id_list)
return state_collection
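# Usage sketch (hypothetical `states` iterable; each item must expose the
# get_id() and as_dict() methods that add_state relies on):
#
# factory = StateCollectionFactory()
# for state in states:
#     factory.add_state(state)
# collection = factory.make_state_collection()
# for state_id, state_series in collection.iter_states():
#     print state_id, state_series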
class StateCollection(object):
"""
The states of a model. This data structure is used by
AggregatedKineticModels to organize their states.
Attributes
----------
data_frame : pandas DataFrame
Each row in the DataFrame corresponds to a state, and
the columns are based on the attributes of the states.
"""
def __init__(self):
super(StateCollection, self).__init__()
self.data_frame = None
def __len__(self):
return len(self.data_frame)
def __str__(self):
return self.data_frame.to_string()
def iter_states(self):
"""
Iterate through state collection.
Returns
-------
state_id : string
state_series : panda Series
The attributes of a state as a Series.
"""
for state_id, state_series in self.data_frame.iterrows():
yield (state_id, state_series)
def get_state_ids(self):
"""
Returns
-------
s : StateIDCollection
"""
s = StateIDCollection()
s.state_id_list = self.data_frame.index.tolist()
return s
def sort(self, sort_column):
"""
Parameters
----------
sort_column : string
Must correspond to an attribute of the states.
Useful for organizing states according to aggregated class.
"""
return self.data_frame.groupby(sort_column)
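# Example (hypothetical column name): if each state dict carries an
# 'aggregated_class' key, collection.sort('aggregated_class') returns a
# pandas GroupBy whose groups collect the rows sharing each class label.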
class StateIDCollection(object):
"""
The identifier strings of the states of a model.
Attributes
----------
state_id_list : list
"""
def __init__(self):
super(StateIDCollection, self).__init__()
self.state_id_list = []
def __str__(self):
return str(self.state_id_list)
def __iter__(self):
for s in self.state_id_list:
yield s
def __contains__(self, state_id):
return (state_id in self.state_id_list)
def __len__(self):
return len(self.state_id_list)
def add_id(self, state_id):
"""
Add another state id string to the collection.
Parameters
----------
state_id : string
"""
self.state_id_list.append(state_id)
def add_state_id_list(self, state_id_list):
"""
Add a list of id strings to the collection.
Parameters
----------
state_id_list : list
"""
self.state_id_list += state_id_list
def from_state_collection(self, state_collection):
"""
Replace current collection of id strings with those
from the states in a StateCollection.
Parameters
----------
state_collection : StateCollection
"""
# StateCollection exposes get_state_ids(), not get_id_list()
self.state_id_list = state_collection.get_state_ids().as_list()
def as_list(self):
return self.state_id_list
|
milapour/palm
|
palm/state_collection.py
|
Python
|
bsd-2-clause
| 4,073
|
import csv
import xml.etree.ElementTree as ET
import argparse
from collections import defaultdict
# Extracts questions, PAUs from Ground Truth Snapshot XML and creates a CSV file for NLC training purposes
# Pre-Requisite : The XML must be exported from WEA ExperienceManager using the GroundTruthSnapshot export option
# Parameters :
# gttsnapshotxml - GTT Snapshot XML
# outputfile - output CSV
# numquestion - No. of questions to limit (NLC has a limitation of 20 MB for the JSON and 10K training instances)
# classesreportfile - Reports the number of unique classes. Each line is a class, with the primary question as a key
#TODO # o
def extract(gttsnapshotxml, csvfile):
root = ET.parse(gttsnapshotxml).getroot()
csvWriter = csv.writer(open(csvfile, 'w'), delimiter=',')
tCount = 0
count = 0
questionDictionary = dict()
classesDict = dict()
classCount = 0
for question in root.iter('question'):
id = question.find('id').text if question.find('id') is not None else ""
value = question.find('value').text if question.find('value') is not None else ""
questionText = value.lstrip().rstrip()
# Get primary question PAU
predefinedAnswerUnit = question.find('predefinedAnswerUnit').text if question.find(
'predefinedAnswerUnit') is not None else ""
# Look for mapped question
mappedQuestion = question.find('mappedQuestion') if question.find('mappedQuestion') is not None else ""
# Check for valid question, has mapped question section and not PAU - This means secondary question
if questionText != "" and predefinedAnswerUnit == "" and mappedQuestion != "":
parentQuestionPau = mappedQuestion.find('predefinedAnswerUnit') if mappedQuestion.find(
'predefinedAnswerUnit') is not None else ""
mappedQuestionText = mappedQuestion.find("value").text
if parentQuestionPau != "":
if questionDictionary.get(questionText) is None or questionDictionary.get(questionText) == "":
csvWriter.writerow([questionText.encode('utf-8'), parentQuestionPau.text])
questionDictionary.update({questionText: parentQuestionPau.text})
classesDict.update({parentQuestionPau.text: questionText.encode('utf-8')})
classCount += 1
count += 1
else:
# Mapped question has no PAU of its own; reuse the mapping recorded
# for an earlier occurrence of this question, if any
existingPau = questionDictionary.get(questionText)
if existingPau:
csvWriter.writerow([questionText.encode('utf-8'), existingPau])
count += 1
elif questionText != "" and predefinedAnswerUnit != "" and mappedQuestion == "" and questionDictionary.get(questionText) is None:
# This means primary question
csvWriter.writerow([questionText.encode('utf-8'), predefinedAnswerUnit])
classesDict.update({predefinedAnswerUnit: questionText})
questionDictionary.update({questionText: predefinedAnswerUnit})
count += 1
classCount += 1
print("No. of classes found : " + str(len(classesDict)))
print("Total Questions Generated : " + str(count))
#compute question statistics
#invert question map to find # of questions per class
inv_map = {}
for k, v in questionDictionary.iteritems():
inv_map[v] = inv_map.get(v, [])
inv_map[v].append(k)
#turn the inverted map into a map where the key is list of classes and the value is the # of entries for that class
count_map = defaultdict(list)
for k,v in inv_map.iteritems():
count_map[len(v)].append(k)
#print statistics about the training file
print "\nThe NLC recommends having 8 items per class AT MINIMUM. Using the information below see if a substantial portion of your data does not meet this criteria. If not, consider adding more data to each class!!"
for k,v in count_map.iteritems():
print len(v), "classes with", k, "items"
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("gttsnapshotxml", help="Ground Truth Snapshot XML File")
parser.add_argument("outputfile", help="Output CSV File ")
args = parser.parse_args()
extract(args.gttsnapshotxml, args.outputfile)
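# Example invocation (hypothetical file names):
#   python extract.py gtt_snapshot.xml nlc_training.csv
# Each emitted CSV row is "<question text>,<PAU id>", one per unique question.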
|
mkaufmann1/Lightning
|
python/extract.py
|
Python
|
apache-2.0
| 4,532
|
#-------------------------------------------------------------------------------
# Name: lineFollower.py
# Purpose: Use line follower sensors for FortiTo's BuzzBot
# Author: Anasol Pena-Rios
# Created: 19/02/2014
# Copyright: (c) FortiTo 2014
# Version: 1.0
#-------------------------------------------------------------------------------
import smbus
import time
class lineFollower():
#I2C Addresses
#-------------------------------------------------------------------------------
bus = smbus.SMBus(0) #open the bus on /dev/i2c-0
address_sensors = 0x30 #address
#Global variables
#-------------------------------------------------------------------------------
read_val = 255
def __init__(self):
# Initialise values
self.read_val = 255
def clean(self):
# Clean values
self.read_val = 255
def setLineBias(self, value, debug = False):
# Write values - Default value 0
# 0 = Black or white line no bias.
# 1 = Black line bias
# 2 = White line bias
# 3 = Automatic black or white bias
msg = ""
if value in (0,1,2,3):
self.bus.write_byte(self.address_sensors, value) #set value
time.sleep(0.2) #0.2 second
msg = "OK"
else:
msg = "ERROR: Wrong value"
if debug:
print "Set Line Bias: ", msg
return msg
def getDirection(self, debug = False):
result = ""
# Read values - Default value 255 (0xFF)
self.read_val = self.bus.read_byte(self.address_sensors)
time.sleep(0.2) #0.2 second
if self.read_val == 0:
result = "NO LINE"
elif self.read_val in (1,2,3,5,6,7,13,15,19,22,23):
result = "RIGHT"
elif self.read_val in (4,10,11,14,27):
result = "CENTRE"
elif self.read_val in (8,12,13,15,16,20,24,25,26,28,29,30):
result = "LEFT"
else:
result = "ERROR"
if debug:
print "Binary values = ", bin(self.read_val)
print self.read_val, " Value read."
print "Direction ", result
#return result
return self.read_val
if __name__ == "__main__":
print ("----------------------------------------------------")
print ("running LINE FOLLOWER test")
print ("----------------------------------------------------")
result = ""
lineSensor = lineFollower()
lineSensor.clean()
lineSensor.setLineBias(1, True)
while 1:
result = lineSensor.getDirection (True)
print ("----------------------------------------------------")
print ("finished - LINE FOLLOWER test")
print ("----------------------------------------------------")
|
prlosana/BuzzBoards
|
lineFollower.py
|
Python
|
agpl-3.0
| 2,633
|
import pytest
import dill
import os
from backports.tempfile import TemporaryDirectory
from velox.obj import VeloxObject, register_object
from velox.lite import save_object, load_object
from velox.exceptions import VeloxConstraintError
from sklearn.linear_model import SGDClassifier
from sklearn.datasets import make_blobs
import boto3
from moto import mock_s3
import logging
logging.basicConfig(level=logging.DEBUG)
import velox_test_utils
TEST_BUCKET = 'ci-velox-bucket'
@pytest.fixture(
params=['sklearn', 'dict', 'custom_obj']
)
def obj_instance(request):
if request.param == 'sklearn':
return SGDClassifier().fit(*make_blobs())
elif request.param == 'custom_obj':
return velox_test_utils.FooBar(92)
else:
return {'foo': 'bar', 'biz': 'bap'}
@pytest.fixture(
params=['versioned', 'unversioned']
)
def versioned(request):
return request.param == 'versioned'
@pytest.fixture(
params=['secret', 'no_secret']
)
def secret(request):
if request.param == 'secret':
return 'VeloxTesting123'
return None
@pytest.fixture(
params=['s3', 'local']
)
def prefix(request):
if request.param == 'local':
with TemporaryDirectory() as d:
yield d
else:
with mock_s3():
conn = boto3.resource('s3', region_name='us-east-1')
# We need to create the bucket since this is all in Moto's 'virtual' AWS
# account
conn.create_bucket(Bucket=TEST_BUCKET)
yield 's3://{}/path'.format(TEST_BUCKET)
@pytest.fixture
def name():
return 'OBJECTNAME'
@register_object(registered_name=name())
class FullVeloxObj(VeloxObject):
def __init__(self, o=None):
super(FullVeloxObj, self).__init__()
self._o = o
def _save(self, fileobject):
dill.dump(self._o, fileobject)
@classmethod
def _load(cls, fileobject):
r = cls()
setattr(r, '_o', dill.load(fileobject))
return r
def obj(self):
return self._o
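# FullVeloxObj illustrates the two hooks a VeloxObject subclass supplies:
# _save(fileobject) serialises state and the _load classmethod rebuilds it;
# here both are sketched with dill, as the tests below exercise.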
def test_save_load(name, prefix, obj_instance, versioned, secret):
save_object(obj_instance, name, prefix, versioned=versioned, secret=secret)
_ = load_object(name, prefix, versioned=versioned, secret=secret)
def test_save_once_unversioned(name, prefix, obj_instance, secret):
save_object(obj_instance, name, prefix, versioned=False, secret=secret)
with pytest.raises(IOError):
save_object(obj_instance, name, prefix, versioned=False, secret=secret)
def test_load_versioned(name, prefix, secret):
save_object(1, name, prefix, versioned=True, secret=secret)
assert 1 == load_object(name, prefix, versioned=True, secret=secret)
save_object(2, name, prefix, versioned=True, secret=secret)
save_object('foo', name, prefix, versioned=True, secret=secret)
assert 'foo' == load_object(name, prefix, versioned=True, secret=secret)
def test_load_versioned_pin_version(name, prefix, secret):
save_object(1, name, prefix, versioned=True, secret=secret)
save_object(2, name, prefix, versioned=True, secret=secret)
assert 1 == load_object(name, prefix, versioned=True,
version='0.1.0', secret=secret)
assert 2 == load_object(name, prefix, versioned=True, secret=secret)
def test_load_version_constraint(name, prefix, secret):
save_object(1, name, prefix, versioned=True, secret=secret)
save_object(2, name, prefix, versioned=True, secret=secret, bump='minor')
save_object(3, name, prefix, versioned=True, secret=secret, bump='major')
save_object(4, name, prefix, versioned=True, secret=secret, bump='major')
assert 1 == load_object(name, prefix, versioned=True,
version='0.1.0', secret=secret)
assert 2 == load_object(name, prefix, versioned=True,
version='>0.1.0,<1.0.0', secret=secret)
assert 3 == load_object(name, prefix, versioned=True,
version='>=1.0.0,<2.0.0', secret=secret)
assert 4 == load_object(name, prefix, versioned=True, version='>0.1.0',
secret=secret)
def test_load_full_velox_with_sha(name, prefix):
try:
instance = FullVeloxObj({'a': 'bam'})
instance.save(prefix)
result = load_object(name, prefix, versioned=True, return_sha=True)
assert isinstance(result, tuple)
assert len(result) == 2
finally:
velox_test_utils.RESET()
def test_load_not_saved(name, prefix, versioned, secret):
with pytest.raises(VeloxConstraintError):
load_object(name, prefix, versioned=versioned, secret=secret)
def test_load_versioned_error(name, prefix, secret):
with pytest.raises(RuntimeError):
load_object(name, prefix, versioned=False, version=True)
def test_load_secret_mismatch(name, prefix, versioned, secret):
save_object('foo', name, prefix, versioned=versioned, secret=secret)
with pytest.raises(RuntimeError):
load_object(name, prefix, versioned=versioned, secret='WrongSecret')
|
lukedeo/Velox
|
test/test_lite.py
|
Python
|
apache-2.0
| 5,048
|
def extractDevastatranslationsWordpressCom(item):
'''
Parser for 'devastatranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractDevastatranslationsWordpressCom.py
|
Python
|
bsd-3-clause
| 578
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
class BaseComponent:
"""This is a basic interface for defining components.
The only requirement is to implement the name method that
uniquely identifies a component. It should also define other
methods that implement the component functionality.
"""
@classmethod
def name(cls):
raise NotImplementedError()
|
superdesk/superdesk-core
|
apps/common/components/base_component.py
|
Python
|
agpl-3.0
| 653
|
__author__ = 'tonycastronova'
"""
Standard classes
"""
# On OSX you also need to install geos. e.g, sudo port install geos
import datetime
import hashlib
import uuid
from bisect import bisect_left, bisect_right
import numpy
from osgeo import osr, ogr
from emitLogging import elog
from sprint import *
class Status:
READY = 'READY'
NOTREADY = 'NOTREADY'
RUNNING = 'RUNNING'
UNDEFINED = 'UNDEFINED'
SUCCESS = 'SUCCESS'
FAILED = 'FAILED'
# derived from GDAL types
class GeomType():
POINT = 'POINT'
LINESTRING = 'LINESTRING'
POLYGON = 'POLYGON'
MULTIPOINT = 'MULTIPOINT'
MULTILINESTRING = 'MULTILINESTRING'
MULTIPOLYGON = 'MULTIPOLYGON'
GEOMETRYCOLLECTION = 'GEOMETRYCOLLECTION'
CIRCULARSTRING = 'CIRCULARSTRING'
COMPOUNDCURVE = 'COMPOUNDCURVE'
CURVEPOLYGON = 'CURVEPOLYGON'
MULTICURVE = 'MULTICURVE'
MULTISURFACE = 'MULTISURFACE'
_map = {'1': POINT,
'2': LINESTRING,
'3': POLYGON,
'4': MULTIPOINT,
'5': MULTILINESTRING,
'6': MULTIPOLYGON,
'7': GEOMETRYCOLLECTION,
'8': CIRCULARSTRING,
'9': COMPOUNDCURVE,
'10': CURVEPOLYGON,
'11': MULTICURVE,
'12': MULTISURFACE}
class ExchangeItemType():
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
class Variable(object):
"""
Defines the variable object
"""
def __init__(self):
# ODM2 terms
#__variableid = None
#__variableCode = None
self.__variableNameCV = None
self.__variableDefinition = None
#__speciationCV = None
#__noDataValue = None
def VariableNameCV(self,value=None):
if value is None:
return self.__variableNameCV
else:
self.__variableNameCV = value
def VariableDefinition(self,value=None):
if value is None:
return self.__variableDefinition
else:
self.__variableDefinition = value
class Unit(object):
"""
Defines the unit object
"""
def __init__(self):
# ODM2 terms
#__unitID= None
self.__unitTypeCV = None
self.__unitAbbreviation = None
self.__unitName = None
def UnitTypeCV(self,value=None):
if value is None:
return self.__unitTypeCV
else:
self.__unitTypeCV = value
def UnitAbbreviation(self,value=None):
if value is None:
return self.__unitAbbreviation
else:
self.__unitAbbreviation = value
def UnitName(self,value=None):
if value is None:
return self.__unitName
else:
self.__unitName = value
class Geometry2(ogr.Geometry):
def __init__(self, wkb_geom):
super(Geometry2, self).__init__(wkb_geom)
self.type = getattr(GeomType, self.GetGeometryName())
self.hash = None
def update_hash(self):
self.hash = hashlib.sha224(self.ExportToWkt()).hexdigest()
def AddPoint(self, *args, **kwargs):
super(Geometry2, self).AddPoint(*args, **kwargs)
# only update the geometry hash if AddGeometry will not be called (i.e. POINT and LINESTRING objects)
if self.type == GeomType.POINT or self.type == GeomType.LINESTRING:
self.update_hash()
def AddGeometry(self, *args):
super(Geometry2, self).AddGeometry(*args)
# update the geometry hash
self.update_hash()
class ExchangeItem(object):
def __init__(self, id=None, name=None, desc=None, geometry=None, unit=None, variable=None, noData=None, srs_epsg=4269, type=ExchangeItemType.INPUT):
if geometry is None:
geometry = []  # avoid sharing a mutable default list between instances
self.__name = name
self.__description = desc
# variable and unit come from Variable and Unit standard classes
self.__unit = unit
self.__variable = variable
# set type using Exchange Item Type enum
if type in ExchangeItemType.__dict__:
self.__type = type
else:
raise Exception('Exchange Item of Type "%s" not recognized'%type)
# new style data encapsulation (everything is appended with '2', temporarily)
self.__geoms2 = []
self.__times2 = []
self.__values2 = []
# todo: there should be similar functionality for times2 and values2
# save the geometries (if provided)
if geometry is not None:
self.addGeometries2(geometry)
# no data values
self.__noData = noData
self.__id = uuid.uuid4().hex
if id is not None:
if isinstance(id, str):
self.__id = id
# self.__srs = osr.SpatialReference()
self.__srs = self.srs(srs_epsg=srs_epsg)
# if self.__srs.ImportFromEPSG(srs_epsg)
# except Exception, e:
# # set default
# elog.error('Error ExchangeItem.__init__: %s' % e)
# sPrint('Could not create spatial reference object from code: %s. '
# 'Using the default spatial reference system: North American Datum 1983.'% str(srs_epsg),
# MessageType.ERROR)
#
# self.__srs.ImportFromEPSG(4269)
# todo: REMOVE THE DEPRECATED VARIABLES BELOW
self.__geoms = geometry
# variables for saving/retrieving values from database
self.__session = None
self.__saved = False
self.__seriesID = None
def initializeDatesValues(self, start_datetime, end_datetime, timestep_in_seconds):
date = start_datetime
dates = []
while date <= end_datetime:
dates.append(date)
date = date + datetime.timedelta(seconds=timestep_in_seconds)
self.__times2 = numpy.array(dates)
geoms = self.__geoms2
self.__values2 = numpy.zeros(shape=(len(dates), len(geoms)))
self.__values2[:] = self.__noData
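# Example: initializeDatesValues(datetime.datetime(2020, 1, 1),
# datetime.datetime(2020, 1, 2), 3600) yields 25 hourly timestamps
# (both endpoints inclusive) and a (25, len(geoms)) value grid
# pre-filled with the item's noData value.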
def noData(self, value = None):
if value is not None:
self.__noData = value
return self.__noData
def srs(self, srs_epsg=None):
if srs_epsg is not None:
self.__srs = osr.SpatialReference()
if self.__srs.ImportFromEPSG(srs_epsg) != 0:
from osgeo import gdal
msg = gdal.GetLastErrorMsg()
elog.error('Could Not set srs: %s' % msg)
sPrint('Unable to load spatial reference %s: %s ' % (str(srs_epsg), msg), MessageType.ERROR)
sPrint('This may cause errors with future operations. It is recommended that the GDAL_DATA path is fixed in the ' +
'EMIT environment settings before continuing.', MessageType.CRITICAL)
return self.__srs
def getEarliestTime2(self):
return self.__times2[0]
def getLatestTime2(self):
return self.__times2[-1]
def getGeometries2(self, idx=None, ndarray=False):
"""
returns geometries for the exchange item
:param idx: index of the geometry
:return: geometry of idx. If idx is None, all geometries are returned
"""
if idx is not None:
return self.__geoms2[idx]
else:
if ndarray:
return numpy.array(self.__geoms2)
return self.__geoms2
def addGeometry(self, geometry):
geoms = []
try:
if isinstance(geometry, Geometry2):
geoms.append(geometry)
else:
elog.error('Attempt to add unsupported geometry type: %s'%type(geometry))
sPrint('Attempt to add unsupported geometry type: %s'%type(geometry), MessageType.ERROR)
return 0
except Exception as e:
elog.error('Encountered an error while adding geometry: %s'%e)
sPrint('Encountered an error while adding geometry: %s'%e, MessageType.ERROR)
# save the geometry
self.__geoms2.extend(geoms)
return 1
def addGeometries2(self, geometries=None):
"""
adds geometries to the exchange item
:param geom: list of geometries or a single value
:return: None
"""
# make sure the input geometries are iterable
if not isinstance(geometries,list) and not isinstance(geometries,numpy.ndarray):
elog.error('Encountered an error while adding geometries: Unsupported argument type: %s'%type(geometries))
sPrint('Encountered an error while adding geometries: Unsupported argument type: %s'%type(geometries), MessageType.ERROR)
return 0
geoms = []
count = 0
for g in geometries:
if isinstance(g, Geometry2):
geoms.append(g)
count += 1
# save geometries
self.__geoms2.extend(geoms)
# notify that not all geometries were saved
if len(geoms) != count:
elog.error('Encountered unsupported geometry types while adding geometries to exchange item. Not all items may have been saved.')
sPrint('Encountered unsupported geometry types while adding geometries to exchange item. Not all items may have been saved.', MessageType.WARNING)
return 0
return 1
def setValuesBySlice (self, values, time_index_slice=(None, None, None), geometry_index_slice=(None, None, None)):
"""
sets datavalues for the component.
:param values: Values to set
# :param value_index_slice: tuple representing the start, stop, and step range of the values
:param time_index_slice: tuple representing the start, stop, and step range of the date times
:param geometry_index_slice: tuple representing the start, stop, and step range of the geometries
:return: True if successful, otherwise False
"""
if len(self.__values2) == 0 or len(self.__times2) == 0:
elog.critical('Error ExchangeItem.setValuesBySlice: Exchange item values and/or times arrays were not set properly')
sPrint('Exchange item values and/or times arrays were not set properly Make sure "initializeDatesValues" is being called during/after component initialization.', MessageType.CRITICAL)
return False
try:
# get index ranges
tb, te, tstep = time_index_slice
gb, ge, gstep = geometry_index_slice
# set initial values for missing slice indices
tb = 0 if tb is None else tb
gb = 0 if gb is None else gb
te = len(self.__times2) if te is None else te
ge = len(self.__geoms2) if ge is None else ge
tstep = 1 if tstep is None else tstep
gstep = 1 if gstep is None else gstep
# convert the values into a numpy array if they aren't already
if not isinstance(values, numpy.ndarray):
values = numpy.array(values)
# set the values[times][geoms]
# reshape the input array to fit the desired slicing
target_shape = ((te-tb) / tstep, (ge-gb) / gstep)
values = values.reshape(target_shape)
self.__values2[tb:te:tstep, gb:ge:gstep] = values
except Exception as e:
elog.error('Error ExchangeItem.setValuesBySlice: %s' % e)
sPrint('Error setting values for times %s, geometries %s: %s' % (str(time_index_slice), str(geometry_index_slice), e),
MessageType.ERROR)
return False
return True
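# Example: setValuesBySlice(block, time_index_slice=(0, 4, 1),
# geometry_index_slice=(0, 2, 1)) reshapes `block` to target_shape (4, 2)
# and assigns it into the values array at [0:4:1, 0:2:1].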
def setValuesByTime(self, values, timevalue, geometry_index_slice=(None, None, None)):
try:
# get time index
idx, date = self.getDates2(timevalue)
# get index ranges
gb, ge, gstep = geometry_index_slice
# convert the values into a numpy array if they aren't already
if not isinstance(values, numpy.ndarray):
values = numpy.array(values)
# set the values[times][geoms]
self.__values2[idx, gb:ge:gstep] = values
except Exception as e:
elog.error('Error ExchangeItem.setValuesByTime: %s' % e)
sPrint('Error setting values for time %s, geometries %s' % (str(timevalue), str(geometry_index_slice)),
MessageType.ERROR)
return False
return True
def setValues2(self, values, timevalue):
"""
sets data values for all geometries at a given time index
:param timevalue: datetime object value for which the datavalues are associated
:param values: list of datavalues for all geometries at the given time
:return: values list index
"""
if hasattr(timevalue, "__len__"):
# make sure that the length of values matches the length of times
if len(timevalue) != len(values):
sPrint('Could not set data values. Length of timevalues and datavalues lists must be equal.',
MessageType.WARNING)
return 0
invalid_dates = False
for i in range(0, len(timevalue)):
if isinstance(timevalue[i], datetime.datetime):
self._setValues2(values[i], timevalue[i])
else: invalid_dates = True
if invalid_dates:
sPrint('Invalid datetimes were found while setting values. Data values may not be set correctly.',
MessageType.WARNING)
return 1
elif isinstance(timevalue, datetime.datetime):
self._setValues2(values, timevalue)
return 1
else:
sPrint('Could not set data values. Time value was not of type datetime.',
MessageType.WARNING)
return 0
def _setValues2(self, values, timevalue):
# insert by datetime need to get dates to determine which index to use
idx, date = self.getDates2(timevalue)
if date is not None and timevalue == self.__times2[idx]:
# replace the values for this time
self.__values2[idx] = values
else:
# insert new values at the specified index
if not hasattr(values, "__len__"):
values = [values]
self.__values2.insert(idx+1, values)
self.__times2.insert(idx+1, timevalue)
# self.__values2.append(values)
# idx = len(self.__values2) - 1
# self.setDates2(timevalue, idx)
def getValues2(self, geom_idx_start=0, geom_idx_end=None, start_time=None, end_time=None, time_idx=None, ndarray=False):
"""
gets datavalues of the exchange item for idx
:param geom_idx_start: the start value index to be returned.
:param geom_idx_end: the end value index to be returned.
:param start_time: start datetime for selecting a data subset
:param end_time: end datetime for selecting a data subset
:return: datavalues between start_time and end_time. If not given, the entire time range is returned.
"""
# set initial value end index as the length of the geometry array
if geom_idx_end is None:
geom_idx_end = len(self.__geoms2) + 1
else:
# add one to make return values from idx_start to idx_end inclusive
geom_idx_end += 1
if time_idx is None:
start_time_slice_idx = 0
end_time_slice_idx = len(self.__times2)
if start_time is not None:
start_time_slice_idx = self._nearest(self.__times2, start_time, 'left')
if end_time is not None:
end_time_slice_idx = self._nearest(self.__times2, end_time, 'right') + 1
else:
return self.__values2[time_idx][geom_idx_start:geom_idx_end] # return a single time index of values
values = []
for i in range(start_time_slice_idx, end_time_slice_idx):
values.append(self.__values2[i][geom_idx_start:geom_idx_end])
# if ndarray:
return numpy.array(values)
# else:
# return values
# def setDates2(self, timevalue):
# """
# sets the data-times for a geometry index. These should directly correspond with
# :param timevalue: datetime object
# :return: index of the datetime value
# """
#
# idx = self._nearest(self.__times2, timevalue, 'left') + 1
# self.__times2 = numpy.insert(self.__times2, idx, timevalue)
# # self.__times2.insert(idx+1, timevalue)
# return idx
def getDates(self):
"""
Get all datetimes for the exchange item
Returns: numpy array of datetimes
"""
times = [(idx, self.__times2[idx]) for idx in range(0, len(self.__times2))]
return numpy.array(times)
def getNearestDates(self, dateslist, search_direction='left'):
"""
Get the exchange item datetime objects nearest the datelist argument
Args:
dateslist: list of datetimes to search
search_direction: direction of search
Returns: numpy array((index, datetime),)
"""
if not isinstance(dateslist, list):
sPrint('stdlib.getNearestDates requires input datetimes as a list', MessageType.ERROR)
return numpy.empty((0,))
times = []
for t in dateslist:
idx = self._nearest(self.__times2, t, search_direction)
if len(self.__times2) and idx <= len(self.__times2):
times.append((idx, self.__times2[idx]))
else:
times.append((0, None))
return numpy.array(times)
def getDates2(self, timevalue=None, start=None, end=None, ndarray=False):
"""
gets datavalue indices for a datetime
:param timevalue: datetime object
:return: returns the datavalue index, and the time value corresponding to the nearest requested datetime
"""
if isinstance(timevalue, list):
times = []
for t in timevalue:
idx = self._nearest(self.__times2, t, 'left')
if len(self.__times2) and idx <= len(self.__times2):
times.append((idx, self.__times2[idx]))
else:
times.append((0, None))
if ndarray:
return numpy.array(times)
else:
return times
elif start is not None and end is not None:
if not isinstance(start, datetime.datetime) or not isinstance(end, datetime.datetime):
elog.critical('ERROR ExchangeItem.getDates2: Could not fetch date time from range because the "start" and/or "endtimes" are not valued datetime objects.')
sPrint('Could not fetch date time from range because the "start" and/or "endtimes" are not valued datetime objects.', MessageType.CRITICAL)
return 0, None
st = self._nearest(self.__times2, start, 'left')
et = self._nearest(self.__times2, end, 'right') + 1
times = [(idx, self.__times2[idx]) for idx in range(st, et)]
if ndarray:
return numpy.array(times)
else:
return times
elif isinstance(timevalue, datetime.datetime):
idx = self._nearest(self.__times2, timevalue, 'left')
if len(self.__times2) and idx <= len(self.__times2):
return idx, self.__times2[idx]
else:
return 0, None
else: # return all known values
times = [(idx, self.__times2[idx]) for idx in range(0, len(self.__times2))]
if ndarray:
return numpy.array(times)
else:
return times
def unit(self,value=None):
if value is None:
return self.__unit
else:
self.__unit = value
def variable(self,value=None):
if value is None:
return self.__variable
else:
self.__variable = value
def id(self):
return self.__id
def type(self):
return self.__type
def name(self,value=None):
if value is None:
return self.__name
else:
self.__name = value
def description(self,value=None):
if value is None:
return self.__description
else:
self.__description = value
def _nearest(self, array, time, direction='left'):
"""
        get the nearest datetime in a sorted list
:param array: sorted numpy array of datetimes
:param time: desired datetime
:param direction: the bisect direction.
:return: list index
"""
lst = list(array)
if len(lst) == 0:
return 0
if direction == 'left':
i = bisect_left(lst, time)
nearest = min(lst[max(0, i-1): i+2], key=lambda t: abs(time - t))
return lst.index(nearest)
elif direction == 'right':
i = bisect_right(lst, time)
nearest = min(lst[max(0, i-1): i+2], key=lambda t: abs(time - t))
return lst.index(nearest)
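    # A small illustration of the bisect-based lookup above (an editor's
    # sketch, not original code): with times Jan 1, Jan 2 and Jan 4, looking
    # up Jan 3 bisects to i=2, the window [Jan 2, Jan 4] is scanned, and the
    # tie is broken toward the earlier entry, returning index 1.
    #
    # from bisect import bisect_left
    # from datetime import datetime
    # times = [datetime(2020, 1, d) for d in (1, 2, 4)]
    # target = datetime(2020, 1, 3)
    # i = bisect_left(times, target)
    # nearest = min(times[max(0, i-1): i+2], key=lambda t: abs(target - t))
    # assert times.index(nearest) == 1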
|
Castronova/EMIT
|
stdlib.py
|
Python
|
gpl-2.0
| 21,220
|
"""Support for Obihai Sensors."""
from datetime import timedelta
import logging
from pyobihai import PyObihai
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
DEVICE_CLASS_TIMESTAMP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=5)
OBIHAI = "Obihai"
DEFAULT_USERNAME = "admin"
DEFAULT_PASSWORD = "admin"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Obihai sensor platform."""
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
host = config[CONF_HOST]
sensors = []
pyobihai = PyObihai(host, username, password)
login = pyobihai.check_account()
if not login:
_LOGGER.error("Invalid credentials")
return
serial = pyobihai.get_device_serial()
services = pyobihai.get_state()
line_services = pyobihai.get_line_state()
call_direction = pyobihai.get_call_direction()
for key in services:
sensors.append(ObihaiServiceSensors(pyobihai, serial, key))
if line_services is not None:
for key in line_services:
sensors.append(ObihaiServiceSensors(pyobihai, serial, key))
for key in call_direction:
sensors.append(ObihaiServiceSensors(pyobihai, serial, key))
add_entities(sensors)
class ObihaiServiceSensors(Entity):
"""Get the status of each Obihai Lines."""
def __init__(self, pyobihai, serial, service_name):
"""Initialize monitor sensor."""
self._service_name = service_name
self._state = None
self._name = f"{OBIHAI} {self._service_name}"
self._pyobihai = pyobihai
self._unique_id = f"{serial}-{self._service_name}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return if sensor is available."""
if self._state is not None:
return True
return False
@property
def unique_id(self):
"""Return the unique ID."""
return self._unique_id
@property
def device_class(self):
"""Return the device class for uptime sensor."""
if self._service_name == "Last Reboot":
return DEVICE_CLASS_TIMESTAMP
return None
@property
def icon(self):
"""Return an icon."""
if self._service_name == "Call Direction":
if self._state == "No Active Calls":
return "mdi:phone-off"
if self._state == "Inbound Call":
return "mdi:phone-incoming"
return "mdi:phone-outgoing"
if "Caller Info" in self._service_name:
return "mdi:phone-log"
if "Port" in self._service_name:
if self._state == "Ringing":
return "mdi:phone-ring"
if self._state == "Off Hook":
return "mdi:phone-in-talk"
return "mdi:phone-hangup"
return "mdi:phone"
def update(self):
"""Update the sensor."""
services = self._pyobihai.get_state()
if self._service_name in services:
self._state = services.get(self._service_name)
services = self._pyobihai.get_line_state()
if services is not None:
if self._service_name in services:
self._state = services.get(self._service_name)
call_direction = self._pyobihai.get_call_direction()
if self._service_name in call_direction:
self._state = call_direction.get(self._service_name)
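# A minimal example configuration.yaml entry for this platform (an editor's
# sketch; the host value is a placeholder, and username/password fall back
# to the defaults defined above when omitted):
#
# sensor:
#   - platform: obihai
#     host: 192.168.1.100
#     username: admin
#     password: admin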
|
tchellomello/home-assistant
|
homeassistant/components/obihai/sensor.py
|
Python
|
apache-2.0
| 4,124
|
'''
the following import is only necessary because eip.py is not in this directory
'''
import sys
sys.path.append('..')
'''
Set the PLC clock
Sets the PLC clock to the same time as your computer
'''
from pylogix import PLC
with PLC() as comm:
comm.IPAddress = '192.168.1.9'
comm.SetPLCTime()
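# A hedged follow-up sketch: reading the clock back to confirm the write.
# GetPLCTime() is assumed to be available alongside SetPLCTime() in pylogix;
# check your pylogix version if the call differs.
#
# with PLC() as comm:
#     comm.IPAddress = '192.168.1.9'
#     print(comm.GetPLCTime())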
|
dmroeder/pylogix
|
examples/22_set_plc_clock.py
|
Python
|
apache-2.0
| 304
|
import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="yanchor", parent_name="layout.xaxis.rangeselector", **kwargs
):
super(YanchorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["auto", "top", "middle", "bottom"]),
**kwargs
)
|
plotly/python-api
|
packages/python/plotly/plotly/validators/layout/xaxis/rangeselector/_yanchor.py
|
Python
|
mit
| 556
|
#!/usr/bin/env python
"""
Example 2.3.2: Symbolic differentiation
"""
def deriv(exp, var):
if is_const(exp, var):
return 0
if is_var(exp, var):
return 1
if isinstance(exp, Sum):
return Sum(
deriv(exp.left, var),
deriv(exp.right, var))
if isinstance(exp, Prod):
return Sum(
Prod(
exp.left,
deriv(exp.right, var)),
Prod(
exp.right,
deriv(exp.left, var)))
raise TypeError(
'unexpected expr type ({})'.format(
type(exp).__name__))
#
# representation of expression objects
#
def is_atom(exp):
return not isinstance(exp, BinOp)
def is_num(exp):
return is_atom(exp) and isinstance(exp, int)
def is_const(exp, var):
return is_atom(exp) and exp != var
def is_var(exp, var):
return is_atom(exp) and exp == var
class BinOp(object):
def __init__(self, left, right):
self.left = left
self.right = right
class Sum(BinOp):
op = ' + '
rank = 0
def __new__(cls, left, right):
# simplification applied at construction time
if left == 0:
return right
if right == 0:
return left
if is_num(left) and is_num(right):
return left + right
        # object.__new__ takes no extra arguments; __init__ still receives (left, right)
        return object.__new__(cls)
class Prod(BinOp):
op = '*'
rank = 1
def __new__(cls, left, right):
# simplification applied at construction time
if is_num(left) and is_num(right):
return left * right
if left == 0 or right == 0:
return 0
if right == 1:
return left
if left == 1:
return right
        # object.__new__ takes no extra arguments; __init__ still receives (left, right)
        return object.__new__(cls)
#
# rendering expression tree as string
#
class ExpRenderer(object):
def __call__(self, exp):
s, _ = self.render(exp)
return s
def render(self, exp):
handler = 'render_' + type(exp).__name__
        return getattr(self, handler)(exp)
def render_str(self, exp):
return exp, 99
def render_int(self, exp):
return str(exp), 99
def render_bin_op(self, exp):
lstr, lrank = self.render(exp.left)
rstr, rrank = self.render(exp.right)
if lrank < exp.rank:
lstr = '(' + lstr + ')'
if rrank < exp.rank:
rstr = '(' + rstr + ')'
return lstr + exp.op + rstr, exp.rank
def render_Sum(self, exp):
return self.render_bin_op(exp)
def render_Prod(self, exp):
return self.render_bin_op(exp)
if __name__ == '__main__':
e2s = ExpRenderer()
# an example: derivative of "a*x*x + b*x + c"
exp = Sum(Prod('a', Prod('x', 'x')),
Sum(Prod('b', 'x'),
'c'))
dd = deriv(exp, 'x')
print e2s(dd) # prints "a*(x + x) + b"
#dd = deriv(exp, 'a')
#print e2s(dd) # FAILS!, infinite recursion, why??
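    # An editor's note on the failure above (a likely cause, not from the
    # original text): Prod.__new__ can return an *existing* node, e.g.
    # Prod(P, 1) returns P. Because P is an instance of Prod, Python then
    # calls Prod.__init__(P, P, 1), which rewrites P.left = P and makes the
    # tree self-referential, so rendering recurses forever. One sketch of a
    # fix is to simplify in a factory function so __init__ never re-runs on
    # a returned node:
    #
    # def make_prod(left, right):
    #     if is_num(left) and is_num(right):
    #         return left * right
    #     if left == 0 or right == 0:
    #         return 0
    #     if right == 1:
    #         return left
    #     if left == 1:
    #         return right
    #     return Prod(left, right)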
dd = deriv(exp, 'b')
print e2s(dd) # prints "x"
dd = deriv(exp, 'c')
print e2s(dd) # prints "1"
|
blazk/sicp
|
ch2_building_abstractions_with_data/examples/2.3.2_symb_diff.py
|
Python
|
gpl-2.0
| 3,150
|
# -*- coding: utf-8 -*-
from operator import itemgetter
import time
from openerp import api, fields, models, _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.exceptions import ValidationError
class AccountFiscalPosition(models.Model):
_name = 'account.fiscal.position'
_description = 'Fiscal Position'
_order = 'sequence'
sequence = fields.Integer()
name = fields.Char(string='Fiscal Position', required=True)
active = fields.Boolean(default=True,
help="By unchecking the active field, you may hide a fiscal position without deleting it.")
company_id = fields.Many2one('res.company', string='Company')
account_ids = fields.One2many('account.fiscal.position.account', 'position_id', string='Account Mapping', copy=True)
tax_ids = fields.One2many('account.fiscal.position.tax', 'position_id', string='Tax Mapping', copy=True)
note = fields.Text('Notes')
auto_apply = fields.Boolean(string='Detect Automatically', help="Apply automatically this fiscal position.")
vat_required = fields.Boolean(string='VAT required', help="Apply only if partner has a VAT number.")
country_id = fields.Many2one('res.country', string='Country',
help="Apply only if delivery or invoicing country match.")
country_group_id = fields.Many2one('res.country.group', string='Country Group',
help="Apply only if delivery or invocing country match the group.")
state_ids = fields.Many2many('res.country.state', string='Federal States')
zip_from = fields.Integer(string='Zip Range From', default=0)
zip_to = fields.Integer(string='Zip Range To', default=0)
# To be used in hiding the 'Federal States' field('attrs' in view side) when selected 'Country' has 0 states.
states_count = fields.Integer(compute='_compute_states_count')
@api.one
def _compute_states_count(self):
self.states_count = len(self.country_id.state_ids)
@api.one
@api.constrains('zip_from', 'zip_to')
def _check_zip(self):
if self.zip_from > self.zip_to:
raise ValidationError(_('Invalid "Zip Range", please configure it properly.'))
return True
@api.v7
def map_tax(self, cr, uid, fposition_id, taxes, context=None):
if not taxes:
return []
if not fposition_id:
return map(lambda x: x.id, taxes)
result = set()
for t in taxes:
ok = False
for tax in fposition_id.tax_ids:
if tax.tax_src_id.id == t.id:
if tax.tax_dest_id:
result.add(tax.tax_dest_id.id)
ok = True
if not ok:
result.add(t.id)
return list(result)
@api.v8 # noqa
def map_tax(self, taxes):
result = self.env['account.tax'].browse()
for tax in taxes:
tax_count = 0
for t in self.tax_ids:
if t.tax_src_id == tax:
tax_count += 1
if t.tax_dest_id:
result |= t.tax_dest_id
if not tax_count:
result |= tax
return result
@api.v7
def map_account(self, cr, uid, fposition_id, account_id, context=None):
if not fposition_id:
return account_id
for pos in fposition_id.account_ids:
if pos.account_src_id.id == account_id:
account_id = pos.account_dest_id.id
break
return account_id
@api.v8
def map_account(self, account):
for pos in self.account_ids:
if pos.account_src_id == account:
return pos.account_dest_id
return account
@api.v8
def map_accounts(self, accounts):
""" Receive a dictionary having accounts in values and try to replace those accounts accordingly to the fiscal position.
"""
ref_dict = {}
for line in self.account_ids:
ref_dict[line.account_src_id] = line.account_dest_id
for key, acc in accounts.items():
if acc in ref_dict:
accounts[key] = ref_dict[acc]
return accounts
@api.onchange('country_id')
def _onchange_country_id(self):
if self.country_id:
self.zip_from = self.zip_to = self.country_group_id = False
self.state_ids = [(5,)]
self.states_count = len(self.country_id.state_ids)
@api.onchange('country_group_id')
def _onchange_country_group_id(self):
if self.country_group_id:
self.zip_from = self.zip_to = self.country_id = False
self.state_ids = [(5,)]
@api.model
def _get_fpos_by_region(self, country_id=False, state_id=False, zipcode=False, vat_required=False):
if not country_id:
return False
domains = [[('auto_apply', '=', True), ('vat_required', '=', vat_required)]]
if vat_required:
# Possibly allow fallback to non-VAT positions, if no VAT-required position matches
domains += [[('auto_apply', '=', True), ('vat_required', '=', False)]]
if zipcode and zipcode.isdigit():
zipcode = int(zipcode)
domain_zip = [('zip_from', '<=', zipcode), ('zip_to', '>=', zipcode)]
else:
zipcode, domain_zip = 0, [('zip_from', '=', 0), ('zip_to', '=', 0)]
state_domain = [('state_ids', '=', False)]
if state_id:
state_domain = [('state_ids', '=', state_id)]
for domain in domains:
# Build domain to search records with exact matching criteria
fpos_id = self.search(domain + [('country_id', '=', country_id)] + state_domain + domain_zip, limit=1).id
            # return the record that best fits the criteria, falling back on less specific fiscal positions if none is found
if not fpos_id and zipcode:
fpos_id = self.search(domain + [('country_id', '=', country_id)] + state_domain + [('zip_from', '=', 0), ('zip_to', '=', 0)], limit=1).id
if not fpos_id and state_id:
fpos_id = self.search(domain + [('country_id', '=', country_id)] + [('state_ids', '=', False)] + domain_zip, limit=1).id
if not fpos_id and state_id and zipcode:
fpos_id = self.search(domain + [('country_id', '=', country_id)] + [('state_ids', '=', False)] + [('zip_from', '=', 0), ('zip_to', '=', 0)], limit=1).id
if fpos_id:
return fpos_id
return False
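    # Editor's note on the fallback order above: for each candidate domain
    # the search first requires an exact country + state + zip match, then
    # relaxes the zip range, then the state, then both, and the first
    # fiscal position found wins.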
@api.model
def get_fiscal_position(self, partner_id, delivery_id=None):
if not partner_id:
return False
        # This can be easily overridden to apply more complex fiscal rules
PartnerObj = self.env['res.partner']
partner = PartnerObj.browse(partner_id)
        # if no delivery address is given, use the invoicing partner
if delivery_id:
delivery = PartnerObj.browse(delivery_id)
else:
delivery = partner
        # a fiscal position set manually on the partner always wins
if delivery.property_account_position_id or partner.property_account_position_id:
return delivery.property_account_position_id.id or partner.property_account_position_id.id
fiscal_position_id = self._get_fpos_by_region(delivery.country_id.id, delivery.state_id.id, delivery.zip, bool(partner.vat))
if fiscal_position_id:
return fiscal_position_id
domains = [[('auto_apply', '=', True), ('vat_required', '=', bool(partner.vat))]]
if partner.vat:
# Possibly allow fallback to non-VAT positions, if no VAT-required position matches
domains += [[('auto_apply', '=', True), ('vat_required', '=', False)]]
for domain in domains:
if delivery.country_id.id:
fiscal_position = self.search(domain + [('country_group_id.country_ids', '=', delivery.country_id.id)], limit=1)
if fiscal_position:
return fiscal_position.id
fiscal_position = self.search(domain + [('country_id', '=', None), ('country_group_id', '=', None)], limit=1)
if fiscal_position:
return fiscal_position.id
return False
class AccountFiscalPositionTax(models.Model):
_name = 'account.fiscal.position.tax'
_description = 'Taxes Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position', string='Fiscal Position',
required=True, ondelete='cascade')
tax_src_id = fields.Many2one('account.tax', string='Tax on Product', required=True)
tax_dest_id = fields.Many2one('account.tax', string='Tax to Apply')
_sql_constraints = [
('tax_src_dest_uniq',
'unique (position_id,tax_src_id,tax_dest_id)',
         'A tax mapping can be defined only once for the same taxes on a fiscal position.')
]
class AccountFiscalPositionAccount(models.Model):
_name = 'account.fiscal.position.account'
_description = 'Accounts Fiscal Position'
_rec_name = 'position_id'
position_id = fields.Many2one('account.fiscal.position', string='Fiscal Position',
required=True, ondelete='cascade')
account_src_id = fields.Many2one('account.account', string='Account on Product',
domain=[('deprecated', '=', False)], required=True)
account_dest_id = fields.Many2one('account.account', string='Account to Use Instead',
domain=[('deprecated', '=', False)], required=True)
_sql_constraints = [
('account_src_dest_uniq',
'unique (position_id,account_src_id,account_dest_id)',
         'An account mapping can be defined only once for the same accounts on a fiscal position.')
]
class ResPartner(models.Model):
_name = 'res.partner'
_inherit = 'res.partner'
_description = 'Partner'
@api.multi
def _credit_debit_get(self):
tables, where_clause, where_params = self.env['account.move.line']._query_get()
where_params = [tuple(self.ids)] + where_params
self._cr.execute("""SELECT l.partner_id, act.type, SUM(l.debit-l.credit)
FROM account_move_line l
LEFT JOIN account_account a ON (l.account_id=a.id)
LEFT JOIN account_account_type act ON (a.user_type_id=act.id)
WHERE act.type IN ('receivable','payable')
AND l.partner_id IN %s
AND l.reconciled IS FALSE
""" + where_clause + """
GROUP BY l.partner_id, act.type
""", where_params)
for pid, type, val in self._cr.fetchall():
partner = self.browse(pid)
if type == 'receivable':
partner.credit = val
elif type == 'payable':
partner.debit = -val
@api.multi
def _asset_difference_search(self, type, args):
if not args:
return []
having_values = tuple(map(itemgetter(2), args))
        where = ' AND '.join(
            map(lambda x: '(SUM(bal2) %(operator)s %%s)' % {'operator': x[1]},
                args))
query = self.env['account.move.line']._query_get()
self._cr.execute(('SELECT pid AS partner_id, SUM(bal2) FROM ' \
'(SELECT CASE WHEN bal IS NOT NULL THEN bal ' \
'ELSE 0.0 END AS bal2, p.id as pid FROM ' \
'(SELECT (debit-credit) AS bal, partner_id ' \
'FROM account_move_line l ' \
'WHERE account_id IN ' \
'(SELECT id FROM account_account '\
'WHERE type=%s AND active) ' \
'AND reconciled IS FALSE ' \
'AND '+query+') AS l ' \
'RIGHT JOIN res_partner p ' \
'ON p.id = partner_id ) AS pl ' \
'GROUP BY pid HAVING ' + where),
(type,) + having_values)
res = self._cr.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', map(itemgetter(0), res))]
@api.multi
def _credit_search(self, args):
return self._asset_difference_search('receivable', args)
@api.multi
def _debit_search(self, args):
return self._asset_difference_search('payable', args)
@api.multi
def _invoice_total(self):
account_invoice_report = self.env['account.invoice.report']
if not self.ids:
self.total_invoiced = 0.0
return True
user_currency_id = self.env.user.company_id.currency_id.id
for partner in self:
all_partner_ids = self.search([('id', 'child_of', partner.id)]).ids
# searching account.invoice.report via the orm is comparatively expensive
# (generates queries "id in []" forcing to build the full table).
            # In simple cases where all invoices are in the same currency as the user's company,
# access directly these elements
# generate where clause to include multicompany rules
where_query = account_invoice_report._where_calc([
('partner_id', 'in', all_partner_ids), ('state', 'not in', ['draft', 'cancel']), ('company_id', '=', self.env.user.company_id.id)
])
account_invoice_report._apply_ir_rules(where_query, 'read')
from_clause, where_clause, where_clause_params = where_query.get_sql()
query = """
SELECT SUM(price_total) as total
FROM account_invoice_report account_invoice_report
WHERE %s
""" % where_clause
# price_total is in the company currency
self.env.cr.execute(query, where_clause_params)
partner.total_invoiced = self.env.cr.fetchone()[0]
@api.multi
def _journal_item_count(self):
for partner in self:
partner.journal_item_count = self.env['account.move.line'].search_count([('partner_id', '=', partner.id)])
partner.contracts_count = self.env['account.analytic.account'].search_count([('partner_id', '=', partner.id)])
def get_followup_lines_domain(self, date, overdue_only=False, only_unblocked=False):
domain = [('reconciled', '=', False), ('account_id.deprecated', '=', False), ('account_id.internal_type', '=', 'receivable')]
if only_unblocked:
domain += [('blocked', '=', False)]
if self.ids:
domain += [('partner_id', 'in', self.ids)]
#adding the overdue lines
overdue_domain = ['|', '&', ('date_maturity', '!=', False), ('date_maturity', '<=', date), '&', ('date_maturity', '=', False), ('date', '<=', date)]
if overdue_only:
domain += overdue_domain
return domain
@api.multi
def _compute_issued_total(self):
""" Returns the issued total as will be displayed on partner view """
today = fields.Date.context_today(self)
for partner in self:
domain = partner.get_followup_lines_domain(today, overdue_only=True)
issued_total = 0
for aml in self.env['account.move.line'].search(domain):
issued_total += aml.amount_residual
partner.issued_total = issued_total
@api.one
def _compute_has_unreconciled_entries(self):
# Avoid useless work if has_unreconciled_entries is not relevant for this partner
if not self.active or not self.is_company and self.parent_id:
return
self.env.cr.execute(
""" SELECT 1 FROM(
SELECT
p.last_time_entries_checked AS last_time_entries_checked,
MAX(l.write_date) AS max_date
FROM
account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE
p.id = %s
AND EXISTS (
SELECT 1
FROM account_move_line l
WHERE l.account_id = a.id
AND l.partner_id = p.id
AND l.amount_residual > 0
)
AND EXISTS (
SELECT 1
FROM account_move_line l
WHERE l.account_id = a.id
AND l.partner_id = p.id
AND l.amount_residual < 0
)
GROUP BY p.last_time_entries_checked
) as s
WHERE (last_time_entries_checked IS NULL OR max_date > last_time_entries_checked)
""" % (self.id,))
self.has_unreconciled_entries = self.env.cr.rowcount == 1
@api.multi
def mark_as_reconciled(self):
return self.write({'last_time_entries_checked': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})
@api.one
def _get_company_currency(self):
if self.company_id:
self.currency_id = self.sudo().company_id.currency_id
else:
self.currency_id = self.env.user.company_id.currency_id
credit = fields.Monetary(compute='_credit_debit_get', search=_credit_search,
string='Total Receivable', help="Total amount this customer owes you.")
debit = fields.Monetary(compute='_credit_debit_get', search=_debit_search, string='Total Payable',
help="Total amount you have to pay to this vendor.")
debit_limit = fields.Monetary('Payable Limit')
total_invoiced = fields.Monetary(compute='_invoice_total', string="Total Invoiced",
groups='account.group_account_invoice')
currency_id = fields.Many2one('res.currency', compute='_get_company_currency', readonly=True,
help='Utility field to express amount currency')
contracts_count = fields.Integer(compute='_journal_item_count', string="Contracts", type='integer')
journal_item_count = fields.Integer(compute='_journal_item_count', string="Journal Items", type="integer")
issued_total = fields.Monetary(compute='_compute_issued_total', string="Journal Items")
property_account_payable_id = fields.Many2one('account.account', company_dependent=True,
string="Account Payable", oldname="property_account_payable",
domain="[('internal_type', '=', 'payable'), ('deprecated', '=', False)]",
help="This account will be used instead of the default one as the payable account for the current partner",
required=True)
property_account_receivable_id = fields.Many2one('account.account', company_dependent=True,
string="Account Receivable", oldname="property_account_receivable",
domain="[('internal_type', '=', 'receivable'), ('deprecated', '=', False)]",
help="This account will be used instead of the default one as the receivable account for the current partner",
required=True)
property_account_position_id = fields.Many2one('account.fiscal.position', company_dependent=True,
string="Fiscal Position",
help="The fiscal position will determine taxes and accounts used for the partner.", oldname="property_account_position")
property_payment_term_id = fields.Many2one('account.payment.term', company_dependent=True,
string ='Customer Payment Term',
help="This payment term will be used instead of the default one for sale orders and customer invoices", oldname="property_payment_term")
property_supplier_payment_term_id = fields.Many2one('account.payment.term', company_dependent=True,
string ='Vendor Payment Term',
help="This payment term will be used instead of the default one for purchase orders and vendor bills", oldname="property_supplier_payment_term")
ref_company_ids = fields.One2many('res.company', 'partner_id',
                                     string='Companies that refer to partner', oldname="ref_companies")
has_unreconciled_entries = fields.Boolean(compute='_compute_has_unreconciled_entries',
help="The partner has at least one unreconciled debit and credit since last time the invoices & payments matching was performed.")
last_time_entries_checked = fields.Datetime(oldname='last_reconciliation_date',
string='Latest Invoices & Payments Matching Date', readonly=True, copy=False,
help='Last time the invoices & payments matching was performed for this partner. '
'It is set either if there\'s not at least an unreconciled debit and an unreconciled credit '
'or if you click the "Done" button.')
invoice_ids = fields.One2many('account.invoice', 'partner_id', string='Invoices', readonly=True, copy=False)
contract_ids = fields.One2many('account.analytic.account', 'partner_id', string='Contracts', readonly=True)
bank_account_count = fields.Integer(compute='_compute_bank_count', string="Bank")
@api.multi
def _compute_bank_count(self):
bank_data = self.env['res.partner.bank'].read_group([('partner_id', 'in', self.ids)], ['partner_id'], ['partner_id'])
mapped_data = dict([(bank['partner_id'][0], bank['partner_id_count']) for bank in bank_data])
for partner in self:
partner.bank_account_count = mapped_data.get(partner.id, 0)
def _find_accounting_partner(self, partner):
''' Find the partner for which the accounting entries will be created '''
return partner.commercial_partner_id
@api.model
def _commercial_fields(self):
return super(ResPartner, self)._commercial_fields() + \
['debit_limit', 'property_account_payable_id', 'property_account_receivable_id', 'property_account_position_id',
'property_payment_term_id', 'property_supplier_payment_term_id', 'last_time_entries_checked']
|
aifil/odoo
|
addons/account/models/partner.py
|
Python
|
gpl-3.0
| 22,008
|
"""Box node for use in geometry attribute of Shapes"""
from vrml import cache
from OpenGLContext.arrays import array
from OpenGL.arrays import vbo
from OpenGL.GL import *
from vrml.vrml97 import basenodes
from vrml import protofunctions
class Box( basenodes.Box ):
"""Simple Box object of given size centered about local origin
The Box geometry node can be used in the geometry
field of a Shape node to be displayed. Use Transform
nodes to position the box within the world.
The Box includes texture coordinates and normals.
Attributes of note within the Box object:
size -- x,y,z tuple giving the size of the box
listID -- used internally to store the display list
used to display the box during rendering
Reference:
http://www.web3d.org/technicalinfo/specifications/vrml97/part1/nodesRef.html#Box
"""
def compile( self, mode=None ):
"""Compile the box as a display-list"""
if vbo.get_implementation():
vb = vbo.VBO( array( list(yieldVertices( self.size )), 'f'))
def draw( textured=True,lit=True ):
vb.bind()
try:
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS)
try:
glEnableClientState( GL_VERTEX_ARRAY )
if lit:
glEnableClientState( GL_NORMAL_ARRAY )
glNormalPointer( GL_FLOAT, 32, vb+8 )
if textured:
glEnableClientState( GL_TEXTURE_COORD_ARRAY )
glTexCoordPointer( 2, GL_FLOAT, 32, vb )
glVertexPointer( 3, GL_FLOAT, 32, vb+20 )
glDrawArrays( GL_TRIANGLES, 0, 36 )
finally:
glPopClientAttrib()
finally:
vb.unbind()
else:
vb = array( list(yieldVertices( self.size )), 'f')
def draw(textured=True,lit=True):
glPushClientAttrib(GL_CLIENT_ALL_ATTRIB_BITS)
try:
glInterleavedArrays( GL_T2F_N3F_V3F, 0, vb )
glDrawArrays( GL_TRIANGLES, 0, 36 )
finally:
glPopClientAttrib()
holder = mode.cache.holder(self, draw)
holder.depend( self, protofunctions.getField(self, 'size') )
return draw
def render (
self,
visible = 1, # can skip normals and textures if not
lit = 1, # can skip normals if not
textured = 1, # can skip textureCoordinates if not
transparent = 0, # XXX should sort triangle geometry...
mode = None, # the renderpass object for which we compile
):
"""Render the Box (build and) call the display list"""
# do we have a cached array-geometry?
vb = mode.cache.getData(self)
if not vb:
vb = self.compile( mode=mode )
if vb:
vb(textured=textured,lit=lit)
return 1
def boundingVolume( self, mode ):
"""Create a bounding-volume object for this node"""
from OpenGLContext.scenegraph import boundingvolume
current = boundingvolume.getCachedVolume( self )
if current:
return current
return boundingvolume.cacheVolume(
self,
boundingvolume.AABoundingBox(
size = self.size,
),
( (self, 'size'), ),
)
def yieldVertices(size):
x,y,z = size
x,y,z = x/2.0,y/2.0,z/2.0
normal = ( 0.0, 0.0, 1.0)
yield (0.0, 0.0)+ normal + (-x,-y,z);
yield (1.0, 0.0)+ normal + (x,-y,z);
yield (1.0, 1.0)+ normal + (x,y,z);
yield (0.0, 0.0)+ normal + (-x,-y,z);
yield (1.0, 1.0)+ normal + (x,y,z);
yield (0.0, 1.0)+ normal + (-x,y,z);
normal = ( 0.0, 0.0,-1.0);
yield (1.0, 0.0)+ normal + (-x,-y,-z);
yield (1.0, 1.0)+ normal + (-x,y,-z);
yield (0.0, 1.0)+ normal + (x,y,-z);
yield (1.0, 0.0)+ normal + (-x,-y,-z);
yield (0.0, 1.0)+ normal + (x,y,-z);
yield (0.0, 0.0)+ normal + (x,-y,-z);
normal = ( 0.0, 1.0, 0.0)
yield (0.0, 1.0)+ normal + (-x,y,-z);
yield (0.0, 0.0)+ normal + (-x,y,z);
yield (1.0, 0.0)+ normal + (x,y,z);
yield (0.0, 1.0)+ normal + (-x,y,-z);
yield (1.0, 0.0)+ normal + (x,y,z);
yield (1.0, 1.0)+ normal + (x,y,-z);
normal = ( 0.0,-1.0, 0.0)
yield (1.0, 1.0)+ normal + (-x,-y,-z);
yield (0.0, 1.0)+ normal + (x,-y,-z);
yield (0.0, 0.0)+ normal + (x,-y,z);
yield (1.0, 1.0)+ normal + (-x,-y,-z);
yield (0.0, 0.0)+ normal + (x,-y,z);
yield (1.0, 0.0)+ normal + (-x,-y,z);
normal = ( 1.0, 0.0, 0.0)
yield (1.0, 0.0)+ normal + (x,-y,-z);
yield (1.0, 1.0)+ normal + (x,y,-z);
yield (0.0, 1.0)+ normal + (x,y,z);
yield (1.0, 0.0)+ normal + (x,-y,-z);
yield (0.0, 1.0)+ normal + (x,y,z);
yield (0.0, 0.0)+ normal + (x,-y,z);
normal = (-1.0, 0.0, 0.0)
yield (0.0, 0.0)+ normal + (-x,-y,-z);
yield (1.0, 0.0)+ normal + (-x,-y,z);
yield (1.0, 1.0)+ normal + (-x,y,z);
yield (0.0, 0.0)+ normal + (-x,-y,-z);
yield (1.0, 1.0)+ normal + (-x,y,z);
yield (0.0, 1.0)+ normal + (-x,y,-z);
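# An editor's sanity-check sketch (not part of the original module): the
# generator yields 36 vertices (6 faces x 2 triangles x 3 corners), each an
# 8-float tuple laid out as T2F_N3F_V3F (2 texcoords, 3 normal, 3 position),
# which matches the 32-byte stride and the vb+8 / vb+20 offsets used above.
#
# verts = list(yieldVertices((1.0, 1.0, 1.0)))
# assert len(verts) == 36 and all(len(v) == 8 for v in verts)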
|
stack-of-tasks/rbdlpy
|
tutorial/lib/python2.7/site-packages/OpenGLContext/scenegraph/box.py
|
Python
|
lgpl-3.0
| 5,301
|
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer.functions.activation import tanh
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
class TestTanh(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.check_backward_options = {}
self.check_double_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
self.check_double_backward_options = {'atol': 5e-3, 'rtol': 5e-2}
def check_forward(self, x_data, use_cudnn='always'):
x = chainer.Variable(x_data)
with chainer.using_config('use_cudnn', use_cudnn):
y = functions.tanh(x)
self.assertEqual(y.data.dtype, self.dtype)
y_expect = functions.tanh(chainer.Variable(self.x))
testing.assert_allclose(y_expect.data, y.data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), 'always')
@attr.gpu
def test_forward_gpu_non_contiguous(self):
self.check_forward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
'always')
@attr.gpu
def test_forward_gpu_no_cudnn(self):
self.check_forward(cuda.to_gpu(self.x), 'never')
def check_backward(self, x_data, gy_data, use_cudnn='always'):
with chainer.using_config('use_cudnn', use_cudnn):
gradient_check.check_backward(
functions.tanh, x_data, gy_data, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_backward_gpu_non_contiguous(self):
self.check_backward(cuda.cupy.asfortranarray(cuda.to_gpu(self.x)),
cuda.cupy.asfortranarray(cuda.to_gpu(self.gy)))
@attr.gpu
def test_backward_gpu_no_cudnn(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy), 'never')
def check_double_backward(self, x_data, gy_data, ggx_data):
gradient_check.check_double_backward(
chainer.functions.tanh, x_data, gy_data, ggx_data,
dtype=numpy.float64, **self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@testing.parameterize(*testing.product({
'use_cudnn': ['always', 'auto', 'never'],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@attr.cudnn
class TestTanhCudnnCall(unittest.TestCase):
def setUp(self):
self.x = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
self.gy = cuda.cupy.random.uniform(-1, 1, (2, 3)).astype(self.dtype)
with chainer.using_config('use_cudnn', self.use_cudnn):
self.expect = chainer.should_use_cudnn('==always')
def forward(self):
x = chainer.Variable(self.x)
return functions.tanh(x)
def test_call_cudnn_forward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
default_func = cuda.cupy.cudnn.activation_forward
with testing.patch('cupy.cudnn.activation_forward') as func:
func.side_effect = default_func
self.forward()
self.assertEqual(func.called, self.expect)
def test_call_cudnn_backward(self):
with chainer.using_config('use_cudnn', self.use_cudnn):
y = self.forward()
y.grad = self.gy
default_func = cuda.cupy.cudnn.activation_backward
with testing.patch('cupy.cudnn.activation_backward') as func:
func.side_effect = default_func
y.backward()
self.assertEqual(func.called, self.expect)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@testing.fix_random()
class TestTanhGrad(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-.5, .5, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 1e-3, 'rtol': 1e-2}
def check_backward(self, x_data, y_data, gy_data, ggx_data):
def f(y, gy):
return tanh.TanhGrad(x_data).apply((y, gy))[0]
gradient_check.check_backward(
f, (y_data, gy_data), ggx_data, dtype=numpy.float64,
**self.check_backward_options)
def test_backward_cpu(self):
y = numpy.array(numpy.tanh(self.x))
self.check_backward(self.x, y, self.gy, self.ggx)
@attr.gpu
def test_backward_gpu(self):
y = numpy.array(numpy.tanh(self.x))
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(y), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
testing.run_module(__name__, __file__)
|
anaruse/chainer
|
tests/chainer_tests/functions_tests/activation_tests/test_tanh.py
|
Python
|
mit
| 5,799
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if not settings.configured:
settings.configure(
DATABASE_ENGINE='sqlite3',
# HACK: this fixes our threaded runserver remote tests
# TEST_DATABASE_NAME='test_sentry',
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.sessions',
'django.contrib.sites',
# Included to fix Disqus' test Django which solves IntegrityMessage case
'django.contrib.contenttypes',
'nexus',
'nexus_celery'
],
ROOT_URLCONF='',
DEBUG=False,
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['nexus_celery']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
brilliant-org/nexus-memcache
|
runtests.py
|
Python
|
apache-2.0
| 1,054
|
from django_currentuser.db.models.fields import CurrentUserField
__all__ = ['CurrentUserField']
|
PaesslerAG/django-currentuser
|
django_currentuser/db/models/__init__.py
|
Python
|
bsd-3-clause
| 98
|
'''
Created on 2014. 3. 21.
@author: Su-Jin Lee
'''
from ast import literal_eval
from time import time, sleep
from twisted.internet.protocol import ServerFactory, ClientFactory
from twisted.protocols.basic import LineReceiver
class LFProtocol(LineReceiver):
    def verifyMsg(self, msg):
        # verifyFunc is expected to be attached externally by the owner of
        # this protocol; fall back to printing when it is absent.
        if getattr(self, 'verifyFunc', None):
            self.verifyFunc(msg)
        else:
            print('LFProtocol.verifyFunc: {0}'.format(msg))
def connectionMade(self):
ip = self.transport.getPeer().host
port = self.transport.getPeer().port
self.factory.comm.addPeer(ip, port, self)
def connectionLost(self, reason):
self.factory.comm.removePeer(self)
def lineReceived(self, line):
msg = literal_eval(line)
msg['peer_ip'] = self.transport.getPeer().host
msg['peer_port'] = self.transport.getPeer().port
self.verifyMsg(msg)
class LFServerFactory(ServerFactory):
protocol = LFProtocol
def setComm(self, comm):
self.comm = comm
class LFClientFactory(ClientFactory):
protocol = LFProtocol
def clientConnectionFailed(self, connector, reason):
print 'connection failed:', reason.getErrorMessage()
def clientConnectionLost(self, connector, reason):
print 'connection lost:', reason.getErrorMessage()
def buildProtocol(self, addr):
p = self.protocol()
p.factory = self
self.comm.addPeer(addr.host, addr.port, p)
return p
def setComm(self, comm):
self.comm = comm
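# An editor's usage sketch (assumptions, not original code): messages travel
# as Python dict literals, one per line, and are parsed with
# ast.literal_eval in lineReceived(). A peer might send, e.g.:
#
# protocol.sendLine(repr({'cmd': 'ping', 'ts': time()}))
#
# The received dict is augmented with 'peer_ip'/'peer_port' before being
# handed to verifyMsg(). The 'comm' object with addPeer()/removePeer() is
# assumed to be supplied by the caller through setComm().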
|
NecromancerLev0001/LightingFury
|
LF/lfprotocol.py
|
Python
|
mit
| 1,561
|
"""Mailcap file handling. See RFC 1524."""
import os
__all__ = ["getcaps","findmatch"]
# Part 1: top-level interface.
def getcaps():
"""Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
"""
caps = {}
for mailcap in listmailcapfiles():
try:
fp = open(mailcap, 'r')
except IOError:
continue
morecaps = readmailcapfile(fp)
fp.close()
for key in morecaps.keys():
if not caps.has_key(key):
caps[key] = morecaps[key]
else:
caps[key] = caps[key] + morecaps[key]
return caps
def listmailcapfiles():
"""Return a list of all mailcap files found on the system."""
# XXX Actually, this is Unix-specific
if os.environ.has_key('MAILCAPS'):
str = os.environ['MAILCAPS']
mailcaps = str.split(':')
else:
if os.environ.has_key('HOME'):
home = os.environ['HOME']
else:
# Don't bother with getpwuid()
home = '.' # Last resort
mailcaps = [home + '/.mailcap', '/etc/mailcap',
'/usr/etc/mailcap', '/usr/local/etc/mailcap']
return mailcaps
# Part 2: the parser.
def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {}
while 1:
line = fp.readline()
if not line: break
# Ignore comments and blank lines
if line[0] == '#' or line.strip() == '':
continue
nextline = line
# Join continuation lines
while nextline[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: nextline = '\n'
line = line[:-2] + nextline
# Parse the line
key, fields = parseline(line)
if not (key and fields):
continue
# Normalize the key
types = key.split('/')
for j in range(len(types)):
types[j] = types[j].strip()
key = '/'.join(types).lower()
# Update the database
if caps.has_key(key):
caps[key].append(fields)
else:
caps[key] = [fields]
return caps
def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = []
i, n = 0, len(line)
while i < n:
field, i = parsefield(line, i, n)
fields.append(field)
i = i+1 # Skip semicolon
if len(fields) < 2:
return None, None
key, view, rest = fields[0], fields[1], fields[2:]
fields = {'view': view}
for field in rest:
i = field.find('=')
if i < 0:
fkey = field
fvalue = ""
else:
fkey = field[:i].strip()
fvalue = field[i+1:].strip()
if fields.has_key(fkey):
# Ignore it
pass
else:
fields[fkey] = fvalue
return key, fields
def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i
while i < n:
c = line[i]
if c == ';':
break
elif c == '\\':
i = i+2
else:
i = i+1
return line[start:i].strip(), i
# Part 3: using the database.
def findmatch(caps, MIMEtype, key='view', filename="/dev/null", plist=[]):
"""Find a match for a mailcap entry.
Return a tuple containing the command line, and the mailcap entry
used; (None, None) if no match is found. This may invoke the
'test' command of several matching entries before deciding which
entry to use.
"""
entries = lookup(caps, MIMEtype, key)
# XXX This code should somehow check for the needsterminal flag.
for e in entries:
if e.has_key('test'):
test = subst(e['test'], filename, plist)
if test and os.system(test) != 0:
continue
command = subst(e[key], MIMEtype, filename, plist)
return command, e
return None, None
def lookup(caps, MIMEtype, key=None):
entries = []
if caps.has_key(MIMEtype):
entries = entries + caps[MIMEtype]
MIMEtypes = MIMEtype.split('/')
MIMEtype = MIMEtypes[0] + '/*'
if caps.has_key(MIMEtype):
entries = entries + caps[MIMEtype]
if key is not None:
entries = filter(lambda e, key=key: e.has_key(key), entries)
return entries
def subst(field, MIMEtype, filename, plist=[]):
# XXX Actually, this is Unix-specific
res = ''
i, n = 0, len(field)
while i < n:
c = field[i]; i = i+1
if c != '%':
if c == '\\':
c = field[i:i+1]; i = i+1
res = res + c
else:
c = field[i]; i = i+1
if c == '%':
res = res + c
elif c == 's':
res = res + filename
elif c == 't':
res = res + MIMEtype
elif c == '{':
start = i
while i < n and field[i] != '}':
i = i+1
name = field[start:i]
i = i+1
res = res + findparam(name, plist)
# XXX To do:
# %n == number of parts if type is multipart/*
# %F == list of alternating type and filename for parts
else:
res = res + '%' + c
return res
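# An illustrative example of the substitution rules above (editor's sketch):
# subst('lpr -P%{printer} %s', 'text/plain', 'doc.txt', ['printer=lp0'])
# returns 'lpr -Plp0 doc.txt'; a '%t' in the field would expand to
# 'text/plain'.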
def findparam(name, plist):
name = name.lower() + '='
n = len(name)
for p in plist:
if p[:n].lower() == name:
return p[n:]
return ''
# Part 4: test program.
def test():
import sys
caps = getcaps()
if not sys.argv[1:]:
show(caps)
return
for i in range(1, len(sys.argv), 2):
args = sys.argv[i:i+2]
if len(args) < 2:
print "usage: mailcap [MIMEtype file] ..."
return
MIMEtype = args[0]
file = args[1]
command, e = findmatch(caps, MIMEtype, 'view', file)
if not command:
print "No viewer found for", type
else:
print "Executing:", command
sts = os.system(command)
if sts:
print "Exit status:", sts
def show(caps):
print "Mailcap files:"
for fn in listmailcapfiles(): print "\t" + fn
print
if not caps: caps = getcaps()
print "Mailcap entries:"
print
ckeys = caps.keys()
ckeys.sort()
for type in ckeys:
print type
entries = caps[type]
for e in entries:
keys = e.keys()
keys.sort()
for k in keys:
print " %-15s" % k, e[k]
print
if __name__ == '__main__':
test()
|
DarioGT/OMS-PluginXML
|
org.modelsphere.sms/lib/jython-2.2.1/Lib/mailcap.py
|
Python
|
gpl-3.0
| 7,740
|
def main():
N = int(input())
H = [int(x) for x in input().split()]
dp = [0] * N
for i in range(1, N):
if i == 1:
dp[i] = abs(H[i] - H[i-1])
else:
dp[i] = min(dp[i-1] + abs(H[i] - H[i-1]), dp[i-2] + abs(H[i] - H[i-2]))
print(dp[-1])
if __name__ == '__main__':
main()
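# Editor's note: this is the classic "Frog 1" DP. dp[i] is the minimum cost
# to reach stone i, with
# dp[i] = min(dp[i-1] + |H[i]-H[i-1]|, dp[i-2] + |H[i]-H[i-2]|).
# For example, H = [10, 30, 40, 20] gives dp = [0, 20, 30, 30], so the
# answer printed is 30.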
|
knuu/competitive-programming
|
atcoder/dp/edu_dp_a.py
|
Python
|
mit
| 332
|
#!/usr/bin/env python2
#
##############################################################################
### NZBGET POST-PROCESSING SCRIPT ###
# Post-Process to CouchPotato, SickBeard, NzbDrone, Mylar, Gamez, HeadPhones.
#
# This script sends the download to your automated media management servers.
#
# NOTE: This script requires Python to be installed on your system.
##############################################################################
#
### OPTIONS ###
## General
# Auto Update nzbToMedia (0, 1).
#
# Set to 1 if you want nzbToMedia to automatically check for and update to the latest version
#auto_update=0
# Safe Mode protection of DestDir (0, 1).
#
# Enable/Disable a safety check to ensure we don't process all downloads in the default_downloadDirectory by mistake.
#safe_mode=1
## Gamez
# Gamez script category.
#
# category that gets called for post-processing with Gamez.
#gzCategory=games
# Gamez api key.
#gzapikey=
# Gamez host.
#
# The ipaddress for your Gamez server. e.g For the Same system use localhost or 127.0.0.1
#gzhost=localhost
# Gamez port.
#gzport=8085
# Gamez uses ssl (0, 1).
#
# Set to 1 if using ssl, else set to 0.
#gzssl=0
# Gamez library
#
# move downloaded games here.
#gzlibrary
# Gamez web_root
#
# set this if using a reverse proxy.
#gzweb_root=
# Gamez watch directory.
#
# set this to where your Gamez completed downloads are.
#gzwatch_dir=
## Posix
# Niceness for external tasks Extractor and Transcoder.
#
# Set the Niceness value for the nice command. These range from -20 (most favorable to the process) to 19 (least favorable to the process).
#niceness=10
# ionice scheduling class (0, 1, 2, 3).
#
# Set the ionice scheduling class. 0 for none, 1 for real time, 2 for best-effort, 3 for idle.
#ionice_class=2
# ionice scheduling class data.
#
# Set the ionice scheduling class data. This defines the class data, if the class accepts an argument. For real time and best-effort, 0-7 is valid data.
#ionice_classdata=4
## WakeOnLan
# use WOL (0, 1).
#
# set to 1 to send WOL broadcast to the mac and test the server (e.g. xbmc) on the host and port specified.
#wolwake=0
# WOL MAC
#
# enter the mac address of the system to be woken.
#wolmac=00:01:2e:2D:64:e1
# Set the Host and Port of a server to verify system has woken.
#wolhost=192.168.1.37
#wolport=80
### NZBGET POST-PROCESSING SCRIPT ###
##############################################################################
import sys
import nzbToMedia
section = "Gamez"
result = nzbToMedia.main(sys.argv, section)
sys.exit(result)
|
DxCx/nzbToMedia
|
nzbToGamez.py
|
Python
|
gpl-3.0
| 2,702
|
from .__version__ import __version__
from .environment import get_active_envrionment
|
vmalloc/pydeploy
|
pydeploy/__init__.py
|
Python
|
bsd-3-clause
| 85
|
#!/usr/bin/env python
"""Make sequence context specific bigbed per sample"""
import argparse
import os
from nested_dict import nested_dict
from Bio import SeqIO
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description='Concatenate reference for display in jbrowse')
parser.add_argument('-f', '--fasta', help='concatenated reference sequence')
parser.add_argument('-i', '--input', help='methylation.bed input')
parser.add_argument('-b', '--bed', help='bed file with contig to concatenated mapping')
parser.add_argument('-o', '--outputdir', help='output directory for bigwig files')
parser.add_argument('-s', '--samples', help='make group statistics given sample distribution')
args = parser.parse_args()
if not os.path.exists(args.outputdir):
os.mkdir(args.outputdir)
return args
def get_mapping(args):
"""parse bed file for mapping contigs to concatenated ref"""
mapping_dict = {}
with open(args.bed,'r') as handle:
for line in handle:
target,target_start,target_end,name = line.rstrip('\n').split('\t')
mapping_dict[name] = (target,target_start)
return mapping_dict
def get_groups(args):
"""get groups defined in sample file"""
group_dict = nested_dict()
with open(args.samples) as handle:
header = handle.readline().rstrip('\n').split(',')
for line in handle:
split_line = line.rstrip('\n').split(',')
sample = split_line[0]
for name,item in zip(header[2:],split_line[2:]):
try:
group_dict[name][item].append(sample)
except AttributeError:
group_dict[name][item] = [sample]
return group_dict
def check_context(cluster,position,ref,context):
"""Check if context matches specified position"""
position = int(position)
nt = ref[str(cluster)][int(position)]
if nt.upper() == 'C':
try:
up_2 = ref[cluster][int(position)+1:int(position)+3]
if up_2[0] == 'G':
actual_context = 'CG'
elif up_2[1] == 'G':
actual_context = 'CHG'
else:
actual_context = 'CHH'
except IndexError:
# TODO: replace by expected nucleotides from enz recognition site
return 0
elif nt.upper() == 'G':
try:
down_2 = ref[cluster][int(position)-2:int(position)]
if down_2[1] == 'C':
actual_context = 'CG'
elif down_2[0] == 'C':
actual_context = 'CHG'
else:
actual_context = 'CHH'
except IndexError:
return 0
else:
print 'Not a C nucleotide!'
return 1
if actual_context != context:
# print "Context mismatch!"
return 1
else:
return 0
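# An editor's illustration of the context rules above: for a forward-strand
# 'C' the two downstream bases decide the context (CG if ref[p+1] is G, CHG
# if ref[p+2] is G, else CHH); for a 'G' the mirrored upstream bases are
# checked, since the cytosine then sits on the reverse strand. E.g. with
# ref = {'c1': 'ACGT'}, position 1 is a 'C' followed by 'G', so
# check_context('c1', 1, ref, 'CG') returns 0 (a match).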
def make_bed_graph(mapping_dict, groups, args):
"""get methylation ratio of invididuals and groups"""
count = 0
with open(args.input) as input_handle:
header = input_handle.readline().rstrip('\n').split('\t')
#make file handles
file_handles = {}
ref = SeqIO.to_dict(SeqIO.parse(args.fasta,'fasta'))
for context in ['CG','CHG','CHH']:
for i in range(4, len(header), 2):
sample = header[i][:-11]
handle = open(os.path.join(args.outputdir,'%s.%s.bedGraph'%(sample,context)),'w')
# handle.write('track type=bedGraph\n')
file_handles['%s_%s' % (sample,context)] = handle
for category, sub_group in groups.items():
for value, list in sub_group.items():
group_name = '%s_%s' % (category, value)
handle = open(os.path.join(args.outputdir, '%s.%s.bedGraph' % (group_name,context)),'w')
# handle.write('track type=bedGraph\n')
file_handles['%s_%s' % (group_name,context)] = handle
for line in input_handle:
count += 1
if not count%1000000:
                print '%s lines processed' % count
split_line = line.rstrip('\n').split('\t')
pos_in_contig = split_line[1]
context = split_line[2]
concat_contig, start_contig = mapping_dict[split_line[0].replace('chr', '')]
final_position = str(int(start_contig) + int(pos_in_contig) - 1)
nt = str(ref[concat_contig].seq)[int(final_position)]
if check_context(concat_contig, final_position, ref, context) != 0:
# print "Skipping cluster %s for position %s" % (cluster, position)
continue
meth_dict = {}
for i in range(4,len(header),2):
sample = header[i][:-11]
try:
meth_ratio = float(split_line[i]) / float(split_line[i+1])
except ValueError:
meth_ratio = None
meth_dict[sample] = meth_ratio
for category,sub_group in groups.items():
for value,list in sub_group.items():
try:
meth_ratio = sum([meth_dict[e] for e in list if meth_dict[e]])/\
float(len([e for e in list if meth_dict[e]]))
except ZeroDivisionError:
meth_ratio = None
meth_dict['%s_%s' % (category, value)] = meth_ratio
# concat_contig,position = mapping_dict[split_line[0].replace('chr','')]
# position = str(int(position) + int(split_line[1]) - 1)
context = split_line[2]
for key,value in meth_dict.items():
if not value:
continue
if nt == 'G':
value *= -1
handle = file_handles['%s_%s' % (key,context)]
out = [concat_contig,final_position,str(int(final_position)+1),'%.4f'%value]
handle.write('\t'.join(out) + '\n')
    for name,handle in file_handles.items():
        handle.close()
        print '%s closed' % name
def bed_graph_to_bigwig(args):
"""convert bed graphs to bigwig"""
#make chrom.sizes!
chrom_sizes = os.path.join(args.outputdir,'chrom.sizes')
out_handle = open(chrom_sizes,'w')
with open(args.fasta) as handle:
while True:
seq_name = handle.readline().rstrip('\n')[1:]
if not seq_name:
break
seq_len = len(handle.readline().rstrip('\n'))
out_handle.write('%s\t%s\n' % (seq_name,seq_len))
out_handle.close()
for file in os.listdir(args.outputdir):
if not file.endswith('bedGraph'):
continue
file_in = os.path.join(args.outputdir, file)
file_sorted = os.path.join(args.outputdir, file+'.sorted')
name = '.'.join(file.split('.')[:-2])
context = file.split('.')[-2]
file_out = os.path.join(args.outputdir, '%s.bw.%s' % (name, context.lower()))
os.system('sort -k1,1 -k2,2n %s > %s' % (file_in, file_sorted))
os.system('bedGraphToBigWig %s %s %s' % (file_sorted, chrom_sizes, file_out)) #-itemsPerSlot=1
os.system('rm %s %s' % (file_in, file_sorted))
return 0
def main():
"""main function"""
args = parse_args()
mapping_dict = get_mapping(args)
groups = get_groups(args)
make_bed_graph(mapping_dict, groups, args)
bed_graph_to_bigwig(args)
return 0
if __name__ == '__main__':
return_code = main()
|
thomasvangurp/epiGBS
|
jbrowse/bed_split_sample_bigwig.py
|
Python
|
mit
| 7,605
|
import numpy as np
import parabem
from parabem import pan3d
v1 = parabem.PanelVector3(-0.5,-0.5, 0)
v2 = parabem.PanelVector3( 0.5,-0.5, 0)
v3 = parabem.PanelVector3( 0.5, 0.5, 0)
v4 = parabem.PanelVector3(-0.5, 0.5, 0)
p = parabem.Panel3([v1, v2, v3, v4])
v5 = parabem.Vector3(0.5, 0, 0.5)
v6 = parabem.Vector3(0.4999, 0 , 0)
v7 = parabem.Vector3(0.5, 0.0 , 0)
v8 = parabem.Vector3(0.5001, 0 , 0)
checklist = [v1, v2, v3, v4, v5, v6, v7, v8, p.center]
for v in checklist:
dip, src = pan3d.doublet_src_3_0_vsaero(v, p)
print(v, ": doublet:", dip, "source:", src)
|
looooo/panel-method
|
examples/tests/test_doublet_src.py
|
Python
|
gpl-3.0
| 576
|
#!/usr/bin/env python
# [SublimeLinter pep8-max-line-length:150]
# -*- coding: utf-8 -*-
"""
black_rhino is a multi-agent simulator for financial network analysis
Copyright (C) 2016 Co-Pierre Georg (co-pierre.georg@keble.ox.ac.uk)
Pawel Fiedor (pawel@fiedor.eu)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, version 3 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import logging
from abm_template.src.basemeasurement import BaseMeasurement
# ============================================================================
#
# class Measurement
#
# ============================================================================
class Measurement(BaseMeasurement):
#
# VARIABLES
#
# identifier for usual purposes
identifier = ""
# Now we set up a config for the measurements
# see notes on the xml config file in the method below
config = {}
# environment for access
environment = type('', (), {})()
    # runner for access
    runner = type('', (), {})()
    # filename for the output csv
    filename = ""
# and the file we're writing to
file = None
# plus the csv writer
csv_writer = None
#
# METHODS
#
def get_identifier(self):
return self.identifier
def set_identifier(self, identifier):
super(Measurement, self).set_identifier(identifier)
def get_config(self):
return self.config
def set_config(self, config):
super(Measurement, self).set_config(config)
def get_environment(self):
return self.environment
def set_environment(self, environment):
super(Measurement, self).set_environment(environment)
def get_runner(self):
return self.runner
def set_runner(self, runner):
super(Measurement, self).set_runner(runner)
def get_filename(self):
return self.filename
def set_filename(self, filename):
super(Measurement, self).set_filename(filename)
def get_file(self):
return self.file
def set_file(self, file):
super(Measurement, self).set_file(file)
def get_csv_writer(self):
return self.csv_writer
def set_csv_writer(self, csv_writer):
super(Measurement, self).set_csv_writer(csv_writer)
# -------------------------------------------------------------------------
# __init__(self, environment, runner)
# Initialises the Measurements object and reads the config
# -------------------------------------------------------------------------
def __init__(self, environment, runner):
super(Measurement, self).__init__(environment, runner)
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# open_file(self)
# Opens the file and writes the headers
# -------------------------------------------------------------------------
def open_file(self):
super(Measurement, self).open_file()
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# write_to_file(self)
    # Writes a row of values to store the state of the system
# at the time of calling this method
# -------------------------------------------------------------------------
def write_to_file(self):
super(Measurement, self).write_to_file()
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# close_file(self, filename)
    # Closes the file so we don't leave an open handle or lose buffered writes
# -------------------------------------------------------------------------
def close_file(self):
super(Measurement, self).close_file()
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# read_xml_config_file(self, config_file_name)
    # Reads the xml config file specifying the output configuration,
    # which is a dictionary keyed by column number
# We need to specify the filename
# We also need to specify each output:
# - type: 'output'
# - column: integer specifying which column will be used for this
# - header: string written as header in the csv file in the column
# - value: string or number, identifier for the wrapper function
# specifying what the wrapper function returns
# Thus:
    # {column_number: [header, output, wrapper_id], ...}
    # {int: [string, string, string], ...}
#
# Now we pass this on to the Measurement class through an xml file
# which should look like this
#
# <measurement identifier='test_output'>
# <parameter type='filename' value='TestMeasurement.csv'></parameter>
# <parameter type='output' column='1' header='Step' value='current_step'></parameter>
# <parameter type='output' column='2' header='Deposits' value='household_deposits' ></parameter>
# </measurement>
#
# -------------------------------------------------------------------------
def read_xml_config_file(self, config_file_name):
super(Measurement, self).read_xml_config_file(config_file_name)
# -------------------------------------------------------------------------
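    # For the XML example above, the parsed config would be (a sketch,
    # assuming the BaseMeasurement parser follows the scheme documented
    # above):
    #   {1: ['Step', 'output', 'current_step'],
    #    2: ['Deposits', 'output', 'household_deposits']}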
# -------------------------------------------------------------------------
# wrapper(self, id)
# Wrapper for functions returning the desired values to be written
# -------------------------------------------------------------------------
def wrapper(self, ident):
if ident == "current_step":
return self.runner.current_step+1
if ident == "household_deposits":
#return self.environment.households[0].get_account("deposits")
wealth = 0.0
for household in self.environment.households:
for tranx in household.accounts:
if tranx.type_ == "deposits" and tranx.from_ == household:
wealth = wealth + tranx.amount
if tranx.type_ == "loans" and tranx.to == household:
wealth = wealth - tranx.amount
return wealth
# -------------------------------------------------------------------------
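    # Illustration (not part of BlackRhino): with one household holding a
    # deposit transaction of 100.0 (from_ == household) and a loan
    # transaction of 40.0 (to == household), wrapper('household_deposits')
    # would report a wealth of 100.0 - 40.0 = 60.0 for that household.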
|
cogeorg/BlackRhino
|
src/measurement.py
|
Python
|
gpl-3.0
| 6,855
|
from datetime import datetime
import glob
import re
import os
import warnings
import h5py
import numpy as np
from uncertainties import ufloat
import openmc
import openmc.checkvalue as cv
_VERSION_STATEPOINT = 17
class StatePoint:
"""State information on a simulation at a certain point in time (at the end
of a given batch). Statepoints can be used to analyze tally results as well
as restart a simulation.
Parameters
----------
filepath : str or Path
Path to file to load
autolink : bool, optional
Whether to automatically link in metadata from a summary.h5 file and
stochastic volume calculation results from volume_*.h5 files. Defaults
to True.
Attributes
----------
cmfd_on : bool
Indicate whether CMFD is active
cmfd_balance : numpy.ndarray
Residual neutron balance for each batch
    cmfd_dominance : numpy.ndarray
Dominance ratio for each batch
cmfd_entropy : numpy.ndarray
Shannon entropy of CMFD fission source for each batch
cmfd_indices : numpy.ndarray
Number of CMFD mesh cells and energy groups. The first three indices
correspond to the x-, y-, and z- spatial directions and the fourth index
is the number of energy groups.
cmfd_srccmp : numpy.ndarray
Root-mean-square difference between OpenMC and CMFD fission source for
each batch
cmfd_src : numpy.ndarray
CMFD fission source distribution over all mesh cells and energy groups.
current_batch : int
Number of batches simulated
date_and_time : datetime.datetime
Date and time at which statepoint was written
entropy : numpy.ndarray
Shannon entropy of fission source at each batch
filters : dict
Dictionary whose keys are filter IDs and whose values are Filter
objects
generations_per_batch : int
Number of fission generations per batch
global_tallies : numpy.ndarray of compound datatype
Global tallies for k-effective estimates and leakage. The compound
datatype has fields 'name', 'sum', 'sum_sq', 'mean', and 'std_dev'.
k_combined : uncertainties.UFloat
Combined estimator for k-effective
k_col_abs : float
Cross-product of collision and absorption estimates of k-effective
k_col_tra : float
Cross-product of collision and tracklength estimates of k-effective
k_abs_tra : float
Cross-product of absorption and tracklength estimates of k-effective
k_generation : numpy.ndarray
Estimate of k-effective for each batch/generation
meshes : dict
Dictionary whose keys are mesh IDs and whose values are MeshBase objects
n_batches : int
Number of batches
n_inactive : int
Number of inactive batches
n_particles : int
Number of particles per generation
n_realizations : int
Number of tally realizations
path : str
Working directory for simulation
photon_transport : bool
Indicate whether photon transport is active
run_mode : str
Simulation run mode, e.g. 'eigenvalue'
runtime : dict
Dictionary whose keys are strings describing various runtime metrics
and whose values are time values in seconds.
seed : int
Pseudorandom number generator seed
source : numpy.ndarray of compound datatype
Array of source sites. The compound datatype has fields 'r', 'u',
'E', 'wgt', 'delayed_group', 'surf_id', and 'particle', corresponding to
the position, direction, energy, weight, delayed group, surface ID and
particle type of the source site, respectively.
source_present : bool
Indicate whether source sites are present
sparse : bool
        Whether or not the tallies use SciPy's LIL sparse matrix format for
compressed data storage
tallies : dict
Dictionary whose keys are tally IDs and whose values are Tally objects
tallies_present : bool
Indicate whether user-defined tallies are present
tally_derivatives : dict
Dictionary whose keys are tally derivative IDs and whose values are
TallyDerivative objects
    version : tuple of Integral
Version of OpenMC
summary : None or openmc.Summary
A summary object if the statepoint has been linked with a summary file
"""
def __init__(self, filepath, autolink=True):
filename = str(filepath) # in case it's a Path
self._f = h5py.File(filename, 'r')
self._meshes = {}
self._filters = {}
self._tallies = {}
self._derivs = {}
# Check filetype and version
cv.check_filetype_version(self._f, 'statepoint', _VERSION_STATEPOINT)
# Set flags for what data has been read
self._meshes_read = False
self._filters_read = False
self._tallies_read = False
self._summary = None
self._global_tallies = None
self._sparse = False
self._derivs_read = False
# Automatically link in a summary file if one exists
if autolink:
path_summary = os.path.join(os.path.dirname(filename), 'summary.h5')
if os.path.exists(path_summary):
su = openmc.Summary(path_summary)
self.link_with_summary(su)
path_volume = os.path.join(os.path.dirname(filename), 'volume_*.h5')
for path_i in glob.glob(path_volume):
if re.search(r'volume_\d+\.h5', path_i):
vol = openmc.VolumeCalculation.from_hdf5(path_i)
self.add_volume_information(vol)
def __enter__(self):
return self
def __exit__(self, *exc):
self.close()
@property
def cmfd_on(self):
return self._f.attrs['cmfd_on'] > 0
@property
def cmfd_balance(self):
return self._f['cmfd/cmfd_balance'][()] if self.cmfd_on else None
@property
def cmfd_dominance(self):
return self._f['cmfd/cmfd_dominance'][()] if self.cmfd_on else None
@property
def cmfd_entropy(self):
return self._f['cmfd/cmfd_entropy'][()] if self.cmfd_on else None
@property
def cmfd_indices(self):
return self._f['cmfd/indices'][()] if self.cmfd_on else None
@property
def cmfd_src(self):
if self.cmfd_on:
data = self._f['cmfd/cmfd_src'][()]
return np.reshape(data, tuple(self.cmfd_indices), order='F')
else:
return None
@property
def cmfd_srccmp(self):
return self._f['cmfd/cmfd_srccmp'][()] if self.cmfd_on else None
@property
def current_batch(self):
return self._f['current_batch'][()]
@property
def date_and_time(self):
s = self._f.attrs['date_and_time'].decode()
return datetime.strptime(s, '%Y-%m-%d %H:%M:%S')
@property
def entropy(self):
if self.run_mode == 'eigenvalue':
return self._f['entropy'][()]
else:
return None
@property
def filters(self):
if not self._filters_read:
filters_group = self._f['tallies/filters']
# Iterate over all Filters
for group in filters_group.values():
new_filter = openmc.Filter.from_hdf5(group, meshes=self.meshes)
self._filters[new_filter.id] = new_filter
self._filters_read = True
return self._filters
@property
def generations_per_batch(self):
if self.run_mode == 'eigenvalue':
return self._f['generations_per_batch'][()]
else:
return None
@property
def global_tallies(self):
if self._global_tallies is None:
data = self._f['global_tallies'][()]
gt = np.zeros(data.shape[0], dtype=[
('name', 'a14'), ('sum', 'f8'), ('sum_sq', 'f8'),
('mean', 'f8'), ('std_dev', 'f8')])
gt['name'] = ['k-collision', 'k-absorption', 'k-tracklength',
'leakage']
gt['sum'] = data[:,1]
gt['sum_sq'] = data[:,2]
# Calculate mean and sample standard deviation of mean
n = self.n_realizations
gt['mean'] = gt['sum']/n
gt['std_dev'] = np.sqrt((gt['sum_sq']/n - gt['mean']**2)/(n - 1))
self._global_tallies = gt
return self._global_tallies
@property
def k_cmfd(self):
if self.cmfd_on:
return self._f['cmfd/k_cmfd'][()]
else:
return None
@property
def k_generation(self):
if self.run_mode == 'eigenvalue':
return self._f['k_generation'][()]
else:
return None
@property
def k_combined(self):
if self.run_mode == 'eigenvalue':
return ufloat(*self._f['k_combined'][()])
else:
return None
@property
def k_col_abs(self):
if self.run_mode == 'eigenvalue':
return self._f['k_col_abs'][()]
else:
return None
@property
def k_col_tra(self):
if self.run_mode == 'eigenvalue':
return self._f['k_col_tra'][()]
else:
return None
@property
def k_abs_tra(self):
if self.run_mode == 'eigenvalue':
return self._f['k_abs_tra'][()]
else:
return None
@property
def meshes(self):
if not self._meshes_read:
mesh_group = self._f['tallies/meshes']
# Iterate over all meshes
for group in mesh_group.values():
mesh = openmc.MeshBase.from_hdf5(group)
self._meshes[mesh.id] = mesh
self._meshes_read = True
return self._meshes
@property
def n_batches(self):
return self._f['n_batches'][()]
@property
def n_inactive(self):
if self.run_mode == 'eigenvalue':
return self._f['n_inactive'][()]
else:
return None
@property
def n_particles(self):
return self._f['n_particles'][()]
@property
def n_realizations(self):
return self._f['n_realizations'][()]
@property
def path(self):
return self._f.attrs['path'].decode()
@property
def photon_transport(self):
return self._f.attrs['photon_transport'] > 0
@property
def run_mode(self):
return self._f['run_mode'][()].decode()
@property
def runtime(self):
return {name: dataset[()]
for name, dataset in self._f['runtime'].items()}
@property
def seed(self):
return self._f['seed'][()]
@property
def source(self):
return self._f['source_bank'][()] if self.source_present else None
@property
def source_present(self):
return self._f.attrs['source_present'] > 0
@property
def sparse(self):
return self._sparse
@property
def tallies(self):
if self.tallies_present and not self._tallies_read:
# Read the number of tallies
tallies_group = self._f['tallies']
n_tallies = tallies_group.attrs['n_tallies']
# Read a list of the IDs for each Tally
if n_tallies > 0:
# Tally user-defined IDs
tally_ids = tallies_group.attrs['ids']
else:
tally_ids = []
# Ignore warnings about duplicate IDs
with warnings.catch_warnings():
warnings.simplefilter('ignore', openmc.IDWarning)
# Iterate over all tallies
for tally_id in tally_ids:
group = tallies_group[f'tally {tally_id}']
# Check if tally is internal and therefore has no data
if group.attrs.get("internal"):
continue
# Create Tally object and assign basic properties
tally = openmc.Tally(tally_id)
tally._sp_filename = self._f.filename
tally.name = group['name'][()].decode() if 'name' in group else ''
# Read the number of realizations
n_realizations = group['n_realizations'][()]
tally.estimator = group['estimator'][()].decode()
tally.num_realizations = n_realizations
# Read derivative information.
if 'derivative' in group:
deriv_id = group['derivative'][()]
tally.derivative = self.tally_derivatives[deriv_id]
# Read all filters
n_filters = group['n_filters'][()]
if n_filters > 0:
filter_ids = group['filters'][()]
filters_group = self._f['tallies/filters']
for filter_id in filter_ids:
filter_group = filters_group[f'filter {filter_id}']
new_filter = openmc.Filter.from_hdf5(
filter_group, meshes=self.meshes)
tally.filters.append(new_filter)
# Read nuclide bins
nuclide_names = group['nuclides'][()]
# Add all nuclides to the Tally
for name in nuclide_names:
nuclide = openmc.Nuclide(name.decode().strip())
tally.nuclides.append(nuclide)
# Add the scores to the Tally
scores = group['score_bins'][()]
for score in scores:
tally.scores.append(score.decode())
# Add Tally to the global dictionary of all Tallies
tally.sparse = self.sparse
self._tallies[tally_id] = tally
self._tallies_read = True
return self._tallies
@property
def tallies_present(self):
return self._f.attrs['tallies_present'] > 0
@property
def tally_derivatives(self):
if not self._derivs_read:
# Populate the dictionary if any derivatives are present.
if 'derivatives' in self._f['tallies']:
# Read the derivative ids.
base = 'tallies/derivatives'
deriv_ids = [int(k.split(' ')[1]) for k in self._f[base]]
# Create each derivative object and add it to the dictionary.
for d_id in deriv_ids:
group = self._f[f'tallies/derivatives/derivative {d_id}']
deriv = openmc.TallyDerivative(derivative_id=d_id)
deriv.variable = group['independent variable'][()].decode()
if deriv.variable == 'density':
deriv.material = group['material'][()]
elif deriv.variable == 'nuclide_density':
deriv.material = group['material'][()]
deriv.nuclide = group['nuclide'][()].decode()
elif deriv.variable == 'temperature':
deriv.material = group['material'][()]
self._derivs[d_id] = deriv
self._derivs_read = True
return self._derivs
@property
def version(self):
return tuple(self._f.attrs['openmc_version'])
@property
def summary(self):
return self._summary
@sparse.setter
def sparse(self, sparse):
"""Convert tally data from NumPy arrays to SciPy list of lists (LIL)
sparse matrices, and vice versa.
This property may be used to reduce the amount of data in memory during
tally data processing. The tally data will be stored as SciPy LIL
matrices internally within each Tally object. All tally data access
properties and methods will return data as a dense NumPy array.
"""
cv.check_type('sparse', sparse, bool)
self._sparse = sparse
# Update tally sparsities
if self._tallies_read:
for tally_id in self.tallies:
self.tallies[tally_id].sparse = self.sparse
def close(self):
"""Close the statepoint HDF5 file and the corresponding
summary HDF5 file if present.
"""
self._f.close()
if self._summary is not None:
self._summary._f.close()
def add_volume_information(self, volume_calc):
"""Add volume information to the geometry within the file
Parameters
----------
volume_calc : openmc.VolumeCalculation
Results from a stochastic volume calculation
"""
if self.summary is not None:
self.summary.add_volume_information(volume_calc)
def get_tally(self, scores=[], filters=[], nuclides=[],
name=None, id=None, estimator=None, exact_filters=False,
exact_nuclides=False, exact_scores=False):
"""Finds and returns a Tally object with certain properties.
This routine searches the list of Tallies and returns the first Tally
found which satisfies all of the input parameters.
NOTE: If any of the "exact" parameters are False (default), the input
parameters do not need to match the complete Tally specification and
may only represent a subset of the Tally's properties. If an "exact"
parameter is True then number of scores, filters, or nuclides in the
parameters must precisely match those of any matching Tally.
Parameters
----------
scores : list, optional
A list of one or more score strings (default is []).
filters : list, optional
A list of Filter objects (default is []).
nuclides : list, optional
A list of Nuclide objects (default is []).
name : str, optional
The name specified for the Tally (default is None).
id : Integral, optional
The id specified for the Tally (default is None).
estimator: str, optional
The type of estimator ('tracklength', 'analog'; default is None).
exact_filters : bool
If True, the number of filters in the parameters must be identical
to those in the matching Tally. If False (default), the filters in
the parameters may be a subset of those in the matching Tally.
exact_nuclides : bool
If True, the number of nuclides in the parameters must be identical
to those in the matching Tally. If False (default), the nuclides in
the parameters may be a subset of those in the matching Tally.
exact_scores : bool
If True, the number of scores in the parameters must be identical
to those in the matching Tally. If False (default), the scores
in the parameters may be a subset of those in the matching Tally.
Returns
-------
tally : openmc.Tally
A tally matching the specified criteria
Raises
------
LookupError
If a Tally meeting all of the input parameters cannot be found in
the statepoint.
"""
tally = None
# Iterate over all tallies to find the appropriate one
for test_tally in self.tallies.values():
# Determine if Tally has queried name
if name and name != test_tally.name:
continue
# Determine if Tally has queried id
if id and id != test_tally.id:
continue
# Determine if Tally has queried estimator
if estimator and estimator != test_tally.estimator:
continue
# The number of filters, nuclides and scores must exactly match
if exact_scores and len(scores) != test_tally.num_scores:
continue
if exact_nuclides and len(nuclides) != test_tally.num_nuclides:
continue
if exact_filters and len(filters) != test_tally.num_filters:
continue
# Determine if Tally has the queried score(s)
if scores:
if not all(score in test_tally.scores for score in scores):
continue
# Determine if Tally has the queried Filter(s)
if filters:
contains_filters = True
# Iterate over the Filters requested by the user
for outer_filter in filters:
contains_filters = False
# Test if requested filter is a subset of any of the test
# tally's filters and if so continue to next filter
for inner_filter in test_tally.filters:
if inner_filter.is_subset(outer_filter):
contains_filters = True
break
if not contains_filters:
break
if not contains_filters:
continue
# Determine if Tally has the queried Nuclide(s)
if nuclides:
if not all(nuclide in test_tally.nuclides for nuclide in nuclides):
continue
# If the current Tally met user's request, break loop and return it
tally = test_tally
break
# If we did not find the Tally, return an error message
if tally is None:
raise LookupError('Unable to get Tally')
return tally
def link_with_summary(self, summary):
"""Links Tallies and Filters with Summary model information.
This routine retrieves model information (materials, geometry) from a
Summary object populated with an HDF5 'summary.h5' file and inserts it
into the Tally objects. This can be helpful when viewing and
manipulating large scale Tally data. Note that it is necessary to link
against a summary to populate the Tallies with any user-specified "name"
XML tags.
Parameters
----------
summary : openmc.Summary
A Summary object.
Raises
------
ValueError
An error when the argument passed to the 'summary' parameter is not
an openmc.Summary object.
"""
if self.summary is not None:
warnings.warn('A Summary object has already been linked.',
RuntimeWarning)
return
if not isinstance(summary, openmc.Summary):
            msg = f'Unable to link statepoint with "{summary}" which is ' \
                  'not a Summary object'
raise ValueError(msg)
cells = summary.geometry.get_all_cells()
for tally in self.tallies.values():
tally.with_summary = True
for tally_filter in tally.filters:
if isinstance(tally_filter, (openmc.DistribcellFilter)):
cell_id = tally_filter.bins[0]
cell = cells[cell_id]
if not cell._paths:
summary.geometry.determine_paths()
tally_filter.paths = cell.paths
self._summary = summary
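# ---------------------------------------------------------------------------
# Usage sketch (not part of OpenMC; assumes a statepoint file such as
# 'statepoint.100.h5' from an eigenvalue run exists, and that a tally named
# 'flux' was defined -- both names are illustrative):
if __name__ == '__main__':
    with StatePoint('statepoint.100.h5') as sp:
        print('k-effective:', sp.k_combined)
        # get_tally raises LookupError if no tally matches the criteria
        flux_tally = sp.get_tally(name='flux', scores=['flux'])
        print(flux_tally)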
|
nelsonag/openmc
|
openmc/statepoint.py
|
Python
|
mit
| 23,386
|
from typing import TYPE_CHECKING, Optional
from babel.numbers import LC_NUMERIC
from babel.numbers import format_currency as babel_format_currency
if TYPE_CHECKING:
from . import Money
def format_money(
money: "Money",
format: Optional[str] = None,
locale: str = LC_NUMERIC,
currency_digits: bool = True,
format_type: str = "standard",
decimal_quantization: bool = True,
) -> str:
"""
See https://babel.pocoo.org/en/latest/api/numbers.html
"""
return babel_format_currency( # type: ignore[no-any-return]
money.amount,
money.currency.code,
format=format,
locale=locale,
currency_digits=currency_digits,
format_type=format_type,
decimal_quantization=decimal_quantization,
)
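# Usage sketch (illustrative; assumes py-moneyed's Money type):
#     >>> from moneyed import Money
#     >>> format_money(Money('1234.567', 'USD'), locale='en_US')
#     '$1,234.57'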
|
limist/py-moneyed
|
src/moneyed/l10n.py
|
Python
|
bsd-3-clause
| 781
|
__author__ = 'sibirrer'
import numpy as np
import scipy.ndimage.interpolation as interp
import astropy.io.fits as pyfits
import pyextract.pysex as pysex
import easylens.util as util
class ImageAnalysis(object):
"""
class for analysis routines acting on a single image
"""
def __init__(self):
pass
def estimate_bkg(self, image):
"""
:param image: 2d numpy array
:return: mean and sigma of background estimate
"""
HDUFile = self._get_cat(image)
mean, rms = self._get_background(HDUFile)
return mean, rms
def estimate_psf(self, path2exposure, kernel_size=21, kwargs_cut={}, restrict_psf=None):
"""
        estimates a psf kernel from stars found in the exposure
        :param path2exposure: path to the fits file of the exposure
        :return: kernel, restrict_psf, x_list, y_list, mask, mag, size, kwargs_cut
"""
fits = pyfits.open(path2exposure)
image = fits[0].data
fits.close()
HDUFile = self._get_cat(image)
cat = self._get_source_cat(HDUFile)
if kwargs_cut == {}:
kwargs_cut = self._estimate_star_thresholds(cat)
mask = self._find_objects(cat, kwargs_cut)
mag = np.array(cat.data['MAG_BEST'], dtype=float)
size = np.array(cat.data['FLUX_RADIUS'], dtype=float)
x_list, y_list, restrict_psf = self._get_coordinates(image, cat, mask, numPix=41, restrict_psf=restrict_psf)
if len(x_list) == 0:
return np.zeros((kernel_size,kernel_size)), restrict_psf, x_list, y_list, mask, mag, size, kwargs_cut
star_list = self._get_objects_image(image, x_list, y_list, numPix=41)
kernel = self._stacking(star_list, x_list, y_list)
        kernel = util.cut_edges(kernel, kernel_size)
kernel = util.kernel_norm(kernel)
return kernel, restrict_psf, x_list, y_list, mask, mag, size, kwargs_cut
def _get_cat(self, image, conf_args={}):
"""
returns the sextractor catalogue of a given image
        :param image: 2d numpy array
        :param conf_args: optional sextractor configuration overrides
        :return: HDU file containing the catalogue
"""
params = ['NUMBER', 'FLAGS', 'X_IMAGE', 'Y_IMAGE', 'FLUX_BEST', 'FLUXERR_BEST', 'MAG_BEST', 'MAGERR_BEST',
'FLUX_RADIUS', 'CLASS_STAR', 'A_IMAGE', 'B_IMAGE', 'THETA_IMAGE', 'ELLIPTICITY']
HDUFile = pysex.run(image=image, params=params, conf_file=None, conf_args=conf_args, keepcat=False, rerun=False, catdir=None)
return HDUFile
def _get_source_cat(self, HDUFile):
"""
:param HDUFile:
:return: catalogue
"""
return HDUFile[2]
def _get_background(self, HDUFile):
"""
filters the mean and rms value of the background computed by sextractor
        :param HDUFile: HDU file returned by pysex
:return: mean, rms
"""
mean, rms = 0, 0
mean_found = False
rms_found = False
        lines = HDUFile[1].data[0][0]
        for line in lines:
line = line.strip()
line = line.split()
if line[0] == 'SEXBKGND' or line[0] == 'SEXBKGND=':
mean = float(line[1])
mean_found = True
if line[0] == 'SEXBKDEV' or line[0] == 'SEXBKDEV=':
rms = float(line[1])
rms_found = True
        if not mean_found or not rms_found:
raise ValueError('no mean and rms value found in list.')
return mean, rms
def _estimate_star_thresholds(self, cat):
"""
estimates the cuts in the different sextractor quantities
:param cat:
:return:
"""
mag = np.array(cat.data['MAG_BEST'],dtype=float)
size = np.array(cat.data['FLUX_RADIUS'],dtype=float)
#ellipticity = cat.data['ELLIPTICITY']
kwargs_cuts = {}
mag_max = min(np.max(mag), 34)
mag_min = np.min(mag)
delta_mag = mag_max - mag_min
kwargs_cuts['MagMaxThresh'] = mag_max - 0.7*delta_mag
kwargs_cuts['MagMinThresh'] = mag_min #+ 0.01*delta_mag
mask = (mag<mag_max-0.5*delta_mag)
kwargs_cuts['SizeMinThresh'] = max(0, np.min(size[mask]))
kwargs_cuts['SizeMaxThresh'] = max(0, np.min(size[mask])+4)
kwargs_cuts['EllipticityThresh'] = 0.1
kwargs_cuts['ClassStarMax'] = 1.
kwargs_cuts['ClassStarMin'] = 0.5
return kwargs_cuts
def _find_objects(self, cat, kwargs_cut):
"""
        :param cat: hdu[2] catalogue objects coming from sextractor
:return: selected objects in the catalogue data list
"""
mag = np.array(cat.data['MAG_BEST'],dtype=float)
size = np.array(cat.data['FLUX_RADIUS'],dtype=float)
ellipticity = cat.data['ELLIPTICITY']
classStar = cat.data['CLASS_STAR']
SizeMaxThresh = kwargs_cut['SizeMaxThresh']
SizeMinThresh = kwargs_cut['SizeMinThresh']
EllipticityThresh = kwargs_cut['EllipticityThresh']
MagMaxThresh = kwargs_cut['MagMaxThresh']
MagMinThresh = kwargs_cut['MagMinThresh']
ClassStarMax = kwargs_cut['ClassStarMax']
ClassStarMin = kwargs_cut['ClassStarMin']
mask = (size<SizeMaxThresh) & (ellipticity<EllipticityThresh) & (size>SizeMinThresh) & (mag<MagMaxThresh) & (mag>MagMinThresh) & (classStar<ClassStarMax) & (classStar>ClassStarMin)
return mask
def _get_coordinates(self, image, cat, mask, numPix=10, restrict_psf=None):
"""
:param image:
:param cat:
:param mask:
:param restrict_psf:
:return:
"""
nx, ny = image.shape
x_center = np.array(cat.data['X_IMAGE'], dtype=float)
y_center = np.array(cat.data['Y_IMAGE'], dtype=float)
x_center_mask = x_center[mask]
y_center_mask = y_center[mask]
num_objects = len(x_center_mask)
        if restrict_psf is None:
restrict_psf = [True]*num_objects
x_list = []
y_list = []
for i in range(num_objects):
xc, yc = x_center_mask[i], y_center_mask[i]
if (int(xc)-numPix > 0) and (int(xc)+numPix < nx) and (int(yc)-numPix > 0) and (int(yc)+numPix < ny):
if restrict_psf[i]:
x_list.append(xc)
y_list.append(yc)
return x_list, y_list, restrict_psf
def _get_objects_image(self, image, x_list, y_list, numPix=10):
"""
returns all the cutouts of the locations of the selected objects
:param image:
:param cat:
:param mask:
:return:
"""
num_objects = len(x_list)
cutout_list = []
print("number of objects: ", num_objects)
for i in range(np.minimum(10, num_objects)):
xc, yc = x_list[i], y_list[i]
cutout = image[int(xc)-numPix-1:int(xc)+numPix, int(yc)-numPix-1:int(yc)+numPix]
cutout_list.append(cutout)
return cutout_list
def _stacking(self, star_list, x_list, y_list):
"""
:param star_list:
:return:
"""
n_stars = len(star_list)
shifteds = []
for i in range(n_stars):
xc, yc = x_list[i], y_list[i]
data = star_list[i]
x_shift = int(xc) - xc
y_shift = int(yc) - yc
shifted = interp.shift(data, [-y_shift, -x_shift], order=1)
shifteds.append(shifted)
print('=== object ===', i)
import matplotlib.pylab as plt
fig, ax1 = plt.subplots()
im = ax1.matshow(np.log10(shifted), origin='lower')
plt.axes(ax1)
fig.colorbar(im)
plt.show()
combined = sum(shifteds)
        new = np.empty_like(combined)
        max_pix = np.max(combined)
        p = combined[combined >= max_pix/10**6]  # in the SIS regime
        new[combined < max_pix/10**6] = 0
        new[combined >= max_pix/10**6] = p
kernel = util.kernel_norm(new)
return kernel
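    # Usage sketch (illustrative; assumes 'exposure.fits' exists and that
    # SExtractor is installed so pysex.run can be executed):
    #   analysis = ImageAnalysis()
    #   kernel, restrict_psf, x_list, y_list, mask, mag, size, cuts = \
    #       analysis.estimate_psf('exposure.fits', kernel_size=21)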
|
DES-SL/EasyLens
|
easylens/Data/image_analysis.py
|
Python
|
mit
| 7,851
|
# -*- coding: utf-8 -*-
import re
import json
def parse_input(string, replace_vkurl=True, replace_url=True, replace_nl=True):
new_string = string
if replace_vkurl:
        new_string = re.sub(r'\b(https?://)?m\.?vk\.com/?.*\b',
                            '__vkurl__',
                            new_string  # match vk.com links
                            )
if replace_url:
new_string = re.sub(
r'''(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/?)(?:[^\s()<>]+|(([^\s()<>]+|(([^\s()<>]+)))*))+(?:(([^\s()<>]+|(([^\s()<>]+)))*)|[^\s`!()[]{};:'".,<>?«»“”‘’]))'''
            , '__url__',  # match all other URLs
new_string
)
if replace_nl:
        new_string = re.sub('\n', ' __nl__ ', new_string)  # replace newlines with a marker
return new_string
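# Examples (illustrative):
#   parse_input('hi https://m.vk.com/id1')  ->  'hi __vkurl__'
#   parse_input('line1\nline2')             ->  'line1 __nl__ line2'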
def get_sticker_meaning(new_sticker):
try:
with open('data/meanings_of_stiсkers.txt', 'r') as f:
text = re.sub("'", '"', f.read())
stickers = json.loads(text)
except FileNotFoundError:
with open('data/meanings_of_stiсkers.txt', 'w') as f:
pass
f = '{"product_id":{"0":{"id":{"0":{"photo_64":"0","meaning":"0"}}}}}'
stickers = json.loads(f)
if str(new_sticker['product_id']) not in stickers['product_id']:
stickers['product_id'].update({str(new_sticker['product_id']):{'id':{}}})
if str(new_sticker['id']) not in stickers['product_id'][str(new_sticker['product_id'])]['id']:
stickers['product_id'][str(new_sticker['product_id'])]['id'].update({str(new_sticker['id']):{'meaning':'__sticker__','photo_64':new_sticker['photo_64']}})
if stickers['product_id'][str(new_sticker['product_id'])]['id'][str(new_sticker['id'])]['meaning'] == '__sticker__':
        print('Sticker link: {}'.format(new_sticker['photo_64']))
        meaning = input('Try to describe what it means in a short phrase: ')
if meaning == '':
meaning = '__sticker__'
stickers['product_id'][str(new_sticker['product_id'])]['id'][str(new_sticker['id'])]['meaning'] = meaning
with open('data/meanings_of_stiсkers.txt', 'w') as f:
f.write(str(stickers))
return '__sticker__ ' + meaning
else:
return stickers['product_id'][str(new_sticker['product_id'])]['id'][str(new_sticker['id'])]['meaning']
def parse_chat_dump(path, new_path=None):
new_lines = []
new_line = ''
last_line = '<A> '
dialog_started = False
was_new_message = False
print('Parsing chat history, do not close the program...')
with open(path, 'r') as file:
lines = file.readlines()
for line in lines:
if line.startswith('###'):
dialog_started = not dialog_started
if dialog_started:
last_line = '<A> '
was_new_message = False
continue
elif line == '\n':
continue
elif line[:4] == last_line[:4]: # "<Q> " and "<A> "
new_lines.append(line[4:-1] + ' __nm__ ')
was_new_message = True
last_line = line
continue
if was_new_message:
                new_lines[-1] = new_lines[-1][:-len(' __nm__ ')] + '\n'
was_new_message = False
new_line += line[4:]
new_lines.append(new_line)
last_line = line
new_line = ''
with open('{}'.format(new_path if new_path else path), 'w') as new_file:
for line in new_lines:
new_file.write(line)
|
Fogapod/ChatBot
|
utils.py
|
Python
|
mit
| 3,253
|
#!/usr/bin/env python
"""A simple TopicServer stress-test.
Start the TopicServer and then run this test,
which will check the TopicServer's advertised capacities
and basic error handling.
Judiciary Pag
March 2016
"""
from __future__ import print_function
import ConfigParser
import datetime
import httplib
from twisted.web import http
# The TopicServer default port
PORT = 8000
# The topic-server configuration file section
# that contains a number of useful parameters.
OUR_CONFIG_SECTION = 'mockdb'
# Read TopicServer configuration
# so we know just what we can do...
config = ConfigParser.RawConfigParser()
config.read('../server/topicserver.cfg')
max_subscribers = config.getint(OUR_CONFIG_SECTION, 'MAX_SUBSCRIBERS')
max_topics = config.getint(OUR_CONFIG_SECTION, 'MAX_TOPICS')
max_per_topic_messages = config.getint(OUR_CONFIG_SECTION, 'MAX_PER_TOPIC_MESSAGES')
max_subscriber_name_length = config.getint(OUR_CONFIG_SECTION, 'MAX_SUBSCRIBER_NAME_LENGTH')
max_topic_name_length = config.getint(OUR_CONFIG_SECTION, 'MAX_TOPIC_NAME_LENGTH')
max_message_length = config.getint(OUR_CONFIG_SECTION, 'MAX_MESSAGE_LENGTH')
# -----------------------------------------------------------------------------
def _subscribe(conn, topic, user, expected_status=http.OK):
"""Subscribe to a topic.
Given a topic and user, this method attempts to subscribe
to the topic, expecting a successful response. The response
code can be specified."""
conn.request('POST', '/{}/{}'.format(topic, user))
# We must get the response and (essentially)
# we must read it. If we do not read the response
# we'll get a 'httplib.ResponseNotReady' exception
# when we try and get the next response.
resp = conn.getresponse()
resp.read()
assert resp.status == expected_status
# -----------------------------------------------------------------------------
def _unsubscribe(conn, topic, user, expected_status=http.OK):
"""Un-subscribe from a topic.
Given a topic and user, this method attempts to un-subscribe
    from the topic, expecting a successful response. The response
code can be specified."""
conn.request('DELETE', '/{}/{}'.format(topic, user))
resp = conn.getresponse()
resp.read()
assert resp.status == expected_status
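# Implied REST surface, summarized from the helpers and checks below
# (a reading of the tests, not taken from the server source):
#   POST   /<topic>/<user>  -> subscribe <user> to <topic>
#   POST   /<topic>         -> publish the request body to <topic>
#   GET    /<topic>/<user>  -> pop the next message (204 when none left)
#   DELETE /<topic>/<user>  -> unsubscribe and drop associated state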
# -----------------------------------------------------------------------------
def check_max_per_topic_messages(conn, topic, user):
""""Test the TopicServer's maximum per-topic messages.
"""
print('check_max_per_topic_messages...')
# SUBSCRIBE
#
_subscribe(conn, topic, user)
# POST
# Keep posting until we get an error.
# Then verify the number of messages.
#
post_start_time = datetime.datetime.now()
num_posts = 0
the_message = 'The message'
while True:
conn.request('POST', '/{}'.format(topic), the_message)
resp = conn.getresponse()
resp.read()
if resp.status != http.OK:
break
num_posts += 1
assert num_posts == max_per_topic_messages
post_finish_time = datetime.datetime.now()
print(post_finish_time - post_start_time)
# UN-SUBSCRIBE
# Un-subscribing will remove all messages
#
_unsubscribe(conn, topic, user)
print(' Done.')
# -----------------------------------------------------------------------------
def check_max_topics(conn, user):
""""Test the TopicServer's maximum number of topics.
"""
print('check_max_topics...')
# SUBSCRIBE
#
for topic_num in xrange(1, max_topics + 1):
_subscribe(conn, 'topic-{}'.format(topic_num), user)
# Shouldn't be able to subscribe to another topic...
_subscribe(conn, 'topic-too-far', user, http.INTERNAL_SERVER_ERROR)
# UN-SUBSCRIBE
# Un-subscribing will remove all messages
#
for topic_num in xrange(1, max_topics + 1):
_unsubscribe(conn, 'topic-{}'.format(topic_num), user)
print(' Done.')
# -----------------------------------------------------------------------------
def check_max_subscribers(conn, topic):
""""Test the TopicServer's maximum number of topics.
"""
print('check_max_subscribers...')
# SUBSCRIBE
#
for user_num in xrange(1, max_subscribers + 1):
_subscribe(conn, topic, 'user-{}'.format(user_num))
# Shouldn't be able to subscribe with another user...
    _subscribe(conn, topic, 'user-too-far', http.INTERNAL_SERVER_ERROR)
# UN-SUBSCRIBE
# Un-subscribing will remove the topics
#
for user_num in xrange(1, max_subscribers + 1):
_unsubscribe(conn, topic, 'user-{}'.format(user_num))
print(' Done.')
# -----------------------------------------------------------------------------
def check_max_subscriber_name_length(conn, topic):
""""Test the TopicServer's maximum subscriber name length.
"""
    print('check_max_subscriber_name_length...')
# SUBSCRIBE
#
user = 'a' * max_subscriber_name_length
_subscribe(conn, topic, user)
_subscribe(conn, topic, user + ".", http.INTERNAL_SERVER_ERROR)
# UN-SUBSCRIBE
# Un-subscribing will remove the topics
#
_unsubscribe(conn, topic, user)
print(' Done.')
# -----------------------------------------------------------------------------
def check_max_topic_name_length(conn, user):
""""Test the TopicServer's maximum topic name length.
"""
print('check_max_topic_name_length...')
# SUBSCRIBE
#
topic = 'a' * max_topic_name_length
_subscribe(conn, topic, user)
_subscribe(conn, topic + '.', user, http.INTERNAL_SERVER_ERROR)
# UN-SUBSCRIBE
# Un-subscribing will remove the topics
#
_unsubscribe(conn, topic, user)
print(' Done.')
# -----------------------------------------------------------------------------
def check_max_message_size(conn, topic, user):
""""Test the TopicServer's maximum message size.
"""
print('check_max_message_size...')
# SUBSCRIBE
#
_subscribe(conn, topic, user)
# POST
# Maximum size message then try one character-larger...
#
the_message = 'a' * max_message_length
conn.request('POST', '/{}'.format(topic), the_message)
resp = conn.getresponse()
resp.read()
assert resp.status == http.OK
the_message += '!'
conn.request('POST', '/{}'.format(topic), the_message)
resp = conn.getresponse()
resp.read()
assert resp.status == http.INTERNAL_SERVER_ERROR
# UN-SUBSCRIBE
# Un-subscribing will remove all messages
#
_unsubscribe(conn, topic, user)
print(' Done.')
# -----------------------------------------------------------------------------
def check_read_and_write(conn, topic, user):
""""Test the TopicServer's ability to receive and deliver messages.
"""
print('check_read_and_write...')
# SUBSCRIBE
#
_subscribe(conn, topic, user)
# POST
    # Post two messages, then read them back in order...
#
conn.request('POST', '/{}'.format(topic), 'Message-1')
resp = conn.getresponse()
resp.read()
assert resp.status == http.OK
conn.request('POST', '/{}'.format(topic), 'Message-2')
resp = conn.getresponse()
resp.read()
assert resp.status == http.OK
# GET
#
conn.request('GET', '/{}/{}'.format(topic, user))
resp = conn.getresponse()
body = resp.read()
assert resp.status == http.OK
assert body == 'Message-1'
conn.request('GET', '/{}/{}'.format(topic, user))
resp = conn.getresponse()
body = resp.read()
assert resp.status == http.OK
assert body == 'Message-2'
conn.request('GET', '/{}/{}'.format(topic, user))
resp = conn.getresponse()
resp.read()
assert resp.status == http.NO_CONTENT
# UN-SUBSCRIBE
# Un-subscribing will remove all messages
#
_unsubscribe(conn, topic, user)
print(' Done.')
# -----------------------------------------------------------------------------
if __name__ == "__main__":
# Connect to the TopicServer...
conn = httplib.HTTPConnection('localhost:{:d}'.format(PORT))
check_max_per_topic_messages(conn, 'pi-zero', 'Bob')
check_max_topics(conn, 'Bob')
check_max_subscribers(conn, 'pi-zero')
check_max_message_size(conn, 'pi-zero', 'Bob')
check_max_subscriber_name_length(conn, 'pi-zero')
check_max_topic_name_length(conn, 'Bob')
check_read_and_write(conn, 'pi-zero', 'Bob')
|
JudiciaryPag/TopicServer
|
client/stress.py
|
Python
|
apache-2.0
| 8,417
|
import json
import re
from datetime import datetime, timedelta
import scrapy
from feeds.loaders import FeedEntryItemLoader
from feeds.spiders import FeedsSpider
class DelinskiAtSpider(FeedsSpider):
name = "delinski.at"
feed_title = "Delinski"
feed_link = "https://{}".format(name)
feed_logo = "https://{}/favicon.ico".format(name)
def start_requests(self):
yield scrapy.Request(
"https://www.delinski.at/wien/restaurants",
# The restaurants page is not cached and takes a few seconds to load.
# Don't query more than once a day.
meta={"cache_expires": timedelta(days=1)},
)
def parse(self, response):
m = re.search("window.DELINSKI, {listViewEntities: (.*)}", response.text)
restaurants = sorted(
json.loads(m.group(1))["restaurants"]["entities"].values(),
key=lambda r: int(r["created"]),
reverse=True,
)
for restaurant in restaurants[:20]:
il = FeedEntryItemLoader(timezone="UTC", base_url=response.url)
url = response.urljoin(restaurant["url"])
il.add_value("link", url)
il.add_value("title", restaurant["name"])
content = """
<img src="{image}">
<ul>
<li>{address}</li>
<li>{price_range_human}</li>
<li>{cuisine_text}</li>
</ul>
"""
il.add_value("content_html", content.format(**restaurant))
il.add_value(
"updated", datetime.utcfromtimestamp(int(restaurant["created"]))
)
yield scrapy.Request(url, self._parse_restaurant, meta={"il": il})
def _parse_restaurant(self, response):
il = FeedEntryItemLoader(
response=response,
base_url=response.url,
parent=response.meta["il"],
remove_elems=[".external"],
)
il.add_css("content_html", ".content .right p")
il.add_css("content_html", ".restaurant-link")
il.add_css("category", ".tags a ::text")
yield il.load_item()
|
nblock/feeds
|
feeds/spiders/delinski_at.py
|
Python
|
agpl-3.0
| 2,150
|
from flask.views import View
from flask import render_template, request
class CompanyDir(View):
def dispatch_request(self):
return render_template('company_directory.html', komuna='prishtina')
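# Registration sketch (hypothetical wiring; the URL rule and endpoint name
# are assumptions, but flask.views.View registration works this way):
#   app.add_url_rule('/companies',
#                    view_func=CompanyDir.as_view('company_dir'))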
|
opendatakosovo/municipality-procurement-visualizer
|
gpv/views/pages/company_dir_pages.py
|
Python
|
gpl-2.0
| 207
|
# -*- coding: utf-8 -*-
import logging
import odoo
from odoo.tools.func import lazy_property
from .sessionstore import PostgresSessionStore
_logger = logging.getLogger(__name__)
class RootTkobr(odoo.http.Root):
@lazy_property
def session_store(self):
# Setup http sessions
_logger.debug('HTTP sessions stored in Postgres')
return PostgresSessionStore(session_class=odoo.http.OpenERPSession)
root = RootTkobr()
odoo.http.root.session_store = root.session_store
|
meta-it/misc-addons
|
base_session_store_psql/http.py
|
Python
|
lgpl-3.0
| 500
|
#print("not init")
#import core
#from core import PiUMP
#print("init")
|
xavigisbeg/ELECTROAD_PiUMP
|
src/__init__.py
|
Python
|
apache-2.0
| 71
|
#!/usr/bin/env python
#------------------------------------------------------------------------------
#
# sensor metadata-extraction profiles - spot4 and spot5 scene 1A products
#
# Project: XML Metadata Handling
# Authors: Martin Paces <martin.paces@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from .common import (
GDAL_TYPES, OGC_TYPE_DEFS,
check, extract, #tag, text, attr,
)
from .interfaces import ProfileDimap
from lxml import etree
import ns_opt20
import numpy as np
import geom as ig
class SimplifiedLocationModel(object):
def __init__(self, elm):
lcdir = []
for elm_coef in elm.find("./Direct_Location_Model/lc_List"):
lcdir.append(float(elm_coef.text))
self._lcdir = np.array(lcdir)
pcdir = []
for elm_coef in elm.find("./Direct_Location_Model/pc_List"):
pcdir.append(float(elm_coef.text))
self._pcdir = np.array(pcdir)
lcrev = []
for elm_coef in elm.find("./Reverse_Location_Model/lc_List"):
lcrev.append(float(elm_coef.text))
self._lcrev = np.array(lcrev)
pcrev = []
for elm_coef in elm.find("./Reverse_Location_Model/pc_List"):
pcrev.append(float(elm_coef.text))
self._pcrev = np.array(pcrev)
def __call__(self, x, y, reverse=False):
xy = x * y
xx = x * x
yy = y * y
if reverse:
lc, pc = self._lcrev, self._pcrev
else:
lc, pc = self._lcdir, self._pcdir
l = lc[0]; p = pc[0]
l += lc[1]*x; p += pc[1]*x
l += lc[2]*y; p += pc[2]*y
l += lc[3]*xy; p += pc[3]*xy
l += lc[4]*xx; p += pc[4]*xx
l += lc[5]*yy; p += pc[5]*yy
return l, p
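    # Note: the statements above evaluate the 2nd-order polynomial model
    #   l(x, y) = lc0 + lc1*x + lc2*y + lc3*x*y + lc4*x**2 + lc5*y**2
    # (and p(x, y) likewise with the pc coefficients).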
def get_footprint_and_center(xml, n=10):
elm_slm = check(xml.find("//Simplified_Location_Model"), "Simplified_Location_Model")
loc_mod = SimplifiedLocationModel(elm_slm)
#ncol = int(extract(xml, "//Raster_Dimensions/NCOLS"))
#nrow = int(extract(xml, "//Raster_Dimensions/NROWS"))
elm = check(xml.find("//Dataset_Frame/Scene_Center"), 'Scene_Center')
lon_cnt = float(extract(elm, "./FRAME_LON"))
lat_cnt = float(extract(elm, "./FRAME_LAT"))
vlist = []
for elm in xml.iterfind("//Dataset_Frame/Vertex"):
_lon = float(extract(elm, "./FRAME_LON"))
_lat = float(extract(elm, "./FRAME_LAT"))
_row = float(extract(elm, "./FRAME_ROW"))
_col = float(extract(elm, "./FRAME_COL"))
vlist.append((_lat, _lon, _row, _col))
row, col = [], []
for i in xrange(len(vlist)):
_, _, r0, c0 = vlist[i]
_, _, r1, c1 = vlist[(i+1)%len(vlist)]
row.append(np.linspace(r0, r1, n, False))
col.append(np.linspace(c0, c1, n, False))
lon, lat = loc_mod(np.concatenate(row), np.concatenate(col))
if hasattr(np, 'nditer'):
wkt0 = ",".join("%.9g %.9g"%(x, y) for x, y in np.nditer([lon, lat]))
else:
wkt0 = ",".join("%.9g %.9g"%(x, y) for x, y in zip(lon, lat))
wkt0 = "EPSG:4326;POLYGON((%s, %.9g %.9g))"%(wkt0, lon[0], lat[0])
wkt1 = "EPSG:4326;POINT(%.9g %.9g)"%(lon_cnt, lat_cnt)
return ig.parseGeom(wkt0), ig.parseGeom(wkt1)
class ProfileSpotScene1a(ProfileDimap):
version = "1.1"
profile = "SPOTSCENE_1A"
c_types = {(8, "UNSIGNED"): "uint8",}
@classmethod
def get_identifier(cls, xml):
""" get dataset's unique identifier """
src_id = extract(xml, "//Source_Information/SOURCE_ID")[:-1]
mname = extract(xml, "//Scene_Source/MISSION")
mindex = extract(xml, "//Scene_Source/MISSION_INDEX")
iname = extract(xml, "//Scene_Source/INSTRUMENT")
iindex = extract(xml, "//Scene_Source/INSTRUMENT_INDEX")
#scode = extract(xml, "//Scene_Source/SENSOR_CODE")
scode = ""
for elm in xml.iterfind("//Scene_Source"):
scode += extract(elm, "SENSOR_CODE")
geom = extract(xml, "//Data_Processing/GEOMETRIC_PROCESSING")
return "%s%s:%s%s:%s%s:%s"%(mname, mindex, iname, iindex, src_id, scode, geom)
@classmethod
def get_parent_id(cls, xml):
""" get collections's unique identifier """
mname = extract(xml, "//Scene_Source/MISSION")
mindex = extract(xml, "//Scene_Source/MISSION_INDEX")
iname = extract(xml, "//Scene_Source/INSTRUMENT")
iindex = extract(xml, "//Scene_Source/INSTRUMENT_INDEX")
geom = extract(xml, "//Data_Processing/GEOMETRIC_PROCESSING")
scode = ""
for elm in xml.iterfind("//Scene_Source"):
scode += extract(elm, "SENSOR_CODE")
return "%s%s:%s%s:%s:%s"%(mname, mindex, iname, iindex, scode, geom)
@classmethod
def extract_range_type(cls, xml):
""" Extract full range type definition."""
src_type = extract(xml, "//Source_Information/SOURCE_TYPE")
if src_type != "SCENE":
raise ValueError("Unknown SOURCE_TYPE '%s'"%src_type)
base_name = cls.get_identifier(xml)
nbands = int(extract(xml, "//Raster_Dimensions/NBANDS"))
nbits = int(extract(xml, "//Raster_Encoding/NBITS"))
dtype = extract(xml, "//Raster_Encoding/DATA_TYPE")
dtype = check(cls.c_types.get((nbits, dtype)), 'data type')
gdal_dtype = check(GDAL_TYPES.get(dtype), 'data_type')
ogc_dtype = check(OGC_TYPE_DEFS.get(dtype), 'data_type')
nilval = []
for elm in xml.iterfind("//Image_Display/Special_Value"):
svalidx = extract(elm, "SPECIAL_VALUE_INDEX")
svaltext = extract(elm, "SPECIAL_VALUE_TEXT")
if svaltext == 'NODATA':
nilval.append((0, {
"reason": "http://www.opengis.net/def/nil/OGC/0/inapplicable",
"value": svalidx,
}))
elif svaltext == 'SATURATED':
nilval.append((1, {
"reason": "http://www.opengis.net/def/nil/OGC/0/AboveDetectionRange",
"value": svalidx,
}))
# make sure the no-data goes first
nilval = [obj for _, obj in sorted(nilval)]
bands = []
for elm in xml.iterfind("//Spectral_Band_Info"):
bname = extract(elm, "BAND_DESCRIPTION")
bidx = int(extract(elm, "BAND_INDEX"))
bunit = extract(elm, "PHYSICAL_UNIT")
bgain = extract(elm, "PHYSICAL_GAIN")
bbias = extract(elm, "PHYSICAL_BIAS")
#cal_date = extract(elm, "PHYSICAL_CALIBRATION_DATE")
bands.append((bidx, {
"identifier": bname,
"name": bname,
"description": "\n".join([
"INFO: Radiance digital numbers.",
"BAND_INDEX: %s"%bidx,
"BAND: %s"%bname,
"UNIT: %s"%bunit,
"GAIN: %s"%bgain,
"BIAS: %s"%bbias,
]),
"definition": ogc_dtype,
"data_type": gdal_dtype,
"gdal_interpretation": "Undefined",
"uom": "none",
"nil_values": nilval,
}))
return {
"name": "%s:%d:%s"%(base_name, nbands, dtype),
"bands": [obj for _, obj in sorted(bands)],
}
@classmethod
def extract_range_type_sloppy(cls, xml):
""" Extract range definition applicable to all product
of the same type.
"""
src_type = extract(xml, "//Source_Information/SOURCE_TYPE")
if src_type != "SCENE":
raise ValueError("Unknown SOURCE_TYPE '%s'"%src_type)
base_name = cls.get_parent_id(xml)
nbands = int(extract(xml, "//Raster_Dimensions/NBANDS"))
nbits = int(extract(xml, "//Raster_Encoding/NBITS"))
dtype = extract(xml, "//Raster_Encoding/DATA_TYPE")
dtype = check(cls.c_types.get((nbits, dtype)), 'data type')
gdal_dtype = check(GDAL_TYPES.get(dtype), 'data_type')
ogc_dtype = check(OGC_TYPE_DEFS.get(dtype), 'data_type')
nilval = []
for elm in xml.iterfind("//Image_Display/Special_Value"):
svalidx = extract(elm, "SPECIAL_VALUE_INDEX")
svaltext = extract(elm, "SPECIAL_VALUE_TEXT")
if svaltext == 'NODATA':
nilval.append((0, {
"reason": "http://www.opengis.net/def/nil/OGC/0/inapplicable",
"value": svalidx,
}))
elif svaltext == 'SATURATED':
nilval.append((1, {
"reason": "http://www.opengis.net/def/nil/OGC/0/AboveDetectionRange",
"value": svalidx,
}))
# make sure the no-data goes first
nilval = [obj for _, obj in sorted(nilval)]
bands = []
for elm in xml.iterfind("//Spectral_Band_Info"):
bname = extract(elm, "BAND_DESCRIPTION")
bidx = int(extract(elm, "BAND_INDEX"))
bands.append((bidx, {
"identifier": bname,
"name": bname,
"description": "\n".join([
"INFO: Radiance digital numbers.",
"BAND_INDEX: %s"%bidx,
"BAND: %s"%bname,
]),
"definition": ogc_dtype,
"data_type": gdal_dtype,
"gdal_interpretation": "Undefined",
"uom": "none",
"nil_values": nilval,
}))
return {
"name": "%s:%d:%s"%(base_name, nbands, dtype),
"bands": [obj for _, obj in sorted(bands)],
}
@classmethod
def extract_eop_metadata(cls, xml, ns_opt=None, **kwarg):
""" Extract range definition applicable to all product
of the same type.
"""
ns_opt = ns_opt or ns_opt20
ns_eop = ns_opt.ns_eop
ns_gml = ns_opt.ns_gml
ns_om = ns_opt.ns_om
OPT = ns_opt.E
EOP = ns_eop.E
OM = ns_om.E
#GML = ns_gml.E
time_acq_start = "%sT%sZ"%(extract(xml, "//Scene_Source/IMAGING_DATE"),
extract(xml, "//Scene_Source/IMAGING_TIME"))
time_acq_stop = time_acq_start
time_prod = extract(xml, "//Production/DATASET_PRODUCTION_DATE")+'Z'
grid_reference = extract(xml, "//Scene_Source/GRID_REFERENCE")
grid_ref_lon = grid_reference[0:3]
grid_ref_lat = grid_reference[3:6]
eo_equipment = EOP.EarthObservationEquipment(
ns_gml.getRandomId(),
EOP.platform(EOP.Platform(
EOP.shortName(extract(xml, "//Scene_Source/MISSION")),
EOP.serialIdentifier(extract(xml, "//Scene_Source/MISSION_INDEX")),
EOP.orbitType("LEO"),
)),
EOP.instrument(EOP.Instrument(
EOP.shortName("%s%s"%(
extract(xml, "//Scene_Source/INSTRUMENT"),
extract(xml, "//Scene_Source/INSTRUMENT_INDEX"),
)),
)),
EOP.sensor(EOP.Sensor(
EOP.sensorType("OPTICAL"),
)),
EOP.acquisitionParameters(EOP.Acquisition(
EOP.orbitNumber(extract(xml, "//Imaging_Parameters/REVOLUTION_NUMBER")),
EOP.lastOrbitNumber(extract(xml, "//Imaging_Parameters/REVOLUTION_NUMBER")),
EOP.orbitDirection("DESCENDING"),
EOP.wrsLongitudeGrid(grid_ref_lon),
EOP.wrsLatitudeGrid(grid_ref_lat),
EOP.illuminationAzimuthAngle(extract(xml, "//Scene_Source/SUN_AZIMUTH"), {"uom": "deg"}),
EOP.illuminationElevationAngle(extract(xml, "//Scene_Source/SUN_ELEVATION"), {"uom": "deg"}),
EOP.incidenceAngle(extract(xml, "//Scene_Source/INCIDENCE_ANGLE"), {"uom": "deg"}),
)),
)
metadata = EOP.EarthObservationMetaData(
EOP.identifier(cls.get_identifier(xml)),
EOP.parentIdentifier(cls.get_parent_id(xml)),
EOP.acquisitionType("NOMINAL"),
EOP.productType("IMAGE"),
EOP.status("ACQUIRED"),
)
xml_eop = OPT.EarthObservation(
ns_gml.getRandomId(),
ns_eop.getSchemaLocation("OPT"),
#EOP.parameter(), #optional
OM.phenomenonTime(ns_gml.getTimePeriod(time_acq_start, time_acq_stop)),
#OM.resultQuality(), #optional
OM.resultTime(ns_gml.getTimeInstant(time_prod)),
#OM.validTime(), # optional
OM.procedure(eo_equipment),
OM.observedProperty({"nillReason": "unknown"}),
OM.featureOfInterest(
ns_eop.getFootprint(*get_footprint_and_center(xml))
),
OM.result(OPT.EarthObservationResult(ns_gml.getRandomId())),
EOP.metaDataProperty(metadata),
)
xml_eop = etree.ElementTree(xml_eop)
#xml_eop.getroot().addprevious(ns_eop.getSchematronPI())
return xml_eop
|
DREAM-ODA-OS/tools
|
metadata/profiles/spot_scene_1a.py
|
Python
|
mit
| 14,275
|
CHEMICAL = {
'typeclass': 'typeclasses.objects.Chemical',
'key': 'generic chemical',
'desc': 'A blob of some generic chemical',
'effect': 'GenericEffect()'
}
def GenericEffect():
print("You've got a chemical in you!")
|
maxsond/Spacebase
|
Hail/typeclasses/prototypes/chemicals.py
|
Python
|
bsd-3-clause
| 239
|
#!/usr/bin/env python
"""
@package mi.dataset.parser.test
@file mi-dataset/mi/dataset/parser/test/test_ctdbp_cdef.py
@author Jeff Roy
@brief Test code for ctdbp_cdef data parser
Files used for testing:
simple_test_endurance.log / simple_test_pioneer.log
Contain a handful of sensor records (5 each)
data1_endurance.log / data1_pioneer.log
Contain a header plus the full set of sensor records (143 and 100)
invalid_data_endurance.log / invalid_data_pioneer.log
Contain lines of invalid data
no_sensor_data_endurance.log / no_sensor_data_pioneer.log
Contain a header section and no sensor records
"""
import os
from nose.plugins.attrib import attr
from mi.core.log import get_logger
from mi.dataset.driver.ctdbp_cdef.resource import RESOURCE_PATH
from mi.dataset.parser.ctdbp_cdef import CtdbpCdefParser
from mi.dataset.test.test_parser import ParserUnitTestCase
MODULE_NAME = 'mi.dataset.parser.ctdbp_cdef'
log = get_logger()
@attr('UNIT', group='mi')
class CtdbpCdefParserUnitTestCase(ParserUnitTestCase):
"""
ctdbp_cdef Parser unit test suite
"""
def test_simple(self):
"""
Simple test to verify that records are successfully read and parsed from a data file
"""
log.debug('===== START SIMPLE TEST =====')
path = RESOURCE_PATH
log.info(path)
# test with Endurance data
with open(os.path.join(RESOURCE_PATH, 'simple_test_endurance.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# In a single read, get all particles in this file.
number_expected_results = 5
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
self.assertListEqual(self.exception_callback_value, [])
# test with Pioneer data
with open(os.path.join(RESOURCE_PATH, 'simple_test_pioneer.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# In a single read, get all particles in this file.
number_expected_results = 5
result = parser.get_records(number_expected_results)
self.assertEqual(len(result), number_expected_results)
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END SIMPLE TEST =====')
def test_verify_record_against_yaml(self):
"""
Read data from a file and pull out data particles
one at a time. Verify that the results are those we expected.
"""
log.debug('===== START YAML TEST =====')
# test with Endurance data
with open(os.path.join(RESOURCE_PATH, 'data1_endurance.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# In a single read, get all particles in this file.
number_expected_results = 143
result = parser.get_records(number_expected_results)
self.assert_particles(result, 'data1_endurance.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
# test with Pioneer data
with open(os.path.join(RESOURCE_PATH, 'data1_pioneer.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# In a single read, get all particles in this file.
number_expected_results = 100
result = parser.get_records(number_expected_results)
self.assert_particles(result, 'data1_pioneer.yml', RESOURCE_PATH)
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END YAML TEST =====')
def test_invalid_sensor_data_records(self):
"""
Read data from a file containing invalid sensor data records.
Verify that no instrument particles are produced
and the correct number of exceptions are detected.
"""
log.debug('===== START TEST INVALID SENSOR DATA =====')
# test with Endurance data
with open(os.path.join(RESOURCE_PATH, 'invalid_data_endurance.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(len(self.exception_callback_value), 11)
self.exception_callback_value = [] # reset exceptions
# test with Pioneer data
with open(os.path.join(RESOURCE_PATH, 'invalid_data_pioneer.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# Try to get records and verify that none are returned.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertEqual(len(self.exception_callback_value), 11)
log.debug('===== END TEST INVALID SENSOR DATA =====')
def test_no_sensor_data(self):
"""
Read a file containing no sensor data records
and verify that no particles are produced.
"""
log.debug('===== START TEST NO SENSOR DATA RECOVERED =====')
# test with Endurance data
with open(os.path.join(RESOURCE_PATH, 'no_sensor_data_endurance.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# Try to get a record and verify that none are produced.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertListEqual(self.exception_callback_value, [])
# test with Pioneer data
with open(os.path.join(RESOURCE_PATH, 'no_sensor_data_pioneer.log'), 'rU') as file_handle:
parser = CtdbpCdefParser(file_handle,
self.exception_callback)
# Try to get a record and verify that none are produced.
result = parser.get_records(1)
self.assertEqual(result, [])
self.assertListEqual(self.exception_callback_value, [])
log.debug('===== END TEST NO SENSOR DATA =====')
|
janeen666/mi-instrument
|
mi/dataset/parser/test/test_ctdbp_cdef.py
|
Python
|
bsd-2-clause
| 6,282
|
# Copyright (c) Moshe Zadka
# See LICENSE for details.
from twisted.application.service import ServiceMaker
serviceMaker = ServiceMaker(
"Web-based write-only editor",
"nanoauto.web",
"An editor which allows writing only",
"nanoauto",
)
|
moshez/nanoauto
|
twisted/plugins/nanoauto.py
|
Python
|
mit
| 255
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .usage_details_operations import UsageDetailsOperations
from .marketplaces_operations import MarketplacesOperations
from .reservations_summaries_operations import ReservationsSummariesOperations
from .reservations_details_operations import ReservationsDetailsOperations
from .budgets_operations import BudgetsOperations
from .operations import Operations
from .price_sheet_operations import PriceSheetOperations
__all__ = [
'UsageDetailsOperations',
'MarketplacesOperations',
'ReservationsSummariesOperations',
'ReservationsDetailsOperations',
'BudgetsOperations',
'Operations',
'PriceSheetOperations',
]
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-consumption/azure/mgmt/consumption/operations/__init__.py
|
Python
|
mit
| 1,113
|
# -*- coding: utf-8 -*-
################################################
###### Copyright (c) 2016, Alexandre Popoff
###
import numpy as np
import itertools
import time
from .categoryaction import CatObject
class MultQ(object):
def __init__(self,x):
"""Initializes an element of the multiplicative quantale.
Parameters
----------
x: a float value between 0 and 1
Returns
-------
None
        Raises an exception if the float value is not in the interval [0, 1].
        """
        if x < 0 or x > 1:
            raise Exception("Real number must lie in the interval [0, 1]")
self.x = x
@staticmethod
def Unit():
"""Static method returning the unit of the monoid operation in the
quantale.
Parameters
----------
None
Returns
-------
The unit of the multiplicative quantale for the monoid operation.
"""
return MultQ(1.0)
@staticmethod
def Zero():
"""Static method returning the zero value in the
quantale.
Parameters
----------
None
Returns
-------
The zero value in the quantale.
"""
return MultQ(0.0)
def __mul__(self,rhs):
"""Compose two numbers in the multiplicative quantale
Overloads the '*' operator of Python
Parameters
----------
rhs : an instance of MultQ
Returns
-------
The product self * rhs.
In the case of the multiplicative quantale, it is the ordinary
product of the two numbers.
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid MultQ")
return self.__class__(self.x*rhs.x)
def __add__(self,rhs):
"""Compute the supremum in the multiplicative quantale
Overloads the '+' operator of Python
Parameters
----------
rhs : an instance of MultQ
Returns
-------
The supremum self v rhs.
In the case of the multiplicative quantale, 'v' is the maximum
of the two numbers.
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid MultQ")
return self.__class__(max([self.x,rhs.x]))
def __eq__(self,rhs):
"""Checks if the two numbers in the multiplicative quantale are equal.
Overloads the '==' operator of Python
Parameters
----------
rhs : an instance of MultQ
Returns
-------
True if 'self' is equal to 'rhs'
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid MultQ")
return self.x==rhs.x
def __lt__(self,rhs):
"""Checks if the given number is strictly inferior to the rhs given the
poset structure of the multiplicative quantale.
Overloads the '<' operator of Python
Parameters
----------
rhs : an instance of MultQ
Returns
-------
        True if 'self' is strictly inferior to 'rhs'
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid MultQ")
return self.x<rhs.x
def __le__(self,rhs):
"""Checks if the given number is inferior to the rhs given the
poset structure of the multiplicative quantale.
Overloads the '<=' operator of Python
Parameters
----------
rhs : an instance of MultQ
Returns
-------
True if 'self' is inferior or equal to 'rhs'
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid MultQ")
return self.x<=rhs.x
def __str__(self):
"""Returns a verbose description of the number in the multiplicative
quantale.
Overloads the 'str' operator of Python
Parameters
----------
None
Returns
-------
A string description of the number value.
"""
return str(self.x)
def __repr__(self):
return "MultQ({})".format(self.x)
class IntvQ(object):
def __init__(self,x):
"""Initializes an element of the interval quantale.
Parameters
----------
x: a float value between 0 and 1
Returns
-------
None
        Raises an exception if the float value is not in the interval [0, 1].
        """
        if x < 0 or x > 1:
            raise Exception("Real number must lie in the interval [0, 1]")
self.x = x
@staticmethod
def Unit():
"""Static method returning the unit of the monoid operation in the
quantale.
Parameters
----------
None
Returns
-------
        The unit of the interval quantale for the monoid operation.
"""
return IntvQ(1.0)
@staticmethod
def Zero():
"""Static method returning the zero value in the
quantale.
Parameters
----------
None
Returns
-------
The zero value in the quantale.
"""
return IntvQ(0.0)
def __mul__(self,rhs):
"""Compose two numbers in the interval quantale
Overloads the '*' operator of Python
Parameters
----------
rhs : an instance of IntvQ
Returns
-------
The product self * rhs.
In the case of the interval quantale, it is the min of the two numbers.
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid IntvQ")
return self.__class__(min([self.x,rhs.x]))
def __add__(self,rhs):
"""Compute the supremum in the interval quantale
Overloads the '+' operator of Python
Parameters
----------
rhs : an instance of IntvQ
Returns
-------
The supremum self v rhs.
In the case of the interval quantale, 'v' is the maximum
of the two numbers.
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid IntvQ")
return self.__class__(max([self.x,rhs.x]))
def __eq__(self,rhs):
"""Checks if the two numbers in the interval quantale are equal.
Overloads the '==' operator of Python
Parameters
----------
rhs : an instance of IntvQ
Returns
-------
True if 'self' is equal to 'rhs'
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid IntvQ")
return self.x==rhs.x
def __lt__(self,rhs):
"""Checks if the given number is strictly inferior to the rhs given the
poset structure of the interval quantale.
Overloads the '<' operator of Python
Parameters
----------
rhs : an instance of IntvQ
Returns
-------
        True if 'self' is strictly inferior to 'rhs'
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid IntvQ")
return self.x<rhs.x
def __le__(self,rhs):
"""Checks if the given number is inferior to the rhs given the
poset structure of the interval quantale.
Overloads the '<=' operator of Python
Parameters
----------
rhs : an instance of IntvQ
Returns
-------
True if 'self' is inferior or equal to 'rhs'
"""
if not isinstance(rhs,self.__class__):
raise Exception("RHS is not a valid IntvQ")
return self.x<=rhs.x
def __str__(self):
"""Returns a verbose description of the number in the interval
quantale.
Overloads the 'str' operator of Python
Parameters
----------
None
Returns
-------
A string description of the number value.
"""
return str(self.x)
def __repr__(self):
return "IntvQ({})".format(self.x)
class Lin3Q(IntvQ):
def __init__(self,x):
"""Initializes an element of the linear order quantale with 3 elements.
It is a sub-quantale of the interval quantale with values 0, 1/2, and 1.
Parameters
----------
        x: a float value, either 0, 1/2, or 1.
Returns
-------
None
        Raises an exception if the float value is not one of the
        above-mentioned values.
"""
if not (x==0 or x==0.5 or x==1):
raise Exception("The possibles values are 0, 1/2, and 1")
super().__init__(x)
@staticmethod
def Unit():
"""Static method returning the unit of the monoid operation in the
quantale.
Parameters
----------
None
Returns
-------
The unit of the linear order quantale for the monoid operation.
"""
return Lin3Q(1.0)
@staticmethod
def Zero():
"""Static method returning the zero value in the
quantale.
Parameters
----------
None
Returns
-------
The zero value in the quantale.
"""
return Lin3Q(0.0)
def __str__(self):
return str(self.x)
def __repr__(self):
return "Lin3Q({})".format(self.x)
########################################################
class QMorphism(object):
def __init__(self,name,source,target,qtype=None,mapping=None):
"""Initializes a quantaloid morphism between two sets.
Parameters
----------
name: a string representing the name of the morphism
source: an instance of CatObject representing the domain of the morphism
target: an instance of CatObject representing the codomain of
the morphism
qtype: class of quantale for the morphism
mapping: optional argument representing the mapping of elements
between the domain and the codomain. The mapping can be
given as a NumPy array matrix or as a dictionary.
Returns
-------
None
Raises an exception if
- the source is not an instance of a CatObject
- the target is not an instance of a CatObject
- the type (class) of quantale is not specified
"""
if not isinstance(source,CatObject):
raise Exception("Source is not a valid CatObject class\n")
if not isinstance(target,CatObject):
raise Exception("Target is not a valid CatObject class\n")
if qtype is None:
raise Exception("Type of quantale should be specified")
self.name = name
self.source = source
self.target = target
self.qtype = qtype
if mapping is not None:
            if isinstance(mapping, np.ndarray):
                self.set_mapping_matrix(mapping)
            else:
                self.set_mapping(mapping)
def set_name(self,name):
"""Sets the name of the morphism
Parameters
----------
name: a string representing the new name of the morphism
Returns
-------
None
"""
if not len(name):
raise Exception("The specified morphism name is empty")
self.name = name
def set_to_identity(self):
"""Sets the morphism to be an identity morphism. The domain and codomain
must be identical.
Parameters
----------
None
Returns
-------
None
"""
if not (self.source==self.target):
raise Exception("Source and target should be identical")
card_source = self.source.get_cardinality()
M = np.empty((card_source,card_source),dtype=self.qtype)
for i in range(card_source):
for j in range(card_source):
if i==j:
M[i,j] = self.qtype.Unit()
else:
M[i,j] = self.qtype.Zero()
self.matrix = M
def set_mapping(self,mapping):
"""Sets the mapping of elements between the domain and the codomain
Parameters
----------
mapping: a dictionary, with:
- keys: the element names in the domain of the morphism
- values: a list of pairs of element names in the codomain of
the morphism and a number in the specified quantale.
        The mapping can be one-to-many as we are working in the category Rel(Q)
of finite sets and quantale-valued relations.
Returns
-------
None
"""
card_source = self.source.get_cardinality()
card_target = self.target.get_cardinality()
self.matrix = np.empty((card_target,card_source),dtype=self.qtype)
for i in range(card_source):
for j in range(card_target):
self.matrix[j,i] = self.qtype.Zero()
for elem,images in sorted(mapping.items()):
id_elem = self.source.get_idx_by_name(elem)
for image,value in images:
id_image = self.target.get_idx_by_name(image)
self.matrix[id_image,id_elem] = self.qtype(value)
def set_mapping_matrix(self,matrix):
"""Sets the mapping of elements between the domain and the codomain
Parameters
----------
matrix: a quantale-valued matrix (m,n), where m is the cardinality of the codomain
and n the cardinality of the domain, indicating the image of the elements.
Returns
-------
None
"""
self.matrix = matrix
def get_mapping(self):
"""Retrieves the mapping in the form of a dictionary
Parameters
----------
None
Returns
-------
A dictionary, with:
- keys: the element names in the domain of the morphism
- values: a list of pairs of element names in the
codomain of the morphism with the value in the
quantale
"""
dest_cardinality,source_cardinality = self.matrix.shape
d={}
for i in range(source_cardinality):
l=[]
for j in range(dest_cardinality):
v = self.matrix[j,i]
l.append((self.target.get_name_by_idx(j),v.x))
d[self.source.get_name_by_idx(i)]=l
return d
def get_mapping_matrix(self):
"""Retrieves the mapping in matrix form
Parameters
----------
None
Returns
-------
        A quantale-valued matrix representing the morphism in Rel(Q)
"""
return self.matrix
def copy(self):
"""Copy the current morphism
Parameters
----------
None
Returns
-------
A new instance of QMorphism with the same domain, codomain, and mapping
"""
U = QMorphism(self.name,self.source,self.target,qtype=self.qtype)
U.set_mapping_matrix(self.get_mapping_matrix())
return U
def _is_lefttotal(self):
"""Checks if the morphism is left total
Parameters
----------
None
Returns
-------
True if the morphism is left total, False otherwise.
"""
return np.all(np.sum(self.matrix,axis=0)>self.qtype.Zero())
def __str__(self):
"""Returns a verbose description of the morphism
Overloads the 'str' operator of Python
Parameters
----------
None
Returns
-------
A description of the morphism via its source, target, and mapping.
"""
descr = self.name+":"+self.source.name+"->"+self.target.name+"\n\n"
for s,t in sorted(self.get_mapping().items()):
descr += " "*(len(self.name)+1)
descr += s+"->"+(",".join([(x[0],str(x[1])) for x in t]))+"\n"
return descr
def __call__(self,elem):
"""Apply the current morphism to an element of its domain
Parameters
----------
elem : string representing an element of self.source
Returns
-------
List of pairs of elements and quantale values mapped by the given
QMorphism.
"""
idx_elem = self.source.get_idx_by_name(elem)
return [(self.target.get_name_by_idx(j),v.x) for j,v in enumerate(self.matrix[:,idx_elem]) if v!=self.qtype.Zero()]
def __pow__(self,int_power):
"""Raise the morphism to the power int_power
Overloads the '**' operator of Python
Parameters
----------
int_power : an integer
Returns
-------
The power self^int_power. Raises an exception if the morphism is not an
endomorphism.
"""
if not self.target==self.source:
raise Exception("Morphism should be an endomorphism")
U = self.copy()
U.set_to_identity()
for i in range(int_power):
U = self*U
U.set_name(self.name+"^"+str(int_power))
return U
def __mul__(self,morphism):
"""Compose two morphisms
Overloads the '*' operator of Python
Parameters
----------
        morphism : an instance of QMorphism
Returns
-------
The product self * morphism.
Raises an exception if the rhs is not a QMorphism, or if the two
QMorphisms are of different quantale types.
Returns None if the two morphisms are not composable.
"""
if not isinstance(morphism,QMorphism):
raise Exception("RHS is not a valid QMorphism class\n")
if not self.qtype==morphism.qtype:
raise Exception("QMorphisms use different quantales")
if not morphism.target==self.source:
return None
new_morphism = QMorphism(self.name+morphism.name,morphism.source,self.target,qtype=self.qtype)
new_morphism.set_mapping_matrix((self.matrix.dot(morphism.matrix)))
return new_morphism
def __eq__(self,morphism):
"""Checks if the given morphism is equal to 'morphism'
Overloads the '==' operator of Python
Parameters
----------
morphism : an instance of QMorphism
Returns
-------
True if 'self' is equal to 'morphism'
Raises an exception if the rhs is not a QMorphism, or if the two
QMorphisms are of different quantale types.
"""
if not isinstance(morphism,QMorphism):
raise Exception("RHS is not a valid QMorphism class\n")
if not self.qtype==morphism.qtype:
raise Exception("QMorphisms use different quantales")
if self is None or morphism is None:
return False
return (self.source == morphism.source) and \
(self.target == morphism.target) and \
(np.array_equal(self.matrix,morphism.matrix))
def __le__(self, morphism):
"""Checks if the given morphism is included in 'morphism', i.e. if there
is a 2-morphism in Rel from 'self' to 'morphism'.
Overloads the '<=' operator of Python
Parameters
----------
morphism : an instance of QMorphism
Returns
-------
True if 'self' is included in 'morphism'
Raises an exception if the rhs is not a QMorphism, or if the two
QMorphisms are of different quantale types, or if the domain and codomain
differ.
"""
if not isinstance(morphism,QMorphism):
raise Exception("RHS is not a valid CatMorphism class\n")
if not self.qtype==morphism.qtype:
raise Exception("QMorphisms use different quantales")
if self is None or morphism is None:
return False
        if not ((self.source == morphism.source) and (self.target == morphism.target)):
raise Exception("Morphisms should have the same domain and codomain")
return np.all(self.matrix<=morphism.matrix)
def __lt__(self, morphism):
"""Checks if the given morphism is strictly included in 'morphism', i.e. if there
is a non-identity 2-morphism in Rel from 'self' to 'morphism'.
Overloads the '<' operator of Python
Parameters
----------
        morphism : an instance of QMorphism
Returns
-------
True if 'self' is strictly included in 'morphism'
Raises an exception if the rhs is not a QMorphism, or if the two
QMorphisms are of different quantale types, or if the domain and codomain
differ.
"""
if not isinstance(morphism,QMorphism):
raise Exception("RHS is not a valid CatMorphism class\n")
if not self.qtype==morphism.qtype:
raise Exception("QMorphisms use different quantales")
        if not ((self.source == morphism.source) and (self.target == morphism.target)):
raise Exception("Morphisms should have the same domain and codomain")
if self is None or morphism is None:
return False
return np.all(self.matrix<morphism.matrix)
########################################
class CategoryQAction(object):
def __init__(self,qtype=None,objects=None,generators=None,generate=True):
"""Instantiates a CategoryQAction class with morphisms in a given
quantale
Parameters
----------
        qtype: the quantale class (e.g. MultQ, IntvQ, Lin3Q) used for the
               morphisms.
        objects: optional list of CatObject instances representing
               the objects in the category.
        generators: optional list of QMorphism instances
               representing the generators of the category.
        generate: optional boolean indicating whether the category
               should be generated upon instantiation.
Returns
-------
None
Raises an exception if the quantale type (class) is not specified.
"""
if qtype is None:
raise Exception("Type of quantale should be specified")
self.qtype=qtype
self.objects={}
self.generators={}
self.morphisms={}
self.equivalences=[]
if objects is not None:
self.set_objects(objects)
if generators is not None:
self.set_generators(generators)
        if generate:
self.generate_category()
def set_objects(self,list_objects):
"""Sets the objects constituting the category action. This erases
all previous objects, morphisms, and generators.
Parameters
----------
list_objects: a list of CatObject classes representing the objects in
the category.
Returns
-------
None. Checks if all objects have distinct names, raises an Exception
otherwise.
"""
self.objects={}
self.generators={}
self.morphisms={}
self.equivalences=[]
ob_names = [catobject.name for catobject in list_objects]
if not len(ob_names)==len(np.unique(ob_names)):
raise Exception("Objects should have distinct names")
for catobject in list_objects:
self.objects[catobject.name] = catobject
def get_objects(self):
"""Returns the objects in the category action.
Parameters
----------
None
Returns
-------
A list of pairs (x,y), where:
- x is the name of the object
- y is the corresponding instance of CatObject
"""
return list(sorted(self.objects.items()))
def get_morphisms(self):
"""Returns the morphisms in the category action.
Parameters
----------
None
Returns
-------
A list of pairs (x,y), where:
- x is the name of the morphism
- y is the corresponding instance of QMorphism
"""
return list(sorted(self.morphisms.items()))
def get_generators(self):
"""Returns the generators in the category action.
Parameters
----------
None
Returns
-------
A list of pairs (x,y), where:
- x is the name of the generator
- y is the corresponding instance of QMorphism
"""
return list(sorted(self.generators.items()))
def set_generators(self,list_morphisms):
"""Set generators to the category action. This erases
all previous morphisms and generators.
Parameters
----------
list_morphisms: a list of QMorphism instances representing the
generator morphisms to be added.
Returns
-------
None.
Checks if sources and targets of generators are objects present
in the category, raises an Exception otherwise
Checks if all generators have distinct names, raises an Exception
otherwise.
"""
self.generators={}
self.morphisms={}
self.equivalences=[]
all_gennames = [m.name for m in list_morphisms]
if not len(all_gennames)==len(np.unique(all_gennames)):
raise Exception("Generators must have distinct names")
cat_obj_names = [x[0] for x in self.get_objects()]
for m in list_morphisms:
if not isinstance(m,QMorphism):
raise Exception("Generator is not a valid QMorphism class\n")
if not m.source.name in cat_obj_names:
raise Exception("Domain or codomain of a generator is not present in the category")
if not m.target.name in cat_obj_names:
raise Exception("Domain or codomain of a generator is not present in the category")
self.generators[m.name] = m
def _add_morphisms(self,list_morphisms):
"""Add morphisms to the category action.
Parameters
----------
list_morphisms: a list of QMorphism instances representing the
morphisms to be added.
Returns
-------
None
Checks if sources and targets of generators are objects present
in the category, raises an Exception otherwise.
Checks if the morphisms have a distinct name, raises an Exception
otherwise.
"""
cat_obj_names = [x[0] for x in self.get_objects()]
cat_mor_names = [x[0] for x in self.get_morphisms()]
for m in list_morphisms:
if not m.source.name in cat_obj_names:
raise Exception("Domain or codomain of a generator is not present in the category")
if not m.target.name in cat_obj_names:
raise Exception("Domain or codomain of a generator is not present in the category")
if m.name in cat_mor_names:
raise Exception("Morphisms should have distinct names")
self.morphisms[m.name] = m
def _add_identities(self):
"""Automatically add identity morphisms on each object of the category
action
Parameters
----------
None
Returns
-------
None
"""
for name,catobject in sorted(self.objects.items()):
identity_morphism = QMorphism("id_"+name,catobject,catobject,qtype=self.qtype)
identity_morphism.set_to_identity()
self._add_morphisms([identity_morphism])
def generate_category(self):
"""Generates all morphisms in the category based on the given list of
generators. The generation proceeds by successive multiplication of
generators and morphisms until completion. This is suited to small
        category actions, but the performance would be prohibitive for very
large categories containing many morphisms.
Parameters
----------
None
Returns
-------
None
"""
self.morphisms = self.generators.copy()
self._add_identities()
new_liste = self.generators.copy()
added_liste = self.generators.copy()
while(len(added_liste)>0):
added_liste = {}
for name_x,morphism_x in sorted(new_liste.items()):
for name_g,morphism_g in self.get_generators():
new_morphism = morphism_g*morphism_x
                    if new_morphism is not None:
c=0
for name_y,morphism_y in self.get_morphisms():
if new_morphism==morphism_y:
c=1
self.equivalences.append([new_morphism.name,morphism_y.name])
if c==0:
added_liste[new_morphism.name] = new_morphism
self.morphisms[new_morphism.name] = new_morphism
new_liste = added_liste
def mult(self,name_g,name_f):
"""Multiplies two morphisms and returns the corresponding morphism.
Parameters
----------
        name_g, name_f: strings representing the names of the morphisms
to be multiplied.
Returns
-------
A string representing the name of the morphism corresponding
to name_g*name_f.
"""
new_morphism = self.morphisms[name_g]*self.morphisms[name_f]
if new_morphism is None:
return new_morphism
else:
return [name_x for name_x,x in self.get_morphisms() if x==new_morphism][0]
def apply_operation(self,name_f,element):
"""Applies a morphism to a given element.
Parameters
----------
        name_f: a string representing the name of the morphism to be applied.
        element: a string representing the name of the element.
Returns
-------
        A list of pairs representing the images of element by name_f and their
quantale values.
"""
return self.morphisms[name_f](element)
def get_operation(self,element_1,element_2):
"""Returns the operations taking the element element_1 to the element
element_2.
Parameters
----------
element_1,element_2 : strings representing the name of the elements.
Returns
-------
A list of strings representing the morphisms f such that element_2 is
an image of element_1 by f.
"""
res = []
for name_f,f in self.get_morphisms():
try:
if element_2 in [x[0] for x in f(element_1)]:
res.append(name_f)
except:
pass
return res
def rename_operation(self,name_f,new_name):
"""Renames a morphism in the category
Parameters
----------
name_f: a string representing the name of the morphism to be renamed.
new_name: a string representing the new name of the morphism.
Returns
-------
None
"""
if not name_f in self.morphisms:
raise Exception("The specified operation cannot be found")
new_op = self.morphisms[name_f].copy()
new_op.set_name(new_name)
del self.morphisms[name_f]
self.morphisms[new_name] = new_op
def rewrite_operations(self):
"""Rewrites morphism names in the category action by trying to reduce
repeated substrings.
Parameters
----------
None
Returns
-------
None
"""
operation_names = sorted(self.morphisms.keys())
for op_name in operation_names:
self.rename_operation(op_name,self._rewrite(op_name))
equivalences_new=[]
for x,y in self.equivalences:
equivalences_new.append([self._rewrite(x),self._rewrite(y)])
self.equivalences = equivalences_new
def _rewrite(self,the_string):
"""Rewrites a string by trying to reduce repeated patterns of the
category action generator names.
Parameters
----------
        the_string: a string, typically a morphism name composed of
                    generator names.
        Returns
        -------
        The rewritten string, with repeated generator names collapsed
        into powers.
"""
if "id" in the_string:
return the_string
generator_names = sorted(self.generators.keys())
count_list=[["",0]]
while(len(the_string)):
flag=0
for name_g in generator_names:
if the_string[:len(name_g)]==name_g:
flag=1
if count_list[-1][0]==name_g:
count_list[-1][1]+=1
else:
count_list.append([name_g,1])
the_string=the_string[len(name_g):]
if not flag:
raise Exception("Operation name cannot be rewritten")
new_string=""
for name,count in count_list:
if count>1:
new_string+="("+name+"^"+str(count)+")"
else:
new_string+=name
return new_string
def get_description(self,name_f):
"""Gets a string description of a given morphism.
Parameters
----------
name_f: a string representing the name of the morphism
Returns
-------
A string representing the corresponding morphism
"""
return str(self.morphisms[name_f])
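# Usage sketch (illustrative only; it assumes CatObject("name", [elements])
# is the constructor exposed by .categoryaction, which is not shown here).
def _category_demo():
    X = CatObject("X", ["a", "b"])                  # assumed constructor
    f = QMorphism("f", X, X, qtype=IntvQ,
                  mapping={"a": [("b", 1.0)], "b": [("a", 0.5), ("b", 1.0)]})
    cat = CategoryQAction(qtype=IntvQ, objects=[X], generators=[f])
    print(cat.apply_operation("f", "a"))            # -> [('b', 1.0)]
    print(cat.get_operation("b", "a"))              # morphism names sending b to a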
|
AlexPof/opycleid
|
opycleid/q_categoryaction.py
|
Python
|
bsd-3-clause
| 33,498
|
from pontoon.checks import DB_LIBRARIES
def bulk_run_checks(translations):
"""
Run checks on a list of translations
*Important*
    To avoid performance problems, translations must have their entity and locale objects prefetched.
"""
from pontoon.checks.libraries import run_checks
from pontoon.checks.models import Warning, Error
warnings, errors = [], []
    if not translations:
        return [], []
for translation in translations:
warnings_, errors_ = get_failed_checks_db_objects(
translation,
run_checks(
translation.entity,
translation.locale.code,
translation.entity.string,
translation.string,
use_tt_checks=False,
),
)
warnings.extend(warnings_)
errors.extend(errors_)
# Remove old warnings and errors
Warning.objects.filter(translation__pk__in=[t.pk for t in translations]).delete()
Error.objects.filter(translation__pk__in=[t.pk for t in translations]).delete()
# Insert new warnings and errors
Warning.objects.bulk_create(warnings)
Error.objects.bulk_create(errors)
return warnings, errors
def get_failed_checks_db_objects(translation, failed_checks):
"""
Return model instances of Warnings and Errors
:arg Translation translation: instance of translation
:arg dict failed_checks: dictionary with failed checks
"""
from pontoon.checks.models import Warning, Error
warnings = []
errors = []
for check_group, messages in failed_checks.items():
library = check_group.replace("Warnings", "").replace("Errors", "")
if library not in DB_LIBRARIES:
continue
if check_group.endswith("Errors"):
severity_cls, messages_list = Error, errors
else:
severity_cls, messages_list = Warning, warnings
messages_list.extend(
[
severity_cls(
library=library,
message=message,
translation=translation,
)
for message in messages
]
)
return warnings, errors
def save_failed_checks(translation, failed_checks):
"""
Save all failed checks to Database
:arg Translation translation: instance of translation
:arg dict failed_checks: dictionary with failed checks
"""
warnings, errors = get_failed_checks_db_objects(translation, failed_checks)
translation.warnings.all().delete()
translation.errors.all().delete()
translation.warnings.bulk_create(warnings)
translation.errors.bulk_create(errors)
def are_blocking_checks(checks, ignore_warnings):
"""
Return True if checks are errors or unignored warnings.
:arg dict checks: dictionary with a list of errors/warnings per library
:arg bool ignore_warnings: ignores failed checks of type warning
"""
has_errors = any(p.endswith("Errors") for p in checks)
    return (not ignore_warnings and bool(checks)) or has_errors
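# Illustrative sketch (added for exposition): how blocking is decided. The
# checks dict maps "<library>Warnings"/"<library>Errors" keys to message
# lists; the payloads below are hypothetical.
def _blocking_demo():
    checks = {"clWarnings": ["double space"]}
    assert are_blocking_checks(checks, ignore_warnings=False)
    assert not are_blocking_checks(checks, ignore_warnings=True)
    assert are_blocking_checks({"clErrors": ["missing term"]}, ignore_warnings=True)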
|
mathjazz/pontoon
|
pontoon/checks/utils.py
|
Python
|
bsd-3-clause
| 3,083
|
# -*- coding: utf-8 -*-
'''
Local settings
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
'''
from .common import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key is only used for development and testing.
SECRET_KEY = env("DJANGO_SECRET_KEY", default='CHANGEME!!!')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_HOST = 'localhost'
EMAIL_PORT = 1025
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
# MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
# INSTALLED_APPS += ('debug_toolbar', )
INTERNAL_IPS = ('127.0.0.1', '10.0.2.2',)
# DEBUG_TOOLBAR_CONFIG = {
# 'DISABLE_PANELS': [
# 'debug_toolbar.panels.redirects.RedirectsPanel',
# ],
# 'SHOW_TEMPLATE_CONTEXT': True,
# }
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ('django_extensions', )
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
'default': env.db("DATABASE_URL", default="postgres:///htp")
}
DATABASES['default']['ATOMIC_REQUESTS'] = True
|
tosfan4ever/hacktheplanet
|
config/settings/local.py
|
Python
|
bsd-3-clause
| 2,187
|
import tensorflow as tf
from .template import BaseLayer
class Linear(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self, this_dim=None, W=None, b=None, stddev=0.1):
"""
Description:
This is a fully connected layer
Args:
            this_dim (int): dimension of this layer
            W (tensor variable): weight of 2D tensor matrix; created lazily
                when not provided
            b (tensor variable): bias of 2D tensor matrix; created lazily
                when not provided
            stddev (float): standard deviation used to initialize W
        """
self.this_dim = this_dim
self.stddev = stddev
self.W = W
self.b = b
@BaseLayer.init_name_scope
def __init_var__(self, state_below):
prev_dim = int(state_below.shape[1])
if self.W is None:
self.W = tf.Variable(tf.random_normal([prev_dim, self.this_dim], stddev=self.stddev),
name=self.__class__.__name__ + '_W')
if self.b is None:
self.b = tf.Variable(tf.zeros([self.this_dim]), name=self.__class__.__name__ + '_b')
def _train_fprop(self, state_below):
return tf.matmul(state_below, self.W) + self.b
@property
def _variables(self):
return [self.W, self.b]
class LinearMasked(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self, this_dim=None, W=None, b=None, mask=None, stddev=0.1):
"""
Description:
This is a fully connected layer with an applied mask for partial connections
Args:
            this_dim (int): dimension of this layer
            W (tensor variable): weight of 2D tensor matrix
            b (tensor variable): bias of 2D tensor matrix
            mask (numpy.ndarray or tensorflow placeholder): mask for partial connection
            stddev (float): standard deviation used to initialize W
        """
self.this_dim = this_dim
self.mask = mask
self.stddev = stddev
self.W = W
self.b = b
@BaseLayer.init_name_scope
def __init_var__(self, state_below):
prev_dim = int(state_below.shape[1])
if self.W is None:
self.W = tf.Variable(tf.random_normal([prev_dim, self.this_dim], stddev=self.stddev),
name=self.__class__.__name__ + '_W')
if self.b is None:
self.b = tf.Variable(tf.zeros([self.this_dim]), name=self.__class__.__name__ + '_b')
def _train_fprop(self, state_below):
return tf.multiply(tf.matmul(state_below, self.W) + self.b, self.mask)
@property
def _variables(self):
return [self.W, self.b]
class SparseLinear(BaseLayer):
@BaseLayer.init_name_scope
def __init__(self, prev_dim=None, this_dim=None, W=None, b=None, batchsize=None, stddev=0.1):
"""
Description:
            This is a fully connected layer whose sparse input comes as two
            tensors: an index tensor of shape [N, 2] holding the coordinates
            of the non-zero entries, and a value tensor of shape [N].
        Args:
            prev_dim (int): dimension of previous layer
            this_dim (int): dimension of this layer
            W (tensor variable): weight of 2D tensor matrix
            b (tensor variable): bias of 2D tensor matrix
            batchsize (int): number of rows in the dense shape of the input
            stddev (float): standard deviation used to initialize W
        """
self.prev_dim = prev_dim
self.this_dim = this_dim
self.batchsize = batchsize
self.stddev = stddev
self.W = W
self.b = b
if self.W is None:
self.W = tf.Variable(tf.random_normal([self.prev_dim, self.this_dim], stddev=self.stddev),
name=self.__class__.__name__ + '_W')
if self.b is None:
self.b = tf.Variable(tf.zeros([self.this_dim]), name=self.__class__.__name__ + '_b')
def _train_fprop(self, state_below):
idx, val = state_below
X = tf.SparseTensor(tf.cast(idx, 'int64'), val, dense_shape=[self.batchsize, self.prev_dim])
X_order = tf.sparse_reorder(X)
XW = tf.sparse_tensor_dense_matmul(X_order, self.W, adjoint_a=False, adjoint_b=False)
return tf.add(XW, self.b)
@property
def _variables(self):
return [self.W, self.b]
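# Usage sketch (illustrative; assumes the TF1-style graph API used above and
# calls __init_var__ directly, which the framework presumably does internally).
def _linear_demo():
    x = tf.placeholder('float32', [None, 8])
    layer = Linear(this_dim=4)
    layer.__init_var__(x)            # builds W with shape (8, 4) and b with shape (4,)
    return layer._train_fprop(x)     # y = xW + b, shape [None, 4]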
|
hycis/TensorGraph
|
tensorgraph/layers/linear.py
|
Python
|
apache-2.0
| 4,195
|
from ..Qt import QtCore, QtGui, QtOpenGL, USE_PYQT5
from OpenGL.GL import *
import OpenGL.GL.framebufferobjects as glfbo
import numpy as np
from .. import Vector
from .. import functions as fn
##Vector = QtGui.QVector3D
ShareWidget = None
class GLViewWidget(QtOpenGL.QGLWidget):
"""
Basic widget for displaying 3D data
- Rotation/scale controls
- Axis/grid display
- Export options
"""
def __init__(self, parent=None):
global ShareWidget
if ShareWidget is None:
## create a dummy widget to allow sharing objects (textures, shaders, etc) between views
ShareWidget = QtOpenGL.QGLWidget()
QtOpenGL.QGLWidget.__init__(self, parent, ShareWidget)
self.setFocusPolicy(QtCore.Qt.ClickFocus)
self.opts = {
'center': Vector(0,0,0), ## will always appear at the center of the widget
'distance': 10.0, ## distance of camera from center
'fov': 60, ## horizontal field of view in degrees
'elevation': 30, ## camera's angle of elevation in degrees
'azimuth': 45, ## camera's azimuthal angle in degrees
## (rotation around z-axis 0 points along x-axis)
'viewport': None, ## glViewport params; None == whole widget
}
self.setBackgroundColor('k')
self.items = []
self.noRepeatKeys = [QtCore.Qt.Key_Right, QtCore.Qt.Key_Left, QtCore.Qt.Key_Up, QtCore.Qt.Key_Down, QtCore.Qt.Key_PageUp, QtCore.Qt.Key_PageDown]
self.keysPressed = {}
self.keyTimer = QtCore.QTimer()
self.keyTimer.timeout.connect(self.evalKeyState)
self.makeCurrent()
def addItem(self, item):
self.items.append(item)
if hasattr(item, 'initializeGL'):
self.makeCurrent()
try:
item.initializeGL()
except:
self.checkOpenGLVersion('Error while adding item %s to GLViewWidget.' % str(item))
item._setView(self)
#print "set view", item, self, item.view()
self.update()
def removeItem(self, item):
self.items.remove(item)
item._setView(None)
self.update()
def initializeGL(self):
self.resizeGL(self.width(), self.height())
def setBackgroundColor(self, *args, **kwds):
"""
Set the background color of the widget. Accepts the same arguments as
pg.mkColor().
"""
self.opts['bgcolor'] = fn.mkColor(*args, **kwds)
self.update()
def getViewport(self):
vp = self.opts['viewport']
if vp is None:
return (0, 0, self.width(), self.height())
else:
return vp
def resizeGL(self, w, h):
pass
#glViewport(*self.getViewport())
#self.update()
def setProjection(self, region=None):
m = self.projectionMatrix(region)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
a = np.array(m.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
def projectionMatrix(self, region=None):
        # Xw = (Xnd + 1) * width/2 + X0
if region is None:
region = (0, 0, self.width(), self.height())
x0, y0, w, h = self.getViewport()
dist = self.opts['distance']
fov = self.opts['fov']
nearClip = dist * 0.001
farClip = dist * 1000.
r = nearClip * np.tan(fov * 0.5 * np.pi / 180.)
t = r * h / w
# convert screen coordinates (region) to normalized device coordinates
# Xnd = (Xw - X0) * 2/width - 1
## Note that X0 and width in these equations must be the values used in viewport
left = r * ((region[0]-x0) * (2.0/w) - 1)
right = r * ((region[0]+region[2]-x0) * (2.0/w) - 1)
bottom = t * ((region[1]-y0) * (2.0/h) - 1)
top = t * ((region[1]+region[3]-y0) * (2.0/h) - 1)
tr = QtGui.QMatrix4x4()
tr.frustum(left, right, bottom, top, nearClip, farClip)
return tr
def setModelview(self):
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
m = self.viewMatrix()
a = np.array(m.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
def viewMatrix(self):
tr = QtGui.QMatrix4x4()
tr.translate( 0.0, 0.0, -self.opts['distance'])
tr.rotate(self.opts['elevation']-90, 1, 0, 0)
tr.rotate(self.opts['azimuth']+90, 0, 0, -1)
center = self.opts['center']
tr.translate(-center.x(), -center.y(), -center.z())
return tr
def itemsAt(self, region=None):
"""
Return a list of the items displayed in the region (x, y, w, h)
relative to the widget.
"""
region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])
#buf = np.zeros(100000, dtype=np.uint)
buf = glSelectBuffer(100000)
try:
glRenderMode(GL_SELECT)
glInitNames()
glPushName(0)
self._itemNames = {}
self.paintGL(region=region, useItemNames=True)
finally:
hits = glRenderMode(GL_RENDER)
items = [(h.near, h.names[0]) for h in hits]
items.sort(key=lambda i: i[0])
return [self._itemNames[i[1]] for i in items]
def paintGL(self, region=None, viewport=None, useItemNames=False):
"""
viewport specifies the arguments to glViewport. If None, then we use self.opts['viewport']
region specifies the sub-region of self.opts['viewport'] that should be rendered.
Note that we may use viewport != self.opts['viewport'] when exporting.
"""
if viewport is None:
glViewport(*self.getViewport())
else:
glViewport(*viewport)
self.setProjection(region=region)
self.setModelview()
bgcolor = self.opts['bgcolor']
glClearColor(bgcolor.red(), bgcolor.green(), bgcolor.blue(), 1.0)
glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
self.drawItemTree(useItemNames=useItemNames)
def drawItemTree(self, item=None, useItemNames=False):
if item is None:
items = [x for x in self.items if x.parentItem() is None]
else:
items = item.childItems()
items.append(item)
items.sort(key=lambda a: a.depthValue())
for i in items:
if not i.visible():
continue
if i is item:
try:
glPushAttrib(GL_ALL_ATTRIB_BITS)
if useItemNames:
glLoadName(i._id)
self._itemNames[i._id] = i
i.paint()
except:
from .. import debug
debug.printExc()
msg = "Error while drawing item %s." % str(item)
ver = glGetString(GL_VERSION)
if ver is not None:
ver = ver.split()[0]
if int(ver.split(b'.')[0]) < 2:
print(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
print(msg)
finally:
glPopAttrib()
else:
glMatrixMode(GL_MODELVIEW)
glPushMatrix()
try:
tr = i.transform()
a = np.array(tr.copyDataTo()).reshape((4,4))
glMultMatrixf(a.transpose())
self.drawItemTree(i, useItemNames=useItemNames)
finally:
glMatrixMode(GL_MODELVIEW)
glPopMatrix()
def setCameraPosition(self, pos=None, distance=None, elevation=None, azimuth=None):
if distance is not None:
self.opts['distance'] = distance
if elevation is not None:
self.opts['elevation'] = elevation
if azimuth is not None:
self.opts['azimuth'] = azimuth
self.update()
def cameraPosition(self):
"""Return current position of camera based on center, dist, elevation, and azimuth"""
center = self.opts['center']
dist = self.opts['distance']
elev = self.opts['elevation'] * np.pi/180.
azim = self.opts['azimuth'] * np.pi/180.
pos = Vector(
center.x() + dist * np.cos(elev) * np.cos(azim),
center.y() + dist * np.cos(elev) * np.sin(azim),
center.z() + dist * np.sin(elev)
)
return pos
def orbit(self, azim, elev):
"""Orbits the camera around the center position. *azim* and *elev* are given in degrees."""
self.opts['azimuth'] += azim
#self.opts['elevation'] += elev
self.opts['elevation'] = np.clip(self.opts['elevation'] + elev, -90, 90)
self.update()
def pan(self, dx, dy, dz, relative=False):
"""
Moves the center (look-at) position while holding the camera in place.
If relative=True, then the coordinates are interpreted such that x
        is in the global xy plane and points to the right side of the view, y is
in the global xy plane and orthogonal to x, and z points in the global z
direction. Distances are scaled roughly such that a value of 1.0 moves
by one pixel on screen.
"""
if not relative:
self.opts['center'] += QtGui.QVector3D(dx, dy, dz)
else:
cPos = self.cameraPosition()
cVec = self.opts['center'] - cPos
dist = cVec.length() ## distance from camera to center
xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.) ## approx. width of view at distance of center point
xScale = xDist / self.width()
zVec = QtGui.QVector3D(0,0,1)
xVec = QtGui.QVector3D.crossProduct(zVec, cVec).normalized()
yVec = QtGui.QVector3D.crossProduct(xVec, zVec).normalized()
self.opts['center'] = self.opts['center'] + xVec * xScale * dx + yVec * xScale * dy + zVec * xScale * dz
self.update()
def pixelSize(self, pos):
"""
Return the approximate size of a screen pixel at the location pos
Pos may be a Vector or an (N,3) array of locations
"""
cam = self.cameraPosition()
if isinstance(pos, np.ndarray):
cam = np.array(cam).reshape((1,)*(pos.ndim-1)+(3,))
dist = ((pos-cam)**2).sum(axis=-1)**0.5
else:
dist = (pos-cam).length()
xDist = dist * 2. * np.tan(0.5 * self.opts['fov'] * np.pi / 180.)
return xDist / self.width()
def mousePressEvent(self, ev):
self.mousePos = ev.pos()
def mouseMoveEvent(self, ev):
diff = ev.pos() - self.mousePos
self.mousePos = ev.pos()
if ev.buttons() == QtCore.Qt.LeftButton:
self.orbit(-diff.x(), diff.y())
#print self.opts['azimuth'], self.opts['elevation']
elif ev.buttons() == QtCore.Qt.MidButton:
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.pan(diff.x(), 0, diff.y(), relative=True)
else:
self.pan(diff.x(), diff.y(), 0, relative=True)
def mouseReleaseEvent(self, ev):
pass
# Example item selection code:
#region = (ev.pos().x()-5, ev.pos().y()-5, 10, 10)
#print(self.itemsAt(region))
## debugging code: draw the picking region
#glViewport(*self.getViewport())
#glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
#region = (region[0], self.height()-(region[1]+region[3]), region[2], region[3])
#self.paintGL(region=region)
#self.swapBuffers()
def wheelEvent(self, ev):
delta = 0
if not USE_PYQT5:
delta = ev.delta()
else:
delta = ev.angleDelta().x()
if delta == 0:
delta = ev.angleDelta().y()
if (ev.modifiers() & QtCore.Qt.ControlModifier):
self.opts['fov'] *= 0.999**delta
else:
self.opts['distance'] *= 0.999**delta
self.update()
def keyPressEvent(self, ev):
if ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
self.keysPressed[ev.key()] = 1
self.evalKeyState()
def keyReleaseEvent(self, ev):
if ev.key() in self.noRepeatKeys:
ev.accept()
if ev.isAutoRepeat():
return
try:
del self.keysPressed[ev.key()]
except:
self.keysPressed = {}
self.evalKeyState()
def evalKeyState(self):
speed = 2.0
if len(self.keysPressed) > 0:
for key in self.keysPressed:
if key == QtCore.Qt.Key_Right:
self.orbit(azim=-speed, elev=0)
elif key == QtCore.Qt.Key_Left:
self.orbit(azim=speed, elev=0)
elif key == QtCore.Qt.Key_Up:
self.orbit(azim=0, elev=-speed)
elif key == QtCore.Qt.Key_Down:
self.orbit(azim=0, elev=speed)
elif key == QtCore.Qt.Key_PageUp:
pass
elif key == QtCore.Qt.Key_PageDown:
pass
self.keyTimer.start(16)
else:
self.keyTimer.stop()
def checkOpenGLVersion(self, msg):
## Only to be called from within exception handler.
ver = glGetString(GL_VERSION).split()[0]
if int(ver.split('.')[0]) < 2:
from .. import debug
            debug.printExc()
raise Exception(msg + " The original exception is printed above; however, pyqtgraph requires OpenGL version 2.0 or greater for many of its 3D features and your OpenGL version is %s. Installing updated display drivers may resolve this issue." % ver)
else:
raise
def readQImage(self):
"""
Read the current buffer pixels out as a QImage.
"""
w = self.width()
h = self.height()
self.repaint()
pixels = np.empty((h, w, 4), dtype=np.ubyte)
pixels[:] = 128
pixels[...,0] = 50
pixels[...,3] = 255
glReadPixels(0, 0, w, h, GL_RGBA, GL_UNSIGNED_BYTE, pixels)
# swap B,R channels for Qt
tmp = pixels[...,0].copy()
pixels[...,0] = pixels[...,2]
pixels[...,2] = tmp
pixels = pixels[::-1] # flip vertical
img = fn.makeQImage(pixels, transpose=False)
return img
def renderToArray(self, size, format=GL_BGRA, type=GL_UNSIGNED_BYTE, textureSize=1024, padding=256):
w,h = map(int, size)
self.makeCurrent()
tex = None
fb = None
try:
output = np.empty((w, h, 4), dtype=np.ubyte)
fb = glfbo.glGenFramebuffers(1)
glfbo.glBindFramebuffer(glfbo.GL_FRAMEBUFFER, fb )
glEnable(GL_TEXTURE_2D)
tex = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, tex)
texwidth = textureSize
data = np.zeros((texwidth,texwidth,4), dtype=np.ubyte)
## Test texture dimensions first
glTexImage2D(GL_PROXY_TEXTURE_2D, 0, GL_RGBA, texwidth, texwidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
if glGetTexLevelParameteriv(GL_PROXY_TEXTURE_2D, 0, GL_TEXTURE_WIDTH) == 0:
raise Exception("OpenGL failed to create 2D texture (%dx%d); too large for this hardware." % shape[:2])
            ## create texture
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, texwidth, texwidth, 0, GL_RGBA, GL_UNSIGNED_BYTE, data.transpose((1,0,2)))
self.opts['viewport'] = (0, 0, w, h) # viewport is the complete image; this ensures that paintGL(region=...)
# is interpreted correctly.
p2 = 2 * padding
for x in range(-padding, w-padding, texwidth-p2):
for y in range(-padding, h-padding, texwidth-p2):
x2 = min(x+texwidth, w+padding)
y2 = min(y+texwidth, h+padding)
w2 = x2-x
h2 = y2-y
## render to texture
glfbo.glFramebufferTexture2D(glfbo.GL_FRAMEBUFFER, glfbo.GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, tex, 0)
self.paintGL(region=(x, h-y-h2, w2, h2), viewport=(0, 0, w2, h2)) # only render sub-region
## read texture back to array
data = glGetTexImage(GL_TEXTURE_2D, 0, format, type)
data = np.fromstring(data, dtype=np.ubyte).reshape(texwidth,texwidth,4).transpose(1,0,2)[:, ::-1]
output[x+padding:x2-padding, y+padding:y2-padding] = data[padding:w2-padding, -(h2-padding):-padding]
finally:
self.opts['viewport'] = None
glfbo.glBindFramebuffer(glfbo.GL_FRAMEBUFFER, 0)
glBindTexture(GL_TEXTURE_2D, 0)
if tex is not None:
glDeleteTextures([tex])
if fb is not None:
glfbo.glDeleteFramebuffers([fb])
return output
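# Usage sketch (illustrative, not part of the module): a minimal interactive viewer.
def _view_demo():
    app = QtGui.QApplication([])
    w = GLViewWidget()
    w.setCameraPosition(distance=20, elevation=30, azimuth=45)
    w.orbit(10, 0)                   # rotate the camera 10 degrees about the z-axis
    w.pan(1, 0, 0, relative=True)    # shift the look-at point in view coordinates
    w.show()
    app.exec_()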
|
onlyjus/pyqtgraph
|
pyqtgraph/opengl/GLViewWidget.py
|
Python
|
mit
| 18,189
|
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Share Resource formatter."""
from aquilon.worker.formats.formatters import ObjectFormatter
from aquilon.worker.formats.resource import ResourceFormatter
from aquilon.aqdb.model import Share
class ShareFormatter(ResourceFormatter):
def extra_details(self, share, indent=""):
details = []
if share.latency_threshold:
details.append(indent + " Latency threshold: %d" %
share.latency_threshold)
details.append(indent + " Server: %s" % share.server)
details.append(indent + " Mountpoint: %s" % share.mount)
details.append(indent + " Disk Count: %d" % share.virtual_disk_count)
details.append(indent + " Machine Count: %d" % share.virtual_machine_count)
return details
def fill_proto(self, share, skeleton, embedded=True, indirect_attrs=True):
super(ShareFormatter, self).fill_proto(share, skeleton)
if share.server:
skeleton.share.server = share.server
if share.mount:
skeleton.share.mount = share.mount
skeleton.share.disk_count = share.virtual_disk_count
skeleton.share.machine_count = share.virtual_machine_count
ObjectFormatter.handlers[Share] = ShareFormatter()
|
quattor/aquilon
|
lib/aquilon/worker/formats/share.py
|
Python
|
apache-2.0
| 1,959
|
# Generated by Django 2.2.24 on 2022-01-27 22:04
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("course_catalog", "0088_change_image_src_type")]
operations = [
migrations.AddField(
model_name="contentfile",
name="learning_resource_types",
field=django.contrib.postgres.fields.ArrayField(
base_field=models.CharField(max_length=256),
blank=True,
null=True,
size=None,
),
)
]
|
mitodl/open-discussions
|
course_catalog/migrations/0089_contentfile_learning_resource_types.py
|
Python
|
bsd-3-clause
| 607
|
def factor(n):
if n == 1: return [1]
i = 2
limit = n**0.5
while i <= limit:
if n % i == 0:
ret = factor(n/i)
ret.append(i)
return ret
i += 1
return [n]
def uniqify(seq):
return list(set(seq))
def phi(x):
    # Euler's totient: remove, for each distinct prime factor k of x, the
    # multiples of k, i.e. phi(x) = x * prod(1 - 1/p) over primes p | x.
    t = x
    for k in uniqify(factor(x)):
        t -= t // k
    return t
def resilience(x):
    # Fraction of the fractions i/x (1 <= i < x) already in lowest terms.
    return phi(x) / (x-1.0)
lastprime = 11
base = 2*3*5*7*11.0
multiplier = 1.0
print "Starting value", base
while (resilience(base * multiplier) > (15499.0/94744.0)):
multiplier = multiplier +1.0
if (multiplier > lastprime):
if (len(factor(multiplier)) == 1):
lastprime = multiplier
base = base * multiplier
print "New starting value for search", base
multiplier = 1.0
print "Answer :: ", base * multiplier
|
Jiri-Kremser/euler
|
243/Problem.py
|
Python
|
gpl-2.0
| 980
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Doriancoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test running doriancoind with the -rpcbind and -rpcallowip options."""
import socket
import sys
from test_framework.test_framework import DoriancoinTestFramework, SkipTest
from test_framework.util import *
from test_framework.netutil import *
class RPCBindTest(DoriancoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def setup_network(self):
self.add_nodes(self.num_nodes, None)
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
'''
Start a node with requested rpcallowip and rpcbind parameters,
then try to connect, and check if the set of bound addresses
matches the expected set.
'''
self.log.info("Bind test for %s" % str(addresses))
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
base_args = ['-disablewallet', '-nolisten']
if allow_ips:
base_args += ['-rpcallowip=' + x for x in allow_ips]
binds = ['-rpcbind='+addr for addr in addresses]
self.nodes[0].rpchost = connect_to
self.start_node(0, base_args + binds)
pid = self.nodes[0].process.pid
assert_equal(set(get_bind_addrs(pid)), set(expected))
self.stop_nodes()
def run_allowip_test(self, allow_ips, rpchost, rpcport):
'''
Start a node with rpcallow IP, and request getnetworkinfo
at a non-localhost IP.
'''
self.log.info("Allow IP test for %s:%d" % (rpchost, rpcport))
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
self.nodes[0].rpchost = None
self.start_nodes([base_args])
# connect to node through non-loopback interface
node = get_rpc_proxy(rpc_url(get_datadir_path(self.options.tmpdir, 0), 0, "%s:%d" % (rpchost, rpcport)), 0, coveragedir=self.options.coveragedir)
node.getnetworkinfo()
self.stop_nodes()
def run_test(self):
# due to OS-specific network stats queries, this test works only on Linux
if not sys.platform.startswith('linux'):
raise SkipTest("This test can only be run on linux.")
# find the first non-loopback interface for testing
non_loopback_ip = None
for name,ip in all_interfaces():
if ip != '127.0.0.1':
non_loopback_ip = ip
break
if non_loopback_ip is None:
raise SkipTest("This test requires at least one non-loopback IPv4 interface.")
try:
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
s.connect(("::1",1))
            s.close()
except OSError:
raise SkipTest("This test requires IPv6 support.")
self.log.info("Using interface %s for testing" % non_loopback_ip)
defaultport = rpc_port(0)
# check default without rpcallowip (IPv4 and IPv6 localhost)
self.run_bind_test(None, '127.0.0.1', [],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check default with rpcallowip (IPv6 any)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
[('::0', defaultport)])
# check only IPv4 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
[('127.0.0.1', defaultport)])
# check only IPv4 localhost (explicit) with alternative port
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
[('127.0.0.1', 32171)])
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
# check only IPv6 localhost (explicit)
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
[('::1', defaultport)])
# check both IPv4 and IPv6 localhost (explicit)
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
[('127.0.0.1', defaultport), ('::1', defaultport)])
# check only non-loopback interface
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
[(non_loopback_ip, defaultport)])
# Check that with invalid rpcallowip, we are denied
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
assert_raises_rpc_error(-342, "non-JSON HTTP response with '403 Forbidden' from server", self.run_allowip_test, ['1.1.1.1'], non_loopback_ip, defaultport)
if __name__ == '__main__':
RPCBindTest().main()
|
doriancoins/doriancoin
|
test/functional/rpc_bind.py
|
Python
|
mit
| 4,886
|
# -*- coding: utf-8 -*-
##
## Some functions about dates
##
## This file is part of Invenio.
## Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
API for date conversion and date related GUI creation.
Lexicon
datetext:
textual format => 'YEAR-MONTH-DAY HOUR:MINUTE:SECOND'
e.g. '2005-11-16 15:11:44'
default value: '0000-00-00 00:00:00'
datestruct:
tuple format => see http://docs.python.org/lib/module-time.html
(YEAR, MONTH, DAY, HOUR, MINUTE, SECOND, WEEKDAY, YEARDAY, DAYLIGHT)
e.g. (2005, 11, 16, 15, 11, 44, 2, 320, 0)
default value: (0, 0, 0, 0, 0, 0, 0, 0, 0)
dategui:
textual format for output => 'DAY MONTH YEAR, HOUR:MINUTE'
e.g. '16 nov 2005, 15:11'
default value: _("N/A")
"""
__revision__ = "$Id$"
import re
import time
from datetime import date as real_date, \
datetime as real_datetime, \
time as real_time, \
timedelta
from invenio.config import CFG_SITE_LANG
from invenio.messages import gettext_set_language
try:
from mx.DateTime import Parser
CFG_HAS_EGENIX_DATETIME = True
except ImportError:
CFG_HAS_EGENIX_DATETIME = False
try:
import dateutil
if not hasattr(dateutil, '__version__') or dateutil.__version__ != '2.0':
from dateutil import parser as du_parser
from dateutil.relativedelta import relativedelta as du_delta
from dateutil import relativedelta
GOT_DATEUTIL = True
else:
from warnings import warn
warn("Not using dateutil module because the version %s is not compatible with Python-2.x" % dateutil.__version__)
GOT_DATEUTIL = False
except ImportError:
# Ok, no date parsing is possible, but continue anyway,
# since this package is only recommended, not mandatory.
GOT_DATEUTIL = False
datetext_default = '0000-00-00 00:00:00'
datestruct_default = (0, 0, 0, 0, 0, 0, 0, 0, 0)
datetext_format = "%Y-%m-%d %H:%M:%S"
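# The date/datetime subclasses below delegate strftime() to the module-level
# strftime() defined further down, which (unlike time.strftime() on many
# platforms) also handles years before 1900.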
class date(real_date):
def strftime(self, fmt):
return strftime(fmt, self)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(fmt, self)
    @classmethod
    def combine(cls, date, time):
        return cls(date.year, date.month, date.day, time.hour, time.minute,
                   time.second, time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
def convert_datetext_to_dategui(datetext, ln=CFG_SITE_LANG, secs=False):
"""
Convert:
'2005-11-16 15:11:57' => '16 nov 2005, 15:11'
Or optionally with seconds:
'2005-11-16 15:11:57' => '16 nov 2005, 15:11:57'
Month is internationalized
"""
try:
datestruct = convert_datetext_to_datestruct(datetext)
if datestruct == datestruct_default:
raise ValueError
month = get_i18n_month_name(datestruct[1], ln=ln)
if secs:
output_format = "%d " + month + " %Y, %H:%M:%S"
else:
output_format = "%d " + month + " %Y, %H:%M"
return strftime(output_format, datestruct)
except:
_ = gettext_set_language(ln)
return _("N/A")
def convert_datetext_to_datestruct(datetext):
"""
Convert:
    '2005-11-16 15:11:57' => (2005, 11, 16, 15, 11, 57, 2, 320, 0)
"""
try:
return time.strptime(datetext, datetext_format)
except:
return datestruct_default
def convert_datestruct_to_dategui(datestruct, ln=CFG_SITE_LANG):
"""
Convert:
(2005, 11, 16, 15, 11, 44, 2, 320, 0) => '16 nov 2005, 15:11'
Month is internationalized
"""
try:
if datestruct[0] and datestruct[1] and datestruct[2]:
month = get_i18n_month_name(datestruct[1], ln=ln)
output_format = "%d " + month + " %Y, %H:%M"
return strftime(output_format, datestruct)
else:
raise ValueError
except:
_ = gettext_set_language(ln)
return _("N/A")
def convert_datestruct_to_datetext(datestruct):
"""
Convert:
    (2005, 11, 16, 15, 11, 44, 2, 320, 0) => '2005-11-16 15:11:44'
"""
try:
return strftime(datetext_format, datestruct)
except:
return datetext_default
def convert_datecvs_to_datestruct(datecvs):
"""
Convert CVS $Date$ and
$Id$
formats into datestruct. Useful for later conversion of Last
updated timestamps in the page footers.
    Example: '$Date$' => (2006, 9, 20, 19, 27, 11, 2, 263, -1)
"""
try:
if datecvs.startswith("$Id"):
date_time = ' '.join(datecvs.split(" ")[3:5])
return time.strptime(date_time, '%Y/%m/%d %H:%M:%S')
else:
# here we have to use '$' + 'Date...' here, otherwise the CVS
# commit would erase this time format to put commit date:
return time.strptime(datecvs, '$' + 'Date: %Y/%m/%d %H:%M:%S $')
except ValueError:
return datestruct_default
def get_datetext(year, month, day):
"""
year=2005, month=11, day=16 => '2005-11-16 00:00:00'
"""
input_format = "%Y-%m-%d"
try:
datestruct = time.strptime("%i-%i-%i"% (year, month, day), input_format)
return strftime(datetext_format, datestruct)
except:
return datetext_default
def get_datestruct(year, month, day):
"""
year=2005, month=11, day=16 => (2005, 11, 16, 0, 0, 0, 2, 320, -1)
"""
input_format = "%Y-%m-%d"
try:
return time.strptime("%i-%i-%i"% (year, month, day), input_format)
    except (ValueError, TypeError):
return datestruct_default
def get_i18n_day_name(day_nb, display='short', ln=CFG_SITE_LANG):
"""
get the string representation of a weekday, internationalized
    @param day_nb: weekday number, Unix-like (0=Sunday, ..., 6=Saturday)
@param ln: language for output
@return: the string representation of the day
"""
_ = gettext_set_language(ln)
if display == 'short':
days = {0: _("Sun"),
1: _("Mon"),
2: _("Tue"),
3: _("Wed"),
4: _("Thu"),
5: _("Fri"),
6: _("Sat")}
else:
days = {0: _("Sunday"),
1: _("Monday"),
2: _("Tuesday"),
3: _("Wednesday"),
4: _("Thursday"),
5: _("Friday"),
6: _("Saturday")}
return days[day_nb]
def get_i18n_month_name(month_nb, display='short', ln=CFG_SITE_LANG):
"""
get a non-numeric representation of a month, internationalized.
    @param month_nb: month number, 1-based (1=Jan, ..., 12=Dec)
@param ln: language for output
@return: the string representation of month
"""
_ = gettext_set_language(ln)
if display == 'short':
months = {0: _("Month"),
1: _("Jan"),
2: _("Feb"),
3: _("Mar"),
4: _("Apr"),
5: _("May"),
6: _("Jun"),
7: _("Jul"),
8: _("Aug"),
9: _("Sep"),
10: _("Oct"),
11: _("Nov"),
12: _("Dec")}
else:
months = {0: _("Month"),
1: _("January"),
2: _("February"),
3: _("March"),
4: _("April"),
5: _("May "), # trailing space distinguishes short/long form
6: _("June"),
7: _("July"),
8: _("August"),
9: _("September"),
10: _("October"),
11: _("November"),
12: _("December")}
return months[month_nb].strip()
def create_day_selectbox(name, selected_day=0, ln=CFG_SITE_LANG):
"""
Creates an HTML menu for day selection. (0..31 values).
@param name: name of the control (i.e. name of the var you'll get)
@param selected_day: preselect a day. Use 0 for the label 'Day'
@param ln: language of the menu
    @return: html as string
"""
_ = gettext_set_language(ln)
out = "<select name=\"%s\">\n"% name
for i in range(0, 32):
out += " <option value=\"%i\""% i
if (i == selected_day):
out += " selected=\"selected\""
if (i == 0):
out += ">%s</option>\n"% _("Day")
else:
out += ">%i</option>\n"% i
out += "</select>\n"
return out
def create_month_selectbox(name, selected_month=0, ln=CFG_SITE_LANG):
"""
Creates an HTML menu for month selection. Value of selected field is numeric
@param name: name of the control (your form will be sent with name=value...)
@param selected_month: preselect a month. use 0 for the Label 'Month'
@param ln: language of the menu
@return: html as string
"""
out = "<select name=\"%s\">\n"% name
for i in range(0, 13):
out += "<option value=\"%i\""% i
if (i == selected_month):
out += " selected=\"selected\""
out += ">%s</option>\n"% get_i18n_month_name(i, ln)
out += "</select>\n"
return out
def create_year_inputbox(name, value=0):
"""
Creates an HTML field (simple input) for year selection.
@param name: name of the control (i.e. name of the variable you'll get)
@param value: prefilled value (int)
@return: html as string
"""
out = "<input type=\"text\" name=\"%s\" value=\"%i\" maxlength=\"4\" size=\"4\"/>\n"% (name, value)
return out
def create_year_selectbox(name, from_year=-1, length=10, selected_year=0, ln=CFG_SITE_LANG):
"""
Creates an HTML menu (dropdownbox) for year selection.
    @param name: name of the control (i.e. name of the variable you'll get)
    @param from_year: year at which to begin; if <0, the current year is used
@param length: number of items in menu
@param selected_year: initial selected year (if in range), else: label is selected
@param ln: language
@return: html as string
"""
_ = gettext_set_language(ln)
if from_year < 0:
from_year = time.localtime()[0]
out = "<select name=\"%s\">\n"% name
out += ' <option value="0"'
if selected_year == 0:
out += ' selected="selected"'
out += ">%s</option>\n"% _("Year")
for i in range(from_year, from_year + length):
out += "<option value=\"%i\""% i
if (i == selected_year):
out += " selected=\"selected\""
out += ">%i</option>\n"% i
out += "</select>\n"
return out
_RE_RUNTIMELIMIT_FULL = re.compile(r"(?P<weekday>[a-z]+)?\s*((?P<begin>\d\d?(:\d\d?)?)(-(?P<end>\d\d?(:\d\d?)?))?)?", re.I)
_RE_RUNTIMELIMIT_HOUR = re.compile(r'(?P<hours>\d\d?)(:(?P<minutes>\d\d?))?')
def parse_runtime_limit(value):
"""
Parsing CLI option for runtime limit, supplied as VALUE.
    Value could be something like: Sunday 23:00-05:00, the format being
    [Wee[kday]] [hh[:mm][-hh[:mm]]].
    The function returns two valid time ranges: the first may lie in the past,
    contain the present, or lie entirely in the future; the second is always
    in the future.
"""
def extract_time(value):
value = _RE_RUNTIMELIMIT_HOUR.search(value).groupdict()
        return timedelta(hours=int(value['hours']),
                         minutes=int(value['minutes'] or 0))
def extract_weekday(value):
key = value[:3].lower()
try:
return {
'mon' : 0,
'tue' : 1,
'wed' : 2,
'thu' : 3,
'fri' : 4,
'sat' : 5,
'sun' : 6,
}[key]
except KeyError:
raise ValueError("%s is not a good weekday name." % value)
today = date.today()
g = _RE_RUNTIMELIMIT_FULL.search(value)
if not g:
raise ValueError('"%s" does not seem to be correct format for parse_runtime_limit() [Wee[kday]] [hh[:mm][-hh[:mm]]]).' % value)
pieces = g.groupdict()
if pieces['weekday'] is None:
## No weekday specified. So either today or tomorrow
first_occasion_day = timedelta(days=0)
next_occasion_delta = timedelta(days=1)
else:
## Weekday specified. So either this week or next
weekday = extract_weekday(pieces['weekday'])
days = (weekday - today.weekday()) % 7
first_occasion_day = timedelta(days=days)
next_occasion_delta = timedelta(days=7)
if pieces['begin'] is None:
pieces['begin'] = '00:00'
if pieces['end'] is None:
pieces['end'] = '00:00'
beginning_time = extract_time(pieces['begin'])
ending_time = extract_time(pieces['end'])
if not ending_time:
ending_time = beginning_time + timedelta(days=1)
elif beginning_time and ending_time and beginning_time > ending_time:
ending_time += timedelta(days=1)
start_time = real_datetime.combine(today, real_time(hour=0, minute=0))
current_range = (
start_time + first_occasion_day + beginning_time,
start_time + first_occasion_day + ending_time
)
if datetime.now() > current_range[1]:
current_range = tuple(t + next_occasion_delta for t in current_range)
future_range = (
current_range[0] + next_occasion_delta,
current_range[1] + next_occasion_delta
)
return current_range, future_range
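# A minimal usage sketch for parse_runtime_limit() (illustrative input; the
# exact datetimes returned depend on the moment of the call):
#
#   current, future = parse_runtime_limit('Sunday 23:00-05:00')
#   # current is a (start, end) pair of datetimes whose end has not yet
#   # passed; future is the same range one occurrence (here 7 days) later:
#   assert future[0] - current[0] == timedelta(days=7)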
def guess_datetime(datetime_string):
"""
    Try to guess the datetime contained in a string of unknown format.
@param datetime_string: the datetime representation.
@type datetime_string: string
@return: the guessed time.
@rtype: L{time.struct_time}
@raises ValueError: in case it's not possible to guess the time.
"""
if CFG_HAS_EGENIX_DATETIME:
try:
return Parser.DateTimeFromString(datetime_string).timetuple()
except ValueError:
pass
    else:
        for format in (None, '%x %X', '%X %x', '%Y-%m-%dT%H:%M:%SZ'):
            try:
                if format is None:
                    # None selects time.strptime's default format
                    return time.strptime(datetime_string)
                return time.strptime(datetime_string, format)
            except (ValueError, TypeError):
                pass
raise ValueError("It is not possible to guess the datetime format of %s" % datetime_string)
def get_time_estimator(total):
"""
    Given the total number of items to compute, return a function that,
    when called each time an item is computed (or each time `step` items
    are computed), estimates the remaining work. The returned function
    yields two values: the number of seconds still needed to compute the
    whole set, and the epoch timestamp at which the operation is expected
    to end.
"""
t1 = time.time()
count = [0]
def estimate_needed_time(step=1):
count[0] += step
t2 = time.time()
t3 = 1.0 * (t2 - t1) / count[0] * (total - count[0])
        return t3, t2 + t3
return estimate_needed_time
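# A minimal usage sketch for get_time_estimator() (`items` and `process` are
# hypothetical names, not part of this module):
#
#   estimate = get_time_estimator(len(items))
#   for item in items:
#       process(item)                    # per-item work
#       seconds_left, eta = estimate()   # eta is an epoch timestamp
#
# Call estimate(step=n) instead when n items are handled per iteration.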
def pretty_date(ugly_time=False, ln=CFG_SITE_LANG):
"""
Get a datetime object or a int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc.
"""
_ = gettext_set_language(ln)
now = real_datetime.now()
if isinstance(ugly_time, basestring):
#try to convert it to epoch timestamp
date_format = '%Y-%m-%d %H:%M:%S.%f'
try:
ugly_time = time.strptime(ugly_time, date_format)
ugly_time = int(time.mktime(ugly_time))
except ValueError:
# doesn't match format, let's try to guess
            try:
                # guess_datetime returns a time.struct_time, so convert
                # it to an epoch timestamp via mktime
                ugly_time = guess_datetime(ugly_time)
            except ValueError:
                return ugly_time
            ugly_time = int(time.mktime(ugly_time))
    # Initialize the time period difference; the falsy check comes first
    # because bool is a subclass of int
    if not ugly_time:
        diff = now - now
    elif isinstance(ugly_time, int):
        diff = now - real_datetime.fromtimestamp(ugly_time)
    elif isinstance(ugly_time, real_datetime):
        diff = now - ugly_time
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 10:
return _("just now")
if second_diff < 60:
return str(second_diff) + _(" seconds ago")
if second_diff < 120:
return _("a minute ago")
if second_diff < 3600:
return str(second_diff / 60) + _(" minutes ago")
if second_diff < 7200:
return _("an hour ago")
if second_diff < 86400:
return str(second_diff / 3600) + _(" hours ago")
if day_diff == 1:
return _("Yesterday")
if day_diff < 7:
return str(day_diff) + _(" days ago")
if day_diff < 31:
        if day_diff / 7 == 1:
return _("Last week")
else:
return str(day_diff / 7) + _(" weeks ago")
if day_diff < 365:
if day_diff / 30 == 1:
return _("Last month")
else:
return str(day_diff / 30) + _(" months ago")
if day_diff / 365 == 1:
return _("Last year")
else:
return str(day_diff / 365) + _(" years ago")
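# Illustrative pretty_date() outputs (assuming English locale; string input
# must match '%Y-%m-%d %H:%M:%S.%f' or a format guess_datetime() can handle):
#   pretty_date(int(time.time()) - 30)                    => '30 seconds ago'
#   pretty_date(real_datetime.now() - timedelta(days=1))  => 'Yesterday'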
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
        i = j + 1
return sites
def strftime(fmt, dt):
if not isinstance(dt, real_date):
dt = datetime(dt.tm_year, dt.tm_mon, dt.tm_mday, dt.tm_hour, dt.tm_min, dt.tm_sec)
if dt.year >= 1900:
return time.strftime(fmt, dt.timetuple())
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle" + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = time.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = time.strftime(fmt, (year+28,) + timetuple[1:])
sites2 = _findall(s2, str(year+28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site+4:]
return s
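# Example: time.strftime() rejects years before 1900 on many platforms, so
# the wrapper above maps such dates onto the 28-year calendar repeat cycle
# and splices the real year back into the result, preserving weekdays, e.g.:
#
#   strftime('%Y-%m-%d %A', date(1805, 11, 16))  # => '1805-11-16 Saturday'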
def strptime(date_string, fmt):
return real_datetime(*(time.strptime(date_string, fmt)[:6]))
|
kntem/webdeposit
|
modules/miscutil/lib/dateutils.py
|
Python
|
gpl-2.0
| 19,589
|
import logging
import numpy as np
import os
import pkgutil
import pytest
import aacgmv2
# @pytest.mark.skip(reason="Not meant to be run alone")
class TestModuleStructure:
def setup(self):
# Define the acceptable output
if not hasattr(self, "reference_list"):
self.reference_list = list()
if not hasattr(self, "module_name"):
self.module_name = None
def teardown(self):
del self.reference_list, self.module_name
def test_module_existence(self):
"""Test the module existence"""
# Get the dictionary of functions for the specified module
retrieved_dict = aacgmv2.__dict__
# Submodules only go one level down
if self.module_name is None:
assert True
elif self.module_name != "aacgmv2":
assert self.module_name in retrieved_dict.keys()
else:
assert isinstance(retrieved_dict, dict)
def test_module_functions(self):
"""Test module function structure"""
# Get the dictionary of functions for the specified module
retrieved_dict = aacgmv2.__dict__
if self.module_name is None:
assert True
else:
if self.module_name != "aacgmv2":
assert self.module_name in retrieved_dict.keys()
retrieved_dict = retrieved_dict[self.module_name].__dict__
# Get the functions attached to this module and make sure they
# are supposed to be there
retrieved_list = list()
for name in retrieved_dict.keys():
if callable(retrieved_dict[name]):
assert name in self.reference_list
retrieved_list.append(name)
# Test to see if all of the modules match
assert len(retrieved_list) == len(self.reference_list)
def test_modules(self):
"""Test module submodule structure"""
if self.module_name is None:
assert True
else:
# Get the submodules and make sure they are supposed to be there
retrieved_list = list()
for imp, name, ispkg in pkgutil.iter_modules(path=aacgmv2.__path__):
assert name in self.reference_list
retrieved_list.append(name)
# Test to see if all of the modules match
assert len(retrieved_list) == len(self.reference_list)
class TestDepStructure(TestModuleStructure):
def setup(self):
self.module_name = None
self.reference_list = ["subsol", "igrf_dipole_axis", "gc2gd_lat"]
def teardown(self):
del self.module_name, self.reference_list
def test_dep_existence(self):
""" Test the deprecated functions"""
self.module_name = "deprecated"
self.test_module_existence()
def test_dep_functions(self):
""" Test the deprecated functions"""
self.module_name = "deprecated"
self.test_module_functions()
class TestUtilsStructure(TestModuleStructure):
def setup(self):
self.module_name = None
self.reference_list = ["subsol", "igrf_dipole_axis", "gc2gd_lat"]
def teardown(self):
del self.module_name, self.reference_list
    def test_utils_existence(self):
        """ Test the utils submodule existence"""
self.module_name = "utils"
self.test_module_existence()
    def test_utils_functions(self):
""" Test the utility functions"""
self.module_name = "utils"
self.test_module_functions()
class TestCStructure(TestModuleStructure):
def setup(self):
self.module_name = None
self.reference_list = ["set_datetime", "convert", "inv_mlt_convert",
"inv_mlt_convert_yrsec", "mlt_convert",
"mlt_convert_yrsec", "inv_mlt_convert_arr",
"mlt_convert_arr", "convert_arr"]
def teardown(self):
del self.module_name, self.reference_list
def test_c_existence(self):
""" Test the C module existence"""
self.module_name = "_aacgmv2"
self.test_module_existence()
def test_c_functions(self):
""" Test the C functions"""
self.module_name = "_aacgmv2"
self.test_module_functions()
class TestPyStructure(TestModuleStructure):
def setup(self):
self.module_name = None
self.reference_list = ["convert_bool_to_bit", "convert_str_to_bit",
"convert_mlt", "convert_latlon", "test_height",
"convert_latlon_arr", "get_aacgm_coord",
"get_aacgm_coord_arr", "set_coeff_path",
"test_time"]
def teardown(self):
del self.module_name, self.reference_list
def test_py_existence(self):
""" Test the python module existence"""
self.module_name = "wrapper"
self.test_module_existence()
def test_py_functions(self):
""" Test the python functions"""
self.module_name = "wrapper"
self.test_module_functions()
class TestTopStructure(TestModuleStructure):
def setup(self):
self.module_name = None
self.reference_list = list()
def teardown(self):
del self.module_name, self.reference_list
def test_top_existence(self):
""" Test the top level existence"""
self.module_name = "aacgmv2"
self.test_module_existence()
def test_top_functions(self):
""" Test the deprecated functions"""
self.module_name = "aacgmv2"
self.reference_list = ["convert_bool_to_bit", "convert_str_to_bit",
"convert_mlt", "convert_latlon",
"convert_latlon_arr", "get_aacgm_coord",
"get_aacgm_coord_arr"]
self.test_module_functions()
def test_top_modules(self):
""" Test the deprecated functions"""
self.module_name = "aacgmv2"
self.reference_list = ["_aacgmv2", "wrapper", "utils",
"deprecated", "__main__"]
self.test_modules()
class TestTopVariables:
def setup(self):
self.alt_limits = {"coeff": 2000.0, "trace": 6378.0}
self.coeff_file = {"coeff": os.path.join("aacgmv2", "aacgmv2",
"aacgm_coeffs",
"aacgm_coeffs-13-"),
"igrf": os.path.join("aacgmv2", "aacgmv2",
"magmodel_1590-2020.txt")}
def teardown(self):
del self.alt_limits, self.coeff_file
@pytest.mark.parametrize("env_var,fkey",
[(aacgmv2.AACGM_v2_DAT_PREFIX, "coeff"),
(aacgmv2.IGRF_COEFFS, "igrf")])
def test_top_parameters(self, env_var, fkey):
"""Test module constants"""
if env_var.find(self.coeff_file[fkey]) < 0:
raise AssertionError("Bad env variable: {:} not {:}".format(
self.coeff_file[fkey], env_var))
@pytest.mark.parametrize("alt_var,alt_ref",
[(aacgmv2.high_alt_coeff, "coeff"),
(aacgmv2.high_alt_trace, "trace")])
def test_high_alt_variables(self, alt_var, alt_ref):
""" Test that module altitude limits exist and are appropriate"""
if not isinstance(alt_var, type(self.alt_limits[alt_ref])):
raise TypeError("Altitude limit variable isn't a float")
np.testing.assert_almost_equal(alt_var, self.alt_limits[alt_ref],
decimal=4)
def test_module_logger(self):
""" Test the module logger instance"""
if not isinstance(aacgmv2.logger, logging.Logger):
raise TypeError("Logger incorrect type")
|
cmeeren/aacgmv2
|
aacgmv2/tests/test_struct_aacgmv2.py
|
Python
|
mit
| 7,911
|
#!/usr/bin/env python3
# dirtool.py - diff tool for directories
# Copyright (C) 2018 Ingo Ruhnke <grumbel@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import signal
import sys
from PyQt5.QtCore import QCoreApplication, QFileSystemWatcher
def directory_changed(path):
print("directory_changed: {}".format(path))
def file_changed(path):
print("file_changed: {}".format(path))
def main(argv):
signal.signal(signal.SIGINT, signal.SIG_DFL)
app = QCoreApplication([])
watcher = QFileSystemWatcher()
print("Watching /tmp/")
watcher.addPath("/tmp/")
watcher.addPath("/tmp/foo")
# Files have to be watched specifically for this to trigger.
# Deleting and recreating a file makes this no longer trigger.
watcher.fileChanged.connect(file_changed)
# This triggers on file creation and deletion
watcher.directoryChanged.connect(directory_changed)
print("files:", watcher.files())
print("directories:", watcher.directories())
sys.exit(app.exec())
if __name__ == "__main__":
main(sys.argv)
# EOF #
|
Grumbel/dirtool
|
experiments/qnotify/qnotify.py
|
Python
|
gpl-3.0
| 1,673
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\MillerLab\Desktop\database-development\Views\ui_dialog_addsitecolumn.ui'
#
# Created by: PyQt4 UI code generator 4.11.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Dialog(object):
def setupUi(self, Dialog):
Dialog.setObjectName(_fromUtf8("Dialog"))
Dialog.resize(529, 259)
Dialog.setStyleSheet(_fromUtf8(".QLabel{\n"
" background: None;\n"
"}\n"
".QComboBox {\n"
" border: 1px solid gray;\n"
" border-radius: 7px;\n"
" padding: 2px;\n"
" padding-left: 15px;\n"
" background: #EEEEEE;\n"
"}\n"
".QFrame, .QWidget{\n"
" border-radius: 7;\n"
" background: white;\n"
"} \n"
"\n"
".QLineEdit{\n"
" padding: 1px;\n"
" border-style: solid;\n"
" border: 1px solid black;\n"
" border-radius: 8px;\n"
"}\n"
"\n"
".QPushButton {\n"
" color: black;\n"
" background: #EEEEEE;\n"
" border-width: 1px;\n"
" border-color: black;\n"
" border-style: solid;\n"
" border-radius: 7;\n"
" margin-left: 5px;\n"
" margin-right:5px; \n"
" padding-left: 5px;\n"
" padding-right: 5px;\n"
" padding-top: 3px;\n"
" padding-bottom: 3px;\n"
"}\n"
"\n"
"QTabWidget::tab-bar {\n"
" left: 5px; /* move to the right by 5px */\n"
"}\n"
"/* Style the tab using the tab sub-control. Note that it reads QTabBar _not_ QTabWidget */\n"
"\n"
"QTabBar::tab {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #E1E1E1, stop: 0.4 #DDDDDD, stop: 0.5 #D8D8D8, stop: 1.0 #D3D3D3);\n"
" border: 2px solid #C4C4C3;\n"
" border-bottom-color: #C2C7CB; /* same as the pane color */\n"
" border-top-left-radius: 4px;\n"
" border-top-right-radius: 4px;\n"
" min-width: 8ex;\n"
" padding: 2px;\n"
"}\n"
"QTabBar::tab:selected, QTabBar::tab:hover {\n"
" background: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 #fafafa, stop: 0.4 #f4f4f4, stop: 0.5 #e7e7e7, stop: 1.0 #fafafa);\n"
"}\n"
"QTabBar::tab:selected {\n"
" border-color: #9B9B9B;\n"
" border-bottom-color: #C2C7CB; /* same as pane color */\n"
"}\n"
"QTabBar::tab:!selected {\n"
" margin-top: 2px; /* make non-selected tabs look smaller */\n"
"}\n"
"\n"
""))
self.verticalLayout_4 = QtGui.QVBoxLayout(Dialog)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.frame = QtGui.QFrame(Dialog)
self.frame.setFrameShape(QtGui.QFrame.StyledPanel)
self.frame.setFrameShadow(QtGui.QFrame.Raised)
self.frame.setObjectName(_fromUtf8("frame"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.frame)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.groupBox = QtGui.QGroupBox(self.frame)
font = QtGui.QFont()
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.groupBox.setFont(font)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.groupBox)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
spacerItem = QtGui.QSpacerItem(18, 13, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem1)
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.lnedAddsite = QtGui.QLineEdit(self.groupBox)
self.lnedAddsite.setText(_fromUtf8(""))
self.lnedAddsite.setAlignment(QtCore.Qt.AlignCenter)
self.lnedAddsite.setObjectName(_fromUtf8("lnedAddsite"))
self.horizontalLayout.addWidget(self.lnedAddsite)
self.btnAddsite = QtGui.QPushButton(self.groupBox)
self.btnAddsite.setObjectName(_fromUtf8("btnAddsite"))
self.horizontalLayout.addWidget(self.btnAddsite)
self.horizontalLayout_2.addLayout(self.horizontalLayout)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem2)
self.verticalLayout.addLayout(self.horizontalLayout_2)
spacerItem3 = QtGui.QSpacerItem(18, 13, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout.addItem(spacerItem3)
self.horizontalLayout_3.addLayout(self.verticalLayout)
self.verticalLayout_2.addWidget(self.groupBox)
self.verticalLayout_3.addWidget(self.frame)
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem4)
self.btnSaveClose = QtGui.QPushButton(Dialog)
self.btnSaveClose.setObjectName(_fromUtf8("btnSaveClose"))
self.horizontalLayout_4.addWidget(self.btnSaveClose)
self.btnCancel = QtGui.QPushButton(Dialog)
self.btnCancel.setObjectName(_fromUtf8("btnCancel"))
self.horizontalLayout_4.addWidget(self.btnCancel)
self.verticalLayout_3.addLayout(self.horizontalLayout_4)
self.verticalLayout_4.addLayout(self.verticalLayout_3)
self.retranslateUi(Dialog)
QtCore.QMetaObject.connectSlotsByName(Dialog)
def retranslateUi(self, Dialog):
Dialog.setWindowTitle(_translate("Dialog", "Dialog", None))
self.groupBox.setTitle(_translate("Dialog", "Enter Value for New Column Records", None))
self.lnedAddsite.setPlaceholderText(_translate("Dialog", "Label", None))
self.btnAddsite.setText(_translate("Dialog", "Submit", None))
self.btnSaveClose.setText(_translate("Dialog", "Save && Close", None))
self.btnCancel.setText(_translate("Dialog", "Cancel", None))
|
bibsian/database-development
|
Views/ui_dialog_addsitecolumn.py
|
Python
|
mit
| 6,845
|
from django.conf.urls import url
from downloads import views
urlpatterns = [
url(r'^$', views.index, name='downloads'),
]
|
FSavoy/visuo-server
|
downloads/urls.py
|
Python
|
bsd-3-clause
| 127
|