| code (string, 3 to 1.05M chars) | repo_name (string, 5 to 104 chars) | path (string, 4 to 251 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 3 to 1.05M) |
|---|---|---|---|---|---|
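Each row that follows pairs a source file (`code`) with its repository, path, language, license, and byte size. A minimal sketch of consuming such a dump, assuming it comes from a Hugging Face-style dataset with exactly these columns (the dataset id `org/python-files` is hypothetical):

from datasets import load_dataset

ds = load_dataset("org/python-files", split="train")  # hypothetical dataset id
# Keep only small, permissively licensed files.
small = ds.filter(lambda row: row["license"] in ("mit", "bsd-3-clause") and row["size"] < 10_000)
print(small[0]["repo_name"], small[0]["path"])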
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.
""" test qibuild init """
import qisys.script
import pytest
def test_works_from_an_empty_dir(tmpdir, monkeypatch):
''' positive test '''
monkeypatch.chdir(tmpdir)
qisys.script.run_action("qibuild.actions.init")
assert tmpdir.join(".qi").check(dir=True)
def test_fails_if_qi_dir(tmpdir, monkeypatch):
''' negative test '''
tmpdir.mkdir(".qi")
monkeypatch.chdir(tmpdir)
# pylint: disable-msg=E1101
with pytest.raises(Exception) as err:
qisys.script.run_action("qibuild.actions.init")
assert ".qi directory" in str(err.value)
|
dmerejkowsky/qibuild
|
python/qibuild/test/test_qibuild_init.py
|
Python
|
bsd-3-clause
| 747
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __init__.py
"""
Test suite.
Copyright (c) 2020, David Hoffman
"""
|
david-hoffman/pyOTF
|
tests/__init__.py
|
Python
|
apache-2.0
| 115
|
# #
# Copyright 2013-2020 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# https://github.com/easybuilders/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Unit tests for easyconfig/format/format.py
@author: Stijn De Weirdt (Ghent University)
"""
import sys
from easybuild.framework.easyconfig.format.format import FORMAT_VERSION_HEADER_TEMPLATE, FORMAT_VERSION_REGEXP
from test.framework.utilities import EnhancedTestCase, TestLoaderFiltered
from unittest import TextTestRunner
class EasyConfigFormatTest(EnhancedTestCase):
"""Test the parser"""
def test_parser_version_regex(self):
"""Trivial parser test"""
version = {'major': 1, 'minor': 0}
txt = FORMAT_VERSION_HEADER_TEMPLATE % version
res = FORMAT_VERSION_REGEXP.search(txt).groupdict()
self.assertEqual(version['major'], int(res['major']))
self.assertEqual(version['minor'], int(res['minor']))
def suite():
""" returns all the testcases in this module """
return TestLoaderFiltered().loadTestsFromTestCase(EasyConfigFormatTest, sys.argv[1:])
if __name__ == '__main__':
res = TextTestRunner(verbosity=1).run(suite())
sys.exit(len(res.failures))
|
pescobar/easybuild-framework
|
test/framework/easyconfigformat.py
|
Python
|
gpl-2.0
| 2,082
|
'''
Created by auto_sdk on 2015.04.21
'''
from aliyun.api.base import RestApi
class Rds20140815ImportDataForSQLServerRequest(RestApi):
def __init__(self,domain='rds.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.DBInstanceId = None
self.FileName = None
def getapiname(self):
return 'rds.aliyuncs.com.ImportDataForSQLServer.2014-08-15'
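# Illustrative usage (hypothetical values; the RestApi base class is assumed
# to handle signing and the actual HTTP call):
#     req = Rds20140815ImportDataForSQLServerRequest()
#     req.DBInstanceId = 'rm-xxxxxxxx'
#     req.FileName = 'mydb_backup.bak'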
|
wanghe4096/website
|
aliyun/api/rest/Rds20140815ImportDataForSQLServerRequest.py
|
Python
|
bsd-2-clause
| 376
|
#!/usr/bin/python
import sys, getopt, re, time, getpass
from pysphere import *
#############################
def usage():
print "\n * Usage: "+sys.argv[0]+" {--verbose|--host <vcenter host> | --user <user> | --pass <password> --myvm <vmname> --action [ON|OFF|RESET]}\n"
#####
def myArgument(opts):
verbose = False
mypass = None
myaction = "STATUS"
for o, a in opts:
#print "data: %s - %s " % (o,a)
if o in ("-v", "--verbose"):
verbose = True
elif o in ("-h", "--host"):
host = a
elif o in ("-u", "--user"):
user = a
elif o in ("-p", "--pass"):
mypass = a
elif o in ("-m", "--myvm"):
myvm = a
elif o in ("-a", "--action"):
myaction = a
else:
assert False, "Unhandled option"
if not mypass:
#sys.stdout.write('Enter Password: ')
#mypass = sys.stdin.readline().strip()
mypass = getpass.getpass()
return [verbose,host,user,mypass,myvm,myaction]
#
#####
def IPAddr(vmip):
# return the first non-loopback IPv4 address from the VM's network info
for myip in vmip['ip_addresses']:
if re.match(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', myip) and myip != '127.0.0.1':
#print('IPv4 address found: %s' % myip)
return myip
return None
#############################
############# main program ################
myopts, args = getopt.getopt(sys.argv[1:], "vh:u:p:m:a:", ["verbose", "host=", "user=", "pass=", "myvm=", "action="])
if not myopts:
usage()
sys.exit()
[verbose, VChost, VCuser, VCpass, MyVM, MyAction] = myArgument(myopts)
#print ("Verbose: %s, Host: %s, User: %s, Pass: %s, Action: %s") % (verbose,VChost,VCuser,VCpass,MyAction)
#connect to vcenter
server = VIServer()
server.connect(VChost, VCuser, VCpass)
vm_data = server.get_vm_by_name(MyVM)
myName = vm_data.get_property('name')
myHost = vm_data.get_property('hostname')
myIP = vm_data.get_property('ip_address')
myOS = vm_data.get_property('guest_full_name')
myCPU = vm_data.get_property('num_cpu')
myMem = vm_data.get_property('memory_mb')
myState = vm_data.get_status(basic_status=True)
myNet = vm_data.get_property('net')
myPath = vm_data.get_property('path')
myDS, VMPath = myPath.split()
##Power Cycle
if MyAction == "ON":
if myState == "POWERED ON":
print "Name: %s and VM Current Status: %s" % (myName,myState)
#print vm_data.is_powering_on()
#print "VM is already ON"
else:
print "Name: %s and PowerCycle: %s" % (myName,"ON")
vm_data.power_on()
time.sleep(10)
NewState = vm_data.get_status(basic_status=True)
print "Name: %s and VM New Status: %s" % (myName,NewState)
elif MyAction == "OFF":
if myState == "POWERED OFF":
print "Name: %s and VM Current Status: %s" % (myName,myState)
#print vm_data.is_powering_on()
#print "VM is already OFF"
else:
print "Name: %s and PowerCycle: %s" % (myName,"OFF")
vm_data.power_off()
time.sleep(10)
NewState = vm_data.get_status(basic_status=True)
print "Name: %s and VM New Status: %s" % (myName,NewState)
elif MyAction == "RESET":
print "Name: %s and PowerCycle: %s" % (myName,"RESET")
vm_data.reset()
time.sleep(10)
NewState = vm_data.get_status(basic_status=True)
print "Name: %s and VM New Status: %s" % (myName,NewState)
elif MyAction == "STATUS":
print "Name;Hostname;IPAddr;CPU;Mem(MB);OS;State"
print "%s;%s;%s;%s;%s;%s;%s" % (myName,myHost,myIP,myCPU,myMem,myOS,myState)
else:
usage()
#disconnect
server.disconnect()
#end
|
arunbagul/openinfra
|
infra-daemon/openinfra-vcenter-vmPowerCycle.py
|
Python
|
gpl-3.0
| 3,475
|
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import os
import unittest
from azure.common import AzureHttpError
from azure.storage.blob import (
BlobBlock,
BlobBlockList,
BlockBlobService,
ContentSettings,
)
from tests.testcase import (
StorageTestCase,
TestMode,
record,
)
#------------------------------------------------------------------------------
TEST_BLOB_PREFIX = 'blob'
FILE_PATH = 'blob_input.temp.dat'
LARGE_BLOB_SIZE = 64 * 1024 + 5
#------------------------------------------------------------------------------
class StorageBlockBlobTest(StorageTestCase):
def setUp(self):
super(StorageBlockBlobTest, self).setUp()
self.bs = self._create_storage_service(BlockBlobService, self.settings)
self.container_name = self.get_resource_name('utcontainer')
if not self.is_playback():
self.bs.create_container(self.container_name)
# test chunking functionality by reducing the threshold
# for chunking and the size of each chunk, otherwise
# the tests would take too long to execute
self.bs.MAX_BLOCK_SIZE = 4 * 1024
self.bs.MAX_SINGLE_PUT_SIZE = 32 * 1024
def tearDown(self):
if not self.is_playback():
try:
self.bs.delete_container(self.container_name)
except:
pass
if os.path.isfile(FILE_PATH):
try:
os.remove(FILE_PATH)
except:
pass
return super(StorageBlockBlobTest, self).tearDown()
#--Helpers-----------------------------------------------------------------
def _get_blob_reference(self):
return self.get_resource_name(TEST_BLOB_PREFIX)
def _create_blob(self):
blob_name = self._get_blob_reference()
self.bs.create_blob_from_bytes(self.container_name, blob_name, b'')
return blob_name
def assertBlobEqual(self, container_name, blob_name, expected_data):
actual_data = self.bs.get_blob_to_bytes(container_name, blob_name)
self.assertEqual(actual_data.content, expected_data)
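# Wraps a file object and exposes only read/write (no seek/tell), so the
# tests can exercise the upload code path for non-seekable streams.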
class NonSeekableFile(object):
def __init__(self, wrapped_file):
self.wrapped_file = wrapped_file
def write(self, data):
self.wrapped_file.write(data)
def read(self, count):
return self.wrapped_file.read(count)
#--Test cases for block blobs --------------------------------------------
@record
def test_put_block(self):
# Arrange
blob_name = self._create_blob()
# Act
for i in range(5):
resp = self.bs.put_block(self.container_name,
blob_name,
'block {0}'.format(i).encode('utf-8'),
i)
self.assertIsNone(resp)
# Assert
@record
def test_put_block_unicode(self):
# Arrange
blob_name = self._create_blob()
# Act
with self.assertRaises(TypeError):
resp = self.bs.put_block(self.container_name, blob_name, u'啊齄丂狛狜', '1')
# Assert
@record
def test_put_block_with_md5(self):
# Arrange
blob_name = self._create_blob()
# Act
self.bs.put_block(self.container_name,
blob_name,
b'block',
1,
validate_content=True)
# Assert
@record
def test_put_block_list(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.bs.put_block_list(self.container_name, blob_name, block_list)
# Assert
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name)
self.assertEqual(blob.content, b'AAABBBCCC')
@record
def test_put_block_list_invalid_block_id(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
try:
block_list = [ BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='4')]
self.bs.put_block_list(self.container_name, blob_name, block_list)
self.fail()
except AzureHttpError as e:
self.assertGreaterEqual(str(e).find('specified block list is invalid'), 0)
# Assert
@record
def test_put_block_list_with_md5(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.bs.put_block_list(self.container_name, blob_name, block_list, validate_content=True)
# Assert
@record
def test_get_block_list_no_blocks(self):
# Arrange
blob_name = self._create_blob()
# Act
block_list = self.bs.get_block_list(self.container_name, blob_name, None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEqual(len(block_list.uncommitted_blocks), 0)
self.assertEqual(len(block_list.committed_blocks), 0)
@record
def test_get_block_list_uncommitted_blocks(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
# Act
block_list = self.bs.get_block_list(self.container_name, blob_name, None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEqual(len(block_list.uncommitted_blocks), 3)
self.assertEqual(len(block_list.committed_blocks), 0)
self.assertEqual(block_list.uncommitted_blocks[0].id, '1')
self.assertEqual(block_list.uncommitted_blocks[0].size, 3)
self.assertEqual(block_list.uncommitted_blocks[1].id, '2')
self.assertEqual(block_list.uncommitted_blocks[1].size, 3)
self.assertEqual(block_list.uncommitted_blocks[2].id, '3')
self.assertEqual(block_list.uncommitted_blocks[2].size, 3)
@record
def test_get_block_list_committed_blocks(self):
# Arrange
blob_name = self._get_blob_reference()
self.bs.put_block(self.container_name, blob_name, b'AAA', '1')
self.bs.put_block(self.container_name, blob_name, b'BBB', '2')
self.bs.put_block(self.container_name, blob_name, b'CCC', '3')
block_list = [BlobBlock(id='1'), BlobBlock(id='2'), BlobBlock(id='3')]
self.bs.put_block_list(self.container_name, blob_name, block_list)
# Act
block_list = self.bs.get_block_list(self.container_name, blob_name, None, 'all')
# Assert
self.assertIsNotNone(block_list)
self.assertIsInstance(block_list, BlobBlockList)
self.assertEqual(len(block_list.uncommitted_blocks), 0)
self.assertEqual(len(block_list.committed_blocks), 3)
self.assertEqual(block_list.committed_blocks[0].id, '1')
self.assertEqual(block_list.committed_blocks[0].size, 3)
self.assertEqual(block_list.committed_blocks[1].id, '2')
self.assertEqual(block_list.committed_blocks[1].size, 3)
self.assertEqual(block_list.committed_blocks[2].id, '3')
self.assertEqual(block_list.committed_blocks[2].size, 3)
@record
def test_create_blob_from_bytes_single_put(self):
# Arrange
blob_name = self._get_blob_reference()
data = b'hello world'
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_from_bytes_blob_unicode(self):
# Arrange
blob_name = self._get_blob_reference()
# Act
data = u'hello world'
with self.assertRaises(TypeError):
resp = self.bs.create_blob_from_bytes(self.container_name, blob_name, data)
# Assert
def test_create_from_bytes_blob_with_lease_id(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._create_blob()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
lease_id = self.bs.acquire_blob_lease(self.container_name, blob_name)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, lease_id=lease_id)
# Assert
blob = self.bs.get_blob_to_bytes(self.container_name, blob_name, lease_id=lease_id)
self.assertEqual(blob.content, data)
def test_create_blob_from_bytes_with_metadata(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
metadata = {'hello': 'world', 'number': '42'}
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, metadata=metadata)
# Assert
md = self.bs.get_blob_metadata(self.container_name, blob_name)
self.assertDictEqual(md, metadata)
def test_create_blob_from_bytes_with_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
content_settings=ContentSettings(
content_type='image/png',
content_language='spanish')
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
def test_create_blob_from_bytes_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress)
def test_create_blob_from_bytes_with_index(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, 3)
# Assert
self.assertEqual(data[3:], self.bs.get_blob_to_bytes(self.container_name, blob_name).content)
@record
def test_create_blob_from_bytes_with_index_and_count(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, 3, 5)
# Assert
self.assertEqual(data[3:8], self.bs.get_blob_to_bytes(self.container_name, blob_name).content)
@record
def test_create_blob_from_bytes_with_index_and_count_and_properties(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
content_settings=ContentSettings(
content_type='image/png',
content_language='spanish')
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, 3, 5, content_settings=content_settings)
# Assert
self.assertEqual(data[3:8], self.bs.get_blob_to_bytes(self.container_name, blob_name).content)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
@record
def test_create_blob_from_bytes_non_parallel(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_path(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_blob_from_path_non_parallel(self):
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(100)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_path_with_progress(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH,
progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress)
def test_create_blob_from_path_with_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings=ContentSettings(
content_type='image/png',
content_language='spanish')
self.bs.create_blob_from_path(self.container_name, blob_name, FILE_PATH, content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
def test_create_blob_from_stream_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_stream_non_seekable_chunked_upload_known_size(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
blob_size = len(data) - 66
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StorageBlockBlobTest.NonSeekableFile(stream)
self.bs.create_blob_from_stream(self.container_name, blob_name, non_seekable_file,
count=blob_size, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_non_seekable_chunked_upload_unknown_size(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
with open(FILE_PATH, 'rb') as stream:
non_seekable_file = StorageBlockBlobTest.NonSeekableFile(stream)
self.bs.create_blob_from_stream(self.container_name, blob_name,
non_seekable_file, max_connections=1)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
def test_create_blob_from_stream_with_progress_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
progress = []
def callback(current, total):
progress.append((current, total))
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream, progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress, unknown_size=True)
def test_create_blob_from_stream_chunked_upload_with_count(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
resp = self.bs.create_blob_from_stream(self.container_name, blob_name, stream, blob_size)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
def test_create_blob_from_stream_chunked_upload_with_count_and_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings=ContentSettings(
content_type='image/png',
content_language='spanish')
blob_size = len(data) - 301
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream,
blob_size, content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data[:blob_size])
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
def test_create_blob_from_stream_chunked_upload_with_properties(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
with open(FILE_PATH, 'wb') as stream:
stream.write(data)
# Act
content_settings=ContentSettings(
content_type='image/png',
content_language='spanish')
with open(FILE_PATH, 'rb') as stream:
self.bs.create_blob_from_stream(self.container_name, blob_name, stream,
content_settings=content_settings)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
properties = self.bs.get_blob_properties(self.container_name, blob_name).properties
self.assertEqual(properties.content_settings.content_type, content_settings.content_type)
self.assertEqual(properties.content_settings.content_language, content_settings.content_language)
@record
def test_create_blob_from_text(self):
# Arrange
blob_name = self._get_blob_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-8')
# Act
self.bs.create_blob_from_text(self.container_name, blob_name, text)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_blob_from_text_with_encoding(self):
# Arrange
blob_name = self._get_blob_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
# Act
self.bs.create_blob_from_text(self.container_name, blob_name, text, 'utf-16')
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
@record
def test_create_blob_from_text_with_encoding_and_progress(self):
# Arrange
blob_name = self._get_blob_reference()
text = u'hello 啊齄丂狛狜 world'
data = text.encode('utf-16')
# Act
progress = []
def callback(current, total):
progress.append((current, total))
self.bs.create_blob_from_text(self.container_name, blob_name, text, 'utf-16',
progress_callback=callback)
# Assert
self.assertBlobEqual(self.container_name, blob_name, data)
self.assert_upload_progress(len(data), self.bs.MAX_BLOCK_SIZE, progress)
def test_create_blob_from_text_chunked_upload(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_text_data(LARGE_BLOB_SIZE)
encoded_data = data.encode('utf-8')
# Act
self.bs.create_blob_from_text(self.container_name, blob_name, data)
# Assert
self.assertBlobEqual(self.container_name, blob_name, encoded_data)
@record
def test_create_blob_with_md5(self):
# Arrange
blob_name = self._get_blob_reference()
data = b'hello world'
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
validate_content=True)
# Assert
def test_create_blob_with_md5_chunked(self):
# parallel tests introduce random order of requests, can only run live
if TestMode.need_recording_file(self.test_mode):
return
# Arrange
blob_name = self._get_blob_reference()
data = self.get_random_bytes(LARGE_BLOB_SIZE)
# Act
self.bs.create_blob_from_bytes(self.container_name, blob_name, data,
validate_content=True)
# Assert
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
|
emgerner-msft/azure-storage-python
|
tests/test_block_blob.py
|
Python
|
apache-2.0
| 27,242
|
"""
A pretty-printing dump function for the ast module. The code was copied from
the ast.dump function and modified slightly to pretty-print.
Alex Leone (acleone ~AT~ gmail.com), 2010-01-30
From http://alexleone.blogspot.co.uk/2010/01/python-ast-pretty-printer.html
"""
from ast import *
def dump(node, annotate_fields=True, include_attributes=False, indent=' '):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node, level=0):
if isinstance(node, AST):
fields = [(a, _format(b, level)) for a, b in iter_fields(node)]
if include_attributes and node._attributes:
fields.extend([(a, _format(getattr(node, a), level))
for a in node._attributes])
return ''.join([
node.__class__.__name__,
'(',
', '.join(('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)),
')'])
elif isinstance(node, list):
lines = ['[']
lines.extend((indent * (level + 2) + _format(x, level + 2) + ','
for x in node))
if len(lines) > 1:
lines.append(indent * (level + 1) + ']')
else:
lines[-1] += ']'
return '\n'.join(lines)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def parseprint(code, filename="<string>", mode="exec", **kwargs):
"""Parse some code from a string and pretty-print it."""
node = parse(code, mode=mode) # An ode to the code
print(dump(node, **kwargs))
# Short name: pdp = parse, dump, print
pdp = parseprint
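# Illustrative example (not part of the original module): pdp("x = 1")
# prints the assignment's AST with nested lists spread over indented lines,
# roughly:
#     Module(body=[
#         Assign(targets=[
#             Name(id='x', ctx=Store()),
#           ], value=...),
#       ])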
def load_ipython_extension(ip):
from IPython.core.magic import Magics, magics_class, cell_magic
from IPython.core import magic_arguments
@magics_class
class AstMagics(Magics):
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-m', '--mode', default='exec',
help="The mode in which to parse the code. Can be exec (the default), "
"eval or single."
)
@cell_magic
def dump_ast(self, line, cell):
"""Parse the code in the cell, and pretty-print the AST."""
args = magic_arguments.parse_argstring(self.dump_ast, line)
parseprint(cell, mode=args.mode)
ip.register_magics(AstMagics)
if __name__ == '__main__':
import sys, tokenize
for filename in sys.argv[1:]:
print('=' * 50)
print('AST tree for', filename)
print('=' * 50)
with tokenize.open(filename) as f:
fstr = f.read()
parseprint(fstr, filename=filename, include_attributes=True)
print()
|
funkydelpueblo/pyredund
|
astpp.py
|
Python
|
gpl-2.0
| 3,317
|
# -*- coding: utf-8 -*-
"""
***************************************************************************
supervisedclassification.py
---------------------
Date : July 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'July 2013'
__copyright__ = '(C) 2013, Victor Olaya'
from processing.tests.TestData import table
def editCommands(commands):
commands[-3] = commands[-3] + ' -STATS ' + table()
return commands
|
mhugo/QGIS
|
python/plugins/processing/algs/saga/ext/supervisedclassification.py
|
Python
|
gpl-2.0
| 1,160
|
from __future__ import unicode_literals
import os
def assert_env_var_defined(name):
value = os.environ.get(name)
if value is None:
raise Exception('{0} is not defined'.format(name))
print('{name}: {value}'.format(name=name, value=value))
def setup():
assert_env_var_defined('AGRAPH_PORT')
assert_env_var_defined('AGRAPH_SSL_PORT')
|
franzinc/agraph-python
|
src/franz/openrdf/tests/__init__.py
|
Python
|
mit
| 364
|
import unittest
import VerbTrainer
import languages.spanish as es
import languages.french as fr
class TestMenus(unittest.TestCase):
def test_tense_menu_one(self):
expected = 'presente'
actual = VerbTrainer.construct_tense_menu(es._TENSES,1)
self.assertEqual(expected, actual['1'])
def test_spanish_tense_menu_five(self):
expected = ['presente',
'pretérito imperfecto',
'pretérito indefinido',
'futuro simple',
'pretérito perfecto']
actual = VerbTrainer.construct_tense_menu(es._TENSES, 5)
self.assertEqual(expected, list(actual.values()))
def test_spanish_tense_menu_all(self):
expected = es._TENSES
actual = VerbTrainer.construct_tense_menu(es._TENSES)
self.assertEqual(expected, list(actual.values()))
|
wmealem/VerbTrainer
|
tests/test_menus.py
|
Python
|
mit
| 875
|
# THIS FILE IS PART OF THE CYLC WORKFLOW ENGINE.
# Copyright (C) NIWA & British Crown (Met Office) & Contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Test the cylc.flow.host_select module with hosts.
NOTE: These tests require a remote host to work with and are skipped
unless one is provided.
NOTE: These are functional tests, for unit tests see the docstrings in
the host_select module.
"""
from shlex import quote
import socket
from subprocess import call, DEVNULL
import pytest
from cylc.flow.cfgspec.glbl_cfg import glbl_cfg
from cylc.flow.exceptions import HostSelectException
from cylc.flow.host_select import (
select_host,
select_workflow_host
)
from cylc.flow.hostuserutil import get_fqdn_by_host
local_host, local_host_aliases, _ = socket.gethostbyname_ex('localhost')
local_host_fqdn = get_fqdn_by_host(local_host)
try:
# get a suitable remote host for running tests on
# NOTE: do NOT copy this testing approach in other python tests
remote_platform = glbl_cfg().get(
['platforms', '_remote_background_shared_tcp', 'hosts'],
[]
)[0]
# don't run tests unless host is contactable
if call(
['ssh', quote(remote_platform), 'hostname'],
stdin=DEVNULL, stdout=DEVNULL, stderr=DEVNULL
):
raise KeyError('remote platform')
# get the fqdn for this host
remote_platform_fqdn = get_fqdn_by_host(remote_platform)
except (KeyError, IndexError):
pytest.skip('Remote test host not available', allow_module_level=True)
remote_platform = None
def test_remote_select():
"""Test host selection works with remote host names."""
assert select_host([remote_platform]) == (
remote_platform, remote_platform_fqdn
)
def test_remote_blacklist():
"""Test that blacklisting works with remote host names."""
# blacklist by short name
with pytest.raises(HostSelectException):
select_host(
[remote_platform],
blacklist=[remote_platform]
)
# blacklist by fqdn
with pytest.raises(HostSelectException):
select_host(
[remote_platform],
blacklist=[remote_platform_fqdn]
)
# make extra sure filters are really being applied
for _ in range(10):
assert select_host(
[remote_platform, local_host],
blacklist=[remote_platform]
) == (local_host, local_host_fqdn)
def test_remote_rankings():
"""Test that ranking evaluation works on hosts (via SSH)."""
assert select_host(
[remote_platform],
ranking_string='''
# if this test fails due to race conditions
# then you have bigger issues than a test failure
virtual_memory().available > 1
getloadavg()[0] < 500
cpu_count() > 1
disk_usage('/').free > 1
'''
) == (remote_platform, remote_platform_fqdn)
def test_remote_exclude(monkeypatch):
"""Ensure that hosts get excluded if they don't meet the rankings.
Already tested elsewhere but this double-checks that it works if more
than one host is provided to choose from."""
def mocked_get_metrics(hosts, metrics, _=None):
# pretend that ssh to remote_platform failed
return (
{f'{local_host_fqdn}': {('cpu_count',): 123}},
{}
)
monkeypatch.setattr(
'cylc.flow.host_select._get_metrics',
mocked_get_metrics
)
assert select_host(
[local_host, remote_platform],
ranking_string='''
cpu_count()
'''
) == (local_host, local_host_fqdn)
def test_remote_workflow_host_select(mock_glbl_cfg):
"""test [scheduler][run hosts]available"""
mock_glbl_cfg(
'cylc.flow.host_select.glbl_cfg',
f'''
[scheduler]
[[run hosts]]
available = {remote_platform}
'''
)
assert select_workflow_host() == (remote_platform, remote_platform_fqdn)
def test_remote_workflow_host_condemned(mock_glbl_cfg):
"""test [scheduler][run hosts]condemned hosts"""
mock_glbl_cfg(
'cylc.flow.host_select.glbl_cfg',
f'''
[scheduler]
[[run hosts]]
available = {remote_platform}, {local_host}
condemned = {remote_platform}
'''
)
for _ in range(10):
assert select_workflow_host() == (local_host, local_host_fqdn)
def test_remote_workflow_host_rankings(mock_glbl_cfg):
"""test [scheduler][run hosts]rankings"""
mock_glbl_cfg(
'cylc.flow.host_select.glbl_cfg',
f'''
[scheduler]
[[run hosts]]
available = {remote_platform}
ranking = """
# if this test fails due to race conditions
# then you are very lucky
virtual_memory().available > 123456789123456789
cpu_count() > 512
disk_usage('/').free > 123456789123456789
"""
'''
)
with pytest.raises(HostSelectException) as excinfo:
select_workflow_host()
# ensure that host selection actually evaluated rankings
assert set(excinfo.value.data[remote_platform_fqdn]) == {
'virtual_memory().available > 123456789123456789',
'cpu_count() > 512',
"disk_usage('/').free > 123456789123456789"
}
# ensure that none of the rankings passed
assert not any(excinfo.value.data[remote_platform_fqdn].values())
|
cylc/cylc
|
tests/unit/test_host_select_remote.py
|
Python
|
gpl-3.0
| 6,193
|
"""initial migration
Revision ID: 338ed9e4e95d
Revises: None
Create Date: 2014-10-13 19:22:16.038576
"""
# revision identifiers, used by Alembic.
revision = '338ed9e4e95d'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('posts', sa.Column('body_html', sa.Text(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('posts', 'body_html')
### end Alembic commands ###
|
AguNnamdi/flask_microblog
|
migrations/versions/338ed9e4e95d_initial_migration.py
|
Python
|
mit
| 592
|
#!/usr/bin/env python
# coding:utf8
from event import models as event_models
from asset import models as asset_models
from alarm import engine
def check_event_type():
if not event_models.TopType.objects.filter(name='ceph'):
event_models.TopType.objects.create(name='ceph')
if not event_models.SecondType.objects.filter(name='ceph_osd'):
event_models.SecondType.objects.create(
name='ceph_osd',
top_type_id=event_models.TopType.objects.get(name='ceph').id
)
def osd_info(hostname, content):
check_event_type()
event_dic = {
'event_content': content,
'event_type_id': event_models.SecondType.objects.get(
name='ceph_osd').id,
'level': 'INFO',
'event_node_id': asset_models.Host.objects.get(hostname=hostname).id
}
event_keystone_obj = event_models.Event(**event_dic)
event_keystone_obj.save()
def down(hostname=None):
event_dic = {
'event_content': 'ceph osd status: down',
'event_type_id': event_models.SecondType.objects.get(
name='ceph_osd').id,
'level': 'ERROR',
}
if hostname:
event_dic['event_node_id'] = \
asset_models.Host.objects.get(hostname=hostname).id
event_dic['event_content'] = '%s ceph osd status: down' % hostname
event_obj = event_models.Event(**event_dic)
event_obj.save()
al = engine.alarm_type()
al(event_obj.id, 'down')
def up(hostname=None):
event_dic = {
'event_content': 'ceph osd status: up',
'event_type_id': event_models.SecondType.objects.get(
name='ceph_osd').id,
'level': 'WARNING',
}
if hostname:
event_dic['event_node_id'] = \
asset_models.Host.objects.get(hostname=hostname).id
event_dic['event_content'] = '%s ceph osd status: up' % hostname
event_obj = event_models.Event(**event_dic)
event_obj.save()
al = engine.alarm_type()
al(event_obj.id, 'up')
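# Illustrative usage (hypothetical hostname; assumes the Django models and
# alarm engine above are configured):
#     down('ceph-node-1')  # records an ERROR event and fires the alarm hook
#     up('ceph-node-1')    # records a WARNING event when the OSD recovers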
|
zhaogaolong/oneFinger
|
event/ceph/osd.py
|
Python
|
apache-2.0
| 2,001
|
# Exercise 1
#
# Write a program that simulates a fortune cookie. The program should display one of five unique fortunes, at random,
# each time it's run.
#
import random
val = random.randint(1, 5)
if (val == 1):
print("You staying in the company till the end of times.")
elif (val == 2):
print("You will get fired from the company.")
elif (val == 3):
print("You will find something else and leave the company.")
elif (val == 4):
print("You will leave the company by your own.")
elif (val == 5):
print("You will leave the company, stay here for a few months and in meanwhile you will find something else")
|
dmartinezgarcia/Python-Programming
|
Chapter 3 - Branching and while loops/exercise_1.py
|
Python
|
gpl-2.0
| 617
|
from bottle import route, run, view, static_file, post, get, redirect, request
from bottle import jinja2_template as template
from db import Slide
from settings import STATIC_URL, STATIC_ROOT, CONFIG_ROOT
from forms import DemographicsForm
import os
__version__ = "0.2.0"
# Homebrewed JSONDB Manager on initialization
s = Slide()
s._tmpdb()
s._dynodb()
## logging functions
# Static files
@route('/static/<filename>')
def server_static(filename):
return static_file(filename, root=STATIC_ROOT)
#the homepage just uses the first slide (based on weight)
@route('/')
def index():
# get the first slide
slide = s.by_id(1)
ret = dict(STATIC_URL=STATIC_URL,
slide=slide,
nxt=s.nxt()['_metadata']['url'],
previous=s.previous()['_metadata']['url']
)
return template("slide.html", page=ret)
slide = s.by_weight()[0]
slide['_url'] = '/'
page = dict(slide=slide,config=dict(STATIC_URL=STATIC_URL))
return template("slide.html", page=page)
# Dynamic
#forms are pumped through here
#this probably needs to be worked on after the urls
@get('/input')
def posted_slide():
form = DemographicsForm()
ret = {}
## get this query into db
for slide in s.objects():
if slide['kind'] == 'InputSlide':
ret = slide
next_slide_id = "testing"
next_slide = "/slide/%s" % next_slide_id
return template("input.html", page=ret, form=form)
##redirect(next_slide)
@post('/input')
def posted_input():
form = DemographicsForm(request.forms)
if form.validate():
return "Posted month: %s" % form.birth_month.data
else:
return "Error..."
@route('/s/<slide_id:int>')
def slide(slide_id=None):
if slide_id is None or slide_id == 0:
return redirect('/')
slide = s.by_id(slide_id)
nxt, previous = '', ''
try:
nxt=s.nxt()['_metadata']['url'],
previous=s.previous()['_metadata']['url']
except:
pass
ret = dict(STATIC_URL=STATIC_URL,
slide=slide,
nxt=nxt,
previous=previous
)
return template("slide.html", page=ret)
# admin for version 2.0
@route('/admin')
def admin_index():
ret = dict(hello=u"admin not implemented")
return template("admin_index.html", ret=ret)
# This is where the conductor taps her wand.
if __name__ =="__main__":
import bottle
bottle.debug(True)
bottle.TEMPLATES.clear()
run(host='0.0.0.0', port=8080)
|
LBConsulting/headroom
|
headroom/app.py
|
Python
|
mit
| 2,458
|
import ctds
from .base import TestExternalDatabase
class TestConnectionEnter(TestExternalDatabase):
def test___doc__(self):
self.assertEqual(
ctds.Connection.__enter__.__doc__,
'''\
__enter__()
Enter the connection's runtime context. On exit, the connection is
closed automatically.
:return: The connection object.
:rtype: ctds.Connection
'''
)
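# The documented protocol is typically exercised like this (illustrative;
# the connection arguments depend on the environment):
#     with ctds.connect('hostname', user='username', password='...') as connection:
#         pass  # the connection is closed automatically on exit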
|
zillow/ctds
|
tests/test_connection___enter__.py
|
Python
|
mit
| 394
|
# -*- coding: utf-8 -*-
from .api import Funimation
|
ABusers/A-Certain-Magical-API
|
funimation/__init__.py
|
Python
|
mit
| 51
|
# Copyright 2022 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for MNIST <> MNIST transfer.
"""
# pylint:disable=invalid-name
import functools
from magenta.models.latent_transfer import model_joint
import tensorflow.compat.v1 as tf
FLAGS = tf.flags.FLAGS
n_latent = FLAGS.n_latent
n_latent_shared = FLAGS.n_latent_shared
layers = (128,) * 4
batch_size = 128
Encoder = functools.partial(
model_joint.EncoderLatentFull,
input_size=n_latent,
output_size=n_latent_shared,
layers=layers)
Decoder = functools.partial(
model_joint.DecoderLatentFull,
input_size=n_latent_shared,
output_size=n_latent,
layers=layers)
vae_config_A = {
'Encoder': Encoder,
'Decoder': Decoder,
'prior_loss_beta': FLAGS.prior_loss_beta_A,
'prior_loss': 'KL',
'batch_size': batch_size,
'n_latent': n_latent,
'n_latent_shared': n_latent_shared,
}
vae_config_B = {
'Encoder': Encoder,
'Decoder': Decoder,
'prior_loss_beta': FLAGS.prior_loss_beta_B,
'prior_loss': 'KL',
'batch_size': batch_size,
'n_latent': n_latent,
'n_latent_shared': n_latent_shared,
}
config = {
'vae_A': vae_config_A,
'vae_B': vae_config_B,
'config_A': 'mnist_0_nlatent64',
'config_B': 'mnist_0_nlatent64',
'config_classifier_A': 'mnist_classifier_0',
'config_classifier_B': 'mnist_classifier_0',
# model
'prior_loss_align_beta': FLAGS.prior_loss_align_beta,
'mean_recons_A_align_beta': FLAGS.mean_recons_A_align_beta,
'mean_recons_B_align_beta': FLAGS.mean_recons_B_align_beta,
'mean_recons_A_to_B_align_beta': FLAGS.mean_recons_A_to_B_align_beta,
'mean_recons_B_to_A_align_beta': FLAGS.mean_recons_B_to_A_align_beta,
'pairing_number': FLAGS.pairing_number,
# training dynamics
'batch_size': batch_size,
'n_latent': n_latent,
'n_latent_shared': n_latent_shared,
}
|
magenta/magenta
|
magenta/models/latent_transfer/configs/joint_exp_2mnist_parameterized.py
|
Python
|
apache-2.0
| 2,401
|
# -*- coding: utf-8 -*-
"""
This module contains classes to package the model data necessary for plotting.
DataSeries is the main class of interest.
"""
class DataItem(object):
"""One line in a plot. It contains y-axis values and a title."""
def __init__(self, title, values, params):
"""Init the DataItem.
Args:
title (string): The title to be used in the legend.
values (list): A list of y-axis values for this line.
params (list): A list of model parameters containing (title, value)
"""
self.title = title
self.values = values
self.params = params
class DataSeries(object):
"""
DataSeries contains series of data to plot in a single plot.
The y-axis and x-axis scales should be identical for each dataItem.
Each dataItem is represented by a line in the plot and legend.
"""
def __init__(self, x_title, y_title, x_values, dataItems):
"""
Init the Data Series.
Args:
x_title (string): The title for the x-axis
y_title (string): The title for the y-axis
x_values (list): A list of values for the x-axis.
dataItems (list): A list of DataItem objects, one for each line.
"""
self.dataItems = dataItems
self.y_title = y_title
self.x_title = x_title
self.x_values = x_values
class PlotData(object):
"""PlotData contains data necessary to create plots from a model run."""
def __init__(self, dataSeries, description):
"""
Init the Plot Data.
Args:
dataSeries (list): A list of DataSeries items for this plot.
description (string): A description to display below the plot.
"""
self.dataSeries = dataSeries
self.description = description
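# Illustrative usage (hypothetical values, not part of the original module):
#     items = [DataItem('tank level', [1.0, 1.2, 1.5], [('pump rate', 2)])]
#     series = DataSeries('time (s)', 'level (m)', [0, 60, 120], items)
#     plot = PlotData([series], 'Water level over the first two minutes.')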
|
dolfandringa/AquaponicsModeler
|
AquaponicsModeler/plotdata.py
|
Python
|
gpl-3.0
| 1,869
|
# Generated by Django 2.1.7 on 2019-04-11 06:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('landing', '0044_section_order_index'),
]
operations = [
migrations.AddField(
model_name='section',
name='featured',
field=models.BooleanField(default=False, verbose_name='featured'),
),
]
|
flavoi/diventi
|
diventi/landing/migrations/0045_section_featured.py
|
Python
|
apache-2.0
| 416
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
import re
from jx_base.expressions import (
FindOp as FindOp_,
NULL,
is_literal,
)
from jx_base.language import is_op
from jx_elasticsearch.es52.expressions.literal import Literal
from jx_elasticsearch.es52.expressions.variable import Variable
from jx_elasticsearch.es52.expressions.utils import ES52
from jx_elasticsearch.es52.expressions.not_op import NotOp
from jx_elasticsearch.es52.expressions.reg_exp_op import RegExpOp
from jx_elasticsearch.es52.painless import Painless
from mo_imports import export
from mo_json import STRING
class FindOp(FindOp_):
def to_es(self, schema):
if (
is_op(self.value, Variable)
and is_literal(self.find)
and self.default is NULL
and is_literal(self.start)
and self.start.value == 0
):
columns = [c for c in schema.leaves(self.value.var) if c.jx_type == STRING]
if len(columns) == 1:
return {"regexp": {
columns[0].es_column: ".*" + re.escape(self.find.value) + ".*"
}}
# CONVERT TO SCRIPT, SIMPLIFY, AND THEN BACK TO FILTER
self.simplified = False
return self.partial_eval(Painless).to_es(schema)
def partial_eval(self, lang):
value = self.value.partial_eval(lang)
find = self.find.partial_eval(lang)
default = self.default.partial_eval(lang)
start = self.start.partial_eval(lang)
return FindOp([value, find], default=default, start=start)
def missing(self, lang):
slim = self.partial_eval(lang)
if (
is_op(slim.value, Variable)
and is_literal(slim.find)
and slim.default is NULL
and is_literal(slim.start)
and slim.start.value == 0
):
return NotOp(RegExpOp([
slim.value,
Literal(".*" + re.escape(slim.find.value) + ".*"),
]))
return NotOp(self.partial_eval(Painless))
def exists(self):
return NotOp(self.missing(ES52)).partial_eval(ES52)
export("jx_elasticsearch.es52.expressions.boolean_op", FindOp)
|
klahnakoski/ActiveData
|
vendor/jx_elasticsearch/es52/expressions/find_op.py
|
Python
|
mpl-2.0
| 2,491
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 University of Dundee & Open Microscopy Environment
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import object
from github.AuthenticatedUser import AuthenticatedUser
from github.Repository import Repository
from scc.git import GHManager
from scc.git import GitHubRepository
from mox3.mox import Mox
class MoxTestBase(object):
def setup_method(self, method):
self.mox = Mox()
def teardown_method(self, method):
self.mox.UnsetStubs()
self.mox.VerifyAll()
class MockTest(MoxTestBase):
def setup_method(self, method):
super(MockTest, self).setup_method(method)
# Mocks
self.gh = self.mox.CreateMock(GHManager)
self.user = self.mox.CreateMock(AuthenticatedUser)
self.org = self.mox.CreateMock(AuthenticatedUser)
self.repo = self.mox.CreateMock(Repository)
self.repo.organization = None
self.user.login = "test"
self.gh.get_repo("mock/mock").AndReturn(self.repo)
self.mox.ReplayAll()
self.gh_repo = GitHubRepository(self.gh, "mock", "mock")
|
sbesson/snoopycrimecop
|
test/unit/Mock.py
|
Python
|
gpl-2.0
| 1,844
|
import logging
import json
import random
import hashlib
import collections
import datetime
import etcd.exceptions
import mr.constants
import mr.config.kv
import mr.models.kv.common
import mr.models.kv.data_layer
import mr.compat
_logger = logging.getLogger(__name__)
_logger.setLevel(logging.INFO)
class ValidationError(Exception):
def __init__(self, name, message):
extended_message = ("%s: %s" % (name, message))
super(ValidationError, self).__init__(extended_message)
class Field(object):
def __init__(self, is_required=True, default_value=None, empty_value=None):
# TODO(dustin): Check the existing field assignments to determine if we need to adjust their default_value or empty_value parameters (if given).
self.__is_required = is_required
self.__default_value = default_value
self.__empty_value = empty_value
def validate(self, name, value):
"""Raise ValidationError on error."""
pass
def is_empty(self, value):
return value == self.__empty_value or value is None
@property
def is_required(self):
return self.__is_required
@property
def default_value(self):
return self.__default_value
# We want to get everything using this as the default (not Field).
TextField = Field
class EnumField(Field):
def __init__(self, values, *args, **kwargs):
super(EnumField, self).__init__(*args, **kwargs)
self.__values = values
def validate(self, name, value):
if value not in self.__values:
raise ValidationError(name, "Not a valid enum value.")
class TimestampField(Field):
def validate(self, name, value):
try:
datetime.datetime.strptime(value, mr.constants.DATETIME_STD)
except ValueError:
pass
else:
return
raise ValidationError(name, "Timestamp is not properly formatted.")
@property
def default_value(self):
return datetime.datetime.now().strftime(mr.constants.DATETIME_STD)
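# Illustrative sketch (not in the original module) of how these field types
# compose on a Model subclass; all names below are hypothetical:
#     class Job(Model):
#         entity_class = 'job'
#         key_field = 'job_id'
#         job_id = TextField()
#         state = EnumField(['queued', 'running', 'done'])
#         created = TimestampField(is_required=False)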
_dl = mr.models.kv.data_layer.DataLayerKv()
class Model(mr.models.kv.common.CommonKv):
entity_class = None
key_field = None
def __init__(self, is_stored=False, *args, **data):
assert issubclass(is_stored.__class__, bool) is True
_logger.debug("Instantiating [%s]. IS_STORED=[%s]",
self.__class__.__name__, is_stored)
self.__state = None
self.__load_from_data(data, is_stored=is_stored)
def __load_from_data(self, data, is_stored=False):
cls = self.__class__
_logger.debug("Loading data on model [%s]. IS_STORED=[%s]",
cls.__name__, is_stored)
try:
# If the key wasn't non-None, assign it randomly.
#
# Previously, we allowed this column to be optional, but we'd get
# spurious keys without realizing it.
if data[cls.key_field] is None:
key = cls.make_opaque()
_logger.debug("Model [%s] was not loaded with a key. Generating "
"key: [%s]", cls.entity_class, key)
data[cls.key_field] = key
# Make sure we received all of the fields.
all_fields = set(self.__get_field_names())
actual_fields = set(data.keys())
# Make sure that only valid fields were given.
invalid_fields = actual_fields - all_fields
if invalid_fields:
raise ValueError("Invalid fields were given: %s" % (invalid_fields,))
# Fill-in any missing fields.
for field_name in (all_fields - actual_fields):
data[field_name] = getattr(cls, field_name).default_value
# Determine which fields had empty data.
data_info = [(field_name,
getattr(cls, field_name),
data[field_name])
for field_name
in all_fields]
for name, field_obj, datum in data_info:
if field_obj.is_empty(datum) is False:
field_obj.validate(name, datum)
elif field_obj.is_required:
raise ValidationError(name, "Required field is empty/omitted")
setattr(self, name, datum)
# Reflects whether or not the data came from storage.
self.__is_stored = is_stored
except:
try:
pk_value = data[cls.key_field]
except KeyError:
pk_value = None
_logger.exception("There was an error while loading data for "
"model [%s]. PK=[%s]", cls.__name__, pk_value)
raise
def __str__(self):
cls = self.__class__
return ('<%s>' % (getattr(self, cls.key_field),))
def __repr__(self):
cls = self.__class__
truncated_data = {}
for k, v in self.get_data().iteritems():
if issubclass(v.__class__, mr.compat.basestring) is True and \
len(v) > mr.config.kv.REPR_DATA_TRUNCATE_WIDTH:
v = v[:mr.config.kv.REPR_DATA_TRUNCATE_WIDTH] + '...'
truncated_data[k] = v
return ('<%s [%s] %s>' %
(cls.__name__, getattr(self, cls.key_field), truncated_data))
def get_debug(self, ignore_keys=()):
cls = self.__class__
data = dict([(k, v)
for k, v
in self.get_data().iteritems()
if k != cls.key_field and \
k not in ignore_keys])
info = collections.OrderedDict()
info['entity_name'] = cls.__name__
info['primary_key'] = collections.OrderedDict()
info['primary_key'][cls.key_field] = getattr(self, cls.key_field)
info['data'] = data
return json.dumps(
info,
indent=4,
separators=(',', ': '))
def __get_required_field_names(self):
cls = self.__class__
for field_name in self.__get_field_names():
if getattr(cls, field_name).is_required is True:
yield field_name
def __get_field_names(self):
cls = self.__class__
# We use None so we don't error with the private attributes.
for attr in dir(self):
if issubclass(getattr(cls, attr, None).__class__, Field):
yield attr
def get_data(self):
"""Return a dictionary of data. If the value is considered to be
"empty" by the particular field-type, then coalesce it to whatever the
default-value for each field-type is, e.g. both None and ''.
"""
cls = self.__class__
data = []
for k in self.__get_field_names():
if k == cls.key_field:
continue
datum = getattr(self, k)
field_obj = getattr(cls, k)
if getattr(cls, k).is_empty(datum) is True:
datum = field_obj.default_value
data.append((k, datum))
return dict(data)
def get_key(self):
cls = self.__class__
return getattr(self, cls.key_field)
def presave(self):
pass
def postsave(self):
pass
def predelete(self):
pass
def postdelete(self):
pass
@classmethod
def atomic_update(cls, get_cb, set_cb,
max_attempts=\
mr.config.kv.DEFAULT_ATOMIC_UPDATE_MAX_ATTEMPTS):
# TODO(dustin): This functionality is now native to the client (the *node*
# module).
i = max_attempts
while i > 0:
obj = get_cb()
try:
set_cb(obj)
obj.save(enforce_pristine=True)
except mr.models.kv.data_layer.KvPreconditionException:
obj.refresh()
else:
return obj
i -= 1
raise SystemError("Atomic update failed: %s" % (obj,))
def save(self, enforce_pristine=False):
cls = self.__class__
self.presave()
identity = self.get_identity()
_logger.debug("Saving model [%s]. IS_STORED=[%s]",
cls.__name__, self.__is_stored)
if self.__is_stored is True:
state = self.__state if enforce_pristine is True else None
cls.__update_entity(
identity,
self.get_data(),
check_against_state=state)
else:
state = cls.__create_entity(
identity,
self.get_data())
attributes = {
'state': str(state),
}
self.__class__.__apply_attributes(self, attributes)
self.__is_stored = True
self.postsave()
def delete(self):
cls = self.__class__
self.predelete()
identity = self.get_identity()
cls.delete_entity(identity)
self.__is_stored = False
self.postdelete()
def refresh(self):
cls = self.__class__
identity = self.get_identity()
key = getattr(self, cls.key_field)
assert identity is not None
assert key is not None
_logger.debug("Refreshing entity with identity and key: [%s] [%s]",
identity, key)
(attributes, data) = cls.__get_entity(identity)
data[cls.key_field] = key
self.__load_from_data(data, is_stored=True)
self.__class__.__apply_attributes(self, attributes)
@property
def is_stored(self):
return self.__is_stored
@property
def state_string(self):
return self.__state
def get_identity(self):
raise NotImplementedError()
@classmethod
def __build_from_stored_data(cls, key, data):
data[cls.key_field] = key
return cls(is_stored=True, **data)
@classmethod
def __apply_attributes(cls, obj, attributes):
# This won't be set prior to this. There's no benefit to it having a
# None value, and might potentially be confusing.
obj.__state = attributes['state']
@classmethod
def get_and_build(cls, identity, key):
(attributes, data) = cls.__get_entity(identity)
obj = cls.__build_from_stored_data(key, data)
cls.__apply_attributes(obj, attributes)
return obj
@classmethod
def __create_entity(cls, identity, data={}):
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
_logger.debug("Creating [%s] entity with parent [%s]: [%s]",
cls.entity_class, parent, identity)
try:
return cls.__create_only_encoded(parent, identity, data)
except etcd.exceptions.EtcdPreconditionException:
pass
# Re-raising here rather than in the catch above makes for cleaner
# logging (no exception-from-exception messages).
raise ValueError("[%s] entity identity with parent [%s] already "
"exists: [%s]" %
(cls.entity_class, parent, identity))
@classmethod
def __update_entity(cls, identity, data={}, check_against_state=None):
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
_logger.debug("Updating [%s] entity with parent [%s]: [%s]",
cls.entity_class, parent, identity)
try:
cls.__update_only_encoded(
parent,
identity,
data,
check_against_state=check_against_state)
except etcd.exceptions.EtcdPreconditionException:
pass
else:
return identity
# Re-raising here rather than in the catch above makes for cleaner
# logging (no exception-from-exception messages).
raise ValueError("[%s] entity identity with parent [%s] doesn't "
"exist: [%s]" %
(cls.entity_class, parent, identity))
@classmethod
def delete_entity(cls, identity):
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
_logger.debug("Deleting [%s] entity with parent [%s]: [%s]",
cls.entity_class, parent, identity)
cls.__delete(parent, identity)
@classmethod
def __get_entity(cls, identity):
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
_logger.debug("Getting [%s] entity with parent [%s]: [%s]",
cls.entity_class, parent, identity)
return cls.__get_encoded(parent, identity)
@classmethod
def __get_encoded(cls, parent, identity):
key = cls.key_from_identity(parent, identity)
(state, value) = _dl.get(key)
return (
{
'state': str(state),
},
mr.config.kv.DECODER(value)
)
def wait_for_change(self):
cls = self.__class__
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
identity = self.get_identity()
key = getattr(self, cls.key_field)
_logger.debug("Waiting on entity [%s] with parent [%s]: [%s]",
cls.entity_class, parent, identity)
(attributes, data) = cls.__wait_encoded(parent, identity)
obj = cls.__build_from_stored_data(key, data)
cls.__apply_attributes(obj, attributes)
return obj
@classmethod
def __wait_encoded(cls, parent, identity):
key = cls.key_from_identity(parent, identity)
(state, value) = _dl.wait(key)
return (
{
'state': str(state),
},
mr.config.kv.DECODER(value)
)
@classmethod
def __update_only_encoded(cls, parent, identity, value,
check_against_state=None):
key = cls.key_from_identity(parent, identity)
return _dl.update_only(
key,
mr.config.kv.ENCODER(value),
check_against_state=check_against_state)
@classmethod
def __create_only_encoded(cls, parent, identity, value):
key = cls.key_from_identity(parent, identity)
return _dl.create_only(key, mr.config.kv.ENCODER(value))
@classmethod
def __delete(cls, parent, identity):
key = cls.key_from_identity(parent, identity)
return _dl.delete(key)
@classmethod
def list(cls, *args):
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
_logger.debug("Getting children [%s] entities of parent [%s].",
cls.entity_class, parent)
for key, data in cls.__list_encoded(parent, args):
yield cls.__build_from_stored_data(key, data)
@classmethod
def __list_encoded(cls, parent, identity_prefix):
key = cls.key_from_identity(parent, identity_prefix)
for name, data in _dl.list(key):
# Don't just decode the data, but derive the identity for this
# child as well (clip the search-key path-prefix from the child-key).
yield (name, mr.config.kv.DECODER(data))
@classmethod
def list_children(cls, *args):
parent = mr.config.kv.ENTITY_ROOT + (cls.entity_class,)
key = cls.key_from_identity(parent, args)
return _dl.list_children(key)
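# Illustrative sketch of a concrete model (hypothetical names, not part of
# the original file): subclasses supply entity_class, key_field, the field
# descriptors, and get_identity(), which the storage helpers above combine
# with mr.config.kv.ENTITY_ROOT to build the KV key.
#
#   class Job(Model):
#       entity_class = 'job'
#       key_field = 'job_id'
#
#       job_id = TextField()
#       name = TextField()
#       created = TimestampField(is_required=False)
#
#       def get_identity(self):
#           return (self.job_id,)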
|
dsoprea/JobX
|
mr/models/kv/model.py
|
Python
|
gpl-2.0
| 15,499
|
class MimeError(Exception):
pass
class DecodingError(MimeError):
"""Thrown when there is an encoding error."""
pass
class EncodingError(MimeError):
"""Thrown when there is an decoding error."""
pass
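# Illustrative usage (assumed caller, not part of the original module):
# because both subclasses derive from MimeError, a caller can handle either
# failure direction with a single except clause. `decode` below is a
# hypothetical entry point, not a real flanker API.
#
#   try:
#       message = decode(raw_bytes)
#   except MimeError as e:
#       log.warning("mime processing failed: %s", e)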
|
alex/flanker
|
flanker/mime/message/errors.py
|
Python
|
apache-2.0
| 223
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_certificate_ca
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_certificate_ca.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_certificate_ca_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_certificate_ca': {
'auto_update_days': '3',
'auto_update_days_warning': '4',
'ca': 'test_value_5',
'last_updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep_url': 'test_value_9',
'source': 'factory',
'source_ip': '84.230.14.11',
'trusted': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_certificate_ca.fortios_vpn_certificate(input_data, fos_instance)
expected_data = {
'auto-update-days': '3',
'auto-update-days-warning': '4',
'ca': 'test_value_5',
'last-updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep-url': 'test_value_9',
'source': 'factory',
'source-ip': '84.230.14.11',
'trusted': 'enable'
}
set_method_mock.assert_called_with('vpn.certificate', 'ca', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_certificate_ca_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_certificate_ca': {
'auto_update_days': '3',
'auto_update_days_warning': '4',
'ca': 'test_value_5',
'last_updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep_url': 'test_value_9',
'source': 'factory',
'source_ip': '84.230.14.11',
'trusted': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_certificate_ca.fortios_vpn_certificate(input_data, fos_instance)
expected_data = {
'auto-update-days': '3',
'auto-update-days-warning': '4',
'ca': 'test_value_5',
'last-updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep-url': 'test_value_9',
'source': 'factory',
'source-ip': '84.230.14.11',
'trusted': 'enable'
}
set_method_mock.assert_called_with('vpn.certificate', 'ca', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_certificate_ca_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_certificate_ca': {
'auto_update_days': '3',
'auto_update_days_warning': '4',
'ca': 'test_value_5',
'last_updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep_url': 'test_value_9',
'source': 'factory',
'source_ip': '84.230.14.11',
'trusted': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_certificate_ca.fortios_vpn_certificate(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.certificate', 'ca', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_certificate_ca_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_certificate_ca': {
'auto_update_days': '3',
'auto_update_days_warning': '4',
'ca': 'test_value_5',
'last_updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep_url': 'test_value_9',
'source': 'factory',
'source_ip': '84.230.14.11',
'trusted': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_certificate_ca.fortios_vpn_certificate(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.certificate', 'ca', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_certificate_ca_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_certificate_ca': {
'auto_update_days': '3',
'auto_update_days_warning': '4',
'ca': 'test_value_5',
'last_updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep_url': 'test_value_9',
'source': 'factory',
'source_ip': '84.230.14.11',
'trusted': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_certificate_ca.fortios_vpn_certificate(input_data, fos_instance)
expected_data = {
'auto-update-days': '3',
'auto-update-days-warning': '4',
'ca': 'test_value_5',
'last-updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep-url': 'test_value_9',
'source': 'factory',
'source-ip': '84.230.14.11',
'trusted': 'enable'
}
set_method_mock.assert_called_with('vpn.certificate', 'ca', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_vpn_certificate_ca_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_certificate_ca': {
'random_attribute_not_valid': 'tag',
'auto_update_days': '3',
'auto_update_days_warning': '4',
'ca': 'test_value_5',
'last_updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep_url': 'test_value_9',
'source': 'factory',
'source_ip': '84.230.14.11',
'trusted': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_certificate_ca.fortios_vpn_certificate(input_data, fos_instance)
expected_data = {
'auto-update-days': '3',
'auto-update-days-warning': '4',
'ca': 'test_value_5',
'last-updated': '6',
'name': 'default_name_7',
'range': 'global',
'scep-url': 'test_value_9',
'source': 'factory',
'source-ip': '84.230.14.11',
'trusted': 'enable'
}
set_method_mock.assert_called_with('vpn.certificate', 'ca', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
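# Note on the pattern above (added commentary, not original test code): the
# module input uses underscored keys ('auto_update_days') while the mocked
# FortiOSHandler.set/delete calls are asserted against hyphenated keys
# ('auto-update-days'), so each test implicitly verifies the module's
# underscore-to-hyphen key conversion as well as its status handling.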
|
anryko/ansible
|
test/units/modules/network/fortios/test_fortios_vpn_certificate_ca.py
|
Python
|
gpl-3.0
| 10,627
|
# Copyright (C) 2011 Jason Anderson
#
#
# This file is part of PseudoTV.
#
# PseudoTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PseudoTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PseudoTV. If not, see <http://www.gnu.org/licenses/>.
import xbmc, xbmcgui, xbmcaddon
import subprocess, os
import time, threading
import datetime, traceback
import sys, re
import urllib
import urllib2
import fanarttv
from Playlist import Playlist
from Globals import *
from Channel import Channel
from ChannelList import ChannelList
from FileAccess import FileLock, FileAccess
from xml.etree import ElementTree as ET
from fanarttv import *
# from tvdb import *
class EPGWindow(xbmcgui.WindowXMLDialog):
def __init__(self, *args, **kwargs):
self.focusRow = 0
self.focusIndex = 0
self.focusTime = 0
self.focusEndTime = 0
self.shownTime = 0
self.centerChannel = 0
self.rowCount = 6
self.channelButtons = [None] * self.rowCount
self.buttonCache = []
self.buttonCount = 0
self.actionSemaphore = threading.BoundedSemaphore()
self.lastActionTime = time.time()
self.channelLogos = ''
self.textcolor = "FFFFFFFF"
self.focusedcolor = "FF7d7d7d"
self.clockMode = 0
self.textfont = "font14"
self.startup = time.time()
self.showingInfo = False  # initialize here so onAction can safely test it before setShowInfo runs
self.infoOffset = 0
self.infoOffsetV = 0
self.log('Using EPG Coloring = ' + str(REAL_SETTINGS.getSetting('EPGcolor_enabled')))
if os.path.exists(xbmc.translatePath(os.path.join(ADDON_INFO, 'resources', 'skins', Skin_Select, 'media'))):
self.mediaPath = xbmc.translatePath(os.path.join(ADDON_INFO, 'resources', 'skins', Skin_Select, 'media')) + '/'
else:
self.mediaPath = xbmc.translatePath(os.path.join(ADDON_INFO, 'resources', 'skins', 'default', 'media')) + '/'
self.AltmediaPath = xbmc.translatePath(os.path.join(ADDON_INFO, 'resources', 'skins', 'default', 'media')) + '/'
self.log('Mediapath is ' + self.mediaPath)
# Use the given focus and non-focus textures if they exist. Otherwise use the defaults.
if os.path.exists(self.mediaPath + BUTTON_FOCUS):
self.textureButtonFocus = self.mediaPath + BUTTON_FOCUS
elif xbmc.skinHasImage(self.mediaPath + BUTTON_FOCUS):
self.textureButtonFocus = self.mediaPath + BUTTON_FOCUS
else:
self.textureButtonFocus = 'pstvButtonFocus.png'
if os.path.exists(self.mediaPath + BUTTON_NO_FOCUS):
self.textureButtonNoFocus = self.mediaPath + BUTTON_NO_FOCUS
elif xbmc.skinHasImage(self.mediaPath + BUTTON_NO_FOCUS):
self.textureButtonNoFocus = self.mediaPath + BUTTON_NO_FOCUS
else:
self.textureButtonNoFocus = 'pstvButtonNoFocus.png'
for i in range(self.rowCount):
self.channelButtons[i] = []
self.clockMode = ADDON_SETTINGS.getSetting("ClockMode")
self.toRemove = []
def onFocus(self, controlid):
pass
# set the time labels
def setTimeLabels(self, thetime):
self.log('setTimeLabels')
now = datetime.datetime.fromtimestamp(thetime)
self.getControl(104).setLabel(now.strftime('%A, %b %d'))
delta = datetime.timedelta(minutes=30)
for i in range(3):
if self.clockMode == "0":
self.getControl(101 + i).setLabel(now.strftime("%I:%M%p").lower())
else:
self.getControl(101 + i).setLabel(now.strftime("%H:%M"))
now = now + delta
self.log('setTimeLabels return')
self.log('thetime ' + str(now))
def log(self, msg, level = xbmc.LOGDEBUG):
log('EPGWindow: ' + msg, level)
def logDebug(self, msg, level = xbmc.LOGDEBUG):
if REAL_SETTINGS.getSetting('enable_Debug') == "true":
log('EPGWindow: ' + msg, level)
def onInit(self):
self.log('onInit')
timex, timey = self.getControl(120).getPosition()
timew = self.getControl(120).getWidth()
timeh = self.getControl(120).getHeight()
if os.path.exists(xbmc.translatePath(os.path.join(ADDON_INFO, 'resources', 'skins', Skin_Select, 'media', TIME_BAR))):
self.currentTimeBar = xbmcgui.ControlImage(timex, timey, timew, timeh, self.mediaPath + TIME_BAR)
else:
self.currentTimeBar = xbmcgui.ControlImage(timex, timey, timew, timeh, self.AltmediaPath + TIME_BAR)
self.log('Mediapath Time_Bar = ' + self.mediaPath + TIME_BAR)
self.addControl(self.currentTimeBar)
try:
textcolor = int(self.getControl(100).getLabel(), 16)
focusedcolor = int(self.getControl(100).getLabel2(), 16)
self.textfont = self.getControl(105).getLabel()
if textcolor > 0:
self.textcolor = hex(textcolor)[2:]
if focusedcolor > 0:
self.focusedcolor = hex(focusedcolor)[2:]
except:
pass
try:
if self.setChannelButtons(time.time(), self.MyOverlayWindow.currentChannel) == False:
self.log('Unable to add channel buttons')
return
curtime = time.time()
self.focusIndex = -1
basex, basey = self.getControl(113).getPosition()
baseh = self.getControl(113).getHeight()
basew = self.getControl(113).getWidth()
# set the button that corresponds to the currently playing show
for i in range(len(self.channelButtons[2])):
left, top = self.channelButtons[2][i].getPosition()
width = self.channelButtons[2][i].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
if curtime >= starttime and curtime <= endtime:
self.focusIndex = i
self.setFocus(self.channelButtons[2][i])
self.focusTime = int(time.time())
self.focusEndTime = endtime
break
# If nothing was highlighted, just select the first button
if self.focusIndex == -1:
self.focusIndex = 0
self.setFocus(self.channelButtons[2][0])
left, top = self.channelButtons[2][0].getPosition()
width = self.channelButtons[2][0].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
self.focusTime = int(starttime + 30)
self.focusEndTime = endtime
self.focusRow = 2
self.setShowInfo()
except:
self.log("Unknown EPG Initialization Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
try:
self.close()
except:
self.log("Error closing", xbmc.LOGERROR)
self.MyOverlayWindow.sleepTimeValue = 1
self.MyOverlayWindow.startSleepTimer()
return
self.log('onInit return')
# setup all channel buttons for a given time
def setChannelButtons(self, starttime, curchannel, singlerow = -1):
self.log('setChannelButtons ' + str(starttime) + ', ' + str(curchannel))
self.centerChannel = self.MyOverlayWindow.fixChannel(curchannel)
# This is done twice to guarantee we go back 2 channels. If the previous 2 channels
# aren't valid, then doing a fix on curchannel - 2 may result in going back only
# a single valid channel.
curchannel = self.MyOverlayWindow.fixChannel(curchannel - 1, False)
curchannel = self.MyOverlayWindow.fixChannel(curchannel - 1, False)
starttime = self.roundToHalfHour(int(starttime))
self.setTimeLabels(starttime)
self.shownTime = starttime
basex, basey = self.getControl(111).getPosition()
basew = self.getControl(111).getWidth()
tmpx, tmpy = self.getControl(110 + self.rowCount).getPosition()
timex, timey = self.getControl(120).getPosition()
timew = self.getControl(120).getWidth()
timeh = self.getControl(120).getHeight()
basecur = curchannel
self.toRemove.append(self.currentTimeBar)
myadds = []
for i in range(self.rowCount):
if singlerow == -1 or singlerow == i:
self.setButtons(starttime, basecur, i)
myadds.extend(self.channelButtons[i])
basecur = self.MyOverlayWindow.fixChannel(basecur + 1)
basecur = curchannel
for i in range(self.rowCount):
self.getControl(301 + i).setLabel(self.MyOverlayWindow.channels[basecur - 1].name)
basecur = self.MyOverlayWindow.fixChannel(basecur + 1)
for i in range(self.rowCount):
try:
self.getControl(311 + i).setLabel(str(curchannel))
except:
pass
try:
self.getControl(321 + i).setImage(self.channelLogos + self.MyOverlayWindow.channels[curchannel - 1].name + '_c.png')
except:
pass
curchannel = self.MyOverlayWindow.fixChannel(curchannel + 1)
if time.time() >= starttime and time.time() < starttime + 5400:
dif = int((starttime + 5400 - time.time()))
self.currentTimeBar.setPosition(int((basex + basew - 2) - (dif * (basew / 5400.0))), timey)
else:
if time.time() < starttime:
self.currentTimeBar.setPosition(basex + 2, timey)
else:
self.currentTimeBar.setPosition(basex + basew - 2 - timew, timey)
myadds.append(self.currentTimeBar)
try:
self.removeControls(self.toRemove)
except:
for cntrl in self.toRemove:
try:
self.removeControl(cntrl)
except:
pass
self.addControls(myadds)
self.toRemove = []
self.log('setChannelButtons return')
# round the given time down to the nearest half hour
def roundToHalfHour(self, thetime):
n = datetime.datetime.fromtimestamp(thetime)
delta = datetime.timedelta(minutes=30)
if n.minute > 29:
n = n.replace(minute=30, second=0, microsecond=0)
else:
n = n.replace(minute=0, second=0, microsecond=0)
return time.mktime(n.timetuple())
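# Note on the time-to-pixel math used throughout this window (added
# commentary; basex/basew come from each row's control): the EPG grid spans
# 5400 seconds (90 minutes), so:
#
#   seconds_per_pixel = 5400.0 / basew
#   starttime = self.shownTime + (left - basex) * seconds_per_pixel
#   width_in_pixels = duration_in_seconds * (basew / 5400.0)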
# create the buttons for the specified channel in the given row
def setButtons(self, starttime, curchannel, row):
self.log('setButtons ' + str(starttime) + ", " + str(curchannel) + ", " + str(row))
try:
curchannel = self.MyOverlayWindow.fixChannel(curchannel)
basex, basey = self.getControl(111 + row).getPosition()
baseh = self.getControl(111 + row).getHeight()
basew = self.getControl(111 + row).getWidth()
chtype = int(ADDON_SETTINGS.getSetting('Channel_' + str(curchannel) + '_type'))
self.lastExitTime = (ADDON_SETTINGS.getSetting("LastExitTime"))
self.log('chtype = ' + str(chtype))
if xbmc.Player().isPlaying() == False:
self.log('No video is playing, not adding buttons')
self.closeEPG()
return False
# Backup all of the buttons to an array
self.toRemove.extend(self.channelButtons[row])
del self.channelButtons[row][:]
# if the channel is paused, then only 1 button needed
nowDate = datetime.datetime.now()
self.log("setbuttonnowtime " + str(nowDate))
if self.MyOverlayWindow.channels[curchannel - 1].isPaused:
self.channelButtons[row].append(xbmcgui.ControlButton(basex, basey, basew, baseh, self.MyOverlayWindow.channels[curchannel - 1].getCurrentTitle() + " (paused)", focusTexture=self.textureButtonFocus, noFocusTexture=self.textureButtonNoFocus, alignment=4, textColor=self.textcolor, focusedColor=self.focusedcolor))
else:
# Find the show that was running at the given time
# Use the current time and show offset to calculate it
# At timedif time, channelShowPosition was playing at channelTimes
# The only way this isn't true is if the current channel is curchannel since
# it could have been fast-forwarded or rewound.
if curchannel == self.MyOverlayWindow.currentChannel: #currentchannel epg
#Live TV pull date from the playlist entry
if chtype == 8:
playlistpos = int(xbmc.PlayList(xbmc.PLAYLIST_VIDEO).getposition())
#episodetitle is actually the start time of each show that the playlist gets from channellist.py
tmpDate = self.MyOverlayWindow.channels[curchannel - 1].getItemtimestamp(playlistpos)
self.log("setbuttonnowtime2 " + str(tmpDate))
t = time.strptime(tmpDate, '%Y-%m-%d %H:%M:%S')
epochBeginDate = time.mktime(t)
#beginDate = datetime.datetime(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
#videotime = (nowDate - beginDate).seconds
videotime = time.time() - epochBeginDate
reftime = time.time()
else:
playlistpos = int(xbmc.PlayList(xbmc.PLAYLIST_VIDEO).getposition())
videotime = xbmc.Player().getTime()
reftime = time.time()
else:
#Live TV pull date from the playlist entry
if chtype == 8:
playlistpos = self.MyOverlayWindow.channels[curchannel - 1].playlistPosition
#playlistpos = int(xbmc.PlayList(xbmc.PLAYLIST_VIDEO).getposition())
#episodetitle is actually the start time of each show that the playlist gets from channellist.py
tmpDate = self.MyOverlayWindow.channels[curchannel - 1].getItemtimestamp(playlistpos)
self.log("setbuttonnowtime2 " + str(tmpDate))
t = time.strptime(tmpDate, '%Y-%m-%d %H:%M:%S')
epochBeginDate = time.mktime(t)
#beginDate = datetime.datetime(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
#videotime = (nowDate - beginDate).seconds
#loop to ensure we get the current show in the playlist
while epochBeginDate + self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos) < time.time():
epochBeginDate += self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos)
playlistpos = self.MyOverlayWindow.channels[curchannel - 1].fixPlaylistIndex(playlistpos + 1)
videotime = time.time() - epochBeginDate
reftime = time.time()
else:
playlistpos = self.MyOverlayWindow.channels[curchannel - 1].playlistPosition #everyotherchannel epg
videotime = self.MyOverlayWindow.channels[curchannel - 1].showTimeOffset
reftime = self.MyOverlayWindow.channels[curchannel - 1].lastAccessTime
self.log('videotime & reftime + starttime + channel === ' + str(videotime) + ', ' + str(reftime) + ', ' + str(starttime) + ', ' + str(curchannel))
# normalize reftime to the beginning of the video
reftime -= videotime
while reftime > starttime:
playlistpos -= 1
# No need to check bounds on the playlistpos, the duration function makes sure it is correct
reftime -= self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos)
while reftime + self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos) < starttime:
reftime += self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos)
playlistpos += 1
# create a button for each show that runs in the next hour and a half
endtime = starttime + 5400
totaltime = 0
totalloops = 0
while reftime < endtime and totalloops < 1000:
xpos = int(basex + (totaltime * (basew / 5400.0)))
tmpdur = self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos)
shouldskip = False
# this should only happen the first time through this loop
# it shows the small portion of the show before the current one
if reftime < starttime:
tmpdur -= starttime - reftime
reftime = starttime
if tmpdur < 60 * 3:
shouldskip = True
# Don't show very short videos
if self.MyOverlayWindow.hideShortItems and shouldskip == False:
if self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos) < self.MyOverlayWindow.shortItemLength:
shouldskip = True
tmpdur = 0
else:
nextlen = self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos + 1)
prevlen = self.MyOverlayWindow.channels[curchannel - 1].getItemDuration(playlistpos - 1)
if nextlen < 60:
tmpdur += nextlen / 2
if prevlen < 60:
tmpdur += prevlen / 2
width = int((basew / 5400.0) * tmpdur)
if width < 30 and shouldskip == False:
width = 30
tmpdur = int(30.0 / (basew / 5400.0))
if width + xpos > basex + basew:
width = basex + basew - xpos
if shouldskip == False and width >= 30:
mylabel = self.MyOverlayWindow.channels[curchannel - 1].getItemTitle(playlistpos)
mygenre = self.MyOverlayWindow.channels[curchannel - 1].getItemgenre(playlistpos)
chtype = int(ADDON_SETTINGS.getSetting('Channel_' + str(curchannel) + '_type'))
# self.log('mygenre = ' + str(mygenre))#debug
## EPG Genres button path
if FileAccess.exists(self.mediaPath + '/epg-genres/' + mygenre + '.png'):
self.textureButtonNoFocusGenre = (self.mediaPath + 'epg-genres/' + mygenre + '.png')
else:
self.textureButtonNoFocusGenre = self.mediaPath + BUTTON_NO_FOCUS
## EPG Chtype button path
if FileAccess.exists(self.mediaPath + '/epg-genres/' + str(chtype) + '.png'):
self.textureButtonNoFocusChtype = (self.mediaPath + 'epg-genres/' + str(chtype) + '.png')
else:
self.textureButtonNoFocusChtype = self.mediaPath + BUTTON_NO_FOCUS
if REAL_SETTINGS.getSetting('EPGcolor_enabled') == '1':
self.channelButtons[row].append(xbmcgui.ControlButton(xpos, basey, width, baseh, mylabel, focusTexture=self.textureButtonFocus, noFocusTexture=self.textureButtonNoFocusGenre, alignment=4, font=self.textfont, textColor=self.textcolor, focusedColor=self.focusedcolor))
elif REAL_SETTINGS.getSetting('EPGcolor_enabled') == '2':
self.channelButtons[row].append(xbmcgui.ControlButton(xpos, basey, width, baseh, mylabel, focusTexture=self.textureButtonFocus, noFocusTexture=self.textureButtonNoFocusChtype, alignment=4, font=self.textfont, textColor=self.textcolor, focusedColor=self.focusedcolor))
else:
self.channelButtons[row].append(xbmcgui.ControlButton(xpos, basey, width, baseh, mylabel, focusTexture=self.textureButtonFocus, noFocusTexture=self.textureButtonNoFocus, alignment=4, font=self.textfont, textColor=self.textcolor, focusedColor=self.focusedcolor))
totaltime += tmpdur
reftime += tmpdur
playlistpos += 1
totalloops += 1
if totalloops >= 1000:
self.log("Broken big loop, too many loops, reftime is " + str(reftime) + ", endtime is " + str(endtime))
# If there were no buttons added, show some default button
if len(self.channelButtons[row]) == 0:
self.channelButtons[row].append(xbmcgui.ControlButton(basex, basey, basew, baseh, self.MyOverlayWindow.channels[curchannel - 1].name, focusTexture=self.textureButtonFocus, noFocusTexture=self.textureButtonNoFocus, alignment=4, textColor=self.textcolor, focusedColor=self.focusedcolor))
except:
self.log("Exception in setButtons", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
self.log('setButtons return')
return True
def onAction(self, act):
self.log('onAction ' + str(act.getId()))
if self.actionSemaphore.acquire(False) == False:
self.log('Unable to get semaphore')
return
action = act.getId()
try:
if action in ACTION_PREVIOUS_MENU:
self.closeEPG()
if self.showingInfo:
self.infoOffset = 0
self.infoOffsetV = 0
elif action == ACTION_MOVE_DOWN:
self.GoDown()
if self.showingInfo:
self.infoOffsetV -= 1
elif action == ACTION_MOVE_UP:
self.GoUp()
if self.showingInfo:
self.infoOffsetV += 1
elif action == ACTION_MOVE_LEFT:
self.GoLeft()
if self.showingInfo:
self.infoOffset -= 1
elif action == ACTION_MOVE_RIGHT:
self.GoRight()
if self.showingInfo:
self.infoOffset += 1
elif action == ACTION_STOP:
self.closeEPG()
if self.showingInfo:
self.infoOffset = 0
self.infoOffsetV = 0
elif action == ACTION_SELECT_ITEM:
lastaction = time.time() - self.lastActionTime
if self.showingInfo:
self.infoOffset = 0
self.infoOffsetV = 0
if lastaction >= 2:
self.selectShow()
self.closeEPG()
self.lastActionTime = time.time()
except:
self.log("Unknown EPG Exception", xbmc.LOGERROR)
self.log(traceback.format_exc(), xbmc.LOGERROR)
try:
self.close()
except:
self.log("Error closing", xbmc.LOGERROR)
self.MyOverlayWindow.sleepTimeValue = 1
self.MyOverlayWindow.startSleepTimer()
return
self.actionSemaphore.release()
self.log('onAction return')
def closeEPG(self):
self.log('closeEPG')
try:
self.removeControl(self.currentTimeBar)
self.MyOverlayWindow.startSleepTimer()
except:
pass
self.close()
def onControl(self, control):
self.log('onControl')
# Run when a show is selected, so close the epg and run the show
def onClick(self, controlid):
self.log('onClick')
if self.actionSemaphore.acquire(False) == False:
self.log('Unable to get semaphore')
return
lastaction = time.time() - self.lastActionTime
if lastaction >= 2:
try:
selectedbutton = self.getControl(controlid)
except:
self.actionSemaphore.release()
self.log('onClick unknown controlid ' + str(controlid))
return
for i in range(self.rowCount):
for x in range(len(self.channelButtons[i])):
mycontrol = 0
mycontrol = self.channelButtons[i][x]
if selectedbutton == mycontrol:
self.focusRow = i
self.focusIndex = x
self.selectShow()
self.closeEPG()
self.lastActionTime = time.time()
self.actionSemaphore.release()
self.log('onClick found button return')
return
self.lastActionTime = time.time()
self.closeEPG()
self.actionSemaphore.release()
self.log('onClick return')
def GoDown(self):
self.log('goDown')
# change controls to display the proper chunks
if self.focusRow == self.rowCount - 1:
self.setChannelButtons(self.shownTime, self.MyOverlayWindow.fixChannel(self.centerChannel + 1))
self.focusRow = self.rowCount - 2
self.setProperButton(self.focusRow + 1)
self.log('goDown return')
def GoUp(self):
self.log('goUp')
# same as godown
# change controls to display the proper chunks
if self.focusRow == 0:
self.setChannelButtons(self.shownTime, self.MyOverlayWindow.fixChannel(self.centerChannel - 1, False))
self.focusRow = 1
self.setProperButton(self.focusRow - 1)
self.log('goUp return')
def GoLeft(self):
self.log('goLeft')
basex, basey = self.getControl(111 + self.focusRow).getPosition()
basew = self.getControl(111 + self.focusRow).getWidth()
# change controls to display the proper chunks
if self.focusIndex == 0:
left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
self.setChannelButtons(self.shownTime - 1800, self.centerChannel)
curbutidx = self.findButtonAtTime(self.focusRow, starttime + 30)
if (curbutidx - 1) >= 0:
self.focusIndex = curbutidx - 1
else:
self.focusIndex = 0
else:
self.focusIndex -= 1
left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
self.setFocus(self.channelButtons[self.focusRow][self.focusIndex])
self.setShowInfo()
self.focusEndTime = endtime
self.focusTime = starttime + 30
self.log('goLeft return')
def GoRight(self):
self.log('goRight')
basex, basey = self.getControl(111 + self.focusRow).getPosition()
basew = self.getControl(111 + self.focusRow).getWidth()
# change controls to display the proper chunks
if self.focusIndex == len(self.channelButtons[self.focusRow]) - 1:
left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
self.setChannelButtons(self.shownTime + 1800, self.centerChannel)
curbutidx = self.findButtonAtTime(self.focusRow, starttime + 30)
if (curbutidx + 1) < len(self.channelButtons[self.focusRow]):
self.focusIndex = curbutidx + 1
else:
self.focusIndex = len(self.channelButtons[self.focusRow]) - 1
else:
self.focusIndex += 1
left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
self.setFocus(self.channelButtons[self.focusRow][self.focusIndex])
self.setShowInfo()
self.focusEndTime = endtime
self.focusTime = starttime + 30
self.log('goRight return')
def findButtonAtTime(self, row, selectedtime):
self.log('findButtonAtTime ' + str(row))
basex, basey = self.getControl(111 + row).getPosition()
baseh = self.getControl(111 + row).getHeight()
basew = self.getControl(111 + row).getWidth()
for i in range(len(self.channelButtons[row])):
left, top = self.channelButtons[row][i].getPosition()
width = self.channelButtons[row][i].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
if selectedtime >= starttime and selectedtime <= endtime:
return i
return -1
# based on the current focus row and index, find the appropriate button in
# the new row to set focus to
def setProperButton(self, newrow, resetfocustime = False):
self.log('setProperButton ' + str(newrow))
self.focusRow = newrow
basex, basey = self.getControl(111 + newrow).getPosition()
baseh = self.getControl(111 + newrow).getHeight()
basew = self.getControl(111 + newrow).getWidth()
for i in range(len(self.channelButtons[newrow])):
left, top = self.channelButtons[newrow][i].getPosition()
width = self.channelButtons[newrow][i].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
if self.focusTime >= starttime and self.focusTime <= endtime:
self.focusIndex = i
self.setFocus(self.channelButtons[newrow][i])
self.setShowInfo()
self.focusEndTime = endtime
if resetfocustime:
self.focusTime = starttime + 30
self.log('setProperButton found button return')
return
self.focusIndex = 0
self.setFocus(self.channelButtons[newrow][0])
left, top = self.channelButtons[newrow][0].getPosition()
width = self.channelButtons[newrow][0].getWidth()
left = left - basex
starttime = self.shownTime + (left / (basew / 5400.0))
endtime = starttime + (width / (basew / 5400.0))
self.focusEndTime = endtime
if resetfocustime:
self.focusTime = starttime + 30
self.setShowInfo()
self.log('setProperButton return')
def setShowInfo(self):
self.log('setShowInfo')
self.showingInfo = True
basex, basey = self.getControl(111 + self.focusRow).getPosition()
baseh = self.getControl(111 + self.focusRow).getHeight()
basew = self.getControl(111 + self.focusRow).getWidth()
# use the selected time to set the video
left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
left = left - basex + (width / 2)
starttime = self.shownTime + (left / (basew / 5400.0))
chnoffset = self.focusRow - 2
newchan = self.centerChannel
while chnoffset != 0:
if chnoffset > 0:
newchan = self.MyOverlayWindow.fixChannel(newchan + 1, True)
chnoffset -= 1
else:
newchan = self.MyOverlayWindow.fixChannel(newchan - 1, False)
chnoffset += 1
plpos = self.determinePlaylistPosAtTime(starttime, newchan)
if plpos == -1:
self.log('Unable to find the proper playlist to set from EPG')
return
if REAL_SETTINGS.getSetting("art.enable") == "true":
if self.infoOffset > 0:
    self.getControl(522).setLabel('COMING UP:')
elif self.infoOffset < 0:
    self.getControl(522).setLabel('ALREADY SEEN:')
elif self.infoOffsetV != 0:
    self.getControl(522).setLabel('ON NOW:')
else:
    self.getControl(522).setLabel('NOW WATCHING:')
tvdbid = 0
imdbid = 0
Artpath = xbmc.translatePath(os.path.join(CHANNELS_LOC, 'generated') + '/' + 'artwork' + '/')##write code to clean on channel rebuild
self.logDebug('EPG.Artpath.1 = ' + str(Artpath))
mediapath = uni(self.MyOverlayWindow.channels[newchan - 1].getItemFilename(plpos))
self.logDebug('EPG.mediapath.1 = ' + uni(mediapath))
chtype = int(ADDON_SETTINGS.getSetting('Channel_' + str(newchan) + '_type'))
genre = str(self.MyOverlayWindow.channels[newchan - 1].getItemgenre(plpos))
title = uni(self.MyOverlayWindow.channels[newchan - 1].getItemTitle(plpos))
LiveID = str(self.MyOverlayWindow.channels[newchan - 1].getItemLiveID(plpos))
self.logDebug('EPG.LiveID.1 = ' + str(LiveID))
type1 = str(self.getControl(507).getLabel())
self.logDebug('EPG.type1 = ' + str(type1))
type2 = str(self.getControl(509).getLabel())
self.logDebug('EPG.type2 = ' + str(type2))
if 'LiveID' not in LiveID:
try:
LiveLST = LiveID.split("|", 4)
self.logDebug('EPG.LiveLST = ' + str(LiveLST))
imdbid = LiveLST[0]
self.logDebug('EPG.LiveLST.imdbid.1 = ' + str(imdbid))
imdbid = imdbid.split('imdb_', 1)[-1]
self.logDebug('EPG.LiveLST.imdbid.2 = ' + str(imdbid))
tvdbid = LiveLST[1]
self.logDebug('EPG.LiveLST.tvdbid.1 = ' + str(tvdbid))
tvdbid = tvdbid.split('tvdb_', 1)[-1]
self.logDebug('EPG.LiveLST.tvdbid.2 = ' + str(tvdbid))
SBCP = LiveLST[2]
self.logDebug('EPG.LiveLST.SBCP = ' + str(SBCP))
Unaired = LiveLST[3]
self.logDebug('EPG.LiveLST.Unaired = ' + str(Unaired))
except:
pass
try:
#Try, and pass if label isn't found (Backward compatibility with PTV Skins)
#Sickbeard/Couchpotato
if SBCP == 'SB':
self.getControl(511).setImage(self.mediaPath + 'SB.png')
elif SBCP == 'CP':
self.getControl(511).setImage(self.mediaPath + 'CP.png')
else:
self.getControl(511).setImage(self.mediaPath + 'NA.png')
except:
self.getControl(511).setImage(self.mediaPath + 'NA.png')
pass
try:
#Try, and pass if label isn't found (Backward compatibility with PTV Skins)
#Unaired/aired
if Unaired == 'NEW':
self.getControl(512).setImage(self.mediaPath + 'NEW.png')
elif Unaired == 'OLD':
self.getControl(512).setImage(self.mediaPath + 'OLD.png')
else:
self.getControl(512).setImage(self.mediaPath + 'NA.png')
except:
self.getControl(512).setImage(self.mediaPath + 'NA.png')
pass
if REAL_SETTINGS.getSetting("art.enable") == "true":
self.log('setShowInfo, Dynamic artwork enabled')
if chtype <= 7:
mediapathSeason, filename = os.path.split(mediapath)
self.logDebug('EPG.mediapath.2 = ' + str(mediapathSeason))
mediapathSeries = os.path.dirname(mediapathSeason)
self.logDebug('EPG.mediapath.3 = ' + str(mediapathSeries))
mediapathSeries1PNG = (mediapathSeries + '/' + type1 + ('.png'))
mediapathSeries1JPG = (mediapathSeries + '/' + type1 + ('.jpg'))
mediapathSeason1PNG = (mediapathSeason + '/' + type1 + ('.png'))
mediapathSeason1JPG = (mediapathSeason + '/' + type1 + ('.jpg'))
if FileAccess.exists(mediapathSeries1PNG):
self.getControl(508).setImage(mediapathSeries1PNG)
self.logDebug('EPG.mediapathSeries1.png = ' + str(mediapathSeries1PNG))
elif FileAccess.exists(mediapathSeries1JPG):
self.getControl(508).setImage(mediapathSeries1JPG)
self.logDebug('EPG.mediapathSeries1.png = ' + str(mediapathSeries1JPG))
elif FileAccess.exists(mediapathSeason1PNG):
self.getControl(508).setImage(mediapathSeason1PNG)
self.logDebug('EPG.mediapathSeason1.png = ' + str(mediapathSeason1PNG))
elif FileAccess.exists(mediapathSeason1JPG):
self.getControl(508).setImage(mediapathSeason1JPG)
self.logDebug('EPG.mediapathSeason1.png = ' + str(mediapathSeason1JPG))
else:
self.getControl(508).setImage(self.mediaPath + type1 + '.png')#default fallback art
mediapathSeries2PNG = (mediapathSeries + '/' + type2 + ('.png'))
mediapathSeries2JPG = (mediapathSeries + '/' + type2 + ('.jpg'))
mediapathSeason2PNG = (mediapathSeason + '/' + type2 + ('.png'))
mediapathSeason2JPG = (mediapathSeason + '/' + type2 + ('.jpg'))
if FileAccess.exists(mediapathSeries2PNG):
self.getControl(510).setImage(mediapathSeries2PNG)
self.logDebug('EPG.mediapathSeries2.png = ' + str(mediapathSeries2PNG))
elif FileAccess.exists(mediapathSeries2JPG):
self.getControl(510).setImage(mediapathSeries2JPG)
self.logDebug('EPG.mediapathSeries2.png = ' + str(mediapathSeries2JPG))
elif FileAccess.exists(mediapathSeason2PNG):
self.getControl(510).setImage(mediapathSeason2PNG)
self.logDebug('EPG.mediapathSeason2.png = ' + str(mediapathSeason2PNG))
elif FileAccess.exists(mediapathSeason2JPG):
self.getControl(510).setImage(mediapathSeason2JPG)
self.logDebug('EPG.mediapathSeason2.png = ' + str(mediapathSeason2JPG))
else:
self.getControl(510).setImage(self.mediaPath + type2 + '.png')#default fallback art
elif chtype == 8:#LiveTV w/ TVDBID via Fanart.TV
if tvdbid > 0 and genre != 'Movie':
fanartTV = fanarttv.FTV_TVProvider()
URLLST = fanartTV.get_image_list(tvdbid)
self.logDebug('EPG.tvdb.URLLST.1 = ' + str(URLLST))
if URLLST != None:
URLLST = str(URLLST)
URLLST = URLLST.split("{'art_type': ")
self.logDebug('EPG.tvdb.URLLST.2 = ' + str(URLLST))
try:
Art1 = [s for s in URLLST if type1 in s]
Art1 = Art1[0]
self.logDebug('EPG.tvdb.Art1.1 = ' + str(Art1))
Art1 = Art1[Art1.find("'url': '")+len("'url': '"):Art1.rfind("',")]
self.logDebug('EPG.tvdb.Art1.2 = ' + str(Art1))
Art1 = Art1.split("',")[0]
self.logDebug('EPG.tvdb.Art1.3 = ' + str(Art1))
URLimage1 = Art1
URLimage1 = URLimage1.rsplit('/')[-1]
self.logDebug('EPG.tvdb.URLimage1.1 = ' + str(URLimage1))
URLimage1 = (type1 + '-' + URLimage1)
self.logDebug('EPG.tvdb.URLimage1.2 = ' + str(URLimage1))
flename1 = xbmc.translatePath(os.path.join(CHANNELS_LOC, 'generated') + '/' + 'artwork' + '/' + URLimage1)
if FileAccess.exists(flename1):
self.getControl(508).setImage(flename1)
else:
if not os.path.exists(os.path.join(Artpath)):
os.makedirs(os.path.join(Artpath))
resource = urllib.urlopen(Art1)
self.logDebug('EPG.tvdb.resource = ' + str(resource))
output = open(flename1,"wb")
self.logDebug('EPG.tvdb.output = ' + str(output))
output.write(resource.read())
output.close()
self.getControl(508).setImage(flename1)
except:
self.getControl(508).setImage(self.mediaPath + type1 + '.png')
pass
try:
Art2 = [s for s in URLLST if type2 in s]
Art2 = Art2[0]
self.logDebug('EPG.tvdb.Art2 = ' + str(Art2))
Art2 = Art2[Art2.find("'url': '")+len("'url': '"):Art2.rfind("',")]
self.logDebug('EPG.tvdb.Art2.2 = ' + str(Art2))
Art2 = Art2.split("',")[0]
self.logDebug('EPG.tvdb.Art2.3 = ' + str(Art2))
URLimage2 = Art2
URLimage2 = URLimage2.rsplit('/')[-1]
self.logDebug('EPG.tvdb.URLimage2.1 = ' + str(URLimage2))
URLimage2 = (type2 + '-' + URLimage2)
self.logDebug('EPG.tvdb.URLimage2.2 = ' + str(URLimage2))
flename2 = xbmc.translatePath(os.path.join(CHANNELS_LOC, 'generated') + '/' + 'artwork' + '/' + URLimage2)
if FileAccess.exists(flename2):
self.getControl(510).setImage(flename2)
else:
if not os.path.exists(os.path.join(Artpath)):
os.makedirs(os.path.join(Artpath))
resource = urllib.urlopen(Art2)
self.logDebug('EPG.tvdb.resource = ' + str(resource))
output = open(flename2,"wb")
self.logDebug('EPG.tvdb.output = ' + str(output))
output.write(resource.read())
output.close()
self.getControl(510).setImage(flename2)
except:
self.getControl(510).setImage(self.mediaPath + type2 + '.png')
pass
else:#fallback all artwork because there is no id
self.getControl(508).setImage(self.mediaPath + type1 + '.png')
self.getControl(510).setImage(self.mediaPath + type2 + '.png')
elif imdbid != 0 and genre == 'Movie':#LiveTV w/ IMDBID via Fanart.TV
fanartTV = fanarttv.FTV_MovieProvider()
URLLST = fanartTV.get_image_list(imdbid)
self.logDebug('EPG.imdb.URLLST.1 = ' + str(imdbid))
if URLLST != None:
try:
URLLST = str(URLLST)
URLLST = URLLST.split("{'art_type': ")
self.logDebug('EPG.imdb.URLLST.2 = ' + str(URLLST))
Art1 = [s for s in URLLST if type1 in s]
Art1 = Art1[0]
self.logDebug('EPG.imdb.Art1.1 = ' + str(Art1))
Art2 = [s for s in URLLST if type2 in s]
Art2 = Art2[0]
self.logDebug('EPG.imdb.Art2 = ' + str(Art2))
Art1 = Art1[Art1.find("'url': '")+len("'url': '"):Art1.rfind("',")]
self.logDebug('EPG.imdb.Art1.2 = ' + str(Art1))
Art1 = Art1.split("',")[0]
self.logDebug('EPG.imdb.Art1.3 = ' + str(Art1))
Art2 = Art2[Art2.find("'url': '")+len("'url': '"):Art2.rfind("',")]
self.logDebug('EPG.imdb.Art2.2 = ' + str(Art2))
Art2 = Art2.split("',")[0]
self.logDebug('EPG.imdb.Art2.3 = ' + str(Art2))
URLimage1 = Art1
URLimage1 = URLimage1.rsplit('/')[-1]
self.logDebug('EPG.imdb.URLimage1.1 = ' + str(URLimage1))
URLimage2 = Art2
URLimage1 = (type1 + '-' + URLimage1)
self.logDebug('EPG.imdb.URLimage1.2 = ' + str(URLimage1))
URLimage2 = URLimage2.rsplit('/')[-1]
self.logDebug('EPG.imdb.URLimage2.2 = ' + str(URLimage2))
URLimage2 = (type2 + '-' + URLimage2)
############################################### Move to function todo
flename1 = xbmc.translatePath(os.path.join(CHANNELS_LOC, 'generated') + '/' + 'artwork' + '/' + URLimage1)
if FileAccess.exists(flename1):
self.getControl(508).setImage(flename1)
else:
if not os.path.exists(os.path.join(Artpath)):
os.makedirs(os.path.join(Artpath))
resource = urllib.urlopen(Art1)
self.logDebug('EPG.tvdb.resource = ' + str(resource))
output = open(flename1,"wb")
self.logDebug('EPG.tvdb.output = ' + str(output))
output.write(resource.read())
output.close()
self.getControl(508).setImage(flename1)
flename2 = xbmc.translatePath(os.path.join(CHANNELS_LOC, 'generated') + '/' + 'artwork' + '/' + URLimage2)
if FileAccess.exists(flename2):
self.getControl(510).setImage(flename2)
else:
if not os.path.exists(os.path.join(Artpath)):
os.makedirs(os.path.join(Artpath))
resource = urllib.urlopen(Art2)
self.logDebug('EPG.tvdb.resource = ' + str(resource))
output = open(flename2,"wb")
self.logDebug('EPG.tvdb.output = ' + str(output))
output.write(resource.read())
output.close()
self.getControl(510).setImage(flename2)
##############################################
except:
pass
else:#fallback all artwork because there is no id
self.getControl(508).setImage(self.mediaPath + type1 + '.png')
self.getControl(510).setImage(self.mediaPath + type2 + '.png')
else:#fallback all artwork because there is no id
self.getControl(508).setImage(self.mediaPath + type1 + '.png')
self.getControl(510).setImage(self.mediaPath + type2 + '.png')
# except:
# pass
elif chtype == 9:
self.getControl(508).setImage(self.mediaPath + 'EPG.Internet.508.png')
self.getControl(510).setImage(self.mediaPath + 'EPG.Internet.510.png')
elif chtype == 10:
self.getControl(508).setImage(self.mediaPath + 'EPG.Youtube.508.png')
self.getControl(510).setImage(self.mediaPath + 'EPG.Youtube.510.png')
elif chtype == 11:
self.getControl(508).setImage(self.mediaPath + 'EPG.RSS.508.png')
self.getControl(510).setImage(self.mediaPath + 'EPG.RSS.510.png')
# #TVDB
# if tvdbid > 0:
# try:
# tvdbAPI = TVDB(REAL_SETTINGS.getSetting('tvdb.apikey'))
# self.logDebug('EPG.tvdb.type = ' + str(type))
# Banner = tvdbAPI.getBannerByID(tvdbid, type)
# Banner = str(Banner)
# self.logDebug('EPG.tvdb.Banners.1 = ' + str(Banner))
# if Banner != 0:
# Banner = Banner.split("', '")[0]
# self.logDebug('EPG.tvdb.Banners.2 = ' + str(Banner))
# Banner = Banner.split("[('", 1)[-1]
# self.logDebug('EPG.tvdb.Banners.3 = ' + str(Banner))
# URLimage = Banner
# URLimage = URLimage.split("http://www.thetvdb.com/banners/", 1)[-1]
# self.logDebug('EPG.tvdb.URLimage.1 = ' + str(URLimage))
# URLimage = URLimage.rsplit('/', 1)[-1]
# self.logDebug('EPG.tvdb.URLimage.2 = ' + str(URLimage))
# URLimage = (type + '-' + URLimage)
# self.logDebug('EPG.tvdb.URLimage.3 = ' + str(URLimage))
# flename = xbmc.translatePath(os.path.join(CHANNELS_LOC, 'generated') + '/' + 'artwork' + '/' + URLimage)
# if FileAccess.exists(flename):
# self.getControl(508).setImage(flename)
# else:
# if not os.path.exists(os.path.join(Artpath)):
# os.makedirs(os.path.join(Artpath))
# resource = urllib.urlopen(Banner)
# self.logDebug('EPG.tvdb.resource = ' + str(resource))
# output = open(flename,"wb")
# self.logDebug('EPG.tvdb.output = ' + str(output))
# output.write(resource.read())
# output.close()
# self.getControl(508).setImage(flename)
# except:
# pass
self.getControl(500).setLabel(self.MyOverlayWindow.channels[newchan - 1].getItemTitle(plpos))
#code to display "Live TV" instead of date (date does confirm sync)
#if chtype == 8:
# self.getControl(501).setLabel("LiveTV")
#else:
self.getControl(501).setLabel(self.MyOverlayWindow.channels[newchan - 1].getItemEpisodeTitle(plpos))
self.getControl(502).setLabel(self.MyOverlayWindow.channels[newchan - 1].getItemDescription(plpos))
self.getControl(503).setImage(self.channelLogos + ascii(self.MyOverlayWindow.channels[newchan - 1].name) + '_c.png')
self.log('setShowInfo return')
# using the currently selected button, play the proper shows
def selectShow(self):
self.log('selectShow')
basex, basey = self.getControl(111 + self.focusRow).getPosition()
baseh = self.getControl(111 + self.focusRow).getHeight()
basew = self.getControl(111 + self.focusRow).getWidth()
# use the selected time to set the video
left, top = self.channelButtons[self.focusRow][self.focusIndex].getPosition()
width = self.channelButtons[self.focusRow][self.focusIndex].getWidth()
left = left - basex + (width / 2)
starttime = self.shownTime + (left / (basew / 5400.0))
chnoffset = self.focusRow - 2
newchan = self.centerChannel
nowDate = datetime.datetime.now()
while chnoffset != 0:
if chnoffset > 0:
newchan = self.MyOverlayWindow.fixChannel(newchan + 1, True)
chnoffset -= 1
else:
newchan = self.MyOverlayWindow.fixChannel(newchan - 1, False)
chnoffset += 1
plpos = self.determinePlaylistPosAtTime(starttime, newchan)
chtype = int(ADDON_SETTINGS.getSetting('Channel_' + str(newchan) + '_type'))
if plpos == -1:
self.log('Unable to find the proper playlist to set from EPG', xbmc.LOGERROR)
return
timedif = (time.time() - self.MyOverlayWindow.channels[newchan - 1].lastAccessTime)
pos = self.MyOverlayWindow.channels[newchan - 1].playlistPosition
showoffset = self.MyOverlayWindow.channels[newchan - 1].showTimeOffset
#code added for "LiveTV" types
#Get the Start time of the show from "episodeitemtitle"
        #we just passed this from channellist.py; it is just a placeholder to get the value
#Start at the beginning of the playlist get the first epoch date
#position pos of the playlist convert the string add until we get to the current item in the playlist
if chtype == 8:
tmpDate = self.MyOverlayWindow.channels[newchan - 1].getItemtimestamp(pos)
self.log("selectshow tmpdate " + str(tmpDate))
t = time.strptime(tmpDate, '%Y-%m-%d %H:%M:%S')
epochBeginDate = time.mktime(t)
#beginDate = datetime.datetime(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
#loop till we get to the current show
while epochBeginDate + self.MyOverlayWindow.channels[newchan - 1].getItemDuration(pos) < time.time():
epochBeginDate += self.MyOverlayWindow.channels[newchan - 1].getItemDuration(pos)
pos = self.MyOverlayWindow.channels[newchan - 1].fixPlaylistIndex(pos + 1)
self.log('live tv while loop')
# adjust the show and time offsets to properly position inside the playlist
else:
while showoffset + timedif > self.MyOverlayWindow.channels[newchan - 1].getItemDuration(pos):
self.log('duration ' + str(self.MyOverlayWindow.channels[newchan - 1].getItemDuration(pos)))
timedif -= self.MyOverlayWindow.channels[newchan - 1].getItemDuration(pos) - showoffset
pos = self.MyOverlayWindow.channels[newchan - 1].fixPlaylistIndex(pos + 1)
showoffset = 0
self.log('pos + plpos ' + str(pos) +', ' + str(plpos))
if self.MyOverlayWindow.currentChannel == newchan:
if plpos == xbmc.PlayList(xbmc.PLAYLIST_MUSIC).getposition():
self.log('selectShow return current show')
return
if chtype == 8:
self.log('selectShow return current LiveTV channel')
return
if pos != plpos:
if chtype == 8:
self.log('selectShow return different LiveTV channel')
return
else:
self.MyOverlayWindow.channels[newchan - 1].setShowPosition(plpos)
self.MyOverlayWindow.channels[newchan - 1].setShowTime(0)
self.MyOverlayWindow.channels[newchan - 1].setAccessTime(time.time())
self.MyOverlayWindow.newChannel = newchan
self.log('selectShow return')
def determinePlaylistPosAtTime(self, starttime, channel):
self.log('determinePlaylistPosAtTime ' + str(starttime) + ', ' + str(channel))
channel = self.MyOverlayWindow.fixChannel(channel)
chtype = int(ADDON_SETTINGS.getSetting('Channel_' + str(channel) + '_type'))
self.lastExitTime = ADDON_SETTINGS.getSetting("LastExitTime")
nowDate = datetime.datetime.now()
# if the channel is paused, then it's just the current item
if self.MyOverlayWindow.channels[channel - 1].isPaused:
self.log('determinePlaylistPosAtTime paused return')
return self.MyOverlayWindow.channels[channel - 1].playlistPosition
else:
# Find the show that was running at the given time
# Use the current time and show offset to calculate it
# At timedif time, channelShowPosition was playing at channelTimes
# The only way this isn't true is if the current channel is curchannel since
            # it could have been fast forwarded or rewound
if channel == self.MyOverlayWindow.currentChannel: #currentchannel epg
#Live TV pull date from the playlist entry
if chtype == 8:
playlistpos = int(xbmc.PlayList(xbmc.PLAYLIST_VIDEO).getposition())
#episodetitle is actually the start time of each show that the playlist gets from channellist.py
tmpDate = self.MyOverlayWindow.channels[channel - 1].getItemtimestamp(playlistpos)
self.log("setbuttonnowtime2 " + str(tmpDate))
t = time.strptime(tmpDate, '%Y-%m-%d %H:%M:%S')
epochBeginDate = time.mktime(t)
#beginDate = datetime.datetime(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
#videotime = (nowDate - beginDate).seconds
videotime = time.time() - epochBeginDate
reftime = time.time()
else:
playlistpos = int(xbmc.PlayList(xbmc.PLAYLIST_VIDEO).getposition())
videotime = xbmc.Player().getTime()
reftime = time.time()
else:
#Live TV pull date from the playlist entry
if chtype == 8:
playlistpos = self.MyOverlayWindow.channels[channel - 1].playlistPosition
#playlistpos = int(xbmc.PlayList(xbmc.PLAYLIST_VIDEO).getposition())
#episodetitle is actually the start time of each show that the playlist gets from channellist.py
tmpDate = self.MyOverlayWindow.channels[channel - 1].getItemtimestamp(playlistpos)
self.log("setbuttonnowtime2 " + str(tmpDate))
t = time.strptime(tmpDate, '%Y-%m-%d %H:%M:%S')
epochBeginDate = time.mktime(t)
#beginDate = datetime.datetime(t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec)
#videotime = (nowDate - beginDate).seconds
while epochBeginDate + self.MyOverlayWindow.channels[channel - 1].getItemDuration(playlistpos) < time.time():
epochBeginDate += self.MyOverlayWindow.channels[channel - 1].getItemDuration(playlistpos)
playlistpos = self.MyOverlayWindow.channels[channel - 1].fixPlaylistIndex(playlistpos + 1)
videotime = time.time() - epochBeginDate
self.log('videotime ' + str(videotime))
reftime = time.time()
else:
playlistpos = self.MyOverlayWindow.channels[channel - 1].playlistPosition
videotime = self.MyOverlayWindow.channels[channel - 1].showTimeOffset
reftime = self.MyOverlayWindow.channels[channel - 1].lastAccessTime
# normalize reftime to the beginning of the video
reftime -= videotime
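            # Step back while the current item starts after the target time,
            # then step forward until the item containing starttime is reached.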
while reftime > starttime:
playlistpos -= 1
reftime -= self.MyOverlayWindow.channels[channel - 1].getItemDuration(playlistpos)
while reftime + self.MyOverlayWindow.channels[channel - 1].getItemDuration(playlistpos) < starttime:
reftime += self.MyOverlayWindow.channels[channel - 1].getItemDuration(playlistpos)
playlistpos += 1
self.log('determinePlaylistPosAtTime return' + str(self.MyOverlayWindow.channels[channel - 1].fixPlaylistIndex(playlistpos)))
return self.MyOverlayWindow.channels[channel - 1].fixPlaylistIndex(playlistpos)
|
yolanother/script.pseudotv.live
|
resources/lib/EPGWindow.py
|
Python
|
gpl-3.0
| 63,459
|
"""
================================
Digits Classification Exercise
================================
This exercise is used in the :ref:`clf_tut` part of the
:ref:`supervised_learning_tut` section of the
:ref:`stat_learn_tut_index`.
"""
print(__doc__)
from sklearn import datasets, neighbors, linear_model
digits = datasets.load_digits()
X_digits = digits.data
y_digits = digits.target
n_samples = len(X_digits)
n_train = int(.9 * n_samples)  # slice indices must be integers
X_train = X_digits[:n_train]
y_train = y_digits[:n_train]
X_test = X_digits[n_train:]
y_test = y_digits[n_train:]
knn = neighbors.KNeighborsClassifier()
logistic = linear_model.LogisticRegression()
print('KNN score: %f' % knn.fit(X_train, y_train).score(X_test, y_test))
print('LogisticRegression score: %f'
% logistic.fit(X_train, y_train).score(X_test, y_test))
|
florian-f/sklearn
|
examples/exercises/plot_digits_classification_exercise.py
|
Python
|
bsd-3-clause
| 816
|
# Copyright (c) 2010-2014, Lawrence Livermore National Security, LLC
# Produced at Lawrence Livermore National Laboratory
# LLNL-CODE-462894
# All rights reserved.
#
# This file is part of MixDown. Please read the COPYRIGHT file
# for Our Notice and the LICENSE file for the GNU Lesser General Public
# License.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License (as published by
# the Free Software Foundation) version 3 dated June 2007.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
tepperly/MixDown
|
md/__init__.py
|
Python
|
lgpl-3.0
| 1,016
|
from setuptools import setup
setup(
name='grabfeed',
packages=['grabfeed'],
version='0.4',
description='Detects and return RSS feeds for a given website.',
author='Sudip Kafle',
author_email='soodip.kafle@gmail.com',
url='https://github.com/kaflesudip/grabfeed',
download_url='https://github.com/kaflesudip/grabfeed/tarball/0.4',
keywords=['RSS', 'Feeds', 'Scraping'],
install_requires=[
'beautifulsoup4==4.4.1',
'requests==2.8.1'
],
classifiers=[],
)
|
kaflesudip/grabfeed
|
setup.py
|
Python
|
apache-2.0
| 516
|
"""
Tests for lqcontrol.py file
"""
import sys
import unittest
import numpy as np
from numpy.testing import assert_allclose
from numpy import dot
from quantecon.lqcontrol import LQ, LQMarkov
class TestLQControl(unittest.TestCase):
def setUp(self):
# Initial Values
q = 1.
r = 1.
rf = 1.
a = .95
b = -1.
c = .05
beta = .95
T = 1
self.lq_scalar = LQ(q, r, a, b, C=c, beta=beta, T=T, Rf=rf)
Q = np.array([[0., 0.], [0., 1]])
R = np.array([[1., 0.], [0., 0]])
RF = np.eye(2) * 100
A = np.ones((2, 2)) * .95
B = np.ones((2, 2)) * -1
self.lq_mat = LQ(Q, R, A, B, beta=beta, T=T, Rf=RF)
self.methods = ['doubling', 'qz']
def tearDown(self):
del self.lq_scalar
del self.lq_mat
def test_scalar_sequences(self):
lq_scalar = self.lq_scalar
x0 = 2
x_seq, u_seq, w_seq = lq_scalar.compute_sequence(x0)
# Solution found by hand
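        # With T=1 the scalar objective is r*x0**2 + q*u0**2 + beta*rf*x1**2,
        # where x1 = a*x0 + b*u0.  The first-order condition in u0,
        #     2*q*u0 + 2*beta*rf*b*(a*x0 + b*u0) = 0,
        # rearranges to the closed form below.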
u_0 = (-2*lq_scalar.A*lq_scalar.B*lq_scalar.beta*lq_scalar.Rf) / \
(2*lq_scalar.Q+lq_scalar.beta*lq_scalar.Rf*2*lq_scalar.B**2) \
* x0
x_1 = lq_scalar.A * x0 + lq_scalar.B * u_0 + \
dot(lq_scalar.C, w_seq[0, -1])
assert_allclose(u_0, u_seq, rtol=1e-4)
assert_allclose(x_1, x_seq[0, -1], rtol=1e-4)
def test_scalar_sequences_with_seed(self):
lq_scalar = self.lq_scalar
x0 = 2
x_seq, u_seq, w_seq = \
lq_scalar.compute_sequence(x0, 10, random_state=5)
expected_output = np.array([[ 0.44122749, -0.33087015]])
assert_allclose(w_seq, expected_output)
def test_mat_sequences(self):
lq_mat = self.lq_mat
x0 = np.random.randn(2) * 25
x_seq, u_seq, w_seq = lq_mat.compute_sequence(x0)
assert_allclose(np.sum(u_seq), .95 * np.sum(x0), atol=1e-3)
assert_allclose(x_seq[:, -1], np.zeros_like(x0), atol=1e-3)
def test_stationary_mat(self):
x0 = np.random.randn(2) * 25
lq_mat = self.lq_mat
f_answer = np.array([[-.95, -.95], [0., 0.]])
val_func_answer = x0[0]**2
for method in self.methods:
P, F, d = lq_mat.stationary_values(method=method)
val_func_lq = np.dot(x0, P).dot(x0)
assert_allclose(f_answer, F, atol=1e-3)
assert_allclose(val_func_lq, val_func_answer, atol=1e-3)
class TestLQMarkov(unittest.TestCase):
def setUp(self):
# Markov chain transition matrix
Π = np.array([[0.8, 0.2],
[0.2, 0.8]])
# discount rate
beta = .95
# scalar case
q1, q2 = 1., .5
r1, r2 = 1., .5
a1, a2 = .95, .9
b1, b2 = -1., -.5
self.lq_markov_scalar = LQMarkov(Π, [q1, q2], [r1, r2], [a1, a2],
[b1, b2], beta=beta)
# matrix case
Π = np.array([[0.8, 0.2],
[0.2, 0.8]])
Qs = np.array([[[0.9409]], [[0.870489]]])
Rs = np.array([[[1., 0., 1.],
[0., 0., 0.],
[1., 0., 1.]],
[[1., 0., 1.],
[0., 0., 0.],
[1., 0., 1.]]])
Ns = np.array([[[-0.97, 0., -0.97]],
[[-0.933, 0., -0.933]]])
As = np.array([[[0., 0., 0.],
[0., 1., 0.],
[0., 5., 0.8]],
[[0., 0., 0.],
[0., 1., 0.],
[0., 5., 0.8]]])
B = np.array([[1., 0., 0.]]).T
Bs = [B, B]
C = np.array([[0., 0., 1.]]).T
Cs = [C, C]
self.lq_markov_mat1 = LQMarkov(Π, Qs, Rs, As, Bs,
Cs=Cs, Ns=Ns, beta=0.95)
self.lq_markov_mat2 = LQMarkov(Π, Qs, Rs, As, Bs,
Cs=Cs, Ns=Ns, beta=1.05)
def tearDown(self):
del self.lq_markov_scalar
del self.lq_markov_mat1
del self.lq_markov_mat2
def test_print(self):
print(self.lq_markov_scalar)
print(self.lq_markov_mat1)
def test_scalar_sequences_with_seed(self):
lq_markov_scalar = self.lq_markov_scalar
x0 = 2
expected_x_seq = np.array([[2., 1.15977567, 0.6725398]])
expected_u_seq = np.array([[1.28044866, 0.7425166]])
expected_w_seq = np.array([[1.3486939, 0.55721062, 0.53423587]])
expected_state = np.array([1, 1, 1])
x_seq, u_seq, w_seq, state = \
lq_markov_scalar.compute_sequence(x0, ts_length=2,
random_state=1234)
assert_allclose(x_seq, expected_x_seq, atol=1e-6)
assert_allclose(u_seq, expected_u_seq, atol=1e-6)
assert_allclose(w_seq, expected_w_seq, atol=1e-6)
assert_allclose(state, expected_state, atol=1e-6)
def test_stationary_scalar(self):
lq_markov_scalar = self.lq_markov_scalar
P_answer = np.array([[[1.51741465]],
[[1.07334181]]])
d_answer = np.array([0., 0.])
F_answer = np.array([[[-0.54697435]],
[[-0.64022433]]])
Ps, ds, Fs = lq_markov_scalar.stationary_values()
assert_allclose(F_answer, Fs, atol=1e-6)
assert_allclose(P_answer, Ps, atol=1e-6)
assert_allclose(d_answer, ds, atol=1e-6)
def test_mat_sequences(self):
lq_markov_mat = self.lq_markov_mat1
x0 = np.array([[1000, 1, 25]])
expected_x_seq = np.array([[1.00000000e+03, 1.01372101e+03],
[1.00000000e+00, 1.00000000e+00],
[2.50000000e+01, 2.61845443e+01]])
expected_u_seq = np.array([[1013.72101253]])
expected_w_seq = np.array([[0.41782708, 1.18454431]])
expected_state = np.array([1, 1])
x_seq, u_seq, w_seq, state = \
lq_markov_mat.compute_sequence(x0, ts_length=1, random_state=1234)
assert_allclose(x_seq, expected_x_seq, atol=1e-6)
assert_allclose(u_seq, expected_u_seq, atol=1e-6)
assert_allclose(w_seq, expected_w_seq, atol=1e-6)
assert_allclose(state, expected_state, atol=1e-6)
def test_stationary_mat(self):
lq_markov_mat = self.lq_markov_mat1
d_answer = np.array([16.2474886, 16.31935939])
P_answer = np.array([[[4.5144056e-02, 1.8627227e+01, 1.9348906e-01],
[1.8627227e+01, 7.9343733e+03, 8.0055130e+01],
[1.9348906e-01, 8.0055130e+01, 8.2991316e-01]],
[[5.3606323e-02, 2.0136657e+01, 2.1763323e-01],
[2.0136657e+01, 7.8100167e+03, 8.1960509e+01],
[2.1763323e-01, 8.1960509e+01, 8.8413147e-01]]])
F_answer = np.array([[[-0.98437714, 19.2051657, -0.83142157]],
[[-1.01434303, 21.58480004, -0.83851124]]])
Ps, ds, Fs = lq_markov_mat.stationary_values()
assert_allclose(F_answer, Fs, atol=1e-6)
assert_allclose(P_answer, Ps, atol=1e-6)
assert_allclose(d_answer, ds, atol=1e-6)
def test_raise_error(self):
# test raising error for not converging
lq_markov_mat = self.lq_markov_mat2
self.assertRaises(ValueError, lq_markov_mat.stationary_values)
if __name__ == '__main__':
for Test in [TestLQControl, TestLQMarkov]:
suite = unittest.TestLoader().loadTestsFromTestCase(Test)
unittest.TextTestRunner(verbosity=2, stream=sys.stderr).run(suite)
|
oyamad/QuantEcon.py
|
quantecon/tests/test_lqcontrol.py
|
Python
|
bsd-3-clause
| 7,700
|
#How to run this:
#Python libraries needed to run this file: Flask, Git Python, SQLAlchemy
#You will need to have Git installed, and it will need to be in your path.
#For example, on Windows you should be able to run a command like 'git pull' from the
#ordinary Windows command prompt and not just from Git Bash.
#You will need a MySQL server with the MSR14 datasource or other GHTorrent database with the same schema.
#Edit the line in this code that says db = sqlalchemy.create_engine to match your username:password@hostname:port/database.
#This file is hardcoded to download the ghdata repository.
#Since it is a preliminary example, each time it runs,
#it deletes the local ghdata repo and re-downloads it (though this might not be a good option for the future).
#Because of this: if you have a folder named ghdata whose contents you do not want deleted,
#do not place this file in the same folder as your ghdata folder.
#to run this, type "python pythonBlameHistoryTree.py" into the command prompt
#You will see some output about running on 127.0.0.1:5000 in the command prompt
#Open a web browser and navigate to 127.0.0.1:5000.
#This page will load for quite a while. At least several minutes is expected.
#You can see it is still running from the testing output in the command prompt: "Outer loop: commit#" and "Inner loop: commit#"
#When the testing output stops running you should see some output in the browser tab.
#the output shows the commit number and date, the total lines of code and other files (for example, the readme)
#and the percentage written by each organization.
#expected output for ghdata should show only the spdx-tools organization (Matt is a member)
#Number of lines corresponds to the lines written by Matt.
#You can see that earlier commits are lower on the page, and chronologically later ones appear higher up.
#An "error" I expect us to encounter when testing other repos:
#The way my sql query works right now, a user can be a member of multiple organizations.
#For a simple case of expected output problems:
#User1 wrote the entire repository (100%)
#User1 is a member of Microsoft and IBM
#Microsoft wrote 100% of the repository. IBM also wrote 100% of the repository for a total of 200%
#Other issues:
#If a user does not have both an email and organization available in GHTorrent database,
#the user will not be counted towards any organization.
#Future changes planned for this file:
#Code cleanup for better readability
#Code commenting for each portion
#Thorough testing for various potential cases we might encounter
#Deciding for certain how to decide whether a user is a member of an organization
#A better method of dealing with the local repository than deleting and re-downloading it each time
#Not having the database password directly in the code
#Look into improving code efficiency where possible for faster runtime
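#Hypothetical sketch (not wired into the code below) of one way to handle the
#multiple-organization issue described above: split each author's line count
#evenly across their organizations so the percentages still sum to 100%.
def split_lines_across_orgs(orgs, line_count):
    #Each organization receives an equal share of the author's lines.
    share = float(line_count) / len(orgs)
    return dict((org, share) for org in orgs)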
from flask import Flask
from git import *
import sqlalchemy
from sqlalchemy import text
import shutil
import os
import stat
import time
app = Flask(__name__)
@app.route("/")
def pythonBlameHistory():
#path is the hardcoded folder for the last download of ghdata
repo_path = './ghdata'
#We must remove the old ghdata if we want to download a new copy.
#In order to delete it, we must first change the permissions
#To be writable for all files and directories.
#Based on this: http://stackoverflow.com/questions/2853723/whats-the-python-way-for-recursively-setting-file-permissions
if os.path.exists(repo_path):
for root, directories, files in os.walk(repo_path):
for directory in directories:
os.chmod(os.path.join(root, directory), stat.S_IWRITE)
for file in files:
os.chmod(os.path.join(root, file), stat.S_IWRITE)
os.chmod(repo_path, stat.S_IWRITE)
#delete the old ghdata
shutil.rmtree(repo_path)
#connect to the database username:password@hostname:port/databasename
db = sqlalchemy.create_engine('mysql+pymysql://root:password@localhost:3306/msr14')
schema = sqlalchemy.MetaData()
schema.reflect(bind=db)
#Get the ghdata repository from GitHub
repo = Repo.init('ghdata')
origin = repo.create_remote('origin','https://github.com/OSSHealth/ghdata.git')
origin.fetch()
origin.pull(origin.refs[0].remote_head)
#Dictionary to store results of sql queries
#associating emails with organizations.
#Without this dictionary, we would have to repeat
#the same query over and over, which on my local machine
#meant a runtime of over 24 hours (as opposed to several minutes using the dictionary)
orgs_associated_with_user = {}
#This dictionary keeps track of the lines written per organization for a single file.
lines_per_organization_per_file = {}
#This is the total number of lines in a single file
total_lines_in_file = 0
#this is used later to hold percentage results for output
percentage = 0
#This is the total number of lines in an entire repo
total_lines_in_repo = 0
#This dictionary keeps track of the lines written per organization for the entire repo.
lines_per_organization_entire_repo = {}
#The output string will be displayed to the screen once everything is done running.
outputString = ""
#Outer loop: loop through each commit in the master branch.
#This corresponds to the history of commits over time.
for history_commit in repo.iter_commits('master'):
#Since we want to see the change over time in repo percentage by organization,
#clear the variables for total lines and organization lines for each new commit
#we examine.
lines_per_organization_entire_repo = {}
total_lines_in_repo = 0
#Testing output: only purpose is to show you it's still running :)
print("Outer loop: " + str(history_commit))
#Now loop through every file in the repo.
#You cannot use the os library file/directory loop for this part.
#(as was used above to change file permissions)
#That is because some files do not exist in every commit.
#You must loop through the commit tree, not the ghdata directory.
for file_in_repo in history_commit.tree.traverse():
#For each file, we want to clear out the total lines and organization totals per file.
#That's because we're starting over with a new file.
lines_per_organization_per_file = {}
total_lines_in_file = 0
#Files are of the blob type. This if statement prevents us from trying
#to examine 'lines' in a directory.
if file_in_repo.type == 'blob':
#Now for each file, perform git blame. This will traverse
#the lines in the file.
#You can see there are now two variables of type commit:
#history_commit and blame_commit (will improve variable naming in a future update)
#history_commit is the commit with respect to the overall repo history.
#blame_commit is the commit in which this line was most recently changed
#as obtained through git blame. We use the "blame_commit" variable
#to obtain the author of the commit for when the lines were last changed.
for blame_commit, lines in repo.blame(history_commit, file_in_repo.path):
#Git blame does not always return one line at a time.
#Sometimes we are returned several lines committed by the same author.
#In that case, we must count how many lines there are or our
#total will not match the actual file.
blameLineCount = 0
for line in lines:
#increment lines to later attribute to an organization.
blameLineCount += 1
#increment lines in the file as a whole
total_lines_in_file += 1
#Testing output: only shows that things are still running.
print("Inner loop: " + str(blame_commit))
#Get the email address of the author of this commit.
#If we already have it in our dictionary, increase the total
#lines for the associated organization by blameLineCount
if blame_commit.author.email in orgs_associated_with_user:
for organization in orgs_associated_with_user[blame_commit.author.email]:
if organization not in lines_per_organization_per_file:
lines_per_organization_per_file[organization] = blameLineCount
else:
lines_per_organization_per_file[organization] += blameLineCount
#If the email address is not in our dictionary, we must query
#the database to get any associated organizations.
else:
sql = text('select orgUser.login as org_name '
'from users as thisUser join organization_members '
'on organization_members.user_id = thisUser.id '
'join users as orgUser on organization_members.org_id = orgUser.id '
'where thisUser.email = "' + blame_commit.author.email + '"')
result = db.engine.execute(sql)
#add the email to the dictionary
orgs_associated_with_user[blame_commit.author.email] = []
#if there are organizations in the result, associate those organizations with the
#user email in the dictionary.
#Then, set or add blameLineCount to the organization total.
for organization_row in result:
orgs_associated_with_user[blame_commit.author.email] = orgs_associated_with_user[blame_commit.author.email] + [organization_row[0]]
if organization_row[0] not in lines_per_organization_per_file:
lines_per_organization_per_file[organization_row[0]] = blameLineCount
else:
lines_per_organization_per_file[organization_row[0]] += blameLineCount
#If there is at least one line in this file
if total_lines_in_file > 0:
#Add the total lines in this file to the total lines in the repo.
total_lines_in_repo += total_lines_in_file
#Loop through the organization total lines for this file.
#Add each organization to the repo's organization total lines.
for organization in lines_per_organization_per_file:
if organization not in lines_per_organization_entire_repo:
lines_per_organization_entire_repo[organization] = lines_per_organization_per_file[organization]
else:
lines_per_organization_entire_repo[organization] += lines_per_organization_per_file[organization]
#Calculate the percentage for this file by organization (no longer used: former testing output)
percentage = lines_per_organization_per_file[organization] / total_lines_in_file * 100
#Construct output for this commit. First output the commit, date, and total lines in the repo.
outputString = outputString + "REPO TOTALS FOR COMMIT: " + str(history_commit) + " authored at " + time.strftime("%I:%M %p, %b %d, %Y", time.gmtime(history_commit.authored_date)) + " <br>"
outputString = outputString + "TOTAL REPO LINES: " + str(total_lines_in_repo) + "<br>"
#Now loop through the organizations and calculate the percentage of the repo for each.
#Output a line for each organization showing organization name, lines from that organization, percentage of the file
for organization in lines_per_organization_entire_repo:
percentage = lines_per_organization_entire_repo[organization] / total_lines_in_repo * 100
outputString = outputString + " ORGANIZATION: " + str(organization) + " ORG TOTAL LINES: " + str(lines_per_organization_entire_repo[organization]) + " PERCENTAGE OF REPO: " + str(percentage) + "%<br>"
#Output line between each commit in the history for easier legibility.
outputString = outputString + "----------------------------------------------------------------------------<br>"
#Show the outputString in the browser.
return outputString
if __name__ == "__main__":
app.run()
|
Hackers-To-Engineers/ghdata-sprint1team-2
|
organizationHistory/pythonBlameHistoryTree.py
|
Python
|
mit
| 12,918
|
# -*- coding: utf-8 -*-
from ...support.grammar import Grammar
from ...support.fluent import Fluent
from ...query.expression import QueryExpression
from ...dbal.column import Column
from ...dbal.table_diff import TableDiff
from ...dbal.comparator import Comparator
from ..blueprint import Blueprint
class SchemaGrammar(Grammar):
def __init__(self, connection):
super(SchemaGrammar, self).__init__(marker=connection.get_marker())
self._connection = connection
def compile_rename_column(self, blueprint, command, connection):
"""
Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
schema = connection.get_schema_manager()
table = self.get_table_prefix() + blueprint.get_table()
column = connection.get_column(table, command.from_)
table_diff = self._get_renamed_diff(blueprint, command, column, schema)
return schema.get_database_platform().get_alter_table_sql(table_diff)
def _get_renamed_diff(self, blueprint, command, column, schema):
"""
Get a new column instance with the new column name.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param column: The column
:type column: orator.dbal.Column
:param schema: The schema
:type schema: orator.dbal.SchemaManager
:rtype: orator.dbal.TableDiff
"""
table_diff = self._get_table_diff(blueprint, schema)
return self._set_renamed_columns(table_diff, command, column)
def _set_renamed_columns(self, table_diff, command, column):
"""
Set the renamed columns on the table diff.
:rtype: orator.dbal.TableDiff
"""
new_column = Column(command.to, column.get_type(), column.to_dict())
table_diff.renamed_columns = {command.from_: new_column}
return table_diff
def compile_foreign(self, blueprint, command, _):
"""
Compile a foreign key command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:rtype: str
"""
table = self.wrap_table(blueprint)
on = self.wrap_table(command.on)
columns = self.columnize(command.columns)
on_columns = self.columnize(
command.references
if isinstance(command.references, list)
else [command.references]
)
sql = "ALTER TABLE %s ADD CONSTRAINT %s " % (table, command.index)
sql += "FOREIGN KEY (%s) REFERENCES %s (%s)" % (columns, on, on_columns)
if command.get("on_delete"):
sql += " ON DELETE %s" % command.on_delete
if command.get("on_update"):
sql += " ON UPDATE %s" % command.on_update
return sql
def _get_columns(self, blueprint):
"""
Get the blueprint's columns definitions.
:param blueprint: The blueprint
:type blueprint: Blueprint
:rtype: list
"""
columns = []
for column in blueprint.get_added_columns():
sql = self.wrap(column) + " " + self._get_type(column)
columns.append(self._add_modifiers(sql, blueprint, column))
return columns
def _add_modifiers(self, sql, blueprint, column):
"""
        Add the column modifiers to the definition.
"""
for modifier in self._modifiers:
method = "_modify_%s" % modifier
if hasattr(self, method):
sql += getattr(self, method)(blueprint, column)
return sql
def _get_command_by_name(self, blueprint, name):
"""
        Get the primary key command if it exists.
"""
commands = self._get_commands_by_name(blueprint, name)
if len(commands):
return commands[0]
def _get_commands_by_name(self, blueprint, name):
"""
Get all of the commands with a given name.
"""
return list(filter(lambda value: value.name == name, blueprint.get_commands()))
def _get_type(self, column):
"""
Get the SQL for the column data type.
:param column: The column
:type column: Fluent
        :rtype: str
"""
return getattr(self, "_type_%s" % column.type)(column)
def prefix_list(self, prefix, values):
"""
Add a prefix to a list of values.
"""
return list(map(lambda value: prefix + " " + value, values))
def wrap_table(self, table):
if isinstance(table, Blueprint):
table = table.get_table()
return super(SchemaGrammar, self).wrap_table(table)
def wrap(self, value, prefix_alias=False):
if isinstance(value, Fluent):
value = value.name
return super(SchemaGrammar, self).wrap(value, prefix_alias)
def _get_default_value(self, value):
"""
Format a value so that it can be used in "default" clauses.
"""
if isinstance(value, QueryExpression):
return value
if isinstance(value, bool):
return "'%s'" % int(value)
return "'%s'" % value
def _get_table_diff(self, blueprint, schema):
table = self.get_table_prefix() + blueprint.get_table()
table_diff = TableDiff(table)
table_diff.from_table = schema.list_table_details(table)
return table_diff
def compile_change(self, blueprint, command, connection):
"""
Compile a change column command into a series of SQL statement.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
schema = connection.get_schema_manager()
table_diff = self._get_changed_diff(blueprint, schema)
if table_diff:
sql = schema.get_database_platform().get_alter_table_sql(table_diff)
if isinstance(sql, list):
return sql
return [sql]
return []
def _get_changed_diff(self, blueprint, schema):
"""
        Get the table difference for the given changes.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param schema: The schema
:type schema: orator.dbal.SchemaManager
:rtype: orator.dbal.TableDiff
"""
table = schema.list_table_details(
self.get_table_prefix() + blueprint.get_table()
)
return Comparator().diff_table(
table, self._get_table_with_column_changes(blueprint, table)
)
def _get_table_with_column_changes(self, blueprint, table):
"""
Get a copy of the given table after making the column changes.
:param blueprint: The blueprint
:type blueprint: Blueprint
:type table: orator.dbal.table.Table
:rtype: orator.dbal.table.Table
"""
table = table.clone()
for fluent in blueprint.get_changed_columns():
column = self._get_column_for_change(table, fluent)
for key, value in fluent.get_attributes().items():
option = self._map_fluent_option(key)
if option is not None:
method = "set_%s" % option
if hasattr(column, method):
getattr(column, method)(self._map_fluent_value(option, value))
return table
def _get_column_for_change(self, table, fluent):
"""
Get the column instance for a column change.
:type table: orator.dbal.table.Table
:rtype: orator.dbal.column.Column
"""
return table.change_column(
fluent.name, self._get_column_change_options(fluent)
).get_column(fluent.name)
def _get_column_change_options(self, fluent):
"""
Get the column change options.
"""
options = {
"name": fluent.name,
"type": self._get_dbal_column_type(fluent.type),
"default": fluent.get("default"),
}
if fluent.type in ["string"]:
options["length"] = fluent.length
return options
def _get_dbal_column_type(self, type_):
"""
Get the dbal column type.
:param type_: The fluent type
:type type_: str
:rtype: str
"""
type_ = type_.lower()
if type_ == "big_integer":
type_ = "bigint"
elif type == "small_integer":
type_ = "smallint"
elif type_ in ["medium_text", "long_text"]:
type_ = "text"
return type_
def _map_fluent_option(self, attribute):
if attribute in ["type", "name"]:
return
elif attribute == "nullable":
return "notnull"
elif attribute == "total":
return "precision"
elif attribute == "places":
return "scale"
else:
return
def _map_fluent_value(self, option, value):
if option == "notnull":
return not value
return value
def platform_version(self, parts=2):
return self._connection.server_version[:parts]
def platform(self):
"""
Returns the dbal database platform.
:rtype: orator.dbal.platforms.platform.Platform
"""
return self._connection.get_database_platform()
|
sdispater/orator
|
orator/schema/grammars/grammar.py
|
Python
|
mit
| 9,880
|
#!/usr/bin/python
import random
from lib.singleton import Singleton
from lib import log
from srv.phase import Phase
from srv.game import Game
from srv.variable import Variable
from srv.logic.doublerphase import DoublerPhase
from srv.logic.withdrawphase import WithdrawPhase
class DicePhase(Phase,Singleton):
_name = "dice"
_idnum = 13
def __init__(self):
log.debug("dice", 5, at="init")
if None is getattr(self,"accounts",None):
log.debug("dice", 5, at="real init")
Variable("diceingame", 131, ispersistent = True, minmax = [0, 1000])
Variable("holddice1", 132, minmax = [0, 1])
Variable("holddice2", 133, minmax = [0, 1])
Variable("holddice3", 134, minmax = [0, 1])
Variable("bet", 135, enumdict = {10:"10", 20:"20", 30:"30"})
Variable("dice1state", 136, ispersistent = True, minmax = [1, 6])
Variable("dice2state", 137, ispersistent = True, minmax = [1, 6])
Variable("dice3state", 138, ispersistent = True, minmax = [1, 6])
self.accounts = ["credit", "diceingame"]
self.inputs = ["bet", "holddice1", "holddice2", "holddice3"]
self.outputs = ["dice1state", "dice2state", "dice3state", "diceingame"]
Phase.__init__(self)
def play(self, state):
state.credit -= state.bet
if not state.holddice1:
state.dice1state = random.randint(1, 6)
if not state.holddice2:
state.dice2state = random.randint(1, 6)
if not state.holddice3:
state.dice3state = random.randint(1, 6)
        state.diceingame += ((state.dice1state + state.dice2state + state.dice3state)*2 + 1 ) * state.bet / 22 # ((3.5*3)*2 +1)/22 = 1
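        # Expected value check: each die averages 3.5, so the three-dice sum
        # averages 10.5 and (10.5*2 + 1) = 22, making the average payout one
        # bet (integer division truncates this slightly under Python 2).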
def validate(self, state):
if state.credit < state.bet:
return False
return True
class DiceDoubler(DoublerPhase,Singleton):
_name = "dicedoubler"
_idnum = 11
log.debug("dice", 5, at="init")
def __init__(self):
if None is getattr(self,"accounts",None):
DoublerPhase.__init__(self, "diceingame")
class DiceWithdraw(WithdrawPhase, Singleton):
_name = "dicewithdraw"
_idnum = 12
log.debug("dice", 5, at="init")
def __init__(self):
if None is getattr(self,"accounts",None):
WithdrawPhase.__init__(self, "diceingame")
class Dice(Game,Singleton):
_name = "dice"
_idnum = 1
def __init__(self):
log.debug("dice", 5, at="init")
if None is getattr(self,"statemachine",None):
mainphase = DicePhase()
doubler = DiceDoubler()
withdraw = DiceWithdraw()
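            # Allowed phase transitions: each phase name maps to the list of
            # (next phase, validity check) pairs a player may move to.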
self.statemachine = {
"dice":
[("dice", self.alwaysValidPhase),
("dicedoubler", self.alwaysValidPhase),
("dicewithdraw", self.alwaysValidPhase)],
"dicedoubler":
[("dicedoubler", self.alwaysValidPhase),
("dicewithdraw", self.alwaysValidPhase)],
"dicewithdraw":
[("dice", self.alwaysValidPhase)],
}
Game.__init__(self)
|
magwas/gamsrv
|
logic/dice.py
|
Python
|
gpl-3.0
| 2,718
|
#!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def macs(step, macs_flag):
'''Runs MACS to call peaks from ChIPseq data.
input:
.fastq file
output:
peaks and .bed file
citation:
Zhang et al. Model-based Analysis of ChIP-Seq (MACS). Genome Biol (2008) vol. 9 (9) pp. R137
link:
http://liulab.dfci.harvard.edu/MACS/
parameters from parameters file:
PAIR_LIST:
BOWTIE_RESULTS:
CHROM_SIZES:
MACS_RESULTS:
MACS_VERSION:
TEMP_DIR:
BEDTOOLS_VERSION:
PYTHON_VERSION:
'''
spawn_job(jobname = 'macs', SAMPLE = step, LOG_PATH = p.LOG_PATH, RESULTS_EMAIL = p.RESULTS_EMAIL, SCHEDULER = p.SCHEDULER, walltime = "240:00:00", queue = p.QUEUE, nodes = 1, ppn = 8, memory = "15gb", script = "/macs_drmaa.sh", args_list = [p.PAIR_LIST, p.BOWTIE_RESULTS, p.CHROM_SIZES, p.MACS_RESULTS, p.MACS_VERSION, p.TEMP_DIR, p.BEDTOOLS_VERSION, p.PYTHON_VERSION])
job_status(jobname = 'macs', resultspath = p.MACS_RESULTS, SAMPLE = step, outputfilename = p.PAIR_LIST[1] + "_macs_enrichment.bed.gz", FLAG_PATH = p.FLAG_PATH)
return
if __name__ == '__main__':
macs(step, macs_flag)
sys.exit(0)
|
adammaikai/OmicsPipe2.0
|
omics_pipe/modules/macs.py
|
Python
|
mit
| 1,388
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from __future__ import division
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
class MainDisplay(object):
"""docstring for MainDisplay"""
def __init__(self, figsize=(11.7,8.3)):
super(MainDisplay, self).__init__()
self.fig = plt.figure(figsize=figsize)
plt.subplots_adjust(left=0.05,right=0.95,top=0.90,bottom=0.05,wspace=0.15,hspace=0.05)
self.ax = plt.subplot(111)
cid = self.fig.canvas.mpl_connect('motion_notify_event', self.onmove)
self.colors = self.get_colors(75) #TODO convert to a parameter variable
def onmove(self, event):
""" """
pass
def get_colors(self, num_clusters):
""" """
self.colormap = plt.cm.gist_ncar
return dict(zip(range(1,num_clusters +1), [self.colormap(i) for i in np.linspace(0, 0.9, num_clusters)]))
def draw_pie_charts(self, label, ratios=[0.4,0.3,0.3], X=0, Y=0, size = 500):
""" """
N = len(ratios)
xy = []
start = 0.
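        # Each wedge is a closed polygon anchored at the origin whose outer
        # vertices trace the arc from 2*pi*start to 2*pi*(start + ratio);
        # the polygon is then passed to scatter() as a custom marker.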
for ratio in ratios:
x = [0] + np.cos(np.linspace(2*np.pi*start,2*np.pi*(start+ratio), 30)).tolist()
y = [0] + np.sin(np.linspace(2*np.pi*start,2*np.pi*(start+ratio), 30)).tolist()
xy1 = zip(x,y)
xy.append(xy1)
start += ratio
for i, xyi in enumerate(xy):
self.ax.scatter([X],[Y] , marker=(xyi,0), s=size, facecolor=self.colors[i+1], alpha=0.9 )
self.ax.annotate(label, xy= (X, Y) , bbox = dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8)))
class MapData(object):
"""docstring for MapData"""
def __init__(self, filename_coordinates, filename_clusters=None, filename_individuals=None):
super(MapData, self).__init__()
        # see if this variable is necessary
self.df_coordinates = pd.read_csv(filename_coordinates, sep=' ')
self.df_clusters = map(lambda s: s.strip(), open(filename_clusters).readlines())
self.df_individuals = pd.read_csv(filename_individuals, sep='\t', header=None)
self.populations = self.get_values('Population')
self.longitude = self.get_values('Longitude')
self.latitude = self.get_values('Latitude')
self.populations_ind = self.get_populations_individuals()
def get_values(self, valuename):
"""docstring for get_values"""
return self.df_coordinates[valuename].values
def get_populations_individuals(self):
"""Get name and number of individuals in that population"""
self.df_individuals.columns = ['ind', 'pop']
groups = self.df_individuals.groupby('pop')
return dict(map(lambda p: (p, [groups.get_group(p).shape[0], list(groups.get_group(p)['ind'].values)]),
self.populations))
def get_ratios(self, population):
"""Get the proportions of each cluster in one population"""
clusters = self.get_clusters(population)
total_individuals = self.populations_ind[population][0]
return map(lambda p: p/total_individuals, clusters.values())
def get_clusters(self, population):
"""distribute each individual of a population in the clusters"""
clusters = dict(zip(range(1, 76), [0]*75))
individuals = self.populations_ind[population][1] # This is the list of the indiviudals in the group
for individual in individuals:
for c in self.df_clusters:
if c.find(individual) != -1:
clusters[int(c.split(' ')[0])] += 1
                    # get the cluster number and add 1 to its count
print population, clusters
return clusters
class Map(Basemap):
"""docstring for Map"""
def __init__(self, ax):
super(Map, self).__init__(projection='merc',llcrnrlat=-80,urcrnrlat=80,\
llcrnrlon=-180,urcrnrlon=180,lat_ts=20,resolution='i', ax=ax)
#pass arg by parameter so they can change proj and other things
def draw(self, color='#9A9595', alpha=0.1):
""" """
#map_.bluemarble()
#map_.shadedrelief()
self.drawcoastlines()
self.drawcountries()
self.fillcontinents(color=color, alpha=alpha)
self.drawmapboundary()
self.drawmeridians(np.arange(0, 360, 30))
self.drawparallels(np.arange(-90, 90, 30))
def main():
"""
"""
#TODO use argparse and pass the filename by parameter
FILENAME_COR = 'data/don_coordinates.txt'
FILENAME_CLUS = 'data/CandelaFStree.populations.indlist.txt'
FILENAME_IND_POP = 'data/candela_main.idfile.txt'
my_display = MainDisplay()
my_data = MapData(FILENAME_COR, FILENAME_CLUS, FILENAME_IND_POP)
my_map = Map(my_display.ax)
my_map.draw()
#my_data.get_clusters(my_data.populations_ind.keys()[0])
x,y = my_map(my_data.longitude, my_data.latitude)
#TODO make filter
map(lambda p: my_display.draw_pie_charts(p[0], ratios=my_data.get_ratios(p[0]), X=p[1], Y=p[2]),
zip(my_data.populations, x, y))
plt.show()
if __name__ == '__main__':
main()
|
celiacintas/candela_maps
|
pie_maps.py
|
Python
|
gpl-2.0
| 5,213
|
#!/usr/bin/env python
import glob
import fnmatch
import itertools
import os
import re
TODO_PATTERN = re.compile(r'\s*// TODO:')
DOC_PATTERN = re.compile(r'\s*//')
EXPECT_PATTERN = re.compile(r'// expect')
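# Order matters in the loops below: a TODO line also matches DOC_PATTERN,
# so TODOs are counted first and skipped before the comment-line check.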
num_files = 0
num_docs = 0
num_code = 0
num_empty = 0
num_todos = 0
num_semicolons = 0
num_test_files = 0
num_test_todos = 0
num_expects = 0
num_test_empty = 0
num_test = 0
num_benchmark_files = 0
num_benchmark_todos = 0
num_benchmark_empty = 0
num_benchmark = 0
files = itertools.chain(glob.iglob("src/*.[ch]"), glob.iglob("include/*.[ch]"))
for source_path in files:
num_files += 1
with open(source_path, "r") as input:
for line in input:
num_semicolons += line.count(';')
match = TODO_PATTERN.match(line)
if match:
num_todos += 1
continue
match = DOC_PATTERN.match(line)
if match:
num_docs += 1
continue
if (line.strip() == ""):
num_empty += 1
continue
num_code += 1
for dir_path, dir_names, file_names in os.walk("test"):
for file_name in fnmatch.filter(file_names, "*.wren"):
num_test_files += 1
with open(os.path.join(dir_path, file_name), "r") as input:
for line in input:
if (line.strip() == ""):
num_test_empty += 1
else:
num_test += 1
match = TODO_PATTERN.match(line)
if match:
num_test_todos += 1
continue
match = EXPECT_PATTERN.search(line)
if match:
num_expects += 1
continue
for dir_path, dir_names, file_names in os.walk("benchmark"):
for file_name in fnmatch.filter(file_names, "*.wren"):
num_benchmark_files += 1
with open(os.path.join(dir_path, file_name), "r") as input:
for line in input:
if (line.strip() == ""):
num_benchmark_empty += 1
else:
num_benchmark += 1
match = TODO_PATTERN.match(line)
if match:
num_benchmark_todos += 1
continue
print("source:")
print(" files " + str(num_files))
print(" semicolons " + str(num_semicolons))
print(" TODOs " + str(num_todos))
print(" comment lines " + str(num_docs))
print(" code lines " + str(num_code))
print(" empty lines " + str(num_empty))
print("\n")
print("test:")
print(" files " + str(num_test_files))
print(" TODOs " + str(num_test_todos))
print(" expectations " + str(num_expects))
print(" non-empty lines " + str(num_test))
print(" empty lines " + str(num_test_empty))
print("\n")
print("benchmark:")
print(" files " + str(num_benchmark_files))
print(" TODOs " + str(num_benchmark_todos))
print(" non-empty lines " + str(num_benchmark))
print(" empty lines " + str(num_benchmark_empty))
|
lluchs/wren
|
script/metrics.py
|
Python
|
mit
| 2,793
|
"""
Component to interface with various media players.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/media_player/
"""
import asyncio
from datetime import timedelta
import functools as ft
import hashlib
import logging
import os
from random import SystemRandom
from aiohttp import web
import async_timeout
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
from homeassistant.components.http import HomeAssistantView, KEY_AUTHENTICATED
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util.async import run_coroutine_threadsafe
from homeassistant.const import (
STATE_OFF, STATE_UNKNOWN, STATE_PLAYING, STATE_IDLE,
ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON,
SERVICE_VOLUME_UP, SERVICE_VOLUME_DOWN, SERVICE_VOLUME_SET,
SERVICE_VOLUME_MUTE, SERVICE_TOGGLE, SERVICE_MEDIA_STOP,
SERVICE_MEDIA_PLAY_PAUSE, SERVICE_MEDIA_PLAY, SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_NEXT_TRACK, SERVICE_MEDIA_PREVIOUS_TRACK, SERVICE_MEDIA_SEEK)
_LOGGER = logging.getLogger(__name__)
_RND = SystemRandom()
DOMAIN = 'media_player'
DEPENDENCIES = ['http']
SCAN_INTERVAL = timedelta(seconds=10)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
ENTITY_IMAGE_URL = '/api/media_player_proxy/{0}?token={1}&cache={2}'
ATTR_CACHE_IMAGES = 'images'
ATTR_CACHE_URLS = 'urls'
ATTR_CACHE_MAXSIZE = 'maxsize'
ENTITY_IMAGE_CACHE = {
ATTR_CACHE_IMAGES: {},
ATTR_CACHE_URLS: [],
ATTR_CACHE_MAXSIZE: 16
}
CONTENT_TYPE_HEADER = 'Content-Type'
SERVICE_PLAY_MEDIA = 'play_media'
SERVICE_SELECT_SOURCE = 'select_source'
SERVICE_CLEAR_PLAYLIST = 'clear_playlist'
ATTR_MEDIA_VOLUME_LEVEL = 'volume_level'
ATTR_MEDIA_VOLUME_MUTED = 'is_volume_muted'
ATTR_MEDIA_SEEK_POSITION = 'seek_position'
ATTR_MEDIA_CONTENT_ID = 'media_content_id'
ATTR_MEDIA_CONTENT_TYPE = 'media_content_type'
ATTR_MEDIA_DURATION = 'media_duration'
ATTR_MEDIA_POSITION = 'media_position'
ATTR_MEDIA_POSITION_UPDATED_AT = 'media_position_updated_at'
ATTR_MEDIA_TITLE = 'media_title'
ATTR_MEDIA_ARTIST = 'media_artist'
ATTR_MEDIA_ALBUM_NAME = 'media_album_name'
ATTR_MEDIA_ALBUM_ARTIST = 'media_album_artist'
ATTR_MEDIA_TRACK = 'media_track'
ATTR_MEDIA_SERIES_TITLE = 'media_series_title'
ATTR_MEDIA_SEASON = 'media_season'
ATTR_MEDIA_EPISODE = 'media_episode'
ATTR_MEDIA_CHANNEL = 'media_channel'
ATTR_MEDIA_PLAYLIST = 'media_playlist'
ATTR_APP_ID = 'app_id'
ATTR_APP_NAME = 'app_name'
ATTR_SUPPORTED_MEDIA_COMMANDS = 'supported_media_commands'
ATTR_INPUT_SOURCE = 'source'
ATTR_INPUT_SOURCE_LIST = 'source_list'
ATTR_MEDIA_ENQUEUE = 'enqueue'
MEDIA_TYPE_MUSIC = 'music'
MEDIA_TYPE_TVSHOW = 'tvshow'
MEDIA_TYPE_VIDEO = 'movie'
MEDIA_TYPE_EPISODE = 'episode'
MEDIA_TYPE_CHANNEL = 'channel'
MEDIA_TYPE_PLAYLIST = 'playlist'
SUPPORT_PAUSE = 1
SUPPORT_SEEK = 2
SUPPORT_VOLUME_SET = 4
SUPPORT_VOLUME_MUTE = 8
SUPPORT_PREVIOUS_TRACK = 16
SUPPORT_NEXT_TRACK = 32
SUPPORT_TURN_ON = 128
SUPPORT_TURN_OFF = 256
SUPPORT_PLAY_MEDIA = 512
SUPPORT_VOLUME_STEP = 1024
SUPPORT_SELECT_SOURCE = 2048
SUPPORT_STOP = 4096
SUPPORT_CLEAR_PLAYLIST = 8192
SUPPORT_PLAY = 16384
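# Supported features are exposed as a bitmask and combined with bitwise OR,
# e.g. SUPPORT_PAUSE | SUPPORT_VOLUME_SET.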
# Service call validation schemas
MEDIA_PLAYER_SCHEMA = vol.Schema({
ATTR_ENTITY_ID: cv.entity_ids,
})
MEDIA_PLAYER_SET_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_VOLUME_LEVEL): cv.small_float,
})
MEDIA_PLAYER_MUTE_VOLUME_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_VOLUME_MUTED): cv.boolean,
})
MEDIA_PLAYER_MEDIA_SEEK_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_SEEK_POSITION):
vol.All(vol.Coerce(float), vol.Range(min=0)),
})
MEDIA_PLAYER_SELECT_SOURCE_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_INPUT_SOURCE): cv.string,
})
MEDIA_PLAYER_PLAY_MEDIA_SCHEMA = MEDIA_PLAYER_SCHEMA.extend({
vol.Required(ATTR_MEDIA_CONTENT_TYPE): cv.string,
vol.Required(ATTR_MEDIA_CONTENT_ID): cv.string,
vol.Optional(ATTR_MEDIA_ENQUEUE): cv.boolean,
})
SERVICE_TO_METHOD = {
SERVICE_TURN_ON: {'method': 'async_turn_on'},
SERVICE_TURN_OFF: {'method': 'async_turn_off'},
SERVICE_TOGGLE: {'method': 'async_toggle'},
SERVICE_VOLUME_UP: {'method': 'async_volume_up'},
SERVICE_VOLUME_DOWN: {'method': 'async_volume_down'},
SERVICE_MEDIA_PLAY_PAUSE: {'method': 'async_media_play_pause'},
SERVICE_MEDIA_PLAY: {'method': 'async_media_play'},
SERVICE_MEDIA_PAUSE: {'method': 'async_media_pause'},
SERVICE_MEDIA_STOP: {'method': 'async_media_stop'},
SERVICE_MEDIA_NEXT_TRACK: {'method': 'async_media_next_track'},
SERVICE_MEDIA_PREVIOUS_TRACK: {'method': 'async_media_previous_track'},
SERVICE_CLEAR_PLAYLIST: {'method': 'async_clear_playlist'},
SERVICE_VOLUME_SET: {
'method': 'async_set_volume_level',
'schema': MEDIA_PLAYER_SET_VOLUME_SCHEMA},
SERVICE_VOLUME_MUTE: {
'method': 'async_mute_volume',
'schema': MEDIA_PLAYER_MUTE_VOLUME_SCHEMA},
SERVICE_MEDIA_SEEK: {
'method': 'async_media_seek',
'schema': MEDIA_PLAYER_MEDIA_SEEK_SCHEMA},
SERVICE_SELECT_SOURCE: {
'method': 'async_select_source',
'schema': MEDIA_PLAYER_SELECT_SOURCE_SCHEMA},
SERVICE_PLAY_MEDIA: {
'method': 'async_play_media',
'schema': MEDIA_PLAYER_PLAY_MEDIA_SCHEMA},
}
ATTR_TO_PROPERTY = [
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_POSITION_UPDATED_AT,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_SERIES_TITLE,
ATTR_MEDIA_SEASON,
ATTR_MEDIA_EPISODE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_PLAYLIST,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_SUPPORTED_MEDIA_COMMANDS,
ATTR_INPUT_SOURCE,
ATTR_INPUT_SOURCE_LIST,
]
def is_on(hass, entity_id=None):
"""
Return true if specified media player entity_id is on.
    Check all media players if no entity_id is specified.
"""
entity_ids = [entity_id] if entity_id else hass.states.entity_ids(DOMAIN)
return any(not hass.states.is_state(entity_id, STATE_OFF)
for entity_id in entity_ids)
def turn_on(hass, entity_id=None):
"""Turn on specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_ON, data)
def turn_off(hass, entity_id=None):
"""Turn off specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TURN_OFF, data)
def toggle(hass, entity_id=None):
"""Toggle specified media player or all."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_TOGGLE, data)
def volume_up(hass, entity_id=None):
"""Send the media player the command for volume up."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_UP, data)
def volume_down(hass, entity_id=None):
"""Send the media player the command for volume down."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_VOLUME_DOWN, data)
def mute_volume(hass, mute, entity_id=None):
"""Send the media player the command for muting the volume."""
data = {ATTR_MEDIA_VOLUME_MUTED: mute}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_VOLUME_MUTE, data)
def set_volume_level(hass, volume, entity_id=None):
"""Send the media player the command for setting the volume."""
data = {ATTR_MEDIA_VOLUME_LEVEL: volume}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_VOLUME_SET, data)
def media_play_pause(hass, entity_id=None):
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY_PAUSE, data)
def media_play(hass, entity_id=None):
"""Send the media player the command for play/pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PLAY, data)
def media_pause(hass, entity_id=None):
"""Send the media player the command for pause."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PAUSE, data)
def media_stop(hass, entity_id=None):
"""Send the media player the stop command."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_STOP, data)
def media_next_track(hass, entity_id=None):
"""Send the media player the command for next track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_NEXT_TRACK, data)
def media_previous_track(hass, entity_id=None):
"""Send the media player the command for prev track."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_MEDIA_PREVIOUS_TRACK, data)
def media_seek(hass, position, entity_id=None):
"""Send the media player the command to seek in current playing media."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
data[ATTR_MEDIA_SEEK_POSITION] = position
hass.services.call(DOMAIN, SERVICE_MEDIA_SEEK, data)
def play_media(hass, media_type, media_id, entity_id=None, enqueue=None):
"""Send the media player the command for playing media."""
data = {ATTR_MEDIA_CONTENT_TYPE: media_type,
ATTR_MEDIA_CONTENT_ID: media_id}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
if enqueue:
data[ATTR_MEDIA_ENQUEUE] = enqueue
hass.services.call(DOMAIN, SERVICE_PLAY_MEDIA, data)
def select_source(hass, source, entity_id=None):
"""Send the media player the command to select input source."""
data = {ATTR_INPUT_SOURCE: source}
if entity_id:
data[ATTR_ENTITY_ID] = entity_id
hass.services.call(DOMAIN, SERVICE_SELECT_SOURCE, data)
def clear_playlist(hass, entity_id=None):
"""Send the media player the command for clear playlist."""
data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
hass.services.call(DOMAIN, SERVICE_CLEAR_PLAYLIST, data)
@asyncio.coroutine
def async_setup(hass, config):
"""Track states and offer events for media_players."""
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
hass.http.register_view(MediaPlayerImageView(component.entities))
yield from component.async_setup(config)
descriptions = yield from hass.loop.run_in_executor(
None, load_yaml_config_file, os.path.join(
os.path.dirname(__file__), 'services.yaml'))
@asyncio.coroutine
def async_service_handler(service):
"""Map services to methods on MediaPlayerDevice."""
method = SERVICE_TO_METHOD.get(service.service)
if not method:
return
params = {}
if service.service == SERVICE_VOLUME_SET:
params['volume'] = service.data.get(ATTR_MEDIA_VOLUME_LEVEL)
elif service.service == SERVICE_VOLUME_MUTE:
params['mute'] = service.data.get(ATTR_MEDIA_VOLUME_MUTED)
elif service.service == SERVICE_MEDIA_SEEK:
params['position'] = service.data.get(ATTR_MEDIA_SEEK_POSITION)
elif service.service == SERVICE_SELECT_SOURCE:
params['source'] = service.data.get(ATTR_INPUT_SOURCE)
elif service.service == SERVICE_PLAY_MEDIA:
params['media_type'] = \
service.data.get(ATTR_MEDIA_CONTENT_TYPE)
params['media_id'] = service.data.get(ATTR_MEDIA_CONTENT_ID)
params[ATTR_MEDIA_ENQUEUE] = \
service.data.get(ATTR_MEDIA_ENQUEUE)
target_players = component.async_extract_from_service(service)
update_tasks = []
for player in target_players:
yield from getattr(player, method['method'])(**params)
for player in target_players:
if not player.should_poll:
continue
update_coro = player.async_update_ha_state(True)
if hasattr(player, 'async_update'):
update_tasks.append(update_coro)
else:
yield from update_coro
if update_tasks:
yield from asyncio.wait(update_tasks, loop=hass.loop)
for service in SERVICE_TO_METHOD:
schema = SERVICE_TO_METHOD[service].get(
'schema', MEDIA_PLAYER_SCHEMA)
hass.services.async_register(
DOMAIN, service, async_service_handler,
descriptions.get(service), schema=schema)
return True
class MediaPlayerDevice(Entity):
"""ABC for media player devices."""
_access_token = None
# pylint: disable=no-self-use
# Implement these for your media player
@property
def state(self):
"""State of the player."""
return STATE_UNKNOWN
@property
def access_token(self):
"""Access token for this media player."""
if self._access_token is None:
self._access_token = hashlib.sha256(
_RND.getrandbits(256).to_bytes(32, 'little')).hexdigest()
return self._access_token
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return None
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return None
@property
def media_content_id(self):
"""Content ID of current playing media."""
return None
@property
def media_content_type(self):
"""Content type of current playing media."""
return None
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
return None
@property
def media_position(self):
"""Position of current playing media in seconds."""
return None
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
return None
@property
def media_image_url(self):
"""Image url of current playing media."""
return None
@property
def media_title(self):
"""Title of current playing media."""
return None
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return None
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return None
@property
def media_album_artist(self):
"""Album artist of current playing media, music track only."""
return None
@property
def media_track(self):
"""Track number of current playing media, music track only."""
return None
@property
def media_series_title(self):
"""Title of series of current playing media, TV show only."""
return None
@property
def media_season(self):
"""Season of current playing media, TV show only."""
return None
@property
def media_episode(self):
"""Episode of current playing media, TV show only."""
return None
@property
def media_channel(self):
"""Channel currently playing."""
return None
@property
def media_playlist(self):
"""Title of Playlist currently playing."""
return None
@property
def app_id(self):
"""ID of the current running app."""
return None
@property
def app_name(self):
"""Name of the current running app."""
return None
@property
def source(self):
"""Name of the current input source."""
return None
@property
def source_list(self):
"""List of available input sources."""
return None
@property
def supported_media_commands(self):
"""Flag media commands that are supported."""
return 0
def turn_on(self):
"""Turn the media player on."""
raise NotImplementedError()
def async_turn_on(self):
"""Turn the media player on.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.turn_on)
def turn_off(self):
"""Turn the media player off."""
raise NotImplementedError()
def async_turn_off(self):
"""Turn the media player off.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.turn_off)
def mute_volume(self, mute):
"""Mute the volume."""
raise NotImplementedError()
def async_mute_volume(self, mute):
"""Mute the volume.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.mute_volume, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
raise NotImplementedError()
def async_set_volume_level(self, volume):
"""Set volume level, range 0..1.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.set_volume_level, volume)
def media_play(self):
"""Send play commmand."""
raise NotImplementedError()
def async_media_play(self):
"""Send play commmand.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.media_play)
def media_pause(self):
"""Send pause command."""
raise NotImplementedError()
def async_media_pause(self):
"""Send pause command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.media_pause)
def media_stop(self):
"""Send stop command."""
raise NotImplementedError()
def async_media_stop(self):
"""Send stop command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.media_stop)
def media_previous_track(self):
"""Send previous track command."""
raise NotImplementedError()
def async_media_previous_track(self):
"""Send previous track command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.media_previous_track)
def media_next_track(self):
"""Send next track command."""
raise NotImplementedError()
def async_media_next_track(self):
"""Send next track command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.media_next_track)
def media_seek(self, position):
"""Send seek command."""
raise NotImplementedError()
def async_media_seek(self, position):
"""Send seek command.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.media_seek, position)
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
raise NotImplementedError()
def async_play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, ft.partial(self.play_media, media_type, media_id, **kwargs))
def select_source(self, source):
"""Select input source."""
raise NotImplementedError()
def async_select_source(self, source):
"""Select input source.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.select_source, source)
def clear_playlist(self):
"""Clear players playlist."""
raise NotImplementedError()
def async_clear_playlist(self):
"""Clear players playlist.
This method must be run in the event loop and returns a coroutine.
"""
return self.hass.loop.run_in_executor(
None, self.clear_playlist)
# No need to overwrite these.
@property
def support_play(self):
"""Boolean if play is supported."""
return bool(self.supported_media_commands & SUPPORT_PLAY)
@property
def support_pause(self):
"""Boolean if pause is supported."""
return bool(self.supported_media_commands & SUPPORT_PAUSE)
@property
def support_stop(self):
"""Boolean if stop is supported."""
return bool(self.supported_media_commands & SUPPORT_STOP)
@property
def support_seek(self):
"""Boolean if seek is supported."""
return bool(self.supported_media_commands & SUPPORT_SEEK)
@property
def support_volume_set(self):
"""Boolean if setting volume is supported."""
return bool(self.supported_media_commands & SUPPORT_VOLUME_SET)
@property
def support_volume_mute(self):
"""Boolean if muting volume is supported."""
return bool(self.supported_media_commands & SUPPORT_VOLUME_MUTE)
@property
def support_previous_track(self):
"""Boolean if previous track command supported."""
return bool(self.supported_media_commands & SUPPORT_PREVIOUS_TRACK)
@property
def support_next_track(self):
"""Boolean if next track command supported."""
return bool(self.supported_media_commands & SUPPORT_NEXT_TRACK)
@property
def support_play_media(self):
"""Boolean if play media command supported."""
return bool(self.supported_media_commands & SUPPORT_PLAY_MEDIA)
@property
def support_select_source(self):
"""Boolean if select source command supported."""
return bool(self.supported_media_commands & SUPPORT_SELECT_SOURCE)
@property
def support_clear_playlist(self):
"""Boolean if clear playlist command supported."""
return bool(self.supported_media_commands & SUPPORT_CLEAR_PLAYLIST)
def toggle(self):
"""Toggle the power on the media player."""
if self.state in [STATE_OFF, STATE_IDLE]:
self.turn_on()
else:
self.turn_off()
def async_toggle(self):
"""Toggle the power on the media player.
This method must be run in the event loop and returns a coroutine.
"""
if self.state in [STATE_OFF, STATE_IDLE]:
return self.async_turn_on()
else:
return self.async_turn_off()
@asyncio.coroutine
def async_volume_up(self):
"""Turn volume up for media player.
This method is a coroutine.
"""
if hasattr(self, 'volume_up'):
# pylint: disable=no-member
yield from self.hass.loop.run_in_executor(None, self.volume_up)
return
if self.volume_level < 1:
yield from self.async_set_volume_level(
min(1, self.volume_level + .1))
@asyncio.coroutine
def async_volume_down(self):
"""Turn volume down for media player.
This method is a coroutine.
"""
if hasattr(self, 'volume_down'):
# pylint: disable=no-member
yield from self.hass.loop.run_in_executor(None, self.volume_down)
return
if self.volume_level > 0:
yield from self.async_set_volume_level(
max(0, self.volume_level - .1))
def media_play_pause(self):
"""Play or pause the media player."""
if self.state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
def async_media_play_pause(self):
"""Play or pause the media player.
This method must be run in the event loop and returns a coroutine.
"""
if self.state == STATE_PLAYING:
return self.async_media_pause()
else:
return self.async_media_play()
@property
def entity_picture(self):
"""Return image of the media playing."""
if self.state == STATE_OFF:
return None
url = self.media_image_url
if url is None:
return None
return ENTITY_IMAGE_URL.format(
self.entity_id, self.access_token,
hashlib.md5(url.encode('utf-8')).hexdigest()[:5])
@property
def state_attributes(self):
"""Return the state attributes."""
if self.state == STATE_OFF:
state_attr = {
ATTR_SUPPORTED_MEDIA_COMMANDS: self.supported_media_commands,
}
else:
state_attr = {
attr: getattr(self, attr) for attr
in ATTR_TO_PROPERTY if getattr(self, attr) is not None
}
return state_attr
def preload_media_image_url(self, url):
"""Preload and cache a media image for future use."""
run_coroutine_threadsafe(
_async_fetch_image(self.hass, url), self.hass.loop
).result()
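# A minimal concrete-player sketch (an illustrative assumption, not shipped
# with Home Assistant): only the synchronous hooks need overriding, since the
# async_* wrappers above fall back to running them in the executor.
class _DemoPlayerSketch(MediaPlayerDevice):
    """Media player that only reports state and volume."""
    def __init__(self):
        self._demo_state = STATE_IDLE
        self._demo_volume = 0.5
    @property
    def state(self):
        return self._demo_state
    @property
    def volume_level(self):
        return self._demo_volume
    def set_volume_level(self, volume):
        self._demo_volume = volume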
@asyncio.coroutine
def _async_fetch_image(hass, url):
"""Helper method to fetch image.
Images are cached in memory (the images are typically 10-100kB in size).
"""
cache_images = ENTITY_IMAGE_CACHE[ATTR_CACHE_IMAGES]
cache_urls = ENTITY_IMAGE_CACHE[ATTR_CACHE_URLS]
cache_maxsize = ENTITY_IMAGE_CACHE[ATTR_CACHE_MAXSIZE]
if url in cache_images:
return cache_images[url]
content, content_type = (None, None)
websession = async_get_clientsession(hass)
response = None
try:
with async_timeout.timeout(10, loop=hass.loop):
response = yield from websession.get(url)
if response.status == 200:
content = yield from response.read()
content_type = response.headers.get(CONTENT_TYPE_HEADER)
except asyncio.TimeoutError:
pass
finally:
if response is not None:
yield from response.release()
if not content:
return (None, None)
cache_images[url] = (content, content_type)
cache_urls.append(url)
while len(cache_urls) > cache_maxsize:
# remove oldest item from cache
oldest_url = cache_urls[0]
if oldest_url in cache_images:
del cache_images[oldest_url]
        del cache_urls[0]  # mutate in place; rebinding left the shared cache list unchanged
return content, content_type
class MediaPlayerImageView(HomeAssistantView):
"""Media player view to serve an image."""
requires_auth = False
url = "/api/media_player_proxy/{entity_id}"
name = "api:media_player:image"
def __init__(self, entities):
"""Initialize a media player view."""
self.entities = entities
@asyncio.coroutine
def get(self, request, entity_id):
"""Start a get request."""
player = self.entities.get(entity_id)
if player is None:
status = 404 if request[KEY_AUTHENTICATED] else 401
return web.Response(status=status)
authenticated = (request[KEY_AUTHENTICATED] or
request.GET.get('token') == player.access_token)
if not authenticated:
return web.Response(status=401)
data, content_type = yield from _async_fetch_image(
request.app['hass'], player.media_image_url)
if data is None:
return web.Response(status=500)
return web.Response(body=data, content_type=content_type)
|
eagleamon/home-assistant
|
homeassistant/components/media_player/__init__.py
|
Python
|
apache-2.0
| 28,502
|
# -*- encoding: utf-8 -*-
import abc
from abjad.tools.abctools.AbjadObject import AbjadObject
class TypedCollection(AbjadObject):
r'''Abstract base class for typed collections.
'''
### CLASS VARIABLES ###
__slots__ = (
'_collection',
'_item_class',
)
### INITIALIZER ###
@abc.abstractmethod
def __init__(self, items=None, item_class=None):
assert isinstance(item_class, (type(None), type))
self._item_class = item_class
### SPECIAL METHODS ###
def __contains__(self, item):
        r'''Is true when typed collection contains `item`.
Otherwise false.
Returns boolean.
'''
try:
item = self._item_coercer(item)
except ValueError:
return False
return self._collection.__contains__(item)
def __eq__(self, expr):
r'''Is true when `expr` is a typed collection with items that compare
equal to those of this typed collection. Otherwise false.
Returns boolean.
'''
if isinstance(expr, type(self)):
return self._collection == expr._collection
elif isinstance(expr, type(self._collection)):
return self._collection == expr
return False
def __format__(self, format_specification=''):
r'''Formats typed collection.
Set `format_specification` to `''` or `'storage'`.
Interprets `''` equal to `'storage'`.
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'storage'):
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getnewargs__(self):
r'''Gets new arguments.
Returns tuple.
'''
return (self._collection, self.item_class)
def __hash__(self):
r'''Hashes typed collection.
Required to be explicitly re-defined on Python 3 if __eq__ changes.
Returns integer.
'''
return super(TypedCollection, self).__hash__()
def __iter__(self):
r'''Iterates typed collection.
Returns generator.
'''
return self._collection.__iter__()
def __len__(self):
r'''Length of typed collection.
Returns nonnegative integer.
'''
return len(self._collection)
def __ne__(self, expr):
r'''Is true when `expr` is not a typed collection with items equal to
this typed collection. Otherwise false.
Returns boolean.
'''
return not self.__eq__(expr)
### PRIVATE METHODS ###
def _on_insertion(self, item):
r'''Override to operate on item after insertion into collection.
'''
pass
def _on_removal(self, item):
r'''Override to operate on item after removal from collection.
'''
pass
### PRIVATE PROPERTIES ###
@property
def _item_coercer(self):
def coerce_(x):
if isinstance(x, self._item_class):
return x
return self._item_class(x)
if self._item_class is None:
return lambda x: x
return coerce_
@property
def _repr_specification(self):
from abjad.tools import systemtools
manager = systemtools.StorageFormatManager
names = manager.get_signature_keyword_argument_names(self)
keyword_argument_names = list(names)
if 'items' in keyword_argument_names:
keyword_argument_names.remove('items')
keyword_argument_names = tuple(keyword_argument_names)
positional_argument_values = (
self._collection,
)
return systemtools.StorageFormatSpecification(
self,
is_indented=False,
keyword_argument_names=keyword_argument_names,
positional_argument_values=positional_argument_values,
)
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
manager = systemtools.StorageFormatManager
names = manager.get_signature_keyword_argument_names(self)
keyword_argument_names = list(names)
if 'items' in keyword_argument_names:
keyword_argument_names.remove('items')
keyword_argument_names = tuple(keyword_argument_names)
positional_argument_values = (
self._collection,
)
return systemtools.StorageFormatSpecification(
self,
keyword_argument_names=keyword_argument_names,
positional_argument_values=positional_argument_values,
)
### PUBLIC PROPERTIES ###
@property
def item_class(self):
r'''Item class to coerce items into.
'''
return self._item_class
@property
def items(self):
r'''Gets collection items.
'''
return [x for x in self]
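# Illustrative subclass sketch (not part of abjad): a concrete collection only
# has to populate self._collection and call the base initializer; coercion of
# items then goes through the _item_coercer property defined above.
class _TypedListSketch(TypedCollection):
    r'''Minimal list-backed typed collection, assuming integer items.'''
    def __init__(self, items=None, item_class=int):
        TypedCollection.__init__(self, items=items, item_class=item_class)
        self._collection = [self._item_coercer(x) for x in (items or [])]
# _TypedListSketch(['1', 2, 3.0]).items == [1, 2, 3]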
|
mscuthbert/abjad
|
abjad/tools/datastructuretools/TypedCollection.py
|
Python
|
gpl-3.0
| 4,949
|
from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from views import uploadgrade, current_courselist, all_courselist
urlpatterns = patterns('',
url(r'^uploadgrade/(\d+)/$',uploadgrade, name='uploadgrade'),
url(r'currentlist/$', current_courselist, name="currentlist"),
url(r'alllist/$', all_courselist, name="alllist"),
)
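# Usage note (illustrative): because the patterns are named, views and
# templates can build URLs with reverse(), e.g. reverse('uploadgrade',
# args=[42]) resolves to '/uploadgrade/42/' when this urlconf is mounted
# at the site root.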
|
Linktime/Chronus
|
teacher/urls.py
|
Python
|
gpl-3.0
| 393
|
# (c) 2017, Brian Coca <bcoca@ansible.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import optparse
from operator import attrgetter
from ansible.cli import CLI
from ansible.errors import AnsibleOptionsError
from ansible.parsing.dataloader import DataLoader
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
INTERNAL_VARS = frozenset(['ansible_facts', 'ansible_version',
'ansible_playbook_python',
'inventory_dir',
'inventory_file',
'inventory_hostname',
'inventory_hostname_short',
'groups',
'group_names',
'omit',
'playbook_dir', ])
class InventoryCLI(CLI):
''' used to display or dump the configured inventory as Ansible sees it '''
ARGUMENTS = {'host': 'The name of a host to match in the inventory, relevant when using --list',
'group': 'The name of a group in the inventory, relevant when using --graph', }
def __init__(self, args):
super(InventoryCLI, self).__init__(args)
self.vm = None
self.loader = None
self.inventory = None
self._new_api = True
def parse(self):
self.parser = CLI.base_parser(
usage='usage: %prog [options] [host|group]',
epilog='Show Ansible inventory information, by default it uses the inventory script JSON format',
inventory_opts=True,
vault_opts=True
)
# Actions
        action_group = optparse.OptionGroup(self.parser, "Actions", "One of the following must be used on invocation, ONLY ONE!")
action_group.add_option("--list", action="store_true", default=False, dest='list', help='Output all hosts info, works as inventory script')
action_group.add_option("--host", action="store", default=None, dest='host', help='Output specific host info, works as inventory script')
action_group.add_option("--graph", action="store_true", default=False, dest='graph',
help='create inventory graph, if supplying pattern it must be a valid group name')
self.parser.add_option_group(action_group)
# Options
self.parser.add_option("-y", "--yaml", action="store_true", default=False, dest='yaml',
help='Use YAML format instead of default JSON, ignored for --graph')
self.parser.add_option("--vars", action="store_true", default=False, dest='show_vars',
help='Add vars to graph display, ignored unless used with --graph')
super(InventoryCLI, self).parse()
display.verbosity = self.options.verbosity
self.validate_conflicts(vault_opts=True)
# there can be only one! and, at least, one!
used = 0
for opt in (self.options.list, self.options.host, self.options.graph):
if opt:
used += 1
if used == 0:
raise AnsibleOptionsError("No action selected, at least one of --host, --graph or --list needs to be specified.")
elif used > 1:
raise AnsibleOptionsError("Conflicting options used, only one of --host, --graph or --list can be used at the same time.")
# set host pattern to default if not supplied
if len(self.args) > 0:
self.options.pattern = self.args[0]
else:
self.options.pattern = 'all'
def run(self):
results = None
super(InventoryCLI, self).run()
# Initialize needed objects
if getattr(self, '_play_prereqs', False):
self.loader, self.inventory, self.vm = self._play_prereqs(self.options)
else:
            # fall back to the pre-2.4 way of initializing
from ansible.vars import VariableManager
from ansible.inventory import Inventory
self._new_api = False
self.loader = DataLoader()
self.vm = VariableManager()
# use vault if needed
if self.options.vault_password_file:
vault_pass = CLI.read_vault_password_file(self.options.vault_password_file, loader=self.loader)
elif self.options.ask_vault_pass:
vault_pass = self.ask_vault_passwords()
else:
vault_pass = None
if vault_pass:
self.loader.set_vault_password(vault_pass)
# actually get inventory and vars
self.inventory = Inventory(loader=self.loader, variable_manager=self.vm, host_list=self.options.inventory)
self.vm.set_inventory(self.inventory)
if self.options.host:
hosts = self.inventory.get_hosts(self.options.host)
if len(hosts) != 1:
raise AnsibleOptionsError("You must pass a single valid host to --hosts parameter")
myvars = self._get_host_variables(host=hosts[0])
self._remove_internal(myvars)
# FIXME: should we template first?
results = self.dump(myvars)
elif self.options.graph:
results = self.inventory_graph()
elif self.options.list:
top = self._get_group('all')
if self.options.yaml:
results = self.yaml_inventory(top)
else:
results = self.json_inventory(top)
results = self.dump(results)
if results:
# FIXME: pager?
display.display(results)
exit(0)
exit(1)
def dump(self, stuff):
if self.options.yaml:
import yaml
from ansible.parsing.yaml.dumper import AnsibleDumper
results = yaml.dump(stuff, Dumper=AnsibleDumper, default_flow_style=False)
else:
import json
results = json.dumps(stuff, sort_keys=True, indent=4)
return results
def _get_host_variables(self, host):
if self._new_api:
hostvars = self.vm.get_vars(host=host)
else:
hostvars = self.vm.get_vars(self.loader, host=host)
return hostvars
def _get_group(self, gname):
if self._new_api:
group = self.inventory.groups.get(gname)
else:
group = self.inventory.get_group(gname)
return group
def _remove_internal(self, dump):
for internal in INTERNAL_VARS:
if internal in dump:
del dump[internal]
def _remove_empty(self, dump):
# remove empty keys
for x in ('hosts', 'vars', 'children'):
if x in dump and not dump[x]:
del dump[x]
def _show_vars(self, dump, depth):
result = []
self._remove_internal(dump)
if self.options.show_vars:
for (name, val) in sorted(dump.items()):
result.append(self._graph_name('{%s = %s}' % (name, val), depth + 1))
return result
def _graph_name(self, name, depth=0):
if depth:
name = " |" * (depth) + "--%s" % name
return name
def _graph_group(self, group, depth=0):
result = [self._graph_name('@%s:' % group.name, depth)]
depth = depth + 1
for kid in sorted(group.child_groups, key=attrgetter('name')):
result.extend(self._graph_group(kid, depth))
if group.name != 'all':
for host in sorted(group.hosts, key=attrgetter('name')):
result.append(self._graph_name(host.name, depth))
result.extend(self._show_vars(host.get_vars(), depth))
result.extend(self._show_vars(group.get_vars(), depth))
return result
def inventory_graph(self):
start_at = self._get_group(self.options.pattern)
if start_at:
return '\n'.join(self._graph_group(start_at))
else:
raise AnsibleOptionsError("Pattern must be valid group name when using --graph")
def json_inventory(self, top):
def format_group(group):
results = {}
results[group.name] = {}
if group.name != 'all':
results[group.name]['hosts'] = [h.name for h in sorted(group.hosts, key=attrgetter('name'))]
results[group.name]['vars'] = group.get_vars()
results[group.name]['children'] = []
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
results[group.name]['children'].append(subgroup.name)
results.update(format_group(subgroup))
self._remove_empty(results[group.name])
return results
results = format_group(top)
# populate meta
results['_meta'] = {'hostvars': {}}
hosts = self.inventory.get_hosts()
for host in hosts:
results['_meta']['hostvars'][host.name] = self._get_host_variables(host=host)
self._remove_internal(results['_meta']['hostvars'][host.name])
return results
def yaml_inventory(self, top):
seen = []
def format_group(group):
results = {}
# initialize group + vars
results[group.name] = {}
results[group.name]['vars'] = group.get_vars()
# subgroups
results[group.name]['children'] = {}
for subgroup in sorted(group.child_groups, key=attrgetter('name')):
if subgroup.name != 'all':
results[group.name]['children'].update(format_group(subgroup))
# hosts for group
results[group.name]['hosts'] = {}
if group.name != 'all':
for h in sorted(group.hosts, key=attrgetter('name')):
myvars = {}
if h.name not in seen: # avoid defining host vars more than once
seen.append(h.name)
myvars = self._get_host_variables(host=h)
self._remove_internal(myvars)
results[group.name]['hosts'][h.name] = myvars
self._remove_empty(results[group.name])
return results
return format_group(top)
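# Illustrative invocations (assuming this class is wired up as the standard
# ansible-inventory entry point):
#   ansible-inventory -i hosts --list          # full inventory as JSON
#   ansible-inventory -i hosts --list --yaml   # same output as YAML
#   ansible-inventory -i hosts --host web01    # vars for a single host
#   ansible-inventory -i hosts --graph --vars  # group tree with variables
# Exactly one of --list/--host/--graph is required, as enforced in parse().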
|
e-gob/plataforma-kioscos-autoatencion
|
scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/cli/inventory.py
|
Python
|
bsd-3-clause
| 10,972
|
from datetime import datetime
import logging
from shutil import make_archive, copyfile
import os
from django.conf import settings
from mmc.mixins import BaseCommand as MonitoredCommand
from rest_framework.test import APIClient
from learn import export
logger = logging.getLogger(__name__)
class Command(MonitoredCommand):
help = "Export all data for analysis into CSV files."
#entities_to_export = [
# ('tasks', export.TaskViewSet),
# ('problemsets', export.ProblemSetViewSet),
# ('task_sessions', export.TaskSessionsViewSet),
# ('program_snapshots', export.ProgramSnapshotsViewSet),
#]
def handle(self, *args, **options):
logger.info('Management command called: export_data')
datestamp = datetime.now().strftime('%Y-%m-%d')
dirname = 'robomission-' + datestamp
# The last empty path ('') is there to make it a directory, not a file.
full_dirpath = os.path.join(settings.EXPORTS_DIR, dirname, '')
self.stdout.write('Exporting entities to {path}'.format(path=full_dirpath))
os.makedirs(full_dirpath, exist_ok=True)
#for entity_name, viewset_class in self.entities_to_export:
# self.export_entity(entity_name, viewset_class, full_dirpath)
export.export_to_csv(path=full_dirpath)
bundle_path = self.zip_bundle(full_dirpath)
self.mark_zip_bundle_as_latest(bundle_path)
#def export_entity(self, entity_name, viewset_class, dirpath):
# file_name = entity_name + '.csv'
# file_path = os.path.join(dirpath, file_name)
# self.stdout.write('-> exporting {file_name}'.format(file_name=file_name))
# viewset_class().export_to_csv(path=file_path)
# # We have originally used Django Rest Pandas to create (and possibly
# # transform) dataframe:
# # df = viewset_class().get_dataframe()
# # df.to_csv(file_path)
# # but that caused memory problems as there
# # started to be too many entities.
def zip_bundle(self, dirpath):
# shutil.make_archive needs bundle output path without ".zip" as the
# first argument.
bundle_base = os.path.normpath(dirpath)
root_dir = os.path.dirname(bundle_base)
bundle_dirname = os.path.basename(bundle_base)
path = make_archive(bundle_base, 'zip', root_dir=root_dir, base_dir=bundle_dirname)
self.stdout.write('Created bundle to {path}'.format(path=path))
return path
def mark_zip_bundle_as_latest(self, bundle_path):
latest_bundle_path = os.path.join(settings.EXPORTS_DIR, settings.EXPORT_BUNDLE_NAME)
copyfile(bundle_path, latest_bundle_path)
self.stdout.write('Copied as latest bundle to {path}'.format(path=latest_bundle_path))
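# Illustrative usage (assuming the standard Django management-command wiring):
#   python manage.py export_data
# creates EXPORTS_DIR/robomission-YYYY-MM-DD/ with the CSV exports, zips the
# directory, and copies the archive over the configured "latest" bundle name.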
|
adaptive-learning/robomission
|
backend/learn/management/commands/export_data.py
|
Python
|
gpl-3.0
| 2,781
|
# Copyright (c) 2012 Phil Birkelbach
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
class BusError(Exception):
"""Base class for exceptions in this module"""
pass
class BusInitError(BusError):
"""CAN Bus Initialization Error"""
def __init__(self, msg):
self.msg = msg
class BusReadError(BusError):
"""CAN Bus Read Error"""
def __init__(self, msg):
self.msg = msg
class BusWriteError(BusError):
"""CAN Bus Write Error"""
def __init__(self, msg):
self.msg = msg
class DeviceTimeout(Exception):
"""Device Timeout Exception"""
pass
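# Illustrative sketch (not part of this module): callers would typically wrap
# bus operations and catch the specific subclass first, falling back to the
# BusError base class for any other bus-related failure. The `bus` object and
# its read() method are hypothetical.
def _example_read(bus):
    try:
        return bus.read()
    except BusReadError as err:
        print("read failed: %s" % err.msg)
    except BusError:
        print("some other bus problem")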
|
birkelbach/python-canbus
|
canbus/exceptions.py
|
Python
|
gpl-2.0
| 1,265
|
'''
title = "Camel Case String"
level = "intro"
dificulty = "easy"
tags = ["strings", "functions", "camel-case"]
description = """
Write a function that receives a string and returns a camel case version of it.
Example:
camel_case('hello world') # Hello World
"""
'''
def camel_case(a_string):
pass
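# A sample solution sketch (not part of the assignment stub above); it follows
# the docstring's example, which capitalizes each word of the input:
def camel_case_sample(a_string):
    return ' '.join(word.capitalize() for word in a_string.split(' '))
# camel_case_sample('hello world') -> 'Hello World'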
|
rmotr/whooshercises
|
assignments/camel_case_string.py
|
Python
|
mit
| 310
|
from django.conf import settings
from django.conf.urls import patterns, include, url
# There is a course creators admin table.
from ratelimitbackend import admin
from cms.djangoapps.contentstore.views.program import ProgramAuthoringView, ProgramsIdTokenView
from cms.djangoapps.contentstore.views.organization import OrganizationListView
from student.views import LogoutView
admin.autodiscover()
# Pattern to match a course key or a library key
COURSELIKE_KEY_PATTERN = r'(?P<course_key_string>({}|{}))'.format(
r'[^/]+/[^/]+/[^/]+', r'[^/:]+:[^/+]+\+[^/+]+(\+[^/]+)?'
)
# Pattern to match a library key only
LIBRARY_KEY_PATTERN = r'(?P<library_key_string>library-v1:[^/+]+\+[^/+]+)'
urlpatterns = patterns(
'',
url(r'^transcripts/upload$', 'contentstore.views.upload_transcripts', name='upload_transcripts'),
url(r'^transcripts/download$', 'contentstore.views.download_transcripts', name='download_transcripts'),
url(r'^transcripts/check$', 'contentstore.views.check_transcripts', name='check_transcripts'),
url(r'^transcripts/choose$', 'contentstore.views.choose_transcripts', name='choose_transcripts'),
url(r'^transcripts/replace$', 'contentstore.views.replace_transcripts', name='replace_transcripts'),
url(r'^transcripts/rename$', 'contentstore.views.rename_transcripts', name='rename_transcripts'),
url(r'^transcripts/save$', 'contentstore.views.save_transcripts', name='save_transcripts'),
url(r'^preview/xblock/(?P<usage_key_string>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
'contentstore.views.preview_handler', name='preview_handler'),
url(r'^xblock/(?P<usage_key_string>.*?)/handler/(?P<handler>[^/]*)(?:/(?P<suffix>.*))?$',
'contentstore.views.component_handler', name='component_handler'),
url(r'^xblock/resource/(?P<block_type>[^/]*)/(?P<uri>.*)$',
'openedx.core.djangoapps.common_views.xblock.xblock_resource', name='xblock_resource_url'),
url(r'^not_found$', 'contentstore.views.not_found', name='not_found'),
url(r'^server_error$', 'contentstore.views.server_error', name='server_error'),
url(r'^organizations$', OrganizationListView.as_view(), name='organizations'),
# noop to squelch ajax errors
url(r'^event$', 'contentstore.views.event', name='event'),
url(r'^xmodule/', include('pipeline_js.urls')),
url(r'^heartbeat$', include('heartbeat.urls')),
url(r'^user_api/', include('openedx.core.djangoapps.user_api.legacy_urls')),
url(r'^i18n/', include('django.conf.urls.i18n')),
# User API endpoints
url(r'^api/user/', include('openedx.core.djangoapps.user_api.urls')),
# Update session view
url(r'^lang_pref/session_language', 'lang_pref.views.update_session_language', name='session_language'),
)
# User creation and updating views
urlpatterns += patterns(
'',
url(r'^create_account$', 'student.views.create_account', name='create_account'),
url(r'^activate/(?P<key>[^/]*)$', 'student.views.activate_account', name='activate'),
# ajax view that actually does the work
url(r'^login_post$', 'student.views.login_user', name='login_post'),
url(r'^logout$', LogoutView.as_view(), name='logout'),
)
# restful api
urlpatterns += patterns(
'contentstore.views',
url(r'^$', 'howitworks', name='homepage'),
url(r'^howitworks$', 'howitworks'),
url(r'^signup$', 'signup', name='signup'),
url(r'^signin$', 'login_page', name='login'),
url(r'^request_course_creator$', 'request_course_creator', name='request_course_creator'),
url(r'^course_team/{}(?:/(?P<email>.+))?$'.format(COURSELIKE_KEY_PATTERN), 'course_team_handler'),
url(r'^course_info/{}$'.format(settings.COURSE_KEY_PATTERN), 'course_info_handler'),
url(
r'^course_info_update/{}/(?P<provided_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
'course_info_update_handler'
),
url(r'^home/?$', 'course_listing', name='home'),
url(
r'^course/{}/search_reindex?$'.format(settings.COURSE_KEY_PATTERN),
'course_search_index_handler',
name='course_search_index_handler'
),
url(r'^course/{}?$'.format(settings.COURSE_KEY_PATTERN), 'course_handler', name='course_handler'),
url(r'^course_notifications/{}/(?P<action_state_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
'course_notifications_handler'),
url(r'^course_rerun/{}$'.format(settings.COURSE_KEY_PATTERN), 'course_rerun_handler', name='course_rerun_handler'),
url(r'^container/{}$'.format(settings.USAGE_KEY_PATTERN), 'container_handler'),
url(r'^orphan/{}$'.format(settings.COURSE_KEY_PATTERN), 'orphan_handler'),
url(r'^assets/{}/{}?$'.format(settings.COURSE_KEY_PATTERN, settings.ASSET_KEY_PATTERN), 'assets_handler'),
url(r'^import/{}$'.format(COURSELIKE_KEY_PATTERN), 'import_handler'),
url(r'^import_status/{}/(?P<filename>.+)$'.format(COURSELIKE_KEY_PATTERN), 'import_status_handler'),
url(r'^export/{}$'.format(COURSELIKE_KEY_PATTERN), 'export_handler'),
url(r'^xblock/outline/{}$'.format(settings.USAGE_KEY_PATTERN), 'xblock_outline_handler'),
url(r'^xblock/container/{}$'.format(settings.USAGE_KEY_PATTERN), 'xblock_container_handler'),
url(r'^xblock/{}/(?P<view_name>[^/]+)$'.format(settings.USAGE_KEY_PATTERN), 'xblock_view_handler'),
url(r'^xblock/{}?$'.format(settings.USAGE_KEY_PATTERN), 'xblock_handler'),
url(r'^tabs/{}$'.format(settings.COURSE_KEY_PATTERN), 'tabs_handler'),
url(r'^settings/details/{}$'.format(settings.COURSE_KEY_PATTERN), 'settings_handler'),
url(r'^settings/grading/{}(/)?(?P<grader_index>\d+)?$'.format(settings.COURSE_KEY_PATTERN), 'grading_handler'),
url(r'^settings/advanced/{}$'.format(settings.COURSE_KEY_PATTERN), 'advanced_settings_handler'),
url(r'^textbooks/{}$'.format(settings.COURSE_KEY_PATTERN), 'textbooks_list_handler'),
url(r'^textbooks/{}/(?P<textbook_id>\d[^/]*)$'.format(settings.COURSE_KEY_PATTERN), 'textbooks_detail_handler'),
url(r'^videos/{}$'.format(settings.COURSE_KEY_PATTERN), 'videos_handler'),
url(r'^video_encodings_download/{}$'.format(settings.COURSE_KEY_PATTERN), 'video_encodings_download'),
url(r'^group_configurations/{}$'.format(settings.COURSE_KEY_PATTERN), 'group_configurations_list_handler'),
url(r'^group_configurations/{}/(?P<group_configuration_id>\d+)(/)?(?P<group_id>\d+)?$'.format(
settings.COURSE_KEY_PATTERN), 'group_configurations_detail_handler'),
url(r'^api/val/v0/', include('edxval.urls')),
)
JS_INFO_DICT = {
'domain': 'djangojs',
# We need to explicitly include external Django apps that are not in LOCALE_PATHS.
'packages': ('openassessment',),
}
if settings.FEATURES.get('ENABLE_CONTENT_LIBRARIES'):
urlpatterns += (
url(r'^library/{}?$'.format(LIBRARY_KEY_PATTERN),
'contentstore.views.library_handler', name='library_handler'),
url(r'^library/{}/team/$'.format(LIBRARY_KEY_PATTERN),
'contentstore.views.manage_library_users', name='manage_library_users'),
)
if settings.FEATURES.get('ENABLE_EXPORT_GIT'):
urlpatterns += (url(
r'^export_git/{}$'.format(
settings.COURSE_KEY_PATTERN,
),
'contentstore.views.export_git',
name='export_git',
),)
if settings.FEATURES.get('ENABLE_SERVICE_STATUS'):
urlpatterns += patterns(
'',
url(r'^status/', include('service_status.urls')),
)
if settings.FEATURES.get('AUTH_USE_CAS'):
urlpatterns += (
url(r'^cas-auth/login/$', 'external_auth.views.cas_login', name="cas-login"),
url(r'^cas-auth/logout/$', 'django_cas.views.logout', {'next_page': '/'}, name="cas-logout"),
)
urlpatterns += patterns('', url(r'^admin/', include(admin.site.urls)),)
# enable automatic login
if settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING'):
urlpatterns += (
url(r'^auto_auth$', 'student.views.auto_auth'),
)
# enable entrance exams
if settings.FEATURES.get('ENTRANCE_EXAMS'):
urlpatterns += (
url(r'^course/{}/entrance_exam/?$'.format(settings.COURSE_KEY_PATTERN), 'contentstore.views.entrance_exam'),
)
# Enable Web/HTML Certificates
if settings.FEATURES.get('CERTIFICATES_HTML_VIEW'):
urlpatterns += (
url(r'^certificates/activation/{}/'.format(settings.COURSE_KEY_PATTERN),
'contentstore.views.certificates.certificate_activation_handler'),
url(r'^certificates/{}/(?P<certificate_id>\d+)/signatories/(?P<signatory_id>\d+)?$'.format(
settings.COURSE_KEY_PATTERN), 'contentstore.views.certificates.signatory_detail_handler'),
url(r'^certificates/{}/(?P<certificate_id>\d+)?$'.format(settings.COURSE_KEY_PATTERN),
'contentstore.views.certificates.certificates_detail_handler'),
url(r'^certificates/{}$'.format(settings.COURSE_KEY_PATTERN),
'contentstore.views.certificates.certificates_list_handler')
)
urlpatterns += (
# These views use a configuration model to determine whether or not to
# display the Programs authoring app. If disabled, a 404 is returned.
url(r'^programs/id_token/$', ProgramsIdTokenView.as_view(), name='programs_id_token'),
# Drops into the Programs authoring app, which handles its own routing.
url(r'^program/', ProgramAuthoringView.as_view(), name='programs'),
)
if settings.DEBUG:
try:
from .urls_dev import urlpatterns as dev_urlpatterns
urlpatterns += dev_urlpatterns
except ImportError:
pass
if 'debug_toolbar' in settings.INSTALLED_APPS:
import debug_toolbar
urlpatterns += (
url(r'^__debug__/', include(debug_toolbar.urls)),
)
# Custom error pages
# pylint: disable=invalid-name
handler404 = 'contentstore.views.render_404'
handler500 = 'contentstore.views.render_500'
# display error page templates, for testing purposes
urlpatterns += (
url(r'^404$', handler404),
url(r'^500$', handler500),
)
|
marcore/edx-platform
|
cms/urls.py
|
Python
|
agpl-3.0
| 9,922
|
# Django settings for djangoajaxtest project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': '', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = '4-yp6n$sq4m+e0o^%5psstkil98=)e^ji8%a8^+_x$j2z5s3($'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'djangoajaxtest.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'djangoajaxtest.basic',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
ROOT_URL = 'http://localhost:8000'
BASIC_MEDIA_PREFIX = '%s/basicmedia/' % ROOT_URL
|
vincent-petithory/django-ajax
|
examples/djangoajaxtest/settings.py
|
Python
|
gpl-3.0
| 3,515
|
###############################################################################
# Copyright 2014 The University of Texas at Austin #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
###############################################################################
import json
from xml.dom.minidom import getDOMImplementation
from ipf.data import Data, Representation
from .entity import *
#######################################################################################################################
class Benchmark(Entity):
def __init__(self):
Entity.__init__(self)
self.Type = None # Benchmark_t
self.Value = None # a number
self.ExecutionEnvironmentID = None # string uri
self.ComputingManagerID = None # string uri
#######################################################################################################################
class BenchmarkOgfJson(EntityOgfJson):
data_cls = Benchmark
def __init__(self, data):
EntityOgfJson.__init__(self,data)
def get(self):
return json.dumps(self.toJson(),sort_keys=True,indent=4)
def toJson(self):
doc = EntityOgfJson.toJson(self)
doc["Type"] = self.data.Type
doc["Value"] = self.data.Value
associations = {}
associations["ExecutionEnvironmentID"] = self.data.ExecutionEnvironmentID
associations["ComputingManagerID"] = self.data.ComputingManagerID
doc["Associations"] = associations
return doc
#######################################################################################################################
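# Illustrative sketch (not part of ipf): populating a Benchmark and rendering
# it through its OGF JSON representation. The URI and benchmark type are
# assumed values.
def _example_benchmark_json():
    bench = Benchmark()
    bench.Type = "specfp2000"
    bench.Value = 1234.5
    bench.ExecutionEnvironmentID = "urn:glue2:ExecutionEnvironment:node1"
    return BenchmarkOgfJson(bench).get()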
|
ericblau/ipf-xsede
|
ipf/glue2/benchmark.py
|
Python
|
apache-2.0
| 2,610
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Movie Ultra 7K Sawlive regex by Quequino
# Version 0.2 (7.12.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Thanks to Jesús's plugintools library (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import time
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
import json
import math
addonName = xbmcaddon.Addon().getAddonInfo("name")
addonVersion = xbmcaddon.Addon().getAddonInfo("version")
addonId = xbmcaddon.Addon().getAddonInfo("id")
addonPath = xbmcaddon.Addon().getAddonInfo("path")
# Function that drives the process of building the original stream URL
def sawlive(params):
plugintools.log("[movie.ultra.7k-0.3.0].ninestreams "+repr(params))
url_user = {}
    # Build the dictionary...
url = params.get("url")
url_extracted = url.split(" ")
for entry in url_extracted:
if entry.startswith("rtmp"):
entry = entry.replace("rtmp=", "")
url_user["rtmp"]=entry
elif entry.startswith("playpath"):
entry = entry.replace("playpath=", "")
url_user["playpath"]=entry
elif entry.startswith("swfUrl"):
entry = entry.replace("swfUrl=", "")
url_user["swfurl"]=entry
elif entry.startswith("pageUrl"):
entry = entry.replace("pageUrl=", "")
url_user["pageurl"]=entry
elif entry.startswith("token"):
entry = entry.replace("token=", "")
url_user["token"]=entry
elif entry.startswith("referer"):
entry = entry.replace("referer=", "")
url_user["referer"]=entry
plugintools.log("URL_user dict= "+repr(url_user))
pageurl = url_user.get("pageurl")
referer = url_user.get("referer")
if referer == "":
referer = 'http://www.wiz1.net/lag10_home.php'
url = wizz1(pageurl, referer)
# Make a request to the pageUrl
def gethttp_headers(url, referer):
request_headers=[]
request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"])
request_headers.append(["Referer",referer])
body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers)
plugintools.log("body= "+body)
return body
def wizz1(pageurl, referer):
plugintools.log("empieza...")
data=gethttp_headers(pageurl,referer)
data=unpack(data);
r='src="([^"]+)';w=plugintools.find_single_match(urllib.unquote_plus(tamzar(data)),r);
data=gethttp_headers(w,referer);url=w;
r='SWFObject\(\'([^\']+).*?file\',\s?\'([^\']+).*?streamer\',\s?\'([^\']+)';w=plugintools.find_multiple_matches(data,r);
url=w[0][2]+' playpath='+w[0][1]+' swfUrl='+w[0][0]+' token=#yw%tt#w@kku conn=S:OK live=1 pageUrl='+url;print url
plugintools.play_resolved_url(url)
def tamzar(data):
r='Tamrzar\.push\(\'([^\']+)';w=plugintools.find_multiple_matches(data,r);data=''.join(w);
return data
def unpack(sJavascript,iteration=1, totaliterations=1 ):
aSplit = sJavascript.split("rn p}('")
p1,a1,c1,k1=('','0','0','')
ss="p1,a1,c1,k1=(\'"+aSplit[1].split(".spli")[0]+')';exec(ss)
k1=k1.split('|')
aSplit = aSplit[1].split("))'")
e = '';d = ''
sUnpacked1 = str(__unpack(p1, a1, c1, k1, e, d,iteration))
if iteration>=totaliterations: return sUnpacked1
else: return unpack(sUnpacked1,iteration+1)
def __unpack(p, a, c, k, e, d, iteration, v=1):
while (c >= 1):
c = c -1
if (k[c]):
aa=str(__itoaNew(c, a))
p=re.sub('\\b' + aa +'\\b', k[c], p)
return p
def __itoa(num, radix):
result = ""
if num==0: return '0'
while num > 0: result = "0123456789abcdefghijklmnopqrstuvwxyz"[num % radix] + result;num /= radix
return result
def __itoaNew(cc, a):
aa="" if cc < a else __itoaNew(int(cc / a),a)
cc = (cc % a)
bb=chr(cc + 29) if cc> 35 else str(__itoa(cc,36))
return aa+bb
|
corvorepack/REPOIVAN
|
plugin.video.movie.ultra.7k/resources/regex/sawlive.py
|
Python
|
gpl-2.0
| 4,208
|
"""
INFORM over multiple transports
+++++++++++++++++++++++++++++++
The following script sends an SNMP INFORM notification using the following options:
* with SNMPv2c
* with community name 'public'
* over IPv4/UDP and IPv6/UDP
* send INFORM notification
* to a Manager at demo.snmplabs.com:162 and [::1]:162
* with TRAP ID 'coldStart' specified as an OID
The following Net-SNMP commands will produce similar SNMP notifications:
| $ snmpinform -v2c -c public udp:demo.snmplabs.com 0 1.3.6.1.6.3.1.1.5.1
| $ snmpinform -v2c -c public udp6:[::1] 0 1.3.6.1.6.3.1.1.5.1
"""#
from time import time
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
from pysnmp.carrier.asyncore.dgram import udp, udp6
from pyasn1.codec.ber import encoder, decoder
from pysnmp.proto.api import v2c as pMod
# Build PDU
reqPDU = pMod.InformRequestPDU()
pMod.apiTrapPDU.setDefaults(reqPDU)
# Build message
trapMsg = pMod.Message()
pMod.apiMessage.setDefaults(trapMsg)
pMod.apiMessage.setCommunity(trapMsg, 'public')
pMod.apiMessage.setPDU(trapMsg, reqPDU)
startedAt = time()
def cbTimerFun(timeNow):
if timeNow - startedAt > 3:
raise Exception("Request timed out")
def cbRecvFun(transportDispatcher, transportDomain, transportAddress,
wholeMsg, reqPDU=reqPDU):
while wholeMsg:
rspMsg, wholeMsg = decoder.decode(wholeMsg, asn1Spec=pMod.Message())
rspPDU = pMod.apiMessage.getPDU(rspMsg)
# Match response to request
if pMod.apiPDU.getRequestID(reqPDU) == pMod.apiPDU.getRequestID(rspPDU):
# Check for SNMP errors reported
errorStatus = pMod.apiPDU.getErrorStatus(rspPDU)
if errorStatus:
print(errorStatus.prettyPrint())
else:
print('INFORM message delivered, response var-binds follow')
for oid, val in pMod.apiPDU.getVarBinds(rspPDU):
print('%s = %s' % (oid.prettyPrint(), val.prettyPrint()))
transportDispatcher.jobFinished(1)
return wholeMsg
transportDispatcher = AsyncoreDispatcher()
transportDispatcher.registerRecvCbFun(cbRecvFun)
transportDispatcher.registerTimerCbFun(cbTimerFun)
# UDP/IPv4
transportDispatcher.registerTransport(
udp.DOMAIN_NAME, udp.UdpSocketTransport().openClientMode()
)
transportDispatcher.sendMessage(
encoder.encode(trapMsg), udp.DOMAIN_NAME, ('demo.snmplabs.com', 162)
)
transportDispatcher.jobStarted(1)
# UDP/IPv6
# transportDispatcher.registerTransport(
# udp6.domainName, udp6.Udp6SocketTransport().openClientMode()
# )
# transportDispatcher.sendMessage(
# encoder.encode(trapMsg), udp6.domainName, ('::1', 162)
# )
# transportDispatcher.jobStarted(1)
# Dispatcher will finish as all scheduled messages are sent
transportDispatcher.runDispatcher()
transportDispatcher.closeDispatcher()
|
etingof/pysnmp
|
examples/v1arch/asyncore/agent/ntforg/send-inform-over-ipv4-and-ipv6.py
|
Python
|
bsd-2-clause
| 2,835
|
import bpy
from bpy.types import Node
from .. import node_tree
from .. import export_utils
class FloatNode(Node, node_tree.AvangoCustomTreeNode):
bl_idname = "FloatInputNode"
bl_label = "Float"
def init(self, context):
self.inputs.new("NodeSocketFloat", "Value")
self.outputs.new("NodeSocketFloat", "Value")
def to_dict(self):
return {
'type': 'Float',
'name': self.name,
'values': export_utils.export_values(self),
'field_connections': export_utils.export_links(self)
}
def execute(self, x):
return x
def update(self):
pass
def register():
bpy.utils.register_class(FloatNode)
def unregister():
bpy.utils.unregister_class(FloatNode)
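# Usage note (illustrative): register()/unregister() follow Blender's add-on
# convention. Once registered, the node appears as "Float" in the custom
# Avango node tree, and to_dict() feeds the exporter with its name, values
# and field connections.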
|
jakobharlan/avango
|
avango-blender/blender-addon/nodes/float_node.py
|
Python
|
lgpl-3.0
| 780
|
from django.contrib import admin
from blog.models import Post
from blog.models import Comment
admin.site.register(Post)
admin.site.register(Comment)
|
I-prefer-the-front-end/I-prefer-the-front-end
|
iptfe/blog/admin.py
|
Python
|
mit
| 151
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C)
# 2004-2011: Pexego Sistemas Informáticos. (http://pexego.es)
# 2013: Top Consultant Software Creations S.L.
# (http://www.topconsultant.es/)
# 2014: Serv. Tecnol. Avanzados (http://www.serviciosbaeza.com)
# Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>
#
#    Original authors: Luis Manuel Angueira Blanco (Pexego)
#                      Omar Castiñeira Saavedra (omar@pexego.es)
#    OpenERP 7.0 migration: Ignacio Martínez and Miguel López.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
from openerp.tools.translate import _
class Mod349ExportToBoe(orm.TransientModel):
_inherit = "l10n.es.aeat.report.export_to_boe"
_name = "l10n.es.aeat.mod349.export_to_boe"
_description = "Export AEAT Model 349 to BOE format"
def _get_company_name_with_title(self, company_obj, context=None):
"""Returns company name with title."""
if company_obj.partner_id and company_obj.partner_id.title:
return company_obj.name + ' ' + \
company_obj.partner_id.title.name.capitalize()
return company_obj.name
def _get_formatted_declaration_record(self, cr, uid, report,
context=None):
"""
        Returns a type 1 (declaration/company) formatted record.
        · All amounts must be positive
        · Numeric fields with no data must be filled with zeros
        · Alphanumeric/alphabetic fields with no data must be filled with
          empty spaces
        · Numeric fields must be right-aligned and filled with zeros on
          the left
        · Alphanumeric/alphabetic fields must be uppercase and left-aligned,
          filled with empty spaces on the right side. No special characters
          are allowed unless specified in the field description
        Format of the record:
        Record type 1 – Declarant record:
        Positions  Nature        Description
        1          Numeric       Record type, constant = '1'
        2-4        Numeric       Declaration model, constant = '349'
        5-8        Numeric       Fiscal year
        9-17       Alphanumeric  Declarant's NIF
        18-57      Alphanumeric  Declarant's surname and name or company name
        58         Alphabetic    Support type
        59-67      Numeric (9)   Contact phone
        68-107     Alphabetic    Contact surname and name
        108-120    Numeric       Declaration identification number
        121-122    Alphabetic    Complementary or substitutive declaration
        123-135    Numeric       Previous declaration identification number
        136-137    Alphanumeric  Period
        138-146    Numeric       Total number of intra-community operators
        147-161    Numeric       Amount of intra-community operations
        - 147-159  Numeric       Amount of intra-community operations (integer part)
        - 160-161  Numeric       Amount of intra-community operations (decimal part)
        162-170    Numeric       Total number of intra-community operators with rectifications
        171-185    Numeric       Total amount of rectifications
        - 171-183  Numeric       Total amount of rectifications (integer part)
        - 184-185  Numeric       Total amount of rectifications (decimal part)
        186        Alphabetic    Periodicity-change indicator for the declaration obligation (X or '')
        187-390    Blanks        ----------------------------------------
        391-399    Alphanumeric  Legal representative's NIF
        400-487    Blanks        ----------------------------------------
        488-500                  Electronic seal
"""
assert report, 'No Report defined'
try:
fiscal_year = int((report.fiscalyear_id.code or '')[:4])
        except (ValueError, TypeError):
raise orm.except_orm(_('Fiscal year code'),
_('First four characters of fiscal year \
code must be numeric and contain the fiscal \
year number. Please, fix it and try again.'))
company_name = self._get_company_name_with_title(report.company_id,
context=context)
period = report.period_selection == 'MO' and report.month_selection \
or report.period_selection
text = '' # Empty text
        text += '1'  # Record type
        text += '349'  # Declaration model
        text += self._formatNumber(fiscal_year, 4)  # Fiscal year
        text += self._formatString(report.company_vat, 9)  # Declarant's NIF
        text += self._formatString(company_name, 40)  # Declarant's surname and name or company name
        text += self._formatString(report.support_type, 1)  # Support type
        text += self._formatString(report.contact_phone.replace(' ', ''), 9)  # Contact person (phone)
        text += self._formatString(report.contact_name, 40)  # Contact person (surname and name)
        text += self._formatNumber(report.number, 13)  # Declaration identification number
        text += self._formatString(report.type, 2).replace('N', ' ')  # Complementary or substitutive declaration
        text += self._formatNumber(report.previous_number, 13)  # Previous declaration identification number
        text += self._formatString(period, 2)  # Period
        text += self._formatNumber(report.total_partner_records, 9)  # Total number of intra-community operators
        text += self._formatNumber(report.total_partner_records_amount, 13, 2)  # Total amount of intra-community operations
        text += self._formatNumber(report.total_partner_refunds, 9)  # Total number of intra-community operators with rectifications
        text += self._formatNumber(report.total_partner_refunds_amount, 13, 2)  # Total amount of rectifications
        text += self._formatBoolean(report.frequency_change)  # Periodicity-change indicator for the declaration obligation
        text += 204 * ' '  # Blanks
        text += self._formatString(report.representative_vat, 9)  # Legal representative's NIF
        #text += 9*' '
        text += 88 * ' '  # Blanks
        text += 13 * ' '  # Electronic seal
        text += '\r\n'  # Carriage return + line feed
assert len(text) == 502, \
_("The type 1 record must be 502 characters long")
return text
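# A minimal sketch (hypothetical values) of the fixed-width helpers used
# above. Their definitions are not part of this excerpt, so the exact
# semantics shown here are assumptions inferred from the BOE layout:
#   _formatNumber(2013, 4)     -> '2013'            (right aligned, zero-padded)
#   _formatNumber(5, 9)        -> '000000005'
#   _formatNumber(12.5, 13, 2) -> '000000000001250' (13 integer digits + 2 decimals)
#   _formatString('Acme', 9)   -> 'ACME     '       (uppercase, left aligned, space-padded)
#   _formatBoolean(True)       -> 'X'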
def _get_formatted_main_record(self, cr, uid, report, context=None):
file_contents = ''
for partner_record in report.partner_record_ids:
file_contents += self._get_formatted_partner_record(report,
partner_record, context=context)
for refund_record in report.partner_refund_ids:
file_contents += self._get_formatted_partner_refund(report,
refund_record, context=context)
return file_contents
def _get_formatted_partner_record(self, report, partner_record,
context=None):
"""
Returns a type 2 (partner) record.
Format of the record:
Record type 2:
Positions Type Description
1 Numeric Record type, constant = '2'
2-4 Numeric Return form number, constant = '349'
5-8 Numeric Fiscal year
9-17 Alphanumeric Declarant's NIF
18-75 Blanks ----------------------------------------
76-92 Alphanumeric Intra-community operator's NIF
- 76-77 Alphanumeric Country code
- 78-92 Alphanumeric NIF
93-132 Alphanumeric Intra-community operator's surname and name or company name
133 Alphanumeric Operation key
134-146 Numeric Taxable base
- 134-144 Numeric Taxable base (integer part)
- 145-146 Numeric Taxable base (decimal part)
147-500 Blanks ----------------------------------------
"""
assert report, 'No AEAT 349 Report defined'
assert partner_record, 'No Partner record defined'
text = ''
try:
fiscal_year = int((report.fiscalyear_id.code or '')[:4])
except ValueError:
raise orm.except_orm(_('Fiscal year code'),
_('First four characters of fiscal year '
'code must be numeric and contain the fiscal '
'year number. Please fix it and try again.'))
# Format some fields (the provided values may not be correct as given)
# NIF: make sure the country code is not included
company_vat = report.company_vat
if len(report.company_vat) > 9:
company_vat = report.company_vat[2:]
text += '2' # Record type
text += '349' # Return form number
text += self._formatNumber(fiscal_year, 4) # Fiscal year
text += self._formatString(company_vat, 9) # Declarant's NIF
text += 58 * ' ' # Blanks
# Intra-community operator's NIF
text += self._formatString(partner_record.partner_vat, 17)
# Intra-community operator's surname and name or company name
text += self._formatString(partner_record.partner_id.name, 40)
# Operation key
text += self._formatString(partner_record.operation_key, 1)
# Taxable base (11 integer digits + 2 decimals)
text += self._formatNumber(partner_record.total_operation_amount, 11, 2)
text += 354 * ' ' # Blanks
text += '\r\n' # Carriage return + line feed
assert len(text) == 502, \
_("The type 2 record must be 502 characters long")
return text
def _get_formatted_partner_refund(self, report, refund_record,
context=None):
"""
Returns a type 2 (refund/rectification) record.
Format of the record:
Record type 2:
Positions Type Description
1 Numeric Record type, constant = '2'
2-4 Numeric Return form number, constant = '349'
5-8 Numeric Fiscal year
9-17 Alphanumeric Declarant's NIF
18-75 Blanks ----------------------------------------
76-92 Alphanumeric Intra-community operator's NIF
- 76-77 Alphanumeric Country code
- 78-92 Alphanumeric NIF
93-132 Alphanumeric Intra-community operator's surname and name or company name
133 Alphanumeric Operation key
134-146 Blanks ----------------------------------------
147-178 Alphanumeric Rectifications
- 147-150 Numeric Fiscal year
- 151-152 Alphanumeric Period
- 153-165 Numeric Rectified taxable base
- 153-163 Numeric Rectified taxable base (integer part)
- 164-165 Numeric Rectified taxable base (decimal part)
166-178 Numeric Previously declared taxable base
- 166-176 Numeric Previously declared taxable base (integer part)
- 177-178 Numeric Previously declared taxable base (decimal part)
179-500 Blanks ----------------------------------------
"""
assert report, 'No AEAT 349 Report defined'
assert refund_record, 'No Refund record defined'
text = ''
period = refund_record.period_selection == 'MO' and refund_record.month_selection or refund_record.period_selection
text += '2' # Record type
text += '349' # Return form number
text += self._formatNumber(report.fiscalyear_id.code[:4], 4) # Fiscal year
text += self._formatString(report.company_vat, 9) # Declarant's NIF
text += 58 * ' ' # Blanks
text += self._formatString(refund_record.partner_id.vat, 17) # Intra-community operator's NIF
text += self._formatString(refund_record.partner_id.name, 40) # Intra-community operator's surname and name or company name
text += self._formatString(refund_record.operation_key, 1) # Operation key
text += 13 * ' ' # Blanks
text += self._formatNumber(refund_record.fiscalyear_id.code[:4], 4) # Fiscal year (of the rectification)
text += self._formatString(period, 2) # Period (of the rectification)
text += self._formatNumber(refund_record.total_operation_amount, 11, 2) # Rectified taxable base
text += self._formatNumber(refund_record.total_origin_amount, 11, 2) # Previously declared taxable base
text += 322 * ' ' # Blanks
text += '\r\n' # Carriage return + line feed
assert len(text) == 502, _("The type 2 record must be 502 characters long")
return text
|
otherway/loc-spain
|
l10n_es_aeat_mod349/wizard/export_mod349_to_boe.py
|
Python
|
agpl-3.0
| 15,781
|
from time import time as ti
from random import randrange as rr
import numpy as np
from affichage_bataille import affichage
def bataille():
def distribute():
"""distribue les cartes"""
cards = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
np.random.shuffle(cards)
return [cards[:26], cards[26:]]
nombre_plis = 0
jeux = distribute()
jeu_base = tuple(jeux) # keep the starting hands for the statistics
def tirage(jeux): # draws once, from the longer deck to the shorter one
if len(jeux[0]) > len(jeux[1]): # draw from deck 0 into deck 1
aleatoir = rr(len(jeux[1])-1, len(jeux[0]))
"""pick a random index of a card that is not engaged in the
skirmish, i.e. an index greater than the length of deck 1"""
jeux[1].append(jeux[0][aleatoir]) # player 1 takes the card at index aleatoir from player 0
del jeux[0][aleatoir] # the card at index aleatoir is removed from player 0's deck
return escarmouche(jeux) # semi-recursion: call escarmouche again with the new decks
elif len(jeux[0]) < len(jeux[1]): # draw from deck 1 into deck 0
aleatoir = rr(len(jeux[0])-1, len(jeux[1]))
"""pick a random index of a card that is not engaged in the
skirmish, i.e. an index greater than the length of deck 0"""
jeux[0].append(jeux[1][aleatoir]) # player 0 takes the card at index aleatoir from player 1
del jeux[1][aleatoir] # the card at index aleatoir is removed from player 1's deck
return escarmouche(jeux) # semi-recursion: call escarmouche again with the new decks
else: # both decks have the same size, so this is the ultimate battle
return 0 # stops the main while loop
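# Worked example of the index choice above: with 10 cards in deck 0 and 4
# in deck 1, rr(3, 10) returns an index in [3, 9] of deck 0, which per the
# original comment is meant to be a card not engaged in the skirmish.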
def redistribute(jeux, vainqueur, perdant, escarmoucheDepth):
"""put the cards won in the skirmish back into the winner's deck"""
for i1 in range(0, escarmoucheDepth+1): # for every card engaged in the skirmish
jeux[vainqueur].append(jeux[perdant][0]) # the winner takes the first card of the loser's deck
jeux[vainqueur].append(jeux[vainqueur][0]) # the winner moves his own first card to the back
del jeux[vainqueur][0] # the winner's first card is removed from the front
del jeux[perdant][0] # the loser's first card is removed from the front
return jeux
def escarmouche(jeux):
"""simulate a skirmish ("war")"""
escarmoucheDepth = 0
try:
while jeux[0][escarmoucheDepth] == jeux[1][escarmoucheDepth]:
"""as long as the compared cards are equal, deepen the skirmish"""
escarmoucheDepth += 2
if jeux[0][escarmoucheDepth] > jeux[1][escarmoucheDepth]: # player 0 wins the skirmish
jeux = redistribute(jeux, 0, 1, escarmoucheDepth)
elif jeux[0][escarmoucheDepth] < jeux[1][escarmoucheDepth]: # player 1 wins the skirmish
jeux = redistribute(jeux, 1, 0, escarmoucheDepth)
except IndexError:
jeux = tirage(jeux) # a deck ran dry mid-skirmish, so draw a card to continue
return jeux
while jeux != 0 and len(jeux[0]) > 0 and len(jeux[1]) > 0: # play tricks until there is a winner
nombre_plis += 1
if jeux[0][0] > jeux[1][0]: # player 0 wins the trick
jeux[0].append(jeux[1][0]) # the winner takes the loser's card
jeux[0].append(jeux[0][0]) # the winner keeps his own card, moved to the back
del jeux[1][0] # then the loser's card is removed from the front
del jeux[0][0] # then the winner's card is removed from the front
elif jeux[0][0] < jeux[1][0]: # player 1 wins the trick
jeux[1].append(jeux[0][0]) # the winner takes the loser's card
jeux[1].append(jeux[1][0]) # the winner keeps his own card, moved to the back
del jeux[0][0] # then the loser's card is removed from the front
del jeux[1][0] # then the winner's card is removed from the front
else:
jeux = escarmouche(jeux)
if jeux != 0 and len(jeux[0]) > len(jeux[1]): # player 0 wins the war
return 1, nombre_plis, jeu_base # return 1
elif jeux != 0 and len(jeux[0]) < len(jeux[1]): # player 1 wins the war
return 2, nombre_plis, jeu_base # return 2
elif jeux == 0: # ultimate battle, i.e. a draw
return 3, nombre_plis, jeu_base # return 3
result_full = [[0, 0, 0], [], []]
"""[player 1 wins, player 2 wins, draws], [number of tricks per game], [starting hands]"""
nombre_bataille = int(input("Number of wars to simulate: "))
t1 = ti()
for i in range(0, nombre_bataille):
result_one = bataille()
result_full[1].append(result_one[1]) # collect the number of tricks of the simulated war
result_full[2].append(result_one[2]) # collect the starting hands of the simulated war
result_full[0][result_one[0]-1] += 1 # collect the result of the simulated war
t = ti() - t1
affichage(result_full, nombre_bataille, t)
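# Usage note: running this module prompts for the number of wars to
# simulate, plays them all, and passes [win/draw counts, trick counts per
# game, starting hands] plus the elapsed time to affichage() (defined in
# affichage_bataille) for display.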
|
ZeeGabByte/War-cards-game-simulation
|
War/Alternativ_versions/Tirage/p_(perdant)/bataille_module_tirage_p.py
|
Python
|
gpl-3.0
| 5,709
|
#!/usr/bin/env python
from argparse import ArgumentParser
from matplotlib import pyplot as plt
from ggraph import Greengraph
# execute iff we are on command line
if __name__ == "__main__":
parser = ArgumentParser(description="Run Greengraph from command line")
parser.add_argument('--begin', '-b', type=str)
parser.add_argument('--end', '-e', type=str)
parser.add_argument('--steps', '-s', type=int)
parser.add_argument('--out', '-o')
arguments = parser.parse_args()
mygraph = Greengraph(arguments.begin, arguments.end)
data = mygraph.green_between(arguments.steps)
plt.plot(data)
plt.savefig(arguments.out)
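# Example invocation (place names illustrative; the Greengraph API is
# assumed from the import above):
#   python command_line.py --begin London --end Oxford --steps 10 --out out.png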
|
jscott6/greengraph
|
build/scripts-3.5/command_line.py
|
Python
|
mit
| 653
|
import asyncio
import jinja2
import aiohttp_debugtoolbar
import aiohttp_jinja2
from aiohttp import web
@aiohttp_jinja2.template('index.html')
def basic_handler(request):
return {'title': 'example aiohttp_debugtoolbar!',
'text': 'Hello aiohttp_debugtoolbar!',
'app': request.app}
@asyncio.coroutine
def exception_handler(request):
raise NotImplementedError
@asyncio.coroutine
def init(loop):
# add aiohttp_debugtoolbar middleware to you application
app = web.Application(loop=loop)
# install aiohttp_debugtoolbar
aiohttp_debugtoolbar.setup(app)
template = """
<html>
<head>
<title>{{ title }}</title>
</head>
<body>
<h1>{{ text }}</h1>
<p>
<a href="{{ app.router['exc_example'].url() }}">
Exception example</a>
</p>
</body>
</html>
"""
# install jinja2 templates
loader = jinja2.DictLoader({'index.html': template})
aiohttp_jinja2.setup(app, loader=loader)
# init routes for index page, and page with error
app.router.add_route('GET', '/', basic_handler, name='index')
app.router.add_route('GET', '/exc', exception_handler, name='exc_example')
handler = app.make_handler()
srv = yield from loop.create_server(handler, '127.0.0.1', 9000)
print("Server started at http://127.0.0.1:9000")
return srv, handler
loop = asyncio.get_event_loop()
srv, handler = loop.run_until_complete(init(loop))
try:
loop.run_forever()
except KeyboardInterrupt:
loop.run_until_complete(handler.finish_connections())
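# Once running, open http://127.0.0.1:9000/ in a browser. The toolbar UI is
# typically served under the /_debugtoolbar path (assumed default of
# aiohttp_debugtoolbar.setup()).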
|
realer01/aiohttp-debugtoolbar
|
examples/simple.py
|
Python
|
apache-2.0
| 1,624
|
from .base import *
DEBUG = False
ALLOWED_HOSTS = ['163.172.167.163', 'tweets.mikexine.com']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'tweets',
'USER': 'collector',
'PASSWORD': 'c0llect0r',
'HOST': 'localhost',
'PORT': '',
}
}
# PYTHON_EXECUTABLE = '/srv/django-envs/tweetset/bin/python'
PROJECT_ROOT = '/home/mikexine/tweetset/tweetset/'
STATICFILES_DIRS = (
PROJECT_ROOT + 'collect/static/',
)
|
mikexine/tweetset
|
tweetset/tweetset/settings/production.py
|
Python
|
mit
| 534
|
#!/usr/bin/env python
# encoding=utf-8
"""
Crawl the Douban Movie Top 250 - complete example code
What we already know:
1. Each page lists 25 movies, and there are 10 pages in total.
2. The movie list sits in an <ol> tag whose class attribute is grid_view.
3. Each movie's info is inside an <li> tag of that <ol>.
"""
import codecs
import requests
from bs4 import BeautifulSoup
DOWNLOAD_URL = 'http://movie.douban.com/top250/'
def download_page(url):
"""
Without a 'User-Agent' header this request gets a 403. That usually means
either the site requires a login, or the server decided the client is a
crawler and refused it - clearly the latter here. Browsers send a
User-Agent request header identifying the browser type; with the requests
library the default is python-requests/2.8.1 (the version number may
differ). Checking the request's U-A is about the simplest anti-crawler
mechanism there is, and faking a browser U-A easily gets around it.
"""
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36'
}
data = requests.get(url, headers=headers).content
return data
"""
We create a BeautifulSoup object, then use it to find the <ol> tag with
class grid_view in the HTML document. find_all then gives us the movie
collection; iterating over it, we pull out each movie's name. The body of
the for loop simply parses each <li> tag - you can inspect its structure
with the browser developer tools. The link to the next page lives in a
<span> with class next, inside its <a> tag. On the last page that <a>
disappears, so there is nothing more to fetch.
"""
def parse_html(html):
soup = BeautifulSoup(html, "lxml")
movie_list_soup = soup.find('ol', attrs={'class': 'grid_view'})
movie_list = []
for movie_li in movie_list_soup.find_all('li'):
detail = movie_li.find('div', attrs={'class': 'hd'})
pic = movie_li.find('div', attrs={'class': 'pic'})
rank = pic.find('em').getText()
name = detail.find('span', attrs={'class': 'title'}).getText()
movie = {'name': name, 'rank': rank}
movie_list.append(movie)
next_page = soup.find('span', attrs={'class': 'next'}).find('a')
if next_page:
return movie_list, DOWNLOAD_URL + next_page['href']
return movie_list, None
def main():
url = DOWNLOAD_URL
with codecs.open('movies', 'wb', encoding='utf-8') as fp:
while url:
html = download_page(url)
movies, url = parse_html(html)
for movie in movies:
fp.write(u'{rank} {name}\n'.format(rank=movie['rank'], name=movie['name']))
if __name__ == '__main__':
main()
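# Output note: each line of the 'movies' file is "<rank> <title>", e.g. a
# first line such as "1 肖申克的救赎" (illustrative; actual titles come from
# the live page).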
|
zhangmianhongni/MyPractice
|
Python/WebCrawler/xlzd/03.豆瓣电影TOP250/main.py
|
Python
|
apache-2.0
| 3,140
|
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Copyright (c) 2019 The ARC developers are EternityGroup
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Helpful routines for regression testing
#
# Add python-bitcoinrpc to module search path:
import os
import sys
from binascii import hexlify, unhexlify
from base64 import b64encode
from decimal import Decimal, ROUND_DOWN
import json
import random
import shutil
import subprocess
import time
import re
import errno
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
COVERAGE_DIR = None
#Set Mocktime default to OFF.
#MOCKTIME is only needed for scripts that use the
#cached version of the blockchain. If the cached
#version of the blockchain is used without MOCKTIME
#then the mempools will not sync due to IBD.
MOCKTIME = 0
def enable_mocktime():
#For backward compatibility of the python scripts
#with previous versions of the cache, set MOCKTIME
#to regtest genesis time + (201 * 156)
global MOCKTIME
MOCKTIME = 1417713337 + (201 * 156)
def disable_mocktime():
global MOCKTIME
MOCKTIME = 0
def get_mocktime():
return MOCKTIME
def enable_coverage(dirname):
"""Maintain a log of which RPC calls are made during testing."""
global COVERAGE_DIR
COVERAGE_DIR = dirname
def get_rpc_proxy(url, node_number, timeout=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
COVERAGE_DIR, node_number) if COVERAGE_DIR else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def get_mnsync_status(node):
result = node.mnsync("status")
return result['IsSynced']
def wait_to_sync(node):
synced = False
while not synced:
synced = get_mnsync_status(node)
time.sleep(0.5)
def p2p_port(n):
return 11000 + n + os.getpid()%999
def rpc_port(n):
return 12000 + n + os.getpid()%999
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
return b64encode(string.encode('utf-8')).decode('ascii')
def sync_blocks(rpc_connections, wait=1):
"""
Wait until everybody has the same block count
"""
while True:
counts = [ x.getblockcount() for x in rpc_connections ]
if counts == [ counts[0] ]*len(counts):
break
time.sleep(wait)
def sync_mempools(rpc_connections, wait=1):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while True:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match+1
if num_match == len(rpc_connections):
break
time.sleep(wait)
def sync_goldminenodes(rpc_connections):
for node in rpc_connections:
wait_to_sync(node)
bitcoind_processes = {}
def initialize_datadir(dirname, n):
datadir = os.path.join(dirname, "node"+str(n))
if not os.path.isdir(datadir):
os.makedirs(datadir)
with open(os.path.join(datadir, "arc.conf"), 'w') as f:
f.write("regtest=1\n")
f.write("rpcuser=rt\n")
f.write("rpcpassword=rt\n")
f.write("port="+str(p2p_port(n))+"\n")
f.write("rpcport="+str(rpc_port(n))+"\n")
f.write("listenonion=0\n")
return datadir
def rpc_url(i, rpchost=None):
return "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i))
def wait_for_bitcoind_start(process, url, i):
'''
Wait for arcd to start. This means that RPC is accessible and fully initialized.
Raise an exception if arcd exits during initialization.
'''
while True:
if process.poll() is not None:
raise Exception('arcd exited with status %i during initialization' % process.returncode)
try:
rpc = get_rpc_proxy(url, i)
blocks = rpc.getblockcount()
break # break out of loop on success
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
if e.error['code'] != -28: # RPC in warmup?
raise # unknown JSON RPC exception
time.sleep(0.25)
def initialize_chain(test_dir):
"""
Create (or copy from cache) a 200-block-long chain and
4 wallets.
"""
if (not os.path.isdir(os.path.join("cache","node0"))
or not os.path.isdir(os.path.join("cache","node1"))
or not os.path.isdir(os.path.join("cache","node2"))
or not os.path.isdir(os.path.join("cache","node3"))):
#find and delete old cache directories if any exist
for i in range(4):
if os.path.isdir(os.path.join("cache","node"+str(i))):
shutil.rmtree(os.path.join("cache","node"+str(i)))
# Create cache directories, run arcds:
for i in range(4):
datadir=initialize_datadir("cache", i)
args = [ os.getenv("ARCD", "arcd"), "-server", "-keypool=1", "-datadir="+datadir, "-discover=0" ]
if i > 0:
args.append("-connect=127.0.0.1:"+str(p2p_port(0)))
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: arcd started, waiting for RPC to come up"
wait_for_bitcoind_start(bitcoind_processes[i], rpc_url(i), i)
if os.getenv("PYTHON_DEBUG", ""):
print "initialize_chain: RPC succesfully started"
rpcs = []
for i in range(4):
try:
rpcs.append(get_rpc_proxy(rpc_url(i), i))
except Exception:
sys.stderr.write("Error connecting to "+rpc_url(i)+"\n")
sys.exit(1)
# Create a 200-block-long chain; each of the 4 nodes
# gets 25 mature blocks and 25 immature.
# blocks are created with timestamps 156 seconds apart
# starting from 31356 seconds in the past
enable_mocktime()
block_time = get_mocktime() - (201 * 156)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(rpcs, block_time)
rpcs[peer].generate(1)
block_time += 156
# Must sync before next peer starts generating blocks
sync_blocks(rpcs)
# Shut them down, and clean up cache directories:
stop_nodes(rpcs)
wait_bitcoinds()
disable_mocktime()
for i in range(4):
os.remove(log_filename("cache", i, "debug.log"))
os.remove(log_filename("cache", i, "db.log"))
os.remove(log_filename("cache", i, "peers.dat"))
os.remove(log_filename("cache", i, "fee_estimates.dat"))
for i in range(4):
from_dir = os.path.join("cache", "node"+str(i))
to_dir = os.path.join(test_dir, "node"+str(i))
shutil.copytree(from_dir, to_dir)
initialize_datadir(test_dir, i) # Overwrite port/rpcport in arc.conf
def initialize_chain_clean(test_dir, num_nodes):
"""
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization.
"""
for i in range(num_nodes):
datadir=initialize_datadir(test_dir, i)
def _rpchost_to_args(rpchost):
'''Convert optional IP:port spec to rpcconnect/rpcport args'''
if rpchost is None:
return []
match = re.match(r'(\[[0-9a-fA-F:]+\]|[^:]+)(?::([0-9]+))?$', rpchost)
if not match:
raise ValueError('Invalid RPC host spec ' + rpchost)
rpcconnect = match.group(1)
rpcport = match.group(2)
if rpcconnect.startswith('['): # remove IPv6 [...] wrapping
rpcconnect = rpcconnect[1:-1]
rv = ['-rpcconnect=' + rpcconnect]
if rpcport:
rv += ['-rpcport=' + rpcport]
return rv
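# For example, given the regex above: _rpchost_to_args('[::1]:19998')
# returns ['-rpcconnect=::1', '-rpcport=19998'], and _rpchost_to_args(None)
# returns [].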
def start_node(i, dirname, extra_args=None, rpchost=None, timewait=None, binary=None):
"""
Start a arcd and return RPC connection to it
"""
datadir = os.path.join(dirname, "node"+str(i))
if binary is None:
binary = os.getenv("ARCD", "arcd")
# RPC tests still depend on free transactions
args = [ binary, "-datadir="+datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-blockprioritysize=50000", "-mocktime="+str(get_mocktime()) ]
if extra_args is not None: args.extend(extra_args)
bitcoind_processes[i] = subprocess.Popen(args)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: arcd started, waiting for RPC to come up"
url = rpc_url(i, rpchost)
wait_for_bitcoind_start(bitcoind_processes[i], url, i)
if os.getenv("PYTHON_DEBUG", ""):
print "start_node: RPC succesfully started"
proxy = get_rpc_proxy(url, i, timeout=timewait)
if COVERAGE_DIR:
coverage.write_all_rpc_commands(COVERAGE_DIR, proxy)
return proxy
def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None, binary=None):
"""
Start multiple arcds, return RPC connections to them
"""
if extra_args is None: extra_args = [ None for i in range(num_nodes) ]
if binary is None: binary = [ None for i in range(num_nodes) ]
rpcs = []
try:
for i in range(num_nodes):
rpcs.append(start_node(i, dirname, extra_args[i], rpchost, binary=binary[i]))
except: # If one node failed to start, stop the others
stop_nodes(rpcs)
raise
return rpcs
def log_filename(dirname, n_node, logname):
return os.path.join(dirname, "node"+str(n_node), "regtest", logname)
def stop_node(node, i):
node.stop()
bitcoind_processes[i].wait()
del bitcoind_processes[i]
def stop_nodes(nodes):
for node in nodes:
node.stop()
del nodes[:] # Emptying array closes connections as a side effect
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def wait_bitcoinds():
# Wait for all bitcoinds to cleanly exit
for bitcoind in bitcoind_processes.values():
bitcoind.wait()
bitcoind_processes.clear()
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:"+str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()):
time.sleep(0.1)
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >=0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } )
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out+fee
change = amount_in - amount
if change > amount*2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
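# Rough worked example (illustrative numbers): with amount_in=10,
# amount_out=2 and fee=0.01, amount is 2.01 and change is 7.99. Since
# 7.99 > 2 * 2.01, the change is split into two outputs of about 3.995
# each, truncated to 8 decimal places.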
def send_zeropri_transaction(from_node, to_node, amount, fee):
"""
Create&broadcast a zero-priority transaction.
Returns (txid, hex-encoded-txdata)
Ensures transaction is zero-priority by first creating a send-to-self,
then using its output
"""
# Create a send-to-self with confirmed inputs:
self_address = from_node.getnewaddress()
(total_in, inputs) = gather_inputs(from_node, amount+fee*2)
outputs = make_change(from_node, total_in, amount+fee, fee)
outputs[self_address] = float(amount+fee)
self_rawtx = from_node.createrawtransaction(inputs, outputs)
self_signresult = from_node.signrawtransaction(self_rawtx)
self_txid = from_node.sendrawtransaction(self_signresult["hex"], True)
vout = find_output(from_node, self_txid, amount+fee)
# Now immediately spend the output to create a 1-input, 1-output
# zero-priority transaction:
inputs = [ { "txid" : self_txid, "vout" : vout } ]
outputs = { to_node.getnewaddress() : float(amount) }
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"])
def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random zero-priority transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee)
return (txid, txhex, fee)
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment*random.randint(0,fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount+fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
def assert_equal(thing1, thing2):
if thing1 != thing2:
raise AssertionError("%s != %s"%(str(thing1),str(thing2)))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s"%(str(thing1),str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
try:
fun(*args, **kwds)
except exc:
pass
except Exception as e:
raise AssertionError("Unexpected exception raised: "+type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, basestring):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find = False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find == True:
assert_equal(expected, { })
num_matched = 0
for item in object_array:
all_match = True
for key,value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find == True:
num_matched = num_matched+1
for key,value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s"%(str(item), str(key), str(value)))
num_matched = num_matched+1
if num_matched == 0 and should_not_find != True:
raise AssertionError("No objects matched %s"%(str(to_match)))
if num_matched > 0 and should_not_find == True:
raise AssertionError("Objects were found %s"%(str(to_match)))
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
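# satoshi_round truncates to 8 decimal places, e.g.
# satoshi_round(0.1) -> Decimal('0.10000000')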
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
node.generate(int(0.5*count)+101)
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in xrange(iterations):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = satoshi_round(send_value/2)
outputs[addr2] = satoshi_round(send_value/2)
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
txid = node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" #OP_RETURN OP_PUSH2 512 bytes
for i in xrange (512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = "81"
for k in xrange(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
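# Byte layout of the constants above: "6a" is OP_RETURN, "4d" is
# OP_PUSHDATA2 and "0200" is 512 little-endian, so each script_pubkey is
# 4 + 512 = 516 bytes. "81" (decimal 129) is the output count: the 128
# OP_RETURN txouts plus the change output spliced back in later, and
# "fd0402" is the compact-size script length, 0x0204 = 516 bytes.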
def create_tx(node, coinbase, to_address, amount):
inputs = [{ "txid" : coinbase, "vout" : 0}]
outputs = { to_address : amount }
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, fee):
addr = node.getnewaddress()
txids = []
for i in xrange(len(utxos)):
t = utxos.pop()
inputs = []
inputs.append({ "txid" : t["txid"], "vout" : t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr] = satoshi_round(send_value)
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def get_bip9_status(node, key):
info = node.getblockchaininfo()
for row in info['bip9_softforks']:
if row['id'] == key:
return row
raise IndexError('key:"%s" not found' % key)
|
ArcticCore/arcticcoin
|
qa/rpc-tests/test_framework/util.py
|
Python
|
mit
| 21,581
|
from typing import List
from flask_restx import Namespace, Resource
from CTFd.api.v1.helpers.request import validate_args
from CTFd.api.v1.helpers.schemas import sqlalchemy_to_pydantic
from CTFd.api.v1.schemas import (
APIDetailedSuccessResponse,
PaginatedAPIListSuccessResponse,
)
from CTFd.cache import clear_standings
from CTFd.constants import RawEnum
from CTFd.models import Submissions, db
from CTFd.schemas.submissions import SubmissionSchema
from CTFd.utils.decorators import admins_only
from CTFd.utils.helpers.models import build_model_filters
submissions_namespace = Namespace(
"submissions", description="Endpoint to retrieve Submission"
)
SubmissionModel = sqlalchemy_to_pydantic(Submissions)
TransientSubmissionModel = sqlalchemy_to_pydantic(Submissions, exclude=["id"])
class SubmissionDetailedSuccessResponse(APIDetailedSuccessResponse):
data: SubmissionModel
class SubmissionListSuccessResponse(PaginatedAPIListSuccessResponse):
data: List[SubmissionModel]
submissions_namespace.schema_model(
"SubmissionDetailedSuccessResponse", SubmissionDetailedSuccessResponse.apidoc()
)
submissions_namespace.schema_model(
"SubmissionListSuccessResponse", SubmissionListSuccessResponse.apidoc()
)
@submissions_namespace.route("")
class SubmissionsList(Resource):
@admins_only
@submissions_namespace.doc(
description="Endpoint to get submission objects in bulk",
responses={
200: ("Success", "SubmissionListSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
@validate_args(
{
"challenge_id": (int, None),
"user_id": (int, None),
"team_id": (int, None),
"ip": (str, None),
"provided": (str, None),
"type": (str, None),
"q": (str, None),
"field": (
RawEnum(
"SubmissionFields",
{
"challenge_id": "challenge_id",
"user_id": "user_id",
"team_id": "team_id",
"ip": "ip",
"provided": "provided",
"type": "type",
},
),
None,
),
},
location="query",
)
def get(self, query_args):
q = query_args.pop("q", None)
field = str(query_args.pop("field", None))
filters = build_model_filters(model=Submissions, query=q, field=field)
args = query_args
schema = SubmissionSchema(many=True)
submissions = (
Submissions.query.filter_by(**args)
.filter(*filters)
.paginate(max_per_page=100)
)
response = schema.dump(submissions.items)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {
"meta": {
"pagination": {
"page": submissions.page,
"next": submissions.next_num,
"prev": submissions.prev_num,
"pages": submissions.pages,
"per_page": submissions.per_page,
"total": submissions.total,
}
},
"success": True,
"data": response.data,
}
@admins_only
@submissions_namespace.doc(
description="Endpoint to create a submission object. Users should interact with the attempt endpoint to submit flags.",
responses={
200: ("Success", "SubmissionListSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
@validate_args(TransientSubmissionModel, location="json")
def post(self, json_args):
req = json_args
Model = Submissions.get_child(type=req.get("type"))
schema = SubmissionSchema(instance=Model())
response = schema.load(req)
if response.errors:
return {"success": False, "errors": response.errors}, 400
db.session.add(response.data)
db.session.commit()
response = schema.dump(response.data)
db.session.close()
# Delete standings cache
clear_standings()
return {"success": True, "data": response.data}
@submissions_namespace.route("/<submission_id>")
@submissions_namespace.param("submission_id", "A Submission ID")
class Submission(Resource):
@admins_only
@submissions_namespace.doc(
description="Endpoint to get submission objects in bulk",
responses={
200: ("Success", "SubmissionDetailedSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def get(self, submission_id):
submission = Submissions.query.filter_by(id=submission_id).first_or_404()
schema = SubmissionSchema()
response = schema.dump(submission)
if response.errors:
return {"success": False, "errors": response.errors}, 400
return {"success": True, "data": response.data}
@admins_only
@submissions_namespace.doc(
description="Endpoint to get submission objects in bulk",
responses={
200: ("Success", "APISimpleSuccessResponse"),
400: (
"An error occured processing the provided or stored data",
"APISimpleErrorResponse",
),
},
)
def delete(self, submission_id):
submission = Submissions.query.filter_by(id=submission_id).first_or_404()
db.session.delete(submission)
db.session.commit()
db.session.close()
# Delete standings cache
clear_standings()
return {"success": True}
|
LosFuzzys/CTFd
|
CTFd/api/v1/submissions.py
|
Python
|
apache-2.0
| 6,085
|
#!/usr/bin/env python
from pynocchio.pynocchio import Pynocchio
def main():
Pynocchio().run()
if __name__ == '__main__':
main()
|
pynocchio-comic-reader/pynocchio-comic-reader
|
pynocchio-client.py
|
Python
|
gpl-3.0
| 142
|
"""Generic unit tests for Web API resources"""
from __future__ import unicode_literals
import json
from django.test.client import RequestFactory
from djblets.features import Feature, get_features_registry
from djblets.testing.decorators import add_fixtures
from djblets.webapi.errors import PERMISSION_DENIED
from reviewboard.site.models import LocalSite
from reviewboard.webapi.base import WebAPIResource
from reviewboard.webapi.tests.base import BaseWebAPITestCase
class DummyFeature(Feature):
"""A dummy feature for testing."""
feature_id = 'dummy.feature'
name = 'Dummy Feature'
summary = 'A dummy feature'
class BaseDummyResource(WebAPIResource):
"""A dummy resource for testing required_features."""
allowed_methods = ('GET', 'POST', 'PUT', 'DELETE')
uri_object_key = 'dummy'
def has_access_permissions(self, *args, **kwargs):
return True
def has_list_access_permissions(self, *args, **kwargs):
return True
def has_modify_permissions(self, *args, **kwargs):
return True
def has_delete_permissions(self, *args, **kwargs):
return True
def get(self, request, dummy=None, *args, **kwargs):
return 418, {'dummy': dummy}
def get_list(self, request, dummy=None, *args, **kwargs):
return 418, {'dummy': dummy}
def update(self, request, dummy=None, *args, **kwargs):
return 418, {'dummy': dummy}
def create(self, request, dummy=None, *args, **kwargs):
return 418, {'dummy': dummy}
def delete(self, request, dummy=None, *args, **kwargs):
return 418, {'dummy': dummy}
class WebAPIResourceFeatureTests(BaseWebAPITestCase):
"""Tests for Web API Resources with required features"""
def setUp(self):
super(WebAPIResourceFeatureTests, self).setUp()
self.feature = DummyFeature()
class DummyResource(BaseDummyResource):
required_features = [self.feature]
self.resource_cls = DummyResource
self.resource = self.resource_cls()
def tearDown(self):
super(WebAPIResourceFeatureTests, self).tearDown()
registry = get_features_registry()
registry.unregister(self.feature)
def test_disabled_feature_post(self):
"""Testing POST with a disabled required feature returns
PERMISSION_DENIED
"""
self._test_method('post', False)
def test_disabled_feature_get_list(self):
"""Testing GET with a disabled required feature returns
PERMISSION_DENIED for a list_resource
"""
self._test_method('get', False)
def test_disabled_feature_get(self):
"""Testing GET with a disabled required feature returns
PERMISSION_DENIED
"""
self._test_method('get', False, dummy=123)
def test_disabled_feature_delete(self):
"""Testing DELETE with a disabled required feature returns
PERMISSION_DENIED
"""
self._test_method('delete', False, dummy=123)
def test_disabled_feature_forbidden_update(self):
"""Testing PUT with a disabled required feature returns
PERMISSION_DENIED
"""
self._test_method('put', False, dummy=123)
def test_enabled_feature_post(self):
"""Testing POST with an enabled required feature returns the correct
response
"""
self._test_method('post', True)
def test_enabled_feature_get_list(self):
"""Testing GET with an enabled required feature returns the correct
response for a list resource
"""
self._test_method('get', True)
def test_enabled_feature_get(self):
"""Testing GET with an enabled required feature returns the correct
response
"""
self._test_method('get', True, dummy=123)
def test_enabled_feature_delete(self):
"""Testing DELETE with an enabled required feature returns the correct
response
"""
self._test_method('delete', True, dummy=123)
def test_enabled_feature_update(self):
"""Testing PUT with an enabled required feature returns the correct
response
"""
self._test_method('put', True, dummy=123)
@add_fixtures(['test_site'])
def test_disabled_feature_post_local_site(self):
"""Testing POST with a disabled required feature returns
PERMISSION_DENIED on a LocalSite
"""
self._test_method(
'post', False,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_disabled_feature_get_list_local_site(self):
"""Testing GET with a disabled required feature returns
PERMISSION_DENIED for a list_resource on a LocalSite
"""
self._test_method(
'get', False,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_disabled_feature_get_local_site(self):
"""Testing GET with a disabled required feature returns
PERMISSION_DENIED on a LocalSite
"""
self._test_method(
'get', False, dummy=123,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_disabled_feature_delete_local_site(self):
"""Testing DELETE with a disabled required feature returns
PERMISSION_DENIED on a LocalSite
"""
self._test_method(
'delete', False, dummy=123,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_disabled_feature_forbidden_update_local_site(self):
"""Testing PUT with a disabled required feature returns
PERMISSION_DENIED on a LocalSite
"""
self._test_method(
'put', False, dummy=123,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_enabled_feature_post_local_site(self):
"""Testing POST with an enabled required feature returns the correct
response on a LocalSite
"""
self._test_method(
'post', True,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_enabled_feature_get_list_local_site(self):
"""Testing GET with an enabled required feature returns the correct
response for a list resource on a LocalSite
"""
self._test_method(
'get', True,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_enabled_feature_get_local_site(self):
"""Testing GET with an enabled required feature returns the correct
response on a LocalSite
"""
self._test_method(
'get', True, dummy=123,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_enabled_feature_delete_local_site(self):
"""Testing DELETE with an enabled required feature returns the correct
response on a LocalSite
"""
self._test_method(
'delete', True, dummy=123,
local_site=LocalSite.objects.get(name='local-site-1'))
@add_fixtures(['test_site'])
def test_enabled_feature_update_local_site(self):
"""Testing PUT with an enabled required feature returns the correct
response on a LocalSite
"""
self._test_method(
'put', True, dummy=123,
local_site=LocalSite.objects.get(name='local-site-1'))
def _test_method(self, method, feature_enabled, local_site=None,
dummy=None):
# When a LocalSite is provided, we want to enable/disable the feature
# only for that LocalSite and do the opposite for the global settings
# to ensure that we are picking up the setting from the LocalSite and
# not from the global settings.
if local_site is not None:
enabled_globally = not feature_enabled
if not local_site.extra_data:
local_site.extra_data = {}
local_site.extra_data['enabled_features'] = {
DummyFeature.feature_id: feature_enabled,
}
local_site.save(update_fields=('extra_data',))
else:
enabled_globally = feature_enabled
settings = {
'ENABLED_FEATURES': {
DummyFeature.feature_id: enabled_globally,
},
}
request = getattr(RequestFactory(), method)('/')
request.local_site = local_site
request.session = {}
with self.settings(**settings):
rsp = self.resource(request, dummy=dummy)
content = json.loads(rsp.content)
if feature_enabled:
self.assertEqual(rsp.status_code, 418)
self.assertEqual(content['stat'], 'ok')
self.assertEqual(content['dummy'], dummy)
else:
self.assertEqual(rsp.status_code, 403)
self.assertEqual(content['stat'], 'fail')
self.assertEqual(content['err']['msg'], PERMISSION_DENIED.msg)
self.assertEqual(content['err']['code'], PERMISSION_DENIED.code)
|
davidt/reviewboard
|
reviewboard/webapi/tests/test_base.py
|
Python
|
mit
| 9,181
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.atoms.affine.add_expr import AddExpression
from cvxpy.expressions.expression import *
from cvxpy.expressions.variables import Variable, Semidef
from cvxpy.expressions.constants import Constant
from cvxpy.expressions.constants import Parameter
from cvxpy import Problem, Minimize
import cvxpy.utilities as u
import cvxpy.interface.matrix_utilities as intf
import cvxpy.settings as s
from collections import deque
import unittest
from cvxpy.tests.base_test import BaseTest
from cvxopt import matrix
import numpy as np
import warnings
class TestExpressions(BaseTest):
""" Unit tests for the expression/expression module. """
def setUp(self):
self.a = Variable(name='a')
self.x = Variable(2, name='x')
self.y = Variable(3, name='y')
self.z = Variable(2, name='z')
self.A = Variable(2,2,name='A')
self.B = Variable(2,2,name='B')
self.C = Variable(3,2,name='C')
self.intf = intf.DEFAULT_INTF
# Test the Variable class.
def test_variable(self):
x = Variable(2)
y = Variable(2)
assert y.name() != x.name()
x = Variable(2, name='x')
y = Variable()
self.assertEqual(x.name(), 'x')
self.assertEqual(x.size, (2,1))
self.assertEqual(y.size, (1,1))
self.assertEqual(x.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(x.canonical_form[0].size, (2,1))
self.assertEqual(x.canonical_form[1], [])
self.assertEquals(repr(self.x), "Variable(2, 1)")
self.assertEquals(repr(self.A), "Variable(2, 2)")
# # Scalar variable
# coeff = self.a.coefficients()
# self.assertEqual(coeff[self.a.id], [1])
# # Vector variable.
# coeffs = x.coefficients()
# self.assertItemsEqual(coeffs.keys(), [x.id])
# vec = coeffs[x.id][0]
# self.assertEqual(vec.shape, (2,2))
# self.assertEqual(vec[0,0], 1)
# # Matrix variable.
# coeffs = self.A.coefficients()
# self.assertItemsEqual(coeffs.keys(), [self.A.id])
# self.assertEqual(len(coeffs[self.A.id]), 2)
# mat = coeffs[self.A.id][1]
# self.assertEqual(mat.shape, (2,4))
# self.assertEqual(mat[0,2], 1)
def test_assign_var_value(self):
"""Test assigning a value to a variable.
"""
# Scalar variable.
a = Variable()
a.value = 1
self.assertEqual(a.value, 1)
with self.assertRaises(Exception) as cm:
a.value = [2, 1]
self.assertEqual(str(cm.exception), "Invalid dimensions (2, 1) for Variable value.")
# Test assigning None.
a.value = 1
a.value = None
assert a.value is None
# Vector variable.
x = Variable(2)
x.value = [2, 1]
self.assertItemsAlmostEqual(x.value, [2, 1])
# Matrix variable.
A = Variable(3, 2)
A.value = np.ones((3, 2))
self.assertItemsAlmostEqual(A.value, np.ones((3, 2)))
# Test tranposing variables.
def test_transpose_variable(self):
var = self.a.T
self.assertEquals(var.name(), "a")
self.assertEquals(var.size, (1,1))
self.a.save_value(2)
self.assertEquals(var.value, 2)
var = self.x.T
self.assertEquals(var.name(), "x.T")
self.assertEquals(var.size, (1,2))
self.x.save_value( matrix([1,2]) )
self.assertEquals(var.value[0,0], 1)
self.assertEquals(var.value[0,1], 2)
var = self.C.T
self.assertEquals(var.name(), "C.T")
self.assertEquals(var.size, (2,3))
# coeffs = var.canonical_form[0].coefficients()
# mat = coeffs.values()[0][0]
# self.assertEqual(mat.size, (2,6))
# self.assertEqual(mat[1,3], 1)
index = var[1,0]
self.assertEquals(index.name(), "C.T[1, 0]")
self.assertEquals(index.size, (1,1))
var = self.x.T.T
self.assertEquals(var.name(), "x.T.T")
self.assertEquals(var.size, (2,1))
# Test the Constant class.
def test_constants(self):
c = Constant(2)
self.assertEqual(c.name(), str(2))
c = Constant(2)
self.assertEqual(c.value, 2)
self.assertEqual(c.size, (1,1))
self.assertEqual(c.curvature, u.Curvature.CONSTANT_KEY)
self.assertEqual(c.sign, u.Sign.POSITIVE_KEY)
self.assertEqual(Constant(-2).sign, u.Sign.NEGATIVE_KEY)
self.assertEqual(Constant(0).sign, u.Sign.ZERO_KEY)
self.assertEqual(c.canonical_form[0].size, (1,1))
self.assertEqual(c.canonical_form[1], [])
# coeffs = c.coefficients()
# self.assertEqual(coeffs.keys(), [s.CONSTANT])
# self.assertEqual(coeffs[s.CONSTANT], [2])
# Test the sign.
c = Constant([[2], [2]])
self.assertEqual(c.size, (1, 2))
self.assertEqual(c.sign, u.Sign.POSITIVE_KEY)
self.assertEqual((-c).sign, u.Sign.NEGATIVE_KEY)
self.assertEqual((0*c).sign, u.Sign.ZERO_KEY)
c = Constant([[2], [-2]])
self.assertEqual(c.sign, u.Sign.UNKNOWN_KEY)
# Test sign of a complex expression.
c = Constant([1, 2])
A = Constant([[1,1],[1,1]])
exp = c.T*A*c
self.assertEqual(exp.sign, u.Sign.POSITIVE_KEY)
self.assertEqual((c.T*c).sign, u.Sign.POSITIVE_KEY)
exp = c.T.T
self.assertEqual(exp.sign, u.Sign.POSITIVE_KEY)
exp = c.T*self.A
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
# Test repr.
self.assertEqual(repr(c), "Constant(CONSTANT, POSITIVE, (2, 1))")
def test_1D_array(self):
"""Test NumPy 1D arrays as constants.
"""
c = np.array([1,2])
p = Parameter(2)
p.value = [1,1]
self.assertEquals((c*p).value, 3)
self.assertEqual((c*self.x).size, (1,1))
# Test the Parameter class.
def test_parameters(self):
p = Parameter(name='p')
self.assertEqual(p.name(), "p")
self.assertEqual(p.size, (1,1))
p = Parameter(4, 3, sign="positive")
with self.assertRaises(Exception) as cm:
p.value = 1
self.assertEqual(str(cm.exception), "Invalid dimensions (1, 1) for Parameter value.")
val = -np.ones((4,3))
val[0,0] = 2
p = Parameter(4, 3, sign="positive")
with self.assertRaises(Exception) as cm:
p.value = val
self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.")
p = Parameter(4, 3, sign="negative")
with self.assertRaises(Exception) as cm:
p.value = val
self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.")
# No error for unknown sign.
p = Parameter(4, 3)
p.value = val
# Initialize a parameter with a value.
p = Parameter(value=10)
self.assertEqual(p.value, 10)
# Test assigning None.
p.value = 10
p.value = None
assert p.value is None
with self.assertRaises(Exception) as cm:
p = Parameter(2, 1, sign="negative", value=[2,1])
self.assertEqual(str(cm.exception), "Invalid sign for Parameter value.")
with self.assertRaises(Exception) as cm:
p = Parameter(4, 3, sign="positive", value=[1,2])
self.assertEqual(str(cm.exception), "Invalid dimensions (2, 1) for Parameter value.")
# Test repr.
p = Parameter(4, 3, sign="negative")
self.assertEqual(repr(p), 'Parameter(4, 3, sign="NEGATIVE")')
# Test the AddExpresion class.
def test_add_expression(self):
# Vectors
c = Constant([2,2])
exp = self.x + c
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
self.assertEqual(exp.canonical_form[0].size, (2,1))
self.assertEqual(exp.canonical_form[1], [])
# self.assertEqual(exp.name(), self.x.name() + " + " + c.name())
self.assertEqual(exp.size, (2,1))
z = Variable(2, name='z')
exp = exp + z + self.x
with self.assertRaises(Exception) as cm:
(self.x + self.y)
self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)")
# Matrices
exp = self.A + self.B
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.size, (2,2))
with self.assertRaises(Exception) as cm:
(self.A + self.C)
self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)")
with self.assertRaises(Exception) as cm:
AddExpression([self.A, self.C])
self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)")
# Test that sum is flattened.
exp = self.x + c + self.x
self.assertEqual(len(exp.args), 3)
# Test repr.
self.assertEqual(repr(exp), "Expression(AFFINE, UNKNOWN, (2, 1))")
# Test the SubExpresion class.
def test_sub_expression(self):
# Vectors
c = Constant([2,2])
exp = self.x - c
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
self.assertEqual(exp.canonical_form[0].size, (2,1))
self.assertEqual(exp.canonical_form[1], [])
# self.assertEqual(exp.name(), self.x.name() + " - " + Constant([2,2]).name())
self.assertEqual(exp.size, (2,1))
z = Variable(2, name='z')
exp = exp - z - self.x
with self.assertRaises(Exception) as cm:
(self.x - self.y)
self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 1) (3, 1)")
# Matrices
exp = self.A - self.B
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.size, (2,2))
with self.assertRaises(Exception) as cm:
(self.A - self.C)
self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)")
# Test repr.
self.assertEqual(repr(self.x - c), "Expression(AFFINE, UNKNOWN, (2, 1))")
# Test the MulExpresion class.
def test_mul_expression(self):
# Vectors
c = Constant([[2],[2]])
exp = c*self.x
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual((c[0]*self.x).sign, u.Sign.UNKNOWN_KEY)
self.assertEqual(exp.canonical_form[0].size, (1,1))
self.assertEqual(exp.canonical_form[1], [])
# self.assertEqual(exp.name(), c.name() + " * " + self.x.name())
self.assertEqual(exp.size, (1,1))
with self.assertRaises(Exception) as cm:
([2,2,3]*self.x)
self.assertEqual(str(cm.exception), "Incompatible dimensions (3, 1) (2, 1)")
# Matrices
with self.assertRaises(Exception) as cm:
Constant([[2, 1],[2, 2]]) * self.C
self.assertEqual(str(cm.exception), "Incompatible dimensions (2, 2) (3, 2)")
with self.assertRaises(Exception) as cm:
(self.A * self.B)
self.assertEqual(str(cm.exception), "Cannot multiply two non-constants.")
# Constant expressions
T = Constant([[1,2,3],[3,5,5]])
exp = (T + T) * self.B
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.size, (3,2))
# Expression that would break sign multiplication without promotion.
c = Constant([[2], [2], [-2]])
exp = [[1], [2]] + c*self.C
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
# Scalar constants on the right should be moved left.
expr = self.C*2
self.assertEqual(expr.args[0].value, 2)
# Scalar variables on the left should be moved right.
expr = self.a*[2,1]
self.assertItemsAlmostEqual(expr.args[0].value, [2,1])
    # Test the DivExpression class.
def test_div_expression(self):
# Vectors
exp = self.x/2
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
self.assertEqual(exp.canonical_form[0].size, (2,1))
self.assertEqual(exp.canonical_form[1], [])
# self.assertEqual(exp.name(), c.name() + " * " + self.x.name())
self.assertEqual(exp.size, (2,1))
with self.assertRaises(Exception) as cm:
(self.x/[2,2,3])
print(cm.exception)
self.assertEqual(str(cm.exception), "Can only divide by a scalar constant.")
# Constant expressions.
c = Constant(2)
exp = c/(3 - 5)
self.assertEqual(exp.curvature, u.Curvature.CONSTANT_KEY)
self.assertEqual(exp.size, (1,1))
self.assertEqual(exp.sign, u.Sign.NEGATIVE_KEY)
# Parameters.
p = Parameter(sign="positive")
exp = 2/p
p.value = 2
self.assertEquals(exp.value, 1)
rho = Parameter(sign="positive")
rho.value = 1
self.assertEquals(rho.sign, u.Sign.POSITIVE_KEY)
self.assertEquals(Constant(2).sign, u.Sign.POSITIVE_KEY)
self.assertEquals((Constant(2)/Constant(2)).sign, u.Sign.POSITIVE_KEY)
self.assertEquals((Constant(2)*rho).sign, u.Sign.POSITIVE_KEY)
self.assertEquals((rho/2).sign, u.Sign.POSITIVE_KEY)
# Test the NegExpression class.
def test_neg_expression(self):
# Vectors
exp = -self.x
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
assert exp.is_affine()
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
assert not exp.is_positive()
self.assertEqual(exp.canonical_form[0].size, (2,1))
self.assertEqual(exp.canonical_form[1], [])
# self.assertEqual(exp.name(), "-%s" % self.x.name())
self.assertEqual(exp.size, self.x.size)
# Matrices
exp = -self.C
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.size, (3,2))
# Test promotion of scalar constants.
def test_scalar_const_promotion(self):
# Vectors
exp = self.x + 2
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
assert exp.is_affine()
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
assert not exp.is_negative()
self.assertEqual(exp.canonical_form[0].size, (2,1))
self.assertEqual(exp.canonical_form[1], [])
# self.assertEqual(exp.name(), self.x.name() + " + " + Constant(2).name())
self.assertEqual(exp.size, (2,1))
self.assertEqual((4 - self.x).size, (2,1))
self.assertEqual((4 * self.x).size, (2,1))
self.assertEqual((4 <= self.x).size, (2,1))
self.assertEqual((4 == self.x).size, (2,1))
self.assertEqual((self.x >= 4).size, (2,1))
# Matrices
exp = (self.A + 2) + 4
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual((3 * self.A).size, (2,2))
self.assertEqual(exp.size, (2,2))
# Test indexing expression.
def test_index_expression(self):
# Tuple of integers as key.
exp = self.x[1,0]
# self.assertEqual(exp.name(), "x[1,0]")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
assert exp.is_affine()
self.assertEquals(exp.size, (1,1))
# coeff = exp.canonical_form[0].coefficients()[self.x][0]
# self.assertEqual(coeff[0,1], 1)
self.assertEqual(exp.value, None)
exp = self.x[1,0].T
# self.assertEqual(exp.name(), "x[1,0]")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
with self.assertRaises(Exception) as cm:
(self.x[2,0])
self.assertEqual(str(cm.exception), "Index/slice out of bounds.")
# Slicing
exp = self.C[0:2,1]
# self.assertEquals(exp.name(), "C[0:2,1]")
self.assertEquals(exp.size, (2,1))
exp = self.C[0:,0:2]
# self.assertEquals(exp.name(), "C[0:,0:2]")
self.assertEquals(exp.size, (3,2))
exp = self.C[0::2,0::2]
# self.assertEquals(exp.name(), "C[0::2,0::2]")
self.assertEquals(exp.size, (2,1))
exp = self.C[:3,:1:2]
# self.assertEquals(exp.name(), "C[0:3,0]")
self.assertEquals(exp.size, (3,1))
exp = self.C[0:,0]
# self.assertEquals(exp.name(), "C[0:,0]")
self.assertEquals(exp.size, (3,1))
c = Constant([[1,-2],[0,4]])
exp = c[1, 1]
self.assertEqual(exp.curvature, u.Curvature.CONSTANT_KEY)
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
self.assertEqual(c[0,1].sign, u.Sign.UNKNOWN_KEY)
self.assertEqual(c[1,0].sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(exp.size, (1,1))
self.assertEqual(exp.value, 4)
c = Constant([[1,-2,3],[0,4,5],[7,8,9]])
exp = c[0:3,0:4:2]
self.assertEqual(exp.curvature, u.Curvature.CONSTANT_KEY)
assert exp.is_constant()
self.assertEquals(exp.size, (3,2))
self.assertEqual(exp[0,1].value, 7)
# Slice of transpose
exp = self.C.T[0:2,1]
self.assertEquals(exp.size, (2,1))
# Arithmetic expression indexing
exp = (self.x + self.z)[1,0]
# self.assertEqual(exp.name(), "x[1,0] + z[1,0]")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEqual(exp.sign, u.Sign.UNKNOWN_KEY)
self.assertEquals(exp.size, (1,1))
exp = (self.x + self.a)[1,0]
# self.assertEqual(exp.name(), "x[1,0] + a")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
exp = (self.x - self.z)[1,0]
# self.assertEqual(exp.name(), "x[1,0] - z[1,0]")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
exp = (self.x - self.a)[1,0]
# self.assertEqual(exp.name(), "x[1,0] - a")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
exp = (-self.x)[1,0]
# self.assertEqual(exp.name(), "-x[1,0]")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
c = Constant([[1,2],[3,4]])
exp = (c*self.x)[1,0]
# self.assertEqual(exp.name(), "[[2], [4]] * x[0:,0]")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
c = Constant([[1,2],[3,4]])
exp = (c*self.a)[1,0]
# self.assertEqual(exp.name(), "2 * a")
self.assertEqual(exp.curvature, u.Curvature.AFFINE_KEY)
self.assertEquals(exp.size, (1,1))
def test_neg_indices(self):
"""Test negative indices.
"""
c = Constant([[1,2],[3,4]])
exp = c[-1, -1]
self.assertEquals(exp.value, 4)
self.assertEquals(exp.size, (1, 1))
self.assertEquals(exp.curvature, u.Curvature.CONSTANT_KEY)
c = Constant([1,2,3,4])
exp = c[1:-1]
self.assertItemsAlmostEqual(exp.value, [2, 3])
self.assertEquals(exp.size, (2, 1))
self.assertEquals(exp.curvature, u.Curvature.CONSTANT_KEY)
c = Constant([1,2,3,4])
exp = c[::-1]
self.assertItemsAlmostEqual(exp.value, [4, 3, 2, 1])
self.assertEquals(exp.size, (4, 1))
self.assertEquals(exp.curvature, u.Curvature.CONSTANT_KEY)
x = Variable(4)
Problem(Minimize(0), [x[::-1] == c]).solve()
self.assertItemsAlmostEqual(x.value, [4, 3, 2, 1])
self.assertEquals(x[::-1].size, (4, 1))
x = Variable(2)
self.assertEquals(x[::-1].size, (2, 1))
x = Variable(100, name="x")
self.assertEquals("x[:-1, 0]", str(x[:-1]))
def test_logical_indices(self):
"""Test indexing with boolean arrays.
"""
A = np.array([[1,2,3,4], [5, 6, 7, 8], [9,10,11,12]])
C = Constant(A)
# Boolean array.
expr = C[A <= 2]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[A <= 2], expr.value)
expr = C[A % 2 == 0]
self.assertEquals(expr.size, (6, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[A % 2 == 0], expr.value)
# Boolean array for rows, index for columns.
expr = C[np.array([True,False,True]), 3]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[np.array([True,False,True]), 3], expr.value)
# Index for row, boolean array for columns.
expr = C[1, np.array([True,False,False,True])]
self.assertEquals(expr.size, (2,1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[1, np.array([True,False,False,True])],
expr.value)
# Boolean array for rows, slice for columns.
expr = C[np.array([True,True,True]), 1:3]
self.assertEquals(expr.size, (3, 2))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[np.array([True,True,True]), 1:3], expr.value)
# Slice for row, boolean array for columns.
expr = C[1:-1, np.array([True,False,True, True])]
self.assertEquals(expr.size, (1, 3))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[1:-1, np.array([True,False,True, True])],
expr.value)
        # Boolean arrays for rows and columns.
        # NumPy pairs the True positions elementwise, i.e.
        # A[rows.nonzero()[0], cols.nonzero()[0]].
expr = C[np.array([True,True, True]),
np.array([True,False,True, True])]
self.assertEquals(expr.size, (3, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[np.array([True,True, True]),
np.array([True,False,True, True])], expr.value)
def test_selector_list_indices(self):
"""Test indexing with lists/ndarrays of indices.
"""
A = np.array([[1,2,3,4], [5, 6, 7, 8], [9,10,11,12]])
C = Constant(A)
# List for rows.
expr = C[[1,2]]
self.assertEquals(expr.size, (2, 4))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[[1,2]], expr.value)
# List for rows, index for columns.
expr = C[[0,2], 3]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[[0,2],3], expr.value)
# Index for row, list for columns.
expr = C[1, [0,2]]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[1, [0,2]], expr.value)
# List for rows, slice for columns.
expr = C[[0,2], 1:3]
self.assertEquals(expr.size, (2, 2))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[[0,2],1:3], expr.value)
# Slice for row, list for columns.
expr = C[1:-1, [0,2]]
self.assertEquals(expr.size, (1, 2))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[1:-1, [0,2]], expr.value)
# Lists for rows and columns.
expr = C[[0,1], [1,3]]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[[0,1], [1,3]], expr.value)
# Ndarray for rows, list for columns.
expr = C[np.array([0,1]), [1,3]]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[np.array([0,1]), [1,3]], expr.value)
# Ndarrays for rows and columns.
expr = C[np.array([0,1]), np.array([1,3])]
self.assertEquals(expr.size, (2, 1))
self.assertEquals(expr.sign, u.Sign.POSITIVE_KEY)
self.assertItemsAlmostEqual(A[np.array([0,1]), np.array([1,3])],
expr.value)
def test_powers(self):
exp = self.x**2
self.assertEqual(exp.curvature, u.Curvature.CONVEX_KEY)
exp = self.x**0.5
self.assertEqual(exp.curvature, u.Curvature.CONCAVE_KEY)
exp = self.x**-1
self.assertEqual(exp.curvature, u.Curvature.CONVEX_KEY)
def test_sum(self):
"""Test built-in sum. Not good usage.
"""
self.a.value = 1
expr = sum(self.a)
self.assertEquals(expr.value, 1)
self.x.value = [1, 2]
expr = sum(self.x)
self.assertEquals(expr.value, 3)
def test_var_copy(self):
"""Test the copy function for variable types.
"""
x = Variable(3, 4, name="x")
y = x.copy()
self.assertEquals(y.size, (3, 4))
self.assertEquals(y.name(), "x")
x = Semidef(5, name="x")
y = x.copy()
self.assertEquals(y.size, (5, 5))
def test_param_copy(self):
"""Test the copy function for Parameters.
"""
x = Parameter(3, 4, name="x", sign="positive")
y = x.copy()
self.assertEquals(y.size, (3, 4))
self.assertEquals(y.name(), "x")
self.assertEquals(y.sign, "POSITIVE")
def test_constant_copy(self):
"""Test the copy function for Constants.
"""
x = Constant(2)
y = x.copy()
self.assertEquals(y.size, (1, 1))
self.assertEquals(y.value, 2)
|
halwai/cvxpy
|
cvxpy/tests/test_expressions.py
|
Python
|
gpl-3.0
| 26,292
|
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_utils
----------------------------------
Tests for `utils` module.
"""
from . import base
class TestUtils(base.TestCase):
def test_something(self):
pass
|
dims/oslo.utils
|
tests/test_utils.py
|
Python
|
apache-2.0
| 750
|
from functools import wraps
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.shortcuts import resolve_url
from django.contrib.contenttypes.models import ContentType
from django.utils.decorators import available_attrs
from django.utils.six.moves.urllib.parse import urlparse
from django.http import Http404
from operator import and_, or_
from trusts import utils
class P(object):
def __init__(self, perm, **fieldlookups):
self._perm = perm
self._fieldlookups = fieldlookups
self._left_operand = None
self._right_operand = None
self._operator = None
def __and__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("unsupported operand type(s) for &: '%s' and '%s'" % type(self), type(other))
p = type(self)('')
p._left_operand = self
p._right_operand = other
p._operator = and_
return p
def __or__(self, other):
if not isinstance(other, self.__class__):
raise TypeError("unsupported operand type(s) for |: '%s' and '%s'" % type(self), type(other))
p = type(self)('')
p._left_operand = self
p._right_operand = other
p._operator = or_
return p
def __repr__(self):
return self.__unicode__()
def __unicode__(self):
if not self._operator:
return self.perm
return 'P object'
def get_leaves(self):
leaves = []
if not self._operator:
return [self]
# Do not use += or leaves.extend here since it changes the original list
leaves = leaves + self._left_operand.get_leaves()
leaves = leaves + self._right_operand.get_leaves()
return leaves
def solve(self, fn):
if self._operator:
# Parent node, return result operation
if self._operator == and_:
return self._left_operand.solve(fn) and self._right_operand.solve(fn)
elif self._operator == or_:
return self._left_operand.solve(fn) or self._right_operand.solve(fn)
else:
raise TypeError('Unsupported Operator: ', self._operator)
else:
return fn(self._perm, **self._fieldlookups)
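# Illustrative sketch (not part of the original module): P objects compose
# into a boolean expression tree that solve() walks with short-circuit
# evaluation. The permission codes and lookups below are hypothetical.
#
#     can_touch = (P('app.change_doc', pk='doc_id') |
#                  P('app.view_doc', pk='doc_id'))
#     can_touch.solve(lambda perm, **lookups: perm == 'app.change_doc')
#     # -> True: the left leaf matches, so the right leaf is never evaluated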
class R(object):
def __init__(self, key):
self.key = key
class K(R):
pass
class G(R):
pass
class O(R):
pass
def request_passes_test(test_func, login_url=None, redirect_field_name=REDIRECT_FIELD_NAME, *args, **kwargs):
'''
Decorator for views that checks that the user passes the given test,
redirecting to the log-in page if necessary. The test should be a callable
that takes the user object and returns True if the user passes.
Adapted from `django/contrib/auth/decorator.py`
'''
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if test_func(request, *args, **kwargs):
return view_func(request, *args, **kwargs)
path = request.build_absolute_uri()
resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
# If the login url is the same scheme and net location then just
# use the path as the "next" url.
login_scheme, login_netloc = urlparse(resolved_login_url)[:2]
current_scheme, current_netloc = urlparse(path)[:2]
if ((not login_scheme or login_scheme == current_scheme) and
(not login_netloc or login_netloc == current_netloc)):
path = request.get_full_path()
from django.contrib.auth.views import redirect_to_login
return redirect_to_login(
path, resolved_login_url, redirect_field_name)
return _wrapped_view
return decorator
def _collect_args(args, fieldlookups):
results = {}
if not fieldlookups:
return results
for lookup, arg_name in fieldlookups.iteritems():
if arg_name in args:
results[lookup] = args[arg_name]
else:
results[lookup] = None
return results
def _get_permissible_items(perm, request, fieldlookups):
if fieldlookups is None:
return None
applabel, modelname, action, cond = utils.parse_perm_code(perm)
try:
ctype = ContentType.objects.get_by_natural_key(applabel, modelname)
return ctype.model_class().objects.filter(**fieldlookups)
except ObjectDoesNotExist:
        raise ValueError('Permission code must be of the form "app_label.action_modelname". Actual: %s' % perm)
def _resolve_fieldlookups(request, kwargs, fieldlookups_kwargs=None, fieldlookups_getparams=None, fieldlookups_postparams=None, **fieldlookups):
resolved_fields = {}
resolved_fields.update(_collect_args(kwargs, fieldlookups_kwargs))
resolved_fields.update(_collect_args(request.GET, fieldlookups_getparams))
    resolved_fields.update(_collect_args(request.POST, fieldlookups_postparams))
for field, lookup in fieldlookups.items():
if isinstance(lookup, K):
source = kwargs
elif isinstance(lookup, G):
source = request.GET
elif isinstance(lookup, O):
source = request.POST
else:
continue
resolved_fields[field] = source[lookup.key] if lookup.key in source else None
return resolved_fields or None
def _check(perm, request, kwargs, raise_exception, **fieldlookups):
if not isinstance(perm, (list, tuple)):
perms = (perm, )
else:
perms = perm
resolved_items = _resolve_fieldlookups(request, kwargs, **fieldlookups)
items = None
if fieldlookups is not None:
items = _get_permissible_items(perm, request, resolved_items)
if items is None:
if raise_exception:
raise Http404
return False
if request.user.has_perms(perms, items):
return True
# In case the 403 handler should be called raise the exception
if raise_exception:
raise PermissionDenied
return False
def permission_required(perm, raise_exception=True, login_url=None, **fieldlookups):
'''
Decorator for views that checks whether a user has a particular permission
enabled, redirecting to the log-in page if necessary.
If the raise_exception parameter is given the PermissionDenied exception
is raised.
Adapted from `django/contrib/auth/decorator.py`
'''
def _check_perms(request, *args, **kwargs):
def _wrapped_check(perm, **fieldlookups):
return _check(perm, request, kwargs, raise_exception, **fieldlookups)
if isinstance(perm, P):
return perm.solve(_wrapped_check)
return _check(perm, request, kwargs, raise_exception, **fieldlookups)
return request_passes_test(_check_perms, login_url=login_url)
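# Illustrative usage (not part of the original module); the permission code
# and the view/lookup names are hypothetical:
#
#     @permission_required('app.change_doc',
#                          fieldlookups_kwargs={'pk': 'doc_id'})
#     def edit_doc(request, doc_id):
#         ...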
|
beedesk/django-trusts
|
trusts/decorators.py
|
Python
|
bsd-2-clause
| 7,047
|
# -*- coding: utf-8 -*-
"""
Random Forest Regression in python - code example
 - similar to the Decision Tree method
 - a version of ensemble learning - another ensemble method is Gradient Boosting
 - ensemble means taking multiple algorithms, or the same algorithm multiple times,
 and putting them together to make something more powerful than the original
 - Steps:
     - Step 1: Pick k data points at random from the training set
     - Step 2: Build a decision tree on those k data points
     - Step 3: Choose the number Ntree of trees you want to build and repeat steps 1 & 2
     - Step 4: For a new data point, let each of the Ntree trees predict the value of Y and
     assign the average of all the trees' predictions
 - Not predicting with one tree; it is predicting with a forest of trees.
 Taking the average of many predictions improves the accuracy
 - A decision tree is called "CART", which stands for Classification And Regression Tree
 - A non-continuous regression. | Linear, Polynomial and SVR were continuous regressions
 - In a decision tree, average the dependent variable over each interval.
 - A decision tree is not an interesting model in 1D, but very interesting in multiple dimensions
 - Regression models:
     - Linear regression models [Linear and Multiple Linear Regression]
     - Non-linear regression models [Polynomial regression, SVR]
     - Non-linear non-continuous regression models [Decision Tree]
     - Non-linear non-continuous ensemble regression models [Random Forest]
Created on Wed Jul 06 14:32:07 2017
@author: Moinul Al-Mamun
"""
# Random Forest Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
"""from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)"""
# Feature Scaling
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
# Fitting Random Forest Regression to the dataset
from sklearn.ensemble import RandomForestRegressor
# n_estimators - number of trees - an important parameter
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor.fit(X, y)
# 100 trees
regressor = RandomForestRegressor(n_estimators = 100, random_state = 0)
regressor.fit(X, y)
# 300 trees
regressor = RandomForestRegressor(n_estimators = 300, random_state = 0)
regressor.fit(X, y)
# Predicting a new result
# 10 trees - 167k
# 100 trees - 158k
# 300 trees - 160k -- wow!!
y_pred = regressor.predict([[6.5]])  # predict expects a 2D array of samples
# Visualising the Random Forest Regression results (higher resolution)
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
|
moinulkuet/machine-learning
|
Part 2 - Regression/Section 9 - Random Forest Regression/random-forest-regression.py
|
Python
|
gpl-3.0
| 3,227
|
"""Module that creates and initialises application."""
import logging
import os
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_moment import Moment
from flask_login import LoginManager
from flask_session import Session
from flask_migrate import Migrate
# from flask_paranoid import Paranoid
from logging.handlers import SMTPHandler, RotatingFileHandler
from .database import db, User
from .views import web
from .errors import error
from .auth.views import auth
from config import config
from .email import mail
sess = Session()
bootstrap = Bootstrap()
moment = Moment()
login_manager = LoginManager()
login_manager.session_protection = 'strong'
login_manager.login_view = 'auth.login'
migrate = Migrate()
# paranoid = Paranoid()
@login_manager.user_loader
def load_user(user_id):
"""Load a user for Flask-Login."""
return User.query.get(int(user_id))
def create_app(config_name):
"""Create Flask app."""
app = Flask(__name__)
app.config.from_object(config[config_name])
config[config_name].init_app(app)
sess.init_app(app)
bootstrap.init_app(app)
moment.init_app(app)
db.init_app(app)
login_manager.init_app(app)
migrate.init_app(app, db)
mail.init_app(app)
app.register_blueprint(web)
app.register_blueprint(error)
app.register_blueprint(auth)
# paranoid.init_app(app)
# paranoid.redirect_view = '/'
if not app.debug:
if app.config['MAIL_SERVER']:
authentication = None
if app.config['MAIL_USERNAME'] or app.config['MAIL_PASSWORD']:
authentication = (
app.config['MAIL_USERNAME'], app.config['MAIL_PASSWORD'])
secure = None
if app.config['MAIL_USE_TLS']:
secure = ()
mail_handler = SMTPHandler(
mailhost=(app.config['MAIL_SERVER'], app.config['MAIL_PORT']),
fromaddr='no-reply@' + app.config['MAIL_SERVER'],
toaddrs=app.config['ADMINS'], subject='PFT Failure',
credentials=authentication, secure=secure)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
if not os.path.exists('logs'):
os.mkdir('logs')
file_handler = RotatingFileHandler(
'logs/btt.log', maxBytes=10240, backupCount=10)
formatter = (
'%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]'
)
file_handler.setFormatter(logging.Formatter(formatter))
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
app.logger.info('PFT startup')
return app
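# Illustrative usage (not part of the module); 'development' is an assumed
# key in the config dict imported from config.py:
#
#     from btt import create_app
#     app = create_app('development')
#     app.run()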
|
gregcowell/BAM
|
btt/__init__.py
|
Python
|
apache-2.0
| 2,788
|
from numpy import *
import matplotlib
from tools import *
# Constants
# Distances [m]
R = 1. # support points to geometric center
L = .995 # Length of the cables
Rg = .995 # g measure points to geometric center
# Weights [kg]
Mp = 0.848 # platform bare
Mpm = 1.003 # platform + mount
# Errors
DR = DL = .5e-3 # [m]
Dg = 0.2 # [g]
DM = 0.5e-3 # [kg]
# Support points
Sv = array([
pol2xy(R, 2.*pi/3), # arm 1
pol2xy(R, 4.*pi/3), # arm 2
pol2xy(R, 0.) # arm 3
])
# Video FPS
fps = 240
# Matplotlib style adjustments
font = {'family' : 'serif',
'serif': ['Computer Modern Roman'],
'size' : 18}
matplotlib.rc('font', **font)
plt.rc('text', usetex=True)
|
morloy/trifilar-mass-prop
|
inc/constants.py
|
Python
|
gpl-2.0
| 706
|
print("************************* Part 1 *****************************")
aList = ['Oxo', 'OXO', '123454321', 'ROTATOR', '12345 54321']
for item in aList:
    if list(reversed(item)) == list(item):
        print("True ", item)
    else:
        print("False", item)
print("************************* Part 2 *****************************")
inPut = input("Enter a String: ")
rev = reversed(inPut)
if list(inPut) == list(rev):
print("True ", inPut)
else:
print("False", inPut)
|
MajedAlshammari/cloud-computing
|
lab3/lab3-1.py
|
Python
|
mit
| 519
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#controller.py
import copy, datetime, multiprocessing
import dataListener, strategyActuator
from DataApi_32 import CDataProcess
# Load strategies
import signalStrategy, multipleStrategy
#-----------------------
# Define global variables
#-----------------------
# Data listener objects
g_listenerList = [] # 3 objects in total
# List of strategy actuator objects
g_StrategyActuatorDict = {} # one object per stock
# List of subscribed stocks
g_subStocks = []
#-----------------------
# Register strategies
#-----------------------
# Object pool for single-stock strategies
g_SSDict = {}
g_SSDict["baseSignal"] = signalStrategy.CBaseSignal
# Object pool for multi-stock strategies
g_MSDict = {}
g_MSDict["baseMultiple"] = multipleStrategy.CBaseMultiple
#-----------------------
# Implementation functions
#-----------------------
# Read configuration parameters
execfile("config.ini")
# Load subscribed stocks
def loadSubStocks():
global g_subStocks
_fileReader = open("./subStock.csv","r")
while 1:
line = _fileReader.readline()
line = line.replace("\n","")
if not line:
break
g_subStocks.append(line)
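# subStock.csv is assumed (the format is not stated in the source) to hold
# one stock code per line with no header.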
# Create strategy objects
def creatStrategyObject(needSignal, stock):
strategyObjDict = {}
    if needSignal: # single-signal strategies
        if not SUB_SIGNALS: # nothing subscribed
return False
for signalName in SUB_SIGNALS:
strategyObjDict[signalName] = g_SSDict[signalName](stock)
return strategyObjDict
    else: # multi-signal strategies
        if not SUB_MULTIPLES: # nothing subscribed
return False
for multipeName in SUB_MULTIPLES:
strategyObjDict[multipeName] = g_MSDict[multipeName]("Multiple")
strategyObjDict[multipeName].getActuatorDict(g_StrategyActuatorDict)
return strategyObjDict
# Create listener objects
def creatListener(bufferStack):
global g_listenerList
listenersNum = 1
if len(g_subStocks) >= listenersNum:
perListenerStocksNum = len(g_subStocks)/listenersNum
for i in xrange(listenersNum):
if listenersNum - i == 1:
actuatorDict = creatActuators(g_subStocks[i*perListenerStocksNum:], bufferStack, True)
listener = dataListener.CDataListerner(g_subStocks[i*perListenerStocksNum:], actuatorDict, bufferStack)
listener.start()
else:
actuatorDict = creatActuators(g_subStocks[i*perListenerStocksNum:i*perListenerStocksNum+perListenerStocksNum], bufferStack, False)
listener = dataListener.CDataListerner(g_subStocks[i*perListenerStocksNum:i*perListenerStocksNum+perListenerStocksNum], actuatorDict, bufferStack)
listener.start()
g_listenerList.append(listener)
else:
actuatorDict = creatActuators(g_subStocks, bufferStack, True)
listener = dataListener.CDataListerner(g_subStocks, actuatorDict, bufferStack)
listener.start()
g_listenerList.append(listener)
# Create actuator objects
def creatActuators(stocks, bufferStack, isLast):
global g_StrategyActuatorDict
actuatorDict = {}
    # Actuators for single-stock strategies
for stock in stocks:
strategyObjDict = creatStrategyObject(True, stock)
if strategyObjDict:
newActuator = strategyActuator.CStrategyActuator(bufferStack[stock])
newActuator.getSignalStrategyObj(strategyObjDict)
g_StrategyActuatorDict[stock] = newActuator
actuatorDict[stock] = newActuator
    if isLast: # Actuator for multi-stock strategies
strategyObjDict = creatStrategyObject(False, "Multiple")
if strategyObjDict:
newActuator = strategyActuator.CStrategyActuator(bufferStack["Multiple"])
newActuator.getmultipleStrategyObj(strategyObjDict)
g_StrategyActuatorDict["Multiple"] = newActuator
actuatorDict["Multiple"] = newActuator
return actuatorDict
# Main entry point
def main():
    # Register strategies
    # Load the subscribed stock codes
loadSubStocks()
    # Create the data connection object
dataServerInstance = CDataProcess(
HOST,PORT,
SUB_ALL_STOCK, g_subStocks,
REQUEST_TYPE,
REQUEST_FLAG,
datetime.datetime.strptime(START_TIME,"%Y-%m-%d %H:%M:%S"),
datetime.datetime.strptime(END_TIME,"%Y-%m-%d %H:%M:%S"))
    # Create the data listeners
creatListener(dataServerInstance.bufferStack)
dataServerInstance.run()
|
sharmaking/BackTestSystem
|
controller.py
|
Python
|
mit
| 3,944
|
# lib.vartools
import re
from string import letters
import random
import hashlib
import hmac
import json
secret = 'movethistoanimport'
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,30}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
PASS_RE = re.compile(r"^.{3,20}$")
def valid_username(username):
return username and USER_RE.match(username)
def valid_password(password):
return password and PASS_RE.match(password)
def valid_email(email):
return EMAIL_RE.match(email)
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def make_secure_eval(val):
return '%s;%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
def check_secure_eval(secure_val):
    val = secure_val.split(';')[0]
    if secure_val == make_secure_eval(val):
        return val
def render_post(response, post):
response.out.write('<b>' + post.subject + '</b><br>')
response.out.write(post.content)
def make_salt(length = 5):
return ''.join(random.choice(letters) for x in xrange(length))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + pw + salt).hexdigest()
return '%s,%s' % (salt, h)
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
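# Illustrative round trip (not part of the module):
#
#     h = make_pw_hash('alice', 's3cret')
#     valid_pw('alice', 's3cret', h)   # -> True
#     valid_pw('alice', 'wrong', h)    # -> False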
|
oweninglese/filmofile
|
lib/vartools.py
|
Python
|
gpl-2.0
| 1,432
|
#!/bin/python3
""" This script contains functions to check wich packages are updatable on the
system.
Author: Julien Delplanque
"""
import subprocess
class PackageManagerDoesNotExists(Exception):
""" Exception launched if the package manager doesn't exists.
"""
pass
def pacman_packages_to_update():
""" Return the packages to update from the pacman's database.
"""
pacman_proc = subprocess.Popen(["/bin/pacman -Qu"], stdout=subprocess.PIPE, shell=True)
(pacman_out, pacman_err) = pacman_proc.communicate()
if pacman_proc.returncode == 0:
pkgs_to_update = pacman_out.decode("utf-8").split("\n")
pkgs_to_update.pop()
return pkgs_to_update
raise PackageManagerDoesNotExists
def yaourt_packages_to_update():
""" Return the packages to update from the yaourt's database.
"""
yaourt_proc = subprocess.Popen(["/bin/yaourt -Qu"], stdout=subprocess.PIPE, shell=True)
(yaourt_out, yaourt_err) = yaourt_proc.communicate()
if yaourt_proc.returncode == 0:
pkgs_to_update = yaourt_out.decode("utf-8").split("\n")
pkgs_to_update.pop()
return pkgs_to_update
raise PackageManagerDoesNotExists
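# Illustrative usage (not part of the module):
#
#     try:
#         for pkg in pacman_packages_to_update():
#             print(pkg)
#     except PackageManagerDoesNotExists:
#         print("pacman is not available on this system")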
|
juliendelplanque/raspirestmonitor
|
server/pkgmanagers.py
|
Python
|
mit
| 1,196
|
# encoding: utf-8
"""
Common shape-related oxml objects
"""
from __future__ import absolute_import
from ...enum.shapes import PP_PLACEHOLDER
from ..ns import qn
from ..simpletypes import (
ST_Angle, ST_Coordinate, ST_Direction, ST_DrawingElementId, ST_LineWidth,
ST_PlaceholderSize, ST_PositiveCoordinate, XsdBoolean, XsdString,
XsdUnsignedInt
)
from ...util import Emu
from ..xmlchemy import (
BaseOxmlElement, Choice, OptionalAttribute, OxmlElement,
RequiredAttribute, ZeroOrOne, ZeroOrOneChoice
)
class BaseShapeElement(BaseOxmlElement):
"""
Provides common behavior for shape element classes like CT_Shape,
CT_Picture, etc.
"""
@property
def cx(self):
return self._get_xfrm_attr('cx')
@cx.setter
def cx(self, value):
self._set_xfrm_attr('cx', value)
@property
def cy(self):
return self._get_xfrm_attr('cy')
@cy.setter
def cy(self, value):
self._set_xfrm_attr('cy', value)
@property
def flipH(self):
return bool(self._get_xfrm_attr('flipH'))
@flipH.setter
def flipH(self, value):
self._set_xfrm_attr('flipH', value)
@property
def flipV(self):
return bool(self._get_xfrm_attr('flipV'))
@flipV.setter
def flipV(self, value):
self._set_xfrm_attr('flipV', value)
def get_or_add_xfrm(self):
"""
Return the ``<a:xfrm>`` grandchild element, newly-added if not
present. This version works for ``<p:sp>``, ``<p:cxnSp>``, and
``<p:pic>`` elements, others will need to override.
"""
return self.spPr.get_or_add_xfrm()
@property
def has_ph_elm(self):
"""
True if this shape element has a ``<p:ph>`` descendant, indicating it
is a placeholder shape. False otherwise.
"""
return self.ph is not None
@property
def ph(self):
"""
The ``<p:ph>`` descendant element if there is one, None otherwise.
"""
ph_elms = self.xpath('./*[1]/p:nvPr/p:ph')
if len(ph_elms) == 0:
return None
return ph_elms[0]
@property
def ph_idx(self):
"""
Integer value of placeholder idx attribute. Raises |ValueError| if
shape is not a placeholder.
"""
ph = self.ph
if ph is None:
raise ValueError("not a placeholder shape")
return ph.idx
@property
def ph_orient(self):
"""
Placeholder orientation, e.g. 'vert'. Raises |ValueError| if shape is
not a placeholder.
"""
ph = self.ph
if ph is None:
raise ValueError("not a placeholder shape")
return ph.orient
@property
def ph_sz(self):
"""
Placeholder size, e.g. ST_PlaceholderSize.HALF, None if shape has no
``<p:ph>`` descendant.
"""
ph = self.ph
if ph is None:
raise ValueError("not a placeholder shape")
return ph.sz
@property
def ph_type(self):
"""
Placeholder type, e.g. ST_PlaceholderType.TITLE ('title'), none if
shape has no ``<p:ph>`` descendant.
"""
ph = self.ph
if ph is None:
raise ValueError("not a placeholder shape")
return ph.type
@property
def rot(self):
"""
Float representing degrees this shape is rotated clockwise.
"""
xfrm = self.xfrm
if xfrm is None:
return 0.0
return xfrm.rot
@rot.setter
def rot(self, value):
self.get_or_add_xfrm().rot = value
@property
def shape_id(self):
"""
Integer id of this shape
"""
return self._nvXxPr.cNvPr.id
@property
def shape_name(self):
"""
Name of this shape
"""
return self._nvXxPr.cNvPr.name
@property
def txBody(self):
"""
Child ``<p:txBody>`` element, None if not present
"""
return self.find(qn('p:txBody'))
@property
def x(self):
return self._get_xfrm_attr('x')
@x.setter
def x(self, value):
self._set_xfrm_attr('x', value)
@property
def xfrm(self):
"""
The ``<a:xfrm>`` grandchild element or |None| if not found. This
version works for ``<p:sp>``, ``<p:cxnSp>``, and ``<p:pic>``
elements, others will need to override.
"""
return self.spPr.xfrm
@property
def y(self):
return self._get_xfrm_attr('y')
@y.setter
def y(self, value):
self._set_xfrm_attr('y', value)
@property
def _nvXxPr(self):
"""
Required non-visual shape properties element for this shape. Actual
name depends on the shape type, e.g. ``<p:nvPicPr>`` for picture
shape.
"""
return self.xpath('./*[1]')[0]
def _get_xfrm_attr(self, name):
xfrm = self.xfrm
if xfrm is None:
return None
return getattr(xfrm, name)
def _set_xfrm_attr(self, name, value):
xfrm = self.get_or_add_xfrm()
setattr(xfrm, name, value)
class CT_ApplicationNonVisualDrawingProps(BaseOxmlElement):
"""
``<p:nvPr>`` element
"""
ph = ZeroOrOne('p:ph', successors=(
'a:audioCd', 'a:wavAudioFile', 'a:audioFile', 'a:videoFile',
'a:quickTimeFile', 'p:custDataLst', 'p:extLst'
))
class CT_LineProperties(BaseOxmlElement):
"""
Custom element class for <a:ln> element
"""
eg_lineFillProperties = ZeroOrOneChoice(
(Choice('a:noFill'), Choice('a:solidFill'), Choice('a:gradFill'),
Choice('a:pattFill')),
successors=(
'a:prstDash', 'a:custDash', 'a:round', 'a:bevel', 'a:miter',
'a:headEnd', 'a:tailEnd', 'a:extLst'
)
)
w = OptionalAttribute('w', ST_LineWidth, default=Emu(0))
@property
def eg_fillProperties(self):
"""
Required to fulfill the interface used by dml.fill.
"""
return self.eg_lineFillProperties
class CT_NonVisualDrawingProps(BaseOxmlElement):
"""
``<p:cNvPr>`` custom element class.
"""
_tag_seq = ('a:hlinkClick', 'a:hlinkHover', 'a:extLst')
hlinkClick = ZeroOrOne('a:hlinkClick', successors=_tag_seq[1:])
hlinkHover = ZeroOrOne('a:hlinkHover', successors=_tag_seq[2:])
id = RequiredAttribute('id', ST_DrawingElementId)
name = RequiredAttribute('name', XsdString)
del _tag_seq
class CT_Placeholder(BaseOxmlElement):
"""
``<p:ph>`` custom element class.
"""
type = OptionalAttribute(
'type', PP_PLACEHOLDER, default=PP_PLACEHOLDER.OBJECT
)
orient = OptionalAttribute(
'orient', ST_Direction, default=ST_Direction.HORZ
)
sz = OptionalAttribute(
'sz', ST_PlaceholderSize, default=ST_PlaceholderSize.FULL
)
idx = OptionalAttribute('idx', XsdUnsignedInt, default=0)
class CT_Point2D(BaseOxmlElement):
"""
Custom element class for <a:off> element.
"""
x = RequiredAttribute('x', ST_Coordinate)
y = RequiredAttribute('y', ST_Coordinate)
class CT_PositiveSize2D(BaseOxmlElement):
"""
Custom element class for <a:ext> element.
"""
cx = RequiredAttribute('cx', ST_PositiveCoordinate)
cy = RequiredAttribute('cy', ST_PositiveCoordinate)
class CT_ShapeProperties(BaseOxmlElement):
"""
Custom element class for <p:spPr> element. Shared by ``<p:sp>``,
``<p:pic>``, and ``<p:cxnSp>`` elements as well as a few more obscure
ones.
"""
xfrm = ZeroOrOne('a:xfrm', successors=(
'a:custGeom', 'a:prstGeom', 'a:ln', 'a:effectLst', 'a:effectDag',
'a:scene3d', 'a:sp3d', 'a:extLst'
))
eg_fillProperties = ZeroOrOneChoice(
(Choice('a:noFill'), Choice('a:solidFill'), Choice('a:gradFill'),
Choice('a:blipFill'), Choice('a:pattFill'), Choice('a:grpFill')),
successors=(
'a:ln', 'a:effectLst', 'a:effectDag', 'a:scene3d', 'a:sp3d',
'a:extLst'
)
)
ln = ZeroOrOne('a:ln', successors=(
'a:effectLst', 'a:effectDag', 'a:scene3d', 'a:sp3d', 'a:extLst'
))
@property
def cx(self):
"""
Shape width as an instance of Emu, or None if not present.
"""
cx_str_lst = self.xpath('./a:xfrm/a:ext/@cx')
if not cx_str_lst:
return None
return Emu(cx_str_lst[0])
@property
def cy(self):
"""
Shape height as an instance of Emu, or None if not present.
"""
cy_str_lst = self.xpath('./a:xfrm/a:ext/@cy')
if not cy_str_lst:
return None
return Emu(cy_str_lst[0])
@property
def prstGeom(self):
"""
The <a:prstGeom> child element, or None if not present.
"""
return self.find(qn('a:prstGeom'))
@property
def x(self):
"""
The offset of the left edge of the shape from the left edge of the
slide, as an instance of Emu. Corresponds to the value of the
`./xfrm/off/@x` attribute. None if not present.
"""
x_str_lst = self.xpath('./a:xfrm/a:off/@x')
if not x_str_lst:
return None
return Emu(x_str_lst[0])
@property
def y(self):
"""
The offset of the top of the shape from the top of the slide, as an
instance of Emu. None if not present.
"""
y_str_lst = self.xpath('./a:xfrm/a:off/@y')
if not y_str_lst:
return None
return Emu(y_str_lst[0])
class CT_Transform2D(BaseOxmlElement):
"""
Custom element class for <a:xfrm> element.
"""
_tag_seq = ('a:off', 'a:ext')
off = ZeroOrOne('a:off', successors=_tag_seq[1:])
ext = ZeroOrOne('a:ext', successors=_tag_seq[2:])
del _tag_seq
rot = OptionalAttribute('rot', ST_Angle, default=0.0)
flipH = OptionalAttribute('flipH', XsdBoolean, default=False)
flipV = OptionalAttribute('flipV', XsdBoolean, default=False)
@property
def x(self):
off = self.off
if off is None:
return None
return off.x
@x.setter
def x(self, value):
off = self.get_or_add_off()
off.x = value
@property
def y(self):
off = self.off
if off is None:
return None
return off.y
@y.setter
def y(self, value):
off = self.get_or_add_off()
off.y = value
@property
def cx(self):
ext = self.ext
if ext is None:
return None
return ext.cx
@cx.setter
def cx(self, value):
ext = self.get_or_add_ext()
ext.cx = value
@property
def cy(self):
ext = self.ext
if ext is None:
return None
return ext.cy
@cy.setter
def cy(self, value):
ext = self.get_or_add_ext()
ext.cy = value
def _new_ext(self):
ext = OxmlElement('a:ext')
ext.cx = 0
ext.cy = 0
return ext
def _new_off(self):
off = OxmlElement('a:off')
off.x = 0
off.y = 0
return off
|
biggihs/python-pptx
|
pptx/oxml/shapes/shared.py
|
Python
|
mit
| 11,160
|
data = [(x, y) for y in range(0, 10) for x in range(0, 10)]
for x,y in data:
print "{} {}".format(x*1.0,y*1.0)
|
maximg/comp-prog
|
uva/10034.gen.py
|
Python
|
mit
| 115
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.paging import Paged
class RoleDefinitionPaged(Paged):
"""
A paging container for iterating over a list of :class:`RoleDefinition <azure.mgmt.authorization.models.RoleDefinition>` object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[RoleDefinition]'}
}
def __init__(self, *args, **kwargs):
super(RoleDefinitionPaged, self).__init__(*args, **kwargs)
|
lmazuel/azure-sdk-for-python
|
azure-mgmt-authorization/azure/mgmt/authorization/models/role_definition_paged.py
|
Python
|
mit
| 956
|
import platform
from copy import *
from ctypes import *
class Param(Structure): #Forward declaration
pass
class Value(Structure):
pass
class StringValue(Structure):
pass
class BoolValue(Structure):
pass
class NumberValue(Structure):
pass
class ListValue(Structure):
pass
PARAM_P = POINTER(Param)
VALUE_P = POINTER(Value)
LIST_P = POINTER(ListValue)
Value._fields_ = [
("type", c_uint),
("val", c_void_p)
]
StringValue._fields_ = [
("value", c_char_p)
]
BoolValue._fields_ = [
("value", c_bool)
]
NumberValue._fields_ = [
("value", c_int)
]
ListValue._fields_ = [
("value", VALUE_P),
("next", LIST_P)
]
Param._fields_ = [
("key", c_char_p),
("value", VALUE_P),
("next", PARAM_P)
]
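# Type-tag mapping used by zTemplate.handle_type below (inferred from the
# Python side, not from the C headers): 1 = string, 2 = bool, 3 = number,
# 4 = list.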
class zTemplate(object):
def __init__(self):
if platform.system() == "Windows":
self.lib = cdll.LoadLibrary("bin/zTemplate.dll")
else:
self.lib = cdll.LoadLibrary("bin/zTemplate.so")
self.lib.render.restype = c_char_p
self.lib.render.argtype = [c_char_p, PARAM_P]
self.lib.render_text.restype = c_char_p
self.lib.render.argtype = [c_char_p, PARAM_P]
def handle_type(self, value):
v = Value()
if type(value) == list:
v.type = 4
rev = value[:]
rev.reverse()
            prev_item = None
            lv = None
            for item in rev:
                lv = ListValue()
                self.Values.append(lv)
                lv.value = VALUE_P(self.handle_type(item))
                if prev_item is not None:
                    lv.next = LIST_P(prev_item)
                prev_item = lv
            if lv is not None:  # guard: an empty list would leave lv unbound
                v.val = cast(byref(lv), c_void_p)
elif type(value) == dict:
pass
elif type(value) == str:
sv = StringValue()
sv.value = value.encode("UTF-8")
self.Values.append(sv)
v.type = 1
v.val = cast(byref(sv), c_void_p)
elif type(value) == bool:
bv = BoolValue()
bv.value = value
self.Values.append(bv)
v.type = 2
v.val = cast(byref(bv), c_void_p)
elif type(value) == int:
nv = NumberValue()
nv.value = value
self.Values.append(nv)
v.type = 3
v.val = cast(byref(nv), c_void_p)
else:
print("Unhandled type %s" % type(value))
return v
def render(self, file, params = {}):
root = self.construct_params(params)
return self.lib.render(file.encode("UTF-8"), byref(root))
def render_text(self, text, params = {}):
root = self.construct_params(params)
return self.lib.render_text(text.encode("UTF-8"), byref(root))
def construct_params(self, params):
root = Param()
cursor = root
        self.Values = [] # keep references so the value structs are not garbage-collected
for key, value in params.items():
if type(value) == dict:
for name, member in value.items():
p = Param()
p.key = ("%s->%s" % (key, name)).encode("UTF-8")
v = self.handle_type(member)
p.value = VALUE_P(v)
cursor.next = PARAM_P(p)
cursor = p
else:
p = Param()
p.key = key.encode("UTF-8")
v = self.handle_type(value)
p.value = VALUE_P(v)
cursor.next = PARAM_P(p)
cursor = p
return root
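# Illustrative usage (not part of the module); the template syntax shown is
# an assumption about what the underlying C library accepts:
#
#     tpl = zTemplate()
#     out = tpl.render_text("Hello, {name}!", {"name": "World"})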
|
zaibacu/zTemplate
|
lib/zTemplate.py
|
Python
|
mit
| 2,885
|
from __future__ import print_function
from __future__ import division
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.model_selection import H2OModelSelectionEstimator as modelSelection
# test modelselection algorithm for regression only. Make sure the result frame contains the correct information. Make
# sure that we can instantiate the best model from its model ID and perform scoring with it.
def test_gaussian_result_frame_model_id():
d = h2o.import_file(path=pyunit_utils.locate("smalldata/logreg/prostate.csv"))
my_y = "GLEASON"
my_x = ["AGE","RACE","CAPSULE","DCAPS","PSA","VOL","DPROS"]
maxr_model = modelSelection(seed=12345, max_predictor_number=7, mode="maxr")
maxr_model.train(training_frame=d, x=my_x, y=my_y)
allsubsets_model = modelSelection(seed=12345, max_predictor_number=7, mode="allsubsets")
allsubsets_model.train(training_frame=d, x=my_x, y=my_y)
result_frame_allsubsets = allsubsets_model.result()
numRows = result_frame_allsubsets.nrows
best_r2_allsubsets = allsubsets_model.get_best_R2_values()
result_frame_maxr = maxr_model.result()
best_r2_maxr = maxr_model.get_best_R2_values()
for ind in list(range(numRows)):
# r2 from attributes
best_r2_value_allsubsets = best_r2_allsubsets[ind]
one_model_allsubsets = h2o.get_model(result_frame_allsubsets["model_id"][ind, 0])
pred_allsubsets = one_model_allsubsets.predict(d)
print("last element of predictor frame: {0}".format(pred_allsubsets[pred_allsubsets.nrows-1,pred_allsubsets.ncols-1]))
assert pred_allsubsets.nrows == d.nrows, "expected dataset row: {0}, actual dataset row: " \
"{1}".format(pred_allsubsets.nrows, d.nrows)
best_r2_value_maxr = best_r2_maxr[ind]
one_model_maxr = h2o.get_model(result_frame_maxr["model_id"][ind, 0])
pred_maxr = one_model_maxr.predict(d)
pyunit_utils.compare_frames_local(pred_maxr, pred_allsubsets, prob=1, tol=1e-6) # compare allsubsets and maxr results
# r2 from result frame
frame_r2_allsubsets = result_frame_allsubsets["best_r2_value"][ind,0]
# r2 from model
model_r2_allsubsets = one_model_allsubsets.r2()
# make sure all r2 are equal
assert abs(best_r2_value_allsubsets-frame_r2_allsubsets) < 1e-6, "expected best r2: {0}, actual best r2: " \
"{1}".format(best_r2_value_allsubsets, frame_r2_allsubsets)
assert abs(frame_r2_allsubsets-model_r2_allsubsets) < 1e-6, "expected best r2: {0}, actual best r2: " \
"{1}".format(model_r2_allsubsets, frame_r2_allsubsets)
assert abs(best_r2_value_maxr-model_r2_allsubsets) < 1e-6, "expected best r2: {0}, maxr best r2: {1}" \
"".format(best_r2_value_maxr, model_r2_allsubsets)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gaussian_result_frame_model_id)
else:
test_gaussian_result_frame_model_id()
|
h2oai/h2o-3
|
h2o-py/tests/testdir_algos/modelselection/pyunit_PUBDEV_8346_modelselection_result_frame.py
|
Python
|
apache-2.0
| 3,191
|
from blmath.geometry.primitives.box import Box
from blmath.geometry.primitives.plane import Plane
from blmath.geometry.primitives.polyline import Polyline
|
bodylabs/blmath
|
blmath/geometry/__init__.py
|
Python
|
bsd-2-clause
| 155
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
default_message_ttl = cfg.IntOpt(
'default_message_ttl', default=3600,
help='Defines how long a message will be accessible.')
default_message_delay = cfg.IntOpt(
'default_message_delay', default=0,
    help=('Defines the default value for queue delay seconds. '
          'A value of 0 means the delayed queues feature is disabled.'))
default_claim_ttl = cfg.IntOpt(
'default_claim_ttl', default=300,
help='Defines how long a message will be in claimed state.')
default_claim_grace = cfg.IntOpt(
'default_claim_grace', default=60,
help='Defines the message grace period in seconds.')
default_subscription_ttl = cfg.IntOpt(
'default_subscription_ttl', default=3600,
help='Defines how long a subscription will be available.')
max_queues_per_page = cfg.IntOpt(
'max_queues_per_page', default=20,
deprecated_name='queue_paging_uplimit',
deprecated_group='limits:transport',
help='Defines the maximum number of queues per page.')
max_messages_per_page = cfg.IntOpt(
'max_messages_per_page', default=20,
deprecated_name='message_paging_uplimit',
deprecated_group='limits:transport',
help='Defines the maximum number of messages per page.')
max_subscriptions_per_page = cfg.IntOpt(
'max_subscriptions_per_page', default=20,
deprecated_name='subscription_paging_uplimit',
deprecated_group='limits:transport',
help='Defines the maximum number of subscriptions per page.')
max_messages_per_claim_or_pop = cfg.IntOpt(
'max_messages_per_claim_or_pop', default=20,
deprecated_name='max_messages_per_claim',
help='The maximum number of messages that can be claimed (OR) '
'popped in a single request')
max_queue_metadata = cfg.IntOpt(
'max_queue_metadata', default=64 * 1024,
deprecated_name='metadata_size_uplimit',
deprecated_group='limits:transport',
help='Defines the maximum amount of metadata in a queue.')
max_messages_post_size = cfg.IntOpt(
'max_messages_post_size', default=256 * 1024,
deprecated_name='message_size_uplimit',
deprecated_group='limits:transport',
deprecated_opts=[cfg.DeprecatedOpt('max_message_size')],
help='Defines the maximum size of message posts.')
max_message_ttl = cfg.IntOpt(
'max_message_ttl', default=1209600,
deprecated_name='message_ttl_max',
deprecated_group='limits:transport',
help='Maximum amount of time a message will be available.')
max_message_delay = cfg.IntOpt(
'max_message_delay', default=900,
help='Maximum delay seconds for messages can be claimed.')
max_claim_ttl = cfg.IntOpt(
'max_claim_ttl', default=43200,
deprecated_name='claim_ttl_max',
deprecated_group='limits:transport',
help='Maximum length of a message in claimed state.')
max_claim_grace = cfg.IntOpt(
'max_claim_grace', default=43200,
deprecated_name='claim_grace_max',
deprecated_group='limits:transport',
help='Defines the maximum message grace period in seconds.')
subscriber_types = cfg.ListOpt(
'subscriber_types', default=['http', 'https', 'mailto',
'trust+http', 'trust+https'],
help='Defines supported subscriber types.')
max_flavors_per_page = cfg.IntOpt(
'max_flavors_per_page', default=20,
help='Defines the maximum number of flavors per page.')
max_pools_per_page = cfg.IntOpt(
'max_pools_per_page', default=20,
help='Defines the maximum number of pools per page.')
client_id_uuid_safe = cfg.StrOpt(
'client_id_uuid_safe', default='strict', choices=['strict', 'off'],
help='Defines the format of client id, the value could be '
'"strict" or "off". "strict" means the format of client id'
         ' must be uuid, "off" means the restriction is removed.')
min_length_client_id = cfg.IntOpt(
'min_length_client_id', default='10',
    help='Defines the minimum length of client id if the uuid '
         'restriction is removed. Default is 10.')
max_length_client_id = cfg.IntOpt(
'max_length_client_id', default='36',
    help='Defines the maximum length of client id if the uuid '
         'restriction is removed. Default is 36.')
message_delete_with_claim_id = cfg.BoolOpt(
'message_delete_with_claim_id', default=False,
    help='Require claim IDs when deleting messages. This improves '
         'security by preventing messages from being deleted before '
         'they are claimed and handled.')
message_encryption_algorithms = cfg.StrOpt(
'message_encryption_algorithms', default='AES256', choices=['AES256'],
    help='Defines the encryption algorithm for messages; only '
         '"AES256" is supported for now.')
message_encryption_key = cfg.StrOpt(
'message_encryption_key', default='AES256',
    help='Defines the encryption key used by the algorithm.')
GROUP_NAME = 'transport'
ALL_OPTS = [
default_message_ttl,
default_message_delay,
default_claim_ttl,
default_claim_grace,
default_subscription_ttl,
max_queues_per_page,
max_messages_per_page,
max_subscriptions_per_page,
max_messages_per_claim_or_pop,
max_queue_metadata,
max_messages_post_size,
max_message_ttl,
max_message_delay,
max_claim_ttl,
max_claim_grace,
subscriber_types,
max_flavors_per_page,
max_pools_per_page,
client_id_uuid_safe,
min_length_client_id,
max_length_client_id,
message_delete_with_claim_id,
message_encryption_algorithms,
message_encryption_key
]
def register_opts(conf):
conf.register_opts(ALL_OPTS, group=GROUP_NAME)
def list_opts():
return {GROUP_NAME: ALL_OPTS}
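# Illustrative usage (not part of the module):
#
#     from oslo_config import cfg
#     conf = cfg.ConfigOpts()
#     register_opts(conf)
#     conf([])  # parse (empty) CLI/config before reading values
#     conf.transport.default_message_ttl  # -> 3600 unless overridden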
|
openstack/zaqar
|
zaqar/conf/transport.py
|
Python
|
apache-2.0
| 6,180
|
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
#
import ctypes
import logging
import os
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
'black': 0,
'red': 1,
'green': 2,
'yellow': 3,
'blue': 4,
'magenta': 5,
'cyan': 6,
'white': 7,
}
    # levels to (background, foreground, bold/intense)
if os.name == 'nt':
level_map = {
logging.DEBUG: (None, 'blue', True),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', True),
logging.ERROR: (None, 'red', True),
logging.CRITICAL: ('red', 'white', True),
}
else:
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'black', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', True),
}
csi = '\x1b['
reset = '\x1b[0m'
@property
def is_tty(self):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
if os.name != 'nt':
def output_colorized(self, message):
self.stream.write(message)
else:
import re
ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')
nt_color_map = {
0: 0x00, # black
1: 0x04, # red
2: 0x02, # green
3: 0x06, # yellow
4: 0x01, # blue
5: 0x05, # magenta
6: 0x03, # cyan
7: 0x07, # white
}
def output_colorized(self, message):
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
fd = getattr(self.stream, 'fileno', None)
if fd is not None:
fd = fd()
if fd in (1, 2): # stdout or stderr
h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)
while parts:
text = parts.pop(0)
if text:
write(text)
if parts:
params = parts.pop(0)
if h is not None:
params = [int(p) for p in params.split(';')]
color = 0
for p in params:
if 40 <= p <= 47:
color |= self.nt_color_map[p - 40] << 4
elif 30 <= p <= 37:
color |= self.nt_color_map[p - 30]
elif p == 1:
color |= 0x08 # foreground intensity on
elif p == 0: # reset to default color
color = 0x07
else:
pass # error condition ignored
ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)
def colorize(self, message, record):
if record.levelno in self.level_map:
bg, fg, bold = self.level_map[record.levelno]
params = []
if bg in self.color_map:
params.append(str(self.color_map[bg] + 40))
if fg in self.color_map:
params.append(str(self.color_map[fg] + 30))
if bold:
params.append('1')
if params:
message = ''.join((self.csi, ';'.join(params),
'm', message, self.reset))
return message
def format(self, record):
message = logging.StreamHandler.format(self, record)
if self.is_tty:
# Don't colorize any traceback
parts = message.split('\n', 1)
parts[0] = self.colorize(parts[0], record)
message = '\n'.join(parts)
return message
def main():
root = logging.getLogger()
root.setLevel(logging.DEBUG)
root.addHandler(ColorizingStreamHandler())
logging.debug('DEBUG')
logging.info('INFO')
logging.warning('WARNING')
logging.error('ERROR')
logging.critical('CRITICAL')
if __name__ == '__main__':
main()
|
Arvin-X/swarm
|
thirdparty/ansistrm/ansistrm.py
|
Python
|
gpl-3.0
| 4,788
|
default_app_config = 'machiavelli.apps.MachiavelliConfig'
|
jantoniomartin/machiavelli
|
__init__.py
|
Python
|
agpl-3.0
| 58
|
# This file is part of the Frescobaldi project, http://www.frescobaldi.org/
#
# Copyright (c) 2008 - 2014 by Wilbert Berendsen
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# See http://www.gnu.org/licenses/ for more information.
"""
The PDF preview panel.
This file loads even if popplerqt5 is absent, although the PDF preview
panel only shows a message about missing the popplerqt5 module.
The widget module contains the real widget, the documents module a simple
abstraction and caching of Poppler documents with their filename,
and the printing module contains code to print a Poppler document, either
via a PostScript rendering or by printing raster images to a QPrinter.
All the point & click stuff is handled in the pointandclick module.
"""
import functools
import os
import weakref
from PyQt5.QtCore import QSettings, QTimer, Qt, pyqtSignal
from PyQt5.QtGui import QColor, QKeySequence, QPalette
from PyQt5.QtWidgets import (
QAction, QActionGroup, QApplication, QComboBox, QLabel, QSpinBox,
QWidgetAction)
import app
import actioncollection
import actioncollectionmanager
import icons
import job
import qutil
import panel
import listmodel
import gadgets.drag
from . import documents
# default zoom percentages
_zoomvalues = [50, 75, 100, 125, 150, 175, 200, 250, 300]
# viewModes from qpopplerview:
from qpopplerview import FixedScale, FitWidth, FitHeight, FitBoth
def activate(func):
"""Decorator for MusicViewPanel methods/slots.
The purpose is to first activate the widget and only perform an action
when the event loop starts. This gives the PDF widget the chance to resize
and position itself correctly.
"""
@functools.wraps(func)
def wrapper(self):
instantiated = bool(super(panel.Panel, self).widget())
self.activate()
if instantiated:
func(self)
else:
QTimer.singleShot(0, lambda: func(self))
return wrapper
class MusicViewPanel(panel.Panel):
def __init__(self, mainwindow):
super(MusicViewPanel, self).__init__(mainwindow)
self.toggleViewAction().setShortcut(QKeySequence("Meta+Alt+M"))
mainwindow.addDockWidget(Qt.RightDockWidgetArea, self)
ac = self.actionCollection = Actions(self)
actioncollectionmanager.manager(mainwindow).addActionCollection(ac)
ac.music_print.triggered.connect(self.printMusic)
ac.music_zoom_in.triggered.connect(self.zoomIn)
ac.music_zoom_out.triggered.connect(self.zoomOut)
ac.music_zoom_original.triggered.connect(self.zoomOriginal)
ac.music_zoom_combo.zoomChanged.connect(self.slotZoomChanged)
ac.music_fit_width.triggered.connect(self.fitWidth)
ac.music_fit_height.triggered.connect(self.fitHeight)
ac.music_fit_both.triggered.connect(self.fitBoth)
ac.music_single_pages.triggered.connect(self.viewSinglePages)
ac.music_two_pages_first_right.triggered.connect(self.viewTwoPagesFirstRight)
ac.music_two_pages_first_left.triggered.connect(self.viewTwoPagesFirstLeft)
ac.music_maximize.triggered.connect(self.maximize)
ac.music_jump_to_cursor.triggered.connect(self.jumpToCursor)
ac.music_sync_cursor.triggered.connect(self.toggleSyncCursor)
ac.music_copy_image.triggered.connect(self.copyImage)
ac.music_copy_text.triggered.connect(self.copyText)
ac.music_document_select.documentsChanged.connect(self.updateActions)
ac.music_copy_image.setEnabled(False)
ac.music_next_page.triggered.connect(self.slotNextPage)
ac.music_prev_page.triggered.connect(self.slotPreviousPage)
self.slotPageCountChanged(0)
ac.music_next_page.setEnabled(False)
ac.music_prev_page.setEnabled(False)
ac.music_single_pages.setChecked(True) # default to single pages
ac.music_reload.triggered.connect(self.reloadView)
self.actionCollection.music_sync_cursor.setChecked(
QSettings().value("musicview/sync_cursor", False, bool))
mode = QSettings().value("musicview/layoutmode", "single", str)
if mode == "double_left":
ac.music_two_pages_first_left.setChecked(True)
elif mode == "double_right":
ac.music_two_pages_first_right.setChecked(True)
else: # mode == "single":
ac.music_single_pages.setChecked(True)
def translateUI(self):
self.setWindowTitle(_("window title", "Music View"))
self.toggleViewAction().setText(_("&Music View"))
def createWidget(self):
from . import widget
w = widget.MusicView(self)
w.zoomChanged.connect(self.slotMusicZoomChanged)
w.updateZoomInfo()
w.view.surface().selectionChanged.connect(self.updateSelection)
# read layout mode setting before using the widget
layout = w.view.surface().pageLayout()
if self.actionCollection.music_two_pages_first_right.isChecked():
layout.setPagesPerRow(2)
layout.setPagesFirstRow(1)
elif self.actionCollection.music_two_pages_first_left.isChecked():
layout.setPagesPerRow(2)
layout.setPagesFirstRow(0)
else: # "single"
layout.setPagesPerRow(1) # default to single pages
layout.setPagesFirstRow(0)
import qpopplerview.pager
self._pager = p = qpopplerview.pager.Pager(w.view)
p.pageCountChanged.connect(self.slotPageCountChanged)
p.currentPageChanged.connect(self.slotCurrentPageChanged)
app.languageChanged.connect(self.updatePagerLanguage)
selector = self.actionCollection.music_document_select
selector.currentDocumentChanged.connect(w.openDocument)
selector.documentClosed.connect(w.clear)
if selector.currentDocument():
# open a document only after the widget has been created;
# this prevents many superfluous resizes
def open():
if selector.currentDocument():
w.openDocument(selector.currentDocument())
QTimer.singleShot(0, open)
return w
def setPageLayoutMode(self, mode):
"""Change the page layout and store the setting as well.
The mode is "single", "double_left" or "double_right".
"single": a vertical row of single pages
"double_left": two pages besides each other, first page is a left page
"double_right": two pages, first page is a right page.
"""
layout = self.widget().view.surface().pageLayout()
if mode == "double_right":
layout.setPagesPerRow(2)
layout.setPagesFirstRow(1)
elif mode == "double_left":
layout.setPagesPerRow(2)
layout.setPagesFirstRow(0)
elif mode == "single":
layout.setPagesPerRow(1)
layout.setPagesFirstRow(0)
else:
raise ValueError("wrong mode value")
QSettings().setValue("musicview/layoutmode", mode)
layout.update()
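        # e.g. setPageLayoutMode("double_right") shows facing pages with the
        # first page on the right, matching the layout of a printed score.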
def updateSelection(self, rect):
self.actionCollection.music_copy_image.setEnabled(bool(rect))
self.actionCollection.music_copy_text.setEnabled(bool(rect))
def updatePagerLanguage(self):
self.actionCollection.music_pager.setPageCount(self._pager.pageCount())
def slotPageCountChanged(self, total):
self.actionCollection.music_pager.setPageCount(total)
def slotCurrentPageChanged(self, num):
self.actionCollection.music_pager.setCurrentPage(num)
self.actionCollection.music_next_page.setEnabled(num < self._pager.pageCount())
self.actionCollection.music_prev_page.setEnabled(num > 1)
@activate
def slotNextPage(self):
self._pager.setCurrentPage(self._pager.currentPage() + 1)
@activate
def slotPreviousPage(self):
self._pager.setCurrentPage(self._pager.currentPage() - 1)
def setCurrentPage(self, num):
self.activate()
self._pager.setCurrentPage(num)
def updateActions(self):
ac = self.actionCollection
ac.music_print.setEnabled(bool(ac.music_document_select.documents()))
def printMusic(self):
doc = self.actionCollection.music_document_select.currentDocument()
if doc and doc.document():
### temporarily disable printing on Mac OS X
import sys
if sys.platform.startswith('darwin'):
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QMessageBox
result = QMessageBox.warning(self.mainwindow(),
_("Print Music"), _(
"Unfortunately, this version of Frescobaldi is unable to print "
"PDF documents on Mac OS X due to various technical reasons.\n\n"
"Do you want to open the file in the default viewer for printing instead? "
"(remember to close it again to avoid access problems)\n\n"
"Choose Yes if you want that, No if you want to try the built-in "
"printing functionality anyway, or Cancel to cancel printing."),
QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
if result == QMessageBox.Yes:
import helpers
helpers.openUrl(QUrl.fromLocalFile(doc.filename()), "pdf")
return
elif result == QMessageBox.Cancel:
return
### end temporarily disable printing on Mac OS X
import popplerprint
popplerprint.printDocument(doc, self)
@activate
def zoomIn(self):
self.widget().view.zoomIn()
@activate
def zoomOut(self):
self.widget().view.zoomOut()
@activate
def zoomOriginal(self):
self.widget().view.zoom(1.0)
@activate
def fitWidth(self):
self.widget().view.setViewMode(FitWidth)
@activate
def fitHeight(self):
self.widget().view.setViewMode(FitHeight)
@activate
def fitBoth(self):
self.widget().view.setViewMode(FitBoth)
@activate
def viewSinglePages(self):
self.setPageLayoutMode("single")
@activate
def viewTwoPagesFirstRight(self):
self.setPageLayoutMode("double_right")
@activate
def viewTwoPagesFirstLeft(self):
self.setPageLayoutMode("double_left")
@activate
def jumpToCursor(self):
self.widget().showCurrentLinks()
@activate
def reloadView(self):
d = self.mainwindow().currentDocument()
group = documents.group(d)
if group.update() or group.update(False):
ac = self.actionCollection
ac.music_document_select.setCurrentDocument(d)
def toggleSyncCursor(self):
QSettings().setValue("musicview/sync_cursor",
self.actionCollection.music_sync_cursor.isChecked())
def copyImage(self):
page = self.widget().view.surface().selectedPage()
if not page:
return
rect = self.widget().view.surface().selectedPageRect(page)
import copy2image
copy2image.copy_image(self, page, rect, documents.filename(page.document()))
def copyText(self):
text = self.widget().view.surface().selectedText()
if text:
QApplication.clipboard().setText(text)
def slotZoomChanged(self, mode, scale):
"""Called when the combobox is changed, changes view zoom."""
self.activate()
if mode == FixedScale:
self.widget().view.zoom(scale)
else:
self.widget().view.setViewMode(mode)
def slotMusicZoomChanged(self, mode, scale):
"""Called when the music view is changed, updates the toolbar actions."""
ac = self.actionCollection
ac.music_fit_width.setChecked(mode == FitWidth)
ac.music_fit_height.setChecked(mode == FitHeight)
ac.music_fit_both.setChecked(mode == FitBoth)
ac.music_zoom_combo.updateZoomInfo(mode, scale)
class Actions(actioncollection.ActionCollection):
name = "musicview"
def createActions(self, panel):
self.music_document_select = DocumentChooserAction(panel)
self.music_print = QAction(panel)
self.music_zoom_in = QAction(panel)
self.music_zoom_out = QAction(panel)
self.music_zoom_original = QAction(panel)
self.music_zoom_combo = ZoomerAction(panel)
self.music_fit_width = QAction(panel, checkable=True)
self.music_fit_height = QAction(panel, checkable=True)
self.music_fit_both = QAction(panel, checkable=True)
self._column_mode = ag = QActionGroup(panel)
self.music_single_pages = QAction(ag, checkable=True)
self.music_two_pages_first_right = QAction(ag, checkable=True)
self.music_two_pages_first_left = QAction(ag, checkable=True)
self.music_maximize = QAction(panel)
self.music_jump_to_cursor = QAction(panel)
self.music_sync_cursor = QAction(panel, checkable=True)
self.music_copy_image = QAction(panel)
self.music_copy_text = QAction(panel)
self.music_pager = PagerAction(panel)
self.music_next_page = QAction(panel)
self.music_prev_page = QAction(panel)
self.music_reload = QAction(panel)
self.music_print.setIcon(icons.get('document-print'))
self.music_zoom_in.setIcon(icons.get('zoom-in'))
self.music_zoom_out.setIcon(icons.get('zoom-out'))
self.music_zoom_original.setIcon(icons.get('zoom-original'))
self.music_fit_width.setIcon(icons.get('zoom-fit-width'))
self.music_fit_height.setIcon(icons.get('zoom-fit-height'))
self.music_fit_both.setIcon(icons.get('zoom-fit-best'))
self.music_maximize.setIcon(icons.get('view-fullscreen'))
self.music_jump_to_cursor.setIcon(icons.get('go-jump'))
self.music_copy_image.setIcon(icons.get('edit-copy'))
self.music_copy_text.setIcon(icons.get('edit-copy'))
self.music_next_page.setIcon(icons.get('go-next'))
self.music_prev_page.setIcon(icons.get('go-previous'))
self.music_document_select.setShortcut(QKeySequence(Qt.SHIFT | Qt.CTRL | Qt.Key_O))
self.music_print.setShortcuts(QKeySequence.Print)
self.music_zoom_in.setShortcuts(QKeySequence.ZoomIn)
self.music_zoom_out.setShortcuts(QKeySequence.ZoomOut)
self.music_jump_to_cursor.setShortcut(QKeySequence(Qt.CTRL | Qt.Key_J))
self.music_copy_image.setShortcut(QKeySequence(Qt.SHIFT | Qt.CTRL | Qt.Key_C))
self.music_reload.setShortcut(QKeySequence(Qt.Key_F5))
def translateUI(self):
self.music_document_select.setText(_("Select Music View Document"))
self.music_print.setText(_("&Print Music..."))
self.music_zoom_in.setText(_("Zoom &In"))
self.music_zoom_out.setText(_("Zoom &Out"))
self.music_zoom_original.setText(_("Original &Size"))
self.music_zoom_combo.setText(_("Zoom Music"))
self.music_fit_width.setText(_("Fit &Width"))
self.music_fit_height.setText(_("Fit &Height"))
self.music_fit_both.setText(_("Fit &Page"))
self.music_single_pages.setText(_("Single Pages"))
self.music_two_pages_first_right.setText(_("Two Pages (first page right)"))
self.music_two_pages_first_left.setText(_("Two Pages (first page left)"))
self.music_maximize.setText(_("&Maximize"))
self.music_jump_to_cursor.setText(_("&Jump to Cursor Position"))
self.music_sync_cursor.setText(_("S&ynchronize with Cursor Position"))
self.music_copy_image.setText(_("Copy to &Image..."))
self.music_copy_text.setText(_("Copy Selected &Text"))
self.music_next_page.setText(_("Next Page"))
self.music_next_page.setIconText(_("Next"))
self.music_prev_page.setText(_("Previous Page"))
self.music_prev_page.setIconText(_("Previous"))
self.music_reload.setText(_("&Reload"))
class ComboBoxAction(QWidgetAction):
"""A widget action that opens a combobox widget popup when triggered."""
def __init__(self, panel):
super(ComboBoxAction, self).__init__(panel)
self.triggered.connect(self.showPopup)
def showPopup(self):
"""Called when our action is triggered by a keyboard shortcut."""
# find the widget in our floating panel, if available there
for w in self.createdWidgets():
if w.window() == self.parent():
w.showPopup()
return
# find the one in the main window
for w in self.createdWidgets():
if w.window() == self.parent().mainwindow():
w.showPopup()
return
class DocumentChooserAction(ComboBoxAction):
"""A ComboBoxAction that keeps track of the current text document.
It manages the list of generated PDF documents for every text document.
If the mainwindow changes its current document and there are PDFs to display,
it switches the current document.
It also switches to a text document if a job finished for that document,
and it generated new PDF documents.
"""
documentClosed = pyqtSignal()
documentsChanged = pyqtSignal()
currentDocumentChanged = pyqtSignal(documents.Document)
def __init__(self, panel):
super(DocumentChooserAction, self).__init__(panel)
self._model = None
self._document = None
self._documents = []
self._currentIndex = -1
self._indices = weakref.WeakKeyDictionary()
panel.mainwindow().currentDocumentChanged.connect(self.slotDocumentChanged)
documents.documentUpdated.connect(self.slotDocumentUpdated)
def createWidget(self, parent):
w = DocumentChooser(parent)
w.activated[int].connect(self.setCurrentIndex)
if self._model:
w.setModel(self._model)
return w
def slotDocumentChanged(self, doc):
"""Called when the mainwindow changes its current document."""
# only switch our document if there are PDF documents to display
if self._document is None or documents.group(doc).documents():
self.setCurrentDocument(doc)
def slotDocumentUpdated(self, doc, j):
"""Called when a Job, finished on the document, has created new PDFs."""
# if result files of this document were already displayed, the display
# is updated. Else the current document is switched if the document was
# the current document to be engraved (e.g. sticky or master) and
# the job was started on this mainwindow
import engrave
mainwindow = self.parent().mainwindow()
if (doc == self._document or
(job.attributes.get(j).mainwindow == mainwindow and
doc == engrave.engraver(mainwindow).document())):
self.setCurrentDocument(doc)
def setCurrentDocument(self, document):
"""Displays the DocumentGroup of the given text Document in our chooser."""
prev = self._document
self._document = document
if prev:
prev.loaded.disconnect(self.updateDocument)
prev.closed.disconnect(self.closeDocument)
self._indices[prev] = self._currentIndex
document.loaded.connect(self.updateDocument)
document.closed.connect(self.closeDocument)
self.updateDocument()
def updateDocument(self):
"""(Re)read the output documents of the current document and show them."""
docs = self._documents = documents.group(self._document).documents()
self.setVisible(bool(docs))
self.setEnabled(bool(docs))
# make model for the docs
m = self._model = listmodel.ListModel([d.filename() for d in docs],
display = os.path.basename, icon = icons.file_type)
m.setRoleFunction(Qt.UserRole, lambda f: f)
for w in self.createdWidgets():
w.setModel(m)
index = self._indices.get(self._document, 0)
if index < 0 or index >= len(docs):
index = 0
self.documentsChanged.emit()
self.setCurrentIndex(index)
def closeDocument(self):
"""Called when the current document is closed by the user."""
self._document = None
self._documents = []
self._currentIndex = -1
self.setVisible(False)
self.setEnabled(False)
self.documentClosed.emit()
self.documentsChanged.emit()
def documents(self):
return self._documents
def setCurrentIndex(self, index):
if self._documents:
self._currentIndex = index
p = QApplication.palette()
if not self._documents[index].updated:
color = qutil.mixcolor(QColor(Qt.red), p.color(QPalette.Base), 0.3)
p.setColor(QPalette.Base, color)
for w in self.createdWidgets():
w.setCurrentIndex(index)
w.setPalette(p)
self.currentDocumentChanged.emit(self._documents[index])
def currentIndex(self):
return self._currentIndex
def currentDocument(self):
"""Returns the currently selected Music document (Note: NOT the text document!)"""
if self._documents:
return self._documents[self._currentIndex]
class DocumentChooser(QComboBox):
def __init__(self, parent):
super(DocumentChooser, self).__init__(parent)
self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.setEditable(True)
self.lineEdit().setReadOnly(True)
self.setFocusPolicy(Qt.NoFocus)
app.translateUI(self)
gadgets.drag.ComboDrag(self).role = Qt.UserRole
def translateUI(self):
self.setToolTip(_("Choose the PDF document to display."))
self.setWhatsThis(_(
"Choose the PDF document to display or drag the file "
"to another application or location."))
class ZoomerAction(ComboBoxAction):
zoomChanged = pyqtSignal(int, float)
def createWidget(self, parent):
return Zoomer(self, parent)
def setCurrentIndex(self, index):
"""Called when a user manipulates a Zoomer combobox.
Updates the other widgets and calls the corresponding method of the panel.
"""
for w in self.createdWidgets():
w.setCurrentIndex(index)
if index == 0:
self.zoomChanged.emit(FitWidth, 0)
elif index == 1:
self.zoomChanged.emit(FitHeight, 0)
elif index == 2:
self.zoomChanged.emit(FitBoth, 0)
else:
self.zoomChanged.emit(FixedScale, _zoomvalues[index-3] / 100.0)
def updateZoomInfo(self, mode, scale):
"""Connect view.viewModeChanged and layout.scaleChanged to this."""
if mode == FixedScale:
text = "{0:.0%}".format(scale)
for w in self.createdWidgets():
w.setEditText(text)
else:
if mode == FitWidth:
index = 0
elif mode == FitHeight:
index = 1
else: # qpopplerview.FitBoth:
index = 2
for w in self.createdWidgets():
w.setCurrentIndex(index)
class Zoomer(QComboBox):
def __init__(self, action, parent):
super(Zoomer, self).__init__(parent)
self.setSizeAdjustPolicy(QComboBox.AdjustToContents)
self.setEditable(True)
self.lineEdit().setReadOnly(True)
self.setFocusPolicy(Qt.NoFocus)
self.activated[int].connect(action.setCurrentIndex)
self.addItems(['']*3)
self.addItems(list(map("{0}%".format, _zoomvalues)))
self.setMaxVisibleItems(20)
app.translateUI(self)
def translateUI(self):
self.setItemText(0, _("Fit Width"))
self.setItemText(1, _("Fit Height"))
self.setItemText(2, _("Fit Page"))
class PagerAction(QWidgetAction):
def __init__(self, panel):
super(PagerAction, self).__init__(panel)
def createWidget(self, parent):
w = QSpinBox(parent, buttonSymbols=QSpinBox.NoButtons)
w.setFocusPolicy(Qt.ClickFocus)
w.valueChanged[int].connect(self.slotValueChanged)
return w
def setPageCount(self, total):
if total:
self.setVisible(True)
# L10N: page numbering: page {num} of {total}
prefix, suffix = _("{num} of {total}").split('{num}')
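            # e.g. the untranslated "{num} of {total}" splits into prefix == ''
            # and suffix == ' of {total}'; a translation like "page {num}/{total}"
            # yields prefix 'page ' and suffix '/{total}'.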
def adjust(w):
w.setRange(1, total)
w.setSuffix(suffix.format(total=total))
w.setPrefix(prefix.format(total=total))
else:
self.setVisible(False)
def adjust(w):
w.setRange(0, 0)
w.clear()
for w in self.createdWidgets():
with qutil.signalsBlocked(w):
adjust(w)
def setCurrentPage(self, num):
if num:
for w in self.createdWidgets():
with qutil.signalsBlocked(w):
w.setValue(num)
w.lineEdit().deselect()
def slotValueChanged(self, num):
self.parent().setCurrentPage(num)
|
brownian/frescobaldi
|
frescobaldi_app/musicview/__init__.py
|
Python
|
gpl-2.0
| 25,858
|
from scipy.integrate import quad
import numpy as np
from prettytable import PrettyTable
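# Note: every "function" handled below is a Python expression in the
# variable x (e.g. "x**2 + 1") evaluated with eval(); that is acceptable
# for a local interactive tool, but never feed it untrusted input.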
def integral_solution(interval_bounds, family):
answers = []
for function in family:
answers.append(quad(lambda x: eval(function), interval_bounds[0], interval_bounds[1])[0])
return answers
def midpoint_rule(interval_bounds, partitions, family):
intervals = list() # grid points (subinterval boundaries)
dx = ((interval_bounds[1] - interval_bounds[0]) / partitions)
step = interval_bounds[0]
while step <= interval_bounds[1]:
intervals.append(step)
step += dx
answers = [0.0] * len(family)
i = 0
while i < len(intervals) - 1:
x = ((intervals[i] + intervals[i+1]) / 2.0) # midpoint of the i-th subinterval
for j in range(0, len(family)):
    answers[j] += dx * eval(family[j]) # midpoint rule: f(midpoint) * dx
i += 1
return answers
def trapezoidal_rule(interval_bounds, partitions, family):
x = list() # grid points (sample locations)
dx = ((interval_bounds[1] - interval_bounds[0]) / partitions)
step = interval_bounds[0]
while step <= interval_bounds[1]:
x.append(step)
step += dx
x = np.array(x)
answers = []
for function in family:
if function.isdigit():
answers.append(np.trapz(int(function) + (0*x), x)) # check if constant
else:
answers.append(np.trapz(eval(function), x))
return answers
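# find_weight solves I = w*M + (1 - w)*T for the scalar w, where M and T
# are the midpoint and trapezoidal estimates and I the exact integral;
# rearranging gives w = (T - I) / (T - M). For smooth integrands w tends
# to 2/3, recovering Simpson's rule S = (2*M + T) / 3.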
def find_weight(midpoint_rule_value, trapezoidal_rule_value, integral_value, family):
answers = [0] * len(family)
i = 0
while i <= len(answers) - 1:
if trapezoidal_rule_value[i] - midpoint_rule_value[i] == 0:
answers[i] = 0
else:
answers[i] = (trapezoidal_rule_value[i] - integral_value[i]) / \
(trapezoidal_rule_value[i] - midpoint_rule_value[i])
i += 1
return answers
def table(family, midpoint_value, trapezoidal_value, integral_value, weights):
table = PrettyTable()
table.add_column("Functions", family)
table.add_column("Midpoint Values", midpoint_value)
table.add_column("Trapezoidal Values", trapezoidal_value)
table.add_column("Integral Values", integral_value)
table.add_column("Scalar Constants", weights)
return table
def main():
family = []
print("Warning: Use ** for exponents instead of ^")
while True:
function = input("Please enter your next function or press q to finish: ")
if function == "q" or function == "Q":
break
else:
family.append(function)
partitions = int(input("How many partitions? "))
start = int(input("Interval start? "))
end = int(input("Interval end? "))
interval_bounds = [start, end]
integral_value = integral_solution(interval_bounds, family)
midpoint_value = midpoint_rule(interval_bounds, partitions, family)
trapezoidal_value = trapezoidal_rule(interval_bounds, partitions*2, family)
weights = find_weight(midpoint_value, trapezoidal_value, integral_value, family)
print(table(family, midpoint_value, trapezoidal_value, integral_value, weights))
if __name__ == "__main__":
main()
|
Kosci/Approximate-Integrals
|
ApproxIntegrals.py
|
Python
|
mit
| 3,252
|
# -*- coding:utf8 -*-
# File : callback.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 12/29/16
#
# This file is part of TensorArtist.
__all__ = ['CallbackManager']
class CallbackManager(object):
"""
A simple callback manager utility.
Use register(name, callback) to register a callback, and
dispatch(name, *args, **kwargs) to dispatch by name.
If a super callback is set, it intercepts every dispatch; it receives
the manager and the dispatched name before the dispatched arguments.
Otherwise the dispatcher calls the callback registered under the name.
If no such callback is registered, the fallback callback (if any) is
called; it also receives the manager and the dispatched name first.
"""
def __init__(self):
super().__init__()
self._super_callback = None
self._callbacks = dict()
self._fallback_callback = None
def register(self, name, callback):
"""
Register a callable with (name, callback)
:param name: the name
:param callback: the callback
:return: self
"""
self._callbacks[name] = callback
return self
def get_callback(self, name):
"""
Get a callable by name. If not exists, return None.
:param name: the name
:return: callable / None
"""
if name in self._callbacks:
return self._callbacks[name]
return None
def has_callback(self, name):
"""
Tell whether there exists a callable of given name.
:param name: the name
:return: whether the callable exists
"""
return name in self._callbacks
@property
def super_callback(self):
"""
:return: the super callback
"""
return self._super_callback
def set_super_callback(self, callback):
"""
:param callback: the new super callback
:return: self
"""
self._super_callback = callback
return self
@property
def fallback_callback(self):
"""
:return: the fallback callback
"""
return self._fallback_callback
def set_fallback_callback(self, callback):
"""
:param callback: the new fallback callback
:return: self
"""
self._fallback_callback = callback
return self
def dispatch(self, name, *args, **kwargs):
"""
Dispatch by name.
:param name: the name
:return: the result
"""
if self._super_callback is not None:
return self._super_callback(self, name, *args, **kwargs)
return self.dispatch_direct(name, *args, **kwargs)
def dispatch_direct(self, name, *args, **kwargs):
"""
Dispatch by name, ignoring the super callback.
This method is useful if you want to register a super callback.
:param name: the name
:return: the result
"""
if name in self._callbacks:
return self._callbacks[name](*args, **kwargs)
elif self._fallback_callback is not None:
return self._fallback_callback(self, name, *args, **kwargs)
return None
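# Minimal usage sketch demonstrating the dispatch order described in the
# class docstring: registered callbacks are tried first, then the fallback.
if __name__ == '__main__':
    mgr = CallbackManager()
    mgr.register('greet', lambda name: 'hello ' + name)
    mgr.set_fallback_callback(
        lambda manager, name, *args, **kwargs: 'no callback named ' + name)
    assert mgr.dispatch('greet', 'world') == 'hello world'
    assert mgr.dispatch('missing') == 'no callback named missing'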
|
vacancy/TensorArtist
|
tartist/core/utils/callback.py
|
Python
|
mit
| 3,198
|
import csv
import time
from datetime import timedelta
from django.shortcuts import render, redirect
from django.http import Http404, HttpResponse, HttpResponseForbidden, JsonResponse
from django.conf import settings
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.utils.html import format_html
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.db import transaction, IntegrityError
from django.contrib.auth.hashers import make_password
from django.contrib.auth.models import User
from django.contrib.auth.forms import PasswordChangeForm
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.timezone import now
from formtools.wizard.views import SessionWizardView
from .models import Petition, Signature, Organization, PytitionUser, PetitionTemplate, Permission
from .models import SlugModel
from .forms import SignatureForm, ContentFormPetition, EmailForm, NewsletterForm, SocialNetworkForm, ContentFormTemplate
from .forms import StyleForm, PetitionCreationStep1, PetitionCreationStep2, PetitionCreationStep3, UpdateInfoForm
from .forms import DeleteAccountForm, OrgCreationForm
from .helpers import get_client_ip, get_session_user, petition_from_id
from .helpers import check_petition_is_accessible
from .helpers import send_confirmation_email, subscribe_to_newsletter
from .helpers import get_update_form, petition_detail_meta
#------------------------------------ Views -----------------------------------
# Path : /
# Depending on the settings.INDEX_PAGE, show a list of petitions or
# redirect to a user/org profile page
def index(request):
petitions = Petition.objects.filter(published=True).order_by('-id')[:12]
if not hasattr(settings, 'INDEX_PAGE'):
raise Http404(_("You must set an INDEX_PAGE config in your settings"))
if settings.INDEX_PAGE == 'USER_PROFILE':
try:
user_name = settings.INDEX_PAGE_USER
except AttributeError:
raise Http404(_("You must set an INDEX_PAGE_USER config in your settings"))
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
try:
org_name = settings.INDEX_PAGE_ORGA
except AttributeError:
raise Http404(_("You must set an INDEX_PAGE_ORGA config in your settings"))
if settings.INDEX_PAGE == 'ALL_PETITIONS':
return redirect("all_petitions")
elif settings.INDEX_PAGE == 'ORGA_PROFILE':
org = Organization.objects.get(name=org_name)
return redirect("org_profile", org.slugname)
elif settings.INDEX_PAGE == 'USER_PROFILE':
return redirect("user_profile", user_name)
elif settings.INDEX_PAGE == 'LOGIN_REGISTER':
if request.user.is_authenticated:
return redirect("user_dashboard")
else:
return redirect("login")
else:
authenticated = request.user.is_authenticated
if authenticated:
user = get_session_user(request)
else:
user = request.user
return render(request, 'petition/index.html',
{
'user': user,
'petitions': petitions
}
)
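# Example configuration (illustrative values, not from this project): in
# settings.py set one of
#   INDEX_PAGE = 'ALL_PETITIONS'
#   INDEX_PAGE = 'ORGA_PROFILE' with INDEX_PAGE_ORGA = 'my-org'
#   INDEX_PAGE = 'USER_PROFILE' with INDEX_PAGE_USER = 'some-username'
#   INDEX_PAGE = 'LOGIN_REGISTER'
# Any other value falls through to the petition list rendered above.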
# /all_petitions
# Show all the petitions in the database
def all_petitions(request):
petitions = Petition.objects.filter(published=True).all()
return render(request, 'petition/all_petitions.html',
{'petitions': petitions})
# /search?q=QUERY
# Show results of a search query
def search(request):
q = request.GET.get('q', '')
if q != "":
petitions = Petition.objects.filter(Q(title__icontains=q) | Q(text__icontains=q)).filter(published=True)[:15]
orgs = Organization.objects.filter(name__icontains=q)
else:
petitions = Petition.objects.filter(published=True)[:15]
orgs = []
return render(
request, 'petition/search.html',
{
'petitions': petitions,
'orgs': orgs,
'q': q
}
)
# /<int:petition_id>/
# Show information on a petition
def detail(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
pytitionuser = get_session_user(request)
except Exception:
pytitionuser = None
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, 'petition': petition, 'form': sign_form,
'meta': petition_detail_meta(request, petition_id)}
return render(request, 'petition/petition_detail.html', ctx)
# /<int:petition_id>/confirm/<confirmation_hash>
# Confirm signature to a petition
def confirm(request, petition_id, confirmation_hash):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
try:
successmsg = petition.confirm_signature(confirmation_hash)
if successmsg is None:
messages.error(request, _("Error: This confirmation code is invalid. Maybe you\'ve already confirmed?"))
else:
messages.success(request, successmsg)
except ValidationError as e:
messages.error(request, _(e.message))
except Signature.DoesNotExist:
messages.error(request, _("Error: This confirmation code is invalid."))
return redirect(petition.url)
# <int:petition_id>/get_csv_signature
# <int:petition_id>/get_csv_confirmed_signature
# returns the CSV files of the list of signatures
@login_required
def get_csv_signature(request, petition_id, only_confirmed):
user = get_session_user(request)
try:
petition = Petition.objects.get(pk=petition_id)
except Petition.DoesNotExist:
return JsonResponse({}, status=404)
if petition.owner_type == "org":
    if not petition.org.is_allowed_to(user, "can_view_signatures"):
        return JsonResponse({}, status=403)
elif petition.user != user:
    # user-owned petition: only its owner may export its signatures
    return JsonResponse({}, status=403)
filename = '{}.csv'.format(petition)
signatures = Signature.objects.filter(petition=petition)
if only_confirmed:
    signatures = signatures.filter(confirmed=True)
else:
    signatures = signatures.all()
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment;filename={}'.format(filename).replace('\r\n', '').replace(' ', '%20')
writer = csv.writer(response)
attrs = ['first_name', 'last_name', 'phone', 'email', 'subscribed_to_mailinglist', 'confirmed']
writer.writerow(attrs)
for signature in signatures:
values = [getattr(signature, field) for field in attrs]
writer.writerow(values)
return response
# resend/<int:signature_id>
# resend the signature confirmation email
@login_required
def go_send_confirmation_email(request, signature_id):
app_label = Signature._meta.app_label
signature = Signature.objects.filter(pk=signature_id).get()
send_confirmation_email(request, signature)
return redirect('admin:{}_signature_change'.format(app_label), signature_id)
# <int:petition_id>/sign
# Sign a petition
def create_signature(request, petition_id):
petition = petition_from_id(petition_id)
check_petition_is_accessible(request, petition)
if request.method == "POST":
form = SignatureForm(petition=petition, data=request.POST)
if not form.is_valid():
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
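        # Hash the client IP with the petition's salt so raw addresses are
        # never stored; the same hash is used below to throttle repeated
        # signatures from one address within SIGNATURE_THROTTLE_TIMING seconds.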
ipaddr = make_password(
get_client_ip(request),
salt=petition.salt.encode('utf-8'))
since = now() - timedelta(seconds=settings.SIGNATURE_THROTTLE_TIMING)
signatures = Signature.objects.filter(
petition=petition,
ipaddress=ipaddr,
date__gt=since)
if signatures.count() > settings.SIGNATURE_THROTTLE:
messages.error(request, _("Too many signatures from your IP address, please try again later."))
return render(request, 'petition/petition_detail.html', {'petition': petition, 'form': form, 'meta': petition_detail_meta(request, petition_id)})
else:
signature = form.save()
signature.ipaddress = ipaddr
signature.save()
send_confirmation_email(request, signature)
messages.success(request,
format_html(_("Thank you for signing this petition, an email has just been sent to you at your address \'{}\'" \
" in order to confirm your signature.<br>" \
"You will need to click on the confirmation link in the email.<br>" \
"If you cannot find the email in your Inbox, please have a look in your Spam box.")\
, signature.email))
if petition.has_newsletter and signature.subscribed_to_mailinglist:
subscribe_to_newsletter(petition, signature.email)
return redirect(petition.url)
# /org/<slug:orgslugname>/dashboard
# Show the dashboard of an organization
@login_required
def org_dashboard(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(request, _("This organization does not exist: '{}'".format(orgslugname)))
return redirect("user_dashboard")
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization: '{}'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)))
return redirect("user_dashboard")
can_create_petition = org.is_allowed_to(pytitionuser, "can_create_petitions")
petitions = org.petition_set.all()
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
return render(request, 'petition/org_dashboard.html',
{'org': org, 'user': pytitionuser, "other_orgs": other_orgs,
'petitions': petitions, 'user_permissions': permissions,
'can_create_petition': can_create_petition})
# /user/dashboard
# Dashboard of the logged in user
@login_required
def user_dashboard(request):
user = get_session_user(request)
petitions = user.petition_set.all()
return render(
request,
'petition/user_dashboard.html',
{'user': user, 'petitions': petitions, 'can_create_petition': True}
)
# /user/<user_name>
# Show the user profile
def user_profile(request, user_name):
try:
user = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
raise Http404(_("not found"))
ctx = {'user': user,
'petitions': user.petition_set.filter(published=True)}
return render(request, 'petition/user_profile.html', ctx)
# /org/<slug:orgslugname>/leave_org
# User is leaving the organisation
@login_required
def leave_org(request, orgslugname):
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
pytitionuser = get_session_user(request)
if pytitionuser not in org.members.all():
raise Http404(_("not found"))
with transaction.atomic():
if org.is_last_admin(pytitionuser):
messages.error(request, _('You cannot leave this organization: you are its last administrator'))
return redirect(reverse('account_settings') + '#a_org_form')
elif org.members.count() == 1:
messages.error(request, _('You cannot leave this organization: you are its last member'))
return redirect(reverse('account_settings') + '#a_org_form')
else:
org.members.remove(pytitionuser)
return redirect('account_settings')
# /org/<slug:orgslugname>
# Show the profile of an organization
def org_profile(request, orgslugname):
try:
user = get_session_user(request)
except Exception:
user = None
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
ctx = {'org': org,
'petitions': org.petition_set.filter(published=True)}
# if a user is logged-in, put it in the context, it will feed the navbar dropdown
if user is not None:
ctx['user'] = user
return render(request, "petition/org_profile.html", ctx)
# /get_user_list
# get the list of users
@login_required
def get_user_list(request):
q = request.GET.get('q', '')
if q != "":
users = PytitionUser.objects.filter(Q(user__username__contains=q) | Q(user__first_name__icontains=q) |
Q(user__last_name__icontains=q)).all()
else:
users = []
userdict = {
"values": [user.user.username for user in users],
}
return JsonResponse(userdict)
# PATH : org/<slug:orgslugname>/add_user
# Add an user to an organization
@login_required
def org_add_user(request, orgslugname):
adduser = request.GET.get('user', '')
try:
adduser = PytitionUser.objects.get(user__username=adduser)
except PytitionUser.DoesNotExist:
message = _("This user does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
message = _("This organization does not exist (anylonger?)")
return JsonResponse({"message": message}, status=404)
pytitionuser = get_session_user(request)
if org not in pytitionuser.organization_set.all():
message = _("You are not part of this organization.")
return JsonResponse({"message": message}, status=403)
if org in adduser.organization_set.all():
message = _("User is already member of {orgname} organization".format(orgname=org.name))
return JsonResponse({"message": message}, status=500)
if not org.is_allowed_to(pytitionuser, "can_add_members"):
message = _("You are not allowed to invite new members into this organization.")
return JsonResponse({"message": message}, status=403)
try:
adduser.invitations.add(org)
adduser.save()
except Exception:
    message = _("An error occurred")
return JsonResponse({"message": message}, status=500)
message = _("You invited {username} to join {orgname}".format(username=adduser.name, orgname=org.name))
return JsonResponse({"message": message})
# /org/<slug:orgslugname>/invite_accept
# Accept an invitation to an organisation
# Called from /user/dashboard
@login_required
def invite_accept(request, orgslugname):
if orgslugname == "":
return HttpResponse(status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
with transaction.atomic():
pytitionuser.invitations.remove(org)
org.members.add(pytitionuser)
except:
return HttpResponse(status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
# /org/<slug:orgslugname>/invite_dismiss
# Dismiss the invitation to an organisation
@login_required
def invite_dismiss(request, orgslugname):
if orgslugname == "":
return JsonResponse({}, status=500)
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("not found"))
if org in pytitionuser.invitations.all():
try:
pytitionuser.invitations.remove(org)
except:
return JsonResponse({}, status=500)
else:
raise Http404(_("not found"))
return redirect('user_dashboard')
# /org/<slug:orgslugname>/new_template
# /user/new_template
# Create a new template
@login_required
def new_template(request, orgslugname=None):
pytitionuser = get_session_user(request)
ctx = {'user': pytitionuser}
if orgslugname:
redirection = "org_new_template"
try:
org = Organization.objects.get(slugname=orgslugname)
ctx['org'] = org
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in pytitionuser.organization_set.all():
return HttpResponseForbidden(_("You are not allowed to view this organization dashboard"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
ctx['user_permissions'] = permissions
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
if not permissions.can_create_templates:
return HttpResponseForbidden(_("You don't have the permission to create a Template in this organization"))
ctx['base_template'] = 'petition/org_base.html'
else:
redirection = "user_new_template"
ctx['base_template'] = 'petition/user_base.html'
if request.method == "POST":
template_name = request.POST.get('template_name', '')
if template_name != '':
if orgslugname:
template = PetitionTemplate(name=template_name, org=org)
else:
template = PetitionTemplate(name=template_name, user=pytitionuser)
template.save()
return redirect("edit_template", template.id)
else:
messages.error(request, _("You need to provide a template name."))
return redirect(redirection)
else:
return render(request, "petition/new_template.html", ctx)
# /templates/<int:template_id>/edit
# Edit a petition template
@login_required
def edit_template(request, template_id):
id = template_id
if id == '':
return HttpResponseForbidden(_("You need to provide the template id to modify"))
try:
template = PetitionTemplate.objects.get(pk=id)
except PetitionTemplate.DoesNotExist:
raise Http404(_("This template does not exist"))
pytitionuser = get_session_user(request)
context = {'user': pytitionuser}
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
try:
permissions = Permission.objects.get(organization=owner, user=pytitionuser)
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=owner.name)), status=500)
context['user_permissions'] = permissions
if owner not in pytitionuser.organization_set.all() or not permissions.can_modify_templates:
return HttpResponseForbidden(_("You are not allowed to edit this organization's templates"))
context['org'] = owner
base_template = "petition/org_base.html"
else:
if owner != pytitionuser:
return HttpResponseForbidden(_("You are not allowed to edit this user's templates"))
base_template = "petition/user_base.html"
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
'style_form_submitted': False,
}
if request.method == "POST":
if 'content_form_submitted' in request.POST:
content_form = ContentFormTemplate(request.POST)
submitted_ctx['content_form_submitted'] = True
if content_form.is_valid():
template.name = content_form.cleaned_data['name']
template.text = content_form.cleaned_data['text']
template.side_text = content_form.cleaned_data['side_text']
template.footer_text = content_form.cleaned_data['footer_text']
template.footer_links = content_form.cleaned_data['footer_links']
template.sign_form_footer = content_form.cleaned_data['sign_form_footer']
template.save()
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
if 'email_form_submitted' in request.POST:
email_form = EmailForm(request.POST)
submitted_ctx['email_form_submitted'] = True
if email_form.is_valid():
template.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
template.save()
else:
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
social_network_form = SocialNetworkForm(request.POST)
submitted_ctx['social_network_form_submitted'] = True
if social_network_form.is_valid():
template.twitter_description = social_network_form.cleaned_data['twitter_description']
template.twitter_image = social_network_form.cleaned_data['twitter_image']
template.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
template.save()
else:
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in SocialNetworkForm.base_fields})
if 'newsletter_form_submitted' in request.POST:
newsletter_form = NewsletterForm(request.POST)
submitted_ctx['newsletter_form_submitted'] = True
if newsletter_form.is_valid():
template.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
template.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
template.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
template.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
template.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
template.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
template.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
template.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
template.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
template.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
template.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
template.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
template.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
template.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
template.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
template.save()
else:
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
template.bgcolor = style_form.cleaned_data['bgcolor']
template.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
template.gradient_from = style_form.cleaned_data['gradient_from']
template.gradient_to = style_form.cleaned_data['gradient_to']
template.save()
else:
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
else:
content_form = ContentFormTemplate({f: getattr(template, f) for f in ContentFormTemplate.base_fields})
email_form = EmailForm({f: getattr(template, f) for f in EmailForm.base_fields})
social_network_form = SocialNetworkForm({f: getattr(template, f) for f in SocialNetworkForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(template, f) for f in NewsletterForm.base_fields})
style_form = StyleForm({f: getattr(template, f) for f in StyleForm.base_fields})
ctx = {'content_form': content_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'style_form': style_form,
'petition': template}
context['base_template'] = base_template
context.update(ctx)
context.update(submitted_ctx)
return render(request, "petition/edit_template.html", context)
# /templates/<int:template_id>/delete
# Delete a template
@login_required
def template_delete(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except PetitionTemplate.DoesNotExist:
return JsonResponse({}, status=404)
if template.owner_type == "org":
if pytitionuser not in template.org.members.all():
return JsonResponse({}, status=403) # User not in organization
try:
permissions = Permission.objects.get(
organization=template.org,
user=pytitionuser)
except Permission.DoesNotExist:
return JsonResponse({}, status=500) # No permission? fatal error!
if not permissions.can_delete_templates:
return JsonResponse({}, status=403) # User does not have the permission!
else:
if pytitionuser != template.user:
return JsonResponse({}, status=403) # User cannot delete a template if it's not his
template.delete()
return JsonResponse({})
# /templates/<int:template_id>/fav
# Set a template as favourite
@login_required
def template_fav_toggle(request, template_id):
pytitionuser = get_session_user(request)
if template_id == '':
return JsonResponse({}, status=500)
try:
template = PetitionTemplate.objects.get(pk=template_id)
except PetitionTemplate.DoesNotExist:
return JsonResponse({}, status=404)
if template.owner_type == "org":
owner = template.org
else:
owner = template.user
if template.owner_type == "org":
if owner not in pytitionuser.organization_set.all():
return JsonResponse({}, status=403) # Forbidden
else:
if owner != pytitionuser:
return JsonResponse({'msg': _("You are not allowed to change this user's default template")}, status=403)
if owner.default_template == template:
owner.default_template = None
else:
owner.default_template = template
owner.save()
return JsonResponse({})
# /org/<slug:orgslugname>/delete_member
# Remove a member from an organization
@login_required
def org_delete_member(request, orgslugname):
member_name = request.GET.get('member', '')
try:
member = PytitionUser.objects.get(user__username=member_name)
except PytitionUser.DoesNotExist:
raise Http404(_("User does not exist"))
pytitionuser = get_session_user(request)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if pytitionuser not in org.members.all():
return JsonResponse({}, status=403) # Forbidden
try:
permissions = Permission.objects.get(user=pytitionuser, organization=org)
except Permission.DoesNotExist:
return JsonResponse({}, status=500)
if permissions.can_remove_members or pytitionuser == member:
if org in member.organization_set.all():
if org.is_last_admin(member):
return JsonResponse({}, status=403) # Forbidden
member.organization_set.remove(org)
else:
return JsonResponse({}, status=404)
else:
return JsonResponse({}, status=403) # Forbidden
return JsonResponse({}, status=200)
# PATH : org/<slug:orgslugname>/edit_user_permissions/<slug:user_name>
# Show a webpage to edit permissions
@login_required
def org_edit_user_perms(request, orgslugname, user_name):
"""Shows the page which lists the user permissions."""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User '{name}' does not exist".format(name=user_name)))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization '{name}' does not exist".format(name=orgslugname)))
if org not in member.organization_set.all():
messages.error(request, _("The user '{username}' is not member of this organization ({orgname}).".
format(username=user_name, orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(organization=org, user=member)
except Permission.DoesNotExist:
messages.error(request,
_("Internal error, this member does not have permissions attached to this organization."))
return redirect("org_dashboard", org.slugname)
try:
user_permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
return render(request, "petition/org_edit_user_perms.html",
{'org': org, 'member': member, 'user': pytitionuser,
'permissions': permissions,
'user_permissions': user_permissions})
# PATH /org/<slug:orgslugname>/set_user_permissions/<slug:user_name>
# Set a permission for an user
@login_required
def org_set_user_perms(request, orgslugname, user_name):
"""Actually do the modification of user permissions.
Data come from "org_edit_user_perms" view's form.
"""
pytitionuser = get_session_user(request)
try:
member = PytitionUser.objects.get(user__username=user_name)
except PytitionUser.DoesNotExist:
messages.error(request, _("User does not exist"))
return redirect("org_dashboard", orgslugname)
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
if org not in member.organization_set.all():
messages.error(request, _("This user is not part of organization \'{orgname}\'".format(orgname=org.name)))
return redirect("org_dashboard", org.slugname)
try:
permissions = Permission.objects.get(user=member, organization=org)
except Permission.DoesNotExist:
messages.error(request, _("Fatal error, this user does not have permissions attached for this organization"))
return redirect("org_dashboard", org.slugname)
try:
userperms = Permission.objects.get(user=pytitionuser, organization=org)
except Permission.DoesNotExist:
messages.error(request, _("Fatal error, you don't have permissions attached to you for this organization"))
return redirect("org_dashboard", org.slugname)
if pytitionuser not in org.members.all():
messages.error(request, _("You are not part of this organization"))
return redirect("user_dashboard")
if not userperms.can_modify_permissions:
messages.error(request, _("You are not allowed to modify this organization members' permissions"))
return redirect("org_edit_user_perms", orgslugname, user_name)
if request.method == "POST":
error = False
post = request.POST
permissions.can_remove_members = post.get('can_remove_members', '') == 'on'
permissions.can_add_members = post.get('can_add_members', '') == 'on'
permissions.can_create_petitions = post.get('can_create_petitions', '') == 'on'
permissions.can_modify_petitions = post.get('can_modify_petitions', '') == 'on'
permissions.can_delete_petitions = post.get('can_delete_petitions', '') == 'on'
permissions.can_create_templates = post.get('can_create_templates', '') == 'on'
permissions.can_modify_templates = post.get('can_modify_templates', '') == 'on'
permissions.can_delete_templates = post.get('can_delete_templates', '') == 'on'
permissions.can_view_signatures = post.get('can_view_signatures', '') == 'on'
permissions.can_modify_signatures = post.get('can_modify_signatures', '') == 'on'
permissions.can_delete_signatures = post.get('can_delete_signatures', '') == 'on'
can_modify_perms = post.get('can_modify_permissions', '') == 'on'
with transaction.atomic():
# if user is dropping his own permissions
if not can_modify_perms and permissions.can_modify_permissions and pytitionuser == member:
# get list of people with can_modify_permissions permission on this org
owners = org.owners
if owners.count() > 1:
permissions.can_modify_permissions = can_modify_perms
else:
if org.members.count() > 1:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only one left who can do this. "
"Give the permission to someone else before removing yours."))
else:
error = True
messages.error(request, _("You cannot remove your ability to change permissions on this "
"Organization because you are the only member left."))
if not error:
permissions.can_modify_permissions = can_modify_perms
messages.success(request, _("Permissions successfully changed!"))
permissions.save()
return redirect("org_edit_user_perms", orgslugname, user_name)
WizardTemplates = {"step1": "petition/new_petition_step1.html",
"step2": "petition/new_petition_step2.html",
"step3": "petition/new_petition_step3.html"}
WizardForms = [("step1", PetitionCreationStep1),
("step2", PetitionCreationStep2),
("step3", PetitionCreationStep3)]
# Class Based Controller
# PATH : subroutes of /wizard
@method_decorator(login_required, name='dispatch')
class PetitionCreationWizard(SessionWizardView):
def get_template_names(self):
return [WizardTemplates[self.steps.current]]
def get_form_initial(self, step):
if step == "step2":
use_template = False
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
org = Organization.objects.get(slugname=orgslugname)
else:
pytitionuser = get_session_user(self.request)
# Use a specific template if its id is given
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if org_petition:
if template in org.petitiontemplate_set.all():
return {'message': template.text}
else:
if template in pytitionuser.petitiontemplate_set.all():
return {'message': template.text}
# if no template id is given, check for default templates
if org_petition:
if org.default_template is not None:
template = org.default_template
use_template = True
elif pytitionuser.default_template is not None:
template = pytitionuser.default_template
use_template = True
if use_template:
return {'message': template.text}
return self.initial_dict.get(step, {})
def get_form_kwargs(self, step=None):
if step == "step1":
org_petition = "orgslugname" in self.kwargs
if org_petition:
orgslugname = self.kwargs['orgslugname']
kwargs = {"orgslugname": orgslugname}
else:
pytitionuser = get_session_user(self.request)
kwargs = {"user_name": pytitionuser.user.username}
return kwargs
else:
return {}
def done(self, form_list, **kwargs):
org_petition = "orgslugname" in self.kwargs
title = self.get_cleaned_data_for_step("step1")["title"]
message = self.get_cleaned_data_for_step("step2")["message"]
publish = self.get_cleaned_data_for_step("step3")["publish"]
pytitionuser = get_session_user(self.request)
_redirect = self.request.POST.get('redirect', '')
if org_petition:
orgslugname = self.kwargs['orgslugname']
try:
org = Organization.objects.get(slugname=orgslugname)
except Organization.DoesNotExist:
messages.error(self.request, _("Cannot find this organization"))
return redirect("user_dashboard")
#raise Http404(_("Organization does not exist"))
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return redirect("org_dashboard", orgslugname)
if pytitionuser in org.members.all() and permissions.can_create_petitions:
#FIXME I think new here is better than create
petition = Petition.objects.create(title=title, text=message, org=org)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in org.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to your organization"))
return redirect("org_dashboard", orgslugname)
if publish:
petition.publish()
if _redirect and _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("org_dashboard", orgslugname)
else:
messages.error(self.request, _("You don't have the permission to create a new petition in this Organization"))
return redirect("org_dashboard", orgslugname)
else:
petition = Petition.objects.create(title=title, text=message, user=pytitionuser)
if "template_id" in self.kwargs:
template = PetitionTemplate.objects.get(pk=self.kwargs['template_id'])
if template in pytitionuser.petitiontemplate_set.all():
petition.prepopulate_from_template(template)
petition.save()
else:
messages.error(self.request, _("This template does not belong to you"))
return redirect("user_dashboard")
if publish:
petition.publish()
if _redirect and _redirect == '1':
return redirect("edit_petition", petition.id)
else:
return redirect("user_dashboard")
def get_context_data(self, form, **kwargs):
org_petition = "orgslugname" in self.kwargs
context = super(PetitionCreationWizard, self).get_context_data(form=form, **kwargs)
if org_petition:
base_template = 'petition/org_base.html'
try:
org = Organization.objects.get(slugname=self.kwargs['orgslugname'])
except Organization.DoesNotExist:
raise Http404(_("Organization does not exist"))
else:
base_template = 'petition/user_base.html'
pytitionuser = get_session_user(self.request)
context.update({'user': pytitionuser,
'base_template': base_template})
if org_petition:
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
return HttpResponse(
_("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')"
.format(orgname=org.name)), status=500)
context.update({'org': org,
'user_permissions': permissions})
if self.steps.current == "step3":
context.update(self.get_cleaned_data_for_step("step1"))
context.update(self.get_cleaned_data_for_step("step2"))
return context
# /<int:petition_id>/delete
# Delete a petition
@login_required
def petition_delete(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else: # an organization owns the petition
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_delete_petitions:
petition.delete()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
# /<int:petition_id>/publish
# Publish a petition
@login_required
def petition_publish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.publish()
return JsonResponse({})
else:
# Petition owned by someone else
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.publish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
# /<int:petition_id>/unpublish
# Unpublish a petition
@login_required
def petition_unpublish(request, petition_id):
pytitionuser = get_session_user(request)
petition = petition_from_id(petition_id)
if petition.owner_type == "user":
if petition.user == pytitionuser:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
else:
# Check if the user has permission over this org
try:
userperms = Permission.objects.get(organization=petition.org, user=pytitionuser)
if userperms.can_modify_petitions:
petition.unpublish()
return JsonResponse({})
else:
return JsonResponse({}, status=403)
except Permission.DoesNotExist:
return JsonResponse({}, status=403)
# /<int:petition_id>/edit
# Edit a petition
@login_required
def edit_petition(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
if not petition.is_allowed_to_edit(pytitionuser):
messages.error(request, _("You are not allowed to edit this petition"))
return redirect("user_dashboard")
submitted_ctx = {
'content_form_submitted': False,
'email_form_submitted': False,
'social_network_form_submitted': False,
'newsletter_form_submitted': False,
}
if request.method == "POST":
if 'content_form_submitted' in request.POST:
submitted_ctx['content_form_submitted'] = True
content_form = ContentFormPetition(request.POST)
if content_form.is_valid():
petition.title = content_form.cleaned_data['title']
petition.target = content_form.cleaned_data['target']
petition.text = content_form.cleaned_data['text']
petition.side_text = content_form.cleaned_data['side_text']
petition.footer_text = content_form.cleaned_data['footer_text']
petition.footer_links = content_form.cleaned_data['footer_links']
petition.sign_form_footer = content_form.cleaned_data['sign_form_footer']
petition.save()
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
if 'email_form_submitted' in request.POST:
submitted_ctx['email_form_submitted'] = True
email_form = EmailForm(request.POST)
if email_form.is_valid():
petition.confirmation_email_reply = email_form.cleaned_data['confirmation_email_reply']
petition.save()
else:
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
if 'social_network_form_submitted' in request.POST:
submitted_ctx['social_network_form_submitted'] = True
social_network_form = SocialNetworkForm(request.POST)
if social_network_form.is_valid():
petition.twitter_description = social_network_form.cleaned_data['twitter_description']
petition.twitter_image = social_network_form.cleaned_data['twitter_image']
petition.org_twitter_handle = social_network_form.cleaned_data['org_twitter_handle']
petition.save()
else:
social_network_form = SocialNetworkForm({f: getattr(petition, f) for f in SocialNetworkForm.base_fields})
if 'newsletter_form_submitted' in request.POST:
submitted_ctx['newsletter_form_submitted'] = True
newsletter_form = NewsletterForm(request.POST)
if newsletter_form.is_valid():
petition.has_newsletter = newsletter_form.cleaned_data['has_newsletter']
petition.newsletter_text = newsletter_form.cleaned_data['newsletter_text']
petition.newsletter_subscribe_http_data = newsletter_form.cleaned_data['newsletter_subscribe_http_data']
petition.newsletter_subscribe_http_mailfield = newsletter_form.cleaned_data['newsletter_subscribe_http_mailfield']
petition.newsletter_subscribe_http_url = newsletter_form.cleaned_data['newsletter_subscribe_http_url']
petition.newsletter_subscribe_mail_subject = newsletter_form.cleaned_data['newsletter_subscribe_mail_subject']
petition.newsletter_subscribe_mail_from = newsletter_form.cleaned_data['newsletter_subscribe_mail_from']
petition.newsletter_subscribe_mail_to = newsletter_form.cleaned_data['newsletter_subscribe_mail_to']
petition.newsletter_subscribe_method = newsletter_form.cleaned_data['newsletter_subscribe_method']
petition.newsletter_subscribe_mail_smtp_host = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_host']
petition.newsletter_subscribe_mail_smtp_port = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_port']
petition.newsletter_subscribe_mail_smtp_user = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_user']
petition.newsletter_subscribe_mail_smtp_password = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_password']
petition.newsletter_subscribe_mail_smtp_tls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_tls']
petition.newsletter_subscribe_mail_smtp_starttls = newsletter_form.cleaned_data['newsletter_subscribe_mail_smtp_starttls']
petition.save()
else:
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
if 'style_form_submitted' in request.POST:
submitted_ctx['style_form_submitted'] = True
style_form = StyleForm(request.POST)
if style_form.is_valid():
petition.bgcolor = style_form.cleaned_data['bgcolor']
petition.linear_gradient_direction = style_form.cleaned_data['linear_gradient_direction']
petition.gradient_from = style_form.cleaned_data['gradient_from']
petition.gradient_to = style_form.cleaned_data['gradient_to']
petition.save()
else:
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
else:
content_form = ContentFormPetition({f: getattr(petition, f) for f in ContentFormPetition.base_fields})
style_form = StyleForm({f: getattr(petition, f) for f in StyleForm.base_fields})
email_form = EmailForm({f: getattr(petition, f) for f in EmailForm.base_fields})
social_network_form = SocialNetworkForm({f: getattr(petition, f) for f in SocialNetworkForm.base_fields})
newsletter_form = NewsletterForm({f: getattr(petition, f) for f in NewsletterForm.base_fields})
ctx = {'user': pytitionuser,
'content_form': content_form,
'style_form': style_form,
'email_form': email_form,
'social_network_form': social_network_form,
'newsletter_form': newsletter_form,
'petition': petition}
url_prefix = request.scheme + "://" + request.get_host()
if petition.owner_type == "org":
permissions = Permission.objects.get(organization=petition.org, user=pytitionuser)
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'orgslugname': petition.org.slugname,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'org': petition.org,
'user_permissions': permissions,
'base_template': 'petition/org_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
else:
example_url = url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': _("save-the-kittens-from-bad-wolf")})
slug_prefix = (url_prefix + reverse("slug_show_petition",
kwargs={'username': pytitionuser.user.username,
'petitionname': 'toto'})).rsplit('/', 1)[0]
ctx.update({'base_template': 'petition/user_base.html',
'example_url': example_url,
'slug_prefix': slug_prefix})
ctx.update(submitted_ctx)
return render(request, "petition/edit_petition.html", ctx)
# /<int:petition_id>/show_signatures
# Show the signatures of a petition
@login_required
def show_signatures(request, petition_id):
petition = petition_from_id(petition_id)
pytitionuser = get_session_user(request)
ctx = {}
if petition.owner_type == "user":
base_template = 'petition/user_base.html'
else:
org = petition.org
base_template = 'petition/org_base.html'
other_orgs = pytitionuser.organization_set.filter(~Q(name=org.name)).all()
if pytitionuser not in org.members.all():
messages.error(request, _("You are not member of the following organization: \'{}\'".format(org.name)))
return redirect("user_dashboard")
try:
permissions = Permission.objects.get(organization=org, user=pytitionuser)
except Permission.DoesNotExist:
messages.error(request, _("Internal error, cannot find your permissions attached to this organization (\'{orgname}\')".format(orgname=org.name)))
return redirect("user_dashboard")
if not permissions.can_view_signatures:
messages.error(request, _("You are not allowed to view signatures in this organization"))
return redirect("org_dashboard", org.slugname)
ctx.update({'org': org, 'other_orgs': other_orgs,
'user_permissions': permissions})
if request.method == "POST":
action = request.POST.get('action', '')
selected_signature_ids = request.POST.getlist('signature_id', '')
failed = False
if selected_signature_ids and action:
selected_signatures = Signature.objects.filter(pk__in=selected_signature_ids)
if action == "delete":
for s in selected_signatures:
pet = s.petition
if pet.org: # Petition is owned by an org, we check for rights
if pet.org.is_allowed_to(pytitionuser, 'can_delete_signatures'):
s.delete()
else:
failed = True
else: # Petition is owned by a user, we check it's the one asking for deletion
if pet.user == pytitionuser:
s.delete()
else:
failed = True
if failed:
messages.error(request, _("You don't have permission to delete some or all of selected signatures"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send":
for s in selected_signatures:
try:
send_confirmation_email(request, s)
                    except Exception:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
if action == "re-send-all":
selected_signatures = Signature.objects.filter(petition=petition)
for s in selected_signatures:
try:
send_confirmation_email(request, s)
                    except Exception:
failed = True
if failed:
messages.error(request, _("An error happened while trying to re-send confirmation emails"))
else:
messages.success(request, _("You successfully deleted all selected signatures"))
return redirect("show_signatures", petition_id)
signatures = petition.signature_set.all()
ctx.update({'petition': petition, 'user': pytitionuser,
'base_template': base_template,
'signatures': signatures})
return render(request, "petition/signature_data.html", ctx)
# /account_settings
# Show settings for the user accounts
@login_required
def account_settings(request):
pytitionuser = get_session_user(request)
submitted_ctx = {
'update_info_form_submitted': False,
'delete_account_form_submitted': False,
'password_change_form_submitted': False
}
if request.method == "POST":
if 'update_info_form_submitted' in request.POST:
update_info_form = UpdateInfoForm(pytitionuser.user, request.POST)
submitted_ctx['update_info_form_submitted'] = True
if update_info_form.is_valid():
update_info_form.save()
else:
update_info_form = get_update_form(pytitionuser.user)
if 'delete_account_form_submitted' in request.POST:
delete_account_form = DeleteAccountForm(request.POST)
submitted_ctx['delete_account_form_submitted'] = True
if delete_account_form.is_valid():
pytitionuser.drop()
return redirect("index")
else:
delete_account_form = DeleteAccountForm()
if 'password_change_form_submitted' in request.POST:
password_change_form = PasswordChangeForm(pytitionuser.user, request.POST)
submitted_ctx['password_change_form_submitted'] = True
if password_change_form.is_valid():
password_change_form.save()
messages.success(request, _("You successfully changed your password!"))
else:
password_change_form = PasswordChangeForm(pytitionuser.user)
else:
update_info_form = get_update_form(pytitionuser.user)
delete_account_form = DeleteAccountForm()
password_change_form = PasswordChangeForm(pytitionuser.user)
orgs = pytitionuser.organization_set.all()
# Checking if the user is allowed to leave the organisation
for org in orgs:
if org.members.count() < 2:
org.leave = False
else:
# More than one user, we need to check owners
owners = org.owners.all()
if owners.count() == 1 and pytitionuser in owners:
org.leave = False
else:
org.leave = True
ctx = {'user': pytitionuser,
'update_info_form': update_info_form,
'delete_account_form': delete_account_form,
'password_change_form': password_change_form,
'base_template': 'petition/user_base.html',
'orgs': orgs}
ctx.update(submitted_ctx)
return render(request, "petition/account_settings.html", ctx)
# GET/POST /org/create
# Create a new organization
@login_required
def org_create(request):
user = get_session_user(request)
ctx = {'user': user}
if request.method == "POST":
form = OrgCreationForm(request.POST)
if form.is_valid():
org = form.save()
org.members.add(user)
perm = Permission.objects.get(organization=org)
perm.set_all(True)
messages.success(request, _("You successfully created organization '{}'".format(org.name)))
return redirect('user_dashboard')
else:
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
form = OrgCreationForm()
ctx.update({'form': form})
return render(request, "petition/org_create.html", ctx)
# GET /org/<slug:orgslugname>/<slug:petitionname>
# Show a petition
def slug_show_petition(request, orgslugname=None, username=None, petitionname=None):
try:
pytitionuser = get_session_user(request)
    except Exception:
pytitionuser = None
if orgslugname:
try:
org = Organization.objects.get(slugname=orgslugname)
slug = SlugModel.objects.get(slug=petitionname, petition__org=org)
except (Organization.DoesNotExist, SlugModel.DoesNotExist):
raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
else:
try:
user = PytitionUser.objects.get(user__username=username)
slug = SlugModel.objects.get(slug=petitionname, petition__user=user)
        except (PytitionUser.DoesNotExist, SlugModel.DoesNotExist):
            raise Http404(_("Sorry, we are not able to find this petition"))
petition = slug.petition
sign_form = SignatureForm(petition=petition)
ctx = {"user": pytitionuser, "petition": petition, "form": sign_form,
'meta': petition_detail_meta(request, petition.id)}
return render(request, "petition/petition_detail.html", ctx)
# /<int:petition_id>/add_new_slug
# Add a new slug for a petition
@login_required
def add_new_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
    except Exception:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if request.method == "POST":
        slugtexts = request.POST.getlist('slugtext')
        if not slugtexts:
messages.error(request, _("You entered an empty slug text"))
else:
if petition.is_allowed_to_edit(pytitionuser):
for slugtext in slugtexts:
try:
petition.add_slug(slugtext)
petition.save()
messages.success(request, _("Successful addition of the slug '{}'!".format(slugtext)))
except IntegrityError:
messages.error(request, _("The slug '{}' already exists!".format(slugtext)))
except ValidationError as v:
for message in v.messages:
messages.error(request, message)
else:
messages.error(request, _("You don't have the permission to modify petitions"))
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
else:
return redirect("user_dashboard")
# /<int:petition_id>/del_slug
# Remove a slug from a petition
@login_required
def del_slug(request, petition_id):
pytitionuser = get_session_user(request)
try:
petition = petition_from_id(petition_id)
    except Exception:
messages.error(request, _("This petition does not exist (anymore?)."))
return redirect("user_dashboard")
if petition.is_allowed_to_edit(pytitionuser):
slug_id = request.GET.get('slugid', None)
if not slug_id:
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
slug = SlugModel.objects.get(pk=slug_id)
petition.del_slug(slug)
petition.save()
messages.success(request, _("Successful deletion of a slug"))
else:
messages.error(request, _("You don't have the permission to modify petitions"))
if petition.owner_type == "org":
return redirect("org_dashboard", petition.owner.slugname)
else:
return redirect("user_dashboard")
return redirect(reverse("edit_petition", args=[petition_id]) + "#tab_social_network_form")
|
fallen/Pytition
|
pytition/petition/views.py
|
Python
|
bsd-3-clause
| 64345
|
# Copyright (c) 2015 David Wilson
# This file is part of Icarus.
# Icarus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Icarus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Icarus. If not, see <http://www.gnu.org/licenses/>.
from Interactors.PlatformInteractors import GetPlatformsInteractor
from Interactors.Interactor import Interactor
from Tests.Interactors.InteractorTestBase import InteractorTestBase
class TestGetPlatformsInteractor(InteractorTestBase):
"""Unit tests for the GetPlatformsInteractor class"""
def setUp(self):
"""setUp function for all unit tests in this class"""
super().setUp()
self.__target = GetPlatformsInteractor()
self.__target.persistence = self.persistence
def test_is_interactor(self):
"""Test that GetPlatformsInteractor is an instance of Interactor"""
self.__target = GetPlatformsInteractor()
self.assertIsInstance(self.__target, Interactor)
def test_execute_calls_persistence(self):
"""Test that calling GetPlatformsInteractor.execute causes persistence.get_platforms to be called"""
self.__target.execute()
self.assertTrue(self.persistence.get_platforms.called)
|
jeroanan/GameCollection
|
Tests/Interactors/Platform/TestGetPlatformsInteractor.py
|
Python
|
gpl-3.0
| 1654
|
import os
from django.conf import settings
from openflow.optin_manager.sfa.trust.gid import GID
from openflow.optin_manager.sfa.trust.credential import Credential
from openflow.optin_manager.sfa.trust.certificate import Certificate, Keypair, convert_public_key
from openflow.optin_manager.sfa.trust.gid import create_uuid
from openflow.optin_manager.sfa.trust.auth import Auth
from openflow.optin_manager.sfa.trust.hierarchy import Hierarchy
from openflow.optin_manager.sfa.util.xrn import Xrn, get_authority, hrn_to_urn, urn_to_hrn
class MetaSfaRegistry:
sfa_gid_location = '/sfa/jfed_roots/ocf_of.gid'
def __init__(self, config=None):
self.gid = settings.CONF_DIR + self.sfa_gid_location
    def get_trusted_certs(self, cert=None):
        with open(self.gid, 'r') as f:
            gid = f.read()
        return [gid]
|
dana-i2cat/felix
|
optin_manager/src/python/openflow/optin_manager/sfa/managers/MetaSfaRegistry.py
|
Python
|
apache-2.0
| 852
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name": "Account Journal Sequence",
"version": "1.0",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
"category": "Accounting",
"description": """
Account Journal Sequence
========================
Adds a sequence field on account journals; it is taken into account when choosing journals in different models.
""",
'depends': [
'account',
],
'data': [
'account_journal_view.xml',
],
'demo': [],
'test': [],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
maljac/odoo-addons
|
account_journal_sequence/__openerp__.py
|
Python
|
agpl-3.0
| 1536
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, MultiIndex
from pandas.util.testing import ensure_clean
from pandas.io.excel import ExcelWriter, _XlwtWriter
xlwt = pytest.importorskip("xlwt")
pytestmark = pytest.mark.parametrize("ext,", ['.xls'])
def test_excel_raise_error_on_multiindex_columns_and_no_index(ext):
# MultiIndex as columns is not yet implemented 9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(10, 3), columns=cols)
with pytest.raises(NotImplementedError):
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_excel_multiindex_columns_and_index_true(ext):
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = pd.DataFrame(np.random.randn(10, 3), columns=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=True)
def test_excel_multiindex_index(ext):
# MultiIndex as index works so assert no error #9794
cols = MultiIndex.from_tuples([('site', ''),
('2014', 'height'),
('2014', 'weight')])
df = DataFrame(np.random.randn(3, 10), index=cols)
with ensure_clean(ext) as path:
df.to_excel(path, index=False)
def test_to_excel_styleconverter(ext):
hstyle = {"font": {"bold": True},
"borders": {"top": "thin",
"right": "thin",
"bottom": "thin",
"left": "thin"},
"alignment": {"horizontal": "center", "vertical": "top"}}
xls_style = _XlwtWriter._convert_to_style(hstyle)
assert xls_style.font.bold
assert xlwt.Borders.THIN == xls_style.borders.top
assert xlwt.Borders.THIN == xls_style.borders.right
assert xlwt.Borders.THIN == xls_style.borders.bottom
assert xlwt.Borders.THIN == xls_style.borders.left
assert xlwt.Alignment.HORZ_CENTER == xls_style.alignment.horz
assert xlwt.Alignment.VERT_TOP == xls_style.alignment.vert
def test_write_append_mode_raises(ext):
msg = "Append mode is not supported with xlwt!"
with ensure_clean(ext) as f:
with pytest.raises(ValueError, match=msg):
ExcelWriter(f, engine='xlwt', mode='a')
|
cbertinato/pandas
|
pandas/tests/io/excel/test_xlwt.py
|
Python
|
bsd-3-clause
| 2485
|
import micropython
@micropython.native
def native_x(x):
print(x + 1)
@micropython.native
def native_y(x):
print(x + 1)
@micropython.native
def native_z(x):
print(x + 1)
|
trezor/micropython
|
ports/qemu-arm/test-frzmpy/native_frozen_align.py
|
Python
|
mit
| 184
|
"""Tests for tensorflow.kernels.edit_distance_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
def ConstantOf(x):
x = np.asarray(x)
# Convert to int64 if it's not a string
if x.dtype.char != "S": x = np.asarray(x, dtype=np.int64)
return tf.constant(x)
class EditDistanceTest(tf.test.TestCase):
def _testEditDistance(self, hypothesis, truth, normalize,
expected_output, expected_err_re=None):
# hypothesis and truth are (index, value, shape) tuples
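    # e.g. indices=[[0, 0], [0, 1]], values=[0, 1], shape=[2, 2] encodes a
    # batch of two sequences: entry 0 holds [0, 1] and entry 1 is empty.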
hypothesis_st = tf.SparseTensor(*[ConstantOf(x) for x in hypothesis])
truth_st = tf.SparseTensor(*[ConstantOf(x) for x in truth])
edit_distance = tf.edit_distance(
hypothesis=hypothesis_st, truth=truth_st, normalize=normalize)
with self.test_session():
if expected_err_re is None:
# Shape inference figures out the shape from the shape variables
expected_shape = [
max(h, t) for h, t in zip(hypothesis[2], truth[2])[:-1]]
self.assertEqual(edit_distance.get_shape(), expected_shape)
output = edit_distance.eval()
self.assertAllClose(output, expected_output)
else:
with self.assertRaisesOpError(expected_err_re):
edit_distance.eval()
def testEditDistanceNormalized(self):
hypothesis_indices = [[0, 0], [0, 1],
[1, 0], [1, 1]]
hypothesis_values = [0, 1,
1, -1]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0],
[1, 0], [1, 1]]
truth_values = [0,
1, 1]
truth_shape = [2, 2]
expected_output = [1.0, 0.5]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceUnnormalized(self):
hypothesis_indices = [[0, 0],
[1, 0], [1, 1]]
hypothesis_values = [10,
10, 11]
hypothesis_shape = [2, 2]
truth_indices = [[0, 0], [0, 1],
[1, 0], [1, 1]]
truth_values = [1, 2,
1, -1]
truth_shape = [2, 3]
expected_output = [2.0, 2.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_output)
def testEditDistanceProperDistance(self):
# In this case, the values are individual characters stored in the
# SparseTensor (type DT_STRING)
hypothesis_indices = ([[0, i] for i, _ in enumerate("algorithm")] +
[[1, i] for i, _ in enumerate("altruistic")])
hypothesis_values = [x for x in "algorithm"] + [x for x in "altruistic"]
hypothesis_shape = [2, 11]
truth_indices = ([[0, i] for i, _ in enumerate("altruistic")] +
[[1, i] for i, _ in enumerate("algorithm")])
truth_values = [x for x in "altruistic"] + [x for x in "algorithm"]
truth_shape = [2, 11]
expected_unnormalized = [6.0, 6.0]
expected_normalized = [6.0/len("altruistic"),
6.0/len("algorithm")]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=False,
expected_output=expected_unnormalized)
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_normalized)
def testEditDistance3D(self):
hypothesis_indices = [[0, 0, 0],
[1, 0, 0]]
hypothesis_values = [0, 1]
hypothesis_shape = [2, 1, 1]
truth_indices = [[0, 1, 0],
[1, 0, 0],
[1, 1, 0]]
truth_values = [0, 1, 1]
truth_shape = [2, 2, 1]
expected_output = [[np.inf, 1.0], # (0,0): no truth, (0,1): no hypothesis
[0.0, 1.0]] # (1,0): match, (1,1): no hypothesis
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceMissingHypothesis(self):
hypothesis_indices = np.empty((0, 2), dtype=np.int64)
hypothesis_values = []
hypothesis_shape = [1, 0]
truth_indices = [[0, 0]]
truth_values = [0]
truth_shape = [1, 1]
expected_output = [1.0]
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
def testEditDistanceMissingTruth(self):
hypothesis_indices = [[0, 0]]
hypothesis_values = [0]
hypothesis_shape = [1, 1]
truth_indices = np.empty((0, 2), dtype=np.int64)
truth_values = []
truth_shape = [1, 0]
expected_output = [np.inf] # Normalized, divide by zero
self._testEditDistance(
hypothesis=(hypothesis_indices, hypothesis_values, hypothesis_shape),
truth=(truth_indices, truth_values, truth_shape),
normalize=True,
expected_output=expected_output)
if __name__ == "__main__":
tf.test.main()
|
arunhotra/tensorflow
|
tensorflow/python/kernel_tests/edit_distance_op_test.py
|
Python
|
apache-2.0
| 5651
|
from celery import Celery
from actions.fetch import Fetch
from actions.pickup import Pickup
from actions.judge import Judge
from actions.inform import InformTriche
from celery.bin.celery import result
from celery.result import AsyncResult
from sqlalchemy import create_engine
import config
from models import Project
from models import User
from models import Task
from models import Project_Student
from models import Template
import logging
from sqlalchemy.exc import IntegrityError
import json
from datetime import datetime
from celery import chord
from celery import chain
app = Celery('tasks')
app.config_from_object("workerconfig")
engine = create_engine(config.SQL_DB_URI, echo=True, pool_recycle=3600)
from sqlalchemy.orm import sessionmaker
Session = sessionmaker()
Session.configure(bind=engine)
@app.task
def add(x, y):
return x + y
@app.task
def inform_triche(task_id):
session = Session()
try:
task = session.query(Task).get(task_id)
if not task:
raise Exception("This task does not exist.")
project = task.project.serialize
return InformTriche(project).result
except IntegrityError:
session.rollback()
finally:
session.close()
return False
@app.task
def pickup_task(task_id):
session = Session()
try:
task = session.query(Task).get(task_id)
if not task:
raise Exception("This task does not exist.")
project = task.project.serialize
#return chord((retrieve_scm.s(task_id, project, u["user"]["login"]) for u in project["students"]),
# pickup_complete.s(task_id, project)).apply_async()
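        # chord(header)(body): the retrieve_scm subtasks run in parallel, then
        # pickup_complete is invoked exactly once with the list of their
        # results prepended to its arguments.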
return chord((retrieve_scm.s(task_id, project, u["user"]["login"]) for u in project["students"]),
pickup_complete.s(task_id, project))()
except IntegrityError:
session.rollback()
finally:
session.close()
@app.task
def scheduled_launch(task_id, token):
session = Session()
try:
obj = session.query(Task).get(task_id)
obj.status = "ongoing"
session.add(obj)
session.commit()
chain(fetch.si(token, task_id), pickup_task.si(task_id))()
return True
except Exception as e:
logging.warning(e)
session.rollback()
finally:
session.close()
return False
@app.task
def scheduled_judge(task_id):
session = Session()
try:
task = session.query(Task).get(task_id)
if not task:
raise Exception("This task does not exist.")
project = task.project.serialize
stask = task.serialize
if task.project.template.call_judge:
logging.warning("Time to call Judge!")
j = Judge(project, stask)
j.run()
except Exception as e:
logging.warning(e)
finally:
session.close()
return True
@app.task
def scheduled_launch_done(task_id):
session = Session()
try:
obj = session.query(Task).get(task_id)
obj.status = "succeed"
session.add(obj)
session.commit()
except Exception as e:
logging.warning(e)
session.rollback()
finally:
session.close()
return True
@app.task
def scheduled_triche(task_id):
    session = Session()
    try:
        current_task = session.query(Task).get(task_id)
        todos = session.query(Task).join(Project).filter(Project.template_id==1).order_by(Task.launch_date).filter(Task.status != 'succeed').filter(Task.id != current_task.id).all()
        if current_task.type == 'auto' and len(todos) == 0:
            project = current_task.project.serialize
            return InformTriche(project).result
    except Exception as e:
        logging.warning(e)
        session.rollback()
    finally:
        session.close()
    return True
@app.task
def pickup_complete(repos, task_id, project):
#chain (add.s(4, 4), mul.s(8), mul.s(10))
# archive, distribute, correction, triche
p = Pickup(task_id, project)
p.archive()
p.distribute()
p.clean_all()
scheduled_launch_done(task_id)
scheduled_judge(task_id)
scheduled_triche(task_id)
return None
@app.task
def retrieve_scm(task_id, project, user):
begin = datetime.now()
p = Pickup(task_id, project)
succeed, repo = p.one(user)
session = Session()
try:
obj = session.query(Project_Student).join(User).filter(Project_Student.project_id==project["id"]).filter(User.login==user).first()
obj.status = "Succeed" if succeed else "Failed"
obj.logs = repo._messages
obj.begin_date = begin
obj.end_date = datetime.now()
session.add(obj)
session.commit()
except Exception as e:
logging.warning(e)
session.rollback()
finally:
session.close()
return succeed
@app.task()
def fetch_onerror(uuid, token, retry):
print('Task %s raised exception' % uuid)
if retry < 2:
print("Retry fetch(%s): #%s" % (token, retry))
fetch.apply_async(args=[token], link_error=fetch_onerror.s(token, retry + 1),
countdown=120)
# relaunch T.apply_async(countdown=60
@app.task
def fetch(token, task_id=None):
session = Session()
try:
obj = Fetch(token)
print(obj.result)
t = session.query(Project).filter_by(token=token).first()
print(t)
datas = obj.result
if not t:
logging.info("Create new project")
tpl = session.query(Template).filter_by(codemodule=datas["module_code"], slug=datas["slug"]).first()
if not tpl:
logging.info("Create new Template")
tpl = Template(codemodule=datas["module_code"], slug=datas["slug"])
# repository_name, call*, school, ...
session.add(tpl)
t = Project(template=tpl)
session.add(t)
t.token = datas["token"]
t.scolaryear = datas["scolaryear"]
t.module_title = datas["module_title"]
t.module_code = datas["module_code"]
t.instance_code = datas["instance_code"]
t.location = datas["location"]
t.title = datas["title"]
t.deadline = datetime.strptime(datas["deadline"], "%Y-%m-%d %H:%M:%S")
t.promo = datas["promo"]
t.groups = json.dumps(datas["groups"])
t.last_update = datetime.now()
resp = []
for user in datas["resp"]:
u = session.query(User).filter_by(login=user["login"]).first()
if not u:
u = User(firstname=user["firstname"], lastname=user["lastname"],
login=user["login"], old_login=user["old_login"])
session.add(u)
resp.append(u)
t.resp = resp
template_resp = []
for user in datas["template_resp"]:
u = session.query(User).filter_by(login=user["login"]).first()
if not u:
u = User(firstname=user["firstname"], lastname=user["lastname"],
login=user["login"], old_login=user["old_login"])
session.add(u)
template_resp.append(u)
t.template_resp = template_resp
assistants = []
for user in datas["assistants"]:
u = session.query(User).filter_by(login=user["login"]).first()
if not u:
u = User(firstname=user["firstname"], lastname=user["lastname"],
login=user["login"], old_login=user["old_login"])
session.add(u)
assistants.append(u)
t.assistants = assistants
t.students = []
for user in datas["students"]:
u = session.query(User).filter_by(login=user["login"]).first()
if not u:
u = User(firstname=user["firstname"], lastname=user["lastname"],
login=user["login"], old_login=user["old_login"])
session.add(u)
t.students.append(Project_Student(user=u, project=t))
session.add(t)
need_new = True
for task in t.tasks:
if task.type == "auto":
need_new = False
if task.type == "auto" and task.status != "ongoing" and task.id != task_id and task_id != 0:
task.launch_date = t.deadline
task.status = "todo"
session.add(task)
if need_new:
session.add(Task(type="auto", launch_date=t.deadline, project=t))
session.commit()
return t.serialize
except IntegrityError as e:
session.rollback()
except Exception as e:
session.rollback()
logging.error(str(e))
finally:
session.close()
return False
|
steven-martins/ramassage.epitech.eu
|
tasks.py
|
Python
|
mit
| 8744
|
#
# network_gui.py: Network configuration dialog
#
# Copyright (C) 2000, 2001, 2002, 2003, 2004, 2005, 2006, Red Hat, Inc.
# 2007, 2008, 2009
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Michael Fulbright <msf@redhat.com>
# David Cantrell <dcantrell@redhat.com>
#
import string
from iw_gui import *
from pyanaconda import gui
from pyanaconda import network
from pyanaconda import iutil
from pyanaconda.flags import flags
import gobject
import subprocess
import gtk
from pyanaconda import isys
from pyanaconda.constants import *
import gettext
_ = lambda x: gettext.ldgettext("anaconda", x)
class NetworkWindow(InstallWindow):
def getScreen(self, anaconda):
self.intf = anaconda.intf
self.anaconda = anaconda
self.hostname = network.getDefaultHostname(anaconda)
# load the UI
(self.xml, self.align) = gui.getGladeWidget("network.glade",
"network_align")
self.icon = self.xml.get_widget("icon")
self.hostnameEntry = self.xml.get_widget("hostnameEntry")
self.hostnameEntry.set_text(self.hostname)
self.netconfButton = self.xml.get_widget("netconfButton")
self.netconfButton.connect("clicked", self._setupNetwork)
if len(self.anaconda.network.netdevices) == 0 or flags.imageInstall:
self.netconfButton.set_sensitive(False)
# pressing Enter in confirm == clicking Next
self.hostnameEntry.connect("activate",
lambda w: self.ics.setGrabNext(1))
# load the icon
gui.readImageFromFile("network.png", image=self.icon)
return self.align
def _setupNetwork(self, *args):
self.intf.enableNetwork(just_setup=True)
def focus(self):
self.hostnameEntry.grab_focus()
def hostnameError(self):
self.hostnameEntry.grab_focus()
raise gui.StayOnScreen
def getNext(self):
hostname = string.strip(self.hostnameEntry.get_text())
herrors = network.sanityCheckHostname(hostname)
if not hostname:
self.intf.messageWindow(_("Error with Hostname"),
_("You must enter a valid hostname for this "
"computer."), custom_icon="error")
self.hostnameError()
if herrors is not None:
self.intf.messageWindow(_("Error with Hostname"),
_("The hostname \"%(hostname)s\" is not "
"valid for the following reason:\n\n"
"%(herrors)s")
% {'hostname': hostname,
'herrors': herrors},
custom_icon="error")
self.hostnameError()
self.anaconda.network.setHostname(hostname)
return None
def NMCEExited(pid, condition, anaconda):
if anaconda:
anaconda.intf.icw.window.set_sensitive(True)
# TODORV: get rid of setting sensitive completely?
def runNMCE(anaconda=None, blocking=True):
if not blocking and anaconda:
anaconda.intf.icw.window.set_sensitive(False)
cmd = ["/usr/bin/nm-connection-editor"]
out = open("/dev/tty5", "w")
try:
proc = subprocess.Popen(cmd, stdout=out, stderr=out)
except Exception as e:
if not blocking and anaconda:
anaconda.intf.icw.window.set_sensitive(True)
import logging
log = logging.getLogger("anaconda")
log.error("Could not start nm-connection-editor: %s" % e)
return None
else:
if blocking:
proc.wait()
else:
gobject.child_watch_add(proc.pid, NMCEExited, data=anaconda, priority=gobject.PRIORITY_DEFAULT)
def selectInstallNetDeviceDialog(network, devices = None):
devs = devices or network.netdevices.keys()
if not devs:
return None
devs.sort()
dialog = gtk.Dialog(_("Select network interface"))
dialog.add_button('gtk-cancel', gtk.RESPONSE_CANCEL)
dialog.add_button('gtk-ok', 1)
dialog.set_position(gtk.WIN_POS_CENTER)
gui.addFrame(dialog)
dialog.vbox.pack_start(gui.WrappingLabel(
_("This requires that you have an active "
"network connection during the installation "
"process. Please configure a network interface.")))
combo = gtk.ComboBox()
cell = gtk.CellRendererText()
combo.pack_start(cell, True)
combo.set_attributes(cell, text = 0)
cell.set_property("wrap-width", 525)
combo.set_size_request(480, -1)
store = gtk.TreeStore(gobject.TYPE_STRING, gobject.TYPE_STRING)
combo.set_model(store)
ksdevice = network.getKSDevice()
if ksdevice:
ksdevice = ksdevice.iface
preselected = None
    for dev in devs:
i = store.append(None)
if not preselected:
preselected = i
desc = network.netdevices[dev].description
if desc:
desc = "%s - %s" %(dev, desc)
else:
desc = "%s" %(dev,)
hwaddr = network.netdevices[dev].get("HWADDR")
if hwaddr:
desc = "%s - %s" %(desc, hwaddr,)
if ksdevice and ksdevice == dev:
preselected = i
store[i] = (desc, dev)
combo.set_active_iter(preselected)
dialog.vbox.pack_start(combo)
dialog.show_all()
rc = dialog.run()
if rc in [gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT]:
install_device = None
else:
active = combo.get_active_iter()
install_device = combo.get_model().get_value(active, 1)
dialog.destroy()
return install_device
def selectSSIDsDialog(devssids):
"""Dialog for access point selection.
devssids - dict iface->[ssid1, ssid2, ssid3, ...]
returns - dict iface->[ssidX] or None on Cancel
"""
# If there are no choices, don't ask
for dev, ssids in devssids.items():
if len(ssids) > 1:
break
else:
return devssids
rv = {}
dialog = gtk.Dialog(_("Select APs"))
dialog.add_button('gtk-cancel', gtk.RESPONSE_CANCEL)
dialog.add_button('gtk-ok', 1)
dialog.set_position(gtk.WIN_POS_CENTER)
gui.addFrame(dialog)
dialog.vbox.pack_start(gui.WrappingLabel(
_("Select APs for wireless devices")))
table = gtk.Table(len(devssids), 2)
table.set_row_spacings(5)
table.set_col_spacings(5)
combos = {}
for i, (dev, ssids) in enumerate(devssids.items()):
label = gtk.Label(dev)
table.attach(label, 0, 1, i, i+1, gtk.FILL, gtk.FILL)
combo = gtk.combo_box_new_text()
for ssid in ssids:
combo.append_text(ssid)
table.attach(combo, 1, 2, i, i+1, gtk.FILL, gtk.FILL)
combo.set_active(0)
combos[dev] = combo
dialog.vbox.pack_start(table)
dialog.show_all()
rc = dialog.run()
# cancel
if rc in [gtk.RESPONSE_CANCEL, gtk.RESPONSE_DELETE_EVENT]:
rv = None
else:
for dev, combo in combos.items():
rv[dev] = [combo.get_active_text()]
dialog.destroy()
return rv
|
masami256/Anaconda-for-ore-ore-kernel
|
pyanaconda/iw/network_gui.py
|
Python
|
gpl-2.0
| 7800
|
from os import path
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
path.join(path.dirname(__file__), '..',
'tardis_portal/templates/').replace('\\', '/'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'tardis.tardis_portal.context_processors'
'.global_contexts',
'tardis.tardis_portal.context_processors'
'.single_search_processor',
'tardis.tardis_portal.context_processors'
'.registration_processor',
'tardis.tardis_portal.context_processors'
'.user_details_processor',
'tardis.tardis_portal.context_processors'
'.google_analytics',
'tardis.tardis_portal.context_processors'
'.user_menu_processor',
],
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
]),
],
},
}
]
|
wettenhj/mytardis
|
tardis/default_settings/templates.py
|
Python
|
gpl-3.0
| 1575
|
## begin license ##
#
# "Meresco PyLucene" contains JVM initialization for pylucene
#
# Copyright (C) 2015 Koninklijke Bibliotheek (KB) http://www.kb.nl
# Copyright (C) 2015 Seecr (Seek You Too B.V.) http://seecr.nl
#
# This file is part of "Meresco PyLucene"
#
# "Meresco PyLucene" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Meresco PyLucene" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Meresco PyLucene"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
|
seecr/meresco-pylucene
|
meresco/__init__.py
|
Python
|
gpl-2.0
| 1077
|
# Gufw 12.10.0 - http://gufw.tuxfamily.org
# Copyright (C) 2008-2011 Marcos Alvarez Costales https://launchpad.net/~costales
#
# Gufw is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Gufw is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Gufw; if not, see http://www.gnu.org/licenses for more
# information.
import commands
import time
import os
import dbus
# Work around a bug in python-distutils-extra auto, fixed in p-d-e rev 258
# org.freedesktop.PolicyKit1
class Firewall():
"""Set or get the Firewall properties"""
WIN_WIDTH = 336
WIN_HEIGHT = 334
WIN_VPANEL = 153
def __init__(self):
bus = dbus.SystemBus()
remote_object = bus.get_object("gufw.Daemon", "/Gufw_daemon")
self.iface = dbus.Interface(remote_object, "gufw.SampleInterface")
self.gufw_logging = "disable"
self.listening_status = "disable"
self.notify_popup = "disable"
self.width = self.WIN_WIDTH
self.height = self.WIN_HEIGHT
self.vpanel = self.WIN_VPANEL
self._read_config_file()
def unlock(self):
"""Unlock by PolicyKit"""
try:
self.iface.fw_unlock()
        except Exception:
return "no_access"
self.status = self.iface.get_status()
self.incoming_policy = self.iface.get_policy("incoming")
self.outgoing_policy = self.iface.get_policy("outgoing")
self.ufw_logging = self.iface.get_ufw_logging()
return "access"
def get_window_size(self):
"""Return the width & height"""
return self.width, self.height
def get_vpanel_pos(self):
"""Return the Vpanel position"""
return self.vpanel
def get_status(self):
"""Get status FW (enable/disable)"""
return self.status
def set_status(self, status):
"""Set status FW (enable/disable)"""
self.status = status
self._add_gufw_log(self.iface.set_status(status))
def get_policy(self, policy):
"""Get Policy (Incoming & Outgoing = allow/deny/reject)"""
if policy == "incoming":
return self.incoming_policy
elif policy == "outgoing":
return self.outgoing_policy
def set_policy(self, direction, policy):
"""Set Policy (Incoming & Outgoing = allow/deny/reject)"""
if direction == "incoming":
if policy == "allow":
self.incoming_policy = "allow"
elif policy == "deny":
self.incoming_policy = "deny"
elif policy == "reject":
self.incoming_policy = "reject"
elif direction == "outgoing":
if policy == "allow":
self.outgoing_policy = "allow"
elif policy == "deny":
self.outgoing_policy = "deny"
elif policy == "reject":
self.outgoing_policy = "reject"
self._add_gufw_log(self.iface.set_policy(direction, policy))
def get_ufw_logging(self):
"""Get logging (enable/disable)"""
return self.ufw_logging
def set_ufw_logging(self, logging):
"""Get log level (off/on/low/medium/high/full)"""
self.ufw_logging = logging
self._add_gufw_log(self.iface.set_ufw_logging(logging))
def get_listening_report(self):
"""Get listening report"""
return self.iface.get_listening_report()
def get_listening_status(self):
"""Get listening status (enable/disable)"""
return self.listening_status
def set_listening_status(self, status):
"""Set listening status (enable/disable)"""
self.listening_status = status
def get_notify_popup(self):
"""Get notify popup status (enable/disable)"""
return self.notify_popup
def set_notify_popup(self, status):
"""Set notify popup status (enable/disable)"""
self.notify_popup = status
def reset_ufw(self):
"""Reset cofig ufw"""
self._add_gufw_log(self.iface.reset_ufw())
def get_gufw_logging(self):
"""Get the Gufw Logging Status (enable/disable)"""
return self.gufw_logging
def set_gufw_logging(self, status):
"""Set the Gufw Logging Status (enable/disable)"""
self.gufw_logging = status
def get_gufw_log(self, log = 'local'):
"""Get Gufw Log"""
return self.iface.get_gufw_log(log)
def _add_gufw_log(self, line):
"""Add a command to Gufw Log"""
self.iface.add_gufw_log(self.gufw_logging, line)
def erase_gufw_log(self):
"""Erase all Gufw Logs"""
self.iface.erase_gufw_log()
def add_rule(self, is_program, insert_number, action, direction, log, protocol, fromip, fromport, toip, toport):
"""Add rule to firewall"""
self._add_gufw_log(self.iface.add_rule(is_program, insert_number, action, direction, log, protocol, fromip, fromport, toip, toport))
def remove_rule(self, number):
"""Remove rule from firewall"""
self._add_gufw_log(self.iface.remove_rule(str(number)))
def get_number_rules(self):
"""Get the actual number of rules"""
return self.iface.get_number_rules()
def get_rule_list(self):
"""Get all List Rules"""
return self.iface.get_rule_list()
def update_config_file(self, width, height, vpanel):
"""Save actual FW config when quitting Gufw"""
self.iface.update_config_file(width, height, vpanel, self.gufw_logging, self.listening_status, self.notify_popup)
self.iface.Exit()
def _read_config_file(self):
"""Get previous values from config file """
file = commands.getstatusoutput("cat /etc/gufw/gufw.cfg")
if file[0] != 0:
return
cfg_file = file[1].split("\n")
for line in cfg_file:
# Width & height
if line.find("sizewin=") != -1:
width_height_split = (line.replace("sizewin=", "")).split("x")
self.width = int(width_height_split[0])
self.height = int(width_height_split[1])
# Vpanel position
if line.find("vpanel=") != -1:
self.vpanel = int(line.replace("vpanel=", ""))
# Gufw Logging
if line.find("log=enable") != -1:
self.gufw_logging = "enable"
# Listening Status
if line.find("listening=enable") != -1:
self.listening_status = "enable"
# Notify Status
if line.find("notify_popup=enable") != -1:
self.notify_popup = "enable"
|
antiX-Linux/gufw-jessie
|
gui-ufw-12.10.0/gufw/model/Firewall.py
|
Python
|
gpl-3.0
| 7258
|
#!/usr/bin/env python
"""Semantic protocol buffers can be created from proto2 .proto files.
For maintaining inter-operatibility with primitive protocol buffer
implementations, we can parse the field descriptors created by the standard
Google proto implementation, and generate Semantic proto descriptors.
This file contains interoperability code with the Google protocol buffer
library.
"""
import logging
from grr.lib import rdfvalue
from grr.lib import type_info
from grr.proto import semantic_pb2
# Field types present in the proto2 field descriptors.
TYPE_DOUBLE = 1
TYPE_FLOAT = 2
TYPE_INT64 = 3
TYPE_UINT64 = 4
TYPE_INT32 = 5
TYPE_FIXED64 = 6
TYPE_FIXED32 = 7
TYPE_BOOL = 8
TYPE_STRING = 9
TYPE_GROUP = 10
TYPE_MESSAGE = 11
TYPE_BYTES = 12
TYPE_UINT32 = 13
TYPE_ENUM = 14
TYPE_SFIXED32 = 15
TYPE_SFIXED64 = 16
TYPE_SINT32 = 17
TYPE_SINT64 = 18
MAX_TYPE = 18
# These are labels in the descriptor. Semantic protobufs only distinguish
# between optional and repeated labels. Required is not enforced by the library
# - it should be done by the user in their Validate() method.
LABEL_OPTIONAL = 1
LABEL_REQUIRED = 2
LABEL_REPEATED = 3
MAX_LABEL = 3
# Semantic Value data store type specifies how they prefer to be encoded. This
# maps to a proto2 primitive field type. When parsing the .proto file we must
# ensure that the semantic value is getting encoded into the correct primitive
# field type.
_SEMANTIC_PRIMITIVE_TO_FIELD_TYPE = dict(
bytes=TYPE_BYTES,
string=TYPE_STRING,
integer=TYPE_INT64,
unsigned_integer=TYPE_UINT64,
)
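# For example, a semantic value whose data_store_type is "string" must be
# stored in a proto2 TYPE_STRING field; DefineFromProtobuf() below raises
# InitializeError when the .proto file declares a different primitive type.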
def DefineFromProtobuf(cls, protobuf):
"""Add type info definitions from an existing protobuf.
We support building this class by copying definitions from an annotated
protobuf using the semantic protobuf. This is ideal for interoperability
with other languages and non-semantic protobuf implementations. In that case
it might be easier to simply annotate the .proto file with the relevant
semantic information.
Args:
cls: The class to add fields descriptors to (i.e. the new semantic class).
protobuf: A generated proto2 protocol buffer class as produced by the
standard Google protobuf compiler.
"""
# Parse message level options.
message_options = protobuf.DESCRIPTOR.GetOptions()
semantic_options = message_options.Extensions[semantic_pb2.semantic]
# Support message descriptions
if semantic_options.description and not cls.__doc__:
cls.__doc__ = semantic_options.description
# We search through all the field descriptors and build type info
# descriptors from them.
for field in protobuf.DESCRIPTOR.fields:
type_descriptor = None
# Does this field have semantic options?
options = field.GetOptions().Extensions[semantic_pb2.sem_type]
kwargs = dict(description=options.description, name=field.name,
friendly_name=options.friendly_name,
field_number=field.number, labels=list(options.label))
if field.has_default_value:
kwargs["default"] = field.default_value
# This field is a non-protobuf semantic value.
if options.type and field.type != TYPE_MESSAGE:
rdf_type = getattr(rdfvalue, options.type, None)
if rdf_type:
# Make sure that the field type is the same as what is required by the
# semantic type.
required_field_type = _SEMANTIC_PRIMITIVE_TO_FIELD_TYPE[
rdf_type.data_store_type]
if required_field_type != field.type:
raise rdfvalue.InitializeError(
("%s: .proto file uses incorrect field to store Semantic Value "
"%s: Should be %s") % (
cls.__name__, field.name, rdf_type.data_store_type))
type_descriptor = type_info.ProtoRDFValue(rdf_type=options.type, **kwargs)
# A semantic protobuf is already a semantic value so it is an error to
# specify it in two places.
elif options.type and field.type == TYPE_MESSAGE:
raise rdfvalue.InitializeError(
("%s: .proto file specified both Semantic Value type %s and "
"Semantic protobuf %s") % (
cls.__name__, options.type, field.message_type.name))
# Try to figure out what this field actually is from the descriptor.
elif field.type == TYPE_DOUBLE:
type_descriptor = type_info.ProtoDouble(**kwargs)
elif field.type == TYPE_FLOAT:
type_descriptor = type_info.ProtoFloat(**kwargs)
elif field.type == TYPE_BOOL:
type_descriptor = type_info.ProtoBoolean(**kwargs)
elif field.type == TYPE_STRING:
type_descriptor = type_info.ProtoString(**kwargs)
elif field.type == TYPE_BYTES:
type_descriptor = type_info.ProtoBinary(**kwargs)
if options.dynamic_type:
# This may be a dynamic type. In this case the dynamic_type option
# names a method (which must exist) which should return the class of
# the embedded semantic value.
dynamic_cb = getattr(cls, options.dynamic_type, None)
if dynamic_cb is not None:
type_descriptor = type_info.ProtoDynamicEmbedded(
dynamic_cb=dynamic_cb, **kwargs)
else:
logging.warning("Dynamic type specifies a non existant callback %s",
options.dynamic_type)
elif field.type == TYPE_INT64 or field.type == TYPE_INT32:
type_descriptor = type_info.ProtoSignedInteger(**kwargs)
elif field.type == TYPE_UINT32 or field.type == TYPE_UINT64:
type_descriptor = type_info.ProtoUnsignedInteger(**kwargs)
# An embedded protocol buffer.
elif field.type == TYPE_MESSAGE and field.message_type:
# Refer to another protobuf. Note that the target does not need to be
# known at this time. It will be resolved using the late binding algorithm
# when it is known. Therefore this can actually also refer to this current
# protobuf (i.e. nested proto).
type_descriptor = type_info.ProtoEmbedded(
nested=field.message_type.name, **kwargs)
# TODO(user): support late binding here.
if type_descriptor.type:
# This traps the following problem:
# class Certificate(rdfvalue.RDFValueArray):
# protobuf = jobs_pb2.BlobArray
#
# A primitive Protobuf definition like:
# message Certificate {
# ....
# };
# And a field like:
# optional Certificate csr = 1 [(sem_type) = {
# description: "A Certificate RDFValue with the CSR in it.",
# }];
# If we blindly allowed the Certificate RDFValue to be used, the
# semantic library will end up embedding a BlobArray protobuf, but the
# primitive library will still use Certificate.
# The name of the primitive protobuf the semantic type implements.
semantic_protobuf_primitive = type_descriptor.type.protobuf.__name__
# This is an error because the primitive library will use the protobuf
# named in the field, but the semantic library will implement a
# different protobuf.
if semantic_protobuf_primitive != field.message_type.name:
raise rdfvalue.InitializeError(
("%s.%s: Conflicting primitive (%s) and semantic protobuf %s "
"which implements primitive protobuf (%s)") %(
cls.__name__, field.name, field.message_type.name,
type_descriptor.type.__name__, semantic_protobuf_primitive))
elif field.enum_type: # It is an enum.
enum_desc = field.enum_type
enum_dict = {}
enum_descriptions = {}
for enum_value in enum_desc.values:
enum_dict[enum_value.name] = enum_value.number
description = enum_value.GetOptions().Extensions[
semantic_pb2.description]
enum_descriptions[enum_value.name] = description
type_descriptor = type_info.ProtoEnum(
enum_name=enum_desc.name, enum=enum_dict,
enum_descriptions=enum_descriptions, **kwargs)
# Attach the enum container to the class for easy reference:
setattr(cls, enum_desc.name, type_descriptor.enum_container)
# If we do not recognize the type descriptor we ignore this field.
if type_descriptor is not None:
# If the field is repeated, wrap it in a ProtoList.
if field.label == LABEL_REPEATED:
type_descriptor = type_info.ProtoList(type_descriptor)
try:
cls.AddDescriptor(type_descriptor)
except Exception:
logging.error("Failed to parse protobuf %s", cls)
raise
else:
logging.error("Unknown field type for %s - Ignoring.", field.name)
|
simsong/grr-insider
|
lib/rdfvalues/proto2.py
|
Python
|
apache-2.0
| 8,695
|
# -*- coding: utf-8 -*-
from itertools import product
import requests
import shutil
def api_list(apiargs):
"""Google Street View Image API results.
Constructs a list of `Google Street View Image API queries <https://developers.google.com/maps/documentation/streetview/>`_
from a dictionary.
Args:
    apiargs (dict):
Dict containing `street view URL parameters <https://developers.google.com/maps/documentation/streetview/intro>`_.
Each parameter can have multiple values if separated by ``;``.
Returns:
A ``listof dict`` containing single query requests per dictionary for Google Street View Image API.
Examples:
::
# Import google_streetview for the api and helper module
import google_streetview.api
import google_streetview.helpers
# Create a dictionary with multiple parameters separated by ;
apiargs = {
'location': '46.414382,10.013988;40.720032,-73.988354',
'size': '640x300;640x640',
'heading': '0;90;180;270',
'fov': '0;90;120',
'pitch': '-90;0;90'
}
# Get a list of all possible queries from multiple parameters
api_list = google_streetview.helpers.api_list(apiargs)
# Create a results object for all possible queries
results = google_streetview.api.results(api_list)
# Preview results
results.preview()
# Download images to directory 'downloads'
results.download_links('downloads')
# Save metadata
results.save_metadata('metadata.json')
"""
# (api_query) Query combinations for each parameter
  api_queries = {}
  for k in list(apiargs):
    # Move each parameter out of apiargs, splitting multi-values on ';'
    api_queries[k] = apiargs.pop(k).split(';')
# (api_list) Build list of api requests based on query combinations
out = []
keys = [k for k in api_queries]
queries = [api_queries[k] for k in api_queries]
combinations = product(*queries)
for combo in combinations:
api_copy = apiargs.copy()
for k, parameter in zip(keys, combo):
api_copy[k] = parameter
out.append(api_copy)
  return out
def download(url, file_path):
  """Download a single image from `url` and save it to `file_path`."""
  r = requests.get(url, stream=True)
  if r.status_code == 200:  # only write the file when the request succeeds
    with open(file_path, 'wb') as f:
      r.raw.decode_content = True  # decode gzip/deflate transfer encoding
      shutil.copyfileobj(r.raw, f)
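# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Fetching one image with download(); the URL parameters are illustrative
# only, and a real request needs a valid API key.
#
#   download('https://maps.googleapis.com/maps/api/streetview'
#            '?size=640x300&location=46.414382,10.013988&key=YOUR_KEY',
#            'street_view.jpg')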
|
rrwen/google_streetview
|
google_streetview/helpers.py
|
Python
|
mit
| 2,418
|
'''Learning agent for GPTD (Gaussian process temporal difference)
learning, built on the GPTDModule. Updates must be made with the pairs
(laststate, lastaction) and (curstate, curaction), as specified in
GPTDModule.
'''
class GPTDAgent(object):
curstate = None
nextaction = None
laststate = None
lastaction = None
lastreward = None
def __init__(self, module, initstate, initaction):
self.module = module
self.curstate = initstate
self.nextaction = initaction
def integrateObservation(self, obs):
# move last iteration into the past
self.laststate = self.curstate
self.lastaction = self.nextaction
# observation MUST be in form (r, c)
self.curstate = obs
def getAction(self):
self.nextaction = self.module.getMaxAction(self.curstate)
return self.nextaction
def getReward(self, reward):
self.lastreward = reward
# do the update
self.module.update(self.laststate, self.lastaction,
self.lastreward, self.curstate, self.nextaction)
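# --- Hedged usage sketch (editor's addition, not part of the original file) ---
# Drives one agent step with a stub module so the call order is visible:
# integrateObservation -> getAction -> getReward. The stub stands in for the
# real GPTDModule and is purely illustrative.
if __name__ == '__main__':
    class _StubModule(object):
        def getMaxAction(self, state):
            return 0  # always choose action 0

        def update(self, s, a, r, s2, a2):
            print('update: %s %s %s %s %s' % (s, a, r, s2, a2))

    agent = GPTDAgent(_StubModule(), initstate=(0, 0), initaction=0)
    agent.integrateObservation((0, 1))  # observation in (r, c) form
    agent.getAction()                   # queries the module for an action
    agent.getReward(1.0)                # triggers the GPTD update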
|
alexandrwang/6882project
|
engel/agent.py
|
Python
|
mit
| 1,058
|
# -*- coding: utf-8 -*-
#
# This file is part of Zenodo.
# Copyright (C) 2016 CERN.
#
# Zenodo is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Zenodo is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Zenodo; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Cache module."""
from __future__ import absolute_import, print_function
from .ext import ZenodoCache
from .proxies import current_cache
__all__ = ('ZenodoCache', 'current_cache')
|
tiborsimko/zenodo
|
zenodo/modules/cache/__init__.py
|
Python
|
gpl-2.0
| 1,153
|
import urllib
import requests
import jwt
import ssl
from datetime import datetime
from binascii import a2b_base64
from Crypto.PublicKey import RSA
from Crypto.Util.asn1 import DerSequence
from django.core.cache import cache
from django.conf import settings
def get_auth0_public_key(cert_url):
"""
Using the URL for a public certificate, create the RSA certificate object
and return the public key
"""
cert_file = urllib.urlopen(cert_url)
cert_obj = cert_file.read()
# Convert from PEM to DER
der = ssl.PEM_cert_to_DER_cert(cert_obj)
# Extract subjectPublicKeyInfo field from X.509 certificate (see RFC3280)
cert = DerSequence()
cert.decode(der)
tbsCertificate = DerSequence()
tbsCertificate.decode(cert[0])
subjectPublicKeyInfo = tbsCertificate[6]
# Initialize RSA key
rsa_key = RSA.importKey(subjectPublicKeyInfo)
return rsa_key
def get_auth0_management_token():
"""
This gets an access token for the Auth0 management API. This is needed in
order to make calls to the API (e.g. to get user information). These tokens
expire so we cache them in local memory to avoid making an API call every
time we want to access the management API. Once the token has expired (or
we can't find one in memory) then we fetch a new one.
"""
token = cache.get('auth0_access_token', None)
if not token:
# We need to get a new token
response = requests.post(
settings.AUTH0_TOKEN_URL,
json={
'grant_type': 'client_credentials',
'client_id': settings.AUTH0_CLIENT_ID,
'client_secret': settings.AUTH0_CLIENT_SECRET,
'audience': settings.AUTH0_CLIENT_AUDIENCE
})
response.raise_for_status()
response_json = response.json()
# Get the access token from the response
token = response_json['access_token']
# Store the token in the cache
# TODO: This should expire from the cache before the token becomes
    #       invalid, but we could store the expiration and then explicitly
# check the expiration of the token to be 100% sure.
cache.set('auth0_access_token', token)
return token
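def verify_id_token(token, cert_url):
    """
    Hedged sketch (editor's addition, not part of the original file): verify
    an RS256-signed Auth0 token using the public-key helper above. The exact
    jwt.decode() arguments depend on the installed PyJWT version, so treat
    this as illustrative rather than definitive.
    """
    rsa_key = get_auth0_public_key(cert_url)
    # exportKey() yields a PEM-encoded public key, which jwt.decode accepts
    return jwt.decode(token, rsa_key.exportKey(),
                      algorithms=['RS256'],
                      audience=settings.AUTH0_CLIENT_AUDIENCE)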
|
Techbikers/techbikers
|
server/auth/utils.py
|
Python
|
mit
| 2,262
|
from rasmodel.scenarios.default import model
import numpy as np
from matplotlib import pyplot as plt
from pysb.integrate import Solver
from pysb import *
from tbidbaxlipo.util import fitting
# Zero out all initial conditions
for ic in model.initial_conditions:
ic[1].value = 0
KRAS = model.monomers['KRAS']
GDP = model.monomers['GDP']
GTP = model.monomers['GTP']
Expression('KRAS_mGXP_', model.observables['KRAS_mGTP_closed_'] +
model.observables['KRAS_mGDP_closed_'])
# Add an initial condition for HRAS with GDP or GTP pre-bound
# (Concentration units in nM)
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
mutant='WT') % GDP(p=1, label='n'),
Parameter('KRAS_WT_GDP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
mutant='G13D') % GDP(p=1, label='n'),
Parameter('KRAS_G13D_GDP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
mutant='WT') % GTP(p=1, label='n'),
Parameter('KRAS_WT_GTP_0', 0.))
Initial(KRAS(gtp=1, gap=None, gef=None, p_loop=None, s1s2='closed', CAAX=None,
mutant='G13D') % GTP(p=1, label='n'),
Parameter('KRAS_G13D_GTP_0', 0.))
plt.ion()
# First simulate the data from Figure 1A (GDP exchange)
# WT, GDP:
model.parameters['mGDP_0'].value = 1500.
model.parameters['KRAS_WT_GDP_0'].value = 750.
t = np.linspace(0, 1000, 1000) # 1000 seconds
sol = Solver(model, t)
sol.run()
plt.figure()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT')
# G13D, GDP:
model.parameters['KRAS_WT_GDP_0'].value = 0
model.parameters['KRAS_G13D_GDP_0'].value = 750.
sol.run()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D')
plt.legend(loc='lower right')
plt.title('GDP exchange')
plt.xlabel('Time (s)')
plt.ylabel('[Bound mGDP] (nM)')
plt.show()
# Now simulate the data from Figure 1B (GTP exchange)
# WT, GTP
model.parameters['mGDP_0'].value = 0.
model.parameters['mGTP_0'].value = 1500.
model.parameters['KRAS_WT_GDP_0'].value = 0.
model.parameters['KRAS_G13D_GDP_0'].value = 0.
model.parameters['KRAS_WT_GTP_0'].value = 750.
model.parameters['KRAS_G13D_GTP_0'].value = 0.
sol.run()
plt.figure()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='WT')
# G13D, GTP
model.parameters['KRAS_WT_GTP_0'].value = 0.
model.parameters['KRAS_G13D_GTP_0'].value = 750.
sol.run()
plt.plot(t, sol.yexpr['KRAS_mGXP_'], label='G13D')
plt.legend(loc='lower right')
plt.title('GTP exchange')
plt.xlabel('Time (s)')
plt.ylabel('[Bound mGTP] (nM)')
plt.show()
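# --- Hedged sketch (editor's addition, not part of the original file) ---
# The unused `fitting` import above suggests the exchange curves were meant
# to be fit; one way to extract an observed rate is a single-exponential fit
# with scipy (scipy here is an assumption, not part of the original script):
#
#   from scipy.optimize import curve_fit
#   def expfunc(t, k, fmax):
#       return fmax * (1 - np.exp(-k * t))
#   popt, _ = curve_fit(expfunc, t, sol.yexpr['KRAS_mGXP_'], p0=[0.01, 750.])
#   print('k_obs = %g /s' % popt[0])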
|
johnbachman/ras_model
|
gxp_exchange.py
|
Python
|
mit
| 2,587
|