repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
joomel1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/memorizingfile.py | 680 | 3709 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Memorizing file.
A memorizing file wraps a file and memorizes lines read by readline.
"""
import sys
class MemorizingFile(object):
    """MemorizingFile wraps a file and memorizes lines read by readline.

    Note that data read by other methods are not memorized. This behavior
    is good enough for memorizing lines SimpleHTTPServer reads before
    the control reaches WebSocketRequestHandler.
    """

    def __init__(self, file_, max_memorized_lines=sys.maxsize):
        """Construct an instance.

        Args:
            file_: the file object to wrap.
            max_memorized_lines: the maximum number of lines to memorize.
                Only the first max_memorized_lines are memorized.
                Default: sys.maxsize.
                (sys.maxint was removed in Python 3; sys.maxsize is the
                portable practical-infinity default.)
        """
        self._file = file_
        self._memorized_lines = []
        self._max_memorized_lines = max_memorized_lines
        # When a size-limited readline leaves part of a line unread, the
        # remainder is kept in _buffered_line and served on the next call.
        self._buffered = False
        self._buffered_line = None

    def __getattribute__(self, name):
        # Serve this wrapper's own attributes and methods directly; delegate
        # every other attribute (read, close, fileno, ...) to the wrapped
        # file object so this class is a transparent proxy.
        if name in ('_file', '_memorized_lines', '_max_memorized_lines',
                    '_buffered', '_buffered_line', 'readline',
                    'get_memorized_lines'):
            return object.__getattribute__(self, name)
        return self._file.__getattribute__(name)

    def readline(self, size=-1):
        """Override file.readline and memorize the line read.

        Note that even if size is specified and smaller than actual size,
        the whole line will be read out from underlying file object by
        subsequent readline calls.
        """
        if self._buffered:
            line = self._buffered_line
            self._buffered = False
        else:
            line = self._file.readline()
            # Memorize only lines freshly read from the underlying file —
            # never remainders re-served from the size-limit buffer, which
            # would duplicate partial lines in the memorized list.
            if line and len(self._memorized_lines) < self._max_memorized_lines:
                self._memorized_lines.append(line)
        if size >= 0 and size < len(line):
            # Caller asked for fewer bytes than the whole line: return the
            # prefix and stash the remainder for the next readline call.
            self._buffered = True
            self._buffered_line = line[size:]
            return line[:size]
        return line

    def get_memorized_lines(self):
        """Get lines memorized so far."""
        return self._memorized_lines
# vi:sts=4 sw=4 et
| bsd-3-clause |
jmerkow/VTK | ThirdParty/Twisted/twisted/trial/_dist/test/test_workerreporter.py | 35 | 3765 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.trial._dist.workerreporter}.
"""
from twisted.python.failure import Failure
from twisted.trial.unittest import TestCase, Todo
from twisted.trial._dist.workerreporter import WorkerReporter
from twisted.trial._dist import managercommands
class FakeAMProtocol(object):
    """
    A stand-in for an C{AMP} connection that records the most recent
    C{callRemote} invocation instead of performing any I/O.
    """
    id = 0
    lastCall = None

    def callRemote(self, command, **kwargs):
        # Only the command matters to the tests; the keyword
        # arguments are deliberately discarded.
        self.lastCall = command
class WorkerReporterTestCase(TestCase):
    """
    Tests for L{WorkerReporter}.
    """

    def setUp(self):
        self.fakeAMProtocol = FakeAMProtocol()
        self.workerReporter = WorkerReporter(self.fakeAMProtocol)
        self.test = TestCase()

    def _assertLastCall(self, command):
        """
        Fail unless C{command} is the most recent command recorded by the
        fake AMP connection.
        """
        self.assertEqual(self.fakeAMProtocol.lastCall, command)

    def test_addSuccess(self):
        """
        L{WorkerReporter.addSuccess} sends a L{managercommands.AddSuccess}
        command.
        """
        self.workerReporter.addSuccess(self.test)
        self._assertLastCall(managercommands.AddSuccess)

    def test_addError(self):
        """
        L{WorkerReporter.addError} sends a L{managercommands.AddError} command.
        """
        self.workerReporter.addError(self.test, Failure(RuntimeError('error')))
        self._assertLastCall(managercommands.AddError)

    def test_addErrorTuple(self):
        """
        Adding an error as a C{sys.exc_info}-style tuple via
        L{WorkerReporter.addError} sends a L{managercommands.AddError}
        command.
        """
        self.workerReporter.addError(
            self.test, (RuntimeError, RuntimeError('error'), None))
        self._assertLastCall(managercommands.AddError)

    def test_addFailure(self):
        """
        L{WorkerReporter.addFailure} sends a L{managercommands.AddFailure}
        command.
        """
        self.workerReporter.addFailure(self.test,
                                       Failure(RuntimeError('fail')))
        self._assertLastCall(managercommands.AddFailure)

    def test_addFailureTuple(self):
        """
        Adding a failure as a C{sys.exc_info}-style tuple via
        L{WorkerReporter.addFailure} sends a L{managercommands.AddFailure}
        command.
        """
        self.workerReporter.addFailure(
            self.test, (RuntimeError, RuntimeError('fail'), None))
        self._assertLastCall(managercommands.AddFailure)

    def test_addSkip(self):
        """
        L{WorkerReporter.addSkip} sends a L{managercommands.AddSkip} command.
        """
        self.workerReporter.addSkip(self.test, 'reason')
        self._assertLastCall(managercommands.AddSkip)

    def test_addExpectedFailure(self):
        """
        L{WorkerReporter.addExpectedFailure} sends a
        L{managercommands.AddExpectedFailure} command.
        """
        self.workerReporter.addExpectedFailure(
            self.test, Failure(RuntimeError('error')), Todo('todo'))
        self._assertLastCall(managercommands.AddExpectedFailure)

    def test_addUnexpectedSuccess(self):
        """
        L{WorkerReporter.addUnexpectedSuccess} sends a
        L{managercommands.AddUnexpectedSuccess} command.
        """
        self.workerReporter.addUnexpectedSuccess(self.test, Todo('todo'))
        self._assertLastCall(managercommands.AddUnexpectedSuccess)
| bsd-3-clause |
wilebeast/FireFox-OS | B2G/gecko/testing/mozbase/mozprofile/tests/test_nonce.py | 8 | 1384 | #!/usr/bin/env python
"""
test nonce in prefs delimiters
see https://bugzilla.mozilla.org/show_bug.cgi?id=722804
"""
import os
import tempfile
import time
import unittest
from mozprofile.prefs import Preferences
from mozprofile.profile import Profile
class PreferencesNonceTest(unittest.TestCase):

    def test_nonce(self):
        # Build a throwaway profile containing a single preference.
        profile_path = tempfile.mktemp()
        profile = Profile(profile_path,
                          preferences={'foo': 'bar'},
                          restore=False)
        user_js = os.path.join(profile.profile, 'user.js')
        self.assertTrue(os.path.exists(user_js))

        # The preference must have been written to user.js verbatim.
        self.assertEqual(dict(Preferences.read_prefs(user_js)), {'foo': 'bar'})
        del profile

        # Re-open the same profile and layer a second preference on top.
        profile = Profile(profile_path,
                          preferences={'fleem': 'baz'},
                          restore=True)
        self.assertEqual(dict(Preferences.read_prefs(user_js)),
                         {'foo': 'bar', 'fleem': 'baz'})

        # Cleanup must strip only the restored preference, leaving the
        # original one intact.
        profile.cleanup()
        self.assertEqual(dict(Preferences.read_prefs(user_js)), {'foo': 'bar'})


if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
archf/ansible | test/units/modules/network/nxos/test_nxos_bgp_neighbor.py | 19 | 2404 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_bgp_neighbor
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosBgpNeighborModule(TestNxosModule):
    """Unit tests for the nxos_bgp_neighbor module."""

    module = nxos_bgp_neighbor

    def setUp(self):
        # Patch out device I/O so no real connection is attempted.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor.load_config')
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_bgp_neighbor.get_config')
        self.load_config = self.mock_load_config.start()
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Serve a canned running-config and pretend config pushes succeed.
        self.get_config.return_value = load_fixture('nxos_bgp', 'config.cfg')
        self.load_config.return_value = []

    def test_nxos_bgp_neighbor(self):
        # Creating a neighbor with a description emits the expected CLI.
        set_module_args(dict(asn=65535, neighbor='3.3.3.3', description='some words'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'neighbor 3.3.3.3', 'description some words'])

    def test_nxos_bgp_neighbor_remove_private_as(self):
        # Arguments matching the existing config are a no-op.
        set_module_args(dict(asn=65535, neighbor='3.3.3.4', remove_private_as='all'))
        self.execute_module(changed=False, commands=[])

    def test_nxos_bgp_neighbor_remove_private_as_changed(self):
        # Changing remove-private-as emits the updated command.
        set_module_args(dict(asn=65535, neighbor='3.3.3.4', remove_private_as='replace-as'))
        self.execute_module(
            changed=True,
            commands=['router bgp 65535', 'neighbor 3.3.3.4', 'remove-private-as replace-as'])
| gpl-3.0 |
CZAlmon/MangaMine | MangaMineBot_Folder/MangaBotDownloader.py | 1 | 57509 | #Ver. 0.0.7
#Author: Zach Almon

import urllib.request
import re
import os
import platform
import sys
import string
import html
import time

# Cached OS name ('Windows', 'Linux', 'Darwin', ...); every downloader below
# reads it to pick between "\\" and "/" when building output directory paths.
platformType = platform.system()
def Batoto(link_to_manga_site):
    """Download every English chapter of a Bato.to series into ./Batoto/<title>/.

    Scrapes the series page at ``link_to_manga_site``, discovers the chapter
    list from the reader's <select> widget, and saves each page image as
    "Chapter <n>/Page <m>.<ext>".  Every network request is retried up to
    four times with 30-second waits; after the fourth failure the function
    prints a diagnostic and returns.

    NOTE(review): the bare ``except:`` clauses below also swallow
    KeyboardInterrupt/SystemExit — consider narrowing to URLError upstream.
    NOTE(review): this function changes the process working directory
    (os.chdir) as a side effect and does not restore it.
    """
    success = False
    currentDirectory = os.getcwd()
    if platformType == 'Windows':
        MASTERdirectoryName = currentDirectory + "\\Batoto"
    else:
        MASTERdirectoryName = currentDirectory + "/Batoto"
    try:
        os.makedirs(MASTERdirectoryName)
    except OSError:
        if not os.path.isdir(MASTERdirectoryName):
            raise
    # MASTERdirectoryName is the Variable that will keep the program downloading
    # Different Manga to the same Batoto Folder
    os.chdir(MASTERdirectoryName)
    type_one_manga = False
    type_two_manga = False
    # Fetch the series page (first of two "main" requests).
    try:
        urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
    except:
        print('Request 1 Failed. Trying again in 30 seconds.')
        time.sleep(30)
        try:
            urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
        except:
            print('Request 2 Failed. Trying again in 30 seconds.')
            time.sleep(30)
            try:
                urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
            except:
                print('Request 3 Failed. Trying again in 30 seconds.')
                time.sleep(30)
                try:
                    urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
                except:
                    print('Request 4 Failed. Moving onto the Next manga.')
                    print('This was the First Main Request that Failed.')
                    return
    Manga_Title = re.findall(r'<title>+(.*?)- Scanlations', str(urllibHTML))
    if len(Manga_Title) == 0:
        print("Title not found. URL or HTML Error.")
        return
    Manga_Title_string = Manga_Title[0]
    # Drop the trailing character of the match (presumably the space before
    # "- Scanlations" — TODO confirm against a live page).
    Manga_Title_string = Manga_Title_string[:-1]
    # Replace escaped byte sequences (e.g. "\xNN" from str(bytes)) with spaces.
    Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title_string)
    # Python 3.4 converts '&amp;'-type entities to their string equivalent.
    Manga_Title_string = html.unescape(Manga_Title_string)
    # Get rid of non-functioning characters for filenames
    directorySafeName = Manga_Title_string
    directorySafeName = directorySafeName.replace("/", " over ")
    directorySafeName = directorySafeName.replace(":", "")
    directorySafeName = directorySafeName.replace("?", "")
    directorySafeName = directorySafeName.replace("+", " plus ")
    directorySafeName = directorySafeName.replace("\"","'")
    directorySafeName = directorySafeName.replace("%", " Percent ")
    directorySafeName = directorySafeName.replace("<", "")
    directorySafeName = directorySafeName.replace(">", "")
    Manga_Title_string = directorySafeName
    print("Downloading", Manga_Title_string)
    # For any other language on Bato.to change lang_English to whatever matches
    # the language you desire. Then this file *SHOULD* work with your language.
    # It is untested as anything else but English.
    allENGLISHChaps = re.findall(r'lang_English+(.*?)\ title="+', str(urllibHTML))
    if len(allENGLISHChaps) == 0:
        print("Manga has no English Chapters or there was an error reading the HTML!")
        return
    else:
        # Chapters are listed newest-first, so the last match is chapter one.
        First_chapter_string = allENGLISHChaps[-1]
        First_chapter_address = re.findall(r'href=\"+(.*?)\"', First_chapter_string)
        First_chapter_address_string = First_chapter_address[0]
    # Fetch the first chapter's reader page to detect the reader layout type.
    try:
        First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
    except:
        print('Request 1 Failed. Trying again in 30 seconds.')
        time.sleep(30)
        try:
            First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
        except:
            print('Request 2 Failed. Trying again in 30 seconds.')
            time.sleep(30)
            try:
                First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
            except:
                print('Request 3 Failed. Trying again in 30 seconds.')
                time.sleep(30)
                try:
                    First_chapter_html = urllib.request.urlopen(First_chapter_address_string).read()
                except:
                    print('Request 4 Failed. Moving onto the Next manga.')
                    print('This was the Second Main Request that Failed.')
                    return
    type_one_padding_right = re.search("<div style=\"text-align:center;\">", str(First_chapter_html))
    type_two_comic_page = re.search("comic_page", str(First_chapter_html))
    # Type one is all images on one page
    if type_one_padding_right != None:
        type_one_manga = True
    # Type two is all images on separate pages
    elif type_two_comic_page != None:
        type_two_manga = True
    else:
        print("There was an error with the Manga Type!")
        return
    # This will get the chapter links from the Select options on the chapter's
    # first page. There are 2 select options (one at top and one at bottom).
    # They are the same so it's arbitrary which you pick. I will be selecting [0].
    get_Chapters = re.findall(r'250px;">+(.*?)</select>', str(First_chapter_html))
    chapter_master_string = get_Chapters[0]
    list_of_Chapter_Links = []
    # Get all chapter links. Last thing in list is an unneeded "selected" string.
    list_of_Chapter_Links = re.findall(r'\"+(.*?)\"', chapter_master_string)
    # In this list there may be a "selected". It may or may not be at the end.
    # The loop solves it. I am 95% sure there will only ever be 1 "selected"
    # per list.
    #list_of_Chapter_Links.pop(-1)
    for i in range(len(list_of_Chapter_Links)):
        if list_of_Chapter_Links[i] == "selected":
            list_of_Chapter_Links.pop(i)
            break
    # Get numbers of the chapters. Will be "matched" up to list_of_Chapter_Links.
    list_of_Chapter_Numbers_raw = re.findall(r'Ch\.+(.*?)<', chapter_master_string)
    list_of_chapter_names_refined = []
    # Some chapters may be like "230: Title of Chapter", some may be "145".
    for i in range(len(list_of_Chapter_Numbers_raw)):
        temp_list = re.findall('^(.*?):', list_of_Chapter_Numbers_raw[i])
        if len(temp_list) == 0:
            list_of_chapter_names_refined.append(list_of_Chapter_Numbers_raw[i])
        elif len(temp_list) == 1:
            list_of_chapter_names_refined.append(temp_list[0])
        else:
            print("Manga Chapter Name Error!")
            return
    list_of_Chapter_Links_Final = list_of_Chapter_Links
    list_of_Chapter_Numbers_Final = list_of_chapter_names_refined
    # Reverse so the oldest chapter is downloaded first.
    list_of_Chapter_Links_Final.reverse()
    list_of_Chapter_Numbers_Final.reverse()
    fullDownload = True
    # Because there are duplicates I must check and add a v2 or v3 if it is in
    # there more times.
    temp_name = []
    temp_name_str = ''
    for i in range(len(list_of_Chapter_Numbers_Final)):
        if list_of_Chapter_Numbers_Final[i] in temp_name:
            # At this point there are duplicates. The chapters may not be in
            # order. This is the only method I can come up with to deal with
            # duplicates that may be out of order.
            temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v2'
            if temp_name_str in temp_name:
                temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v3'
                if temp_name_str in temp_name:
                    temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v4'
                    if temp_name_str in temp_name:
                        temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v5'
                        if temp_name_str in temp_name:
                            temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v6'
                            if temp_name_str in temp_name:
                                temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v7'
                                if temp_name_str in temp_name:
                                    temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v8'
                                    if temp_name_str in temp_name:
                                        temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v9'
                                        if temp_name_str in temp_name:
                                            temp_name_str = list_of_Chapter_Numbers_Final[i] + ' v10'
            # If there are more than 10 duplicates I can't help you
            temp_name.append(temp_name_str)
        else:
            temp_name.append(list_of_Chapter_Numbers_Final[i])
    list_of_Chapter_Numbers_Final = temp_name
    currentDirectory = MASTERdirectoryName
    if platformType == 'Windows':
        manga_directory_name = currentDirectory + "\\" + Manga_Title_string
    else:
        manga_directory_name = currentDirectory + "/" + Manga_Title_string
    try:
        os.makedirs(manga_directory_name)
    except OSError:
        if not os.path.isdir(manga_directory_name):
            raise
    os.chdir(manga_directory_name)
    # Main loop for downloading images.
    for i in range(len(list_of_Chapter_Numbers_Final)):
        first_page_of_each_chapter = True
        chapter_number = list_of_Chapter_Numbers_Final[i]
        chapter_link = list_of_Chapter_Links_Final[i]
        if platformType == 'Windows':
            chapDirectoryName = manga_directory_name + "\\Chapter " + chapter_number
        else:
            chapDirectoryName = manga_directory_name + "/Chapter " + chapter_number
        try:
            os.makedirs(chapDirectoryName)
        except OSError:
            if not os.path.isdir(chapDirectoryName):
                raise
        os.chdir(chapDirectoryName)
        print("Downloading Chapter", chapter_number)
        try:
            urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
        except:
            print('Request 1 Failed. Trying again in 30 seconds.')
            time.sleep(30)
            try:
                urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
            except:
                print('Request 2 Failed. Trying again in 30 seconds.')
                time.sleep(30)
                try:
                    urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
                except:
                    print('Request 3 Failed. Trying again in 30 seconds.')
                    time.sleep(30)
                    try:
                        urllibHTML = urllib.request.urlopen(list_of_Chapter_Links_Final[i]).read()
                    except:
                        print('Request 4 Failed. Moving onto the Next manga.')
                        print('This was the Chapter Request that Failed.')
                        return
        if type_one_manga == True:
            # All page images are embedded in this single chapter page.
            get_images = re.findall(r'text-align:center;">+(.*?)</div><div', str(urllibHTML))
            get_images_master_string = get_images[0]
            image_file_name_list = re.findall(r"<img src=\\'(.*?)\\'", str(get_images_master_string))
            Amount_of_pages = len(image_file_name_list)
            for j in range(len(image_file_name_list)):
                if first_page_of_each_chapter == True:
                    first_page_of_each_chapter = False
                    # Skip the whole chapter if it already looks fully downloaded.
                    numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
                    if numOfFileInCWD == Amount_of_pages:
                        break
                image_file_name = image_file_name_list[j]
                image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
                image_file_extension = image_file_extension_list[-1]
                imageName = "Page " + str(j+1) + image_file_extension
                print("Downloading Page %d" % (j+1), end="", flush=True)
                print("\r", end="", flush=True)
                fileExists = os.path.isfile(imageName)
                # If file does not already exist, opens a file, writes image
                # binary data to it and closes
                if fileExists == False:
                    image_worked = True
                    try:
                        rawImage = urllib.request.urlopen(image_file_name).read()
                    except:
                        print('Request 1 Failed. Trying again in 30 seconds.')
                        time.sleep(30)
                        try:
                            rawImage = urllib.request.urlopen(image_file_name).read()
                        except:
                            print('Request 2 Failed. Trying again in 30 seconds.')
                            time.sleep(30)
                            try:
                                rawImage = urllib.request.urlopen(image_file_name).read()
                            except:
                                print('Request 3 Failed. Trying again in 30 seconds.')
                                time.sleep(30)
                                try:
                                    rawImage = urllib.request.urlopen(image_file_name).read()
                                except:
                                    print('Request 4 Failed. Moving onto the Next image.')
                                    image_worked = False
                    if image_worked:
                        fout = open(imageName, 'wb')
                        fout.write(rawImage)
                        fout.close()
                # I will leave this here in case you feel the need to slow down
                # your requests to the website/server just in case something bad
                # could happen. All you need to do is uncomment the line below
                # and the program will sleep for 2 seconds after each page is
                # downloaded. You can add more time if you wish.
                #time.sleep(2)
        elif type_two_manga == True:
            # Each page image lives on its own reader page; collect page links
            # from the page <select> widget (between 'id="page_select"...' and
            # '</select></li>').
            get_Pages = re.findall(r'id="page_select" onchange="window.location=this.value;">+(.*?)</select></li>', str(urllibHTML))
            # There will be two found (top and bottom selector); use the first.
            Pages_master_string = get_Pages[0]
            # Get all page links; a stray "selected" token is filtered below.
            list_of_page_Links = re.findall(r'\"+(.*?)\"', Pages_master_string)
            list_of_page_links_final = []
            # Loop to rid of the "selected" part of list
            for j in range(len(list_of_page_Links)):
                if list_of_page_Links[j] != "selected":
                    list_of_page_links_final.append(list_of_page_Links[j])
            Amount_of_pages = len(list_of_page_links_final)
            for j in range(len(list_of_page_links_final)):
                print("Downloading Page %d" % (j+1), end="", flush=True)
                print("\r", end="", flush=True)
                # Check for first page. Checks to see if anything is already
                # downloaded.
                if first_page_of_each_chapter == True:
                    first_page_of_each_chapter = False
                    numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
                    if numOfFileInCWD == Amount_of_pages:
                        break
                    # At this point there will be something you need to
                    # download. Since we already have the HTML for the first
                    # page of EACH chapter we don't need to waste time reading
                    # that again; set it here.
                    page_urllibHTML = urllibHTML
                else:
                    try:
                        page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
                    except:
                        print('Request 1 Failed. Trying again in 30 seconds.')
                        time.sleep(30)
                        try:
                            page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
                        except:
                            print('Request 2 Failed. Trying again in 30 seconds.')
                            time.sleep(30)
                            try:
                                page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
                            except:
                                print('Request 3 Failed. Trying again in 30 seconds.')
                                time.sleep(30)
                                try:
                                    page_urllibHTML = urllib.request.urlopen(list_of_page_links_final[j]).read()
                                except:
                                    print('Request 4 Failed. Moving onto the Next manga.')
                                    print('This was the Page Request that Failed.')
                                    return
                # Get image URL
                image_file_name_list = re.findall(r'comic_page" style="max-width: 100%;" src="(.*?)"', str(page_urllibHTML))
                image_file_name = image_file_name_list[0]
                # Check extension: Bato.to could use .png or .jpg or .jpeg
                image_file_extension_list = re.findall(r'(\.\D[^\.]+)', image_file_name)
                image_file_extension = image_file_extension_list[-1]
                imageName = "Page " + str(j+1) + image_file_extension
                fileExists = os.path.isfile(imageName)
                # If file does not already exist, opens a file, writes image
                # binary data to it and closes
                if fileExists == False:
                    image_worked = True
                    try:
                        rawImage = urllib.request.urlopen(image_file_name).read()
                    except:
                        print('Request 1 Failed. Trying again in 30 seconds.')
                        time.sleep(30)
                        try:
                            rawImage = urllib.request.urlopen(image_file_name).read()
                        except:
                            print('Request 2 Failed. Trying again in 30 seconds.')
                            time.sleep(30)
                            try:
                                rawImage = urllib.request.urlopen(image_file_name).read()
                            except:
                                print('Request 3 Failed. Trying again in 30 seconds.')
                                time.sleep(30)
                                try:
                                    rawImage = urllib.request.urlopen(image_file_name).read()
                                except:
                                    print('Request 4 Failed. Moving onto the Next image.')
                                    image_worked = False
                    if image_worked:
                        fout = open(imageName, 'wb')
                        fout.write(rawImage)
                        fout.close()
                # I will leave this here in case you feel the need to slow down
                # your requests to the website/server just in case something bad
                # could happen. All you need to do is uncomment the line below
                # and the program will sleep for 2 seconds after each page is
                # downloaded. You can add more time if you wish.
                #time.sleep(2)
        else:
            print("Manga Type Error!")
            return
    return
def MangaPanda(link_to_manga_site):
    """Download every chapter of a MangaPanda series into ./MangaPanda/<title>/.

    Scrapes the chapter list from the series page, then walks each chapter
    page-by-page via its "next page" links, saving images as
    "Chapter <n>/Page <m>.<ext>".  Page requests sleep 1 second between hits
    (MangaPanda rate-limits aggressive clients); failed requests are retried
    up to four times.

    NOTE(review): the bare ``except:`` clauses also swallow
    KeyboardInterrupt/SystemExit.
    NOTE(review): this function changes the process working directory
    (os.chdir) as a side effect and does not restore it.
    """
    success = False
    currentDirectory = os.getcwd()
    downloadMangaListOnce = False
    # Strip a trailing ".html" from the series URL if present.
    does_it_have_dot_html = re.findall(r'(\.html)', link_to_manga_site)
    if len(does_it_have_dot_html) == 0:
        pass
    else:
        link_to_manga_site = link_to_manga_site[:-5]
    try:
        urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
    except:
        print('Request 1 Failed. Trying again in 30 seconds.')
        time.sleep(30)
        try:
            urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
        except:
            print('Request 2 Failed. Trying again in 30 seconds.')
            time.sleep(30)
            try:
                urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
            except:
                print('Request 3 Failed. Trying again in 30 seconds.')
                time.sleep(30)
                try:
                    urllibHTML = urllib.request.urlopen(link_to_manga_site).read()
                except:
                    print('Request 4 Failed. Moving onto the Next manga.')
                    print('This was the First Main Request that Failed.')
                    return
    allChaps = re.findall(r'<div class="chico_manga"></div>\\n<a href="+(.*?)\">+', str(urllibHTML))
    numOfChapLinks = len(allChaps)
    # However the 6 most recent chapters are also under the 'chico_manga' class
    # so it is necessary to pop those chapters off, and if there are not a total
    # of 6 chapters in the manga we have special cases
    if numOfChapLinks < 12:
        if numOfChapLinks == 10:
            for i in range(5):
                allChaps.pop(0)
        elif numOfChapLinks == 8:
            for i in range(4):
                allChaps.pop(0)
        elif numOfChapLinks == 6:
            for i in range(3):
                allChaps.pop(0)
        elif numOfChapLinks == 4:
            for i in range(2):
                allChaps.pop(0)
        elif numOfChapLinks == 2:
            allChaps.pop(0)
        else:
            print('There was an error parsing the HTML!')
    else:
        for i in range(6):
            allChaps.pop(0)
    # Rather conveniently, there is a class called 'aname' which contains the
    # name of the manga
    grabName = re.findall(r'<h2 class="aname">+(.*?)\</h2>+', str(urllibHTML))
    if len(grabName) == 0:
        print("Title not found. URL or HTML Error.")
        return
    # Some mangas contained characters in aname which cannot be used in Windows
    # directories; these statements attempt to make said strings directory
    # friendly
    directorySafeName = grabName[0]
    directorySafeName = directorySafeName.replace("/", " over ")
    directorySafeName = directorySafeName.replace(":", "")
    directorySafeName = directorySafeName.replace("?", "")
    directorySafeName = directorySafeName.replace("+", "")
    directorySafeName = directorySafeName.replace("\"","'")
    directorySafeName = directorySafeName.replace("%", " Percent")
    directorySafeName = directorySafeName.replace("<", "")
    directorySafeName = directorySafeName.replace(">", "")
    print("Downloading", directorySafeName)
    # Since Windows and UNIX platforms use different directory syntax we need to
    # know the platform and adjust accordingly
    if platformType == 'Windows':
        directoryName = currentDirectory + "\\MangaPanda\\" + str(directorySafeName)
    else:
        directoryName = currentDirectory + "/MangaPanda/" + str(directorySafeName)
    try:
        os.makedirs(directoryName)
    except OSError:
        if not os.path.isdir(directoryName):
            raise
    os.chdir(directoryName)
    # Loops chapter URLs to determine chapter number for both types of URLs
    chapterNames = []
    for i in range(len(allChaps)):
        chapterNum = re.findall('((?:\d)+)', allChaps[i])
        chapterNames.append(chapterNum[-1])
    for i in range(len(allChaps)):
        if platformType == 'Windows':
            chapDirectoryName = directoryName + "\\Chapter " + str(chapterNames[i])
        else:
            chapDirectoryName = directoryName + "/Chapter " + str(chapterNames[i])
        try:
            os.makedirs(chapDirectoryName)
        except OSError:
            if not os.path.isdir(chapDirectoryName):
                raise
        os.chdir(chapDirectoryName)
        # There are some special cases associated with the first loop through
        # the chapter
        isFirstLoopPage = True
        chapURL = "http://www.mangapanda.com" + allChaps[i]
        print("Downloading Chapter", str(chapterNames[i]))
        imageLocation = 0
        while 1:
            imageLocation += 1
            # Looks at page URLs for any and all sequences of numbers
            nextChapDetermine = re.findall('((?:\d)+)', chapURL)
            try:
                urllibHTML = urllib.request.urlopen(chapURL).read()
            except:
                print('Request 1 Failed. Trying again in 30 seconds.')
                time.sleep(30)
                try:
                    urllibHTML = urllib.request.urlopen(chapURL).read()
                except:
                    print('Request 2 Failed. Trying again in 30 seconds.')
                    time.sleep(30)
                    try:
                        urllibHTML = urllib.request.urlopen(chapURL).read()
                    except:
                        print('Request 3 Failed. Trying again in 30 seconds.')
                        time.sleep(30)
                        try:
                            urllibHTML = urllib.request.urlopen(chapURL).read()
                        except:
                            print('Request 4 Failed. Moving onto the Next manga.')
                            print('This was the Chapter Request that Failed.')
                            return
            # The page <option> list is only parsed on the first iteration;
            # its length is the page count used to terminate this while-loop.
            if isFirstLoopPage == True:
                determineAmountOfPages = re.findall('<option value="+(.*?)\</option>', str(urllibHTML))
            if len(determineAmountOfPages) == imageLocation - 1:
                break
            # Checks the number of files in directory in comparison to the
            # number of images in the chapter. If the number is the same the
            # assumption is made that all images have been downloaded
            if isFirstLoopPage == True:
                isFirstLoopPage = False
                numOfFileInCWD = len([name for name in os.listdir('.') if os.path.isfile(name)])
                if numOfFileInCWD == len(determineAmountOfPages):
                    break
            # Waiting till next request. MangaPanda doesn't like a lot of
            # requests in a short time period.
            time.sleep(1)
            # Grabs both the next page URL and the URL for the image on the
            # current page
            URLandIMG = re.findall(r'<div id="imgholder">+(.*?)\" name=+', str(urllibHTML))
            nextPageURL = re.findall(r'<a href="+(.*?)\">', URLandIMG[0])
            imageURL = re.findall(r'src="+(.*?)\"', URLandIMG[0])
            extensionForIMG = re.findall('\.\D[^\.]+', imageURL[0])
            imageName = "Page " + str(imageLocation) + extensionForIMG[-1]
            fileExists = os.path.isfile(imageName)
            # Old code that would put each page that's currently downloading on
            # a new line:
            #print("Downloading Page", imageLocation)
            # New code that will overwrite each "Downloading Page #" with the
            # next page and will eventually be overwrote by the
            # "Downloading Chapter #"
            print("Downloading Page %d" % imageLocation, end="", flush=True)
            print("\r", end="", flush=True)
            # If file does not already exist, opens a file, writes image binary
            # data to it and closes
            if fileExists == False:
                image_worked = True
                # A browser User-Agent is sent; the default urllib agent is
                # presumably blocked by the image host — TODO confirm.
                user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
                url = imageURL[0]
                headers={'User-Agent':user_agent,}
                request = urllib.request.Request(url,None,headers)
                try:
                    rawImage = urllib.request.urlopen(request).read()
                except:
                    print('Request 1 Failed. Trying again in 10 seconds.')
                    time.sleep(10)
                    try:
                        rawImage = urllib.request.urlopen(request).read()
                    except:
                        print('Request 2 Failed. Trying again in 10 seconds.')
                        time.sleep(10)
                        try:
                            rawImage = urllib.request.urlopen(request).read()
                        except:
                            print('Request 3 Failed. Trying again in 10 seconds.')
                            time.sleep(10)
                            try:
                                rawImage = urllib.request.urlopen(request).read()
                            except:
                                print('Request 4 Failed. Moving onto the Next image.')
                                image_worked = False
                if image_worked:
                    fout = open(imageName, 'wb')
                    fout.write(rawImage)
                    fout.close()
            chapURL = "http://www.mangapanda.com" + nextPageURL[0]
            # I will leave this here in case you feel the need to slow down
            # your requests to the website/server just in case something bad
            # could happen. All you need to do is uncomment the line below and
            # the program will sleep for 2 seconds after each page is
            # downloaded. You can add more time if you wish.
            #time.sleep(2)
        # Time between chapters as well
        #time.sleep(1)
    return
def MangaHere(link_to_manga_site):
    """Download every chapter of a manga hosted on MangaHere.

    Creates ``MangaHere/<series name>/<Vol./Chap. label>`` directories under
    the current working directory and saves each page as ``Page N.<ext>``.
    Pages already on disk are skipped, and chapters whose page count matches
    the number of files already present are skipped entirely.

    :param link_to_manga_site: URL of the manga's top page on MangaHere.
    :returns: None normally; 1 when a chapter/volume number cannot be parsed
        from a chapter URL (original behavior kept).
    """

    def fetch(target, final_notes, wait=30):
        # Issue a request up to four times, sleeping ``wait`` seconds after
        # each of the first three failures.  On the fourth failure print the
        # caller-supplied notes and return None so the caller can bail out.
        for attempt in (1, 2, 3):
            try:
                return urllib.request.urlopen(target).read()
            except Exception:
                print('Request %d Failed. Trying again in %d seconds.'
                      % (attempt, wait))
                time.sleep(wait)
        try:
            return urllib.request.urlopen(target).read()
        except Exception:
            for note in final_notes:
                print(note)
            return None

    def make_dir_and_enter(path):
        # mkdir -p followed by chdir; re-raise unless the directory exists.
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise
        os.chdir(path)

    currentDirectory = os.getcwd()
    if platformType == 'Windows':
        directoryName = currentDirectory + "\\MangaHere"
    else:
        directoryName = currentDirectory + "/MangaHere"
    make_dir_and_enter(directoryName)

    urllibHTML = fetch(link_to_manga_site,
                       ['Request 4 Failed. Moving onto the Next manga.',
                        'This was the First Main Request that Failed.'])
    if urllibHTML is None:
        return

    allChaps = re.findall(r' <a class="color_0077" href="(.*?)"',
                          str(urllibHTML))
    allChaps.reverse()  # site lists newest first; download oldest first
    mangaName = re.findall(
        r' <h1 class="title"><span class="title_icon"></span>(.*?)</h1>',
        str(urllibHTML))
    try:
        directorySafeName = mangaName[0]
    except IndexError:
        print('Invalid URL!')
        return

    # Python 3.4 converts '&amp;'-style entities to their string equivalent.
    directorySafeName = html.unescape(directorySafeName)
    # Strip characters that are not legal (or convenient) in file names.
    for bad, replacement in (("/", " over "), (":", ""), ("?", ""),
                             ("+", " plus "), ("\"", "'"),
                             ("%", " Percent "), ("<", ""), (">", "")):
        directorySafeName = directorySafeName.replace(bad, replacement)
    directorySafeName = re.sub(r'\\x\w{2}', r' ', directorySafeName)
    directorySafeName = re.sub(r"\\'", r"'", directorySafeName)
    directorySafeName = directorySafeName.title()
    print("Downloading", directorySafeName)

    if platformType == 'Windows':
        directoryName = directoryName + "\\" + directorySafeName
    else:
        directoryName = directoryName + "/" + directorySafeName
    make_dir_and_enter(directoryName)

    for chap_url in allChaps:
        firstLoop = True
        currentPage = 0
        volChapDirectoryString = ""

        # Volume label: prefer the fractional form "vNN.N", else plain "vNN".
        findVolume = re.findall(r'v\d{2}.\d+', chap_url)
        if len(findVolume) == 0:
            findVolume = re.findall(r'v\d{2}', chap_url)
            try:
                digits = re.findall(r'\d{2}', findVolume[0])
                volChapDirectoryString += 'Vol. ' + str(int(digits[0])) + ' '
            except IndexError:
                pass  # this chapter URL carries no volume number
        else:
            digits = re.findall(r'\d{2}.\d+', findVolume[-1])
            volChapDirectoryString += 'Vol. ' + str(float(digits[0])) + ' '

        # Chapter label: prefer the fractional form "cNNN.N", else "cNNN".
        findChap = re.findall(r'c\d{3}.\d+', chap_url)
        if len(findChap) == 0:
            findChap = re.findall(r'c\d{3}', chap_url)
            try:
                digits = re.findall(r'\d{3}', findChap[0])
                volChapDirectoryString += 'Chap. ' + str(int(digits[0]))
            except IndexError:
                pass  # no chapter number either
        else:
            digits = re.findall(r'\d{3}.\d+', findChap[-1])
            volChapDirectoryString += 'Chap. ' + str(float(digits[0]))

        if volChapDirectoryString == "":
            print('An error has occured getting chapter or volume number!')
            return 1
        print('Downloading', volChapDirectoryString)

        if platformType == 'Windows':
            volChapDirectoryName = directoryName + "\\" + volChapDirectoryString
        else:
            volChapDirectoryName = directoryName + "/" + volChapDirectoryString
        make_dir_and_enter(volChapDirectoryName)

        raw = fetch(chap_url,
                    ['Request 4 Failed. Moving onto the Next manga.',
                     'This was the Chapter Request that Failed.'])
        if raw is None:
            return
        urllibIMG = str(raw)

        trimHTML = re.findall('<select id="top_chapter_list"(.*?)read_img',
                              urllibIMG)
        try:
            allPageURLs = re.findall('<option value="(.*?)" ', trimHTML[-1])
        except IndexError:
            print("Something went wrong when trying to find the page URL's!")
            print('This manga cannot be downloaded at this time.')
            return

        for page_url in allPageURLs:
            currentPage += 1
            skipPage = False
            if not firstLoop:
                # Page URLs are fetched with POST (the first page reuses the
                # chapter HTML fetched above).
                request = urllib.request.Request(page_url)
                request.method = 'POST'
                raw = fetch(request,
                            ['Request 4 Failed. Moving onto the Next manga.',
                             'This was the Page Request that Failed.'])
                if raw is None:
                    return
                urllibIMG = str(raw)
            if firstLoop:
                firstLoop = False
                # Skip chapters that are already fully downloaded.
                numOfFileInCWD = len([name for name in os.listdir('.')
                                      if os.path.isfile(name)])
                if numOfFileInCWD == len(allPageURLs):
                    break

            # Overwrite the "Downloading Page N" line in place so the next
            # "Downloading Chapter"/"Page" message replaces it.
            print("Downloading Page %d" % currentPage, end="", flush=True)
            print("\r", end="", flush=True)

            imageURL = re.findall('<img src="(.*?)" onerror="', urllibIMG)
            try:
                extensionForIMG = re.findall(r'\.[a-z]{3}', imageURL[0])
            except IndexError:
                print('Page ' + str(currentPage) + ' could not be downloaded!')
                skipPage = True
            if not skipPage:
                imageName = "Page " + str(currentPage) + extensionForIMG[-1]
                # Only download pages that are not already on disk.
                if not os.path.isfile(imageName):
                    rawImage = fetch(
                        imageURL[0],
                        ['Request 4 Failed. Moving onto the Next image.'])
                    if rawImage is not None:
                        with open(imageName, 'wb') as fout:
                            fout.write(rawImage)
        # To throttle requests to the site, uncomment: time.sleep(2)
    return
def MangaStream(link_to_manga_site):
    """Download every chapter of a manga hosted on MangaStream.

    Creates ``MangaStream/<series>/Chapter <name>`` directories under the
    current working directory and saves pages as ``Page N.<ext>``.  Pages
    already on disk are skipped; chapters whose file count matches the page
    count are skipped entirely.

    :param link_to_manga_site: URL of the manga's top page on MangaStream.
    """

    def fetch(target, final_notes, wait=30):
        # Issue a request up to four times, sleeping ``wait`` seconds after
        # each of the first three failures.  On the fourth failure print the
        # caller-supplied notes and return None so the caller can bail out.
        for attempt in (1, 2, 3):
            try:
                return urllib.request.urlopen(target).read()
            except Exception:
                print('Request %d Failed. Trying again in %d seconds.'
                      % (attempt, wait))
                time.sleep(wait)
        try:
            return urllib.request.urlopen(target).read()
        except Exception:
            for note in final_notes:
                print(note)
            return None

    def make_dir_and_enter(path):
        # mkdir -p followed by chdir; re-raise unless the directory exists.
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise
        os.chdir(path)

    def sanitize(name):
        # Strip characters that are not legal (or convenient) in file names.
        # (The original's redundant replace("\'", "'") no-op was dropped.)
        for bad, replacement in (("/", " over "), (":", ""), ("?", ""),
                                 ("+", " plus "), ("\"", "'"),
                                 ("\\'", "'"), ("\\", ""),
                                 ("%", " Percent "), ("<", ""), (">", "")):
            name = name.replace(bad, replacement)
        return name

    currentDirectory = os.getcwd()
    if platformType == 'Windows':
        MASTERdirectoryName = currentDirectory + "\\MangaStream"
    else:
        MASTERdirectoryName = currentDirectory + "/Mangastream"
    # MASTERdirectoryName keeps every series inside the same
    # MangaStream folder across runs.
    make_dir_and_enter(MASTERdirectoryName)

    urllibHTML = fetch(link_to_manga_site,
                       ['Request 4 Failed. Moving onto the Next manga.',
                        'This was the First Main Request that Failed.'])
    if urllibHTML is None:
        return

    Manga_Title = re.findall(r'<title>(.*?) Manga', str(urllibHTML))
    if len(Manga_Title) == 0:
        print("Title not found. URL or HTML Error.")
        return
    Manga_Title_string = re.sub(r'\\x\w{2}', r' ', Manga_Title[0])
    # Python 3.4 converts '&amp;'-style entities to their string equivalent.
    Manga_Title_string = html.unescape(Manga_Title_string)
    Manga_Title_string = sanitize(Manga_Title_string)
    print("Downloading", Manga_Title_string)

    # NOTE: the pattern below matches the escaped repr produced by
    # str(bytes), hence the literal backslash sequences -- keep verbatim.
    all_chaps_list = re.findall(
        '<th style="width: 70%">Chapter<\/th>\\\\n<th style="width: 30%">Released<\/th>\\\\n<\/tr>\\\\n<tr>\\\\n(.*?)<\/table>',
        str(urllibHTML), re.DOTALL)
    all_chaps_str = all_chaps_list[0]
    chapter_list_tuples = re.findall(r'href="(.*?)">(.*?)</a>',
                                     str(all_chaps_str))
    chapter_links = [link for link, _ in chapter_list_tuples]
    chapter_names = [name for _, name in chapter_list_tuples]

    if platformType == 'Windows':
        manga_directory_name = MASTERdirectoryName + "\\" + Manga_Title_string
    else:
        manga_directory_name = MASTERdirectoryName + "/" + Manga_Title_string
    make_dir_and_enter(manga_directory_name)

    for chapter_link_string, chapter_name_string in zip(chapter_links,
                                                        chapter_names):
        first_chapter_bool = True
        chapter_name_string = re.sub(r'\\x\w{2}', r' ', chapter_name_string)
        chapter_name_string = sanitize(chapter_name_string)

        if platformType == 'Windows':
            chapDirectoryName = (manga_directory_name + "\\Chapter "
                                 + chapter_name_string)
        else:
            chapDirectoryName = (manga_directory_name + "/Chapter "
                                 + chapter_name_string)
        make_dir_and_enter(chapDirectoryName)
        print("Downloading Chapter", chapter_name_string)

        urllibHTML = fetch(chapter_link_string,
                           ['Request 4 Failed. Moving onto the Next manga.',
                            'This was the Chapter Request that Failed.'])
        if urllibHTML is None:
            return

        # The last dropdown on the page lists every page of the chapter; the
        # final entry's URL encodes both the page count and the chapter URL.
        page_list_raw = re.findall(r'<ul class="dropdown-menu">(.*?)</ul>',
                                   str(urllibHTML), re.DOTALL)
        page_list_string = page_list_raw[-1]
        list_of_some_of_the_pages = re.findall(r'href="(.*?)">',
                                               str(page_list_string))
        final_page = list_of_some_of_the_pages[-1]
        number_of_pages = int(re.findall(
            r'http://readms.com/r/.*?/.*?/\d+/(\d+)', final_page)[0])
        chapter_url = re.findall(
            r'(http://readms.com/r/.*?/.*?/\d+/)\d+', final_page)[0]

        for j in range(number_of_pages):
            if j == 0:
                # Skip chapters that are already fully downloaded.
                numOfFileInCWD = len([name for name in os.listdir('.')
                                      if os.path.isfile(name)])
                if numOfFileInCWD == number_of_pages:
                    break
            # Overwrite the "Downloading Page N" line in place.
            print("Downloading Page %d" % (j + 1), end="", flush=True)
            print("\r", end="", flush=True)

            if first_chapter_bool:
                # The first page's HTML was already fetched above.
                first_chapter_bool = False
                page_urllibHTML = urllibHTML
            else:
                page_urllibHTML = fetch(
                    chapter_url + str(j + 1),
                    ['Request 4 Failed. Moving onto the Next manga.',
                     'This was the Page Request that Failed.'])
                if page_urllibHTML is None:
                    return

            image_file_name = re.findall(
                r'<img id="manga-page" src="(.*?)"/></a>',
                str(page_urllibHTML))[0]
            # MangaStream may serve .png, .jpg or .jpeg -- keep the real one.
            image_file_extension = re.findall(r'(\.\D[^\.]+)',
                                              image_file_name)[-1]
            imageName = "Page " + str(j + 1) + image_file_extension
            # Only download pages that are not already on disk.
            if not os.path.isfile(imageName):
                rawImage = fetch(
                    image_file_name,
                    ['Request 4 Failed. Moving onto the Next image.'])
                if rawImage is not None:
                    with open(imageName, 'wb') as fout:
                        fout.write(rawImage)
            # A quick test downloaded ~100 pages/minute (200+ requests); to
            # throttle requests to the site, uncomment: time.sleep(2)
    return
# FULL DOWNLOAD. NO OPTIONS. THIS IS A BOT TO RUN 24/7 TO CHECK FOR UPDATES
def main():
    """Run the update bot forever.

    Performs five download passes two hours apart, then sleeps one week and
    repeats (2 hours = 7200 s, 1 week = 604800 s).  Fill the per-site lists
    below with links to each manga's TOP page, e.g.:

        Batoto:      http://bato.to/comic/_/comics/one-piece-r39
        MangaPanda:  http://www.mangapanda.com/one-piece
        MangaStream: http://mangastream.com/manga/one_piece
        MangaHere:   http://www.mangahere.co/manga/one_piece/
    """
    currentDirectory = os.getcwd()
    if platformType == 'Windows':
        Main_Directory = currentDirectory + "\\Manga_Bot_Folder"
    else:
        Main_Directory = currentDirectory + "/Manga_Bot_Folder"
    try:
        os.makedirs(Main_Directory)
    except OSError:
        # Only ignore the error when the directory already exists.
        if not os.path.isdir(Main_Directory):
            raise
    os.chdir(Main_Directory)
    Main_Directory = os.getcwd()

    counter = 0
    ### PLEASE READ ###
    # Batoto has implemented anti-bot crawling measures.  Find the desired
    # manga on MangaHere or MangaPanda instead and leave batoto_manga empty.
    batoto_manga = []
    mangahere_manga = []
    mangapanda_manga = []
    mangastream_manga = []

    # Dispatch table: (site label, downloader function, list of manga URLs).
    sites = (('Batoto', Batoto, batoto_manga),
             ('MangaHere', MangaHere, mangahere_manga),
             ('MangaPanda', MangaPanda, mangapanda_manga),
             ('MangaStream', MangaStream, mangastream_manga))

    while True:
        for site_name, downloader, urls in sites:
            print("Downloading Manga From %s:\n" % site_name)
            for url in urls:
                # Each downloader chdir's around; reset before every manga.
                os.chdir(Main_Directory)
                downloader(url)
                print('\n')
        counter += 1
        if counter < 5:
            print('\n\nSleeping for 2 Hours.\n')
            time.sleep(7200)
        else:
            counter = 0
            print('\n\nSleeping for 1 Week.\n')
            time.sleep(604800)
if __name__ == '__main__':
    # Guard so importing this module does not start the infinite bot loop.
    main()
    # To see any error/error code, wait before the program exits completely.
    time.sleep(15)
| gpl-2.0 |
jfmartinez64/test | libs/html5lib/trie/datrie.py | 785 | 1166 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
    """html5lib trie interface backed by the C ``datrie`` implementation."""

    def __init__(self, data):
        """Build the trie from *data*, a mapping of text keys to values.

        ``datrie`` requires its alphabet up front, so every character that
        appears in any key is collected before the trie is created.
        """
        for key in data.keys():
            if not isinstance(key, text_type):
                raise TypeError("All keys must be strings")

        alphabet = {char for key in data.keys() for char in key}
        self._data = DATrie("".join(alphabet))
        for key, value in data.items():
            self._data[key] = value

    def __contains__(self, key):
        return key in self._data

    def __len__(self):
        return len(self._data)

    def __iter__(self):
        # Plain iteration is intentionally unsupported; use keys() instead.
        raise NotImplementedError()

    def __getitem__(self, key):
        return self._data[key]

    def keys(self, prefix=None):
        """Return all keys, or only those starting with *prefix*."""
        return self._data.keys(prefix)

    def has_keys_with_prefix(self, prefix):
        return self._data.has_keys_with_prefix(prefix)

    def longest_prefix(self, prefix):
        return self._data.longest_prefix(prefix)

    def longest_prefix_item(self, prefix):
        return self._data.longest_prefix_item(prefix)
| gpl-3.0 |
xiaohan2012/lst | dump_vis_timeline_data.py | 1 | 4095 | from check_k_best_trees import k_best_trees
from meta_graph_stat import MetaGraphStat, build_default_summary_kws_from_path
from datetime import datetime
from collections import Counter
def format_time(dt):
    """Render *dt* as ``YYYY-MM-DD HH:MM:SS``.

    Dates before 1900 fall back to ``str(dt)`` (older strftime
    implementations rejected pre-1900 years).
    """
    if dt.year >= 1900:
        return dt.strftime('%Y-%m-%d %H:%M:%S')
    return str(dt)
def run(cand_trees, k, summary_kws, undirected):
    """Build vis-timeline ``items``/``groups`` data for the top-*k* trees.

    *undirected* is accepted for interface compatibility but unused here.
    """
    interactions = summary_kws['topics']['interactions']
    mid2i = {interaction['message_id']: interaction
             for interaction in interactions}

    trees = k_best_trees(cand_trees, k)
    summaries = [MetaGraphStat(tree, summary_kws).summary_dict()
                 for tree in trees]

    items = []
    groups = []
    start_times = []
    end_times = []
    added_id_count = Counter()
    item_id = 0

    for group_id, (summ, tree) in enumerate(zip(summaries, trees), start=1):
        # One timeline item per interaction node in the tree.
        for node in tree.nodes_iter():
            item_id += 1
            interaction = mid2i[node]
            content = (interaction['subject'].strip()
                       if interaction['subject']
                       else interaction['body'])
            items.append({
                'id': item_id,
                'content': content,
                'start': format_time(interaction['datetime']),
                'group': group_id,
            })
            added_id_count[node] += 1

        span = summ['time_span']

        # One background item spanning the whole event.
        item_id += 1
        items.append({
            'id': item_id,
            'start': format_time(span['start_time']),
            'end': format_time(span['end_time']),
            'content': 'Event {}'.format(group_id),
            'group': group_id,
            'type': 'background',
        })

        group = {
            'id': group_id,
            'terms': summ['topics']['topic_terms'],
            'participants': dict(summ['participants']['participant_count']),
            'start': format_time(span['start_time']),
            'end': format_time(span['end_time']),
            'days': (span['end_time'] - span['start_time']).days,
            'link_type_freq': summ['link_type_freq'],
        }
        if 'hashtags' in summ:
            group['hashtags'] = summ['hashtags']
        groups.append(group)

        start_times.append(span['start_time'])
        end_times.append(span['end_time'])

    return {
        'items': items,
        'groups': groups,
        'start': format_time(min(start_times)),
        'end': format_time(max(end_times)),
    }
def main():
    """CLI entry point: parse arguments, load candidate trees, dump JSON."""
    import argparse
    import cPickle as pkl
    from util import json_dump

    parser = argparse.ArgumentParser('dump vis timeline data')
    for flag in ('--cand_trees_path', '--output_path', '--interactions_path',
                 '--people_path', '--corpus_dict_path', '--lda_model_path'):
        parser.add_argument(flag, required=True)
    parser.add_argument('--people_repr_template', type=str, default="{id}")
    parser.add_argument('-k', type=int, default=10)
    parser.add_argument('--undirected', default=False, action="store_true")
    args = parser.parse_args()

    summary_kws = build_default_summary_kws_from_path(
        args.interactions_path,
        args.people_path,
        args.corpus_dict_path,
        args.lda_model_path,
        args.people_repr_template,
        undirected=args.undirected)

    trees = pkl.load(open(args.cand_trees_path))
    print(len(trees))

    # Enable hashtag summarisation when the trees carry hashtag data.
    first_node = trees[0].nodes()[0]
    if 'hashtags' in trees[0].node[first_node]:
        print('add hashtags')
        summary_kws['hashtags'] = {}

    data = run(trees, args.k, summary_kws, args.undirected)
    json_dump(data, args.output_path)


if __name__ == '__main__':
    main()
| mit |
PythonCHB/NumpyExtras | cython_src/accumulator.py | 1 | 4171 | #!/usr/bin/env python
"""
accumulator class
Designed to be used as an expandable numpy array, to accumulate values, rather
than a python list.
Note that slices return copies, rather than views, unlike regular numpy arrays.
This is so that the buffer can be re-allocated without messing up any views.
"""
import numpy as np
class Accumulator:
    """Expandable 1-d numpy array for accumulating values.

    Works like a growable numpy array: ``append``/``extend`` amortize
    re-allocation by keeping a padded internal buffer.  Unlike regular numpy
    arrays, slices return COPIES rather than views, so the buffer can be
    re-allocated without invalidating anything handed out.
    """

    # Initial capacity of the internal buffer.
    DEFAULT_BUFFER_SIZE = 128
    # Growth factor when the buffer fills up.
    # (array.array uses 1 + 1/16 -- that seems small.)
    BUFFER_EXTEND_SIZE = 1.25

    def __init__(self, object=[], dtype=None, length=None):
        """Create an accumulator from *object* (any 1-d sequence).

        Note: a scalar accumulator doesn't really make sense, so a scalar
        yields a length-1 array instead.  *object* is always copied, so the
        mutable default is safe.  *length* is accepted for backward
        compatibility but unused.
        """
        buffer = np.array(object, dtype=dtype, copy=True)
        if buffer.ndim > 1:
            raise ValueError("accumulator only works with 1-d data")
        buffer.shape = (-1)  # make sure we don't have a scalar
        self.length = buffer.shape[0]
        # Pad the buffer so the first few appends don't trigger a resize.
        # FIX: size must be integral for numpy's resize.
        buffer.resize(int(max(self.DEFAULT_BUFFER_SIZE,
                              self.length * self.BUFFER_EXTEND_SIZE)))
        self.__buffer = buffer

    @property
    def dtype(self):
        """dtype of the stored data."""
        return self.__buffer.dtype

    @property
    def buffersize(self):
        """Size of the internal (padded) buffer."""
        return self.__buffer.size

    @property
    def shape(self):
        """Shape tuple, for compatibility with ``ndarray.shape`` (getter only)."""
        return (self.length,)

    def __len__(self):
        return self.length

    def __array__(self, dtype=None):
        """a.__array__(|dtype) -> copy of array.

        Always returns a copy, so the buffer keeps no outside references.
        """
        return np.array(self.__buffer[:self.length], dtype=dtype, copy=True)

    def append(self, item):
        """Add a new item to the end of the array."""
        try:
            self.__buffer[self.length] = item
            self.length += 1
        except IndexError:  # the buffer is not big enough -- grow and retry
            self.resize(self.length * self.BUFFER_EXTEND_SIZE)
            self.append(item)

    def extend(self, items):
        """Add a sequence of new items to the end of the array."""
        try:
            self.__buffer[self.length:self.length + len(items)] = items
            self.length += len(items)
        except ValueError:  # the buffer is not big enough -- grow and retry
            self.resize((self.length + len(items)) * self.BUFFER_EXTEND_SIZE)
            self.extend(items)

    def resize(self, newsize):
        """Resize the internal buffer.

        Useful to pre-allocate when the eventual size is known in advance.
        """
        newsize = int(newsize)  # FIX: numpy requires an integral size
        if newsize < self.length:
            raise ValueError("accumulator buffer cannot be made smaller "
                             "than the length of the data")
        self.__buffer.resize(newsize)

    def fitbuffer(self):
        """Shrink the buffer so it exactly fits the data (no padding)."""
        self.__buffer.resize(self.length)

    def __getitem__(self, index):
        # FIX: negative indices now count from the end (the original mapped
        # EVERY negative index to the last element).  Slices are supported
        # here as well, since Python 3 never calls __getslice__; a COPY is
        # returned, consistent with the class contract.
        if isinstance(index, slice):
            return self.__buffer[:self.length][index].copy()
        if index < 0:
            index += self.length
        if index < 0 or index > self.length - 1:
            raise IndexError("index out of bounds")
        return self.__buffer[index]

    def __getslice__(self, i, j):
        """a.__getslice__(i, j) <==> a[i:j]  (Python 2 only).

        Returns a COPY, not a view, so the buffer can be re-allocated
        without breaking callers.  Negative indices are not supported.
        """
        if j > self.length:
            j = self.length
        return self.__buffer[i:j].copy()

    def __str__(self):
        return self.__buffer[:self.length].__str__()

    def __repr__(self):
        # Reuse ndarray's repr, swapping the "array" prefix for our name.
        return "Accumulator%s" % self.__buffer[:self.length].__repr__()[5:]
| bsd-2-clause |
nwjs/chromium.src | third_party/blink/tools/blinkpy/common/system/crash_logs_unittest.py | 2 | 6042 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from blinkpy.common.system.crash_logs import CrashLogs
from blinkpy.common.system.filesystem_mock import MockFileSystem
from blinkpy.common.system.system_host import SystemHost
from blinkpy.common.system.system_host_mock import MockSystemHost
def make_mock_crash_report_darwin(process_name, pid):
    """Return a fake Darwin crash-report body for *process_name* / *pid*."""
    template = """Process: {process_name} [{pid}]
Path: /Volumes/Data/slave/x-release-tests/build/WebKitBuild/Release/{process_name}
Identifier: {process_name}
Version: ??? (???)
Code Type: X86-64 (Native)
Parent Process: Python [2578]
Date/Time: 2011-12-07 13:27:34.816 -0800
OS Version: Mac OS X 10.6.8 (10K549)
Report Version: 6
Interval Since Last Report: 1660 sec
Crashes Since Last Report: 1
Per-App Crashes Since Last Report: 1
Anonymous UUID: 507D4EEB-9D70-4E2E-B322-2D2F0ABFEDC0
Exception Type: EXC_BREAKPOINT (SIGTRAP)
Exception Codes: 0x0000000000000002, 0x0000000000000000
Crashed Thread: 0
Dyld Error Message:
Library not loaded: /Volumes/Data/WebKit-BuildSlave/x-release/build/WebKitBuild/Release/WebCore.framework/Versions/A/WebCore
Referenced from: /Volumes/Data/slave/x-release/build/WebKitBuild/Release/WebKit.framework/Versions/A/WebKit
Reason: image not found
Binary Images:
0x7fff5fc00000 - 0x7fff5fc3be0f dyld 132.1 (???) <29DECB19-0193-2575-D838-CF743F0400B2> /usr/lib/dyld
System Profile:
Model: Xserve3,1, BootROM XS31.0081.B04, 8 processors, Quad-Core Intel Xeon, 2.26 GHz, 6 GB, SMC 1.43f4
Graphics: NVIDIA GeForce GT 120, NVIDIA GeForce GT 120, PCIe, 256 MB
Memory Module: global_name
Network Service: Ethernet 2, Ethernet, en1
PCI Card: NVIDIA GeForce GT 120, sppci_displaycontroller, MXM-Slot
Serial ATA Device: OPTIARC DVD RW AD-5670S
"""
    return template.format(process_name=process_name, pid=pid)
class CrashLogsTest(unittest.TestCase):
    def test_find_log_darwin(self):
        """Exercise CrashLogs.find_newest_log against a mocked
        DiagnosticReports directory (Mac hosts only)."""
        if not SystemHost().platform.is_mac():
            return

        report_28528 = make_mock_crash_report_darwin('DumpRenderTree', 28528)
        report_28530 = make_mock_crash_report_darwin('DumpRenderTree', 28530)
        report_28529 = make_mock_crash_report_darwin('DumpRenderTree', 28529)
        other_process_report = make_mock_crash_report_darwin('FooProcess', 28527)
        misformatted_report = ('Junk that should not appear in a crash report'
                               + make_mock_crash_report_darwin('DumpRenderTree', 28526)[200:])

        report_dir = '/Users/mock/Library/Logs/DiagnosticReports/'
        files = {
            report_dir + 'DumpRenderTree_2011-06-13-150718_quadzen.crash': report_28528,
            report_dir + 'DumpRenderTree_2011-06-13-150719_quadzen.crash': report_28530,
            report_dir + 'DumpRenderTree_2011-06-13-150720_quadzen.crash': report_28529,
            report_dir + 'DumpRenderTree_2011-06-13-150721_quadzen.crash': None,
            report_dir + 'DumpRenderTree_2011-06-13-150722_quadzen.crash': other_process_report,
            report_dir + 'DumpRenderTree_2011-06-13-150723_quadzen.crash': misformatted_report,
        }
        filesystem = MockFileSystem(files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))

        # The log with the newest filename timestamp wins; a pid narrows
        # the search to that process instance.
        self.assertMultiLineEqual(crash_logs.find_newest_log('DumpRenderTree'), report_28529)
        self.assertMultiLineEqual(crash_logs.find_newest_log('DumpRenderTree', 28529), report_28529)
        self.assertMultiLineEqual(crash_logs.find_newest_log('DumpRenderTree', 28530), report_28530)
        self.assertIsNone(crash_logs.find_newest_log('DumpRenderTree', 28531))
        self.assertIsNone(crash_logs.find_newest_log('DumpRenderTree', newer_than=1.0))

        def raise_ioerror(_):
            raise IOError('IOError: No such file or directory')

        def raise_oserror(_):
            raise OSError('OSError: No such file or directory')

        # Filesystem errors are reported inline when include_errors=True.
        filesystem.read_text_file = raise_ioerror
        log = crash_logs.find_newest_log('DumpRenderTree', 28531, include_errors=True)
        self.assertIn('IOError: No such file or directory', log)

        filesystem = MockFileSystem(files)
        crash_logs = CrashLogs(MockSystemHost(filesystem=filesystem))
        filesystem.mtime = raise_oserror
        log = crash_logs.find_newest_log('DumpRenderTree', newer_than=1.0, include_errors=True)
        self.assertIn('OSError: No such file or directory', log)
| bsd-3-clause |
bsdlp/pugstar | pugstar/__init__.py | 3 | 1546 | import logging
import os
import podhub.meh
import steam
from flask import Flask
from flask.ext.migrate import Migrate, MigrateCommand
from flask.ext.openid import OpenID
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.script import Manager
from logging import FileHandler
from os.path import expanduser
from werkzeug.contrib.cache import MemcachedCache
from werkzeug.exceptions import default_exceptions
def find_config():
    """Return the first readable pugstar configuration file.

    Candidates are checked in order: the per-user config
    (``~/.config/pugstar/config.py``) followed by the system-wide one
    (``/etc/pugstar/config.py``).

    :return path: First readable config available, or the per-user path
        when none is readable (the caller passes ``silent=True`` to
        ``app.config.from_pyfile``, so a missing file is tolerated).
    :rtype path: ``str``
    """
    home = expanduser('~/.config/pugstar/config.py')
    system = expanduser('/etc/pugstar/config.py')
    # An explicit loop works on both Python 2 and 3.  The previous
    # implementation called next() on the result of filter(), which is a
    # list on Python 2 (TypeError) and raises StopIteration on Python 3
    # when no candidate is readable.
    for path in (home, system):
        if path and os.access(path, os.R_OK):
            return path
    # No readable config found: fall back to the per-user path so that
    # from_pyfile(..., silent=True) degrades gracefully instead of the
    # caller crashing before the app is even created.
    return home
# Flask application object; configuration is loaded from the first readable
# config file (missing files are tolerated thanks to silent=True).
app = Flask(__name__)
app.config.from_pyfile(find_config(), silent=True)
# Log to a file at the configured level
# (defaults: /var/log/pugstar/app.log, WARNING).
file_handler = FileHandler(
    app.config.get('LOG_FILE', '/var/log/pugstar/app.log'))
file_handler.setLevel(
    getattr(logging, app.config.get('LOG_LEVEL', 'WARNING').upper()))
app.logger.addHandler(file_handler)
# Render every default HTTP error code as a JSON response via podhub.meh.
for code in default_exceptions.keys():
    app.error_handler_spec[None][code] = podhub.meh.util.make_json_error
# Application-wide extension singletons (database, OpenID auth, cache,
# migrations, Steam API key, management CLI).
db = SQLAlchemy(app)
oid = OpenID(app, app.config.get('OPENID_STORE'))
cache = MemcachedCache(
    servers=app.config.get('MEMCACHED_HOSTS', ['127.0.0.1']),
    default_timeout=app.config.get('MEMCACHED_DEFAULT_TIMEOUT', 300))
migrate = Migrate(app, db)
steam.api.key.set(app.config.get('STEAM_API_KEY'))
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# Imported last so these modules can use the objects defined above.
from . import auth, models, views
| mit |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pygame/tests/sndarray_test.py | 6 | 7264 | if __name__ == '__main__':
import sys
import os
pkg_dir = os.path.split(os.path.abspath(__file__))[0]
parent_dir, pkg_name = os.path.split(pkg_dir)
is_pygame_pkg = (pkg_name == 'tests' and
os.path.split(parent_dir)[1] == 'pygame')
if not is_pygame_pkg:
sys.path.insert(0, parent_dir)
else:
is_pygame_pkg = __name__.startswith('pygame.tests.')
import unittest
import pygame
from pygame.compat import as_bytes
import pygame.sndarray
from numpy import \
int8, int16, uint8, uint16, array, alltrue
arraytype = "numpy"
class SndarrayTest (unittest.TestCase):
    """Round-trip tests for pygame.sndarray <-> numpy array conversion.

    Uses the modern unittest assertion names (assertTrue/assertEqual/
    assertRaises/assertIn); the deprecated failUnless* aliases were
    removed in Python 3.12.
    """
    if arraytype:
        # mixer sample size -> expected numpy element type
        array_dtypes = {8: uint8, -8: int8, 16: uint16, -16: int16}

    def _assert_compatible(self, arr, size):
        """Assert that *arr* uses the element type matching sample *size*."""
        dtype = self.array_dtypes[size]
        if arraytype == 'numpy':
            self.assertEqual(arr.dtype, dtype)
        else:
            self.assertEqual(arr.typecode(), dtype)

    def test_import(self):
        'does it import'
        if not arraytype:
            self.fail("no array package installed")
        import pygame.sndarray

    def test_array(self):
        if not arraytype:
            self.fail("no array package installed")

        def check_array(size, channels, test_data):
            """Round-trip *test_data* through make_sound/array at *size*."""
            try:
                pygame.mixer.init(22050, size, channels)
            except pygame.error:
                # Not all sizes are supported on all systems.
                return
            try:
                __, sz, __ = pygame.mixer.get_init()
                if sz == size:
                    srcarr = array(test_data, self.array_dtypes[size])
                    snd = pygame.sndarray.make_sound(srcarr)
                    arr = pygame.sndarray.array(snd)
                    self._assert_compatible(arr, size)
                    self.assertTrue(alltrue(arr == srcarr),
                                    "size: %i\n%s\n%s" %
                                    (size, arr, test_data))
            finally:
                pygame.mixer.quit()

        check_array(8, 1, [0, 0x0f, 0xf0, 0xff])
        check_array(8, 2,
                    [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]])
        check_array(16, 1, [0, 0x00ff, 0xff00, 0xffff])
        check_array(16, 2, [[0, 0xffff], [0xffff, 0],
                            [0x00ff, 0xff00], [0x0f0f, 0xf0f0]])
        check_array(-8, 1, [0, -0x80, 0x7f, 0x64])
        check_array(-8, 2,
                    [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]])
        check_array(-16, 1, [0, 0x7fff, -0x7fff, -1])
        check_array(-16, 2, [[0, -0x7fff], [-0x7fff, 0],
                             [0x7fff, 0], [0, 0x7fff]])

    def test_get_arraytype(self):
        if not arraytype:
            self.fail("no array package installed")
        self.assertIn(pygame.sndarray.get_arraytype(),
                      ['numpy'],
                      ("unknown array type %s" %
                       pygame.sndarray.get_arraytype()))

    def test_get_arraytypes(self):
        if not arraytype:
            self.fail("no array package installed")
        arraytypes = pygame.sndarray.get_arraytypes()
        self.assertIn('numpy', arraytypes)
        for atype in arraytypes:
            self.assertIn(atype, ['numpy'],
                          "unknown array type %s" % atype)

    def test_make_sound(self):
        if not arraytype:
            self.fail("no array package installed")

        def check_sound(size, channels, test_data):
            """Make a Sound from *test_data* and verify its samples."""
            try:
                pygame.mixer.init(22050, size, channels)
            except pygame.error:
                # Not all sizes are supported on all systems.
                return
            try:
                __, sz, __ = pygame.mixer.get_init()
                if sz == size:
                    srcarr = array(test_data, self.array_dtypes[size])
                    snd = pygame.sndarray.make_sound(srcarr)
                    arr = pygame.sndarray.samples(snd)
                    self.assertTrue(alltrue(arr == srcarr),
                                    "size: %i\n%s\n%s" %
                                    (size, arr, test_data))
            finally:
                pygame.mixer.quit()

        check_sound(8, 1, [0, 0x0f, 0xf0, 0xff])
        check_sound(8, 2,
                    [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]])
        check_sound(16, 1, [0, 0x00ff, 0xff00, 0xffff])
        check_sound(16, 2, [[0, 0xffff], [0xffff, 0],
                            [0x00ff, 0xff00], [0x0f0f, 0xf0f0]])
        check_sound(-8, 1, [0, -0x80, 0x7f, 0x64])
        check_sound(-8, 2,
                    [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]])
        check_sound(-16, 1, [0, 0x7fff, -0x7fff, -1])
        check_sound(-16, 2, [[0, -0x7fff], [-0x7fff, 0],
                             [0x7fff, 0], [0, 0x7fff]])

    def test_samples(self):
        if not arraytype:
            self.fail("no array package installed")
        null_byte = as_bytes('\x00')

        def check_sample(size, channels, test_data):
            """Write *test_data* into a zeroed Sound and read it back."""
            try:
                pygame.mixer.init(22050, size, channels)
            except pygame.error:
                # Not all sizes are supported on all systems.
                return
            try:
                __, sz, __ = pygame.mixer.get_init()
                if sz == size:
                    zeroed = null_byte * ((abs(size) // 8) *
                                          len(test_data) *
                                          channels)
                    snd = pygame.mixer.Sound(buffer=zeroed)
                    samples = pygame.sndarray.samples(snd)
                    self._assert_compatible(samples, size)
                    ##print ('X %s' % (samples.shape,))
                    ##print ('Y %s' % (test_data,))
                    samples[...] = test_data
                    arr = pygame.sndarray.array(snd)
                    self.assertTrue(alltrue(samples == arr),
                                    "size: %i\n%s\n%s" %
                                    (size, arr, test_data))
            finally:
                pygame.mixer.quit()

        check_sample(8, 1, [0, 0x0f, 0xf0, 0xff])
        check_sample(8, 2,
                     [[0, 0x80], [0x2D, 0x41], [0x64, 0xA1], [0xff, 0x40]])
        check_sample(16, 1, [0, 0x00ff, 0xff00, 0xffff])
        check_sample(16, 2, [[0, 0xffff], [0xffff, 0],
                             [0x00ff, 0xff00], [0x0f0f, 0xf0f0]])
        check_sample(-8, 1, [0, -0x80, 0x7f, 0x64])
        check_sample(-8, 2,
                     [[0, -0x80], [-0x64, 0x64], [0x25, -0x50], [0xff, 0]])
        check_sample(-16, 1, [0, 0x7fff, -0x7fff, -1])
        check_sample(-16, 2, [[0, -0x7fff], [-0x7fff, 0],
                              [0x7fff, 0], [0, 0x7fff]])

    def test_use_arraytype(self):
        if not arraytype:
            self.fail("no array package installed")

        def do_use_arraytype(atype):
            pygame.sndarray.use_arraytype(atype)

        pygame.sndarray.use_arraytype('numpy')
        self.assertEqual(pygame.sndarray.get_arraytype(), 'numpy')
        self.assertRaises(ValueError, do_use_arraytype, 'not an option')
if __name__ == '__main__':
    # Run the sndarray test suite when this module is executed directly.
    unittest.main()
| gpl-3.0 |
MurpheyLab/trep | examples/pendulum.py | 2 | 2235 | # Simulate an arbitrarily long pendulum.
import sys
import math
import time
import trep
import trep.potentials
import trep.visual as visual
links = 5    # number of pendulum links to build
dt = 0.01    # integrator time step
tf = 10.0    # final simulation time
def simulate_system(system):
    """Integrate *system* forward from its current configuration.

    Runs a midpoint variational integrator with the module-level step
    ``dt`` until the final time ``tf`` and returns a ``(times,
    configurations)`` tuple of equal-length lists.
    """
    # Seed the integrator with the system's current configuration used
    # for both of the two required initial configurations.
    initial_q = system.q
    integrator = trep.MidpointVI(system)
    integrator.initialize_from_configs(0.0, initial_q, dt, initial_q)
    # Step until the final time, recording each (time, configuration).
    times = [integrator.t2]
    configurations = [integrator.q2]
    while integrator.t1 < tf:
        integrator.step(integrator.t2 + dt)
        times.append(integrator.t2)
        configurations.append(integrator.q2)
    return (times, configurations)
def make_pendulum(num_links):
    """
    make_pendulum(num_links) -> System

    Create a pendulum system with num_links.

    Each link is an RX joint named ``link-<i>`` (whose configuration
    variable shares the name) followed by a length-1 TZ offset carrying
    a 1.0 point mass.  The topmost joint is rotated to pi/4 as the
    initial condition.
    """
    system = trep.System()
    trep.potentials.Gravity(system, name="Gravity")
    # Build the chain iteratively (the original used recursion): each
    # pass hangs one more joint and point mass below the previous tip.
    parent = system.world_frame
    for index in range(num_links):
        # Rotational joint parameterized by a same-named config variable.
        joint = trep.Frame(parent, trep.RX, "link-%d" % index,
                           "link-%d" % index)
        # Unit-length rod down to the point mass at the link's tip
        # (point mass only, no rotational inertia).
        tip = trep.Frame(joint, trep.TZ, -1)
        tip.set_mass(1.0)
        parent = tip
    system.get_config("link-0").q = math.pi/4.0
    return system
# Create the pendulum model.
system = make_pendulum(links)
# Simulate it with the variational integrator and time the run.
# NOTE(review): time.clock() was removed in Python 3.8; this script
# targets Python 2 (print statements below).
start = time.clock()
(t, q) = simulate_system(system)
finish = time.clock()
# Display timing information and a 3D animation of the trajectory.
print "%d-link pendulum, dt=%f, tf=%f... runtime=%f s" % (links, dt, tf, finish-start)
visual.visualize_3d([ visual.VisualItem3D(system, t, q) ])
| gpl-3.0 |
chb/indivo_server | indivo/tests/unit/models/record.py | 4 | 5920 | from indivo.tests.internal_tests import InternalTests, enable_transactions
from indivo.tests.data.account import TEST_ACCOUNTS
from indivo.tests.data.record import TEST_RECORDS
from indivo.tests.data.app import TEST_USERAPPS
from indivo.tests.data.message import TEST_MESSAGES
from indivo.models import Record, Carenet, Notification, Message, RecordNotificationRoute
from django.db import IntegrityError, transaction
from django.conf import settings
import copy
class RecordModelUnitTests(InternalTests):
    """Unit tests for the Record model: creation, sharing, app access,
    notifications, messaging and default carenets."""

    def setUp(self):
        """Create the account/record/app/notification-route fixtures."""
        super(RecordModelUnitTests,self).setUp()
        # An Account
        self.account = self.createAccount(TEST_ACCOUNTS, 1)
        # A record for that account
        self.record = self.createRecord(TEST_RECORDS, 1, owner=self.account)
        # An account, full-shared with our record
        self.s_account = self.createAccount(TEST_ACCOUNTS, 2)
        self.shareRecordFull(self.record, self.s_account)
        # An account, shared with nobody
        self.u_account = self.createAccount(TEST_ACCOUNTS, 3)
        # An app, shared with us
        self.app = self.createUserApp(TEST_USERAPPS, 0)
        self.addAppToRecord(self.record, self.app)
        # An app that isn't shared
        self.u_app = self.createUserApp(TEST_USERAPPS, 1)
        # Notification routes for our shared accounts
        self.rnrs = []
        self.rnrs.append(RecordNotificationRoute.objects.create(record=self.record, account=self.account))
        self.rnrs.append(RecordNotificationRoute.objects.create(record=self.record, account=self.s_account))
        # A message we've received
        self.msg = self.createMessage(TEST_MESSAGES, 0, about_record=self.record)

    def tearDown(self):
        super(RecordModelUnitTests,self).tearDown()

    # Not calling self.createRecord here, since that calls record.create_default_carenets, which is
    # transaction-managed. This is why we have to be very careful with the @enable_transactions
    # decorator.
    @enable_transactions
    def test_construction(self):
        """Records save normally with or without an external id."""
        # should save normally with proper data, external_id or no
        try:
            args = {'label':'empty_record',
                    'owner':self.account,
                    }
            r = Record.objects.create(**args)
        except IntegrityError:
            transaction.rollback()
            self.fail('Could not create record with standard args')
        else:
            self.assertEqual(r, Record.objects.get(pk=r.pk))
        try:
            args = {'label':'test_record_extid',
                    'owner':self.account,
                    'external_id':Record.prepare_external_id('RECORD5_EXTID',self.account.email),
                    }
            r = Record.objects.create(**args)
        except IntegrityError:
            transaction.rollback()
            self.fail('Could not create record with external id')
        else:
            self.assertEqual(r, Record.objects.get(pk=r.pk))

    def test_prepare_external_id(self):
        """External ids are namespaced by the principal's email."""
        local_id = 'TEST_ID'
        principal_email = 'a@b.com'
        self.assertEqual(Record.prepare_external_id(local_id, principal_email), '%s/%s'%(principal_email, local_id))
        self.assertEqual(Record.prepare_external_id(None, principal_email), None)

    def test_can_admin(self):
        self.assertTrue(self.record.can_admin(self.account))
        self.assertTrue(self.record.can_admin(self.s_account))
        self.assertFalse(self.record.can_admin(self.u_account))

    def test_phas(self):
        self.assertEqual(self.record.phas, [self.app])

    def test_has_pha(self):
        self.assertTrue(self.record.has_pha(self.app))
        self.assertFalse(self.record.has_pha(self.u_app))

    def test_get_accounts_to_notify(self):
        self.assertEqual(set(self.record.get_accounts_to_notify()), set([self.account, self.s_account]))

    def test_get_messages(self):
        self.assertEqual(list(self.record.get_messages()), [self.msg])

    def test_send_message(self):
        """Messages reach exactly the accounts with notification routes."""
        self.record.send_message('msg_ext_id', self.app, 'subj', 'body', severity='high')
        # Make sure the right people got notified
        for account in [rnr.account for rnr in self.rnrs]:
            self.assertTrue(Message.objects.filter(sender=self.app,
                                                   about_record=self.record,
                                                   account=account).exists())
        self.assertFalse(Message.objects.filter(sender=self.app,
                                                about_record=self.record,
                                                account=self.u_account).exists())

    def test_notify(self):
        """Notifications reach exactly the accounts with routes."""
        self.record.notify(self.app, 'Notify This!', app_url=self.app.callback_url)
        # and without optional params
        self.record.notify(self.app, 'Notify This Twice!')
        # Make sure the right people got notified
        for account in [rnr.account for rnr in self.rnrs]:
            self.assertEqual(Notification.objects.filter(record=self.record, account=account).count(), 2)
        self.assertEqual(Notification.objects.filter(record=self.record, account=self.u_account).count(), 0)

    def test_create_default_carenets(self):
        # Eliminate all of our default carenets
        Carenet.objects.filter(record=self.record).delete()
        self.assertEqual(Carenet.objects.filter(record=self.record).count(), 0)
        # And recreate them
        self.record.create_default_carenets()
        self.assertEqual(Carenet.objects.filter(record=self.record).count(), len(settings.INDIVO_DEFAULT_CARENETS))
        for name in settings.INDIVO_DEFAULT_CARENETS:
            self.assertTrue(Carenet.objects.filter(record=self.record, name=name).exists())

    def test_carenet_alias_id(self):
        self.assertEqual(self.record.carenet_alias_id, self.record.id)
| gpl-3.0 |
WladimirSidorenko/DiscourseSegmenter | dsegmenter/common.py | 1 | 4516 | #!/usr/bin/env python2.7
# -*- mode: python; coding: utf-8; -*-
##################################################################
# Documentation
"""Module defining methods common to many modules.
Attributes:
_ispunct (method): check if word consists only of punctuation characters
prune_punc (method): remove tokens representing punctuation from set
read_segments (method): default method for reading segment files
score_substitute (method): custom weighting function used for token alignment
translate_toks (method): replace tokens and return updated set
"""
##################################################################
# Imports
import string
##################################################################
# Constants
# Dictionary keys shared by the segmenter modules.
# NOTE(review): these look like CONLL-style token attributes (dependencies,
# relation, POS tag, word form) — confirm against the callers.
DEPS = "deps"
NONE = str(None)  # string representation of a missing value ("None")
REL = "rel"
TAG = "tag"
WORD = "word"
##################################################################
# Methods
def _ispunct(a_word):
"""Check if word consists only of punctuation characters.
Args:
a_word (str): word to check
Returns:
bool: True if word consists only of punctuation characters,
False otherwise
"""
return all(c in string.punctuation for c in a_word)
def prune_punc(a_toks):
    """Drop tokens whose word part is pure punctuation.

    Args:
        a_toks (iterable): original tokens, each ending in its word form

    Returns:
        frozenset: tokens whose word contains at least one
            non-punctuation character
    """
    # A token is kept iff some character of its word is not punctuation
    # (so empty words are dropped, matching the all()-based check).
    return frozenset(tok for tok in a_toks
                     if any(ch not in string.punctuation for ch in tok[-1]))
def read_segments(a_lines):
    """Read segment-annotated lines and map token sets to segments.

    Args:
        a_lines (list): decoded lines of the input file; tokens are
            space-separated, ``(NAME`` opens a segment and a lone ``)``
            closes the innermost open segment

    Returns:
        dict: mapping from frozensets of ``(token_index, token)`` pairs
            to ``(segment_index, segment_name)`` pairs; when several
            segments cover the same token set, the first (lowest-index)
            one encountered wins
    """
    segs2toks = {}
    s_c = t_c = 0
    tokens = []
    atoks = []
    new_seg = None
    active_tokens = set()
    active_segments = []
    # read segments
    for iline in a_lines:
        iline = iline.strip()
        if not iline:
            continue
        # do some clean-up
        active_tokens.clear()
        del atoks[:]
        del active_segments[:]
        tokens = iline.split()
        # establish correspondence between tokens and segments
        for tok in tokens:
            if tok[0] == '(' and len(tok) > 1:
                # flush pending tokens into every currently open segment,
                # then open the new segment
                active_tokens = set(atoks)
                del atoks[:]
                for a_s in active_segments:
                    segs2toks[a_s].update(active_tokens)
                new_seg = (s_c, tok[1:])
                active_segments.append(new_seg)
                segs2toks[new_seg] = set()
                s_c += 1
                continue
            elif tok == ')':
                assert active_segments, \
                    "Unbalanced closing parenthesis at line: " + repr(iline)
                # flush pending tokens, then close the innermost segment
                active_tokens = set(atoks)
                del atoks[:]
                for a_s in active_segments:
                    segs2toks[a_s].update(active_tokens)
                active_segments.pop()
                continue
            else:
                atoks.append((t_c, tok))
                t_c += 1
        assert not active_segments, \
            "Unbalanced opening parenthesis at line: " + repr(iline)
    toks2segs = dict()
    # `dict.keys()` is a view without sort() on Python 3; iterate a
    # sorted() copy instead (works on both Python 2 and 3).
    for seg in sorted(segs2toks, key=lambda el: el[0]):
        toks = frozenset(segs2toks[seg])
        # the same tokenset can correspond to multiple segments; in that
        # case we keep the first one we encounter (the dead assert that
        # followed this check was unreachable and has been removed)
        if toks in toks2segs:
            continue
        toks2segs[toks] = seg
    return toks2segs
def score_substitute(a_c1, a_c2):
    """Alignment weight for substituting one token for another.

    Args:
        a_c1 (str): first word to compare
        a_c2 (str): second word to compare

    Returns:
        int: 2 when the final characters of both words agree, -3 otherwise
    """
    if a_c1[-1] == a_c2[-1]:
        return 2
    return -3
def translate_toks(a_toks, a_translation):
    """Map every token through a translation table.

    Args:
        a_toks (iterable): tokens to be translated
        a_translation (dict): mapping from tokens to iterables of
            replacement tokens, or None to keep the input untouched

    Returns:
        frozenset: translated tokens (the original object is returned
            unchanged when *a_translation* is None)
    """
    if a_translation is None:
        return a_toks
    translated = set()
    for tok in a_toks:
        translated.update(a_translation[tok])
    return frozenset(translated)
| mit |
indico/indico | indico/modules/events/models/persons.py | 1 | 17383 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from operator import attrgetter
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm import mapper
from indico.core import signals
from indico.core.db.sqlalchemy import PyIntEnum, UTCDateTime, db
from indico.core.db.sqlalchemy.principals import EmailPrincipal
from indico.core.db.sqlalchemy.util.models import auto_table_args, override_attr
from indico.core.db.sqlalchemy.util.session import no_autoflush
from indico.modules.users.models.users import PersonMixin, UserTitle
from indico.util.decorators import strict_classproperty
from indico.util.locators import locator_property
from indico.util.string import format_repr
class PersonLinkDataMixin:
    @property
    def person_link_data(self):
        """Map each person link of this object to its submitter flag."""
        return {link: link.is_submitter for link in self.person_links}

    @person_link_data.setter
    @no_autoflush
    def person_link_data(self, value):
        """Replace the person links and synchronize submission rights."""
        # Persons dropped from the new mapping lose their submission right.
        removed_links = set(self.person_links) - value.keys()
        for link in removed_links:
            principal = link.person.principal
            if principal:
                self.update_principal(principal, del_permissions={'submit'})
        # Install the new set of links, then grant or revoke the 'submit'
        # permission according to each link's flag.
        self.person_links = list(value.keys())
        for link, is_submitter in value.items():
            principal = link.person.principal
            if not principal:
                continue
            if is_submitter:
                self.update_principal(principal, add_permissions={'submit'})
            else:
                self.update_principal(principal, del_permissions={'submit'})
class AuthorsSpeakersMixin:
    AUTHORS_SPEAKERS_DISPLAY_ORDER_ATTR = 'display_order_key'

    def _ordered_person_links(self):
        """Person links sorted by the class's display-order attribute."""
        sort_key = attrgetter(self.AUTHORS_SPEAKERS_DISPLAY_ORDER_ATTR)
        return sorted(self.person_links, key=sort_key)

    @property
    def speakers(self):
        """All person links flagged as speakers, in display order."""
        return [link for link in self._ordered_person_links()
                if link.is_speaker]

    @property
    def primary_authors(self):
        """All primary-author person links, in display order."""
        from indico.modules.events.contributions.models.persons import AuthorType
        return [link for link in self._ordered_person_links()
                if link.author_type == AuthorType.primary]

    @property
    def secondary_authors(self):
        """All secondary-author (co-author) person links, in display order."""
        from indico.modules.events.contributions.models.persons import AuthorType
        return [link for link in self._ordered_person_links()
                if link.author_type == AuthorType.secondary]
class EventPerson(PersonMixin, db.Model):
    """A person inside an event, e.g. a speaker/author etc."""
    __tablename__ = 'persons'
    __table_args__ = (db.UniqueConstraint('event_id', 'user_id'),
                      db.CheckConstraint('email = lower(email)', 'lowercase_email'),
                      db.Index(None, 'event_id', 'email', unique=True, postgresql_where=db.text("email != ''")),
                      {'schema': 'events'})
    id = db.Column(
        db.Integer,
        primary_key=True
    )
    #: the event this person belongs to
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        nullable=False,
        index=True
    )
    #: the Indico user linked to this person; NULL for email-only persons
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        nullable=True,
        index=True
    )
    first_name = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    last_name = db.Column(
        db.String,
        nullable=False
    )
    email = db.Column(
        db.String,
        nullable=False,
        index=True,
        default=''
    )
    # the title of the user - you usually want the `title` property!
    _title = db.Column(
        'title',
        PyIntEnum(UserTitle),
        nullable=False,
        default=UserTitle.none
    )
    affiliation = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    address = db.Column(
        db.Text,
        nullable=False,
        default=''
    )
    phone = db.Column(
        db.String,
        nullable=False,
        default=''
    )
    #: NOTE(review): presumably when an invitation was sent to this person;
    #: never set in this module — confirm against callers
    invited_dt = db.Column(
        UTCDateTime,
        nullable=True
    )
    #: whether the person has not been linked to anything yet; cleared by
    #: the `_mark_not_untrusted` listeners at the bottom of this module
    is_untrusted = db.Column(
        db.Boolean,
        nullable=False,
        default=False
    )
    event = db.relationship(
        'Event',
        lazy=True,
        backref=db.backref(
            'persons',
            cascade='all, delete-orphan',
            cascade_backrefs=False,
            lazy='dynamic'
        )
    )
    user = db.relationship(
        'User',
        lazy=True,
        backref=db.backref(
            'event_persons',
            cascade_backrefs=False,
            lazy='dynamic'
        )
    )
    # relationship backrefs:
    # - abstract_links (AbstractPersonLink.person)
    # - contribution_links (ContributionPersonLink.person)
    # - event_links (EventPersonLink.person)
    # - session_block_links (SessionBlockPersonLink.person)
    # - subcontribution_links (SubContributionPersonLink.person)

    @locator_property
    def locator(self):
        """URL locator: the event's locator plus this person's id."""
        return dict(self.event.locator, person_id=self.id)

    def __repr__(self):
        return format_repr(self, 'id', is_untrusted=False, _text=self.full_name)

    @property
    def principal(self):
        """The underlying principal: the linked User, an EmailPrincipal
        built from the email, or None if neither is available."""
        if self.user is not None:
            return self.user
        elif self.email:
            return EmailPrincipal(self.email)
        return None

    @property
    def identifier(self):
        """Unique string identifier for use in ACLs."""
        return f'EventPerson:{self.id}'

    @classmethod
    def create_from_user(cls, user, event=None, is_untrusted=False):
        """Create a new (unpersisted) EventPerson mirroring *user*'s data."""
        return EventPerson(user=user, event=event, first_name=user.first_name, last_name=user.last_name,
                           email=user.email, affiliation=user.affiliation, address=user.address, phone=user.phone,
                           is_untrusted=is_untrusted)

    @classmethod
    def for_user(cls, user, event=None, is_untrusted=False):
        """Return EventPerson for a matching User in Event creating if needed."""
        person = event.persons.filter_by(user=user).first() if event else None
        return person or cls.create_from_user(user, event, is_untrusted=is_untrusted)

    @classmethod
    def merge_users(cls, target, source):
        """Merge the EventPersons of two users.

        :param target: The target user of the merge
        :param source: The user that is being merged into `target`
        """
        existing_persons = {ep.event_id: ep for ep in target.event_persons}
        for event_person in source.event_persons:
            existing = existing_persons.get(event_person.event_id)
            if existing is None:
                event_person.user = target
            else:
                existing.merge_person_info(event_person)
                db.session.delete(event_person)
        db.session.flush()

    @classmethod
    def link_user_by_email(cls, user):
        """
        Link all email-based persons matching the user's
        email addresses with the user.

        :param user: A User object.
        """
        from indico.modules.events.models.events import Event
        query = (cls.query
                 .join(EventPerson.event)
                 .filter(~Event.is_deleted,
                         cls.email.in_(user.all_emails),
                         cls.user_id.is_(None)))
        for event_person in query:
            existing = (cls.query
                        .filter_by(user_id=user.id, event_id=event_person.event_id)
                        .one_or_none())
            if existing is None:
                event_person.user = user
            else:
                existing.merge_person_info(event_person)
                db.session.delete(event_person)
        db.session.flush()

    def sync_user(self, *, notify=True):
        """Update all person data based on the current user data.

        :param notify: Whether to trigger the ``person_updated`` signal.
        """
        if not self.user:
            return
        fields = ('first_name', 'last_name', 'email', 'affiliation', 'address', 'phone')
        has_changes = False
        for field in fields:
            new = getattr(self.user, field)
            if getattr(self, field) != new:
                setattr(self, field, new)
                has_changes = True
        if notify and has_changes:
            signals.event.person_updated.send(self)

    @no_autoflush
    def merge_person_info(self, other):
        """Merge personal data and all object links of *other* into this
        person, then flush.

        Empty personal fields are filled from *other*; duplicate links are
        collapsed, keeping the stronger speaker/author flags.

        NOTE(review): several loops below remove items from the ``other``
        link collections while iterating them, which can skip elements —
        confirm this is safe for these relationship collections.
        """
        from indico.modules.events.contributions.models.persons import AuthorType
        for column_name in {'_title', 'affiliation', 'address', 'phone', 'first_name', 'last_name'}:
            # keep our own value; only fall back to the other person's
            value = getattr(self, column_name) or getattr(other, column_name)
            setattr(self, column_name, value)
        for event_link in other.event_links:
            existing_event_link = next((link for link in self.event_links if link.event_id == event_link.event_id),
                                       None)
            if existing_event_link is None:
                event_link.person = self
            else:
                other.event_links.remove(event_link)
        for abstract_link in other.abstract_links:
            existing_abstract_link = next((link for link in self.abstract_links
                                           if link.abstract_id == abstract_link.abstract_id), None)
            if existing_abstract_link is None:
                abstract_link.person = self
            else:
                # merge the flags: keep speaker status and the highest
                # author type of the two duplicate links
                existing_abstract_link.is_speaker |= abstract_link.is_speaker
                existing_abstract_link.author_type = AuthorType.get_highest(existing_abstract_link.author_type,
                                                                            abstract_link.author_type)
                other.abstract_links.remove(abstract_link)
        for contribution_link in other.contribution_links:
            existing_contribution_link = next((link for link in self.contribution_links
                                               if link.contribution_id == contribution_link.contribution_id), None)
            if existing_contribution_link is None:
                contribution_link.person = self
            else:
                existing_contribution_link.is_speaker |= contribution_link.is_speaker
                existing_contribution_link.author_type = AuthorType.get_highest(existing_contribution_link.author_type,
                                                                                contribution_link.author_type)
                other.contribution_links.remove(contribution_link)
        for subcontribution_link in other.subcontribution_links:
            existing_subcontribution_link = next(
                (link for link in self.subcontribution_links
                 if link.subcontribution_id == subcontribution_link.subcontribution_id), None)
            if existing_subcontribution_link is None:
                subcontribution_link.person = self
            else:
                other.subcontribution_links.remove(subcontribution_link)
        for session_block_link in other.session_block_links:
            existing_session_block_link = next((link for link in self.session_block_links
                                                if link.session_block_id == session_block_link.session_block_id),
                                               None)
            if existing_session_block_link is None:
                session_block_link.person = self
            else:
                other.session_block_links.remove(session_block_link)
        db.session.flush()

    def has_role(self, role, obj):
        """Whether the person has a role in the ACL list of a given object."""
        principals = [x for x in obj.acl_entries if x.has_management_permission(role, explicit=True)]
        return any(x
                   for x in principals
                   if ((self.user_id is not None and self.user_id == x.user_id) or
                       (self.email is not None and self.email == x.email)))
class PersonLinkBase(PersonMixin, db.Model):
    """Base class for EventPerson associations."""
    __abstract__ = True
    __auto_table_args = {'schema': 'events'}
    #: The name of the backref on the `EventPerson`
    person_link_backref_name = None
    #: The columns which should be included in the unique constraint.
    person_link_unique_columns = None
    #: The name of the relationship pointing to the object the person is linked to
    object_relationship_name = None

    @strict_classproperty
    @classmethod
    def __auto_table_args(cls):
        # one link per person and linked object (subclass-specific columns)
        return (db.UniqueConstraint('person_id', *cls.person_link_unique_columns),)

    @declared_attr
    def __table_args__(cls):
        return auto_table_args(cls)

    @declared_attr
    def id(cls):
        return db.Column(
            db.Integer,
            primary_key=True
        )

    @declared_attr
    def person_id(cls):
        return db.Column(
            db.Integer,
            db.ForeignKey('events.persons.id'),
            index=True,
            nullable=False
        )

    # The underscore-prefixed columns below store per-link overrides; the
    # public attributes defined via `override_attr` further down fall back
    # to the linked EventPerson when the column is NULL.
    @declared_attr
    def _first_name(cls):
        return db.Column(
            'first_name',
            db.String,
            nullable=True
        )

    @declared_attr
    def _last_name(cls):
        return db.Column(
            'last_name',
            db.String,
            nullable=True
        )

    @declared_attr
    def _title(cls):
        return db.Column(
            'title',
            PyIntEnum(UserTitle),
            nullable=True
        )

    @declared_attr
    def _affiliation(cls):
        return db.Column(
            'affiliation',
            db.String,
            nullable=True
        )

    @declared_attr
    def _address(cls):
        return db.Column(
            'address',
            db.Text,
            nullable=True
        )

    @declared_attr
    def _phone(cls):
        return db.Column(
            'phone',
            db.String,
            nullable=True
        )

    @declared_attr
    def person(cls):
        return db.relationship(
            'EventPerson',
            lazy=False,
            backref=db.backref(
                cls.person_link_backref_name,
                cascade='all, delete-orphan',
                cascade_backrefs=False,
                lazy=True
            )
        )

    @declared_attr
    def display_order(cls):
        return db.Column(
            db.Integer,
            nullable=False,
            default=0
        )

    @property
    def email(self):
        """The email of the linked EventPerson (no per-link override)."""
        return self.person.email

    @property
    def display_order_key(self):
        """Sort key: explicit display order, then the displayed full name."""
        return self.display_order, self.display_full_name

    @property
    def display_order_key_lastname(self):
        """Sort key: explicit display order, then last name, first name."""
        return self.display_order, self.last_name, self.first_name

    @hybrid_property
    def object(self):
        """The object (event, contribution, ...) this link points to."""
        return getattr(self, self.object_relationship_name)

    # Public attributes that read the per-link column when set and fall
    # back to the linked EventPerson otherwise.
    # NOTE(review): exact read/write semantics live in `override_attr`
    # (not visible here) — confirm before relying on them.
    first_name = override_attr('first_name', 'person')
    last_name = override_attr('last_name', 'person')
    title = override_attr('title', 'person', fget=lambda self, __: self._get_title())
    affiliation = override_attr('affiliation', 'person')
    address = override_attr('address', 'person')
    phone = override_attr('phone', 'person')

    def __init__(self, *args, **kwargs):
        # Needed in order to ensure `person` is set before the overridable attrs
        self.person = kwargs.pop('person', None)
        super().__init__(*args, **kwargs)
class EventPersonLink(PersonLinkBase):
    """Association between EventPerson and Event.

    Chairperson or speaker (lecture)
    """
    __tablename__ = 'event_person_links'
    __auto_table_args = {'schema': 'events'}
    person_link_backref_name = 'event_links'
    person_link_unique_columns = ('event_id',)
    object_relationship_name = 'event'
    event_id = db.Column(
        db.Integer,
        db.ForeignKey('events.events.id'),
        index=True,
        nullable=False
    )
    # relationship backrefs:
    # - event (Event.person_links)

    @property
    def is_submitter(self):
        """Whether the linked person has the 'submit' right on the event.

        Raises if the link is not associated with an event yet.
        """
        if not self.event:
            raise Exception('No event to check submission rights against')
        return self.person.has_role('submit', self.event)

    def __repr__(self):
        return format_repr(self, 'id', 'person_id', 'event_id', _text=self.full_name)
@listens_for(mapper, 'after_configured', once=True)
def _mapper_configured():
    """Attach attribute-event listeners once all mappers are configured."""
    @listens_for(EventPersonLink.event, 'set')
    def _associate_event_person(target, value, *unused):
        # Keep EventPerson.event in sync with the event of its links; a
        # person must never be linked to objects of two different events.
        if value is None:
            return
        if target.person.event is None:
            target.person.event = value
        else:
            assert target.person.event == value
    @listens_for(EventPerson.event_links, 'append')
    @listens_for(EventPerson.session_block_links, 'append')
    @listens_for(EventPerson.contribution_links, 'append')
    @listens_for(EventPerson.subcontribution_links, 'append')
    def _mark_not_untrusted(target, value, *unused):
        # As soon as a person is actually linked to something, it is no
        # longer considered untrusted.
        target.is_untrusted = False
| mit |
stanford-rc/shine | lib/Shine/Commands/Show.py | 2 | 6964 | # Show.py -- Show command
# Copyright (C) 2008-2015 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""
Shine `show' command classes.
The show command aims to show various shine configuration parameters.
"""
import sys
# Configuration
from Shine.Configuration.Configuration import Configuration
from Shine.Configuration.Globals import Globals
from Shine.Configuration.Backend.BackendRegistry import BackendRegistry
# Command base class
from Shine.Commands.Base.Command import Command, CommandHelpException
# CLI
from Shine.CLI.Display import setup_table
class Show(Command):
"""
shine show [-f <fsname>] [-v] <conf|fs|info|storage>
"""
NAME = "show"
DESCRIPTION = "Show configuration parameters."
SUBCOMMANDS = [ "conf", "fs", "info", "storage" ]
def cmd_show_conf(self):
"""Show shine.conf"""
tbl = setup_table(self.options, "%param %value")
for key, value in Globals().as_dict().items():
tbl.append({'param': key, 'value': str(value)})
print str(tbl)
return 0
def cmd_show_fs(self):
"""Show filesystems"""
# XXX: Use a constant here
verb = self.options.verbose >= 2
tbl = setup_table(self.options, "%fsname %description")
for fsname in self.iter_fsname():
try:
fs_conf = Configuration.load_from_cache(fsname)
except:
print "Error with FS ``%s'' configuration files." % fsname
raise
if not verb:
print fs_conf.get_fs_name()
else:
tbl.append({'fsname': fs_conf.get_fs_name(),
'description': fs_conf.get_description()})
if verb:
print str(tbl)
return 0
def cmd_show_info(self):
"""Show filesystem info"""
# Walk through the list of file system managed
# by the current node and specified by the user.
for fsname in self.iter_fsname():
try:
# Get the file system configuration structure
fs_conf = Configuration.load_from_cache(fsname)
except:
# We fail to get current file system configuration information.
# Display an error message.
msg = "Error with FS ``%s'' configuration files." % fsname
print >> sys.stderr, msg
raise
# Retrieve quota configuration information
if Globals().lustre_version_is_smaller('2.4'):
quota_info = ''
if fs_conf.has_quota():
quota_info += 'type=%s ' % fs_conf.get_quota_type()
qiunit = fs_conf.get_quota_iunit() or '[lustre_default]'
quota_info += 'iunit=%s ' % qiunit
qbunit = fs_conf.get_quota_bunit() or '[lustre_default]'
quota_info += 'bunit=%s ' % qbunit
qitune = fs_conf.get_quota_itune() or '[lustre_default]'
quota_info += 'itune=%s ' % qitune
qbtune = fs_conf.get_quota_btune() or '[lustre_default]'
quota_info += 'btune=%s ' % qbtune
else:
quota_info = 'not activated'
# Get file system stripping configuration information
stripping = 'stripe_size=%s ' % fs_conf.get_stripesize()
stripping += 'stripe_count=%s' % fs_conf.get_stripecount()
# Get the device path used to mount the file system on client node
mgsnodes = [fs_conf.get_target_mgt().get_nodename()]
mgsnodes += fs_conf.get_target_mgt().ha_nodes()
mgsnids = [ ','.join(fs_conf.get_nid(node)) for node in mgsnodes ]
device_path = "%s:/%s" % (':'.join(mgsnids), fs_conf.get_fs_name())
tbl = setup_table(self.options, "%name %value")
# Add configuration parameter to the list of element displayed in
# the summary tab.
tbl.append({'name': 'name', 'value': fs_conf.get_fs_name()})
tbl.append({'name': 'default mount path',
'value': fs_conf.get_mount_path()})
tbl.append({'name': 'device path', 'value': device_path})
tbl.append({'name': 'mount options',
'value': fs_conf.get_default_mount_options()})
if Globals().lustre_version_is_smaller('2.4'):
tbl.append({'name': 'quotas', 'value': quota_info})
tbl.append({'name': 'stripping', 'value': stripping})
tbl.append({'name': 'tuning',
'value': Globals().get_tuning_file()})
tbl.append({'name': 'description',
'value': fs_conf.get_description()})
# Display the list of collected configuration information
print str(tbl)
def cmd_show_storage(self):
"""Show storage info"""
backend = BackendRegistry().selected()
if not backend:
# no backend? check to be sure
assert Globals().get_backend() == "None", \
"Error: please check your storage backend configuration" \
"(backend=%s)" % Globals().get_backend()
print "Storage backend is disabled, please check storage " \
"information as a per-filesystem basis with ``show info''."
else:
backend.start()
cnt = 0
for tgt in [ 'mgt', 'mdt', 'ost']:
for dev in backend.get_target_devices(tgt):
print dev
cnt += 1
print "Total: %d devices" % cnt
return 0
def execute(self):
# Option sanity check
self.forbidden(self.options.model, "-m, see -f")
# This check is already done when parsing argument.
# If this is modified, optparse code should also be fixed.
if len(self.arguments) != 1:
raise CommandHelpException("Invalid command usage.", self)
subcmd = self.arguments[0]
if subcmd not in self.SUBCOMMANDS:
raise CommandHelpException("Cannot show this.", self)
return getattr(self, 'cmd_show_%s' % subcmd)()
| gpl-2.0 |
srisankethu/coala-bears | tests/php/PHPLintBearTest.py | 12 | 1139 | import os
from queue import Queue
from shutil import which
from unittest.case import skipIf
from bears.php.PHPLintBear import PHPLintBear
from coalib.testing.LocalBearTestHelper import LocalBearTestHelper
from coalib.settings.Section import Section
@skipIf(which('php') is None, 'PHP is not installed')
class PHPLintBearTest(LocalBearTestHelper):
    """Run PHPLintBear against a known-bad and a known-good PHP file."""

    def setUp(self):
        self.section = Section('test section')
        self.uut = PHPLintBear(self.section, Queue())
        test_dir = os.path.join(os.path.dirname(__file__), 'test_files')
        self.test_file1 = os.path.join(test_dir, 'phplint_test1.php')
        self.test_file2 = os.path.join(test_dir, 'phplint_test2.php')

    def test_run(self):
        # Test a file with errors and warnings
        self.check_invalidity(self.uut, [], self.test_file1)
        # Test a file without any issues
        self.check_validity(self.uut, [], self.test_file2)
seibert/numba | numba/tests/test_indexing.py | 2 | 39577 | import decimal
import itertools
import numpy as np
import unittest
from numba.core.compiler import compile_isolated, Flags
from numba import njit, typeof
from numba.core import utils, types, errors
from numba.tests.support import TestCase, tag
from numba.core.typing import arraydecl
from numba.core.types import intp, ellipsis, slice2_type, slice3_type
# Compilation flags shared by the tests below:
# - enable_pyobj_flags: allow object mode (full Python semantics fallback).
# - Noflags: nopython mode with NRT (no object-mode fallback).
enable_pyobj_flags = Flags()
enable_pyobj_flags.set("enable_pyobject")

Noflags = Flags()
Noflags.set("nrt")
# NOTE: the usecase functions below are compiled by numba with specific
# argument types/layouts; their exact code shape is part of what is being
# tested, so they must not be restructured.

def slicing_1d_usecase(a, start, stop, step):
    # Full slice expression with explicit start/stop/step.
    return a[start:stop:step]

def slicing_1d_usecase2(a, start, stop, step):
    # Slice then reduce over the view, to validate its contents.
    b = a[start:stop:step]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase3(a, start, stop):
    # Two-argument slice (implicit step of 1).
    b = a[start:stop]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase4(a):
    # Full copy-slice a[:].
    b = a[:]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase5(a, start):
    # Omitted stop.
    b = a[start:]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase6(a, stop):
    # Omitted start.
    b = a[:stop]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase7(a, start):
    # Omitted stop with negative step (issue #1690)
    b = a[start::-2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_1d_usecase8(a, start):
    # Omitted start with negative step
    b = a[::-2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total
def slicing_2d_usecase(a, start1, stop1, step1, start2, stop2, step2):
    # The index is a homogeneous tuple of slices
    return a[start1:stop1:step1, start2:stop2:step2]

def slicing_2d_usecase3(a, start1, stop1, step1, index):
    # The index is a heterogeneous tuple
    return a[start1:stop1:step1, index]

def slicing_3d_usecase(a, index0, start1, index2):
    # Mixed integer/slice 3d indexing (slice with omitted stop).
    b = a[index0, start1:, index2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def slicing_3d_usecase2(a, index0, stop1, index2):
    # Mixed integer/slice 3d indexing (slice with omitted start).
    b = a[index0, :stop1, index2]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total

def partial_1d_usecase(a, index):
    # Partial indexing: a single integer index into a multi-dim array
    # yields a view of lower dimensionality.
    b = a[index]
    total = 0
    for i in range(b.shape[0]):
        total += b[i] * (i + 1)
    return total
def integer_indexing_1d_usecase(a, i):
    # Scalar integer indexing.
    return a[i]

def integer_indexing_2d_usecase(a, i1, i2):
    # Tuple-of-integers indexing.
    return a[i1,i2]

def integer_indexing_2d_usecase2(a, i1, i2):
    # Chained single-integer indexing (two successive __getitem__ calls).
    return a[i1][i2]

def ellipsis_usecase1(a, i, j):
    # Trailing ellipsis.
    return a[i:j, ...]

def ellipsis_usecase2(a, i, j):
    # Leading ellipsis.
    return a[..., i:j]

def ellipsis_usecase3(a, i, j):
    # Ellipsis between two integer indices.
    return a[i, ..., j]

def none_index_usecase(a):
    # Indexing with None adds a leading axis (only supported in object mode).
    return a[None]

def empty_tuple_usecase(a):
    # Indexing a 0-d array with the empty tuple returns its scalar value.
    return a[()]
@njit
def setitem_usecase(a, index, value):
    # Scalar or broadcast store through an arbitrary index.
    a[index] = value

@njit
def setitem_broadcast_usecase(a, value):
    # Broadcast store into the whole array via a[:].
    a[:] = value
def slicing_1d_usecase_set(a, b, start, stop, step):
    # Assign into a 1d slice; returns the mutated array for comparison.
    a[start:stop:step] = b
    return a

def slicing_1d_usecase_add(a, b, start, stop):
    # NOTE: uses the ROT_FOUR opcode on Python 2, only on the [start:stop]
    # with inplace operator form.
    a[start:stop] += b
    return a

def slicing_2d_usecase_set(a, b, start, stop, step, start2, stop2, step2):
    # Assign into a 2d slice; returns the mutated array for comparison.
    a[start:stop:step,start2:stop2:step2] = b
    return a
class TestGetItem(TestCase):
    """
    Test basic indexed load from an array (returning a view or a scalar).

    Note fancy indexing is tested in test_fancy_indexing.
    """

    def test_1d_slicing(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')
        # Covers empty, reversed, strided and negative-bound slices.
        for indices in [(0, 10, 1),
                        (2, 3, 1),
                        (10, 0, 1),
                        (0, 10, -1),
                        (0, 10, 2),
                        (9, 0, -1),
                        (-5, -2, 1),
                        (0, -1, 1),
                        ]:
            expected = pyfunc(a, *indices)
            self.assertPreciseEqual(cfunc(a, *indices), expected)

    def test_1d_slicing_npm(self):
        self.test_1d_slicing(flags=Noflags)

    def test_1d_slicing2(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase2
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')

        args = [(0, 10, 1),
                (2, 3, 1),
                (10, 0, 1),
                (0, 10, -1),
                (0, 10, 2)]

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

        # Any layout: a non-contiguous strided view.
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        args = [(0, 10, 1),
                (2, 3, 1),
                (10, 0, 1),
                (0, 10, -1),
                (0, 10, 2)]

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_1d_slicing2_npm(self):
        self.test_1d_slicing2(flags=Noflags)

    def test_1d_slicing3(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase3
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')

        args = [(3, 10),
                (2, 3),
                (10, 0),
                (0, 10),
                (5, 10)]

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

        # Any layout
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_1d_slicing3_npm(self):
        self.test_1d_slicing3(flags=Noflags)

    def test_1d_slicing4(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase4
        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype,)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')
        self.assertEqual(pyfunc(a), cfunc(a))

        # Any layout
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype,)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        self.assertEqual(pyfunc(a), cfunc(a))

    def test_1d_slicing4_npm(self):
        self.test_1d_slicing4(flags=Noflags)

    def check_1d_slicing_with_arg(self, pyfunc, flags):
        # Shared checker for the single-bound slicing usecases (5..8):
        # tries every bound in [-9, 9] on both C and Any layout arrays.
        args = list(range(-9, 10))

        arraytype = types.Array(types.int32, 1, 'C')
        argtys = (arraytype, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')
        for arg in args:
            self.assertEqual(pyfunc(a, arg), cfunc(a, arg))

        # Any layout
        arraytype = types.Array(types.int32, 1, 'A')
        argtys = (arraytype, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(20, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        for arg in args:
            self.assertEqual(pyfunc(a, arg), cfunc(a, arg))

    def test_1d_slicing5(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase5
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing5_npm(self):
        self.test_1d_slicing5(flags=Noflags)

    def test_1d_slicing6(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase6
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing6_npm(self):
        self.test_1d_slicing6(flags=Noflags)

    def test_1d_slicing7(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase7
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing7_npm(self):
        self.test_1d_slicing7(flags=Noflags)

    def test_1d_slicing8(self, flags=enable_pyobj_flags):
        pyfunc = slicing_1d_usecase8
        self.check_1d_slicing_with_arg(pyfunc, flags)

    def test_1d_slicing8_npm(self):
        self.test_1d_slicing8(flags=Noflags)

    def test_2d_slicing(self, flags=enable_pyobj_flags):
        """
        arr_2d[a:b:c]
        """
        pyfunc = slicing_1d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(100, dtype='i4').reshape(10, 10)
        for args in [(0, 10, 1), (2, 3, 1), (10, 0, 1),
                     (0, 10, -1), (0, 10, 2)]:
            self.assertPreciseEqual(pyfunc(a, *args), cfunc(a, *args),
                                    msg="for args %s" % (args,))

    def test_2d_slicing_npm(self):
        self.test_2d_slicing(flags=Noflags)

    def test_2d_slicing2(self, flags=enable_pyobj_flags):
        """
        arr_2d[a:b:c, d:e:f]
        """
        # C layout
        pyfunc = slicing_2d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(100, dtype='i4').reshape(10, 10)
        indices = [(0, 10, 1),
                   (2, 3, 1),
                   (10, 0, 1),
                   (0, 10, -1),
                   (0, 10, 2),
                   (10, 0, -1),
                   (9, 0, -2),
                   (-5, -2, 1),
                   (0, -1, 1),
                   ]
        # Cartesian product of bounds for each dimension.
        args = [tup1 + tup2
                for (tup1, tup2) in itertools.product(indices, indices)]
        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

        # Any layout
        arraytype = types.Array(types.int32, 2, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(400, dtype='i4').reshape(20, 20)[::2, ::2]

        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

    def test_2d_slicing2_npm(self):
        self.test_2d_slicing2(flags=Noflags)

    def test_2d_slicing3(self, flags=enable_pyobj_flags):
        """
        arr_2d[a:b:c, d]
        """
        # C layout
        pyfunc = slicing_2d_usecase3
        arraytype = types.Array(types.int32, 2, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(100, dtype='i4').reshape(10, 10)
        args = [
            (0, 10, 1, 0),
            (2, 3, 1, 1),
            (10, 0, -1, 8),
            (9, 0, -2, 4),
            (0, 10, 2, 3),
            (0, -1, 3, 1),
        ]
        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

        # Any layout
        arraytype = types.Array(types.int32, 2, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32,
                  types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(400, dtype='i4').reshape(20, 20)[::2, ::2]

        for arg in args:
            expected = pyfunc(a, *arg)
            self.assertPreciseEqual(cfunc(a, *arg), expected)

    def test_2d_slicing3_npm(self):
        self.test_2d_slicing3(flags=Noflags)

    def test_3d_slicing(self, flags=enable_pyobj_flags):
        # C layout
        pyfunc = slicing_3d_usecase
        arraytype = types.Array(types.int32, 3, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(1000, dtype='i4').reshape(10, 10, 10)
        args = [
            (0, 9, 1),
            (2, 3, 1),
            (9, 0, 1),
            (0, 9, -1),
            (0, 9, 2),
        ]
        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

        # Any layout
        arraytype = types.Array(types.int32, 3, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(2000, dtype='i4')[::2].reshape(10, 10, 10)

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_3d_slicing_npm(self):
        self.test_3d_slicing(flags=Noflags)

    def test_3d_slicing2(self, flags=enable_pyobj_flags):
        # C layout
        pyfunc = slicing_3d_usecase2
        arraytype = types.Array(types.int32, 3, 'C')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(1000, dtype='i4').reshape(10, 10, 10)
        args = [
            (0, 9, 1),
            (2, 3, 1),
            (9, 0, 1),
            (0, 9, -1),
            (0, 9, 2),
        ]
        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

        # Any layout
        arraytype = types.Array(types.int32, 3, 'A')
        argtys = (arraytype, types.int32, types.int32, types.int32)
        cr = compile_isolated(pyfunc, argtys, flags=flags)
        cfunc = cr.entry_point

        a = np.arange(2000, dtype='i4')[::2].reshape(10, 10, 10)

        for arg in args:
            self.assertEqual(pyfunc(a, *arg), cfunc(a, *arg))

    def test_3d_slicing2_npm(self):
        self.test_3d_slicing2(flags=Noflags)

    def test_1d_integer_indexing(self, flags=enable_pyobj_flags):
        # C layout
        pyfunc = integer_indexing_1d_usecase
        arraytype = types.Array(types.int32, 1, 'C')
        cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')
        self.assertEqual(pyfunc(a, 0), cfunc(a, 0))
        self.assertEqual(pyfunc(a, 9), cfunc(a, 9))
        self.assertEqual(pyfunc(a, -1), cfunc(a, -1))

        # Any layout
        arraytype = types.Array(types.int32, 1, 'A')
        cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(10, dtype='i4')[::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])
        self.assertEqual(pyfunc(a, 0), cfunc(a, 0))
        self.assertEqual(pyfunc(a, 2), cfunc(a, 2))
        self.assertEqual(pyfunc(a, -1), cfunc(a, -1))

        # Using a 0-d array as integer index
        arraytype = types.Array(types.int32, 1, 'C')
        indextype = types.Array(types.int16, 0, 'C')
        cr = compile_isolated(pyfunc, (arraytype, indextype), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(3, 13, dtype=np.int32)
        for i in (0, 9, -2):
            idx = np.array(i).astype(np.int16)
            assert idx.ndim == 0
            self.assertEqual(pyfunc(a, idx), cfunc(a, idx))

    def test_1d_integer_indexing_npm(self):
        self.test_1d_integer_indexing(flags=Noflags)

    def test_integer_indexing_1d_for_2d(self, flags=enable_pyobj_flags):
        # Test partial (1d) indexing of a 2d array
        pyfunc = integer_indexing_1d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(100, dtype='i4').reshape(10, 10)
        self.assertPreciseEqual(pyfunc(a, 0), cfunc(a, 0))
        self.assertPreciseEqual(pyfunc(a, 9), cfunc(a, 9))
        self.assertPreciseEqual(pyfunc(a, -1), cfunc(a, -1))

        arraytype = types.Array(types.int32, 2, 'A')
        cr = compile_isolated(pyfunc, (arraytype, types.int32), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(20, dtype='i4').reshape(5, 4)[::2]
        self.assertPreciseEqual(pyfunc(a, 0), cfunc(a, 0))

    def test_integer_indexing_1d_for_2d_npm(self):
        self.test_integer_indexing_1d_for_2d(flags=Noflags)

    def test_2d_integer_indexing(self, flags=enable_pyobj_flags,
                                 pyfunc=integer_indexing_2d_usecase):
        # C layout
        a = np.arange(100, dtype='i4').reshape(10, 10)
        arraytype = types.Array(types.int32, 2, 'C')
        cr = compile_isolated(pyfunc, (arraytype, types.int32, types.int32),
                              flags=flags)
        cfunc = cr.entry_point

        self.assertEqual(pyfunc(a, 0, 3), cfunc(a, 0, 3))
        self.assertEqual(pyfunc(a, 9, 9), cfunc(a, 9, 9))
        self.assertEqual(pyfunc(a, -2, -1), cfunc(a, -2, -1))

        # Any layout
        a = np.arange(100, dtype='i4').reshape(10, 10)[::2, ::2]
        self.assertFalse(a.flags['C_CONTIGUOUS'])
        self.assertFalse(a.flags['F_CONTIGUOUS'])

        arraytype = types.Array(types.int32, 2, 'A')
        cr = compile_isolated(pyfunc, (arraytype, types.int32, types.int32),
                              flags=flags)
        cfunc = cr.entry_point

        self.assertEqual(pyfunc(a, 0, 1), cfunc(a, 0, 1))
        self.assertEqual(pyfunc(a, 2, 2), cfunc(a, 2, 2))
        self.assertEqual(pyfunc(a, -2, -1), cfunc(a, -2, -1))

        # With 0-d arrays as integer indices
        a = np.arange(100, dtype='i4').reshape(10, 10)
        arraytype = types.Array(types.int32, 2, 'C')
        indextype = types.Array(types.int32, 0, 'C')
        cr = compile_isolated(pyfunc, (arraytype, indextype, indextype),
                              flags=flags)
        cfunc = cr.entry_point

        for i, j in [(0, 3), (8, 9), (-2, -1)]:
            i = np.array(i).astype(np.int32)
            j = np.array(j).astype(np.int32)
            self.assertEqual(pyfunc(a, i, j), cfunc(a, i, j))

    def test_2d_integer_indexing_npm(self):
        self.test_2d_integer_indexing(flags=Noflags)

    def test_2d_integer_indexing2(self):
        self.test_2d_integer_indexing(pyfunc=integer_indexing_2d_usecase2)
        self.test_2d_integer_indexing(flags=Noflags,
                                      pyfunc=integer_indexing_2d_usecase2)

    def test_2d_integer_indexing_via_call(self):
        # Indexing inside a nested jitted call.
        @njit
        def index1(X, i0):
            return X[i0]
        @njit
        def index2(X, i0, i1):
            return index1(X[i0], i1)
        a = np.arange(10).reshape(2, 5)
        self.assertEqual(index2(a, 0, 0), a[0][0])
        self.assertEqual(index2(a, 1, 1), a[1][1])
        self.assertEqual(index2(a, -1, -1), a[-1][-1])

    def test_2d_float_indexing(self, flags=enable_pyobj_flags):
        # Float index type only works in object mode, hence no _npm variant.
        a = np.arange(100, dtype='i4').reshape(10, 10)
        pyfunc = integer_indexing_2d_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        cr = compile_isolated(pyfunc, (arraytype, types.float32, types.int32),
                              flags=flags)
        cfunc = cr.entry_point

        self.assertEqual(pyfunc(a, 0, 0), cfunc(a, 0, 0))
        self.assertEqual(pyfunc(a, 9, 9), cfunc(a, 9, 9))
        self.assertEqual(pyfunc(a, -1, -1), cfunc(a, -1, -1))

    def test_partial_1d_indexing(self, flags=enable_pyobj_flags):
        pyfunc = partial_1d_usecase

        def check(arr, arraytype):
            cr = compile_isolated(pyfunc, (arraytype, types.int32),
                                  flags=flags)
            cfunc = cr.entry_point
            self.assertEqual(pyfunc(arr, 0), cfunc(arr, 0))
            n = arr.shape[0] - 1
            self.assertEqual(pyfunc(arr, n), cfunc(arr, n))
            self.assertEqual(pyfunc(arr, -1), cfunc(arr, -1))

        a = np.arange(12, dtype='i4').reshape((4, 3))
        arraytype = types.Array(types.int32, 2, 'C')
        check(a, arraytype)

        a = np.arange(12, dtype='i4').reshape((3, 4)).T
        arraytype = types.Array(types.int32, 2, 'F')
        check(a, arraytype)

        a = np.arange(12, dtype='i4').reshape((3, 4))[::2]
        arraytype = types.Array(types.int32, 2, 'A')
        check(a, arraytype)

    def check_ellipsis(self, pyfunc, flags):
        # Shared checker for the ellipsis usecases over 2d and 3d inputs.
        def compile_func(arr):
            cr = compile_isolated(pyfunc, (typeof(arr), types.intp, types.intp),
                                  flags=flags)
            return cr.entry_point

        def run(a):
            bounds = (0, 1, 2, -1, -2)
            cfunc = compile_func(a)
            for i, j in itertools.product(bounds, bounds):
                # (fixed: dropped an unused variable and a redundant
                # duplicate cfunc call per iteration)
                np.testing.assert_equal(pyfunc(a, i, j), cfunc(a, i, j))

        run(np.arange(16, dtype='i4').reshape(4, 4))
        run(np.arange(27, dtype='i4').reshape(3, 3, 3))

    def test_ellipsis1(self, flags=enable_pyobj_flags):
        self.check_ellipsis(ellipsis_usecase1, flags)

    def test_ellipsis1_npm(self):
        self.test_ellipsis1(flags=Noflags)

    def test_ellipsis2(self, flags=enable_pyobj_flags):
        self.check_ellipsis(ellipsis_usecase2, flags)

    def test_ellipsis2_npm(self):
        self.test_ellipsis2(flags=Noflags)

    def test_ellipsis3(self, flags=enable_pyobj_flags):
        self.check_ellipsis(ellipsis_usecase3, flags)

    def test_ellipsis3_npm(self):
        self.test_ellipsis3(flags=Noflags)

    def test_ellipsis_issue1498(self):
        # This is an issue due to incorrect layout inferred for when
        # ellipsis is used and ndenumerate is specializing on the layout.
        @njit
        def udt(arr):
            out = np.zeros_like(arr)
            i = 0
            for index, val in np.ndenumerate(arr[..., i]):
                out[index][i] = val

            return out

        py_func = udt.py_func

        outersize = 4
        innersize = 4
        arr = np.arange(outersize * innersize).reshape(outersize, innersize)

        got = udt(arr)
        expected = py_func(arr)
        np.testing.assert_equal(got, expected)

    def test_ellipsis_issue1499(self):
        # This tests an issue when ndarray.__getitem__ recv a tuple of
        # constants. The lowering is mishandling the constant value creation.
        @njit
        def udt(arr):
            return arr[..., 0]

        arr = np.arange(3)
        got = udt(arr)
        expected = udt.py_func(arr)
        np.testing.assert_equal(got, expected)

    def test_none_index(self, flags=enable_pyobj_flags):
        pyfunc = none_index_usecase
        arraytype = types.Array(types.int32, 2, 'C')
        # TODO should be enable to handle this in NoPython mode
        cr = compile_isolated(pyfunc, (arraytype,), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(100, dtype='i4').reshape(10, 10)
        self.assertPreciseEqual(pyfunc(a), cfunc(a))

    def test_none_index_npm(self):
        # None-indexing is unsupported in nopython mode; a typing error
        # is the expected outcome.
        with self.assertTypingError():
            self.test_none_index(flags=Noflags)

    def test_empty_tuple_indexing(self, flags=enable_pyobj_flags):
        pyfunc = empty_tuple_usecase
        arraytype = types.Array(types.int32, 0, 'C')
        cr = compile_isolated(pyfunc, (arraytype,), flags=flags)
        cfunc = cr.entry_point

        a = np.arange(1, dtype='i4').reshape(())
        self.assertPreciseEqual(pyfunc(a), cfunc(a))

    def test_empty_tuple_indexing_npm(self):
        self.test_empty_tuple_indexing(flags=Noflags)
class TestSetItem(TestCase):
"""
Test basic indexed store into an array.
Note fancy indexing is tested in test_fancy_indexing.
"""
def test_conversion_setitem(self, flags=enable_pyobj_flags):
    """ this used to work, and was used in one of the tutorials """
    from numba import jit

    def pyfunc(array):
        # Stores a Decimal-derived value; requires implicit conversion
        # to the array's integer dtype on assignment.
        for index in range(len(array)):
            array[index] = index % decimal.Decimal(100)

    # NOTE(review): signature declares i8[:] but the array dtype is 'i1' —
    # presumably relies on argument conversion; confirm against numba docs.
    cfunc = jit("void(i8[:])")(pyfunc)

    udt = np.arange(100, dtype='i1')
    control = udt.copy()
    pyfunc(control)
    cfunc(udt)
    self.assertPreciseEqual(udt, control)
def test_1d_slicing_set(self, flags=enable_pyobj_flags):
    """
    1d to 1d slice assignment
    """
    pyfunc = slicing_1d_usecase_set
    # Note heterogeneous types for the source and destination arrays
    # (int16[:] -> int32[:])
    dest_type = types.Array(types.int32, 1, 'C')
    src_type = types.Array(types.int16, 1, 'A')
    argtys = (dest_type, src_type, types.int32, types.int32, types.int32)
    cr = compile_isolated(pyfunc, argtys, flags=flags)
    cfunc = cr.entry_point

    N = 10
    arg = np.arange(N, dtype='i2') + 40
    # Bounds deliberately include out-of-range and negative values.
    bounds = [0, 2, N - 2, N, N + 1, N + 3,
              -2, -N + 2, -N, -N - 1, -N - 3]

    def make_dest():
        return np.zeros_like(arg, dtype='i4')

    for start, stop in itertools.product(bounds, bounds):
        for step in (1, 2, -1, -2):
            args = start, stop, step
            index = slice(*args)
            # Source is pre-sliced so its length always matches the slice.
            pyleft = pyfunc(make_dest(), arg[index], *args)
            cleft = cfunc(make_dest(), arg[index], *args)
            self.assertPreciseEqual(pyleft, cleft)

    # Mismatching input size and slice length
    with self.assertRaises(ValueError):
        cfunc(np.zeros_like(arg, dtype=np.int32), arg, 0, 0, 1)
def check_1d_slicing_set_sequence(self, flags, seqty, seq):
    """
    Generic sequence to 1d slice assignment
    """
    pyfunc = slicing_1d_usecase_set
    dest_type = types.Array(types.int32, 1, 'C')
    argtys = (dest_type, seqty, types.int32, types.int32, types.int32)
    cr = compile_isolated(pyfunc, argtys, flags=flags)
    cfunc = cr.entry_point

    N = 10
    k = len(seq)
    arg = np.arange(N, dtype=np.int32)
    # Slice of exactly len(seq) elements: assignment must succeed.
    args = (seq, 1, -N + k + 1, 1)
    expected = pyfunc(arg.copy(), *args)
    got = cfunc(arg.copy(), *args)
    self.assertPreciseEqual(expected, got)

    # Slice one element shorter than seq: must raise.
    args = (seq, 1, -N + k, 1)
    with self.assertRaises(ValueError) as raises:
        cfunc(arg.copy(), *args)
def test_1d_slicing_set_tuple(self, flags=enable_pyobj_flags):
    """
    Tuple to 1d slice assignment
    """
    self.check_1d_slicing_set_sequence(
        flags, types.UniTuple(types.int16, 2), (8, -42))

def test_1d_slicing_set_list(self, flags=enable_pyobj_flags):
    """
    List to 1d slice assignment
    """
    self.check_1d_slicing_set_sequence(
        flags, types.List(types.int16), [8, -42])
def test_1d_slicing_broadcast(self, flags=enable_pyobj_flags):
    """
    scalar to 1d slice assignment
    """
    pyfunc = slicing_1d_usecase_set
    arraytype = types.Array(types.int32, 1, 'C')
    # Note heterogeneous types for the source scalar and the destination
    # array (int16 -> int32[:])
    argtys = (arraytype, types.int16, types.int32, types.int32, types.int32)
    cr = compile_isolated(pyfunc, argtys, flags=flags)
    cfunc = cr.entry_point

    N = 10
    arg = np.arange(N, dtype='i4')
    val = 42
    # Bounds deliberately include out-of-range and negative values.
    bounds = [0, 2, N - 2, N, N + 1, N + 3,
              -2, -N + 2, -N, -N - 1, -N - 3]
    for start, stop in itertools.product(bounds, bounds):
        for step in (1, 2, -1, -2):
            args = val, start, stop, step
            pyleft = pyfunc(arg.copy(), *args)
            cleft = cfunc(arg.copy(), *args)
            self.assertPreciseEqual(pyleft, cleft)
def test_1d_slicing_add(self, flags=enable_pyobj_flags):
    # In-place addition into a slice (a[start:stop] += b).
    pyfunc = slicing_1d_usecase_add
    arraytype = types.Array(types.int32, 1, 'C')
    argtys = (arraytype, arraytype, types.int32, types.int32)
    cr = compile_isolated(pyfunc, argtys, flags=flags)
    cfunc = cr.entry_point

    arg = np.arange(10, dtype='i4')
    for test in ((0, 10), (2, 5)):
        pyleft = pyfunc(np.zeros_like(arg), arg[slice(*test)], *test)
        cleft = cfunc(np.zeros_like(arg), arg[slice(*test)], *test)
        self.assertPreciseEqual(pyleft, cleft)

# nopython-mode variants of the slice-assignment tests above.

def test_1d_slicing_set_npm(self):
    self.test_1d_slicing_set(flags=Noflags)

def test_1d_slicing_set_list_npm(self):
    self.test_1d_slicing_set_list(flags=Noflags)

def test_1d_slicing_set_tuple_npm(self):
    self.test_1d_slicing_set_tuple(flags=Noflags)

def test_1d_slicing_broadcast_npm(self):
    self.test_1d_slicing_broadcast(flags=Noflags)

def test_1d_slicing_add_npm(self):
    self.test_1d_slicing_add(flags=Noflags)
def test_2d_slicing_set(self, flags=enable_pyobj_flags):
    """
    2d to 2d slice assignment
    """
    pyfunc = slicing_2d_usecase_set
    arraytype = types.Array(types.int32, 2, 'A')
    argtys = (arraytype, arraytype, types.int32, types.int32, types.int32,
              types.int32, types.int32, types.int32)
    cr = compile_isolated(pyfunc, argtys, flags=flags)
    cfunc = cr.entry_point

    arg = np.arange(10*10, dtype='i4').reshape(10,10)
    # Each test is (start, stop, step) for dim 0 followed by dim 1.
    tests = [
        (0, 10, 1, 0, 10, 1),
        (2, 3, 1, 2, 3, 1),
        (10, 0, 1, 10, 0, 1),
        (0, 10, -1, 0, 10, -1),
        (0, 10, 2, 0, 10, 2),
    ]
    for test in tests:
        # Source is pre-sliced so its shape matches the target region.
        pyleft = pyfunc(np.zeros_like(arg), arg[slice(*test[0:3]), slice(*test[3:6])], *test)
        cleft = cfunc(np.zeros_like(arg), arg[slice(*test[0:3]), slice(*test[3:6])], *test)
        self.assertPreciseEqual(cleft, pyleft)
def test_2d_slicing_broadcast(self, flags=enable_pyobj_flags):
    """
    scalar to 2d slice assignment
    """
    pyfunc = slicing_2d_usecase_set
    arraytype = types.Array(types.int32, 2, 'C')
    # Note heterogeneous types for the source scalar and the destination
    # array (int16 -> int32[:])
    argtys = (arraytype, types.int16, types.int32, types.int32, types.int32,
              types.int32, types.int32, types.int32)
    cr = compile_isolated(pyfunc, argtys, flags=flags)
    cfunc = cr.entry_point

    arg = np.arange(10*10, dtype='i4').reshape(10,10)
    val = 42
    # Each test is (start, stop, step) for dim 0 followed by dim 1.
    tests = [
        (0, 10, 1, 0, 10, 1),
        (2, 3, 1, 2, 3, 1),
        (10, 0, 1, 10, 0, 1),
        (0, 10, -1, 0, 10, -1),
        (0, 10, 2, 0, 10, 2),
    ]
    for test in tests:
        pyleft = pyfunc(arg.copy(), val, *test)
        cleft = cfunc(arg.copy(), val, *test)
        self.assertPreciseEqual(cleft, pyleft)

# nopython-mode variants.

def test_2d_slicing_set_npm(self):
    self.test_2d_slicing_set(flags=Noflags)

def test_2d_slicing_broadcast_npm(self):
    self.test_2d_slicing_broadcast(flags=Noflags)
def test_setitem(self):
    """
    scalar indexed assignment
    """
    arr = np.arange(5)
    setitem_usecase(arr, 1, 42)
    self.assertEqual(arr.tolist(), [0, 42, 2, 3, 4])
    # Using a 0-d array as scalar index
    setitem_usecase(arr, np.array(3).astype(np.uint16), 8)
    self.assertEqual(arr.tolist(), [0, 42, 2, 8, 4])
    # Scalar Broadcasting: assigning to one row of a 2d array fills it.
    arr = np.arange(9).reshape(3, 3)
    setitem_usecase(arr, 1, 42)
    self.assertEqual(arr.tolist(), [[0, 1, 2], [42, 42, 42], [6, 7, 8]])
def test_setitem_broadcast(self):
"""
broadcasted array assignment
"""
# Scalar Broadcasting
dst = np.arange(5)
setitem_broadcast_usecase(dst, 42)
self.assertEqual(dst.tolist(), [42] * 5)
# 1D -> 2D Array Broadcasting
dst = np.arange(6).reshape(2, 3)
setitem_broadcast_usecase(dst, np.arange(1, 4))
self.assertEqual(dst.tolist(), [[1, 2, 3], [1, 2, 3]])
# 2D -> 2D Array Broadcasting
dst = np.arange(6).reshape(2, 3)
setitem_broadcast_usecase(dst, np.arange(1, 4).reshape(1, 3))
self.assertEqual(dst.tolist(), [[1, 2, 3], [1, 2, 3]])
# 2D -> 4D Array Broadcasting
dst = np.arange(12).reshape(2, 1, 2, 3)
setitem_broadcast_usecase(dst, np.arange(1, 4).reshape(1, 3))
inner2 = [[1, 2, 3], [1, 2, 3]]
self.assertEqual(dst.tolist(), [[inner2]] * 2)
# 2D -> 1D Array Broadcasting
dst = np.arange(5)
setitem_broadcast_usecase(dst, np.arange(1, 6).reshape(1, 5))
self.assertEqual(dst.tolist(), [1, 2, 3, 4, 5])
# 4D -> 2D Array Broadcasting
dst = np.arange(6).reshape(2, 3)
setitem_broadcast_usecase(dst, np.arange(1, 1 + dst.size).reshape(1, 1, 2, 3))
self.assertEqual(dst.tolist(), [[1, 2, 3], [4, 5, 6]])
def test_setitem_broadcast_error(self):
# higher dim assigned into lower dim
# 2D -> 1D
dst = np.arange(5)
src = np.arange(10).reshape(2, 5)
with self.assertRaises(ValueError) as raises:
setitem_broadcast_usecase(dst, src)
errmsg = str(raises.exception)
self.assertEqual('cannot broadcast source array for assignment',
errmsg)
# 3D -> 2D
dst = np.arange(5).reshape(1, 5)
src = np.arange(10).reshape(1, 2, 5)
with self.assertRaises(ValueError) as raises:
setitem_broadcast_usecase(dst, src)
errmsg = str(raises.exception)
self.assertEqual('cannot assign slice from input of different size',
errmsg)
# lower to higher
# 1D -> 2D
dst = np.arange(10).reshape(2, 5)
src = np.arange(4)
with self.assertRaises(ValueError) as raises:
setitem_broadcast_usecase(dst, src)
errmsg = str(raises.exception)
self.assertEqual('cannot assign slice from input of different size',
errmsg)
def test_slicing_1d_broadcast(self):
# 1D -> 2D sliced (1)
dst = np.arange(6).reshape(3, 2)
src = np.arange(1, 3)
slicing_1d_usecase_set(dst, src, 0, 2, 1)
self.assertEqual(dst.tolist(), [[1, 2], [1, 2], [4, 5]])
# 1D -> 2D sliced (2)
dst = np.arange(6).reshape(3, 2)
src = np.arange(1, 3)
slicing_1d_usecase_set(dst, src, 0, None, 2)
self.assertEqual(dst.tolist(), [[1, 2], [2, 3], [1, 2]])
# 2D -> 2D sliced (3)
dst = np.arange(6).reshape(3, 2)
src = np.arange(1, 5).reshape(2, 2)
slicing_1d_usecase_set(dst, src, None, 2, 1)
self.assertEqual(dst.tolist(), [[1, 2], [3, 4], [4, 5]])
def test_setitem_readonly(self):
arr = np.arange(5)
arr.flags.writeable = False
with self.assertRaises((TypeError, errors.TypingError)) as raises:
setitem_usecase(arr, 1, 42)
self.assertIn("Cannot modify value of type readonly array",
str(raises.exception))
class TestTyping(TestCase):
"""
Check typing of basic indexing operations
"""
def test_layout(self):
"""
Check an appropriate layout is inferred for the result of array
indexing.
"""
func = arraydecl.get_array_index_type
cty = types.Array(types.float64, 3, 'C')
fty = types.Array(types.float64, 3, 'F')
aty = types.Array(types.float64, 3, 'A')
indices = [
# Tuples of (indexing arguments, keeps "C" layout, keeps "F" layout)
((), True, True),
((ellipsis,), True, True),
# Indexing from the left => can sometimes keep "C" layout
((intp,), True, False),
((slice2_type,), True, False),
((intp, slice2_type), True, False),
((slice2_type, intp), False, False),
((slice2_type, slice2_type), False, False),
# Strided slices = > "A" layout
((intp, slice3_type), False, False),
((slice3_type,), False, False),
# Indexing from the right => can sometimes keep "F" layout
((ellipsis, intp,), False, True),
((ellipsis, slice2_type,), False, True),
((ellipsis, intp, slice2_type,), False, False),
((ellipsis, slice2_type, intp,), False, True),
((ellipsis, slice2_type, slice2_type,), False, False),
# Strided slices = > "A" layout
((ellipsis, slice3_type,), False, False),
((ellipsis, slice3_type, intp,), False, False),
# Indexing from both sides => only if all dimensions are indexed
((intp, ellipsis, intp,), False, False),
((slice2_type, ellipsis, slice2_type,), False, False),
((intp, intp, slice2_type,), True, False),
((intp, ellipsis, intp, slice2_type,), True, False),
((slice2_type, intp, intp,), False, True),
((slice2_type, intp, ellipsis, intp,), False, True),
((intp, slice2_type, intp,), False, False),
# Strided slices = > "A" layout
((slice3_type, intp, intp,), False, False),
((intp, intp, slice3_type,), False, False),
]
for index_tuple, keep_c, _ in indices:
index = types.Tuple(index_tuple)
r = func(cty, index)
self.assertEqual(tuple(r.index), index_tuple)
self.assertEqual(r.result.layout, 'C' if keep_c else 'A',
index_tuple)
self.assertFalse(r.advanced)
for index_tuple, _, keep_f in indices:
index = types.Tuple(index_tuple)
r = func(fty, index)
self.assertEqual(tuple(r.index), index_tuple)
self.assertEqual(r.result.layout, 'F' if keep_f else 'A',
index_tuple)
self.assertFalse(r.advanced)
for index_tuple, _, _ in indices:
index = types.Tuple(index_tuple)
r = func(aty, index)
self.assertEqual(tuple(r.index), index_tuple)
self.assertEqual(r.result.layout, 'A')
self.assertFalse(r.advanced)
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
khirayama/bottle | test/test_config.py | 13 | 2574 | import unittest
from bottle import ConfigDict
class TestConfDict(unittest.TestCase):
def test_isadict(self):
""" ConfigDict should behaves like a normal dict. """
# It is a dict-subclass, so this kind of pointless, but it doen't hurt.
d, m = dict(), ConfigDict()
d['key'], m['key'] = 'value', 'value'
d['k2'], m['k2'] = 'v1', 'v1'
d['k2'], m['k2'] = 'v2', 'v2'
self.assertEqual(d.keys(), m.keys())
self.assertEqual(list(d.values()), list(m.values()))
self.assertEqual(d.get('key'), m.get('key'))
self.assertEqual(d.get('cay'), m.get('cay'))
self.assertEqual(list(iter(d)), list(iter(m)))
self.assertEqual([k for k in d], [k for k in m])
self.assertEqual(len(d), len(m))
self.assertEqual('key' in d, 'key' in m)
self.assertEqual('cay' in d, 'cay' in m)
self.assertRaises(KeyError, lambda: m['cay'])
def test_write(self):
c = ConfigDict()
c['key'] = 'value'
self.assertEqual(c['key'], 'value')
self.assertTrue('key' in c)
c['key'] = 'value2'
self.assertEqual(c['key'], 'value2')
def test_update(self):
c = ConfigDict()
c['key'] = 'value'
c.update(key='value2', key2='value3')
self.assertEqual(c['key'], 'value2')
self.assertEqual(c['key2'], 'value3')
def test_namespaces(self):
c = ConfigDict()
c.update('a.b', key='value')
self.assertEqual(c['a.b.key'], 'value')
def test_meta(self):
c = ConfigDict()
c.meta_set('bool', 'filter', bool)
c.meta_set('int', 'filter', int)
c['bool'] = 'I am so true!'
c['int'] = '6'
self.assertTrue(c['bool'] is True)
self.assertEqual(c['int'], 6)
self.assertRaises(ValueError, lambda: c.update(int='not an int'))
def test_load_dict(self):
c = ConfigDict()
d = dict(a=dict(b=dict(foo=5, bar=6), baz=7))
c.load_dict(d)
self.assertEqual(c['a.b.foo'], 5)
self.assertEqual(c['a.b.bar'], 6)
self.assertEqual(c['a.baz'], 7)
# unicode keys (see issue #720)
try:
key = unichr(12354)
except NameError:
key = chr(12354)
c = ConfigDict()
c.load_dict({key: 'value'})
self.assertEqual('value', c[key])
c = ConfigDict()
c.load_dict({key: {'subkey': 'value'}})
self.assertEqual('value', c[key + '.subkey'])
if __name__ == '__main__': #pragma: no cover
unittest.main()
| mit |
translate/pootle | pootle/apps/pootle_config/abstracts.py | 10 | 1957 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import collections
from jsonfield.fields import JSONField
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models
from .managers import ConfigManager, ConfigQuerySet
class AbstractConfig(models.Model):
content_type = models.ForeignKey(
ContentType,
blank=True,
null=True,
db_index=True,
verbose_name='content type',
related_name="content_type_set_for_%(class)s",
on_delete=models.CASCADE)
object_pk = models.CharField(
'object ID',
max_length=255,
blank=True,
null=True)
content_object = GenericForeignKey(
ct_field="content_type",
fk_field="object_pk")
key = models.CharField(
'Configuration key',
max_length=255,
blank=False,
null=False,
db_index=True)
value = JSONField(
'Configuration value',
default="",
blank=True,
null=False,
load_kwargs={'object_pairs_hook': collections.OrderedDict})
objects = ConfigManager.from_queryset(ConfigQuerySet)()
class Meta(object):
abstract = True
ordering = ['pk']
index_together = ["content_type", "object_pk"]
def save(self, **kwargs):
if not self.key:
raise ValidationError("Config object must have a key")
if self.object_pk and not self.content_type:
raise ValidationError(
"Config object must have content_type when object_pk is set")
super(AbstractConfig, self).save(**kwargs)
| gpl-3.0 |
commial/miasm | miasm/core/interval.py | 5 | 8511 | from __future__ import print_function
INT_EQ = 0 # Equivalent
INT_B_IN_A = 1 # B in A
INT_A_IN_B = -1 # A in B
INT_DISJOIN = 2 # Disjoint
INT_JOIN = 3 # Overlap
INT_JOIN_AB = 4 # B starts at the end of A
INT_JOIN_BA = 5 # A starts at the end of B
def cmp_interval(inter1, inter2):
"""Compare @inter1 and @inter2 and returns the associated INT_* case
@inter1, @inter2: interval instance
"""
if inter1 == inter2:
return INT_EQ
inter1_start, inter1_stop = inter1
inter2_start, inter2_stop = inter2
result = INT_JOIN
if inter1_start <= inter2_start and inter1_stop >= inter2_stop:
result = INT_B_IN_A
if inter2_start <= inter1_start and inter2_stop >= inter1_stop:
result = INT_A_IN_B
if inter1_stop + 1 == inter2_start:
result = INT_JOIN_AB
if inter2_stop + 1 == inter1_start:
result = INT_JOIN_BA
if inter1_start > inter2_stop + 1 or inter2_start > inter1_stop + 1:
result = INT_DISJOIN
return result
class interval(object):
"""Stands for intervals with integer bounds
Offers common methods to work with interval"""
def __init__(self, bounds=None):
"""Instance an interval object
@bounds: (optional) list of (int, int) and/or interval instance
"""
if bounds is None:
bounds = []
elif isinstance(bounds, interval):
bounds = bounds.intervals
self.is_cannon = False
self.intervals = bounds
self.cannon()
def __iter__(self):
"""Iterate on intervals"""
for inter in self.intervals:
yield inter
@staticmethod
def cannon_list(tmp):
"""
Return a cannonizes list of intervals
@tmp: list of (int, int)
"""
tmp = sorted([x for x in tmp if x[0] <= x[1]])
out = []
if not tmp:
return out
out.append(tmp.pop())
while tmp:
x = tmp.pop()
rez = cmp_interval(out[-1], x)
if rez == INT_EQ:
continue
elif rez == INT_DISJOIN:
out.append(x)
elif rez == INT_B_IN_A:
continue
elif rez in [INT_JOIN, INT_JOIN_AB, INT_JOIN_BA, INT_A_IN_B]:
u, v = x
while out and cmp_interval(out[-1], (u, v)) in [
INT_JOIN, INT_JOIN_AB, INT_JOIN_BA, INT_A_IN_B]:
u = min(u, out[-1][0])
v = max(v, out[-1][1])
out.pop()
out.append((u, v))
else:
raise ValueError('unknown state', rez)
return out[::-1]
def cannon(self):
"Apply .cannon_list() on self contained intervals"
if self.is_cannon is True:
return
self.intervals = interval.cannon_list(self.intervals)
self.is_cannon = True
def __repr__(self):
if self.intervals:
o = " U ".join(["[0x%X 0x%X]" % (x[0], x[1])
for x in self.intervals])
else:
o = "[]"
return o
def __contains__(self, other):
if isinstance(other, interval):
for intervalB in other.intervals:
is_in = False
for intervalA in self.intervals:
if cmp_interval(intervalA, intervalB) in [INT_EQ, INT_B_IN_A]:
is_in = True
break
if not is_in:
return False
return True
else:
for intervalA in self.intervals:
if intervalA[0] <= other <= intervalA[1]:
return True
return False
def __eq__(self, i):
return self.intervals == i.intervals
def __ne__(self, other):
return not self.__eq__(other)
def union(self, other):
"""
Return the union of intervals
@other: interval instance
"""
if isinstance(other, interval):
other = other.intervals
other = interval(self.intervals + other)
return other
def difference(self, other):
"""
Return the difference of intervals
@other: interval instance
"""
to_test = self.intervals[:]
i = -1
to_del = other.intervals[:]
while i < len(to_test) - 1:
i += 1
x = to_test[i]
if x[0] > x[1]:
del to_test[i]
i -= 1
continue
while to_del and to_del[0][1] < x[0]:
del to_del[0]
for y in to_del:
if y[0] > x[1]:
break
rez = cmp_interval(x, y)
if rez == INT_DISJOIN:
continue
elif rez == INT_EQ:
del to_test[i]
i -= 1
break
elif rez == INT_A_IN_B:
del to_test[i]
i -= 1
break
elif rez == INT_B_IN_A:
del to_test[i]
i1 = (x[0], y[0] - 1)
i2 = (y[1] + 1, x[1])
to_test[i:i] = [i1, i2]
i -= 1
break
elif rez in [INT_JOIN_AB, INT_JOIN_BA]:
continue
elif rez == INT_JOIN:
del to_test[i]
if x[0] < y[0]:
to_test[i:i] = [(x[0], y[0] - 1)]
else:
to_test[i:i] = [(y[1] + 1, x[1])]
i -= 1
break
else:
raise ValueError('unknown state', rez)
return interval(to_test)
def intersection(self, other):
"""
Return the intersection of intervals
@other: interval instance
"""
out = []
for x in self.intervals:
if x[0] > x[1]:
continue
for y in other.intervals:
rez = cmp_interval(x, y)
if rez == INT_DISJOIN:
continue
elif rez == INT_EQ:
out.append(x)
continue
elif rez == INT_A_IN_B:
out.append(x)
continue
elif rez == INT_B_IN_A:
out.append(y)
continue
elif rez == INT_JOIN_AB:
continue
elif rez == INT_JOIN_BA:
continue
elif rez == INT_JOIN:
if x[0] < y[0]:
out.append((y[0], x[1]))
else:
out.append((x[0], y[1]))
continue
else:
raise ValueError('unknown state', rez)
return interval(out)
def __add__(self, other):
return self.union(other)
def __and__(self, other):
return self.intersection(other)
def __sub__(self, other):
return self.difference(other)
def hull(self):
"Return the first and the last bounds of intervals"
if not self.intervals:
return None, None
return self.intervals[0][0], self.intervals[-1][1]
@property
def empty(self):
"""Return True iff the interval is empty"""
return not self.intervals
def show(self, img_x=1350, img_y=20, dry_run=False):
"""
show image representing the interval
"""
try:
import Image
import ImageDraw
except ImportError:
print('cannot import python PIL imaging')
return
img = Image.new('RGB', (img_x, img_y), (100, 100, 100))
draw = ImageDraw.Draw(img)
i_min, i_max = self.hull()
print(hex(i_min), hex(i_max))
addr2x = lambda addr: ((addr - i_min) * img_x) // (i_max - i_min)
for a, b in self.intervals:
draw.rectangle((addr2x(a), 0, addr2x(b), img_y), (200, 0, 0))
if dry_run is False:
img.show()
@property
def length(self):
"""
Return the cumulated length of intervals
"""
# Do not use __len__ because we may return a value > 32 bits
return sum((stop - start + 1) for start, stop in self.intervals)
| gpl-2.0 |
avedaee/DIRAC | DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py | 1 | 16844 | ########################################################################
# $HeadURL $
# File: ReplicateAndRegister.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id $"
# #
# @file ReplicateAndRegister.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gMonitor
from DIRAC.RequestManagementSystem.private.OperationHandlerBase import OperationHandlerBase
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Resources.Utilities import Utils
########################################################################
class ReplicateAndRegister( OperationHandlerBase, DMSRequestOperationsBase ):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
super( ReplicateAndRegister, self ).__init__( operation, csPath )
# # own gMonitor stuff for files
gMonitor.registerActivity( "ReplicateAndRegisterAtt", "Replicate and register attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateOK", "Replications successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateFail", "Replications failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterOK", "Registrations successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterFail", "Registrations failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # for FTS
gMonitor.registerActivity( "FTSScheduleAtt", "Files schedule attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleOK", "File schedule successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleFail", "File schedule failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # SE cache
self.seCache = {}
# Clients
self.fc = FileCatalog()
self.ftsClient = FTSClient()
def __call__( self ):
""" call me maybe """
# # check replicas first
checkReplicas = self.__checkReplicas()
if not checkReplicas["OK"]:
self.log.error( checkReplicas["Message"] )
if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ):
bannedGroups = getattr( self, "FTSBannedGroups" ) if hasattr( self, "FTSBannedGroups" ) else ()
if self.request.OwnerGroup in bannedGroups:
self.log.info( "usage of FTS system is banned for request's owner" )
return self.rmTransfer()
return self.ftsTransfer()
return self.rmTransfer()
def __checkReplicas( self ):
""" check done replicas and update file states """
waitingFiles = dict( [ ( opFile.LFN, opFile ) for opFile in self.operation
if opFile.Status in ( "Waiting", "Scheduled" ) ] )
targetSESet = set( self.operation.targetSEList )
replicas = self.fc.getReplicas( waitingFiles )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
reMissing = re.compile( "no such file or directory" )
for failedLFN, errStr in replicas["Value"]["Failed"].items():
waitingFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
self.log.error( "file %s does not exists" % failedLFN )
gMonitor.addMark( "ReplicateFail", len( targetSESet ) )
waitingFiles[failedLFN].Status = "Failed"
for successfulLFN, reps in replicas["Value"]["Successful"].items():
if targetSESet.issubset( set( reps ) ):
self.log.info( "file %s has been replicated to all targets" % successfulLFN )
waitingFiles[successfulLFN].Status = "Done"
return S_OK()
def _addMetadataToFiles( self, toSchedule ):
""" Add metadata to those files that need to be scheduled through FTS
toSchedule is a dictionary:
{'lfn1': [opFile, validReplicas, validTargets], 'lfn2': [opFile, validReplicas, validTargets]}
"""
if toSchedule:
self.log.info( "found %s files to schedule, getting metadata from FC" % len( toSchedule ) )
lfns = toSchedule.keys()
else:
self.log.info( "No files to schedule" )
return S_OK()
res = self.fc.getFileMetadata( lfns )
if not res['OK']:
return res
else:
if res['Value']['Failed']:
self.log.warn( "Can't schedule %d files: problems getting the metadata: %s" % ( len( res['Value']['Failed'] ),
', '.join( res['Value']['Failed'] ) ) )
metadata = res['Value']['Successful']
filesToScheduleList = []
for lfnsToSchedule, lfnMetadata in metadata.items():
opFileToSchedule = toSchedule[lfnsToSchedule][0]
opFileToSchedule.GUID = lfnMetadata['GUID']
opFileToSchedule.Checksum = metadata[lfnsToSchedule]['Checksum']
opFileToSchedule.ChecksumType = metadata[lfnsToSchedule]['CheckSumType']
opFileToSchedule.Size = metadata[lfnsToSchedule]['Size']
filesToScheduleList.append( ( opFileToSchedule.toJSON()['Value'],
toSchedule[lfnsToSchedule][1],
toSchedule[lfnsToSchedule][2] ) )
return S_OK( filesToScheduleList )
def _filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
from DIRAC.Core.Utilities.Adler import compareAdler
ret = { "Valid" : [], "Banned" : [], "Bad" : [] }
replicas = self.dm.getActiveReplicas( opFile.LFN )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
reNotExists = re.compile( "not such file or directory" )
replicas = replicas["Value"]
failed = replicas["Failed"].get( opFile.LFN , "" )
if reNotExists.match( failed.lower() ):
opFile.Status = "Failed"
opFile.Error = failed
return S_ERROR( failed )
replicas = replicas["Successful"][opFile.LFN] if opFile.LFN in replicas["Successful"] else {}
for repSEName in replicas:
seRead = self.rssSEStatus( repSEName, "ReadAccess" )
if not seRead["OK"]:
self.log.info( seRead["Message"] )
ret["Banned"].append( repSEName )
continue
if not seRead["Value"]:
self.log.info( "StorageElement '%s' is banned for reading" % ( repSEName ) )
repSE = self.seCache.get( repSEName, None )
if not repSE:
repSE = StorageElement( repSEName, "SRM2" )
self.seCache[repSE] = repSE
pfn = Utils.executeSingleFileOrDirWrapper( repSE.getPfnForLfn( opFile.LFN ) )
if not pfn["OK"]:
self.log.warn( "unable to create pfn for %s lfn: %s" % ( opFile.LFN, pfn["Message"] ) )
ret["Banned"].append( repSEName )
continue
pfn = pfn["Value"]
repSEMetadata = repSE.getFileMetadata( pfn, singleFile = True )
if not repSEMetadata["OK"]:
self.log.warn( repSEMetadata["Message"] )
ret["Banned"].append( repSEName )
continue
repSEMetadata = repSEMetadata["Value"]
seChecksum = repSEMetadata.get( "Checksum" )
if opFile.Checksum and seChecksum and not compareAdler( seChecksum, opFile.Checksum ) :
self.log.warn( " %s checksum mismatch: %s %s:%s" % ( opFile.LFN,
opFile.Checksum,
repSE,
seChecksum ) )
ret["Bad"].append( repSEName )
continue
# # if we're here repSE is OK
ret["Valid"].append( repSEName )
return S_OK( ret )
def ftsTransfer( self ):
""" replicate and register using FTS """
self.log.info( "scheduling files in FTS..." )
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark( "FTSScheduleAtt" )
gMonitor.addMark( "FTSScheduleFail" )
return bannedTargets
if bannedTargets['Value']:
return S_OK( "%s targets are banned for writing" % ",".join( bannedTargets['Value'] ) )
# Can continue now
self.log.verbose( "No targets banned for writing" )
toSchedule = {}
for opFile in self.getWaitingFilesList():
opFile.Error = ''
gMonitor.addMark( "FTSScheduleAtt" )
# # check replicas
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
if not replicas["Valid"] and replicas["Banned"]:
self.log.warn( "unable to schedule '%s', replicas only at banned SEs" % opFile.LFN )
gMonitor.addMark( "FTSScheduleFail" )
continue
validReplicas = replicas["Valid"]
bannedReplicas = replicas["Banned"]
if not validReplicas and bannedReplicas:
self.log.warn( "unable to schedule '%s', replicas only at banned SEs" % opFile.LFN )
gMonitor.addMark( "FTSScheduleFail" )
continue
if validReplicas:
validTargets = list( set( self.operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
self.log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
continue
toSchedule[opFile.LFN] = [ opFile, validReplicas, validTargets ]
res = self._addMetadataToFiles( toSchedule )
if not res['OK']:
return res
else:
filesToScheduleList = res['Value']
if filesToScheduleList:
ftsSchedule = self.ftsClient.ftsSchedule( self.request.RequestID,
self.operation.OperationID,
filesToScheduleList )
if not ftsSchedule["OK"]:
self.log.error( ftsSchedule["Message"] )
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
if not ftsSchedule:
return S_OK()
for fileID in ftsSchedule["Successful"]:
gMonitor.addMark( "FTSScheduleOK", 1 )
for opFile in self.operation:
if fileID == opFile.FileID:
opFile.Status = "Scheduled"
self.log.always( "%s has been scheduled for FTS" % opFile.LFN )
for fileID, reason in ftsSchedule["Failed"]:
gMonitor.addMark( "FTSScheduleFail", 1 )
for opFile in self.operation:
if fileID == opFile.FileID:
opFile.Error = reason
self.log.error( "unable to schedule %s for FTS: %s" % ( opFile.LFN, opFile.Error ) )
else:
self.log.info( "No files to schedule after metadata checks" )
# Just in case some transfers could not be scheduled, try them with RM
return self.rmTransfer( fromFTS = True )
def rmTransfer( self, fromFTS = False ):
""" replicate and register using ReplicaManager """
# # get waiting files. If none just return
waitingFiles = self.getWaitingFilesList()
if not waitingFiles:
return S_OK()
if fromFTS:
self.log.info( "Trying transfer using replica manager as FTS failed" )
else:
self.log.info( "Transferring files using Data manager..." )
# # source SE
sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
if sourceSE:
# # check source se for read
sourceRead = self.rssSEStatus( sourceSE, "ReadAccess" )
if not sourceRead["OK"]:
self.log.info( sourceRead["Message"] )
for opFile in self.operation:
opFile.Error = sourceRead["Message"]
opFile.Status = "Failed"
self.operation.Error = sourceRead["Message"]
gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
gMonitor.addMark( "ReplicateFail", len( self.operation ) )
return sourceRead
if not sourceRead["Value"]:
self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
self.log.info( self.operation.Error )
return S_OK( self.operation.Error )
# # check targetSEs for write
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
gMonitor.addMark( "ReplicateFail", len( self.operation ) )
return bannedTargets
if bannedTargets['Value']:
return S_OK( "%s targets are banned for writing" % ",".join( bannedTargets['Value'] ) )
# Can continue now
self.log.verbose( "No targets banned for writing" )
# # loop over files
for opFile in waitingFiles:
gMonitor.addMark( "ReplicateAndRegisterAtt", 1 )
opFile.Error = ''
lfn = opFile.LFN
# Check if replica is at the specified source
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
continue
replicas = replicas["Value"]
if not replicas["Valid"]:
self.log.warn( "unable to find valid replicas for %s" % lfn )
continue
# # get the first one in the list
if sourceSE not in replicas['Valid']:
if sourceSE:
self.log.warn( "%s is not at specified sourceSE %s, changed to %s" % ( lfn, sourceSE, replicas["Valid"][0] ) )
sourceSE = replicas["Valid"][0]
# # loop over targetSE
for targetSE in self.operation.targetSEList:
# # call DataManager
if targetSE == sourceSE:
self.log.warn( "Request to replicate %s to the source SE: %s" % ( lfn, sourceSE ) )
continue
res = self.rm.replicateAndRegister( lfn, targetSE, sourceSE = sourceSE )
if res["OK"]:
if lfn in res["Value"]["Successful"]:
if "replicate" in res["Value"]["Successful"][lfn]:
repTime = res["Value"]["Successful"][lfn]["replicate"]
prString = "file %s replicated at %s in %s s." % ( lfn, targetSE, repTime )
gMonitor.addMark( "ReplicateOK", 1 )
if "register" in res["Value"]["Successful"][lfn]:
gMonitor.addMark( "RegisterOK", 1 )
regTime = res["Value"]["Successful"][lfn]["register"]
prString += ' and registered in %s s.' % regTime
self.log.info( prString )
else:
gMonitor.addMark( "RegisterFail", 1 )
prString += " but failed to register"
self.log.warn( prString )
opFile.Error = "Failed to register"
opFile.Status = "Failed"
# # add register replica operation
registerOperation = self.getRegisterOperation( opFile, targetSE )
self.request.insertAfter( registerOperation, self.operation )
else:
self.log.error( "failed to replicate %s to %s." % ( lfn, targetSE ) )
gMonitor.addMark( "ReplicateFail", 1 )
opFile.Error = "Failed to replicate"
else:
gMonitor.addMark( "ReplicateFail", 1 )
reason = res["Value"]["Failed"][lfn]
self.log.error( "failed to replicate and register file %s at %s: %s" % ( lfn, targetSE, reason ) )
opFile.Error = reason
else:
gMonitor.addMark( "ReplicateFail", 1 )
opFile.Error = "DataManager error: %s" % res["Message"]
self.log.error( opFile.Error )
if not opFile.Error:
if len( self.operation.targetSEList ) > 1:
self.log.info( "file %s has been replicated to all targetSEs" % lfn )
opFile.Status = "Done"
return S_OK()
| gpl-3.0 |
gabrielfalcao/lettuce | tests/integration/lib/Django-1.2.5/tests/modeltests/or_lookups/tests.py | 92 | 7584 | from datetime import datetime
from operator import attrgetter
from django.db.models import Q
from django.test import TestCase
from models import Article
class OrLookupsTests(TestCase):
def setUp(self):
self.a1 = Article.objects.create(
headline='Hello', pub_date=datetime(2005, 11, 27)
).pk
self.a2 = Article.objects.create(
headline='Goodbye', pub_date=datetime(2005, 11, 28)
).pk
self.a3 = Article.objects.create(
headline='Hello and goodbye', pub_date=datetime(2005, 11, 29)
).pk
def test_filter_or(self):
self.assertQuerysetEqual(
Article.objects.filter(headline__startswith='Hello') | Article.objects.filter(headline__startswith='Goodbye'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(headline__contains='Hello') | Article.objects.filter(headline__contains='bye'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(headline__iexact='Hello') | Article.objects.filter(headline__contains='ood'), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__startswith='Goodbye')), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline")
)
def test_stages(self):
# You can shorten this syntax with code like the following, which is
# especially useful if building the query in stages:
articles = Article.objects.all()
self.assertQuerysetEqual(
articles.filter(headline__startswith='Hello') & articles.filter(headline__startswith='Goodbye'),
[]
)
self.assertQuerysetEqual(
articles.filter(headline__startswith='Hello') & articles.filter(headline__contains='bye'), [
'Hello and goodbye'
],
attrgetter("headline")
)
def test_pk_q(self):
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2)), [
'Hello',
'Goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | Q(pk=self.a2) | Q(pk=self.a3)), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_pk_in(self):
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[self.a1, self.a2, self.a3]), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(pk__in=(self.a1, self.a2, self.a3)), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
self.assertQuerysetEqual(
Article.objects.filter(pk__in=[self.a1, self.a2, self.a3, 40000]), [
'Hello',
'Goodbye',
'Hello and goodbye'
],
attrgetter("headline"),
)
def test_q_negated(self):
# Q objects can be negated
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) | ~Q(pk=self.a2)), [
'Hello',
'Hello and goodbye'
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.filter(~Q(pk=self.a1) & ~Q(pk=self.a2)), [
'Hello and goodbye'
],
attrgetter("headline"),
)
# This allows for more complex queries than filter() and exclude()
# alone would allow
self.assertQuerysetEqual(
Article.objects.filter(Q(pk=self.a1) & (~Q(pk=self.a2) | Q(pk=self.a3))), [
'Hello'
],
attrgetter("headline"),
)
    def test_complex_filter(self):
        """complex_filter() accepts both lookup dicts and Q objects."""
        # The 'complex_filter' method supports framework features such as
        # 'limit_choices_to' which normally take a single dictionary of lookup
        # arguments but need to support arbitrary queries via Q objects too.
        self.assertQuerysetEqual(
            Article.objects.complex_filter({'pk': self.a1}), [
                'Hello'
            ],
            attrgetter("headline"),
        )
        self.assertQuerysetEqual(
            Article.objects.complex_filter(Q(pk=self.a1) | Q(pk=self.a2)), [
                'Hello',
                'Goodbye'
            ],
            attrgetter("headline"),
        )
    def test_empty_in(self):
        """An empty pk__in matches nothing but still composes with OR."""
        # Passing "in" an empty list returns no results ...
        self.assertQuerysetEqual(
            Article.objects.filter(pk__in=[]),
            []
        )
        # ... but can return results if we OR it with another query.
        self.assertQuerysetEqual(
            Article.objects.filter(Q(pk__in=[]) | Q(headline__icontains='goodbye')), [
                'Goodbye',
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
    def test_q_and(self):
        """Multiple Q arguments to filter() are AND-ed, in any order."""
        # Q arg objects are ANDed
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')), [
                'Hello and goodbye'
            ],
            attrgetter("headline")
        )
        # Q arg AND order is irrelevant
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello'), [
                'Hello and goodbye'
            ],
            attrgetter("headline"),
        )
        # AND of mutually exclusive prefixes yields the empty queryset.
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello') & Q(headline__startswith='Goodbye')),
            []
        )
    def test_q_exclude(self):
        """exclude() accepts Q objects just like filter() does."""
        self.assertQuerysetEqual(
            Article.objects.exclude(Q(headline__startswith='Hello')), [
                'Goodbye'
            ],
            attrgetter("headline")
        )
    def test_other_arg_queries(self):
        """Q objects also work with get(), count(), values() and in_bulk()."""
        # Try some arg queries with operations other than filter.
        self.assertEqual(
            Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye')).headline,
            'Hello and goodbye'
        )
        self.assertEqual(
            Article.objects.filter(Q(headline__startswith='Hello') | Q(headline__contains='bye')).count(),
            3
        )
        self.assertQuerysetEqual(
            Article.objects.filter(Q(headline__startswith='Hello'), Q(headline__contains='bye')).values(), [
                {"headline": "Hello and goodbye", "id": self.a3, "pub_date": datetime(2005, 11, 29)},
            ],
            lambda o: o,
        )
        # in_bulk() is additionally narrowed by the queryset's own filter.
        self.assertEqual(
            Article.objects.filter(Q(headline__startswith='Hello')).in_bulk([self.a1, self.a2]),
            {self.a1: Article.objects.get(pk=self.a1)}
        )
| gpl-3.0 |
"Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
# Weekday number (0=Monday .. 6=Sunday) to translated full name.
WEEKDAYS = {
    0: _('Monday'),
    1: _('Tuesday'),
    2: _('Wednesday'),
    3: _('Thursday'),
    4: _('Friday'),
    5: _('Saturday'),
    6: _('Sunday'),
}

# Weekday number to translated three-letter abbreviation.
WEEKDAYS_ABBR = {
    0: _('Mon'),
    1: _('Tue'),
    2: _('Wed'),
    3: _('Thu'),
    4: _('Fri'),
    5: _('Sat'),
    6: _('Sun'),
}

# Lowercase English weekday name back to its number.
WEEKDAYS_REV = {
    'monday': 0,
    'tuesday': 1,
    'wednesday': 2,
    'thursday': 3,
    'friday': 4,
    'saturday': 5,
    'sunday': 6,
}

# Month number (1-12) to translated full name.
MONTHS = {
    1: _('January'),
    2: _('February'),
    3: _('March'),
    4: _('April'),
    5: _('May'),
    6: _('June'),
    7: _('July'),
    8: _('August'),
    9: _('September'),
    10: _('October'),
    11: _('November'),
    12: _('December'),
}

# Month number to translated lowercase three-letter abbreviation.
MONTHS_3 = {
    1: _('jan'),
    2: _('feb'),
    3: _('mar'),
    4: _('apr'),
    5: _('may'),
    6: _('jun'),
    7: _('jul'),
    8: _('aug'),
    9: _('sep'),
    10: _('oct'),
    11: _('nov'),
    12: _('dec'),
}

# Lowercase English three-letter month abbreviation back to its number.
MONTHS_3_REV = {
    'jan': 1,
    'feb': 2,
    'mar': 3,
    'apr': 4,
    'may': 5,
    'jun': 6,
    'jul': 7,
    'aug': 8,
    'sep': 9,
    'oct': 10,
    'nov': 11,
    'dec': 12,
}

# Month names abbreviated in Associated Press style.
MONTHS_AP = {
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.'),
}

# Alternative month names, required for long date representation by some
# locales (e.g. genitive forms in Slavic languages).
MONTHS_ALT = {
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December'),
}
| mpl-2.0 |
#!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <fweisbec@gmail.com>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
    """Render a pid as 'comm:pid' using the module-global `threads` cache."""
    return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
    """Placeholder event used when a runqueue change has no identified cause."""

    @staticmethod
    def color():
        # Unknown events have no dedicated display colour.
        return None

    def __repr__(self):
        return "unknown"
class RunqueueEventSleep:
    """Event recorded when a task leaves the runqueue to sleep."""

    @staticmethod
    def color():
        # Sleep events are drawn in pure blue.
        return (0, 0, 0xff)

    def __init__(self, sleeper):
        # pid of the task that went to sleep
        self.sleeper = sleeper

    def __repr__(self):
        return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
    """Event recorded when a sleeping task is woken onto the runqueue."""

    @staticmethod
    def color():
        # Wakeups are drawn in yellow.
        return (0xff, 0xff, 0)

    def __init__(self, wakee):
        # pid of the task that woke up
        self.wakee = wakee

    def __repr__(self):
        return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
    """Event recorded when a freshly forked task enters the runqueue."""

    @staticmethod
    def color():
        # Forks are drawn in green.
        return (0, 0xff, 0)

    def __init__(self, child):
        # pid of the newly created child task
        self.child = child

    def __repr__(self):
        return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
    """Event recorded when a task is migrated onto this CPU's runqueue."""

    @staticmethod
    def color():
        # Inbound migrations are drawn in cyan.
        return (0, 0xf0, 0xff)

    def __init__(self, new):
        # pid of the task that arrived on this runqueue
        self.new = new

    def __repr__(self):
        return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
    """Event recorded when a task is migrated off this CPU's runqueue."""

    @staticmethod
    def color():
        # Outbound migrations are drawn in magenta.
        return (0xff, 0, 0xff)

    def __init__(self, old):
        # pid of the task that left this runqueue
        self.old = old

    def __repr__(self):
        return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
    """Immutable snapshot of the set of tasks on one CPU's runqueue.

    Mutating operations return either ``self`` (when nothing changed) or a
    brand-new snapshot, so callers can use identity comparison (`is`) to
    detect whether an event actually modified the runqueue.
    """

    def __init__(self, tasks=(0,), event=None):
        # Task 0 stands for the idle task and is always present.
        # The previous version used a mutable list default and a single
        # shared default RunqueueEventUnknown() instance for every snapshot;
        # use an immutable default and a None sentinel instead.
        self.tasks = tuple(tasks)
        self.event = event if event is not None else RunqueueEventUnknown()

    def sched_switch(self, prev, prev_state, next):
        """Return the snapshot resulting from a sched_switch event."""
        event = RunqueueEventUnknown()
        # No visible change: prev stays runnable and both tasks are already
        # tracked on this runqueue.
        if taskState(prev_state) == "R" and next in self.tasks \
                and prev in self.tasks:
            return self
        if taskState(prev_state) != "R":
            event = RunqueueEventSleep(prev)
        next_tasks = list(self.tasks[:])
        if prev in self.tasks:
            if taskState(prev_state) != "R":
                next_tasks.remove(prev)
        elif taskState(prev_state) == "R":
            next_tasks.append(prev)
        if next not in next_tasks:
            next_tasks.append(next)
        return RunqueueSnapshot(next_tasks, event)

    def migrate_out(self, old):
        """Return a snapshot with `old` removed, or self if it was absent."""
        if old not in self.tasks:
            return self
        next_tasks = [task for task in self.tasks if task != old]
        return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))

    def __migrate_in(self, new, event):
        # If the task is already here, only record the triggering event.
        if new in self.tasks:
            self.event = event
            return self
        next_tasks = self.tasks[:] + tuple([new])
        return RunqueueSnapshot(next_tasks, event)

    def migrate_in(self, new):
        """Return a snapshot with `new` migrated onto this runqueue."""
        return self.__migrate_in(new, RunqueueMigrateIn(new))

    def wake_up(self, new):
        """Return a snapshot with `new` woken up onto this runqueue."""
        return self.__migrate_in(new, RunqueueEventWakeup(new))

    def wake_up_new(self, new):
        """Return a snapshot with freshly forked task `new` added."""
        return self.__migrate_in(new, RunqueueEventFork(new))

    def load(self):
        """ Provide the number of tasks on the runqueue.
            Don't count idle"""
        return len(self.tasks) - 1

    def __repr__(self):
        # BUGFIX: the previous implementation called self.origin_tostring(),
        # which is not defined anywhere, so every repr() raised
        # AttributeError. Render the tasks and the last event instead.
        return "%r %r" % (self.tasks, self.event)
class TimeSlice:
    """State of all runqueues during the interval [start, end].

    Each slice is chained to the previous one (`prev`) and starts as a copy
    of its state; event handlers then mutate the copy and record which CPUs
    triggered the change in `event_cpus`.
    """
    def __init__(self, start, prev):
        self.start = start
        self.prev = prev
        self.end = start
        # cpus that triggered the event
        self.event_cpus = []
        if prev is not None:
            self.total_load = prev.total_load
            self.rqs = prev.rqs.copy()
        else:
            # First slice: empty runqueues, zero aggregate load.
            self.rqs = defaultdict(RunqueueSnapshot)
            self.total_load = 0

    def __update_total_load(self, old_rq, new_rq):
        # Keep the aggregate load consistent with a per-CPU runqueue change.
        diff = new_rq.load() - old_rq.load()
        self.total_load += diff

    def sched_switch(self, ts_list, prev, prev_state, next, cpu):
        """Apply a sched_switch on `cpu`; append self to ts_list if changed."""
        old_rq = self.prev.rqs[cpu]
        new_rq = old_rq.sched_switch(prev, prev_state, next)
        # Identity check: the snapshot returns itself when nothing changed.
        if old_rq is new_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def migrate(self, ts_list, new, old_cpu, new_cpu):
        """Move task `new` from old_cpu's runqueue to new_cpu's."""
        if old_cpu == new_cpu:
            return
        old_rq = self.prev.rqs[old_cpu]
        out_rq = old_rq.migrate_out(new)
        self.rqs[old_cpu] = out_rq
        self.__update_total_load(old_rq, out_rq)

        new_rq = self.prev.rqs[new_cpu]
        in_rq = new_rq.migrate_in(new)
        self.rqs[new_cpu] = in_rq
        self.__update_total_load(new_rq, in_rq)

        ts_list.append(self)

        # The source CPU is only an event CPU if the task was really there.
        if old_rq is not out_rq:
            self.event_cpus.append(old_cpu)
        self.event_cpus.append(new_cpu)

    def wake_up(self, ts_list, pid, cpu, fork):
        """Apply a wakeup (or fork wakeup when `fork` is truthy) on `cpu`."""
        old_rq = self.prev.rqs[cpu]
        if fork:
            new_rq = old_rq.wake_up_new(pid)
        else:
            new_rq = old_rq.wake_up(pid)

        if new_rq is old_rq:
            return

        self.rqs[cpu] = new_rq
        self.__update_total_load(old_rq, new_rq)
        ts_list.append(self)
        self.event_cpus = [cpu]

    def next(self, t):
        """Close this slice at time `t` and return the successor slice."""
        self.end = t
        return TimeSlice(t, self)
class TimeSliceList(UserList):
    """Ordered list of TimeSlice objects plus GUI-painting helpers.

    NOTE: this file is Python 2 (UserList import, integer '/', xrange).
    """
    def __init__(self, arg = []):
        self.data = arg

    def get_time_slice(self, ts):
        """Return the slice covering timestamp `ts`, creating it if needed."""
        if len(self.data) == 0:
            slice = TimeSlice(ts, TimeSlice(-1, None))
        else:
            slice = self.data[-1].next(ts)
        return slice

    def find_time_slice(self, ts):
        """Binary-search the index of the slice containing `ts`, or -1."""
        start = 0
        end = len(self.data)
        found = -1
        searching = True
        while searching:
            if start == end or start == end - 1:
                searching = False

            # Integer midpoint (Python 2 '/' on ints).
            i = (end + start) / 2
            if self.data[i].start <= ts and self.data[i].end >= ts:
                found = i
                end = i
                continue

            if self.data[i].end < ts:
                start = i

            elif self.data[i].start > ts:
                end = i

        return found

    def set_root_win(self, win):
        # GUI root window used by the painting callbacks below.
        self.root_win = win

    def mouse_down(self, cpu, t):
        """Show a textual summary of cpu's runqueue at time `t` in the GUI."""
        idx = self.find_time_slice(t)
        if idx == -1:
            return

        ts = self[idx]
        rq = ts.rqs[cpu]
        raw = "CPU: %d\n" % cpu
        raw += "Last event : %s\n" % rq.event.__repr__()
        raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
        raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
        raw += "Load = %d\n" % rq.load()
        for t in rq.tasks:
            raw += "%s \n" % thread_name(t)

        self.root_win.update_summary(raw)

    def update_rectangle_cpu(self, slice, cpu):
        """Paint one CPU's rectangle for `slice`; redder means more loaded."""
        rq = slice.rqs[cpu]

        if slice.total_load != 0:
            load_rate = rq.load() / float(slice.total_load)
        else:
            load_rate = 0

        red_power = int(0xff - (0xff * load_rate))
        color = (0xff, red_power, red_power)

        top_color = None

        # CPUs that triggered the event get a coloured stripe on top.
        if cpu in slice.event_cpus:
            top_color = rq.event.color()

        self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)

    def fill_zone(self, start, end):
        """Repaint every CPU rectangle for slices overlapping [start, end]."""
        i = self.find_time_slice(start)
        if i == -1:
            return

        for i in xrange(i, len(self.data)):
            timeslice = self.data[i]
            if timeslice.start > end:
                return

            for cpu in timeslice.rqs:
                self.update_rectangle_cpu(timeslice, cpu)

    def interval(self):
        """Return the (first_start, last_end) covered time range."""
        if len(self.data) == 0:
            return (0, 0)

        return (self.data[0].start, self.data[-1].end)

    def nr_rectangles(self):
        """Return the highest CPU number seen in the final slice."""
        last_ts = self.data[-1]
        max_cpu = 0
        for cpu in last_ts.rqs:
            if cpu > max_cpu:
                max_cpu = cpu
        return max_cpu
class SchedEventProxy:
    """Translates raw perf sched events into TimeSlice updates."""
    def __init__(self):
        # Per-CPU pid of the task currently believed to be running
        # (-1 means unknown / not yet seen).
        self.current_tsk = defaultdict(lambda : -1)
        self.timeslices = TimeSliceList()

    def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
            next_comm, next_pid, next_prio):
        """ Ensure the task we sched out this cpu is really the one
            we logged. Otherwise we may have missed traces """

        on_cpu_task = self.current_tsk[headers.cpu]

        # Python 2 print statement; warn about inconsistent traces but
        # continue processing anyway.
        if on_cpu_task != -1 and on_cpu_task != prev_pid:
            print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
                (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)

        threads[prev_pid] = prev_comm
        threads[next_pid] = next_comm
        self.current_tsk[headers.cpu] = next_pid

        ts = self.timeslices.get_time_slice(headers.ts())
        ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)

    def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
        """Record a task migration between CPUs."""
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)

    def wake_up(self, headers, comm, pid, success, target_cpu, fork):
        """Record a (possibly fork-) wakeup; failed wakeups are ignored."""
        if success == 0:
            return
        ts = self.timeslices.get_time_slice(headers.ts())
        ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
    """perf entry point: called once before any event is delivered."""
    global parser
    parser = SchedEventProxy()
def trace_end():
    """perf entry point: called after the last event; launches the wx GUI."""
    app = wx.App(False)
    timeslices = parser.timeslices
    frame = RootFrame(timeslices, "Migration")
    app.MainLoop()
# The handlers below were generated by `perf script -g python` for events
# this tool does not visualize; they are intentionally no-ops.
def sched__sched_stat_runtime(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, runtime, vruntime):
    pass

def sched__sched_stat_iowait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_sleep(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_stat_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, delay):
    pass

def sched__sched_process_fork(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    parent_comm, parent_pid, child_comm, child_pid):
    pass

def sched__sched_process_wait(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_process_free(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass
def sched__sched_migrate_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, orig_cpu,
    dest_cpu):
    """perf handler: forward a task migration event to the parser."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    prev_comm, prev_pid, prev_prio, prev_state,
    next_comm, next_pid, next_prio):
    """perf handler: forward a context-switch event to the parser."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
         next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    """perf handler: wakeup of a freshly forked task (fork flag = 1)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio, success,
    target_cpu):
    """perf handler: ordinary task wakeup (fork flag = 0)."""
    headers = EventHeaders(common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    parser.wake_up(headers, comm, pid, success, target_cpu, 0)
# Remaining generated handlers for events the visualization ignores.
def sched__sched_wait_task(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid, prio):
    pass

def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    ret):
    pass

def sched__sched_kthread_stop(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    comm, pid):
    pass

def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm):
    pass
| gpl-2.0 |
from django.conf import settings
from django.db import models
from django.db.models.fields import FieldDoesNotExist
class CurrentSiteManager(models.Manager):
    "Use this to limit objects to those associated with the current site."

    def __init__(self, field_name=None):
        super(CurrentSiteManager, self).__init__()
        # Name of the FK/M2M field pointing at the Site model; when None it
        # is autodetected lazily ('site' or 'sites') on first queryset use.
        self.__field_name = field_name
        self.__is_validated = False

    def _validate_field_name(self):
        """Resolve and type-check the site field, caching the result."""
        field_names = self.model._meta.get_all_field_names()

        # If a custom name is provided, make sure the field exists on the model
        if self.__field_name is not None and self.__field_name not in field_names:
            raise ValueError("%s couldn't find a field named %s in %s." % \
                (self.__class__.__name__, self.__field_name, self.model._meta.object_name))

        # Otherwise, see if there is a field called either 'site' or 'sites'
        else:
            for potential_name in ['site', 'sites']:
                if potential_name in field_names:
                    self.__field_name = potential_name
                    self.__is_validated = True
                    break

        # Now do a type check on the field (FK or M2M only)
        try:
            field = self.model._meta.get_field(self.__field_name)
            if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
                raise TypeError("%s must be a ForeignKey or ManyToManyField." %self.__field_name)
        except FieldDoesNotExist:
            raise ValueError("%s couldn't find a field named %s in %s." % \
                (self.__class__.__name__, self.__field_name, self.model._meta.object_name))
        self.__is_validated = True

    def get_query_set(self):
        # Validate lazily so model loading order does not matter.
        if not self.__is_validated:
            self._validate_field_name()
        return super(CurrentSiteManager, self).get_query_set().filter(**{self.__field_name + '__id__exact': settings.SITE_ID})
| bsd-3-clause |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-07-07 16:48
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.6: adds the threaded 'Comment' model
    # (lft/rgt/tree_id/depth are materialized-path fields used by treebeard).

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wiki', '0005_auto_20160701_1520'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('lft', models.PositiveIntegerField(db_index=True)),
                ('rgt', models.PositiveIntegerField(db_index=True)),
                ('tree_id', models.PositiveIntegerField(db_index=True)),
                ('depth', models.PositiveIntegerField(db_index=True)),
                ('comment', models.TextField(verbose_name='Comment')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wiki.Article')),
                # Keep the comment if its author account is deleted.
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| mit |
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, fields, api, exceptions, _
class QcInspection(models.Model):
    _name = 'qc.inspection'
    _description = 'Quality control inspection'
    _inherit = ['mail.thread', 'ir.needaction_mixin']

    @api.one
    @api.depends('inspection_lines', 'inspection_lines.success')
    def _success(self):
        # An inspection succeeds only if every line succeeded.
        self.success = all([x.success for x in self.inspection_lines])

    @api.multi
    def _links_get(self):
        # Selection values for the 'object_id' reference field.
        link_obj = self.env['res.request.link']
        return [(r.object, r.name) for r in link_obj.search([])]

    @api.one
    @api.depends('object_id')
    def _get_product(self):
        # Mirror object_id into 'product' when the reference is a product.
        if self.object_id and self.object_id._name == 'product.product':
            self.product = self.object_id
        else:
            self.product = False

    name = fields.Char(
        string='Inspection number', required=True, default='/', select=True,
        readonly=True, states={'draft': [('readonly', False)]}, copy=False)
    date = fields.Datetime(
        string='Date', required=True, readonly=True, copy=False,
        default=fields.Datetime.now,
        states={'draft': [('readonly', False)]}, select=True)
    object_id = fields.Reference(
        string='Reference', selection=_links_get, readonly=True,
        states={'draft': [('readonly', False)]}, ondelete="set null")
    product = fields.Many2one(
        comodel_name="product.product", compute="_get_product", store=True,
        help="Product associated with the inspection")
    qty = fields.Float(string="Quantity", default=1.0)
    test = fields.Many2one(
        comodel_name='qc.test', string='Test', readonly=True, select=True)
    inspection_lines = fields.One2many(
        comodel_name='qc.inspection.line', inverse_name='inspection_id',
        string='Inspection lines', readonly=True,
        states={'ready': [('readonly', False)]})
    internal_notes = fields.Text(string='Internal notes')
    external_notes = fields.Text(
        string='External notes',
        states={'success': [('readonly', True)],
                'failed': [('readonly', True)]})
    state = fields.Selection(
        [('draft', 'Draft'),
         ('ready', 'Ready'),
         ('waiting', 'Waiting supervisor approval'),
         ('success', 'Quality success'),
         ('failed', 'Quality failed'),
         ('canceled', 'Canceled')],
        string='State', readonly=True, default='draft')
    success = fields.Boolean(
        compute="_success", string='Success',
        help='This field will be marked if all tests have succeeded.',
        store=True)
    auto_generated = fields.Boolean(
        string='Auto-generated', readonly=True, copy=False,
        help='If an inspection is auto-generated, it can be canceled nor '
             'removed.')
    company_id = fields.Many2one(
        comodel_name='res.company', string='Company', readonly=True,
        states={'draft': [('readonly', False)]},
        default=lambda self: self.env['res.company']._company_default_get(
            'qc.inspection'))
    user = fields.Many2one(
        comodel_name='res.users', string='Responsible',
        track_visibility='always', default=lambda self: self.env.user)

    @api.model
    def create(self, vals):
        # Assign the next inspection number from the sequence when unset.
        if vals.get('name', '/') == '/':
            vals['name'] = self.env['ir.sequence'].get('qc.inspection')
        return super(QcInspection, self).create(vals)

    @api.multi
    def unlink(self):
        """Forbid removal of auto-generated or non-draft inspections."""
        for inspection in self:
            if inspection.auto_generated:
                raise exceptions.Warning(
                    _("You cannot remove an auto-generated inspection."))
            if inspection.state != 'draft':
                raise exceptions.Warning(
                    _("You cannot remove an inspection that is not in draft "
                      "state."))
        return super(QcInspection, self).unlink()

    @api.multi
    def action_draft(self):
        self.write({'state': 'draft'})

    @api.multi
    def action_todo(self):
        # A test must be selected before the inspection can be performed.
        for inspection in self:
            if not inspection.test:
                raise exceptions.Warning(
                    _("You must first set the test to perform."))
        self.write({'state': 'ready'})

    @api.multi
    def action_confirm(self):
        """Validate answers and move to 'success' or supervisor approval."""
        for inspection in self:
            for line in inspection.inspection_lines:
                if line.question_type == 'qualitative':
                    if not line.qualitative_value:
                        raise exceptions.Warning(
                            _("You should provide an answer for all "
                              "qualitative questions."))
                else:
                    if not line.uom_id:
                        raise exceptions.Warning(
                            _("You should provide a unit of measure for "
                              "quantitative questions."))
            if inspection.success:
                inspection.state = 'success'
            else:
                # Failed inspections need a supervisor decision.
                inspection.state = 'waiting'

    @api.multi
    def action_approve(self):
        # Supervisor decision: final state follows the computed result.
        for inspection in self:
            if inspection.success:
                inspection.state = 'success'
            else:
                inspection.state = 'failed'

    @api.multi
    def action_cancel(self):
        self.write({'state': 'canceled'})

    @api.multi
    def set_test(self, trigger_line, force_fill=False):
        """Replace header data and lines with those of the trigger's test."""
        for inspection in self:
            header = self._prepare_inspection_header(
                inspection.object_id, trigger_line)
            del header['state']  # don't change current status
            del header['auto_generated']  # don't change auto_generated flag
            del header['user']  # don't change current user
            inspection.write(header)
            inspection.inspection_lines.unlink()
            inspection.inspection_lines = inspection._prepare_inspection_lines(
                trigger_line.test, force_fill=force_fill)

    @api.multi
    def _make_inspection(self, object_ref, trigger_line):
        """Overridable hook method for creating inspection from test.

        :param object_ref: Object instance
        :param trigger_line: Trigger line instance
        :return: Inspection object
        """
        inspection = self.create(self._prepare_inspection_header(
            object_ref, trigger_line))
        inspection.set_test(trigger_line)
        return inspection

    @api.multi
    def _prepare_inspection_header(self, object_ref, trigger_line):
        """Overridable hook method for preparing inspection header.

        :param object_ref: Object instance
        :param trigger_line: Trigger line instance
        :return: List of values for creating the inspection
        """
        return {
            'object_id': object_ref and '%s,%s' % (object_ref._name,
                                                   object_ref.id) or False,
            'state': 'ready',
            'test': trigger_line.test.id,
            'user': trigger_line.user.id,
            'auto_generated': True,
        }

    @api.multi
    def _prepare_inspection_lines(self, test, force_fill=False):
        # Build one2many create commands (0, 0, vals) for every test question.
        new_data = []
        for line in test.test_lines:
            data = self._prepare_inspection_line(
                test, line, fill=test.fill_correct_values or force_fill)
            new_data.append((0, 0, data))
        return new_data

    @api.multi
    def _prepare_inspection_line(self, test, line, fill=None):
        data = {
            'name': line.name,
            'test_line': line.id,
            'notes': line.notes,
            'min_value': line.min_value,
            'max_value': line.max_value,
            'test_uom_id': line.uom_id.id,
            'uom_id': line.uom_id.id,
            'question_type': line.type,
            'possible_ql_values': [x.id for x in line.ql_values]
        }
        if fill:
            if line.type == 'qualitative':
                # Fill with the first correct value found
                for value in line.ql_values:
                    if value.ok:
                        data['qualitative_value'] = value.id
                        break
            else:
                # Fill with a value inside the interval
                data['quantitative_value'] = (line.min_value +
                                              line.max_value) * 0.5
        return data
class QcInspectionLine(models.Model):
    _name = 'qc.inspection.line'
    _description = "Quality control inspection line"

    @api.one
    @api.depends('question_type', 'uom_id', 'test_uom_id', 'max_value',
                 'min_value', 'quantitative_value', 'qualitative_value',
                 'possible_ql_values')
    def quality_test_check(self):
        """Compute 'success' from the recorded answer.

        Qualitative answers use the chosen value's 'ok' flag; quantitative
        answers are converted to the test UoM and checked against the
        [min_value, max_value] interval.
        """
        if self.question_type == 'qualitative':
            self.success = self.qualitative_value.ok
        else:
            if self.uom_id.id == self.test_uom_id.id:
                amount = self.quantitative_value
            else:
                # Convert the recorded value into the test's UoM first.
                amount = self.env['product.uom']._compute_qty(
                    self.uom_id.id, self.quantitative_value,
                    self.test_uom_id.id)
            self.success = self.max_value >= amount >= self.min_value

    @api.one
    @api.depends('possible_ql_values', 'min_value', 'max_value', 'test_uom_id',
                 'question_type')
    def get_valid_values(self):
        """Compute a human-readable description of the accepted answers."""
        if self.question_type == 'qualitative':
            self.valid_values = ", ".join([x.name for x in
                                           self.possible_ql_values if x.ok])
        else:
            self.valid_values = "%s-%s" % (self.min_value, self.max_value)
            # Only show the UoM to users who have UoM support enabled.
            if self.env.ref("product.group_uom") in self.env.user.groups_id:
                self.valid_values += " %s" % self.test_uom_id.name

    inspection_id = fields.Many2one(
        comodel_name='qc.inspection', string='Inspection')
    name = fields.Char(string="Question", readonly=True)
    product = fields.Many2one(
        comodel_name="product.product", related="inspection_id.product",
        store=True)
    test_line = fields.Many2one(
        comodel_name='qc.test.question', string='Test question',
        readonly=True)
    possible_ql_values = fields.Many2many(
        comodel_name='qc.test.question.value', string='Answers')
    quantitative_value = fields.Float(
        'Quantitative value', digits=(16, 5),
        help="Value of the result for a quantitative question.")
    qualitative_value = fields.Many2one(
        comodel_name='qc.test.question.value', string='Qualitative value',
        help="Value of the result for a qualitative question.",
        domain="[('id', 'in', possible_ql_values[0][2])]")
    notes = fields.Text(string='Notes')
    min_value = fields.Float(
        string='Min', digits=(16, 5), readonly=True,
        help="Minimum valid value for a quantitative question.")
    max_value = fields.Float(
        string='Max', digits=(16, 5), readonly=True,
        help="Maximum valid value for a quantitative question.")
    test_uom_id = fields.Many2one(
        comodel_name='product.uom', string='Test UoM', readonly=True,
        help="UoM for minimum and maximum values for a quantitative "
             "question.")
    test_uom_category = fields.Many2one(
        comodel_name="product.uom.categ", related="test_uom_id.category_id",
        store=True)
    uom_id = fields.Many2one(
        comodel_name='product.uom', string='UoM',
        domain="[('category_id', '=', test_uom_category)]",
        help="UoM of the inspection value for a quantitative question.")
    question_type = fields.Selection(
        [('qualitative', 'Qualitative'),
         ('quantitative', 'Quantitative')],
        string='Question type', readonly=True)
    valid_values = fields.Char(string="Valid values", store=True,
                               compute="get_valid_values")
    success = fields.Boolean(
        compute="quality_test_check", string="Success?", store=True)
| agpl-3.0 |
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Builds a simple NNVM graph for testing."""
from os import path as osp
import nnvm
from nnvm import sym
from nnvm.compiler import graph_util
from nnvm.testing import init
import numpy as np
import tvm
CWD = osp.dirname(osp.abspath(osp.expanduser(__file__)))
def _get_model(dshape):
    """Build a tiny NNVM graph: dense layer, split in two, then (+1, -1)."""
    data = sym.Variable('data', shape=dshape)
    fc1 = sym.dense(data, units=dshape[-1]*2, use_bias=True)
    left, right = sym.split(fc1, indices_or_sections=2, axis=1)
    return sym.Group(((left + 1), (right - 1)))
def _init_params(graph, input_shapes, initializer=init.Xavier(), seed=10):
    """Create deterministic initial parameter tensors for `graph`.

    NOTE(review): the `initializer=init.Xavier()` default is evaluated once
    at import time and shared across calls — confirm the initializer is
    stateless before relying on this.
    """
    if isinstance(graph, sym.Symbol):
        graph = nnvm.graph.create(graph)

    ishapes, _ = graph_util.infer_shape(graph, **input_shapes)
    param_shapes = dict(zip(graph.index.input_names, ishapes))
    np.random.seed(seed)
    params = {}
    for param, shape in param_shapes.items():
        # 'data'/'label' are runtime inputs, not parameters; empty shapes
        # (scalars/placeholders) are skipped too.
        if param in {'data', 'label'} or not shape:
            continue
        init_value = np.empty(shape).astype('float32')
        initializer(param, init_value)
        params[param] = tvm.nd.array(init_value)
    return params
def main():
    """Compile the test model for LLVM and dump graph.json / graph.params."""
    dshape = (32, 16)
    net = _get_model(dshape)
    ishape_dict = {'data': dshape}
    params = _init_params(net, ishape_dict)
    graph, lib, params = nnvm.compiler.build(net, 'llvm',
                                             shape=ishape_dict,
                                             params=params,
                                             dtype='float32')

    # Artifacts are written next to this script for the Rust runtime tests.
    with open(osp.join(CWD, 'graph.json'), 'w') as f_resnet:
        f_resnet.write(graph.json())
    with open(osp.join(CWD, 'graph.params'), 'wb') as f_params:
        f_params.write(nnvm.compiler.save_param_dict(params))
main()
| apache-2.0 |
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
# d$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import osv
from openerp.report import report_sxw
from openerp.tools import amount_to_text_en
class payroll_advice_report(report_sxw.rml_parse):
    """RML parser exposing helper callables to the payroll advice report."""
    def __init__(self, cr, uid, name, context):
        super(payroll_advice_report, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
            'get_month': self.get_month,
            'convert': self.convert,
            'get_detail': self.get_detail,
            'get_bysal_total': self.get_bysal_total,
        })
        self.context = context

    def get_month(self, input_date):
        """Return display dates for the payslip period containing input_date.

        Uses the first payslip whose [date_from, date_to] covers the given
        date; returns empty strings when none matches.
        """
        payslip_pool = self.pool.get('hr.payslip')
        res = {
               'from_name': '', 'to_name': ''
               }
        slip_ids = payslip_pool.search(self.cr, self.uid, [('date_from','<=',input_date), ('date_to','>=',input_date)], context=self.context)
        if slip_ids:
            slip = payslip_pool.browse(self.cr, self.uid, slip_ids, context=self.context)[0]
            from_date = datetime.strptime(slip.date_from, '%Y-%m-%d')
            to_date = datetime.strptime(slip.date_to, '%Y-%m-%d')
            res['from_name']= from_date.strftime('%d')+'-'+from_date.strftime('%B')+'-'+from_date.strftime('%Y')
            res['to_name']= to_date.strftime('%d')+'-'+to_date.strftime('%B')+'-'+to_date.strftime('%Y')
        return res

    def convert(self, amount, cur):
        # Spell out the amount in English words for the given currency.
        return amount_to_text_en.amount_to_text(amount, 'en', cur);

    def get_bysal_total(self):
        # Total accumulated by the last get_detail() call.
        return self.total_bysal

    def get_detail(self, line_ids):
        """Flatten advice lines into dicts and accumulate the salary total."""
        result = []
        self.total_bysal = 0.00
        for l in line_ids:
            res = {}
            res.update({
                    'name': l.employee_id.name,
                    'acc_no': l.name,
                    'ifsc_code': l.ifsc_code,
                    'bysal': l.bysal,
                    'debit_credit': l.debit_credit,
                    })
            self.total_bysal += l.bysal
            result.append(res)
        return result
class wrapped_report_payroll_advice(osv.AbstractModel):
    # Glue model: binds the report template name to the parser class above.
    _name = 'report.l10n_in_hr_payroll.report_payrolladvice'
    _inherit = 'report.abstract_report'
    _template = 'l10n_in_hr_payroll.report_payrolladvice'
    _wrapped_report_class = payroll_advice_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
project-hypr/hypr2 | hypr/request.py | 1 | 1190 | # Copyright 2014-2016 Morgan Delahaye-Prat. All Rights Reserved.
#
# Licensed under the Simplified BSD License (the "License");
# you may not use this file except in compliance with the License.
"""Request and related classes."""
import json
import asyncio
from aiohttp.web_reqrep import Request as BaseRequest
class Request(BaseRequest):
    """Contains all the information about an incoming HTTP request.

    Thin extension of aiohttp's Request adding a Flask-style ``args``
    alias and a lenient ``json()`` coroutine.
    """
    def __init__(self, *args, **kwargs):
        """Create a new Request (delegates entirely to BaseRequest)."""
        super().__init__(*args, **kwargs)
    @property
    def args(self):
        """Alternate spelling for `Request.GET` (the query-string multidict)."""
        return self.GET
    @asyncio.coroutine
    def json(self, *, loader=json.loads):
        """Parse the request content as JSON.

        Returns None when the body is empty instead of raising.
        """
        body = yield from self.text()
        if body:
            return loader(body)
        return None
    @asyncio.coroutine
    def _prepare_hook(self, response):
        #
        # allow early catch of HTTPExceptions before matching a rule
        # NOTE(review): assumes self.match_info exposes an ``apps`` iterable
        # of apps with an ``on_response_prepare`` signal — confirm against
        # hypr's router implementation.
        apps = ()
        if self.match_info:
            apps = self.match_info.apps
        for app in apps:
            yield from app.on_response_prepare.send(self, response)
| bsd-2-clause |
andim/scipy | scipy/io/harwell_boeing/tests/test_hb.py | 126 | 2389 | from __future__ import division, print_function, absolute_import
import os
import sys
if sys.version_info[0] >= 3:
from io import StringIO
else:
from StringIO import StringIO
import tempfile
import numpy as np
from numpy.testing import TestCase, assert_equal, \
assert_array_almost_equal_nulp
from scipy.sparse import coo_matrix, csc_matrix, rand
from scipy.io import hb_read, hb_write
from scipy.io.harwell_boeing import HBFile, HBInfo
SIMPLE = """\
No Title |No Key
9 4 1 4
RUA 100 100 10 0
(26I3) (26I3) (3E23.15)
1 2 2 2 2 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3 3
3 3 3 3 3 3 3 4 4 4 6 6 6 6 6 6 6 6 6 6 6 8 9 9 9 9
9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 9 11
37 71 89 18 30 45 70 19 25 52
2.971243799687726e-01 3.662366682877375e-01 4.786962174699534e-01
6.490068647991184e-01 6.617490424831662e-02 8.870370343191623e-01
4.196478590163001e-01 5.649603072111251e-01 9.934423887087086e-01
6.912334991524289e-01
"""
SIMPLE_MATRIX = coo_matrix(
(
(0.297124379969, 0.366236668288, 0.47869621747, 0.649006864799,
0.0661749042483, 0.887037034319, 0.419647859016,
0.564960307211, 0.993442388709, 0.691233499152,),
(np.array([[36, 70, 88, 17, 29, 44, 69, 18, 24, 51],
[0, 4, 58, 61, 61, 72, 72, 73, 99, 99]]))))
def assert_csc_almost_equal(r, l):
    """Assert two sparse matrices are equal (data compared to 10000 ULPs),
    after converting both to CSC form."""
    left, right = csc_matrix(r), csc_matrix(l)
    for attr in ("indptr", "indices"):
        assert_equal(getattr(left, attr), getattr(right, attr))
    assert_array_almost_equal_nulp(left.data, right.data, 10000)
class TestHBReader(TestCase):
    def test_simple(self):
        """hb_read must decode the SIMPLE fixture into SIMPLE_MATRIX."""
        m = hb_read(StringIO(SIMPLE))
        assert_csc_almost_equal(m, SIMPLE_MATRIX)
class TestRBRoundtrip(TestCase):
    def test_simple(self):
        """A random sparse matrix must survive an hb_write/hb_read round trip."""
        rm = rand(100, 1000, 0.05).tocsc()
        fd, filename = tempfile.mkstemp(suffix="rb")
        try:
            hb_write(filename, rm, HBInfo.from_data(rm))
            m = hb_read(filename)
        finally:
            # Always release the mkstemp handle and remove the scratch file,
            # even when the write/read above fails.
            os.close(fd)
            os.remove(filename)
        assert_csc_almost_equal(m, rm)
| bsd-3-clause |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/nntplib.py | 8 | 21135 | """An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
    """Base class for all nntplib exceptions."""
    def __init__(self, *args):
        Exception.__init__(self, *args)
        # Keep the server's response line handy for callers; fall back to a
        # placeholder when the exception was raised without one.
        if args:
            self.response = args[0]
        else:
            self.response = 'No response given'
class NNTPReplyError(NNTPError):
    """Raised on an unexpected [123]xx reply from the server."""
class NNTPTemporaryError(NNTPError):
    """Raised on 4xx (temporary failure) responses."""
class NNTPPermanentError(NNTPError):
    """Raised on 5xx (permanent failure) responses."""
class NNTPProtocolError(NNTPError):
    """Raised when a response does not begin with a digit in [1-5]."""
class NNTPDataError(NNTPError):
    """Raised on malformed data in a server response body."""
# for backwards compatibility
# (legacy module-level aliases from before the exception classes existed;
# new code should catch the class names directly)
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
    """Synchronous NNTP (RFC 977) client bound to a single server connection.

    Each command method returns the raw server response line first; article
    numbers are passed and returned as strings throughout.
    """
    def __init__(self, host, port=NNTP_PORT, user=None, password=None,
                 readermode=None, usenetrc=True):
        """Initialize an instance.  Arguments:
        - host: hostname to connect to
        - port: port to connect to (default the standard NNTP port)
        - user: username to authenticate with
        - password: password to use with username
        - readermode: if true, send 'mode reader' command after
                      connecting.
        readermode is sometimes necessary if you are connecting to an
        NNTP server on the local machine and intend to call
        reader-specific commands, such as `group'.  If you get
        unexpected NNTPPermanentErrors, you might need to set
        readermode.
        """
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port))
        self.file = self.sock.makefile('rb')
        self.debugging = 0
        self.welcome = self.getresp()
        # 'mode reader' is sometimes necessary to enable 'reader' mode.
        # However, the order in which 'mode reader' and 'authinfo' need to
        # arrive differs between some NNTP servers. Try to send
        # 'mode reader', and if it fails with an authorization failed
        # error, try again after sending authinfo.
        readermode_afterauth = 0
        if readermode:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
            except NNTPTemporaryError, e:
                if user and e.response[:3] == '480':
                    # Need authorization before 'mode reader'
                    readermode_afterauth = 1
                else:
                    raise
        # If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
        try:
            if usenetrc and not user:
                import netrc
                credentials = netrc.netrc()
                auth = credentials.authenticators(host)
                if auth:
                    user = auth[0]
                    password = auth[2]
        except IOError:
            # No .netrc file (or unreadable): proceed unauthenticated.
            pass
        # Perform NNRP authentication if needed.
        if user:
            resp = self.shortcmd('authinfo user '+user)
            # 381 means a password is expected next.
            if resp[:3] == '381':
                if not password:
                    raise NNTPReplyError(resp)
                else:
                    resp = self.shortcmd(
                        'authinfo pass '+password)
                    if resp[:3] != '281':
                        raise NNTPPermanentError(resp)
        if readermode_afterauth:
            try:
                self.welcome = self.shortcmd('mode reader')
            except NNTPPermanentError:
                # error 500, probably 'not implemented'
                pass
    # Get the welcome message from the server
    # (this is read and squirreled away by __init__()).
    # If the response code is 200, posting is allowed;
    # if it 201, posting is not allowed
    def getwelcome(self):
        """Get the welcome message from the server
        (this is read and squirreled away by __init__()).
        If the response code is 200, posting is allowed;
        if it 201, posting is not allowed."""
        if self.debugging: print '*welcome*', repr(self.welcome)
        return self.welcome
    def set_debuglevel(self, level):
        """Set the debugging level.  Argument 'level' means:
        0: no debugging output (default)
        1: print commands and responses but not body text etc.
        2: also print raw lines read and sent before stripping CR/LF"""
        self.debugging = level
    debug = set_debuglevel
    def putline(self, line):
        """Internal: send one line to the server, appending CRLF."""
        line = line + CRLF
        if self.debugging > 1: print '*put*', repr(line)
        self.sock.sendall(line)
    def putcmd(self, line):
        """Internal: send one command to the server (through putline())."""
        if self.debugging: print '*cmd*', repr(line)
        self.putline(line)
    def getline(self):
        """Internal: return one line from the server, stripping CRLF.
        Raise EOFError if the connection is closed."""
        line = self.file.readline()
        if self.debugging > 1:
            print '*get*', repr(line)
        if not line: raise EOFError
        # Accept CRLF, bare CR or bare LF as the terminator.
        if line[-2:] == CRLF: line = line[:-2]
        elif line[-1:] in CRLF: line = line[:-1]
        return line
    def getresp(self):
        """Internal: get a response from the server.
        Raise various errors if the response indicates an error."""
        resp = self.getline()
        if self.debugging: print '*resp*', repr(resp)
        # The leading digit classifies the response (RFC 977).
        c = resp[:1]
        if c == '4':
            raise NNTPTemporaryError(resp)
        if c == '5':
            raise NNTPPermanentError(resp)
        if c not in '123':
            raise NNTPProtocolError(resp)
        return resp
    def getlongresp(self, file=None):
        """Internal: get a response plus following text from the server.
        Raise various errors if the response indicates an error.
        If 'file' is given (name or file object) the body lines are written
        there and the returned list is empty."""
        openedFile = None
        try:
            # If a string was passed then open a file with that name
            if isinstance(file, str):
                openedFile = file = open(file, "w")
            resp = self.getresp()
            if resp[:3] not in LONGRESP:
                raise NNTPReplyError(resp)
            list = []
            while 1:
                line = self.getline()
                # A lone '.' terminates the multi-line body.
                if line == '.':
                    break
                # Undo dot-stuffing: a leading '..' encodes a literal '.'.
                if line[:2] == '..':
                    line = line[1:]
                if file:
                    file.write(line + "\n")
                else:
                    list.append(line)
        finally:
            # If this method created the file, then it must close it
            if openedFile:
                openedFile.close()
        return resp, list
    def shortcmd(self, line):
        """Internal: send a command and get the response."""
        self.putcmd(line)
        return self.getresp()
    def longcmd(self, line, file=None):
        """Internal: send a command and get the response plus following text."""
        self.putcmd(line)
        return self.getlongresp(file)
    def newgroups(self, date, time, file=None):
        """Process a NEWGROUPS command.  Arguments:
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of newsgroup names"""
        return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
    def newnews(self, group, date, time, file=None):
        """Process a NEWNEWS command.  Arguments:
        - group: group name or '*'
        - date: string 'yymmdd' indicating the date
        - time: string 'hhmmss' indicating the time
        Return:
        - resp: server response if successful
        - list: list of message ids"""
        cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
        return self.longcmd(cmd, file)
    def list(self, file=None):
        """Process a LIST command.  Return:
        - resp: server response if successful
        - list: list of (group, last, first, flag) (strings)"""
        resp, list = self.longcmd('LIST', file)
        for i in range(len(list)):
            # Parse lines into "group last first flag"
            list[i] = tuple(list[i].split())
        return resp, list
    def description(self, group):
        """Get a description for a single group.  If more than one
        group matches ('group' is a pattern), return the first.  If no
        group matches, return an empty string.
        This elides the response code from the server, since it can
        only be '215' or '285' (for xgtitle) anyway.  If the response
        code is needed, use the 'descriptions' method.
        NOTE: This neither checks for a wildcard in 'group' nor does
        it check whether the group actually exists."""
        resp, lines = self.descriptions(group)
        if len(lines) == 0:
            return ""
        else:
            return lines[0][1]
    def descriptions(self, group_pattern):
        """Get descriptions for a range of groups."""
        line_pat = re.compile("^(?P<group>[^ \t]+)[ \t]+(.*)$")
        # Try the more std (acc. to RFC2980) LIST NEWSGROUPS first
        resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
        if resp[:3] != "215":
            # Now the deprecated XGTITLE.  This either raises an error
            # or succeeds with the same output structure as LIST
            # NEWSGROUPS.
            resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines
    def group(self, name):
        """Process a GROUP command.  Argument:
        - group: the group name
        Returns:
        - resp: server response if successful
        - count: number of articles (string)
        - first: first article number (string)
        - last: last article number (string)
        - name: the group name"""
        resp = self.shortcmd('GROUP ' + name)
        if resp[:3] != '211':
            raise NNTPReplyError(resp)
        words = resp.split()
        count = first = last = 0
        n = len(words)
        # The fields are optional; fill in as many as the server sent.
        if n > 1:
            count = words[1]
            if n > 2:
                first = words[2]
                if n > 3:
                    last = words[3]
                    if n > 4:
                        name = words[4].lower()
        return resp, count, first, last, name
    def help(self, file=None):
        """Process a HELP command.  Returns:
        - resp: server response if successful
        - list: list of strings"""
        return self.longcmd('HELP',file)
    def statparse(self, resp):
        """Internal: parse the response of a STAT, NEXT or LAST command."""
        if resp[:2] != '22':
            raise NNTPReplyError(resp)
        words = resp.split()
        nr = 0
        id = ''
        n = len(words)
        if n > 1:
            nr = words[1]
            if n > 2:
                id = words[2]
        return resp, nr, id
    def statcmd(self, line):
        """Internal: process a STAT, NEXT or LAST command."""
        resp = self.shortcmd(line)
        return self.statparse(resp)
    def stat(self, id):
        """Process a STAT command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr:   the article number
        - id:   the message id"""
        return self.statcmd('STAT ' + id)
    def next(self):
        """Process a NEXT command.  No arguments.  Return as for STAT."""
        return self.statcmd('NEXT')
    def last(self):
        """Process a LAST command.  No arguments.  Return as for STAT."""
        return self.statcmd('LAST')
    def artcmd(self, line, file=None):
        """Internal: process a HEAD, BODY or ARTICLE command."""
        resp, list = self.longcmd(line, file)
        resp, nr, id = self.statparse(resp)
        return resp, nr, id, list
    def head(self, id):
        """Process a HEAD command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's header"""
        return self.artcmd('HEAD ' + id)
    def body(self, id, file=None):
        """Process a BODY command.  Argument:
        - id: article number or message id
        - file: Filename string or file object to store the article in
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article's body or an empty list
                if file was used"""
        return self.artcmd('BODY ' + id, file)
    def article(self, id):
        """Process an ARTICLE command.  Argument:
        - id: article number or message id
        Returns:
        - resp: server response if successful
        - nr: article number
        - id: message id
        - list: the lines of the article"""
        return self.artcmd('ARTICLE ' + id)
    def slave(self):
        """Process a SLAVE command.  Returns:
        - resp: server response if successful"""
        return self.shortcmd('SLAVE')
    def xhdr(self, hdr, str, file=None):
        """Process an XHDR command (optional server extension).  Arguments:
        - hdr: the header type (e.g. 'subject')
        - str: an article nr, a message id, or a range nr1-nr2
        Returns:
        - resp: server response if successful
        - list: list of (nr, value) strings"""
        pat = re.compile('^([0-9]+) ?(.*)\n?')
        resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
        for i in range(len(lines)):
            line = lines[i]
            m = pat.match(line)
            if m:
                lines[i] = m.group(1, 2)
        return resp, lines
    def xover(self, start, end, file=None):
        """Process an XOVER command (optional server extension) Arguments:
        - start: start of range
        - end: end of range
        Returns:
        - resp: server response if successful
        - list: list of (art-nr, subject, poster, date,
                         id, references, size, lines)"""
        resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
        xover_lines = []
        for line in lines:
            # Overview lines are tab-separated; a short line is a data error.
            elem = line.split("\t")
            try:
                xover_lines.append((elem[0],
                                    elem[1],
                                    elem[2],
                                    elem[3],
                                    elem[4],
                                    elem[5].split(),
                                    elem[6],
                                    elem[7]))
            except IndexError:
                raise NNTPDataError(line)
        return resp,xover_lines
    def xgtitle(self, group, file=None):
        """Process an XGTITLE command (optional server extension) Arguments:
        - group: group name wildcard (i.e. news.*)
        Returns:
        - resp: server response if successful
        - list: list of (name,title) strings"""
        line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
        resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
        lines = []
        for raw_line in raw_lines:
            match = line_pat.search(raw_line.strip())
            if match:
                lines.append(match.group(1, 2))
        return resp, lines
    def xpath(self,id):
        """Process an XPATH command (optional server extension) Arguments:
        - id: Message id of article
        Returns:
        resp: server response if successful
        path: directory path to article"""
        resp = self.shortcmd("XPATH " + id)
        if resp[:3] != '223':
            raise NNTPReplyError(resp)
        try:
            [resp_num, path] = resp.split()
        except ValueError:
            raise NNTPReplyError(resp)
        else:
            return resp, path
    def date (self):
        """Process the DATE command. Arguments:
        None
        Returns:
        resp: server response if successful
        date: Date suitable for newnews/newgroups commands etc.
        time: Time suitable for newnews/newgroups commands etc."""
        resp = self.shortcmd("DATE")
        if resp[:3] != '111':
            raise NNTPReplyError(resp)
        elem = resp.split()
        if len(elem) != 2:
            raise NNTPDataError(resp)
        # Server sends 'YYYYMMDDhhmmss'; slice into date and time parts.
        date = elem[1][2:8]
        time = elem[1][-6:]
        if len(date) != 6 or len(time) != 6:
            raise NNTPDataError(resp)
        return resp, date, time
    def post(self, f):
        """Process a POST command.  Arguments:
        - f: file containing the article
        Returns:
        - resp: server response if successful"""
        resp = self.shortcmd('POST')
        # Raises error_??? if posting is not allowed
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuff: escape a leading '.' so it is not read as
            # end-of-article by the server.
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()
    def ihave(self, id, f):
        """Process an IHAVE command.  Arguments:
        - id: message-id of the article
        - f:  file containing the article
        Returns:
        - resp: server response if successful
        Note that if the server refuses the article an exception is raised."""
        resp = self.shortcmd('IHAVE ' + id)
        # Raises error_??? if the server already has it
        if resp[0] != '3':
            raise NNTPReplyError(resp)
        while 1:
            line = f.readline()
            if not line:
                break
            if line[-1] == '\n':
                line = line[:-1]
            # Dot-stuff: escape a leading '.' so it is not read as
            # end-of-article by the server.
            if line[:1] == '.':
                line = '.' + line
            self.putline(line)
        self.putline('.')
        return self.getresp()
    def quit(self):
        """Process a QUIT command and close the socket.  Returns:
        - resp: server response if successful"""
        resp = self.shortcmd('QUIT')
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
import os
newshost = 'news' and os.environ["NNTPSERVER"]
if newshost.find('.') == -1:
mode = 'readermode'
else:
mode = None
s = NNTP(newshost, readermode=mode)
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
| apache-2.0 |
zetaops/ulakbus | ulakbus/services/personel/hitap/hizmet_unvan_getir.py | 1 | 1181 | # -*- coding: utf-8 -*-
# Copyright (C) 2015 ZetaOps Inc.
#
# This file is licensed under the GNU General Public License v3
# (GPLv3). See LICENSE.txt for details.
"""HITAP Ünvan Sorgula
Hitap üzerinden personelin ünvan bilgilerinin sorgulamasını yapar.
"""
from ulakbus.services.personel.hitap.hitap_sorgula import HITAPSorgula
class HizmetUnvanGetir(HITAPSorgula):
    """Title (unvan) query service derived from the HITAP query base class.

    ``service_dict`` configures the base class: the remote SOAP service and
    bean names, a mapping of local field names to remote bean fields, the
    fields to be parsed as dates, and the fields required in a request.
    """
    HAS_CHANNEL = True
    service_dict = {
        'service_name': 'HizmetUnvanSorgula',
        'bean_name': 'HizmetUnvanServisBean',
        # local field name -> remote HITAP bean field name
        'fields': {
            'tckn': 'tckn',
            'kayit_no': 'kayitNo',
            'unvan_kod': 'unvanKod',
            'unvan_tarihi': 'unvanTarihi',
            'unvan_bitis_tarihi': 'unvanBitisTarihi',
            'hizmet_sinifi': 'hizmetSinifi',
            'asil_vekil': 'asilVekil',
            'atama_sekli': 'atamaSekli',
            'fhz_orani': 'fhzOrani',
            'kurum_onay_tarihi': 'kurumOnayTarihi'
        },
        # these fields are converted from HITAP's date representation
        'date_filter': ['unvan_tarihi', 'unvan_bitis_tarihi', 'kurum_onay_tarihi'],
        'required_fields': ['tckn']
    }
| gpl-3.0 |
MihaiMoldovanu/ansible | lib/ansible/modules/storage/netapp/na_cdot_volume.py | 28 | 13714 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: na_cdot_volume
short_description: Manage NetApp cDOT volumes
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar (sumit4@netapp.com)
description:
- Create or destroy volumes on NetApp cDOT
options:
state:
description:
- Whether the specified volume should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the volume to manage.
required: true
infinite:
description:
- Set True if the volume is an Infinite Volume.
choices: ['True', 'False']
default: 'False'
online:
description:
- Whether the specified volume is online, or not.
choices: ['True', 'False']
default: 'True'
aggregate_name:
description:
- The name of the aggregate the flexvol should exist on. Required when C(state=present).
size:
description:
- The size of the volume in (size_unit). Required when C(state=present).
size_unit:
description:
- The unit used to interpret the size parameter.
choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
default: 'gb'
vserver:
description:
- Name of the vserver to use.
required: true
default: None
'''
EXAMPLES = """
- name: Create FlexVol
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
aggregate_name: aggr1
size: 20
size_unit: mb
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
- name: Make FlexVol offline
na_cdot_volume:
state: present
name: ansibleVolume
infinite: False
online: False
vserver: ansibleVServer
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTVolume(object):
    """Ansible module implementation: create/delete/resize/online-offline
    NetApp cDOT volumes via ZAPI calls.

    All module parameters are read in ``__init__``; ``apply()`` reconciles
    the requested state against the vserver and exits the module.
    """
    def __init__(self):
        # Multipliers to convert the 'size' parameter into bytes.
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )
        self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
        self.argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            name=dict(required=True, type='str'),
            is_infinite=dict(required=False, type='bool', default=False, aliases=['infinite']),
            is_online=dict(required=False, type='bool', default=True, aliases=['online']),
            size=dict(type='int'),
            size_unit=dict(default='gb',
                           choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb',
                                    'pb', 'eb', 'zb', 'yb'], type='str'),
            aggregate_name=dict(type='str'),
            vserver=dict(required=True, type='str', default=None),
        ))
        self.module = AnsibleModule(
            argument_spec=self.argument_spec,
            required_if=[
                ('state', 'present', ['aggregate_name', 'size'])
            ],
            supports_check_mode=True
        )
        p = self.module.params
        # set up state variables
        self.state = p['state']
        self.name = p['name']
        self.is_infinite = p['is_infinite']
        self.is_online = p['is_online']
        self.size_unit = p['size_unit']
        self.vserver = p['vserver']
        if p['size'] is not None:
            # Normalize the requested size to bytes.
            self.size = p['size'] * self._size_unit_map[self.size_unit]
        else:
            self.size = None
        self.aggregate_name = p['aggregate_name']
        if HAS_NETAPP_LIB is False:
            self.module.fail_json(msg="the python NetApp-Lib module is required")
        else:
            self.server = netapp_utils.setup_ontap_zapi(module=self.module, vserver=self.vserver)
    def get_volume(self):
        """
        Return details about the volume
        :param:
            name : Name of the volume
        :return: Details about the volume (name, size in bytes as a string,
                 is_online flag). None if not found.
        :rtype: dict
        """
        volume_info = netapp_utils.zapi.NaElement('volume-get-iter')
        volume_attributes = netapp_utils.zapi.NaElement('volume-attributes')
        volume_id_attributes = netapp_utils.zapi.NaElement('volume-id-attributes')
        volume_id_attributes.add_new_child('name', self.name)
        volume_attributes.add_child_elem(volume_id_attributes)
        query = netapp_utils.zapi.NaElement('query')
        query.add_child_elem(volume_attributes)
        volume_info.add_child_elem(query)
        result = self.server.invoke_successfully(volume_info, True)
        return_value = None
        if result.get_child_by_name('num-records') and \
                int(result.get_child_content('num-records')) >= 1:
            volume_attributes = result.get_child_by_name(
                'attributes-list').get_child_by_name(
                    'volume-attributes')
            # Get volume's current size
            volume_space_attributes = volume_attributes.get_child_by_name(
                'volume-space-attributes')
            current_size = volume_space_attributes.get_child_content('size')
            # Get volume's state (online/offline)
            volume_state_attributes = volume_attributes.get_child_by_name(
                'volume-state-attributes')
            current_state = volume_state_attributes.get_child_content('state')
            is_online = None
            if current_state == "online":
                is_online = True
            elif current_state == "offline":
                is_online = False
            return_value = {
                'name': self.name,
                'size': current_size,
                'is_online': is_online,
            }
        return return_value
    def create_volume(self):
        """Create the flexvol on the configured aggregate; fail the module on a ZAPI error."""
        volume_create = netapp_utils.zapi.NaElement.create_node_with_children(
            'volume-create', **{'volume': self.name,
                                'containing-aggr-name': self.aggregate_name,
                                'size': str(self.size)})
        try:
            self.server.invoke_successfully(volume_create,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error provisioning volume %s of size %s: %s' % (self.name, self.size, to_native(e)),
                                  exception=traceback.format_exc())
    def delete_volume(self):
        """Destroy the volume (async variant for Infinite Volumes)."""
        if self.is_infinite:
            volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-destroy-async', **{'volume-name': self.name})
        else:
            volume_delete = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-destroy', **{'name': self.name, 'unmount-and-offline':
                                     'true'})
        try:
            self.server.invoke_successfully(volume_delete,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error deleting volume %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def rename_volume(self):
        """
        Rename the volume.
        Note: 'is_infinite' needs to be set to True in order to rename an
        Infinite Volume.
        NOTE(review): apply() never sets its rename flag, so this method is
        currently never invoked; also the new name passed is self.name itself.
        """
        if self.is_infinite:
            volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-rename-async',
                **{'volume-name': self.name, 'new-volume-name': str(
                    self.name)})
        else:
            volume_rename = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-rename', **{'volume': self.name, 'new-volume-name': str(
                    self.name)})
        try:
            self.server.invoke_successfully(volume_rename,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error renaming volume %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def resize_volume(self):
        """
        Re-size the volume.
        Note: 'is_infinite' needs to be set to True in order to rename an
        Infinite Volume.
        """
        if self.is_infinite:
            volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-size-async',
                **{'volume-name': self.name, 'new-size': str(
                    self.size)})
        else:
            volume_resize = netapp_utils.zapi.NaElement.create_node_with_children(
                'volume-size', **{'volume': self.name, 'new-size': str(
                    self.size)})
        try:
            self.server.invoke_successfully(volume_resize,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error re-sizing volume %s: %s' % (self.name, to_native(e)),
                                  exception=traceback.format_exc())
    def change_volume_state(self):
        """
        Change volume's state (offline/online) to match self.is_online.
        Note: 'is_infinite' needs to be set to True in order to change the
        state of an Infinite Volume.
        """
        state_requested = None
        if self.is_online:
            # Requested state is 'online'.
            state_requested = "online"
            if self.is_infinite:
                volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
                    'volume-online-async',
                    **{'volume-name': self.name})
            else:
                volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
                    'volume-online',
                    **{'name': self.name})
        else:
            # Requested state is 'offline'.
            state_requested = "offline"
            if self.is_infinite:
                volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
                    'volume-offline-async',
                    **{'volume-name': self.name})
            else:
                volume_change_state = netapp_utils.zapi.NaElement.create_node_with_children(
                    'volume-offline',
                    **{'name': self.name})
        try:
            self.server.invoke_successfully(volume_change_state,
                                            enable_tunneling=True)
        except netapp_utils.zapi.NaApiError as e:
            self.module.fail_json(msg='Error changing the state of volume %s to %s: %s' %
                                  (self.name, state_requested, to_native(e)),
                                  exception=traceback.format_exc())
    def apply(self):
        """Reconcile actual vs. requested state and exit the module with
        'changed' set accordingly (no-op when check mode is active)."""
        changed = False
        volume_exists = False
        # NOTE(review): rename_volume is never set True below, so the
        # rename branch is dead code in this version.
        rename_volume = False
        resize_volume = False
        volume_detail = self.get_volume()
        if volume_detail:
            volume_exists = True
            if self.state == 'absent':
                changed = True
            elif self.state == 'present':
                # Sizes compared as strings: ZAPI returns size as a string.
                if str(volume_detail['size']) != str(self.size):
                    resize_volume = True
                    changed = True
                if (volume_detail['is_online'] is not None) and (volume_detail['is_online'] != self.is_online):
                    changed = True
                    # The branches below are intentional no-ops; the actual
                    # transition happens later via change_volume_state().
                    if self.is_online is False:
                        # Volume is online, but requested state is offline
                        pass
                    else:
                        # Volume is offline but requested state is online
                        pass
        else:
            if self.state == 'present':
                changed = True
        if changed:
            if self.module.check_mode:
                # Check mode: report 'changed' without touching the system.
                pass
            else:
                if self.state == 'present':
                    if not volume_exists:
                        self.create_volume()
                    else:
                        if resize_volume:
                            self.resize_volume()
                        if volume_detail['is_online'] is not \
                                None and volume_detail['is_online'] != \
                                self.is_online:
                            self.change_volume_state()
                        # Ensure re-naming is the last change made.
                        if rename_volume:
                            self.rename_volume()
                elif self.state == 'absent':
                    self.delete_volume()
        self.module.exit_json(changed=changed)
def main():
    """Module entry point: build the volume manager and apply the state."""
    volume_manager = NetAppCDOTVolume()
    volume_manager.apply()


if __name__ == '__main__':
    main()
| gpl-3.0 |
bjsmith/motivation-simulation | ActionModel.py | 1 | 1161 | from UnitModel import *
__author__ = 'benjaminsmith'
"""Inherits from UnitModel"""
class ActionModel(UnitModel):
    """A single action with its motivational parameters.

    Extends :class:`UnitModel` (which stores ``name`` and ``value``) with
    expectancies, valuations and environment parameters used by the
    simulation.
    """

    def __init__(self, action_name, tendency, value, pos_expectancy,
                 neg_expectancy, pos_val, neg_val, threshold, chaos,
                 persistence):
        super(ActionModel, self).__init__(action_name, value)
        # -- psychology --------------------------------------------------
        self.tendency = tendency
        self.persistence = persistence
        self.pos_expectancy = pos_expectancy
        self.neg_expectancy = neg_expectancy
        self.threshold = threshold
        # -- environment -------------------------------------------------
        self.pos_val = pos_val
        self.neg_val = neg_val
        self.chaos = chaos

    def __repr__(self):
        # Compact one-line summary of the motivational parameters.
        return ("ActionModel {0}(ten={1},value={2},pe={3},ne={4},"
                "pv={5},nv={6}) ".format(self.name, self.tendency,
                                         self.value, self.pos_expectancy,
                                         self.neg_expectancy, self.pos_val,
                                         self.neg_val))
| gpl-3.0 |
xkollar/spacewalk | backend/server/rhnMapping.py | 4 | 1089 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
from spacewalk.server import rhnSQL
# checks if an arch is for real
def check_package_arch(name):
    """Return ``name`` if it is a known package architecture label.

    Looks the label up in rhnPackageArch and returns it unchanged when
    found; returns None for None/empty input or an unknown label.
    """
    # Bug fix: the None check must happen *before* str() -- the original
    # converted first, turning None into the truthy string "None" and
    # making the guard unreachable.
    if name is None:
        return None
    name = str(name)
    if not name:
        return None
    h = rhnSQL.prepare("select id from rhnPackageArch where label = :label")
    h.execute(label=name)
    ret = h.fetchone_dict()
    if not ret:
        return None
    return name
# Ad-hoc smoke test (note: the ``print`` statement below is Python 2
# syntax, so this file targets a Python 2 interpreter).
if __name__ == '__main__':
    """Test code.
    """
    rhnSQL.initDB()
    print check_package_arch('i386')
| gpl-2.0 |
anidata/rasp | rasp/tor_engine.py | 1 | 6869 | import getpass
import os
from contextlib import contextmanager
from datetime import datetime, timedelta
from stem import Signal
from stem.connection import connect
from rasp.base import DefaultEngine
from rasp.errors import ControllerError
class TorController(object):
    """Wrapper around a connection to a Tor control port.

    Used to ask Tor for a fresh circuit (NEWNYM signal) so subsequent
    requests leave through a different exit node.

    Attributes:
        address (str): address of the Tor control interface to connect to,
            can also be set with the ``RASP_TOR_ADDRESS``
            environment variable (defaults to ``127.0.0.1``).
        port (int, optional): control port number used to signal Tor,
            can also be set with the ``RASP_TOR_CONTROL_PORT``
            environment variable (defaults to ``9051``).
        password (str, optional): password to protect control
            port usage,
            can also be set with the ``RASP_TOR_CONTROL_PASSWORD``
            environment variable; if neither is set, it is prompted for
            interactively at construction time.
        limiter (callable, optional): a zero-argument function returning a
            boolean, consulted by :meth:`limited_signal` to decide when a
            signal may actually be sent (defaults to "always allow")."""
    def __init__(self,
                 address=None,
                 port=None,
                 password=None,
                 signal_limiter=None):
        self.address = (
            address
            or os.environ.get('RASP_TOR_ADDRESS')
            or '127.0.0.1'
        )
        self.port = (
            port
            or os.environ.get('RASP_TOR_CONTROL_PORT')
            or 9051
        )
        # NOTE: when no password is supplied (argument or environment),
        # constructing this object blocks on an interactive prompt.
        self.password = (
            password
            or os.environ.get('RASP_TOR_CONTROL_PASSWORD')
            or getpass.getpass("Tor control password: ")
        )
        # Default limiter permits every signal attempt.
        def default_limiter(): return True
        self.limiter = (
            signal_limiter or default_limiter
        )
    def __copy__(self):
        # Copies duplicate only the configuration, never the live connection.
        return TorController(
            address=self.address,
            port=self.port,
            password=self.password,
            signal_limiter=self.limiter
        )
    def _enforce_connection(method):
        """Method decorator to enforce that the connection was established.

        Used as a plain function decorator inside this class body; raises
        ControllerError if open() has not been called yet.
        """
        def enforce(self, *args, **kwargs):
            if not hasattr(self, 'connection'):
                raise ControllerError(
                    'Signal controller has not been opened'
                )
            return method(self, *args, **kwargs)
        return enforce
    def open(self):
        """Establishes a connection to the Tor server"""
        info = (self.address, self.port)
        self.connection = connect(
            control_port=info,
            password=self.password
        )
    @_enforce_connection
    def close(self):
        """Closes the TorController connection"""
        self.connection.close()
    @_enforce_connection
    def ready_to_signal(self):
        """Checks to see if the Tor server is available to be signalled.
        The availability only applies to the instance's connection, and
        doesn't take into account any other instances.
        Returns:
            True if signal can be sent, False if not
        """
        return self.connection.is_newnym_available()
    @_enforce_connection
    def signal(self):
        """Refreshes the mapping of nodes we connect through.
        When a new path from the user to the destination is required, this
        method refreshes the node path we use to connect. This gives us a new IP
        address with which the destination sees.
        """
        if self.ready_to_signal():
            self.connection.signal(Signal.NEWNYM)
    def limited_signal(self):
        # Like signal(), but additionally gated by the configured limiter.
        if self.ready_to_signal() and self.limiter():
            self.signal()
    @contextmanager
    def connected(self):
        """Context manager to automatically handle closing the connection"""
        try:
            self.open()
            yield
        finally:
            self.close()
    @staticmethod
    def call_limited(request_amount):
        """Returns a function that returns True when the
        number of calls to it is equal to
        ``request_amount``.
        Parameters:
            request_amount (int): the number of calls after which
                the returned function returns True (the counter then
                resets and the cycle repeats)
        """
        def call_limit():
            call_limit.total_calls += 1
            enough_calls = call_limit.total_calls == request_amount
            if enough_calls:
                # Reset so the limiter fires once every request_amount calls.
                call_limit.total_calls = 0
                return True
            else:
                return False
        call_limit.total_calls = 0
        return call_limit
    @staticmethod
    def time_limited(time_window_seconds):
        """Returns a function that returns True when the
        elapsed time is greater than ``time_window_seconds``
        from now.
        Parameters:
            time_window_seconds (float): the time to wait before
                the returned function will return True
        """
        def time_limit():
            delta = timedelta(seconds=time_window_seconds)
            out_of_window = time_limit.last_signal <= (datetime.now() - delta)
            if out_of_window:
                # Start a fresh window from the moment the limiter fires.
                time_limit.last_signal = datetime.now()
                return True
            else:
                return False
        time_limit.last_signal = datetime.now()
        return time_limit
class TorEngine(DefaultEngine):
    """Engine that routes every ``requests`` call through a Tor SOCKS proxy.

    On top of ``DefaultEngine`` this provides:

    1. Web requests routed through an anonymous proxy.
    2. The possibility of a new endpoint IP address per request.

    Attributes:
        session (:obj:`requests.Session`): Session object for which all
            requests are routed through.
        headers (dict, optional): Base headers for all requests.
        address (str): IP address of Tor SOCKS proxy to connect,
            can also be set with the ``RASP_TOR_ADDRESS``
            environment variable (defaults to ``127.0.0.1``).
        port (int): Port number of Tor SOCKS proxy for web requests,
            can also be set with the ``RASP_TOR_PORT``
            environment variable (defaults to ``9050``).
    """

    def __init__(self,
                 headers=None,
                 pre_fetch_callback=None,
                 address=None,
                 port=None):
        super(TorEngine, self).__init__(headers, pre_fetch_callback)
        self.address = address or os.environ.get('RASP_TOR_ADDRESS') or '127.0.0.1'
        self.port = port or os.environ.get('RASP_TOR_PORT') or 9050
        # Route both schemes through the same SOCKS5 endpoint.
        proxy_uri = 'socks5://{}:{}'.format(self.address, self.port)
        self.session.proxies.update({'http': proxy_uri, 'https': proxy_uri})

    def __copy__(self):
        return TorEngine(self.headers, self.callback, self.address, self.port)
| bsd-3-clause |
Endika/odoo | addons/account/report/account_treasury_report.py | 385 | 3872 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields,osv
import openerp.addons.decimal_precision as dp
class account_treasury_report(osv.osv):
    """Treasury analysis report, backed by a SQL view (see init())."""
    _name = "account.treasury.report"
    _description = "Treasury Analysis"
    # No table is auto-created: the model reads from the view built in init().
    _auto = False

    def _compute_balances(self, cr, uid, ids, field_names, arg=None, context=None,
                          query='', query_params=()):
        """Compute running starting/ending balances for every report line.

        Lines are browsed in ``_order`` (date asc) while a per-company
        running sum is maintained, so each line's starting balance equals
        the previous line's ending balance for the same company.
        Note: balances are computed for *all* lines, not just ``ids``.
        """
        all_treasury_lines = self.search(cr, uid, [], context=context)
        all_companies = self.pool.get('res.company').search(cr, uid, [], context=context)
        current_sum = dict((company, 0.0) for company in all_companies)
        res = dict((id, dict((fn, 0.0) for fn in field_names)) for id in all_treasury_lines)
        for record in self.browse(cr, uid, all_treasury_lines, context=context):
            res[record.id]['starting_balance'] = current_sum[record.company_id.id]
            current_sum[record.company_id.id] += record.balance
            res[record.id]['ending_balance'] = current_sum[record.company_id.id]
        return res

    _columns = {
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscalyear', readonly=True),
        'period_id': fields.many2one('account.period', 'Period', readonly=True),
        'debit': fields.float('Debit', readonly=True),
        'credit': fields.float('Credit', readonly=True),
        'balance': fields.float('Balance', readonly=True),
        'date': fields.date('Beginning of Period Date', readonly=True),
        'starting_balance': fields.function(_compute_balances, digits_compute=dp.get_precision('Account'), string='Starting Balance', multi='balance'),
        'ending_balance': fields.function(_compute_balances, digits_compute=dp.get_precision('Account'), string='Ending Balance', multi='balance'),
        'company_id': fields.many2one('res.company', 'Company', readonly=True),
    }
    _order = 'date asc'

    def init(self, cr):
        """(Re)create the backing SQL view.

        The view aggregates posted move lines on liquidity accounts per
        period and company; one row per period (row id == period id).
        """
        tools.drop_view_if_exists(cr, 'account_treasury_report')
        cr.execute("""
            create or replace view account_treasury_report as (
                select
                    p.id as id,
                    p.fiscalyear_id as fiscalyear_id,
                    p.id as period_id,
                    sum(l.debit) as debit,
                    sum(l.credit) as credit,
                    sum(l.debit-l.credit) as balance,
                    p.date_start as date,
                    am.company_id as company_id
                from
                    account_move_line l
                    left join account_account a on (l.account_id = a.id)
                    left join account_move am on (am.id=l.move_id)
                    left join account_period p on (am.period_id=p.id)
                where l.state != 'draft'
                and a.type = 'liquidity'
                group by p.id, p.fiscalyear_id, p.date_start, am.company_id
            )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
iuliat/nova | nova/api/openstack/compute/schemas/v3/evacuate.py | 83 | 1175 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.validation import parameter_types
# JSON-Schema for the request body of the ``evacuate`` server action.
# Only ``onSharedStorage`` is mandatory; ``host`` and ``adminPass`` are
# optional, and no extra keys are accepted at either nesting level.
evacuate = {
    'type': 'object',
    'properties': {
        'evacuate': {
            'type': 'object',
            'properties': {
                'host': parameter_types.hostname,
                'onSharedStorage': parameter_types.boolean,
                'adminPass': parameter_types.admin_password,
            },
            'required': ['onSharedStorage'],
            'additionalProperties': False,
        },
    },
    'required': ['evacuate'],
    'additionalProperties': False,
}
| apache-2.0 |
class WordFilter:
    """Answer "largest word index with given prefix AND suffix" queries.

    For every word, all of its prefixes and suffixes are indexed to a
    (sorted, ascending) list of word indices.  A query then merges the
    two index lists from the back to find the largest common index.
    """

    def __init__(self, words):
        """
        :type words: List[str]
        """
        from collections import defaultdict
        self.prefix = defaultdict(list)
        self.suffix = defaultdict(list)
        for wi, word in enumerate(words):
            # Include the empty prefix/suffix (i == 0 / i == len(word)).
            for i in range(len(word) + 1):
                self.prefix[word[0:i]].append(wi)
                self.suffix[word[i:len(word)]].append(wi)

    def f(self, prefix, suffix):
        """
        :type prefix: str
        :type suffix: str
        :rtype: int  (largest matching word index, or -1 if none)
        """
        # Use .get() so unknown queries do not grow the defaultdicts.
        pre_ids = self.prefix.get(prefix, [])
        suf_ids = self.suffix.get(suffix, [])
        i, j = len(pre_ids) - 1, len(suf_ids) - 1
        while i >= 0 and j >= 0:
            if pre_ids[i] == suf_ids[j]:
                return pre_ids[i]
            elif pre_ids[i] < suf_ids[j]:
                j -= 1
            else:
                i -= 1
        # Bug fix: the original fell off the end and returned None here
        # (its ``else: return -1`` inside the loop was unreachable); the
        # no-match answer must be -1.
        return -1
# # Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(["apple"])
# print(obj.f("a", "e"))
# print(obj.f("b", "e"))
# Ad-hoc smoke test: prints the best matching word index for each
# (prefix, suffix) query pair.
obj = WordFilter([
    "cabaabaaaa", "ccbcababac", "bacaabccba", "bcbbcbacaa", "abcaccbcaa",
    "accabaccaa", "cabcbbbcca", "ababccabcb", "caccbbcbab", "bccbacbcba"
])
for p, s in [
    ["bccbacbcba", "a"],
    ["ab", "abcaccbcaa"],
    ["a", "aa"],
    ["cabaaba", "abaaaa"],
    ["cacc", "accbbcbab"],
    ["ccbcab", "bac"],
    ["bac", "cba"],
    ["ac", "accabaccaa"],
    ["bcbb", "aa"],
    ["ccbca", "cbcababac"],
]:
    print(obj.f(p, s))
| mit |
dennybaa/st2 | st2common/st2common/util/param.py | 2 | 7537 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
import networkx as nx
from jinja2 import meta
from st2common import log as logging
from st2common.constants.action import ACTION_CONTEXT_KV_PREFIX
from st2common.constants.system import SYSTEM_KV_PREFIX
from st2common.exceptions.param import ParamException
from st2common.services.keyvalues import KeyValueLookup
from st2common.util.casts import get_cast
from st2common.util.compat import to_unicode
from st2common.util import jinja as jinja_utils
LOG = logging.getLogger(__name__)
ENV = jinja_utils.get_jinja_environment()
__all__ = [
'render_live_params',
'render_final_params',
]
def _split_params(runner_parameters, action_parameters, mixed_params):
    """Split ``mixed_params`` into a (runner params, action params) pair.

    A key present in both schemas is attributed to the runner only.
    """
    def pick(schema, skips):
        return {k: v for k, v in mixed_params.items()
                if k in schema and k not in skips}

    return (pick(runner_parameters, {}),
            pick(action_parameters, runner_parameters))
def _cast_params(rendered, parameter_schemas):
    '''
    Cast every rendered value according to its parameter schema.
    (Kept for backwards compatibility with existing tests.)
    '''
    return {name: _cast(value, parameter_schemas[name] or {})
            for name, value in rendered.items()}
def _cast(v, parameter_schema):
    '''
    Cast ``v`` to the type declared in ``parameter_schema``; return it
    unchanged when no cast applies (None value, empty schema, no/unknown
    type).
    '''
    if v is None or not parameter_schema:
        return v
    parameter_type = parameter_schema.get('type')
    if not parameter_type:
        return v
    cast = get_cast(cast_type=parameter_type)
    return cast(v) if cast else v
def _create_graph(action_context):
    '''
    Creates a generic directed graph for depencency tree and fills it with basic context variables
    '''
    G = nx.DiGraph()
    # Seed the graph with the two always-available context nodes: the
    # datastore lookup ("system") and the action context.
    G.add_node(SYSTEM_KV_PREFIX, value=KeyValueLookup())
    G.add_node(ACTION_CONTEXT_KV_PREFIX, value=action_context)
    return G
def _process(G, name, value):
    '''
    Classify ``value`` as either a Jinja template or a plain value and add
    the corresponding node (and dependency edges) to the graph.
    '''
    # Jinja defaults to an ascii parser in python 2.x unless utf-8 support
    # is enabled per module; treat every native string as unicode instead.
    if isinstance(value, str):
        value = to_unicode(value)

    template_ast = ENV.parse(value)
    # The undeclared jinja variables are this node's dependencies; an edge
    # per dependency lets us later traverse nodes in the right order and
    # detect missing or cyclic dependencies upfront.
    dependencies = meta.find_undeclared_variables(template_ast)
    if not dependencies:
        G.add_node(name, value=value)
        return
    G.add_node(name, template=value)
    for dependency in dependencies:
        G.add_edge(dependency, name)
def _process_defaults(G, schemas):
    '''
    Process dependencies for parameters' default values, in the order the
    schemas are given.
    '''
    for schema in schemas:
        for name, spec in schema.items():
            # A default applies when the parameter is absent, currently
            # resolved to None, or declared immutable.
            node = G.node.get(name, {})
            use_default = (name not in G.node
                           or node.get('value') is None
                           or spec.get('immutable', False))
            if use_default:
                _process(G, name, spec.get('default'))
def _validate(G):
    '''
    Validates the dependency graph to ensure it has no missing or cyclic
    dependencies.

    :raises ParamException: when a node was referenced but never defined,
        or when the graph contains a cycle.
    '''
    for name in G.nodes():
        # A node with neither a value nor a template was only ever
        # referenced as a dependency -- it is unsatisfied.
        if 'value' not in G.node[name] and 'template' not in G.node[name]:
            # Typo fix in the user-facing message ("Dependecy").
            msg = 'Dependency unsatisfied in %s' % name
            raise ParamException(msg)

    if not nx.is_directed_acyclic_graph(G):
        msg = 'Cyclic dependency found'
        raise ParamException(msg)
def _render(node, render_context):
'''
Render the node depending on its type
'''
if 'template' in node:
return ENV.from_string(node['template']).render(render_context)
if 'value' in node:
return node['value']
def _resolve_dependencies(G):
    '''
    Traverse the dependency graph in topological order (resolved nodes
    first), rendering each node against the values rendered so far.

    Returns a dict mapping node name to its rendered value; raises
    ParamException when any single node fails to render.
    '''
    context = {}
    for name in nx.topological_sort(G):
        node = G.node[name]
        try:
            context[name] = _render(node, context)
        except Exception as e:
            # Keep the full traceback at debug level, surface a concise
            # message to the caller.
            LOG.debug('Failed to render %s: %s', name, e, exc_info=True)
            msg = 'Failed to render parameter "%s": %s' % (name, str(e))
            raise ParamException(msg)

    return context
def _cast_params_from(params, context, schemas):
    '''
    Pick the names in ``params`` out of ``context`` and cast each value
    according to the schemas (when several schemas define a name, the
    last one wins, matching the original behaviour).
    '''
    result = {}
    for name in params:
        # reversed() + first match == original "last schema wins" loop.
        schema = next((s[name] for s in reversed(schemas) if name in s), {})
        result[name] = _cast(context[name], schema)
    return result
def render_live_params(runner_parameters, action_parameters, params, action_context):
    '''
    Renders a dict of live parameters. Ensures that there are no cyclic or
    missing dependencies. Returns a dict of plain (rendered and casted)
    parameter values.
    '''
    G = _create_graph(action_context)
    # Plain for-loop instead of the original side-effect-only list
    # comprehension, which built and threw away a list of None values.
    for name, value in six.iteritems(params):
        _process(G, name, value)
    _process_defaults(G, [action_parameters, runner_parameters])
    _validate(G)

    context = _resolve_dependencies(G)
    live_params = _cast_params_from(params, context, [action_parameters, runner_parameters])

    return live_params
def render_final_params(runner_parameters, action_parameters, params, action_context):
    '''
    Renders the missing parameters required for the action to execute.
    Parameters in ``params`` are treated as plain values (they are already
    resolved) instead of being rendered again. Returns separate dicts of
    runner and action parameters.
    '''
    G = _create_graph(action_context)
    # Plain for-loop instead of the original side-effect-only list
    # comprehension. By this point all params are resolved, so every
    # entry -- template-looking or not -- is treated as a plain value.
    for name, value in six.iteritems(params):
        G.add_node(name, value=value)
    _process_defaults(G, [action_parameters, runner_parameters])
    _validate(G)

    context = _resolve_dependencies(G)
    context = _cast_params_from(context, context, [action_parameters, runner_parameters])

    return _split_params(runner_parameters, action_parameters, context)
def get_finalized_params(runnertype_parameter_info, action_parameter_info,
                         liveaction_parameters, action_context):
    '''
    Render the live parameters and then finalize them into separate
    runner/action parameter dicts. (Kept so existing tests keep working;
    tests should eventually target the two functions individually.)
    '''
    rendered = render_live_params(runnertype_parameter_info,
                                  action_parameter_info,
                                  liveaction_parameters,
                                  action_context)
    return render_final_params(runnertype_parameter_info,
                               action_parameter_info,
                               rendered,
                               action_context)
| apache-2.0 |
LukeMurphey/splunk-network-tools | tests/selenium/webdriver/support/abstract_event_listener.py | 61 | 2033 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
class AbstractEventListener(object):
    """
    Event listener must subclass and implement this fully or partially.

    Every hook below is a no-op by default, so subclasses only need to
    override the events they care about. ``before_*`` hooks fire before
    the corresponding WebDriver operation, ``after_*`` hooks fire after
    it, and ``on_exception`` fires when an operation raises.
    """

    # -- navigation hooks -------------------------------------------------
    def before_navigate_to(self, url, driver):
        pass

    def after_navigate_to(self, url, driver):
        pass

    def before_navigate_back(self, driver):
        pass

    def after_navigate_back(self, driver):
        pass

    def before_navigate_forward(self, driver):
        pass

    def after_navigate_forward(self, driver):
        pass

    # -- element lookup and interaction hooks -----------------------------
    def before_find(self, by, value, driver):
        pass

    def after_find(self, by, value, driver):
        pass

    def before_click(self, element, driver):
        pass

    def after_click(self, element, driver):
        pass

    def before_change_value_of(self, element, driver):
        pass

    def after_change_value_of(self, element, driver):
        pass

    # -- script execution and lifecycle hooks ------------------------------
    def before_execute_script(self, script, driver):
        pass

    def after_execute_script(self, script, driver):
        pass

    def before_close(self, driver):
        pass

    def after_close(self, driver):
        pass

    def before_quit(self, driver):
        pass

    def after_quit(self, driver):
        pass

    def on_exception(self, exception, driver):
        pass
| apache-2.0 |
openstack/taskflow | taskflow/tests/unit/worker_based/test_executor.py | 2 | 13622 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import time
from taskflow.engines.worker_based import executor
from taskflow.engines.worker_based import protocol as pr
from taskflow import task as task_atom
from taskflow import test
from taskflow.test import mock
from taskflow.tests import utils as test_utils
from taskflow.types import failure
class TestWorkerTaskExecutor(test.MockTestCase):
def setUp(self):
super(TestWorkerTaskExecutor, self).setUp()
self.task = test_utils.DummyTask()
self.task_uuid = 'task-uuid'
self.task_args = {'a': 'a'}
self.task_result = 'task-result'
self.task_failures = {}
self.timeout = 60
self.broker_url = 'broker-url'
self.executor_uuid = 'executor-uuid'
self.executor_exchange = 'executor-exchange'
self.executor_topic = 'test-topic1'
self.proxy_started_event = threading.Event()
# patch classes
self.proxy_mock, self.proxy_inst_mock = self.patchClass(
executor.proxy, 'Proxy')
self.request_mock, self.request_inst_mock = self.patchClass(
executor.pr, 'Request', autospec=False)
# other mocking
self.proxy_inst_mock.start.side_effect = self._fake_proxy_start
self.proxy_inst_mock.stop.side_effect = self._fake_proxy_stop
self.request_inst_mock.uuid = self.task_uuid
self.request_inst_mock.expired = False
self.request_inst_mock.created_on = 0
self.request_inst_mock.task_cls = self.task.name
self.message_mock = mock.MagicMock(name='message')
self.message_mock.properties = {'correlation_id': self.task_uuid,
'type': pr.RESPONSE}
def _fake_proxy_start(self):
self.proxy_started_event.set()
while self.proxy_started_event.is_set():
time.sleep(0.01)
def _fake_proxy_stop(self):
self.proxy_started_event.clear()
def executor(self, reset_master_mock=True, **kwargs):
executor_kwargs = dict(uuid=self.executor_uuid,
exchange=self.executor_exchange,
topics=[self.executor_topic],
url=self.broker_url)
executor_kwargs.update(kwargs)
ex = executor.WorkerTaskExecutor(**executor_kwargs)
if reset_master_mock:
self.resetMasterMock()
return ex
def test_creation(self):
ex = self.executor(reset_master_mock=False)
master_mock_calls = [
mock.call.Proxy(self.executor_uuid, self.executor_exchange,
on_wait=ex._on_wait,
url=self.broker_url, transport=mock.ANY,
transport_options=mock.ANY,
retry_options=mock.ANY),
mock.call.proxy.dispatcher.type_handlers.update(mock.ANY),
]
self.assertEqual(master_mock_calls, self.master_mock.mock_calls)
def test_on_message_response_state_running(self):
response = pr.Response(pr.RUNNING)
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
expected_calls = [
mock.call.transition_and_log_error(pr.RUNNING, logger=mock.ANY),
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_state_progress(self):
response = pr.Response(pr.EVENT,
event_type=task_atom.EVENT_UPDATE_PROGRESS,
details={'progress': 1.0})
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
expected_calls = [
mock.call.task.notifier.notify(task_atom.EVENT_UPDATE_PROGRESS,
{'progress': 1.0}),
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_state_failure(self):
a_failure = failure.Failure.from_exception(Exception('test'))
failure_dict = a_failure.to_dict()
response = pr.Response(pr.FAILURE, result=failure_dict)
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual(0, len(ex._ongoing_requests))
expected_calls = [
mock.call.transition_and_log_error(pr.FAILURE, logger=mock.ANY),
mock.call.set_result(result=test_utils.FailureMatcher(a_failure))
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_state_success(self):
response = pr.Response(pr.SUCCESS, result=self.task_result,
event='executed')
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
expected_calls = [
mock.call.transition_and_log_error(pr.SUCCESS, logger=mock.ANY),
mock.call.set_result(result=self.task_result)
]
self.assertEqual(expected_calls, self.request_inst_mock.mock_calls)
def test_on_message_response_unknown_state(self):
response = pr.Response(state='<unknown>')
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual([], self.request_inst_mock.mock_calls)
def test_on_message_response_unknown_task(self):
self.message_mock.properties['correlation_id'] = '<unknown>'
response = pr.Response(pr.RUNNING)
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual([], self.request_inst_mock.mock_calls)
def test_on_message_response_no_correlation_id(self):
self.message_mock.properties = {'type': pr.RESPONSE}
response = pr.Response(pr.RUNNING)
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
ex._process_response(response.to_dict(), self.message_mock)
self.assertEqual([], self.request_inst_mock.mock_calls)
def test_on_wait_task_not_expired(self):
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
self.assertEqual(1, len(ex._ongoing_requests))
ex._on_wait()
self.assertEqual(1, len(ex._ongoing_requests))
@mock.patch('oslo_utils.timeutils.now')
def test_on_wait_task_expired(self, mock_now):
mock_now.side_effect = [0, 120]
self.request_inst_mock.expired = True
self.request_inst_mock.created_on = 0
ex = self.executor()
ex._ongoing_requests[self.task_uuid] = self.request_inst_mock
self.assertEqual(1, len(ex._ongoing_requests))
ex._on_wait()
self.assertEqual(0, len(ex._ongoing_requests))
def test_execute_task(self):
ex = self.executor()
ex._finder._add(self.executor_topic, [self.task.name])
ex.execute_task(self.task, self.task_uuid, self.task_args)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'execute',
self.task_args, timeout=self.timeout,
result=mock.ANY, failures=mock.ANY),
mock.call.request.transition_and_log_error(pr.PENDING,
logger=mock.ANY),
mock.call.proxy.publish(self.request_inst_mock,
self.executor_topic,
reply_to=self.executor_uuid,
correlation_id=self.task_uuid)
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_revert_task(self):
ex = self.executor()
ex._finder._add(self.executor_topic, [self.task.name])
ex.revert_task(self.task, self.task_uuid, self.task_args,
self.task_result, self.task_failures)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'revert',
self.task_args, timeout=self.timeout,
failures=self.task_failures,
result=self.task_result),
mock.call.request.transition_and_log_error(pr.PENDING,
logger=mock.ANY),
mock.call.proxy.publish(self.request_inst_mock,
self.executor_topic,
reply_to=self.executor_uuid,
correlation_id=self.task_uuid)
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_execute_task_topic_not_found(self):
ex = self.executor()
ex.execute_task(self.task, self.task_uuid, self.task_args)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'execute',
self.task_args, timeout=self.timeout,
result=mock.ANY, failures=mock.ANY),
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_execute_task_publish_error(self):
self.proxy_inst_mock.publish.side_effect = Exception('Woot!')
ex = self.executor()
ex._finder._add(self.executor_topic, [self.task.name])
ex.execute_task(self.task, self.task_uuid, self.task_args)
expected_calls = [
mock.call.Request(self.task, self.task_uuid, 'execute',
self.task_args, timeout=self.timeout,
result=mock.ANY, failures=mock.ANY),
mock.call.request.transition_and_log_error(pr.PENDING,
logger=mock.ANY),
mock.call.proxy.publish(self.request_inst_mock,
self.executor_topic,
reply_to=self.executor_uuid,
correlation_id=self.task_uuid),
mock.call.request.transition_and_log_error(pr.FAILURE,
logger=mock.ANY),
mock.call.request.set_result(mock.ANY)
]
self.assertEqual(expected_calls, self.master_mock.mock_calls)
def test_start_stop(self):
ex = self.executor()
ex.start()
# make sure proxy thread started
self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))
# stop executor
ex.stop()
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait(),
mock.call.proxy.stop()
], any_order=True)
def test_start_already_running(self):
ex = self.executor()
ex.start()
# make sure proxy thread started
self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))
# start executor again
self.assertRaises(RuntimeError, ex.start)
# stop executor
ex.stop()
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait(),
mock.call.proxy.stop()
], any_order=True)
def test_stop_not_running(self):
self.executor().stop()
self.assertEqual([], self.master_mock.mock_calls)
def test_stop_not_alive(self):
self.proxy_inst_mock.start.side_effect = None
# start executor
ex = self.executor()
ex.start()
# stop executor
ex.stop()
# since proxy thread is already done - stop is not called
self.master_mock.assert_has_calls([
mock.call.proxy.start(),
mock.call.proxy.wait()
], any_order=True)
    def test_restart(self):
        """An executor can be stopped and started again (full restart)."""
        ex = self.executor()
        ex.start()

        # make sure thread started
        self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))

        # restart executor
        ex.stop()
        ex.start()

        # make sure thread started
        self.assertTrue(self.proxy_started_event.wait(test_utils.WAIT_TIMEOUT))

        # stop executor
        ex.stop()

        # Two complete start/wait/stop cycles are expected on the proxy.
        self.master_mock.assert_has_calls([
            mock.call.proxy.start(),
            mock.call.proxy.wait(),
            mock.call.proxy.stop(),
            mock.call.proxy.start(),
            mock.call.proxy.wait(),
            mock.call.proxy.stop()
        ], any_order=True)
| apache-2.0 |
brunogamacatao/portalsaladeaula | django/core/cache/backends/memcached.py | 16 | 3551 | "Memcached cache backend"
import time
from django.core.cache.backends.base import BaseCache, InvalidCacheBackendError
from django.utils.encoding import smart_unicode, smart_str
# Prefer the deprecated cmemcache binding if present (warn about it),
# otherwise fall back to python-memcached. Only a missing module should
# trigger the fallback; any other import-time error must propagate.
try:
    import cmemcache as memcache
    import warnings
    warnings.warn(
        "Support for the 'cmemcache' library has been deprecated. Please use python-memcached instead.",
        DeprecationWarning
    )
except ImportError:
    try:
        import memcache
    except ImportError:
        # Fixed: was a bare ``except:`` which also swallowed SystemExit,
        # KeyboardInterrupt and genuine errors raised while importing the
        # memcache module, misreporting them as "library not installed".
        raise InvalidCacheBackendError("Memcached cache backend requires either the 'memcache' or 'cmemcache' library")
class CacheClass(BaseCache):
    """Memcached cache backend (python-memcached or cmemcache client).

    Keys are always passed through ``smart_str()`` because the memcached
    client libraries require byte strings; unicode values are encoded to
    UTF-8 before storage where the client would otherwise choke.
    """

    def __init__(self, server, params):
        # ``server`` may name several hosts separated by semicolons.
        BaseCache.__init__(self, params)
        self._cache = memcache.Client(server.split(';'))

    def _get_memcache_timeout(self, timeout):
        """
        Memcached deals with long (> 30 days) timeouts in a special
        way. Call this function to obtain a safe value for your timeout.
        """
        timeout = timeout or self.default_timeout
        if timeout > 2592000:  # 60*60*24*30, 30 days
            # See http://code.google.com/p/memcached/wiki/FAQ
            # "You can set expire times up to 30 days in the future. After that
            # memcached interprets it as a date, and will expire the item after
            # said date. This is a simple (but obscure) mechanic."
            #
            # This means that we have to switch to absolute timestamps.
            timeout += int(time.time())
        return timeout

    def add(self, key, value, timeout=0):
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        return self._cache.add(smart_str(key), value, self._get_memcache_timeout(timeout))

    def get(self, key, default=None):
        val = self._cache.get(smart_str(key))
        if val is None:
            return default
        return val

    def set(self, key, value, timeout=0):
        self._cache.set(smart_str(key), value, self._get_memcache_timeout(timeout))

    def delete(self, key):
        self._cache.delete(smart_str(key))

    def get_many(self, keys):
        return self._cache.get_multi(map(smart_str, keys))

    def close(self, **kwargs):
        self._cache.disconnect_all()

    def incr(self, key, delta=1):
        try:
            # Fixed: the key was previously passed raw (without smart_str),
            # inconsistent with every other operation; unicode keys crashed.
            val = self._cache.incr(smart_str(key), delta)

        # python-memcache responds to incr on non-existent keys by
        # raising a ValueError. Cmemcache returns None. In both
        # cases, we should raise a ValueError though.
        except ValueError:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)

        return val

    def decr(self, key, delta=1):
        try:
            # Fixed: encode the key consistently (see incr above).
            val = self._cache.decr(smart_str(key), delta)

        # python-memcache responds to decr on non-existent keys by
        # raising a ValueError. Cmemcache returns None. In both
        # cases, we should raise a ValueError though.
        except ValueError:
            val = None
        if val is None:
            raise ValueError("Key '%s' not found" % key)

        return val

    def set_many(self, data, timeout=0):
        safe_data = {}
        for key, value in data.items():
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            safe_data[smart_str(key)] = value
        self._cache.set_multi(safe_data, self._get_memcache_timeout(timeout))

    def delete_many(self, keys):
        self._cache.delete_multi(map(smart_str, keys))

    def clear(self):
        self._cache.flush_all()
| bsd-3-clause |
2ndQuadrant/ansible | lib/ansible/modules/storage/netapp/netapp_e_global.py | 52 | 4992 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: netapp_e_global
short_description: NetApp E-Series manage global settings configuration
description:
- Allow the user to configure several of the global settings associated with an E-Series storage-system
version_added: '2.7'
author: Michael Price (@lmprice)
extends_documentation_fragment:
- netapp.eseries
options:
name:
description:
- Set the name of the E-Series storage-system
- This label/name doesn't have to be unique.
- May be up to 30 characters in length.
aliases:
- label
log_path:
description:
- A local path to a file to be used for debug logging
required: no
notes:
- Check mode is supported.
- This module requires Web Services API v1.3 or newer.
"""
EXAMPLES = """
- name: Set the storage-system name
netapp_e_global:
name: myArrayName
api_url: "10.1.1.1:8443"
api_username: "admin"
api_password: "myPass"
"""
RETURN = """
msg:
description: Success message
returned: on success
type: str
sample: The settings have been updated.
name:
description:
- The current name/label of the storage-system.
returned: on success
sample: myArrayName
type: str
"""
import json
import logging
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netapp import request, eseries_host_argument_spec
from ansible.module_utils._text import to_native
# Default HTTP headers sent with every Web Services API request.
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
class GlobalSettings(object):
    """Apply global E-Series settings (currently only the array name/label)
    through the SANtricity Web Services REST API.

    Instantiating the class parses module parameters and validates them;
    calling the instance performs the update and exits the module.
    """

    def __init__(self):
        argument_spec = eseries_host_argument_spec()
        argument_spec.update(dict(
            name=dict(type='str', required=False, aliases=['label']),
            log_path=dict(type='str', required=False),
        ))

        self.module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True, )
        args = self.module.params
        self.name = args['name']
        self.ssid = args['ssid']
        self.url = args['api_url']
        self.creds = dict(url_password=args['api_password'],
                          validate_certs=args['validate_certs'],
                          url_username=args['api_username'], )
        self.check_mode = self.module.check_mode
        log_path = args['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)

        if log_path:
            logging.basicConfig(
                level=logging.DEBUG, filename=log_path, filemode='w',
                format='%(relativeCreated)dms %(levelname)s %(module)s.%(funcName)s:%(lineno)d\n %(message)s')

        # The REST endpoints below are joined with a relative path.
        if not self.url.endswith('/'):
            self.url += '/'

        # The API rejects labels longer than 30 characters; fail early.
        if self.name and len(self.name) > 30:
            self.module.fail_json(msg="The provided name is invalid, it must be < 30 characters in length.")

    def get_name(self):
        """Return the storage system's current name, failing the module if
        the array is unreachable or reported offline."""
        try:
            (rc, result) = request(self.url + 'storage-systems/%s' % self.ssid, headers=HEADERS, **self.creds)
            if result['status'] in ['offline', 'neverContacted']:
                self.module.fail_json(msg="This storage-system is offline! Array Id [%s]." % (self.ssid))
            return result['name']
        except Exception as err:
            self.module.fail_json(msg="Connection failure! Array Id [%s]. Error [%s]." % (self.ssid, to_native(err)))

    def update_name(self):
        """POST the new name if it differs from the current one.

        Returns True when a change was (or, in check mode, would be) made.
        """
        name = self.get_name()
        update = False
        if self.name != name:
            update = True

            body = dict(name=self.name)

            # Check mode reports the pending change without issuing the POST.
            if update and not self.check_mode:
                try:
                    (rc, result) = request(self.url + 'storage-systems/%s/configuration' % self.ssid, method='POST',
                                           data=json.dumps(body), headers=HEADERS, **self.creds)
                    self._logger.info("Set name to %s.", result['name'])
                # This is going to catch cases like a connection failure
                except Exception as err:
                    self.module.fail_json(
                        msg="We failed to set the storage-system name! Array Id [%s]. Error [%s]."
                            % (self.ssid, to_native(err)))
        return update

    def update(self):
        """Run the update and exit the module with the resulting state."""
        update = self.update_name()
        # Re-read so the reported name reflects what the array now has.
        name = self.get_name()

        self.module.exit_json(msg="The requested settings have been updated.", changed=update, name=name)

    def __call__(self, *args, **kwargs):
        self.update()
def main():
    """Module entry point: build the settings handler and run the update."""
    GlobalSettings()()


if __name__ == '__main__':
    main()
| gpl-3.0 |
sdarji/lpthw | Lib/site-packages/pip/_vendor/html5lib/serializer/htmlserializer.py | 310 | 12909 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import gettext
_ = gettext.gettext
# ``reduce`` moved to functools in Python 3; on Python 2 it is a builtin,
# so a failed import is simply ignored.
try:
    from functools import reduce
except ImportError:
    pass

from ..constants import voidElements, booleanAttributes, spaceCharacters
from ..constants import rcdataElements, entities, xmlEntities
from .. import utils

from xml.sax.saxutils import escape

# Collapse the set of space characters into a single string for fast
# "is this char in..." membership tests below.
spaceCharacters = "".join(spaceCharacters)

# Build the "htmlentityreplace" codec error handler: when encoding output
# in a charset that cannot represent a character, substitute a named HTML
# entity where one exists, else a numeric character reference.
try:
    from codecs import register_error, xmlcharrefreplace_errors
except ImportError:
    unicode_encode_errors = "strict"
else:
    unicode_encode_errors = "htmlentityreplace"

    # Map codepoint -> entity name for all single-character entities.
    encode_entity_map = {}
    # On narrow (UCS-2) builds astral characters occupy two code units.
    is_ucs4 = len("\U0010FFFF") == 1
    for k, v in list(entities.items()):
        # skip multi-character entities
        if ((is_ucs4 and len(v) > 1) or
                (not is_ucs4 and len(v) > 2)):
            continue
        if v != "&":
            if len(v) == 2:
                # Narrow build: decode the surrogate pair to a codepoint.
                v = utils.surrogatePairToCodepoint(v)
            else:
                v = ord(v)
            if not v in encode_entity_map or k.islower():
                # prefer &lt; over &LT; and similarly for &amp;, &gt;, etc.
                encode_entity_map[v] = k

    def htmlentityreplace_errors(exc):
        """Codec error handler: replace unencodable chars with entities."""
        if isinstance(exc, (UnicodeEncodeError, UnicodeTranslateError)):
            res = []
            codepoints = []
            skip = False
            for i, c in enumerate(exc.object[exc.start:exc.end]):
                if skip:
                    # Second half of a surrogate pair already consumed.
                    skip = False
                    continue
                index = i + exc.start
                if utils.isSurrogatePair(exc.object[index:min([exc.end, index + 2])]):
                    codepoint = utils.surrogatePairToCodepoint(exc.object[index:index + 2])
                    skip = True
                else:
                    codepoint = ord(c)
                codepoints.append(codepoint)
            for cp in codepoints:
                e = encode_entity_map.get(cp)
                if e:
                    res.append("&")
                    res.append(e)
                    if not e.endswith(";"):
                        res.append(";")
                else:
                    # No named entity: emit a hexadecimal character reference.
                    res.append("&#x%s;" % (hex(cp)[2:]))
            return ("".join(res), exc.end)
        else:
            return xmlcharrefreplace_errors(exc)

    register_error(unicode_encode_errors, htmlentityreplace_errors)

    del register_error
class HTMLSerializer(object):
    """Serialize an HTML tree-walker token stream to (byte or text) output.

    Fixed here: the attribute-escaping string literals had been corrupted
    (the HTML entities were de-escaped, leaving ``v.replace("&", "&")``
    no-ops and an unterminated string in the double-quote branch); they are
    restored to ``&amp;``, ``&lt;``, ``&#39;`` and ``&quot;``.
    """

    # attribute quoting options
    quote_attr_values = False
    quote_char = '"'
    use_best_quote_char = True

    # tag syntax options
    omit_optional_tags = True
    minimize_boolean_attributes = True
    use_trailing_solidus = False
    space_before_trailing_solidus = True

    # escaping options
    escape_lt_in_attrs = False
    escape_rcdata = False
    resolve_entities = True

    # miscellaneous options
    alphabetical_attributes = False
    inject_meta_charset = True
    strip_whitespace = False
    sanitize = False

    options = ("quote_attr_values", "quote_char", "use_best_quote_char",
               "omit_optional_tags", "minimize_boolean_attributes",
               "use_trailing_solidus", "space_before_trailing_solidus",
               "escape_lt_in_attrs", "escape_rcdata", "resolve_entities",
               "alphabetical_attributes", "inject_meta_charset",
               "strip_whitespace", "sanitize")

    def __init__(self, **kwargs):
        """Initialize HTMLSerializer.

        Keyword options (default given first unless specified) include:

        inject_meta_charset=True|False
          Whether it insert a meta element to define the character set of the
          document.
        quote_attr_values=True|False
          Whether to quote attribute values that don't require quoting
          per HTML5 parsing rules.
        quote_char=u'"'|u"'"
          Use given quote character for attribute quoting. Default is to
          use double quote unless attribute value contains a double quote,
          in which case single quotes are used instead.
        escape_lt_in_attrs=False|True
          Whether to escape < in attribute values.
        escape_rcdata=False|True
          Whether to escape characters that need to be escaped within normal
          elements within rcdata elements such as style.
        resolve_entities=True|False
          Whether to resolve named character entities that appear in the
          source tree. The XML predefined entities &lt; &gt; &amp; &quot; &apos;
          are unaffected by this setting.
        strip_whitespace=False|True
          Whether to remove semantically meaningless whitespace. (This
          compresses all whitespace to a single space except within pre.)
        minimize_boolean_attributes=True|False
          Shortens boolean attributes to give just the attribute value,
          for example <input disabled="disabled"> becomes <input disabled>.
        use_trailing_solidus=False|True
          Includes a close-tag slash at the end of the start tag of void
          elements (empty elements whose end tag is forbidden). E.g. <hr/>.
        space_before_trailing_solidus=True|False
          Places a space immediately before the closing slash in a tag
          using a trailing solidus. E.g. <hr />. Requires use_trailing_solidus.
        sanitize=False|True
          Strip all unsafe or unknown constructs from output.
          See `html5lib user documentation`_
        omit_optional_tags=True|False
          Omit start/end tags that are optional.
        alphabetical_attributes=False|True
          Reorder attributes to be in alphabetical order.

        .. _html5lib user documentation: http://code.google.com/p/html5lib/wiki/UserDocumentation
        """
        if 'quote_char' in kwargs:
            self.use_best_quote_char = False
        # Any recognized option overrides the class-level default.
        for attr in self.options:
            setattr(self, attr, kwargs.get(attr, getattr(self, attr)))
        self.errors = []
        self.strict = False

    def encode(self, string):
        # Encoding path used for data that may legitimately contain
        # characters needing entity replacement.
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, unicode_encode_errors)
        else:
            return string

    def encodeStrict(self, string):
        # Encoding path for markup we generate ourselves; must be encodable.
        assert(isinstance(string, text_type))
        if self.encoding:
            return string.encode(self.encoding, "strict")
        else:
            return string

    def serialize(self, treewalker, encoding=None):
        """Yield serialized chunks (bytes if ``encoding`` given, else text)."""
        self.encoding = encoding
        in_cdata = False
        self.errors = []

        if encoding and self.inject_meta_charset:
            from ..filters.inject_meta_charset import Filter
            treewalker = Filter(treewalker, encoding)
        # WhitespaceFilter should be used before OptionalTagFilter
        # for maximum efficiently of this latter filter
        if self.strip_whitespace:
            from ..filters.whitespace import Filter
            treewalker = Filter(treewalker)
        if self.sanitize:
            from ..filters.sanitizer import Filter
            treewalker = Filter(treewalker)
        if self.omit_optional_tags:
            from ..filters.optionaltags import Filter
            treewalker = Filter(treewalker)
        # Alphabetical attributes must be last, as other filters
        # could add attributes and alter the order
        if self.alphabetical_attributes:
            from ..filters.alphabeticalattributes import Filter
            treewalker = Filter(treewalker)

        for token in treewalker:
            type = token["type"]
            if type == "Doctype":
                doctype = "<!DOCTYPE %s" % token["name"]

                if token["publicId"]:
                    doctype += ' PUBLIC "%s"' % token["publicId"]
                elif token["systemId"]:
                    doctype += " SYSTEM"
                if token["systemId"]:
                    if token["systemId"].find('"') >= 0:
                        if token["systemId"].find("'") >= 0:
                            self.serializeError(_("System identifer contains both single and double quote characters"))
                        quote_char = "'"
                    else:
                        quote_char = '"'
                    doctype += " %s%s%s" % (quote_char, token["systemId"], quote_char)

                doctype += ">"
                yield self.encodeStrict(doctype)

            elif type in ("Characters", "SpaceCharacters"):
                if type == "SpaceCharacters" or in_cdata:
                    if in_cdata and token["data"].find("</") >= 0:
                        self.serializeError(_("Unexpected </ in CDATA"))
                    yield self.encode(token["data"])
                else:
                    yield self.encode(escape(token["data"]))

            elif type in ("StartTag", "EmptyTag"):
                name = token["name"]
                yield self.encodeStrict("<%s" % name)
                if name in rcdataElements and not self.escape_rcdata:
                    in_cdata = True
                elif in_cdata:
                    self.serializeError(_("Unexpected child element of a CDATA element"))
                for (attr_namespace, attr_name), attr_value in token["data"].items():
                    # TODO: Add namespace support here
                    k = attr_name
                    v = attr_value
                    yield self.encodeStrict(' ')

                    yield self.encodeStrict(k)
                    if not self.minimize_boolean_attributes or \
                        (k not in booleanAttributes.get(name, tuple())
                         and k not in booleanAttributes.get("", tuple())):
                        yield self.encodeStrict("=")
                        if self.quote_attr_values or not v:
                            quote_attr = True
                        else:
                            # Quote only if the value contains whitespace or
                            # a character that would terminate the attribute.
                            quote_attr = reduce(lambda x, y: x or (y in v),
                                                spaceCharacters + ">\"'=", False)
                        v = v.replace("&", "&amp;")
                        if self.escape_lt_in_attrs:
                            v = v.replace("<", "&lt;")
                        if quote_attr:
                            quote_char = self.quote_char
                            if self.use_best_quote_char:
                                if "'" in v and '"' not in v:
                                    quote_char = '"'
                                elif '"' in v and "'" not in v:
                                    quote_char = "'"
                            if quote_char == "'":
                                v = v.replace("'", "&#39;")
                            else:
                                v = v.replace('"', "&quot;")
                            yield self.encodeStrict(quote_char)
                            yield self.encode(v)
                            yield self.encodeStrict(quote_char)
                        else:
                            yield self.encode(v)
                if name in voidElements and self.use_trailing_solidus:
                    if self.space_before_trailing_solidus:
                        yield self.encodeStrict(" /")
                    else:
                        yield self.encodeStrict("/")
                yield self.encode(">")

            elif type == "EndTag":
                name = token["name"]
                if name in rcdataElements:
                    in_cdata = False
                elif in_cdata:
                    self.serializeError(_("Unexpected child element of a CDATA element"))
                yield self.encodeStrict("</%s>" % name)

            elif type == "Comment":
                data = token["data"]
                if data.find("--") >= 0:
                    self.serializeError(_("Comment contains --"))
                yield self.encodeStrict("<!--%s-->" % token["data"])

            elif type == "Entity":
                name = token["name"]
                key = name + ";"
                if not key in entities:
                    self.serializeError(_("Entity %s not recognized" % name))
                if self.resolve_entities and key not in xmlEntities:
                    data = entities[key]
                else:
                    data = "&%s;" % name
                yield self.encodeStrict(data)

            else:
                self.serializeError(token["data"])

    def render(self, treewalker, encoding=None):
        """Serialize the whole stream and return it as one string/bytes."""
        if encoding:
            return b"".join(list(self.serialize(treewalker, encoding)))
        else:
            return "".join(list(self.serialize(treewalker)))

    def serializeError(self, data="XXX ERROR MESSAGE NEEDED"):
        # XXX The idea is to make data mandatory.
        self.errors.append(data)
        if self.strict:
            raise SerializeError
class SerializeError(Exception):
    """Error in serialized tree"""
    # Fixed: was declared with ``def`` instead of ``class``, so it was a
    # plain function and ``raise SerializeError`` could never work.
    pass
| unlicense |
gpitel/pyjs | examples/showcase/src/demos_panels/tabPanel.py | 13 | 1600 | """
The ``ui.TabPanel`` class implements a tabbed window, where clicking on a tab
causes the associated contents to be displayed.
The TabPanel relies heavily on cascading stylesheet definitions to operate.
The following stylesheet definitions are used by the example shown below:
.gwt-TabPanel {
}
.gwt-TabPanelBottom {
border: 1px solid #87B3FF;
}
.gwt-TabBar {
background-color: #C3D9FF;
}
.gwt-TabBar .gwt-TabBarFirst {
height: 100%;
padding-left: 3px;
}
.gwt-TabBar .gwt-TabBarRest {
padding-right: 3px;
}
.gwt-TabBar .gwt-TabBarItem {
border-top: 1px solid #C3D9FF;
border-bottom: 1px solid #C3D9FF;
padding: 2px;
cursor: pointer;
}
.gwt-TabBar .gwt-TabBarItem-selected {
font-weight: bold;
background-color: #E8EEF7;
border-top: 1px solid #87B3FF;
border-left: 1px solid #87B3FF;
border-right: 1px solid #87B3FF;
border-bottom: 1px solid #E8EEF7;
padding: 2px;
cursor: default;
}
"""
from pyjamas.ui.SimplePanel import SimplePanel
from pyjamas.ui.TabPanel import TabPanel
from pyjamas.ui.HTML import HTML
class TabPanelDemo(SimplePanel):
    """Demo widget: a three-tab TabPanel with simple HTML page contents."""

    def __init__(self):
        SimplePanel.__init__(self)

        panel = TabPanel(Width="100%", Height="250px")
        pages = (
            ("The quick brown fox jumps over the lazy dog.", "Tab 1"),
            ("The early bird catches the worm.", "Tab 2"),
            ("The smart money is on the black horse.", "Tab 3"),
        )
        for markup, caption in pages:
            panel.add(HTML(markup), caption)

        # Show the first tab by default.
        panel.selectTab(0)
        self.add(panel)
| apache-2.0 |
mickael-guene/binutils | gdb/testsuite/gdb.python/py-bad-printers.py | 7 | 2584 | # Copyright (C) 2008-2017 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite. It tests GDB's handling of
# bad python pretty printers.
# Test a printer with a bad children iterator.
import re
import gdb.printing
class BadChildrenContainerPrinter1(object):
    """Children iterator doesn't return a tuple of two elements."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        name = self.val['name']
        count = self.val['len']
        return 'container %s with %d elements' % (name, count)

    @staticmethod
    def _bad_iterator(pointer, len):
        start = pointer
        end = pointer + len
        while pointer != end:
            # Deliberately yield a bare string instead of the required
            # (name, value) pair -- this printer exists to exercise GDB's
            # handling of protocol violations. Do not "fix" it.
            yield 'intentional violation of children iterator protocol'
            pointer += 1

    def children(self):
        elements = self.val['elements']
        count = self.val['len']
        return self._bad_iterator(elements, count)
class BadChildrenContainerPrinter2(object):
    """Children iterator returns a tuple of two elements with bad values."""

    def __init__(self, val):
        self.val = val

    def to_string(self):
        name = self.val['name']
        count = self.val['len']
        return 'container %s with %d elements' % (name, count)

    @staticmethod
    def _bad_iterator(pointer, len):
        start = pointer
        end = pointer + len
        while pointer != end:
            # The first argument is supposed to be a string; yielding an
            # int is the deliberate protocol violation under test.
            yield (42, 'intentional violation of children iterator protocol')
            pointer += 1

    def children(self):
        elements = self.val['elements']
        count = self.val['len']
        return self._bad_iterator(elements, count)
def build_pretty_printer():
    """Build the 'bad-printers' collection with both broken printers
    registered for the ``container`` type."""
    collection = gdb.printing.RegexpCollectionPrettyPrinter("bad-printers")
    for nickname, printer in (('container1', BadChildrenContainerPrinter1),
                              ('container2', BadChildrenContainerPrinter2)):
        collection.add_printer(nickname, '^container$', printer)
    return collection
# Build and register the collection globally so values of type "container"
# are routed through the (intentionally broken) printers above.
my_pretty_printer = build_pretty_printer()
gdb.printing.register_pretty_printer(gdb, my_pretty_printer)
| gpl-2.0 |
alexlib/openpiv-python | setup.py | 2 | 1786 | from os import path
from setuptools import setup, find_packages
# read the contents of your README file
# Locate README.md next to this setup.py to use as the PyPI long description.
this_directory = path.abspath(path.dirname(__file__))
# with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name="OpenPIV",
    version='0.23.6',
    packages=find_packages(),
    include_package_data=True,
    long_description=long_description,
    long_description_content_type='text/markdown',
    setup_requires=[
        'setuptools',
    ],
    # Runtime dependencies installed alongside the package.
    install_requires=[
        'numpy',
        'imageio',
        'matplotlib>=3',
        'scikit-image',
        'scipy',
        'natsort',
        'GitPython',
        'pytest',
        'tqdm'
    ],
    classifiers=[
        # PyPI-specific version type. The number specified here is a magic
        # constant
        # with no relation to this application's version numbering scheme.
        # *sigh*
        'Development Status :: 4 - Beta',
        # Sublist of all supported Python versions.
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        # Sublist of all supported platforms and environments.
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        # Miscellaneous metadata.
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Topic :: Scientific/Engineering',
    ],
    # long_description=long_description,
    # long_description_content_type='text/markdown'
)
| gpl-3.0 |
Grirrane/odoo | addons/l10n_in_hr_payroll/report/report_hr_yearly_salary_detail.py | 8 | 6789 | #-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>). All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
import datetime
from openerp.report import report_sxw
from openerp.osv import osv
class employees_yearly_salary_report(report_sxw.rml_parse):
    """RML report parser for the Indian payroll yearly salary detail.

    NOTE: this is Python 2 code (it uses the ``<>`` operator) and relies on
    helper methods being called in a specific order: get_periods() populates
    self.mnths, which salary_list() and get_employee_detail() then read.
    """

    def __init__(self, cr, uid, name, context):
        super(employees_yearly_salary_report, self).__init__(cr, uid, name, context)
        # Expose helpers to the RML template rendering context.
        self.localcontext.update({
            'time': time,
            'get_employee': self.get_employee,
            'get_employee_detail': self.get_employee_detail,
            'cal_monthly_amt': self.cal_monthly_amt,
            'get_periods': self.get_periods,
            'get_total': self.get_total,
            'get_allow': self.get_allow,
            'get_deduct': self.get_deduct,
        })
        self.context = context

    def get_periods(self, form):
        """Build the month columns covered by the wizard's date range.

        Side effect: fills self.mnths with 'M-YYYY' keys (padded with ''
        up to 12 entries). Returns a single-element list containing the
        abbreviated month names for the report header.
        """
        self.mnths = []
        # Get start year-month-date and end year-month-date
        first_year = int(form['date_from'][0:4])
        last_year = int(form['date_to'][0:4])
        first_month = int(form['date_from'][5:7])
        last_month = int(form['date_to'][5:7])
        no_months = (last_year-first_year) * 12 + last_month - first_month + 1
        current_month = first_month
        current_year = first_year

        # Get name of the months from integer
        mnth_name = []
        for count in range(0, no_months):
            m = datetime.date(current_year, current_month, 1).strftime('%b')
            mnth_name.append(m)
            self.mnths.append(str(current_month) + '-' + str(current_year))
            if current_month == 12:
                current_month = 0
                current_year = last_year
            current_month = current_month + 1
        # Pad to 12 columns so the table layout stays fixed.
        for c in range(0, (12-no_months)):
            mnth_name.append('')
            self.mnths.append('')
        return [mnth_name]

    def get_employee(self, form):
        """Return browse records for the employees selected in the wizard."""
        return self.pool.get('hr.employee').browse(self.cr,self.uid, form.get('employee_ids', []), context=self.context)

    def get_employee_detail(self, form, obj):
        """Split one employee's payslip lines into allowances/deductions.

        Side effects: fills self.allow_list, self.deduct_list and
        self.total, read later by get_allow()/get_deduct()/get_total().
        """
        self.allow_list = []
        self.deduct_list = []
        self.total = 0.00
        gross = False
        net = False
        payslip_lines = self.cal_monthly_amt(form, obj.id)
        for line in payslip_lines:
            # NOTE: 'for line[0] in line' rebinds line[0] to each row in
            # turn (unusual but valid Python); each row is
            # [name, 12 monthly amounts, total].
            for line[0] in line:
                if line[0][0] == "Gross":
                    gross = line[0]
                elif line[0][0] == "Net":
                    net = line[0]
                elif line[0][13] > 0.0 and line[0][0] != "Net":
                    self.total += line[0][len(line[0])-1]
                    self.allow_list.append(line[0])
                elif line[0][13] < 0.0:
                    self.total += line[0][len(line[0])-1]
                    self.deduct_list.append(line[0])
        # Gross closes the allowance section; Net closes the deductions.
        if gross:
            self.allow_list.append(gross)
        if net:
            self.deduct_list.append(net)
        return None

    def cal_monthly_amt(self, form, emp_id):
        """Aggregate an employee's payslip lines per rule and per month.

        Returns a list (one entry per salary rule category that has data)
        of salary_list() outputs, ordered by category code.
        """
        category_obj = self.pool.get('hr.salary.rule.category')
        result = []
        res = []
        salaries = {}
        # Totals per (category code, rule name, 'mm-yyyy') triple.
        self.cr.execute('''SELECT rc.code, pl.name, sum(pl.total), \
                to_char(date_to,'mm-yyyy') as to_date FROM hr_payslip_line as pl \
                LEFT JOIN hr_salary_rule_category AS rc on (pl.category_id = rc.id) \
                LEFT JOIN hr_payslip as p on pl.slip_id = p.id \
                LEFT JOIN hr_employee as emp on emp.id = p.employee_id \
                WHERE p.employee_id = %s \
                GROUP BY rc.parent_id, pl.sequence, pl.id, pl.category_id,pl.name,p.date_to,rc.code \
                ORDER BY pl.sequence, rc.parent_id''',(emp_id,))
        salary = self.cr.fetchall()
        # Nest the flat rows as {code: {rule name: {month: amount}}}.
        for category in salary:
            if category[0] not in salaries:
                salaries.setdefault(category[0], {})
                salaries[category[0]].update({category[1]: {category[3]: category[2]}})
            elif category[1] not in salaries[category[0]]:
                salaries[category[0]].setdefault(category[1], {})
                salaries[category[0]][category[1]].update({category[3]: category[2]})
            else:
                salaries[category[0]][category[1]].update({category[3]: category[2]})
        category_ids = category_obj.search(self.cr,self.uid, [], context=self.context)
        categories = category_obj.read(self.cr, self.uid, category_ids, ['code'], context=self.context)
        for code in map(lambda x: x['code'], categories):
            if code in salaries:
                res = self.salary_list(salaries[code])
                result.append(res)
        return result

    def salary_list(self, salaries):
        """Turn {rule name: {month: amount}} into fixed-width report rows.

        Each row is [name, one amount per self.mnths column, row total];
        empty padding months produce '' cells.
        """
        cat_salary_all = []
        for category_name,amount in salaries.items():
            cat_salary = []
            total = 0.0
            cat_salary.append(category_name)
            for mnth in self.mnths:
                # NOTE: '<>' is the Python 2 inequality operator.
                if mnth <> 'None':
                    # Zero-pad single-digit months to match the SQL key.
                    if len(mnth) != 7:
                        mnth = '0' + str(mnth)
                    if mnth in amount and amount[mnth]:
                        cat_salary.append(amount[mnth])
                        total += amount[mnth]
                    else:
                        cat_salary.append(0.00)
                else:
                    cat_salary.append('')
            cat_salary.append(total)
            cat_salary_all.append(cat_salary)
        return cat_salary_all

    def get_allow(self):
        # Populated by get_employee_detail(); called from the template.
        return self.allow_list

    def get_deduct(self):
        # Populated by get_employee_detail(); called from the template.
        return self.deduct_list

    def get_total(self):
        # Populated by get_employee_detail(); called from the template.
        return self.total
class wrapped_report_payslip(osv.AbstractModel):
    """Glue model binding the QWeb report template to the legacy
    rml_parse-based parser above."""
    _name = 'report.l10n_in_hr_payroll.report_hryearlysalary'
    _inherit = 'report.abstract_report'
    _template = 'l10n_in_hr_payroll.report_hryearlysalary'
    # Parser class that supplies the template's rendering context.
    _wrapped_report_class = employees_yearly_salary_report
| agpl-3.0 |
etkirsch/legends-of-erukar | erukar/system/engine/commands/TargetedCommand.py | 1 | 1711 | from erukar.system.engine import Interaction
from .Command import Command
from .CommandResult import CommandResult
class TargetedCommand(Command):
    """Base class for commands that act on a target inside an Interaction.

    Extends Command by resolving UUID-valued arguments into live objects
    looked up through the interaction's main NPC.
    """

    def process_args(self):
        """Resolve args: swap the interaction UUID for its object, then
        replace any UUID-shaped argument with the object it names."""
        if not self.args:
            raise Exception('Cannot process args -- Command\'s args are undefined')
        if 'interaction' in self.args:
            self.find_interaction()
        for argument in self.args:
            # Without a resolved interaction there is nothing to look up in.
            if 'interaction' not in self.args or not self.args['interaction']:
                break
            try:
                arg_uuid = self.get_uuid_for_argument(argument)
            except Exception:
                # Fixed: was a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt. Non-UUID args are skipped.
                continue
            obj = self.args['interaction'].main_npc.get_object_by_uuid(arg_uuid)
            if obj and self.object_index_is_valid(obj):
                # Safe while iterating: only values change, never the key set.
                self.args[argument] = obj
        super().process_args()

    def find_interaction(self):
        """Replace the interaction UUID string with the matching object."""
        for interaction in getattr(self, 'interactions', []):
            if str(interaction.uuid) == self.args['interaction']:
                self.args['interaction'] = interaction

    def execute(self):
        """Resolve arguments, then run the subclass's perform()."""
        self.process_args()
        return self.perform()

    def succeed_with_new_interaction(self, interaction):
        """Build a successful CommandResult carrying a follow-up
        interaction, then sever this command."""
        result = CommandResult(True, self, self.results, self.dirtied_characters)
        result.interaction = interaction
        self.sever()
        return result

    def check_for_failure_on_interaction(self):
        """Return a failure result if the interaction arg is missing,
        the wrong type, or already ended; None (implicitly) otherwise."""
        if self.invalid('interaction'):
            return self.fail('Interaction not specified.')
        if not isinstance(self.args['interaction'], Interaction):
            return self.fail('Target is not an interaction')
        if self.args['interaction'].ended:
            return self.fail('Target Interaction has already ended')
livc/Paddle | benchmark/tensorflow/rnn/rnn_multi_gpu.py | 21 | 12436 | #!/usr/bin/env python
from six.moves import xrange # pylint: disable=redefined-builtin
import re
import math
import time
import numpy as np
from datetime import datetime
import reader
import tensorflow as tf
from tensorflow.python.ops import rnn
FLAGS = tf.app.flags.FLAGS

# Benchmark knobs. NOTE(review): the help strings for num_layers/max_len/
# hidden_size/emb_size were copy-pasted from num_batches and are misleading;
# they are runtime strings, so left untouched here.
tf.app.flags.DEFINE_integer('batch_size', 64, """Batch size.""")
tf.app.flags.DEFINE_integer('num_batches', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_layers', 1, """Number of batches to run.""")
tf.app.flags.DEFINE_integer('max_len', 100, """Number of batches to run.""")
tf.app.flags.DEFINE_integer('hidden_size', 128, """Number of batches to run.""")
tf.app.flags.DEFINE_integer('emb_size', 64, """Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
                            """Whether to log device placement.""")
tf.app.flags.DEFINE_integer('num_gpus', 4, """How many GPUs to use.""")

# Dataset/model constants for the IMDB sentiment benchmark.
VOCAB_SIZE = 30000
NUM_CLASS = 2
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000

# Learning-rate schedule constants.
NUM_EPOCHS_PER_DECAY = 50
INITIAL_LEARNING_RATE = 0.1
LEARNING_RATE_DECAY_FACTOR = 0.1

# Prefix for per-GPU tower name scopes.
TOWER_NAME = 'tower'

# Loaded once at import time; shared by all towers.
train_dataset = reader.create_datasets("imdb.pkl", VOCAB_SIZE)
def get_incoming_shape(incoming):
    """Return the shape of *incoming* (tf.Tensor, ndarray, list or tuple).

    Fixed: the original tested ``type(incoming) in [np.array, list, tuple]``.
    ``np.array`` is a factory function, not a type, and ``type()`` of a real
    array is ``np.ndarray`` — so every numpy array fell through to the
    ``else`` branch and raised. isinstance() also accepts subclasses.
    """
    if isinstance(incoming, tf.Tensor):
        return incoming.get_shape().as_list()
    elif isinstance(incoming, (np.ndarray, list, tuple)):
        return np.shape(incoming)
    else:
        raise Exception("Invalid incoming layer.")
# Note input * W is done in LSTMCell,
# which is different from PaddlePaddle
# Note input * W is done in LSTMCell,
# which is different from PaddlePaddle
def single_lstm(name,
                incoming,
                n_units,
                use_peepholes=True,
                return_seq=False,
                return_state=False):
    """Single-layer LSTM over *incoming* (a list of per-timestep tensors).

    Returns the full output sequence when return_seq, else the last output;
    optionally paired with the final cell state when return_state.
    """
    with tf.name_scope(name) as scope:
        cell = tf.nn.rnn_cell.LSTMCell(n_units, use_peepholes=use_peepholes)
        output, _cell_state = rnn.rnn(cell, incoming, dtype=tf.float32)
        out = output if return_seq else output[-1]
        return (out, _cell_state) if return_state else out
def lstm(name,
         incoming,
         n_units,
         use_peepholes=True,
         return_seq=False,
         return_state=False,
         num_layers=1):
    """Multi-layer LSTM over *incoming*.

    *incoming* may be a [batch, time, emb] tensor (it is split into a list
    of per-timestep tensors) or already a list of tensors. Returns the full
    output sequence when return_seq, else the last output; optionally
    paired with the final state when return_state.
    """
    with tf.name_scope(name) as scope:
        lstm_cell = tf.nn.rnn_cell.LSTMCell(
            n_units, use_peepholes=use_peepholes)
        cell = tf.nn.rnn_cell.MultiRNNCell([lstm_cell] * num_layers)
        initial_state = cell.zero_state(FLAGS.batch_size, dtype=tf.float32)
        if not isinstance(incoming, list):
            # if the input is embeding, the Tensor shape : [None, time_step, emb_size]
            incoming = [
                tf.squeeze(input_, [1])
                for input_ in tf.split(1, FLAGS.max_len, incoming)
            ]
        outputs, state = tf.nn.rnn(cell,
                                   incoming,
                                   initial_state=initial_state,
                                   dtype=tf.float32)
        out = outputs if return_seq else outputs[-1]
        # Fixed: returned the undefined name ``_cell_state`` (copy-paste
        # from single_lstm), which raised NameError when return_state=True.
        return (out, state) if return_state else out
def embedding(name, incoming, vocab_size, emb_size):
    """Look up trainable word embeddings for the integer ids in *incoming*.

    Creates (or reuses) a [vocab_size, emb_size] float32 variable named
    ``name + '_emb'`` and returns the gathered embedding tensor.
    """
    with tf.name_scope(name):
        # Embedding matrix: one emb_size-dimensional row per vocabulary id.
        # (Pinning to /cpu:0 was considered here but is left to the caller.)
        emb_matrix = tf.get_variable(
            name + '_emb', [vocab_size, emb_size], dtype=tf.float32)
        return tf.nn.embedding_lookup(emb_matrix, incoming)
def fc(name, inpOp, nIn, nOut, act=True):
    """Fully-connected layer: ``inpOp * W + b``, optionally through a ReLU.

    Args:
        name: base name for the weight/bias variables and the op.
        inpOp: input tensor of shape [batch, nIn].
        nIn: input dimension.
        nOut: output dimension.
        act: apply a ReLU activation when true; return raw affine output
            (logits) when false.
    """
    with tf.name_scope(name):
        weights = tf.get_variable(
            name + '_w', [nIn, nOut],
            initializer=tf.truncated_normal_initializer(
                stddev=0.01, dtype=tf.float32),
            dtype=tf.float32)
        bias = tf.get_variable(
            name + '_b', [nOut],
            initializer=tf.constant_initializer(
                value=0.0, dtype=tf.float32),
            dtype=tf.float32,
            trainable=True)
        if act:
            # relu_layer fuses matmul + bias add + ReLU in one op.
            return tf.nn.relu_layer(inpOp, weights, bias, name=name)
        return tf.matmul(inpOp, weights) + bias
def inference(seq):
    """Forward pass: token ids -> embedding -> LSTM -> 2-way FC logits."""
    net = embedding('emb', seq, VOCAB_SIZE, FLAGS.emb_size)
    # Python-2 print statements trace the intermediate static shapes once,
    # at graph-construction time.
    print "emb:", get_incoming_shape(net)
    net = lstm('lstm', net, FLAGS.hidden_size, num_layers=FLAGS.num_layers)
    print "lstm:", get_incoming_shape(net)
    # Final projection to NUM_CLASS (= 2) logits; no softmax here, the
    # loss function applies it.
    net = fc('fc1', net, FLAGS.hidden_size, 2)
    return net
def loss(logits, labels):
    """Add the softmax cross-entropy of *logits* vs *labels* to the
    'losses' collection and return the summed total loss tensor.

    NOTE(review): labels are cast to float32 and fed to
    softmax_cross_entropy_with_logits, so they are assumed to be dense
    one-hot vectors -- confirm against the dataset reader. The commented
    code below is the sparse label-index variant.
    """
    # one label index for one sample
    #labels = tf.cast(labels, tf.int64)
    # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    #    logits, labels, name='cross_entropy_per_example')
    labels = tf.cast(labels, tf.float32)
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        logits, labels, name='cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    # Total loss = mean cross-entropy plus whatever else is already in the
    # 'losses' collection (e.g. regularization terms, if any were added).
    return tf.add_n(tf.get_collection('losses'), name='total_loss')
def tower_loss(scope):
    """Calculate the total loss on a single tower running the model.
    Args:
      scope: unique prefix string identifying the tower, e.g. 'tower_0'
    Returns:
      Tensor of shape [] containing the total loss for a batch of data
    """
    # NOTE(review): next_batch() is called at graph-construction time, so
    # each tower is built around one fixed batch -- presumably acceptable
    # for a throughput benchmark; confirm no per-step feeding is intended.
    data, label = train_dataset.next_batch(FLAGS.batch_size)
    # Build a Graph that computes the logits predictions from the
    # inference model.
    last_layer = inference(data)
    # Build the portion of the Graph calculating the losses. Note that we will
    # assemble the total_loss using a custom function below.
    #_ = loss(last_layer, label)
    _ = loss(last_layer, label)
    # Assemble all of the losses for the current tower only.
    losses = tf.get_collection('losses', scope)
    # Calculate the total loss for the current tower.
    total_loss = tf.add_n(losses, name='total_loss')
    # Compute the moving average of all individual losses and the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(losses + [total_loss])
    # Attach a scalar summary to all individual losses and the total loss; do the
    # same for the averaged version of the losses.
    for l in losses + [total_loss]:
        # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
        # session. This helps the clarity of presentation on tensorboard.
        loss_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', l.op.name)
        # Name each loss as '(raw)' and name the moving average version of the loss
        # as the original loss name.
        tf.scalar_summary(loss_name + ' (raw)', l)
        #tf.scalar_summary(loss_name, loss_averages.average(l))
    # Returning total_loss behind this control dependency forces the moving
    # average update to run whenever the loss is evaluated.
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)
    return total_loss
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
      tower_grads: List of lists of (gradient, variable) tuples. The outer list
        is over individual gradients. The inner list is over the gradient
        calculation for each tower.
    Returns:
      List of pairs of (gradient, variable) where the gradient has been averaged
      across all towers.
    """
    average_grads = []
    # zip(*tower_grads) transposes the structure: each grad_and_vars groups
    # the per-tower gradients of one variable.
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        # (TF 0.x argument order: tf.concat(axis, values).)
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
def time_tensorflow_run(session, target):
    """Run *target* for FLAGS.num_batches steps (after a warm-up period)
    and print per-step and aggregate timing statistics.

    Args:
        session: an active tf.Session.
        target: fetches to run; expected to be [train_op, loss] so that
            the second fetched value is the scalar loss.
    """
    num_steps_burn_in = 80
    total_duration = 0.0
    total_duration_squared = 0.0
    for i in xrange(FLAGS.num_batches + num_steps_burn_in):
        start_time = time.time()
        # BUG FIX: a stray duplicated session.run() call referencing the
        # undefined names x_input/y_input/data/label was removed here; it
        # raised NameError and would have double-counted each step's work.
        _, loss_value = session.run(target)
        duration = time.time() - start_time
        # BUG FIX: ">=" instead of ">" so that exactly FLAGS.num_batches
        # steps are accumulated (the averages below divide by num_batches).
        if i >= num_steps_burn_in:
            if not i % 10:
                num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
                examples_per_sec = num_examples_per_step / duration
                # sec_per_batch = duration / FLAGS.num_gpus
                sec_per_batch = duration
                format_str = (
                    '%s: step %d, loss= %.2f (%.1f examples/sec; %.3f '
                    'sec/batch batch_size= %d)')
                # BUG FIX: the original passed ``duration`` into the
                # "examples/sec" slot and never used examples_per_sec.
                print(format_str %
                      (datetime.now(), i - num_steps_burn_in, loss_value,
                       examples_per_sec, sec_per_batch, num_examples_per_step))
            total_duration += duration
            total_duration_squared += duration * duration
    # Mean and standard deviation of the per-step durations.
    mn = total_duration / FLAGS.num_batches
    vr = total_duration_squared / FLAGS.num_batches - mn * mn
    sd = math.sqrt(vr)
    print('%s: FwdBwd across %d steps, %.3f +/- %.3f sec / batch' %
          (datetime.now(), FLAGS.num_batches, mn, sd))
def run_benchmark():
    """Build the multi-GPU training graph and time it.

    One model tower is constructed per GPU (variables shared via
    reuse_variables); gradients are averaged on the CPU and applied once.
    """
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        # Create a variable to count the number of train() calls. This equals the
        # number of batches processed * FLAGS.num_gpus.
        global_step = tf.get_variable(
            'global_step', [],
            initializer=tf.constant_initializer(0),
            trainable=False)
        # Calculate the learning rate schedule.
        # NOTE(review): num_batches_per_epoch / decay_steps are computed but
        # never used below (Adam with a fixed rate is used instead).
        num_batches_per_epoch = (NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
                                 FLAGS.batch_size)
        decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
        # Create an optimizer that performs gradient descent.
        opt = tf.train.AdamOptimizer(0.001)
        #train_dataset = reader.create_datasets("imdb.pkl", VOCAB_SIZE)
        # Calculate the gradients for each model tower.
        tower_grads = []
        for i in xrange(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % i):
                with tf.name_scope('%s_%d' % (TOWER_NAME, i)) as scope:
                    # Calculate the loss for one tower of the model. This function
                    # constructs the entire model but shares the variables across
                    # all towers.
                    # (This rebinds the module-level name ``loss`` to the tensor
                    # of the current tower; the last tower's loss is timed below.)
                    loss = tower_loss(scope)
                    # Reuse variables for the next tower.
                    tf.get_variable_scope().reuse_variables()
                    # Retain the summaries from the final tower.
                    # summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
                    # Calculate the gradients for the batch of data on this tower.
                    grads = opt.compute_gradients(loss)
                    # Keep track of the gradients across all towers.
                    tower_grads.append(grads)
        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = average_gradients(tower_grads)
        # Apply the gradients to adjust the shared variables.
        apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
        # Group all updates to into a single train op.
        train_op = tf.group(apply_gradient_op)
        # Build an initialization operation.
        init = tf.initialize_all_variables()
        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU
        # implementations.
        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)
        time_tensorflow_run(sess, [train_op, loss])
def main(_):
    """Entry point for tf.app.run(); the argument is the leftover argv."""
    run_benchmark()
if __name__ == '__main__':
    # tf.app.run() parses the flags defined above and then calls main().
    tf.app.run()
| apache-2.0 |
alvin319/CarnotKE | jyhton/lib-python/2.7/distutils/tests/test_check.py | 81 | 4050 | # -*- encoding: utf8 -*-
"""Tests for distutils.command.check."""
import unittest
from test.test_support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Tests for the ``distutils check`` command (metadata and
    reStructuredText validation)."""

    def _run(self, metadata=None, **options):
        # Build a distribution with the given metadata, configure a check
        # command with *options*, run it, and return the command object so
        # callers can inspect e.g. cmd._warnings.
        if metadata is None:
            metadata = {}
        pkg_info, dist = self.create_dist(**metadata)
        cmd = check(dist)
        cmd.initialize_options()
        for name, value in options.items():
            setattr(cmd, name, value)
        cmd.ensure_finalized()
        cmd.run()
        return cmd

    def test_check_metadata(self):
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self._run()
        self.assertEqual(cmd._warnings, 2)

        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})

        # and of course, no error when all metadata are present
        cmd = self._run(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)

        # now a test with Unicode entries
        metadata = {'url': u'xxx', 'author': u'\u00c9ric',
                    'author_email': u'xxx', u'name': 'xxx',
                    'version': u'xxx',
                    'description': u'Something about esszet \u00df',
                    'long_description': u'More things about esszet \u00df'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

    def test_check_document(self):
        # _check_rst_data should flag malformed reST and accept valid reST.
        if not HAS_DOCUTILS: # won't test without docutils
            return
        pkg_info, dist = self.create_dist()
        cmd = check(dist)

        # let's see if it detects broken rest
        # (the underline is shorter than the title, which is an error)
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)

        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)

    def test_check_restructuredtext(self):
        if not HAS_DOCUTILS: # won't test without docutils
            return
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)

        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self._run, metadata,
                          **{'strict': 1, 'restructuredtext': 1})

        # and non-broken rest, including a non-ASCII character to test #12114
        metadata['long_description'] = u'title\n=====\n\ntest \u00df'
        cmd = self._run(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

    def test_check_all(self):
        # strict + restructuredtext with empty metadata must raise.
        metadata = {'url': 'xxx', 'author': 'xxx'}
        self.assertRaises(DistutilsSetupError, self._run,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})
def test_suite():
    """Return a TestSuite with all tests of this module (regrtest hook)."""
    return unittest.makeSuite(CheckTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
| apache-2.0 |
ashione/bamslips | task/fund_realtime_net_unit.py | 1 | 1294 | from apscheduler.schedulers.blocking import BlockingScheduler
from bamslips.fund.fund_realtime_info import paralle_get_fund_realtime_info_according_fund_code
from bamslips.conf.fund_code_list import code_list
sched = BlockingScheduler()

# Previous single-job version, kept for reference:
#@sched.scheduled_job('cron',id='fund_realtime_fun_info_job',hour='0-4,9-16,21-24',minute="*/10")
#def sch_append_fund_realtime_info():
#    paralle_get_fund_realtime_info_according_fund_code()

# The fund code list is split into four interleaved shards
# (code_list[k::4]); each job fires on a staggered minute ("k/10" =
# minutes k, k+10, k+20, ...), so the shards are fetched one minute apart
# instead of all at once.
# NOTE(review): hour='0-4,9-16,21-24' -- cron hours normally run 0-23;
# confirm APScheduler accepts the '24' endpoint of the last range.
@sched.scheduled_job('cron',id='fund_realtime_fun_info_job_0',hour='0-4,9-16,21-24',minute="0/10")
def sch_append_fund_realtime_info_0():
    # Shard 0: codes at indices 0, 4, 8, ...
    paralle_get_fund_realtime_info_according_fund_code(code_list[0::4])

@sched.scheduled_job('cron',id='fund_realtime_fun_info_job_1',hour='0-4,9-16,21-24',minute="1/10")
def sch_append_fund_realtime_info_1():
    # Shard 1: codes at indices 1, 5, 9, ...
    paralle_get_fund_realtime_info_according_fund_code(code_list[1::4])

@sched.scheduled_job('cron',id='fund_realtime_fun_info_job_2',hour='0-4,9-16,21-24',minute="2/10")
def sch_append_fund_realtime_info_2():
    # Shard 2: codes at indices 2, 6, 10, ...
    paralle_get_fund_realtime_info_according_fund_code(code_list[2::4])

@sched.scheduled_job('cron',id='fund_realtime_fun_info_job_3',hour='0-4,9-16,21-24',minute="3/10")
def sch_append_fund_realtime_info_3():
    # Shard 3: codes at indices 3, 7, 11, ...
    paralle_get_fund_realtime_info_according_fund_code(code_list[3::4])

# Blocks the process and runs the jobs forever.
sched.start()
| gpl-3.0 |
arunkgupta/gramps | gramps/webapp/grampsdb/view/note.py | 1 | 5867 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Douglas S. Blank <doug.blank@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# $Id: utils.py 19637 2012-05-24 17:22:14Z dsblank $
#
""" Views for Person, Name, and Surname """
## Gramps Modules
from webapp.utils import _, boolean, update_last_changed, StyledNoteFormatter, parse_styled_text, build_search
from webapp.grampsdb.models import Note
from webapp.grampsdb.forms import *
from webapp.libdjango import DjangoInterface
from webapp.dbdjango import DbDjango
## Django Modules
from django.shortcuts import get_object_or_404, render_to_response, redirect
from django.template import Context, RequestContext
## Globals
# Shared helpers used by the view functions below.
dji = DjangoInterface()        # low-level Django ORM helper layer
db = DbDjango()                # Gramps database API backed by Django models
snf = StyledNoteFormatter(db)  # renders gen.lib notes to styled markup
def process_note(request, context, handle, act, add_to=None): # view, edit, save
    """
    Process act on a note. Can return a redirect.

    :param request: the Django HTTP request
    :param context: template context dict, updated in place
    :param handle: the note handle, or "add" to start a new note
    :param act: one of view, edit, add, create, save, delete,
                share, save-share (may be overridden by POST "action")
    :param add_to: optional (item_type, handle) pair of a parent object
                   the note should be attached to
    """
    context["tview"] = _("Note")
    context["tviews"] = _("Notes")
    context["action"] = "view"
    view_template = "view_note_detail.html"
    if handle == "add":
        act = "add"
    # A POSTed "action" field overrides the act from the URL.
    if request.POST.has_key("action"):
        act = request.POST.get("action")
    # Handle: edit, view, add, create, save, delete, share, save-share
    if act == "share":
        # Show the picker so an existing note can be attached to add_to.
        item, handle = add_to
        context["pickform"] = PickForm("Pick note",
                                       Note,
                                       (),
                                       request.POST)
        context["object_handle"] = handle
        context["object_type"] = item
        return render_to_response("pick.html", context)
    elif act == "save-share":
        # Attach the picked note to the parent object and redirect back.
        item, handle = add_to
        pickform = PickForm("Pick note",
                            Note,
                            (),
                            request.POST)
        if pickform.data["picklist"]:
            parent_model = dji.get_model(item) # what model?
            parent_obj = parent_model.objects.get(handle=handle) # to add
            ref_handle = pickform.data["picklist"]
            ref_obj = Note.objects.get(handle=ref_handle)
            dji.add_note_ref(parent_obj, ref_obj)
            dji.rebuild_cache(parent_obj) # rebuild cache
            return redirect("/%s/%s%s#tab-notes" % (item, handle, build_search(request)))
        else:
            # Nothing picked: re-show the picker.
            context["pickform"] = pickform
            context["object_handle"] = handle
            context["object_type"] = item
            return render_to_response("pick.html", context)
    elif act == "add":
        # Blank form for a brand new note (not yet persisted).
        note = Note(gramps_id=dji.get_next_id(Note, "N"))
        notetext = ""
        noteform = NoteForm(instance=note, initial={"notetext": notetext})
        noteform.model = note
    elif act in ["view", "edit"]:
        # Load the note and render its styled text for display/editing.
        note = Note.objects.get(handle=handle)
        genlibnote = db.get_note_from_handle(note.handle)
        notetext = snf.format(genlibnote)
        noteform = NoteForm(instance=note, initial={"notetext": notetext})
        noteform.model = note
    elif act == "save":
        # Save edits to an existing note; fall back to "edit" on errors.
        note = Note.objects.get(handle=handle)
        notetext = ""
        noteform = NoteForm(request.POST, instance=note, initial={"notetext": notetext})
        noteform.model = note
        if noteform.is_valid():
            update_last_changed(note, request.user.username)
            notedata = parse_styled_text(noteform.data["notetext"])
            note.text = notedata[0]
            note = noteform.save()
            dji.save_note_markup(note, notedata[1])
            dji.rebuild_cache(note)
            notetext = noteform.data["notetext"]
            act = "view"
        else:
            notetext = noteform.data["notetext"]
            act = "edit"
    elif act == "create":
        # Persist a brand new note; optionally attach it to add_to.
        note = Note(handle=create_id())
        notetext = ""
        noteform = NoteForm(request.POST, instance=note, initial={"notetext": notetext})
        noteform.model = note
        if noteform.is_valid():
            update_last_changed(note, request.user.username)
            notedata = parse_styled_text(noteform.data["notetext"])
            note.text = notedata[0]
            note = noteform.save()
            dji.save_note_markup(note, notedata[1])
            dji.rebuild_cache(note)
            if add_to:
                item, handle = add_to
                model = dji.get_model(item)
                obj = model.objects.get(handle=handle)
                dji.add_note_ref(obj, note)
                dji.rebuild_cache(obj)
                return redirect("/%s/%s#tab-notes" % (item, handle))
            notetext = noteform.data["notetext"]
            act = "view"
        else:
            notetext = noteform.data["notetext"]
            act = "add"
    elif act == "delete":
        # FIXME: delete markup too for this note
        note = Note.objects.get(handle=handle)
        note.delete()
        return redirect("/note/")
    else:
        raise Exception("Unhandled act: '%s'" % act)

    # Default rendering path for view/edit/add outcomes.
    context["noteform"] = noteform
    context["object"] = note
    context["notetext"] = notetext
    context["note"] = note
    context["action"] = act

    return render_to_response(view_template, context)
icomms/wqmanager | reportlab/graphics/samples/bubble.py | 7 | 3570 | #Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.charts.legends import Legend
from reportlab.graphics.charts.lineplots import ScatterPlot
from reportlab.graphics.shapes import Drawing, _DrawingEditorMixin, String
from reportlab.graphics.charts.textlabels import Label
from excelcolors import *
class Bubble(_DrawingEditorMixin,Drawing):
    """Autogenerated bubble-chart sample drawing: a ScatterPlot with
    circular markers, a title and a legend, styled with the Excel-like
    colors imported above."""
    def __init__(self,width=200,height=150,*args,**kw):
        # BUG FIX (portability): replaced the Python-2-only builtin
        # ``apply(Drawing.__init__,(self,width,height)+args,kw)`` with the
        # equivalent direct call, which behaves identically and also works
        # on Python 3.
        Drawing.__init__(self, width, height, *args, **kw)
        # Main scatter plot with bubble-style (large circle) markers.
        self._add(self,ScatterPlot(),name='chart',validate=None,desc="The main chart")
        self.chart.width = 115
        self.chart.height = 80
        self.chart.x = 30
        self.chart.y = 40
        self.chart.lines[0].strokeColor = color01
        self.chart.lines[1].strokeColor = color02
        self.chart.lines[2].strokeColor = color03
        self.chart.lines[3].strokeColor = color04
        self.chart.lines[4].strokeColor = color05
        self.chart.lines[5].strokeColor = color06
        self.chart.lines[6].strokeColor = color07
        self.chart.lines[7].strokeColor = color08
        self.chart.lines[8].strokeColor = color09
        self.chart.lines[9].strokeColor = color10
        self.chart.lines.symbol.kind ='Circle'
        self.chart.lines.symbol.size = 15
        self.chart.fillColor = backgroundGrey
        self.chart.lineLabels.fontName = 'Helvetica'
        self.chart.xValueAxis.labels.fontName = 'Helvetica'
        self.chart.xValueAxis.labels.fontSize = 7
        self.chart.xValueAxis.forceZero = 0
        self.chart.data = [((100,100), (200,200), (250,210), (300,300), (350,450))]
        self.chart.xValueAxis.avoidBoundFrac = 1
        self.chart.xValueAxis.gridEnd = 115
        self.chart.xValueAxis.tickDown = 3
        self.chart.xValueAxis.visibleGrid = 1
        self.chart.yValueAxis.tickLeft = 3
        self.chart.yValueAxis.labels.fontName = 'Helvetica'
        self.chart.yValueAxis.labels.fontSize = 7
        # Chart title, centered above the plot area.
        self._add(self,Label(),name='Title',validate=None,desc="The title at the top of the chart")
        self.Title.fontName = 'Helvetica-Bold'
        self.Title.fontSize = 7
        self.Title.x = 100
        self.Title.y = 135
        self.Title._text = 'Chart Title'
        self.Title.maxWidth = 180
        self.Title.height = 20
        self.Title.textAnchor ='middle'
        # Legend to the right of the plot.
        self._add(self,Legend(),name='Legend',validate=None,desc="The legend or key for the chart")
        self.Legend.colorNamePairs = [(color01, 'Widgets')]
        self.Legend.fontName = 'Helvetica'
        self.Legend.fontSize = 7
        self.Legend.x = 153
        self.Legend.y = 85
        self.Legend.dxTextSpace = 5
        self.Legend.dy = 5
        self.Legend.dx = 5
        self.Legend.deltay = 5
        self.Legend.alignment ='right'
        self.chart.lineLabelFormat = None
        self.chart.xLabel = 'X Axis'
        self.chart.y = 30
        self.chart.yLabel = 'Y Axis'
        self.chart.yValueAxis.labelTextFormat = '%d'
        self.chart.yValueAxis.forceZero = 1
        self.chart.xValueAxis.forceZero = 1
        self._add(self,0,name='preview',validate=None,desc=None)
if __name__=="__main__": #NORUNTESTS
    # Render the sample chart to bubble.pdf in the current directory.
    Bubble().save(formats=['pdf'],outDir=None,fnRoot='bubble')
| bsd-3-clause |
sebastic/QGIS | python/ext-libs/yaml/nodes.py | 985 | 1440 |
class Node(object):
    """Base class for YAML representation-graph nodes.

    A node carries a resolved tag, a value, and the start/end marks
    pointing back into the parsed source.
    """

    def __init__(self, tag, value, start_mark, end_mark):
        self.tag = tag
        self.value = value
        self.start_mark = start_mark
        self.end_mark = end_mark

    def __repr__(self):
        # Show the tag with %r and the value through its own repr();
        # marks are deliberately omitted to keep the output short.
        return '%s(tag=%r, value=%s)' % (
            self.__class__.__name__, self.tag, repr(self.value))
class ScalarNode(Node):
    """Representation node holding a scalar value."""

    # Identifier used by the composer/serializer and in error messages.
    id = 'scalar'

    def __init__(self, tag, value,
            start_mark=None, end_mark=None, style=None):
        # Delegate the shared attributes to Node; only the scalar
        # presentation style is specific to this subclass.
        Node.__init__(self, tag, value, start_mark, end_mark)
        self.style = style
class CollectionNode(Node):
    """Base for nodes whose value is a collection (sequence or mapping)."""

    def __init__(self, tag, value,
            start_mark=None, end_mark=None, flow_style=None):
        # Delegate the shared attributes to Node; collections add only
        # the flow/block presentation style.
        Node.__init__(self, tag, value, start_mark, end_mark)
        self.flow_style = flow_style
class SequenceNode(CollectionNode):
    # Identifier used by the composer/serializer and in error messages.
    id = 'sequence'
class MappingNode(CollectionNode):
    # Identifier used by the composer/serializer and in error messages.
    id = 'mapping'
| gpl-2.0 |
readbeyond/aeneas | aeneas/syncmap/fragmentlist.py | 5 | 31617 | #!/usr/bin/env python
# coding=utf-8
# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import print_function
from copy import deepcopy
from bisect import insort
from aeneas.exacttiming import TimeInterval
from aeneas.exacttiming import TimeValue
from aeneas.logger import Loggable
from aeneas.syncmap.fragment import SyncMapFragment
from aeneas.textfile import TextFragment
import aeneas.globalconstants as gc
class SyncMapFragmentList(Loggable):
    """
    A type representing a list of sync map fragments,
    with some constraints:

    * the begin and end time of each fragment should be within the list begin and end times;
    * two time fragments can only overlap at the boundary;
    * the list is kept sorted.

    This class has some convenience methods for
    clipping, offsetting, moving fragment boundaries,
    and fixing fragments with zero length.

    :param begin: the begin time
    :type  begin: :class:`~aeneas.exacttiming.TimeValue`
    :param end: the end time
    :type  end: :class:`~aeneas.exacttiming.TimeValue`
    :raises TypeError: if ``begin`` or ``end`` are not instances of :class:`~aeneas.exacttiming.TimeValue`
    :raises ValueError: if ``begin`` is negative or if ``begin`` is bigger than ``end``

    .. versionadded:: 1.7.0
    """

    # Any pair of intervals in the list must be in one of these relative
    # positions, i.e. disjoint or touching only at a single boundary point.
    ALLOWED_POSITIONS = [
        TimeInterval.RELATIVE_POSITION_PP_L,
        TimeInterval.RELATIVE_POSITION_PP_C,
        TimeInterval.RELATIVE_POSITION_PP_G,
        TimeInterval.RELATIVE_POSITION_PI_LL,
        TimeInterval.RELATIVE_POSITION_PI_LC,
        TimeInterval.RELATIVE_POSITION_PI_CG,
        TimeInterval.RELATIVE_POSITION_PI_GG,
        TimeInterval.RELATIVE_POSITION_IP_L,
        TimeInterval.RELATIVE_POSITION_IP_B,
        TimeInterval.RELATIVE_POSITION_IP_E,
        TimeInterval.RELATIVE_POSITION_IP_G,
        TimeInterval.RELATIVE_POSITION_II_LL,
        TimeInterval.RELATIVE_POSITION_II_LB,
        TimeInterval.RELATIVE_POSITION_II_EG,
        TimeInterval.RELATIVE_POSITION_II_GG,
    ]
    """ Allowed positions for any pair of time intervals in the list """

    # Tag used by the Loggable base class when emitting log messages.
    TAG = u"SyncMapFragmentList"
    def __init__(self, begin, end, rconf=None, logger=None):
        # Validate types first, then values, so that the caller gets the
        # most specific exception (see the class docstring).
        if not isinstance(begin, TimeValue):
            raise TypeError(u"begin is not an instance of TimeValue")
        if not isinstance(end, TimeValue):
            raise TypeError(u"end is not an instance of TimeValue")
        if begin < 0:
            raise ValueError(u"begin is negative")
        if begin > end:
            raise ValueError(u"begin is bigger than end")
        super(SyncMapFragmentList, self).__init__(rconf=rconf, logger=logger)
        self.begin = begin
        self.end = end
        self.__sorted = True    # True while the list is guaranteed sorted
        self.__fragments = []   # sorted list of SyncMapFragment objects
    def __len__(self):
        # Number of fragments currently in the list.
        return len(self.__fragments)

    def __getitem__(self, index):
        return self.__fragments[index]

    def __setitem__(self, index, value):
        # NOTE: direct assignment does not re-check the sorting or overlap
        # invariants; callers are expected to preserve them.
        self.__fragments[index] = value
def _is_valid_index(self, index):
"""
Return ``True`` if and only if the given ``index``
is valid.
"""
if isinstance(index, int):
return (index >= 0) and (index < len(self))
if isinstance(index, list):
valid = True
for i in index:
valid = valid or self._is_valid_index(i)
return valid
return False
def _check_boundaries(self, fragment):
"""
Check that the interval of the given fragment
is within the boundaries of the list.
Raises an error if not OK.
"""
if not isinstance(fragment, SyncMapFragment):
raise TypeError(u"fragment is not an instance of SyncMapFragment")
interval = fragment.interval
if not isinstance(interval, TimeInterval):
raise TypeError(u"interval is not an instance of TimeInterval")
if (self.begin is not None) and (interval.begin < self.begin):
raise ValueError(u"interval.begin is before self.begin")
if (self.end is not None) and (interval.end > self.end):
raise ValueError(u"interval.end is after self.end")
    def _check_overlap(self, fragment):
        """
        Check that the interval of the given fragment does not overlap
        any existing interval in the list (except at its boundaries).
        Raises an error if not OK.
        """
        #
        # NOTE bisect does not work if there is a configuration like:
        #
        #      ***********    <- existing interval
        #           ***        <- query interval
        #
        # TODO one should probably check this by doing bisect
        #      over the begin and end lists separately
        #
        # Hence the linear scan over all existing fragments below
        # (O(n) per insertion check).
        for existing_fragment in self.fragments:
            if existing_fragment.interval.relative_position_of(fragment.interval) not in self.ALLOWED_POSITIONS:
                self.log_exc(u"interval overlaps another already present interval", None, True, ValueError)
def _check_min_max_indices(self, min_index=None, max_index=None):
"""
Ensure the given start/end fragment indices make sense:
if one of them is ``None`` (i.e., not specified),
then set it to ``0`` or ``len(self)``.
"""
min_index = min_index or 0
max_index = max_index or len(self)
if min_index < 0:
self.log_exc(u"min_index is negative", None, True, ValueError)
if max_index > len(self):
self.log_exc(u"max_index is bigger than the number of intervals in the list", None, True, ValueError)
return min_index, max_index
    def clone(self):
        """
        Return a deep copy of this fragment list object
        (including deep copies of all its fragments).

        :rtype: :class:`~aeneas.syncmap.fragmentlist.SyncMapFragmentList`
        """
        return deepcopy(self)

    @property
    def is_guaranteed_sorted(self):
        """
        Return ``True`` if the list is sorted,
        and ``False`` if it might not be sorted
        (for example, because an ``add(..., sort=False)`` operation
        was performed).

        :rtype: bool
        """
        return self.__sorted

    @property
    def fragments(self):
        """
        Iterates through the fragments in the list
        (which are sorted).

        :rtype: generator of :class:`~aeneas.syncmap.SyncMapFragment`
        """
        for fragment in self.__fragments:
            yield fragment
@property
def regular_fragments(self):
"""
Iterates through the regular fragments in the list
(which are sorted).
:rtype: generator of (int, :class:`~aeneas.syncmap.SyncMapFragment`)
"""
for i, fragment in enumerate(self.__fragments):
if fragment.fragment_type == SyncMapFragment.REGULAR:
yield (i, fragment)
@property
def nonspeech_fragments(self):
"""
Iterates through the nonspeech fragments in the list
(which are sorted).
:rtype: generator of (int, :class:`~aeneas.syncmap.SyncMapFragment`)
"""
for i, fragment in enumerate(self.__fragments):
if fragment.fragment_type == SyncMapFragment.NONSPEECH:
yield (i, fragment)
def remove(self, indices):
"""
Remove the fragments corresponding to the given list of indices.
:param indices: the list of indices to be removed
:type indices: list of int
:raises ValueError: if one of the indices is not valid
"""
if not self._is_valid_index(indices):
self.log_exc(u"The given list of indices is not valid", None, True, ValueError)
new_fragments = []
sorted_indices = sorted(indices)
i = 0
j = 0
while (i < len(self)) and (j < len(sorted_indices)):
if i != sorted_indices[j]:
new_fragments.append(self[i])
else:
j += 1
i += 1
while i < len(self):
new_fragments.append(self[i])
i += 1
self.__fragments = new_fragments
    def sort(self):
        """
        Sort the fragments in the list.

        :raises ValueError: if there is a fragment which violates
                            the list constraints
        """
        # No-op if no add(..., sort=False) happened since the last sort.
        if self.is_guaranteed_sorted:
            self.log(u"Already sorted, returning")
            return
        self.log(u"Sorting...")
        self.__fragments = sorted(self.__fragments)
        self.log(u"Sorting... done")
        self.log(u"Checking relative positions...")
        # After sorting, every adjacent pair must still be in an allowed
        # (non-overlapping) relative position.
        for i in range(len(self) - 1):
            current_interval = self[i].interval
            next_interval = self[i + 1].interval
            if current_interval.relative_position_of(next_interval) not in self.ALLOWED_POSITIONS:
                self.log(u"Found overlapping fragments:")
                self.log([u" Index %d => %s", i, current_interval])
                self.log([u" Index %d => %s", i + 1, next_interval])
                self.log_exc(u"The list contains two fragments overlapping in a forbidden way", None, True, ValueError)
        self.log(u"Checking relative positions... done")
        self.__sorted = True

    def remove_nonspeech_fragments(self, zero_length_only=False):
        """
        Remove ``NONSPEECH`` fragments from the list.

        If ``zero_length_only`` is ``True``, remove only
        those fragments with zero length,
        and make all the others ``REGULAR``.

        :param bool zero_length_only: remove only zero length NONSPEECH fragments
        """
        self.log(u"Removing nonspeech fragments...")
        nonspeech = list(self.nonspeech_fragments)
        if zero_length_only:
            # Restrict the removal set to the zero-length nonspeech ones.
            nonspeech = [(i, f) for i, f in nonspeech if f.has_zero_length]
        nonspeech_indices = [i for i, f in nonspeech]
        self.remove(nonspeech_indices)
        if zero_length_only:
            # The surviving nonspeech fragments become regular fragments.
            for i, f in list(self.nonspeech_fragments):
                f.fragment_type = SyncMapFragment.REGULAR
        self.log(u"Removing nonspeech fragments... done")
def has_zero_length_fragments(self, min_index=None, max_index=None):
"""
Return ``True`` if the list has at least one interval
with zero length withing ``min_index`` and ``max_index``.
If the latter are not specified, check all intervals.
:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
is bigger than the current number of fragments
:rtype: bool
"""
min_index, max_index = self._check_min_max_indices(min_index, max_index)
zero = [i for i in range(min_index, max_index) if self[i].has_zero_length]
self.log([u"Fragments with zero length: %s", zero])
return (len(zero) > 0)
def has_adjacent_fragments_only(self, min_index=None, max_index=None):
"""
Return ``True`` if the list contains only adjacent fragments,
that is, if it does not have gaps.
:param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
:param int max_index: examine fragments with index smaller than this index (i.e., excluded)
:raises ValueError: if ``min_index`` is negative or ``max_index``
is bigger than the current number of fragments
:rtype: bool
"""
min_index, max_index = self._check_min_max_indices(min_index, max_index)
for i in range(min_index, max_index - 1):
current_interval = self[i].interval
next_interval = self[i + 1].interval
if not current_interval.is_adjacent_before(next_interval):
self.log(u"Found non adjacent fragments")
self.log([u" Index %d => %s", i, current_interval])
self.log([u" Index %d => %s", i + 1, next_interval])
return False
return True
def add(self, fragment, sort=True):
    """
    Add the given fragment to the list (and keep the latter sorted).

    An error is raised if the fragment cannot be added,
    for example if its interval violates the list constraints.

    :param fragment: the fragment to be added
    :type fragment: :class:`~aeneas.syncmap.SyncMapFragment`
    :param bool sort: if ``True`` ensure that after the insertion the list is kept sorted
    :raises TypeError: if ``interval`` is not an instance of ``TimeInterval``
    :raises ValueError: if ``interval`` does not respect the boundaries of the list
                        or if it overlaps an existing interval,
                        or if ``sort=True`` but the list is not guaranteed sorted
    """
    self._check_boundaries(fragment)
    if not sort:
        # fast path: append at the end and invalidate the sorted flag
        self.__fragments.append(fragment)
        self.__sorted = False
        return
    # sorted insertion is only allowed while the list is guaranteed sorted
    if not self.is_guaranteed_sorted:
        self.log_exc(u"Unable to add with sort=True if the list is not guaranteed sorted", None, True, ValueError)
    self._check_overlap(fragment)
    insort(self.__fragments, fragment)
def offset(self, offset):
    """
    Move all the intervals in the list by the given ``offset``.

    :param offset: the shift to be applied
    :type offset: :class:`~aeneas.exacttiming.TimeValue`
    :raises TypeError: if ``offset`` is not an instance of ``TimeValue``
    """
    self.log(u"Applying offset to all fragments...")
    self.log([u" Offset %.3f", offset])
    for frag in self.fragments:
        # clamp each interval to the [begin, end] span of this list
        frag.interval.offset(
            offset=offset,
            allow_negative=False,
            min_begin_value=self.begin,
            max_end_value=self.end
        )
    self.log(u"Applying offset to all fragments... done")
def move_transition_point(self, fragment_index, value):
    """
    Change the transition point between fragment ``fragment_index``
    and the next fragment to the time value ``value``.

    This method fails silently
    (without changing the fragment list)
    if at least one of the following conditions holds:

    * ``fragment_index`` is negative
    * ``fragment_index`` is the last or the second-to-last
    * ``value`` is after the current end of the next fragment
    * the current fragment and the next one are not adjacent and both proper intervals (not zero length)

    The above conditions ensure that the move makes sense
    and that it keeps the list satisfying the constraints.

    :param int fragment_index: the fragment index whose end should be moved
    :param value: the new transition point
    :type value: :class:`~aeneas.exacttiming.TimeValue`
    """
    self.log(u"Called move_transition_point with")
    self.log([u" fragment_index %d", fragment_index])
    self.log([u" value %.3f", value])
    last_movable_index = len(self) - 3
    if not (0 <= fragment_index <= last_movable_index):
        self.log(u"Bad fragment_index, returning")
        return
    left = self[fragment_index].interval
    right = self[fragment_index + 1].interval
    if value > right.end:
        self.log(u"Bad value, returning")
        return
    if not left.is_non_zero_before_non_zero(right):
        self.log(u"Bad interval configuration, returning")
        return
    # both guards passed: move the shared boundary
    left.end = value
    right.begin = value
    self.log(u"Moved transition point")
def fragments_ending_inside_nonspeech_intervals(
        self,
        nonspeech_intervals,
        tolerance
):
    """
    Determine a list of pairs (nonspeech interval, fragment index),
    such that the nonspeech interval contains exactly one fragment
    ending inside it (within the given tolerance) and
    adjacent to the next fragment.

    :param nonspeech_intervals: the list of nonspeech intervals to be examined
    :type nonspeech_intervals: list of :class:`~aeneas.exacttiming.TimeInterval`
    :param tolerance: the tolerance to be applied when checking if the end point
                      falls within a given nonspeech interval
    :type tolerance: :class:`~aeneas.exacttiming.TimeValue`
    :rtype: list of (:class:`~aeneas.exacttiming.TimeInterval`, int)
    """
    self.log(u"Called fragments_ending_inside_nonspeech_intervals")
    self.log([u" List begin: %.3f", self.begin])
    self.log([u" List end: %.3f", self.end])
    # Two-pointer sweep over nonspeech intervals (nsi_index) and
    # fragments (frag_index); assumes both sequences are time-ordered.
    nsi_index = 0
    frag_index = 0
    # nsi_counter[i] is (interval, [indices of fragments ending inside it]);
    # an entry is invalidated by replacing it with (None, []).
    nsi_counter = [(n, []) for n in nonspeech_intervals]
    # NOTE the last fragment is not eligible to be returned
    while (nsi_index < len(nonspeech_intervals)) and (frag_index < len(self) - 1):
        nsi = nonspeech_intervals[nsi_index]
        if nsi.end > self.end:
            self.log(u" nsi ends after self.end => breaking")
            break
        # shadow = nsi extended by the tolerance on both sides
        nsi_shadow = nsi.shadow(tolerance)
        frag = self[frag_index]
        self.log([u" nsi %s", nsi])
        self.log([u" nsi_shadow %s", nsi_shadow])
        self.log([u" frag %s", frag.interval])
        if not frag.is_head_or_tail:
            self.log(u" Fragment is not HEAD or TAIL => inspecting it")
            if nsi_shadow.contains(frag.end):
                if nsi_shadow.contains(frag.begin):
                    #
                    #      *************** nsi shadow
                    #      |     ***********   | nsi
                    #      | ***X              | frag (X=frag.end)
                    #
                    # NOTE this case might happen as the following:
                    #
                    #      *************** nsi shadow
                    #      |       ***     | nsi
                    #      | **X           | frag (X=frag.end)
                    #
                    # so we must invalidate the nsi if this happens
                    #
                    nsi_counter[nsi_index] = (None, [])
                    nsi_index += 1
                    frag_index += 1
                    self.log(u" nsi_shadow entirely contains frag => invalidate nsi, and skip to next fragment, nsi")
                else:
                    #
                    #      *************** nsi shadow
                    #      |    ***********    | nsi
                    # *****|***X               | frag (X=frag.end)
                    #
                    nsi_counter[nsi_index][1].append(frag_index)
                    frag_index += 1
                    self.log(u" nsi_shadow contains frag end only => save it and go to next fragment")
            elif nsi_shadow.begin > frag.end:
                #
                #       *************** nsi shadow
                #       |   ***********    | nsi
                # **X   |                  | frag (X=frag.end)
                #
                frag_index += 1
                self.log(u" nsi_shadow begins after frag end => skip to next fragment")
            else:
                #
                #      *************** nsi shadow
                #      |    ***********    | nsi
                #      |         *****|**X   frag (X=frag.end)
                #
                nsi_index += 1
                self.log(u" nsi_shadow ends before frag end => skip to next nsi")
        else:
            self.log(u" Fragment is HEAD or TAIL => skipping it")
            frag_index += 1
        self.log(u"")
    # keep only nsi entries with exactly one fragment ending inside them
    tbr = [(n, c[0]) for (n, c) in nsi_counter if len(c) == 1]
    self.log([u"Returning: %s", tbr])
    return tbr
def inject_long_nonspeech_fragments(self, pairs, replacement_string):
    """
    Inject nonspeech fragments corresponding to the given intervals
    in this fragment list.

    It is assumed that ``pairs`` are consistent, e.g. they are produced
    by ``fragments_ending_inside_nonspeech_intervals``.

    :param list pairs: list of ``(TimeInterval, int)`` pairs,
                       each identifying a nonspeech interval and
                       the corresponding fragment index ending inside it
    :param string replacement_string: the string to be applied to the nonspeech intervals
    """
    self.log(u"Called inject_long_nonspeech_fragments")
    # set the appropriate fragment text
    if replacement_string in [None, gc.PPV_TASK_ADJUST_BOUNDARY_NONSPEECH_REMOVE]:
        self.log(u" Remove long nonspeech")
        lines = []
    else:
        self.log([u" Replace long nonspeech with '%s'", replacement_string])
        lines = [replacement_string]
    # first, make room for the nonspeech intervals
    self.log(u" First pass: making room...")
    for nsi, index in pairs:
        # shrink the fragment ending inside nsi and the one after it,
        # leaving the [nsi.begin, nsi.end] span uncovered
        self[index].interval.end = nsi.begin
        self[index + 1].interval.begin = nsi.end
    self.log(u" First pass: making room... done")
    self.log(u" Second pass: append nonspeech intervals...")
    for i, (nsi, index) in enumerate(pairs, 1):
        # synthetic identifiers: n000001, n000002, ...
        identifier = u"n%06d" % i
        self.add(SyncMapFragment(
            text_fragment=TextFragment(
                identifier=identifier,
                language=None,
                lines=lines,
                filtered_lines=lines
            ),
            interval=nsi,
            fragment_type=SyncMapFragment.NONSPEECH
        ), sort=False)
    self.log(u" Second pass: append nonspeech intervals... done")
    self.log(u" Third pass: sorting...")
    # appended with sort=False above, so a single final sort suffices
    self.sort()
    self.log(u" Third pass: sorting... done")
def fix_zero_length_fragments(self, duration=TimeValue("0.001"), min_index=None, max_index=None, ensure_adjacent=True):
    """
    Fix fragments with zero length,
    enlarging them to have length ``duration``,
    reclaiming the difference from the next fragment(s),
    or moving the next fragment(s) forward.

    This function assumes the fragments to be adjacent.

    :param duration: set the zero length fragments to have this duration
    :type duration: :class:`~aeneas.exacttiming.TimeValue`
    :param int min_index: examine fragments with index greater than or equal to this index (i.e., included)
    :param int max_index: examine fragments with index smaller than this index (i.e., excluded)
    :param bool ensure_adjacent: if ``True``, restore adjacency with the fragments
                                 just before ``min_index`` and just after ``max_index``
                                 when the fix moved their shared boundary
    :raises ValueError: if ``min_index`` is negative or ``max_index``
                        is bigger than the current number of fragments
    """
    self.log(u"Called fix_zero_length_fragments")
    self.log([u" Duration %.3f", duration])
    min_index, max_index = self._check_min_max_indices(min_index, max_index)
    if len(self) < 1:
        self.log(u"The list has no fragments: returning")
        return
    if not self.has_adjacent_fragments_only(min_index, max_index):
        self.log_warn(u"There are non adjacent fragments: aborting")
        return
    # remember the original boundaries shared with the fragments just
    # outside [min_index, max_index), so adjacency can be restored below
    original_first_begin = None
    if (
        (ensure_adjacent) and
        (min_index > 0) and
        (self[min_index - 1].interval.is_adjacent_before(self[min_index].interval))
    ):
        original_first_begin = self[min_index].begin
        self.log([u"Original first was adjacent with previous, starting at %.3f", original_first_begin])
    original_last_end = None
    if (
        (ensure_adjacent) and
        (len(self) > 1) and
        (max_index < len(self)) and
        (self[max_index - 1].interval.is_adjacent_before(self[max_index].interval))
    ):
        original_last_end = self[max_index - 1].end
        self.log([u"Original last was adjacent with next, ending at %.3f", original_last_end])
    i = min_index
    while i < max_index:
        if self[i].has_zero_length:
            # collect the run of fragments i..j-1 that must be enlarged
            # (zero length) or moved (shorter than the accumulated slack)
            self.log([u" Fragment %d (%s) has zero length => ENLARGE", i, self[i].interval])
            moves = [(i, "ENLARGE", duration)]
            slack = duration
            j = i + 1
            self.log([u" Entered while with j == %d", j])
            while (j < max_index) and (self[j].interval.length < slack):
                if self[j].has_zero_length:
                    self.log([u" Fragment %d (%s) has zero length => ENLARGE", j, self[j].interval])
                    moves.append((j, "ENLARGE", duration))
                    slack += duration
                else:
                    self.log([u" Fragment %d (%s) has non zero length => MOVE", j, self[j].interval])
                    moves.append((j, "MOVE", None))
                j += 1
            self.log([u" Exited while with j == %d", j])
            fixable = False
            if (j == max_index) and (self[j - 1].interval.end + slack <= self.end):
                # the run reaches max_index: push everything forward,
                # if there is room before the list end
                self.log(u" Fixable by moving back")
                current_time = self[j - 1].interval.end + slack
                fixable = True
            elif j < max_index:
                # reclaim the slack from fragment j, which is long enough
                self.log(u" Fixable by shrinking")
                self[j].interval.shrink(slack)
                current_time = self[j].interval.begin
                fixable = True
            if fixable:
                # walk the run backwards, anchoring each end at current_time
                for index, move_type, move_amount in moves[::-1]:
                    self.log([u" Calling move_end_at with %.3f at index %d", current_time, index])
                    self[index].interval.move_end_at(current_time)
                    if move_type == "ENLARGE":
                        self.log([u" Calling enlarge with %.3f at index %d", move_amount, index])
                        self[index].interval.enlarge(move_amount)
                    self.log([u" Interval %d is now: %s", index, self[index].interval])
                    current_time = self[index].interval.begin
            else:
                self.log([u"Unable to fix fragment %d (%s)", i, self[i].interval])
            # resume the scan after the run just processed
            i = j - 1
        i += 1
    if original_first_begin is not None:
        if self[min_index].begin != self[min_index - 1].end:
            self.log(u"First fragment begin moved, restoring adjacency")
            self.log([u" Original was %.3f", original_first_begin])
            self.log([u" New is %.3f", self[min_index - 1].end])
            self[min_index].begin = self[min_index - 1].end
    if original_last_end is not None:
        if self[max_index].begin != self[max_index - 1].end:
            self.log(u"Last fragment end moved, restoring adjacency")
            self.log([u" Original was %.3f", original_last_end])
            self.log([u" New is %.3f", self[max_index].begin])
            self[max_index].begin = self[max_index - 1].end
    self.log(u"Fragments after fixing:")
    for i, fragment in enumerate(self):
        self.log([u" %d => %.3f %.3f", i, fragment.interval.begin, fragment.interval.end])
def fix_fragment_rate(self, fragment_index, max_rate, aggressive=False):
    """
    Try to bring the rate of the fragment at ``fragment_index``
    down to ``max_rate`` by "stealing" slack time from the
    previous fragment and, if ``aggressive`` is ``True`` and
    that was not enough, from the next fragment.

    :param int fragment_index: the index of the fragment to be fixed
    :param max_rate: the maximum acceptable rate
    :param bool aggressive: if ``True``, also steal slack from the next fragment
    :rtype: bool -- ``True`` if the fragment rate is now ``<= max_rate``
    """
    def fix_pair(current_index, donor_index):
        # Steal slack from self[donor_index] (which must be adjacent to
        # self[current_index], before or after it) to lower the rate of
        # self[current_index]; return True if the rate is now <= max_rate.
        self.log(u"Called fix_pair")
        if (
            (current_index < 0) or
            (current_index >= len(self)) or
            (donor_index < 0) or
            (donor_index >= len(self)) or
            (abs(current_index - donor_index) > 1)
        ):
            self.log(u"Invalid index, returning False")
            return False
        donor_is_previous = donor_index < current_index
        current_fragment = self[current_index]
        donor_fragment = self[donor_index]
        if (current_fragment.rate is not None) and (current_fragment.rate <= max_rate):
            self.log(u"Current fragment rate is already <= max_rate, returning True")
            return True
        # both intervals must be proper (non zero length) and adjacent
        if donor_is_previous:
            if not donor_fragment.interval.is_non_zero_before_non_zero(current_fragment.interval):
                self.log(u"donor fragment is not adjacent before current fragment, returning False")
                return False
        else:
            if not current_fragment.interval.is_non_zero_before_non_zero(donor_fragment.interval):
                self.log(u"current fragment is not adjacent before donor fragment, returning False")
                return False
        self.log(u"Current and donor fragments are adjacent and not zero length")
        current_lack = current_fragment.rate_lack(max_rate)
        donor_slack = donor_fragment.rate_slack(max_rate)
        self.log([u"Current lack %.3f", current_lack])
        self.log([u"Donor slack %.3f", donor_slack])
        if donor_slack <= 0:
            self.log(u"Donor has no slack, returning False")
            return False
        self.log(u"Donor has some slack")
        # move the shared transition point by the usable slack
        effective_slack = min(current_lack, donor_slack)
        if donor_is_previous:
            self.move_transition_point(donor_index, donor_fragment.end - effective_slack)
        else:
            self.move_transition_point(current_index, current_fragment.end + effective_slack)
        if effective_slack == current_lack:
            self.log(u"Current lack can be fully stolen from donor")
            return True
        else:
            self.log(u"Current lack can be partially stolen from donor")
            return False
    # try fixing rate stealing slack from the previous fragment
    if fix_pair(fragment_index, fragment_index - 1):
        return True
    # if aggressive, try fixing rate stealing slack from the next fragment
    if aggressive:
        return fix_pair(fragment_index, fragment_index + 1)
    # cannot be fixed
    return False
| agpl-3.0 |
vxgmichel/asyncio | asyncio/windows_utils.py | 37 | 6844 | """
Various Windows specific bits and pieces
"""
import sys
if sys.platform != 'win32': # pragma: no cover
raise ImportError('win32 only')
import _winapi
import itertools
import msvcrt
import os
import socket
import subprocess
import tempfile
import warnings
__all__ = ['socketpair', 'pipe', 'Popen', 'PIPE', 'PipeHandle']
# Constants/globals
BUFSIZE = 8192
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
_mmap_counter = itertools.count()
if hasattr(socket, 'socketpair'):
    # Since Python 3.5, socket.socketpair() is now also available on Windows
    socketpair = socket.socketpair
else:
    # Replacement for socket.socketpair()
    def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
        """A socket pair usable as a self-pipe, for Windows.

        Origin: https://gist.github.com/4325783, by Geert Jansen.
        Public domain.
        """
        if family == socket.AF_INET:
            host = '127.0.0.1'
        elif family == socket.AF_INET6:
            host = '::1'
        else:
            raise ValueError("Only AF_INET and AF_INET6 socket address "
                             "families are supported")
        if type != socket.SOCK_STREAM:
            raise ValueError("Only SOCK_STREAM socket type is supported")
        if proto != 0:
            raise ValueError("Only protocol zero is supported")

        # We create a connected TCP socket. Note the trick with setblocking(0)
        # that prevents us from having to create a thread.
        listener = socket.socket(family, type, proto)
        try:
            listener.bind((host, 0))
            listener.listen(1)
            # On IPv6, ignore flow_info and scope_id
            addr, port = listener.getsockname()[:2]
            client = socket.socket(family, type, proto)
            try:
                client.setblocking(False)
                try:
                    client.connect((addr, port))
                except (BlockingIOError, InterruptedError):
                    # expected for a non-blocking connect; completes below
                    pass
                client.setblocking(True)
                server, _ = listener.accept()
            except:
                client.close()
                raise
        finally:
            listener.close()
        return (server, client)
# Replacement for os.pipe() using handles instead of fds
def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE):
    """Like os.pipe() but with overlapped support and using handles not fds.

    Returns a pair (h1, h2) of Windows handles: h1 is the server end of a
    named pipe created with CreateNamedPipe, h2 the client end opened with
    CreateFile. With duplex=True both ends are read/write; otherwise data
    flows from h2 (write) to h1 (read). overlapped[0]/overlapped[1] select
    FILE_FLAG_OVERLAPPED for h1/h2 respectively.
    """
    # mktemp() is acceptable here: the name is only used as a named-pipe
    # address, and FILE_FLAG_FIRST_PIPE_INSTANCE (below) makes CreateNamedPipe
    # fail if a pipe with that name already exists.
    address = tempfile.mktemp(prefix=r'\\.\pipe\python-pipe-%d-%d-' %
                              (os.getpid(), next(_mmap_counter)))

    if duplex:
        openmode = _winapi.PIPE_ACCESS_DUPLEX
        access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
        obsize, ibsize = bufsize, bufsize
    else:
        openmode = _winapi.PIPE_ACCESS_INBOUND
        access = _winapi.GENERIC_WRITE
        obsize, ibsize = 0, bufsize

    openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE

    if overlapped[0]:
        openmode |= _winapi.FILE_FLAG_OVERLAPPED

    if overlapped[1]:
        flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED
    else:
        flags_and_attribs = 0

    h1 = h2 = None
    try:
        h1 = _winapi.CreateNamedPipe(
            address, openmode, _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL)

        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            flags_and_attribs, _winapi.NULL)

        # Finish connecting the server end before returning the handles.
        ov = _winapi.ConnectNamedPipe(h1, overlapped=True)
        ov.GetOverlappedResult(True)
        return h1, h2
    except:
        # Do not leak handles if anything above failed.
        if h1 is not None:
            _winapi.CloseHandle(h1)
        if h2 is not None:
            _winapi.CloseHandle(h2)
        raise
# Wrapper for a pipe handle
class PipeHandle:
    """Wrapper for an overlapped pipe handle which is vaguely file-object like.

    The IOCP event loop can use these instead of socket objects.
    """

    def __init__(self, handle):
        self._handle = handle

    def __repr__(self):
        if self._handle is not None:
            handle = 'handle=%r' % self._handle
        else:
            handle = 'closed'
        return '<%s %s>' % (self.__class__.__name__, handle)

    @property
    def handle(self):
        return self._handle

    def fileno(self):
        """Return the underlying handle; raise if the pipe is closed."""
        if self._handle is None:
            # Fixed typo in error message ("operatioon" -> "operation").
            raise ValueError("I/O operation on closed pipe")
        return self._handle

    def close(self, *, CloseHandle=_winapi.CloseHandle):
        # CloseHandle is bound as a default argument so it remains reachable
        # when close() is invoked from __del__ during interpreter shutdown,
        # after module globals may have been cleared -- presumably; confirm.
        if self._handle is not None:
            CloseHandle(self._handle)
            self._handle = None

    def __del__(self):
        if self._handle is not None:
            warnings.warn("unclosed %r" % self, ResourceWarning)
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, t, v, tb):
        self.close()
# Replacement for subprocess.Popen using overlapped pipe handles
class Popen(subprocess.Popen):
    """Replacement for subprocess.Popen using overlapped pipe handles.

    The stdin, stdout, stderr are None or instances of PipeHandle.
    """

    def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds):
        # Overlapped handles are incompatible with text/buffered modes.
        assert not kwds.get('universal_newlines')
        assert kwds.get('bufsize', 0) == 0
        # *_rfd / *_wfd: CRT file descriptors handed to the child process;
        # *_rh / *_wh: overlapped pipe handles kept by the parent.
        stdin_rfd = stdout_wfd = stderr_wfd = None
        stdin_wh = stdout_rh = stderr_rh = None
        if stdin == PIPE:
            # Child side must NOT be overlapped: the child does blocking I/O.
            stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True)
            stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY)
        else:
            stdin_rfd = stdin
        if stdout == PIPE:
            stdout_rh, stdout_wh = pipe(overlapped=(True, False))
            stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0)
        else:
            stdout_wfd = stdout
        if stderr == PIPE:
            stderr_rh, stderr_wh = pipe(overlapped=(True, False))
            stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0)
        elif stderr == STDOUT:
            # Share the stdout write end for stderr.
            stderr_wfd = stdout_wfd
        else:
            stderr_wfd = stderr
        try:
            super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd,
                             stderr=stderr_wfd, **kwds)
        except:
            # Spawn failed: close the parent-side handles we created.
            for h in (stdin_wh, stdout_rh, stderr_rh):
                if h is not None:
                    _winapi.CloseHandle(h)
            raise
        else:
            # Expose the parent-side ends as PipeHandle objects.
            if stdin_wh is not None:
                self.stdin = PipeHandle(stdin_wh)
            if stdout_rh is not None:
                self.stdout = PipeHandle(stdout_rh)
            if stderr_rh is not None:
                self.stderr = PipeHandle(stderr_rh)
        finally:
            # Close the parent's copies of the child-side descriptors; the
            # child keeps its own (inherited) copies -- per subprocess docs.
            if stdin == PIPE:
                os.close(stdin_rfd)
            if stdout == PIPE:
                os.close(stdout_wfd)
            if stderr == PIPE:
                os.close(stderr_wfd)
| apache-2.0 |
totollo/CocosBuilder-X | CocosBuilder-X/libs/nodejs/lib/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/xcode_emulation.py | 149 | 42289 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Computed lazily by _GetSdkBaseDir(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_base_dir = None
def __init__(self, spec):
    """Initialize from a gyp target dict |spec|, collecting the
    per-configuration 'xcode_settings' dicts keyed by config name."""
    self.spec = spec

    # Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
    # This means self.xcode_settings[config] always contains all settings
    # for that config -- the per-target settings as well. Settings that are
    # the same for all configs are implicitly per-target settings.
    self.xcode_settings = {}
    configs = spec['configurations']
    for configname, config in configs.iteritems():
        self.xcode_settings[configname] = config.get('xcode_settings', {})

    # This is only non-None temporarily during the execution of some methods.
    self.configname = None

    # Used by _AdjustLibrary to match .a and .dylib entries in libraries.
    self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
    """Return the xcode_settings dict of the currently active configuration."""
    assert self.configname
    active_settings = self.xcode_settings[self.configname]
    return active_settings
def _Test(self, test_key, cond_key, default):
    """Return True if setting |test_key| (or |default| if unset) equals |cond_key|."""
    value = self._Settings().get(test_key, default)
    return value == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
    """Append |format_str| %% setting |test_key| to |lst|, using |default|
    (if truthy) when the setting is absent."""
    settings = self._Settings()
    if test_key in settings:
        lst.append(format_str % str(settings[test_key]))
    elif default:
        lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
    """Print a warning if |test_key| is set but not handled by this emulation."""
    if test_key in self._Settings():
        print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
    """Return True if this target builds a bundle (mac_bundle != 0)."""
    mac_bundle = int(self.spec.get('mac_bundle', 0))
    return mac_bundle != 0
def GetFrameworkVersion(self):
    """Returns the framework version of the current target. Only valid for
    bundles."""
    assert self._IsBundle()
    version = self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
    return version
def GetWrapperExtension(self):
    """Returns the bundle extension (.app, .framework, .plugin, etc). Only
    valid for bundles."""
    assert self._IsBundle()
    target_type = self.spec['type']
    if target_type == 'executable':
        return '.app'
    if target_type in ('loadable_module', 'shared_library'):
        default_wrapper_extension = {
            'loadable_module': 'bundle',
            'shared_library': 'framework',
        }[target_type]
        wrapper_extension = self.GetPerTargetSetting(
            'WRAPPER_EXTENSION', default=default_wrapper_extension)
        return '.' + self.spec.get('product_extension', wrapper_extension)
    assert False, "Don't know extension for '%s', target '%s'" % (
        target_type, self.spec['target_name'])
def GetProductName(self):
    """Returns PRODUCT_NAME."""
    default_name = self.spec['target_name']
    return self.spec.get('product_name', default_name)
def GetFullProductName(self):
    """Returns FULL_PRODUCT_NAME."""
    if not self._IsBundle():
        return self._GetStandaloneBinaryPath()
    return self.GetWrapperName()
def GetWrapperName(self):
    """Returns the directory name of the bundle represented by this target.
    Only valid for bundles."""
    assert self._IsBundle()
    return '%s%s' % (self.GetProductName(), self.GetWrapperExtension())
def GetBundleContentsFolderPath(self):
    """Returns the qualified path to the bundle's contents folder. E.g.
    Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
    assert self._IsBundle()
    if self.spec['type'] != 'shared_library':
        # loadable_modules have a 'Contents' folder like executables.
        return os.path.join(self.GetWrapperName(), 'Contents')
    return os.path.join(
        self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
def GetBundleResourceFolder(self):
    """Returns the qualified path to the bundle's resource folder. E.g.
    Chromium.app/Contents/Resources. Only valid for bundles."""
    assert self._IsBundle()
    contents = self.GetBundleContentsFolderPath()
    return os.path.join(contents, 'Resources')
def GetBundlePlistPath(self):
    """Returns the qualified path to the bundle's plist file. E.g.
    Chromium.app/Contents/Info.plist. Only valid for bundles."""
    assert self._IsBundle()
    contents = self.GetBundleContentsFolderPath()
    if self.spec['type'] in ('executable', 'loadable_module'):
        return os.path.join(contents, 'Info.plist')
    return os.path.join(contents, 'Resources', 'Info.plist')
def GetProductType(self):
    """Returns the PRODUCT_TYPE of this target."""
    if self._IsBundle():
        product_types = {
            'executable': 'com.apple.product-type.application',
            'loadable_module': 'com.apple.product-type.bundle',
            'shared_library': 'com.apple.product-type.framework',
        }
    else:
        product_types = {
            'executable': 'com.apple.product-type.tool',
            'loadable_module': 'com.apple.product-type.library.dynamic',
            'shared_library': 'com.apple.product-type.library.dynamic',
            'static_library': 'com.apple.product-type.library.static',
        }
    return product_types[self.spec['type']]
def GetMachOType(self):
    """Returns the MACH_O_TYPE of this target."""
    # Weird, but matches Xcode.
    if self.spec['type'] == 'executable' and not self._IsBundle():
        return ''
    mach_o_types = {
        'executable': 'mh_execute',
        'static_library': 'staticlib',
        'shared_library': 'mh_dylib',
        'loadable_module': 'mh_bundle',
    }
    return mach_o_types[self.spec['type']]
def _GetBundleBinaryPath(self):
    """Returns the name of the bundle binary of by this target.
    E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
    assert self._IsBundle()
    # BUG FIX: the original tested `self.spec['type'] in ('shared_library')`;
    # ('shared_library') is a plain string (missing comma), so `in` performed
    # a substring test rather than tuple membership. It happened to behave
    # correctly for the valid bundle types, but was fragile; use equality.
    if self.spec['type'] == 'shared_library':
        path = self.GetBundleContentsFolderPath()
    elif self.spec['type'] in ('executable', 'loadable_module'):
        path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
    else:
        # Fail with a clear message instead of an UnboundLocalError below.
        assert False, "Unexpected bundle type '%s'" % self.spec['type']
    return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
    """Returns the file extension of a non-bundle binary of this type,
    honoring an explicit 'product_extension' in the spec."""
    if 'product_extension' in self.spec:
        return '.' + self.spec['product_extension']
    suffixes = {
        'executable': '',
        'static_library': '.a',
        'shared_library': '.dylib',
        'loadable_module': '.so',
    }
    return suffixes[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
    """Returns the filename prefix (e.g. 'lib') of a non-bundle binary,
    honoring an explicit 'product_prefix' in the spec."""
    default_prefixes = {
        'executable': '',
        'static_library': 'lib',
        'shared_library': 'lib',
        # Non-bundled loadable_modules are called foo.so for some reason
        # (that is, .so and no prefix) with the xcode build -- match that.
        'loadable_module': '',
    }
    return self.spec.get('product_prefix', default_prefixes[self.spec['type']])
def _GetStandaloneBinaryPath(self):
    """Returns the name of the non-bundle binary represented by this target.
    E.g. hello_world. Only valid for non-bundles."""
    assert not self._IsBundle()
    assert self.spec['type'] in (
        'executable', 'shared_library', 'static_library', 'loadable_module'), (
        'Unexpected type %s' % self.spec['type'])
    target = self.spec['target_name']
    # All library types drop a leading 'lib' from the target name; the
    # prefix is re-added via _GetStandaloneExecutablePrefix below.
    if self.spec['type'] in (
            'static_library', 'loadable_module', 'shared_library'):
        if target.startswith('lib'):
            target = target[3:]

    target_prefix = self._GetStandaloneExecutablePrefix()
    target = self.spec.get('product_name', target)
    target_ext = self._GetStandaloneExecutableSuffix()
    return target_prefix + target + target_ext
def GetExecutableName(self):
    """Returns the executable name of the bundle represented by this target.
    E.g. Chromium."""
    if not self._IsBundle():
        return self._GetStandaloneBinaryPath()
    return self.spec.get('product_name', self.spec['target_name'])
def GetExecutablePath(self):
    """Returns the directory name of the bundle represented by this target. E.g.
    Chromium.app/Contents/MacOS/Chromium."""
    if not self._IsBundle():
        return self._GetStandaloneBinaryPath()
    return self._GetBundleBinaryPath()
def _GetSdkBaseDir(self):
    """Returns the root of the 'Developer' directory. On Xcode 4.2 and prior,
    this is usually just /Developer. Xcode 4.3 moved that folder into the Xcode
    bundle."""
    if not XcodeSettings._sdk_base_dir:
        import subprocess
        # Ask the system where the active developer directory lives.
        job = subprocess.Popen(['xcode-select', '-print-path'],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
        out, err = job.communicate()
        if job.returncode != 0:
            print out
            raise Exception('Error %d running xcode-select' % job.returncode)
        # The Developer folder moved in Xcode 4.3.
        xcode43_sdk_path = os.path.join(
            out.rstrip(), 'Platforms/MacOSX.platform/Developer/SDKs')
        if os.path.isdir(xcode43_sdk_path):
            XcodeSettings._sdk_base_dir = xcode43_sdk_path
        else:
            XcodeSettings._sdk_base_dir = os.path.join(out.rstrip(), 'SDKs')
    # Cached at class level, so xcode-select runs at most once per process.
    return XcodeSettings._sdk_base_dir
def _SdkPath(self):
    """Returns the path to the SDK selected by the SDKROOT setting."""
    sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx10.5')
    if not sdk_root.startswith('macosx'):
        # SDKROOT can also be a path; pass it through unchanged.
        return sdk_root
    version = sdk_root[len('macosx'):]
    return os.path.join(self._GetSdkBaseDir(), 'MacOSX' + version + '.sdk')
def GetCflags(self, configname):
    """Returns flags that need to be added to .c, .cc, .m, and .mm
    compilations."""
    # This functions (and the similar ones below) do not offer complete
    # emulation of all xcode_settings keys. They're implemented on demand.

    # self.configname is set for the duration of this call so that helpers
    # like _Settings(), _Test() and _Appendf() read the right config.
    self.configname = configname
    cflags = []

    sdk_root = self._SdkPath()
    if 'SDKROOT' in self._Settings():
        cflags.append('-isysroot %s' % sdk_root)

    if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
        cflags.append('-funsigned-char')

    if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
        cflags.append('-fasm-blocks')

    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
        if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
            cflags.append('-mdynamic-no-pic')
    else:
        pass
        # TODO: In this case, it depends on the target. xcode passes
        # mdynamic-no-pic by default for executable and possibly static lib
        # according to mento

    if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
        cflags.append('-mpascal-strings')

    self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')

    if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
        dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
        if dbg_format == 'dwarf':
            cflags.append('-gdwarf-2')
        elif dbg_format == 'stabs':
            raise NotImplementedError('stabs debug format is not supported yet.')
        elif dbg_format == 'dwarf-with-dsym':
            cflags.append('-gdwarf-2')
        else:
            raise NotImplementedError('Unknown debug format %s' % dbg_format)

    if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
        cflags.append('-fvisibility=hidden')

    if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
        cflags.append('-Werror')

    if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
        cflags.append('-Wnewline-eof')

    self._Appendf(cflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')

    # TODO:
    if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
        self._WarnUnimplemented('COPY_PHASE_STRIP')
    self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
    self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
    # TODO: This is exported correctly, but assigning to it is not supported.
    self._WarnUnimplemented('MACH_O_TYPE')
    self._WarnUnimplemented('PRODUCT_TYPE')

    archs = self._Settings().get('ARCHS', ['i386'])
    if len(archs) != 1:
        # TODO: Supporting fat binaries will be annoying.
        self._WarnUnimplemented('ARCHS')
        archs = ['i386']
    cflags.append('-arch ' + archs[0])

    # SSE flags only make sense on Intel archs.
    if archs[0] in ('i386', 'x86_64'):
        if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
            cflags.append('-msse3')
        if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
                      default='NO'):
            cflags.append('-mssse3')  # Note 3rd 's'.
        if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
            cflags.append('-msse4.1')
        if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
            cflags.append('-msse4.2')

    cflags += self._Settings().get('WARNING_CFLAGS', [])

    config = self.spec['configurations'][self.configname]
    framework_dirs = config.get('mac_framework_dirs', [])
    for directory in framework_dirs:
        cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))

    self.configname = None
    return cflags
def GetCflagsC(self, configname):
    """Returns flags that need to be added to .c, and .m compilations."""
    self.configname = configname
    cflags_c = []
    self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
    cflags_c.extend(self._Settings().get('OTHER_CFLAGS', []))
    self.configname = None
    return cflags_c
def GetCflagsCC(self, configname):
    """Returns flags that need to be added to .cc, and .mm compilations."""
    self.configname = configname
    cflags_cc = []

    # Settings whose non-default value maps to a single flag.
    if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
        cflags_cc.append('-fno-rtti')
    if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
        cflags_cc.append('-fno-exceptions')
    if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
        cflags_cc.append('-fvisibility-inlines-hidden')
    if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
        cflags_cc.append('-fno-threadsafe-statics')
    if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
        cflags_cc.append('-Wno-invalid-offsetof')

    # Expand $(inherited) / $OTHER_CFLAGS references in OTHER_CPLUSPLUSFLAGS.
    expanded_flags = []
    for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
        # TODO: More general variable expansion. Missing in many other places too.
        if flag in ('$inherited', '$(inherited)', '${inherited}'):
            flag = '$OTHER_CFLAGS'
        if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
            expanded_flags.extend(self._Settings().get('OTHER_CFLAGS', []))
        else:
            expanded_flags.append(flag)
    cflags_cc.extend(expanded_flags)

    self.configname = None
    return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
  """Appends the Objective-C garbage collection flag, if any, to |flags|."""
  # GCC_ENABLE_OBJC_GC is 'supported', 'required', or 'unsupported'; the
  # default ('unsupported') adds no flag at all.
  gc_flag_by_policy = {
      'supported': '-fobjc-gc',
      'required': '-fobjc-gc-only',
  }
  policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
  if policy in gc_flag_by_policy:
    flags.append(gc_flag_by_policy[policy])
def GetCflagsObjC(self, configname):
  """Returns flags that need to be added to .m compilations."""
  self.configname = configname
  flags = []
  # Currently the only ObjC-specific cflag is the GC mode, if any.
  self._AddObjectiveCGarbageCollectionFlags(flags)
  self.configname = None
  return flags
def GetCflagsObjCC(self, configname):
  """Returns flags that need to be added to .mm compilations."""
  self.configname = configname
  flags = []
  self._AddObjectiveCGarbageCollectionFlags(flags)
  # ObjC++ may run C++ constructors/destructors on ObjC instance variables.
  if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
    flags.append('-fobjc-call-cxx-cdtors')
  self.configname = None
  return flags
def GetInstallNameBase(self):
  """Return DYLIB_INSTALL_NAME_BASE for this target."""
  # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
  is_shared = self.spec['type'] == 'shared_library'
  is_bare_loadable = (self.spec['type'] == 'loadable_module' and
                      not self._IsBundle())
  if not (is_shared or is_bare_loadable):
    return None
  if self._IsBundle():
    default_base = '/Library/Frameworks'
  else:
    default_base = '/usr/local/lib'
  return self.GetPerTargetSetting('DYLIB_INSTALL_NAME_BASE',
                                  default=default_base)
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
  """Return LD_DYLIB_INSTALL_NAME for this target.

  Returns None for target types that don't get an install name (everything
  except shared libraries and non-bundled loadable modules).
  """
  # Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
  if (self.spec['type'] != 'shared_library' and
      (self.spec['type'] != 'loadable_module' or self._IsBundle())):
    return None

  default_install_name = \
      '$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
  install_name = self.GetPerTargetSetting(
      'LD_DYLIB_INSTALL_NAME', default=default_install_name)

  # Hardcode support for the variables used in chromium for now, to
  # unblock people using the make build.
  if '$' in install_name:
    # Only the two known variable forms are supported; anything else is a
    # hard error so unsupported expansion never goes unnoticed.
    assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
        '$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
        'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
        'yet in target \'%s\' (got \'%s\')' %
            (self.spec['target_name'], install_name))

    install_name = install_name.replace(
        '$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
        self._StandardizePath(self.GetInstallNameBase()))
    if self._IsBundle():
      # These are only valid for bundles, hence the |if|.
      install_name = install_name.replace(
          '$(WRAPPER_NAME)', self.GetWrapperName())
      install_name = install_name.replace(
          '$(PRODUCT_NAME)', self.GetProductName())
    else:
      assert '$(WRAPPER_NAME)' not in install_name
      assert '$(PRODUCT_NAME)' not in install_name

    install_name = install_name.replace(
        '$(EXECUTABLE_PATH)', self.GetExecutablePath())
  return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
  """Returns flags that need to be passed to the linker.

  Args:
      configname: The name of the configuration to get ld flags for.
      product_dir: The directory where products such static and dynamic
          libraries are placed. This is added to the library search path.
      gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build direcotry.
  """
  self.configname = configname
  ldflags = []

  # The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
  # can contain entries that depend on this. Explicitly absolutify these.
  for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
    ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))

  if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
    ldflags.append('-Wl,-dead_strip')

  if self._Test('PREBINDING', 'YES', default='NO'):
    ldflags.append('-Wl,-prebind')

  # Version and deployment-target settings map 1:1 onto linker/driver flags.
  self._Appendf(
      ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
  self._Appendf(
      ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
  self._Appendf(
      ldflags, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
  if 'SDKROOT' in self._Settings():
    ldflags.append('-isysroot ' + self._SdkPath())

  for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
    ldflags.append('-L' + gyp_to_build_path(library_path))

  if 'ORDER_FILE' in self._Settings():
    ldflags.append('-Wl,-order_file ' +
                   '-Wl,' + gyp_to_build_path(
                       self._Settings()['ORDER_FILE']))

  # Only a single architecture is supported for now.
  archs = self._Settings().get('ARCHS', ['i386'])
  if len(archs) != 1:
    # TODO: Supporting fat binaries will be annoying.
    self._WarnUnimplemented('ARCHS')
    archs = ['i386']
  ldflags.append('-arch ' + archs[0])

  # Xcode adds the product directory by default.
  ldflags.append('-L' + product_dir)

  install_name = self.GetInstallName()
  if install_name:
    # Escape spaces so the flag survives later shell-style splitting.
    ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))

  for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
    ldflags.append('-Wl,-rpath,' + rpath)

  config = self.spec['configurations'][self.configname]
  framework_dirs = config.get('mac_framework_dirs', [])
  for directory in framework_dirs:
    ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))

  self.configname = None
  return ldflags
def GetLibtoolflags(self, configname):
  """Returns flags that need to be passed to the static linker.

  Args:
      configname: The name of the configuration to get ld flags for.
  """
  self.configname = configname
  # The static linker reuses OTHER_LDFLAGS wholesale; copy so callers can
  # mutate the result without touching the settings dict.
  libtoolflags = list(self._Settings().get('OTHER_LDFLAGS', []))
  # TODO(thakis): ARCHS?
  self.configname = None
  return libtoolflags
def GetPerTargetSettings(self):
  """Gets a list of all the per-target settings. This will only fetch keys
  whose values are the same across all configurations."""
  first_pass = True
  result = {}
  for configname in sorted(self.xcode_settings.keys()):
    if first_pass:
      # Seed with a copy of the first configuration's settings...
      result = dict(self.xcode_settings[configname])
      first_pass = False
    else:
      # ...then drop every key whose value differs in a later configuration.
      # items() (instead of Python-2-only iteritems()) keeps this portable;
      # we only mutate |result|, never the dict being iterated.
      for key, value in self.xcode_settings[configname].items():
        if key not in result:
          continue
        elif result[key] != value:
          del result[key]
  return result
def GetPerTargetSetting(self, setting, default=None):
  """Tries to get xcode_settings.setting from spec. Assumes that the setting
  has the same value in all configurations and throws otherwise.

  Args:
      setting: The xcode_settings key to look up.
      default: Returned when no configuration defines |setting|.
  """
  first_pass = True
  result = None
  for configname in sorted(self.xcode_settings.keys()):
    if first_pass:
      result = self.xcode_settings[configname].get(setting, None)
      first_pass = False
    else:
      # BUG FIX: the assertion message used the undefined free name |spec|,
      # so a per-config setting raised NameError instead of the intended
      # AssertionError. It must read self.spec.
      assert result == self.xcode_settings[configname].get(setting, None), (
          "Expected per-target setting for '%s', got per-config setting "
          "(target %s)" % (setting, self.spec['target_name']))
  if result is None:
    return default
  return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
  """Returns a list of shell commands that contain the shell commands
  neccessary to strip this target's binary. These should be run as postbuilds
  before the actual postbuilds run."""
  self.configname = configname
  result = []

  strip_requested = (
      self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
      self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO'))
  if strip_requested:
    # Pick the default strip style the way Xcode does.
    if self._IsBundle():
      default_strip_style = 'non-global'
    elif self.spec['type'] == 'executable':
      default_strip_style = 'all'
    else:
      default_strip_style = 'debugging'

    strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
    strip_flags = {
        'all': '',
        'non-global': '-x',
        'debugging': '-S',
    }[strip_style]

    explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
    if explicit_strip_flags:
      strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)

    if not quiet:
      result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
    result.append('strip %s %s' % (strip_flags, output_binary))

  self.configname = None
  return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
  """Returns a list of shell commands that contain the shell commands
  neccessary to massage this target's debug information. These should be run
  as postbuilds before the actual postbuilds run."""
  self.configname = configname
  result = []

  # For static libraries, no dSYMs are created.
  wants_dsym = (
      self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
      self._Test(
          'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
      self.spec['type'] != 'static_library')
  if wants_dsym:
    if not quiet:
      result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
    result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))

  self.configname = None
  return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
  """Returns a list of shell commands that contain the shell commands
  to run as postbuilds for this target, before the actual postbuilds."""
  # dSYMs need to build before stripping happens.
  dsym_commands = self._GetDebugInfoPostbuilds(
      configname, output, output_binary, quiet)
  strip_commands = self._GetStripPostbuilds(configname, output_binary, quiet)
  return dsym_commands + strip_commands
def _AdjustLibrary(self, library):
  """Converts one gyp 'libraries' entry into the matching linker argument."""
  if library.endswith('.framework'):
    # 'Path/Foo.framework' -> '-framework Foo'
    adjusted = '-framework ' + os.path.splitext(os.path.basename(library))[0]
  else:
    m = self.library_re.match(library)
    if m:
      # 'libfoo.dylib' etc. -> '-lfoo'
      adjusted = '-l' + m.group(1)
    else:
      # Anything else (e.g. an explicit path) passes through untouched.
      adjusted = library
  return adjusted.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
  """Transforms entries like 'Cocoa.framework' in libraries into entries like
  '-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
  """
  return [self._AdjustLibrary(library) for library in libraries]
class MacPrefixHeader(object):
  """A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.

  If GCC_PREFIX_HEADER is set, every compilation in the project gets an
  additional |-include path_to_prefix_header| cflag. If
  GCC_PRECOMPILE_PREFIX_HEADER is set too, the header is instead compiled
  once per language used in the project ('c', 'cc', 'm', 'mm' -- gch files
  for different languages aren't compatible) and compilations get
  |-include path_to_compiled_header| instead. The gch files are built with
  the target's normal cflags plus a -x flag naming their language, and every
  object file in the target must depend on the gch file so it is built first.

  This class helps with some of these tasks, but it needs help from the build
  system for writing dependencies to the gch files, for writing build commands
  for the gch files, and for figuring out the location of the gch files.
  """

  def __init__(self, xcode_settings,
               gyp_path_to_build_path, gyp_path_to_build_output):
    """If xcode_settings is None, all methods on this class are no-ops.

    Args:
        gyp_path_to_build_path: A function that takes a gyp-relative path,
            and returns a path relative to the build directory.
        gyp_path_to_build_output: A function that takes a gyp-relative path
            and a language code ('c', 'cc', 'm', or 'mm'), and that returns
            a path to where the output of precompiling that path for that
            language should be placed (without the trailing '.gch').
    """
    # This doesn't support per-configuration prefix headers. Good enough
    # for now.
    self.header = None
    self.compile_headers = False
    if xcode_settings:
      self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
      precompile = xcode_settings.GetPerTargetSetting(
          'GCC_PRECOMPILE_PREFIX_HEADER', default='NO')
      self.compile_headers = precompile != 'NO'
    self.compiled_headers = {}
    if self.header:
      if self.compile_headers:
        for language in ('c', 'cc', 'm', 'mm'):
          self.compiled_headers[language] = gyp_path_to_build_output(
              self.header, language)
      self.header = gyp_path_to_build_path(self.header)

  def GetInclude(self, lang):
    """Gets the cflags to include the prefix header for language |lang|."""
    if self.compile_headers and lang in self.compiled_headers:
      return '-include %s' % self.compiled_headers[lang]
    if self.header:
      return '-include %s' % self.header
    return ''

  def _Gch(self, lang):
    """Returns the actual file name of the prefix header for language |lang|."""
    assert self.compile_headers
    return self.compiled_headers[lang] + '.gch'

  def GetObjDependencies(self, sources, objs):
    """Given a list of source files and the corresponding object files, returns
    a list of (source, object, gch) tuples, where |gch| is the build-directory
    relative path to the gch file each object file depends on. |sources[i]|
    has to be the source file belonging to |objs[i]|."""
    if not self.header or not self.compile_headers:
      return []

    ext_to_lang = {
        '.c': 'c',
        '.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
        '.m': 'm',
        '.mm': 'mm',
    }
    result = []
    for source, obj in zip(sources, objs):
      lang = ext_to_lang.get(os.path.splitext(source)[1])
      if lang:
        result.append((source, obj, self._Gch(lang)))
    return result

  def GetPchBuildCommands(self):
    """Returns [(path_to_gch, language_flag, language, header)].
    |path_to_gch| and |header| are relative to the build directory.
    """
    if not self.header or not self.compile_headers:
      return []
    return [
        (self._Gch('c'), '-x c-header', 'c', self.header),
        (self._Gch('cc'), '-x c++-header', 'cc', self.header),
        (self._Gch('m'), '-x objective-c-header', 'm', self.header),
        (self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
    ]
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
  """Merges the global xcode_settings dictionary into each configuration of the
  target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precendence.
  """
  # The xcode generator special-cases global xcode_settings and does something
  # that amounts to merging in the global xcode_settings into each local
  # xcode_settings dict.
  global_settings = global_dict.get('xcode_settings', {})
  for config in spec['configurations'].values():
    if 'xcode_settings' not in config:
      continue
    merged = global_settings.copy()
    merged.update(config['xcode_settings'])
    config['xcode_settings'] = merged
def IsMacBundle(flavor, spec):
  """Returns if |spec| should be treated as a bundle.

  Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules do not produce a binary but also package
  resources into that directory."""
  # Keep int() first so a malformed mac_bundle value fails loudly on every
  # platform, exactly as before.
  is_mac_bundle = int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac'
  if not is_mac_bundle:
    return False
  # A bundle has to wrap a produced binary; 'none' targets produce nothing.
  assert spec['type'] != 'none', (
      'mac_bundle targets cannot have type none (target "%s")' %
      spec['target_name'])
  return True
def GetMacBundleResources(product_dir, xcode_settings, resources):
  """Yields (output, resource) pairs for every resource in |resources|.
  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      resources: A list of bundle resources, relative to the build directory.
  """
  dest = os.path.join(product_dir,
                      xcode_settings.GetBundleResourceFolder())
  for res in resources:
    # The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangable.
    assert ' ' not in res, (
        "Spaces in resource filenames not supported (%s)" % res)

    res_dir, res_name = os.path.split(res)
    # If the resource lives in a .lproj bundle, that directory is kept in
    # the destination path (localized resources).
    lproj = os.path.split(res_dir)[1]
    if lproj.endswith('.lproj'):
      output = os.path.join(dest, lproj, res_name)
    else:
      output = os.path.join(dest, res_name)

    # Compiled XIB files are referred to by .nib.
    if output.endswith('.xib'):
      output = output[:-3] + 'nib'

    yield output, res
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
  """Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
    build directory,
  * |dest_plist| is the destination plist path, relative to the
    build directory,
  * |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed,
  * |extra_env| is a dict of env variables that should be exported when
    invoking |mac_tool copy-info-plist|.

  Only call this for mac bundle targets.

  Args:
      product_dir: Path to the directory containing the output bundle,
          relative to the build directory.
      xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
  """
  info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
  if not info_plist:
    return None, None, [], {}

  # The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangable.
  assert ' ' not in info_plist, (
      "Spaces in Info.plist filenames not supported (%s)" % info_plist)

  info_plist = gyp_path_to_build_path(info_plist)

  # If explicitly set to preprocess the plist, invoke the C preprocessor and
  # specify any defines as -D flags.
  defines = []
  if xcode_settings.GetPerTargetSetting(
      'INFOPLIST_PREPROCESS', default='NO') == 'YES':
    # Create an intermediate file based on the path.
    defines = shlex.split(xcode_settings.GetPerTargetSetting(
        'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))

  dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
  extra_env = xcode_settings.GetPerTargetSettings()

  return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
                 additional_settings=None):
  """Return the environment variables that Xcode would set. See
  http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
  for a full list.

  Args:
      xcode_settings: An XcodeSettings object. If this is None, this function
          returns an empty dict.
      built_products_dir: Absolute path to the built products dir.
      srcroot: Absolute path to the source root.
      configuration: The build configuration name.
      additional_settings: An optional dict with more values to add to the
          result.
  """
  if not xcode_settings: return {}

  # This function is considered a friend of XcodeSettings, so let it reach into
  # its implementation details.
  spec = xcode_settings.spec

  # These are filled in on a as-needed basis.
  env = {
    'BUILT_PRODUCTS_DIR' : built_products_dir,
    'CONFIGURATION' : configuration,
    'PRODUCT_NAME' : xcode_settings.GetProductName(),
    # See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
    'SRCROOT' : srcroot,
    'SOURCE_ROOT': '${SRCROOT}',
    # This is not true for static libraries, but currently the env is only
    # written for bundles:
    'TARGET_BUILD_DIR' : built_products_dir,
    'TEMP_DIR' : '${TMPDIR}',
  }
  # SDKROOT is always exported, but empty unless the target sets it.
  if xcode_settings.GetPerTargetSetting('SDKROOT'):
    env['SDKROOT'] = xcode_settings._SdkPath()
  else:
    env['SDKROOT'] = ''

  # Binary-producing target types get the executable/product variables.
  if spec['type'] in (
      'executable', 'static_library', 'shared_library', 'loadable_module'):
    env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
    env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
    env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
    mach_o_type = xcode_settings.GetMachOType()
    if mach_o_type:
      env['MACH_O_TYPE'] = mach_o_type
    env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
  # Bundle targets additionally get the bundle layout variables.
  if xcode_settings._IsBundle():
    env['CONTENTS_FOLDER_PATH'] = \
      xcode_settings.GetBundleContentsFolderPath()
    env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
        xcode_settings.GetBundleResourceFolder()
    env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
    env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()

  install_name = xcode_settings.GetInstallName()
  if install_name:
    env['LD_DYLIB_INSTALL_NAME'] = install_name
  install_name_base = xcode_settings.GetInstallNameBase()
  if install_name_base:
    env['DYLIB_INSTALL_NAME_BASE'] = install_name_base

  if not additional_settings:
    additional_settings = {}
  else:
    # Flatten lists to strings.
    for k in additional_settings:
      if not isinstance(additional_settings[k], str):
        additional_settings[k] = ' '.join(additional_settings[k])
  # |env| wins over caller-provided values on key collisions.
  additional_settings.update(env)

  # Canonicalize every value to the ${FOO} reference style so later
  # topological sorting/expansion can find variable references.
  for k in additional_settings:
    additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])

  return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
  """Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
  expansions list. If the variable expands to something that references
  another variable, this variable is expanded as well if it's in env --
  until no variables present in env are left."""
  # Walk the expansions last-to-first: later definitions may reference
  # earlier ones, so substituting them first lets references resolve fully.
  for name, value in reversed(expansions):
    for pattern in ('${' + name + '}', '$(' + name + ')', '$' + name):
      string = string.replace(pattern, value)
  return string
def _TopologicallySortedEnvVarKeys(env):
  """Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
  env such that key2 is after key1 in L if env[key2] refers to env[key1].

  Throws an Exception in case of dependency cycles.
  """
  # Since environment variables can refer to other variables, the evaluation
  # order is important. Below is the logic to compute the dependency graph
  # and sort it.
  regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')

  def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
    # This happens to be easier in this case, since a variable's
    # definition contains all variables it references in a single string.
    # We can then reverse the result of the topological sort at the end.
    # Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
    matches = set([v for v in regex.findall(env[node]) if v in env])
    for dependee in matches:
      assert '${' not in dependee, 'Nested variables not supported: ' + dependee
    return matches

  try:
    # Topologically sort, and then reverse, because we used an edge definition
    # that's inverted from the expected result of this function (see comment
    # above).
    order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
    order.reverse()
    return order
  # BUG FIX: 'except X, e' is a syntax error on Python 3; 'except X as e'
  # is valid on Python 2.6+ (this file already requires 2.6 for str.format).
  except gyp.common.CycleError as e:
    raise Exception(
        'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
                      configuration, additional_settings=None):
  """Returns the Xcode environment as (key, value) pairs, ordered so every
  variable appears after any variable its value references."""
  env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot,
                     configuration, additional_settings)
  order = _TopologicallySortedEnvVarKeys(env)
  return [(key, env[key]) for key in order]
def GetSpecPostbuildCommands(spec, quiet=False):
  """Returns the list of postbuilds explicitly defined on |spec|, in a form
  executable by a shell."""
  commands = []
  for postbuild in spec.get('postbuilds', []):
    if not quiet:
      # Echo a progress line naming the target and postbuild first.
      commands.append('echo POSTBUILD\\(%s\\) %s' % (
          spec['target_name'], postbuild['postbuild_name']))
    commands.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
  return commands
| mit |
zstyblik/infernal-twin | build/pillow/PIL/IcnsImagePlugin.py | 26 | 10597 | #
# The Python Imaging Library.
# $Id$
#
# Mac OS X icns file decoder, based on icns.py by Bob Ippolito.
#
# history:
# 2004-10-09 fl Turned into a PIL plugin; removed 2.3 dependencies.
#
# Copyright (c) 2004 by Bob Ippolito.
# Copyright (c) 2004 by Secret Labs.
# Copyright (c) 2004 by Fredrik Lundh.
# Copyright (c) 2014 by Alastair Houghton.
#
# See the README file for information on usage and redistribution.
#
from PIL import Image, ImageFile, PngImagePlugin, _binary
import io
import os
import shutil
import struct
import sys
import tempfile
# JPEG 2000 support is optional: icns files may store subicons as jp2k, which
# can only be decoded if PIL was built with OpenJPEG.
enable_jpeg2k = hasattr(Image.core, 'jp2klib_version')
if enable_jpeg2k:
    from PIL import Jpeg2KImagePlugin

# Byte -> int conversion helper (works on both Python 2 and 3).
i8 = _binary.i8

# Each icns block starts with an 8-byte header: a 4-byte type signature
# followed by a 4-byte big-endian length (which includes the header itself).
HEADERSIZE = 8
def nextheader(fobj):
    """Read the next icns block header, returning (signature, blocksize)."""
    return struct.unpack('>4sI', fobj.read(HEADERSIZE))
def read_32t(fobj, start_length, size):
    """Read a 32-bit icon that carries an extra 4-byte header (it32)."""
    # The 128x128 icon seems to have an extra header for some reason.
    start, length = start_length
    fobj.seek(start)
    sig = fobj.read(4)
    if sig != b'\x00\x00\x00\x00':
        raise SyntaxError('Unknown signature, expecting 0x00000000')
    # Skip the 4 header bytes and decode the rest as a normal 32-bit icon.
    return read_32(fobj, (start + 4, length - 4), size)
def read_32(fobj, start_length, size):
    """
    Read a 32bit RGB icon resource. Seems to be either uncompressed or
    an RLE packbits-like scheme.

    Returns {"RGB": image}; |size| is a (width, height, scale) tuple.
    """
    (start, length) = start_length
    fobj.seek(start)
    pixel_size = (size[0] * size[2], size[1] * size[2])
    sizesq = pixel_size[0] * pixel_size[1]
    if length == sizesq * 3:
        # uncompressed ("RGBRGBGB")
        indata = fobj.read(length)
        im = Image.frombuffer("RGB", pixel_size, indata, "raw", "RGB", 0, 1)
    else:
        # decode image: the three bands (R, G, B) are stored one after the
        # other, each packbits-style RLE compressed.
        im = Image.new("RGB", pixel_size, None)
        for band_ix in range(3):
            data = []
            bytesleft = sizesq
            while bytesleft > 0:
                byte = fobj.read(1)
                if not byte:
                    break
                byte = i8(byte)
                if byte & 0x80:
                    # Run: high bit set -> the following byte is repeated
                    # (control - 125) times, i.e. runs of 3..130 bytes.
                    blocksize = byte - 125
                    byte = fobj.read(1)
                    for i in range(blocksize):
                        data.append(byte)
                else:
                    # Literal: copy the next (control + 1) bytes verbatim.
                    blocksize = byte + 1
                    data.append(fobj.read(blocksize))
                bytesleft -= blocksize
                if bytesleft <= 0:
                    break
            if bytesleft != 0:
                # A non-zero remainder means the RLE stream was truncated
                # or overran the expected band size.
                raise SyntaxError(
                    "Error reading channel [%r left]" % bytesleft
                )
            band = Image.frombuffer(
                "L", pixel_size, b"".join(data), "raw", "L", 0, 1
            )
            im.im.putband(band.im, band_ix)
    return {"RGB": im}
def read_mk(fobj, start_length, size):
    """Read an uncompressed 8-bit alpha mask; returns {"A": band}."""
    # Alpha masks seem to be uncompressed.
    start = start_length[0]
    fobj.seek(start)
    width = size[0] * size[2]
    height = size[1] * size[2]
    band = Image.frombuffer(
        "L", (width, height), fobj.read(width * height), "raw", "L", 0, 1
    )
    return {"A": band}
def read_png_or_jpeg2000(fobj, start_length, size):
    """Decode an icon stored as PNG or JPEG 2000; returns {"RGBA": image}."""
    (start, length) = start_length
    fobj.seek(start)
    sig = fobj.read(12)

    if sig[:8] == b'\x89PNG\x0d\x0a\x1a\x0a':
        fobj.seek(start)
        return {"RGBA": PngImagePlugin.PngImageFile(fobj)}

    # Raw codestream (j2k/jpc/j2c) or boxed JP2 signatures.
    is_jp2 = (sig[:4] in (b'\xff\x4f\xff\x51', b'\x0d\x0a\x87\x0a') or
              sig == b'\x00\x00\x00\x0cjP \x0d\x0a\x87\x0a')
    if is_jp2:
        if not enable_jpeg2k:
            raise ValueError('Unsupported icon subimage format (rebuild PIL '
                             'with JPEG 2000 support to fix this)')
        # j2k, jpc or j2c
        fobj.seek(start)
        f = io.BytesIO(fobj.read(length))
        im = Jpeg2KImagePlugin.Jpeg2KImageFile(f)
        if im.mode != 'RGBA':
            im = im.convert('RGBA')
        return {"RGBA": im}

    raise ValueError('Unsupported icon subimage format')
class IcnsFile(object):
    """Low-level parser for the block structure of a Mac OS .icns resource."""

    # Maps (width, height, scale) -> ordered list of (block signature, reader)
    # candidates. Readers earlier in a list take precedence when several
    # block types are present for the same size.
    SIZES = {
        (512, 512, 2): [
            (b'ic10', read_png_or_jpeg2000),
        ],
        (512, 512, 1): [
            (b'ic09', read_png_or_jpeg2000),
        ],
        (256, 256, 2): [
            (b'ic14', read_png_or_jpeg2000),
        ],
        (256, 256, 1): [
            (b'ic08', read_png_or_jpeg2000),
        ],
        (128, 128, 2): [
            (b'ic13', read_png_or_jpeg2000),
        ],
        (128, 128, 1): [
            (b'ic07', read_png_or_jpeg2000),
            (b'it32', read_32t),
            (b't8mk', read_mk),
        ],
        (64, 64, 1): [
            (b'icp6', read_png_or_jpeg2000),
        ],
        (32, 32, 2): [
            (b'ic12', read_png_or_jpeg2000),
        ],
        (48, 48, 1): [
            (b'ih32', read_32),
            (b'h8mk', read_mk),
        ],
        (32, 32, 1): [
            (b'icp5', read_png_or_jpeg2000),
            (b'il32', read_32),
            (b'l8mk', read_mk),
        ],
        (16, 16, 2): [
            (b'ic11', read_png_or_jpeg2000),
        ],
        (16, 16, 1): [
            (b'icp4', read_png_or_jpeg2000),
            (b'is32', read_32),
            (b's8mk', read_mk),
        ],
    }

    def __init__(self, fobj):
        """
        fobj is a file-like object as an icns resource
        """
        # signature : (start, length)
        self.dct = dct = {}
        self.fobj = fobj
        sig, filesize = nextheader(fobj)
        if sig != b'icns':
            raise SyntaxError('not an icns file')
        # Walk the flat top-level block list, recording for each block the
        # offset and length of its payload (both exclude the 8-byte header).
        i = HEADERSIZE
        while i < filesize:
            sig, blocksize = nextheader(fobj)
            if blocksize <= 0:
                raise SyntaxError('invalid block header')
            i += HEADERSIZE
            blocksize -= HEADERSIZE
            dct[sig] = (i, blocksize)
            # Skip the payload without reading it; decoded lazily later.
            fobj.seek(blocksize, 1)
            i += blocksize

    def itersizes(self):
        """Return the list of (w, h, scale) sizes present in this file."""
        sizes = []
        for size, fmts in self.SIZES.items():
            for (fmt, reader) in fmts:
                if fmt in self.dct:
                    sizes.append(size)
                    break
        return sizes

    def bestsize(self):
        """Return the largest available size; raises if none is decodable."""
        sizes = self.itersizes()
        if not sizes:
            raise SyntaxError("No 32bit icon resources found")
        return max(sizes)

    def dataforsize(self, size):
        """
        Get an icon resource as {channel: array}. Note that
        the arrays are bottom-up like windows bitmaps and will likely
        need to be flipped or transposed in some way.
        """
        dct = {}
        for code, reader in self.SIZES[size]:
            desc = self.dct.get(code)
            if desc is not None:
                dct.update(reader(self.fobj, desc, size))
        return dct

    def getimage(self, size=None):
        """Return a PIL image for |size| (defaults to the best size)."""
        if size is None:
            size = self.bestsize()
        if len(size) == 2:
            # Normalize (w, h) to (w, h, scale=1).
            size = (size[0], size[1], 1)
        channels = self.dataforsize(size)

        # PNG/JPEG 2000 subicons decode straight to RGBA.
        im = channels.get('RGBA', None)
        if im:
            return im

        # Otherwise combine the RGB data with the alpha mask, if present.
        im = channels.get("RGB").copy()
        try:
            im.putalpha(channels["A"])
        except KeyError:
            pass
        return im
##
# Image plugin for Mac OS icons.
class IcnsImageFile(ImageFile.ImageFile):
    """
    PIL image support for Mac OS .icns files.

    Chooses the best resolution, but will possibly load
    a different size image if you mutate the size attribute
    before calling 'load'.

    The info dictionary has a key 'sizes' that is a list
    of sizes that the icns file has.
    """

    format = "ICNS"
    format_description = "Mac OS icns resource"

    def _open(self):
        # Parse the block table up front and select the largest subicon.
        self.icns = IcnsFile(self.fp)
        self.mode = 'RGBA'
        self.best_size = self.icns.bestsize()
        # Pixel size = logical size * scale factor (retina icons use 2).
        self.size = (self.best_size[0] * self.best_size[2],
                     self.best_size[1] * self.best_size[2])
        self.info['sizes'] = self.icns.itersizes()
        # Just use this to see if it's loaded or not yet.
        self.tile = ('',)

    def load(self):
        # A 3-tuple size (w, h, scale) selects a specific subicon to load.
        if len(self.size) == 3:
            self.best_size = self.size
            self.size = (self.best_size[0] * self.best_size[2],
                         self.best_size[1] * self.best_size[2])

        Image.Image.load(self)
        if not self.tile:
            return
        self.load_prepare()
        # This is likely NOT the best way to do it, but whatever.
        im = self.icns.getimage(self.best_size)

        # If this is a PNG or JPEG 2000, it won't be loaded yet
        im.load()

        # Adopt the decoded subimage's pixel data and drop parsing state so
        # subsequent loads are no-ops.
        self.im = im.im
        self.mode = im.mode
        self.size = im.size
        self.fp = None
        self.icns = None
        self.tile = ()
        self.load_end()
def _save(im, fp, filename):
    """
    Saves the image as a series of PNG files,
    that are then converted to a .icns file
    using the OS X command line utility 'iconutil'.

    OS X only.

    Raises CalledProcessError if iconutil exits non-zero.
    """
    try:
        fp.flush()
    except Exception:
        # Best-effort: some file-like objects don't support flush().
        pass

    # create the temporary set of pngs
    iconset = tempfile.mkdtemp('.iconset')
    last_w = None
    last_im = None
    for w in [16, 32, 128, 256, 512]:
        prefix = 'icon_{}x{}'.format(w, w)

        # The @2x image of the previous size is pixel-identical to the 1x
        # image of the current size (e.g. 16@2x == 32x32), so reuse it.
        if last_w == w:
            im_scaled = last_im
        else:
            im_scaled = im.resize((w, w), Image.LANCZOS)
        im_scaled.save(os.path.join(iconset, prefix + '.png'))

        im_scaled = im.resize((w * 2, w * 2), Image.LANCZOS)
        im_scaled.save(os.path.join(iconset, prefix + '@2x.png'))

        last_im = im_scaled
        # BUG FIX: last_w was never updated, so the reuse branch above was
        # dead code and every size was redundantly re-resized.
        last_w = w * 2

    # iconutil -c icns -o {} {}
    from subprocess import Popen, PIPE, CalledProcessError

    convert_cmd = ["iconutil", "-c", "icns", "-o", filename, iconset]
    stderr = tempfile.TemporaryFile()
    convert_proc = Popen(convert_cmd, stdout=PIPE, stderr=stderr)

    convert_proc.stdout.close()

    retcode = convert_proc.wait()

    # remove the temporary files
    shutil.rmtree(iconset)

    if retcode:
        raise CalledProcessError(retcode, convert_cmd)
Image.register_open("ICNS", IcnsImageFile, lambda x: x[:4] == b'icns')
Image.register_extension("ICNS", '.icns')

if sys.platform == 'darwin':
    # The saver shells out to the OS X `iconutil` tool, so it is only
    # registered on Mac OS.
    Image.register_save("ICNS", _save)

    Image.register_mime("ICNS", "image/icns")

if __name__ == '__main__':
    # Self-test: dump every embedded size, then the best size, as PNGs.
    imf = IcnsImageFile(open(sys.argv[1], 'rb'))
    for size in imf.info['sizes']:
        imf.size = size
        imf.load()
        im = imf.im
        im.save('out-%s-%s-%s.png' % size)
    im = Image.open(open(sys.argv[1], "rb"))
    im.save("out.png")
    # BUG FIX: sys.platform is 'win32' on Windows -- it is never the string
    # 'windows', so the viewer below could never be launched.
    if sys.platform == 'win32':
        os.startfile("out.png")
| gpl-3.0 |
Architektor/PySnip | venv/lib/python2.7/site-packages/Cython/Tests/TestCodeWriter.py | 132 | 2316 | from Cython.TestUtils import CythonTest
class TestCodeWriter(CythonTest):
    # CythonTest uses the CodeWriter heavily, so do some checking by
    # roundtripping Cython code through the test framework.

    # Note that this test is dependent upon the normal Cython parser
    # to generate the input trees to the CodeWriter. This saves *a lot*
    # of time; better to spend that time writing other tests than perfecting
    # this one...

    # Whitespace is very significant in this process:
    #  - always newline on new block (!)
    #  - indent 4 spaces
    #  - 1 space around every operator

    def t(self, codestr):
        # Round-trip: parse `codestr`, serialize the parse tree with the
        # CodeWriter, and assert the output matches the input exactly.
        self.assertCode(codestr, self.fragment(codestr).root)

    def test_print(self):
        self.t(u"""
print x, y
print x + y ** 2
print x, y, z,
""")

    def test_if(self):
        self.t(u"if x:\n    pass")

    def test_ifelifelse(self):
        self.t(u"""
if x:
    pass
elif y:
    pass
elif z + 34 ** 34 - 2:
    pass
else:
    pass
""")

    def test_def(self):
        self.t(u"""
def f(x, y, z):
    pass
def f(x = 34, y = 54, z):
    pass
""")

    def test_longness_and_signedness(self):
        self.t(u"def f(unsigned long long long long long int y):\n    pass")

    def test_signed_short(self):
        self.t(u"def f(signed short int y):\n    pass")

    def test_typed_args(self):
        self.t(u"def f(int x, unsigned long int y):\n    pass")

    def test_cdef_var(self):
        self.t(u"""
cdef int hello
cdef int hello = 4, x = 3, y, z
""")

    def test_for_loop(self):
        self.t(u"""
for x, y, z in f(g(h(34) * 2) + 23):
    print x, y, z
else:
    print 43
""")

    def test_inplace_assignment(self):
        self.t(u"x += 43")

    def test_attribute(self):
        self.t(u"a.x")
# Allow running this test module directly.
if __name__ == "__main__":
    import unittest
    unittest.main()
| gpl-3.0 |
stack-of-tasks/rbdlpy | tutorial/lib/python2.7/site-packages/OpenGL/GLX/SGIX/dmbuffer.py | 8 | 3287 | '''OpenGL extension SGIX.dmbuffer
This module customises the behaviour of the
OpenGL.raw.GLX.SGIX.dmbuffer to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces a new type of GLXPbuffer, a DM or Digital
Media pbuffer. A DM pbuffer is a GLXPbuffer that adopts one or more
of its buffers from an externally defined and allocated DMbuffer.
In this way it resembles a GLXPixmap that attaches to an existing
X pixmap. Like a standard pbuffer it can be used as a non-displayable
rendering area, or as a read source for pixel data. The externally
defined buffers of the DM pbuffer can be sequentially associated
with other DM buffers of the same size and configuration.
. The DM pbuffer is described with a new attrib passed to
glXCreateGLXPbuffer.
. A new GLX command allows associating a compatible DMbuffer with
the DM GLXPbuffer. At associate time the DM buffers described
by the DMparams are used directly as the corresponding
buffers of the GLXPbuffer drawable, as described by the FBconfig.
All other buffers that are part of the config will be created by
GL without externally referenceable names. A DM pbuffer must be
associated with a compatible DMbuffer before it can be made current
to a GLX context.
. The externally defined buffers of the DM pbuffer can be changed at
any time by successfully associating a new DMbuffer with the GLXPbuffer
through the associate command. It will be the application's
responsibility to synchronize DMedia, GLX, and GL commands that
effect the contents of the DM pbuffer.
. Any direct GLX rendering context that satisfies config compatibility
can be used to render to or read from a DM GLXPbuffer.
A currently associated DMbuffer may simultaneously be in use by
other clients on the same Display, but will not be destroyed
while associated with a GLXPbuffer. When the GLXpbuffer is
destroyed, all buffers that have no remaining clients, including
the DMbuffer, will be freed.
DM GLXPbuffers are proposed as a means for OpenGL to access buffers
generated by the VL, compression, and other digital media libraries in
a uniform way. The DMbuffer and DMparams descriptors are left
intentionally abstract so that the GLX extension can adapt to future
DM library changes.
The initial implementation will require that the DMparams descriptor
supply at least pixel format and packing information, and that the
DMbuffer provide the size and location of a colorbuffer. GLX will do
compatibility checking when possible based on the config, params, and
buffer information at associate time.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/SGIX/dmbuffer.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.SGIX.dmbuffer import *
from OpenGL.raw.GLX.SGIX.dmbuffer import _EXTENSION_NAME
def glInitDmbufferSGIX():
    """Return boolean indicating whether this extension is available.

    Queries the runtime GL implementation for the SGIX.dmbuffer
    extension name declared by this module.
    """
    from OpenGL.extensions import hasGLExtension
    return hasGLExtension(_EXTENSION_NAME)
### END AUTOGENERATED SECTION | lgpl-3.0 |
google/llvm-propeller | lldb/test/API/commands/expression/persistent_variables/TestPersistentVariables.py | 2 | 1993 | """
Test that lldb persistent variables works correctly.
"""
import lldb
from lldbsuite.test.lldbtest import *
class PersistentVariablesTestCase(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    def test_persistent_variables(self):
        """Test that lldb persistent variables works correctly."""
        self.build()
        lldbutil.run_to_source_breakpoint(self, "// break here", lldb.SBFileSpec("main.c"))

        # Define a persistent variable and check it tracks the frame's `i`.
        self.runCmd("expr int $i = i")
        self.expect_expr("$i == i", result_type="bool", result_value="true")
        self.expect_expr("$i + 1", result_type="int", result_value="6")
        self.expect_expr("$i + 3", result_type="int", result_value="8")
        # Numbered result variables ($1, $2, ...) accumulate from the
        # expressions above.
        self.expect_expr("$1 + $2", result_type="int", result_value="14")
        self.expect_expr("$3", result_type="int", result_value="14")
        self.expect_expr("$2", result_type="int", result_value="8")
        self.expect_expr("(int)-2", result_type="int", result_value="-2")
        self.expect_expr("$4", result_type="int", result_value="-2")
        self.expect_expr("$4 > (int)31", result_type="bool", result_value="false")
        self.expect_expr("(long)$4", result_type="long", result_value="-2")

        # Try assigning an existing persistent variable with a numeric name.
        self.expect("expr int $2 = 1234", error=True,
                    substrs=["Error [IRForTarget]: Names starting with $0, $1, ... are reserved for use as result names"])

        # $2 should still have its original value.
        self.expect_expr("$2", result_type="int", result_value="8")

        # Try assigning an non-existing persistent variable with a numeric name.
        self.expect("expr int $200 = 3", error=True,
                    substrs=["Error [IRForTarget]: Names starting with $0, $1, ... are reserved for use as result names"])

        # Test that $200 wasn't created by the previous expression.
        self.expect("expr $200", error=True,
                    substrs=["use of undeclared identifier '$200'"])
adambain-vokal/django-rest-framework | rest_framework/pagination.py | 13 | 27736 | # coding: utf-8
"""
Pagination serializers determine the structure of the output that should
be used for paginated responses.
"""
from __future__ import unicode_literals
import warnings
from base64 import b64decode, b64encode
from collections import namedtuple
from django.core.paginator import Paginator as DjangoPaginator
from django.core.paginator import InvalidPage
from django.template import Context, loader
from django.utils import six
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import OrderedDict
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import remove_query_param, replace_query_param
def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
def _divide_with_ceil(a, b):
"""
Returns 'a' divded by 'b', with any remainder rounded up.
"""
if a % b:
return (a // b) + 1
return a // b
def _get_count(queryset):
"""
Determine an object count, supporting either querysets or regular lists.
"""
try:
return queryset.count()
except (AttributeError, TypeError):
return len(queryset)
def _get_displayed_page_numbers(current, final):
"""
This utility function determines a list of page numbers to display.
This gives us a nice contextually relevant set of page numbers.
For example:
current=14, final=16 -> [1, None, 13, 14, 15, 16]
This implementation gives one page to each side of the cursor,
or two pages to the side when the cursor is at the edge, then
ensures that any breaks between non-continous page numbers never
remove only a single page.
For an alernativative implementation which gives two pages to each side of
the cursor, eg. as in GitHub issue list pagination, see:
https://gist.github.com/tomchristie/321140cebb1c4a558b15
"""
assert current >= 1
assert final >= current
if final <= 5:
return list(range(1, final + 1))
# We always include the first two pages, last two pages, and
# two pages either side of the current page.
included = set((
1,
current - 1, current, current + 1,
final
))
# If the break would only exclude a single page number then we
# may as well include the page number instead of the break.
if current <= 4:
included.add(2)
included.add(3)
if current >= final - 3:
included.add(final - 1)
included.add(final - 2)
# Now sort the page numbers and drop anything outside the limits.
included = [
idx for idx in sorted(list(included))
if idx > 0 and idx <= final
]
# Finally insert any `...` breaks
if current > 4:
included.insert(1, None)
if current < final - 3:
included.insert(len(included) - 1, None)
return included
def _get_page_links(page_numbers, current, url_func):
    """
    Given a list of page numbers and `None` page breaks,
    return a list of `PageLink` objects.
    """
    def make_link(number):
        # A `None` entry marks an elided range of pages.
        if number is None:
            return PAGE_BREAK
        return PageLink(
            url=url_func(number),
            number=number,
            is_active=(number == current),
            is_break=False
        )

    return [make_link(number) for number in page_numbers]
def _reverse_ordering(ordering_tuple):
"""
Given an order_by tuple such as `('-created', 'uuid')` reverse the
ordering and return a new tuple, eg. `('created', '-uuid')`.
"""
def invert(x):
return x[1:] if (x.startswith('-')) else '-' + x
return tuple([invert(item) for item in ordering_tuple])
# A pagination cursor: an `offset` into the filtered results, whether we
# are paginating in `reverse`, and the `position` value used to filter
# (None for the first page).
Cursor = namedtuple('Cursor', ['offset', 'reverse', 'position'])
# One entry in the browsable API's page-number controls.
PageLink = namedtuple('PageLink', ['url', 'number', 'is_active', 'is_break'])
# Sentinel link representing a '...' break between page numbers.
PAGE_BREAK = PageLink(url=None, number=None, is_active=False, is_break=True)
class BasePagination(object):
    """Abstract base class that all pagination styles implement."""

    # Set to True by a subclass when the browsable API should render
    # pagination controls for the current request.
    display_page_controls = False

    def paginate_queryset(self, queryset, request, view=None):  # pragma: no cover
        """Slice the queryset into a page, or return `None` if disabled."""
        raise NotImplementedError('paginate_queryset() must be implemented.')

    def get_paginated_response(self, data):  # pragma: no cover
        """Wrap serialized page `data` in a `Response` with pagination metadata."""
        raise NotImplementedError('get_paginated_response() must be implemented.')

    def to_html(self):  # pragma: no cover
        """Render pagination controls for the browsable API."""
        raise NotImplementedError('to_html() must be implemented to display page controls.')

    def get_results(self, data):
        # Extract the list of results from a paginated response payload.
        return data['results']
class PageNumberPagination(BasePagination):
    """
    A simple page number based style that supports page numbers as
    query parameters. For example:

    http://api.example.org/accounts/?page=4
    http://api.example.org/accounts/?page=4&page_size=100
    """
    # The default page size.
    # Defaults to `None`, meaning pagination is disabled.
    page_size = api_settings.PAGE_SIZE

    # Client can control the page using this query parameter.
    page_query_param = 'page'

    # Client can control the page size using this query parameter.
    # Default is 'None'. Set to eg 'page_size' to enable usage.
    page_size_query_param = None

    # Set to an integer to limit the maximum page size the client may request.
    # Only relevant if 'page_size_query_param' has also been set.
    max_page_size = None

    # Query parameter values that mean "jump to the last page" (eg ?page=last).
    last_page_strings = ('last',)

    template = 'rest_framework/pagination/numbers.html'

    invalid_page_message = _('Invalid page "{page_number}": {message}.')

    def _handle_backwards_compat(self, view):
        """
        Prior to version 3.1, pagination was handled in the view, and the
        attributes were set there. The attributes should now be set on
        the pagination class. The old style continues to work but is deprecated
        and will be fully removed in version 3.3.
        """
        assert not (
            getattr(view, 'pagination_serializer_class', None) or
            getattr(api_settings, 'DEFAULT_PAGINATION_SERIALIZER_CLASS', None)
        ), (
            "The pagination_serializer_class attribute and "
            "DEFAULT_PAGINATION_SERIALIZER_CLASS setting have been removed as "
            "part of the 3.1 pagination API improvement. See the pagination "
            "documentation for details on the new API."
        )
        # Copy deprecated settings-level configuration onto this instance.
        for (settings_key, attr_name) in (
            ('PAGINATE_BY', 'page_size'),
            ('PAGINATE_BY_PARAM', 'page_size_query_param'),
            ('MAX_PAGINATE_BY', 'max_page_size')
        ):
            value = getattr(api_settings, settings_key, None)
            if value is not None:
                setattr(self, attr_name, value)
                warnings.warn(
                    "The `%s` settings key is deprecated. "
                    "Use the `%s` attribute on the pagination class instead." % (
                        settings_key, attr_name
                    ),
                    DeprecationWarning,
                )
        # Copy deprecated view-level configuration onto this instance.
        for (view_attr, attr_name) in (
            ('paginate_by', 'page_size'),
            ('page_query_param', 'page_query_param'),
            ('paginate_by_param', 'page_size_query_param'),
            ('max_paginate_by', 'max_page_size')
        ):
            value = getattr(view, view_attr, None)
            if value is not None:
                setattr(self, attr_name, value)
                warnings.warn(
                    "The `%s` view attribute is deprecated. "
                    "Use the `%s` attribute on the pagination class instead." % (
                        view_attr, attr_name
                    ),
                    DeprecationWarning,
                )

    def paginate_queryset(self, queryset, request, view=None):
        """
        Paginate a queryset if required, either returning a
        page object, or `None` if pagination is not configured for this view.
        """
        self._handle_backwards_compat(view)

        page_size = self.get_page_size(request)
        if not page_size:
            return None

        paginator = DjangoPaginator(queryset, page_size)
        page_number = request.query_params.get(self.page_query_param, 1)
        if page_number in self.last_page_strings:
            page_number = paginator.num_pages

        try:
            self.page = paginator.page(page_number)
        except InvalidPage as exc:
            msg = self.invalid_page_message.format(
                page_number=page_number, message=six.text_type(exc)
            )
            raise NotFound(msg)

        if paginator.num_pages > 1 and self.template is not None:
            # The browsable API should display pagination controls.
            self.display_page_controls = True

        self.request = request
        return list(self.page)

    def get_paginated_response(self, data):
        """Return a `Response` wrapping `data` with count/next/previous links."""
        return Response(OrderedDict([
            ('count', self.page.paginator.count),
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
            ('results', data)
        ]))

    def get_page_size(self, request):
        """
        Return the page size to use, honouring a valid client-supplied
        `page_size_query_param` (clamped to `max_page_size`) when enabled.
        """
        if self.page_size_query_param:
            try:
                return _positive_int(
                    request.query_params[self.page_size_query_param],
                    strict=True,
                    cutoff=self.max_page_size
                )
            except (KeyError, ValueError):
                # Missing or invalid query parameter: use the default below.
                pass
        return self.page_size

    def get_next_link(self):
        """Return the URL of the next page, or `None` on the last page."""
        if not self.page.has_next():
            return None
        url = self.request.build_absolute_uri()
        page_number = self.page.next_page_number()
        return replace_query_param(url, self.page_query_param, page_number)

    def get_previous_link(self):
        """Return the URL of the previous page, or `None` on the first page."""
        if not self.page.has_previous():
            return None
        url = self.request.build_absolute_uri()
        page_number = self.page.previous_page_number()
        if page_number == 1:
            # Page 1 is the canonical URL without a page parameter.
            return remove_query_param(url, self.page_query_param)
        return replace_query_param(url, self.page_query_param, page_number)

    def get_html_context(self):
        """Build the template context used to render the page controls."""
        base_url = self.request.build_absolute_uri()

        def page_number_to_url(page_number):
            if page_number == 1:
                return remove_query_param(base_url, self.page_query_param)
            else:
                return replace_query_param(base_url, self.page_query_param, page_number)

        current = self.page.number
        final = self.page.paginator.num_pages
        page_numbers = _get_displayed_page_numbers(current, final)
        page_links = _get_page_links(page_numbers, current, page_number_to_url)

        return {
            'previous_url': self.get_previous_link(),
            'next_url': self.get_next_link(),
            'page_links': page_links
        }

    def to_html(self):
        """Render the pagination controls for the browsable API."""
        template = loader.get_template(self.template)
        context = Context(self.get_html_context())
        return template.render(context)
class LimitOffsetPagination(BasePagination):
    """
    A limit/offset based style. For example:

    http://api.example.org/accounts/?limit=100
    http://api.example.org/accounts/?offset=400&limit=100
    """
    default_limit = api_settings.PAGE_SIZE
    limit_query_param = 'limit'
    offset_query_param = 'offset'
    # Optional upper bound on the client-requested limit.
    max_limit = None
    template = 'rest_framework/pagination/numbers.html'

    def paginate_queryset(self, queryset, request, view=None):
        """
        Slice `queryset` according to the request's limit/offset, or
        return `None` if pagination is disabled (no limit configured).
        """
        self.limit = self.get_limit(request)
        if self.limit is None:
            return None

        self.offset = self.get_offset(request)
        self.count = _get_count(queryset)
        self.request = request
        if self.count > self.limit and self.template is not None:
            # The browsable API should display pagination controls.
            self.display_page_controls = True
        return list(queryset[self.offset:self.offset + self.limit])

    def get_paginated_response(self, data):
        """Return a `Response` wrapping `data` with count/next/previous links."""
        return Response(OrderedDict([
            ('count', self.count),
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
            ('results', data)
        ]))

    def get_limit(self, request):
        """
        Return the page limit, honouring a valid client-supplied query
        parameter (clamped to `max_limit`) when present.
        """
        if self.limit_query_param:
            try:
                return _positive_int(
                    request.query_params[self.limit_query_param],
                    cutoff=self.max_limit
                )
            except (KeyError, ValueError):
                # Missing or invalid parameter: use the default below.
                pass
        return self.default_limit

    def get_offset(self, request):
        """Return the requested offset, defaulting to 0 when absent or invalid."""
        try:
            return _positive_int(
                request.query_params[self.offset_query_param],
            )
        except (KeyError, ValueError):
            return 0

    def get_next_link(self):
        """Return the URL of the next page, or `None` on the last page."""
        if self.offset + self.limit >= self.count:
            return None
        url = self.request.build_absolute_uri()
        url = replace_query_param(url, self.limit_query_param, self.limit)
        offset = self.offset + self.limit
        return replace_query_param(url, self.offset_query_param, offset)

    def get_previous_link(self):
        """Return the URL of the previous page, or `None` on the first page."""
        if self.offset <= 0:
            return None
        url = self.request.build_absolute_uri()
        url = replace_query_param(url, self.limit_query_param, self.limit)
        if self.offset - self.limit <= 0:
            # The first page is the canonical URL without an offset.
            return remove_query_param(url, self.offset_query_param)
        offset = self.offset - self.limit
        return replace_query_param(url, self.offset_query_param, offset)

    def get_html_context(self):
        """Build the template context used to render the page controls."""
        base_url = self.request.build_absolute_uri()
        current = _divide_with_ceil(self.offset, self.limit) + 1

        # The number of pages is a little bit fiddly.
        # We need to sum both the number of pages from current offset to end
        # plus the number of pages up to the current offset.
        # When offset is not strictly divisible by the limit then we may
        # end up introducing an extra page as an artifact.
        final = (
            _divide_with_ceil(self.count - self.offset, self.limit) +
            _divide_with_ceil(self.offset, self.limit)
        )
        if current > final:
            current = final

        def page_number_to_url(page_number):
            if page_number == 1:
                return remove_query_param(base_url, self.offset_query_param)
            else:
                offset = self.offset + ((page_number - current) * self.limit)
                return replace_query_param(base_url, self.offset_query_param, offset)

        page_numbers = _get_displayed_page_numbers(current, final)
        page_links = _get_page_links(page_numbers, current, page_number_to_url)

        return {
            'previous_url': self.get_previous_link(),
            'next_url': self.get_next_link(),
            'page_links': page_links
        }

    def to_html(self):
        """Render the pagination controls for the browsable API."""
        template = loader.get_template(self.template)
        context = Context(self.get_html_context())
        return template.render(context)
class CursorPagination(BasePagination):
    """
    The cursor pagination implementation is necessarily complex.
    For an overview of the position/offset style we use, see this post:
    http://cramer.io/2011/03/08/building-cursors-for-the-disqus-api/
    """
    cursor_query_param = 'cursor'
    page_size = api_settings.PAGE_SIZE
    invalid_cursor_message = _('Invalid cursor')
    # Default ordering; may be overridden by an ordering filter on the view.
    ordering = '-created'
    template = 'rest_framework/pagination/previous_and_next.html'

    def paginate_queryset(self, queryset, request, view=None):
        """
        Slice `queryset` to the page identified by the request's cursor,
        or return `None` if pagination is disabled (no page size).
        """
        self.page_size = self.get_page_size(request)
        if not self.page_size:
            return None

        self.base_url = request.build_absolute_uri()
        self.ordering = self.get_ordering(request, queryset, view)

        self.cursor = self.decode_cursor(request)
        if self.cursor is None:
            (offset, reverse, current_position) = (0, False, None)
        else:
            (offset, reverse, current_position) = self.cursor

        # Cursor pagination always enforces an ordering.
        if reverse:
            queryset = queryset.order_by(*_reverse_ordering(self.ordering))
        else:
            queryset = queryset.order_by(*self.ordering)

        # If we have a cursor with a fixed position then filter by that.
        if current_position is not None:
            order = self.ordering[0]
            is_reversed = order.startswith('-')
            order_attr = order.lstrip('-')

            # Test for: (cursor reversed) XOR (queryset reversed)
            if self.cursor.reverse != is_reversed:
                kwargs = {order_attr + '__lt': current_position}
            else:
                kwargs = {order_attr + '__gt': current_position}

            queryset = queryset.filter(**kwargs)

        # If we have an offset cursor then offset the entire page by that amount.
        # We also always fetch an extra item in order to determine if there is a
        # page following on from this one.
        results = list(queryset[offset:offset + self.page_size + 1])
        self.page = list(results[:self.page_size])

        # Determine the position of the final item following the page.
        # (NOTE: the variable name `has_following_postion` is a historical
        # typo kept for byte-compatibility with the original code.)
        if len(results) > len(self.page):
            has_following_postion = True
            following_position = self._get_position_from_instance(results[-1], self.ordering)
        else:
            has_following_postion = False
            following_position = None

        # If we have a reverse queryset, then the query ordering was in reverse
        # so we need to reverse the items again before returning them to the user.
        if reverse:
            self.page = list(reversed(self.page))

        if reverse:
            # Determine next and previous positions for reverse cursors.
            self.has_next = (current_position is not None) or (offset > 0)
            self.has_previous = has_following_postion
            if self.has_next:
                self.next_position = current_position
            if self.has_previous:
                self.previous_position = following_position
        else:
            # Determine next and previous positions for forward cursors.
            self.has_next = has_following_postion
            self.has_previous = (current_position is not None) or (offset > 0)
            if self.has_next:
                self.next_position = following_position
            if self.has_previous:
                self.previous_position = current_position

        # Display page controls in the browsable API if there is more
        # than one page.
        if (self.has_previous or self.has_next) and self.template is not None:
            self.display_page_controls = True

        return self.page

    def get_page_size(self, request):
        """Return the fixed page size for this pagination style."""
        return self.page_size

    def get_next_link(self):
        """Return an encoded-cursor URL for the next page, or `None`."""
        if not self.has_next:
            return None

        if self.cursor and self.cursor.reverse and self.cursor.offset != 0:
            # If we're reversing direction and we have an offset cursor
            # then we cannot use the first position we find as a marker.
            compare = self._get_position_from_instance(self.page[-1], self.ordering)
        else:
            compare = self.next_position
        offset = 0

        for item in reversed(self.page):
            position = self._get_position_from_instance(item, self.ordering)
            if position != compare:
                # The item in this position and the item following it
                # have different positions. We can use this position as
                # our marker.
                break

            # The item in this position has the same position as the item
            # following it, we can't use it as a marker position, so increment
            # the offset and keep seeking to the previous item.
            compare = position
            offset += 1
        else:
            # There were no unique positions in the page.
            if not self.has_previous:
                # We are on the first page.
                # Our cursor will have an offset equal to the page size,
                # but no position to filter against yet.
                offset = self.page_size
                position = None
            elif self.cursor.reverse:
                # The change in direction will introduce a paging artifact,
                # where we end up skipping forward a few extra items.
                offset = 0
                position = self.previous_position
            else:
                # Use the position from the existing cursor and increment
                # its offset by the page size.
                offset = self.cursor.offset + self.page_size
                position = self.previous_position

        cursor = Cursor(offset=offset, reverse=False, position=position)
        return self.encode_cursor(cursor)

    def get_previous_link(self):
        """Return an encoded-cursor URL for the previous page, or `None`."""
        if not self.has_previous:
            return None

        if self.cursor and not self.cursor.reverse and self.cursor.offset != 0:
            # If we're reversing direction and we have an offset cursor
            # then we cannot use the first position we find as a marker.
            compare = self._get_position_from_instance(self.page[0], self.ordering)
        else:
            compare = self.previous_position
        offset = 0

        for item in self.page:
            position = self._get_position_from_instance(item, self.ordering)
            if position != compare:
                # The item in this position and the item following it
                # have different positions. We can use this position as
                # our marker.
                break

            # The item in this position has the same position as the item
            # following it, we can't use it as a marker position, so increment
            # the offset and keep seeking to the previous item.
            compare = position
            offset += 1
        else:
            # There were no unique positions in the page.
            if not self.has_next:
                # We are on the final page.
                # Our cursor will have an offset equal to the page size,
                # but no position to filter against yet.
                offset = self.page_size
                position = None
            elif self.cursor.reverse:
                # Use the position from the existing cursor and increment
                # its offset by the page size.
                offset = self.cursor.offset + self.page_size
                position = self.next_position
            else:
                # The change in direction will introduce a paging artifact,
                # where we end up skipping back a few extra items.
                offset = 0
                position = self.next_position

        cursor = Cursor(offset=offset, reverse=True, position=position)
        return self.encode_cursor(cursor)

    def get_ordering(self, request, queryset, view):
        """
        Return a tuple of strings, that may be used in an `order_by` method.
        """
        ordering_filters = [
            filter_cls for filter_cls in getattr(view, 'filter_backends', [])
            if hasattr(filter_cls, 'get_ordering')
        ]

        if ordering_filters:
            # If a filter exists on the view that implements `get_ordering`
            # then we defer to that filter to determine the ordering.
            filter_cls = ordering_filters[0]
            filter_instance = filter_cls()
            ordering = filter_instance.get_ordering(request, queryset, view)
            assert ordering is not None, (
                'Using cursor pagination, but filter class {filter_cls} '
                'returned a `None` ordering.'.format(
                    filter_cls=filter_cls.__name__
                )
            )
        else:
            # The default case is to check for an `ordering` attribute
            # on this pagination instance.
            ordering = self.ordering
            assert ordering is not None, (
                'Using cursor pagination, but no ordering attribute was declared '
                'on the pagination class.'
            )
            assert '__' not in ordering, (
                'Cursor pagination does not support double underscore lookups '
                'for orderings. Orderings should be an unchanging, unique or '
                'nearly-unique field on the model, such as "-created" or "pk".'
            )

        assert isinstance(ordering, (six.string_types, list, tuple)), (
            'Invalid ordering. Expected string or tuple, but got {type}'.format(
                type=type(ordering).__name__
            )
        )

        if isinstance(ordering, six.string_types):
            return (ordering,)
        return tuple(ordering)

    def decode_cursor(self, request):
        """
        Given a request with a cursor, return a `Cursor` instance.
        """
        # Determine if we have a cursor, and if so then decode it.
        encoded = request.query_params.get(self.cursor_query_param)
        if encoded is None:
            return None

        # The offset in the cursor is used in situations where we have a
        # nearly-unique index. (Eg millisecond precision creation timestamps)
        # We guard against malicious users attempting to cause expensive database
        # queries, by having a hard cap on the maximum possible size of the offset.
        OFFSET_CUTOFF = 1000

        try:
            querystring = b64decode(encoded.encode('ascii')).decode('ascii')
            tokens = urlparse.parse_qs(querystring, keep_blank_values=True)

            offset = tokens.get('o', ['0'])[0]
            offset = _positive_int(offset, cutoff=OFFSET_CUTOFF)

            reverse = tokens.get('r', ['0'])[0]
            reverse = bool(int(reverse))

            position = tokens.get('p', [None])[0]
        except (TypeError, ValueError):
            raise NotFound(self.invalid_cursor_message)

        return Cursor(offset=offset, reverse=reverse, position=position)

    def encode_cursor(self, cursor):
        """
        Given a Cursor instance, return an url with encoded cursor.
        """
        # Only emit non-default fields so the encoded cursor stays short.
        tokens = {}
        if cursor.offset != 0:
            tokens['o'] = str(cursor.offset)
        if cursor.reverse:
            tokens['r'] = '1'
        if cursor.position is not None:
            tokens['p'] = cursor.position

        querystring = urlparse.urlencode(tokens, doseq=True)
        encoded = b64encode(querystring.encode('ascii')).decode('ascii')
        return replace_query_param(self.base_url, self.cursor_query_param, encoded)

    def _get_position_from_instance(self, instance, ordering):
        # The position is the text value of the primary ordering field.
        attr = getattr(instance, ordering[0].lstrip('-'))
        return six.text_type(attr)

    def get_paginated_response(self, data):
        """Return a `Response` wrapping `data` with next/previous links."""
        return Response(OrderedDict([
            ('next', self.get_next_link()),
            ('previous', self.get_previous_link()),
            ('results', data)
        ]))

    def get_html_context(self):
        """Build the template context used to render the page controls."""
        return {
            'previous_url': self.get_previous_link(),
            'next_url': self.get_next_link()
        }

    def to_html(self):
        """Render the pagination controls for the browsable API."""
        template = loader.get_template(self.template)
        context = Context(self.get_html_context())
        return template.render(context)
| bsd-2-clause |
lilydjwg/you-get | src/you_get/util/fs.py | 26 | 1053 | #!/usr/bin/env python
import platform
def legitimize(text, os=platform.system()):
    """Converts a string to a valid filename.

    Replaces characters that are illegal on the target platform with
    safe substitutes, strips a single leading dot, and trims the result
    to 82 Unicode characters.
    """
    # Characters disallowed on every POSIX system: NUL and the path separator.
    table = {0: None, ord('/'): '-'}
    if os == 'Windows':
        # Windows (non-POSIX namespace): characters reserved in
        # Windows VFAT and NTFS.
        table.update({
            ord(':'): '-',
            ord('*'): '-',
            ord('?'): '-',
            ord('\\'): '-',
            ord('|'): '-',
            ord('\"'): '\'',
            # Reserved in Windows VFAT
            ord('+'): '-',
            ord('<'): '-',
            ord('>'): '-',
            ord('['): '(',
            ord(']'): ')',
        })
    elif os == 'Darwin':
        # Mac OS HFS+ forbids the colon.
        table[ord(':')] = '-'

    text = text.translate(table)

    # A leading dot would make the file hidden on POSIX systems.
    if text.startswith('.'):
        text = text[1:]

    return text[:82]  # trim to 82 Unicode characters long
| mit |
EDUlib/eTracesX | Translation_software/edx_to_MOOCdb_piping/import.openedx.apipe/scripts/lookupOpenEdxHash.py | 1 | 1241 | #!/usr/bin/env python
import os
import sys
# Command line tool to look up the human readable strings that
# correspond to edX platform generated hash strings for problems and
# videos. The given strings may be just the 32 bit hex numbers, or the
# long strings in tracking logs that contain that hex number somewhere
# inside it
# Add json_to_relation source dir to $PATH
# for duration of this execution:
source_dir = [os.path.join(os.path.dirname(os.path.abspath(__file__)), "../json_to_relation/")]
source_dir.extend(sys.path)
sys.path = source_dir
from modulestoreImporter import ModulestoreImporter
from edxTrackLogJSONParser import EdXTrackLogJSONParser
if __name__ == '__main__':
    USAGE = 'Usage: lookupOpenEdxHash.py hashStr1 hashstr2 ...'
    if len(sys.argv) < 2:
        print(USAGE)
        sys.exit()
    # Build the hash -> display-name lookup from the cached modulestore dump.
    hashLookup = ModulestoreImporter(os.path.join(os.path.dirname(__file__),'../json_to_relation/data/modulestore_latest.json'),
                                     useCache=True)
    for hashStr in sys.argv[1:]:
        match = EdXTrackLogJSONParser.findHashPattern.search(hashStr)
        if match is not None:
            print(hashLookup.getDisplayName(match.group(1)))
        else:
            # Was a Python 2 `print 'None'` statement; use the function form
            # for consistency with the rest of the script (and Python 3).
            print('None')
| agpl-3.0 |
FinnStutzenstein/OpenSlides | server/openslides/assignments/serializers.py | 3 | 6716 | from openslides.poll.serializers import (
BASE_OPTION_FIELDS,
BASE_POLL_FIELDS,
BASE_VOTE_FIELDS,
BaseOptionSerializer,
BasePollSerializer,
BaseVoteSerializer,
)
from openslides.utils.rest_api import (
BooleanField,
DecimalField,
IdPrimaryKeyRelatedField,
IntegerField,
ModelSerializer,
ValidationError,
)
from ..utils.auth import has_perm
from ..utils.autoupdate import inform_changed_data
from ..utils.validate import validate_html_strict
from .models import (
Assignment,
AssignmentOption,
AssignmentPoll,
AssignmentRelatedUser,
AssignmentVote,
)
def posts_validator(data):
    """
    Validator for open posts. It checks that the values for the open posts are greater than 0.

    ``data`` must contain an "open_posts" key. Falsy values (None, 0) pass
    through unchanged; only truthy values below 1 (negative numbers) are
    rejected. Returns ``data`` unchanged on success.
    """
    # A truthy value can never be None, so the former extra
    # `data["open_posts"] is not None` test was redundant; dropping it does
    # not change which inputs are accepted or rejected.
    if data["open_posts"] and data["open_posts"] < 1:
        raise ValidationError(
            {"detail": "Value for 'open_posts' must be greater than 0"}
        )
    return data
class AssignmentRelatedUserSerializer(ModelSerializer):
    """
    Serializer for assignment.models.AssignmentRelatedUser objects.
    """
    class Meta:
        model = AssignmentRelatedUser
        # Fields exposed over the REST API for one candidate entry.
        fields = ("id", "user", "weight")
class AssignmentVoteSerializer(BaseVoteSerializer):
    """
    Serializer for assignment.models.AssignmentVote objects.
    """
    class Meta:
        model = AssignmentVote
        # Votes are exposed read-only: the shared vote field list is used for
        # both `fields` and `read_only_fields`.
        fields = BASE_VOTE_FIELDS
        read_only_fields = BASE_VOTE_FIELDS
class AssignmentOptionSerializer(BaseOptionSerializer):
    """
    Serializer for assignment.models.AssignmentOption objects.
    """
    class Meta:
        model = AssignmentOption
        # "user" and "weight" extend the shared option fields; everything is
        # read-only, so options cannot be changed through this serializer.
        fields = ("user", "weight") + BASE_OPTION_FIELDS
        read_only_fields = ("user", "weight") + BASE_OPTION_FIELDS
class AssignmentPollSerializer(BasePollSerializer):
    """
    Serializer for assignment.models.AssignmentPoll objects.
    Serializes all polls.
    """
    # Aggregated global vote amounts, computed on the model side (read-only).
    # NOTE(review): min_value=-2 presumably admits special sentinel amounts
    # (e.g. "majority"/"undocumented") -- confirm against the poll models.
    amount_global_yes = DecimalField(
        max_digits=15, decimal_places=6, min_value=-2, read_only=True
    )
    amount_global_no = DecimalField(
        max_digits=15, decimal_places=6, min_value=-2, read_only=True
    )
    amount_global_abstain = DecimalField(
        max_digits=15, decimal_places=6, min_value=-2, read_only=True
    )
    class Meta:
        model = AssignmentPoll
        fields = (
            "assignment",
            "description",
            "pollmethod",
            "min_votes_amount",
            "max_votes_amount",
            "allow_multiple_votes_per_candidate",
            "global_yes",
            "amount_global_yes",
            "global_no",
            "amount_global_no",
            "global_abstain",
            "amount_global_abstain",
        ) + BASE_POLL_FIELDS
        read_only_fields = ("state",)
    def update(self, instance, validated_data):
        """Prevent updating the assignment"""
        # The poll stays bound to the assignment it was created for.
        validated_data.pop("assignment", None)
        return super().update(instance, validated_data)
    def norm_100_percent_base_to_pollmethod(
        self, onehundred_percent_base, pollmethod, old_100_percent_base=None
    ):
        """
        Returns None, if the 100-%-base must not be changed, otherwise the correct 100-%-base.
        """
        # YN polls cannot use a Y- or YNA-based 100% base.
        if pollmethod == AssignmentPoll.POLLMETHOD_YN and onehundred_percent_base in (
            AssignmentPoll.PERCENT_BASE_Y,
            AssignmentPoll.PERCENT_BASE_YNA,
        ):
            return AssignmentPoll.PERCENT_BASE_YN
        # YNA polls cannot use a pure-Y base; prefer restoring the previous
        # base when it was YN/YNA.
        if (
            pollmethod == AssignmentPoll.POLLMETHOD_YNA
            and onehundred_percent_base == AssignmentPoll.PERCENT_BASE_Y
        ):
            if old_100_percent_base is None:
                return AssignmentPoll.PERCENT_BASE_YNA
            else:
                if old_100_percent_base in (
                    AssignmentPoll.PERCENT_BASE_YN,
                    AssignmentPoll.PERCENT_BASE_YNA,
                ):
                    return old_100_percent_base
                else:
                    # NOTE(review): returns the *pollmethod* constant here,
                    # not a PERCENT_BASE_* constant -- verify the two constant
                    # sets share values, otherwise this looks like a bug.
                    return pollmethod
        # Y polls cannot use a YN- or YNA-based 100% base.
        if pollmethod == AssignmentPoll.POLLMETHOD_Y and onehundred_percent_base in (
            AssignmentPoll.PERCENT_BASE_YN,
            AssignmentPoll.PERCENT_BASE_YNA,
        ):
            return AssignmentPoll.PERCENT_BASE_Y
        return None
class AssignmentSerializer(ModelSerializer):
    """
    Serializer for assignment.models.Assignment objects. With all polls.
    """
    assignment_related_users = AssignmentRelatedUserSerializer(
        many=True, read_only=True
    )
    # Write-only agenda fields: consumed in create() below and forwarded to
    # the agenda app via assignment.agenda_item_update_information.
    agenda_create = BooleanField(write_only=True, required=False, allow_null=True)
    agenda_type = IntegerField(
        write_only=True, required=False, min_value=1, max_value=3, allow_null=True
    )
    agenda_parent_id = IntegerField(write_only=True, required=False, min_value=1)
    polls = IdPrimaryKeyRelatedField(many=True, read_only=True)
    class Meta:
        model = Assignment
        fields = (
            "id",
            "title",
            "description",
            "open_posts",
            "phase",
            "assignment_related_users",
            "default_poll_description",
            "agenda_item_id",
            "list_of_speakers_id",
            "agenda_create",
            "agenda_type",
            "agenda_parent_id",
            "tags",
            "attachments",
            "number_poll_candidates",
            "polls",
        )
        # Rejects truthy open_posts values below 1 (see posts_validator).
        validators = (posts_validator,)
    def validate(self, data):
        # Run the description through the project's strict HTML validator.
        if "description" in data:
            data["description"] = validate_html_strict(data["description"])
        return data
    def create(self, validated_data):
        """
        Customized create method. Set information about related agenda item
        into agenda_item_update_information container.
        """
        # M2M relations and write-only helper fields must be popped before
        # constructing the model instance.
        tags = validated_data.pop("tags", [])
        attachments = validated_data.pop("attachments", [])
        request_user = validated_data.pop("request_user")  # this should always be there
        agenda_create = validated_data.pop("agenda_create", None)
        agenda_type = validated_data.pop("agenda_type", None)
        agenda_parent_id = validated_data.pop("agenda_parent_id", None)
        assignment = Assignment(**validated_data)
        # Only agenda managers may influence the auto-created agenda item.
        if has_perm(request_user, "agenda.can_manage"):
            assignment.agenda_item_update_information["create"] = agenda_create
            assignment.agenda_item_update_information["type"] = agenda_type
            assignment.agenda_item_update_information["parent_id"] = agenda_parent_id
        assignment.save()
        # M2M links can only be set after the instance has a primary key.
        assignment.tags.add(*tags)
        assignment.attachments.add(*attachments)
        inform_changed_data(assignment)
        return assignment
| mit |
matrixise/odoo | addons/account/account_cash_statement.py | 283 | 15868 | # encoding: utf-8
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 PC Solutions (<http://pcsol.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools import float_compare
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class account_cashbox_line(osv.osv):
    """ Cash Box Details """
    # One line = one coin/note denomination ("pieces") with the counted
    # number of units at statement opening and closing.
    _name = 'account.cashbox.line'
    _description = 'CashBox Line'
    _rec_name = 'pieces'
    def _sub_total(self, cr, uid, ids, name, arg, context=None):
        """ Calculates Sub total
        @param name: Names of fields.
        @param arg: User defined arguments
        @return: Dictionary of values.
        """
        # Subtotal = denomination value * counted units, for both the
        # opening and closing counts (multi-field function).
        res = {}
        for obj in self.browse(cr, uid, ids, context=context):
            res[obj.id] = {
                'subtotal_opening' : obj.pieces * obj.number_opening,
                'subtotal_closing' : obj.pieces * obj.number_closing,
            }
        return res
    def on_change_sub_opening(self, cr, uid, ids, pieces, number, *a):
        """ Compute the subtotal for the opening """
        return {'value' : {'subtotal_opening' : (pieces * number) or 0.0 }}
    def on_change_sub_closing(self, cr, uid, ids, pieces, number, *a):
        """ Compute the subtotal for the closing """
        return {'value' : {'subtotal_closing' : (pieces * number) or 0.0 }}
    _columns = {
        'pieces': fields.float('Unit of Currency', digits_compute=dp.get_precision('Account')),
        'number_opening' : fields.integer('Number of Units', help='Opening Unit Numbers'),
        'number_closing' : fields.integer('Number of Units', help='Closing Unit Numbers'),
        'subtotal_opening': fields.function(_sub_total, string='Opening Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
        'subtotal_closing': fields.function(_sub_total, string='Closing Subtotal', type='float', digits_compute=dp.get_precision('Account'), multi='subtotal'),
        'bank_statement_id' : fields.many2one('account.bank.statement', ondelete='cascade'),
    }
class account_cash_statement(osv.osv):
    # Extends bank statements with cash-box handling: per-denomination
    # opening/closing counts, balance synchronisation and profit/loss posting
    # of the counted difference.
    _inherit = 'account.bank.statement'
    def _update_balances(self, cr, uid, ids, context=None):
        """
        Set starting and ending balances according to pieces count
        """
        res = {}
        for statement in self.browse(cr, uid, ids, context=context):
            # Only cash journals are handled here.
            if (statement.journal_id.type not in ('cash',)):
                continue
            if not statement.journal_id.cash_control:
                # No cash control: keep the real ending balance in sync with
                # the computed one (compared at accounting precision).
                prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account')
                if float_compare(statement.balance_end_real, statement.balance_end, precision_digits=prec):
                    statement.write({'balance_end_real' : statement.balance_end})
                continue
            # Cash control: balances are the sums of the denomination lines.
            start = end = 0
            for line in statement.details_ids:
                start += line.subtotal_opening
                end += line.subtotal_closing
            data = {
                'balance_start': start,
                'balance_end_real': end,
            }
            res[statement.id] = data
            # Call the parent write() directly to avoid re-entering our own
            # write() override (which would recurse into this method).
            super(account_cash_statement, self).write(cr, uid, [statement.id], data, context=context)
        return res
    def _get_sum_entry_encoding(self, cr, uid, ids, name, arg, context=None):
        """ Find encoding total of statements "
        @param name: Names of fields.
        @param arg: User defined arguments
        @return: Dictionary of values.
        """
        # Sum of all transaction line amounts per statement.
        res = {}
        for statement in self.browse(cr, uid, ids, context=context):
            res[statement.id] = sum((line.amount for line in statement.line_ids), 0.0)
        return res
    def _get_company(self, cr, uid, context=None):
        # Company of the current user, falling back to the first company
        # found. NOTE(review): appears unused within this class.
        user_pool = self.pool.get('res.users')
        company_pool = self.pool.get('res.company')
        user = user_pool.browse(cr, uid, uid, context=context)
        company_id = user.company_id
        if not company_id:
            company_id = company_pool.search(cr, uid, [])
        return company_id and company_id[0] or False
    def _get_statement_from_line(self, cr, uid, ids, context=None):
        # store-trigger helper: map changed statement lines to their
        # statements so total_entry_encoding is recomputed.
        result = {}
        for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
            result[line.statement_id.id] = True
        return result.keys()
    def _compute_difference(self, cr, uid, ids, fieldnames, args, context=None):
        # Counted (real) closing balance minus theoretical closing balance.
        result = dict.fromkeys(ids, 0.0)
        for obj in self.browse(cr, uid, ids, context=context):
            result[obj.id] = obj.balance_end_real - obj.balance_end
        return result
    def _compute_last_closing_balance(self, cr, uid, ids, fieldnames, args, context=None):
        # For draft statements, the real closing balance of the most recently
        # created confirmed statement on the same journal; 0.0 otherwise.
        result = dict.fromkeys(ids, 0.0)
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.state == 'draft':
                statement_ids = self.search(cr, uid,
                    [('journal_id', '=', obj.journal_id.id),('state', '=', 'confirm')],
                    order='create_date desc',
                    limit=1,
                    context=context
                )
                if not statement_ids:
                    continue
                else:
                    st = self.browse(cr, uid, statement_ids[0], context=context)
                    result[obj.id] = st.balance_end_real
        return result
    def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
        # Extends the bank-statement onchange with cash-specific defaults:
        # pre-filled opening cashbox lines and the last closing balance.
        result = super(account_cash_statement, self).onchange_journal_id(cr, uid, ids, journal_id)
        if not journal_id:
            return result
        statement_ids = self.search(cr, uid,
                [('journal_id', '=', journal_id),('state', '=', 'confirm')],
                order='create_date desc',
                limit=1,
                context=context
        )
        opening_details_ids = self._get_cash_open_box_lines(cr, uid, journal_id, context)
        if opening_details_ids:
            result['value']['opening_details_ids'] = opening_details_ids
        if not statement_ids:
            return result
        st = self.browse(cr, uid, statement_ids[0], context=context)
        result.setdefault('value', {}).update({'last_closing_balance' : st.balance_end_real})
        return result
    _columns = {
        'total_entry_encoding': fields.function(_get_sum_entry_encoding, string="Total Transactions",
            store = {
                'account.bank.statement': (lambda self, cr, uid, ids, context=None: ids, ['line_ids','move_line_ids'], 10),
                'account.bank.statement.line': (_get_statement_from_line, ['amount'], 10),
            },
            help="Total of cash transaction lines."),
        'closing_date': fields.datetime("Closed On"),
        'details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='CashBox Lines', copy=True),
        'opening_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Opening Cashbox Lines'),
        'closing_details_ids' : fields.one2many('account.cashbox.line', 'bank_statement_id', string='Closing Cashbox Lines'),
        'user_id': fields.many2one('res.users', 'Responsible', required=False),
        'difference' : fields.function(_compute_difference, method=True, string="Difference", type="float", help="Difference between the theoretical closing balance and the real closing balance."),
        'last_closing_balance' : fields.function(_compute_last_closing_balance, method=True, string='Last Closing Balance', type='float'),
    }
    # NOTE(review): the 'date' lambda uses a mutable default `context={}`;
    # harmless here (only read), but a known Python pitfall.
    _defaults = {
        'state': 'draft',
        'date': lambda self, cr, uid, context={}: context.get('date', time.strftime("%Y-%m-%d %H:%M:%S")),
        'user_id': lambda self, cr, uid, context=None: uid,
    }
    def _get_cash_open_box_lines(self, cr, uid, journal_id, context):
        """Build one2many commands pre-filling the opening cashbox lines from
        the journal's configured denominations, optionally carrying over the
        closing counts of the last confirmed statement."""
        details_ids = []
        if not journal_id:
            return details_ids
        journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
        if journal and (journal.type == 'cash'):
            last_pieces = None
            if journal.with_last_closing_balance == True:
                domain = [('journal_id', '=', journal.id),
                          ('state', '=', 'confirm')]
                last_bank_statement_ids = self.search(cr, uid, domain, limit=1, order='create_date desc', context=context)
                if last_bank_statement_ids:
                    last_bank_statement = self.browse(cr, uid, last_bank_statement_ids[0], context=context)
                    # denomination value -> closing count of the last statement
                    last_pieces = dict(
                        (line.pieces, line.number_closing) for line in last_bank_statement.details_ids
                    )
            for value in journal.cashbox_line_ids:
                nested_values = {
                    'number_closing' : 0,
                    'number_opening' : last_pieces.get(value.pieces, 0) if isinstance(last_pieces, dict) else 0,
                    'pieces' : value.pieces
                }
                # (0, False, vals) = one2many "create" command.
                details_ids.append([0, False, nested_values])
        return details_ids
    def create(self, cr, uid, vals, context=None):
        # Default the opening cashbox lines from the journal configuration,
        # then recompute the balances.
        journal_id = vals.get('journal_id')
        if journal_id and not vals.get('opening_details_ids'):
            vals['opening_details_ids'] = vals.get('opening_details_ids') or self._get_cash_open_box_lines(cr, uid, journal_id, context)
        res_id = super(account_cash_statement, self).create(cr, uid, vals, context=context)
        self._update_balances(cr, uid, [res_id], context)
        return res_id
    def write(self, cr, uid, ids, vals, context=None):
        """
        Update redord(s) comes in {ids}, with new value comes as {vals}
        return True on success, False otherwise
        @param cr: cursor to database
        @param user: id of current user
        @param ids: list of record ids to be update
        @param vals: dict of new values to be set
        @param context: context arguments, like lang, time zone
        @return: True on success, False otherwise
        """
        # Changing the journal invalidates the existing cashbox lines.
        if vals.get('journal_id', False):
            cashbox_line_obj = self.pool.get('account.cashbox.line')
            cashbox_ids = cashbox_line_obj.search(cr, uid, [('bank_statement_id', 'in', ids)], context=context)
            cashbox_line_obj.unlink(cr, uid, cashbox_ids, context)
        res = super(account_cash_statement, self).write(cr, uid, ids, vals, context=context)
        self._update_balances(cr, uid, ids, context)
        return res
    def _user_allow(self, cr, uid, statement_id, context=None):
        # Permission hook for button_open(); always allows by default and is
        # meant to be overridden by other modules.
        return True
    def button_open(self, cr, uid, ids, context=None):
        """ Changes statement state to Running.
        @return: True
        """
        obj_seq = self.pool.get('ir.sequence')
        if context is None:
            context = {}
        statement_pool = self.pool.get('account.bank.statement')
        for statement in statement_pool.browse(cr, uid, ids, context=context):
            vals = {}
            if not self._user_allow(cr, uid, statement.id, context=context):
                raise osv.except_osv(_('Error!'), (_('You do not have rights to open this %s journal!') % (statement.journal_id.name, )))
            # '/' is the placeholder name: assign the next sequence number.
            if statement.name and statement.name == '/':
                c = {'fiscalyear_id': statement.period_id.fiscalyear_id.id}
                if statement.journal_id.sequence_id:
                    st_number = obj_seq.next_by_id(cr, uid, statement.journal_id.sequence_id.id, context=c)
                else:
                    st_number = obj_seq.next_by_code(cr, uid, 'account.cash.statement', context=c)
                vals.update({
                    'name': st_number
                })
            vals.update({
                'state': 'open',
            })
            self.write(cr, uid, [statement.id], vals, context=context)
        return True
    def statement_close(self, cr, uid, ids, journal_type='bank', context=None):
        # Bank statements use the parent behaviour; cash statements also
        # record the closing timestamp.
        if journal_type == 'bank':
            return super(account_cash_statement, self).statement_close(cr, uid, ids, journal_type, context)
        vals = {
            'state':'confirm',
            'closing_date': time.strftime("%Y-%m-%d %H:%M:%S")
        }
        return self.write(cr, uid, ids, vals, context=context)
    def check_status_condition(self, cr, uid, state, journal_type='bank'):
        # Cash statements may only be confirmed from the 'open' state.
        if journal_type == 'bank':
            return super(account_cash_statement, self).check_status_condition(cr, uid, state, journal_type)
        return state=='open'
    def button_confirm_cash(self, cr, uid, ids, context=None):
        # Post the counted difference to the journal's profit/loss account
        # before running the normal bank confirmation.
        absl_proxy = self.pool.get('account.bank.statement.line')
        # NOTE(review): TABLES is assigned but never used.
        TABLES = ((_('Profit'), 'profit_account_id'), (_('Loss'), 'loss_account_id'),)
        for obj in self.browse(cr, uid, ids, context=context):
            if obj.difference == 0.0:
                continue
            elif obj.difference < 0.0:
                account = obj.journal_id.loss_account_id
                name = _('Loss')
                if not obj.journal_id.loss_account_id:
                    raise osv.except_osv(_('Error!'), _('There is no Loss Account on the journal %s.') % (obj.journal_id.name,))
            else: # obj.difference > 0.0
                account = obj.journal_id.profit_account_id
                name = _('Profit')
                if not obj.journal_id.profit_account_id:
                    raise osv.except_osv(_('Error!'), _('There is no Profit Account on the journal %s.') % (obj.journal_id.name,))
            values = {
                'statement_id' : obj.id,
                'journal_id' : obj.journal_id.id,
                'account_id' : account.id,
                'amount' : obj.difference,
                'name' : name,
            }
            absl_proxy.create(cr, uid, values, context=context)
        return super(account_cash_statement, self).button_confirm_bank(cr, uid, ids, context=context)
class account_journal(osv.osv):
    # Extends journals with the configurable list of cashbox denominations
    # used to pre-fill cash statements.
    _inherit = 'account.journal'
    def _default_cashbox_line_ids(self, cr, uid, context=None):
        # Return a list of coins in Euros.
        result = [
            dict(pieces=value) for value in [0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500]
        ]
        return result
    _columns = {
        'cashbox_line_ids' : fields.one2many('account.journal.cashbox.line', 'journal_id', 'CashBox', copy=True),
    }
    _defaults = {
        'cashbox_line_ids' : _default_cashbox_line_ids,
    }
class account_journal_cashbox_line(osv.osv):
    # One denomination (coin/note value) configured on a journal's cashbox.
    _name = 'account.journal.cashbox.line'
    _rec_name = 'pieces'
    _columns = {
        'pieces': fields.float('Values', digits_compute=dp.get_precision('Account')),
        'journal_id' : fields.many2one('account.journal', 'Journal', required=True, select=1, ondelete="cascade"),
    }
    _order = 'pieces asc'
| agpl-3.0 |
gordon-/naobot | plugins/learn.py | 2 | 3503 | # -*- coding: utf-8 -*-
from stdPlugin import stdPlugin
from lib.markov import Markov
class learn(stdPlugin):
    u'''Apprend continuellement les mots utilisés sur un canal, et génère des
    phrases aléatoires et stupides.
    '''
    # (French docstring kept verbatim -- it is plausibly surfaced as
    # user-facing help by the plugin framework; TODO confirm. Translation:
    # "Continuously learns the words used on a channel and generates random,
    # silly sentences.")
    # Event subscriptions consumed by the stdPlugin framework; 'run' carries
    # a (300, 30000) frequency tuple -- presumably a random interval range in
    # seconds; confirm in stdPlugin.
    events = {'pubmsg': {'exclusive': False, 'command_namespace': 'say'},
              'privmsg': {'exclusive': False, 'command_namespace': 'say'},
              'action': {'exclusive': False},
              'join': {'exclusive': False},
              'run': {'frequency': (300, 30000)},
              }
    # NOTE(review): class-level attribute, so the Markov chain is shared by
    # every instance of this plugin.
    markov = Markov()
    def __init__(self, bot, conf):
        return_val = super(learn, self).__init__(bot, conf)
        # Load one dictionary per channel: use the configured channel list
        # until the bot has actually joined channels.
        chans = self.bot.conf['chans'] if not self.bot.channels else \
            self.bot.channels
        for chan in chans:
            self.get_dico(chan)
        return return_val
    def parse(self, chan, message):
        # Feed the sentence into the per-channel Markov model and persist it.
        self.markov.add_sentence(chan, message)
        self.save_dico(chan)
    def on_pubmsg(self, serv, ev, helper):
        # Learn from every public message; False = do not consume the event.
        self.parse(helper['target'], helper['message'])
        return False
    def on_privmsg(self, serv, ev, helper):
        self.parse(helper['target'], helper['message'])
        return False
    def on_action(self, serv, ev, helper):
        self.parse(helper['target'], helper['message'])
        return False
    def on_join(self, serv, ev, helper):
        if helper['sender'] == serv.username:  # if it is our own join
            # Load the dictionary for the channel we just joined.
            self.get_dico(helper['target'])
            return False
        else:
            return False
    def on_cmd(self, serv, ev, command, args, helper):
        u'''%(namespace)s sentence : génère une phrase aléatoire.
        %(namespace)s sentence <mot> : génère une phrase aléatoire contenant
        le mot donné, s’il est connu.
        %(namespace)s stats : indique le nombre de mots connus pour le canal
        courant'''
        # (Docstring is the user-visible command help, in French; kept as-is.)
        if command == 'sentence':
            if len(args) == 0:
                serv.privmsg(helper['target'], self.markov.
                             get_sentence(helper['target']))
                return True
            else:
                # Seed the generated sentence with the requested word.
                serv.privmsg(helper['target'], self.markov.
                             get_sentence(helper['target'], args[0]))
                return True
        # elif command == 'save':
        #     if self.save_dico(helper['target']):
        #         serv.privmsg(helper['target'], u'Dictionnaire sauvegardé : '
        #                      '%d mots' % self.get_stats(helper['target']))
        #         return True
        #     else:
        #         serv.privmsg(helper['target'], u'Erreur lors de la '
        #                      'sauvegarde du dictionnaire !')
        #         return True
        elif command == 'stats':
            serv.privmsg(helper['target'], u'Mot connus : %d' % self.markov.
                         get_stats(helper['target']))
            return True
        else:
            serv.privmsg(helper['target'], u'Je ne connais pas cette '
                         'commande.')
            return True
    def get_dico(self, chan):
        # Restore the channel's Markov data from the bot's config store.
        data = self.bot.get_config(self, chan, self.markov.default_data())
        self.markov.load(chan, data)
    def save_dico(self, chan):
        # Dump the channel's Markov data back into the bot's config store.
        data = self.markov.dump(chan)
        return self.bot.write_config(self, chan, data)
    def on_run(self, serv, helper):
        # Periodic callback: say a random sentence on the channel.
        serv.privmsg(helper['target'], self.markov.
                     get_sentence(helper['target']))
ofrobots/grpc | src/python/grpcio_health_checking/grpc/health/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
zooba/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32/lib/win32rcparser.py | 6 | 21179 | # Windows dialog .RC file parser, by Adam Walker.
# This module was adapted from the spambayes project, and is Copyright
# 2003/2004 The Python Software Foundation and is covered by the Python
# Software Foundation license.
"""
This is a parser for Windows .rc files, which are text files which define
dialogs and other Windows UI resources.
"""
__author__="Adam Walker"
__version__="0.11"
import sys, os, shlex, stat
import pprint
import win32con
import commctrl
# Maps .rc control keywords to the predefined dialog window-class atoms
# (0x80 button, 0x81 edit, 0x82 static, 0x83 listbox, 0x84 scrollbar,
# 0x85 combobox) or, for rich edits, the window class name string.
_controlMap = {"DEFPUSHBUTTON":0x80,
               "PUSHBUTTON":0x80,
               "Button":0x80,
               "GROUPBOX":0x80,
               "Static":0x82,
               "CTEXT":0x82,
               "RTEXT":0x82,
               "LTEXT":0x82,
               "LISTBOX":0x83,
               "SCROLLBAR":0x84,
               "COMBOBOX":0x85,
               "EDITTEXT":0x81,
               "ICON":0x82,
               "RICHEDIT":"RichEdit20A"
               }
# These are "default styles" for certain controls - ie, Visual Studio assumes
# the styles will be applied, and emits a "NOT {STYLE_NAME}" if it is to be
# disabled.  These defaults have been determined by experimentation, so may
# not be completely accurate (most notably, some styles and/or control-types
# may be missing.
_addDefaults = {"EDITTEXT":win32con.WS_BORDER | win32con.WS_TABSTOP,
                "GROUPBOX":win32con.BS_GROUPBOX,
                "LTEXT":win32con.SS_LEFT,
                "DEFPUSHBUTTON":win32con.BS_DEFPUSHBUTTON | win32con.WS_TABSTOP,
                "PUSHBUTTON": win32con.WS_TABSTOP,
                "CTEXT":win32con.SS_CENTER,
                "RTEXT":win32con.SS_RIGHT,
                "ICON":win32con.SS_ICON,
                "LISTBOX":win32con.LBS_NOTIFY,
                }
# Base style applied to every parsed control unless explicitly overridden.
defaultControlStyle = win32con.WS_CHILD | win32con.WS_VISIBLE
defaultControlStyleEx = 0
class DialogDef:
    """One DIALOG/DIALOGEX resource: geometry, styles, caption, font and the
    list of ControlDef objects parsed from its BEGIN/END body."""

    # Class-level defaults; the parser overwrites these per instance.
    name = ""
    id = 0
    style = 0
    styleEx = None
    caption = ""
    font = "MS Sans Serif"
    fontSize = 8
    x = 0
    y = 0
    w = 0
    h = 0
    template = None  # filled in (and cached) by createDialogTemplate()

    def __init__(self, n, i):
        """Create a dialog definition named *n* with numeric resource id *i*."""
        self.name = n
        self.id = i
        self.styles = []
        self.stylesEx = []
        self.controls = []

    def createDialogTemplate(self):
        """Build, cache on ``self.template`` and return a win32-style dialog
        template: a header entry ``[caption, (x, y, w, h), style, styleEx,
        (fontSize, font)]`` followed by one entry per control."""
        # (Removed an unused local `t = None` and a stale commented-out
        # Python-2 debug print.)
        self.template = [[self.caption,
                          (self.x, self.y, self.w, self.h),
                          self.style, self.styleEx,
                          (self.fontSize, self.font)]
                         ]
        # Append the template entry of every parsed control, in order.
        for control in self.controls:
            self.template.append(control.createDialogTemplate())
        return self.template
class ControlDef:
    """A single control parsed from a dialog's BEGIN/END body."""

    # Class-level defaults; the parser fills in real values per instance.
    id = ""
    controlType = ""
    subType = ""
    idNum = 0
    style = defaultControlStyle
    styleEx = defaultControlStyleEx
    label = ""
    x = 0
    y = 0
    w = 0
    h = 0

    def __init__(self):
        self.styles = []
        self.stylesEx = []

    def toString(self):
        """Return a one-line debug dump of this control definition."""
        # Same layout as the historical hand-concatenated version.
        return ("<Control id:%s controlType:%s subType:%s idNum:%s style:%s"
                " styles:%s label:%s x:%s y:%s w:%s h:%s>") % (
            self.id, self.controlType, self.subType, self.idNum,
            self.style, self.styles, self.label,
            self.x, self.y, self.w, self.h)

    def createDialogTemplate(self):
        """Return this control's entry for a win32 dialog template."""
        kind = self.controlType
        # Generic CONTROL statements carry the real class in subType.
        if kind == "CONTROL":
            kind = self.subType
        # Translate known .rc keywords to window-class atoms/names.
        kind = _controlMap.get(kind, kind)
        return [kind, self.label, self.idNum,
                (self.x, self.y, self.w, self.h), self.style, self.styleEx]
class StringDef:
    """One STRINGTABLE entry: symbolic id, numeric id and the text value."""

    def __init__(self, id, idNum, value):
        # Attributes are read directly by consumers - keep the names public.
        self.id = id
        self.idNum = idNum
        self.value = value

    def __repr__(self):
        return "StringDef({0!r}, {1!r}, {2!r})".format(
            self.id, self.idNum, self.value)
class RCParser:
    """Parses a Windows .rc file into dialog templates, string tables and
    bitmap/icon filename maps."""
    # NOTE(review): these are mutable CLASS attributes -- `dialogs` and
    # `_dialogs` are shared by every RCParser instance because __init__
    # never rebinds them.
    next_id = 1001       # next auto-allocated numeric resource id
    dialogs = {}         # dialog name -> finished dialog template
    _dialogs = {}        # dialog name -> DialogDef
    debugEnabled = False
    token = ""           # current lexer token ("" before parsing, None at EOF)
    def __init__(self):
        self.ungot = False              # True when ungetToken() pushed a token back
        # Symbol tables: name -> numeric id, and the reverse mapping.
        self.ids = {"IDC_STATIC": -1}
        self.names = {-1:"IDC_STATIC"}
        self.bitmaps = {}               # resource name -> bitmap filename
        self.stringTable = {}           # string id -> StringDef
        self.icons = {}                 # resource name -> icon filename
def debug(self, *args):
if self.debugEnabled:
print(args)
    def getToken(self):
        """Advance to and return the next token, or None at end of input.

        If ungetToken() was called since the last read, re-deliver the
        current token instead of consuming a new one.
        """
        if self.ungot:
            self.ungot = False
            self.debug("getToken returns (ungot):", self.token)
            return self.token
        self.token = self.lex.get_token()
        self.debug("getToken returns:", self.token)
        # shlex signals end-of-stream with "" - normalise that to None.
        if self.token=="":
            self.token = None
        return self.token
    def ungetToken(self):
        # Push the current token back: the next getToken() returns it again.
        self.ungot = True
    def getCheckToken(self, expected):
        """Consume the next token and require it to equal *expected*.

        NOTE(review): uses ``assert`` for the validation, so the check (and
        its AssertionError) disappears under ``python -O``.
        """
        tok = self.getToken()
        assert tok == expected, "Expected token '%s', but got token '%s'!" % (expected, tok)
        return tok
    def getCommaToken(self):
        # Commas separate most .rc fields; fail loudly if one is missing.
        return self.getCheckToken(",")
# Return the *current* token as a number, only consuming a token
# if it is the negative-sign.
def currentNumberToken(self):
mult = 1
if self.token=='-':
mult = -1
self.getToken()
return int(self.token) * mult
# Return the *current* token as a string literal (ie, self.token will be a
# quote. consumes all tokens until the end of the string
def currentQuotedString(self):
# Handle quoted strings - pity shlex doesn't handle it.
assert self.token.startswith('"'), self.token
bits = [self.token]
while 1:
tok = self.getToken()
if not tok.startswith('"'):
self.ungetToken()
break
bits.append(tok)
sval = "".join(bits)[1:-1] # Remove end quotes.
# Fixup quotes in the body, and all (some?) quoted characters back
# to their raw value.
for i, o in ('""', '"'), ("\\r", "\r"), ("\\n", "\n"), ("\\t", "\t"):
sval = sval.replace(i, o)
return sval
    def load(self, rcstream):
        """Parse *rcstream* (a file-like object of .rc text) to exhaustion.

        Afterwards the parsed resources are available on the instance:
        ``dialogs`` maps dialog name -> dialog template, ``ids`` maps
        symbolic name -> numeric id and ``names`` maps numeric id -> name.
        """
        self.open(rcstream)
        self.getToken()
        # Each parse() call consumes one top-level resource definition.
        while self.token!=None:
            self.parse()
            self.getToken()
    def open(self, rcstream):
        # Build the tokenizer. shlex treats each character of `commenters`
        # as a comment starter, so this enables '/' and '#' comments.
        self.lex = shlex.shlex(rcstream)
        self.lex.commenters = "//#"
    def parseH(self, file):
        """Parse a resource header (.h) file, harvesting ``#define NAME NUM``
        pairs into self.ids/self.names and advancing self.next_id past the
        largest numeric id seen."""
        lex = shlex.shlex(file)
        lex.commenters = "//"
        token = " "
        while token is not None:
            token = lex.get_token()
            if token == "" or token is None:
                token = None
            else:
                # '#' starts a comment for shlex here is NOT set, so the
                # 'define' word itself is the marker we key on.
                if token=='define':
                    n = lex.get_token()
                    i = int(lex.get_token())
                    self.ids[n] = i
                    if i in self.names:
                        # Dupe ID really isn't a problem - most consumers
                        # want to go from name->id, and this is OK.
                        # It means you can't go from id->name though.
                        pass
                        # ignore AppStudio special ones
                        #if not n.startswith("_APS_"):
                        #    print "Duplicate id",i,"for",n,"is", self.names[i]
                    else:
                        self.names[i] = n
                    if self.next_id<=i:
                        self.next_id = i+1
    def parse(self):
        """Dispatch one top-level resource statement.

        STRINGTABLE is handled directly; otherwise the current token is
        taken as a resource id and the following keyword (DIALOG/DIALOGEX/
        BITMAP/ICON) selects the parser. Unrecognised BEGIN...END sections
        are skipped wholesale.
        """
        noid_parsers = {
            "STRINGTABLE": self.parse_stringtable,
        }
        id_parsers = {
            "DIALOG" : self.parse_dialog,
            "DIALOGEX": self.parse_dialog,
            # "TEXTINCLUDE": self.parse_textinclude,
            "BITMAP": self.parse_bitmap,
            "ICON": self.parse_icon,
        }
        deep = 0
        base_token = self.token
        rp = noid_parsers.get(base_token)
        if rp is not None:
            rp()
        else:
            # Not something we parse that isn't prefixed by an ID
            # See if it is an ID prefixed item - if it is, our token
            # is the resource ID.
            resource_id = self.token
            self.getToken()
            if self.token is None:
                return
            if "BEGIN" == self.token:
                # A 'BEGIN' for a structure we don't understand - skip to the
                # matching 'END'
                deep = 1
                while deep!=0 and self.token is not None:
                    self.getToken()
                    self.debug("Zooming over", self.token)
                    if "BEGIN" == self.token:
                        deep += 1
                    elif "END" == self.token:
                        deep -= 1
            else:
                rp = id_parsers.get(self.token)
                if rp is not None:
                    self.debug("Dispatching '%s'" % (self.token,))
                    rp(resource_id)
                else:
                    # We don't know what the resource type is, but we
                    # have already consumed the next, which can cause problems,
                    # so push it back.
                    self.debug("Skipping top-level '%s'" % base_token)
                    self.ungetToken()
def addId(self, id_name):
if id_name in self.ids:
id = self.ids[id_name]
else:
# IDOK, IDCANCEL etc are special - if a real resource has this value
for n in ["IDOK","IDCANCEL","IDYES","IDNO", "IDABORT"]:
if id_name == n:
v = getattr(win32con, n)
self.ids[n] = v
self.names[v] = n
return v
id = self.next_id
self.next_id += 1
self.ids[id_name] = id
self.names[id] = id_name
return id
    def lang(self):
        # Skip LANGUAGE/SUBLANG declarations and their comma separators.
        while self.token[0:4]=="LANG" or self.token[0:7]=="SUBLANG" or self.token==',':
            self.getToken();
    def parse_textinclude(self, res_id):
        """Consume a TEXTINCLUDE section through its END; content discarded.

        (Currently unused - its entry in parse()'s dispatch table is
        commented out.)
        """
        while self.getToken() != "BEGIN":
            pass
        while 1:
            if self.token == "END":
                break
            s = self.getToken()  # value intentionally unused - tokens are skipped
    def parse_stringtable(self):
        """Parse a STRINGTABLE section into self.stringTable (id -> StringDef),
        allocating numeric ids for the symbolic string names as needed."""
        while self.getToken() != "BEGIN":
            pass
        while 1:
            self.getToken()
            if self.token == "END":
                break
            sid = self.token
            # Advance onto the quoted value before collecting it.
            self.getToken()
            sd = StringDef(sid, self.addId(sid), self.currentQuotedString())
            self.stringTable[sid] = sd
    def parse_bitmap(self, name):
        # BITMAP resources share the filename-extraction logic with icons.
        return self.parse_bitmap_or_icon(name, self.bitmaps)
    def parse_icon(self, name):
        # ICON resources share the filename-extraction logic with bitmaps.
        return self.parse_bitmap_or_icon(name, self.icons)
    def parse_bitmap_or_icon(self, name, dic):
        """Record *name* -> filename in *dic*, taking the first quoted token
        on the resource line as the filename."""
        self.getToken()
        while not self.token.startswith('"'):
            self.getToken()
        bmf = self.token[1:-1] # quotes
        dic[name] = bmf
    def parse_dialog(self, name):
        """Parse a DIALOG/DIALOGEX resource named *name*: its geometry,
        optional STYLE/EXSTYLE/CAPTION/FONT statements and the BEGIN...END
        control list. Stores the DialogDef in self._dialogs and the finished
        template in self.dialogs."""
        dlg = DialogDef(name,self.addId(name))
        assert len(dlg.controls)==0
        self._dialogs[name] = dlg
        extras = []
        self.getToken()
        # Keywords before the first number (e.g. DISCARDABLE) are ignored.
        while not self.token.isdigit():
            self.debug("extra", self.token)
            extras.append(self.token)
            self.getToken()
        # Geometry: x, y, w, h as comma-separated numbers.
        dlg.x = int(self.token)
        self.getCommaToken()
        self.getToken() # number
        dlg.y = int(self.token)
        self.getCommaToken()
        self.getToken() # number
        dlg.w = int(self.token)
        self.getCommaToken()
        self.getToken() # number
        dlg.h = int(self.token)
        self.getToken()
        # Optional statements, in any order, until the control list or EOF.
        while not (self.token==None or self.token=="" or self.token=="END"):
            if self.token=="STYLE":
                self.dialogStyle(dlg)
            elif self.token=="EXSTYLE":
                self.dialogExStyle(dlg)
            elif self.token=="CAPTION":
                self.dialogCaption(dlg)
            elif self.token=="FONT":
                self.dialogFont(dlg)
            elif self.token=="BEGIN":
                self.controls(dlg)
            else:
                break
        self.dialogs[name] = dlg.createDialogTemplate()
    def dialogStyle(self, dlg):
        # Parse a STYLE statement; DS_SETFONT is the base style passed in.
        dlg.style, dlg.styles = self.styles( [], win32con.DS_SETFONT)
    def dialogExStyle(self, dlg):
        # Parse an EXSTYLE statement; extended styles start from 0.
        self.getToken()
        dlg.styleEx, dlg.stylesEx = self.styles( [], 0)
def styles(self, defaults, defaultStyle):
list = defaults
style = defaultStyle
if "STYLE"==self.token:
self.getToken()
i = 0
Not = False
while ((i%2==1 and ("|"==self.token or "NOT"==self.token)) or (i%2==0)) and not self.token==None:
Not = False;
if "NOT"==self.token:
Not = True
self.getToken()
i += 1
if self.token!="|":
if self.token in win32con.__dict__:
value = getattr(win32con,self.token)
else:
if self.token in commctrl.__dict__:
value = getattr(commctrl,self.token)
else:
value = 0
if Not:
list.append("NOT "+self.token)
self.debug("styles add Not",self.token, value)
style &= ~value
else:
list.append(self.token)
self.debug("styles add", self.token, value)
style |= value
self.getToken()
self.debug("style is ",style)
return style, list
def dialogCaption(self, dlg):
if "CAPTION"==self.token:
self.getToken()
self.token = self.token[1:-1]
self.debug("Caption is:",self.token)
dlg.caption = self.token
self.getToken()
def dialogFont(self, dlg):
if "FONT"==self.token:
self.getToken()
dlg.fontSize = int(self.token)
self.getCommaToken()
self.getToken() # Font name
dlg.font = self.token[1:-1] # it's quoted
self.getToken()
while "BEGIN"!=self.token:
self.getToken()
    def controls(self, dlg):
        """Parse the BEGIN..END control list of a dialog into dlg.controls."""
        if self.token=="BEGIN": self.getToken()
        # All controls look vaguely like:
        #   TYPE [text, ] Control_id, l, t, r, b [, style]
        # .rc parser documents all control types as:
        # CHECKBOX, COMBOBOX, CONTROL, CTEXT, DEFPUSHBUTTON, EDITTEXT, GROUPBOX,
        # ICON, LISTBOX, LTEXT, PUSHBUTTON, RADIOBUTTON, RTEXT, SCROLLBAR
        without_text = ["EDITTEXT", "COMBOBOX", "LISTBOX", "SCROLLBAR"]
        while self.token!="END":
            control = ControlDef()
            control.controlType = self.token;
            self.getToken()
            # Controls other than the 'without_text' types carry a label
            # (window text) before the control id.
            if control.controlType not in without_text:
                if self.token[0:1]=='"':
                    control.label = self.currentQuotedString()
                # Some funny controls, like icons and picture controls use
                # the "window text" as extra resource ID (ie, the ID of the
                # icon itself). This may be either a literal, or an ID string.
                elif self.token=="-" or self.token.isdigit():
                    control.label = str(self.currentNumberToken())
                else:
                    # An ID - use the numeric equiv.
                    control.label = str(self.addId(self.token))
                self.getCommaToken()
                self.getToken()
            # Control IDs may be "names" or literal ints
            if self.token=="-" or self.token.isdigit():
                control.id = self.currentNumberToken()
                control.idNum = control.id
            else:
                # name of an ID
                control.id = self.token
                control.idNum = self.addId(control.id)
            self.getCommaToken()
            # 'CONTROL' entries have an extra quoted window-class subtype
            # followed immediately by a style expression.
            if control.controlType == "CONTROL":
                self.getToken()
                control.subType = self.token[1:-1]
                thisDefaultStyle = defaultControlStyle | \
                                   _addDefaults.get(control.subType, 0)
                # Styles
                self.getCommaToken()
                self.getToken()
                control.style, control.styles = self.styles([], thisDefaultStyle)
            else:
                thisDefaultStyle = defaultControlStyle | \
                                   _addDefaults.get(control.controlType, 0)
                # incase no style is specified.
                control.style = thisDefaultStyle
            # Rect
            control.x = int(self.getToken())
            self.getCommaToken()
            control.y = int(self.getToken())
            self.getCommaToken()
            control.w = int(self.getToken())
            self.getCommaToken()
            self.getToken()
            control.h = int(self.token)
            self.getToken()
            # Optional trailing style and extended-style expressions.
            if self.token==",":
                self.getToken()
                control.style, control.styles = self.styles([], thisDefaultStyle)
            if self.token==",":
                self.getToken()
                control.styleEx, control.stylesEx = self.styles([], defaultControlStyleEx)
            #print control.toString()
            dlg.controls.append(control)
def ParseStreams(rc_file, h_file):
    """Parse already-open .rc (and optional .h) streams.

    Returns the populated RCParser; on a parse error, prints some lexer
    context for diagnosis and re-raises the original exception.
    """
    parser = RCParser()
    if h_file:
        parser.parseH(h_file)
    try:
        parser.load(rc_file)
    except:
        # Show where the lexer stopped, then propagate the failure.
        lexer = getattr(parser, "lex", None)
        if lexer:
            print("ERROR parsing dialogs at line", lexer.lineno)
            print("Next 10 tokens are:")
            for _ in range(10):
                print(lexer.get_token(), end=' ')
            print()
        raise
    return parser
def Parse(rc_name, h_name = None):
    """Parse a .rc file (and its .h header, if one can be found).

    If *h_name* is not given, a header with the same basename as the .rc
    file is tried first, then MSVC's default 'resource.h' in the same
    directory.  The header is optional.  Returns the populated RCParser.
    """
    # NOTE(review): mode "rU" is deprecated and removed in Python 3.11;
    # plain "r" is equivalent on Python 3 — confirm target version.
    if h_name:
        h_file = open(h_name, "rU")
    else:
        # See if same basename as the .rc
        h_name = rc_name[:-2]+"h"
        try:
            h_file = open(h_name, "rU")
        except IOError:
            # See if MSVC default of 'resource.h' in the same dir.
            h_name = os.path.join(os.path.dirname(rc_name), "resource.h")
            try:
                h_file = open(h_name, "rU")
            except IOError:
                # .h files are optional anyway
                h_file = None
    rc_file = open(rc_name, "rU")
    try:
        return ParseStreams(rc_file, h_file)
    finally:
        if h_file is not None:
            h_file.close()
        rc_file.close()
    # Bug fix: a trailing unreachable 'return rcp' (an undefined name)
    # was removed here.
def GenerateFrozenResource(rc_name, output_name, h_name = None):
    """Converts an .rc windows resource source file into a python source file
    with the same basic public interface as the rest of this module.
    Particularly useful for py2exe or other 'freeze' type solutions,
    where a frozen .py file can be used inplace of a real .rc file.
    """
    # NOTE(review): relies on module-level 'os', 'stat' and 'pprint'
    # imports not visible in this chunk — confirm they exist at file top.
    rcp = Parse(rc_name, h_name)
    in_stat = os.stat(rc_name)
    # 'with' guarantees the output file is closed even if a write fails
    # (the original leaked the handle on exceptions).
    with open(output_name, "wt") as out:
        out.write("#%s\n" % output_name)
        out.write("#This is a generated file. Please edit %s instead.\n" % rc_name)
        out.write("__version__=%r\n" % __version__)
        # Size/mtime let consumers detect a stale frozen file.
        out.write("_rc_size_=%d\n_rc_mtime_=%d\n" % (in_stat[stat.ST_SIZE], in_stat[stat.ST_MTIME]))
        # Emit a minimal StringDef so the frozen file is self-contained.
        out.write("class StringDef:\n")
        out.write("\tdef __init__(self, id, idNum, value):\n")
        out.write("\t\tself.id = id\n")
        out.write("\t\tself.idNum = idNum\n")
        out.write("\t\tself.value = value\n")
        out.write("\tdef __repr__(self):\n")
        out.write("\t\treturn \"StringDef(%r, %r, %r)\" % (self.id, self.idNum, self.value)\n")
        out.write("class FakeParser:\n")
        # Freeze each parsed table as a class attribute literal.
        for name in "dialogs", "ids", "names", "bitmaps", "icons", "stringTable":
            out.write("\t%s = \\\n" % (name,))
            pprint.pprint(getattr(rcp, name), out)
            out.write("\n")
        out.write("def Parse(s):\n")
        out.write("\treturn FakeParser()\n")
# Command-line entry point: dump all resources found in the given .rc file.
if __name__=='__main__':
    if len(sys.argv) <= 1:
        print(__doc__)
        print()
        print("See test_win32rcparser.py, and the win32rcparser directory (both")
        print("in the test suite) for an example of this module's usage.")
    else:
        import pprint
        filename = sys.argv[1]
        # '-v' anywhere on the command line enables parser debug output.
        if "-v" in sys.argv:
            RCParser.debugEnabled = 1
        print("Dumping all resources in '%s'" % filename)
        resources = Parse(filename)
        for id, ddef in resources.dialogs.items():
            print("Dialog %s (%d controls)" % (id, len(ddef)))
            pprint.pprint(ddef)
            print()
        for id, sdef in resources.stringTable.items():
            print("String %s=%r" % (id, sdef.value))
            print()
        for id, sdef in resources.bitmaps.items():
            print("Bitmap %s=%r" % (id, sdef))
            print()
        for id, sdef in resources.icons.items():
            print("Icon %s=%r" % (id, sdef))
            print()
| apache-2.0 |
sgerhart/ansible | lib/ansible/modules/storage/zfs/zfs.py | 55 | 9554 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2013, Johan Wiren <johan.wiren.se@gmail.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata consumed by ansible-doc tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zfs
short_description: Manage zfs
description:
- Manages ZFS file systems, volumes, clones and snapshots
version_added: "1.1"
options:
name:
description:
- File system, snapshot or volume name e.g. C(rpool/myfs).
required: true
state:
description:
- Whether to create (C(present)), or remove (C(absent)) a
file system, snapshot or volume. All parents/children
will be created/destroyed as needed to reach the desired state.
choices: [ absent, present ]
required: true
origin:
description:
- Snapshot from which to create a clone.
key_value:
description:
- (**DEPRECATED**) This will be removed in Ansible-2.9. Set these values in the
- C(extra_zfs_properties) option instead.
- The C(zfs) module takes key=value pairs for zfs properties to be set.
- See the zfs(8) man page for more information.
extra_zfs_properties:
description:
- A dictionary of zfs properties to be set.
- See the zfs(8) man page for more information.
version_added: "2.5"
author:
- Johan Wiren (@johanwiren)
'''
EXAMPLES = '''
- name: Create a new file system called myfs in pool rpool with the setuid property turned off
zfs:
name: rpool/myfs
state: present
extra_zfs_properties:
setuid: off
- name: Create a new volume called myvol in pool rpool.
zfs:
name: rpool/myvol
state: present
extra_zfs_properties:
volsize: 10M
- name: Create a snapshot of rpool/myfs file system.
zfs:
name: rpool/myfs@mysnapshot
state: present
- name: Create a new file system called myfs2 with snapdir enabled
zfs:
name: rpool/myfs2
state: present
extra_zfs_properties:
snapdir: enabled
- name: Create a new file system by cloning a snapshot
zfs:
name: rpool/cloned_fs
state: present
origin: rpool/myfs@mysnapshot
- name: Destroy a filesystem
zfs:
name: rpool/myfs
state: absent
'''
import os
from ansible.module_utils.basic import AnsibleModule
class Zfs(object):
    """Thin wrapper around the 'zfs'/'zpool' CLI for a single dataset,
    volume or snapshot name.  Honours the module's check mode and reports
    command failures via fail_json()."""

    def __init__(self, module, name, properties):
        self.module = module          # AnsibleModule instance
        self.name = name              # dataset/volume/snapshot name
        self.properties = properties  # zfs properties to apply
        self.changed = False
        self.zfs_cmd = module.get_bin_path('zfs', True)
        self.zpool_cmd = module.get_bin_path('zpool', True)
        # The pool is the first path component of the dataset name.
        self.pool = name.split('/')[0]
        self.is_solaris = os.uname()[0] == 'SunOS'
        self.is_openzfs = self.check_openzfs()
        self.enhanced_sharing = self.check_enhanced_sharing()

    def check_openzfs(self):
        """Return True if the pool reports an OpenZFS version ('-' or 5000)."""
        cmd = [self.zpool_cmd]
        cmd.extend(['get', 'version'])
        cmd.append(self.pool)
        (rc, out, err) = self.module.run_command(cmd, check_rc=True)
        # Third column of the last output line holds the version value.
        version = out.splitlines()[-1].split()[2]
        if version == '-':
            return True
        if int(version) == 5000:
            return True
        return False

    def check_enhanced_sharing(self):
        """Return True on legacy Solaris ZFS with pool version >= 34, where
        sharing properties use the 'share.nfs'/'share.smb' names (see
        get_current_properties)."""
        if self.is_solaris and not self.is_openzfs:
            cmd = [self.zpool_cmd]
            cmd.extend(['get', 'version'])
            cmd.append(self.pool)
            (rc, out, err) = self.module.run_command(cmd, check_rc=True)
            version = out.splitlines()[-1].split()[2]
            if int(version) >= 34:
                return True
        return False

    def exists(self):
        """Return True if the dataset/volume/snapshot exists."""
        cmd = [self.zfs_cmd, 'list', '-t', 'all', self.name]
        # NOTE(review): command passed as one shell-style string; names
        # containing shell metacharacters may misbehave — confirm.
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            return True
        else:
            return False

    def create(self):
        """Create the dataset, volume, snapshot or clone (with parents)."""
        if self.module.check_mode:
            self.changed = True
            return
        properties = self.properties
        origin = self.module.params.get('origin', None)
        cmd = [self.zfs_cmd]
        # '@' in the name means snapshot; an origin means clone; otherwise
        # an ordinary create.
        if "@" in self.name:
            action = 'snapshot'
        elif origin:
            action = 'clone'
        else:
            action = 'create'
        cmd.append(action)
        if action in ['create', 'clone']:
            cmd += ['-p']  # create any missing parent datasets
        if properties:
            for prop, value in properties.items():
                # volsize/volblocksize are creation-time flags, not -o pairs.
                if prop == 'volsize':
                    cmd += ['-V', value]
                elif prop == 'volblocksize':
                    cmd += ['-b', value]
                else:
                    cmd += ['-o', '%s="%s"' % (prop, value)]
        if origin and action == 'clone':
            cmd.append(origin)
        cmd.append(self.name)
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def destroy(self):
        """Destroy the dataset and (via -R) all dependents/clones."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'destroy', '-R', self.name]
        (rc, out, err) = self.module.run_command(' '.join(cmd))
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def set_property(self, prop, value):
        """Set a single zfs property on the dataset."""
        if self.module.check_mode:
            self.changed = True
            return
        cmd = [self.zfs_cmd, 'set', prop + '=' + str(value), self.name]
        (rc, out, err) = self.module.run_command(cmd)
        if rc == 0:
            self.changed = True
        else:
            self.module.fail_json(msg=err)

    def set_properties_if_changed(self):
        """Apply only the requested properties that differ from current ones."""
        current_properties = self.get_current_properties()
        for prop, value in self.properties.items():
            if current_properties.get(prop, None) != value:
                self.set_property(prop, value)

    def get_current_properties(self):
        """Return a dict of locally-set properties for the dataset."""
        cmd = [self.zfs_cmd, 'get', '-H']
        if self.enhanced_sharing:
            cmd += ['-e']
        cmd += ['all', self.name]
        rc, out, err = self.module.run_command(" ".join(cmd))
        properties = dict()
        # Output columns are: name, property, value, source.  Only keep
        # properties set locally (not inherited or defaulted).
        for prop, value, source in [l.split('\t')[1:4] for l in out.splitlines()]:
            if source == 'local':
                properties[prop] = value
        # Add alias for enhanced sharing properties
        if self.enhanced_sharing:
            properties['sharenfs'] = properties.get('share.nfs', None)
            properties['sharesmb'] = properties.get('share.smb', None)
        return properties
def main():
    """Ansible module entry point: reconcile the requested zfs state."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'present']),
            origin=dict(type='str', default=None),
            # createparent is meaningless after 2.3, but this shouldn't
            # be removed until check_invalid_arguments is.
            createparent=dict(type='bool', default=None),
            extra_zfs_properties=dict(type='dict', default={}),
        ),
        supports_check_mode=True,
        # Remove this in Ansible 2.9
        check_invalid_arguments=False,
    )
    state = module.params.get('state')
    name = module.params.get('name')
    # A clone origin only makes sense for filesystems, never snapshots.
    if module.params.get('origin') and '@' in name:
        module.fail_json(msg='cannot specify origin when operating on a snapshot')
    # The following is deprecated. Remove in Ansible 2.9
    # Get all valid zfs-properties
    properties = dict()
    for prop, value in module.params.items():
        # All freestyle params are zfs properties
        if prop not in module.argument_spec:
            # Booleans become the on/off strings zfs expects.
            if isinstance(value, bool):
                if value is True:
                    properties[prop] = 'on'
                else:
                    properties[prop] = 'off'
            else:
                properties[prop] = value
    if properties:
        module.deprecate('Passing zfs properties as arbitrary parameters to the zfs module is'
                         ' deprecated. Send them as a dictionary in the extra_zfs_properties'
                         ' parameter instead.', version='2.9')
    # Merge, giving the module_params precedence
    for prop, value in module.params['extra_zfs_properties'].items():
        properties[prop] = value
    module.params['extra_zfs_properties'] = properties
    # End deprecated section
    # Reverse the boolification of zfs properties
    for prop, value in module.params['extra_zfs_properties'].items():
        if isinstance(value, bool):
            if value is True:
                module.params['extra_zfs_properties'][prop] = 'on'
            else:
                module.params['extra_zfs_properties'][prop] = 'off'
        else:
            module.params['extra_zfs_properties'][prop] = value
    result = dict(
        name=name,
        state=state,
    )
    zfs = Zfs(module, name, module.params['extra_zfs_properties'])
    # Converge to the requested state.
    if state == 'present':
        if zfs.exists():
            zfs.set_properties_if_changed()
        else:
            zfs.create()
    elif state == 'absent':
        if zfs.exists():
            zfs.destroy()
    result.update(zfs.properties)
    result['changed'] = zfs.changed
    module.exit_json(**result)
# Standard Ansible module invocation guard.
if __name__ == '__main__':
    main()
| mit |
nxnfufunezn/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/mux_client_for_testing.py | 457 | 25761 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing mux extension.
This code should be independent from mod_pywebsocket. See the comment of
client_for_testing.py.
NOTE: This code is far from robust like client_for_testing.py.
"""
import Queue
import base64
import collections
import email
import email.parser
import logging
import math
import os
import random
import socket
import struct
import threading
from mod_pywebsocket import util
from test import client_for_testing
# Channel 0 carries mux control blocks; channel 1 is the default logical
# channel implicitly opened with the physical connection.
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
# Opcodes of multiplexing control blocks (WebSocket mux extension draft).
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
class _ControlBlock:
def __init__(self, opcode):
self.opcode = opcode
def _parse_handshake_response(response):
status_line, header_lines = response.split('\r\n', 1)
words = status_line.split(' ')
if len(words) < 3:
raise ValueError('Bad Status-Line syntax %r' % status_line)
[version, response_code] = words[:2]
if version != 'HTTP/1.1':
raise ValueError('Bad response version %r' % version)
if response_code != '101':
raise ValueError('Bad response code %r ' % response_code)
headers = email.parser.Parser().parsestr(header_lines)
return headers
def _parse_channel_id(data, offset=0):
length = len(data)
remaining = length - offset
if remaining <= 0:
raise Exception('No channel id found')
channel_id = ord(data[offset])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining < 4:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!L',
data[offset:offset+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining < 3:
raise Exception('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', data[offset+1:offset+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining < 2:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!H', data[offset:offset+2])[0] & 0x3fff
channel_id_length = 2
return channel_id, channel_id_length
def _parse_number(data, offset=0):
first_byte = ord(data[offset])
if (first_byte & 0x80) != 0:
raise Exception('The MSB of number field must be unset')
first_byte = first_byte & 0x7f
if first_byte == 127:
if offset + 9 > len(data):
raise Exception('Invalid number')
return struct.unpack('!Q', data[offset+1:offset+9])[0], 9
if first_byte == 126:
if offset + 3 > len(data):
raise Exception('Invalid number')
return struct.unpack('!H', data[offset+1:offset+3])[0], 3
return first_byte, 1
def _parse_size_and_contents(data, offset=0):
    """Read a length-prefixed blob at *offset*.

    Returns (contents, total_bytes_consumed) where the total includes the
    size field itself.
    """
    size, advance = _parse_number(data, offset)
    start = offset + advance
    end = start + size
    if len(data) < end:
        raise Exception('Invalid size of control block (%d < %d)' % (
            len(data), end))
    return data[start:end], size + advance
def _parse_control_blocks(data):
    """Parse the payload of a control-channel frame into _ControlBlocks.

    Supports AddChannelResponse, DropChannel, FlowControl and
    NewChannelSlot; any other opcode raises.
    """
    blocks = []
    length = len(data)
    pos = 0
    while pos < length:
        # Top 3 bits of the first byte select the opcode; low bits carry
        # opcode-specific flags.
        first_byte = ord(data[pos])
        pos += 1
        opcode = (first_byte >> 5) & 0x7
        block = _ControlBlock(opcode)
        # TODO(bashi): Support more opcode
        if opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
            block.encode = first_byte & 3       # handshake encoding
            block.rejected = (first_byte >> 4) & 1
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            encoded_handshake, advance = _parse_size_and_contents(data, pos)
            block.encoded_handshake = encoded_handshake
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_DROP_CHANNEL:
            block.mux_error = (first_byte >> 4) & 1
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            # Reason is empty, or a 2-byte code plus optional message.
            reason, advance = _parse_size_and_contents(data, pos)
            if len(reason) == 0:
                block.drop_code = None
                block.drop_message = ''
            elif len(reason) >= 2:
                block.drop_code = struct.unpack('!H', reason[:2])[0]
                block.drop_message = reason[2:]
            else:
                raise Exception('Invalid DropChannel')
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_FLOW_CONTROL:
            channel_id, advance = _parse_channel_id(data, pos)
            block.channel_id = channel_id
            pos += advance
            send_quota, advance = _parse_number(data, pos)
            block.send_quota = send_quota
            pos += advance
            blocks.append(block)
        elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
            fallback = first_byte & 1
            slots, advance = _parse_number(data, pos)
            pos += advance
            send_quota, advance = _parse_number(data, pos)
            pos += advance
            # The fallback flag mandates zeroed slot/quota fields.
            if fallback == 1 and (slots != 0 or send_quota != 0):
                raise Exception('slots and send_quota must be zero if F bit '
                                'is set')
            block.fallback = fallback
            block.slots = slots
            block.send_quota = send_quota
            blocks.append(block)
        else:
            raise Exception(
                'Unsupported mux opcode %d received' % opcode)
    return blocks
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
def _encode_number(number):
if number <= 125:
return chr(number)
elif number < (1 << 16):
return chr(0x7e) + struct.pack('!H', number)
elif number < (1 << 63):
return chr(0x7f) + struct.pack('!Q', number)
else:
raise Exception('Invalid number')
def _create_add_channel_request(channel_id, encoded_handshake,
                                encoding=0):
    """Build an AddChannelRequest control block for *channel_id*."""
    first_byte = (_MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding
    size_field = _encode_number(len(encoded_handshake))
    return (chr(first_byte) + _encode_channel_id(channel_id) +
            size_field + encoded_handshake)
def _create_flow_control(channel_id, replenished_quota):
    """Build a FlowControl block granting *replenished_quota* bytes."""
    header = chr(_MUX_OPCODE_FLOW_CONTROL << 5)
    return (header + _encode_channel_id(channel_id) +
            _encode_number(replenished_quota))
class _MuxReaderThread(threading.Thread):
    """Mux reader thread.

    Reads frames and passes them to the mux client. This thread accesses
    private functions/variables of the mux client.
    """

    def __init__(self, mux):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self._mux = mux
        self._stop_requested = False

    def _receive_message(self):
        """Read one complete (possibly fragmented) physical message.

        Returns (first_opcode, message), or (None, None) when a stop was
        requested mid-read.
        """
        first_opcode = None
        pending_payload = []
        while not self._stop_requested:
            fin, rsv1, rsv2, rsv3, opcode, payload_length = (
                client_for_testing.read_frame_header(self._mux._socket))
            if not first_opcode:
                # Only binary data and control frames are legal on the
                # physical connection of a mux session.
                if opcode == client_for_testing.OPCODE_TEXT:
                    raise Exception('Received a text message on physical '
                                    'connection')
                if opcode == client_for_testing.OPCODE_CONTINUATION:
                    raise Exception('Received an intermediate frame but '
                                    'fragmentation was not started')
                # NOTE(review): OPCODE_PONG appears twice below; one of the
                # two was presumably meant to be OPCODE_PING — confirm.
                if (opcode == client_for_testing.OPCODE_BINARY or
                    opcode == client_for_testing.OPCODE_PONG or
                    opcode == client_for_testing.OPCODE_PONG or
                    opcode == client_for_testing.OPCODE_CLOSE):
                    first_opcode = opcode
                else:
                    raise Exception('Received an undefined opcode frame: %d' %
                                    opcode)
            elif opcode != client_for_testing.OPCODE_CONTINUATION:
                raise Exception('Received a new opcode before '
                                'terminating fragmentation')
            payload = client_for_testing.receive_bytes(
                self._mux._socket, payload_length)
            if self._mux._incoming_frame_filter is not None:
                payload = self._mux._incoming_frame_filter.filter(payload)
            pending_payload.append(payload)
            if fin:
                break
        if self._stop_requested:
            return None, None
        message = ''.join(pending_payload)
        return first_opcode, message

    def request_stop(self):
        # Polled by the read loops; takes effect at the next frame boundary.
        self._stop_requested = True

    def run(self):
        try:
            while not self._stop_requested:
                # opcode is OPCODE_BINARY or control opcodes when a message
                # is succesfully received.
                opcode, message = self._receive_message()
                if not opcode:
                    return
                if opcode == client_for_testing.OPCODE_BINARY:
                    # Binary frames carry logical-channel traffic: a channel
                    # id prefix followed by the encapsulated frame.
                    channel_id, advance = _parse_channel_id(message)
                    self._mux._dispatch_frame(channel_id, message[advance:])
                else:
                    self._mux._process_control_message(opcode, message)
        finally:
            self._mux._notify_reader_done()
class _InnerFrame(object):
def __init__(self, fin, rsv1, rsv2, rsv3, opcode, payload):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
class _LogicalChannelData(object):
    """Per-logical-channel state: inbound frame queue and flow-control quotas."""

    def __init__(self):
        # Frames received on this channel, consumed by assert_receive*().
        self.queue = Queue.Queue()
        # Bytes we may still send before the server grants more quota.
        self.send_quota = 0
        # Bytes the server may still send before we replenish its quota.
        self.receive_quota = 0
class MuxClient(object):
"""WebSocket mux client.
Note that this class is NOT thread-safe. Do not access an instance of this
class from multiple threads at a same time.
"""
def __init__(self, options):
self._logger = util.get_class_logger(self)
self._options = options
self._options.enable_mux()
self._stream = None
self._socket = None
self._handshake = client_for_testing.WebSocketHandshake(self._options)
self._incoming_frame_filter = None
self._outgoing_frame_filter = None
self._is_active = False
self._read_thread = None
self._control_blocks_condition = threading.Condition()
self._control_blocks = []
self._channel_slots = collections.deque()
self._logical_channels_condition = threading.Condition();
self._logical_channels = {}
self._timeout = 2
self._physical_connection_close_event = None
self._physical_connection_close_message = None
def _parse_inner_frame(self, data):
if len(data) == 0:
raise Exception('Invalid encapsulated frame received')
first_byte = ord(data[0])
fin = (first_byte << 7) & 1
rsv1 = (first_byte << 6) & 1
rsv2 = (first_byte << 5) & 1
rsv3 = (first_byte << 4) & 1
opcode = first_byte & 0xf
if self._outgoing_frame_filter:
payload = self._outgoing_frame_filter.filter(
data[1:])
else:
payload = data[1:]
return _InnerFrame(fin, rsv1, rsv2, rsv3, opcode, payload)
def _process_mux_control_blocks(self):
for block in self._control_blocks:
if block.opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
# AddChannelResponse will be handled in add_channel().
continue
elif block.opcode == _MUX_OPCODE_FLOW_CONTROL:
try:
self._logical_channels_condition.acquire()
if not block.channel_id in self._logical_channels:
raise Exception('Invalid flow control received for '
'channel id %d' % block.channel_id)
self._logical_channels[block.channel_id].send_quota += (
block.send_quota)
self._logical_channels_condition.notify()
finally:
self._logical_channels_condition.release()
elif block.opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
self._channel_slots.extend([block.send_quota] * block.slots)
def _dispatch_frame(self, channel_id, payload):
if channel_id == _CONTROL_CHANNEL_ID:
try:
self._control_blocks_condition.acquire()
self._control_blocks += _parse_control_blocks(payload)
self._process_mux_control_blocks()
self._control_blocks_condition.notify()
finally:
self._control_blocks_condition.release()
else:
try:
self._logical_channels_condition.acquire()
if not channel_id in self._logical_channels:
raise Exception('Received logical frame on channel id '
'%d, which is not established' %
channel_id)
inner_frame = self._parse_inner_frame(payload)
self._logical_channels[channel_id].receive_quota -= (
len(inner_frame.payload))
if self._logical_channels[channel_id].receive_quota < 0:
raise Exception('The server violates quota on '
'channel id %d' % channel_id)
finally:
self._logical_channels_condition.release()
self._logical_channels[channel_id].queue.put(inner_frame)
def _process_control_message(self, opcode, message):
# Ping/Pong are not supported.
if opcode == client_for_testing.OPCODE_CLOSE:
self._physical_connection_close_message = message
if self._is_active:
self._stream.send_close(
code=client_for_testing.STATUS_NORMAL_CLOSURE, reason='')
self._read_thread.request_stop()
if self._physical_connection_close_event:
self._physical_connection_close_event.set()
def _notify_reader_done(self):
self._logger.debug('Read thread terminated.')
self.close_socket()
def _assert_channel_slot_available(self):
try:
self._control_blocks_condition.acquire()
if len(self._channel_slots) == 0:
# Wait once
self._control_blocks_condition.wait(timeout=self._timeout)
finally:
self._control_blocks_condition.release()
if len(self._channel_slots) == 0:
raise Exception('Failed to receive NewChannelSlot')
def _assert_send_quota_available(self, channel_id):
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota == 0:
# Wait once
self._logical_channels_condition.wait(timeout=self._timeout)
finally:
self._logical_channels_condition.release()
if self._logical_channels[channel_id].send_quota == 0:
raise Exception('Failed to receive FlowControl for channel id %d' %
channel_id)
def connect(self):
self._socket = client_for_testing.connect_socket_with_retry(
self._options.server_host,
self._options.server_port,
self._options.socket_timeout,
self._options.use_tls)
self._handshake.handshake(self._socket)
self._stream = client_for_testing.WebSocketStream(
self._socket, self._handshake)
self._logical_channels[_DEFAULT_CHANNEL_ID] = _LogicalChannelData()
self._read_thread = _MuxReaderThread(self)
self._read_thread.start()
self._assert_channel_slot_available()
self._assert_send_quota_available(_DEFAULT_CHANNEL_ID)
self._is_active = True
self._logger.info('Connection established')
def add_channel(self, channel_id, options):
if not self._is_active:
raise Exception('Mux client is not active')
if channel_id in self._logical_channels:
raise Exception('Channel id %d already exists' % channel_id)
try:
send_quota = self._channel_slots.popleft()
except IndexError, e:
raise Exception('No channel slots: %r' % e)
# Create AddChannel request
request_line = 'GET %s HTTP/1.1\r\n' % options.resource
fields = []
if options.server_port == client_for_testing.DEFAULT_PORT:
fields.append('Host: %s\r\n' % options.server_host.lower())
else:
fields.append('Host: %s:%d\r\n' % (options.server_host.lower(),
options.server_port))
fields.append('Origin: %s\r\n' % options.origin.lower())
fields.append('Connection: Upgrade\r\n')
if len(options.extensions) > 0:
fields.append('Sec-WebSocket-Extensions: %s\r\n' %
', '.join(options.extensions))
handshake = request_line + ''.join(fields) + '\r\n'
add_channel_request = _create_add_channel_request(
channel_id, handshake)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + add_channel_request
self._stream.send_binary(payload)
# Wait AddChannelResponse
self._logger.debug('Waiting AddChannelResponse for the request...')
response = None
try:
self._control_blocks_condition.acquire()
while True:
for block in self._control_blocks:
if block.opcode != _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
continue
if block.channel_id == channel_id:
response = block
self._control_blocks.remove(response)
break
if response:
break
self._control_blocks_condition.wait(self._timeout)
if not self._is_active:
raise Exception('AddChannelRequest timed out')
finally:
self._control_blocks_condition.release()
# Validate AddChannelResponse
if response.rejected:
raise Exception('The server rejected AddChannelRequest')
fields = _parse_handshake_response(response.encoded_handshake)
# Should we reject when Upgrade, Connection, or Sec-WebSocket-Accept
# headers exist?
self._logical_channels_condition.acquire()
self._logical_channels[channel_id] = _LogicalChannelData()
self._logical_channels[channel_id].send_quota = send_quota
self._logical_channels_condition.release()
self._logger.debug('Logical channel %d established' % channel_id)
def _check_logical_channel_is_opened(self, channel_id):
if not self._is_active:
raise Exception('Mux client is not active')
if not channel_id in self._logical_channels:
raise Exception('Logical channel %d is not established.')
def drop_channel(self, channel_id):
# TODO(bashi): Implement
pass
def send_flow_control(self, channel_id, replenished_quota):
self._check_logical_channel_is_opened(channel_id)
flow_control = _create_flow_control(channel_id, replenished_quota)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + flow_control
# Replenish receive quota
try:
self._logical_channels_condition.acquire()
self._logical_channels[channel_id].receive_quota += (
replenished_quota)
finally:
self._logical_channels_condition.release()
self._stream.send_binary(payload)
def send_message(self, channel_id, message, end=True, binary=False):
self._check_logical_channel_is_opened(channel_id)
if binary:
first_byte = (end << 7) | client_for_testing.OPCODE_BINARY
else:
first_byte = (end << 7) | client_for_testing.OPCODE_TEXT
message = message.encode('utf-8')
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota < len(message):
raise Exception('Send quota violation: %d < %d' % (
self._logical_channels[channel_id].send_quota,
len(message)))
self._logical_channels[channel_id].send_quota -= len(message)
finally:
self._logical_channels_condition.release()
payload = _encode_channel_id(channel_id) + chr(first_byte) + message
self._stream.send_binary(payload)
def assert_receive(self, channel_id, payload, binary=False):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
except Queue.Empty, e:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if binary:
opcode = client_for_testing.OPCODE_BINARY
else:
opcode = client_for_testing.OPCODE_TEXT
if inner_frame.opcode != opcode:
raise Exception('Unexpected opcode received (%r != %r)' %
(expected_opcode, inner_frame.opcode))
if inner_frame.payload != payload:
raise Exception('Unexpected payload received')
def send_close(self, channel_id, code=None, reason=''):
self._check_logical_channel_is_opened(channel_id)
if code is not None:
body = struct.pack('!H', code) + reason.encode('utf-8')
else:
body = ''
first_byte = (1 << 7) | client_for_testing.OPCODE_CLOSE
payload = _encode_channel_id(channel_id) + chr(first_byte) + body
self._stream.send_binary(payload)
def assert_receive_close(self, channel_id):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
except Queue.Empty, e:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if inner_frame.opcode != client_for_testing.OPCODE_CLOSE:
raise Exception('Didn\'t receive close frame')
    def send_physical_connection_close(self, code=None, reason=''):
        """Start the closing handshake on the physical connection.

        Creates the event that assert_physical_connection_receive_close()
        waits on; presumably the receiving thread sets it when the peer's
        close frame arrives (not visible here - confirm in the reader).
        """
        self._physical_connection_close_event = threading.Event()
        self._stream.send_close(code, reason)
# This method can be used only after calling
# send_physical_connection_close().
def assert_physical_connection_receive_close(
self, code=client_for_testing.STATUS_NORMAL_CLOSURE, reason=''):
self._physical_connection_close_event.wait(timeout=self._timeout)
if (not self._physical_connection_close_event.isSet() or
not self._physical_connection_close_message):
raise Exception('Didn\'t receive closing handshake')
    def close_socket(self):
        """Mark the client inactive and close the underlying socket."""
        self._is_active = False
        self._socket.close()
| mpl-2.0 |
adrianmugnoz/Documentacion-Divulgame | readthedocs/core/templatetags/core_tags.py | 3 | 2737 | import urllib
import hashlib
from django import template
from django.conf import settings
from django.utils.safestring import mark_safe
from django.utils.encoding import force_bytes, force_text
from builds.models import Version
from projects.models import Project
register = template.Library()
@register.filter
def gravatar(email, size=48):
    """Render an <img> tag pointing at the Gravatar for *email*.

    The gravatar id is the MD5 hex digest of the address and *size* is
    the square dimension in pixels.
    """
    query = urllib.urlencode({
        'gravatar_id': hashlib.md5(email).hexdigest(),
        'size': str(size)
    })
    url = "http://www.gravatar.com/avatar.php?%s" % query
    return ('<img src="%s" width="%s" height="%s" alt="gravatar" '
            'class="gravatar" border="0" />' % (url, size, size))
@register.simple_tag(name="doc_url")
def make_document_url(project, version=None, page=None):
    """Build the documentation URL for *project*, optionally at *page*."""
    if not project:
        return ""
    # Translations resolve against the parent project's URL space.
    if project.main_language_project:
        base_url = project.get_translation_url(version)
    else:
        base_url = project.get_docs_url(version)
    # The index page lives at the base URL itself; other pages map to a
    # path whose shape depends on the Sphinx builder in use.
    if not page or page == "index":
        path = ""
    elif project.documentation_type == "sphinx_htmldir":
        path = page + "/"
    elif project.documentation_type == "sphinx_singlehtml":
        path = "index.html#document-" + page
    else:
        path = page + ".html"
    return base_url + path
@register.filter(is_safe=True)
def restructuredtext(value, short=False):
    """Render *value* (reStructuredText) to a safe HTML fragment.

    When docutils is unavailable, raises a template error under DEBUG
    or falls back to returning the raw text. With short=True only the
    first line of the rendered fragment is returned.
    """
    try:
        from docutils.core import publish_parts
    except ImportError:
        if settings.DEBUG:
            raise template.TemplateSyntaxError(
                "Error in 'restructuredtext' filter: "
                "The Python docutils library isn't installed."
            )
        return force_text(value)
    docutils_settings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS",
                                {})
    parts = publish_parts(source=force_bytes(value), writer_name="html4css1",
                          settings_overrides=docutils_settings)
    out = force_text(parts["fragment"])
    # The original wrapped this in try/except IndexError with a
    # 'return' inside 'finally', which silently swallows any in-flight
    # exception. str.split always yields at least one element, so no
    # guard is needed at all.
    if short:
        out = out.split("\n")[0]
    return mark_safe(out)
@register.filter
def get_project(slug):
    """Return the Project with the given slug, or None when not found."""
    try:
        return Project.objects.get(slug=slug)
    except Project.DoesNotExist:
        # The original bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; only a missing project should map to None.
        return None
@register.filter
def get_version(slug):
    """Return the Version with the given slug, or None when not found.

    Bug fix: the original body was a copy/paste of get_project and
    queried Project, so this filter could never return a Version (the
    top-of-file Version import was otherwise unused).
    """
    try:
        return Version.objects.get(slug=slug)
    except Version.DoesNotExist:
        return None
@register.simple_tag
def url_replace(request, field, value):
    """Return the current querystring with *field* replaced by *value*."""
    params = request.GET.copy()
    params[field] = value
    return params.urlencode()
| mit |
pongem/python-bot-project | kms/api-client/snippets_test.py | 1 | 5588 | #!/usr/bin/env python
# Copyright 2017 Google, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
import random
import string
import googleapiclient.discovery
import snippets
# Your Google Cloud Platform Key Location
LOCATION = 'global'

# Your Google Cloud Platform KeyRing name (randomised so concurrent test
# runs do not collide on the same resource)
KEYRING = ''.join(
    random.choice(string.ascii_lowercase + string.digits) for _ in range(12))

# Your Google Cloud Platform CryptoKey name (randomised per run as well)
CRYPTOKEY = ''.join(
    random.choice(string.ascii_lowercase + string.digits) for _ in range(12))

# Your Google Cloud Platform CryptoKeyVersion name
VERSION = 1

# A member to add to our IAM policy
MEMBER = 'user:ryanmats@google.com'

# The role we want our new member to have for our IAM policy
ROLE = 'roles/owner'
def test_create_keyring(capsys, cloud_config):
    """create_keyring should report the fully-qualified KeyRing name."""
    snippets.create_keyring(cloud_config.project, LOCATION, KEYRING)
    captured, _ = capsys.readouterr()
    wanted = 'Created KeyRing projects/{}/locations/{}/keyRings/{}.'.format(
        cloud_config.project, LOCATION, KEYRING)
    assert wanted in captured
def test_create_cryptokey(capsys, cloud_config):
    """create_cryptokey should report the fully-qualified CryptoKey name."""
    snippets.create_cryptokey(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY)
    captured, _ = capsys.readouterr()
    wanted = (
        'Created CryptoKey projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}.'
        .format(cloud_config.project, LOCATION, KEYRING, CRYPTOKEY))
    assert wanted in captured
def test_encrypt_decrypt(capsys, cloud_config, tmpdir):
    """Round-trip a file through encrypt then decrypt and compare."""
    plaintext_file = tmpdir.join('in.txt')
    encrypted_file = tmpdir.join('out.txt')
    decrypted_file = tmpdir.join('out2.txt')
    plaintext_file.write('SampleText')

    snippets.encrypt(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY,
        str(plaintext_file), str(encrypted_file))
    snippets.decrypt(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY,
        str(encrypted_file), str(decrypted_file))

    # The decrypted output must match the original plaintext exactly.
    assert decrypted_file.read() == 'SampleText'

    # Both snippets announce where they saved their output.
    captured, _ = capsys.readouterr()
    assert 'Saved encrypted text to {}.'.format(str(encrypted_file)) in captured
    assert 'Saved decrypted text to {}.'.format(str(decrypted_file)) in captured
def test_disable_cryptokey_version(capsys, cloud_config):
    """Disabling a version should report the DISABLED state."""
    snippets.disable_cryptokey_version(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY, VERSION)
    captured, _ = capsys.readouterr()
    wanted = (
        'CryptoKeyVersion projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}/'
        'cryptoKeyVersions/{}\'s state has been set to {}.'
        .format(
            cloud_config.project, LOCATION, KEYRING, CRYPTOKEY, VERSION,
            'DISABLED'))
    assert wanted in captured
def test_destroy_cryptokey_version(capsys, cloud_config):
    """Scheduling destruction should report the DESTROY_SCHEDULED state."""
    snippets.destroy_cryptokey_version(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY, VERSION)
    captured, _ = capsys.readouterr()
    wanted = (
        'CryptoKeyVersion projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}/'
        'cryptoKeyVersions/{}\'s state has been set to {}.'
        .format(
            cloud_config.project, LOCATION, KEYRING, CRYPTOKEY, VERSION,
            'DESTROY_SCHEDULED'))
    assert wanted in captured
def test_add_member_to_cryptokey_policy(capsys, cloud_config):
    """Adding a member should be reported and visible in the IAM policy.

    After calling the snippet, fetch the CryptoKey's IAM policy directly
    and verify the (MEMBER, ROLE) binding is present.
    """
    snippets.add_member_to_cryptokey_policy(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY, MEMBER, ROLE)
    out, _ = capsys.readouterr()
    expected = (
        'Member {} added with role {} to policy for CryptoKey {} in KeyRing {}'
        .format(MEMBER, ROLE, CRYPTOKEY, KEYRING))
    assert expected in out

    kms_client = googleapiclient.discovery.build('cloudkms', 'v1beta1')
    parent = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
        cloud_config.project, LOCATION, KEYRING, CRYPTOKEY)
    cryptokeys = kms_client.projects().locations().keyRings().cryptoKeys()
    policy_response = cryptokeys.getIamPolicy(resource=parent).execute()
    # Membership test on the mapping itself; '.keys()' was redundant.
    assert 'bindings' in policy_response
    # any() replaces the original manual flag-and-nested-loop scan.
    assert any(
        binding['role'] == ROLE and MEMBER in binding['members']
        for binding in policy_response['bindings'])
def test_get_keyring_policy(capsys, cloud_config):
    """get_keyring_policy prints either the policy header or a no-roles note."""
    project_id = cloud_config.project
    snippets.get_keyring_policy(project_id, LOCATION, KEYRING)
    captured, _ = capsys.readouterr()
    header = (
        'Printing IAM policy for resource projects/{}/locations/{}/keyRings/{}'
        ':'.format(project_id, LOCATION, KEYRING))
    empty_note = (
        'No roles found for resource projects/{}/locations/{}/keyRings/{}.'
        .format(project_id, LOCATION, KEYRING))
    assert header in captured or empty_note in captured
| apache-2.0 |
theflofly/tensorflow | tensorflow/contrib/learn/python/learn/estimators/test_data.py | 42 | 2180 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test data utilities (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
def get_quantile_based_buckets(feature_values, num_buckets):
  """Return `num_buckets` evenly spaced quantile boundaries of the values."""
  # Percentile points at i/(num_buckets+1) of the range, i = 1..num_buckets.
  percentile_points = [100 * (i + 1.) / (num_buckets + 1.)
                       for i in range(num_buckets)]
  return list(np.percentile(np.array(feature_values), percentile_points))
def prepare_iris_data_for_logistic_regression():
  """Load iris and keep only classes 0 and 1 (a binary problem)."""
  iris = base.load_iris()
  binary_ids = np.where((iris.target == 0) | (iris.target == 1))
  return base.Dataset(data=iris.data[binary_ids],
                      target=iris.target[binary_ids])
def iris_input_multiclass_fn():
  """Input fn yielding all 150 iris rows with int32 multiclass labels."""
  iris = base.load_iris()
  features = {
      'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
  }
  labels = constant_op.constant(
      iris.target, shape=(150, 1), dtype=dtypes.int32)
  return features, labels
def iris_input_logistic_fn():
  """Input fn yielding the 100 binary-class iris rows with int32 labels."""
  iris = prepare_iris_data_for_logistic_regression()
  features = {
      'feature': constant_op.constant(iris.data, dtype=dtypes.float32)
  }
  labels = constant_op.constant(
      iris.target, shape=(100, 1), dtype=dtypes.int32)
  return features, labels
| apache-2.0 |
valmynd/MediaFetcher | src/plugins/youtube_dl/youtube_dl/extractor/ninenow.py | 1 | 2762 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
float_or_none,
smuggle_url,
)
class NineNowIE(InfoExtractor):
	"""Extractor for 9now.com.au clip and episode pages.

	The page embeds its state as JSON in ``window.__data``; the media
	itself is hosted on Brightcove, so extraction is delegated to the
	BrightcoveNew extractor via a smuggled URL.
	"""
	IE_NAME = '9now.com.au'
	_VALID_URL = r'https?://(?:www\.)?9now\.com\.au/(?:[^/]+/){2}(?P<id>[^/?#]+)'
	# Streams are geo-restricted to Australia.
	_GEO_COUNTRIES = ['AU']
	_TESTS = [{
		# clip
		'url': 'https://www.9now.com.au/afl-footy-show/2016/clip-ciql02091000g0hp5oktrnytc',
		'md5': '17cf47d63ec9323e562c9957a968b565',
		'info_dict': {
			'id': '16801',
			'ext': 'mp4',
			'title': 'St. Kilda\'s Joey Montagna on the potential for a player\'s strike',
			'description': 'Is a boycott of the NAB Cup "on the table"?',
			'uploader_id': '4460760524001',
			'upload_date': '20160713',
			'timestamp': 1468421266,
		},
		'skip': 'Only available in Australia',
	}, {
		# episode
		'url': 'https://www.9now.com.au/afl-footy-show/2016/episode-19',
		'only_matching': True,
	}, {
		# DRM protected
		'url': 'https://www.9now.com.au/andrew-marrs-history-of-the-world/season-1/episode-1',
		'only_matching': True,
	}]
	BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4460760524001/default_default/index.html?videoId=%s'

	def _real_extract(self, url):
		"""Extract video metadata and hand off playback to Brightcove."""
		display_id = self._match_id(url)
		webpage = self._download_webpage(url, display_id)
		page_data = self._parse_json(self._search_regex(
			r'window\.__data\s*=\s*({.*?});', webpage,
			'page data'), display_id)
		# A page describes either an episode or a clip; probe both shapes
		# and take the first one that carries data.
		for kind in ('episode', 'clip'):
			current_key = page_data.get(kind, {}).get(
				'current%sKey' % kind.capitalize())
			if not current_key:
				continue
			cache = page_data.get(kind, {}).get('%sCache' % kind, {})
			if not cache:
				continue
			# Fall back to the first cache entry when the current key is
			# missing from the cache.
			common_data = (cache.get(current_key) or list(cache.values())[0])[kind]
			break
		else:
			raise ExtractorError('Unable to find video data')
		video_data = common_data['video']
		if video_data.get('drm'):
			raise ExtractorError('This video is DRM protected.', expected=True)
		brightcove_id = video_data.get('brightcoveId') or 'ref:' + video_data['referenceId']
		video_id = compat_str(video_data.get('id') or brightcove_id)
		title = common_data['name']
		# Thumbnail ids look like 'w480' etc.; the numeric tail is the width.
		thumbnails = [{
			'id': thumbnail_id,
			'url': thumbnail_url,
			'width': int_or_none(thumbnail_id[1:])
		} for thumbnail_id, thumbnail_url in common_data.get('image', {}).get('sizes', {}).items()]
		return {
			'_type': 'url_transparent',
			'url': smuggle_url(
				self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id,
				{'geo_countries': self._GEO_COUNTRIES}),
			'id': video_id,
			'title': title,
			'description': common_data.get('description'),
			# Duration is reported in milliseconds.
			'duration': float_or_none(video_data.get('duration'), 1000),
			'thumbnails': thumbnails,
			'ie_key': 'BrightcoveNew',
		}
| gpl-3.0 |
ojengwa/django-1 | tests/model_inheritance/models.py | 99 | 4729 | """
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
#
# Abstract base classes
#
@python_2_unicode_compatible
class CommonInfo(models.Model):
    # Abstract base: name/age are inherited by Worker and Student.
    name = models.CharField(max_length=50)
    age = models.PositiveIntegerField()

    class Meta:
        abstract = True
        ordering = ['name']

    def __str__(self):
        # e.g. "Worker Fred" - prefixes the concrete subclass name.
        return '%s %s' % (self.__class__.__name__, self.name)
class Worker(CommonInfo):
    # Concrete subclass of the abstract CommonInfo base.
    job = models.CharField(max_length=50)
class Student(CommonInfo):
    # Concrete subclass; its own (empty) Meta replaces CommonInfo.Meta.
    school_class = models.CharField(max_length=10)

    class Meta:
        pass
#
# Abstract base classes with related models
#
class Post(models.Model):
    # FK target for the abstract Attachment's %(class)s reverse relations.
    title = models.CharField(max_length=50)
@python_2_unicode_compatible
class Attachment(models.Model):
    # %(class)s expands per concrete subclass, giving Post the reverse
    # accessors attached_comment_set and attached_link_set.
    post = models.ForeignKey(Post, related_name='attached_%(class)s_set')
    content = models.TextField()

    class Meta:
        abstract = True

    def __str__(self):
        return self.content
class Comment(Attachment):
    # Inherits post/content from the abstract Attachment base.
    is_spam = models.BooleanField(default=False)
class Link(Attachment):
    # Inherits post/content from the abstract Attachment base.
    url = models.URLField()
#
# Multi-table inheritance
#
@python_2_unicode_compatible
class Chef(models.Model):
    # Standalone model referenced by Restaurant via a nullable FK.
    name = models.CharField(max_length=50)

    def __str__(self):
        return "%s the chef" % self.name
@python_2_unicode_compatible
class Place(models.Model):
    # Non-abstract base: subclasses use multi-table inheritance and get
    # an implicit OneToOne link back to this table.
    name = models.CharField(max_length=50)
    address = models.CharField(max_length=80)

    def __str__(self):
        return "%s the place" % self.name
class Rating(models.Model):
    # Abstract mixin contributing the rating field and default ordering.
    rating = models.IntegerField(null=True, blank=True)

    class Meta:
        abstract = True
        ordering = ['-rating']
@python_2_unicode_compatible
class Restaurant(Place, Rating):
    # Multi-table child of Place mixed with the abstract Rating base.
    serves_hot_dogs = models.BooleanField(default=False)
    serves_pizza = models.BooleanField(default=False)
    chef = models.ForeignKey(Chef, null=True, blank=True)

    class Meta(Rating.Meta):
        # Inherits ordering from Rating.Meta and sets a custom table name.
        db_table = 'my_restaurant'

    def __str__(self):
        return "%s the restaurant" % self.name
@python_2_unicode_compatible
class ItalianRestaurant(Restaurant):
    # Second level of multi-table inheritance (Place -> Restaurant -> here).
    serves_gnocchi = models.BooleanField(default=False)

    def __str__(self):
        return "%s the italian restaurant" % self.name
@python_2_unicode_compatible
class Supplier(Place):
    # Multi-table child of Place with a many-to-many to Restaurant.
    customers = models.ManyToManyField(Restaurant, related_name='provider')

    def __str__(self):
        return "%s the supplier" % self.name
@python_2_unicode_compatible
class ParkingLot(Place):
    # An explicit link to the parent (we can control the attribute name).
    parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
    # Second, independent FK to Place alongside the parent link.
    main_site = models.ForeignKey(Place, related_name='lot')

    def __str__(self):
        return "%s the parking lot" % self.name
#
# Abstract base classes with related models where the sub-class has the
# same name in a different app and inherits from the same abstract base
# class.
# NOTE: The actual API tests for the following classes are in
# model_inheritance_same_model_name/models.py - They are defined
# here in order to have the name conflict between apps
#
class Title(models.Model):
    # FK target for the abstract NamedURL base below.
    title = models.CharField(max_length=50)
class NamedURL(models.Model):
    # %(app_label)s/%(class)s expand per concrete subclass so reverse
    # names do not clash with a same-named model in another app.
    title = models.ForeignKey(Title, related_name='attached_%(app_label)s_%(class)s_set')
    url = models.URLField()

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Copy(NamedURL):
    # Concrete subclass of the abstract NamedURL base.
    content = models.TextField()

    def __str__(self):
        return self.content
class Mixin(object):
    # Plain (non-model) mixin used to test Model + object multiple
    # inheritance; cooperates with super() in __init__.
    def __init__(self):
        self.other_attr = 1
        super(Mixin, self).__init__()
class MixinModel(models.Model, Mixin):
    # Model listed first so models.Model wins in the MRO.
    pass
class Base(models.Model):
    # M2M-carrying base used to test inheritance with a custom child PK.
    titles = models.ManyToManyField(Title)
class SubBase(Base):
    # Child declaring its own explicit primary key.
    sub_id = models.IntegerField(primary_key=True)
class GrandParent(models.Model):
    # Root of a three-level multi-table inheritance chain; uniqueness
    # constraints declared here apply across all descendants.
    first_name = models.CharField(max_length=80)
    last_name = models.CharField(max_length=80)
    email = models.EmailField(unique=True)

    class Meta:
        unique_together = ('first_name', 'last_name')
class Parent(GrandParent):
    # Level 2 of the multi-table inheritance chain.
    pass
class Child(Parent):
    # Level 3 of the multi-table inheritance chain.
    pass
class GrandChild(Child):
    # Level 4 of the multi-table inheritance chain.
    pass
| bsd-3-clause |
lucernae/geonode | geonode/layers/views.py | 2 | 54490 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import os
import sys
import logging
import shutil
import base64
import traceback
import uuid
import decimal
import re
from django.contrib.gis.geos import GEOSGeometry
from django.template.response import TemplateResponse
from requests import Request
from itertools import chain
from six import string_types
from owslib.wfs import WebFeatureService
from owslib.feature.schema import get_schema
from guardian.shortcuts import get_perms
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.conf import settings
from django.utils.translation import ugettext as _
from geonode import geoserver, qgis_server
try:
import json
except ImportError:
from django.utils import simplejson as json
from django.utils.html import escape
from django.template.defaultfilters import slugify
from django.forms.models import inlineformset_factory
from django.db import transaction
from django.db.models import F
from django.forms.utils import ErrorList
from geonode.services.models import Service
from geonode.layers.forms import LayerForm, LayerUploadForm, NewLayerUploadForm, LayerAttributeForm
from geonode.base.forms import CategoryForm, TKeywordForm
from geonode.layers.models import Layer, Attribute, UploadSession
from geonode.base.enumerations import CHARSETS
from geonode.base.models import TopicCategory
from geonode.groups.models import GroupProfile
from geonode.utils import (resolve_object,
default_map_config,
check_ogc_backend,
llbbox_to_mercator,
bbox_to_projection,
GXPLayer,
GXPMap)
from geonode.layers.utils import file_upload, is_raster, is_vector
from geonode.people.forms import ProfileForm, PocForm
from geonode.security.views import _perms_info_json
from geonode.documents.models import get_related_documents
from geonode.utils import build_social_links
from geonode.base.views import batch_modify
from geonode.base.models import Thesaurus
from geonode.maps.models import Map
from geonode.geoserver.helpers import (cascading_delete,
gs_catalog,
ogc_server_settings,
set_layer_style)
from .tasks import delete_layer
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.helpers import _render_thumbnail
if check_ogc_backend(qgis_server.BACKEND_PACKAGE):
from geonode.qgis_server.models import QGISServerLayer
# Path of the OGC server log, used to enrich failed-upload sessions.
CONTEXT_LOG_FILE = ogc_server_settings.LOG_FILE

logger = logging.getLogger("geonode.layers.views")

# Paging defaults for layer search.
DEFAULT_SEARCH_BATCH_SIZE = 10
MAX_SEARCH_BATCH_SIZE = 25
# User-facing message strings shared by the views below.
GENERIC_UPLOAD_ERROR = _("There was an error while attempting to upload your data. \
Please try again, or contact and administrator if the problem continues.")

METADATA_UPLOADED_PRESERVE_ERROR = _("Note: this layer's orginal metadata was \
populated and preserved by importing a metadata XML file. This metadata cannot be edited.")

_PERMISSION_MSG_DELETE = _("You are not permitted to delete this layer")
_PERMISSION_MSG_GENERIC = _('You do not have permissions for this layer.')
_PERMISSION_MSG_MODIFY = _("You are not permitted to modify this layer")
_PERMISSION_MSG_METADATA = _(
    "You are not permitted to modify this layer's metadata")
_PERMISSION_MSG_VIEW = _("You are not permitted to view this layer")
def log_snippet(log_file):
    """Return roughly the last 10 kB of *log_file*.

    Returns a placeholder message when the path is falsy or is not a
    regular file.
    """
    if not log_file or not os.path.isfile(log_file):
        return "No log file at %s" % log_file

    with open(log_file, "r") as fh:
        fh.seek(0, 2)                 # jump to EOF to learn the size
        size = fh.tell()
        start = size - 10024          # last ~10 kB, clamped at offset 0
        if start < 0:
            start = 0
        fh.seek(start, 0)
        return fh.read()
def _resolve_layer(request, alternate, permission='base.view_resourcebase',
                   msg=_PERMISSION_MSG_GENERIC, **kwargs):
    """
    Resolve the layer by the provided typename (which may include service
    name) and check the optional permission.
    """
    service_typename = alternate.split(":", 1)
    # One query via first() instead of the original exists() followed by
    # a second filter()/index on the same name.
    service = Service.objects.filter(name=service_typename[0]).first()
    if service:
        # Cascaded services ("C") keep the full alternate; other methods
        # store only the part after the "service:" prefix. Guard against
        # a colon-less alternate, which would previously raise IndexError.
        if service.method != "C" and len(service_typename) > 1:
            layer_alternate = service_typename[1]
        else:
            layer_alternate = alternate
    else:
        layer_alternate = alternate
    return resolve_object(request,
                          Layer,
                          {'alternate': layer_alternate},
                          permission=permission,
                          permission_msg=msg,
                          **kwargs)
# Basic Layer Views #
@login_required
def layer_upload(request, template='upload/layer_upload.html'):
    """Render the upload form (GET) or ingest an uploaded layer (POST).

    POST returns a JSON document with a 'success' flag plus either the
    new layer's URL/metadata or error details (including the traceback
    and OGC-server log context recorded on the failed UploadSession).
    """
    if request.method == 'GET':
        mosaics = Layer.objects.filter(is_mosaic=True).order_by('name')
        ctx = {
            'mosaics': mosaics,
            'charsets': CHARSETS,
            'is_layer': True,
        }
        return render(request, template, context=ctx)
    elif request.method == 'POST':
        name = None
        form = NewLayerUploadForm(request.POST, request.FILES)
        tempdir = None
        saved_layer = None
        errormsgs = []
        out = {'success': False}
        if form.is_valid():
            title = form.cleaned_data["layer_title"]
            # Replace dots in filename - GeoServer REST API upload bug
            # and avoid any other invalid characters.
            # Use the title if possible, otherwise default to the filename
            if title is not None and len(title) > 0:
                name_base = title
            else:
                name_base, __ = os.path.splitext(
                    form.cleaned_data["base_file"].name)
                title = slugify(name_base.replace(".", "_"))
            name = slugify(name_base.replace(".", "_"))
            if form.cleaned_data["abstract"] is not None and len(
                    form.cleaned_data["abstract"]) > 0:
                abstract = form.cleaned_data["abstract"]
            else:
                abstract = "No abstract provided."
            try:
                # Moved this inside the try/except block because it can raise
                # exceptions when unicode characters are present.
                # This should be followed up in upstream Django.
                tempdir, base_file = form.write_files()
                if not form.cleaned_data["style_upload_form"]:
                    saved_layer = file_upload(
                        base_file,
                        name=name,
                        user=request.user,
                        overwrite=False,
                        charset=form.cleaned_data["charset"],
                        abstract=abstract,
                        title=title,
                        metadata_uploaded_preserve=form.cleaned_data[
                            "metadata_uploaded_preserve"],
                        metadata_upload_form=form.cleaned_data["metadata_upload_form"])
                else:
                    # Style-only upload: the target layer must already exist;
                    # the uploaded file is treated as an SLD for it.
                    saved_layer = Layer.objects.get(alternate=title)
                    if not saved_layer:
                        msg = 'Failed to process. Could not find matching layer.'
                        raise Exception(msg)
                    sld = open(base_file).read()
                    set_layer_style(saved_layer, title, base_file, sld)
            except Exception as e:
                # Build a best-effort error string, then attach it (with
                # traceback and server-log context) to the user's most
                # recent UploadSession for later inspection.
                exception_type, error, tb = sys.exc_info()
                logger.exception(e)
                out['success'] = False
                try:
                    out['errors'] = u''.join(error).encode('utf-8')
                except BaseException:
                    try:
                        out['errors'] = str(error)
                    except BaseException:
                        try:
                            tb = traceback.format_exc()
                            out['errors'] = tb
                        except BaseException:
                            pass
                # Assign the error message to the latest UploadSession from
                # that user.
                latest_uploads = UploadSession.objects.filter(
                    user=request.user).order_by('-date')
                if latest_uploads.count() > 0:
                    upload_session = latest_uploads[0]
                    upload_session.error = str(error)
                    upload_session.traceback = traceback.format_exc(tb)
                    upload_session.context = log_snippet(CONTEXT_LOG_FILE)
                    upload_session.save()
                    out['traceback'] = upload_session.traceback
                    out['context'] = upload_session.context
                    out['upload_session'] = upload_session.id
            else:
                # Upload succeeded: report the new layer's details and
                # apply any requested permissions.
                out['success'] = True
                if hasattr(saved_layer, 'info'):
                    out['info'] = saved_layer.info
                out['url'] = reverse(
                    'layer_detail', args=[
                        saved_layer.service_typename])
                if hasattr(saved_layer, 'bbox_string'):
                    out['bbox'] = saved_layer.bbox_string
                if hasattr(saved_layer, 'srid'):
                    out['crs'] = {
                        'type': 'name',
                        'properties': saved_layer.srid
                    }
                out['ogc_backend'] = settings.OGC_SERVER['default']['BACKEND']
                upload_session = saved_layer.upload_session
                if upload_session:
                    upload_session.processed = True
                    upload_session.save()
                permissions = form.cleaned_data["permissions"]
                if permissions is not None and len(permissions.keys()) > 0:
                    saved_layer.set_permissions(permissions)
                saved_layer.handle_moderated_uploads()
            finally:
                # Always clean up the temporary upload directory.
                if tempdir is not None:
                    shutil.rmtree(tempdir)
        else:
            for e in form.errors.values():
                errormsgs.extend([escape(v) for v in e])
            out['errors'] = form.errors
            out['errormsgs'] = errormsgs
        if out['success']:
            status_code = 200
        else:
            status_code = 400
        if settings.MONITORING_ENABLED:
            # Record the uploaded resource for request monitoring.
            if saved_layer or name:
                layer_name = saved_layer.alternate if hasattr(
                    saved_layer, 'alternate') else name
                request.add_resource('layer', layer_name)
        return HttpResponse(
            json.dumps(out),
            content_type='application/json',
            status=status_code)
def layer_detail(request, layername, template='layers/layer_detail.html'):
layer = _resolve_layer(
request,
layername,
'base.view_resourcebase',
_PERMISSION_MSG_VIEW)
# assert False, str(layer_bbox)
config = layer.attribute_config()
# Add required parameters for GXP lazy-loading
layer_bbox = layer.bbox[0:4]
bbox = layer_bbox[:]
bbox[0] = float(layer_bbox[0])
bbox[1] = float(layer_bbox[2])
bbox[2] = float(layer_bbox[1])
bbox[3] = float(layer_bbox[3])
def decimal_encode(bbox):
import decimal
_bbox = []
for o in [float(coord) for coord in bbox]:
if isinstance(o, decimal.Decimal):
o = (str(o) for o in [o])
_bbox.append(o)
return _bbox
def sld_definition(style):
from urllib import quote
_sld = {
"title": style.sld_title or style.name,
"legend": {
"height": "40",
"width": "22",
"href": layer.ows_url +
"?service=wms&request=GetLegendGraphic&format=image%2Fpng&width=20&height=20&layer=" +
quote(layer.service_typename, safe=''),
"format": "image/png"
},
"name": style.name
}
return _sld
if hasattr(layer, 'srid'):
config['crs'] = {
'type': 'name',
'properties': layer.srid
}
# Add required parameters for GXP lazy-loading
attribution = "%s %s" % (layer.owner.first_name,
layer.owner.last_name) if layer.owner.first_name or layer.owner.last_name else str(
layer.owner)
srs = getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:3857')
config["attribution"] = "<span class='gx-attribution-title'>%s</span>" % attribution
config["format"] = getattr(
settings, 'DEFAULT_LAYER_FORMAT', 'image/png')
config["title"] = layer.title
config["wrapDateLine"] = True
config["visibility"] = True
config["srs"] = srs
config["bbox"] = decimal_encode(
bbox_to_projection([float(coord) for coord in layer_bbox] + [layer.srid, ],
target_srid=int(srs.split(":")[1]))[:4])
config["capability"] = {
"abstract": layer.abstract,
"name": layer.alternate,
"title": layer.title,
"queryable": True,
"bbox": {
layer.srid: {
"srs": layer.srid,
"bbox": decimal_encode(bbox)
},
srs: {
"srs": srs,
"bbox": decimal_encode(
bbox_to_projection([float(coord) for coord in layer_bbox] + [layer.srid, ],
target_srid=int(srs.split(":")[1]))[:4])
},
"EPSG:4326": {
"srs": "EPSG:4326",
"bbox": decimal_encode(bbox) if layer.srid == 'EPSG:4326' else
decimal_encode(bbox_to_projection(
[float(coord) for coord in layer_bbox] + [layer.srid, ], target_srid=4326)[:4])
}
},
"srs": {
srs: True
},
"formats": ["image/png", "application/atom xml", "application/atom+xml", "application/json;type=utfgrid",
"application/openlayers", "application/pdf", "application/rss xml", "application/rss+xml",
"application/vnd.google-earth.kml", "application/vnd.google-earth.kml xml",
"application/vnd.google-earth.kml+xml", "application/vnd.google-earth.kml+xml;mode=networklink",
"application/vnd.google-earth.kmz", "application/vnd.google-earth.kmz xml",
"application/vnd.google-earth.kmz+xml", "application/vnd.google-earth.kmz;mode=networklink",
"atom", "image/geotiff", "image/geotiff8", "image/gif", "image/gif;subtype=animated",
"image/jpeg", "image/png8", "image/png; mode=8bit", "image/svg", "image/svg xml",
"image/svg+xml", "image/tiff", "image/tiff8", "image/vnd.jpeg-png",
"kml", "kmz", "openlayers", "rss", "text/html; subtype=openlayers", "utfgrid"],
"attribution": {
"title": attribution
},
"infoFormats": ["text/plain", "application/vnd.ogc.gml", "text/xml", "application/vnd.ogc.gml/3.1.1",
"text/xml; subtype=gml/3.1.1", "text/html", "application/json"],
"styles": [sld_definition(s) for s in layer.styles.all()],
"prefix": layer.alternate.split(":")[0] if ":" in layer.alternate else "",
"keywords": [k.name for k in layer.keywords.all()] if layer.keywords else [],
"llbbox": decimal_encode(bbox) if layer.srid == 'EPSG:4326' else
decimal_encode(bbox_to_projection(
[float(coord) for coord in layer_bbox] + [layer.srid, ], target_srid=4326)[:4])
}
if layer.storeType == "remoteStore":
service = layer.remote_service
source_params = {
"ptype": service.ptype,
"remote": True,
"url": service.service_url,
"name": service.name,
"title": "[R] %s" % service.title}
maplayer = GXPLayer(
name=layer.alternate,
ows_url=layer.ows_url,
layer_params=json.dumps(config),
source_params=json.dumps(source_params))
else:
maplayer = GXPLayer(
name=layer.alternate,
ows_url=layer.ows_url,
layer_params=json.dumps(config))
# Update count for popularity ranking,
# but do not includes admins or resource owners
layer.view_count_up(request.user)
# center/zoom don't matter; the viewer will center on the layer bounds
map_obj = GXPMap(
projection=getattr(
settings,
'DEFAULT_MAP_CRS',
'EPSG:900913'))
NON_WMS_BASE_LAYERS = [
la for la in default_map_config(request)[1] if la.ows_url is None]
metadata = layer.link_set.metadata().filter(
name__in=settings.DOWNLOAD_FORMATS_METADATA)
granules = None
all_granules = None
all_times = None
filter = None
if layer.is_mosaic:
try:
cat = gs_catalog
cat._cache.clear()
store = cat.get_store(layer.name)
coverages = cat.mosaic_coverages(store)
filter = None
try:
if request.GET["filter"]:
filter = request.GET["filter"]
except BaseException:
pass
offset = 10 * (request.page - 1)
granules = cat.mosaic_granules(
coverages['coverages']['coverage'][0]['name'],
store,
limit=10,
offset=offset,
filter=filter)
all_granules = cat.mosaic_granules(
coverages['coverages']['coverage'][0]['name'], store, filter=filter)
except BaseException:
granules = {"features": []}
all_granules = {"features": []}
if check_ogc_backend(geoserver.BACKEND_PACKAGE):
from geonode.geoserver.views import get_capabilities
if layer.has_time:
workspace, layername = layer.alternate.split(
":") if ":" in layer.alternate else (None, layer.alternate)
# WARNING Please make sure to have enabled DJANGO CACHE as per
# https://docs.djangoproject.com/en/2.0/topics/cache/#filesystem-caching
wms_capabilities_resp = get_capabilities(
request, layer.id, tolerant=True)
if wms_capabilities_resp.status_code >= 200 and wms_capabilities_resp.status_code < 400:
wms_capabilities = wms_capabilities_resp.getvalue()
if wms_capabilities:
import xml.etree.ElementTree as ET
e = ET.fromstring(wms_capabilities)
for atype in e.findall(
"Capability/Layer/Layer[Name='%s']/Extent" % (layername)):
dim_name = atype.get('name')
if dim_name:
dim_name = str(dim_name).lower()
if dim_name == 'time':
dim_values = atype.text
if dim_values:
all_times = dim_values.split(",")
break
group = None
if layer.group:
try:
group = GroupProfile.objects.get(slug=layer.group.name)
except GroupProfile.DoesNotExist:
group = None
# a flag to be used for qgis server
show_popup = False
if 'show_popup' in request.GET and request.GET["show_popup"]:
show_popup = True
context_dict = {
'resource': layer,
'group': group,
'perms_list': get_perms(request.user, layer.get_self_resource()),
"permissions_json": _perms_info_json(layer),
"documents": get_related_documents(layer),
"metadata": metadata,
"is_layer": True,
"wps_enabled": settings.OGC_SERVER['default']['WPS_ENABLED'],
"granules": granules,
"all_granules": all_granules,
"all_times": all_times,
"show_popup": show_popup,
"filter": filter,
"storeType": layer.storeType,
"online": (layer.remote_service.probe == 200) if layer.storeType == "remoteStore" else True
}
if 'access_token' in request.session:
access_token = request.session['access_token']
else:
u = uuid.uuid1()
access_token = u.hex
context_dict["viewer"] = json.dumps(map_obj.viewer_json(
request.user, access_token, * (NON_WMS_BASE_LAYERS + [maplayer])))
context_dict["preview"] = getattr(
settings,
'GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY',
'geoext')
context_dict["crs"] = getattr(
settings,
'DEFAULT_MAP_CRS',
'EPSG:900913')
# provide bbox in EPSG:4326 for leaflet
if context_dict["preview"] == 'leaflet':
srid, wkt = layer.geographic_bounding_box.split(';')
srid = re.findall(r'\d+', srid)
geom = GEOSGeometry(wkt, srid=int(srid[0]))
geom.transform(4326)
context_dict["layer_bbox"] = ','.join([str(c) for c in geom.extent])
if layer.storeType == 'dataStore':
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_VECTOR)
else:
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_RASTER)
links_view = [item for idx, item in enumerate(links) if
item.url and 'wms' in item.url or 'gwc' in item.url]
links_download = [item for idx, item in enumerate(
links) if item.url and 'wms' not in item.url and 'gwc' not in item.url]
for item in links_view:
if item.url and access_token and 'access_token' not in item.url:
params = {'access_token': access_token}
item.url = Request('GET', item.url, params=params).prepare().url
for item in links_download:
if item.url and access_token and 'access_token' not in item.url:
params = {'access_token': access_token}
item.url = Request('GET', item.url, params=params).prepare().url
if request.user.has_perm('view_resourcebase', layer.get_self_resource()):
context_dict["links"] = links_view
if request.user.has_perm(
'download_resourcebase',
layer.get_self_resource()):
if layer.storeType == 'dataStore':
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_VECTOR)
else:
links = layer.link_set.download().filter(
name__in=settings.DOWNLOAD_FORMATS_RASTER)
context_dict["links_download"] = links_download
if settings.SOCIAL_ORIGINS:
context_dict["social_links"] = build_social_links(request, layer)
layers_names = layer.alternate
try:
if settings.DEFAULT_WORKSPACE and settings.DEFAULT_WORKSPACE in layers_names:
workspace, name = layers_names.split(':', 1)
else:
name = layers_names
except BaseException:
logger.error("Can not identify workspace type and layername")
context_dict["layer_name"] = json.dumps(layers_names)
try:
# get type of layer (raster or vector)
if layer.storeType == 'coverageStore':
context_dict["layer_type"] = "raster"
elif layer.storeType == 'dataStore':
if layer.has_time:
context_dict["layer_type"] = "vector_time"
else:
context_dict["layer_type"] = "vector"
location = "{location}{service}".format(** {
'location': settings.OGC_SERVER['default']['LOCATION'],
'service': 'wms',
})
# get schema for specific layer
username = settings.OGC_SERVER['default']['USER']
password = settings.OGC_SERVER['default']['PASSWORD']
schema = get_schema(
location,
name,
username=username,
password=password)
# get the name of the column which holds the geometry
if 'the_geom' in schema['properties']:
schema['properties'].pop('the_geom', None)
elif 'geom' in schema['properties']:
schema['properties'].pop("geom", None)
# filter the schema dict based on the values of layers_attributes
layer_attributes_schema = []
for key in schema['properties'].keys():
layer_attributes_schema.append(key)
filtered_attributes = layer_attributes_schema
context_dict["schema"] = schema
context_dict["filtered_attributes"] = filtered_attributes
except BaseException:
logger.error(
"Possible error with OWSLib. Turning all available properties to string")
if settings.GEOTIFF_IO_ENABLED:
from geonode.contrib.geotiffio import create_geotiff_io_url
context_dict["link_geotiff_io"] = create_geotiff_io_url(layer, access_token)
# maps owned by user needed to fill the "add to existing map section" in template
if request.user.is_authenticated():
context_dict["maps"] = Map.objects.filter(owner=request.user)
return TemplateResponse(
request, template, context=context_dict)
# Loads the data using the OWS lib when the "Do you want to filter it"
# button is clicked.
def load_layer_data(request, template='layers/layer_detail.html'):
    """Return the distinct values of the requested layer attributes as JSON.

    Reads ``json_data`` from the POST body (layer name plus a comma-separated
    list of attribute names), fetches the features through WFS and collects,
    for every attribute, the sorted list of distinct non-empty scalar values.
    Errors are swallowed deliberately (best-effort endpoint): the response is
    then an empty JSON object.
    """
    context_dict = {}
    data_dict = json.loads(request.POST.get('json_data'))
    layername = data_dict['layer_name']
    filtered_attributes = [x for x in data_dict['filtered_attributes'].split(
        ',') if '/load_layer_data' not in x]
    workspace, name = layername.split(':')
    location = "{location}{service}".format(** {
        'location': settings.OGC_SERVER['default']['LOCATION'],
        'service': 'wms',
    })
    try:
        # TODO: should be improved by using OAuth2 token (or at least user
        # related to it) instead of super-powers
        username = settings.OGC_SERVER['default']['USER']
        password = settings.OGC_SERVER['default']['PASSWORD']
        wfs = WebFeatureService(
            location,
            version='1.1.0',
            username=username,
            password=password)
        response = wfs.getfeature(
            typename=name,
            propertyname=filtered_attributes,
            outputFormat='application/json')
        # BUG FIX: dropped a redundant json.dumps/json.loads round-trip that
        # only re-serialized the already-parsed payload.
        decoded = json.loads(response.read())
        decoded_features = decoded['features']
        properties = {}
        for key in decoded_features[0]['properties']:
            properties[key] = []
        # Collect every non-empty scalar value per attribute.
        for feature in decoded_features:
            # BUG FIX: .items() instead of Python-2-only .iteritems().
            for key, value in feature['properties'].items():
                if value == '' or not isinstance(
                        value, (string_types, int, float)):
                    continue
                # BUG FIX: the '/load_layer_data' substring test is applied
                # only to strings; ``in`` on an int/float raised TypeError.
                if isinstance(value, string_types) and \
                        '/load_layer_data' in value:
                    continue
                properties[key].append(value)
        # De-duplicate and sort the collected values for stable output.
        for key in properties:
            properties[key] = list(set(properties[key]))
            properties[key].sort()
        context_dict["feature_properties"] = properties
    except BaseException:
        import traceback
        traceback.print_exc()
        logger.error("Possible error with OWSLib.")
    return HttpResponse(json.dumps(context_dict),
                        content_type="application/json")
def layer_feature_catalogue(
        request,
        layername,
        template='../../catalogue/templates/catalogue/feature_catalogue.xml'):
    """Render the feature catalogue XML for a vector (dataStore) layer.

    Non-vector layers get a JSON 400 error, since only feature types have an
    attribute catalogue to describe.
    """
    layer = _resolve_layer(request, layername)
    if layer.storeType != 'dataStore':
        payload = json.dumps({
            'success': False,
            'errors': 'layer is not a feature type'
        })
        return HttpResponse(
            payload,
            content_type='application/json',
            status=400)

    # One {name, type} entry per attribute, in the configured display order.
    attributes = [
        {'name': attrset.attribute, 'type': attrset.attribute_type}
        for attrset in layer.attribute_set.order_by('display_order')
    ]

    return render(
        request,
        template,
        context={
            'layer': layer,
            'attributes': attributes,
            'metadata': settings.PYCSW['CONFIGURATION']['metadata:main']
        },
        content_type='application/xml')
@login_required
def layer_metadata(
        request,
        layername,
        template='layers/layer_metadata.html',
        ajax=True):
    """Edit a layer's metadata.

    GET renders the metadata edit form; POST validates and persists the
    layer, attribute, category and thesaurus-keyword forms.  When *ajax* is
    true a successful POST returns a small JSON payload, otherwise the user
    is redirected to the layer detail page.  Requires the
    ``change_resourcebase_metadata`` permission on the layer.
    """
    layer = _resolve_layer(
        request,
        layername,
        'base.change_resourcebase_metadata',
        _PERMISSION_MSG_METADATA)
    layer_attribute_set = inlineformset_factory(
        Layer,
        Attribute,
        extra=0,
        form=LayerAttributeForm,
    )
    topic_category = layer.category
    poc = layer.poc
    metadata_author = layer.metadata_author
    config = layer.attribute_config()
    # Add required parameters for GXP lazy-loading
    layer_bbox = layer.bbox
    bbox = [float(coord) for coord in list(layer_bbox[0:4])]
    if hasattr(layer, 'srid'):
        config['crs'] = {
            'type': 'name',
            'properties': layer.srid
        }
    config["srs"] = getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:900913')
    # Keep the bbox in the projection the viewer is configured for.
    config["bbox"] = bbox if config["srs"] != 'EPSG:900913' \
        else llbbox_to_mercator([float(coord) for coord in bbox])
    config["title"] = layer.title
    config["queryable"] = True
    if layer.storeType == "remoteStore":
        service = layer.remote_service
        source_params = {
            "ptype": service.ptype,
            "remote": True,
            "url": service.service_url,
            "name": service.name,
            "title": "[R] %s" % service.title}
        maplayer = GXPLayer(
            name=layer.alternate,
            ows_url=layer.ows_url,
            layer_params=json.dumps(config),
            source_params=json.dumps(source_params))
    else:
        maplayer = GXPLayer(
            name=layer.alternate,
            ows_url=layer.ows_url,
            layer_params=json.dumps(config))
    # Update count for popularity ranking,
    # but do not includes admins or resource owners
    if request.user != layer.owner and not request.user.is_superuser:
        Layer.objects.filter(
            id=layer.id).update(popular_count=F('popular_count') + 1)
    # center/zoom don't matter; the viewer will center on the layer bounds
    map_obj = GXPMap(
        projection=getattr(
            settings,
            'DEFAULT_MAP_CRS',
            'EPSG:900913'))
    NON_WMS_BASE_LAYERS = [
        la for la in default_map_config(request)[1] if la.ows_url is None]
    if request.method == "POST":
        if layer.metadata_uploaded_preserve:  # layer metadata cannot be edited
            out = {
                'success': False,
                'errors': METADATA_UPLOADED_PRESERVE_ERROR
            }
            return HttpResponse(
                json.dumps(out),
                content_type='application/json',
                status=400)
        layer_form = LayerForm(request.POST, instance=layer, prefix="resource")
        if not layer_form.is_valid():
            out = {
                'success': False,
                'errors': layer_form.errors
            }
            return HttpResponse(
                json.dumps(out),
                content_type='application/json',
                status=400)
        attribute_form = layer_attribute_set(
            request.POST,
            instance=layer,
            prefix="layer_attribute_set",
            queryset=Attribute.objects.order_by('display_order'))
        category_form = CategoryForm(request.POST, prefix="category_choice_field", initial=int(
            request.POST["category_choice_field"]) if "category_choice_field" in request.POST else None)
        tkeywords_form = TKeywordForm(
            request.POST,
            prefix="tkeywords")
    else:
        layer_form = LayerForm(instance=layer, prefix="resource")
        attribute_form = layer_attribute_set(
            instance=layer,
            prefix="layer_attribute_set",
            queryset=Attribute.objects.order_by('display_order'))
        category_form = CategoryForm(
            prefix="category_choice_field",
            initial=topic_category.id if topic_category else None)
        # Keywords from THESAURI management: pre-fill the form with the
        # ids of the layer's thesaurus keywords (in the chosen language).
        layer_tkeywords = layer.tkeywords.all()
        tkeywords_list = ''
        lang = 'en'  # TODO: use user's language
        if layer_tkeywords and len(layer_tkeywords) > 0:
            tkeywords_ids = layer_tkeywords.values_list('id', flat=True)
            if hasattr(settings, 'THESAURI'):
                for el in settings.THESAURI:
                    thesaurus_name = el['name']
                    try:
                        t = Thesaurus.objects.get(identifier=thesaurus_name)
                        for tk in t.thesaurus.filter(pk__in=tkeywords_ids):
                            tkl = tk.keyword.filter(lang=lang)
                            if len(tkl) > 0:
                                tkl_ids = ",".join(
                                    map(str, tkl.values_list('id', flat=True)))
                                tkeywords_list += "," + tkl_ids if len(
                                    tkeywords_list) > 0 else tkl_ids
                    except BaseException:
                        tb = traceback.format_exc()
                        logger.error(tb)
        tkeywords_form = TKeywordForm(
            prefix="tkeywords",
            initial={'tkeywords': tkeywords_list})
    if request.method == "POST" and layer_form.is_valid() and attribute_form.is_valid(
    ) and category_form.is_valid() and tkeywords_form.is_valid():
        new_poc = layer_form.cleaned_data['poc']
        new_author = layer_form.cleaned_data['metadata_author']
        if new_poc is None:
            if poc is None:
                poc_form = ProfileForm(
                    request.POST,
                    prefix="poc",
                    instance=poc)
            else:
                poc_form = ProfileForm(request.POST, prefix="poc")
            if poc_form.is_valid():
                if len(poc_form.cleaned_data['profile']) == 0:
                    # FIXME use form.add_error in django > 1.7
                    errors = poc_form._errors.setdefault(
                        'profile', ErrorList())
                    errors.append(
                        _('You must set a point of contact for this resource'))
                    poc = None
            # BUG FIX: ``has_changed`` is a method -- without the call the
            # bound-method object was always truthy and the check a no-op.
            if poc_form.has_changed() and poc_form.is_valid():
                new_poc = poc_form.save()
        if new_author is None:
            if metadata_author is None:
                author_form = ProfileForm(request.POST, prefix="author",
                                          instance=metadata_author)
            else:
                author_form = ProfileForm(request.POST, prefix="author")
            if author_form.is_valid():
                if len(author_form.cleaned_data['profile']) == 0:
                    # FIXME use form.add_error in django > 1.7
                    errors = author_form._errors.setdefault(
                        'profile', ErrorList())
                    errors.append(
                        _('You must set an author for this resource'))
                    metadata_author = None
            # BUG FIX: call has_changed() (see poc_form above).
            if author_form.has_changed() and author_form.is_valid():
                new_author = author_form.save()
        new_category = TopicCategory.objects.get(
            id=category_form.cleaned_data['category_choice_field'])
        # Persist per-attribute edits coming from the inline formset.
        for form in attribute_form.cleaned_data:
            la = Attribute.objects.get(id=int(form['id'].id))
            la.description = form["description"]
            la.attribute_label = form["attribute_label"]
            la.visible = form["visible"]
            la.display_order = form["display_order"]
            la.save()
        if new_poc is not None or new_author is not None:
            if new_poc is not None:
                layer.poc = new_poc
            if new_author is not None:
                layer.metadata_author = new_author
        new_keywords = layer_form.cleaned_data['keywords']
        if new_keywords is not None:
            layer.keywords.clear()
            layer.keywords.add(*new_keywords)
        new_regions = [x.strip() for x in layer_form.cleaned_data['regions']]
        if new_regions is not None:
            layer.regions.clear()
            layer.regions.add(*new_regions)
        the_layer = layer_form.instance
        the_layer.save()
        # Re-own stale upload sessions so they follow the current owner.
        up_sessions = UploadSession.objects.filter(layer=the_layer.id)
        if up_sessions.count() > 0 and up_sessions[0].user != the_layer.owner:
            up_sessions.update(user=the_layer.owner)
        if new_category is not None:
            Layer.objects.filter(id=the_layer.id).update(
                category=new_category
            )
        if getattr(settings, 'SLACK_ENABLED', False):
            # Best-effort notification; never fail the request over it.
            try:
                from geonode.contrib.slack.utils import build_slack_message_layer, send_slack_messages
                send_slack_messages(
                    build_slack_message_layer(
                        "layer_edit", the_layer))
            except BaseException:
                logger.error("Could not send slack message.")
        if not ajax:
            return HttpResponseRedirect(
                reverse(
                    'layer_detail',
                    args=(
                        layer.service_typename,
                    )))
        message = layer.alternate
        try:
            # Keywords from THESAURI management: map the submitted ids back
            # to thesaurus keywords and attach them to the layer.
            tkeywords_to_add = []
            tkeywords_cleaned = tkeywords_form.clean()
            if tkeywords_cleaned and len(tkeywords_cleaned) > 0:
                tkeywords_ids = []
                for i, val in enumerate(tkeywords_cleaned):
                    try:
                        cleaned_data = [value for key, value in tkeywords_cleaned[i].items(
                        ) if 'tkeywords-tkeywords' in key.lower() and 'autocomplete' not in key.lower()]
                        tkeywords_ids.extend(map(int, cleaned_data[0]))
                    except BaseException:
                        pass
                if hasattr(settings, 'THESAURI'):
                    for el in settings.THESAURI:
                        thesaurus_name = el['name']
                        try:
                            t = Thesaurus.objects.get(
                                identifier=thesaurus_name)
                            for tk in t.thesaurus.all():
                                tkl = tk.keyword.filter(pk__in=tkeywords_ids)
                                if len(tkl) > 0:
                                    tkeywords_to_add.append(tkl[0].keyword_id)
                        except BaseException:
                            tb = traceback.format_exc()
                            logger.error(tb)
            layer.tkeywords.add(*tkeywords_to_add)
        except BaseException:
            tb = traceback.format_exc()
            logger.error(tb)
        return HttpResponse(json.dumps({'message': message}))
    if settings.ADMIN_MODERATE_UPLOADS:
        if not request.user.is_superuser:
            # Non-admins may not publish directly under moderation.
            layer_form.fields['is_published'].widget.attrs.update(
                {'disabled': 'true'})
            can_change_metadata = request.user.has_perm(
                'change_resourcebase_metadata',
                layer.get_self_resource())
            try:
                is_manager = request.user.groupmember_set.all().filter(role='manager').exists()
            except BaseException:
                is_manager = False
            if not is_manager or not can_change_metadata:
                layer_form.fields['is_approved'].widget.attrs.update(
                    {'disabled': 'true'})
    if poc is not None:
        layer_form.fields['poc'].initial = poc.id
        poc_form = ProfileForm(prefix="poc")
        poc_form.hidden = True
    else:
        poc_form = ProfileForm(prefix="poc")
        poc_form.hidden = False
    if metadata_author is not None:
        layer_form.fields['metadata_author'].initial = metadata_author.id
        author_form = ProfileForm(prefix="author")
        author_form.hidden = True
    else:
        author_form = ProfileForm(prefix="author")
        author_form.hidden = False
    if 'access_token' in request.session:
        access_token = request.session['access_token']
    else:
        u = uuid.uuid1()
        access_token = u.hex
    viewer = json.dumps(map_obj.viewer_json(
        request.user, access_token, * (NON_WMS_BASE_LAYERS + [maplayer])))
    metadataxsl = False
    if "geonode.contrib.metadataxsl" in settings.INSTALLED_APPS:
        metadataxsl = True
    metadata_author_groups = []
    if request.user.is_superuser or request.user.is_staff:
        metadata_author_groups = GroupProfile.objects.all()
    else:
        try:
            all_metadata_author_groups = chain(
                request.user.group_list_all().distinct(),
                GroupProfile.objects.exclude(access="private").exclude(access="public-invite"))
        except BaseException:
            all_metadata_author_groups = GroupProfile.objects.exclude(
                access="private").exclude(access="public-invite")
        # De-duplicate while preserving order (was a side-effect listcomp).
        for item in all_metadata_author_groups:
            if item not in metadata_author_groups:
                metadata_author_groups.append(item)
    return render(request, template, context={
        "resource": layer,
        "layer": layer,
        "layer_form": layer_form,
        "poc_form": poc_form,
        "author_form": author_form,
        "attribute_form": attribute_form,
        "category_form": category_form,
        "tkeywords_form": tkeywords_form,
        "viewer": viewer,
        "preview": getattr(settings, 'GEONODE_CLIENT_LAYER_PREVIEW_LIBRARY', 'geoext'),
        "crs": getattr(settings, 'DEFAULT_MAP_CRS', 'EPSG:900913'),
        "metadataxsl": metadataxsl,
        "freetext_readonly": getattr(
            settings,
            'FREETEXT_KEYWORDS_READONLY',
            False),
        "metadata_author_groups": metadata_author_groups,
        "GROUP_MANDATORY_RESOURCES":
            getattr(settings, 'GROUP_MANDATORY_RESOURCES', False),
    })
@login_required
def layer_metadata_advanced(request, layername):
    """Render the advanced metadata editor by delegating to layer_metadata."""
    return layer_metadata(
        request, layername, template='layers/layer_metadata_advanced.html')
@login_required
def layer_change_poc(request, ids, template='layers/layer_change_poc.html'):
    """Bulk-assign a point of contact to the layers named by '_'-joined ids.

    GET shows an unbound PocForm; a valid POST saves the chosen contact on
    every selected layer and redirects to the admin layer list.
    """
    layers = Layer.objects.filter(id__in=ids.split('_'))
    if settings.MONITORING_ENABLED:
        # Register every resolved layer with the monitoring middleware.
        for resource in layers:
            if hasattr(resource, 'alternate'):
                request.add_resource('layer', resource.alternate)

    if request.method != 'POST':
        form = PocForm()  # An unbound form
    else:
        form = PocForm(request.POST)
        if form.is_valid():
            contact = form.cleaned_data['contact']
            for layer in layers:
                layer.poc = contact
                layer.save()
            # Redirect after POST
            return HttpResponseRedirect('/admin/maps/layer')

    return render(
        request, template, context={'layers': layers, 'form': form})
@login_required
def layer_replace(request, layername, template='layers/layer_replace.html'):
    """Replace the uploaded data of an existing layer.

    GET renders the upload form; POST validates the uploaded files and,
    provided the new data keeps the layer's vector/raster kind, removes the
    old backend store and re-uploads the files under the same layer name.
    POST responses are JSON describing success or failure.
    NOTE(review): other HTTP methods fall through and return None, which
    Django rejects with a 500 -- confirm whether a 405 response is wanted.
    """
    layer = _resolve_layer(
        request,
        layername,
        'base.change_resourcebase',
        _PERMISSION_MSG_MODIFY)
    if request.method == 'GET':
        ctx = {
            'charsets': CHARSETS,
            'layer': layer,
            # the template shows vector-specific options for feature types
            'is_featuretype': layer.is_vector(),
            'is_layer': True,
        }
        return render(request, template, context=ctx)
    elif request.method == 'POST':
        form = LayerUploadForm(request.POST, request.FILES)
        tempdir = None
        out = {}
        if form.is_valid():
            try:
                tempdir, base_file = form.write_files()
                # Refuse to silently change the layer's kind.
                if layer.is_vector() and is_raster(base_file):
                    out['success'] = False
                    out['errors'] = _(
                        "You are attempting to replace a vector layer with a raster.")
                elif (not layer.is_vector()) and is_vector(base_file):
                    out['success'] = False
                    out['errors'] = _(
                        "You are attempting to replace a raster layer with a vector.")
                else:
                    if check_ogc_backend(geoserver.BACKEND_PACKAGE):
                        # delete geoserver's store before upload
                        cat = gs_catalog
                        cascading_delete(cat, layer.typename)
                        out['ogc_backend'] = geoserver.BACKEND_PACKAGE
                    elif check_ogc_backend(qgis_server.BACKEND_PACKAGE):
                        # remove the QGIS-server-side layer record, if any
                        try:
                            qgis_layer = QGISServerLayer.objects.get(
                                layer=layer)
                            qgis_layer.delete()
                        except QGISServerLayer.DoesNotExist:
                            pass
                        out['ogc_backend'] = qgis_server.BACKEND_PACKAGE
                    # overwrite=True re-creates the layer under its old name
                    saved_layer = file_upload(
                        base_file,
                        name=layer.name,
                        user=request.user,
                        overwrite=True,
                        charset=form.cleaned_data["charset"],
                    )
                    out['success'] = True
                    out['url'] = reverse(
                        'layer_detail', args=[
                            saved_layer.service_typename])
            except Exception as e:
                logger.exception(e)
                tb = traceback.format_exc()
                out['success'] = False
                out['errors'] = str(tb)
            finally:
                # always clean up the temporary upload directory
                if tempdir is not None:
                    shutil.rmtree(tempdir)
        else:
            errormsgs = []
            for e in form.errors.values():
                errormsgs.append([escape(v) for v in e])
            out['errors'] = form.errors
            out['errormsgs'] = errormsgs
        if out['success']:
            status_code = 200
        else:
            status_code = 400
        return HttpResponse(
            json.dumps(out),
            content_type='application/json',
            status=status_code)
@login_required
def layer_remove(request, layername, template='layers/layer_remove.html'):
    """Confirm (GET) and perform (POST) deletion of a layer.

    Deletion is delegated to the asynchronous ``delete_layer`` task inside a
    transaction; on failure the confirmation page is re-rendered with an
    error message.  Any other HTTP method receives a 403.
    """
    layer = _resolve_layer(
        request,
        layername,
        'base.delete_resourcebase',
        _PERMISSION_MSG_DELETE)

    if request.method == 'GET':
        return render(request, template, context={"layer": layer})

    if request.method == 'POST':
        try:
            with transaction.atomic():
                delete_layer.delay(object_id=layer.id)
        except Exception as exc:
            traceback.print_exc()
            msg = '{0}: {1}.'.format(
                _('Unable to delete layer'), layer.alternate)
            if 'referenced by layer group' in getattr(exc, 'message', ''):
                msg = _(
                    'This layer is a member of a layer group, you must remove the layer from the group '
                    'before deleting.')
            messages.error(request, msg)
            return render(request, template, context={"layer": layer})
        return HttpResponseRedirect(reverse("layer_browse"))

    return HttpResponse("Not allowed", status=403)
@login_required
def layer_granule_remove(
        request,
        granule_id,
        layername,
        template='layers/layer_granule_remove.html'):
    """Remove a single granule from a mosaic layer.

    GET renders a confirmation page; POST deletes the granule from the first
    coverage of the layer's GeoServer mosaic store and redirects to the layer
    detail page.  Any other HTTP method receives a 403.
    """
    layer = _resolve_layer(
        request,
        layername,
        'base.delete_resourcebase',
        _PERMISSION_MSG_DELETE)
    if (request.method == 'GET'):
        return render(request, template, context={
            "granule_id": granule_id,
            "layer": layer
        })
    if (request.method == 'POST'):
        try:
            cat = gs_catalog
            # Drop the catalog cache so we operate on fresh store metadata.
            cat._cache.clear()
            store = cat.get_store(layer.name)
            coverages = cat.mosaic_coverages(store)
            cat.mosaic_delete_granule(
                coverages['coverages']['coverage'][0]['name'], store, granule_id)
        except Exception as e:
            traceback.print_exc()
            message = '{0}: {1}.'.format(
                _('Unable to delete layer'), layer.alternate)
            # NOTE(review): ``e.message`` only exists on Python 2 exceptions;
            # getattr keeps this safe but the check never matches on Python 3.
            if 'referenced by layer group' in getattr(e, 'message', ''):
                message = _(
                    'This layer is a member of a layer group, you must remove the layer from the group '
                    'before deleting.')
            messages.error(request, message)
            return render(
                request, template, context={"layer": layer})
        return HttpResponseRedirect(
            reverse(
                'layer_detail', args=(
                    layer.service_typename,)))
    else:
        return HttpResponse("Not allowed", status=403)
def layer_thumbnail(request, layername):
    """Create and store a thumbnail image for a layer (POST only).

    The POST body either carries a react-style payload with a base64-encoded
    ``image`` field, or a viewer spec that is rendered server-side via
    ``_render_thumbnail``.
    """
    if request.method != 'POST':
        # BUG FIX: non-POST requests previously fell through and returned
        # None, which makes Django raise a 500; answer 405 explicitly.
        return HttpResponse(
            content='Method not allowed',
            status=405,
            content_type='text/plain')
    layer_obj = _resolve_layer(request, layername)
    try:
        try:
            preview = json.loads(request.body).get('preview', None)
        except BaseException:
            preview = None
        if preview and preview == 'react':
            # Client already rendered the image and sent it base64-encoded.
            format, image = json.loads(
                request.body)['image'].split(';base64,')
            image = base64.b64decode(image)
        else:
            image = _render_thumbnail(request.body)
        if not image:
            # BUG FIX: a bare ``return`` handed None back to Django here;
            # report the rendering failure instead.
            return HttpResponse(
                content='error saving thumbnail',
                status=500,
                content_type='text/plain')
        filename = "layer-%s-thumb.png" % layer_obj.uuid
        layer_obj.save_thumbnail(filename, image)
        return HttpResponse('Thumbnail saved')
    except BaseException:
        return HttpResponse(
            content='error saving thumbnail',
            status=500,
            content_type='text/plain'
        )
def get_layer(request, layername):
    """Get Layer object as JSON (GET only)."""
    # Function to treat Decimal in json.dumps.
    # http://stackoverflow.com/a/16957370/1198772
    def decimal_default(obj):
        # json.dumps calls this for any value it cannot serialize natively.
        if isinstance(obj, decimal.Decimal):
            return float(obj)
        raise TypeError
    logger.debug('Call get layer')
    if request.method != 'GET':
        # BUG FIX: non-GET requests previously returned None, which makes
        # Django raise a 500; answer 405 explicitly.
        return HttpResponse(
            content='Method not allowed',
            status=405,
            content_type='text/plain')
    layer_obj = _resolve_layer(request, layername)
    logger.debug(layername)
    response = {
        'typename': layername,
        'name': layer_obj.name,
        'title': layer_obj.title,
        'url': layer_obj.get_tiles_url(),
        'bbox_string': layer_obj.bbox_string,
        'bbox_x0': layer_obj.bbox_x0,
        'bbox_x1': layer_obj.bbox_x1,
        'bbox_y0': layer_obj.bbox_y0,
        'bbox_y1': layer_obj.bbox_y1,
    }
    return HttpResponse(json.dumps(
        response,
        ensure_ascii=False,
        default=decimal_default
    ),
        content_type='application/javascript')
def layer_metadata_detail(
        request,
        layername,
        template='layers/layer_metadata_detail.html'):
    """Render the read-only metadata page for a layer."""
    layer = _resolve_layer(
        request,
        layername,
        'view_resourcebase',
        _PERMISSION_MSG_METADATA)

    # Resolve the owning group profile, tolerating a missing record.
    group = None
    if layer.group:
        try:
            group = GroupProfile.objects.get(slug=layer.group.name)
        except GroupProfile.DoesNotExist:
            pass

    context = {
        "resource": layer,
        "group": group,
        'SITEURL': settings.SITEURL[:-1],
    }
    return render(request, template, context=context)
def layer_metadata_upload(
        request,
        layername,
        template='layers/layer_metadata_upload.html'):
    """Render the page for uploading a metadata document for a layer."""
    layer = _resolve_layer(
        request,
        layername,
        'base.change_resourcebase',
        _PERMISSION_MSG_METADATA)
    context = {
        "resource": layer,
        "layer": layer,
        'SITEURL': settings.SITEURL[:-1],
    }
    return render(request, template, context=context)
def layer_sld_upload(
        request,
        layername,
        template='layers/layer_style_upload.html'):
    """Render the page for uploading an SLD style document for a layer."""
    layer = _resolve_layer(
        request,
        layername,
        'base.change_resourcebase',
        _PERMISSION_MSG_METADATA)
    context = {
        "resource": layer,
        "layer": layer,
        'SITEURL': settings.SITEURL[:-1],
    }
    return render(request, template, context=context)
@login_required
def layer_batch_metadata(request, ids):
    """Delegate batch metadata editing of the given layer ids to batch_modify."""
    return batch_modify(request, ids, 'Layer')
def layer_view_counter(layer_id, viewer):
    """Increment the view counter of the given layer on behalf of *viewer*."""
    layer = Layer.objects.get(id=layer_id)
    user = get_user_model().objects.get(username=viewer)
    layer.view_count_up(user, do_local=True)
| gpl-3.0 |
codehs/closure-linter-for-everybody | closure_linter/common/position.py | 285 | 3324 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to represent positions within strings."""
__author__ = ('robbyw@google.com (Robert Walker)',
'ajp@google.com (Andy Perelson)')
class Position(object):
    """Object representing a segment of a string.

    Attributes:
      start: The index in to the string where the segment starts.
      length: The length of the string segment.
    """

    def __init__(self, start, length):
        """Initialize the position object.

        Args:
          start: The start index.
          length: The number of characters to include.
        """
        self.start = start
        self.length = length

    def __repr__(self):
        # Debugging aid; additive, does not change the public interface.
        return 'Position(%r, %r)' % (self.start, self.length)

    def Get(self, string):
        """Returns this range of the given string.

        Args:
          string: The string to slice.

        Returns:
          The string within the range specified by this object.
        """
        return string[self.start:self.start + self.length]

    def Set(self, target, source):
        """Sets this range within the target string to the source string.

        Args:
          target: The target string.
          source: The source string.

        Returns:
          The resulting string
        """
        return target[:self.start] + source + target[self.start + self.length:]

    # IDIOM FIX: the original used the pre-Python-2.4 ``f = staticmethod(f)``
    # assignment form; the @staticmethod decorator is equivalent and clearer.
    @staticmethod
    def AtEnd(string):
        """Create a Position representing the end of the given string.

        Args:
          string: The string to represent the end of.

        Returns:
          The created Position object.
        """
        return Position(len(string), 0)

    def IsAtEnd(self, string):
        """Returns whether this position is at the end of the given string.

        Args:
          string: The string to test for the end of.

        Returns:
          Whether this position is at the end of the given string.
        """
        return self.start == len(string) and self.length == 0

    @staticmethod
    def AtBeginning():
        """Create a Position representing the beginning of any string.

        Returns:
          The created Position object.
        """
        return Position(0, 0)

    def IsAtBeginning(self):
        """Returns whether this position is at the beginning of any string.

        Returns:
          Whether this position is at the beginning of any string.
        """
        return self.start == 0 and self.length == 0

    @staticmethod
    def All(string):
        """Create a Position representing the entire string.

        Args:
          string: The string to represent the entirety of.

        Returns:
          The created Position object.
        """
        return Position(0, len(string))

    @staticmethod
    def Index(index):
        """Returns a Position object for the specified index.

        Args:
          index: The index to select, inclusively.

        Returns:
          The created Position object.
        """
        return Position(index, 1)
| apache-2.0 |
lukeiwanski/tensorflow-opencl | tensorflow/contrib/distributions/__init__.py | 6 | 5042 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes representing statistical distributions and ops for working with them.
See the @{$python/contrib.distributions} guide.
@@ReparameterizationType
@@Distribution
@@Binomial
@@Bernoulli
@@BernoulliWithSigmoidProbs
@@Beta
@@BetaWithSoftplusConcentration
@@Categorical
@@Chi2
@@Chi2WithAbsDf
@@Exponential
@@ExponentialWithSoftplusRate
@@Gamma
@@GammaWithSoftplusConcentrationRate
@@Geometric
@@InverseGamma
@@InverseGammaWithSoftplusConcentrationRate
@@Laplace
@@LaplaceWithSoftplusScale
@@Logistic
@@NegativeBinomial
@@Normal
@@NormalWithSoftplusScale
@@Poisson
@@StudentT
@@StudentTWithAbsDfSoftplusScale
@@Uniform
@@MultivariateNormalDiag
@@MultivariateNormalTriL
@@MultivariateNormalDiagPlusLowRank
@@MultivariateNormalDiagWithSoftplusScale
@@Dirichlet
@@DirichletMultinomial
@@Multinomial
@@WishartCholesky
@@WishartFull
@@matrix_diag_transform
@@TransformedDistribution
@@QuantizedDistribution
@@Mixture
@@normal_conjugates_known_scale_posterior
@@normal_conjugates_known_scale_predictive
@@kl
@@RegisterKL
@@softplus_inverse
@@ExpRelaxedOneHotCategorical
@@OneHotCategorical
@@RelaxedBernoulli
@@RelaxedOneHotCategorical
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.contrib.distributions.python.ops.bernoulli import *
from tensorflow.contrib.distributions.python.ops.beta import *
from tensorflow.contrib.distributions.python.ops.binomial import *
from tensorflow.contrib.distributions.python.ops.categorical import *
from tensorflow.contrib.distributions.python.ops.chi2 import *
from tensorflow.contrib.distributions.python.ops.conditional_distribution import *
from tensorflow.contrib.distributions.python.ops.conditional_transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.dirichlet import *
from tensorflow.contrib.distributions.python.ops.dirichlet_multinomial import *
from tensorflow.contrib.distributions.python.ops.distribution import *
from tensorflow.contrib.distributions.python.ops.distribution_util import matrix_diag_transform
from tensorflow.contrib.distributions.python.ops.distribution_util import softplus_inverse
from tensorflow.contrib.distributions.python.ops.exponential import *
from tensorflow.contrib.distributions.python.ops.gamma import *
from tensorflow.contrib.distributions.python.ops.geometric import *
from tensorflow.contrib.distributions.python.ops.inverse_gamma import *
from tensorflow.contrib.distributions.python.ops.kullback_leibler import *
from tensorflow.contrib.distributions.python.ops.laplace import *
from tensorflow.contrib.distributions.python.ops.logistic import *
from tensorflow.contrib.distributions.python.ops.mixture import *
from tensorflow.contrib.distributions.python.ops.multinomial import *
from tensorflow.contrib.distributions.python.ops.mvn_diag import *
from tensorflow.contrib.distributions.python.ops.mvn_diag_plus_low_rank import *
from tensorflow.contrib.distributions.python.ops.mvn_tril import *
from tensorflow.contrib.distributions.python.ops.negative_binomial import *
from tensorflow.contrib.distributions.python.ops.normal import *
from tensorflow.contrib.distributions.python.ops.normal_conjugate_posteriors import *
from tensorflow.contrib.distributions.python.ops.onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.poisson import *
from tensorflow.contrib.distributions.python.ops.quantized_distribution import *
from tensorflow.contrib.distributions.python.ops.relaxed_bernoulli import *
from tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.student_t import *
from tensorflow.contrib.distributions.python.ops.transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.uniform import *
from tensorflow.contrib.distributions.python.ops.wishart import *
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = ['ConditionalDistribution',
'ConditionalTransformedDistribution',
'FULLY_REPARAMETERIZED', 'NOT_REPARAMETERIZED']
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
100star/h2o | py/testdir_single_jvm/test_KMeans_enum_fvec.py | 9 | 2335 | import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i
import h2o_kmeans, h2o_exec as h2e
def write_syn_dataset(csvPathname, rowCount, colCount, SEED):
    """Write a random CSV with colCount enum columns from {a,b,c,d} plus one int 0-2 output column.

    All random draws come from an RNG seeded with SEED so the generated
    dataset is reproducible.  (The original created the seeded RNG `r1`
    but then drew from the un-seeded global `random` module, defeating
    the purpose of passing SEED.)
    """
    r1 = random.Random(SEED)
    # Context manager guarantees the file is flushed/closed on all paths.
    with open(csvPathname, "w+") as dsf:
        for i in range(rowCount):
            rowData = []
            for j in range(colCount):
                rowData.append(r1.choice(['a', 'b', 'c', 'd']))
            # Last column: small integer "output" column.
            rowData.append(r1.randint(0, 2))
            dsf.write(",".join(map(str, rowData)) + "\n")
class Basic(unittest.TestCase):
    """KMeans smoke test over small synthetic enum datasets on a 1-node H2O cloud."""

    def tearDown(self):
        # Fail the test if H2O logged any errors during the run.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        global SEED
        # Seed is randomized (and printed by h2o) so failures are reproducible.
        SEED = h2o.setup_random_seed()
        # One-node cloud with a 4 GB Java heap.
        h2o.init(1,java_heap_GB=4)

    @classmethod
    def tearDownClass(cls):
        h2o.tear_down_cloud()

    def test_KMeans2_enum(self):
        SYNDATASETS_DIR = h2o.make_syn_dir()
        # (rows, cols, key prefix, timeout seconds) per synthetic dataset.
        # NOTE(review): hex_key and timeoutSecs from this list are not used
        # below (the parse key is derived from the filename and runKMeans
        # uses a fixed 5s timeout) — presumably leftovers; confirm intent.
        tryList = [
            (100, 11, 'cA', 5),
            (100, 10, 'cB', 5),
            (100, 9, 'cC', 5),
            (100, 8, 'cD', 5),
            (100, 7, 'cE', 5),
            (100, 6, 'cF', 5),
            (100, 5, 'cG', 5),
        ]
        ### h2b.browseTheCloud()
        lenNodes = len(h2o.nodes)
        cnum = 0
        for (rowCount, colCount, hex_key, timeoutSecs) in tryList:
            cnum += 1
            csvFilename = 'syn_' + str(SEED) + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
            csvPathname = SYNDATASETS_DIR + '/' + csvFilename
            print "Creating random", csvPathname
            write_syn_dataset(csvPathname, rowCount, colCount, SEED)
            # 'put' schema uploads the local file to the cloud before parsing.
            parseResult = h2i.import_parse(path=csvPathname, schema='put', hex_key=csvFilename + ".hex")
            print "Parse result['destination_key']:", parseResult['destination_key']
            # KMeans parameters: 2 clusters, Furthest-point initialization.
            kwargs = {
                'k': 2,
                'initialization': 'Furthest',
                'destination_key': 'benign_k.hex',
                'max_iter': 10,
            }
            kmeans = h2o_cmd.runKMeans(parseResult=parseResult, timeoutSecs=5, **kwargs)
            # Validates cluster membership/centers against the parsed dataset.
            h2o_kmeans.bigCheckResults(self, kmeans, csvPathname, parseResult, 'd', **kwargs)
# Allow running this test file directly (h2o.unit_main wraps unittest.main).
if __name__ == '__main__':
    h2o.unit_main()
| apache-2.0 |
tri2sing/LinearAlgebraPython | submit_machine_learning_lab.py | 3 | 17643 | # version code 1049
######## ########
# Hi there, curious student. #
# #
# This submission script runs some tests on your #
# code and then uploads it to Coursera for grading. #
# #
# Changing anything in this script might cause your #
# submissions to fail. #
######## ########
import io, os, sys, doctest, traceback, importlib, urllib.request, urllib.parse, urllib.error, base64, hashlib, random, ast
SUBMIT_VERSION = "1049"
URL = 'matrix-001'
part_friendly_names = ['Signum', 'Evaluate', 'Error', 'Find Grad', 'Gradient Descent Step']
groups = [[('59qJbbUXhEMmAqc6myiKKi8MahYSqo1R', 'Signum', ">>> a = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9},{0: 267, 1: 277, 2: -398, 3: 446, 4: 844, 5: -613, 6: -157, 7: 28, 8: 25, 9: 416})\n>>> print(test_format(signum(a)))\n>>> b = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29},{0: -456, 1: -120, 2: 937, 3: -123, 4: 387, 5: 54, 6: -131, 7: 275, 8: 611, 9: 472, 10: 255, 11: -14, 12: 827, 13: -586, 14: 864, 15: -679, 16: -851, 17: -431, 18: 17, 19: 986, 20: 716, 21: 117, 22: 645, 23: 880, 24: -695, 25: 692, 26: 861, 27: -441, 28: -8, 29: 49})\n>>> print(test_format(signum(b)))\n>>> c = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49},{0: -470, 1: -30, 2: 513, 3: 799, 4: -721, 5: 754, 6: 359, 7: -337, 8: -687, 9: -588, 10: 505, 11: -582, 12: -351, 13: -176, 14: -667, 15: -851, 16: 435, 17: -238, 18: 544, 19: 383, 20: -985, 21: -830, 22: 856, 23: 333, 24: -631, 25: -271, 26: -309, 27: -461, 28: -995, 29: -245, 30: -21, 31: -534, 32: 379, 33: 976, 34: -337, 35: -800, 36: 37, 37: 811, 38: -598, 39: -439, 40: 653, 41: 317, 42: -468, 43: 705, 44: -649, 45: 363, 46: -698, 47: -837, 48: 477, 49: 520})\n>>> print(test_format(signum(c)))\n>>> d = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69},{0: 837, 1: -801, 2: -376, 3: 388, 4: 482, 5: 313, 6: 812, 7: 118, 8: 796, 9: -273, 10: 763, 11: -534, 12: 584, 13: 74, 14: 214, 15: 564, 16: -923, 17: 977, 18: -859, 19: 288, 20: 99, 21: 676, 22: -223, 23: -103, 24: 893, 25: 4, 26: -734, 27: 120, 28: 549, 29: 433, 30: -295, 31: 822, 32: 655, 33: 870, 34: 205, 35: -241, 36: -632, 37: 159, 38: 476, 39: 189, 
40: -210, 41: 158, 42: -945, 43: -270, 44: 9, 45: 510, 46: 254, 47: 5, 48: 755, 49: 473, 50: 347, 51: 259, 52: -305, 53: -303, 54: -299, 55: 371, 56: 47, 57: 143, 58: -97, 59: -690, 60: 788, 61: 80, 62: -3, 63: 332, 64: -88, 65: -746, 66: 775, 67: -416, 68: 688, 69: -93})\n>>> print(test_format(signum(d)))\n>>> e = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89},{0: -984, 1: -230, 2: 334, 3: -757, 4: 496, 5: 522, 6: -592, 7: -278, 8: -245, 9: 168, 10: -175, 11: -835, 12: 803, 13: 862, 14: 932, 15: 717, 16: -693, 17: 520, 18: -761, 19: 814, 20: 571, 21: -891, 22: 316, 23: 396, 24: 146, 25: 202, 26: 956, 27: -62, 28: 616, 29: 594, 30: -482, 31: -485, 32: 183, 33: 830, 34: 857, 35: 512, 36: -459, 37: 589, 38: 705, 39: -629, 40: -240, 41: 207, 42: -514, 43: 122, 44: -999, 45: -463, 46: -38, 47: 22, 48: -535, 49: -82, 50: 514, 51: -303, 52: 410, 53: -658, 54: 18, 55: 66, 56: 823, 57: -237, 58: -256, 59: -956, 60: 667, 61: 915, 62: 588, 63: -716, 64: 885, 65: 791, 66: -557, 67: -613, 68: -113, 69: -364, 70: 334, 71: -872, 72: 135, 73: -270, 74: 265, 75: -998, 76: 376, 77: 184, 78: 845, 79: -656, 80: 703, 81: -541, 82: -152, 83: 330, 84: -67, 85: -598, 86: 921, 87: -113, 88: -643, 89: 893})\n>>> print(test_format(signum(e)))\n>>> print(test_format(signum(Vec({'a','c','$'}, {'c':-0.5}))))\n")], [('59qJbbUXhEMmAqc6Q7T0ivY1zW8PXGIE', 'Evaluate', '>>> from mat import Mat\n>>> from vec import Vec\n>>> from vecutil import list2vec\n>>> from matutil import listlist2mat\n>>> A1 = listlist2mat([[10, 7, 11, 10, 14], [1, 1, 13, 3, 2], [6, 13, 3, 2, 6], [10, 10, 12, 1, 2], [2, 1, 5, 7, 10]])\n>>> b1 = list2vec([1, 1, -1, -1, 1])\n>>> A2 = 
Mat((set(range(97,123)),set(range(65,91))),{(x,y): 301-(7*((x-97)+26*(y-65))%761) for x in range(97,123) for y in range(65,91)})\n>>> b2 = Vec(A2.D[0], {x:(-1)**i for i, x in enumerate(sorted(A2.D[0]))})\n>>> print(test_format(fraction_wrong(A1, b1, Vec(A1.D[1], {}))))\n>>> print(test_format(fraction_wrong(A1, b1, Vec(A1.D[1], {x:-2 for x in A1.D[1]}))))\n>>> print(test_format(fraction_wrong(A1, b1, Vec(A1.D[1], {x: (-1)**i for i, x in enumerate(sorted(A1.D[1]))}))))\n>>> print(test_format(fraction_wrong(A2, b2, Vec(A2.D[1], {}))))\n>>> print(test_format(fraction_wrong(A2, b2, Vec(A2.D[1], {x:-2 for x in A2.D[1]}))))\n>>> print(test_format(fraction_wrong(A2, b2, Vec(A2.D[1], {x: (-1)**i for i, x in enumerate(sorted(A2.D[1]))}))))\n')], [('59qJbbUXhEMmAqc6KN7zKn8z4FN8Qmhm', 'Error', '>>> from mat import Mat\n>>> from vec import Vec\n>>> from vecutil import list2vec\n>>> from matutil import listlist2mat\n>>> A1 = listlist2mat([[10, 7, 11, 10, 14], [1, 1, 13, 3, 2], [6, 13, 3, 2, 6], [10, 10, 12, 1, 2], [2, 1, 5, 7, 10]])\n>>> b1 = list2vec([1, 1, -1, -1, 1])\n>>> A2 = Mat((set(range(97,123)),set(range(65,91))),{(x,y): 301-(7*((x-97)+26*(y-65))%761) for x in range(97,123) for y in range(65,91)})\n>>> b2 = Vec(A2.D[0], {x:(-1)**i for i,x in enumerate(sorted(A2.D[0]))})\n>>> print(test_format(loss(A1, b1, Vec(A1.D[1], {}))))\n>>> print(test_format(loss(A1, b1, Vec(A1.D[1], {x:-2 for x in A1.D[1]}))))\n>>> print(test_format(loss(A1, b1, Vec(A1.D[1], {x: (-1)**i for i, x in enumerate(sorted(A1.D[1]))}))))\n>>> print(test_format(loss(A2, b2, Vec(A2.D[1], {}))))\n>>> print(test_format(loss(A2, b2, Vec(A2.D[1], {x:-2 for x in A2.D[1]}))))\n>>> print(test_format(loss(A2, b2, Vec(A2.D[1], {x: (-1)**i for i, x in enumerate(sorted(A2.D[1]))}))))\n')], [('59qJbbUXhEMmAqc6oyzwzQwAGN8JpU91', 'Find Grad', '>>> from vec import Vec\n>>> from mat import Mat\n>>> from vecutil import list2vec\n>>> from matutil import listlist2mat\n>>> A1 = listlist2mat([[10, 7, 11, 10, 14], [1, 1, 13, 3, 
2], [6, 13, 3, 2, 6], [10, 10, 12, 1, 2], [2, 1, 5, 7, 10]])\n>>> b1 = list2vec([1, 1, -1, -1, 1])\n>>> A2 = Mat((set(range(97,123)),set(range(65,91))),{(x,y): 301-(7*((x-97)+26*(y-65))%761) for x in range(97,123) for y in range(65,91)})\n>>> b2 = Vec(A2.D[0], {x:1 for x in A2.D[0]})\n>>> print(test_format(find_grad(A1, b1, Vec(A1.D[1], {}))))\n>>> print(test_format(find_grad(A1, b1, Vec(A1.D[1], {x:-2 for x in A1.D[1]}))))\n>>> print(test_format(find_grad(A1, b1, Vec(A1.D[1], {x: (-1)**i for i, x in enumerate(sorted(A1.D[1]))}))))\n>>> print(test_format(find_grad(A2, b2, Vec(A2.D[1], {}))))\n>>> print(test_format(find_grad(A2, b2, Vec(A2.D[1], {x:-2 for x in A2.D[1]}))))\n>>> print(test_format(find_grad(A2, b2, Vec(A2.D[1], {x: (-1)**i for i, x in enumerate(sorted(A2.D[1]))}))))\n')], [('59qJbbUXhEMmAqc6ZznFxzJm5xqIbE3U', 'Gradient Descent Step', '>>> from vec import Vec\n>>> from mat import Mat\n>>> from vecutil import list2vec\n>>> from matutil import listlist2mat\n>>> A1 = listlist2mat([[10, 7, 11, 10, 14], [1, 1, 13, 3, 2], [6, 13, 3, 2, 6], [10, 10, 12, 1, 2], [2, 1, 5, 7, 10]])\n>>> b1 = list2vec([1, 1, -1, -1, 1])\n>>> A2 = Mat((set(range(97,123)),set(range(65,91))),{(x,y): 301-(7*((x-97)+26*(y-65))%761) for x in range(97,123) for y in range(65,91)})\n>>> b2 = Vec(A2.D[0], {x:1 for x in A2.D[0]})\n>>> print(test_format(gradient_descent_step(A1, b1, Vec(A1.D[1], {}), 2)))\n>>> print(test_format(gradient_descent_step(A1, b1, Vec(A1.D[1], {x:-2 for x in A1.D[1]}), 2)))\n>>> print(test_format(gradient_descent_step(A1, b1, Vec(A1.D[1], {x: (-1)**i for i, x in enumerate(sorted(A1.D[1]))}), 2)))\n>>> print(test_format(gradient_descent_step(A2, b2, Vec(A2.D[1], {}), 3)))\n>>> print(test_format(gradient_descent_step(A2, b2, Vec(A2.D[1], {x:-2 for x in A2.D[1]}), 3)))\n>>> print(test_format(gradient_descent_step(A2, b2, Vec(A2.D[1], {x: (-1)**i for i, x in enumerate(sorted(A2.D[1]))}), 3)))\n')]]
source_files = ['machine_learning_lab.py'] * len(sum(groups,[]))
try:
import machine_learning_lab as solution
test_vars = vars(solution).copy()
except Exception as exc:
print(exc)
print("!! It seems like you have an error in your stencil file. Please fix before submitting.")
sys.exit(1)
def find_lines(varname):
return list(filter(lambda l: varname in l, list(open("python_lab.py"))))
def find_line(varname):
    """Return the first line of python_lab.py containing *varname*, or None."""
    matches = find_lines(varname)
    if matches:
        return matches[0]
    return None
def use_comprehension(varname):
    """Return True if any line mentioning *varname* parses to code containing a comprehension."""
    for line in find_lines(varname):
        try:
            tree = ast.parse(line)
        except (SyntaxError, ValueError):
            # A single line may be a fragment of a multi-line statement and
            # fail to parse in isolation; skip it (same best-effort behavior
            # as before, but without a bare `except` hiding real errors).
            continue
        # ast.dump renders comprehension nodes with the word "comprehension".
        if "comprehension" in ast.dump(tree):
            return True
    return False
def double_comprehension(varname):
    """Return True if the first line mentioning *varname* contains exactly two comprehensions."""
    line = find_line(varname)
    if line is None:
        # No line mentions the variable; the original crashed with a
        # TypeError here (ast.parse(None)).
        return False
    try:
        return ast.dump(ast.parse(line)).count("comprehension") == 2
    except (SyntaxError, ValueError):
        # Fragment of a multi-line statement; cannot be parsed in isolation.
        return False
def line_contains_substr(varname, word):
    """Return True if any line of python_lab.py mentioning *varname* also contains *word*."""
    # Bug fix: the original called find_line() (a single string, or None),
    # then iterated it CHARACTER by character, so a multi-character `word`
    # could never match and a missing variable raised TypeError.  Iterate
    # the matching lines instead, mirroring use_comprehension() above.
    for line in find_lines(varname):
        if word in line:
            return True
    return False
def test_format(obj, precision=6):
tf = lambda o: test_format(o, precision)
delimit = lambda o: ', '.join(o)
otype = type(obj)
if otype is str:
return "'%s'" % obj
elif otype is float or otype is int:
if otype is int:
obj = float(obj)
if -0.000001 < obj < 0.000001:
obj = 0.0
fstr = '%%.%df' % precision
return fstr % obj
elif otype is set:
if len(obj) == 0:
return 'set()'
return '{%s}' % delimit(sorted(map(tf, obj)))
elif otype is dict:
return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
elif otype is list:
return '[%s]' % delimit(map(tf, obj))
elif otype is tuple:
return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) is 1 else '')
elif otype.__name__ in ['Vec','Mat']:
entries = tf({x:obj.f[x] for x in obj.f if tf(obj.f[x]) != tf(0)})
return '%s(%s, %s)' % (otype.__name__, test_format(obj.D), entries)
else:
return str(obj)
def output(tests):
    """Run *tests* (a doctest-format string) against the solution namespace and return captured outputs."""
    parser = doctest.DocTestParser()
    parsed = parser.get_doctest(tests, test_vars, 0, '<string>', 0)
    runner = ModifiedDocTestRunner()
    runner.run(parsed)
    return runner.results
# Expose the grading helpers to the doctest namespace so the per-part
# test strings (in `groups`) can call them directly.
test_vars['test_format'] = test_vars['tf'] = test_format
test_vars['find_lines'] = find_lines
test_vars['find_line'] = find_line
test_vars['use_comprehension'] = use_comprehension
test_vars['double_comprehension'] = double_comprehension
test_vars['line_contains_substr'] = line_contains_substr
# Coursera endpoint template and output flags (overridden by CLI args below).
base_url = '://class.coursera.org/%s/assignment/' % URL
protocol = 'https'
colorize = False
verbose = False
class ModifiedDocTestRunner(doctest.DocTestRunner):
    """DocTestRunner that records raw example outputs instead of pass/fail reports."""

    def __init__(self, *args, **kwargs):
        self.results = []
        # OutputAccepter makes every example "pass" locally; the real
        # comparison happens server-side on the recorded outputs.
        super(ModifiedDocTestRunner, self).__init__(*args, checker=OutputAccepter(), **kwargs)

    def report_success(self, out, test, example, got):
        # Record whatever the example printed.
        self.results.append(got)

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Record the final exception line instead of aborting the run.
        formatted = traceback.format_exception_only(exc_info[0], exc_info[1])[-1]
        self.results.append(formatted)
class OutputAccepter(doctest.OutputChecker):
    """OutputChecker that treats every output as matching (grading happens server-side)."""

    def check_output(self, want, got, optionflags):
        # Accept unconditionally; the server compares the captured output.
        return True
def submit(parts_string, login, password):
    """Interactively submit the selected assignment parts to Coursera.

    Missing credentials / part selection are prompted for.  For each part:
    fetch a login challenge from the server, run the part's doctests
    locally, then POST the captured output plus the source files to the
    grader and print its feedback.
    """
    print('= Coding the Matrix Homework and Lab Submission')
    if not login:
        login = login_prompt()
    if not password:
        password = password_prompt()
    if not parts_string:
        parts_string = parts_prompt()
    parts = parse_parts(parts_string)
    if not all([parts, login, password]):
        # Bail out silently if anything is still missing/empty.
        return
    for sid, name, part_tests in parts:
        sys.stdout.write('== Submitting "%s"' % name)
        if 'DEV' in os.environ: sid += '-dev'
        # Ask the server for a one-time challenge salt for this part.
        (login, ch, state, ch_aux) = get_challenge(login, sid)
        if not all([login, ch, state]):
            print(' !! Error: %s\n' % login)
            return
        # to stop Coursera's strip() from doing anything, we surround in parens
        results = output(part_tests)
        prog_out = '(%s)' % ''.join(map(str.rstrip, results))
        # SHA-1 of challenge+password proves knowledge of the one-time password.
        token = challenge_response(login, password, ch)
        src = source(sid)
        feedback = submit_solution(login, token, sid, prog_out, src, state, ch_aux)
        if len(feedback.strip()) > 0:
            if colorize:
                # Green feedback unless the grader said "incorrect".
                good = 'incorrect' not in feedback.lower()
                print(': \033[1;3%dm%s\033[0m' % (2 if good else 1, feedback.strip()))
            else:
                print(': %s' % feedback.strip())
            if verbose:
                # Echo each doctest line followed by the output it produced.
                res_itr = iter(results)
                for t in part_tests.split('\n'):
                    print(t)
                    if t[:3] == '>>>':
                        sys.stdout.write(next(res_itr))
        # print(part_tests)
        # print(results)
        # for t, r in zip(part_tests.split('\n>>>'), results):
        #     sys.stdout.write('>>> %s\n%s' % (t, r))
        sys.stdout.write('\n\n')
def login_prompt():
    """Prompt the user for their Coursera account email address."""
    prompt = 'Login email address: '
    return input(prompt)
def password_prompt():
    """Prompt for the per-assignment one-time password (not the account password)."""
    prompt = "One-time password from the assignment page (NOT your own account's password): "
    return input(prompt)
def parts_prompt():
    """List the submittable assignment parts and ask which ones to submit."""
    print('These are the assignment parts that you can submit:')
    for index, friendly in enumerate(part_friendly_names, start=1):
        print(' %d) %s' % (index, friendly))
    return input('\nWhich parts do you want to submit? (Ex: 1, 4-7): ')
def parse_parts(string):
    """Expand a spec like "1, 4-7" into the flat list of matching part groups."""
    def extract_range(s):
        bounds = s.split('-')
        if len(bounds) == 1:
            return [int(bounds[0])]
        return list(range(int(bounds[0]), 1 + int(bounds[1])))

    # Flatten every comma-separated token into part numbers...
    selected = []
    for token in string.split(','):
        selected.extend(extract_range(token))
    # ...then collect the (sid, name, tests) tuples for each chosen part.
    chosen = []
    for number in selected:
        chosen.extend(groups[number - 1])
    return chosen
def get_challenge(email, sid):
    """Gets the challenge salt from the server. Returns (email,ch,state,ch_aux)."""
    params = {'email_address': email, 'assignment_part_sid': sid, 'response_encoding': 'delim'}
    challenge_url = '%s%schallenge' % (protocol, base_url)
    payload = urllib.parse.urlencode(params).encode('utf-8')
    request = urllib.request.Request(challenge_url, payload)
    response = urllib.request.urlopen(request)
    # Server replies with a pipe-delimited record of exactly 9 fields.
    fields = response.readall().decode('utf-8').strip().split('|')
    if len(fields) != 9:
        print(' !! %s' % '|'.join(fields))
        sys.exit(1)
    # Fields 2/4/6/8 are email, challenge, state and auxiliary data.
    return (fields[2], fields[4], fields[6], fields[8])
def challenge_response(email, passwd, challenge):
    """Return the SHA-1 hex digest of challenge+password (the login token)."""
    material = (challenge + passwd).encode('utf-8')
    return hashlib.sha1(material).hexdigest()
def submit_solution(email_address, ch_resp, sid, output, source, state, ch_aux):
    """POST one part's program output and source to the grader; return its feedback text."""
    def b64ize(s):
        # The server expects base64-encoded UTF-8 text fields.
        return str(base64.encodebytes(s.encode('utf-8')), 'ascii')

    values = {
        'assignment_part_sid': sid,
        'email_address': email_address,
        'submission': b64ize(output),
        'submission_aux': b64ize(source),
        'challenge_response': ch_resp,
        'state': state,
    }
    submit_url = '%s%ssubmit' % (protocol, base_url)
    payload = urllib.parse.urlencode(values).encode('utf-8')
    request = urllib.request.Request(submit_url, payload)
    response = urllib.request.urlopen(request)
    return response.readall().decode('utf-8').strip()
def source(sid):
    """Concatenate the submit-script version banner with all solution source files."""
    chunks = ['# submit version: %s' % SUBMIT_VERSION]
    # Deduplicate: several parts may share the same source file.
    for filename in set(source_files):
        with open(filename) as fh:
            chunks.append(fh.read())
    return '\n\n'.join(chunks)
# Command-line entry point: parse options (with environment-variable
# defaults so submissions can be scripted) and run submit().
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    env = os.environ
    helps = [ 'numbers or ranges of tasks to submit'
            , 'the email address on your Coursera account'
            , 'your ONE-TIME password'
            , 'use ANSI color escape sequences'
            , 'show the test\'s interaction with your code'
            , 'use an encrypted connection to Coursera'
            , 'use an unencrypted connection to Coursera'
            ]
    parser.add_argument('tasks', default=env.get('COURSERA_TASKS'), nargs='*', help=helps[0])
    parser.add_argument('--email', default=env.get('COURSERA_EMAIL'), help=helps[1])
    parser.add_argument('--password', default=env.get('COURSERA_PASS'), help=helps[2])
    parser.add_argument('--colorize', default=False, action='store_true', help=helps[3])
    parser.add_argument('--verbose', default=False, action='store_true', help=helps[4])
    # --https and --http are mutually exclusive; leaving both unset keeps
    # the module-level default protocol ('https').
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--https', dest="protocol", const="https", action="store_const", help=helps[-2])
    group.add_argument('--http', dest="protocol", const="http", action="store_const", help=helps[-1])
    args = parser.parse_args()
    if args.protocol: protocol = args.protocol
    colorize = args.colorize
    verbose = args.verbose
    submit(','.join(args.tasks), args.email, args.password)
| apache-2.0 |
jsoref/django | tests/validation/test_error_messages.py | 398 | 3642 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from unittest import TestCase
from django.core.exceptions import ValidationError
from django.db import models
class ValidationMessagesTest(TestCase):
    """Check the exact error messages model fields raise when rejecting a value."""

    def _test_validation_messages(self, field, value, expected):
        # Clean the value and capture the ValidationError it must raise.
        with self.assertRaises(ValidationError) as ctx:
            field.clean(value, None)
        self.assertEqual(ctx.exception.messages, expected)

    def test_autofield_field_raises_error_message(self):
        field = models.AutoField(primary_key=True)
        self._test_validation_messages(
            field, 'fõo', ["'fõo' value must be an integer."])

    def test_integer_field_raises_error_message(self):
        field = models.IntegerField()
        self._test_validation_messages(
            field, 'fõo', ["'fõo' value must be an integer."])

    def test_boolean_field_raises_error_message(self):
        field = models.BooleanField()
        self._test_validation_messages(
            field, 'fõo', ["'fõo' value must be either True or False."])

    def test_float_field_raises_error_message(self):
        field = models.FloatField()
        self._test_validation_messages(
            field, 'fõo', ["'fõo' value must be a float."])

    def test_decimal_field_raises_error_message(self):
        field = models.DecimalField()
        self._test_validation_messages(
            field, 'fõo', ["'fõo' value must be a decimal number."])

    def test_null_boolean_field_raises_error_message(self):
        field = models.NullBooleanField()
        self._test_validation_messages(
            field, 'fõo', ["'fõo' value must be either None, True or False."])

    def test_date_field_raises_error_message(self):
        field = models.DateField()
        # Unparseable values report a format error.
        self._test_validation_messages(
            field, 'fõo',
            ["'fõo' value has an invalid date format. It must be in YYYY-MM-DD format."])
        self._test_validation_messages(
            field, 'aaaa-10-10',
            ["'aaaa-10-10' value has an invalid date format. It must be in YYYY-MM-DD format."])
        # Well-formed but impossible dates report an invalid-date error.
        self._test_validation_messages(
            field, '2011-13-10',
            ["'2011-13-10' value has the correct format (YYYY-MM-DD) but it is an invalid date."])
        self._test_validation_messages(
            field, '2011-10-32',
            ["'2011-10-32' value has the correct format (YYYY-MM-DD) but it is an invalid date."])

    def test_datetime_field_raises_error_message(self):
        field = models.DateTimeField()
        # Wrong format entirely.
        self._test_validation_messages(
            field, 'fõo',
            ["'fõo' value has an invalid format. It must be "
             "in YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ] format."])
        # Correct format but invalid date.
        self._test_validation_messages(
            field, '2011-10-32',
            ["'2011-10-32' value has the correct format "
             "(YYYY-MM-DD) but it is an invalid date."])
        # Correct format but invalid date/time.
        self._test_validation_messages(
            field, '2011-10-32 10:10',
            ["'2011-10-32 10:10' value has the correct format "
             "(YYYY-MM-DD HH:MM[:ss[.uuuuuu]][TZ]) "
             "but it is an invalid date/time."])

    def test_time_field_raises_error_message(self):
        field = models.TimeField()
        # Wrong format entirely.
        self._test_validation_messages(
            field, 'fõo',
            ["'fõo' value has an invalid format. It must be in "
             "HH:MM[:ss[.uuuuuu]] format."])
        # Correct format but invalid time.
        self._test_validation_messages(
            field, '25:50',
            ["'25:50' value has the correct format "
             "(HH:MM[:ss[.uuuuuu]]) but it is an invalid time."])
| bsd-3-clause |
lalrae/qemu | tests/qemu-iotests/nbd-fault-injector.py | 127 | 8350 | #!/usr/bin/env python
# NBD server - fault injection utility
#
# Configuration file syntax:
# [inject-error "disconnect-neg1"]
# event=neg1
# io=readwrite
# when=before
#
# Note that Python's ConfigParser squashes together all sections with the same
# name, so give each [inject-error] a unique name.
#
# inject-error options:
# event - name of the trigger event
# "neg1" - first part of negotiation struct
# "export" - export struct
# "neg2" - second part of negotiation struct
# "request" - NBD request struct
# "reply" - NBD reply struct
# "data" - request/reply data
# io - I/O direction that triggers this rule:
# "read", "write", or "readwrite"
# default: readwrite
# when - after how many bytes to inject the fault
# -1 - inject error after I/O
# 0 - inject error before I/O
# integer - inject error after integer bytes
# "before" - alias for 0
# "after" - alias for -1
# default: before
#
# Currently the only error injection action is to terminate the server process.
# This resets the TCP connection and thus forces the client to handle
# unexpected connection termination.
#
# Other error injection actions could be added in the future.
#
# Copyright Red Hat, Inc. 2014
#
# Authors:
# Stefan Hajnoczi <stefanha@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2 or later.
# See the COPYING file in the top-level directory.
import sys
import socket
import struct
import collections
import ConfigParser
# Advertised size of the fake export (there is no real backing storage).
FAKE_DISK_SIZE = 8 * 1024 * 1024 * 1024 # 8 GB

# Protocol constants (NBD command types and wire magic numbers)
NBD_CMD_READ = 0
NBD_CMD_WRITE = 1
NBD_CMD_DISC = 2
NBD_REQUEST_MAGIC = 0x25609513
NBD_REPLY_MAGIC = 0x67446698
NBD_PASSWD = 0x4e42444d41474943
NBD_OPTS_MAGIC = 0x49484156454F5054
NBD_CLIENT_MAGIC = 0x0000420281861253
NBD_OPT_EXPORT_NAME = 1 << 0

# Protocol structs (big-endian wire layouts; 124x = zero padding)
neg_classic_struct = struct.Struct('>QQQI124x')

neg1_struct = struct.Struct('>QQH')
export_tuple = collections.namedtuple('Export', 'reserved magic opt len')
export_struct = struct.Struct('>IQII')
neg2_struct = struct.Struct('>QH124x')

request_tuple = collections.namedtuple('Request', 'magic type handle from_ len')
request_struct = struct.Struct('>IIQQI')

reply_struct = struct.Struct('>IIQ')
def err(msg):
    """Print *msg* to stderr and terminate the process with exit status 1."""
    sys.stderr.write('%s\n' % msg)
    sys.exit(1)
def recvall(sock, bufsize):
    """Read exactly *bufsize* bytes from *sock*; raise if the peer disconnects early."""
    pieces = []
    remaining = bufsize
    while remaining > 0:
        piece = sock.recv(remaining)
        if not piece:
            # recv returning nothing means the peer closed the connection.
            raise Exception('unexpected disconnect')
        pieces.append(piece)
        remaining -= len(piece)
    return ''.join(pieces)
class Rule(object):
    """One fault-injection rule: fire on (event, io) at byte offset `when`."""

    def __init__(self, name, event, io, when):
        self.name = name
        self.event = event
        self.io = io
        self.when = when

    def match(self, event, io):
        """Return True if this rule applies to the given event and I/O direction."""
        if event != self.event:
            return False
        # 'readwrite' rules match either direction.
        return io == self.io or self.io == 'readwrite'
class FaultInjectionSocket(object):
    """Socket wrapper that checks every send/recv against the fault rules.

    A matching rule either terminates the server immediately (when == 0,
    or after the transfer completes for when == -1) or truncates the
    transfer to `when` bytes so the fault fires mid-I/O.
    """
    def __init__(self, sock, rules):
        self.sock = sock
        self.rules = rules

    def check(self, event, io, bufsize=None):
        # Returns the (possibly truncated) number of bytes to transfer.
        # Exits the process when a matching rule says to fire right now
        # (when == 0, or bufsize is None meaning "after the I/O").
        for rule in self.rules:
            if rule.match(event, io):
                if rule.when == 0 or bufsize is None:
                    print 'Closing connection on rule match %s' % rule.name
                    sys.exit(0)
                if rule.when != -1:
                    return rule.when
        return bufsize

    def send(self, buf, event):
        # Check before the I/O (may truncate the buffer), then after it.
        bufsize = self.check(event, 'write', bufsize=len(buf))
        self.sock.sendall(buf[:bufsize])
        self.check(event, 'write')

    def recv(self, bufsize, event):
        # Check before the I/O (may shrink the read), then after it.
        bufsize = self.check(event, 'read', bufsize=bufsize)
        data = recvall(self.sock, bufsize)
        self.check(event, 'read')
        return data

    def close(self):
        self.sock.close()
def negotiate_classic(conn):
    """Send the old-style (pre-option-haggling) NBD server greeting."""
    greeting = neg_classic_struct.pack(NBD_PASSWD, NBD_CLIENT_MAGIC,
                                       FAKE_DISK_SIZE, 0)
    conn.send(greeting, event='neg-classic')
def negotiate_export(conn):
    """Run new-style negotiation: greeting, read the export option, then flags/size."""
    # Negotiation part 1: passwd, option magic, handshake flags.
    conn.send(neg1_struct.pack(NBD_PASSWD, NBD_OPTS_MAGIC, 0), event='neg1')

    # The client replies with the export option it wants.
    raw = conn.recv(export_struct.size, event='export')
    export = export_tuple._make(export_struct.unpack(raw))
    assert export.magic == NBD_OPTS_MAGIC
    assert export.opt == NBD_OPT_EXPORT_NAME
    # The export name itself is read (and ignored) to advance the stream.
    conn.recv(export.len, event='export-name')

    # Negotiation part 2: fake disk size plus transmission flags.
    conn.send(neg2_struct.pack(FAKE_DISK_SIZE, 0), event='neg2')
def negotiate(conn, use_export):
    '''Negotiate export with client'''
    # Dispatch to new-style (option haggling) or classic negotiation.
    handler = negotiate_export if use_export else negotiate_classic
    handler(conn)
def read_request(conn):
    '''Parse NBD request from client'''
    raw = conn.recv(request_struct.size, event='request')
    request = request_tuple._make(request_struct.unpack(raw))
    assert request.magic == NBD_REQUEST_MAGIC
    return request
def write_reply(conn, error, handle):
    """Send an NBD reply header (magic, error code, request handle)."""
    header = reply_struct.pack(NBD_REPLY_MAGIC, error, handle)
    conn.send(header, event='reply')
def handle_connection(conn, use_export):
    """Serve one NBD client: negotiate, then loop over requests until disconnect."""
    negotiate(conn, use_export)

    while True:
        req = read_request(conn)
        if req.type == NBD_CMD_READ:
            # Reads succeed with zero-filled data (there is no real disk).
            write_reply(conn, 0, req.handle)
            conn.send('\0' * req.len, event='data')
        elif req.type == NBD_CMD_WRITE:
            # Drain and discard the client's payload, then acknowledge.
            _ = conn.recv(req.len, event='data')
            write_reply(conn, 0, req.handle)
        elif req.type == NBD_CMD_DISC:
            break
        else:
            print 'unrecognized command type %#02x' % req.type
            break

    conn.close()
def run_server(sock, rules, use_export):
    """Accept clients forever, serving each through a fault-injecting wrapper."""
    while True:
        client, _addr = sock.accept()
        wrapped = FaultInjectionSocket(client, rules)
        handle_connection(wrapped, use_export)
def parse_inject_error(name, options):
    """Validate one [inject-error] section and build a Rule from it.

    Terminates the process via err() on any missing or invalid option.
    """
    if 'event' not in options:
        err('missing \"event\" option in %s' % name)
    event = options['event']
    valid_events = ('neg-classic', 'neg1', 'export', 'neg2', 'request', 'reply', 'data')
    if event not in valid_events:
        err('invalid \"event\" option value \"%s\" in %s' % (event, name))

    io = options.get('io', 'readwrite')
    if io not in ('read', 'write', 'readwrite'):
        err('invalid \"io\" option value \"%s\" in %s' % (io, name))

    when = options.get('when', 'before')
    try:
        when = int(when)
    except ValueError:
        # Accept the "before"/"after" aliases for 0 and -1.
        aliases = {'before': 0, 'after': -1}
        if when in aliases:
            when = aliases[when]
        else:
            err('invalid \"when\" option value \"%s\" in %s' % (when, name))
    return Rule(name, event, io, when)
def parse_config(config):
    """Build the list of Rules from a parsed config; reject unknown section names."""
    rules = []
    for section in config.sections():
        if not section.startswith('inject-error'):
            # err() exits the process, so no rule is appended.
            err('invalid config section name: %s' % section)
        rules.append(parse_inject_error(section, dict(config.items(section))))
    return rules
def load_rules(filename):
    """Parse the fault-injection config file and return its list of Rules."""
    config = ConfigParser.RawConfigParser()
    with open(filename, 'rt') as config_file:
        config.readfp(config_file, filename)
    return parse_config(config)
def open_socket(path):
    '''Open a TCP or UNIX domain listen socket'''
    # A "host:port" string selects TCP; anything else is a UNIX socket path.
    if ':' in path:
        host, port = path.split(':', 1)
        sock = socket.socket()
        # SO_REUSEADDR lets the server restart without waiting out TIME_WAIT.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((host, int(port)))
    else:
        sock = socket.socket(socket.AF_UNIX)
        sock.bind(path)
    sock.listen(0)

    print 'Listening on %s' % path
    return sock
def usage(args):
    """Print the command-line help to stderr and exit with status 1."""
    help_text = ('usage: %s [--classic-negotiation] <tcp-port>|<unix-path> <config-file>\n'
                 'Run an fault injector NBD server with rules defined in a config file.\n')
    sys.stderr.write(help_text % args[0])
    sys.exit(1)
def main(args):
    """Parse argv, open the listen socket, load rules and run the server."""
    if len(args) not in (3, 4):
        usage(args)
    use_export = True
    if args[1] == '--classic-negotiation':
        use_export = False
    elif len(args) == 4:
        # Four arguments are only valid together with --classic-negotiation.
        usage(args)

    # With the flag present, the address/config arguments shift right by one.
    addr_idx = 1 if use_export else 2
    sock = open_socket(args[addr_idx])
    rules = load_rules(args[addr_idx + 1])
    run_server(sock, rules, use_export)
    return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| gpl-2.0 |
dgarage/bc2 | contrib/linearize/linearize-hashes.py | 27 | 4579 | #!/usr/bin/env python3
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
try: # Python 3
import http.client as httplib
except ImportError: # Python 2
import httplib
import json
import re
import base64
import sys
import os
import os.path
settings = {}
##### Switch endian-ness #####
def hex_switchEndian(s):
    """ Switches the endianness of a hex string (in pairs of hex chars) """
    # Split into byte-sized two-character chunks, reverse their order,
    # and glue them back together.
    pairs = [s[i:i + 2].encode() for i in range(0, len(s), 2)]
    pairs.reverse()
    return b''.join(pairs).decode()
class BitcoinRPC:
    """Minimal JSON-RPC batch client for bitcoind over HTTP basic auth."""

    def __init__(self, host, port, username, password):
        credentials = ("%s:%s" % (username, password)).encode('utf-8')
        self.authhdr = b"Basic " + base64.b64encode(credentials)
        self.conn = httplib.HTTPConnection(host, port=port, timeout=30)

    def execute(self, obj):
        """POST a JSON-RPC request (or batch) and return the decoded reply, or None on failure."""
        headers = {'Authorization': self.authhdr,
                   'Content-type': 'application/json'}
        try:
            self.conn.request('POST', '/', json.dumps(obj), headers)
        except ConnectionRefusedError:
            print('RPC connection refused. Check RPC settings and the server status.',
                  file=sys.stderr)
            return None

        resp = self.conn.getresponse()
        if resp is None:
            print("JSON-RPC: no response", file=sys.stderr)
            return None

        return json.loads(resp.read().decode('utf-8'))

    @staticmethod
    def build_request(idx, method, params):
        """Build one JSON-RPC 1.1 request object with id *idx*."""
        obj = {'version': '1.1',
               'method': method,
               'id': idx}
        obj['params'] = [] if params is None else params
        return obj

    @staticmethod
    def response_is_error(resp_obj):
        """Return True if a JSON-RPC response object carries a non-null error."""
        return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
    """Print the hash of every block from min_height through max_height.

    Queries bitcoind over JSON-RPC in batches of at most
    max_blocks_per_call 'getblockhash' requests, printing one hash per
    line to stdout.  Exits the process on any RPC-level error.

    Note: the `settings` parameter shadows the module-level dict of the
    same name; the caller passes that dict in explicitly.
    """
    rpc = BitcoinRPC(settings['host'], settings['port'],
                     settings['rpcuser'], settings['rpcpassword'])

    height = settings['min_height']
    while height < settings['max_height']+1:
        # Batch size: remaining blocks, capped at max_blocks_per_call.
        num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
        batch = []
        for x in range(num_blocks):
            batch.append(rpc.build_request(x, 'getblockhash', [height + x]))

        reply = rpc.execute(batch)
        if reply is None:
            print('Cannot continue. Program will halt.')
            return None

        for x,resp_obj in enumerate(reply):
            if rpc.response_is_error(resp_obj):
                print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
                exit(1)
            assert(resp_obj['id'] == x) # assume replies are in-sequence
            if settings['rev_hash_bytes'] == 'true':
                # Config requested byte-swapped output; flip the hex pairs.
                resp_obj['result'] = hex_switchEndian(resp_obj['result'])
            print(resp_obj['result'])

        height += num_blocks
def get_rpc_cookie():
    """Read RPC credentials from bitcoind's cookie file.

    Bitcoin Core writes a '<user>:<password>' pair to `.cookie` inside
    the data directory when no rpcuser/rpcpassword are configured.
    Populates settings['rpcuser'] and settings['rpcpassword'] as a side
    effect; settings['datadir'] must already be set.
    """
    # Open the cookie file
    cookie_path = os.path.join(os.path.expanduser(settings['datadir']), '.cookie')
    with open(cookie_path, 'r') as f:
        # strip() guards against a trailing newline ending up in the password.
        combined = f.readline().strip()
    # Split only on the first ':' so a password containing ':' stays intact.
    user, _, password = combined.partition(":")
    settings['rpcuser'] = user
    settings['rpcpassword'] = password
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("Usage: linearize-hashes.py CONFIG-FILE")
        sys.exit(1)

    # Parse the key=value config file, skipping comments and blank lines.
    with open(sys.argv[1]) as f:
        for line in f:
            # skip comment lines
            m = re.search(r'^\s*#', line)
            if m:
                continue

            # parse key=value lines
            m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
            if m is None:
                continue
            settings[m.group(1)] = m.group(2)

    # Defaults for anything the config file did not set.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8332
    if 'min_height' not in settings:
        settings['min_height'] = 0
    if 'max_height' not in settings:
        settings['max_height'] = 313000
    if 'rev_hash_bytes' not in settings:
        settings['rev_hash_bytes'] = 'false'

    # Credentials come either from rpcuser/rpcpassword or from the cookie
    # file in the data directory; at least one mechanism must be available.
    use_userpass = True
    use_datadir = False
    if 'rpcuser' not in settings or 'rpcpassword' not in settings:
        use_userpass = False
    if 'datadir' in settings and not use_userpass:
        use_datadir = True
    if not use_userpass and not use_datadir:
        # BUG FIX: this previously wrote `file=stderr`, an undefined name,
        # which raised NameError instead of printing the message.
        print("Missing datadir or username and/or password in cfg file",
              file=sys.stderr)
        sys.exit(1)

    settings['port'] = int(settings['port'])
    settings['min_height'] = int(settings['min_height'])
    settings['max_height'] = int(settings['max_height'])

    # Force hash byte format setting to be lowercase to make comparisons easier.
    settings['rev_hash_bytes'] = settings['rev_hash_bytes'].lower()

    # Get the rpc user and pass from the cookie if the datadir is set
    if use_datadir:
        get_rpc_cookie()

    get_block_hashes(settings)
| mit |
adamjermyn/AstroMicroPhysics | sourceClass.py | 1 | 14504 | from scipy.interpolate import RegularGridInterpolator, RectBivariateSpline, interp1d
from scipy.ndimage.filters import gaussian_filter, minimum_filter
import numpy as np
class source:
    """
    Wraps equation-of-state tables and functions behind a uniform interface.

    A source bundles:
      inNames    -- ordered list of input-variable names required to query it
                    (e.g. ['X','Z','Rho','T']).
      outNames   -- ordered list of output-quantity names it returns
                    (e.g. ['P','gradad','gamma1']).
      contains   -- f(points) -> (N,) array of 0/1 flags for a
                    (N, len(inNames)) array of points; 1 where the source
                    has data covering the point.
      smoothMask -- f(points) -> (N,) array in [0, 1]; zero where data is
                    unavailable, smoothly approaching unity away from the
                    edges of the data, so multiple sources can be blended.
      data       -- f(points) -> (N, len(outNames)) array of the output
                    quantities evaluated at each point.
    """

    def __init__(self, inNames, outNames, contains, smoothMask, data):
        """Store the interface functions and the input/output name lists."""
        self.inNames = inNames
        self.outNames = outNames
        self.contains = contains
        self.smoothMask = smoothMask
        self.data = data

    def nameIndices(self, names):
        """Return the index in outNames of each entry of `names`, in order.

        Arguments:
        names -- List of names appearing in outNames.
        """
        return [self.outNames.index(label) for label in names]
def mergeSources(sources, inNames, outNames, weights=None):
    """
    Merges multiple sources together with optional weights.

    Arguments:
    sources -- The source objects to merge.
    inNames -- The input names on all of the sources. Must be the same across sources.
    outNames-- The quantities to output. All input sources must contain all requested names in outNames.
    weights -- The weighting of the various sources, given as a numpy array of length len(sources).
               Default is None, which is processed as np.ones(len(sources)).

    The sources are averaged, weighted based on their smoothMask values multiplied by weights.
    These values are zero out-of-bounds, and unity in the interior of the data set.
    At the edges of the data set, the mask smoothly transitions between 1 and 0 to ensure continuity.
    """
    if weights is None:
        weights = np.ones(len(sources))

    # Set up new contains function
    def contains(points):
        # Union of coverage: a point counts as contained if ANY source
        # contains it (the average is thresholded back to 0/1).
        cont = [s.contains(points) for s in sources]
        cont = sum(cont) / len(cont)
        cont[cont > 0] = 1.
        return cont

    # Column indices of the requested outputs within each source's outNames.
    indices = [s.nameIndices(outNames) for s in sources]

    # Set up a new smoothMask function
    def smoothMask(points):
        # Sum of the individual masks, capped at one: interior regions of
        # any source get full weight while edges still blend smoothly.
        masks = sum([sources[i].smoothMask(points)
                     for i in range(len(sources))])
        masks[masks > 1] = 1.
        return masks

    # Set up new data function
    def data(points):
        # Weighted average of the sources' outputs, with per-point weights
        # weights[i] * smoothMask_i.
        # NOTE(review): if a point lies outside every source, norm is zero
        # and this divides by zero — callers appear expected to query only
        # points where contains() is nonzero; confirm with call sites.
        out = [sources[i].data(points)[:, indices[i]] for i in range(len(sources))]
        masks = [weights[i] * sources[i].smoothMask(points) for i in range(len(sources))]
        out = sum([out[i] * masks[i][:, np.newaxis]
                   for i in range(len(sources))])
        norm = sum(masks)
        out /= norm[:, np.newaxis]
        return out

    return source(inNames, outNames, contains, smoothMask, data)
def interpolateSources(sources, vals, newName, inNames, outNames, kind='linear'):
    """
    Creates a source which interpolates along an additional axis between other sources.
    Useful for nested interpolation.

    Arguments:
    sources -- The source objects to merge. The 'contains' method of each source must agree at all points.
    vals    -- The values of the sources along the new axis.
    newName -- The name of the new axis.
    inNames -- The input names on all of the sources. Must be the same across sources.
    outNames-- The quantities to output. All input sources must contain all requested names in outNames.
    kind    -- String specifying the kind of interpolation. Default is 'linear'. This option is just passed
               to interp1d in scipy.interpolate, so any kind supported by that is supported here.

    Returns a source object which interpolates the values output by the various input sources along the
    new axis, assigning sources to values in the new dimension given by vals. The new source object has
    inNames made by (newName,inNames[0],...,inNames[-1]), and uses the same ordering for inputs. Note
    that the resulting source requires evaluating every source at every point. Also note that out of
    bounds values along the new axis result in zero output data (interp1d is called with fill_value=0)
    and a zero contains/smoothMask value.
    """
    # BUG FIX: the parameter was previously named 'kine' while the body
    # referenced 'kind', so every call raised NameError. Renamed to 'kind';
    # positional callers are unaffected.
    newInNames = [newName] + list(inNames)

    nMin = min(vals)
    nMax = max(vals)

    def data(points):
        # Project the points into one lower dimension
        lowPoints = points[:, 1:]
        newDim = points[:, 0]
        # Evaluate every source at every projected point, then interpolate
        # between the per-source results along the new axis.
        out = np.array([sources[i].data(lowPoints)
                        for i in range(len(sources))])
        outInterpolator = interp1d(
            vals, out, axis=0, kind=kind, bounds_error=False, fill_value=0.0,
            assume_sorted=False)
        return outInterpolator(newDim)

    def contains(points):
        # All sources agree on containment, so the first is representative.
        ret = sources[0].contains(points[:, 1:])
        # Filter out points which fall out of bounds along the new axis.
        ret[points[:, 0] < nMin] = 0
        ret[points[:, 0] > nMax] = 0
        return ret

    def smoothMask(points):
        ret = sources[0].smoothMask(points[:, 1:])
        # Filter out points which fall out of bounds along the new axis.
        ret[points[:, 0] < nMin] = 0
        ret[points[:, 0] > nMax] = 0
        # Smoothly drop the mask to zero at the ends of the new axis.
        # Make the coordinates dimensionless.
        x = (points[:, 0] - nMin) / (nMax - nMin)
        # 30 was picked so that the transition happens over ~10% of the range.
        ret *= np.maximum(0, 1 - 2. / (1 + np.exp(30 * x)) -
                          2. / (1 + np.exp(30 * (1 - x))))
        return ret

    return source(newInNames, outNames, contains, smoothMask, data)
def wrapSource(s, transform, newInNames):
    """
    Creates a source which transforms the input points before processing them.

    Arguments:
    s          -- Source to wrap.
    transform  -- Transformation function. Must take numpy input of shape
                  (N, len(newInNames)) and produce output of shape
                  (N, len(s.inNames)), which is forwarded to `s`.
    newInNames -- New list of input names (given as strings).
    """
    def _forward(method):
        # Build a function that transforms the points and then delegates
        # to the wrapped source's method.
        def wrapped(points):
            return method(transform(points))
        return wrapped

    return source(newInNames, s.outNames, _forward(s.contains),
                  _forward(s.smoothMask), _forward(s.data))
def sourceFromTables(grid, inNames, outNames, data, kind='linear', binaryMask=None, smoothingDist=4):
    """
    An interpolator object which wraps equation of state tables.

    Arguments:
    grid    -- Must be a NumPy array of dimension 2. The first dimension indexes the variables
               over which the table is defined. The second dimension then gives the grid points
               spanned by that variable. Thus, for example, one might give an array of the form
               [[x0,x1,x2],[z0,z1,z2,z3],[rho0,rho1,rho2,...,rhoN],[T0,T1,...,TM]], where N and
               M specify the number of rho and T values given. This imposes that the data be
               given in a regular grid.
    inNames -- A list of strings specifying the inputs required to receive an output, in the
               order in which they appear in grid. In the above example, one would give
               ['X','Z','Rho','T'].
    outNames-- A list of strings specifying the outputs that the source may return, in the
               order in which they will be returned. Thus, for example, one might specify
               ['P','gradad','gamma1'] for the pressure, adiabatic gradient, and first
               adiabatic index.
    data    -- The data table of interest. The number of dimensions this array contains must be
               one plus the size of the first dimension of grid (i.e. grid.shape[0]+1). The
               final dimension indexes the quantities of interest. The entries should be NaN when
               data is unavailable.
    kind    -- String specifying the kind of interpolation. Default is 'linear'. Currently only
               'linear' and 'cubic' are supported. Note that 'linear' must be used if D>2.
    binaryMask -- Optional precomputed binary mask with the same shape as data[..., 0]: one
               where data is available, zero otherwise. If None (default) the mask is derived
               from the NaN entries of data.
    smoothingDist -- This gives the distance over which the binary mask is smoothed in grid units.
               Default is 4.

    For both input and output, a full specification of the available strings and the quantities
    they correspond to is given in the file 'VariableDeclarations.md' in the top level of the
    repository.

    In addition to providing interpolation routines, the source object also computes two mask
    arrays. The first, called binaryMaskTable, is one if the requested point lies inside the
    available data and zero otherwise. The second, called smoothMaskTable, specifies how far any
    individual position is from the edges of the available data. This mask is zero by definition
    where data is unavailable, and should quickly approach unity as one moves away from the edges
    of the data. This mask should smoothly vary as a function of position in the tables. At
    the edges (i.e. the outermost points with defined values) it should take on a small but
    non-zero value. This value should be consistent (in terms of the function used to blur the
    binary mask into the smooth one) with the mask reaching zero at the next point (i.e. the
    first one outside of the data range). Interpolation routines are provided for both
    tables. The binaryMaskTable interpolator will be the contains routine, and the smoothMaskTable
    interpolator will be the smoothMask routine. Note that the contains routine must round all
    outputs to 0 or 1, so the interpolation is only there to allow for a continuum of queries.

    The purpose of the smooth mask object is to make it easy to blend multiple source objects
    together. The output of multiple sources may simply be averaged, with weights given by
    their masks. These are guaranteed by the properties of the masks to vary smoothly, even as
    the point of interest moves into regions covered by a distinct set of tables, and to only
    include tables which have data relevant to the point of interest.
    """
    # Construct the binaryMaskTable, if not already done.
    binaryMaskTable = None
    if binaryMask is None:
        # Construct the binary mask: 1 where data is defined, 0 where NaN.
        binaryMaskTable = 1 - 1.0 * np.isnan(data[..., 0])
    else:
        binaryMaskTable = np.copy(binaryMask)

    if kind == 'cubic':
        # Cubic splines use the two points on either side in each dimension. Thus we
        # need to set any point which borders a zero-mask point to zero to accomodate this.
        # This change must likewise be propagated to the smooth mask, which is why we
        # do it here. Note that the mode is constant, such that we reflect our ignorance
        # at the edges of the table. This isn't an issue for linear interpolation because
        # it only needs the neighboring points.
        binaryMaskTable = minimum_filter(binaryMaskTable, size=3, mode='constant', cval=0.0)

    # Construct the smoothMaskTable.
    x = np.copy(binaryMaskTable)
    # This procedure makes it smoothly drop off towards the edges, while being precisely
    # zero once out of bounds (repeated blur-then-multiply keeps the zeros exact).
    for i in range(smoothingDist):
        x = gaussian_filter(x, sigma=1, order=0, mode='constant', cval=0) * x
    # Ensures that smooth masks are equally weighted across sources
    smoothMaskTable = x / np.amax(x)

    # Construct interpolators
    dataFunc = None
    if kind == 'linear':
        interpolator = RegularGridInterpolator(grid, data, bounds_error=False, fill_value=0)
        dataFunc = interpolator
    elif kind == 'cubic':
        # RectBivariateSpline handles only 2D grids, so build one spline per
        # output quantity. NaNs are zeroed first; the mask prevents those
        # regions from being trusted.
        data = np.copy(data)
        data[np.isnan(data)] = 0
        interpolator = [RectBivariateSpline(grid[0], grid[1], data[..., i], kx=3, ky=3)
                        for i in range(data.shape[-1])]

        def dataFunc(points):
            out = np.zeros((len(points), len(outNames)))
            for i in range(len(outNames)):
                out[:, i] = interpolator[i](points[:, 0], points[:, 1], grid=False)
            return out
    else:
        print 'Error: Unrecognized interpolation kind.'
        exit()

    # The mask interpolators are always linear. This ensures that we don't get
    # oscillation, negative values, or other ill-conditioning.
    binaryMaskInterpolataor = RegularGridInterpolator(
        grid, binaryMaskTable, bounds_error=False, fill_value=0)
    smoothMaskInterpolataor = RegularGridInterpolator(
        grid, smoothMaskTable, bounds_error=False, fill_value=0)

    # If the binary mask interpolator gives us anything less than unity, it means that the requested point is either
    # out of range, borders a point that is out of range, or (only if nonlinear) is using an out-of-range point for
    # interpolation.
    def contains(points):
        vals = binaryMaskInterpolataor(points)
        vals[vals < 1 - 1e-10] = 0  # Give some leeway for interpolation error
        return vals

    return source(inNames, outNames, contains, smoothMaskInterpolataor, dataFunc)
| gpl-2.0 |
treebuilder/s3-mining | s3.py | 1 | 1359 | #!/usr/bin/env python
import requests
import sys

# Base S3 endpoint; the bucket name is appended below.
url = 'https://s3.amazonaws.com/'
# Target name (bucket or domain) taken from the command line.
name = sys.argv[1].strip()
# Common suffixes tried when enumerating related bucket names.
common = ['test','dev','bucket','s3','aws','prd','prod','pub','public','production','development','testing','archive','backup','web','devops','sec','secure','hidden','secret','staging','download']
# Separators inserted between the base name and each suffix.
connectors = ['-','_','']
url = url + name
def check_code(r, name):
    """Print a one-line classification of an S3 probe response.

    Maps the HTTP status of `r` to a label, followed by the probed name
    (the trailing commas keep label and name on one line under Python 2's
    print statement):
      404 -> None (no such bucket), 403 -> Secure (exists, access denied),
      301 -> Redirect, 200 -> BINGO! (publicly listable), 400 -> BadName.
    Any other status code is printed verbatim.
    """
    if r.status_code == 404: print "None",
    elif r.status_code == 403: print "Secure",
    elif r.status_code == 301: print "Redirect",
    elif r.status_code == 200: print "BINGO!",
    elif r.status_code == 400: print "BadName",
    else: print r.status_code,
    print name
    return
# Earlier single-connector variant of get_code, disabled by wrapping it in a
# bare string literal (it has no effect at runtime).
'''
def get_code(url, name):
    r = requests.get(url)
    check_code(r, name)
    if r.status_code == 403 or r.status_code == 200:
        for ext in common:
            r = requests.get(url + ext)
            check_code(r, name + ext)
'''

def get_code(url, name):
    """Probe `url` itself, then every connector/suffix variant of it.

    Each probe result is printed via check_code; nothing is returned.
    """
    r = requests.get(url)
    check_code(r, name)
    for ext in common:
        for i in connectors:
            r = requests.get(url + i + ext)
            check_code(r, name + i + ext)
# Probe the name exactly as given.
get_code(url, name)

# If the name looks like a domain, also try common bucket spellings of it:
# a 'www.' prefix, an 's3.' prefix, and the bare first label.
if '.' in name:
    n2 = 'www.' + name
    url = 'https://s3.amazonaws.com/' + n2
    get_code(url, n2)
    n2 = 's3.' + name
    url = 'https://s3.amazonaws.com/' + n2
    get_code(url, n2)
    n3 = name.split('.')[0]
    url = 'https://s3.amazonaws.com/' + n3
    get_code(url, n3)
| apache-2.0 |
billyhunt/osf.io | scripts/tests/test_migrate_mailing_lists_to_mailchimp_field.py | 40 | 1354 | from nose.tools import *
from tests.base import OsfTestCase
from tests.factories import UserFactory
from scripts.migration.migrate_mailing_lists_to_mailchimp_field import main, get_users_with_no_mailchimp_mailing_lists
class TestMigrateMailingLists(OsfTestCase):
    """Tests for migrating legacy `mailing_lists` data into the new
    `mailchimp_mailing_lists` field."""

    def setUp(self):
        super(TestMigrateMailingLists, self).setUp()
        # Two users with old-style mailing_lists data (one subscribed, one
        # unsubscribed) plus one user with no mailing-list data at all.
        self.user1 = UserFactory(mailing_lists={'mail': True})
        self.user2 = UserFactory(mailing_lists={'mail': False})
        self.user3 = UserFactory()
        self.user1.save()
        self.user2.save()

    def test_get_users_with_mailing_lists(self):
        # Only users 1 and 2 have legacy data that still needs migrating.
        users_with_mailing_list_ids = [user._id for user in get_users_with_no_mailchimp_mailing_lists()]

        assert_equal(len(users_with_mailing_list_ids), 2)

        assert_true(self.user1._id in users_with_mailing_list_ids)
        assert_true(self.user2._id in users_with_mailing_list_ids)
        assert_false(self.user3._id in users_with_mailing_list_ids)

    def test_migration_of_mailing_lists(self):
        # Before the migration the new field is empty...
        assert_equal(self.user1.mailchimp_mailing_lists, {})
        assert_equal(self.user2.mailchimp_mailing_lists, {})
        main()
        self.user1.reload()
        self.user2.reload()
        # ...afterwards it mirrors the legacy subscription flags.
        assert_true(self.user1.mailchimp_mailing_lists.get(u'mail'))
        assert_false(self.user2.mailchimp_mailing_lists.get(u'mail'))
| apache-2.0 |
evgchz/scikit-learn | sklearn/linear_model/ransac.py | 16 | 13870 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from .base import LinearRegression
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
    """RANSAC (RANdom SAmple Consensus) algorithm.

    RANSAC is an iterative algorithm for the robust estimation of parameters
    from a subset of inliers from the complete data set. More information can
    be found in the general documentation of linear models.

    A detailed description of the algorithm can be found in the documentation
    of the ``linear_model`` sub-package.

    Parameters
    ----------
    base_estimator : object, optional
        Base estimator object which implements the following methods:

         * `fit(X, y)`: Fit model to given training data and target values.
         * `score(X, y)`: Returns the mean accuracy on the given test data,
           which is used for the stop criterion defined by `stop_score`.
           Additionally, the score is used to decide which of two equally
           large consensus sets is chosen as the better one.

        If `base_estimator` is None, then
        ``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
        target values of dtype float.

        Note that the current implementation only supports regression
        estimators.

    min_samples : int (>= 1) or float ([0, 1]), optional
        Minimum number of samples chosen randomly from original data. Treated
        as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0]`) for
        `min_samples < 1`. This is typically chosen as the minimal number of
        samples necessary to estimate the given `base_estimator`. By default a
        ``sklearn.linear_model.LinearRegression()`` estimator is assumed and
        `min_samples` is chosen as ``X.shape[1] + 1``.

    residual_threshold : float, optional
        Maximum residual for a data sample to be classified as an inlier.
        By default the threshold is chosen as the MAD (median absolute
        deviation) of the target values `y`.

    is_data_valid : callable, optional
        This function is called with the randomly selected data before the
        model is fitted to it: `is_data_valid(X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.

    is_model_valid : callable, optional
        This function is called with the estimated model and the randomly
        selected data: `is_model_valid(model, X, y)`. If its return value is
        False the current randomly chosen sub-sample is skipped.
        Rejecting samples with this function is computationally costlier than
        with `is_data_valid`. `is_model_valid` should therefore only be used if
        the estimated model is needed for making the rejection decision.

    max_trials : int, optional
        Maximum number of iterations for random sample selection.

    stop_n_inliers : int, optional
        Stop iteration if at least this number of inliers are found.

    stop_score : float, optional
        Stop iteration if score is greater equal than this threshold.

    stop_probability : float in range [0, 1], optional
        RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires to generate at least N
        samples (iterations)::

            N >= log(1 - probability) / log(1 - e**m)

        where the probability (confidence) is typically set to high value such
        as 0.99 (the default) and e is the current fraction of inliers w.r.t.
        the total number of samples.

    residual_metric : callable, optional
        Metric to reduce the dimensionality of the residuals to 1 for
        multi-dimensional target values ``y.shape[1] > 1``. By default the sum
        of absolute differences is used::

            lambda dy: np.sum(np.abs(dy), axis=1)

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    Attributes
    ----------
    estimator_ : object
        Best fitted model (copy of the `base_estimator` object).

    n_trials_ : int
        Number of random selection trials until one of the stop criteria is
        met. It is always ``<= max_trials``.

    inlier_mask_ : bool array of shape [n_samples]
        Boolean mask of inliers classified as ``True``.

    References
    ----------
    .. [1] http://en.wikipedia.org/wiki/RANSAC
    .. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
    .. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
    """

    def __init__(self, base_estimator=None, min_samples=None,
                 residual_threshold=None, is_data_valid=None,
                 is_model_valid=None, max_trials=100,
                 stop_n_inliers=np.inf, stop_score=np.inf,
                 stop_probability=0.99, residual_metric=None,
                 random_state=None):
        # Parameters are stored verbatim; all validation happens in fit().
        self.base_estimator = base_estimator
        self.min_samples = min_samples
        self.residual_threshold = residual_threshold
        self.is_data_valid = is_data_valid
        self.is_model_valid = is_model_valid
        self.max_trials = max_trials
        self.stop_n_inliers = stop_n_inliers
        self.stop_score = stop_score
        self.stop_probability = stop_probability
        self.residual_metric = residual_metric
        self.random_state = random_state

    def fit(self, X, y):
        """Fit estimator using RANSAC algorithm.

        Parameters
        ----------
        X : array-like or sparse matrix, shape [n_samples, n_features]
            Training data.

        y : array-like, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Raises
        ------
        ValueError
            If no valid consensus set could be found. This occurs if
            `is_data_valid` and `is_model_valid` return False for all
            `max_trials` randomly chosen sub-samples.

        """
        X = check_array(X, accept_sparse='csr')
        y = check_array(y, ensure_2d=False)

        # Work with 2D targets internally; residual_metric reduces axis 1.
        if y.ndim == 1:
            y = y.reshape(-1, 1)

        check_consistent_length(X, y)

        if self.base_estimator is not None:
            base_estimator = clone(self.base_estimator)
        else:
            base_estimator = LinearRegression()

        if self.min_samples is None:
            # assume linear model by default
            min_samples = X.shape[1] + 1
        elif 0 < self.min_samples < 1:
            # Fractional min_samples: interpret relative to n_samples.
            min_samples = np.ceil(self.min_samples * X.shape[0])
        elif self.min_samples >= 1:
            if self.min_samples % 1 != 0:
                raise ValueError("Absolute number of samples must be an "
                                 "integer value.")
            min_samples = self.min_samples
        else:
            raise ValueError("Value for `min_samples` must be scalar and "
                             "positive.")
        if min_samples > X.shape[0]:
            raise ValueError("`min_samples` may not be larger than number "
                             "of samples ``X.shape[0]``.")

        if self.stop_probability < 0 or self.stop_probability > 1:
            raise ValueError("`stop_probability` must be in range [0, 1].")

        if self.residual_threshold is None:
            # MAD (median absolute deviation)
            residual_threshold = np.median(np.abs(y - np.median(y)))
        else:
            residual_threshold = self.residual_threshold

        if self.residual_metric is None:
            residual_metric = lambda dy: np.sum(np.abs(dy), axis=1)
        else:
            residual_metric = self.residual_metric

        random_state = check_random_state(self.random_state)

        try:  # Not all estimator accept a random_state
            base_estimator.set_params(random_state=random_state)
        except ValueError:
            pass

        # Running best: most inliers wins; score breaks ties.
        n_inliers_best = 0
        score_best = np.inf
        inlier_mask_best = None
        X_inlier_best = None
        y_inlier_best = None

        # number of data samples
        n_samples = X.shape[0]
        sample_idxs = np.arange(n_samples)

        # NOTE(review): redundant with the assignment just above; kept as-is.
        n_samples, _ = X.shape

        # The loop variable lives on the instance so n_trials_ reports how
        # many trials were actually used.
        for self.n_trials_ in range(1, self.max_trials + 1):

            # choose random sample set
            subset_idxs = sample_without_replacement(n_samples, min_samples,
                                                     random_state=random_state)
            X_subset = X[subset_idxs]
            y_subset = y[subset_idxs]

            # check if random sample set is valid
            if (self.is_data_valid is not None
                    and not self.is_data_valid(X_subset, y_subset)):
                continue

            # fit model for current random sample set
            base_estimator.fit(X_subset, y_subset)

            # check if estimated model is valid
            if (self.is_model_valid is not None and not
                    self.is_model_valid(base_estimator, X_subset, y_subset)):
                continue

            # residuals of all data for current random sample model
            y_pred = base_estimator.predict(X)
            if y_pred.ndim == 1:
                y_pred = y_pred[:, None]

            residuals_subset = residual_metric(y_pred - y)

            # classify data into inliers and outliers
            inlier_mask_subset = residuals_subset < residual_threshold
            n_inliers_subset = np.sum(inlier_mask_subset)

            # less inliers -> skip current random sample
            if n_inliers_subset < n_inliers_best:
                continue

            # extract inlier data set
            inlier_idxs_subset = sample_idxs[inlier_mask_subset]
            X_inlier_subset = X[inlier_idxs_subset]
            y_inlier_subset = y[inlier_idxs_subset]

            # score of inlier data set
            score_subset = base_estimator.score(X_inlier_subset,
                                                y_inlier_subset)

            # same number of inliers but worse score -> skip current random
            # sample
            if (n_inliers_subset == n_inliers_best
                    and score_subset < score_best):
                continue

            # save current random sample as best sample
            n_inliers_best = n_inliers_subset
            score_best = score_subset
            inlier_mask_best = inlier_mask_subset
            X_inlier_best = X_inlier_subset
            y_inlier_best = y_inlier_subset

            # break if sufficient number of inliers or score is reached
            if (n_inliers_best >= self.stop_n_inliers
                    or score_best >= self.stop_score
                    or self.n_trials_
                       >= _dynamic_max_trials(n_inliers_best, n_samples,
                                              min_samples,
                                              self.stop_probability)):
                break

        # if none of the iterations met the required criteria
        if inlier_mask_best is None:
            raise ValueError(
                "RANSAC could not find valid consensus set, because"
                " either the `residual_threshold` rejected all the samples or"
                " `is_data_valid` and `is_model_valid` returned False for all"
                " `max_trials` randomly ""chosen sub-samples. Consider "
                "relaxing the ""constraints.")

        # estimate final model using all inliers
        base_estimator.fit(X_inlier_best, y_inlier_best)

        self.estimator_ = base_estimator
        self.inlier_mask_ = inlier_mask_best
        return self

    def predict(self, X):
        """Predict using the estimated model.

        This is a wrapper for `estimator_.predict(X)`.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]

        Returns
        -------
        y : array, shape = [n_samples] or [n_samples, n_targets]
            Returns predicted values.
        """
        return self.estimator_.predict(X)

    def score(self, X, y):
        """Returns the score of the prediction.

        This is a wrapper for `estimator_.score(X, y)`.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data.

        y : array, shape = [n_samples] or [n_samples, n_targets]
            Target values.

        Returns
        -------
        z : float
            Score of the prediction.
        """
        return self.estimator_.score(X, y)
| bsd-3-clause |
darren-wang/gl | glance/tests/unit/common/test_scripts.py | 20 | 1442 | # Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import glance.common.scripts as scripts
from glance.common.scripts.image_import import main as image_import
import glance.tests.utils as test_utils
class TestScripts(test_utils.BaseTestCase):
    """Unit tests for glance.common.scripts.run_task dispatch."""

    def setUp(self):
        super(TestScripts, self).setUp()

    def test_run_task(self):
        # run_task with task_type 'import' should delegate to
        # image_import.run, dropping task_type and forwarding the rest of
        # its arguments unchanged.
        task_id = mock.ANY
        task_type = 'import'
        context = mock.ANY
        task_repo = mock.ANY
        image_repo = mock.ANY
        image_factory = mock.ANY

        with mock.patch.object(image_import, 'run') as mock_run:
            scripts.run_task(task_id, task_type, context, task_repo,
                             image_repo, image_factory)
            mock_run.assert_called_once_with(task_id, context, task_repo,
                                             image_repo, image_factory)
wendellpbarreto/dolly | node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString.  This pattern is used to determine when a string
# can be printed unquoted.  Strings that match this pattern may be printed
# unquoted.  Strings that do not match must be quoted and may be further
# transformed to be properly encoded.  Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')

# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')

# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString.  See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')


def SourceTreeAndPathFromPath(input_path):
  """Given input_path, returns a tuple with sourceTree and path values.

  Examples:
    input_path     (source_tree, output_path)
    '$(VAR)/path'  ('VAR', 'path')
    '$(VAR)'       ('VAR', None)
    'path'         (None, 'path')
  """
  match = _path_leading_variable.match(input_path)
  if match is None:
    # No leading $(...) variable: the whole input is a plain path.
    return (None, input_path)
  # group(1) is the variable name; group(3) is the remainder, which is None
  # when the input is just '$(VAR)' with no trailing '/path'.
  return (match.group(1), match.group(3))
def ConvertVariablesToShellSyntax(input_string):
  """Rewrites Xcode-style $(VAR) references as shell-style ${VAR}."""
  return re.sub(r'\$\((.*?)\)',
                lambda match: '${' + match.group(1) + '}',
                input_string)
class XCObject(object):
  """The abstract base of all class types used in Xcode project files.

  Class variables:
    _schema: A dictionary defining the properties of this class.  The keys to
        _schema are string property keys as used in project files.  Values
        are a list of four or five elements:
        [ is_list, property_type, is_strong, is_required, default ]
        is_list: True if the property described is a list, as opposed
            to a single element.
        property_type: The type to use as the value of the property,
            or if is_list is True, the type to use for each
            element of the value's list.  property_type must
            be an XCObject subclass, or one of the built-in
            types str, int, or dict.
        is_strong: If property_type is an XCObject subclass, is_strong
            is True to assert that this class "owns," or serves
            as parent, to the property value (or, if is_list is
            True, values).  is_strong must be False if
            property_type is not an XCObject subclass.
        is_required: True if the property is required for the class.
            Note that is_required being True does not preclude
            an empty string ("", in the case of property_type
            str) or list ([], in the case of is_list True) from
            being set for the property.
        default: Optional.  If is_required is True, default may be set
            to provide a default value for objects that do not supply
            their own value.  If is_required is True and default
            is not provided, users of the class must supply their own
            value for the property.
        Note that although the values of the array are expressed in
        boolean terms, subclasses provide values as integers to conserve
        horizontal space.
    _should_print_single_line: False in XCObject.  Subclasses whose objects
        should be written to the project file in the
        alternate single-line format, such as
        PBXFileReference and PBXBuildFile, should
        set this to True.
    _encode_transforms: Used by _EncodeString to encode unprintable characters.
        The index into this list is the ordinal of the
        character to transform; each value is a string
        used to represent the character in the output.  XCObject
        provides an _encode_transforms list suitable for most
        XCObject subclasses.
    _alternate_encode_transforms: Provided for subclasses that wish to use
        the alternate encoding rules.  Xcode seems
        to use these rules when printing objects in
        single-line format.  Subclasses that desire
        this behavior should set _encode_transforms
        to _alternate_encode_transforms.
    _hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
        to construct this object's ID.  Most classes that need custom
        hashing behavior should do it by overriding Hashables,
        but in some cases an object's parent may wish to push a
        hashable value into its child, and it can do so by appending
        to _hashables.

  Attributes:
    id: The object's identifier, a 24-character uppercase hexadecimal string.
        Usually, objects being created should not set id until the entire
        project file structure is built.  At that point, UpdateIDs() should
        be called on the root object to assign deterministic values for id to
        each object in the tree.
    parent: The object's parent.  This is set by a parent XCObject when a child
        object is added to it.
    _properties: The object's property dictionary.  An object's properties are
        described by its class' _schema variable.
  """

  _schema = {}
  _should_print_single_line = False

  # See _EncodeString.  Build the default control-character table: every
  # ordinal below ord(' ') starts out as its "\Unnnn" escape, and a handful
  # of slots are then overridden with short C-style escapes.
  _encode_transforms = []
  i = 0
  while i < ord(' '):
    _encode_transforms.append('\\U%04x' % i)
    i = i + 1
  _encode_transforms[7] = '\\a'   # BEL
  _encode_transforms[8] = '\\b'   # BS
  _encode_transforms[9] = '\\t'   # HT
  _encode_transforms[10] = '\\n'  # NL
  _encode_transforms[11] = '\\v'  # VT
  _encode_transforms[12] = '\\f'  # NP
  _encode_transforms[13] = '\\n'  # CR is also written as "\n".

  # The alternate table (used by single-line object types) passes HT, NL and
  # VT through unescaped.
  _alternate_encode_transforms = list(_encode_transforms)
  _alternate_encode_transforms[9] = chr(9)
  _alternate_encode_transforms[10] = chr(10)
  _alternate_encode_transforms[11] = chr(11)
  def __init__(self, properties=None, id=None, parent=None):
    # id/parent may be None at construction time; see the class docstring.
    self.id = id
    self.parent = parent
    self._properties = {}
    self._hashables = []
    # Populate schema defaults first so that explicit |properties| win.
    self._SetDefaultsFromSchema()
    self.UpdateProperties(properties)

  def __repr__(self):
    # Unnamed objects fall back to a class-and-address representation.
    try:
      name = self.Name()
    except NotImplementedError:
      return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Copy(self):
    """Make a copy of this object.

    The new object will have its own copy of lists and dicts.  Any XCObject
    objects owned by this object (marked "strong") will be copied in the
    new object, even those found in lists.  If this object has any weak
    references to other XCObjects, the same references are added to the new
    object without making a copy.
    """
    that = self.__class__(id=self.id, parent=self.parent)
    for key, value in self._properties.iteritems():
      is_strong = self._schema[key][2]

      if isinstance(value, XCObject):
        if is_strong:
          new_value = value.Copy()
          new_value.parent = that
          that._properties[key] = new_value
        else:
          that._properties[key] = value
      elif isinstance(value, str) or isinstance(value, unicode) or \
           isinstance(value, int):
        that._properties[key] = value
      elif isinstance(value, list):
        if is_strong:
          # If is_strong is True, each element is an XCObject, so it's safe to
          # call Copy.
          that._properties[key] = []
          for item in value:
            new_item = item.Copy()
            new_item.parent = that
            that._properties[key].append(new_item)
        else:
          that._properties[key] = value[:]
      elif isinstance(value, dict):
        # dicts are never strong.
        if is_strong:
          raise TypeError('Strong dict for key ' + key + ' in ' + \
                          self.__class__.__name__)
        else:
          that._properties[key] = value.copy()
      else:
        raise TypeError('Unexpected type ' + value.__class__.__name__ + \
                        ' for key ' + key + ' in ' + self.__class__.__name__)
    return that
  def Name(self):
    """Return the name corresponding to an object.

    Not all objects necessarily need to be nameable, and not all that do have
    a "name" property.  Override as needed.
    """

    # If the schema indicates that "name" is required, try to access the
    # property even if it doesn't exist.  This will result in a KeyError
    # being raised for the property that should be present, which seems more
    # appropriate than NotImplementedError in this case.
    if 'name' in self._properties or \
        ('name' in self._schema and self._schema['name'][3]):
      return self._properties['name']

    raise NotImplementedError(self.__class__.__name__ + ' must implement Name')

  def Comment(self):
    """Return a comment string for the object.

    Most objects just use their name as the comment, but PBXProject uses
    different values.

    The returned comment is not escaped and does not have any comment marker
    strings applied to it.
    """
    return self.Name()

  def Hashables(self):
    # Values folded into this object's ID by ComputeIDs: the class name, the
    # object's name (if any), plus anything pushed into self._hashables.
    hashables = [self.__class__.__name__]

    name = self.Name()
    if name != None:
      hashables.append(name)

    hashables.extend(self._hashables)

    return hashables

  def HashablesForChild(self):
    # Subclasses may return a separate hashable list used to seed children's
    # IDs; None means children extend this object's own hash chain.
    return None
  def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
    """Set "id" properties deterministically.

    An object's "id" property is set based on a hash of its class type and
    name, as well as the class type and name of all ancestor objects.  As
    such, it is only advisable to call ComputeIDs once an entire project file
    tree is built.

    If recursive is True, recurse into all descendant objects and update their
    hashes.

    If overwrite is True, any existing value set in the "id" property will be
    replaced.
    """

    def _HashUpdate(hash, data):
      """Update hash with data's length and contents.

      If the hash were updated only with the value of data, it would be
      possible for clowns to induce collisions by manipulating the names of
      their objects.  By adding the length, it's exceedingly less likely that
      ID collisions will be encountered, intentionally or not.
      """
      hash.update(struct.pack('>i', len(data)))
      hash.update(data)

    if seed_hash is None:
      seed_hash = _new_sha1()

    hash = seed_hash.copy()

    hashables = self.Hashables()
    assert len(hashables) > 0
    for hashable in hashables:
      _HashUpdate(hash, hashable)

    if recursive:
      hashables_for_child = self.HashablesForChild()
      if hashables_for_child is None:
        # Children extend this object's hash chain.
        child_hash = hash
      else:
        # Children get a fresh chain seeded only with hashables_for_child.
        assert len(hashables_for_child) > 0
        child_hash = seed_hash.copy()
        for hashable in hashables_for_child:
          _HashUpdate(child_hash, hashable)

      for child in self.Children():
        child.ComputeIDs(recursive, overwrite, child_hash)

    if overwrite or self.id is None:
      # Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest
      # is 160 bits.  Instead of throwing out 64 bits of the digest, xor them
      # into the portion that gets used.
      # digest_size is asserted to be a multiple of 4; Python 2 "/" keeps the
      # count integral.
      assert hash.digest_size % 4 == 0
      digest_int_count = hash.digest_size / 4
      digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
      id_ints = [0, 0, 0]
      for index in xrange(0, digest_int_count):
        id_ints[index % 3] ^= digest_ints[index]
      self.id = '%08X%08X%08X' % tuple(id_ints)
  def EnsureNoIDCollisions(self):
    """Verifies that no two objects have the same ID.  Checks all descendants.

    Raises KeyError on the first duplicate ID found.
    """
    ids = {}
    descendants = self.Descendants()
    for descendant in descendants:
      if descendant.id in ids:
        other = ids[descendant.id]
        raise KeyError(
              'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
              (descendant.id, str(descendant._properties),
               str(other._properties), self._properties['rootObject'].Name()))
      ids[descendant.id] = descendant

  def Children(self):
    """Returns a list of all of this object's owned (strong) children."""
    children = []
    for property, attributes in self._schema.iteritems():
      (is_list, property_type, is_strong) = attributes[0:3]
      if is_strong and property in self._properties:
        if not is_list:
          children.append(self._properties[property])
        else:
          children.extend(self._properties[property])
    return children

  def Descendants(self):
    """Returns a list of all of this object's descendants, including this
    object.
    """
    children = self.Children()
    descendants = [self]
    for child in children:
      descendants.extend(child.Descendants())
    return descendants

  def PBXProjectAncestor(self):
    # The base case for recursion is defined at PBXProject.PBXProjectAncestor.
    if self.parent:
      return self.parent.PBXProjectAncestor()
    return None
  def _EncodeComment(self, comment):
    """Encodes a comment to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # This mimics Xcode behavior by wrapping the comment in "/*" and "*/".  If
    # the string already contains a "*/", it is turned into "(*)/".  This keeps
    # the file writer from outputting something that would be treated as the
    # end of a comment in the middle of something intended to be entirely a
    # comment.
    return '/* ' + comment.replace('*/', '(*)/') + ' */'

  def _EncodeTransform(self, match):
    # This function works closely with _EncodeString.  It will only be called
    # by re.sub with match.group(0) containing a character matched by the
    # _escaped expression.
    char = match.group(0)

    # Backslashes (\) and quotation marks (") are always replaced with a
    # backslash-escaped version of the same.  Everything else gets its
    # replacement from the class' _encode_transforms array.
    if char == '\\':
      return '\\\\'
    if char == '"':
      return '\\"'
    return self._encode_transforms[ord(char)]

  def _EncodeString(self, value):
    """Encodes a string to be placed in the project file output, mimicking
    Xcode behavior.
    """

    # Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
    # $ (dollar sign), . (period), and _ (underscore) is present.  Also use
    # quotation marks to represent empty strings.
    #
    # Escape " (double-quote) and \ (backslash) by preceding them with a
    # backslash.
    #
    # Some characters below the printable ASCII range are encoded specially:
    #     7 ^G BEL is encoded as "\a"
    #     8 ^H BS  is encoded as "\b"
    #    11 ^K VT  is encoded as "\v"
    #    12 ^L NP  is encoded as "\f"
    #   127 ^? DEL is passed through as-is without escaping
    #  - In PBXFileReference and PBXBuildFile objects:
    #     9 ^I HT is passed through as-is without escaping
    #    10 ^J NL is passed through as-is without escaping
    #    13 ^M CR is passed through as-is without escaping
    #  - In other objects:
    #     9 ^I HT is encoded as "\t"
    #    10 ^J NL is encoded as "\n"
    #    13 ^M CR is encoded as "\n" rendering it indistinguishable from
    #    10 ^J NL
    # All other characters within the ASCII control character range (0 through
    # 31 inclusive) are encoded as "\U001f" referring to the Unicode code point
    # in hexadecimal.  For example, character 14 (^N SO) is encoded as "\U000e".
    # Characters above the ASCII range are passed through to the output encoded
    # as UTF-8 without any escaping.  These mappings are contained in the
    # class' _encode_transforms list.

    if _unquoted.search(value) and not _quoted.search(value):
      return value

    return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
  def _XCPrint(self, file, tabs, line):
    # Low-level write helper: indent |line| with |tabs| tab characters.
    file.write('\t' * tabs + line)

  def _XCPrintableValue(self, tabs, value, flatten_list=False):
    """Returns a representation of value that may be printed in a project file,
    mimicking Xcode's behavior.

    _XCPrintableValue can handle str and int values, XCObjects (which are
    made printable by returning their id property), and list and dict objects
    composed of any of the above types.  When printing a list or dict, and
    _should_print_single_line is False, the tabs parameter is used to determine
    how much to indent the lines corresponding to the items in the list or
    dict.

    If flatten_list is True, single-element lists will be transformed into
    strings.
    """
    printable = ''
    comment = None

    if self._should_print_single_line:
      sep = ' '
      element_tabs = ''
      end_tabs = ''
    else:
      sep = '\n'
      element_tabs = '\t' * (tabs + 1)
      end_tabs = '\t' * tabs

    if isinstance(value, XCObject):
      # XCObjects print as their ID; the comment is appended below.
      printable += value.id
      comment = value.Comment()
    elif isinstance(value, str):
      printable += self._EncodeString(value)
    elif isinstance(value, unicode):
      printable += self._EncodeString(value.encode('utf-8'))
    elif isinstance(value, int):
      printable += str(value)
    elif isinstance(value, list):
      if flatten_list and len(value) <= 1:
        # Empty and single-element lists are flattened to plain strings.
        if len(value) == 0:
          printable += self._EncodeString('')
        else:
          printable += self._EncodeString(value[0])
      else:
        printable = '(' + sep
        for item in value:
          printable += element_tabs + \
                       self._XCPrintableValue(tabs + 1, item, flatten_list) + \
                       ',' + sep
        printable += end_tabs + ')'
    elif isinstance(value, dict):
      printable = '{' + sep
      for item_key, item_value in sorted(value.iteritems()):
        printable += element_tabs + \
            self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
            self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
            sep
      printable += end_tabs + '}'
    else:
      raise TypeError("Can't make " + value.__class__.__name__ + ' printable')

    if comment != None:
      printable += ' ' + self._EncodeComment(comment)

    return printable
  def _XCKVPrint(self, file, tabs, key, value):
    """Prints a key and value, members of an XCObject's _properties dictionary,
    to file.

    tabs is an int identifying the indentation level.  If the class'
    _should_print_single_line variable is True, tabs is ignored and the
    key-value pair will be followed by a space instead of a newline.
    """
    if self._should_print_single_line:
      printable = ''
      after_kv = ' '
    else:
      printable = '\t' * tabs
      after_kv = '\n'

    # Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
    # objects without comments.  Sometimes it prints them with comments, but
    # the majority of the time, it doesn't.  To avoid unnecessary changes to
    # the project file after Xcode opens it, don't write comments for
    # remoteGlobalIDString.  This is a sucky hack and it would certainly be
    # cleaner to extend the schema to indicate whether or not a comment should
    # be printed, but since this is the only case where the problem occurs and
    # Xcode itself can't seem to make up its mind, the hack will suffice.
    #
    # Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
    if key == 'remoteGlobalIDString' and isinstance(self,
                                                    PBXContainerItemProxy):
      value_to_print = value.id
    else:
      value_to_print = value

    # PBXBuildFile's settings property is represented in the output as a dict,
    # but a hack here has it represented as a string.  Arrange to strip off the
    # quotes so that it shows up in the output as expected.
    if key == 'settings' and isinstance(self, PBXBuildFile):
      strip_value_quotes = True
    else:
      strip_value_quotes = False

    # In another one-off, let's set flatten_list on buildSettings properties
    # of XCBuildConfiguration objects, because that's how Xcode treats them.
    if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
      flatten_list = True
    else:
      flatten_list = False

    try:
      printable_key = self._XCPrintableValue(tabs, key, flatten_list)
      printable_value = self._XCPrintableValue(tabs, value_to_print,
                                               flatten_list)
      if strip_value_quotes and len(printable_value) > 1 and \
         printable_value[0] == '"' and printable_value[-1] == '"':
        printable_value = printable_value[1:-1]
      printable += printable_key + ' = ' + printable_value + ';' + after_kv
    except TypeError, e:
      # Annotate the error with the key being printed so failures in deep
      # structures are diagnosable, then re-raise.
      gyp.common.ExceptionAppend(e,
                                 'while printing key "%s"' % key)
      raise

    self._XCPrint(file, 0, printable)
  def Print(self, file=sys.stdout):
    """Prints a representation of this object to file, adhering to Xcode output
    formatting.
    """

    self.VerifyHasRequiredProperties()

    if self._should_print_single_line:
      # When printing an object in a single line, Xcode doesn't put any space
      # between the beginning of a dictionary (or presumably a list) and the
      # first contained item, so you wind up with snippets like
      #   ...CDEF = {isa = PBXFileReference; fileRef = 0123...
      # If it were me, I would have put a space in there after the opening
      # curly, but I guess this is just another one of those inconsistencies
      # between how Xcode prints PBXFileReference and PBXBuildFile objects as
      # compared to other objects.  Mimic Xcode's behavior here by using an
      # empty string for sep.
      sep = ''
      end_tabs = 0
    else:
      sep = '\n'
      end_tabs = 2

    # Start the object.  For example, '\t\tPBXProject = {\n'.
    self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)

    # "isa" isn't in the _properties dictionary, it's an intrinsic property
    # of the class which the object belongs to.  Xcode always outputs "isa"
    # as the first element of an object dictionary.
    self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)

    # The remaining elements of an object dictionary are sorted alphabetically.
    for property, value in sorted(self._properties.iteritems()):
      self._XCKVPrint(file, 3, property, value)

    # End the object.
    self._XCPrint(file, end_tabs, '};\n')
  def UpdateProperties(self, properties, do_copy=False):
    """Merge the supplied properties into the _properties dictionary.

    The input properties must adhere to the class schema or a KeyError or
    TypeError exception will be raised.  If adding an object of an XCObject
    subclass and the schema indicates a strong relationship, the object's
    parent will be set to this object.

    If do_copy is True, then lists, dicts, strong-owned XCObjects, and
    strong-owned XCObjects in lists will be copied instead of having their
    references added.
    """
    if properties is None:
      return

    for property, value in properties.iteritems():
      # Make sure the property is in the schema.
      if not property in self._schema:
        raise KeyError(property + ' not in ' + self.__class__.__name__)

      # Make sure the property conforms to the schema.
      (is_list, property_type, is_strong) = self._schema[property][0:3]
      if is_list:
        if value.__class__ != list:
          raise TypeError(
                property + ' of ' + self.__class__.__name__ + \
                ' must be list, not ' + value.__class__.__name__)
        for item in value:
          if not isinstance(item, property_type) and \
             not (item.__class__ == unicode and property_type == str):
            # Accept unicode where str is specified.  str is treated as
            # UTF-8-encoded.
            raise TypeError(
                  'item of ' + property + ' of ' + self.__class__.__name__ + \
                  ' must be ' + property_type.__name__ + ', not ' + \
                  item.__class__.__name__)
      elif not isinstance(value, property_type) and \
           not (value.__class__ == unicode and property_type == str):
        # Accept unicode where str is specified.  str is treated as
        # UTF-8-encoded.
        raise TypeError(
              property + ' of ' + self.__class__.__name__ + ' must be ' + \
              property_type.__name__ + ', not ' + value.__class__.__name__)

      # Checks passed, perform the assignment.
      if do_copy:
        if isinstance(value, XCObject):
          if is_strong:
            self._properties[property] = value.Copy()
          else:
            self._properties[property] = value
        elif isinstance(value, str) or isinstance(value, unicode) or \
             isinstance(value, int):
          self._properties[property] = value
        elif isinstance(value, list):
          if is_strong:
            # If is_strong is True, each element is an XCObject, so it's safe
            # to call Copy.
            self._properties[property] = []
            for item in value:
              self._properties[property].append(item.Copy())
          else:
            self._properties[property] = value[:]
        elif isinstance(value, dict):
          self._properties[property] = value.copy()
        else:
          raise TypeError("Don't know how to copy a " + \
                          value.__class__.__name__ + ' object for ' + \
                          property + ' in ' + self.__class__.__name__)
      else:
        self._properties[property] = value

      # Set up the child's back-reference to this object.  Don't use |value|
      # any more because it may not be right if do_copy is true.
      if is_strong:
        if not is_list:
          self._properties[property].parent = self
        else:
          for item in self._properties[property]:
            item.parent = self
  def HasProperty(self, key):
    # True if |key| has been set on this object.
    return key in self._properties

  def GetProperty(self, key):
    # Raises KeyError if |key| has not been set.
    return self._properties[key]

  def SetProperty(self, key, value):
    # Routed through UpdateProperties so schema validation and parent
    # assignment apply exactly as for construction-time properties.
    self.UpdateProperties({key: value})

  def DelProperty(self, key):
    # Removing a property that was never set is a no-op.
    if key in self._properties:
      del self._properties[key]

  def AppendProperty(self, key, value):
    # TODO(mark): Support ExtendProperty too (and make this call that)?

    # Schema validation.
    if not key in self._schema:
      raise KeyError(key + ' not in ' + self.__class__.__name__)

    (is_list, property_type, is_strong) = self._schema[key][0:3]
    if not is_list:
      raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
    if not isinstance(value, property_type):
      raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
                      ' must be ' + property_type.__name__ + ', not ' + \
                      value.__class__.__name__)

    # If the property doesn't exist yet, create a new empty list to receive the
    # item.
    if not key in self._properties:
      self._properties[key] = []

    # Set up the ownership link.
    if is_strong:
      value.parent = self

    # Store the item.
    self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
  """Ensure that all properties identified as required by the schema are
  set.

  Raises:
    KeyError: a property flagged as required by the schema is not present
        in self._properties.
  """
  # TODO(mark): A stronger verification mechanism is needed.  Some
  # subclasses need to perform validation beyond what the schema can enforce.
  # items() instead of the Python 2-only iteritems() keeps this method
  # working under both Python 2 and Python 3.
  for prop, attributes in self._schema.items():
    (is_list, property_type, is_strong, is_required) = attributes[0:4]
    if is_required and prop not in self._properties:
      raise KeyError(self.__class__.__name__ + ' requires ' + prop)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
  """Abstract base for PBXGroup and PBXFileReference.  Not represented in a
  project file."""

  # TODO(mark): Do name and path belong here?  Probably so.
  # If path is set and name is not, name may have a default value.  Name will
  # be set to the basename of path, if the basename of path is different from
  # the full value of path.  If path is already just a leaf name, name will
  # not be set.
  _schema = XCObject._schema.copy()
  _schema.update({
    'comments':       [0, str, 0, 0],
    'fileEncoding':   [0, str, 0, 0],
    'includeInIndex': [0, int, 0, 0],
    'indentWidth':    [0, int, 0, 0],
    'lineEnding':     [0, int, 0, 0],
    'sourceTree':     [0, str, 0, 1, '<group>'],
    'tabWidth':       [0, int, 0, 0],
    'usesTabs':       [0, int, 0, 0],
    'wrapsLines':     [0, int, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCObject.__init__(self, properties, id, parent)

    # Derive a default name from path when only path was supplied and the
    # basename differs from the full path.
    if 'path' in self._properties and 'name' not in self._properties:
      path = self._properties['path']
      name = posixpath.basename(path)
      if name != '' and path != name:
        self.SetProperty('name', name)

    if 'path' in self._properties and \
       ('sourceTree' not in self._properties or \
        self._properties['sourceTree'] == '<group>'):
      # If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
      # the variable out and make the path be relative to that variable by
      # assigning the variable name as the sourceTree.
      (source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
      if source_tree is not None:
        self._properties['sourceTree'] = source_tree
      if path is not None:
        self._properties['path'] = path
      if source_tree is not None and path is None and \
         'name' not in self._properties:
        # The path was of the form "$(SDKROOT)" with no path following it.
        # This object is now relative to that variable, so it has no path
        # attribute of its own.  It does, however, keep a name.
        del self._properties['path']
        self._properties['name'] = source_tree

  def Name(self):
    """Returns the name property, falling back to path.

    Returns None when neither is set (this happens for the root PBXGroup).
    """
    if 'name' in self._properties:
      return self._properties['name']
    elif 'path' in self._properties:
      return self._properties['path']
    else:
      # This happens in the case of the root PBXGroup.
      return None

  def Hashables(self):
    """Custom hashables for XCHierarchicalElements.

    XCHierarchicalElements are special.  Generally, their hashes shouldn't
    change if the paths don't change.  The normal XCObject implementation of
    Hashables adds a hashable for each object, which means that if
    the hierarchical structure changes (possibly due to changes caused when
    TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
    the hashes will change.  For example, if a project file initially contains
    a/b/f1 and a/b becomes collapsed into a/b, f1 will have a single parent
    a/b.  If someone later adds a/f2 to the project file, a/b can no longer be
    collapsed, and f1 winds up with parent b and grandparent a.  That would
    be sufficient to change f1's hash.

    To counteract this problem, hashables for all XCHierarchicalElements
    except for the main group (which has neither a name nor a path) are taken
    to be just the set of path components.  Because hashables are inherited
    from parents, this provides assurance that a/b/f1 has the same set of
    hashables whether its parent is b or a/b.

    The main group is a special case.  As it is permitted to have no name or
    path, it is permitted to use the standard XCObject hash mechanism.  This
    is not considered a problem because there can be only one main group.
    """
    if self == self.PBXProjectAncestor()._properties['mainGroup']:
      # super
      return XCObject.Hashables(self)

    hashables = []

    # Put the name in first, ensuring that if TakeOverOnlyChild collapses
    # children into a top-level group like "Source", the name always goes
    # into the list of hashables without interfering with path components.
    if 'name' in self._properties:
      # Make it less likely for people to manipulate hashes by following the
      # pattern of always pushing an object type value onto the list first.
      hashables.append(self.__class__.__name__ + '.name')
      hashables.append(self._properties['name'])

    # NOTE: This still has the problem that if an absolute path is encountered,
    # including paths with a sourceTree, they'll still inherit their parents'
    # hashables, even though the paths aren't relative to their parents.  This
    # is not expected to be much of a problem in practice.
    path = self.PathFromSourceTreeAndPath()
    if path is not None:
      components = path.split(posixpath.sep)
      for component in components:
        hashables.append(self.__class__.__name__ + '.path')
        hashables.append(component)

    hashables.extend(self._hashables)

    return hashables

  def Compare(self, other):
    """Three-way comparison for sorting within a group.

    Returns a negative, zero, or positive value (cmp semantics; note that
    cmp is Python 2-only, consistent with the rest of this module).
    """
    # Allow comparison of these types.  PBXGroup has the highest sort rank;
    # PBXVariantGroup is treated as equal to PBXFileReference.
    valid_class_types = {
      PBXFileReference: 'file',
      PBXGroup:         'group',
      PBXVariantGroup:  'file',
    }
    self_type = valid_class_types[self.__class__]
    other_type = valid_class_types[other.__class__]

    if self_type == other_type:
      # If the two objects are of the same sort rank, compare their names.
      return cmp(self.Name(), other.Name())

    # Otherwise, sort groups before everything else.
    if self_type == 'group':
      return -1
    return 1

  def CompareRootGroup(self, other):
    # This function should be used only to compare direct children of the
    # containing PBXProject's mainGroup.  These groups should appear in the
    # listed order.
    # TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
    # generator should have a way of influencing this list rather than having
    # to hardcode for the generator here.
    order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
             'Build']

    # If the groups aren't in the listed order, do a name comparison.
    # Otherwise, groups in the listed order should come before those that
    # aren't.
    self_name = self.Name()
    other_name = other.Name()
    self_in = isinstance(self, PBXGroup) and self_name in order
    # Bug fix: this line previously tested isinstance(self, PBXGroup) instead
    # of |other|, so a non-group named like one of the special groups could be
    # ordered as if it were one.
    other_in = isinstance(other, PBXGroup) and other_name in order
    if not self_in and not other_in:
      return self.Compare(other)
    if self_name in order and other_name not in order:
      return -1
    if other_name in order and self_name not in order:
      return 1

    # If both groups are in the listed order, go by the defined order.
    self_index = order.index(self_name)
    other_index = order.index(other_name)
    if self_index < other_index:
      return -1
    if self_index > other_index:
      return 1
    return 0

  def PathFromSourceTreeAndPath(self):
    # Turn the object's sourceTree and path properties into a single flat
    # string of a form comparable to the path parameter.  If there's a
    # sourceTree property other than "<group>", wrap it in $(...) for the
    # comparison.
    components = []
    if self._properties['sourceTree'] != '<group>':
      components.append('$(' + self._properties['sourceTree'] + ')')
    if 'path' in self._properties:
      components.append(self._properties['path'])

    if len(components) > 0:
      return posixpath.join(*components)

    return None

  def FullPath(self):
    # Returns a full path to self relative to the project file, or relative
    # to some other source tree.  Start with self, and walk up the chain of
    # parents prepending their paths, if any, until no more parents are
    # available (project-relative path) or until a path relative to some
    # source tree is found.
    xche = self
    path = None
    while isinstance(xche, XCHierarchicalElement) and \
          (path is None or \
           (not path.startswith('/') and not path.startswith('$'))):
      this_path = xche.PathFromSourceTreeAndPath()
      if this_path is not None and path is not None:
        path = posixpath.join(this_path, path)
      elif this_path is not None:
        path = this_path
      xche = xche.parent

    return path
class PBXGroup(XCHierarchicalElement):
  """A group node in the Xcode file hierarchy.

  Attributes:
    _children_by_path: Maps pathnames of children of this PBXGroup to the
      actual child XCHierarchicalElement objects.
    _variant_children_by_name_and_path: Maps (name, path) tuples of
      PBXVariantGroup children to the actual child PBXVariantGroup objects.
  """

  _schema = XCHierarchicalElement._schema.copy()
  _schema.update({
    'children': [1, XCHierarchicalElement, 1, 1, []],
    'name':     [0, str, 0, 0],
    'path':     [0, str, 0, 0],
  })

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCHierarchicalElement.__init__(self, properties, id, parent)
    self._children_by_path = {}
    self._variant_children_by_name_and_path = {}
    # Index any children supplied through |properties| so that the lookup
    # dicts are consistent from the start.
    for child in self._properties.get('children', []):
      self._AddChildToDicts(child)

  def Hashables(self):
    # super
    hashables = XCHierarchicalElement.Hashables(self)

    # It is not sufficient to just rely on name and parent to build a unique
    # hashable : a node could have two child PBXGroup sharing a common name.
    # To add entropy the hashable is enhanced with the names of all its
    # children.
    for child in self._properties.get('children', []):
      child_name = child.Name()
      if child_name != None:
        hashables.append(child_name)
    return hashables

  def HashablesForChild(self):
    # To avoid a circular reference the hashables used to compute a child id do
    # not include the child names.
    return XCHierarchicalElement.Hashables(self)

  def _AddChildToDicts(self, child):
    # Sets up this PBXGroup object's dicts to reference the child properly.
    child_path = child.PathFromSourceTreeAndPath()
    if child_path:
      if child_path in self._children_by_path:
        raise ValueError('Found multiple children with path ' + child_path)
      self._children_by_path[child_path] = child

    if isinstance(child, PBXVariantGroup):
      child_name = child._properties.get('name', None)
      key = (child_name, child_path)
      if key in self._variant_children_by_name_and_path:
        raise ValueError('Found multiple PBXVariantGroup children with ' + \
                         'name ' + str(child_name) + ' and path ' + \
                         str(child_path))
      self._variant_children_by_name_and_path[key] = child

  def AppendChild(self, child):
    # Callers should use this instead of calling
    # AppendProperty('children', child) directly because this function
    # maintains the group's dicts.
    self.AppendProperty('children', child)
    self._AddChildToDicts(child)

  def GetChildByName(self, name):
    """Returns the first child whose Name() equals |name|, or None.

    This is not currently optimized with a dict as GetChildByPath is because
    it has few callers.  Most callers probably want GetChildByPath.  This
    function is only useful to get children that have names but no paths,
    which is rare.  The children of the main group ("Source", "Products",
    etc.) is pretty much the only case where this likely to come up.
    """
    # TODO(mark): Maybe this should raise an error if more than one child is
    # present with the same name.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if child.Name() == name:
        return child

    return None

  def GetChildByPath(self, path):
    """Returns the child indexed under |path|, or None."""
    if not path:
      return None

    if path in self._children_by_path:
      return self._children_by_path[path]

    return None

  def GetChildByRemoteObject(self, remote_object):
    """Returns the PBXReferenceProxy child proxying |remote_object|, or None.

    This method is a little bit esoteric.  Given a remote_object, which
    should be a PBXFileReference in another project file, this method will
    return this group's PBXReferenceProxy object serving as a local proxy
    for the remote PBXFileReference.
    """
    # This function might benefit from a dict optimization as GetChildByPath
    # for some workloads, but profiling shows that it's not currently a
    # problem.
    if not 'children' in self._properties:
      return None

    for child in self._properties['children']:
      if not isinstance(child, PBXReferenceProxy):
        continue

      container_proxy = child._properties['remoteRef']
      if container_proxy._properties['remoteGlobalIDString'] == remote_object:
        return child

    return None

  def AddOrGetFileByPath(self, path, hierarchical):
    """Returns an existing or new file reference corresponding to path.

    If hierarchical is True, this method will create or use the necessary
    hierarchical group structure corresponding to path.  Otherwise, it will
    look in and create an item in the current group only.

    If an existing matching reference is found, it is returned, otherwise, a
    new one will be created, added to the correct group, and returned.

    If path identifies a directory by virtue of carrying a trailing slash,
    this method returns a PBXFileReference of "folder" type.  If path
    identifies a variant, by virtue of it identifying a file inside a
    directory with an ".lproj" extension, this method returns a
    PBXVariantGroup containing the variant named by path, and possibly other
    variants.  For all other paths, a "normal" PBXFileReference will be
    returned.
    """
    # Adding or getting a directory?  Directories end with a trailing slash.
    is_dir = False
    if path.endswith('/'):
      is_dir = True
    # normpath strips the trailing slash, so it is restored afterwards.
    path = posixpath.normpath(path)
    if is_dir:
      path = path + '/'

    # Adding or getting a variant?  Variants are files inside directories
    # with an ".lproj" extension.  Xcode uses variants for localization.  For
    # a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
    # MainMenu.nib inside path/to, and give it a variant named Language.  In
    # this example, grandparent would be set to path/to and parent_root would
    # be set to Language.
    variant_name = None
    parent = posixpath.dirname(path)
    grandparent = posixpath.dirname(parent)
    parent_basename = posixpath.basename(parent)
    (parent_root, parent_ext) = posixpath.splitext(parent_basename)
    if parent_ext == '.lproj':
      variant_name = parent_root
    if grandparent == '':
      grandparent = None

    # Putting a directory inside a variant group is not currently supported.
    assert not is_dir or variant_name is None

    path_split = path.split(posixpath.sep)
    if len(path_split) == 1 or \
       ((is_dir or variant_name != None) and len(path_split) == 2) or \
       not hierarchical:
      # The PBXFileReference or PBXVariantGroup will be added to or gotten
      # from this PBXGroup, no recursion necessary.
      if variant_name is None:
        # Add or get a PBXFileReference.
        file_ref = self.GetChildByPath(path)
        if file_ref != None:
          assert file_ref.__class__ == PBXFileReference
        else:
          file_ref = PBXFileReference({'path': path})
          self.AppendChild(file_ref)
      else:
        # Add or get a PBXVariantGroup.  The variant group name is the same
        # as the basename (MainMenu.nib in the example above).  grandparent
        # specifies the path to the variant group itself, and path_split[-2:]
        # is the path of the specific variant relative to its group.
        variant_group_name = posixpath.basename(path)
        variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
            variant_group_name, grandparent)
        variant_path = posixpath.sep.join(path_split[-2:])
        variant_ref = variant_group_ref.GetChildByPath(variant_path)
        if variant_ref != None:
          assert variant_ref.__class__ == PBXFileReference
        else:
          variant_ref = PBXFileReference({'name': variant_name,
                                          'path': variant_path})
          variant_group_ref.AppendChild(variant_ref)
        # The caller is interested in the variant group, not the specific
        # variant file.
        file_ref = variant_group_ref
      return file_ref
    else:
      # Hierarchical recursion.  Add or get a PBXGroup corresponding to the
      # outermost path component, and then recurse into it, chopping off that
      # path component.
      next_dir = path_split[0]
      group_ref = self.GetChildByPath(next_dir)
      if group_ref != None:
        assert group_ref.__class__ == PBXGroup
      else:
        group_ref = PBXGroup({'path': next_dir})
        self.AppendChild(group_ref)
      return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
                                          hierarchical)

  def AddOrGetVariantGroupByNameAndPath(self, name, path):
    """Returns an existing or new PBXVariantGroup for name and path.

    If a PBXVariantGroup identified by the name and path arguments is already
    present as a child of this object, it is returned.  Otherwise, a new
    PBXVariantGroup with the correct properties is created, added as a child,
    and returned.

    This method will generally be called by AddOrGetFileByPath, which knows
    when to create a variant group based on the structure of the pathnames
    passed to it.
    """
    key = (name, path)
    if key in self._variant_children_by_name_and_path:
      variant_group_ref = self._variant_children_by_name_and_path[key]
      assert variant_group_ref.__class__ == PBXVariantGroup
      return variant_group_ref

    variant_group_properties = {'name': name}
    if path != None:
      variant_group_properties['path'] = path
    variant_group_ref = PBXVariantGroup(variant_group_properties)
    self.AppendChild(variant_group_ref)

    return variant_group_ref

  def TakeOverOnlyChild(self, recurse=False):
    """If this PBXGroup has only one child and it's also a PBXGroup, take
    it over by making all of its children this object's children.

    This function will continue to take over only children when those
    children are groups.  If there are three PBXGroups representing a, b, and
    c, with c inside b and b inside a, and a and b have no other children,
    this will result in a taking over both b and c, forming a PBXGroup for
    a/b/c.

    If recurse is True, this function will recurse into children and ask them
    to collapse themselves by taking over only children as well.  Assuming
    an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
    (d1, d2, and f are files, the rest are groups), recursion will result in
    a group for a/b/c containing a group for d3/e.
    """
    # At this stage, check that child class types are PBXGroup exactly,
    # instead of using isinstance.  The only subclass of PBXGroup,
    # PBXVariantGroup, should not participate in reparenting in the same way:
    # reparenting by merging different object types would be wrong.
    while len(self._properties['children']) == 1 and \
          self._properties['children'][0].__class__ == PBXGroup:
      # Loop to take over the innermost only-child group possible.

      child = self._properties['children'][0]

      # Assume the child's properties, including its children.  Save a copy
      # of this object's old properties, because they'll still be needed.
      # This object retains its existing id and parent attributes.
      old_properties = self._properties
      self._properties = child._properties
      self._children_by_path = child._children_by_path
      # NOTE(review): _variant_children_by_name_and_path is not refreshed
      # from the child here, so it may go stale after a takeover — confirm
      # whether taken-over groups can contain PBXVariantGroup children.

      if not 'sourceTree' in self._properties or \
         self._properties['sourceTree'] == '<group>':
        # The child was relative to its parent.  Fix up the path.  Note that
        # children with a sourceTree other than "<group>" are not relative to
        # their parents, so no path fix-up is needed in that case.
        if 'path' in old_properties:
          if 'path' in self._properties:
            # Both the original parent and child have paths set.
            self._properties['path'] = posixpath.join(old_properties['path'],
                                                      self._properties['path'])
          else:
            # Only the original parent has a path, use it.
            self._properties['path'] = old_properties['path']
        if 'sourceTree' in old_properties:
          # The original parent had a sourceTree set, use it.
          self._properties['sourceTree'] = old_properties['sourceTree']

      # If the original parent had a name set, keep using it.  If the
      # original parent didn't have a name but the child did, let the child's
      # name live on.  If the name attribute seems unnecessary now, get rid
      # of it.
      if 'name' in old_properties and old_properties['name'] != None and \
         old_properties['name'] != self.Name():
        self._properties['name'] = old_properties['name']
      if 'name' in self._properties and 'path' in self._properties and \
         self._properties['name'] == self._properties['path']:
        del self._properties['name']

      # Notify all children of their new parent.
      for child in self._properties['children']:
        child.parent = self

    # If asked to recurse, recurse.
    if recurse:
      for child in self._properties['children']:
        if child.__class__ == PBXGroup:
          child.TakeOverOnlyChild(recurse)

  def SortGroup(self):
    # sorted()'s cmp= keyword is Python 2-only, consistent with the rest of
    # this module.
    self._properties['children'] = \
        sorted(self._properties['children'], cmp=lambda x,y: x.Compare(y))

    # Recurse.
    for child in self._properties['children']:
      if isinstance(child, PBXGroup):
        child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
  # Abstract base for objects that can be used as the fileRef property of
  # PBXBuildFile.

  def PathHashables(self):
    """Returns hashables identifying this object by its full path.

    A PBXBuildFile that refers to this object will call this method to
    obtain additional hashables specific to this XCFileLikeElement.  Don't
    just use this object's hashables, they're not specific and unique enough
    on their own (without access to the parent hashables.)  Instead, provide
    hashables that identify this object by path by getting its hashables as
    well as the hashables of ancestor XCHierarchicalElement objects.
    """
    hashables = []
    xche = self
    while xche is not None and isinstance(xche, XCHierarchicalElement):
      # Prepend each ancestor's hashables so that the result is ordered from
      # the outermost ancestor down to this object.  This replaces a Python
      # 2-only xrange/insert loop with an equivalent concatenation that also
      # works under Python 3.
      hashables = xche.Hashables() + hashables
      xche = xche.parent
    return hashables
class XCContainerPortal(XCObject):
  """Abstract base for objects usable as the containerPortal property of
  PBXContainerItemProxy."""
  pass
class XCRemoteObject(XCObject):
  """Abstract base for objects usable as the remoteGlobalIDString property
  of PBXContainerItemProxy."""
  pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
  """A reference to a file on disk, usable as a fileRef, containerPortal,
  or remoteGlobalIDString."""

  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'explicitFileType':  [0, str, 0, 0],
    'lastKnownFileType': [0, str, 0, 0],
    'name':              [0, str, 0, 0],
    'path':              [0, str, 0, 1],
  })

  # Weird output rules for PBXFileReference.
  _should_print_single_line = True
  # super
  _encode_transforms = XCFileLikeElement._alternate_encode_transforms

  def __init__(self, properties=None, id=None, parent=None):
    # super
    XCFileLikeElement.__init__(self, properties, id, parent)
    # A trailing slash marks a directory; strip it and remember.
    if 'path' in self._properties and self._properties['path'].endswith('/'):
      self._properties['path'] = self._properties['path'][:-1]
      is_dir = True
    else:
      is_dir = False

    # Only guess a file type when the caller didn't specify one explicitly.
    if 'path' in self._properties and \
       not 'lastKnownFileType' in self._properties and \
       not 'explicitFileType' in self._properties:
      # TODO(mark): This is the replacement for a replacement for a quick
      # hack.  It is no longer incredibly sucky, but this list needs to be
      # extended.
      # Maps a lowercase filename extension to an Xcode file-type identifier.
      extension_map = {
        'a':           'archive.ar',
        'app':         'wrapper.application',
        'bdic':        'file',
        'bundle':      'wrapper.cfbundle',
        'c':           'sourcecode.c.c',
        'cc':          'sourcecode.cpp.cpp',
        'cpp':         'sourcecode.cpp.cpp',
        'css':         'text.css',
        'cxx':         'sourcecode.cpp.cpp',
        'dart':        'sourcecode',
        'dylib':       'compiled.mach-o.dylib',
        'framework':   'wrapper.framework',
        'gyp':         'sourcecode',
        'gypi':        'sourcecode',
        'h':           'sourcecode.c.h',
        'hxx':         'sourcecode.cpp.h',
        'icns':        'image.icns',
        'java':        'sourcecode.java',
        'js':          'sourcecode.javascript',
        'kext':        'wrapper.kext',
        'm':           'sourcecode.c.objc',
        'mm':          'sourcecode.cpp.objcpp',
        'nib':         'wrapper.nib',
        'o':           'compiled.mach-o.objfile',
        'pdf':         'image.pdf',
        'pl':          'text.script.perl',
        'plist':       'text.plist.xml',
        'pm':          'text.script.perl',
        'png':         'image.png',
        'py':          'text.script.python',
        'r':           'sourcecode.rez',
        'rez':         'sourcecode.rez',
        's':           'sourcecode.asm',
        'storyboard':  'file.storyboard',
        'strings':     'text.plist.strings',
        'swift':       'sourcecode.swift',
        'ttf':         'file',
        'xcassets':    'folder.assetcatalog',
        'xcconfig':    'text.xcconfig',
        'xcdatamodel': 'wrapper.xcdatamodel',
        'xcdatamodeld': 'wrapper.xcdatamodeld',
        'xib':         'file.xib',
        'y':           'sourcecode.yacc',
      }

      # Extensions whose guessed type should be stored under explicitFileType
      # rather than lastKnownFileType.
      prop_map = {
        'dart': 'explicitFileType',
        'gyp':  'explicitFileType',
        'gypi': 'explicitFileType',
      }

      if is_dir:
        file_type = 'folder'
        prop_name = 'lastKnownFileType'
      else:
        basename = posixpath.basename(self._properties['path'])
        (root, ext) = posixpath.splitext(basename)
        # Check the map using a lowercase extension.
        # TODO(mark): Maybe it should try with the original case first and
        # fall back to lowercase, in case there are any instances where case
        # matters.  There currently aren't.
        if ext != '':
          ext = ext[1:].lower()

        # TODO(mark): "text" is the default value, but "file" is appropriate
        # for unrecognized files not containing text.  Xcode seems to choose
        # based on content.
        file_type = extension_map.get(ext, 'text')
        prop_name = prop_map.get(ext, 'lastKnownFileType')

      self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
  """PBXVariantGroup is used by Xcode to represent localizations."""
  # No additions to the schema relative to PBXGroup.
  pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
  """A single named build configuration carrying a buildSettings dict."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'baseConfigurationReference': [0, PBXFileReference, 0, 0],
    'buildSettings':              [0, dict, 0, 1, {}],
    'name':                       [0, str, 0, 1],
  })

  def HasBuildSetting(self, key):
    """Returns True if the build setting named |key| is present."""
    return key in self._properties['buildSettings']

  def GetBuildSetting(self, key):
    """Returns the value of the build setting named |key|.

    Raises:
      KeyError: the setting is not present.
    """
    return self._properties['buildSettings'][key]

  def SetBuildSetting(self, key, value):
    """Sets the build setting named |key| to |value|."""
    # TODO(mark): If a list, copy?
    self._properties['buildSettings'][key] = value

  def AppendBuildSetting(self, key, value):
    """Appends |value| to the list-valued build setting named |key|,
    creating the list on first use."""
    settings = self._properties['buildSettings']
    settings.setdefault(key, []).append(value)

  def DelBuildSetting(self, key):
    """Removes the build setting named |key| if present; a no-op otherwise."""
    self._properties['buildSettings'].pop(key, None)

  def SetBaseConfiguration(self, value):
    """Sets the baseConfigurationReference property to |value|."""
    self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
  """Holds a list of XCBuildConfiguration children and names a default
  configuration."""

  # _configs is the default list of configurations.
  # NOTE(review): this list is built once at class-definition time and is the
  # schema default shared by every XCConfigurationList; presumably the
  # do_copy default-copying machinery prevents cross-instance sharing —
  # confirm against _SetDefaultsFromSchema.
  _configs = [ XCBuildConfiguration({'name': 'Debug'}),
               XCBuildConfiguration({'name': 'Release'}) ]

  _schema = XCObject._schema.copy()
  _schema.update({
    'buildConfigurations':           [1, XCBuildConfiguration, 1, 1, _configs],
    'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
    'defaultConfigurationName':      [0, str, 0, 1, 'Release'],
  })

  def Name(self):
    """Returns a descriptive name derived from the parent object."""
    return 'Build configuration list for ' + \
           self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'

  def ConfigurationNamed(self, name):
    """Convenience accessor to obtain an XCBuildConfiguration by name.

    Raises:
      KeyError: no child configuration is named |name|.
    """
    for configuration in self._properties['buildConfigurations']:
      if configuration._properties['name'] == name:
        return configuration

    raise KeyError(name)

  def DefaultConfiguration(self):
    """Convenience accessor to obtain the default XCBuildConfiguration."""
    return self.ConfigurationNamed(self._properties['defaultConfigurationName'])

  def HasBuildSetting(self, key):
    """Determines the state of a build setting in all XCBuildConfiguration
    child objects.

    If all child objects have key in their build settings, and the value is
    the same in all child objects, returns 1.

    If no child objects have the key in their build settings, returns 0.

    If some, but not all, child objects have the key in their build settings,
    or if any children have different values for the key, returns -1.
    """
    has = None
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_has = configuration.HasBuildSetting(key)
      if has is None:
        has = configuration_has
      elif has != configuration_has:
        # Mixed presence across configurations.
        return -1

      if configuration_has:
        configuration_value = configuration.GetBuildSetting(key)
        if value is None:
          value = configuration_value
        elif value != configuration_value:
          # Present everywhere so far, but with differing values.
          return -1

    if not has:
      return 0

    return 1

  def GetBuildSetting(self, key):
    """Gets the build setting for key.

    All child XCConfiguration objects must have the same value set for the
    setting, or a ValueError will be raised.
    """
    # TODO(mark): This is wrong for build settings that are lists.  The list
    # contents should be compared (and a list copy returned?)
    value = None
    for configuration in self._properties['buildConfigurations']:
      configuration_value = configuration.GetBuildSetting(key)
      if value is None:
        value = configuration_value
      else:
        if value != configuration_value:
          raise ValueError('Variant values for ' + key)

    return value

  def SetBuildSetting(self, key, value):
    """Sets the build setting for key to value in all child
    XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBuildSetting(key, value)

  def AppendBuildSetting(self, key, value):
    """Appends value to the build setting for key, which is treated as a
    list, in all child XCBuildConfiguration objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.AppendBuildSetting(key, value)

  def DelBuildSetting(self, key):
    """Deletes the build setting key from all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.DelBuildSetting(key)

  def SetBaseConfiguration(self, value):
    """Sets the build configuration in all child XCBuildConfiguration
    objects.
    """
    for configuration in self._properties['buildConfigurations']:
      configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
  """Associates an XCFileLikeElement with a build phase."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'fileRef':  [0, XCFileLikeElement, 0, 1],
    'settings': [0, str, 0, 0],  # hack, it's a dict
  })

  # Weird output rules for PBXBuildFile.
  _should_print_single_line = True
  _encode_transforms = XCObject._alternate_encode_transforms

  def Name(self):
    """Returns a name like "main.cc in Sources"."""
    return '%s in %s' % (self._properties['fileRef'].Name(),
                         self.parent.Name())

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Relying on Name() alone is insufficient because it is not a complete
    # pathname.  PathHashables returns hashables unique enough that no two
    # PBXBuildFiles should wind up with the same set of hashables, unless
    # someone adds the same file multiple times to the same target.  That
    # would be considered invalid anyway.
    hashables.extend(self._properties['fileRef'].PathHashables())

    return hashables
class XCBuildPhase(XCObject):
  """Abstract base for build phase classes.  Not represented in a project
  file.

  Attributes:
    _files_by_path: A dict mapping each path of a child in the files list by
      path (keys) to the corresponding PBXBuildFile children (values).
    _files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
      to the corresponding PBXBuildFile children (values).
  """

  # TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
  # actually have a "files" list.  XCBuildPhase should not have "files" but
  # another abstract subclass of it should provide this, and concrete build
  # phase types that do have "files" lists should be derived from that new
  # abstract subclass.  XCBuildPhase should only provide buildActionMask and
  # runOnlyForDeploymentPostprocessing, and not files or the various
  # file-related methods and attributes.

  _schema = XCObject._schema.copy()
  _schema.update({
    # Schema rows: [is_list, property_type, is_strong, is_required, default].
    'buildActionMask':                    [0, int, 0, 1, 0x7fffffff],
    'files':                              [1, PBXBuildFile, 1, 1, []],
    'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
  })
def __init__(self, properties=None, id=None, parent=None):
  """Initializes the phase and indexes any pre-existing files entries."""
  # super
  XCObject.__init__(self, properties, id, parent)

  self._files_by_path = {}
  self._files_by_xcfilelikeelement = {}

  # Index any PBXBuildFile children supplied through |properties| so that
  # the lookup dicts are consistent from the start.
  for build_file in self._properties.get('files', []):
    self._AddBuildFileToDicts(build_file)
def FileGroup(self, path):
  """Returns the (group, hierarchical) destination for |path|.

  Subclasses must override this by returning a two-element tuple.  The
  first item in the tuple should be the PBXGroup to which "path" should be
  added, either as a child or deeper descendant.  The second item should
  be a boolean indicating whether files should be added into hierarchical
  groups or one single flat group.
  """
  raise NotImplementedError(
      '%s must implement FileGroup' % self.__class__.__name__)
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
  """Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.

  If path is specified, then it is the path that is being added to the
  phase, and pbxbuildfile must contain either a PBXFileReference directly
  referencing that path, or it must contain a PBXVariantGroup that itself
  contains a PBXFileReference referencing the path.

  If path is not specified, either the PBXFileReference's path or the paths
  of all children of the PBXVariantGroup are taken as being added to the
  phase.

  If the path is already present in the phase, raises an exception.

  If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
  are already present in the phase, referenced by a different PBXBuildFile
  object, raises an exception.  This does not raise an exception when
  a PBXFileReference or PBXVariantGroup reappear and are referenced by the
  same PBXBuildFile that has already introduced them, because in the case
  of PBXVariantGroup objects, they may correspond to multiple paths that are
  not all added simultaneously.  When this situation occurs, the path needs
  to be added to _files_by_path, but nothing needs to change in
  _files_by_xcfilelikeelement, and the caller should have avoided adding
  the PBXBuildFile if it is already present in the list of children.
  """
  xcfilelikeelement = pbxbuildfile._properties['fileRef']
  paths = []
  if path != None:
    # It's best when the caller provides the path.
    # NOTE(review): when path is given and fileRef is NOT a PBXVariantGroup,
    # nothing is appended to paths, so _files_by_path is not updated for
    # that case — verify against callers that this is intended.
    if isinstance(xcfilelikeelement, PBXVariantGroup):
      paths.append(path)
  else:
    # If the caller didn't provide a path, there can be either multiple
    # paths (PBXVariantGroup) or one.
    if isinstance(xcfilelikeelement, PBXVariantGroup):
      for variant in xcfilelikeelement._properties['children']:
        paths.append(variant.FullPath())
    else:
      paths.append(xcfilelikeelement.FullPath())
  # Add the paths first, because if something's going to raise, the
  # messages provided by _AddPathToDict are more useful owing to its
  # having access to a real pathname and not just an object's Name().
  for a_path in paths:
    self._AddPathToDict(pbxbuildfile, a_path)
  # If another PBXBuildFile references this XCFileLikeElement, there's a
  # problem.
  if xcfilelikeelement in self._files_by_xcfilelikeelement and \
     self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
    raise ValueError('Found multiple build files for ' + \
                     xcfilelikeelement.Name())
  self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
  """Appends pbxbuildfile to the 'files' list and updates the lookup dicts.

  Callers should use this instead of calling
  AppendProperty('files', pbxbuildfile) directly because this function
  maintains the object's dicts.  Better yet, callers can just call AddFile
  with a pathname and not worry about building their own PBXBuildFile
  objects.
  """
  self.AppendProperty('files', pbxbuildfile)
  self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
  """Adds path to this build phase, creating a PBXBuildFile when needed.

  settings, if supplied, becomes the 'settings' property of a newly
  created PBXBuildFile.  The group that receives the file reference is
  chosen by the subclass's FileGroup.
  """
  (file_group, hierarchical) = self.FileGroup(path)
  file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
  if file_ref in self._files_by_xcfilelikeelement and \
     isinstance(file_ref, PBXVariantGroup):
    # There's already a PBXBuildFile in this phase corresponding to the
    # PBXVariantGroup.  path just provides a new variant that belongs to
    # the group.  Add the path to the dict.
    pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
    self._AddBuildFileToDicts(pbxbuildfile, path)
  else:
    # Add a new PBXBuildFile to get file_ref into the phase.
    if settings is None:
      pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
    else:
      pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
    self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
  """The 'Headers' build phase.  No schema additions relative to
  XCBuildPhase."""

  def Name(self):
    return 'Headers'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
  """The 'Resources' build phase.  No schema additions relative to
  XCBuildPhase."""

  def Name(self):
    return 'Resources'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
  """The 'Sources' build phase.  No schema additions relative to
  XCBuildPhase."""

  def Name(self):
    return 'Sources'

  def FileGroup(self, path):
    project = self.PBXProjectAncestor()
    return project.RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
  """The 'Frameworks' build phase.  No schema additions relative to
  XCBuildPhase."""

  def Name(self):
    return 'Frameworks'

  def FileGroup(self, path):
    (root, ext) = posixpath.splitext(path)
    if ext != '':
      ext = ext[1:].lower()
    if ext == 'o':
      # .o files are added to Xcode Frameworks phases, but conceptually
      # they're more like sources or intermediates than frameworks, so
      # route them to the groups where those live.
      return self.PBXProjectAncestor().RootGroupForPath(path)
    return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
  """A build phase that runs a user-supplied shell script."""

  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'inputPaths':       [1, str, 0, 1, []],
    'name':             [0, str, 0, 0],
    'outputPaths':      [1, str, 0, 1, []],
    'shellPath':        [0, str, 0, 1, '/bin/sh'],
    'shellScript':      [0, str, 0, 1],
    'showEnvVarsInLog': [0, int, 0, 0],
  })

  def Name(self):
    # A custom 'name' property wins; otherwise use the generic label.
    return self._properties.get('name', 'ShellScript')
class PBXCopyFilesBuildPhase(XCBuildPhase):
  """A build phase that copies its files to a destination directory,
  identified by a dstSubfolderSpec code plus a dstPath relative to it."""
  _schema = XCBuildPhase._schema.copy()
  _schema.update({
    'dstPath':          [0, str, 0, 1],
    'dstSubfolderSpec': [0, int, 0, 1],
    'name':             [0, str, 0, 0],
  })
  # path_tree_re matches "$(DIR)/path" or just "$(DIR)".  Match group 1 is
  # "DIR", match group 3 is "path" or None.
  path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
  # path_tree_to_subfolder maps names of Xcode variables to the associated
  # dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
  path_tree_to_subfolder = {
    'BUILT_FRAMEWORKS_DIR': 10,  # Frameworks Directory
    'BUILT_PRODUCTS_DIR': 16,  # Products Directory
    # Other types that can be chosen via the Xcode UI.
    # TODO(mark): Map Xcode variable names to these.
    # : 1,  # Wrapper
    # : 6,  # Executables: 6
    # : 7,  # Resources
    # : 15,  # Java Resources
    # : 11,  # Shared Frameworks
    # : 12,  # Shared Support
    # : 13,  # PlugIns
  }

  def Name(self):
    # A custom 'name' property wins; otherwise use the generic label.
    if 'name' in self._properties:
      return self._properties['name']
    return 'CopyFiles'

  def FileGroup(self, path):
    return self.PBXProjectAncestor().RootGroupForPath(path)

  def SetDestination(self, path):
    """Set the dstSubfolderSpec and dstPath properties from path.

    path may be specified in the same notation used for XCHierarchicalElements,
    specifically, "$(DIR)/path".

    Raises:
      ValueError: if path is neither "$(DIR)..."-style nor absolute.
    """
    path_tree_match = self.path_tree_re.search(path)
    if path_tree_match:
      # Everything else needs to be relative to an Xcode variable.
      path_tree = path_tree_match.group(1)
      relative_path = path_tree_match.group(3)
      if path_tree in self.path_tree_to_subfolder:
        subfolder = self.path_tree_to_subfolder[path_tree]
        # "$(DIR)" alone yields no group 3; normalize None to ''.
        if relative_path is None:
          relative_path = ''
      else:
        # The path starts with an unrecognized Xcode variable
        # name like $(SRCROOT).  Xcode will still handle this
        # as an "absolute path" that starts with the variable.
        subfolder = 0
        relative_path = path
    elif path.startswith('/'):
      # Special case.  Absolute paths are in dstSubfolderSpec 0.
      subfolder = 0
      relative_path = path[1:]
    else:
      raise ValueError('Can\'t use path %s in a %s' % \
                       (path, self.__class__.__name__))
    self._properties['dstPath'] = relative_path
    self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
  """A custom rule describing how files of a given type are compiled."""

  _schema = XCObject._schema.copy()
  _schema.update({
    'compilerSpec': [0, str, 0, 1],
    'filePatterns': [0, str, 0, 0],
    'fileType':     [0, str, 0, 1],
    'isEditable':   [0, int, 0, 1, 1],
    'outputFiles':  [1, str, 0, 1, []],
    'script':       [0, str, 0, 0],
  })

  def Name(self):
    # Not very inspired, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # fileType always contributes; filePatterns only when present.
    props = self._properties
    hashables.append(props['fileType'])
    if 'filePatterns' in props:
      hashables.append(props['filePatterns'])
    return hashables
class PBXContainerItemProxy(XCObject):
  # containerPortal names the project file holding the proxied object: the
  # PBXProject root object when referencing an item in this project file, or
  # a PBXFileReference identifying another project file when referencing an
  # item there.
  #
  # proxyType is 1 when serving as a proxy to an XCTarget (in this project
  # file or another), and 2 when serving as a proxy to a PBXFileReference
  # (in another project file).  Type 2 is used for references to the
  # products of the other project file's targets.
  #
  # Xcode is weird about remoteGlobalIDString.  Usually it's printed without
  # a comment, indicating it's tracked internally simply as a string, but
  # sometimes it's printed with a comment (usually when the object is
  # initially created), indicating it's tracked as a project file object at
  # least sometimes.  This module always tracks it as an object, but contains
  # a hack to prevent it from printing the comment in the project file
  # output.  See _XCKVPrint.
  _schema = XCObject._schema.copy()
  _schema.update({
    'containerPortal':      [0, XCContainerPortal, 0, 1],
    'proxyType':            [0, int, 0, 1],
    'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
    'remoteInfo':           [0, str, 0, 1],
  })

  def __repr__(self):
    portal_name = self._properties['containerPortal'].Name()
    name = '%s.gyp:%s' % (portal_name, self._properties['remoteInfo'])
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # Mix in the hashables of the weakly-referenced objects this proxy
    # points at, so distinct proxies hash distinctly.
    for key in ('containerPortal', 'remoteGlobalIDString'):
      hashables.extend(self._properties[key].Hashables())
    return hashables
class PBXTargetDependency(XCObject):
  # The "target" property should accept an XCTarget (and obviously not
  # NoneType), but XCTarget is defined later in this file and its own schema
  # references PBXTargetDependency, so the circular relationship cannot be
  # expressed directly — Python has no real forward declarations.  The slot
  # is therefore declared with NoneType here and patched to XCTarget after
  # that class is defined (see below).
  #
  # At least one of "name" and "target" is required.
  _schema = XCObject._schema.copy()
  _schema.update({
    'name':        [0, str, 0, 0],
    'target':      [0, None.__class__, 0, 0],
    'targetProxy': [0, PBXContainerItemProxy, 1, 1],
  })

  def __repr__(self):
    name = self._properties.get('name')
    if not name:
      name = self._properties['target'].Name()
    return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))

  def Name(self):
    # Admittedly not the best name, but it's what Xcode uses.
    return self.__class__.__name__

  def Hashables(self):
    # super
    hashables = XCObject.Hashables(self)

    # targetProxy is referenced weakly, so fold its hashables in by hand.
    hashables.extend(self._properties['targetProxy'].Hashables())
    return hashables
class PBXReferenceProxy(XCFileLikeElement):
  # A file-like element resolved through a PBXContainerItemProxy
  # (remoteRef) — per the proxyType discussion on PBXContainerItemProxy,
  # typically a product of a target in another project file.
  _schema = XCFileLikeElement._schema.copy()
  _schema.update({
    'fileType':  [0, str, 0, 1],
    'path':      [0, str, 0, 1],
    'remoteRef': [0, PBXContainerItemProxy, 1, 1],
  })
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject, the XCRemoteObject thing is just
  # to allow PBXProject to be used in the remoteGlobalIDString property of
  # PBXContainerItemProxy.
  #
  # Setting a "name" property at instantiation may also affect "productName",
  # which may in turn affect the "PRODUCT_NAME" build setting in children of
  # "buildConfigurationList".  See __init__ below.
  _schema = XCRemoteObject._schema.copy()
  _schema.update({
    'buildConfigurationList': [0, XCConfigurationList, 1, 1,
                               XCConfigurationList()],
    'buildPhases':            [1, XCBuildPhase, 1, 1, []],
    'dependencies':           [1, PBXTargetDependency, 1, 1, []],
    'name':                   [0, str, 0, 1],
    'productName':            [0, str, 0, 1],
  })

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target, deriving productName and PRODUCT_NAME.

    force_outdir/force_prefix/force_extension are unused here; they are
    accepted for signature compatibility with subclasses (see
    PBXNativeTarget).
    """
    # super
    XCRemoteObject.__init__(self, properties, id, parent)
    # Set up additional defaults not expressed in the schema.  If a "name"
    # property was supplied, set "productName" if it is not present.  Also set
    # the "PRODUCT_NAME" build setting in each configuration, but only if
    # the setting is not present in any build configuration.
    if 'name' in self._properties:
      if not 'productName' in self._properties:
        self.SetProperty('productName', self._properties['name'])
    if 'productName' in self._properties:
      if 'buildConfigurationList' in self._properties:
        configs = self._properties['buildConfigurationList']
        if configs.HasBuildSetting('PRODUCT_NAME') == 0:
          configs.SetBuildSetting('PRODUCT_NAME',
                                  self._properties['productName'])

  def AddDependency(self, other):
    """Adds a dependency on target |other|, which may belong to this
    project file or to a different one (via a project reference)."""
    pbxproject = self.PBXProjectAncestor()
    other_pbxproject = other.PBXProjectAncestor()
    if pbxproject == other_pbxproject:
      # Add a dependency to another target in the same project file.
      container = PBXContainerItemProxy({'containerPortal': pbxproject,
                                         'proxyType': 1,
                                         'remoteGlobalIDString': other,
                                         'remoteInfo': other.Name()})
      dependency = PBXTargetDependency({'target': other,
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)
    else:
      # Add a dependency to a target in a different project file.
      other_project_ref = \
          pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
      container = PBXContainerItemProxy({
        'containerPortal': other_project_ref,
        'proxyType': 1,
        'remoteGlobalIDString': other,
        'remoteInfo': other.Name(),
      })
      dependency = PBXTargetDependency({'name': other.Name(),
                                        'targetProxy': container})
      self.AppendProperty('dependencies', dependency)

  # Proxy all of these through to the build configuration list.

  def ConfigurationNamed(self, name):
    return self._properties['buildConfigurationList'].ConfigurationNamed(name)

  def DefaultConfiguration(self):
    return self._properties['buildConfigurationList'].DefaultConfiguration()

  def HasBuildSetting(self, key):
    return self._properties['buildConfigurationList'].HasBuildSetting(key)

  def GetBuildSetting(self, key):
    return self._properties['buildConfigurationList'].GetBuildSetting(key)

  def SetBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].SetBuildSetting(key, \
                                                                      value)

  def AppendBuildSetting(self, key, value):
    return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
                                                                         value)

  def DelBuildSetting(self, key):
    return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property.  See PBXTargetDependency._schema
# above: the slot was declared with NoneType because XCTarget did not exist
# yet; now that it does, patch the real type in.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
  """An XCTarget that produces a product file reference.

  buildPhases is overridden in the schema to be able to set defaults.

  NOTE: Contrary to most objects, it is advisable to set parent when
  constructing PBXNativeTarget.  A parent of an XCTarget must be a PBXProject
  object.  A parent reference is required for a PBXNativeTarget during
  construction to be able to set up the target defaults for productReference,
  because a PBXBuildFile object must be created for the target and it must
  be added to the PBXProject's mainGroup hierarchy.
  """
  _schema = XCTarget._schema.copy()
  _schema.update({
    'buildPhases':      [1, XCBuildPhase, 1, 1,
                         [PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
    'buildRules':       [1, PBXBuildRule, 1, 1, []],
    'productReference': [0, PBXFileReference, 0, 1],
    'productType':      [0, str, 0, 1],
  })

  # Mapping from Xcode product-types to settings.  The settings are:
  #  filetype : used for explicitFileType in the project file
  #  prefix : the prefix for the file name
  #  suffix : the suffix for the file name
  _product_filetypes = {
    'com.apple.product-type.application':           ['wrapper.application',
                                                     '', '.app'],
    'com.apple.product-type.application.watchapp':  ['wrapper.application',
                                                     '', '.app'],
    'com.apple.product-type.watchkit-extension':    ['wrapper.app-extension',
                                                     '', '.appex'],
    'com.apple.product-type.app-extension':         ['wrapper.app-extension',
                                                     '', '.appex'],
    'com.apple.product-type.bundle':                ['wrapper.cfbundle',
                                                     '', '.bundle'],
    'com.apple.product-type.framework':             ['wrapper.framework',
                                                     '', '.framework'],
    'com.apple.product-type.library.dynamic':       ['compiled.mach-o.dylib',
                                                     'lib', '.dylib'],
    'com.apple.product-type.library.static':        ['archive.ar',
                                                     'lib', '.a'],
    'com.apple.product-type.tool':                  ['compiled.mach-o.executable',
                                                     '', ''],
    'com.apple.product-type.bundle.unit-test':      ['wrapper.cfbundle',
                                                     '', '.xctest'],
    'com.googlecode.gyp.xcode.bundle':              ['compiled.mach-o.dylib',
                                                     '', '.so'],
    'com.apple.product-type.kernel-extension':      ['wrapper.kext',
                                                     '', '.kext'],
  }

  def __init__(self, properties=None, id=None, parent=None,
               force_outdir=None, force_prefix=None, force_extension=None):
    """Initializes the target and, when a parent PBXProject is available,
    synthesizes its productReference.

    force_outdir, force_prefix, and force_extension override the output
    directory, file-name prefix, and file-name extension that would
    otherwise be derived from productType via _product_filetypes.
    """
    # super
    XCTarget.__init__(self, properties, id, parent)

    if 'productName' in self._properties and \
       'productType' in self._properties and \
       'productReference' not in self._properties and \
       self._properties['productType'] in self._product_filetypes:
      products_group = None
      pbxproject = self.PBXProjectAncestor()
      if pbxproject != None:
        products_group = pbxproject.ProductsGroup()

      if products_group != None:
        (filetype, prefix, suffix) = \
            self._product_filetypes[self._properties['productType']]

        # Xcode does not have a distinct type for loadable modules that are
        # pure BSD targets (not in a bundle wrapper).  GYP allows such
        # modules to be specified by setting a target type to
        # loadable_module without having mac_bundle set.  These are mapped
        # to the pseudo-product type com.googlecode.gyp.xcode.bundle.
        #
        # By picking up this special type and converting it to a dynamic
        # library (com.apple.product-type.library.dynamic) with fix-ups,
        # single-file loadable modules can be produced.
        #
        # MACH_O_TYPE is changed to mh_bundle to produce the proper file
        # type (as opposed to mh_dylib).  In order for linking to succeed,
        # DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
        # cleared.  They are meaningless for type mh_bundle.
        #
        # Finally, the .so extension is forcibly applied over the default
        # (.dylib), unless another forced extension is already selected.
        # .dylib is plainly wrong, and .bundle is used by loadable_modules
        # in bundle wrappers (com.apple.product-type.bundle).  .so seems an
        # odd choice because it's used as the extension on many other
        # systems that don't distinguish between linkable shared libraries
        # and non-linkable loadable modules, but there's precedent: Python
        # loadable modules on Mac OS X use an .so extension.
        if self._properties['productType'] == \
           'com.googlecode.gyp.xcode.bundle':
          self._properties['productType'] = \
              'com.apple.product-type.library.dynamic'
          self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
          self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
          self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
          if force_extension is None:
            force_extension = suffix[1:]

        # BUG FIX: this comparison previously used the misspelled
        # identifier 'com.apple.product-type-bundle.unit.test', which
        # matches nothing — the identifier registered in _product_filetypes
        # is 'com.apple.product-type.bundle.unit-test' — so unit-test
        # bundles never received their default '.xctest' extension.
        if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
          if force_extension is None:
            force_extension = suffix[1:]

        if force_extension is not None:
          # Extension override.
          suffix = '.' + force_extension
          # If it's a wrapper (bundle), set WRAPPER_EXTENSION.
          if filetype.startswith('wrapper.'):
            self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
          else:
            self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)

          if filetype.startswith('compiled.mach-o.executable'):
            # Executables carry the forced extension in the product name
            # itself rather than in the path suffix.
            product_name = self._properties['productName']
            product_name += suffix
            suffix = ''
            self.SetProperty('productName', product_name)
            self.SetBuildSetting('PRODUCT_NAME', product_name)

        # Xcode handles most prefixes based on the target type, however
        # there are exceptions.  If a "BSD Dynamic Library" target is added
        # in the Xcode UI, Xcode sets EXECUTABLE_PREFIX.  This check
        # duplicates that behavior.
        if force_prefix is not None:
          prefix = force_prefix
        if filetype.startswith('wrapper.'):
          self.SetBuildSetting('WRAPPER_PREFIX', prefix)
        else:
          self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)

        if force_outdir is not None:
          self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)

        # TODO(tvl): Remove the below hack.
        #    http://code.google.com/p/gyp/issues/detail?id=122
        # Some targets include the prefix in the target_name.  These
        # targets really should just add a product_name setting that
        # doesn't include the prefix.  For example:
        #   target_name = 'libevent', product_name = 'event'
        # This check cleans up for them.
        product_name = self._properties['productName']
        prefix_len = len(prefix)
        if prefix_len and (product_name[:prefix_len] == prefix):
          product_name = product_name[prefix_len:]
          self.SetProperty('productName', product_name)
          self.SetBuildSetting('PRODUCT_NAME', product_name)

        ref_props = {
          'explicitFileType': filetype,
          'includeInIndex':   0,
          'path':             prefix + product_name + suffix,
          'sourceTree':       'BUILT_PRODUCTS_DIR',
        }
        file_ref = PBXFileReference(ref_props)
        products_group.AppendChild(file_ref)
        self.SetProperty('productReference', file_ref)

  def GetBuildPhaseByType(self, type):
    """Returns the sole build phase that is an instance of |type|, or None.

    Asserts if more than one phase of the requested type is present:
    callers use this for phase types that may appear at most once.
    """
    if 'buildPhases' not in self._properties:
      return None

    the_phase = None
    for phase in self._properties['buildPhases']:
      if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project
        # file, but phases like PBXSourcesBuildPhase may only be present
        # singly.  Loop over the entire list of phases and assert if more
        # than one of the desired type is found.
        assert the_phase is None
        the_phase = phase

    return the_phase

  def HeadersPhase(self):
    """Returns the PBXHeadersBuildPhase, creating and inserting one if
    none exists."""
    headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
    if headers_phase is None:
      headers_phase = PBXHeadersBuildPhase()

      # The headers phase should come before the resources, sources, and
      # frameworks phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index, phase in enumerate(self._properties['buildPhases']):
        if isinstance(phase, (PBXResourcesBuildPhase,
                              PBXSourcesBuildPhase,
                              PBXFrameworksBuildPhase)):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, headers_phase)
      headers_phase.parent = self

    return headers_phase

  def ResourcesPhase(self):
    """Returns the PBXResourcesBuildPhase, creating and inserting one if
    none exists."""
    resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
    if resources_phase is None:
      resources_phase = PBXResourcesBuildPhase()

      # The resources phase should come before the sources and frameworks
      # phases, if any.
      insert_at = len(self._properties['buildPhases'])
      for index, phase in enumerate(self._properties['buildPhases']):
        if isinstance(phase, (PBXSourcesBuildPhase,
                              PBXFrameworksBuildPhase)):
          insert_at = index
          break

      self._properties['buildPhases'].insert(insert_at, resources_phase)
      resources_phase.parent = self

    return resources_phase

  def SourcesPhase(self):
    """Returns the PBXSourcesBuildPhase, creating and appending one if
    none exists."""
    sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
    if sources_phase is None:
      sources_phase = PBXSourcesBuildPhase()
      self.AppendProperty('buildPhases', sources_phase)

    return sources_phase

  def FrameworksPhase(self):
    """Returns the PBXFrameworksBuildPhase, creating and appending one if
    none exists."""
    frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
    if frameworks_phase is None:
      frameworks_phase = PBXFrameworksBuildPhase()
      self.AppendProperty('buildPhases', frameworks_phase)

    return frameworks_phase

  def AddDependency(self, other):
    """Adds a dependency on |other| and, when |other| produces something
    linkable (a static library, or a non-mh_bundle dylib/framework), adds
    its product to this target's Frameworks phase."""
    # super
    XCTarget.AddDependency(self, other)

    static_library_type = 'com.apple.product-type.library.static'
    shared_library_type = 'com.apple.product-type.library.dynamic'
    framework_type = 'com.apple.product-type.framework'
    if isinstance(other, PBXNativeTarget) and \
       'productType' in self._properties and \
       self._properties['productType'] != static_library_type and \
       'productType' in other._properties and \
       (other._properties['productType'] == static_library_type or \
        ((other._properties['productType'] == shared_library_type or \
          other._properties['productType'] == framework_type) and \
         ((not other.HasBuildSetting('MACH_O_TYPE')) or
          other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):

      file_ref = other.GetProperty('productReference')

      pbxproject = self.PBXProjectAncestor()
      other_pbxproject = other.PBXProjectAncestor()
      if pbxproject != other_pbxproject:
        # The product lives in another project file; link against the
        # PBXReferenceProxy in this project's reference group instead.
        other_project_product_group = \
            pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
        file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)

      self.FrameworksPhase().AppendProperty('files',
                                            PBXBuildFile({'fileRef':
                                                          file_ref}))
class PBXAggregateTarget(XCTarget):
  """An XCTarget subclass with no schema additions of its own."""
class PBXProject(XCContainerPortal):
# A PBXProject is really just an XCObject, the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
  """Initializes the project.

  path is the .xcodeproj pathname (see the class docstring); it is kept as
  a plain attribute rather than a schema property, alongside the
  _other_pbxprojects cache of cross-project references.
  """
  self.path = path
  self._other_pbxprojects = {}
  # super
  return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
  """Returns the project name: the basename of path, minus any
  '.xcodeproj' extension."""
  name = self.path
  if name.endswith('.xcodeproj'):
    name = name[:-len('.xcodeproj')]
  return posixpath.basename(name)
def Path(self):
  """Returns the .xcodeproj path this project was created with."""
  return self.path

def Comment(self):
  """Returns the fixed annotation Xcode uses for the root project object."""
  return 'Project object'
def Children(self):
  """Returns strong children, including ProductGroups held inside
  projectReferences entries."""
  # super
  children = XCContainerPortal.Children(self)

  # Add children that the schema doesn't know about.  Each projectReferences
  # entry is a dict (in a list) owning a 'ProductGroup'; the schema can't
  # express a dict-in-a-list, so append those groups by hand.
  for reference in self._properties.get('projectReferences', []):
    children.append(reference['ProductGroup'])

  return children
def PBXProjectAncestor(self):
  # A PBXProject is its own PBXProject ancestor: this terminates the
  # ancestor walk that child objects perform.
  return self
def _GroupByName(self, name):
  """Returns the child of mainGroup named |name|, creating the main group
  and/or the child PBXGroup if they do not yet exist."""
  if not 'mainGroup' in self._properties:
    self.SetProperty('mainGroup', PBXGroup())
  main_group = self._properties['mainGroup']
  group = main_group.GetChildByName(name)
  if group is None:
    group = PBXGroup({'name': name})
    main_group.AppendChild(group)
  return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
  """Returns the top-level 'Source' group, creating it if needed."""
  return self._GroupByName('Source')

def ProductsGroup(self):
  """Returns the top-level 'Products' group, creating it if needed."""
  return self._GroupByName('Products')

# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
  """Returns the top-level 'Intermediates' group, creating it if needed."""
  return self._GroupByName('Intermediates')

# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
  """Returns the top-level 'Frameworks' group, creating it if needed."""
  return self._GroupByName('Frameworks')

def ProjectsGroup(self):
  """Returns the top-level 'Projects' group, creating it if needed."""
  return self._GroupByName('Projects')
def RootGroupForPath(self, path):
  """Returns a PBXGroup child of this object to which path should be added.

  This method is intended to choose between SourceGroup and
  IntermediatesGroup on the basis of whether path is present in a source
  directory or an intermediates directory.  For the purposes of this
  determination, any path located within a derived file directory such as
  PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
  directory.

  The returned value is a two-element tuple.  The first element is the
  PBXGroup, and the second element specifies whether that group should be
  organized hierarchically (True) or as a single flat list (False).
  """
  # TODO(mark): make this a class variable and bind to self on call?
  # Also, this list is nowhere near exhaustive.
  # INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
  # gyp.generator.xcode.  There should probably be some way for that module
  # to push the names in, rather than having to hard-code them here.
  source_tree_groups = {
    'DERIVED_FILE_DIR':         (self.IntermediatesGroup, True),
    'INTERMEDIATE_DIR':         (self.IntermediatesGroup, True),
    'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
    'SHARED_INTERMEDIATE_DIR':  (self.IntermediatesGroup, True),
  }
  (source_tree, path) = SourceTreeAndPathFromPath(path)
  if source_tree != None and source_tree in source_tree_groups:
    (group_func, hierarchical) = source_tree_groups[source_tree]
    group = group_func()
    return (group, hierarchical)
  # TODO(mark): make additional choices based on file extension.
  return (self.SourceGroup(), True)
def AddOrGetFileInRootGroup(self, path):
  """Returns a PBXFileReference for path, placed in the group chosen by
  RootGroupForPath's heuristics.

  An existing PBXFileReference for path is reused when present; otherwise
  a new one is created.
  """
  group, hierarchical = self.RootGroupForPath(path)
  return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
  """Calls TakeOverOnlyChild for all groups in the main group.

  recurse is passed through to each group's TakeOverOnlyChild; non-group
  children of mainGroup are skipped.
  """
  for group in self._properties['mainGroup']._properties['children']:
    if isinstance(group, PBXGroup):
      group.TakeOverOnlyChild(recurse)
def SortGroups(self):
  """Sorts the entire mainGroup hierarchy.

  Root-level groups are ordered by CompareRootGroup; the Products group is
  reordered to match the order of the targets that produce the products;
  every other group is sorted recursively via SortGroup.
  """
  # Sort the children of the mainGroup (like "Source" and "Products")
  # according to their defined order.
  # NOTE: sorted(cmp=...) is Python 2 only; this would need key=/cmp_to_key
  # under Python 3.
  self._properties['mainGroup']._properties['children'] = \
      sorted(self._properties['mainGroup']._properties['children'],
             cmp=lambda x,y: x.CompareRootGroup(y))
  # Sort everything else by putting group before files, and going
  # alphabetically by name within sections of groups and files.  SortGroup
  # is recursive.
  for group in self._properties['mainGroup']._properties['children']:
    if not isinstance(group, PBXGroup):
      continue
    if group.Name() == 'Products':
      # The Products group is a special case.  Instead of sorting
      # alphabetically, sort things in the order of the targets that
      # produce the products.  To do this, just build up a new list of
      # products based on the targets.
      products = []
      for target in self._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
          continue
        product = target._properties['productReference']
        # Make sure that the product is already in the products group.
        assert product in group._properties['children']
        products.append(product)
      # Make sure that this process doesn't miss anything that was already
      # in the products group.
      assert len(products) == len(group._properties['children'])
      group._properties['children'] = products
    else:
      group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
    """Add a reference to another project file (via PBXProject object) to this
    one.

    Returns [ProductGroup, ProjectRef].  ProductGroup is a PBXGroup object in
    this project file that contains a PBXReferenceProxy object for each
    product of each PBXNativeTarget in the other project file.  ProjectRef is
    a PBXFileReference to the other project file.

    If this project file already references the other project file, the
    existing ProductGroup and ProjectRef are returned.  The ProductGroup will
    still be updated if necessary.
    """
    if not 'projectReferences' in self._properties:
        self._properties['projectReferences'] = []
    product_group = None
    project_ref = None
    if not other_pbxproject in self._other_pbxprojects:
        # This project file isn't yet linked to the other one.  Establish the
        # link.
        product_group = PBXGroup({'name': 'Products'})
        # ProductGroup is strong.
        product_group.parent = self
        # There's nothing unique about this PBXGroup, and if left alone, it
        # will wind up with the same set of hashables as all other PBXGroup
        # objects owned by the projectReferences list.  Add the hashables of
        # the remote PBXProject that it's related to.
        product_group._hashables.extend(other_pbxproject.Hashables())
        # The other project reports its path as relative to the same
        # directory that this project's path is relative to.  The other
        # project's path is not necessarily already relative to this project.
        # Figure out the pathname that this project needs to use to refer to
        # the other one.
        this_path = posixpath.dirname(self.Path())
        projectDirPath = self.GetProperty('projectDirPath')
        if projectDirPath:
            if posixpath.isabs(projectDirPath[0]):
                this_path = projectDirPath
            else:
                this_path = posixpath.join(this_path, projectDirPath)
        other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
        # ProjectRef is weak (it's owned by the mainGroup hierarchy).
        project_ref = PBXFileReference({
            'lastKnownFileType': 'wrapper.pb-project',
            'path': other_path,
            'sourceTree': 'SOURCE_ROOT',
        })
        self.ProjectsGroup().AppendChild(project_ref)
        ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
        self._other_pbxprojects[other_pbxproject] = ref_dict
        self.AppendProperty('projectReferences', ref_dict)
        # Xcode seems to sort this list case-insensitively.
        self._properties['projectReferences'] = \
            sorted(self._properties['projectReferences'], cmp=lambda x,y:
                   cmp(x['ProjectRef'].Name().lower(),
                       y['ProjectRef'].Name().lower()))
    else:
        # The link already exists.  Pull out the relevant data.
        project_ref_dict = self._other_pbxprojects[other_pbxproject]
        product_group = project_ref_dict['ProductGroup']
        project_ref = project_ref_dict['ProjectRef']
    # Always refresh the product proxies, even for an existing link.
    self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
    inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
    targets = other_pbxproject.GetProperty('targets')
    if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
        dir_path = project_ref._properties['path']
        product_group._hashables.extend(dir_path)
    return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
    """Return True if all configurations have a unique 'SYMROOT' attribute.

    The value of inherit_unique_symroot decides whether a configuration
    that does not define an explicit 'SYMROOT' is assumed to inherit a
    unique value from its parent.
    """
    # Fix: the original called self._DefinedSymroots(target) twice (once
    # for the final check and once for the loop); compute the set once.
    symroots = self._DefinedSymroots(target)
    for s in symroots:
        if (s is not None and not self._IsUniqueSymrootForTarget(s) or
                s is None and not inherit_unique_symroot):
            return False
    # An empty set means no configuration defines 'SYMROOT' at all; fall
    # back to whatever the parent provides.
    return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
    """Collect every 'SYMROOT' value defined by |target|'s configurations.

    A configuration lacking the attribute contributes None to the result.
    If no configuration defines 'SYMROOT' at all, an empty set is returned
    rather than {None}.
    """
    config_list = target.GetProperty('buildConfigurationList')
    found = set()
    for config in config_list.GetProperty('buildConfigurations'):
        settings = config.GetProperty('buildSettings')
        if 'SYMROOT' in settings:
            found.add(settings['SYMROOT'])
        else:
            found.add(None)
    # All-undefined collapses to the empty set.
    if len(found) == 1 and None in found:
        return set()
    return found
def _IsUniqueSymrootForTarget(self, symroot):
# This method returns True if all configurations in target contain a
# 'SYMROOT' attribute that is unique for the given target. A value is
# unique, if the Xcode macro '$SRCROOT' appears in it in any form.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
                            project_ref):
    """Populate product_group with proxies for other_pbxproject's products.

    For each PBXNativeTarget of the other project not yet represented, a
    PBXContainerItemProxy / PBXReferenceProxy pair is created and appended
    to product_group.
    """
    # TODO(mark): This only adds references to products in other_pbxproject
    # when they don't exist in this pbxproject.  Perhaps it should also
    # remove references from this pbxproject that are no longer present in
    # other_pbxproject.  Perhaps it should update various properties if they
    # change.
    for target in other_pbxproject._properties['targets']:
        if not isinstance(target, PBXNativeTarget):
            continue
        other_fileref = target._properties['productReference']
        if product_group.GetChildByRemoteObject(other_fileref) is None:
            # Xcode sets remoteInfo to the name of the target and not the
            # name of its product, despite this proxy being a reference to
            # the product.
            container_item = PBXContainerItemProxy({
                'containerPortal': project_ref,
                'proxyType': 2,
                'remoteGlobalIDString': other_fileref,
                'remoteInfo': target.Name()
            })
            # TODO(mark): Does sourceTree get copied straight over from the
            # other project?  Can the other project ever have
            # lastKnownFileType here instead of explicitFileType?  (Use it
            # if so?)  Can path ever be unset?  (I don't think so.)  Can
            # other_fileref have name set, and does it impact the
            # PBXReferenceProxy if so?  These are the questions that perhaps
            # will be answered one day.
            reference_proxy = PBXReferenceProxy({
                'fileType': other_fileref._properties['explicitFileType'],
                'path': other_fileref._properties['path'],
                'sourceTree': other_fileref._properties['sourceTree'],
                'remoteRef': container_item,
            })
            product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
    """Sort each remote ProductGroup to match the remote target order.

    For each remote project file, sort the associated ProductGroup in the
    same order that the targets are sorted in the remote project file.
    This is the sort order used by Xcode.
    """
    def CompareProducts(x, y, remote_products):
        # x and y are PBXReferenceProxy objects.  Go through their
        # associated PBXContainerItem to get the remote PBXFileReference,
        # which will be present in the remote_products list.
        x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
        y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
        x_index = remote_products.index(x_remote)
        y_index = remote_products.index(y_remote)
        # Use the order of each remote PBXFileReference in remote_products
        # to determine the sort order.
        return cmp(x_index, y_index)

    for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
        # Build up a list of products in the remote project file, ordered
        # the same as the targets that produce them.
        remote_products = []
        for target in other_pbxproject._properties['targets']:
            if not isinstance(target, PBXNativeTarget):
                continue
            remote_products.append(target._properties['productReference'])
        # Sort the PBXReferenceProxy children according to the list of
        # remote products.
        product_group = ref_dict['ProductGroup']
        product_group._properties['children'] = sorted(
            product_group._properties['children'],
            cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
    """Root object representing an entire Xcode project file.

    Not a "real" Xcode object (it has no ID of its own); it wraps the
    rootObject PBXProject and knows how to serialize the whole file.
    """

    _schema = XCObject._schema.copy()
    _schema.update({
        'archiveVersion': [0, int, 0, 1, 1],
        'classes': [0, dict, 0, 1, {}],
        'objectVersion': [0, int, 0, 1, 46],
        'rootObject': [0, PBXProject, 1, 1],
    })

    def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
        """Delegate ID computation to the real root object.

        Although XCProjectFile is implemented here as an XCObject, it's not
        a proper object in the Xcode sense, and it certainly doesn't have
        its own ID.  Pass through an attempt to update IDs to the real root
        object.
        """
        if recursive:
            self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)

    def Print(self, file=sys.stdout):
        """Serialize the whole project to |file| in Xcode's plist format."""
        self.VerifyHasRequiredProperties()
        # Add the special "objects" property, which will be caught and
        # handled separately during printing.  This structure allows a
        # fairly standard loop do the normal printing.
        self._properties['objects'] = {}
        self._XCPrint(file, 0, '// !$*UTF8*$!\n')
        if self._should_print_single_line:
            self._XCPrint(file, 0, '{ ')
        else:
            self._XCPrint(file, 0, '{\n')
        for property, value in sorted(self._properties.iteritems(),
                                      cmp=lambda x, y: cmp(x, y)):
            if property == 'objects':
                self._PrintObjects(file)
            else:
                self._XCKVPrint(file, 1, property, value)
        self._XCPrint(file, 0, '}\n')
        # Remove the temporary key so the object is left unchanged.
        del self._properties['objects']

    def _PrintObjects(self, file):
        """Print the "objects" section, grouped by class name, sorted by id."""
        if self._should_print_single_line:
            self._XCPrint(file, 0, 'objects = {')
        else:
            self._XCPrint(file, 1, 'objects = {\n')
        objects_by_class = {}
        for object in self.Descendants():
            if object == self:
                continue
            class_name = object.__class__.__name__
            if not class_name in objects_by_class:
                objects_by_class[class_name] = []
            objects_by_class[class_name].append(object)
        for class_name in sorted(objects_by_class):
            self._XCPrint(file, 0, '\n')
            self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
            for object in sorted(objects_by_class[class_name],
                                 cmp=lambda x, y: cmp(x.id, y.id)):
                object.Print(file)
            self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
        if self._should_print_single_line:
            self._XCPrint(file, 0, '}; ')
        else:
            self._XCPrint(file, 1, '};\n')
| mit |
openworm/tracker-commons | src/Python/wcon/wcon_parser.py | 3 | 28807 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Methods
------------
reject_duplicates
Classes
------------
WCONWorms
"""
import six
import warnings
from collections import OrderedDict
from six import StringIO
from os import path
import os
import shutil
import json
import jsonschema
import zipfile
import numpy as np
import pandas as pd
idx = pd.IndexSlice
from .wcon_data import parse_data, convert_origin
from .wcon_data import df_upsert, data_as_array
from .wcon_data import get_sorted_ordered_dict
from .wcon_data import reverse_backwards_worms, sort_odict
from .measurement_unit import MeasurementUnit
class WCONWorms():
"""
A set of worm tracker data for one or more worms, as specified by
the WCON standard.
Attributes
-------------
units: dict
May be empty, but is never None since 'units' is required
to be specified.
metadata: dict
If 'metadata' was not specified, metadata is None.
The values in this dict might be nested into further dicts or other
data types.
_data: dictionary of Pandas DataFrames [private]
num_worms: int [property]
data_as_dict: dict [property]
data: DataFrame if num_worms == 1 else dict of DataFrames [property]
[Note: the "files" key is not persisted unless the .load
factory method is used.]
Public-Facing Methods
-------------
load_from_file (JSON_path) [class method]
save_to_file (JSON_path, pretty_print)
to_canon [property]
__add__ [use "+"]
__eq__ [use "=="]
Usage
-------------
# From a string literal:
from io import StringIO
w2 = WCONWorms.load(StringIO('{"units":{"t":"s","x":"mm","y":"mm"}, '
'"data":[]}'))
# WCONWorms.load_from_file accepts any valid WCON, but .save_to_file
# output is always "canonical" WCON, which makes specific choices about
# how to arrange and format the WCON file. This way the functional
# equality of any two WCON files can be tested by this:
w1 = WCONWorms.load_from_file('file1.wcon')
w2 = WCONWorms.load_from_file('file2.wcon')
assert(w1 == w2)
# or:
w1.save_to_file('file1.wcon')
w2.save_to_file('file2.wcon')
import filecmp
assert(filecmp.cmp('file1.wcon', file2.wcon'))
Custom WCON versions
--------------------
Any top-level key other than the basic:
- files
- units
- metadata
- data
... is ignored. Handling them requires subclassing WCONWorms.
"""
"""
================================================================
Properties
================================================================
"""
basic_keys = ['files', 'units', 'metadata', 'data']
@property
def num_worms(self):
try:
return self._num_worms
except AttributeError:
self._num_worms = len(self.worm_ids)
return self._num_worms
@property
def worm_ids(self):
try:
return self._worm_ids
except AttributeError:
self._worm_ids = list(self._data.keys())
return self._worm_ids
@property
def data(self):
"""
Return all worms as one giant DataFrame. Since this can
be inefficient for sparse multiworm data, it is only "lazily"
calculated, i.e. once requested, not at object initialization
"""
try:
return self._data_df
except AttributeError:
if self.num_worms == 0:
self._data_df = None
else:
# Get a list of all dfs
dfs = list(self._data.values())
l = dfs[0]
# Merge all the worm dfs together into one
for r in dfs[1:]:
l = pd.merge(l, r, left_index=True, right_index=True,
how='outer')
self._data_df = l
return self._data_df
@property
def data_as_odict(self):
"""
Return the native ordered-dict-of-DataFrames, the cheapest
option for sparse multiworm data
"""
return self._data
@property
def schema(self):
    """The WCON JSON schema, loaded from disk on first access and cached."""
    if not hasattr(self, '_schema'):
        schema_path = path.join(path.abspath(path.dirname(__file__)),
                                "wcon_schema.json")
        with open(schema_path, "r") as f:
            self._schema = json.loads(f.read())
    return self._schema
@classmethod
def validate_from_schema(cls, wcon_string):
    """Validate a WCON string; raises jsonschema.ValidationError on failure."""
    document = json.load(StringIO(wcon_string))
    jsonschema.validate(document, cls().schema)
@property
def canonical_units(self):
"""
A dictionary of canonical versions of the unit for all quantities
"""
return {k: self.units[k].canonical_unit for k in self.units.keys()}
@property
def as_ordered_dict(self):
    """
    Return a representation of the worm as an OrderedDict.  This is most
    useful when saving to a file.

    Returns the canonical version of the data, with units in
    canonical form, and the data converted to canonical form.

    The three keys are:

    - 'units'
    - 'metadata'
    - 'data'
    """
    # Not strictly required by JSON but nice to order the four top-level
    # keys so we use OrderedDict here instead of dict.
    ord_dict = OrderedDict()

    # A dictionary of the canonical unit strings for all quantities except
    # aspect_size, which is generated at runtime.
    units_obj = {k: self.units[k].canonical_unit_string
                 for k in self.units.keys() if k != 'aspect_size'}

    # Sort the units so that every time we save this file, it produces
    # exactly the same output.  Not required in the JSON standard, but
    # nice to have.
    units_obj = get_sorted_ordered_dict(units_obj)
    ord_dict.update({'units': units_obj})

    # The only optional object is "metadata" since "files" is not
    # necessary since we don't currently support saving to more than
    # one chunk.
    if self.metadata:
        # Again, sort the metadata (recursively) so that the same file
        # is produced each time that can stand up to diffing.
        metadata_obj = get_sorted_ordered_dict(self.metadata)
        ord_dict.update({'metadata': metadata_obj})

    # Serialize the canonical form of the data, worm by worm.
    canonical = self.to_canon
    if canonical._data == {}:
        data_arr = []
    else:
        src = canonical.data_as_odict
        data_arr = []
        for worm_id in src:
            data_arr.extend(data_as_array(src[worm_id]))
    ord_dict.update({'data': data_arr})
    return ord_dict
"""
================================================================
Comparison Methods
================================================================
"""
@classmethod
def are_units_equal(cls, w1, w2):
"""
Returns
---------
boolean
True if w1.units == w2.units, with the only conversion being
between units that mean the same thing
(e.g. 'mm' and 'millimetres')
False otherwise
"""
if set(w1.units.keys()) != set(w2.units.keys()):
return False
for k in w1.units.keys():
if w1.units[k] != w2.units[k]:
return False
return True
@classmethod
def is_metadata_equal(cls, w1, w2):
"""
Returns
----------
boolean
True if w1.metadata == w2.metadata
"""
return w1.metadata == w2.metadata
@classmethod
def is_data_equal(cls, w1, w2, convert_units=True):
    """
    Parameters
    -------------
    w1, w2: WCONWorms objects
        The objects whose .data attributes will be compared
    convert_units: bool
        If True, the data will first be converted to a standard form
        so that if one worm uses millimetres and the other metres, the
        data can still be properly compared

    TODO:
        Add a "threshold" parameter so that perfect equality is not
        the only option
    """
    if w1.num_worms != w2.num_worms:
        return False
    if convert_units:
        d1 = w1.to_canon._data
        d2 = w2.to_canon._data
    else:
        d1 = w1._data
        d2 = w2._data
    for worm_id in w1.worm_ids:
        # A worm id may be missing from either dict; treat "missing" as
        # None so the XOR comparison below handles all cases uniformly.
        try:
            df1 = d1[worm_id]
        except KeyError:
            df1 = None
        try:
            df2 = d2[worm_id]
        except KeyError:
            df2 = None
        if (df1 is None) ^ (df2 is None):
            # If one is None but the other is not (XOR), data is not equal
            return False
        elif df1 is None and df2 is None:
            # If both None, they are equal
            continue
        if not pd_equals(df1, df2):
            return False
    return True
def __eq__(self, other):
    """
    Comparison operator (overloaded)

    Equivalent to .is_data_equal and .is_metadata_equal

    Units are converted

    Special units are not considered
    """
    if not WCONWorms.is_data_equal(self, other):
        return False
    return WCONWorms.is_metadata_equal(self, other)
def __ne__(self, other):
    """Inequality: the logical negation of __eq__."""
    equal = self.__eq__(other)
    return not equal
def __add__(self, other):
    """
    Addition operator (overloaded): merge two worm groups.
    """
    return type(self).merge(self, other)
@property
def is_canon(self):
"""
Returns whether all units are already in their canonical forms.
"""
for data_key in self.units:
mu = self.units[data_key]
if mu.unit_string != mu.canonical_unit_string:
return False
return True
@property
def to_canon(self):
    """
    Return a new WCONWorms object, with the same .metadata, but with
    .units and .data changed so they are in standard form.
    """
    w = WCONWorms()
    w.metadata = self.metadata
    w.units = self.canonical_units

    # Corner case: no data at all.
    if self._data == {}:
        w._data = OrderedDict({})
        return w

    # Deep-copy each worm's frame so conversion never mutates self.
    w._data = OrderedDict()
    for worm_id in self.worm_ids:
        w._data[worm_id] = self._data[worm_id].copy()

    # Go through each "units" key
    for data_key in self.units:
        mu = self.units[data_key]
        # Don't bother to "convert" units that are already in their
        # canonical form.
        if mu.unit_string == mu.canonical_unit_string:
            continue
        tmu = self.units['t']
        for worm_id in w.worm_ids:
            try:
                # Apply across all worm ids and all aspects
                mu_slice = \
                    w._data[worm_id].loc[:, idx[:, data_key, :]].copy()
                w._data[worm_id].loc[:, idx[:, data_key, :]] = \
                    mu_slice.applymap(mu.to_canon)
            except KeyError:
                # Just ignore cases where there are "units" entries but no
                # corresponding data
                pass
            # Special case: change the dataframe index, i.e. the time units
            # NOTE(review): this runs once per non-canonical data_key; if
            # more than one key is non-canonical while 't' is also
            # non-canonical, the index would be converted repeatedly —
            # verify against upstream tracker-commons behaviour.
            if tmu.unit_string != tmu.canonical_unit_string:
                # Create a function that can be applied elementwise to the
                # index values
                t_converter = np.vectorize(tmu.to_canon)
                new_index = t_converter(w._data[worm_id].index.values)
                w._data[worm_id].set_index(new_index, inplace=True)
    return w
@classmethod
def merge(cls, w1, w2):
    """
    Merge two worm groups, in their standard forms.

    Units can differ, but not in their standard forms.

    Metadata must be identical.

    Data can overlap, as long as it does not clash.

    Clashes are checked at a low level of granularity:
    e.g. if two worms have different metadata but the individual metadata
    entries do not conflict, this method will still fail and raise an
    AssertionError.
    """
    if not cls.is_metadata_equal(w1, w2):
        raise AssertionError("Metadata conflicts between worms to be "
                             "merged.")

    # Work on canonical copies so unit differences disappear.
    w1c = w1.to_canon
    w2c = w2.to_canon
    for worm_id in w2c.worm_ids:
        if worm_id in w1c.worm_ids:
            try:
                # Try to upsert w2c's data into w1c.  If we cannot
                # without an error being raised, the data clashes.
                w1c._data[worm_id] = df_upsert(w1c._data[worm_id],
                                               w2c._data[worm_id])
            except AssertionError as err:
                raise AssertionError("Data conflicts between worms to "
                                     "be merged on worm {0}: {1}"
                                     .format(str(worm_id), err))
        else:
            # The worm isn't in the 1st group, so just add it
            w1c._data[worm_id] = w2c._data[worm_id]

    # Sort w1c's list of worms
    w1c._data = sort_odict(w1c._data)

    # Create a fresh WCONWorms object to reset all the lazily-evaluated
    # properties that may change, such as num_worms, in the merged worm
    merged_worm = WCONWorms()
    merged_worm._data = w1c._data
    merged_worm.metadata = w2c.metadata
    merged_worm.units = w1c.units
    return merged_worm
"""
================================================================
Load / save methods
================================================================
"""
@classmethod
def validate_filename(cls, JSON_path, is_zipped):
    """
    Perform simple checks on the file path.

    JSON_path: str
        The path to the file to be evaluated
    is_zipped: bool
        Whether or not the path is for a zip archive
    """
    assert isinstance(JSON_path, six.string_types)
    assert len(JSON_path) > 0

    if is_zipped:
        if JSON_path[-4:].upper() != '.ZIP':
            raise Exception("A zip archive like %s must have an "
                            "extension ending in '.zip'" % JSON_path)
        # Drop the '.zip' suffix so the remaining name is validated below.
        JSON_path = JSON_path[:-4]

    warning_message = (' is either less than 5 characters,'
                       'consists of only the extension ".WCON", or '
                       'does not end in ".WCON", the recommended '
                       'file extension.')

    if len(JSON_path) <= 5 or JSON_path[-5:].upper() != '.WCON':
        subject = ('Zip file ends properly in .zip, but the '
                   'prefix' if is_zipped else 'The file name ')
        warnings.warn(subject + warning_message)
def save_to_file(self, JSON_path, pretty_print=False,
                 compress_file=False, num_chunks=1):
    """
    Save this object to the path specified.  The object
    will be serialized as a WCON JSON text file.

    Parameters
    -----------
    JSON_path: str
        The path to save this object to.  A warning is raised if the path
        does not end in ".WCON"
    pretty_print: bool
        If True, adds newlines and spaces to make the file more human-
        readable.  Otherwise, the JSON output will use as few characters
        as possible.
    compress_file: bool
        If True, saves a compressed version of the WCON JSON text file
    num_chunks: int
        The number of chunks to break this object into.  If
        num_chunks > 1 then num_chunks files will be created.
        Filenames will have "_1", "_2", etc., added
        to the end of the filename after the last path separator
        (e.g. "/") and then, before the last "." (if any)
    """
    if num_chunks > 1:
        raise NotImplementedError("Saving a worm to more than one chunk "
                                  "has not yet been implemented")
    self.validate_filename(JSON_path, compress_file)

    with open(JSON_path, 'w') as outfile:
        json.dump(self.as_ordered_dict, outfile,
                  indent=4 if pretty_print else None)
    if compress_file:
        # Zip the file to a TEMP file, then rename to the original,
        # overwriting it with the zipped archive.
        zf = zipfile.ZipFile(JSON_path + '.TEMP',
                             'w', zipfile.ZIP_DEFLATED)
        zf.write(JSON_path)
        zf.close()
        os.rename(JSON_path + '.TEMP', JSON_path)
@classmethod
def load_from_file(cls, JSON_path,
                   load_prev_chunks=True,
                   load_next_chunks=True,
                   validate_against_schema=True):
    """
    Factory method returning a merged WCONWorms instance of the file
    located at JSON_path and all related "chunks" as specified in the
    "files" element of the file.

    Uses recursion if there are multiple chunks.

    Parameters
    -------------
    JSON_path: str
        A file path to a file that can be opened
    validate_against_schema: bool
        If True, validate before trying to load the file, otherwise don't.
        jsonschema.validate takes 99% of the compute time for large files
        so use with caution.
    load_prev_chunks: bool
        If a "files" key is present, load the previous chunks and merge
        them with this one.  If not present, return only the current
        file's worm.
    load_next_chunks: bool
        If a "files" key is present, load the next chunks and merge
        them with this one.  If not present, return only the current
        file's worm.
    """
    print("Loading file: " + JSON_path)
    is_zipped = zipfile.is_zipfile(JSON_path)
    cls.validate_filename(JSON_path, is_zipped)

    # Check if the specified file is compressed
    if is_zipped:
        zf = zipfile.ZipFile(JSON_path, 'r')
        zf_namelist = zf.namelist()
        if len(zf_namelist) <= 0:
            raise Exception("Filename %s is a zip archive, which is fine, "
                            "but the archive does not contain any files.")
        elif len(zf_namelist) == 1:
            # Just one file is in the archive.
            print("The file is a zip archive with one file. Attempting "
                  "to uncompress and then load.")
            wcon_bytes = zf.read(zf.namelist()[0])
            wcon_string = wcon_bytes.decode("utf-8")
            infile = StringIO(wcon_string)
            w_current = cls.load(infile, validate_against_schema)
        else:
            print("The zip archive contains multiple files. We will "
                  "extract to a temporary folder and then try to load "
                  "the first file in the archive, then delete the "
                  "temporary folder.")
            # Note: the first file is all we should need since we assume
            # the files in the archive are linked together using
            # their respective JSON "files" entries

            # Make a temporary archive folder
            cur_path = os.path.abspath(os.path.dirname(JSON_path))
            archive_path = os.path.join(cur_path, '_zip_archive')
            if os.path.exists(archive_path):
                raise Exception("Archive path %s already exists!"
                                % archive_path)
            else:
                os.makedirs(archive_path)

            # Extract zip archive to temporary folder
            for name in zf_namelist:
                zf.extract(name, archive_path)
            zf.close()

            # Call load_from_file on the first file; it will chase the
            # "files" links to the rest of the archive's contents.
            first_path = os.path.join(archive_path, zf_namelist[0])
            w = cls.load_from_file(first_path)

            # Delete the temporary folder
            shutil.rmtree(archive_path, ignore_errors=True)
            return w
    else:
        # The file is not a zip file, so assume it's just plaintext JSON
        with open(JSON_path, 'r') as infile:
            w_current = cls.load(infile, validate_against_schema)

    # CASE 1: NO "files" OBJECT, hence no multiple files.  We are done.
    if w_current.files is None:
        return w_current

    # CASE 2: "files" object exists but has no prev/next entries; there
    # is nothing to merge.
    # BUG FIX: the original tested "w.cur.files", referencing a name that
    # does not exist in this scope and so raised an error whenever a
    # "files" object lacked a 'next' entry.
    if ('next' not in w_current.files) and ('prev' not in w_current.files):
        return w_current

    # CASE 3: MULTIPLE FILES
    # The merge operations below will blast away the .files attribute
    # so we need to save a local copy.
    current_files = w_current.files

    # The schema guarantees that if "files" is present, "current" will
    # exist, is not null, and is a string at least one character long.
    cur_ext = current_files['current']

    # e.g. cur_filename = 'filename_2.wcon'
    #      cur_ext = '_2', prefix = 'filename', suffix = '.wcon'
    cur_filename = JSON_path
    name_offset = cur_filename.find(cur_ext)
    if name_offset == -1:
        raise AssertionError(
            'Mismatch between the filename given in the file "' +
            cur_ext +
            '" and the file we loaded from "' +
            cur_filename +
            '".')
    path_string = cur_filename[:name_offset]

    load_chunks = {'prev': load_prev_chunks,
                   'next': load_next_chunks}
    for direction in ['prev', 'next']:
        # If we are supposed to load chunks in this direction and at least
        # one is listed, load the adjacent chunk (recursing in the same
        # direction only) and merge it with the current chunk.
        # .get() treats a missing 'prev'/'next' key the same as a null or
        # empty entry, avoiding a KeyError the original code risked.
        chunk_list = current_files.get(direction)
        if load_chunks[direction] and chunk_list:
            w_new = cls.load_from_file(path_string + chunk_list[0],
                                       direction == 'prev',
                                       direction == 'next',
                                       validate_against_schema)
            w_current = w_current + w_new

    # If no merging took place, we'll still need to delete the "files"
    # attribute if it's present (i.e. if both "prev" and "next" were null)
    if hasattr(w_current, "files"):
        del(w_current.files)

    return w_current
@classmethod
def load(cls, JSON_stream, validate_against_schema=True):
    """
    Factory method to create a WCONWorms instance from an open stream.

    This does NOT load chunks, because a file stream does not
    have a file name.  In order to load chunks, you must invoke the
    factory method load_from_file.  You will be passing it a file path
    from which it can find the other files/chunks.

    Parameters
    -------------
    JSON_stream: a text stream implementing .read()
        e.g. an object inheriting from TextIOBase
    validate_against_schema: bool
        If True, validate before trying to load the file, otherwise don't.
        jsonschema.validate takes 99% of the compute time for large files
        so use with caution.
    """
    w = cls()
    serialized_data = JSON_stream.read()

    # Load the whole JSON file into a nested dict.  Any duplicate
    # keys raise an exception since we've hooked in reject_duplicates
    root = json.loads(serialized_data, object_pairs_hook=reject_duplicates)

    # ===================================================
    # BASIC TOP-LEVEL VALIDATION AGAINST THE SCHEMA
    if validate_against_schema:
        jsonschema.validate(root, w.schema)

    # ===================================================
    # HANDLE THE REQUIRED ELEMENTS: 'units', 'data'
    w.units = root['units']
    for key in w.units:
        w.units[key] = MeasurementUnit.create(w.units[key])

    # The only data key without units should be aspect_size, since it's
    # generated during the construction of the pandas dataframe;
    # it is a dimensionless quantity
    w.units['aspect_size'] = MeasurementUnit.create('')

    if len(root['data']) > 0:
        w._data = parse_data(root['data'])
        # Shift the coordinates by the amount in the offsets 'ox' and 'oy'
        for worm_id in w.worm_ids:
            convert_origin(w._data[worm_id])
            # Any worms with head=='R' should have their
            # coordinates reversed and head reset to 'L'
            reverse_backwards_worms(w._data[worm_id])
    else:
        # "data": {}
        w._data = OrderedDict({})

    # Raise error if there are any data keys without units
    units_keys = set(w.units.keys())
    for worm_id in w._data:
        df = w._data[worm_id]
        if df is None:
            data_keys = set()
        else:
            data_keys = set(df.columns.get_level_values(1))
        # "head" and "ventral" don't require units.
        keys_missing_units = data_keys - \
            units_keys - set(['head', 'ventral'])
        if keys_missing_units != set():
            raise AssertionError('In worm ' + str(worm_id) + ', the '
                                 'following data keys are missing '
                                 'entries in the "units" object: ' +
                                 str(keys_missing_units))

    # ===================================================
    # HANDLE THE OPTIONAL ELEMENTS: 'files', 'metadata'
    if 'files' in root:
        w.files = root['files']
        # Normalize single 'next'/'prev' entries to one-element lists so
        # downstream code can always assume lists.
        # BUG FIX: the original used hasattr/getattr/setattr on w.files,
        # which is a plain dict parsed from JSON; attribute access never
        # matches a dict key, so string entries were silently left
        # unwrapped.  Use key access instead.
        for direction in ['next', 'prev']:
            if direction in w.files and \
                    isinstance(w.files[direction], str):
                w.files[direction] = [w.files[direction]]
    else:
        w.files = None

    if 'metadata' in root:
        w.metadata = root['metadata']
    else:
        w.metadata = None

    return w
def pd_equals(df1, df2):
    """Return True if two DataFrames have identical columns, index and values.

    DataFrame.equals is deliberately not used because it returned False
    for no apparent reason with one of the centroid unit tests.
    """
    if not df1.columns.identical(df2.columns):
        return False
    if not df1.index.identical(df2.index):
        return False
    try:
        # Fix: pd.util.testing was deprecated and removed in pandas 2.0;
        # pd.testing is the supported location (available since 0.20).
        pd.testing.assert_frame_equal(df1, df2)
    except AssertionError:
        return False
    return True
def reject_duplicates(ordered_pairs):
    """object_pairs_hook for json.load(s) that raises KeyError on duplicates."""
    result = {}
    for key, value in ordered_pairs:
        if key in result:
            raise KeyError("Duplicate key: %r" % (key,))
        result[key] = value
    return result
| mit |
mnunberg/couchbase-python-client | txcouchbase/tests/test_txconn.py | 1 | 2526 | # Copyright 2013, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from twisted.internet import reactor, defer
from couchbase.exceptions import (
BucketNotFoundError,
ObjectDestroyedError)
from couchbase.tests.base import ConnectionTestCase
from couchbase.connstr import ConnectionString
from txcouchbase.tests.base import gen_base
from txcouchbase.bucket import Bucket
class BasicConnectionTest(gen_base(ConnectionTestCase)):
    """Smoke tests for establishing (and failing to establish) a
    txcouchbase connection through the Twisted Deferred API."""

    def testConnectionSuccess(self):
        # A valid bucket should connect and set the `connected` flag.
        cb = self.make_connection()
        d = cb.connect()
        d.addCallback(lambda x: self.assertTrue(cb.connected))
        return d

    def testConnectionFailure(self):
        # Connecting to a nonexistent bucket must errback with
        # BucketNotFoundError.
        cb = self.make_connection(bucket='blahblah')
        d = cb.connect()
        d.addCallback(lambda x: x, cb)
        return self.assertFailure(d, BucketNotFoundError)

    def testBadEvent(self):
        # Only known event names may be registered; anything else raises.
        cb = self.make_connection()
        self.assertRaises(ValueError, cb.registerDeferred,
                          'blah',
                          defer.Deferred())
        d = defer.Deferred()
        cb.registerDeferred('connect', d)
        d.addBoth(lambda x: None)
        return d

    def testMultiHost(self):
        # One bad host:port (':10') plus one good one should still connect.
        info = self.cluster_info
        cs = ConnectionString.parse(self.make_connargs()['connection_string'])
        cs.hosts = [info.host + ':' + '10', info.host + ':' + str(info.port)]
        cb = self.make_connection(connection_string=cs.encode())
        d = cb.connect()
        d.addCallback(lambda x: self.assertTrue(cb.connected))
        return d

    def testConnstrFirstArg(self):
        # A Bucket can be constructed from a bare connection string.
        # (A fetched-but-unused `cluster_info` local was removed here.)
        s = self.make_connargs()['connection_string']
        cb = Bucket(s)
        d = cb.connect().addCallback(lambda x: self.assertTrue(cb.connected))
        self.register_cleanup(cb)
        return d

    def testConnectionDestroyed(self):
        # Connecting without keeping the object alive should errback with
        # ObjectDestroyedError rather than hang.
        cb = self.make_connection()
        d = cb.connect()
        self.assertFailure(d, ObjectDestroyedError)
        return d
| apache-2.0 |
winklerand/pandas | pandas/tests/test_resample.py | 1 | 135497 | # pylint: disable=E1101
from warnings import catch_warnings
from datetime import datetime, timedelta
from functools import partial
from textwrap import dedent
import pytz
import pytest
import dateutil
import numpy as np
import pandas as pd
import pandas.tseries.offsets as offsets
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas import (Series, DataFrame, Panel, Index, isna,
notna, Timestamp)
from pandas.core.dtypes.generic import ABCSeries, ABCDataFrame
from pandas.compat import range, lrange, zip, product, OrderedDict
from pandas.core.base import SpecificationError, AbstractMethodError
from pandas.errors import UnsupportedFunctionCall
from pandas.core.groupby import DataError
from pandas._libs.tslibs.resolution import DAYS
from pandas.tseries.frequencies import MONTHS
from pandas.tseries.frequencies import to_offset
from pandas.core.indexes.datetimes import date_range
from pandas.tseries.offsets import Minute, BDay
from pandas.core.indexes.period import period_range, PeriodIndex, Period
from pandas.core.resample import (DatetimeIndex, TimeGrouper,
DatetimeIndexResampler)
from pandas.core.indexes.timedeltas import timedelta_range, TimedeltaIndex
from pandas.util.testing import (assert_series_equal, assert_almost_equal,
assert_frame_equal, assert_index_equal)
from pandas._libs.period import IncompatibleFrequency
# Shared fixtures for the resample tests below.
bday = BDay()  # business-day offset used by several date_range fixtures
# The various methods we support
# Aggregations that reduce each bin to a single value (downsampling).
downsample_methods = ['min', 'max', 'first', 'last', 'sum', 'mean', 'sem',
'median', 'prod', 'var', 'ohlc']
# Methods that are also meaningful when upsampling.
upsample_methods = ['count', 'size']
# Methods that only exist on a Series resampler.
series_methods = ['nunique']
resample_methods = downsample_methods + upsample_methods + series_methods
def _simple_ts(start, end, freq='D'):
rng = date_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
def _simple_pts(start, end, freq='D'):
rng = period_range(start, end, freq=freq)
return Series(np.random.randn(len(rng)), index=rng)
class TestResampleAPI(object):
def setup_method(self, method):
    # Build a minute-frequency series over 2005-01-01..2005-01-10 plus a
    # matching three-column frame; used by most tests in this class.
    dti = DatetimeIndex(start=datetime(2005, 1, 1),
                        end=datetime(2005, 1, 10), freq='Min')
    self.series = Series(np.random.rand(len(dti)), dti)
    self.frame = DataFrame(
        {'A': self.series, 'B': self.series, 'C': np.arange(len(dti))})
def test_str(self):
    # The Resampler repr should expose its grouping parameters.
    r = self.series.resample('H')
    assert ('DatetimeIndexResampler [freq=<Hour>, axis=0, closed=left, '
            'label=left, convention=start, base=0]' in str(r))
def test_api(self):
    # resample(...).mean() returns the same container type it was called
    # on; 2005-01-01..2005-01-10 at hourly frequency yields 217 bins.
    r = self.series.resample('H')
    result = r.mean()
    assert isinstance(result, Series)
    assert len(result) == 217

    r = self.series.to_frame().resample('H')
    result = r.mean()
    assert isinstance(result, DataFrame)
    assert len(result) == 217
def test_api_changes_v018(self):
# change from .resample(....., how=...)
# to .resample(......).how()
r = self.series.resample('H')
assert isinstance(r, DatetimeIndexResampler)
for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H', how=how)
expected = getattr(self.series.resample('H'), how)()
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H', how='ohlc')
expected = self.series.resample('H').ohlc()
tm.assert_frame_equal(result, expected)
# compat for pandas-like methods
for how in ['sort_values', 'isna']:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
getattr(r, how)()
# invalids as these can be setting operations
r = self.series.resample('H')
pytest.raises(ValueError, lambda: r.iloc[0])
pytest.raises(ValueError, lambda: r.iat[0])
pytest.raises(ValueError, lambda: r.loc[0])
pytest.raises(ValueError, lambda: r.loc[
Timestamp('2013-01-01 00:00:00', offset='H')])
pytest.raises(ValueError, lambda: r.at[
Timestamp('2013-01-01 00:00:00', offset='H')])
def f():
r[0] = 5
pytest.raises(ValueError, f)
# str/repr
r = self.series.resample('H')
with tm.assert_produces_warning(None):
str(r)
with tm.assert_produces_warning(None):
repr(r)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
tm.assert_numpy_array_equal(np.array(r), np.array(r.mean()))
# masquerade as Series/DataFrame as needed for API compat
assert isinstance(self.series.resample('H'), ABCSeries)
assert not isinstance(self.frame.resample('H'), ABCSeries)
assert not isinstance(self.series.resample('H'), ABCDataFrame)
assert isinstance(self.frame.resample('H'), ABCDataFrame)
# bin numeric ops
for op in ['__add__', '__mul__', '__truediv__', '__div__', '__sub__']:
if getattr(self.series, op, None) is None:
continue
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(2), Series)
# unary numeric ops
for op in ['__pos__', '__neg__', '__abs__', '__inv__']:
if getattr(self.series, op, None) is None:
continue
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(), Series)
# comparison ops
for op in ['__lt__', '__le__', '__gt__', '__ge__', '__eq__', '__ne__']:
r = self.series.resample('H')
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert isinstance(getattr(r, op)(2), Series)
# IPython introspection shouldn't trigger warning GH 13618
for op in ['_repr_json', '_repr_latex',
'_ipython_canary_method_should_not_exist_']:
r = self.series.resample('H')
with tm.assert_produces_warning(None):
getattr(r, op, None)
# getitem compat
df = self.series.to_frame('foo')
# same as prior versions for DataFrame
pytest.raises(KeyError, lambda: df.resample('H')[0])
# compat for Series
# but we cannot be sure that we need a warning here
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H')[0]
expected = self.series.resample('H').mean()[0]
assert result == expected
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = self.series.resample('H')['2005-01-09 23:00:00']
expected = self.series.resample('H').mean()['2005-01-09 23:00:00']
assert result == expected
def test_groupby_resample_api(self):
    # GH 12448
    # .groupby(...).resample(...) hitting warnings
    # when appropriate
    df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                          periods=4,
                                          freq='W'),
                    'group': [1, 1, 2, 2],
                    'val': [5, 6, 7, 8]}).set_index('date')

    # replication step: build the expected daily-ffilled frame by hand
    i = pd.date_range('2016-01-03', periods=8).tolist() + \
        pd.date_range('2016-01-17', periods=8).tolist()
    index = pd.MultiIndex.from_arrays([[1] * 8 + [2] * 8, i],
                                      names=['group', 'date'])
    expected = DataFrame({'val': [5] * 7 + [6] + [7] * 7 + [8]},
                         index=index)
    result = df.groupby('group').apply(
        lambda x: x.resample('1D').ffill())[['val']]
    assert_frame_equal(result, expected)
def test_groupby_resample_on_api(self):
    # GH 15021
    # .groupby(...).resample(on=...) results in an unexpected
    # keyword warning.
    df = DataFrame({'key': ['A', 'B'] * 5,
                    'dates': pd.date_range('2016-01-01', periods=10),
                    'values': np.random.randn(10)})

    # resampling on a column must match resampling on the index
    expected = df.set_index('dates').groupby('key').resample('D').mean()
    result = df.groupby('key').resample('D', on='dates').mean()
    assert_frame_equal(result, expected)
@td.skip_if_no_mpl
def test_plot_api(self):
    # .resample(....).plot(...)
    # hitting warnings
    # GH 12448
    s = Series(np.random.randn(60),
               index=date_range('2016-01-01', periods=60, freq='1min'))
    # plotting directly off the (lazy) Resampler is deprecated
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        result = s.resample('15min').plot()
    tm.assert_is_valid_plot_return_object(result)

    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        result = s.resample('15min', how='sum').plot()
    tm.assert_is_valid_plot_return_object(result)
def test_getitem(self):
    """Column selection on a frame Resampler narrows ``_selected_obj``."""
    r = self.frame.resample('H')
    tm.assert_index_equal(r._selected_obj.columns, self.frame.columns)

    r = self.frame.resample('H')['B']
    assert r._selected_obj.name == self.frame.columns[1]

    # technically this is allowed
    r = self.frame.resample('H')['A', 'B']
    tm.assert_index_equal(r._selected_obj.columns,
                          self.frame.columns[[0, 1]])

    # NOTE(review): the original repeated the tuple selection verbatim;
    # the second occurrence now exercises list selection instead, which
    # reaches the same columns and removes the redundant duplicate.
    r = self.frame.resample('H')[['A', 'B']]
    tm.assert_index_equal(r._selected_obj.columns,
                          self.frame.columns[[0, 1]])
def test_select_bad_cols(self):
    # selecting any missing column must raise KeyError
    g = self.frame.resample('H')
    pytest.raises(KeyError, g.__getitem__, ['D'])

    pytest.raises(KeyError, g.__getitem__, ['A', 'D'])
    with tm.assert_raises_regex(KeyError, '^[^A]+$'):
        # A should not be referenced as a bad column...
        # will have to rethink regex if you change message!
        g[['A', 'D']]
def test_attribute_access(self):
    # column access via attribute matches __getitem__
    r = self.frame.resample('H')
    tm.assert_series_equal(r.A.sum(), r['A'].sum())

    # getting a missing column raises AttributeError
    pytest.raises(AttributeError, lambda: r.F)

    # setting an attribute on a Resampler is rejected
    def f():
        r.F = 'bah'

    pytest.raises(ValueError, f)
def test_api_compat_before_use(self):
    # make sure that we are setting the binner
    # on these attributes
    for attr in ['groups', 'ngroups', 'indices']:
        rng = pd.date_range('1/1/2012', periods=100, freq='S')
        ts = Series(np.arange(len(rng)), index=rng)
        rs = ts.resample('30s')

        # before use: attribute access alone must not fail
        getattr(rs, attr)

        # after grouper is initialized is ok
        rs.mean()
        getattr(rs, attr)
def test_skip_nuisance(self):
    """Non-numeric ('nuisance') columns are dropped by frame aggregation.

    NOTE(review): renamed from ``tests_skip_nuisance`` — the ``tests_``
    prefix does not match pytest's default ``test_*`` collection pattern,
    so this test was silently never run.
    """
    df = self.frame
    df['D'] = 'foo'
    r = df.resample('H')
    result = r[['A', 'B']].sum()
    expected = pd.concat([r.A.sum(), r.B.sum()], axis=1)
    assert_frame_equal(result, expected)

    # whole-frame sum silently skips the object column 'D'
    expected = r[['A', 'B', 'C']].sum()
    result = r.sum()
    assert_frame_equal(result, expected)

# Backwards-compatible alias for any external reference to the old name
# (the ``tests_`` prefix keeps it out of pytest collection).
tests_skip_nuisance = test_skip_nuisance
def test_downsample_but_actually_upsampling(self):
    # this is reindex / asfreq
    # a coarser frequency over a finer index keeps only the bin edges
    rng = pd.date_range('1/1/2012', periods=100, freq='S')
    ts = Series(np.arange(len(rng), dtype='int64'), index=rng)
    result = ts.resample('20s').asfreq()
    expected = Series([0, 20, 40, 60, 80],
                      index=pd.date_range('2012-01-01 00:00:00',
                                          freq='20s',
                                          periods=5))
    assert_series_equal(result, expected)
def test_combined_up_downsampling_of_irregular(self):
    # since we are really doing an operation like this
    # ts2.resample('2s').mean().ffill()
    # preserve these semantics
    rng = pd.date_range('1/1/2012', periods=100, freq='S')
    ts = Series(np.arange(len(rng)), index=rng)
    ts2 = ts.iloc[[0, 1, 2, 3, 5, 7, 11, 15, 16, 25, 30]]

    # the deprecated how=/fill_method= form must match the chained form
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        result = ts2.resample('2s', how='mean', fill_method='ffill')
    expected = ts2.resample('2s').mean().ffill()
    assert_series_equal(result, expected)
def test_transform(self):
    # resample().transform must match the equivalent
    # groupby(Grouper(freq=...)).transform
    r = self.series.resample('20min')
    expected = self.series.groupby(
        pd.Grouper(freq='20min')).transform('mean')
    result = r.transform('mean')
    assert_series_equal(result, expected)
def test_fillna(self):
    # need to upsample here
    rng = pd.date_range('1/1/2012', periods=10, freq='2S')
    ts = Series(np.arange(len(rng), dtype='int64'), index=rng)
    r = ts.resample('s')

    # fillna(method=...) must be equivalent to the named methods
    expected = r.ffill()
    result = r.fillna(method='ffill')
    assert_series_equal(result, expected)

    expected = r.bfill()
    result = r.fillna(method='bfill')
    assert_series_equal(result, expected)

    # a scalar fill value is not supported on a Resampler
    with pytest.raises(ValueError):
        r.fillna(0)
def test_apply_without_aggregation(self):
    # both resample and groupby should work w/o aggregation
    r = self.series.resample('20min')
    g = self.series.groupby(pd.Grouper(freq='20min'))

    for t in [g, r]:
        # identity apply must round-trip the original series
        result = t.apply(lambda x: x)
        assert_series_equal(result, self.series)
def test_agg_consistency(self):
    # make sure that we are consistent across
    # similar aggregations with and w/o selection list
    df = DataFrame(np.random.randn(1000, 3),
                   index=pd.date_range('1/1/2012', freq='S', periods=1000),
                   columns=['A', 'B', 'C'])

    r = df.resample('3T')

    # renaming via a flat dict is deprecated, hence the FutureWarning
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        expected = r[['A', 'B', 'C']].agg({'r1': 'mean', 'r2': 'sum'})
        result = r.agg({'r1': 'mean', 'r2': 'sum'})
    assert_frame_equal(result, expected)
# TODO: once GH 14008 is fixed, move these tests into
# `Base` test class
def test_agg(self):
    """Aggregation-spec API: list/dict specs must agree across the three
    Resampler entry points and the equivalent TimeGrouper groupby."""
    # test with all three Resampler apis and TimeGrouper
    np.random.seed(1234)
    index = date_range(datetime(2005, 1, 1),
                       datetime(2005, 1, 10), freq='D')
    index.name = 'date'
    df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
    df_col = df.reset_index()
    df_mult = df_col.copy()
    df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                              names=['index', 'date'])
    r = df.resample('2D')
    cases = [
        r,
        df_col.resample('2D', on='date'),
        df_mult.resample('2D', level='date'),
        df.groupby(pd.Grouper(freq='2D'))
    ]

    a_mean = r['A'].mean()
    a_std = r['A'].std()
    a_sum = r['A'].sum()
    b_mean = r['B'].mean()
    b_std = r['B'].std()
    b_sum = r['B'].sum()

    # list of functions -> MultiIndex columns
    expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
    expected.columns = pd.MultiIndex.from_product([['A', 'B'],
                                                   ['mean', 'std']])
    for t in cases:
        result = t.aggregate([np.mean, np.std])
        assert_frame_equal(result, expected)

    # dict of column -> function
    expected = pd.concat([a_mean, b_std], axis=1)
    for t in cases:
        result = t.aggregate({'A': np.mean,
                              'B': np.std})
        assert_frame_equal(result, expected, check_like=True)

    # dict of column -> list of functions
    expected = pd.concat([a_mean, a_std], axis=1)
    expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                  ('A', 'std')])
    for t in cases:
        result = t.aggregate({'A': ['mean', 'std']})
        assert_frame_equal(result, expected)

    # list of functions on a selected column -> flat columns
    expected = pd.concat([a_mean, a_sum], axis=1)
    expected.columns = ['mean', 'sum']
    for t in cases:
        result = t['A'].aggregate(['mean', 'sum'])
        assert_frame_equal(result, expected)

    # deprecated: renaming dict on a single column
    expected = pd.concat([a_mean, a_sum], axis=1)
    expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                  ('A', 'sum')])
    for t in cases:
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'}})
        assert_frame_equal(result, expected, check_like=True)

    # deprecated: nested renaming dicts for two columns
    expected = pd.concat([a_mean, a_sum, b_mean, b_sum], axis=1)
    expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                  ('A', 'sum'),
                                                  ('B', 'mean2'),
                                                  ('B', 'sum2')])
    for t in cases:
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = t.aggregate({'A': {'mean': 'mean', 'sum': 'sum'},
                                  'B': {'mean2': 'mean', 'sum2': 'sum'}})
        assert_frame_equal(result, expected, check_like=True)

    # dict of column -> list for both columns
    expected = pd.concat([a_mean, a_std, b_mean, b_std], axis=1)
    expected.columns = pd.MultiIndex.from_tuples([('A', 'mean'),
                                                  ('A', 'std'),
                                                  ('B', 'mean'),
                                                  ('B', 'std')])
    for t in cases:
        result = t.aggregate({'A': ['mean', 'std'],
                              'B': ['mean', 'std']})
        assert_frame_equal(result, expected, check_like=True)
    # NOTE(review): a trailing `expected` with ('r1'/'r2', ...) tuple
    # columns was built here but never used by any assertion; that dead
    # code was removed.
def test_agg_misc(self):
# test with all three Resampler apis and TimeGrouper
np.random.seed(1234)
index = date_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
index.name = 'date'
df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
df_col = df.reset_index()
df_mult = df_col.copy()
df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
names=['index', 'date'])
r = df.resample('2D')
cases = [
r,
df_col.resample('2D', on='date'),
df_mult.resample('2D', level='date'),
df.groupby(pd.Grouper(freq='2D'))
]
# passed lambda
for t in cases:
result = t.agg({'A': np.sum,
'B': lambda x: np.std(x, ddof=1)})
rcustom = t['B'].apply(lambda x: np.std(x, ddof=1))
expected = pd.concat([r['A'].sum(), rcustom], axis=1)
assert_frame_equal(result, expected, check_like=True)
# agg with renamers
expected = pd.concat([t['A'].sum(),
t['B'].sum(),
t['A'].mean(),
t['B'].mean()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('result1', 'A'),
('result1', 'B'),
('result2', 'A'),
('result2', 'B')])
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t[['A', 'B']].agg(OrderedDict([('result1', np.sum),
('result2', np.mean)]))
assert_frame_equal(result, expected, check_like=True)
# agg with different hows
expected = pd.concat([t['A'].sum(),
t['A'].std(),
t['B'].mean(),
t['B'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
for t in cases:
result = t.agg(OrderedDict([('A', ['sum', 'std']),
('B', ['mean', 'std'])]))
assert_frame_equal(result, expected, check_like=True)
# equivalent of using a selection list / or not
for t in cases:
result = t[['A', 'B']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# series like aggs
for t in cases:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t['A'].agg({'A': ['sum', 'std']})
expected = pd.concat([t['A'].sum(),
t['A'].std()],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std')])
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([t['A'].agg(['sum', 'std']),
t['A'].agg(['mean', 'std'])],
axis=1)
expected.columns = pd.MultiIndex.from_tuples([('A', 'sum'),
('A', 'std'),
('B', 'mean'),
('B', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = t['A'].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
assert_frame_equal(result, expected, check_like=True)
# errors
# invalid names in the agg specification
for t in cases:
def f():
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
t[['A']].agg({'A': ['sum', 'std'],
'B': ['mean', 'std']})
pytest.raises(SpecificationError, f)
def test_agg_nested_dicts(self):
    # nested renaming dicts: {'name': {'col': funcs}} is invalid, while
    # {'col': {'name': funcs}} is (deprecated but) accepted
    np.random.seed(1234)
    index = date_range(datetime(2005, 1, 1),
                       datetime(2005, 1, 10), freq='D')
    index.name = 'date'
    df = DataFrame(np.random.rand(10, 2), columns=list('AB'), index=index)
    df_col = df.reset_index()
    df_mult = df_col.copy()
    df_mult.index = pd.MultiIndex.from_arrays([range(10), df.index],
                                              names=['index', 'date'])
    r = df.resample('2D')
    cases = [
        r,
        df_col.resample('2D', on='date'),
        df_mult.resample('2D', level='date'),
        df.groupby(pd.Grouper(freq='2D'))
    ]

    for t in cases:
        def f():
            t.aggregate({'r1': {'A': ['mean', 'sum']},
                         'r2': {'B': ['mean', 'sum']}})

        pytest.raises(ValueError, f)

    for t in cases:
        expected = pd.concat([t['A'].mean(), t['A'].std(), t['B'].mean(),
                              t['B'].std()], axis=1)
        expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
            'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])

        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = t[['A', 'B']].agg({'A': {'ra': ['mean', 'std']},
                                        'B': {'rb': ['mean', 'std']}})
        assert_frame_equal(result, expected, check_like=True)

        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = t.agg({'A': {'ra': ['mean', 'std']},
                            'B': {'rb': ['mean', 'std']}})
        assert_frame_equal(result, expected, check_like=True)
def test_selection_api_validation(self):
    # GH 13500
    index = date_range(datetime(2005, 1, 1),
                       datetime(2005, 1, 10), freq='D')

    rng = np.arange(len(index), dtype=np.int64)
    df = DataFrame({'date': index, 'a': rng},
                   index=pd.MultiIndex.from_arrays([rng, index],
                                                   names=['v', 'd']))
    df_exp = DataFrame({'a': rng}, index=index)

    # non DatetimeIndex
    with pytest.raises(TypeError):
        df.resample('2D', level='v')

    # on= and level= are mutually exclusive
    with pytest.raises(ValueError):
        df.resample('2D', on='date', level='d')

    # on= / level= accept a single label only
    with pytest.raises(TypeError):
        df.resample('2D', on=['a', 'date'])

    with pytest.raises(KeyError):
        df.resample('2D', level=['a', 'date'])

    # upsampling not allowed
    with pytest.raises(ValueError):
        df.resample('2D', level='d').asfreq()

    with pytest.raises(ValueError):
        df.resample('2D', on='date').asfreq()

    # valid on=/level= selections agree with resampling the plain frame
    exp = df_exp.resample('2D').sum()
    exp.index.name = 'date'
    assert_frame_equal(exp, df.resample('2D', on='date').sum())

    exp.index.name = 'd'
    assert_frame_equal(exp, df.resample('2D', level='d').sum())
class Base(object):
    """
    base class for resampling testing, calling
    .create_series() generates a series of each index type
    """

    def create_index(self, *args, **kwargs):
        """ return the _index_factory created using the args, kwargs """
        factory = self._index_factory()
        return factory(*args, **kwargs)

    @pytest.fixture
    def _index_start(self):
        # default start of the fixture index
        return datetime(2005, 1, 1)

    @pytest.fixture
    def _index_end(self):
        # default end of the fixture index
        return datetime(2005, 1, 10)

    @pytest.fixture
    def _index_freq(self):
        # default frequency of the fixture index
        return 'D'

    @pytest.fixture
    def index(self, _index_start, _index_end, _index_freq):
        # index of the concrete type produced by the subclass factory
        return self.create_index(_index_start, _index_end, freq=_index_freq)

    @pytest.fixture
    def _series_name(self):
        # subclasses must provide the name for the fixture series
        raise AbstractMethodError(self)

    @pytest.fixture
    def _static_values(self, index):
        # deterministic values aligned with the fixture index
        return np.arange(len(index))

    @pytest.fixture
    def series(self, index, _series_name, _static_values):
        return Series(_static_values, index=index, name=_series_name)

    @pytest.fixture
    def frame(self, index, _static_values):
        return DataFrame({'value': _static_values}, index=index)

    @pytest.fixture(params=[Series, DataFrame])
    def series_and_frame(self, request, index, _series_name, _static_values):
        # parametrized fixture: the same data as both Series and DataFrame
        if request.param == Series:
            return Series(_static_values, index=index, name=_series_name)
        if request.param == DataFrame:
            return DataFrame({'value': _static_values}, index=index)

    @pytest.mark.parametrize('freq', ['2D', '1H'])
    def test_asfreq(self, series_and_frame, freq):
        # asfreq must be equivalent to a plain reindex onto the new index
        obj = series_and_frame

        result = obj.resample(freq).asfreq()
        if freq == '2D':
            new_index = obj.index.take(np.arange(0, len(obj.index), 2))
            new_index.freq = to_offset('2D')
        else:
            new_index = self.create_index(obj.index[0], obj.index[-1],
                                          freq=freq)
        expected = obj.reindex(new_index)
        assert_almost_equal(result, expected)

    def test_asfreq_fill_value(self):
        # test for fill value during resampling, issue 3715

        s = self.create_series()

        result = s.resample('1H').asfreq()
        new_index = self.create_index(s.index[0], s.index[-1], freq='1H')
        expected = s.reindex(new_index)
        assert_series_equal(result, expected)

        frame = s.to_frame('value')
        frame.iloc[1] = None
        result = frame.resample('1H').asfreq(fill_value=4.0)
        new_index = self.create_index(frame.index[0],
                                      frame.index[-1], freq='1H')
        expected = frame.reindex(new_index, fill_value=4.0)
        assert_frame_equal(result, expected)

    def test_resample_interpolate(self):
        # # 12925
        # asfreq().interpolate() and interpolate() directly must agree
        df = self.create_series().to_frame('value')
        assert_frame_equal(
            df.resample('1T').asfreq().interpolate(),
            df.resample('1T').interpolate())

    def test_raises_on_non_datetimelike_index(self):
        # this is a non datetimelike index
        xp = DataFrame()
        pytest.raises(TypeError, lambda: xp.resample('A').mean())

    def test_resample_empty_series(self):
        # GH12771 & GH12868

        s = self.create_series()[:0]

        for freq in ['M', 'D', 'H']:
            # need to test for ohlc from GH13083
            methods = [method for method in resample_methods
                       if method != 'ohlc']
            for method in methods:
                result = getattr(s.resample(freq), method)()

                expected = s.copy()
                expected.index = s.index._shallow_copy(freq=freq)
                assert_index_equal(result.index, expected.index)
                assert result.index.freq == expected.index.freq
                assert_series_equal(result, expected, check_dtype=False)

    def test_resample_empty_dataframe(self):
        # GH13212
        index = self.create_series().index[:0]
        f = DataFrame(index=index)

        for freq in ['M', 'D', 'H']:
            # count retains dimensions too
            methods = downsample_methods + upsample_methods
            for method in methods:
                result = getattr(f.resample(freq), method)()
                if method != 'size':
                    expected = f.copy()
                else:
                    # GH14962
                    expected = Series([])

                expected.index = f.index._shallow_copy(freq=freq)
                assert_index_equal(result.index, expected.index)
                assert result.index.freq == expected.index.freq
                assert_almost_equal(result, expected, check_dtype=False)

        # test size for GH13212 (currently stays as df)

    def test_resample_empty_dtypes(self):
        # Empty series were sometimes causing a segfault (for the functions
        # with Cython bounds-checking disabled) or an IndexError. We just run
        # them to ensure they no longer do. (GH #10228)
        for index in tm.all_timeseries_index_generator(0):
            for dtype in (np.float, np.int, np.object, 'datetime64[ns]'):
                for how in downsample_methods + upsample_methods:
                    empty_series = Series([], index, dtype)
                    try:
                        getattr(empty_series.resample('d'), how)()
                    except DataError:
                        # Ignore these since some combinations are invalid
                        # (ex: doing mean with dtype of np.object)
                        pass

    def test_resample_loffset_arg_type(self):
        # GH 13218, 15002
        df = self.create_series().to_frame('value')
        expected_means = [df.values[i:i + 2].mean()
                          for i in range(0, len(df.values), 2)]
        expected_index = self.create_index(df.index[0],
                                           periods=len(df.index) / 2,
                                           freq='2D')

        # loffset coerces PeriodIndex to DateTimeIndex
        if isinstance(expected_index, PeriodIndex):
            expected_index = expected_index.to_timestamp()

        expected_index += timedelta(hours=2)
        expected = DataFrame({'value': expected_means}, index=expected_index)

        for arg in ['mean', {'value': 'mean'}, ['mean']]:

            result_agg = df.resample('2D', loffset='2H').agg(arg)

            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result_how = df.resample('2D', how=arg, loffset='2H')

            if isinstance(arg, list):
                expected.columns = pd.MultiIndex.from_tuples([('value',
                                                               'mean')])

            # GH 13022, 7687 - TODO: fix resample w/ TimedeltaIndex
            if isinstance(expected.index, TimedeltaIndex):
                with pytest.raises(AssertionError):
                    assert_frame_equal(result_agg, expected)
                    assert_frame_equal(result_how, expected)
            else:
                assert_frame_equal(result_agg, expected)
                assert_frame_equal(result_how, expected)

    def test_apply_to_empty_series(self):
        # GH 14313
        # apply on an empty resampler must behave like a reducing agg
        series = self.create_series()[:0]

        for freq in ['M', 'D', 'H']:
            result = series.resample(freq).apply(lambda x: 1)
            expected = series.resample(freq).apply(np.sum)

            assert_series_equal(result, expected, check_dtype=False)
class TestDatetimeIndex(Base):
_index_factory = lambda x: date_range
@pytest.fixture
def _series_name(self):
    # name given to the fixture series for this index type
    return 'dti'
def setup_method(self, method):
    # minute-frequency series spanning 2005-01-01..2005-01-10, shared by
    # the non-fixture tests in this class
    dti = DatetimeIndex(start=datetime(2005, 1, 1),
                        end=datetime(2005, 1, 10), freq='Min')
    self.series = Series(np.random.rand(len(dti)), dti)
def create_series(self):
    """Return a daily Series named 'dti' over 2005-01-01..2005-01-10."""
    idx = date_range(datetime(2005, 1, 1), datetime(2005, 1, 10), freq='D')
    values = np.arange(len(idx))
    return Series(values, index=idx, name='dti')
def test_custom_grouper(self):
dti = DatetimeIndex(freq='Min', start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10))
s = Series(np.array([1] * len(dti)), index=dti, dtype='int64')
b = TimeGrouper(Minute(5))
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
b = TimeGrouper(Minute(5), closed='right', label='right')
g = s.groupby(b)
# check all cython functions work
funcs = ['add', 'mean', 'prod', 'ohlc', 'min', 'max', 'var']
for f in funcs:
g._cython_agg_general(f)
assert g.ngroups == 2593
assert notna(g.mean()).all()
# construct expected val
arr = [1] + [5] * 2592
idx = dti[0:-1:5]
idx = idx.append(dti[-1:])
expect = Series(arr, index=idx)
# GH2763 - return in put dtype if we can
result = g.agg(np.sum)
assert_series_equal(result, expect)
df = DataFrame(np.random.rand(len(dti), 10),
index=dti, dtype='float64')
r = df.groupby(b).agg(np.sum)
assert len(r.columns) == 10
assert len(r.index) == 2593
def test_resample_basic(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
result = s.resample('5min', closed='right', label='right').mean()
exp_idx = date_range('1/1/2000', periods=4, freq='5min', name='index')
expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
index=exp_idx)
assert_series_equal(result, expected)
assert result.index.name == 'index'
result = s.resample('5min', closed='left', label='right').mean()
exp_idx = date_range('1/1/2000 00:05', periods=3, freq='5min',
name='index')
expected = Series([s[:5].mean(), s[5:10].mean(),
s[10:].mean()], index=exp_idx)
assert_series_equal(result, expected)
s = self.series
result = s.resample('5Min').last()
grouper = TimeGrouper(Minute(5), closed='left', label='left')
expect = s.groupby(grouper).agg(lambda x: x[-1])
assert_series_equal(result, expect)
def test_resample_how(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min',
name='index')
s = Series(np.random.randn(14), index=rng)
grouplist = np.ones_like(s)
grouplist[0] = 0
grouplist[1:6] = 1
grouplist[6:11] = 2
grouplist[11:] = 3
args = downsample_methods
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
inds = date_range('1/1/2000', periods=4, freq='5min', name='index')
for arg in args:
if arg == 'ohlc':
func = _ohlc
else:
func = arg
try:
result = getattr(s.resample(
'5min', closed='right', label='right'), arg)()
expected = s.groupby(grouplist).agg(func)
assert result.index.name == 'index'
if arg == 'ohlc':
expected = DataFrame(expected.values.tolist())
expected.columns = ['open', 'high', 'low', 'close']
expected.index = Index(inds, name='index')
assert_frame_equal(result, expected)
else:
expected.index = inds
assert_series_equal(result, expected)
except BaseException as exc:
exc.args += ('how=%s' % arg,)
raise
def test_numpy_compat(self):
    # see gh-12811
    # numpy-style positional/keyword args must be rejected on resample ops
    s = Series([1, 2, 3, 4, 5], index=date_range(
        '20130101', periods=5, freq='s'))
    r = s.resample('2s')

    msg = "numpy operations are not valid with resample"

    for func in ('min', 'max', 'sum', 'prod',
                 'mean', 'var', 'std'):
        tm.assert_raises_regex(UnsupportedFunctionCall, msg,
                               getattr(r, func),
                               func, 1, 2, 3)
        tm.assert_raises_regex(UnsupportedFunctionCall, msg,
                               getattr(r, func), axis=1)
def test_resample_how_callables(self):
# GH 7929
data = np.arange(5, dtype=np.int64)
ind = pd.DatetimeIndex(start='2014-01-01', periods=len(data), freq='d')
df = DataFrame({"A": data, "B": data}, index=ind)
def fn(x, a=1):
return str(type(x))
class fn_class:
def __call__(self, x):
return str(type(x))
df_standard = df.resample("M").apply(fn)
df_lambda = df.resample("M").apply(lambda x: str(type(x)))
df_partial = df.resample("M").apply(partial(fn))
df_partial2 = df.resample("M").apply(partial(fn, a=2))
df_class = df.resample("M").apply(fn_class())
assert_frame_equal(df_standard, df_lambda)
assert_frame_equal(df_standard, df_partial)
assert_frame_equal(df_standard, df_partial2)
assert_frame_equal(df_standard, df_class)
def test_resample_with_timedeltas(self):
expected = DataFrame({'A': np.arange(1480)})
expected = expected.groupby(expected.index // 30).sum()
expected.index = pd.timedelta_range('0 days', freq='30T', periods=50)
df = DataFrame({'A': np.arange(1480)}, index=pd.to_timedelta(
np.arange(1480), unit='T'))
result = df.resample('30T').sum()
assert_frame_equal(result, expected)
s = df['A']
result = s.resample('30T').sum()
assert_series_equal(result, expected['A'])
def test_resample_single_period_timedelta(self):
    # downsampling a 5-second TimedeltaIndex window into 2s bins
    s = Series(list(range(5)), index=pd.timedelta_range(
        '1 day', freq='s', periods=5))
    result = s.resample('2s').sum()
    expected = Series([1, 5, 4], index=pd.timedelta_range(
        '1 day', freq='2s', periods=3))
    assert_series_equal(result, expected)
    def test_resample_timedelta_idempotency(self):
        """Resampling at the index's own frequency is a no-op (GH 12072)."""
        # GH 12072
        index = pd.timedelta_range('0', periods=9, freq='10L')
        series = Series(range(9), index=index)
        result = series.resample('10L').mean()
        expected = series
        assert_series_equal(result, expected)
    def test_resample_rounding(self):
        """Sub-second timestamps bin into the correct buckets (GH 8371).

        Counts of events resampled at several odd frequencies (6s/7s/11s/
        13s/17s) are checked against hand-computed expected sums; rounding
        of the fractional seconds must not shift events between bins.
        """
        # GH 8371
        # odd results when rounding is needed

        data = """date,time,value
11-08-2014,00:00:01.093,1
11-08-2014,00:00:02.159,1
11-08-2014,00:00:02.667,1
11-08-2014,00:00:03.175,1
11-08-2014,00:00:07.058,1
11-08-2014,00:00:07.362,1
11-08-2014,00:00:08.324,1
11-08-2014,00:00:08.830,1
11-08-2014,00:00:08.982,1
11-08-2014,00:00:09.815,1
11-08-2014,00:00:10.540,1
11-08-2014,00:00:11.061,1
11-08-2014,00:00:11.617,1
11-08-2014,00:00:13.607,1
11-08-2014,00:00:14.535,1
11-08-2014,00:00:15.525,1
11-08-2014,00:00:17.960,1
11-08-2014,00:00:20.674,1
11-08-2014,00:00:21.191,1"""

        from pandas.compat import StringIO
        df = pd.read_csv(StringIO(data), parse_dates={'timestamp': [
            'date', 'time']}, index_col='timestamp')
        df.index.name = None
        result = df.resample('6s').sum()
        expected = DataFrame({'value': [
            4, 9, 4, 2
        ]}, index=date_range('2014-11-08', freq='6s', periods=4))
        assert_frame_equal(result, expected)

        result = df.resample('7s').sum()
        expected = DataFrame({'value': [
            4, 10, 4, 1
        ]}, index=date_range('2014-11-08', freq='7s', periods=4))
        assert_frame_equal(result, expected)

        result = df.resample('11s').sum()
        expected = DataFrame({'value': [
            11, 8
        ]}, index=date_range('2014-11-08', freq='11s', periods=2))
        assert_frame_equal(result, expected)

        result = df.resample('13s').sum()
        expected = DataFrame({'value': [
            13, 6
        ]}, index=date_range('2014-11-08', freq='13s', periods=2))
        assert_frame_equal(result, expected)

        result = df.resample('17s').sum()
        expected = DataFrame({'value': [
            16, 3
        ]}, index=date_range('2014-11-08', freq='17s', periods=2))
        assert_frame_equal(result, expected)
    def test_resample_basic_from_daily(self):
        """Daily data downsampled to each weekly anchor and to business days.

        For each W-<anchor> frequency the resulting index must land on the
        anchor weekday and ``last()`` must pick the final observation of
        each week; resampling to 'B' must preserve the index name.
        """
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D', name='index')

        s = Series(np.random.rand(len(dti)), dti)

        # to weekly
        result = s.resample('w-sun').last()

        assert len(result) == 3
        assert (result.index.dayofweek == [6, 6, 6]).all()
        assert result.iloc[0] == s['1/2/2005']
        assert result.iloc[1] == s['1/9/2005']
        assert result.iloc[2] == s.iloc[-1]

        result = s.resample('W-MON').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [0, 0]).all()
        assert result.iloc[0] == s['1/3/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-TUE').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [1, 1]).all()
        assert result.iloc[0] == s['1/4/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-WED').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [2, 2]).all()
        assert result.iloc[0] == s['1/5/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-THU').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [3, 3]).all()
        assert result.iloc[0] == s['1/6/2005']
        assert result.iloc[1] == s['1/10/2005']

        result = s.resample('W-FRI').last()
        assert len(result) == 2
        assert (result.index.dayofweek == [4, 4]).all()
        assert result.iloc[0] == s['1/7/2005']
        assert result.iloc[1] == s['1/10/2005']

        # to biz day
        result = s.resample('B').last()
        assert len(result) == 7
        assert (result.index.dayofweek == [4, 0, 1, 2, 3, 4, 0]).all()

        assert result.iloc[0] == s['1/2/2005']
        assert result.iloc[1] == s['1/3/2005']
        assert result.iloc[5] == s['1/9/2005']
        assert result.index.name == 'index'
    def test_resample_upsampling_picked_but_not_correct(self):
        """Resampling must not be misclassified as an upsample.

        GH 3020 / GH 5955: when the target frequency matches the (implied)
        axis frequency, pandas must still aggregate (count/sum/mean), not
        upsample; intraday timestamps resampled to 'D' normalize to dates.
        """

        # Test for issue #3020
        dates = date_range('01-Jan-2014', '05-Jan-2014', freq='D')
        series = Series(1, index=dates)

        result = series.resample('D').mean()
        assert result.index[0] == dates[0]

        # GH 5955
        # incorrect deciding to upsample when the axis frequency matches the
        # resample frequency

        import datetime
        s = Series(np.arange(1., 6), index=[datetime.datetime(
            1975, 1, i, 12, 0) for i in range(1, 6)])
        expected = Series(np.arange(1., 6), index=date_range(
            '19750101', periods=5, freq='D'))

        result = s.resample('D').count()
        assert_series_equal(result, Series(1, index=expected.index))

        result1 = s.resample('D').sum()
        result2 = s.resample('D').mean()
        assert_series_equal(result1, expected)
        assert_series_equal(result2, expected)
    def test_resample_frame_basic(self):
        """DataFrame resampling: cython aggs run and columns match Series.

        Also smoke-tests ``kind='period'`` resampling for 'M' and 'W-WED'.
        """
        df = tm.makeTimeDataFrame()

        b = TimeGrouper('M')
        g = df.groupby(b)

        # check all cython functions work
        funcs = ['add', 'mean', 'prod', 'min', 'max', 'var']
        for f in funcs:
            g._cython_agg_general(f)

        result = df.resample('A').mean()
        assert_series_equal(result['A'], df['A'].resample('A').mean())

        result = df.resample('M').mean()
        assert_series_equal(result['A'], df['A'].resample('M').mean())

        df.resample('M', kind='period').mean()
        df.resample('W-WED', kind='period').mean()
    def test_resample_loffset(self):
        """``loffset`` shifts result labels; timedelta/str/offset agree.

        The label offset given as ``timedelta(minutes=1)``, the string
        ``'1min'`` and ``Minute(1)`` must all produce identical results,
        and the index frequency stays the resample frequency (5min).
        A negative business-day loffset is also checked on weekly data.
        """
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 00:13:00', freq='min')

        s = Series(np.random.randn(14), index=rng)

        result = s.resample('5min', closed='right', label='right',
                            loffset=timedelta(minutes=1)).mean()
        idx = date_range('1/1/2000', periods=4, freq='5min')
        expected = Series([s[0], s[1:6].mean(), s[6:11].mean(), s[11:].mean()],
                          index=idx + timedelta(minutes=1))
        assert_series_equal(result, expected)

        expected = s.resample(
            '5min', closed='right', label='right',
            loffset='1min').mean()
        assert_series_equal(result, expected)

        expected = s.resample(
            '5min', closed='right', label='right',
            loffset=Minute(1)).mean()
        assert_series_equal(result, expected)

        assert result.index.freq == Minute(5)

        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D')
        ser = Series(np.random.rand(len(dti)), dti)

        # to weekly
        result = ser.resample('w-sun').last()
        expected = ser.resample('w-sun', loffset=-bday).last()
        assert result.index[0] - bday == expected.index[0]
    def test_resample_loffset_count(self):
        """``loffset`` is honored by ``count()`` and ``size()`` (GH 12725)."""
        # GH 12725
        start_time = '1/1/2000 00:00:00'
        rng = date_range(start_time, periods=100, freq='S')
        ts = Series(np.random.randn(len(rng)), index=rng)

        # Each 10S bin contains exactly 10 seconds of data; labels are
        # shifted forward by the 1s loffset.
        result = ts.resample('10S', loffset='1s').count()

        expected_index = (
            date_range(start_time, periods=10, freq='10S') +
            timedelta(seconds=1)
        )
        expected = Series(10, index=expected_index)

        assert_series_equal(result, expected)

        # Same issue should apply to .size() since it goes through
        # same code path
        result = ts.resample('10S', loffset='1s').size()

        assert_series_equal(result, expected)
    def test_resample_upsample(self):
        """Upsampling daily data to minutes via ``pad`` forward-fills.

        9 days at minute frequency -> 12961 rows; endpoints and the index
        name must survive the upsample.
        """
        # from daily
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D', name='index')

        s = Series(np.random.rand(len(dti)), dti)

        # to minutely, by padding
        result = s.resample('Min').pad()
        assert len(result) == 12961
        assert result[0] == s[0]
        assert result[-1] == s[-1]

        assert result.index.name == 'index'
    def test_resample_how_method(self):
        """Empty intermediate bins produce NaN in the mean (GH 9915)."""
        # GH9915
        s = Series([11, 22],
                   index=[Timestamp('2015-03-31 21:48:52.672000'),
                          Timestamp('2015-03-31 21:49:52.739000')])
        expected = Series([11, np.NaN, np.NaN, np.NaN, np.NaN, np.NaN, 22],
                          index=[Timestamp('2015-03-31 21:48:50'),
                                 Timestamp('2015-03-31 21:49:00'),
                                 Timestamp('2015-03-31 21:49:10'),
                                 Timestamp('2015-03-31 21:49:20'),
                                 Timestamp('2015-03-31 21:49:30'),
                                 Timestamp('2015-03-31 21:49:40'),
                                 Timestamp('2015-03-31 21:49:50')])
        assert_series_equal(s.resample("10S").mean(), expected)
    def test_resample_extra_index_point(self):
        """'BM' resample must not emit an extra trailing bin (GH 9756)."""
        # GH 9756
        index = DatetimeIndex(start='20150101', end='20150331', freq='BM')
        expected = DataFrame({'A': Series([21, 41, 63], index=index)})

        index = DatetimeIndex(start='20150101', end='20150331', freq='B')
        df = DataFrame(
            {'A': Series(range(len(index)), index=index)}, dtype='int64')
        result = df.resample('BM').last()
        assert_frame_equal(result, expected)
    def test_upsample_with_limit(self):
        """``ffill(limit=...)`` on an upsample matches limited reindex."""
        rng = date_range('1/1/2000', periods=3, freq='5t')
        ts = Series(np.random.randn(len(rng)), rng)

        result = ts.resample('t').ffill(limit=2)
        expected = ts.reindex(result.index, method='ffill', limit=2)
        assert_series_equal(result, expected)
    def test_nearest_upsample_with_limit(self):
        """``nearest(limit=...)`` on an upsample matches limited reindex."""
        rng = date_range('1/1/2000', periods=3, freq='5t')
        ts = Series(np.random.randn(len(rng)), rng)

        result = ts.resample('t').nearest(limit=2)
        expected = ts.reindex(result.index, method='nearest', limit=2)
        assert_series_equal(result, expected)
    def test_resample_ohlc(self):
        """``ohlc()`` yields 4 columns whose values match manual slices.

        Uses the class-level ``self.series`` fixture (defined outside this
        view); open/high/low/close of selected bins are verified against
        direct slicing of the source series.
        """
        s = self.series

        grouper = TimeGrouper(Minute(5))
        expect = s.groupby(grouper).agg(lambda x: x[-1])
        result = s.resample('5Min').ohlc()

        assert len(result) == len(expect)
        assert len(result.columns) == 4

        xs = result.iloc[-2]
        assert xs['open'] == s[-6]
        assert xs['high'] == s[-6:-1].max()
        assert xs['low'] == s[-6:-1].min()
        assert xs['close'] == s[-2]

        xs = result.iloc[0]
        assert xs['open'] == s[0]
        assert xs['high'] == s[:5].max()
        assert xs['low'] == s[:5].min()
        assert xs['close'] == s[4]
    def test_resample_ohlc_result(self):
        """``ohlc()`` returns a DataFrame and handles odd periods.

        GH 12332: result type is DataFrame regardless of slice end.
        GH 12348: business-day resample over a sparse index must not raise
        and matches a business-day reindex.
        """
        # GH 12332
        index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
        index = index.union(pd.date_range('4-15-2000', '5-15-2000', freq='h'))
        s = Series(range(len(index)), index=index)

        a = s.loc[:'4-15-2000'].resample('30T').ohlc()
        assert isinstance(a, DataFrame)

        b = s.loc[:'4-14-2000'].resample('30T').ohlc()
        assert isinstance(b, DataFrame)

        # GH12348
        # raising on odd period
        rng = date_range('2013-12-30', '2014-01-07')
        index = rng.drop([Timestamp('2014-01-01'),
                          Timestamp('2013-12-31'),
                          Timestamp('2014-01-04'),
                          Timestamp('2014-01-05')])
        df = DataFrame(data=np.arange(len(index)), index=index)
        result = df.resample('B').mean()
        expected = df.reindex(index=date_range(rng[0], rng[-1], freq='B'))
        assert_frame_equal(result, expected)
    def test_resample_ohlc_dataframe(self):
        """DataFrame ``ohlc()`` matches per-column ohlc, incl. MultiIndex.

        The frame result must equal concatenating each column's ohlc under
        a column key; with 2-level input columns the output becomes a
        3-level MultiIndex ending in open/high/low/close.
        """
        df = (
            DataFrame({
                'PRICE': {
                    Timestamp('2011-01-06 10:59:05', tz=None): 24990,
                    Timestamp('2011-01-06 12:43:33', tz=None): 25499,
                    Timestamp('2011-01-06 12:54:09', tz=None): 25499},
                'VOLUME': {
                    Timestamp('2011-01-06 10:59:05', tz=None): 1500000000,
                    Timestamp('2011-01-06 12:43:33', tz=None): 5000000000,
                    Timestamp('2011-01-06 12:54:09', tz=None): 100000000}})
        ).reindex(['VOLUME', 'PRICE'], axis=1)
        res = df.resample('H').ohlc()
        exp = pd.concat([df['VOLUME'].resample('H').ohlc(),
                         df['PRICE'].resample('H').ohlc()],
                        axis=1,
                        keys=['VOLUME', 'PRICE'])
        assert_frame_equal(exp, res)

        df.columns = [['a', 'b'], ['c', 'd']]
        res = df.resample('H').ohlc()
        exp.columns = pd.MultiIndex.from_tuples([
            ('a', 'c', 'open'), ('a', 'c', 'high'), ('a', 'c', 'low'),
            ('a', 'c', 'close'), ('b', 'd', 'open'), ('b', 'd', 'high'),
            ('b', 'd', 'low'), ('b', 'd', 'close')])
        assert_frame_equal(exp, res)

        # dupe columns fail atm
        # df.columns = ['PRICE', 'PRICE']
    def test_resample_dup_index(self):
        """axis=1 resample works with duplicate row labels (GH 4812)."""
        # GH 4812
        # dup columns with resample raising
        df = DataFrame(np.random.randn(4, 12), index=[2000, 2000, 2000, 2000],
                       columns=[Period(year=2000, month=i + 1, freq='M')
                                for i in range(12)])
        df.iloc[3, :] = np.nan
        result = df.resample('Q', axis=1).mean()
        # manual quarterly grouping of the monthly columns as the oracle
        expected = df.groupby(lambda x: int((x.month - 1) / 3), axis=1).mean()
        expected.columns = [
            Period(year=2000, quarter=i + 1, freq='Q') for i in range(4)]
        assert_frame_equal(result, expected)
    def test_resample_reresample(self):
        """Resampling an already-resampled series keeps a proper freq."""
        dti = DatetimeIndex(start=datetime(2005, 1, 1),
                            end=datetime(2005, 1, 10), freq='D')
        s = Series(np.random.rand(len(dti)), dti)
        bs = s.resample('B', closed='right', label='right').mean()
        result = bs.resample('8H').mean()
        assert len(result) == 22
        assert isinstance(result.index.freq, offsets.DateOffset)
        assert result.index.freq == offsets.Hour(8)
    def test_resample_timestamp_to_period(self):
        """``kind='period'`` matches timestamp resample with a PeriodIndex.

        NOTE(review): the final 'M' check is duplicated verbatim —
        presumably a copy/paste; harmless but redundant.
        """
        ts = _simple_ts('1/1/1990', '1/1/2000')

        result = ts.resample('A-DEC', kind='period').mean()
        expected = ts.resample('A-DEC').mean()
        expected.index = period_range('1990', '2000', freq='a-dec')
        assert_series_equal(result, expected)

        result = ts.resample('A-JUN', kind='period').mean()
        expected = ts.resample('A-JUN').mean()
        expected.index = period_range('1990', '2000', freq='a-jun')
        assert_series_equal(result, expected)

        result = ts.resample('M', kind='period').mean()
        expected = ts.resample('M').mean()
        expected.index = period_range('1990-01', '2000-01', freq='M')
        assert_series_equal(result, expected)

        result = ts.resample('M', kind='period').mean()
        expected = ts.resample('M').mean()
        expected.index = period_range('1990-01', '2000-01', freq='M')
        assert_series_equal(result, expected)
    def test_ohlc_5min(self):
        """5-minute right-closed/right-labeled ohlc matches a manual oracle."""
        def _ohlc(group):
            # manual open/high/low/close; all-NaN bins yield 4 NaNs
            if isna(group).all():
                return np.repeat(np.nan, 4)
            return [group[0], group.max(), group.min(), group[-1]]

        rng = date_range('1/1/2000 00:00:00', '1/1/2000 5:59:50', freq='10s')
        ts = Series(np.random.randn(len(rng)), index=rng)

        resampled = ts.resample('5min', closed='right',
                                label='right').ohlc()

        assert (resampled.loc['1/1/2000 00:00'] == ts[0]).all()

        exp = _ohlc(ts[1:31])
        assert (resampled.loc['1/1/2000 00:05'] == exp).all()

        exp = _ohlc(ts['1/1/2000 5:55:01':])
        assert (resampled.loc['1/1/2000 6:00:00'] == exp).all()
    def test_downsample_non_unique(self):
        """Monthly downsample of a non-unique index matches groupby-by-month."""
        rng = date_range('1/1/2000', '2/29/2000')
        rng2 = rng.repeat(5).values
        ts = Series(np.random.randn(len(rng2)), index=rng2)

        result = ts.resample('M').mean()

        expected = ts.groupby(lambda x: x.month).mean()
        assert len(result) == 2
        assert_almost_equal(result[0], expected[1])
        assert_almost_equal(result[1], expected[2])
    def test_asfreq_non_unique(self):
        """``asfreq`` on a non-unique index must raise (GH 1077)."""
        # GH #1077
        rng = date_range('1/1/2000', '2/29/2000')
        rng2 = rng.repeat(2).values
        ts = Series(np.random.randn(len(rng2)), index=rng2)

        pytest.raises(Exception, ts.asfreq, 'B')
    def test_resample_axis1(self):
        """Resampling along axis=1 equals transposing, resampling, transposing."""
        rng = date_range('1/1/2000', '2/29/2000')
        df = DataFrame(np.random.randn(3, len(rng)), columns=rng,
                       index=['a', 'b', 'c'])

        result = df.resample('M', axis=1).mean()
        expected = df.T.resample('M').mean().T
        tm.assert_frame_equal(result, expected)
    def test_resample_panel(self):
        """Panel resampling matches per-item resample (Panel is deprecated).

        NOTE: wrapped in ``catch_warnings`` because Panel construction
        emits a deprecation warning in this pandas version.
        """
        rng = date_range('1/1/2000', '6/30/2000')
        n = len(rng)

        with catch_warnings(record=True):
            panel = Panel(np.random.randn(3, n, 5),
                          items=['one', 'two', 'three'],
                          major_axis=rng,
                          minor_axis=['a', 'b', 'c', 'd', 'e'])

            result = panel.resample('M', axis=1).mean()

            def p_apply(panel, f):
                # apply f item-wise and rebuild a Panel as the oracle
                result = {}
                for item in panel.items:
                    result[item] = f(panel[item])
                return Panel(result, items=panel.items)

            expected = p_apply(panel, lambda x: x.resample('M').mean())
            tm.assert_panel_equal(result, expected)

            panel2 = panel.swapaxes(1, 2)
            result = panel2.resample('M', axis=2).mean()
            expected = p_apply(panel2,
                               lambda x: x.resample('M', axis=1).mean())
            tm.assert_panel_equal(result, expected)
    def test_resample_panel_numpy(self):
        """Panel ``apply`` with an axis-mean lambda equals ``.mean()``."""
        rng = date_range('1/1/2000', '6/30/2000')
        n = len(rng)

        with catch_warnings(record=True):
            panel = Panel(np.random.randn(3, n, 5),
                          items=['one', 'two', 'three'],
                          major_axis=rng,
                          minor_axis=['a', 'b', 'c', 'd', 'e'])

            result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
            expected = panel.resample('M', axis=1).mean()
            tm.assert_panel_equal(result, expected)

            panel = panel.swapaxes(1, 2)
            result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
            expected = panel.resample('M', axis=2).mean()
            tm.assert_panel_equal(result, expected)
    def test_resample_anchored_ticks(self):
        """Fixed deltas dividing a day anchor the origin at midnight."""
        # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
        # "anchor" the origin at midnight so we get regular intervals rather
        # than starting from the first timestamp which might start in the
        # middle of a desired interval

        rng = date_range('1/1/2000 04:00:00', periods=86400, freq='s')
        ts = Series(np.random.randn(len(rng)), index=rng)
        ts[:2] = np.nan  # so results are the same

        freqs = ['t', '5t', '15t', '30t', '4h', '12h']
        for freq in freqs:
            result = ts[2:].resample(freq, closed='left', label='left').mean()
            expected = ts.resample(freq, closed='left', label='left').mean()
            assert_series_equal(result, expected)
    def test_resample_single_group(self):
        """``sum()`` equals ``apply(sum)`` even when only one bin results.

        GH 3849: population std via apply on a single daily bin.
        """
        mysum = lambda x: x.sum()

        rng = date_range('2000-1-1', '2000-2-10', freq='D')
        ts = Series(np.random.randn(len(rng)), index=rng)
        assert_series_equal(ts.resample('M').sum(),
                            ts.resample('M').apply(mysum))

        rng = date_range('2000-1-1', '2000-1-10', freq='D')
        ts = Series(np.random.randn(len(rng)), index=rng)
        assert_series_equal(ts.resample('M').sum(),
                            ts.resample('M').apply(mysum))

        # GH 3849
        s = Series([30.1, 31.6], index=[Timestamp('20070915 15:30:00'),
                                        Timestamp('20070915 15:40:00')])
        expected = Series([0.75], index=[Timestamp('20070915')])
        result = s.resample('D').apply(lambda x: np.std(x))
        assert_series_equal(result, expected)
    def test_resample_base(self):
        """``base=2`` shifts 5-minute bin edges by 2 minutes."""
        rng = date_range('1/1/2000 00:00:00', '1/1/2000 02:00', freq='s')
        ts = Series(np.random.randn(len(rng)), index=rng)

        resampled = ts.resample('5min', base=2).mean()
        exp_rng = date_range('12/31/1999 23:57:00', '1/1/2000 01:57',
                             freq='5min')
        tm.assert_index_equal(resampled.index, exp_rng)
    def test_resample_base_with_timedeltaindex(self):
        """``base`` offsets the bin origin for TimedeltaIndex (GH 10530)."""
        # GH 10530
        rng = timedelta_range(start='0s', periods=25, freq='s')
        ts = Series(np.random.randn(len(rng)), index=rng)

        with_base = ts.resample('2s', base=5).mean()
        without_base = ts.resample('2s').mean()

        exp_without_base = timedelta_range(start='0s', end='25s', freq='2s')
        exp_with_base = timedelta_range(start='5s', end='29s', freq='2s')

        tm.assert_index_equal(without_base.index, exp_without_base)
        tm.assert_index_equal(with_base.index, exp_with_base)
    def test_resample_categorical_data_with_timedeltaindex(self):
        """Categorical columns survive agg on a TimedeltaIndex (GH 12169)."""
        # GH #12169
        df = DataFrame({'Group_obj': 'A'},
                       index=pd.to_timedelta(list(range(20)), unit='s'))
        df['Group'] = df['Group_obj'].astype('category')
        # modal value per 10s bin
        result = df.resample('10s').agg(lambda x: (x.value_counts().index[0]))
        expected = DataFrame({'Group_obj': ['A', 'A'],
                              'Group': ['A', 'A']},
                             index=pd.to_timedelta([0, 10], unit='s'))
        expected = expected.reindex(['Group_obj', 'Group'], axis=1)
        tm.assert_frame_equal(result, expected)
    def test_resample_daily_anchored(self):
        """Daily bins are anchored to midnight regardless of first timestamp."""
        rng = date_range('1/1/2000 0:00:00', periods=10000, freq='T')
        ts = Series(np.random.randn(len(rng)), index=rng)
        ts[:2] = np.nan  # so results are the same

        result = ts[2:].resample('D', closed='left', label='left').mean()
        expected = ts.resample('D', closed='left', label='left').mean()
        assert_series_equal(result, expected)
    def test_resample_to_period_monthly_buglet(self):
        """Monthly period resample covers the full year range (GH 1259)."""
        # GH #1259

        rng = date_range('1/1/2000', '12/31/2000')
        ts = Series(np.random.randn(len(rng)), index=rng)

        result = ts.resample('M', kind='period').mean()
        exp_index = period_range('Jan-2000', 'Dec-2000', freq='M')
        tm.assert_index_equal(result.index, exp_index)
    def test_period_with_agg(self):
        """Lambda agg on a PeriodIndex resampler equals the timestamp path."""
        # aggregate a period resampler with a lambda
        s2 = Series(np.random.randint(0, 5, 50),
                    index=pd.period_range('2012-01-01', freq='H', periods=50),
                    dtype='float64')

        expected = s2.to_timestamp().resample('D').mean().to_period()
        result = s2.resample('D').agg(lambda x: x.mean())
        assert_series_equal(result, expected)
    def test_resample_segfault(self):
        """groupby + resample over small groups must not segfault (GH 8573)."""
        # GH 8573
        # segfaulting in older versions
        all_wins_and_wagers = [
            (1, datetime(2013, 10, 1, 16, 20), 1, 0),
            (2, datetime(2013, 10, 1, 16, 10), 1, 0),
            (2, datetime(2013, 10, 1, 18, 15), 1, 0),
            (2, datetime(2013, 10, 1, 16, 10, 31), 1, 0)]

        df = DataFrame.from_records(all_wins_and_wagers,
                                    columns=("ID", "timestamp", "A", "B")
                                    ).set_index("timestamp")
        result = df.groupby("ID").resample("5min").sum()
        expected = df.groupby("ID").apply(lambda x: x.resample("5min").sum())
        assert_frame_equal(result, expected)
    def test_resample_dtype_preservation(self):
        """int32 dtype survives ``ffill`` via resample and groupby-resample."""
        # GH 12202
        # validation tests for dtype preservation

        df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                              periods=4, freq='W'),
                        'group': [1, 1, 2, 2],
                        'val': Series([5, 6, 7, 8],
                                      dtype='int32')}
                       ).set_index('date')

        result = df.resample('1D').ffill()
        assert result.val.dtype == np.int32

        result = df.groupby('group').resample('1D').ffill()
        assert result.val.dtype == np.int32
    def test_resample_dtype_coerceion(self):
        """Getitem-then-agg coerces dtype like agg-then-getitem (GH 16361).

        Requires scipy for ``interpolate('cubic')``; skipped otherwise.
        """

        pytest.importorskip('scipy.interpolate')

        # GH 16361
        df = {"a": [1, 3, 1, 4]}
        df = DataFrame(df, index=pd.date_range("2017-01-01", "2017-01-04"))

        expected = (df.astype("float64")
                    .resample("H")
                    .mean()
                    ["a"]
                    .interpolate("cubic")
                    )

        result = df.resample("H")["a"].mean().interpolate("cubic")
        tm.assert_series_equal(result, expected)

        result = df.resample("H").mean()["a"].interpolate("cubic")
        tm.assert_series_equal(result, expected)
    def test_weekly_resample_buglet(self):
        """Bare 'W' is equivalent to 'W-SUN' (GH 1327)."""
        # #1327
        rng = date_range('1/1/2000', freq='B', periods=20)
        ts = Series(np.random.randn(len(rng)), index=rng)

        resampled = ts.resample('W').mean()
        expected = ts.resample('W-SUN').mean()
        assert_series_equal(resampled, expected)
    def test_monthly_resample_error(self):
        """Monthly resample of long hourly data must not raise (GH 1451)."""
        # #1451
        dates = date_range('4/16/2012 20:00', periods=5000, freq='h')
        ts = Series(np.random.randn(len(dates)), index=dates)
        # it works!
        ts.resample('M')
    def test_nanosecond_resample_error(self):
        """Nano-frequency resample must not drop values past the last bin.

        GH 12307: the origin is floored to the bin size, so the expected
        index starts at ...400 rather than the raw ...427 start.
        """
        # GH 12307 - Values falls after last bin when
        # Resampling using pd.tseries.offsets.Nano as period
        start = 1443707890427
        exp_start = 1443707890400
        indx = pd.date_range(
            start=pd.to_datetime(start),
            periods=10,
            freq='100n'
        )
        ts = Series(range(len(indx)), index=indx)
        r = ts.resample(pd.tseries.offsets.Nano(100))
        result = r.agg('mean')

        exp_indx = pd.date_range(
            start=pd.to_datetime(exp_start),
            periods=10,
            freq='100n'
        )
        exp = Series(range(len(exp_indx)), index=exp_indx)

        assert_series_equal(result, exp)
    def test_resample_anchored_intraday(self):
        """Anchored 'M'/'Q' resamples of intraday data match the period path.

        GH 1471 / GH 1458: timestamp-kind results should equal period-kind
        results converted back with ``to_timestamp(how='end')``; with
        ``closed='left'`` a one-day shift of the source reproduces it.
        """
        # #1471, #1458

        rng = date_range('1/1/2012', '4/1/2012', freq='100min')
        df = DataFrame(rng.month, index=rng)

        result = df.resample('M').mean()
        expected = df.resample(
            'M', kind='period').mean().to_timestamp(how='end')
        tm.assert_frame_equal(result, expected)

        result = df.resample('M', closed='left').mean()
        exp = df.tshift(1, freq='D').resample('M', kind='period').mean()
        exp = exp.to_timestamp(how='end')

        tm.assert_frame_equal(result, exp)

        rng = date_range('1/1/2012', '4/1/2012', freq='100min')
        df = DataFrame(rng.month, index=rng)

        result = df.resample('Q').mean()
        expected = df.resample(
            'Q', kind='period').mean().to_timestamp(how='end')
        tm.assert_frame_equal(result, expected)

        result = df.resample('Q', closed='left').mean()
        expected = df.tshift(1, freq='D').resample('Q', kind='period',
                                                   closed='left').mean()
        expected = expected.to_timestamp(how='end')
        tm.assert_frame_equal(result, expected)

        # a month-spanning window collapses to a single monthly bin
        ts = _simple_ts('2012-04-29 23:00', '2012-04-30 5:00', freq='h')
        resampled = ts.resample('M').mean()
        assert len(resampled) == 1
    def test_resample_anchored_monthstart(self):
        """Start-anchored frequencies (MS/BMS/QS/AS) resample without error."""
        ts = _simple_ts('1/1/2000', '12/31/2002')

        freqs = ['MS', 'BMS', 'QS-MAR', 'AS-DEC', 'AS-JUN']

        for freq in freqs:
            ts.resample(freq).mean()
    def test_resample_anchored_multiday(self):
        """Multi-day spans anchor bin edges on the start date (GH 8683)."""
        # When resampling a range spanning multiple days, ensure that the
        # start date gets used to determine the offset.  Fixes issue where
        # a one day period is not a multiple of the frequency.
        #
        # See: https://github.com/pandas-dev/pandas/issues/8683

        index = pd.date_range(
            '2014-10-14 23:06:23.206', periods=3, freq='400L'
        ) | pd.date_range(
            '2014-10-15 23:00:00', periods=2, freq='2200L')

        s = Series(np.random.randn(5), index=index)

        # Ensure left closing works
        result = s.resample('2200L').mean()
        assert result.index[-1] == Timestamp('2014-10-15 23:00:02.000')

        # Ensure right closing works
        result = s.resample('2200L', label='right').mean()
        assert result.index[-1] == Timestamp('2014-10-15 23:00:04.200')
    def test_corner_cases(self):
        """Miscellaneous corner cases: odd labels, empty input, single bin."""
        # miscellaneous test coverage
        rng = date_range('1/1/2000', periods=12, freq='t')
        ts = Series(np.random.randn(len(rng)), index=rng)

        # right-closed/left-labeled: first label falls before the data start
        result = ts.resample('5t', closed='right', label='left').mean()
        ex_index = date_range('1999-12-31 23:55', periods=4, freq='5t')
        tm.assert_index_equal(result.index, ex_index)

        len0pts = _simple_pts('2007-01', '2010-05', freq='M')[:0]
        # it works
        result = len0pts.resample('A-DEC').mean()
        assert len(result) == 0

        # resample to periods
        ts = _simple_ts('2000-04-28', '2000-04-30 11:00', freq='h')
        result = ts.resample('M', kind='period').mean()
        assert len(result) == 1
        assert result.index[0] == Period('2000-04', freq='M')
    def test_anchored_lowercase_buglet(self):
        """Lowercase 'd' frequency alias resamples without error."""
        dates = date_range('4/16/2012 20:00', periods=50000, freq='s')
        ts = Series(np.random.randn(len(dates)), index=dates)
        # it works!
        ts.resample('d').mean()
    def test_upsample_apply_functions(self):
        """Multi-function aggregate on an upsample returns a DataFrame (GH 1596)."""
        # #1596
        rng = pd.date_range('2012-06-12', periods=4, freq='h')

        ts = Series(np.random.randn(len(rng)), index=rng)

        result = ts.resample('20min').aggregate(['mean', 'sum'])
        assert isinstance(result, DataFrame)
    def test_resample_not_monotonic(self):
        """An unsorted index resamples as if it had been sorted first."""
        rng = pd.date_range('2012-06-12', periods=200, freq='h')
        ts = Series(np.random.randn(len(rng)), index=rng)

        ts = ts.take(np.random.permutation(len(ts)))

        result = ts.resample('D').sum()
        exp = ts.sort_index().resample('D').sum()
        assert_series_equal(result, exp)
    def test_resample_median_bug_1688(self):
        """``apply(mean)`` and ``median`` keep dtype across 'T' bins (GH 1688)."""
        for dtype in ['int64', 'int32', 'float64', 'float32']:
            df = DataFrame([1, 2], index=[datetime(2012, 1, 1, 0, 0, 0),
                                          datetime(2012, 1, 1, 0, 5, 0)],
                           dtype=dtype)

            result = df.resample("T").apply(lambda x: x.mean())
            exp = df.asfreq('T')
            tm.assert_frame_equal(result, exp)

            result = df.resample("T").median()
            exp = df.asfreq('T')
            tm.assert_frame_equal(result, exp)
    def test_how_lambda_functions(self):
        """Lambdas in apply/aggregate match the named method results.

        A list of lambdas yields a multi-column result; a dict of lambdas
        yields a result whose names differ (hence ``check_names=False``).
        """
        ts = _simple_ts('1/1/2000', '4/1/2000')

        result = ts.resample('M').apply(lambda x: x.mean())
        exp = ts.resample('M').mean()
        tm.assert_series_equal(result, exp)

        foo_exp = ts.resample('M').mean()
        foo_exp.name = 'foo'
        bar_exp = ts.resample('M').std()
        bar_exp.name = 'bar'

        result = ts.resample('M').apply(
            [lambda x: x.mean(), lambda x: x.std(ddof=1)])
        result.columns = ['foo', 'bar']
        tm.assert_series_equal(result['foo'], foo_exp)
        tm.assert_series_equal(result['bar'], bar_exp)

        # this is a MI Series, so comparing the names of the results
        # doesn't make sense
        result = ts.resample('M').aggregate({'foo': lambda x: x.mean(),
                                             'bar': lambda x: x.std(ddof=1)})
        tm.assert_series_equal(result['foo'], foo_exp, check_names=False)
        tm.assert_series_equal(result['bar'], bar_exp, check_names=False)
    def test_resample_unequal_times(self):
        """'AS' resample works when the end hour precedes the start hour (GH 1772)."""
        # #1772
        start = datetime(1999, 3, 1, 5)
        # end hour is less than start
        end = datetime(2012, 7, 31, 4)
        bad_ind = date_range(start, end, freq="30min")
        df = DataFrame({'close': 1}, index=bad_ind)

        # it works!
        df.resample('AS').sum()
    def test_resample_consistency(self):
        """``bfill(limit=...)`` agrees with reindex/reindex_like (GH 6418)."""

        # GH 6418
        # resample with bfill / limit / reindex consistency

        i30 = pd.date_range('2002-02-02', periods=4, freq='30T')
        s = Series(np.arange(4.), index=i30)
        s[2] = np.NaN

        # Upsample by factor 3 with reindex() and resample() methods:
        i10 = pd.date_range(i30[0], i30[-1], freq='10T')

        s10 = s.reindex(index=i10, method='bfill')
        s10_2 = s.reindex(index=i10, method='bfill', limit=2)
        rl = s.reindex_like(s10, method='bfill', limit=2)
        r10_2 = s.resample('10Min').bfill(limit=2)
        r10 = s.resample('10Min').bfill()

        # s10_2, r10, r10_2, rl should all be equal
        assert_series_equal(s10_2, r10)
        assert_series_equal(s10_2, r10_2)
        assert_series_equal(s10_2, rl)
    def test_resample_timegrouper(self):
        """Monthly count via resample equals groupby(Grouper) (GH 7227).

        Runs with clean dates and with NaT values interspersed/appended;
        NaT rows must be dropped, and empty months must count as 0.
        """
        # GH 7227
        dates1 = [datetime(2014, 10, 1), datetime(2014, 9, 3),
                  datetime(2014, 11, 5), datetime(2014, 9, 5),
                  datetime(2014, 10, 8), datetime(2014, 7, 15)]

        dates2 = dates1[:2] + [pd.NaT] + dates1[2:4] + [pd.NaT] + dates1[4:]
        dates3 = [pd.NaT] + dates1 + [pd.NaT]

        for dates in [dates1, dates2, dates3]:
            df = DataFrame(dict(A=dates, B=np.arange(len(dates))))
            result = df.set_index('A').resample('M').count()
            exp_idx = pd.DatetimeIndex(['2014-07-31', '2014-08-31',
                                        '2014-09-30',
                                        '2014-10-31', '2014-11-30'],
                                       freq='M', name='A')
            expected = DataFrame({'B': [1, 0, 2, 2, 1]}, index=exp_idx)
            assert_frame_equal(result, expected)

            result = df.groupby(pd.Grouper(freq='M', key='A')).count()
            assert_frame_equal(result, expected)

            df = DataFrame(dict(A=dates, B=np.arange(len(dates)), C=np.arange(
                len(dates))))
            result = df.set_index('A').resample('M').count()
            expected = DataFrame({'B': [1, 0, 2, 2, 1], 'C': [1, 0, 2, 2, 1]},
                                 index=exp_idx, columns=['B', 'C'])
            assert_frame_equal(result, expected)

            result = df.groupby(pd.Grouper(freq='M', key='A')).count()
            assert_frame_equal(result, expected)
def test_resample_nunique(self):
# GH 12352
df = DataFrame({
'ID': {Timestamp('2015-06-05 00:00:00'): '0010100903',
Timestamp('2015-06-08 00:00:00'): '0010150847'},
'DATE': {Timestamp('2015-06-05 00:00:00'): '2015-06-05',
Timestamp('2015-06-08 00:00:00'): '2015-06-08'}})
r = df.resample('D')
g = df.groupby(pd.Grouper(freq='D'))
expected = df.groupby(pd.Grouper(freq='D')).ID.apply(lambda x:
x.nunique())
assert expected.name == 'ID'
for t in [r, g]:
result = r.ID.nunique()
assert_series_equal(result, expected)
result = df.ID.resample('D').nunique()
assert_series_equal(result, expected)
result = df.ID.groupby(pd.Grouper(freq='D')).nunique()
assert_series_equal(result, expected)
    def test_resample_nunique_with_date_gap(self):
        """count/nunique/agg variants agree across a multi-month gap (GH 13453)."""
        # GH 13453
        index = pd.date_range('1-1-2000', '2-15-2000', freq='h')
        index2 = pd.date_range('4-15-2000', '5-15-2000', freq='h')
        index3 = index.append(index2)
        s = Series(range(len(index3)), index=index3, dtype='int64')
        r = s.resample('M')

        # Since all elements are unique, these should all be the same
        results = [
            r.count(),
            r.nunique(),
            r.agg(Series.nunique),
            r.agg('nunique')
        ]

        assert_series_equal(results[0], results[1])
        assert_series_equal(results[0], results[2])
        assert_series_equal(results[0], results[3])
    def test_resample_group_info(self):  # GH10914
        """``nunique`` over random duplicated timestamps matches a numpy oracle.

        The oracle sorts (value, bin) pairs lexicographically, keeps the
        first occurrence of each (value, bin) pair via ``mask``, then
        bincounts distinct values per 30-minute bin.
        """
        for n, k in product((10000, 100000), (10, 100, 1000)):
            dr = date_range(start='2015-08-27', periods=n // 10, freq='T')
            ts = Series(np.random.randint(0, n // k, n).astype('int64'),
                        index=np.random.choice(dr, n))

            left = ts.resample('30T').nunique()
            ix = date_range(start=ts.index.min(), end=ts.index.max(),
                            freq='30T')

            vals = ts.values
            # bin id (1-based) for each timestamp
            bins = np.searchsorted(ix.values, ts.index, side='right')

            sorter = np.lexsort((vals, bins))
            vals, bins = vals[sorter], bins[sorter]

            # True at the first occurrence of each (value, bin) pair
            mask = np.r_[True, vals[1:] != vals[:-1]]
            mask |= np.r_[True, bins[1:] != bins[:-1]]

            arr = np.bincount(bins[mask] - 1,
                              minlength=len(ix)).astype('int64', copy=False)
            right = Series(arr, index=ix)

            assert_series_equal(left, right)
    def test_resample_size(self):
        """``size()`` over random duplicated timestamps matches a bincount oracle."""
        n = 10000
        dr = date_range('2015-09-19', periods=n, freq='T')
        ts = Series(np.random.randn(n), index=np.random.choice(dr, n))

        left = ts.resample('7T').size()
        ix = date_range(start=left.index.min(), end=ts.index.max(), freq='7T')

        bins = np.searchsorted(ix.values, ts.index.values, side='right')
        # drop the 0th slot: searchsorted 'right' makes bins 1-based
        val = np.bincount(bins, minlength=len(ix) + 1)[1:].astype('int64',
                                                                  copy=False)

        right = Series(val, index=ix)
        assert_series_equal(left, right)
    def test_resample_across_dst(self):
        """Hourly resample across a DST fall-back transition (GH 14682).

        Two timestamps an hour apart in UTC land in the repeated local
        wall-clock hour in Europe/Madrid; resampling must keep them in
        separate hourly bins rather than merging them.
        """
        # The test resamples a DatetimeIndex with values before and after a
        # DST change
        # Issue: 14682

        # The DatetimeIndex we will start with
        # (note that DST happens at 03:00+02:00 -> 02:00+01:00)
        # 2016-10-30 02:23:00+02:00, 2016-10-30 02:23:00+01:00
        df1 = DataFrame([1477786980, 1477790580], columns=['ts'])
        dti1 = DatetimeIndex(pd.to_datetime(df1.ts, unit='s')
                             .dt.tz_localize('UTC')
                             .dt.tz_convert('Europe/Madrid'))

        # The expected DatetimeIndex after resampling.
        # 2016-10-30 02:00:00+02:00, 2016-10-30 02:00:00+01:00
        df2 = DataFrame([1477785600, 1477789200], columns=['ts'])
        dti2 = DatetimeIndex(pd.to_datetime(df2.ts, unit='s')
                             .dt.tz_localize('UTC')
                             .dt.tz_convert('Europe/Madrid'))
        df = DataFrame([5, 5], index=dti1)

        result = df.resample(rule='H').sum()
        expected = DataFrame([5, 5], index=dti2)

        assert_frame_equal(result, expected)
    def test_resample_dst_anchor(self):
        """Anchored rules (D, MS, W-MON, 2W-MON, 2MS) respect tz/DST edges (GH 5172)."""
        # 5172
        dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
        df = DataFrame([5], index=dti)
        assert_frame_equal(df.resample(rule='D').sum(),
                           DataFrame([5], index=df.index.normalize()))
        df.resample(rule='MS').sum()
        assert_frame_equal(
            df.resample(rule='MS').sum(),
            DataFrame([5], index=DatetimeIndex([datetime(2012, 11, 1)],
                                               tz='US/Eastern')))
        # 30-minute data across the Europe/Paris DST transition; the expected
        # values below are hand-computed bin boundaries for each rule.
        dti = date_range('2013-09-30', '2013-11-02', freq='30Min',
                         tz='Europe/Paris')
        values = range(dti.size)
        df = DataFrame({"a": values,
                        "b": values,
                        "c": values}, index=dti, dtype='int64')
        how = {"a": "min", "b": "max", "c": "count"}
        assert_frame_equal(
            df.resample("W-MON").agg(how)[["a", "b", "c"]],
            DataFrame({"a": [0, 48, 384, 720, 1056, 1394],
                       "b": [47, 383, 719, 1055, 1393, 1586],
                       "c": [48, 336, 336, 336, 338, 193]},
                      index=date_range('9/30/2013', '11/4/2013',
                                       freq='W-MON', tz='Europe/Paris')),
            'W-MON Frequency')
        assert_frame_equal(
            df.resample("2W-MON").agg(how)[["a", "b", "c"]],
            DataFrame({"a": [0, 48, 720, 1394],
                       "b": [47, 719, 1393, 1586],
                       "c": [48, 672, 674, 193]},
                      index=date_range('9/30/2013', '11/11/2013',
                                       freq='2W-MON', tz='Europe/Paris')),
            '2W-MON Frequency')
        assert_frame_equal(
            df.resample("MS").agg(how)[["a", "b", "c"]],
            DataFrame({"a": [0, 48, 1538],
                       "b": [47, 1537, 1586],
                       "c": [48, 1490, 49]},
                      index=date_range('9/1/2013', '11/1/2013',
                                       freq='MS', tz='Europe/Paris')),
            'MS Frequency')
        assert_frame_equal(
            df.resample("2MS").agg(how)[["a", "b", "c"]],
            DataFrame({"a": [0, 1538],
                       "b": [1537, 1586],
                       "c": [1538, 49]},
                      index=date_range('9/1/2013', '11/1/2013',
                                       freq='2MS', tz='Europe/Paris')),
            '2MS Frequency')
        df_daily = df['10/26/2013':'10/29/2013']
        assert_frame_equal(
            df_daily.resample("D").agg({"a": "min", "b": "max", "c": "count"})
            [["a", "b", "c"]],
            DataFrame({"a": [1248, 1296, 1346, 1394],
                       "b": [1295, 1345, 1393, 1441],
                       "c": [48, 50, 48, 48]},
                      index=date_range('10/26/2013', '10/29/2013',
                                       freq='D', tz='Europe/Paris')),
            'D Frequency')
    def test_resample_with_nat(self):
        """Rows with NaT index labels are excluded from the bins (GH 13020)."""
        # GH 13020
        index = DatetimeIndex([pd.NaT,
                               '1970-01-01 00:00:00',
                               pd.NaT,
                               '1970-01-01 00:00:01',
                               '1970-01-01 00:00:02'])
        frame = DataFrame([2, 3, 5, 7, 11], index=index)
        # expected means never include the values 2 and 5 at the NaT labels
        index_1s = DatetimeIndex(['1970-01-01 00:00:00',
                                  '1970-01-01 00:00:01',
                                  '1970-01-01 00:00:02'])
        frame_1s = DataFrame([3, 7, 11], index=index_1s)
        assert_frame_equal(frame.resample('1s').mean(), frame_1s)
        index_2s = DatetimeIndex(['1970-01-01 00:00:00',
                                  '1970-01-01 00:00:02'])
        frame_2s = DataFrame([5, 11], index=index_2s)
        assert_frame_equal(frame.resample('2s').mean(), frame_2s)
        index_3s = DatetimeIndex(['1970-01-01 00:00:00'])
        frame_3s = DataFrame([7], index=index_3s)
        assert_frame_equal(frame.resample('3s').mean(), frame_3s)
        assert_frame_equal(frame.resample('60s').mean(), frame_3s)
    def test_resample_timedelta_values(self):
        """timedelta64 dtype survives NaT introduced by upsampling (GH 13119)."""
        # GH 13119
        # check that timedelta dtype is preserved when NaT values are
        # introduced by the resampling
        times = timedelta_range('1 day', '4 day', freq='4D')
        df = DataFrame({'time': times}, index=times)
        times2 = timedelta_range('1 day', '4 day', freq='2D')
        exp = Series(times2, index=times2, name='time')
        exp.iloc[1] = pd.NaT
        res = df.resample('2D').first()['time']
        tm.assert_series_equal(res, exp)
        res = df['time'].resample('2D').first()
        tm.assert_series_equal(res, exp)
    def test_resample_datetime_values(self):
        """datetime64 dtype survives NaT introduced by upsampling (GH 13119)."""
        # GH 13119
        # check that datetime dtype is preserved when NaT values are
        # introduced by the resampling
        dates = [datetime(2016, 1, 15), datetime(2016, 1, 19)]
        df = DataFrame({'timestamp': dates}, index=dates)
        exp = Series([datetime(2016, 1, 15), pd.NaT, datetime(2016, 1, 19)],
                     index=date_range('2016-01-15', periods=3, freq='2D'),
                     name='timestamp')
        res = df.resample('2D').first()['timestamp']
        tm.assert_series_equal(res, exp)
        res = df['timestamp'].resample('2D').first()
        tm.assert_series_equal(res, exp)
class TestPeriodIndex(Base):
    """Resampling tests for PeriodIndex-backed Series/DataFrames."""
    # Called with self by the Base fixtures; returns the index constructor.
    _index_factory = lambda x: period_range
    @pytest.fixture
    def _series_name(self):
        # Name given to the series produced by the shared Base fixtures.
        return 'pi'
def create_series(self):
# TODO: replace calls to .create_series() by injecting the series
# fixture
i = period_range(datetime(2005, 1, 1),
datetime(2005, 1, 10), freq='D')
return Series(np.arange(len(i)), index=i, name='pi')
    @pytest.mark.parametrize('freq', ['2D', '1H', '2H'])
    @pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
    def test_asfreq(self, series_and_frame, freq, kind):
        """.asfreq() keeps a PeriodIndex unless kind='timestamp' (GH 12884, 15944)."""
        # GH 12884, 15944
        # make sure .asfreq() returns PeriodIndex (except kind='timestamp')
        obj = series_and_frame
        if kind == 'timestamp':
            expected = obj.to_timestamp().resample(freq).asfreq()
        else:
            start = obj.index[0].to_timestamp(how='start')
            # +1 period so the half-open range covers the last period fully
            end = (obj.index[-1] + 1).to_timestamp(how='start')
            new_index = date_range(start=start, end=end, freq=freq,
                                   closed='left')
            expected = obj.to_timestamp().reindex(new_index).to_period(freq)
        result = obj.resample(freq, kind=kind).asfreq()
        assert_almost_equal(result, expected)
    def test_asfreq_fill_value(self):
        """asfreq(fill_value=...) fills gaps created by upsampling (GH 3715)."""
        # test for fill value during resampling, issue 3715
        s = self.create_series()
        new_index = date_range(s.index[0].to_timestamp(how='start'),
                               (s.index[-1]).to_timestamp(how='start'),
                               freq='1H')
        expected = s.to_timestamp().reindex(new_index, fill_value=4.0)
        result = s.resample('1H', kind='timestamp').asfreq(fill_value=4.0)
        assert_series_equal(result, expected)
        # same check for the DataFrame path
        frame = s.to_frame('value')
        new_index = date_range(frame.index[0].to_timestamp(how='start'),
                               (frame.index[-1]).to_timestamp(how='start'),
                               freq='1H')
        expected = frame.to_timestamp().reindex(new_index, fill_value=3.0)
        result = frame.resample('1H', kind='timestamp').asfreq(fill_value=3.0)
        assert_frame_equal(result, expected)
    @pytest.mark.parametrize('freq', ['H', '12H', '2D', 'W'])
    @pytest.mark.parametrize('kind', [None, 'period', 'timestamp'])
    def test_selection(self, index, freq, kind):
        """resample(on=...)/(level=...) with a PeriodIndex raises NotImplementedError."""
        # This is a bug, these should be implemented
        # GH 14008
        rng = np.arange(len(index), dtype=np.int64)
        df = DataFrame({'date': index, 'a': rng},
                       index=pd.MultiIndex.from_arrays([rng, index],
                                                       names=['v', 'd']))
        with pytest.raises(NotImplementedError):
            df.resample(freq, on='date', kind=kind)
        with pytest.raises(NotImplementedError):
            df.resample(freq, level='d', kind=kind)
    # Exhaustive annual-upsample matrix: target frequency (D/B/M) x
    # convention (start/end) x fill method (ffill/bfill), all delegating
    # to _check_annual_upsample_cases below.
    def test_annual_upsample_D_s_f(self):
        self._check_annual_upsample_cases('D', 'start', 'ffill')
    def test_annual_upsample_D_e_f(self):
        self._check_annual_upsample_cases('D', 'end', 'ffill')
    def test_annual_upsample_D_s_b(self):
        self._check_annual_upsample_cases('D', 'start', 'bfill')
    def test_annual_upsample_D_e_b(self):
        self._check_annual_upsample_cases('D', 'end', 'bfill')
    def test_annual_upsample_B_s_f(self):
        self._check_annual_upsample_cases('B', 'start', 'ffill')
    def test_annual_upsample_B_e_f(self):
        self._check_annual_upsample_cases('B', 'end', 'ffill')
    def test_annual_upsample_B_s_b(self):
        self._check_annual_upsample_cases('B', 'start', 'bfill')
    def test_annual_upsample_B_e_b(self):
        self._check_annual_upsample_cases('B', 'end', 'bfill')
    def test_annual_upsample_M_s_f(self):
        self._check_annual_upsample_cases('M', 'start', 'ffill')
    def test_annual_upsample_M_e_f(self):
        self._check_annual_upsample_cases('M', 'end', 'ffill')
    def test_annual_upsample_M_s_b(self):
        self._check_annual_upsample_cases('M', 'start', 'bfill')
    def test_annual_upsample_M_e_b(self):
        self._check_annual_upsample_cases('M', 'end', 'bfill')
    def _check_annual_upsample_cases(self, targ, conv, meth, end='12/31/1991'):
        """Upsample an annual series to `targ` for every fiscal year-end month."""
        for month in MONTHS:
            ts = _simple_pts('1/1/1990', end, freq='A-%s' % month)
            result = getattr(ts.resample(targ, convention=conv), meth)()
            # reference: go through timestamps, asfreq-fill, back to periods
            expected = result.to_timestamp(targ, how=conv)
            expected = expected.asfreq(targ, meth).to_period()
            assert_series_equal(result, expected)
    def test_basic_downsample(self):
        """Monthly-to-annual downsample equals a groupby on the year."""
        ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
        result = ts.resample('a-dec').mean()
        expected = ts.groupby(ts.index.year).mean()
        expected.index = period_range('1/1/1990', '6/30/1995', freq='a-dec')
        assert_series_equal(result, expected)
        # this is ok
        assert_series_equal(ts.resample('a-dec').mean(), result)
        assert_series_equal(ts.resample('a').mean(), result)
    def test_not_subperiod(self):
        """Resampling between non-nested period rules raises ValueError."""
        # These are incompatible period rules for resampling
        ts = _simple_pts('1/1/1990', '6/30/1995', freq='w-wed')
        pytest.raises(ValueError, lambda: ts.resample('a-dec').mean())
        pytest.raises(ValueError, lambda: ts.resample('q-mar').mean())
        pytest.raises(ValueError, lambda: ts.resample('M').mean())
        pytest.raises(ValueError, lambda: ts.resample('w-thu').mean())
    @pytest.mark.parametrize('freq', ['D', '2D'])
    def test_basic_upsample(self, freq):
        """Annual-to-daily upsample with convention='end' forward-fills."""
        ts = _simple_pts('1/1/1990', '6/30/1995', freq='M')
        result = ts.resample('a-dec').mean()
        resampled = result.resample(freq, convention='end').ffill()
        expected = result.to_timestamp(freq, how='end')
        expected = expected.asfreq(freq, 'ffill').to_period(freq)
        assert_series_equal(resampled, expected)
    def test_upsample_with_limit(self):
        """ffill(limit=...) on an upsampled PeriodIndex honors the limit."""
        rng = period_range('1/1/2000', periods=5, freq='A')
        ts = Series(np.random.randn(len(rng)), rng)
        result = ts.resample('M', convention='end').ffill(limit=2)
        expected = ts.asfreq('M').reindex(result.index, method='ffill',
                                          limit=2)
        assert_series_equal(result, expected)
    def test_annual_upsample(self):
        """Annual-to-daily/monthly upsample: frame column matches series path."""
        ts = _simple_pts('1/1/1990', '12/31/1995', freq='A-DEC')
        df = DataFrame({'a': ts})
        rdf = df.resample('D').ffill()
        exp = df['a'].resample('D').ffill()
        assert_series_equal(rdf['a'], exp)
        rng = period_range('2000', '2003', freq='A-DEC')
        ts = Series([1, 2, 3, 4], index=rng)
        result = ts.resample('M').ffill()
        ex_index = period_range('2000-01', '2003-12', freq='M')
        expected = ts.asfreq('M', how='start').reindex(ex_index,
                                                       method='ffill')
        assert_series_equal(result, expected)
    def test_quarterly_upsample(self):
        """Quarterly upsampling for every fiscal month and both conventions."""
        targets = ['D', 'B', 'M']
        for month in MONTHS:
            ts = _simple_pts('1/1/1990', '12/31/1995', freq='Q-%s' % month)
            for targ, conv in product(targets, ['start', 'end']):
                result = ts.resample(targ, convention=conv).ffill()
                expected = result.to_timestamp(targ, how=conv)
                expected = expected.asfreq(targ, 'ffill').to_period()
                assert_series_equal(result, expected)
    def test_monthly_upsample(self):
        """Monthly-to-daily/business-daily upsampling, both conventions."""
        targets = ['D', 'B']
        ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
        for targ, conv in product(targets, ['start', 'end']):
            result = ts.resample(targ, convention=conv).ffill()
            expected = result.to_timestamp(targ, how=conv)
            expected = expected.asfreq(targ, 'ffill').to_period()
            assert_series_equal(result, expected)
    def test_resample_basic(self):
        """kind='period' resample skips NaNs when computing means (GH 3609)."""
        # GH3609
        s = Series(range(100), index=date_range(
            '20130101', freq='s', periods=100, name='idx'), dtype='float')
        s[10:30] = np.nan
        index = PeriodIndex([
            Period('2013-01-01 00:00', 'T'),
            Period('2013-01-01 00:01', 'T')], name='idx')
        expected = Series([34.5, 79.5], index=index)
        result = s.to_period().resample('T', kind='period').mean()
        assert_series_equal(result, expected)
        result2 = s.resample('T', kind='period').mean()
        assert_series_equal(result2, expected)
    @pytest.mark.parametrize('freq,expected_vals', [('M', [31, 29, 31, 9]),
                                                    ('2M', [31 + 29, 31 + 9])])
    def test_resample_count(self, freq, expected_vals):
        """count() over a daily PeriodIndex gives per-month tallies (GH 12774)."""
        # GH12774
        series = Series(1, index=pd.period_range(start='2000', periods=100))
        result = series.resample(freq).count()
        expected_index = pd.period_range(start='2000', freq=freq,
                                         periods=len(expected_vals))
        expected = Series(expected_vals, index=expected_index)
        assert_series_equal(result, expected)
def test_resample_same_freq(self):
# GH12770
series = Series(range(3), index=pd.period_range(
start='2000', periods=3, freq='M'))
expected = series
for method in resample_methods:
result = getattr(series.resample('M'), method)()
assert_series_equal(result, expected)
    def test_resample_incompat_freq(self):
        """Resampling M-periods to W raises IncompatibleFrequency (GH 12770)."""
        with pytest.raises(IncompatibleFrequency):
            Series(range(3), index=pd.period_range(
                start='2000', periods=3, freq='M')).resample('W').mean()
    def test_with_local_timezone_pytz(self):
        """Daily period resample of pytz-converted data lands on local days (gh-5430)."""
        # see gh-5430
        local_timezone = pytz.timezone('America/Los_Angeles')
        start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                         tzinfo=pytz.utc)
        # 1 day later
        end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                       tzinfo=pytz.utc)
        index = pd.date_range(start, end, freq='H')
        series = Series(1, index=index)
        series = series.tz_convert(local_timezone)
        result = series.resample('D', kind='period').mean()
        # Create the expected series
        # Index is moved back a day with the timezone conversion from UTC to
        # Pacific
        expected_index = (pd.period_range(start=start, end=end, freq='D') - 1)
        expected = Series(1, index=expected_index)
        assert_series_equal(result, expected)
    def test_with_local_timezone_dateutil(self):
        """Same as the pytz variant above, using dateutil timezones (gh-5430)."""
        # see gh-5430
        local_timezone = 'dateutil/America/Los_Angeles'
        start = datetime(year=2013, month=11, day=1, hour=0, minute=0,
                         tzinfo=dateutil.tz.tzutc())
        # 1 day later
        end = datetime(year=2013, month=11, day=2, hour=0, minute=0,
                       tzinfo=dateutil.tz.tzutc())
        index = pd.date_range(start, end, freq='H', name='idx')
        series = Series(1, index=index)
        series = series.tz_convert(local_timezone)
        result = series.resample('D', kind='period').mean()
        # Create the expected series
        # Index is moved back a day with the timezone conversion from UTC to
        # Pacific
        expected_index = (pd.period_range(start=start, end=end, freq='D',
                                          name='idx') - 1)
        expected = Series(1, index=expected_index)
        assert_series_equal(result, expected)
    def test_fill_method_and_how_upsample(self):
        """ffill() equals ffill-then-last when upsampling quarterly data (GH 2073)."""
        # GH2073
        s = Series(np.arange(9, dtype='int64'),
                   index=date_range('2010-01-01', periods=9, freq='Q'))
        last = s.resample('M').ffill()
        both = s.resample('M').ffill().resample('M').last().astype('int64')
        assert_series_equal(last, both)
    def test_weekly_upsample(self):
        """Weekly upsampling for every weekday anchor and both conventions."""
        targets = ['D', 'B']
        for day in DAYS:
            ts = _simple_pts('1/1/1990', '12/31/1995', freq='W-%s' % day)
            for targ, conv in product(targets, ['start', 'end']):
                result = ts.resample(targ, convention=conv).ffill()
                expected = result.to_timestamp(targ, how=conv)
                expected = expected.asfreq(targ, 'ffill').to_period()
                assert_series_equal(result, expected)
    def test_resample_to_timestamps(self):
        """kind='timestamp' resample matches converting to timestamps first."""
        ts = _simple_pts('1/1/1990', '12/31/1995', freq='M')
        result = ts.resample('A-DEC', kind='timestamp').mean()
        expected = ts.to_timestamp(how='end').resample('A-DEC').mean()
        assert_series_equal(result, expected)
    def test_resample_to_quarterly(self):
        """Annual-to-quarterly upsample, matching and mismatched fiscal months."""
        for month in MONTHS:
            ts = _simple_pts('1990', '1992', freq='A-%s' % month)
            quar_ts = ts.resample('Q-%s' % month).ffill()
            stamps = ts.to_timestamp('D', how='start')
            qdates = period_range(ts.index[0].asfreq('D', 'start'),
                                  ts.index[-1].asfreq('D', 'end'),
                                  freq='Q-%s' % month)
            expected = stamps.reindex(qdates.to_timestamp('D', 's'),
                                      method='ffill')
            expected.index = qdates
            assert_series_equal(quar_ts, expected)
        # conforms, but different month
        ts = _simple_pts('1990', '1992', freq='A-JUN')
        for how in ['start', 'end']:
            result = ts.resample('Q-MAR', convention=how).ffill()
            expected = ts.asfreq('Q-MAR', how=how)
            expected = expected.reindex(result.index, method='ffill')
            # .to_timestamp('D')
            # expected = expected.resample('Q-MAR').ffill()
            assert_series_equal(result, expected)
    def test_resample_fill_missing(self):
        """ffill() fills the years missing from a sparse annual PeriodIndex."""
        rng = PeriodIndex([2000, 2005, 2007, 2009], freq='A')
        s = Series(np.random.randn(4), index=rng)
        stamps = s.to_timestamp()
        filled = s.resample('A').ffill()
        expected = stamps.resample('A').ffill().to_period('A')
        assert_series_equal(filled, expected)
    def test_cant_fill_missing_dups(self):
        """Duplicate period labels make ffill-resampling raise."""
        rng = PeriodIndex([2000, 2005, 2005, 2007, 2007], freq='A')
        s = Series(np.random.randn(5), index=rng)
        pytest.raises(Exception, lambda: s.resample('A').ffill())
    @pytest.mark.parametrize('freq', ['5min'])
    @pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
    def test_resample_5minute(self, freq, kind):
        """Minutely-to-5min resample matches going through timestamps."""
        rng = period_range('1/1/2000', '1/5/2000', freq='T')
        ts = Series(np.random.randn(len(rng)), index=rng)
        expected = ts.to_timestamp().resample(freq).mean()
        if kind != 'timestamp':
            expected = expected.to_period(freq)
        result = ts.resample(freq, kind=kind).mean()
        assert_series_equal(result, expected)
    def test_upsample_daily_business_daily(self):
        """Upsample business-daily to daily, and daily to hourly."""
        ts = _simple_pts('1/1/2000', '2/1/2000', freq='B')
        result = ts.resample('D').asfreq()
        expected = ts.asfreq('D').reindex(period_range('1/3/2000', '2/1/2000'))
        assert_series_equal(result, expected)
        ts = _simple_pts('1/1/2000', '2/1/2000')
        result = ts.resample('H', convention='s').asfreq()
        exp_rng = period_range('1/1/2000', '2/1/2000 23:00', freq='H')
        expected = ts.asfreq('H', how='s').reindex(exp_rng)
        assert_series_equal(result, expected)
    def test_resample_irregular_sparse(self):
        """apply(len) on a time-sliced subset matches the full-series bins."""
        dr = date_range(start='1/1/2012', freq='5min', periods=1000)
        s = Series(np.array(100), index=dr)
        # subset the data.
        subset = s[:'2012-01-04 06:55']
        result = subset.resample('10min').apply(len)
        expected = s.resample('10min').apply(len).loc[result.index]
        assert_series_equal(result, expected)
    def test_resample_weekly_all_na(self):
        """W-WED to W-THU asfreq produces all-NaN; ffill recovers the values."""
        rng = date_range('1/1/2000', periods=10, freq='W-WED')
        ts = Series(np.random.randn(len(rng)), index=rng)
        result = ts.resample('W-THU').asfreq()
        assert result.isna().all()
        result = ts.resample('W-THU').asfreq().ffill()[:-1]
        expected = ts.asfreq('W-THU').ffill()
        assert_series_equal(result, expected)
    def test_resample_tz_localized(self):
        """Resampling tz-aware data matches the naive-then-relocalize result."""
        dr = date_range(start='2012-4-13', end='2012-5-1')
        ts = Series(lrange(len(dr)), dr)
        ts_utc = ts.tz_localize('UTC')
        ts_local = ts_utc.tz_convert('America/Los_Angeles')
        result = ts_local.resample('W').mean()
        # reference: strip the tz, resample naive, localize the result back
        ts_local_naive = ts_local.copy()
        ts_local_naive.index = [x.replace(tzinfo=None)
                                for x in ts_local_naive.index.to_pydatetime()]
        exp = ts_local_naive.resample(
            'W').mean().tz_localize('America/Los_Angeles')
        assert_series_equal(result, exp)
        # it works
        result = ts_local.resample('D').mean()
        # #2245
        idx = date_range('2001-09-20 15:59', '2001-09-20 16:00', freq='T',
                         tz='Australia/Sydney')
        s = Series([1, 2], index=idx)
        result = s.resample('D', closed='right', label='right').mean()
        ex_index = date_range('2001-09-21', periods=1, freq='D',
                              tz='Australia/Sydney')
        expected = Series([1.5], index=ex_index)
        assert_series_equal(result, expected)
        # for good measure
        result = s.resample('D', kind='period').mean()
        ex_index = period_range('2001-09-20', periods=1, freq='D')
        expected = Series([1.5], index=ex_index)
        assert_series_equal(result, expected)
        # GH 6397
        # comparing an offset that doesn't propagate tz's
        rng = date_range('1/1/2011', periods=20000, freq='H')
        rng = rng.tz_localize('EST')
        ts = DataFrame(index=rng)
        ts['first'] = np.random.randn(len(rng))
        ts['second'] = np.cumsum(np.random.randn(len(rng)))
        expected = DataFrame(
            {
                'first': ts.resample('A').sum()['first'],
                'second': ts.resample('A').mean()['second']},
            columns=['first', 'second'])
        result = ts.resample(
            'A').agg({'first': np.sum,
                      'second': np.mean}).reindex(columns=['first', 'second'])
        assert_frame_equal(result, expected)
    def test_closed_left_corner(self):
        """closed='left' binning is unaffected by a leading NaN (GH 1465)."""
        # #1465
        s = Series(np.random.randn(21),
                   index=date_range(start='1/1/2012 9:30',
                                    freq='1min', periods=21))
        s[0] = np.nan
        result = s.resample('10min', closed='left', label='right').mean()
        exp = s[1:].resample('10min', closed='left', label='right').mean()
        assert_series_equal(result, exp)
        result = s.resample('10min', closed='left', label='left').mean()
        exp = s[1:].resample('10min', closed='left', label='left').mean()
        ex_index = date_range(start='1/1/2012 9:30', freq='10min', periods=3)
        tm.assert_index_equal(result.index, ex_index)
        assert_series_equal(result, exp)
    def test_quarterly_resampling(self):
        """Quarterly-to-annual mean matches the timestamp round-trip."""
        rng = period_range('2000Q1', periods=10, freq='Q-DEC')
        ts = Series(np.arange(10), index=rng)
        result = ts.resample('A').mean()
        exp = ts.to_timestamp().resample('A').mean().to_period()
        assert_series_equal(result, exp)
    def test_resample_weekly_bug_1726(self):
        """Smoke test: W-MON resample of an OHLCV frame runs (GH 1726)."""
        # 8/6/12 is a Monday
        ind = DatetimeIndex(start="8/6/2012", end="8/26/2012", freq="D")
        n = len(ind)
        data = [[x] * 5 for x in range(n)]
        df = DataFrame(data, columns=['open', 'high', 'low', 'close', 'vol'],
                       index=ind)
        # it works!
        df.resample('W-MON', closed='left', label='left').first()
    def test_resample_with_dst_time_change(self):
        """12h resample + ffill spans a spring-forward DST change (GH 15549)."""
        # GH 15549
        index = pd.DatetimeIndex([1457537600000000000, 1458059600000000000],
                                 tz='UTC').tz_convert('America/Chicago')
        df = pd.DataFrame([1, 2], index=index)
        result = df.resample('12h', closed='right',
                             label='right').last().ffill()
        # note the -06:00 -> -05:00 offset switch at the 2016-03-13 DST change
        expected_index_values = ['2016-03-09 12:00:00-06:00',
                                 '2016-03-10 00:00:00-06:00',
                                 '2016-03-10 12:00:00-06:00',
                                 '2016-03-11 00:00:00-06:00',
                                 '2016-03-11 12:00:00-06:00',
                                 '2016-03-12 00:00:00-06:00',
                                 '2016-03-12 12:00:00-06:00',
                                 '2016-03-13 00:00:00-06:00',
                                 '2016-03-13 13:00:00-05:00',
                                 '2016-03-14 01:00:00-05:00',
                                 '2016-03-14 13:00:00-05:00',
                                 '2016-03-15 01:00:00-05:00',
                                 '2016-03-15 13:00:00-05:00']
        index = pd.DatetimeIndex(expected_index_values,
                                 tz='UTC').tz_convert('America/Chicago')
        expected = pd.DataFrame([1.0, 1.0, 1.0, 1.0, 1.0,
                                 1.0, 1.0, 1.0, 1.0, 1.0,
                                 1.0, 1.0, 2.0], index=index)
        assert_frame_equal(result, expected)
    def test_resample_bms_2752(self):
        """BMS resample anchors on the first business day (GH 2753)."""
        # GH2753
        foo = Series(index=pd.bdate_range('20000101', '20000201'))
        res1 = foo.resample("BMS").mean()
        res2 = foo.resample("BMS").mean().resample("B").mean()
        assert res1.index[0] == Timestamp('20000103')
        assert res1.index[0] == res2.index[0]
# def test_monthly_convention_span(self):
# rng = period_range('2000-01', periods=3, freq='M')
# ts = Series(np.arange(3), index=rng)
# # hacky way to get same thing
# exp_index = period_range('2000-01-01', '2000-03-31', freq='D')
# expected = ts.asfreq('D', how='end').reindex(exp_index)
# expected = expected.fillna(method='bfill')
# result = ts.resample('D', convention='span').mean()
# assert_series_equal(result, expected)
    def test_default_right_closed_label(self):
        """End-anchored target freqs default to closed='right', label='right'."""
        end_freq = ['D', 'Q', 'M', 'D']
        end_types = ['M', 'A', 'Q', 'W']
        for from_freq, to_freq in zip(end_freq, end_types):
            idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
            df = DataFrame(np.random.randn(len(idx), 2), idx)
            resampled = df.resample(to_freq).mean()
            assert_frame_equal(resampled, df.resample(to_freq, closed='right',
                                                      label='right').mean())
    def test_default_left_closed_label(self):
        """Start-anchored target freqs default to closed='left', label='left'."""
        others = ['MS', 'AS', 'QS', 'D', 'H']
        others_freq = ['D', 'Q', 'M', 'H', 'T']
        for from_freq, to_freq in zip(others_freq, others):
            idx = DatetimeIndex(start='8/15/2012', periods=100, freq=from_freq)
            df = DataFrame(np.random.randn(len(idx), 2), idx)
            resampled = df.resample(to_freq).mean()
            assert_frame_equal(resampled, df.resample(to_freq, closed='left',
                                                      label='left').mean())
    def test_all_values_single_bin(self):
        """A year of months collapsing into one annual bin equals the mean (GH 2070)."""
        # 2070
        index = period_range(start="2012-01-01", end="2012-12-31", freq="M")
        s = Series(np.random.randn(len(index)), index=index)
        result = s.resample("A").mean()
        tm.assert_almost_equal(result[0], s.mean())
    def test_evenly_divisible_with_no_extra_bins(self):
        """Evenly divisible spans must not produce a trailing empty bin (GH 4076)."""
        # 4076
        # when the frequency is evenly divisible, sometimes extra bins
        df = DataFrame(np.random.randn(9, 3),
                       index=date_range('2000-1-1', periods=9))
        result = df.resample('5D').mean()
        expected = pd.concat(
            [df.iloc[0:5].mean(), df.iloc[5:].mean()], axis=1).T
        expected.index = [Timestamp('2000-1-1'), Timestamp('2000-1-6')]
        assert_frame_equal(result, expected)
        # 28 days of data per key, duplicated index -> exactly 4 weekly bins
        index = date_range(start='2001-5-4', periods=28)
        df = DataFrame(
            [{'REST_KEY': 1, 'DLY_TRN_QT': 80, 'DLY_SLS_AMT': 90,
              'COOP_DLY_TRN_QT': 30, 'COOP_DLY_SLS_AMT': 20}] * 28 +
            [{'REST_KEY': 2, 'DLY_TRN_QT': 70, 'DLY_SLS_AMT': 10,
              'COOP_DLY_TRN_QT': 50, 'COOP_DLY_SLS_AMT': 20}] * 28,
            index=index.append(index)).sort_index()
        index = date_range('2001-5-4', periods=4, freq='7D')
        expected = DataFrame(
            [{'REST_KEY': 14, 'DLY_TRN_QT': 14, 'DLY_SLS_AMT': 14,
              'COOP_DLY_TRN_QT': 14, 'COOP_DLY_SLS_AMT': 14}] * 4,
            index=index)
        result = df.resample('7D').count()
        assert_frame_equal(result, expected)
        expected = DataFrame(
            [{'REST_KEY': 21, 'DLY_TRN_QT': 1050, 'DLY_SLS_AMT': 700,
              'COOP_DLY_TRN_QT': 560, 'COOP_DLY_SLS_AMT': 280}] * 4,
            index=index)
        result = df.resample('7D').sum()
        assert_frame_equal(result, expected)
@pytest.mark.parametrize('kind', ['period', None, 'timestamp'])
@pytest.mark.parametrize('agg_arg', ['mean', {'value': 'mean'}, ['mean']])
def test_loffset_returns_datetimeindex(self, frame, kind, agg_arg):
# make sure passing loffset returns DatetimeIndex in all cases
# basic method taken from Base.test_resample_loffset_arg_type()
df = frame
expected_means = [df.values[i:i + 2].mean()
for i in range(0, len(df.values), 2)]
expected_index = self.create_index(df.index[0],
periods=len(df.index) / 2,
freq='2D')
# loffset coerces PeriodIndex to DateTimeIndex
expected_index = expected_index.to_timestamp()
expected_index += timedelta(hours=2)
expected = DataFrame({'value': expected_means}, index=expected_index)
result_agg = df.resample('2D', loffset='2H', kind=kind).agg(agg_arg)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result_how = df.resample('2D', how=agg_arg, loffset='2H',
kind=kind)
if isinstance(agg_arg, list):
expected.columns = pd.MultiIndex.from_tuples([('value', 'mean')])
assert_frame_equal(result_agg, expected)
assert_frame_equal(result_how, expected)
    @pytest.mark.parametrize('freq, period_mult', [('H', 24), ('12H', 2)])
    @pytest.mark.parametrize('kind', [None, 'period'])
    def test_upsampling_ohlc(self, freq, period_mult, kind):
        """Upsampled ohlc() covers all sub-periods of the last period (GH 13083)."""
        # GH 13083
        pi = PeriodIndex(start='2000', freq='D', periods=10)
        s = Series(range(len(pi)), index=pi)
        expected = s.to_timestamp().resample(freq).ohlc().to_period(freq)
        # timestamp-based resampling doesn't include all sub-periods
        # of the last original period, so extend accordingly:
        new_index = PeriodIndex(start='2000', freq=freq,
                                periods=period_mult * len(pi))
        expected = expected.reindex(new_index)
        result = s.resample(freq, kind=kind).ohlc()
        assert_frame_equal(result, expected)
@pytest.mark.parametrize('periods, values',
[([pd.NaT, '1970-01-01 00:00:00', pd.NaT,
'1970-01-01 00:00:02', '1970-01-01 00:00:03'],
[2, 3, 5, 7, 11]),
([pd.NaT, pd.NaT, '1970-01-01 00:00:00', pd.NaT,
pd.NaT, pd.NaT, '1970-01-01 00:00:02',
'1970-01-01 00:00:03', pd.NaT, pd.NaT],
[1, 2, 3, 5, 6, 8, 7, 11, 12, 13])])
@pytest.mark.parametrize('freq, expected_values',
[('1s', [3, np.NaN, 7, 11]),
('2s', [3, int((7 + 11) / 2)]),
('3s', [int((3 + 7) / 2), 11])])
def test_resample_with_nat(self, periods, values, freq, expected_values):
# GH 13224
index = PeriodIndex(periods, freq='S')
frame = DataFrame(values, index=index)
expected_index = period_range('1970-01-01 00:00:00',
periods=len(expected_values), freq=freq)
expected = DataFrame(expected_values, index=expected_index)
result = frame.resample(freq).mean()
assert_frame_equal(result, expected)
    def test_resample_with_only_nat(self):
        """An all-NaT PeriodIndex resamples to an empty frame (GH 13224)."""
        # GH 13224
        pi = PeriodIndex([pd.NaT] * 3, freq='S')
        frame = DataFrame([2, 3, 5], index=pi)
        expected_index = PeriodIndex(data=[], freq=pi.freq)
        expected = DataFrame([], index=expected_index)
        result = frame.resample('1s').mean()
        assert_frame_equal(result, expected)
class TestTimedeltaIndex(Base):
    """Resampling tests for TimedeltaIndex-backed Series/DataFrames."""
    # Called with self by the Base fixtures; returns the index constructor.
    _index_factory = lambda x: timedelta_range
    @pytest.fixture
    def _index_start(self):
        # Start label for the shared index fixture.
        return '1 day'
    @pytest.fixture
    def _index_end(self):
        # End label for the shared index fixture.
        return '10 day'
    @pytest.fixture
    def _series_name(self):
        # Name given to the series produced by the shared Base fixtures.
        return 'tdi'
def create_series(self):
i = timedelta_range('1 day',
'10 day', freq='D')
return Series(np.arange(len(i)), index=i, name='tdi')
    def test_asfreq_bug(self):
        """asfreq() on a sparse TimedeltaIndex inserts NaN for missing bins."""
        import datetime as dt
        df = DataFrame(data=[1, 3],
                       index=[dt.timedelta(), dt.timedelta(minutes=3)])
        result = df.resample('1T').asfreq()
        expected = DataFrame(data=[1, np.nan, np.nan, 3],
                             index=timedelta_range('0 day',
                                                   periods=4,
                                                   freq='1T'))
        assert_frame_equal(result, expected)
class TestResamplerGrouper(object):
    """Tests for .groupby(...).resample(...) deferred-resampler behavior."""
def setup_method(self, method):
self.frame = DataFrame({'A': [1] * 20 + [2] * 12 + [3] * 8,
'B': np.arange(40)},
index=date_range('1/1/2000',
freq='s',
periods=40))
    def test_back_compat_v180(self):
        """Deprecated how=/fill_method= kwargs still work, with FutureWarning."""
        df = self.frame
        for how in ['sum', 'mean', 'prod', 'min', 'max', 'var', 'std']:
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = df.groupby('A').resample('4s', how=how)
            expected = getattr(df.groupby('A').resample('4s'), how)()
            assert_frame_equal(result, expected)
        with tm.assert_produces_warning(FutureWarning,
                                        check_stacklevel=False):
            result = df.groupby('A').resample('4s', how='mean',
                                              fill_method='ffill')
        expected = df.groupby('A').resample('4s').mean().ffill()
        assert_frame_equal(result, expected)
    def test_tab_complete_ipython6_warning(self, ip):
        """IPython tab-completion on a Resampler must not raise warnings."""
        from IPython.core.completer import provisionalcompleter
        code = dedent("""\
        import pandas.util.testing as tm
        s = tm.makeTimeSeries()
        rs = s.resample("D")
        """)
        ip.run_code(code)
        with tm.assert_produces_warning(None):
            with provisionalcompleter('ignore'):
                list(ip.Completer.completions('rs.', 1))
    def test_deferred_with_groupby(self):
        """groupby(...).resample(...) matches groupby(...).apply of a resample (GH 12486)."""
        # GH 12486
        # support deferred resample ops with groupby
        data = [['2010-01-01', 'A', 2], ['2010-01-02', 'A', 3],
                ['2010-01-05', 'A', 8], ['2010-01-10', 'A', 7],
                ['2010-01-13', 'A', 3], ['2010-01-01', 'B', 5],
                ['2010-01-03', 'B', 2], ['2010-01-04', 'B', 1],
                ['2010-01-11', 'B', 7], ['2010-01-14', 'B', 3]]
        df = DataFrame(data, columns=['date', 'id', 'score'])
        df.date = pd.to_datetime(df.date)
        f = lambda x: x.set_index('date').resample('D').asfreq()
        expected = df.groupby('id').apply(f)
        result = df.set_index('date').groupby('id').resample('D').asfreq()
        assert_frame_equal(result, expected)
        # same equivalence with ffill on a pre-indexed frame
        df = DataFrame({'date': pd.date_range(start='2016-01-01',
                                              periods=4,
                                              freq='W'),
                        'group': [1, 1, 2, 2],
                        'val': [5, 6, 7, 8]}).set_index('date')
        f = lambda x: x.resample('1D').ffill()
        expected = df.groupby('group').apply(f)
        result = df.groupby('group').resample('1D').ffill()
        assert_frame_equal(result, expected)
    def test_getitem(self):
        """Column selection commutes with groupby/resample in any order."""
        g = self.frame.groupby('A')
        expected = g.B.apply(lambda x: x.resample('2s').mean())
        result = g.resample('2s').B.mean()
        assert_series_equal(result, expected)
        result = g.B.resample('2s').mean()
        assert_series_equal(result, expected)
        result = g.resample('2s').mean().B
        assert_series_equal(result, expected)
    def test_getitem_multiple(self):
        """Repeated selection on a grouped resampler must not alias state (GH 13174)."""
        # GH 13174
        # multiple calls after selection causing an issue with aliasing
        data = [{'id': 1, 'buyer': 'A'}, {'id': 2, 'buyer': 'B'}]
        df = DataFrame(data, index=pd.date_range('2016-01-01', periods=2))
        r = df.groupby('id').resample('1D')
        result = r['buyer'].count()
        expected = Series([1, 1],
                          index=pd.MultiIndex.from_tuples(
                              [(1, Timestamp('2016-01-01')),
                               (2, Timestamp('2016-01-02'))],
                              names=['id', None]),
                          name='buyer')
        assert_series_equal(result, expected)
        # calling again must give the same answer
        result = r['buyer'].count()
        assert_series_equal(result, expected)
    def test_nearest(self):
        """upsample().nearest() picks the closest original value (GH 17496)."""
        # GH 17496
        # Resample nearest
        index = pd.date_range('1/1/2000', periods=3, freq='T')
        result = Series(range(3), index=index).resample('20s').nearest()
        expected = Series(
            [0, 0, 1, 1, 1, 2, 2],
            index=pd.DatetimeIndex(
                ['2000-01-01 00:00:00', '2000-01-01 00:00:20',
                 '2000-01-01 00:00:40', '2000-01-01 00:01:00',
                 '2000-01-01 00:01:20', '2000-01-01 00:01:40',
                 '2000-01-01 00:02:00'],
                dtype='datetime64[ns]',
                freq='20S'))
        assert_series_equal(result, expected)
    def test_methods(self):
        """Every resampler method on a grouped resampler matches per-group apply."""
        g = self.frame.groupby('A')
        r = g.resample('2s')
        # frame-returning reductions
        for f in ['first', 'last', 'median', 'sem', 'sum', 'mean',
                  'min', 'max']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_frame_equal(result, expected)
        # series-returning reduction
        for f in ['size']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_series_equal(result, expected)
        for f in ['count']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_frame_equal(result, expected)
        # series only
        for f in ['nunique']:
            result = getattr(r.B, f)()
            expected = g.B.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_series_equal(result, expected)
        # fill/upsample methods
        for f in ['nearest', 'backfill', 'ffill', 'asfreq']:
            result = getattr(r, f)()
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)())
            assert_frame_equal(result, expected)
        result = r.ohlc()
        expected = g.apply(lambda x: x.resample('2s').ohlc())
        assert_frame_equal(result, expected)
        # methods taking kwargs
        for f in ['std', 'var']:
            result = getattr(r, f)(ddof=1)
            expected = g.apply(lambda x: getattr(x.resample('2s'), f)(ddof=1))
            assert_frame_equal(result, expected)
    def test_apply(self):
        """apply on a grouped resampler equals the direct grouped reduction."""
        g = self.frame.groupby('A')
        r = g.resample('2s')
        # reduction
        expected = g.resample('2s').sum()
        def f(x):
            return x.resample('2s').sum()
        result = r.apply(f)
        assert_frame_equal(result, expected)
        def f(x):
            return x.resample('2s').apply(lambda y: y.sum())
        result = g.apply(f)
        assert_frame_equal(result, expected)
def test_apply_with_mutated_index(self):
# GH 15169
index = pd.date_range('1-1-2015', '12-31-15', freq='D')
df = DataFrame(data={'col1': np.random.rand(len(index))}, index=index)
def f(x):
s = Series([1, 2], index=['a', 'b'])
return s
expected = df.groupby(pd.Grouper(freq='M')).apply(f)
result = df.resample('M').apply(f)
assert_frame_equal(result, expected)
# A case for series
expected = df['col1'].groupby(pd.Grouper(freq='M')).apply(f)
result = df['col1'].resample('M').apply(f)
assert_series_equal(result, expected)
def test_resample_groupby_with_label(self):
# GH 13235
index = date_range('2000-01-01', freq='2D', periods=5)
df = DataFrame(index=index,
data={'col0': [0, 0, 1, 1, 2], 'col1': [1, 1, 1, 1, 1]}
)
result = df.groupby('col0').resample('1W', label='left').sum()
mi = [np.array([0, 0, 1, 2]),
pd.to_datetime(np.array(['1999-12-26', '2000-01-02',
'2000-01-02', '2000-01-02'])
)
]
mindex = pd.MultiIndex.from_arrays(mi, names=['col0', None])
expected = DataFrame(data={'col0': [0, 0, 2, 2], 'col1': [1, 1, 2, 1]},
index=mindex
)
assert_frame_equal(result, expected)
def test_consistency_with_window(self):
    # groupby().resample() and groupby().rolling() must both produce a
    # two-level index whose outer level is the grouping key.
    df = self.frame
    expected = pd.Int64Index([1, 2, 3], name='A')

    result = df.groupby('A').resample('2s').mean()
    assert result.index.nlevels == 2
    tm.assert_index_equal(result.index.levels[0], expected)

    result = df.groupby('A').rolling(20).mean()
    assert result.index.nlevels == 2
    tm.assert_index_equal(result.index.levels[0], expected)
def test_median_duplicate_columns(self):
# GH 14233
df = DataFrame(np.random.randn(20, 3),
columns=list('aaa'),
index=pd.date_range('2012-01-01', periods=20, freq='s'))
df2 = df.copy()
df2.columns = ['a', 'b', 'c']
expected = df2.resample('5s').median()
result = df.resample('5s').median()
expected.columns = result.columns
assert_frame_equal(result, expected)
class TestTimeGrouper(object):
    """Tests for grouping time series with (pd.)TimeGrouper."""

    def setup_method(self, method):
        # Fresh fixture per test: 1000 daily observations from 2000-01-01.
        self.ts = Series(np.random.randn(1000),
                         index=date_range('1/1/2000', periods=1000))
def test_apply(self):
    # pd.TimeGrouper is deprecated; instantiating it must warn.
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        grouper = pd.TimeGrouper(freq='A', label='right', closed='right')

    grouped = self.ts.groupby(grouper)

    # Take the top-3 values per yearly group, both via TimeGrouper and
    # via a plain group-by-year; the results must agree.
    f = lambda x: x.sort_values()[-3:]

    applied = grouped.apply(f)
    expected = self.ts.groupby(lambda x: x.year).apply(f)

    # Drop the group level added by apply so both sides are comparable.
    applied.index = applied.index.droplevel(0)
    expected.index = expected.index.droplevel(0)
    assert_series_equal(applied, expected)
def test_count(self):
    # Blank out every third value; count() must skip NaN.
    self.ts[::3] = np.nan

    expected = self.ts.groupby(lambda x: x.year).count()

    # Deprecated pd.TimeGrouper path.
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        grouper = pd.TimeGrouper(freq='A', label='right', closed='right')
    result = self.ts.groupby(grouper).count()
    expected.index = result.index
    assert_series_equal(result, expected)

    # Equivalent resample path.
    result = self.ts.resample('A').count()
    expected.index = result.index
    assert_series_equal(result, expected)
def test_numpy_reduction(self):
    # Resampling with a numpy reduction matches a plain yearly groupby.
    result = self.ts.resample('A', closed='right').prod()

    expected = self.ts.groupby(lambda x: x.year).agg(np.prod)
    expected.index = result.index

    assert_series_equal(result, expected)
def test_apply_iteration(self):
    # #2300
    N = 1000
    ind = pd.date_range(start="2000-01-01", freq="D", periods=N)
    df = DataFrame({'open': 1, 'close': 2}, index=ind)
    tg = TimeGrouper('M')

    _, grouper, _ = tg._get_grouper(df)

    # Errors
    # group_keys=False keeps the original index, so the applied result
    # must align exactly with df.index.
    grouped = df.groupby(grouper, group_keys=False)
    f = lambda df: df['close'] / df['open']

    # it works!
    result = grouped.apply(f)
    tm.assert_index_equal(result.index, df.index)
def test_panel_aggregation(self):
    # Legacy Panel support: TimeGrouper on axis=1 (major_axis).
    ind = pd.date_range('1/1/2000', periods=100)
    data = np.random.randn(2, len(ind), 4)

    # Panel is deprecated; swallow its warnings for this check.
    with catch_warnings(record=True):
        wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind,
                   minor_axis=['A', 'B', 'C', 'D'])

        tg = TimeGrouper('M', axis=1)
        _, grouper, _ = tg._get_grouper(wp)
        bingrouped = wp.groupby(grouper)
        binagg = bingrouped.mean()

        # agg() with an explicit mean must equal the built-in .mean().
        def f(x):
            assert (isinstance(x, Panel))
            return x.mean(1)

        result = bingrouped.agg(f)
        tm.assert_panel_equal(result, binagg)
def test_fails_on_no_datetime_index(self):
    # Grouping by TimeGrouper requires a datetime-like index; any other
    # index type must raise a TypeError naming the offending class.
    index_names = ('Int64Index', 'Index', 'Float64Index', 'MultiIndex')
    index_funcs = (tm.makeIntIndex,
                   tm.makeUnicodeIndex, tm.makeFloatIndex,
                   lambda m: tm.makeCustomIndex(m, 2))
    n = 2
    for name, func in zip(index_names, index_funcs):
        index = func(n)
        df = DataFrame({'a': np.random.randn(n)}, index=index)
        with tm.assert_raises_regex(TypeError,
                                    "Only valid with "
                                    "DatetimeIndex, TimedeltaIndex "
                                    "or PeriodIndex, but got an "
                                    "instance of %r" % name):
            df.groupby(TimeGrouper('D'))
def test_aaa_group_order(self):
    # GH 12840: TimeGrouper must perform stable sorts, so rows mapping
    # to the same day keep their original relative order.
    n = 20
    df = DataFrame(np.random.randn(n, 4), columns=['A', 'B', 'C', 'D'])
    days = [datetime(2013, 1, 1), datetime(2013, 1, 2),
            datetime(2013, 1, 3), datetime(2013, 1, 4),
            datetime(2013, 1, 5)]
    df['key'] = days * 4

    grouped = df.groupby(TimeGrouper(key='key', freq='D'))

    # Rows for day i are exactly every 5th row starting at offset i.
    for offset, day in enumerate(days):
        tm.assert_frame_equal(grouped.get_group(day), df[offset::5])
def test_aggregate_normal(self):
    # check TimeGrouper's aggregation is identical as normal groupby
    n = 20
    data = np.random.randn(n, 4)
    normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    normal_df['key'] = [1, 2, 3, 4, 5] * 4

    # Same data, but keyed on datetimes so TimeGrouper('D') forms the
    # same five groups as the integer key above.
    dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2),
                    datetime(2013, 1, 3), datetime(2013, 1, 4),
                    datetime(2013, 1, 5)] * 4

    normal_grouped = normal_df.groupby('key')
    dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

    # Frame-returning reductions.
    for func in ['min', 'max', 'prod', 'var', 'std', 'mean']:
        expected = getattr(normal_grouped, func)()
        dt_result = getattr(dt_grouped, func)()
        # Rewrite the integer index as the matching dates to compare.
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        assert_frame_equal(expected, dt_result)

    for func in ['count', 'sum']:
        expected = getattr(normal_grouped, func)()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        dt_result = getattr(dt_grouped, func)()
        assert_frame_equal(expected, dt_result)

    # GH 7453
    for func in ['size']:
        expected = getattr(normal_grouped, func)()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        dt_result = getattr(dt_grouped, func)()
        assert_series_equal(expected, dt_result)

    # GH 7453
    for func in ['first', 'last']:
        expected = getattr(normal_grouped, func)()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        dt_result = getattr(dt_grouped, func)()
        assert_frame_equal(expected, dt_result)

    # if TimeGrouper is used included, 'nth' doesn't work yet
    """
    for func in ['nth']:
        expected = getattr(normal_grouped, func)(3)
        expected.index = date_range(start='2013-01-01',
                                    freq='D', periods=5, name='key')
        dt_result = getattr(dt_grouped, func)(3)
        assert_frame_equal(expected, dt_result)
    """
def test_aggregate_with_nat(self):
    # check TimeGrouper's aggregation is identical as normal groupby
    n = 20
    data = np.random.randn(n, 4).astype('int64')
    normal_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    normal_df['key'] = [1, 2, np.nan, 4, 5] * 4

    # Same layout, with pd.NaT standing in for the NaN key: NaT rows
    # must be dropped, leaving an empty day-3 bin in the resampled view.
    dt_df = DataFrame(data, columns=['A', 'B', 'C', 'D'])
    dt_df['key'] = [datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT,
                    datetime(2013, 1, 4), datetime(2013, 1, 5)] * 4

    normal_grouped = normal_df.groupby('key')
    dt_grouped = dt_df.groupby(TimeGrouper(key='key', freq='D'))

    for func in ['min', 'max', 'sum', 'prod']:
        normal_result = getattr(normal_grouped, func)()
        dt_result = getattr(dt_grouped, func)()
        # The empty day-3 bin shows up as all-NaN in the datetime view.
        pad = DataFrame([[np.nan, np.nan, np.nan, np.nan]], index=[3],
                        columns=['A', 'B', 'C', 'D'])
        expected = normal_result.append(pad)
        expected = expected.sort_index()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        assert_frame_equal(expected, dt_result)

    for func in ['count']:
        normal_result = getattr(normal_grouped, func)()
        # count() of the empty bin is 0, not NaN.
        pad = DataFrame([[0, 0, 0, 0]], index=[3],
                        columns=['A', 'B', 'C', 'D'])
        expected = normal_result.append(pad)
        expected = expected.sort_index()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        dt_result = getattr(dt_grouped, func)()
        assert_frame_equal(expected, dt_result)

    for func in ['size']:
        normal_result = getattr(normal_grouped, func)()
        pad = Series([0], index=[3])
        expected = normal_result.append(pad)
        expected = expected.sort_index()
        expected.index = date_range(start='2013-01-01', freq='D',
                                    periods=5, name='key')
        dt_result = getattr(dt_grouped, func)()
        assert_series_equal(expected, dt_result)
        # GH 9925
        assert dt_result.index.name == 'key'

    # if NaT is included, 'var', 'std', 'mean', 'first','last'
    # and 'nth' doesn't work yet
def test_repr(self):
    # GH18203: repr must spell out every TimeGrouper attribute.
    grouper = TimeGrouper(key='A', freq='H')
    expected = ("TimeGrouper(key='A', freq=<Hour>, axis=0, sort=True, "
                "closed='left', label='left', how='mean', "
                "convention='e', base=0)")
    assert repr(grouper) == expected
| bsd-3-clause |
phoenixsbk/kvmmgr | packaging/setup/ovirt_engine_setup/engine/constants.py | 1 | 18900 | #
# ovirt-engine-setup -- ovirt engine setup
# Copyright (C) 2014-2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Constants."""
import os
import gettext
_ = lambda m: gettext.dgettext(message=m, domain='ovirt-engine-setup')
from otopi import util
from ovirt_engine_setup import constants as osetupcons
from ovirt_engine_setup.engine_common import constants as oengcommcons
from ovirt_engine_setup.constants import classproperty
from ovirt_engine_setup.constants import osetupattrsclass
from ovirt_engine_setup.constants import osetupattrs
from . import config
@util.export
class FileLocations(object):
    """Filesystem paths used by ovirt-engine-setup.

    Base locations come from the build-time ``config`` module; everything
    else is derived from them with os.path.join at import time.
    """

    # Well-known system prefixes.
    SYSCONFDIR = '/etc'
    LOCALSTATEDIR = '/var'
    DATADIR = '/usr/share'

    # Build-time engine locations.
    OVIRT_ENGINE_SYSCONFDIR = config.ENGINE_SYSCONFDIR
    OVIRT_ENGINE_PKIDIR = config.ENGINE_PKIDIR
    OVIRT_ENGINE_DATADIR = config.ENGINE_DATADIR
    OVIRT_ENGINE_LOCALSTATEDIR = config.ENGINE_LOCALSTATEDIR
    OVIRT_ENGINE_LOGDIR = config.ENGINE_LOG
    OVIRT_ENGINE_SERVICE_CONFIG = config.ENGINE_SERVICE_CONFIG
    OVIRT_ENGINE_SERVICE_CONFIG_DEFAULTS = \
        config.ENGINE_SERVICE_CONFIG_DEFAULTS
    OVIRT_ENGINE_NOTIFIER_SERVICE_CONFIG = \
        config.ENGINE_NOTIFIER_SERVICE_CONFIG

    OVIRT_ENGINE_BINDIR = os.path.join(
        OVIRT_ENGINE_DATADIR,
        'bin',
    )

    # Database scripts, backups and utilities.
    OVIRT_ENGINE_DB_DIR = os.path.join(
        OVIRT_ENGINE_DATADIR,
        'dbscripts',
    )
    # NOTE: 'SCHMA' is a historical typo kept for compatibility.
    OVIRT_ENGINE_DB_SCHMA_TOOL = os.path.join(
        OVIRT_ENGINE_DB_DIR,
        'schema.sh',
    )
    OVIRT_ENGINE_DB_BACKUP_DIR = os.path.join(
        OVIRT_ENGINE_LOCALSTATEDIR,
        'backups',
    )
    OVIRT_ENGINE_DB_MD5_DIR = os.path.join(
        OVIRT_ENGINE_LOCALSTATEDIR,
        'dbmd5',
    )
    OVIRT_ENGINE_DB_UTILS_DIR = os.path.join(
        OVIRT_ENGINE_DATADIR,
        'setup',
        'dbutils'
    )
    OVIRT_ENGINE_DB_VALIDATOR = os.path.join(
        OVIRT_ENGINE_DB_UTILS_DIR,
        'validatedb.sh'
    )
    OVIRT_ENGINE_TASKCLEANER = os.path.join(
        OVIRT_ENGINE_DB_UTILS_DIR,
        'taskcleaner.sh'
    )
    OVIRT_ENGINE_DB_CHANGE_OWNER = os.path.join(
        OVIRT_ENGINE_DB_UTILS_DIR,
        'changedbowner.sh'
    )

    # PKI layout: keys, certificates, private material and the helper
    # scripts that manage them.
    OVIRT_ENGINE_PKIKEYSDIR = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'keys',
    )
    OVIRT_ENGINE_PKICERTSDIR = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'certs',
    )
    OVIRT_ENGINE_PKIPRIVATEDIR = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'private',
    )
    OVIRT_ENGINE_PKI_CA_CREATE = os.path.join(
        OVIRT_ENGINE_BINDIR,
        'pki-create-ca.sh',
    )
    OVIRT_ENGINE_PKI_CA_ENROLL = os.path.join(
        OVIRT_ENGINE_BINDIR,
        'pki-enroll-pkcs12.sh',
    )
    OVIRT_ENGINE_PKI_PKCS12_EXTRACT = os.path.join(
        OVIRT_ENGINE_BINDIR,
        'pki-pkcs12-extract.sh',
    )
    OVIRT_ENGINE_PKI_ENGINE_STORE = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'engine.p12',
    )
    OVIRT_ENGINE_PKI_ENGINE_CERT = os.path.join(
        OVIRT_ENGINE_PKICERTSDIR,
        'engine.cer',
    )
    OVIRT_ENGINE_PKI_ENGINE_SSH_KEY = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'engine_id_rsa',
    )
    OVIRT_ENGINE_PKI_APACHE_STORE = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'apache.p12',
    )
    OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_STORE = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'websocket-proxy.p12',
    )
    OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_KEY = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'websocket-proxy.key.nopass',
    )
    OVIRT_ENGINE_PKI_REPORTS_KEY = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'reports.key.nopass',
    )
    OVIRT_ENGINE_PKI_JBOSS_STORE = os.path.join(
        OVIRT_ENGINE_PKIKEYSDIR,
        'jboss.p12',
    )
    OVIRT_ENGINE_PKI_JBOSS_CERT = os.path.join(
        OVIRT_ENGINE_PKICERTSDIR,
        'jboss.cer',
    )
    OVIRT_ENGINE_PKI_ENGINE_CA_CERT = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'ca.pem',
    )
    OVIRT_ENGINE_PKI_LOCAL_WEBSOCKET_PROXY_CERT = os.path.join(
        OVIRT_ENGINE_PKICERTSDIR,
        'websocket-proxy.cer',
    )
    OVIRT_ENGINE_PKI_ENGINE_TRUST_STORE = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        '.truststore',
    )
    OVIRT_ENGINE_PKI_CA_TEMPLATE = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'cacert.template.in',
    )
    OVIRT_ENGINE_PKI_CERT_TEMPLATE = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'cert.template.in',
    )
    OVIRT_ENGINE_PKI_CA_CERT_CONF = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'cacert.conf',
    )
    OVIRT_ENGINE_PKI_CERT_CONF = os.path.join(
        OVIRT_ENGINE_PKIDIR,
        'cert.conf',
    )
    OVIRT_ENGINE_PKI_ENGINE_CA_KEY = os.path.join(
        OVIRT_ENGINE_PKIPRIVATEDIR,
        'ca.pem',
    )

    # NFS / ISO domain export configuration.
    NFS_RHEL_CONFIG = os.path.join(
        SYSCONFDIR,
        'sysconfig',
        'nfs',
    )
    NFS_EXPORT_FILE = os.path.join(
        SYSCONFDIR,
        'exports',
    )
    NFS_EXPORT_DIR = os.path.join(
        SYSCONFDIR,
        'exports.d',
    )
    OVIRT_NFS_EXPORT_FILE = os.path.join(
        NFS_EXPORT_DIR,
        'ovirt-engine-iso-domain.exports'
    )
    ISO_DOMAIN_DEFAULT_NFS_MOUNT_POINT = os.path.join(
        LOCALSTATEDIR,
        'lib',
        'exports',
        'iso',
    )

    # Apache httpd proxy configuration.
    DIR_HTTPD = os.path.join(
        osetupcons.FileLocations.SYSCONFDIR,
        'httpd',
    )
    HTTPD_CONF_OVIRT_ENGINE = os.path.join(
        DIR_HTTPD,
        'conf.d',
        'z-ovirt-engine-proxy.conf',
    )
    HTTPD_CONF_OVIRT_ENGINE_TEMPLATE = os.path.join(
        osetupcons.FileLocations.OVIRT_SETUP_DATADIR,
        'conf',
        'ovirt-engine-proxy.conf.v2.in',
    )

    # Engine service drop-in configuration snippets written by setup.
    OVIRT_ENGINE_SERVICE_CONFIGD = '%s.d' % OVIRT_ENGINE_SERVICE_CONFIG
    OVIRT_ENGINE_SERVICE_CONFIG_DATABASE = os.path.join(
        OVIRT_ENGINE_SERVICE_CONFIGD,
        '10-setup-database.conf',
    )
    OVIRT_ENGINE_SERVICE_CONFIG_PROTOCOLS = os.path.join(
        OVIRT_ENGINE_SERVICE_CONFIGD,
        '10-setup-protocols.conf',
    )
    OVIRT_ENGINE_SERVICE_CONFIG_JBOSS = os.path.join(
        OVIRT_ENGINE_SERVICE_CONFIGD,
        '10-setup-jboss.conf',
    )
    OVIRT_ENGINE_SERVICE_CONFIG_PKI = os.path.join(
        OVIRT_ENGINE_SERVICE_CONFIGD,
        '10-setup-pki.conf',
    )
    OVIRT_ENGINE_NOTIFIER_SERVICE_CONFIGD = (
        '%s.d' % OVIRT_ENGINE_NOTIFIER_SERVICE_CONFIG
    )
    OVIRT_ENGINE_NOTIFIER_SERVICE_CONFIG_JBOSS = os.path.join(
        OVIRT_ENGINE_NOTIFIER_SERVICE_CONFIGD,
        '10-setup-jboss.conf',
    )
    OVIRT_ENGINE_SERVICE_CONFIG_JAVA = os.path.join(
        OVIRT_ENGINE_SERVICE_CONFIGD,
        '10-setup-java.conf',
    )
    OVIRT_ENGINE_UNINSTALL_DIR = os.path.join(
        OVIRT_ENGINE_SYSCONFDIR,
        'uninstall.d'
    )

    # All-in-one (AIO) host locations.
    AIO_VDSM_PATH = os.path.join(
        DATADIR,
        'vdsm',
    )
    AIO_STORAGE_DOMAIN_DEFAULT_DIR = os.path.join(
        LOCALSTATEDIR,
        'lib',
        'images',
    )
    AIO_POST_INSTALL_CONFIG = os.path.join(
        '%s.d' % osetupcons.FileLocations.OVIRT_OVIRT_SETUP_CONFIG_FILE,
        '20-setup-aio.conf'
    )
@util.export
class Defaults(object):
    """Default values for setup environment keys."""

    # Memory check thresholds (MB / percent of minimum).
    DEFAULT_SYSTEM_MEMCHECK_MINIMUM_MB = 4096
    DEFAULT_SYSTEM_MEMCHECK_RECOMMENDED_MB = 16384
    DEFAULT_SYSTEM_MEMCHECK_THRESHOLD = 90

    DEFAULT_CONFIG_APPLICATION_MODE = 'Both'
    DEFAULT_CONFIG_STORAGE_IS_LOCAL = False
    DEFAULT_ISO_DOMAIN_NAME = 'ISO_DOMAIN'
    DEFAULT_CLEAR_TASKS_WAIT_PERIOD = 20

    # Engine database connection defaults.
    DEFAULT_DB_HOST = 'localhost'
    DEFAULT_DB_PORT = 5432
    DEFAULT_DB_DATABASE = 'engine'
    DEFAULT_DB_USER = 'engine'
    DEFAULT_DB_PASSWORD = ''
    DEFAULT_DB_SECURED = False
    DEFAULT_DB_SECURED_HOST_VALIDATION = False
@util.export
class Stages(object):
    """Named otopi stage identifiers used by the engine setup plugins."""

    SYSTEM_NFS_CONFIG_AVAILABLE = 'osetup.system.nfs.available'
    CONFIG_ISO_DOMAIN_AVAILABLE = 'osetup.config.iso_domain.available'
    CORE_ENABLE = 'osetup.engine.core.enable'

    # All-in-one stages.
    AIO_CONFIG_AVAILABLE = 'osetup.aio.config.available'
    AIO_CONFIG_NOT_AVAILABLE = 'osetup.aio.config.not.available'
    AIO_CONFIG_STORAGE = 'osetup.aio.config.storage'
    AIO_CONFIG_SSH = 'osetup.aio.config.ssh'
    AIO_CONFIG_VDSM = 'osetup.aio.config.vdsm'

    MEMORY_CHECK = 'osetup.memory.check'
    CA_ALLOWED = 'osetup.engine.pki.ca.allow'
    CA_AVAILABLE = 'osetup.pki.ca.available'
    POSTGRES_PROVISIONING_ALLOWED = 'osetup.engine.provisioning.pgsql.allow'
    NFS_CONFIG_ALLOWED = 'osetup.engine.system.nfs.allow'
    APPMODE_ALLOWED = 'osetup.engine.config.appmode.allow'
    KDUMP_ALLOW = 'osetup.engine.kdump.allow'
    CONNECTION_ALLOW = 'osetup.engine.db.connection.allow'
@util.export
@util.codegen
class Const(object):
    """Miscellaneous engine-setup constants."""

    DOMAIN_INTERNAL = 'internal'
    ENGINE_PACKAGE_NAME = 'ovirt-engine'
    ENGINE_PACKAGE_SETUP_NAME = '%s-setup' % ENGINE_PACKAGE_NAME
    ENGINE_SERVICE_NAME = 'ovirt-engine'
    FENCE_KDUMP_LISTENER_SERVICE_NAME = 'ovirt-fence-kdump-listener'
    # Password protecting the PKCS#12 stores created by the PKI scripts.
    PKI_PASSWORD = 'mypass'
    MINIMUM_SPACE_ISODOMAIN_MB = 350
    ISO_DOMAIN_IMAGE_UID = '11111111-1111-1111-1111-111111111111'
    MAC_RANGE_BASE = '00:1a:4a'
    ENGINE_URI = '/ovirt-engine'
    # URI serving the CA certificate in PEM format.
    ENGINE_PKI_CA_URI = (
        '%s/services/pki-resource?'
        'resource=ca-certificate&'
        'format=X509-PEM-CA'
    ) % (
        ENGINE_URI,
    )
    UPGRADE_YUM_GROUP_NAME = 'ovirt-engine-3.4'
    ENGINE_DB_BACKUP_PREFIX = 'engine'

    @classproperty
    def ENGINE_DB_ENV_KEYS(self):
        # Mapping from generic database-env key names to the concrete
        # EngineDBEnv environment keys.
        return {
            'host': EngineDBEnv.HOST,
            'port': EngineDBEnv.PORT,
            'secured': EngineDBEnv.SECURED,
            'hostValidation': EngineDBEnv.SECURED_HOST_VALIDATION,
            'user': EngineDBEnv.USER,
            'password': EngineDBEnv.PASSWORD,
            'database': EngineDBEnv.DATABASE,
            'connection': EngineDBEnv.CONNECTION,
            'pgpassfile': EngineDBEnv.PGPASS_FILE,
            'newDatabase': EngineDBEnv.NEW_DATABASE,
        }

    @classproperty
    def DEFAULT_ENGINE_DB_ENV_KEYS(self):
        # Default values matching the keys above (no connection-state
        # entries, which have no sensible defaults).
        return {
            'host': Defaults.DEFAULT_DB_HOST,
            'port': Defaults.DEFAULT_DB_PORT,
            'secured': Defaults.DEFAULT_DB_SECURED,
            'hostValidation': Defaults.DEFAULT_DB_SECURED_HOST_VALIDATION,
            'user': Defaults.DEFAULT_DB_USER,
            'password': Defaults.DEFAULT_DB_PASSWORD,
            'database': Defaults.DEFAULT_DB_DATABASE,
        }
@util.export
@util.codegen
@osetupattrsclass
class EngineDBEnv(object):
    """Environment keys describing the engine database connection."""

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Engine database host'),
    )
    def HOST(self):
        return 'OVESETUP_DB/host'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Engine database port'),
    )
    def PORT(self):
        return 'OVESETUP_DB/port'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Engine database secured connection'),
    )
    def SECURED(self):
        return 'OVESETUP_DB/secured'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Engine database host name validation'),
    )
    def SECURED_HOST_VALIDATION(self):
        return 'OVESETUP_DB/securedHostValidation'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Engine database name'),
    )
    def DATABASE(self):
        return 'OVESETUP_DB/database'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Engine database user name'),
    )
    def USER(self):
        return 'OVESETUP_DB/user'

    @osetupattrs(
        answerfile=True,
        # Only persist the password to the answer file when postgres is
        # not being provisioned locally by this setup run.
        answerfile_condition=lambda env: not env.get(
            oengcommcons.ProvisioningEnv.POSTGRES_PROVISIONING_ENABLED
        ),
    )
    def PASSWORD(self):
        return 'OVESETUP_DB/password'

    # Runtime-only connection state (not prompted, not persisted).
    CONNECTION = 'OVESETUP_DB/connection'
    STATEMENT = 'OVESETUP_DB/statement'
    PGPASS_FILE = 'OVESETUP_DB/pgPassFile'
    NEW_DATABASE = 'OVESETUP_DB/newDatabase'

    @osetupattrs(
        answerfile=True,
    )
    def FIX_DB_VIOLATIONS(self):
        return 'OVESETUP_DB/fixDbViolations'
@util.export
@util.codegen
@osetupattrsclass
class CoreEnv(object):
    """Core engine enable/stop environment keys."""

    @osetupattrs(
        answerfile=True,
    )
    def ENGINE_SERVICE_STOP(self):
        return 'OVESETUP_CORE/engineStop'

    @osetupattrs(
        answerfile=True,
        postinstallfile=True,
        summary=True,
        description=_('Engine installation'),
    )
    def ENABLE(self):
        return 'OVESETUP_ENGINE_CORE/enable'
@util.export
@util.codegen
@osetupattrsclass
class SystemEnv(object):
    """System-level environment keys (memory check, NFS)."""

    @osetupattrs(
        answerfile=True,
    )
    def MEMCHECK_ENABLED(self):
        return 'OVESETUP_SYSTEM/memCheckEnabled'

    MEMCHECK_MINIMUM_MB = 'OVESETUP_SYSTEM/memCheckMinimumMB'
    MEMCHECK_RECOMMENDED_MB = 'OVESETUP_SYSTEM/memCheckRecommendedMB'
    MEMCHECK_THRESHOLD = 'OVESETUP_SYSTEM/memCheckThreshold'

    NFS_SERVICE_NAME = 'OVESETUP_SYSTEM/nfsServiceName'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('NFS setup'),
        # Only show in the summary when NFS configuration is enabled.
        summary_condition=lambda env: env.get(
            SystemEnv.NFS_CONFIG_ENABLED
        ),
    )
    def NFS_CONFIG_ENABLED(self):
        return 'OVESETUP_SYSTEM/nfsConfigEnabled'

    #
    # In 3.3/3.4.0 the NFS_CONFIG_ENABLED was in postinstall file
    # and now removed.
    # At first upgrade from these versions we should not consider
    # its value from environment.
    # This variable will not be available at these versions, and
    # will set to False in future runs to allow us to
    # consider the value of NFS_CONFIG_ENABLED in later setups.
    #
    @osetupattrs(
        postinstallfile=True,
    )
    def NFS_CONFIG_ENABLED_LEGACY_IN_POSTINSTALL(self):
        return 'OVESETUP_SYSTEM/nfsConfigEnabled_legacyInPostInstall'
@util.export
@util.codegen
@osetupattrsclass
class PKIEnv(object):
    """PKI-related environment keys."""

    STORE_PASS = 'OVESETUP_PKI/storePassword'
    COUNTRY = 'OVESETUP_PKI/country'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('PKI organization'),
    )
    def ORG(self):
        return 'OVESETUP_PKI/organization'

    ENGINE_SSH_PUBLIC_KEY = 'OVESETUP_PKI/sshPublicKey'
@util.export
@util.codegen
@osetupattrsclass
class ConfigEnv(object):
    """General engine configuration keys (ISO domain, FQDN, heap)."""

    @osetupattrs(
        postinstallfile=True,
    )
    def ISO_DOMAIN_EXISTS(self):
        return 'OVESETUP_CONFIG/isoDomainExists'

    @osetupattrs(
        postinstallfile=True,
    )
    def ISO_DOMAIN_SD_UUID(self):
        return 'OVESETUP_CONFIG/isoDomainSdUuid'

    @osetupattrs(
        postinstallfile=True,
    )
    def ISO_DOMAIN_STORAGE_DIR(self):
        return 'OVESETUP_CONFIG/isoDomainStorageDir'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('NFS mount point'),
        postinstallfile=True,
    )
    def ISO_DOMAIN_NFS_MOUNT_POINT(self):
        return 'OVESETUP_CONFIG/isoDomainMountPoint'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('NFS export ACL'),
    )
    def ISO_DOMAIN_NFS_ACL(self):
        return 'OVESETUP_CONFIG/isoDomainACL'

    @osetupattrs(
        answerfile=True,
        postinstallfile=True
    )
    def ISO_DOMAIN_NAME(self):
        return 'OVESETUP_CONFIG/isoDomainName'

    ISO_DOMAIN_DEFAULT_NFS_MOUNT_POINT = \
        'OVESETUP_CONFIG/isoDomainDefaultMountPoint'

    MAC_RANGE_POOL = 'OVESETUP_CONFIG/macRangePool'
    ENGINE_FQDN = 'OVESETUP_ENGINE_CONFIG/fqdn'

    # JVM heap sizing for the engine service.
    @osetupattrs(
        answerfile=True,
    )
    def ENGINE_HEAP_MIN(self):
        return 'OVESETUP_CONFIG/engineHeapMin'

    @osetupattrs(
        answerfile=True,
    )
    def ENGINE_HEAP_MAX(self):
        return 'OVESETUP_CONFIG/engineHeapMax'
@util.export
@util.codegen
@osetupattrsclass
class RPMDistroEnv(object):
    """Keys for RPM-based distributions (package lists, upgrade group)."""

    ENGINE_PACKAGES = 'OVESETUP_RPMDISTRO/enginePackages'
    ENGINE_SETUP_PACKAGES = 'OVESETUP_RPMDISTRO/engineSetupPackages'
    UPGRADE_YUM_GROUP = 'OVESETUP_RPMDISTRO/upgradeYumGroup'
@util.export
@util.codegen
@osetupattrsclass
class AIOEnv(object):
    """All-in-one (engine + VDSM on one host) environment keys."""

    ENABLE = 'OVESETUP_AIO/enable'
    CONTINUE_WITHOUT_AIO = 'OVESETUP_AIO/continueWithoutAIO'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Configure VDSM on this host'),
    )
    def CONFIGURE(self):
        return 'OVESETUP_AIO/configure'

    # Names of the local datacenter/cluster/host created for AIO.
    LOCAL_DATA_CENTER = 'OVESETUP_AIO/localDataCenter'
    LOCAL_CLUSTER = 'OVESETUP_AIO/localCluster'
    LOCAL_HOST = 'OVESETUP_AIO/localHost'
    VDSM_CPU = 'OVESETUP_AIO/vdsmCpu'
    SUPPORTED = 'OVESETUP_AIO/supported'
    STORAGE_DOMAIN_SD_UUID = 'OVESETUP_AIO/storageDomainSdUuid'
    STORAGE_DOMAIN_DEFAULT_DIR = 'OVESETUP_AIO/storageDomainDefaultDir'

    @osetupattrs(
        answerfile=True,
        summary=True,
        description=_('Local storage domain directory'),
    )
    def STORAGE_DOMAIN_DIR(self):
        return 'OVESETUP_AIO/storageDomainDir'

    @osetupattrs(
        answerfile=True,
        summary=False,
        description=_('Local storage domain name'),
    )
    def STORAGE_DOMAIN_NAME(self):
        return 'OVESETUP_AIO/storageDomainName'

    SSHD_PORT = 'OVESETUP_AIO/sshdPort'
    DEFAULT_SSH_PORT = 22
@util.export
class AIODefaults(object):
    """Default entity names created for an all-in-one deployment."""

    DEFAULT_LOCAL_DATA_CENTER = 'local_datacenter'
    DEFAULT_LOCAL_CLUSTER = 'local_cluster'
    DEFAULT_LOCAL_HOST = 'local_host'
    DEFAULT_STORAGE_DOMAIN_NAME = 'local_storage'
@util.export
@util.codegen
class AIOConst(object):
    """All-in-one constants."""

    # Minimum free space required for the local storage domain (MB).
    MINIMUM_SPACE_STORAGEDOMAIN_MB = 10240
@util.export
@util.codegen
@osetupattrsclass
class ApacheEnv(object):
    """Apache httpd related environment keys."""

    HTTPD_CONF_OVIRT_ENGINE = 'OVESETUP_APACHE/configFileOvirtEngine'
@util.export
@util.codegen
class AsyncTasksEnv(object):
    """Environment keys controlling async task cleanup during setup."""

    @osetupattrs(
        answerfile=True,
    )
    def CLEAR_TASKS(self):
        return 'OVESETUP_ASYNC/clearTasks'

    @osetupattrs(
        answerfile=True,
    )
    def CLEAR_TASKS_WAIT_PERIOD(self):
        return 'OVESETUP_ASYNC/clearTasksWait'
@util.export
@util.codegen
@osetupattrsclass
class RemoveEnv(object):
    """Environment keys for engine removal (ovirt-engine-remove)."""

    @osetupattrs(
        answerfile=True,
    )
    def REMOVE_ENGINE(self):
        return 'OVESETUP_REMOVE/removeEngine'

    @osetupattrs(
        answerfile=True,
    )
    def REMOVE_ENGINE_DATABASE(self):
        return 'OVESETUP_REMOVE/engineDatabase'
# vim: expandtab tabstop=4 shiftwidth=4
| apache-2.0 |
ProfessorKaos64/openlierox | tools/DedicatedServerVideo/gdata/finance/service.py | 261 | 8974 | #!/usr/bin/python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes to interact with the Google Finance server."""
__author__ = 'thesweeheng@gmail.com'
import gdata.service
import gdata.finance
import atom
class PortfolioQuery(gdata.service.Query):
    """A query object for the list of a user's portfolios.

    Supports the boolean 'returns' and 'positions' query parameters,
    which may be set either to the string 'true' or to True.
    """

    def returns(self):
        return self.get('returns', False)

    def set_returns(self, value):
        # Fixed: the original used identity checks ("value is 'true'"),
        # which is unreliable for string literals (interning-dependent)
        # and raises SyntaxWarning on modern CPython.
        if value == 'true' or value is True:
            self['returns'] = 'true'

    returns = property(returns, set_returns, doc="The returns query parameter")

    def positions(self):
        return self.get('positions', False)

    def set_positions(self, value):
        if value == 'true' or value is True:
            self['positions'] = 'true'

    positions = property(positions, set_positions,
                         doc="The positions query parameter")
class PositionQuery(gdata.service.Query):
    """A query object for the list of a user's positions in a portfolio.

    Supports the boolean 'returns' and 'transactions' query parameters,
    which may be set either to the string 'true' or to True.
    """

    def returns(self):
        return self.get('returns', False)

    def set_returns(self, value):
        # Fixed: the original used identity checks ("value is 'true'"),
        # which is unreliable for string literals (interning-dependent)
        # and raises SyntaxWarning on modern CPython.
        if value == 'true' or value is True:
            self['returns'] = 'true'

    returns = property(returns, set_returns,
                       doc="The returns query parameter")

    def transactions(self):
        return self.get('transactions', False)

    def set_transactions(self, value):
        if value == 'true' or value is True:
            self['transactions'] = 'true'

    transactions = property(transactions, set_transactions,
                            doc="The transactions query parameter")
class FinanceService(gdata.service.GDataService):
def __init__(self, email=None, password=None, source=None,
server='finance.google.com', **kwargs):
"""Creates a client for the Finance service.
Args:
email: string (optional) The user's email address, used for
authentication.
password: string (optional) The user's password.
source: string (optional) The name of the user's application.
server: string (optional) The name of the server to which a connection
will be opened. Default value: 'finance.google.com'.
**kwargs: The other parameters to pass to gdata.service.GDataService
constructor.
"""
gdata.service.GDataService.__init__(self,
email=email, password=password, service='finance', server=server,
**kwargs)
def GetPortfolioFeed(self, query=None):
uri = '/finance/feeds/default/portfolios'
if query:
uri = PortfolioQuery(feed=uri, params=query).ToUri()
return self.Get(uri, converter=gdata.finance.PortfolioFeedFromString)
def GetPositionFeed(self, portfolio_entry=None, portfolio_id=None,
query=None):
"""
Args:
portfolio_entry: PortfolioEntry (optional; see Notes)
portfolio_id: string (optional; see Notes) This may be obtained
from a PortfolioEntry's portfolio_id attribute.
query: PortfolioQuery (optional)
Notes:
Either a PortfolioEntry OR a portfolio ID must be provided.
"""
if portfolio_entry:
uri = portfolio_entry.GetSelfLink().href + '/positions'
elif portfolio_id:
uri = '/finance/feeds/default/portfolios/%s/positions' % portfolio_id
if query:
uri = PositionQuery(feed=uri, params=query).ToUri()
return self.Get(uri, converter=gdata.finance.PositionFeedFromString)
def GetTransactionFeed(self, position_entry=None,
portfolio_id=None, ticker_id=None):
"""
Args:
position_entry: PositionEntry (optional; see Notes)
portfolio_id: string (optional; see Notes) This may be obtained
from a PortfolioEntry's portfolio_id attribute.
ticker_id: string (optional; see Notes) This may be obtained from
a PositionEntry's ticker_id attribute. Alternatively it can
be constructed using the security's exchange and symbol,
e.g. 'NASDAQ:GOOG'
Notes:
Either a PositionEntry OR (a portfolio ID AND ticker ID) must
be provided.
"""
if position_entry:
uri = position_entry.GetSelfLink().href + '/transactions'
elif portfolio_id and ticker_id:
uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions' \
% (portfolio_id, ticker_id)
return self.Get(uri, converter=gdata.finance.TransactionFeedFromString)
def GetPortfolio(self, portfolio_id=None, query=None):
uri = '/finance/feeds/default/portfolios/%s' % portfolio_id
if query:
uri = PortfolioQuery(feed=uri, params=query).ToUri()
return self.Get(uri, converter=gdata.finance.PortfolioEntryFromString)
def AddPortfolio(self, portfolio_entry=None):
uri = '/finance/feeds/default/portfolios'
return self.Post(portfolio_entry, uri,
converter=gdata.finance.PortfolioEntryFromString)
def UpdatePortfolio(self, portfolio_entry=None):
uri = portfolio_entry.GetEditLink().href
return self.Put(portfolio_entry, uri,
converter=gdata.finance.PortfolioEntryFromString)
def DeletePortfolio(self, portfolio_entry=None):
uri = portfolio_entry.GetEditLink().href
return self.Delete(uri)
def GetPosition(self, portfolio_id=None, ticker_id=None, query=None):
uri = '/finance/feeds/default/portfolios/%s/positions/%s' \
% (portfolio_id, ticker_id)
if query:
uri = PositionQuery(feed=uri, params=query).ToUri()
return self.Get(uri, converter=gdata.finance.PositionEntryFromString)
def DeletePosition(self, position_entry=None,
portfolio_id=None, ticker_id=None, transaction_feed=None):
"""A position is deleted by deleting all its transactions.
Args:
position_entry: PositionEntry (optional; see Notes)
portfolio_id: string (optional; see Notes) This may be obtained
from a PortfolioEntry's portfolio_id attribute.
ticker_id: string (optional; see Notes) This may be obtained from
a PositionEntry's ticker_id attribute. Alternatively it can
be constructed using the security's exchange and symbol,
e.g. 'NASDAQ:GOOG'
transaction_feed: TransactionFeed (optional; see Notes)
Notes:
Either a PositionEntry OR (a portfolio ID AND ticker ID) OR
a TransactionFeed must be provided.
"""
if transaction_feed:
feed = transaction_feed
else:
if position_entry:
feed = self.GetTransactionFeed(position_entry=position_entry)
elif portfolio_id and ticker_id:
feed = self.GetTransactionFeed(
portfolio_id=portfolio_id, ticker_id=ticker_id)
for txn in feed.entry:
self.DeleteTransaction(txn)
return True
def GetTransaction(self, portfolio_id=None, ticker_id=None,
transaction_id=None):
uri = '/finance/feeds/default/portfolios/%s/positions/%s/transactions/%s' \
% (portfolio_id, ticker_id, transaction_id)
return self.Get(uri, converter=gdata.finance.TransactionEntryFromString)
def AddTransaction(self, transaction_entry=None, transaction_feed=None,
                   position_entry=None, portfolio_id=None, ticker_id=None):
    """Post a new transaction to a position's transaction collection.

    Args:
      transaction_entry: TransactionEntry (required)
      transaction_feed: TransactionFeed (optional; see Notes)
      position_entry: PositionEntry (optional; see Notes)
      portfolio_id: string (optional; see Notes) This may be obtained
          from a PortfolioEntry's portfolio_id attribute.
      ticker_id: string (optional; see Notes) This may be obtained from
          a PositionEntry's ticker_id attribute. Alternatively it can
          be constructed using the security's exchange and symbol,
          e.g. 'NASDAQ:GOOG'

    Notes:
      Either a TransactionFeed OR a PositionEntry OR (a portfolio ID AND
      ticker ID) must be provided.

    Raises:
      ValueError: if none of the argument combinations above is supplied.
          (Previously this case crashed with an UnboundLocalError because
          ``uri`` was never assigned.)
    """
    if transaction_feed:
        uri = transaction_feed.GetPostLink().href
    elif position_entry:
        uri = position_entry.GetSelfLink().href + '/transactions'
    elif portfolio_id and ticker_id:
        uri = ('/finance/feeds/default/portfolios/%s/positions/%s/transactions'
               % (portfolio_id, ticker_id))
    else:
        raise ValueError(
            'A transaction feed, a position entry, or a portfolio ID and '
            'ticker ID pair must be provided')
    return self.Post(transaction_entry, uri,
                     converter=gdata.finance.TransactionEntryFromString)
def UpdateTransaction(self, transaction_entry=None):
    """PUT the edited transaction entry back to its edit URI."""
    edit_uri = transaction_entry.GetEditLink().href
    return self.Put(transaction_entry, edit_uri,
                    converter=gdata.finance.TransactionEntryFromString)
def DeleteTransaction(self, transaction_entry=None):
    """Delete a transaction by issuing an HTTP DELETE to its edit link."""
    return self.Delete(transaction_entry.GetEditLink().href)
| lgpl-2.1 |
hnakamur/django | tests/generic_views/views.py | 286 | 7527 | from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.core.urlresolvers import reverse, reverse_lazy
from django.utils.decorators import method_decorator
from django.views import generic
from .models import Artist, Author, Book, BookSigning, Page
from .test_forms import AuthorForm, ContactForm
class CustomTemplateView(generic.TemplateView):
    """TemplateView that injects one extra key into the template context."""
    template_name = 'generic_views/about.html'

    def get_context_data(self, **kwargs):
        context = super(CustomTemplateView, self).get_context_data(**kwargs)
        context['key'] = 'value'
        return context
class ObjectDetail(generic.DetailView):
    """DetailView whose object is a plain dict instead of a model instance."""
    template_name = 'generic_views/detail.html'

    def get_object(self):
        return dict(foo='bar')
class ArtistDetail(generic.DetailView):
    # DetailView configured via an explicit queryset over all artists.
    queryset = Artist.objects.all()
class AuthorDetail(generic.DetailView):
    # DetailView configured via an explicit queryset over all authors.
    queryset = Author.objects.all()
class PageDetail(generic.DetailView):
    # DetailView that resolves its template name from a field on the object
    # (the Page instance's 'template' attribute).
    queryset = Page.objects.all()
    template_name_field = 'template'
class DictList(generic.ListView):
    """A ListView that doesn't use a model."""
    # The queryset is a plain list of dicts, exercising ListView's support
    # for arbitrary iterables.
    queryset = [
        {'first': 'John', 'last': 'Lennon'},
        {'first': 'Yoko', 'last': 'Ono'}
    ]
    template_name = 'generic_views/list.html'
class ArtistList(generic.ListView):
    # ListView over all artists with an explicit template.
    template_name = 'generic_views/list.html'
    queryset = Artist.objects.all()
class AuthorList(generic.ListView):
    # ListView over all authors, relying on the default template name.
    queryset = Author.objects.all()
class BookList(generic.ListView):
    # ListView configured via ``model`` rather than an explicit queryset.
    model = Book
class CustomPaginator(Paginator):
    # Paginator subclass that deliberately pins ``orphans`` to 2: whatever
    # value the caller passes in is ignored when delegating to Paginator.
    def __init__(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        super(CustomPaginator, self).__init__(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class AuthorListCustomPaginator(AuthorList):
    # AuthorList that overrides get_paginator() to pin ``orphans`` to 2,
    # ignoring the value passed in (mirrors CustomPaginator's behavior).
    paginate_by = 5

    def get_paginator(self, queryset, page_size, orphans=0, allow_empty_first_page=True):
        return super(AuthorListCustomPaginator, self).get_paginator(
            queryset,
            page_size,
            orphans=2,
            allow_empty_first_page=allow_empty_first_page)
class ContactView(generic.FormView):
    # FormView using a lazily reversed success URL (resolved at first use,
    # after URLConf loading).
    form_class = ContactForm
    success_url = reverse_lazy('authors_list')
    template_name = 'generic_views/form.html'
class ArtistCreate(generic.CreateView):
    # CreateView exposing every model field on the generated form.
    model = Artist
    fields = '__all__'
class NaiveAuthorCreate(generic.CreateView):
    # "Naive": configured with a queryset and no success_url override.
    queryset = Author.objects.all()
    fields = '__all__'
class TemplateResponseWithoutTemplate(generic.detail.SingleObjectTemplateResponseMixin, generic.View):
    # View that deliberately omits template_name to exercise the mixin's
    # error path.
    # we don't define the usual template_name here
    def __init__(self):
        # Dummy object, but attr is required by get_template_name()
        self.object = None
class AuthorCreate(generic.CreateView):
    # CreateView with a hard-coded success URL.
    model = Author
    success_url = '/list/authors/'
    fields = '__all__'
class SpecializedAuthorCreate(generic.CreateView):
    """CreateView with an explicit form class, template and context name,
    redirecting to the new author's detail page on success."""
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        new_author_pk = self.object.id
        return reverse('author_detail', args=[new_author_pk])
class AuthorCreateRestricted(AuthorCreate):
    # Same as AuthorCreate, but POST requires an authenticated user
    # (decorator applied to the unbound parent method).
    post = method_decorator(login_required)(AuthorCreate.post)
class ArtistUpdate(generic.UpdateView):
    # UpdateView exposing every model field on the generated form.
    model = Artist
    fields = '__all__'
class NaiveAuthorUpdate(generic.UpdateView):
    # "Naive": configured with a queryset and no success_url override.
    queryset = Author.objects.all()
    fields = '__all__'
class AuthorUpdate(generic.UpdateView):
    # UpdateView with a hard-coded success URL.
    model = Author
    success_url = '/list/authors/'
    fields = '__all__'
class OneAuthorUpdate(generic.UpdateView):
    # UpdateView that always edits the author with pk=1, regardless of any
    # URL kwargs (get_object override ignores them).
    success_url = '/list/authors/'
    fields = '__all__'

    def get_object(self):
        return Author.objects.get(pk=1)
class SpecializedAuthorUpdate(generic.UpdateView):
    # UpdateView with an explicit form class, template and context name,
    # redirecting to the edited author's detail page on success.
    model = Author
    form_class = AuthorForm
    template_name = 'generic_views/form.html'
    context_object_name = 'thingy'

    def get_success_url(self):
        return reverse('author_detail', args=[self.object.id])
class NaiveAuthorDelete(generic.DeleteView):
    # "Naive": configured with a queryset and no success_url override.
    queryset = Author.objects.all()
class AuthorDelete(generic.DeleteView):
    # DeleteView with a hard-coded success URL.
    model = Author
    success_url = '/list/authors/'
class SpecializedAuthorDelete(generic.DeleteView):
    # DeleteView with explicit template, context name and a lazily reversed
    # success URL.
    queryset = Author.objects.all()
    template_name = 'generic_views/confirm_delete.html'
    context_object_name = 'thingy'
    success_url = reverse_lazy('authors_list')
class BookConfig(object):
    # Shared configuration mixin for the Book date-based views below.
    queryset = Book.objects.all()
    date_field = 'pubdate'
class BookArchive(BookConfig, generic.ArchiveIndexView):
    # ArchiveIndexView over Book.pubdate (configuration from BookConfig).
    pass
class BookYearArchive(BookConfig, generic.YearArchiveView):
    # YearArchiveView over Book.pubdate (configuration from BookConfig).
    pass
class BookMonthArchive(BookConfig, generic.MonthArchiveView):
    # MonthArchiveView over Book.pubdate (configuration from BookConfig).
    pass
class BookWeekArchive(BookConfig, generic.WeekArchiveView):
    # WeekArchiveView over Book.pubdate (configuration from BookConfig).
    pass
class BookDayArchive(BookConfig, generic.DayArchiveView):
    # DayArchiveView over Book.pubdate (configuration from BookConfig).
    pass
class BookTodayArchive(BookConfig, generic.TodayArchiveView):
    # TodayArchiveView over Book.pubdate (configuration from BookConfig).
    pass
class BookDetail(BookConfig, generic.DateDetailView):
    # DateDetailView over Book.pubdate (configuration from BookConfig).
    pass
class AuthorGetQuerySetFormView(generic.edit.ModelFormMixin):
    # ModelFormMixin that supplies its model via get_queryset() rather than
    # a ``model``/``queryset`` attribute.
    fields = '__all__'

    def get_queryset(self):
        return Author.objects.all()
class BookDetailGetObjectCustomQueryset(BookDetail):
    """BookDetail whose get_object() passes a pk-filtered queryset to super."""

    def get_object(self, queryset=None):
        pk_filtered = Book.objects.filter(pk=self.kwargs['pk'])
        return super(BookDetailGetObjectCustomQueryset, self).get_object(
            queryset=pk_filtered)
class CustomMultipleObjectMixinView(generic.list.MultipleObjectMixin, generic.View):
    # MultipleObjectMixin over a plain list of dicts rather than a queryset.
    queryset = [
        {'name': 'John'},
        {'name': 'Yoko'},
    ]

    def get(self, request):
        # Only stores the object list; deliberately returns no response
        # (None) — presumably the caller inspects ``object_list`` directly.
        self.object_list = self.get_queryset()
class CustomContextView(generic.detail.SingleObjectMixin, generic.View):
    # SingleObjectMixin whose hooks are overridden with fixed values.
    model = Book
    object = Book(name='dummy')  # class-level unsaved instance

    def get_object(self):
        # Always serves a fresh unsaved Book, regardless of URL kwargs.
        return Book(name="dummy")

    def get_context_data(self, **kwargs):
        # Seed the context with a custom key (caller-supplied kwargs win on
        # collision), then let SingleObjectMixin add its standard entries.
        context = {'custom_key': 'custom_value'}
        context.update(kwargs)
        return super(CustomContextView, self).get_context_data(**context)

    def get_context_object_name(self, obj):
        # Fixed context name instead of one derived from the model.
        return "test_name"
class CustomSingleObjectView(generic.detail.SingleObjectMixin, generic.View):
    # SingleObjectMixin with a pre-assigned (unsaved) object at class level.
    model = Book
    object = Book(name="dummy")
class BookSigningConfig(object):
    """Shared configuration mixin for the BookSigning date-based views."""
    model = BookSigning
    date_field = 'event_date'

    # use the same templates as for books
    def get_template_names(self):
        suffix = self.template_name_suffix
        return ['generic_views/book%s.html' % suffix]
class BookSigningArchive(BookSigningConfig, generic.ArchiveIndexView):
    # ArchiveIndexView over BookSigning.event_date.
    pass
class BookSigningYearArchive(BookSigningConfig, generic.YearArchiveView):
    # YearArchiveView over BookSigning.event_date.
    pass
class BookSigningMonthArchive(BookSigningConfig, generic.MonthArchiveView):
    # MonthArchiveView over BookSigning.event_date.
    pass
class BookSigningWeekArchive(BookSigningConfig, generic.WeekArchiveView):
    # WeekArchiveView over BookSigning.event_date.
    pass
class BookSigningDayArchive(BookSigningConfig, generic.DayArchiveView):
    # DayArchiveView over BookSigning.event_date.
    pass
class BookSigningTodayArchive(BookSigningConfig, generic.TodayArchiveView):
    # TodayArchiveView over BookSigning.event_date.
    pass
class BookSigningDetail(BookSigningConfig, generic.DateDetailView):
    # DateDetailView over BookSigning.event_date, exposed as 'book' in the
    # context (so the shared book templates keep working).
    context_object_name = 'book'
class NonModel(object):
    # Minimal model stand-in: provides only an ``id`` and a ``_meta``
    # attribute so code that touches model metadata finds something there.
    id = "non_model_1"
    _meta = None
class NonModelDetail(generic.DetailView):
    # DetailView whose object is a NonModel instance rather than a real
    # model; get_object ignores the queryset entirely.
    template_name = 'generic_views/detail.html'
    model = NonModel

    def get_object(self, queryset=None):
        return NonModel()
class ObjectDoesNotExistDetail(generic.DetailView):
    # NOTE(review): ``does_not_exist`` is not an attribute Book is shown to
    # define — presumably this queryset is intentionally broken so that
    # accessing it raises; confirm against the tests that use this view.
    def get_queryset(self):
        return Book.does_not_exist.all()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.