src
stringlengths 721
1.04M
|
|---|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pytest
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import WebDriverException
class TestFormHandling(object):
    """Browser-level tests for HTML form handling: submitting forms,
    clicking submit/image buttons, toggling checkboxes and radio buttons,
    and entering/clearing text in inputs and textareas."""

    def _assert_arrived(self, driver):
        # Common post-submission check: allow the navigation to finish,
        # then verify we landed on the expected target page.
        driver.implicitly_wait(5)
        assert driver.title == "We Arrive Here"

    def _assert_checky_toggles(self, driver, pages):
        # Clicking the "checky" checkbox flips its selected state each time.
        pages.load("formPage.html")
        box = driver.find_element_by_id("checky")
        assert box.is_selected() is False
        box.click()
        assert box.is_selected() is True
        box.click()
        assert box.is_selected() is False

    def _assert_peas_selectable(self, driver, pages):
        # Clicking the "peas" radio button selects it.
        pages.load("formPage.html")
        radio = driver.find_element_by_id("peas")
        assert radio.is_selected() is False
        radio.click()
        assert radio.is_selected() is True

    def _assert_clears(self, driver, pages, element_id):
        # Typing into the element then clearing it leaves its value empty.
        pages.load("formPage.html")
        element = driver.find_element_by_id(element_id)
        element.send_keys("Some text")
        assert len(element.get_attribute("value")) > 0
        element.clear()
        assert len(element.get_attribute("value")) == 0

    def testShouldClickOnSubmitInputElements(self, driver, pages):
        pages.load("formPage.html")
        driver.find_element_by_id("submitButton").click()
        self._assert_arrived(driver)

    def testClickingOnUnclickableElementsDoesNothing(self, driver, pages):
        # Clicking the body element is a no-op; the test passes as long as
        # no exception is raised.
        pages.load("formPage.html")
        driver.find_element_by_xpath("//body").click()

    def testShouldBeAbleToClickImageButtons(self, driver, pages):
        pages.load("formPage.html")
        driver.find_element_by_id("imageButton").click()
        self._assert_arrived(driver)

    def testShouldBeAbleToSubmitForms(self, driver, pages):
        pages.load("formPage.html")
        driver.find_element_by_name("login").submit()
        self._assert_arrived(driver)

    def testShouldSubmitAFormWhenAnyInputElementWithinThatFormIsSubmitted(self, driver, pages):
        pages.load("formPage.html")
        driver.find_element_by_id("checky").submit()
        self._assert_arrived(driver)

    def testShouldSubmitAFormWhenAnyElementWihinThatFormIsSubmitted(self, driver, pages):
        pages.load("formPage.html")
        driver.find_element_by_xpath("//form/p").submit()
        self._assert_arrived(driver)

    def testShouldNotBeAbleToSubmitAFormThatDoesNotExist(self, driver, pages):
        pages.load("formPage.html")
        with pytest.raises(NoSuchElementException):
            driver.find_element_by_name("there is no spoon").submit()

    def testShouldBeAbleToEnterTextIntoATextAreaBySettingItsValue(self, driver, pages):
        pages.load("javascriptPage.html")
        area = driver.find_element_by_id("keyUpArea")
        typed = "Brie and cheddar"
        area.send_keys(typed)
        assert area.get_attribute("value") == typed

    def testShouldEnterDataIntoFormFields(self, driver, pages):
        pages.load("xhtmlTest.html")
        locator = "//form[@name='someForm']/input[@id='username']"
        field = driver.find_element_by_xpath(locator)
        assert field.get_attribute("value") == "change"
        field.clear()
        field.send_keys("some text")
        # Re-find the element to read back the freshly typed value.
        field = driver.find_element_by_xpath(locator)
        assert field.get_attribute("value") == "some text"

    def testShouldBeAbleToSelectACheckBox(self, driver, pages):
        self._assert_checky_toggles(driver, pages)

    def testShouldToggleTheCheckedStateOfACheckbox(self, driver, pages):
        self._assert_checky_toggles(driver, pages)

    def testTogglingACheckboxShouldReturnItsCurrentState(self, driver, pages):
        self._assert_checky_toggles(driver, pages)

    def testShouldBeAbleToSelectARadioButton(self, driver, pages):
        self._assert_peas_selectable(driver, pages)

    def testShouldBeAbleToSelectARadioButtonByClickingOnIt(self, driver, pages):
        self._assert_peas_selectable(driver, pages)

    def testShouldReturnStateOfRadioButtonsBeforeInteration(self, driver, pages):
        pages.load("formPage.html")
        assert driver.find_element_by_id("cheese_and_peas").is_selected() is True
        assert driver.find_element_by_id("cheese").is_selected() is False

    # [ExpectedException(typeof(NotImplementedException))]
    # def testShouldThrowAnExceptionWhenTogglingTheStateOfARadioButton(self, driver, pages):
    #     pages.load("formPage.html")
    #     radioButton = driver.find_element_by_id("cheese"))
    #     radioButton.click()

    # [IgnoreBrowser(Browser.IE, "IE allows toggling of an option not in a multiselect")]
    # [ExpectedException(typeof(NotImplementedException))]
    # def testTogglingAnOptionShouldThrowAnExceptionIfTheOptionIsNotInAMultiSelect(self, driver, pages):
    #     pages.load("formPage.html")
    #     select = driver.find_element_by_name("selectomatic"))
    #     option = select.find_elements_by_tag_name("option"))[0]
    #     option.click()

    def testTogglingAnOptionShouldToggleOptionsInAMultiSelect(self, driver, pages):
        pages.load("formPage.html")
        multi = driver.find_element_by_name("multi")
        option = multi.find_elements_by_tag_name("option")[0]
        was_selected = option.is_selected()
        option.click()
        assert not was_selected == option.is_selected()
        option.click()
        assert was_selected == option.is_selected()

    def testShouldThrowAnExceptionWhenSelectingAnUnselectableElement(self, driver, pages):
        pages.load("formPage.html")
        element = driver.find_element_by_xpath("//title")
        with pytest.raises(WebDriverException):
            element.click()

    def testSendingKeyboardEventsShouldAppendTextInInputs(self, driver, pages):
        pages.load("formPage.html")
        element = driver.find_element_by_id("working")
        element.send_keys("Some")
        assert element.get_attribute("value") == "Some"
        element.send_keys(" text")
        assert element.get_attribute("value") == "Some text"

    def testShouldBeAbleToClearTextFromInputElements(self, driver, pages):
        self._assert_clears(driver, pages, "working")

    def testEmptyTextBoxesShouldReturnAnEmptyStringNotNull(self, driver, pages):
        pages.load("formPage.html")
        assert driver.find_element_by_id("working").get_attribute("value") == ""
        assert driver.find_element_by_id("emptyTextArea").get_attribute("value") == ""

    def testShouldBeAbleToClearTextFromTextAreas(self, driver, pages):
        self._assert_clears(driver, pages, "withText")

    def testRadioShouldNotBeSelectedAfterSelectingSibling(self, driver, pages):
        # Radio buttons in the same group are mutually exclusive.
        pages.load("formPage.html")
        cheese = driver.find_element_by_id("cheese")
        peas = driver.find_element_by_id("peas")
        cheese.click()
        assert cheese.is_selected() is True
        assert peas.is_selected() is False
        peas.click()
        assert cheese.is_selected() is False
        assert peas.is_selected() is True
|
#coding: iso-8859-1
## Copyright 2005 Luc Saffre
## This file is part of the Lino project.
## Lino is free software; you can redistribute it and/or modify it
## under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
## Lino is distributed in the hope that it will be useful, but WITHOUT
## ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
## or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
## License for more details.
## You should have received a copy of the GNU General Public License
## along with Lino; if not, write to the Free Software Foundation,
## Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from lino.forms import gui
from lino.adamo.ddl import Schema
from lino.apps.spz import tables
class SPZ(Schema):
    """Application schema for the Lino/SPZ application: registers the
    application's tables and builds the main-menu form."""
    name = "Lino/SPZ"
    version = "0.0.1"
    copyright = """\
Copyright (c) 2005 Luc Saffre.
This software comes with ABSOLUTELY NO WARRANTY and is
distributed under the terms of the GNU General Public License.
See file COPYING.txt for more information."""

    def setupSchema(self):
        # Register every table class declared by the tables module.
        for table_class in tables.TABLES:
            self.addTable(table_class)

    def showMainForm(self, sess):
        # Build and display the main menu form for this session.
        form = sess.form(
            label="Main menu",
            doc="""\
This is the SPZ main menu.
""" + ("\n" * 10))
        menu = form.addMenu("s", "&Stammdaten")
        menu.addItem("a", label="&Akten").setHandler(
            sess.showViewGrid, tables.Akten)
        self.addProgramMenu(sess, form)
        form.addOnClose(sess.close)
        form.show()
if __name__ == '__main__':
    # Script entry point: start the SPZ application under the default GUI.
    application = SPZ()
    application.quickStartup()
    gui.run(application)
|
#!/usr/bin/env python3
#
#===- format_diff.py - Diff Reformatter ----*- python3 -*--===#
#
# This file is licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===------------------------------------------------------------------------===#
"""
This script reads input from a unified diff and reformats all the changed
lines. This is useful to reformat all the lines touched by a specific patch.
Example usage:
git diff -U0 HEAD^ | python3 format_diff.py yapf -i
git diff -U0 HEAD^ | python3 format_diff.py clang-format -i
svn diff --diff-cmd=diff -x-U0 | python3 format_diff.py -p0 clang-format -i
General usage:
<some diff> | python3 format_diff.py [--regex] [--lines-style] [-p] binary [args for binary]
It should be noted that the filename contained in the diff is used unmodified
to determine the source file to update. Users calling this script directly
should be careful to ensure that the path in the diff is correct relative to the
current working directory.
"""
import argparse
import difflib
import io
import re
import subprocess
import sys
# Default file-selection regex for each formatter binary we know about.
BINARY_TO_DEFAULT_REGEX = {
    "yapf": r".*\.py",
    "clang-format":
        r".*\.(cpp|cc|c\+\+|cxx|c|cl|h|hh|hpp|hxx|m|mm|inc|js|ts|proto|"
        r"protodevel|java|cs)",
}


def parse_arguments():
    """Parse command-line arguments for the diff reformatter.

    Returns:
      (args, binary_args): the parsed known arguments, plus any extra
      arguments that should be forwarded verbatim to the formatter binary
      (e.g. ``-i``).

    Exits with an argparse usage error when 'binary' is unknown and
    --regex/--lines-style are missing, or when --lines-style is invalid.
    """
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument(
        "binary",
        help="Location of binary to use for formatting. This controls the "
        "default values of --regex and --lines-style. If binary isn't 'yapf' "
        "or 'clang-format' then --regex and --lines-style are required.")
    parser.add_argument(
        "--regex",
        metavar="PATTERN",
        default=None,
        help="Regex pattern for selecting file paths to reformat from the piped "
        "diff. This flag is required if 'binary' is not set to 'yapf' or "
        "'clang-format'. Otherwise, this flag overrides the default pattern that "
        "--binary sets.")
    parser.add_argument(
        "--lines-style",
        default=None,
        help="How to style the 'lines' argument for the given binary. Can be set "
        "to 'yapf' or 'clang-format'. This flag is required if 'binary' is not "
        "set to 'yapf' or 'clang-format'.")
    parser.add_argument(
        "-p",
        metavar="NUM",
        default=1,
        help="Strip the smallest prefix containing P slashes. Set to 0 if "
        "passing `--no-prefix` to `git diff` or using `svn`")

    # Parse and error-check arguments.
    args, binary_args = parser.parse_known_args()
    if args.binary not in BINARY_TO_DEFAULT_REGEX:
        # parser.error() raises SystemExit itself; the previous
        # `raise parser.error(...)` was dead code.
        if not args.regex:
            parser.error("If 'binary' is not 'yapf' or 'clang-format' then "
                         "--regex must be set.")
        if not args.lines_style:
            parser.error("If 'binary' is not 'yapf' or 'clang-format' then "
                         "--lines-style must be set.")
    else:
        # Set defaults based off of 'binary'.
        if not args.regex:
            args.regex = BINARY_TO_DEFAULT_REGEX[args.binary]
        if not args.lines_style:
            args.lines_style = args.binary
    if args.lines_style not in ["yapf", "clang-format"]:
        # Message fixed to match the actual flag spelling: --lines-style.
        parser.error(f"Unexpected value for --lines-style {args.lines_style}")
    return args, binary_args
def main():
    """Read a unified diff from stdin and reformat only the changed lines.

    For every file in the diff whose path matches ``args.regex``, collect
    the changed line ranges from the hunk headers, then invoke
    ``args.binary`` once per file with the appropriate line-range flags
    plus any pass-through arguments (e.g. ``-i``).
    """
    args, binary_args = parse_arguments()

    # Extract changed lines for each file.
    filename = None  # file named by the most recent "+++" diff header
    lines_by_file = {}  # filename -> flat list of line-range CLI arguments
    for line in sys.stdin:
        # Match "+++ .../path" headers, stripping the first args.p path
        # components (the same convention as patch -p).
        match = re.search(fr"^\+\+\+\ (.*?/){{{args.p}}}(\S*)", line)
        if match:
            filename = match.group(2)
        if filename is None:
            continue

        # Match all filenames specified by --regex.
        if not re.match(f"^{args.regex}$", filename):
            continue

        # Match unified diff hunk headers: "@@ -a,b +start,count @@".
        match = re.search(r"^@@.*\+(\d+)(,(\d+))?", line)
        if match:
            start_line = int(match.group(1))
            line_count = 1  # a missing ",count" suffix means one line
            if match.group(3):
                line_count = int(match.group(3))
            if line_count == 0:
                # Pure-deletion hunk: nothing on the new side to reformat.
                continue
            end_line = start_line + line_count - 1
            # The two supported tools spell the line-range flag differently.
            if args.lines_style == "yapf":
                lines = ["--lines", f"{start_line}-{end_line}"]
            elif args.lines_style == "clang-format":
                lines = ["-lines", f"{start_line}:{end_line}"]
            lines_by_file.setdefault(filename, []).extend(lines)

    # Pass the changed lines to 'binary' alongside any unparsed args (e.g. -i).
    for filename, lines in lines_by_file.items():
        command = [args.binary, filename]
        command.extend(lines)
        command.extend(binary_args)
        print(f"Running `{' '.join(command)}`")
        p = subprocess.Popen(command,
                             stdout=subprocess.PIPE,
                             stderr=None,
                             stdin=subprocess.PIPE,
                             universal_newlines=True)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            # Propagate the formatter's failure code immediately.
            sys.exit(p.returncode)

        # If the formatter printed the formatted code to stdout then print out
        # a unified diff between the formatted and unformatted code.
        # If flags like --verbose are passed to the binary then the diffs this
        # produces won't be particularly helpful.
        formatted_code = io.StringIO(stdout).readlines()
        if len(formatted_code):
            with open(filename) as f:
                unformatted_code = f.readlines()
            diff = difflib.unified_diff(unformatted_code,
                                        formatted_code,
                                        fromfile=filename,
                                        tofile=filename,
                                        fromfiledate="(before formatting)",
                                        tofiledate="(after formatting)")
            diff_string = "".join(diff)
            if len(diff_string) > 0:
                sys.stdout.write(diff_string)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#-------------------------------------------------------------------------------
# Name: search.py
# Purpose: Contains all Request Handlers relating to generic search
# functions
#
# Author: Andre Wiggins
#
# Created: 04/07/2011
# Copyright: (c) Jacob Marsh, Andrew Stewart, Andre Wiggins 2011
# License:
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#-------------------------------------------------------------------------------
import parsers
import server
from google.appengine.ext.webapp import template
from google.appengine.ext import webapp
class SearchResultsPage(webapp.RequestHandler):
    '''Handles requests for the /search path, returning the HTML for a
    user searching for a textbook by a generic search term.'''

    def get(self):
        query = self.request.get('q')
        search_type = self.request.get('type')  # avoid shadowing builtin `type`
        if not (search_type and query):
            # Missing parameters: send the user back to the home page.
            self.redirect("/")
            return
        if search_type == "isbn":
            # ISBN searches go straight to the book page.
            self.redirect("book/%s" % query)
            return
        # Otherwise render the generic search template against the
        # default retailer.
        retailer = "halfdotcom"
        template_values = {
            "url": "/search/%s?q=%s&type=%s" % (retailer, query, search_type),
            "url2": "/book",
            "retailer": retailer,
        }
        path = '../static/templates/search.html'
        self.response.out.write(template.render(path, template_values, True))
class SearchRetailer(webapp.RequestHandler):
    '''Handles requests for /search/<retailer>, returning the JSON results
    of searching that retailer for a generic search term.'''

    def get(self):
        # The last path segment names the retailer whose parser to load.
        retailer_name = self.request.path.rstrip('/').split('/')[-1]
        retailer = parsers.import_parser(retailer_name)
        query = self.request.get('q')
        search_type = self.request.get('type')  # avoid shadowing builtin `type`
        results = server.getjson(retailer.search(query, search_type))
        self.response.headers['Content-Type'] = 'application/json'
        self.response.out.write(results)
|
import pytest
from awx.main.access import (
RoleAccess,
UserAccess,
TeamAccess)
from awx.main.models import Role, Organization
@pytest.mark.django_db
def test_team_access_attach(rando, team, inventory):
    """A team admin with only read access to an inventory must not be able
    to attach the inventory's admin role to the team."""
    # rando administers the team and can read the inventory.
    team.admin_role.members.add(rando)
    inventory.read_role.members.add(rando)
    # The team itself already holds read access to the inventory.
    team.member_role.children.add(inventory.read_role)
    payload = {'id': inventory.admin_role.pk}
    team_access = TeamAccess(rando)
    assert not team_access.can_attach(
        team, inventory.admin_role, 'member_role.children', payload, False)
@pytest.mark.django_db
def test_user_access_attach(rando, inventory):
    """A user with only read access to an inventory cannot grant its admin
    role to themselves."""
    inventory.read_role.members.add(rando)
    payload = {'id': inventory.admin_role.pk}
    user_access = UserAccess(rando)
    assert not user_access.can_attach(
        rando, inventory.admin_role, 'roles', payload, False)
@pytest.mark.django_db
def test_role_access_attach(rando, inventory):
    """Read access to an inventory is not enough to add oneself to its
    admin role."""
    inventory.read_role.members.add(rando)
    assert not RoleAccess(rando).can_attach(
        inventory.admin_role, rando, 'members', None)
@pytest.mark.django_db
def test_visible_roles(admin_user, system_auditor, rando, organization, project):
    '''
    system admin & system auditor fixtures needed to create system roles
    '''
    organization.auditor_role.members.add(rando)
    role_access = RoleAccess(rando)
    # An org auditor can see (but is not a member of) the org admin role.
    assert rando not in organization.admin_role
    assert role_access.can_read(organization.admin_role)
    assert organization.admin_role in Role.visible_roles(rando)
    # The same visibility applies to roles of projects in the organization.
    assert rando not in project.admin_role
    assert role_access.can_read(project.admin_role)
    assert project.admin_role in Role.visible_roles(rando)
# Permissions when adding users to org member/admin
@pytest.mark.django_db
def test_org_user_role_attach(user, organization, inventory):
    '''
    Org admins must not be able to add arbitrary users to their
    organization, because that would give them admin permission to that user
    '''
    org_admin = user('admin')
    outsider = user('nonmember')
    inventory.admin_role.members.add(outsider)
    organization.admin_role.members.add(org_admin)
    role_access = RoleAccess(org_admin)
    for role in (organization.member_role, organization.admin_role):
        assert not role_access.can_attach(role, outsider, 'members', None)
# Permissions when adding users/teams to org special-purpose roles
@pytest.mark.django_db
def test_user_org_object_roles(organization, org_admin, org_member):
    '''
    Unlike admin & member roles, the special-purpose organization roles do not
    confer any permissions related to user management,
    Normal rules about role delegation should apply, only admin to org needed.
    '''
    # The org admin may delegate the special-purpose role...
    assert RoleAccess(org_admin).can_attach(
        organization.notification_admin_role, org_member, 'members', None)
    # ...but a plain member may not.
    assert not RoleAccess(org_member).can_attach(
        organization.notification_admin_role, org_member, 'members', None)
@pytest.mark.django_db
def test_team_org_object_roles(organization, team, org_admin, org_member):
    '''
    the special-purpose organization roles are not ancestors of any
    team roles, and can be delegated en masse through teams,
    following normal admin rules
    '''
    payload = {'id': 68}
    assert RoleAccess(org_admin).can_attach(
        organization.notification_admin_role, team, 'member_role.parents', payload)
    # Team admin alone is not enough to hand organization roles to the team.
    team.admin_role.members.add(org_member)
    assert not RoleAccess(org_member).can_attach(
        organization.notification_admin_role, team, 'member_role.parents', payload)
    # A team can never be made a member of an organization.
    assert not RoleAccess(org_admin).can_attach(
        organization.member_role, team, 'member_role.parents', payload)
# Singleton user editing restrictions
@pytest.mark.django_db
def test_org_superuser_role_attach(admin_user, org_admin, organization):
    '''
    Ideally, you would not add superusers to roles (particularly member_role)
    but it has historically been possible
    this checks that the situation does not grant unexpected permissions
    '''
    organization.member_role.members.add(admin_user)
    role_access = RoleAccess(org_admin)
    for role in (organization.member_role, organization.admin_role):
        assert not role_access.can_attach(role, admin_user, 'members', None)
    # Nor may the org admin edit the superuser directly.
    assert not UserAccess(org_admin).can_change(admin_user, {'last_name': 'Witzel'})
# Sanity check user editing permissions combined with new org roles
@pytest.mark.django_db
def test_org_object_role_not_sufficient(user, organization):
    """An object-level admin role (workflow admin) does not grant the
    ability to edit fellow organization members."""
    plain_member = user('amember')
    workflow_admin = user('icontrolallworkflows')
    organization.member_role.members.add(plain_member)
    organization.workflow_admin_role.members.add(workflow_admin)
    assert not UserAccess(workflow_admin).can_change(
        plain_member, {'last_name': 'Witzel'})
# Org admin user editing permission ANY to ALL change
@pytest.mark.django_db
def test_need_all_orgs_to_admin_user(user):
    '''
    Old behavior - org admin to ANY organization that a user is member of
    grants permission to admin that user
    New behavior enforced here - org admin to ALL organizations that a
    user is member of grants permission to admin that user
    '''
    org1 = Organization.objects.create(name='org1')
    org2 = Organization.objects.create(name='org2')
    org1_admin = user('org1-admin')
    org1.admin_role.members.add(org1_admin)
    # This user belongs to both organizations.
    dual_member = user('org12-member')
    org1.member_role.members.add(dual_member)
    org2.member_role.members.add(dual_member)
    # Admin of only one of the user's orgs: no edit or attach permission.
    assert not UserAccess(org1_admin).can_change(dual_member, {'last_name': 'Witzel'})
    role_access = RoleAccess(org1_admin)
    assert not role_access.can_attach(org1.admin_role, dual_member, 'members', None)
    assert not role_access.can_attach(org1.member_role, dual_member, 'members', None)
    # Admin of ALL of the user's orgs: attach permission is granted.
    org2.admin_role.members.add(org1_admin)
    assert role_access.can_attach(org1.admin_role, dual_member, 'members', None)
    assert role_access.can_attach(org1.member_role, dual_member, 'members', None)
# Orphaned user can be added to member role, only in special cases
@pytest.mark.django_db
def test_orphaned_user_allowed(org_admin, rando, organization):
    '''
    We still allow adoption of orphaned* users by assigning them to
    organization member role, but only in the situation where the
    org admin already posesses indirect access to all of the user's roles
    *orphaned means user is not a member of any organization
    '''
    assert RoleAccess(org_admin).can_attach(
        organization.member_role, rando, 'members', None)
    # Direct edits to the user remain forbidden until they join the org.
    assert not UserAccess(org_admin).can_change(rando, {'last_name': 'Witzel'})
|
# project
from db_file_storage.storage import DatabaseFileStorage
def delete_file_if_needed(instance, filefield_name):
    """
    When editing and the filefield holds a new file, delete the previously
    stored file (if any) from the database.

    Call this function immediately BEFORE saving the instance.
    """
    if not instance.id:
        # New instance: nothing stored yet, so nothing to delete.
        return
    model_class = type(instance)
    # Check whether a non-null, non-empty file is stored for this instance.
    has_stored_file = model_class.objects.filter(pk=instance.pk).exclude(
        **{'%s__isnull' % filefield_name: True}
    ).exclude(
        **{'%s__exact' % filefield_name: ''}
    ).exists()
    old_file = None
    if has_stored_file:
        old_file = getattr(
            model_class.objects.only(filefield_name).get(pk=instance.id),
            filefield_name,
        )
    # When the file is unchanged, old_file.name equals the current field
    # value and must NOT be deleted; a differing name means the file is
    # being replaced, so the old one is removed.
    if old_file and not (old_file.name == getattr(instance, filefield_name)):
        DatabaseFileStorage().delete(old_file.name)
def delete_file(instance, filefield_name):
    """
    Delete the stored file (if any) for the instance from the database.

    Call this function immediately AFTER deleting the instance.
    """
    stored_file = getattr(instance, filefield_name)
    if stored_file:
        DatabaseFileStorage().delete(stored_file.name)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from tools.translate import _
import pooler
from openerp.addons.account_report_lib.account_report_base import accountReportbase
class Parser(accountReportbase):
def __init__(self, cursor, uid, name, context):
super(Parser, self).__init__(cursor, uid, name, context=context)
self.pool = pooler.get_pool(self.cr.dbname)
self.cursor = self.cr
self.localcontext.update({
'storage':{},
'cumul_balance': 0.0,
'get_bank_account': self.get_bank_account,
'get_period': self.get_period,
'display_account_name': self.display_account_name,
'account_has_move_lines': self.account_has_move_lines,
'messages': self.messages,
'return_balance_account':self.return_balance_account,
'display_symbol_account': self.display_symbol_account,
'update_cumul_balance': self.update_cumul_balance,
'reset_data': self.reset_data,
'get_cumul_balance':self.get_cumul_balance,
})
#=================== DISPLAY DATA ===================================
def messages(self):
message = _("For this account, doesn't exist move lines")
return message
def account_has_move_lines(self, account_id):
if account_id in self.localcontext['storage']['result'].keys():
if len(self.localcontext['storage']['result'][account_id]) > 0:
return True
else:
return False
def display_account_name(self, data, account_id):
str_name = ''
bank_account = self.get_bank_account(data)
if bank_account.default_credit_account_id and bank_account.default_debit_account_id:
if bank_account.default_credit_account_id.id == bank_account.default_debit_account_id.id:
str_name = bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
else:
if bank_account.default_credit_account_id:
if bank_account.default_credit_account_id.id == account_id:
str_name = _('Default credit account: ') + bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
elif bank_account.default_debit_account_id:
if bank_account.default_debit_account_id.id == account_id:
str_name = _('Default debit account: ') + bank_account.default_debit_account_id.code + ' - ' + bank_account.default_debit_account_id.name + ' - ' + bank_account.default_debit_account_id.currency_id.name
else:
if bank_account.default_credit_account_id:
if bank_account.default_credit_account_id.id == account_id:
str_name = _('Default credit account: ') + bank_account.default_credit_account_id.code + ' - ' + bank_account.default_credit_account_id.name + ' - ' + bank_account.default_credit_account_id.currency_id.name
elif bank_account.default_debit_account_id:
if bank_account.default_debit_account_id.id == account_id:
str_name = _('Default debit account: ') + bank_account.default_debit_account_id.code + ' - ' + bank_account.default_debit_account_id.name + ' - ' + bank_account.default_debit_account_id.currency_id.name
return str_name
def display_symbol_account(self, account_id):
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
if account.currency_id:
return account.currency_id.symbol
else:
return ''
#=============== SET AND GET DATA ====================================#
def reset_data(self):
self.localcontext['storage']['cumul_balance'] = 0.0
return False
def get_cumul_balance(self):
return self.localcontext['storage']['cumul_balance']
def get_bank_account(self, data):
return self._get_info(data, 'res_partner_bank_ids', 'res.partner.bank')
def get_period(self, data):
return self._get_info(data, 'period_ids', 'account.period')
def get_currency_company(self):
return self.pool.get('res.users').browse(self.cr, self.uid, [self.uid])[0].company_id.currency_id
def different_currency(self, currency_id):
currency_company = self.get_currency_company()
if currency_company != currency_id:
return True
else:
return False
#Change cumul_balance when changes the line
def update_cumul_balance(self, line):
cumul_balance = self.localcontext['storage']['cumul_balance']
if line.currency_id:
if line.currency_id.id == self.get_currency_company():
cumul_balance = self.localcontext['storage']['cumul_balance'] + line.debit - line.credit
dict_update = {'cumul_balance': cumul_balance}
self.localcontext['storage'].update(dict_update)
else:
cumul_balance = self.localcontext['storage']['cumul_balance'] + line.amount_currency
dict_update = {'cumul_balance': cumul_balance}
self.localcontext['storage'].update(dict_update)
return cumul_balance
def set_data_template(self, data):
#Main dictionary
res = self.classified_move_lines(data)
dict_update = {'result': res,}
self.localcontext['storage'].update(dict_update)
return False
def return_balance_account(self, data, account_id):
#Depends of account currency, return balance or foreign balance
balance = self.get_initial_balance(data, account_id)
account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
currency_company = self.get_currency_company()
if account.currency_id:
if account.currency_id == currency_company:
#initialize cum_balance
dict_update = {'cumul_balance': balance[account_id]['balance']}
self.localcontext['storage'].update(dict_update)
return balance[account_id]['balance']
else:
#initialize cum_balance
dict_update = {'cumul_balance': balance[account_id]['foreign_balance']}
self.localcontext['storage'].update(dict_update)
return balance[account_id]['foreign_balance']
#=====================================================================#
#===================================================================
# Find move_lines that match with default_credit_account_id or
# default_debit_account_id, status = valid and period is the
# same with selected in wizard
#===================================================================
def process_move_lines(self, data):
account_ids = []
period = self.get_period(data)
bank_account = self.get_bank_account(data)
if bank_account.default_credit_account_id and bank_account.default_debit_account_id:
if bank_account.default_credit_account_id.id == bank_account.default_debit_account_id.id:
account_ids.append(bank_account.default_debit_account_id.id)
else:
account_ids.append(bank_account.default_credit_account_id.id)
account_ids.append(bank_account.default_debit_account_id.id)
elif bank_account.default_credit_account_id:
account_ids.append(bank_account.default_credit_account_id.id)
elif bank_account.default_debit_account_id:
account_ids.append(bank_account.default_debit_account_id.id)
move_lines_ids = self.pool.get('account.move.line').search(self.cr, self.uid, [('account_id','in',account_ids),('state', '=', 'valid'),('period_id','=',period.id)])
move_lines = self.pool.get('account.move.line').browse(self.cr, self.uid, move_lines_ids)
return move_lines
#=======================================================================
# Create a dictionary where account is key and each of them have a
# move lines list associated
#=======================================================================
def classified_move_lines(self, data):
res = {}
#Get move_lines
move_lines = self.process_move_lines(data)
for line in move_lines:
#lines must have a account if they are included in list
#It is not necessary included a check with account
if line.account_id.id not in res:
res[line.account_id.id] = []
res[line.account_id.id].append(line)
return res
#=======================================================================
# Create a dictionary where account is key and each of them have a
# balance associated (initial balance)
#=======================================================================
    def get_initial_balance(self, data, account_id):
        """Return the initial balance of *account_id* for the selected
        period: the closing balance of the previous period.

        Accounts with a secondary currency different from the company
        currency report 'foreign_balance' instead of 'balance'; accounts
        without a secondary currency report 0.0.
        """
        account_balance = 0.0
        library_obj = self.pool.get('account.webkit.report.library')
        fiscal_year = self.get_fiscalyear(data)
        account = self.pool.get('account.account').browse(self.cr, self.uid, account_id)
        period = self.get_period(data)
        currency_company = self.get_currency_company()
        #Get initial balance with previous period for period selected
        previous_period = self.pool.get('account.period').get_start_previous_period(self.cr, self.uid, start_period=period, fiscal_year=fiscal_year)
        if account.currency_id:
            #Compare currency, if account is different than currency company, get foreign_balance
            # NOTE(review): this compares currency_id.id while the sibling
            # method above compares the currency_id record directly to
            # get_currency_company() -- one of the two is likely wrong;
            # confirm what get_currency_company() returns (id vs. record).
            if account.currency_id.id == currency_company:
                account_balance = library_obj.get_account_balance(self.cr, self.uid,
                                                            [account_id],
                                                            ['balance'],
                                                            initial_balance=True,
                                                            fiscal_year_id=fiscal_year.id,
                                                            start_period_id=previous_period,
                                                            end_period_id=previous_period,
                                                            filter_type='filter_period')
            else:
                account_balance = library_obj.get_account_balance(self.cr, self.uid,
                                                            [account_id],
                                                            ['foreign_balance'],
                                                            initial_balance=True,
                                                            fiscal_year_id=fiscal_year.id,
                                                            start_period_id=previous_period,
                                                            end_period_id=previous_period,
                                                            filter_type='filter_period')
        else:
            # Account has no secondary currency: report no initial balance.
            account_balance = 0.0
        return account_balance
|
# uArm Swift Pro - Python Library Example
# Created by: Richard Garsthagen - the.anykey@gmail.com
# V0.1 - June 2017 - Still under development
import uArmRobot
import time
#Configure Serial Port
serialport = "com5" # for windows
#serialport = "/dev/ttyACM0" # for linux like system
# Connect to uArm
myRobot = uArmRobot.robot(serialport)
# myRobot.__init__(serialport)
myRobot.debug = True # Enable / Disable debug output on screen, by default disabled
myRobot.connect()
myRobot.mode(0) # Set mode to Normal
time.sleep(1)
# Move robot, command will complete when motion is completed
myRobot.goto(150, 0, 100, 30000)
# Sweep a grid of drop positions: x away from the base, y side to side.
for x in range(150,262,14):
    for y in range(-112, 112, 14):
        # Return to the fixed pick-up position and grab a piece.
        myRobot.goto(120, 0, 100, 30000)
        myRobot.pump(True)
        time.sleep(5)
        # One grid cell is 14 mm.
        # Drop height is 33 mm.
        myRobot.goto(x, y, 35, 30000)
        time.sleep(2)
        myRobot.pump(False)
        time.sleep(3)
        # myRobot.async_goto(112,0,32,6000)
# Park the arm, then release the serial port.
myRobot.goto(150, 0, 100, 30000)
time.sleep(5)
#Disconnect serial connection
myRobot.disconnect()
|
from PyQt4.QtCore import Qt
import PyQt4.QtGui as qtgui
from CutSlider import CutSlider
from Parameters import Parameters
from view.ModelView import ModelView
class MainWindow(qtgui.QMainWindow):
    """Main application window: a render area (model view plus cut
    slider) on the left and a fixed-width parameter panel on the right."""

    def __init__(self, **kwargs):
        super(MainWindow, self).__init__(**kwargs)
        # Central widget holding both halves side by side.
        central_frame = qtgui.QFrame()
        central_layout = qtgui.QHBoxLayout()
        self.setCentralWidget(central_frame)
        central_frame.setLayout(central_layout)
        # Right-hand parameter panel: framed, fixed width, pinned to top.
        self.parameters = Parameters()
        self.parameters.setFrameStyle(2)
        self.parameters.setFixedWidth(500)
        self.parameters.setMinimumHeight(700)
        render_frame = qtgui.QFrame()
        central_layout.addWidget(render_frame)
        central_layout.addWidget(self.parameters, alignment=Qt.AlignTop)
        self.parameters.setSizePolicy(qtgui.QSizePolicy.Minimum,
                                      qtgui.QSizePolicy.Maximum)
        # Left-hand render area: VTK model view next to the cut slider.
        render_layout = qtgui.QHBoxLayout()
        self.model_view = ModelView(parent=render_frame)
        render_layout.addWidget(self.model_view)
        self.slider = CutSlider()
        render_frame.setLayout(render_layout)
        render_layout.addWidget(self.slider)
        # Kick off an initial render so the view is not blank at startup.
        self.model_view.GetRenderWindow().Render()
|
from sentinels import NOTHING
from .api_object import APIObject
from .lazy_query import LazyQuery
class Test(APIObject):
    """Client-side handle for a single test, wrapping the REST API calls
    that report its lifecycle, errors and metadata."""

    def _call(self, function_name, params):
        # Every operation targets this test, so the id is always sent.
        payload = dict(params, id=self.id)
        return self.client.api.call_function(function_name, payload)

    def report_end(self, duration=NOTHING):
        self._call('report_test_end', {'duration': duration})

    def mark_skipped(self):
        self._call('report_test_skipped', {})

    def mark_interrupted(self):
        self._call('report_test_interrupted', {})

    def add_error(self):
        return self._call('add_test_error', {})

    def add_failure(self):
        return self._call('add_test_failure', {})

    def add_metadata(self, metadata):
        return self._call('add_test_metadata', {'metadata': metadata})

    def set_conclusion(self, conclusion):
        return self._call('set_test_conclusion', {'conclusion': conclusion})

    def add_error_data(self, exception, exception_type, traceback, timestamp=NOTHING):
        return self._call('add_test_error_data', {'exception': exception,
                                                  'exception_type': exception_type,
                                                  'traceback': traceback,
                                                  'timestamp': timestamp})

    def edit_status(self, status):
        return self._call('edit_test_status', {'status': status})

    def query_errors(self):
        """Queries errors of this test
        :rtype: A lazy query object
        """
        return LazyQuery(self.client, '/rest/errors', query_params={'test_id': self.id})
|
# coding: utf-8
import unittest
from numeros_telefonicos import TelefonesFixos
class TestTelefonesFixos(unittest.TestCase):
    """Unit tests for the landline (fixed-line) phone number validator."""
    def test_verificar_quantidade_digitos_fornecidos(self):
        # There must be at most 10 digits, counting area code + landline
        # range digit + number, i.e. xx [2-5] xxxxxxx.
        # Valid cases
        corretos = ['5534441112', '0934445577', '3829921313']
        for elemento in corretos:
            cl = TelefonesFixos(elemento)
            self.assertTrue(cl._verificar_tamanho())
        # Invalid cases:
        # too short; too long; containing letters
        incorretos = ['123', '1234567890111213', 'dasd321FDSF21as']
        for elemento in incorretos:
            cl = TelefonesFixos(elemento)
            self.assertFalse(cl._verificar_tamanho())
    def test_verificar_ddd(self):
        # Any two digits are a valid area code, except those starting with 0.
        # Letters are assumed to be rejected by the length check above.
        # Valid cases
        corretos = ['5534441641', '4734445544', '1134440091']
        for elemento in corretos:
            cl = TelefonesFixos(elemento)
            self.assertTrue(cl._verificar_ddd())
        # Invalid case:
        # the only way to fail is an area code starting with 0
        cl = TelefonesFixos('0734441515')
        self.assertFalse(cl._verificar_ddd())
    def test_validar_como_fixo(self):
        # Checks that the digit after the area code is in the range [2-5].
        # Numbers within ranges 2, 3, 4 and 5
        corretos = ['4723995530', '1134496567', '8544448774', '8554537777']
        for elemento in corretos:
            cl = TelefonesFixos(elemento)
            self.assertTrue(cl._validar_como_fixo())
        # Numbers outside ranges 2, 3, 4 and 5
        incorretos = ['1113995530', '1464496567', '4574448774', '4884537777']
        for elemento in incorretos:
            cl = TelefonesFixos(elemento)
            self.assertFalse(cl._validar_como_fixo())
    def test_ddd(self):
        # Checks that the area code passed to the constructor is returned.
        cl = TelefonesFixos('4734441515')
        self.assertEqual('47', cl.ddd())
    def test_get_numero(self):
        # Checks that the number passed to the constructor is returned.
        cl = TelefonesFixos('4734441515')
        self.assertEqual('34441515', cl.get_numero())
    def test_validar(self):
        # Checks that validar() works as the public entry point for the
        # private _validar_como_fixo() method.
        cl = TelefonesFixos('4734441515')
        self.assertTrue(cl.validar())
        incorretos = ['314441641', '31dasjid']
        for elemento in incorretos:
            cl = TelefonesFixos(elemento)
            self.assertFalse(cl.validar())
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
# Version is read from the linguist package itself so it is defined in
# exactly one place.
version = __import__('linguist').__version__
root = os.path.abspath(os.path.dirname(__file__))
# The README doubles as the PyPI long description.  Read it with an
# explicit encoding so the build does not depend on the system locale.
with open(os.path.join(root, 'README.rst'), encoding='utf-8') as f:
    README = f.read()
setup(
    name='django-linguist',
    version=version,
    description='An application to manage translations in Django models',
    long_description=README,
    author='Gilles Fabio',
    author_email='gilles.fabio@gmail.com',
    url='http://github.com/ulule/django-linguist',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Topic :: Software Development :: Internationalization',
    ]
)
|
from django.core.management.base import BaseCommand, CommandError
from jsonschema import validate
from proso_configab.models import Experiment, Variable, PossibleValue, ExperimentSetup
from datetime import datetime
from django.db import transaction
import os
import json
class Command(BaseCommand):
    """Load A/B experiment definitions from a JSON file into the database.

    The file is validated against ``experiments_schema.json`` (located next
    to this module) and the whole load runs inside a single transaction.
    """
    def add_arguments(self, parser):
        # Path to the JSON file containing the experiment definitions.
        parser.add_argument('filename')
    def handle(self, *args, **options):
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "experiments_schema.json"), "r", encoding='utf8') as schema_file:
            schema = json.load(schema_file)
        with open(options['filename'], 'r', encoding='utf8') as json_file:
            # Atomic so a validation or load error leaves the DB untouched.
            with transaction.atomic():
                data = json.load(json_file)
                validate(data, schema)
                self._load_experiments(data["experiments"])
    def _load_experiments(self, data):
        """Create experiments; update pause/disable flags on existing ones."""
        for experiment in data:
            experiment_db, created = Experiment.objects.get_or_create(identifier=experiment['id'])
            # Pause state may be toggled on every run, even for existing rows.
            if 'paused' in experiment and experiment['paused'] != experiment_db.is_paused:
                experiment_db.is_paused = experiment['paused']
                experiment_db.save()
                print(' -- experiment', experiment['id'], ('paused' if experiment['paused'] else 'unpaused'))
            # Disabling is one-way: a disabled experiment can never come back.
            if 'disabled' in experiment:
                if not experiment_db.is_enabled:
                    if not experiment['disabled']:
                        raise CommandError('Experiment "{}" can not be enabled again.'.format(experiment['id']))
                elif experiment['disabled']:
                    experiment_db.is_enabled = False
                    experiment_db.time_disabled = datetime.now()
                    experiment_db.save()
                    print(' -- experiment', experiment['id'], 'disabled')
            # Variables/setups are only loaded when the row was just created.
            if not created:
                print(' -- experiment', experiment['id'], 'already created, skipping')
                continue
            if 'variables' in experiment and 'setups' in experiment:
                raise CommandError('The experiment ({}) can not contain both variables and setups.'.format(experiment['id']))
            if 'variables' in experiment:
                self._load_experiments_variables(experiment_db, experiment['variables']) if False else self._load_variables(experiment_db, experiment['variables'])
            elif 'setups' in experiment:
                self._load_setups(experiment_db, experiment['setups'])
            else:
                raise CommandError('The experiment ({}) has to contain either variables, or setups.'.format(experiment['id']))
            print(' -- experiment', experiment['id'], 'created')
    def _load_variables(self, experiment, variables_json):
        """Create setups as the cross product of every variable's values."""
        values_list_with_probabilities = []
        for variable in variables_json:
            variable_db, _ = Variable.objects.get_or_create(app_name=variable['app_name'], name=variable['name'])
            # Probabilities are integer percentages and must sum to 100.
            prob_sum = sum([val['probability'] for val in variable['values']])
            if prob_sum != 100:
                raise CommandError('The total sum of probs for variable "{}.{}" is {}, expected 100'.format(variable['app_name'], variable['name'], prob_sum))
            values_with_probs = []
            for value in variable['values']:
                value_db, _ = PossibleValue.objects.get_or_create(
                    variable=variable_db,
                    value=value['value'],
                )
                values_with_probs.append((value_db, value['probability']))
            values_list_with_probabilities.append(values_with_probs)
        ExperimentSetup.objects.from_values_product(experiment, values_list_with_probabilities)
    def _load_setups(self, experiment, setups_json):
        """Create explicitly-listed setups, each with its own probability."""
        total_prob = sum([s['probability'] for s in setups_json])
        if total_prob != 100:
            raise CommandError('The total sum of probs for setups in experiment {} is {}, expected 100.'.format(experiment.identifier, total_prob))
        for setup in setups_json:
            values = []
            for variable in setup['variables']:
                variable_db, _ = Variable.objects.get_or_create(app_name=variable['app_name'], name=variable['name'])
                value_db, _ = PossibleValue.objects.get_or_create(
                    variable=variable_db,
                    value=variable['value'],
                )
                values.append(value_db)
            ExperimentSetup.objects.from_values(experiment, values, setup['probability'])
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2015 Ben Kurtovic <ben.kurtovic@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from platform import python_version
import re
from earwigbot import __version__
from earwigbot.commands import Command
class Help(Command):
    """Displays information about the bot."""
    name = "help"
    commands = ["help", "version"]

    def check(self, data):
        # Respond to our registered commands, or to a bare mention of the
        # bot's own nick with no command at all.
        if not data.is_command:
            return False
        if data.command in self.commands:
            return True
        return not data.command and data.trigger == data.my_nick

    def process(self, data):
        if not data.command:
            self.do_hello(data)
        elif data.command == "version":
            self.do_version(data)
        elif data.args:
            self.do_command_help(data)
        else:
            self.do_main_help(data)

    def do_main_help(self, data):
        """Give the user a general help message with a list of all commands."""
        names = sorted(command.name for command in self.bot.commands)
        template = "Hi, I'm a bot! I have {0} commands loaded: {1}. You can get help for any command with '!help <command>'."
        self.reply(data, template.format(len(names), ', '.join(names)))

    def do_command_help(self, data):
        """Give the user help for a specific command."""
        target = data.args[0]
        matches = (command for command in self.bot.commands
                   if command.name == target or target in command.commands)
        for command in matches:
            # Only the first matching command with a docstring is reported.
            if command.__doc__:
                doc = re.sub(r"\s\s+", " ", command.__doc__.replace("\n", ""))
                msg = 'Help for command \x0303{0}\x0F: "{1}"'
                self.reply(data, msg.format(target, doc))
                return
        self.reply(data, "Sorry, no help for \x0303{0}\x0F.".format(target))

    def do_hello(self, data):
        self.say(data.chan, "Yes, {0}?".format(data.nick))

    def do_version(self, data):
        vers = "EarwigBot v{bot} on Python {python}: https://github.com/earwig/earwigbot"
        self.reply(data, vers.format(bot=__version__, python=python_version()))
|
#!/usr/bin/env python
#coding: utf-8
import mongokit
from mongokit import Document, Connection
from mongokit.document import DocumentProperties
from bson.objectid import ObjectId
from misc.config import MONGO_CONFIG, DB
_iterables = (list, tuple, set, frozenset)
mongo = Connection(**MONGO_CONFIG)
class CallableMixin(object):
    """
    brings the callable method to a Document. usefull for the connection's
    register method
    """
    def __call__(self, doc=None, gen_skel=False, lang='en', fallback_lang='en'):
        # Forward everything (plus our bound collection) to the wrapped
        # document class.
        kwargs = dict(
            doc=doc,
            gen_skel=gen_skel,
            collection=self.collection,
            lang=lang,
            fallback_lang=fallback_lang,
        )
        return self._obj_class(**kwargs)
# Monkey-patch mongokit so documents registered on a connection use the
# language-aware mixin defined above.
mongokit.connection.CallableMixin = CallableMixin
class MetaDoc(DocumentProperties):
    """Metaclass that auto-registers concrete Doc subclasses with the
    module-level mongo connection and binds their backing collection.

    NOTE(review): relies on mongokit internals (DocumentProperties,
    Connection.register producing a Callable* wrapper class) -- verify
    against the installed mongokit version.
    """
    def __new__(cls, name, bases, attrs):
        new_cls = super(MetaDoc, cls).__new__( cls, name, bases, attrs)
        # Doc itself derives directly from Document and is skipped; only
        # concrete subclasses are wired to the connection.
        if bases[0] is not Document:
            new_cls.__mongo__ = mongo
            if not new_cls.__name__.startswith('Callable'):
                # Collection name = class name with the first letter
                # lower-cased (e.g. UserProfile -> userProfile).
                new_cls.__collection__ = (name[0].lower()+name[1:])
                new_cls = mongo.register(new_cls)
                # register() returns a wrapper; fetch the registered class
                # back from the connection under the original name.
                new_cls = getattr(mongo, name)
            else:
                # Second pass (Callable* wrapper created by register):
                # resolve and cache the concrete pymongo collection.
                new_cls._protected_field_names.append("_collection")
                _ = getattr(new_cls.__mongo__, new_cls.__database__)
                _ = getattr( _ , new_cls.__collection__)
                new_cls._collection = _
        return new_cls
class Doc(Document):
    """Base ODM document adding convenience CRUD helpers on top of
    mongokit's Document.  Subclasses are auto-registered through the
    MetaDoc metaclass."""
    __metaclass__ = MetaDoc
    __database__ = DB
    use_dot_notation = True
    use_autorefs = False
    skip_validation = True
    def __init__(self, doc=None, gen_skel=None, *args, **kwds):
        '''
        When gen_skel is true, default values are applied to the skeleton;
        otherwise missing fields are simply initialized to None.
        '''
        # The default used to be a shared mutable dict ({}): any mutation
        # of it would leak across every subsequent construction.  None is
        # safe and the normalization below already handled it.
        if doc is None:
            doc = {}
        super(Doc, self).__init__(doc, *args, **kwds)
        # Ensure every declared structure field exists on the instance.
        for i in self.structure:
            if i not in doc:
                self[i]=None
        if gen_skel:
            if self.default_values:
                self._set_default_fields(self, self.structure)
    def upsert(self, spec):
        """Update-or-insert this document matching *spec* (a dict, or an
        id string which is coerced to an ObjectId query)."""
        if isinstance(spec,basestring):
            spec = {'_id': ObjectId(spec)}
        #self.update(spec)
        # Only push non-None fields so unset values do not clobber data.
        update = dict((k,v) for k,v in self.iteritems() if v is not None )
        update.update(spec)
        self.collection.update(
            spec,
            {'$set': update},
            upsert=True
        )
        return self
    def save(self, *args, **kwds):
        """Save the document, coercing a string _id to ObjectId first."""
        if "_id" in self:
            _id = self['_id']
            if isinstance(_id, basestring):
                self['_id'] = ObjectId(_id)
        super(Doc, self).save(*args,**kwds)
        return self
    @classmethod
    def count(cls, *args, **kwds):
        """Number of documents matching the query."""
        return cls._collection.find(*args, **kwds).count()
    @classmethod
    def find(cls, *args, **kwds):
        """Find documents; _ids are stringified and results wrapped as
        instances of this class."""
        result = []
        for i in cls._collection.find(*args, **kwds):
            i['_id'] = str(i['_id'])
            result.append(i)
        return map(lambda doc:cls(doc, collection=cls._collection), result)
    @classmethod
    def find_one(cls, spec_or_id=None, *args, **kwds):
        """Find a single document (dict query or id string); returns None
        implicitly when nothing matches."""
        if isinstance(spec_or_id,basestring):
            spec_or_id = ObjectId(spec_or_id)
        o = cls._collection.find_one(spec_or_id, *args,**kwds)
        if o:
            o['_id']=str(o['_id'])
            return cls(o, collection=cls._collection)
    def delete(self):
        """Remove this document from its collection by _id."""
        if self._collection:
            self._collection.remove({'_id': ObjectId(self['_id'])})
    @classmethod
    def remove(cls, spec_or_id, safe=None, multi=True, **kwargs):
        """Remove documents matching *spec_or_id* (dict or id string)."""
        if isinstance(spec_or_id,basestring):
            spec_or_id = ObjectId(spec_or_id)
        if spec_or_id:
            cls._collection.remove(spec_or_id=spec_or_id, safe=safe, multi=multi, **kwargs)
# Nothing to run directly; this module only defines the ODM base classes.
if __name__ == "__main__":
    pass
|
# -*- coding: utf-8 -*-
"""Additional pyflakes"""
# :copyright: (c) 2012-2016 by Ask Solem.
# :license: BSD, see LICENSE for more details.
from __future__ import absolute_import
from __future__ import with_statement
# Release metadata for the flakeplus distribution.  Any elements of
# VERSION past the first three (e.g. a 'dev' tag) are appended verbatim.
VERSION = (1, 1, 0)
__version__ = '.'.join(map(str, VERSION[0:3])) + ''.join(VERSION[3:])
__author__ = 'Ask Solem'
__contact__ = 'ask@celeryproject.org'
__homepage__ = 'http://github.com/ask/flakeplus'
__docformat__ = 'restructuredtext'
# -eof meta-
import os
import re
import sys
from collections import defaultdict
from optparse import OptionParser, make_option as Option
from unipath import Path
# 0x40 matches the conventional BSD EX_USAGE exit code, used as a
# fallback on platforms where the os module does not define it.
EX_USAGE = getattr(os, 'EX_USAGE', 0x40)
# The patterns below are compiled with re.VERBOSE (see compile() helper),
# so literal whitespace inside the triple-quoted patterns is ignored.
RE_COMMENT = r'^\s*\#'
RE_NOQA = r'.+?\#\s+noqa+'
RE_MULTILINE_COMMENT_O = r'^\s*(?:\'\'\'|""").+?(?:\'\'\'|""")'
RE_MULTILINE_COMMENT_S = r'^\s*(?:\'\'\'|""")'
RE_MULTILINE_COMMENT_E = r'(?:^|.+?)(?:\'\'\'|""")'
RE_WITH = r'(?:^|\s+)with\s+'
RE_WITH_IMPORT = r'''from\s+ __future__\s+ import\s+ with_statement'''
RE_PRINT = r'''(?:^|\s+)print\((?:"|')(?:\W+?)?[A-Z0-9:]{2,}'''
RE_ABS_IMPORT = r'''from\s+ __future__\s+ import\s+.*?absolute_import'''
RE_UNI_IMPORT = r'''from\s+ __future__\s+ import.*?\s+unicode_literals'''
# NOTE(review): this module-level accumulator appears unused --
# FlakePP.analyze_fh builds its own per-file dict from FlakePP.map.
acc = defaultdict(lambda: {
    'abs': False,
    'print': False,
    'uni': False,
})
def compile(regex):
    """Compile *regex* in verbose mode.  (NOTE: intentionally shadows the
    builtin compile() within this module.)"""
    return re.compile(regex, flags=re.VERBOSE)
class FlakePP(object):
    """Per-file checker for extra flake rules: required __future__ imports
    (absolute_import, with_statement, unicode_literals) and leftover
    debugging print() calls."""
    re_comment = compile(RE_COMMENT)
    re_ml_comment_o = compile(RE_MULTILINE_COMMENT_O)
    re_ml_comment_s = compile(RE_MULTILINE_COMMENT_S)
    re_ml_comment_e = compile(RE_MULTILINE_COMMENT_E)
    re_abs_import = compile(RE_ABS_IMPORT)
    re_uni_import = compile(RE_UNI_IMPORT)
    re_print = compile(RE_PRINT)
    re_with_import = compile(RE_WITH_IMPORT)
    re_with = compile(RE_WITH)
    re_noqa = compile(RE_NOQA)

    # Template for the per-file flag accumulator used by analyze_fh.
    map = {
        'abs': False,
        'print': False,
        'with': False,
        'with-used': False,
        'uni': False,
    }

    def __init__(self, verbose=False, use_26=False, use_27=False, quiet=False):
        """
        :param use_26: target Python 2.6+ (with_statement import optional).
        :param use_27: target Python 2.7+ (implies use_26; also requires
            the unicode_literals import).
        :param quiet: suppress error output to stderr.
        """
        self.verbose = verbose  # XXX unused
        self.quiet = quiet
        self.use_26 = use_26 or use_27
        self.use_27 = use_27
        self.steps = (
            ('abs', self.re_abs_import),
            ('uni', self.re_uni_import),
            ('with', self.re_with_import),
            ('with-used', self.re_with),
            ('print', self.re_print),
        )

    def analyze_fh(self, fh):
        """Analyze an open file and return (filename, error_count, flags)."""
        steps = self.steps
        filename = fh.name
        acc = dict(self.map)
        index = 0
        errors = [0]

        def error(fmt, **kwargs):
            errors[0] += 1
            self.announce(fmt, **dict(kwargs, filename=filename))

        for index, line in enumerate(self.strip_comments(fh)):
            for key, pattern in steps:
                if pattern.match(line):
                    acc[key] = True
        # Only report on non-empty files (index stays 0 otherwise).
        if index:
            if not acc['abs']:
                # BUG FIX: message previously misspelled "absloute_import".
                error('%(filename)s: missing absolute_import import')
            if not self.use_26 and acc['with-used'] and not acc['with']:
                error('%(filename)s: missing with import')
            if self.use_27 and not acc['uni']:
                error('%(filename)s: missing unicode_literals import')
            if acc['print']:
                error('%(filename)s: left over print statement')
        return filename, errors[0], acc

    def analyze_file(self, filename):
        """Analyze a single file by path."""
        with open(filename) as fh:
            return self.analyze_fh(fh)

    def analyze_tree(self, dir):
        """Recursively analyze every .py file under *dir*."""
        for dirpath, _, filenames in os.walk(dir):
            for path in (Path(dirpath, f) for f in filenames):
                if path.endswith('.py'):
                    yield self.analyze_file(path)

    def analyze(self, *paths):
        """Analyze files and/or directory trees, yielding per-file results."""
        for path in map(Path, paths):
            if path.isdir():
                for res in self.analyze_tree(path):
                    yield res
            else:
                yield self.analyze_file(path)

    def strip_comments(self, fh):
        """Yield source lines with comment-only and noqa lines removed,
        tracking multi-line (triple-quoted) comment state across lines."""
        re_comment = self.re_comment
        re_ml_comment_o = self.re_ml_comment_o
        re_ml_comment_s = self.re_ml_comment_s
        re_ml_comment_e = self.re_ml_comment_e
        re_noqa = self.re_noqa
        in_ml = False
        for line in fh.readlines():
            if in_ml:
                if re_ml_comment_e.match(line):
                    in_ml = False
            else:
                if re_noqa.match(line) or re_ml_comment_o.match(line):
                    pass
                elif re_ml_comment_s.match(line):
                    in_ml = True
                elif re_comment.match(line):
                    pass
                else:
                    yield line

    def announce(self, fmt, **kwargs):
        """Write a formatted message to stderr unless quiet."""
        if not self.quiet:
            sys.stderr.write((fmt + '\n') % kwargs)
class Command(object):
    """Command-line front-end for FlakePP: parses options, expands user
    paths and exits non-zero when any analyzed file has errors.

    NOTE(review): this module targets Python 2 (``basestring``,
    ``dict.iteritems`` below) -- it will not run unmodified on Python 3.
    """
    FlakePP = FlakePP
    Parser = OptionParser
    args = 'dir1 .. dirN'
    version = __version__
    def run(self, *files, **kwargs):
        # Exit code 1 if any file reported at least one error.
        exitcode = 0
        for _, errors, _ in self.FlakePP(**kwargs).analyze(*files):
            if errors:
                exitcode = 1
        return exitcode
    def get_options(self):
        return (
            Option('--2.6',
                default=False, action='store_true', dest='use_26',
                help='Specify support of Python 2.6 and up'),
            Option('--2.7',
                default=False, action='store_true', dest='use_27',
                help='Specify support of Python 2.7 and up'),
            Option('--verbose', '-v',
                default=False, action='store_true', dest='verbose',
                help='Show more output.'),
            Option('--quiet', '-q',
                default=False, action='store_true', dest='quiet',
                help='Silence output'),
        )
    def usage(self):
        return '%%prog [options] %s' % (self.args, )
    def die(self, msg):
        self.usage()
        sys.stderr.write('%s\n' % msg)
        return EX_USAGE
    def expanduser(self, value):
        # Only string option values get ~ expansion; booleans pass through.
        if isinstance(value, basestring):
            return os.path.expanduser(value)
        return value
    def handle_argv(self, prog_name, argv):
        # NOTE(review): emptiness is checked on the raw argv while run()
        # receives the parsed positional args -- presumably equivalent
        # because all options are flags; confirm before changing.
        options, args = self.parse_options(prog_name, argv)
        options = dict((k, self.expanduser(v))
                        for k, v in vars(options).iteritems()
                            if not k.startswith('_'))
        argv = map(self.expanduser, argv)
        if not argv:
            return self.die('No input files/directories')
        return self.run(*args, **options)
    def parse_options(self, prog_name, argv):
        parser = self.Parser(prog=prog_name,
                             usage=self.usage(),
                             version=self.version,
                             option_list=self.get_options())
        return parser.parse_args(argv)
    def execute_from_commandline(self, argv=None):
        if argv is None:
            argv = list(sys.argv)
        prog_name = os.path.basename(argv[0])
        return self.handle_argv(prog_name, argv[1:])
def main(argv=sys.argv):
    """Script entry point: run the Command and exit with its status code."""
    sys.exit(Command().execute_from_commandline(argv))
if __name__ == '__main__':
    main()
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import re
import urllib2
from ambari_commons import subprocess32
import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set.
from resource_management.core.resources.system import Directory, File, Execute
from resource_management.libraries.functions.format import format
from resource_management.libraries.functions import check_process_status
from resource_management.libraries.functions.check_process_status import wait_process_stopped
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.core import shell
from resource_management.core.shell import as_user, as_sudo
from resource_management.core.source import Template
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.core.logger import Logger
from resource_management.libraries.functions.curl_krb_request import curl_krb_request
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions.namenode_ha_utils import get_namenode_states
from resource_management.libraries.functions.show_logs import show_logs
from ambari_commons.inet_utils import ensure_ssl_using_protocol
from zkfc_slave import ZkfcSlaveDefault
# Apply the configured HTTPS protocol and CA bundle at import time, before
# any TLS requests are made by the functions in this module.
ensure_ssl_using_protocol(
  Script.get_force_https_protocol_name(),
  Script.get_ca_cert_file_path()
)
def safe_zkfc_op(action, env):
  """
  Idempotent operation on the zkfc process to either start or stop it.
  :param action: start or stop
  :param env: environment
  """
  Logger.info("Performing action {0} on zkfc.".format(action))
  if action == "start":
    try:
      ZkfcSlaveDefault.status_static(env)
    except ComponentIsNotRunning:
      # Not running yet, so a start is actually required.
      ZkfcSlaveDefault.start_static(env)
  elif action == "stop":
    try:
      ZkfcSlaveDefault.status_static(env)
    except ComponentIsNotRunning:
      # Already stopped; nothing to do.
      pass
    else:
      ZkfcSlaveDefault.stop_static(env)
def initiate_safe_zkfc_failover():
  """
  If this is the active namenode, initiate a safe failover and wait for it to become the standby.
  If an error occurs, force a failover to happen by killing zkfc on this host. In this case, during the Restart,
  will also have to start ZKFC manually.
  """
  import params

  # Must kinit before running the HDFS command
  if params.security_enabled:
    Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"),
            user = params.hdfs_user)

  active_namenode_id = None
  standby_namenode_id = None
  active_namenodes, standby_namenodes, unknown_namenodes = get_namenode_states(params.hdfs_site, params.security_enabled, params.hdfs_user)
  # Only the first reported node of each state is considered.
  if active_namenodes:
    active_namenode_id = active_namenodes[0][0]
  if standby_namenodes:
    standby_namenode_id = standby_namenodes[0][0]

  if active_namenode_id:
    Logger.info(format("Active NameNode id: {active_namenode_id}"))
  if standby_namenode_id:
    Logger.info(format("Standby NameNode id: {standby_namenode_id}"))
  if unknown_namenodes:
    for unknown_namenode in unknown_namenodes:
      Logger.info("NameNode HA state for {0} is unknown".format(unknown_namenode[0]))

  if params.namenode_id == active_namenode_id and params.other_namenode_id == standby_namenode_id:
    # Failover if this NameNode is active and other NameNode is up and in standby (i.e. ready to become active on failover)
    Logger.info(format("NameNode {namenode_id} is active and NameNode {other_namenode_id} is in standby"))

    failover_command = format("hdfs haadmin -ns {dfs_ha_nameservices} -failover {namenode_id} {other_namenode_id}")
    check_standby_cmd = format("hdfs haadmin -ns {dfs_ha_nameservices} -getServiceState {namenode_id} | grep standby")

    msg = "Rolling Upgrade - Initiating a ZKFC failover on active NameNode host {0}.".format(params.hostname)
    Logger.info(msg)
    code, out = shell.call(failover_command, user=params.hdfs_user, logoutput=True)
    Logger.info(format("Rolling Upgrade - failover command returned {code}"))
    wait_for_standby = False

    if code == 0:
      wait_for_standby = True
    else:
      # Graceful failover failed: try to kill ZKFC manually to force one.
      was_zkfc_killed = kill_zkfc(params.hdfs_user)
      code, out = shell.call(check_standby_cmd, user=params.hdfs_user, logoutput=True)
      Logger.info(format("Rolling Upgrade - check for standby returned {code}"))
      # grep exit semantics: 255/no output means the NameNode is down.
      if code == 255 and out:
        Logger.info("Rolling Upgrade - NameNode is already down.")
      else:
        if was_zkfc_killed:
          # Only mandate that this be the standby namenode if ZKFC was indeed killed to initiate a failover.
          wait_for_standby = True

    if wait_for_standby:
      Logger.info("Waiting for this NameNode to become the standby one.")
      # Poll up to 50 times, 6s apart, for this node to report standby.
      Execute(check_standby_cmd,
              user=params.hdfs_user,
              tries=50,
              try_sleep=6,
              logoutput=True)
  else:
    msg = "Rolling Upgrade - Skipping ZKFC failover on NameNode host {0}.".format(params.hostname)
    Logger.info(msg)
def kill_zkfc(zkfc_user):
  """
  There are two potential methods for failing over the namenode, especially during a Rolling Upgrade.
  Option 1. Kill zkfc on primary namenode provided that the secondary is up and has zkfc running on it.
  Option 2. Silent failover
  :param zkfc_user: User that started the ZKFC process.
  :return: Return True if ZKFC was killed, otherwise, false.
  """
  import params
  if params.dfs_ha_enabled:
    if params.zkfc_pid_file:
      # Probe: the pid file exists and the recorded pid is still alive.
      check_process = as_user(format("ls {zkfc_pid_file} > /dev/null 2>&1 && ps -p `cat {zkfc_pid_file}` > /dev/null 2>&1"), user=zkfc_user)
      code, out = shell.call(check_process)
      if code == 0:
        Logger.debug("ZKFC is running and will be killed.")
        # SIGTERM for a graceful shutdown; the pid file is then deleted so
        # a stale file cannot confuse later status checks.
        kill_command = format("kill -15 `cat {zkfc_pid_file}`")
        Execute(kill_command,
             user=zkfc_user
        )
        File(params.zkfc_pid_file,
             action = "delete",
             )
        return True
  return False
def service(action=None, name=None, user=None, options="", create_pid_dir=False,
            create_log_dir=False):
  """
  Start or stop an HDFS component daemon via hadoop-daemon.sh.

  :param action: Either "start" or "stop"
  :param name: Component name, e.g., "namenode", "datanode", "secondarynamenode", "zkfc"
  :param user: User to run the command as
  :param options: Additional options to pass to command as a string
  :param create_pid_dir: Create PID directory
  :param create_log_dir: Create log file directory
  """
  import params

  options = options if options else ""
  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
  hadoop_env_exports = {
    'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir
  }
  log_dir = format("{hdfs_log_dir_prefix}/{user}")

  # NFS GATEWAY is always started by root using jsvc due to rpcbind bugs
  # on Linux such as CentOS6.2. https://bugzilla.redhat.com/show_bug.cgi?id=731542
  if name == "nfs3" :
    pid_file = format("{pid_dir}/hadoop_privileged_nfs3.pid")
    custom_export = {
      'HADOOP_PRIVILEGED_NFS_USER': params.hdfs_user,
      'HADOOP_PRIVILEGED_NFS_PID_DIR': pid_dir,
      'HADOOP_PRIVILEGED_NFS_LOG_DIR': log_dir
    }
    hadoop_env_exports.update(custom_export)

  # Shell guard: true only when the pid file exists and names a live process.
  process_id_exists_command = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file])

  # on STOP directories shouldn't be created
  # since during stop still old dirs are used (which were created during previous start)
  if action != "stop":
    if name == "nfs3":
      Directory(params.hadoop_pid_dir_prefix,
                mode=0755,
                owner=params.root_user,
                group=params.root_group
      )
    else:
      Directory(params.hadoop_pid_dir_prefix,
                  mode=0755,
                  owner=params.hdfs_user,
                  group=params.user_group
      )
    if create_pid_dir:
      Directory(pid_dir,
                owner=user,
                group=params.user_group,
                create_parents = True)
    if create_log_dir:
      if name == "nfs3":
        Directory(log_dir,
                  mode=0775,
                  owner=params.root_user,
                  group=params.user_group)
      else:
        Directory(log_dir,
                  owner=user,
                  group=params.user_group,
                  create_parents = True)

  if params.security_enabled and name == "datanode":
    ## The directory where pid files are stored in the secure data environment.
    hadoop_secure_dn_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
    hadoop_secure_dn_pid_file = format("{hadoop_secure_dn_pid_dir}/hadoop_secure_dn.pid")

    # At datanode_non_root stack version and further, we may start datanode as a non-root even in secure cluster
    if not (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) or params.secure_dn_ports_are_in_use:
      user = "root"
      pid_file = format(
        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")

    if action == 'stop' and (params.stack_version_formatted and check_stack_feature(StackFeature.DATANODE_NON_ROOT, params.stack_version_formatted)) and \
      os.path.isfile(hadoop_secure_dn_pid_file):
        # We need special handling for this case to handle the situation
        # when we configure non-root secure DN and then restart it
        # to handle new configs. Otherwise we will not be able to stop
        # a running instance
        user = "root"

        try:
          check_process_status(hadoop_secure_dn_pid_file)

          custom_export = {
            'HADOOP_SECURE_DN_USER': params.hdfs_user
          }
          hadoop_env_exports.update(custom_export)

        except ComponentIsNotRunning:
          pass

  hadoop_daemon = format("{hadoop_bin}/hadoop-daemon.sh")

  # Root runs the daemon script via sudo with an argv list; other users run a
  # single shell string (so ulimit_cmd can take effect) via su.
  if user == "root":
    cmd = [hadoop_daemon, "--config", params.hadoop_conf_dir, action, name]
    if options:
      cmd += [options, ]
    daemon_cmd = as_sudo(cmd)
  else:
    cmd = format("{ulimit_cmd} {hadoop_daemon} --config {hadoop_conf_dir} {action} {name}")
    if options:
      cmd += " " + options
    daemon_cmd = as_user(cmd, user)

  if action == "start":
    # remove pid file from dead process
    File(pid_file, action="delete", not_if=process_id_exists_command)

    try:
      Execute(daemon_cmd, not_if=process_id_exists_command, environment=hadoop_env_exports)
    except:
      show_logs(log_dir, user)
      raise
  elif action == "stop":
    try:
      Execute(daemon_cmd, only_if=process_id_exists_command, environment=hadoop_env_exports)
    except:
      show_logs(log_dir, user)
      raise

    # Wait until stop actually happens
    process_id_does_not_exist_command = format("! ( {process_id_exists_command} )")
    code, out = shell.call(process_id_does_not_exist_command,
                           env=hadoop_env_exports,
                           tries = 6,
                           try_sleep = 10,
                           )

    # If stop didn't happen, kill it forcefully
    if code != 0:
      code, out, err = shell.checked_call(("cat", pid_file), sudo=True, env=hadoop_env_exports, stderr=subprocess32.PIPE)
      pid = out
      Execute(("kill", "-9", pid), sudo=True)
      File(pid_file, action="delete")
def get_jmx_data(nn_address, modeler_type, metric, encrypted=False, security_enabled=False):
  """
  Fetch one metric from a NameNode/JournalNode JMX endpoint.

  :param nn_address: Namenode Address, e.g., host:port, ** MAY ** be preceded with "http://" or "https://" already.
  If not preceded, will use the encrypted param to determine.
  :param modeler_type: Modeler type to query using startswith function
  :param metric: Metric to return
  :param encrypted: when no scheme is present, True selects https://
  :param security_enabled: when True, authenticate via Kerberos (curl_krb_request)
  :return: Return an object representation of the metric, or None if it does not exist
  """
  if not nn_address or not modeler_type or not metric:
    return None

  nn_address = nn_address.strip()
  if not nn_address.startswith("http"):
    nn_address = ("https://" if encrypted else "http://") + nn_address
  if not nn_address.endswith("/"):
    nn_address = nn_address + "/"

  nn_address = nn_address + "jmx"
  Logger.info("Retrieve modeler: %s, metric: %s from JMX endpoint %s" % (modeler_type, metric, nn_address))

  if security_enabled:
    import params
    data, error_msg, time_millis = curl_krb_request(params.tmp_dir, params.smoke_user_keytab, params.smokeuser_principal, nn_address,
                            "jn_upgrade", params.kinit_path_local, False, None, params.smoke_user)
  else:
    data = urllib2.urlopen(nn_address).read()
  my_data = None
  if data:
    data_dict = json.loads(data)
    if data_dict:
      # Scan beans for the first one whose modelerType matches the prefix.
      for el in data_dict['beans']:
        if el is not None and el['modelerType'] is not None and el['modelerType'].startswith(modeler_type):
          if metric in el:
            my_data = el[metric]
            if my_data:
              # Metric values are themselves JSON-encoded; decode them.
              # NOTE: the loop only breaks on a truthy value, so a falsy
              # metric keeps scanning later beans (preserved as-is).
              my_data = json.loads(str(my_data))
              break
  return my_data
def get_port(address):
  """
  Extracts port from the address like 0.0.0.0:1019
  """
  if address is None:
    return None
  # Optional http(s):// scheme, then host, then a 1-5 digit port.
  match = re.search(r'(?:http(?:s)?://)?([\w\d.]*):(\d{1,5})', address)
  if match is None or len(match.groups()) < 2:
    return None
  return int(match.group(2))
def is_secure_port(port):
  """
  Returns True if port is root-owned at *nix systems
  """
  # Ports below 1024 are privileged; None means "no port" -> not secure.
  return port is not None and port < 1024
def is_previous_fs_image():
  """
  Return True if a "previous" folder exists in any HDFS NameNode name dir.
  """
  import params
  if params.dfs_name_dir:
    # dfs.namenode.name.dir may hold several comma-separated directories.
    for name_dir in params.dfs_name_dir.split(','):
      if os.path.isdir(os.path.join(name_dir, "previous")):
        return True
  return False
def get_hdfs_binary(distro_component_name):
  """
  Get the hdfs binary to use depending on the stack and version.
  :param distro_component_name: e.g., hadoop-hdfs-namenode, hadoop-hdfs-datanode
  :return: The hdfs binary to use
  """
  import params
  # Stacks with rolling-upgrade support ship a versioned binary under the
  # stack root; otherwise rely on the plain `hdfs` found on the PATH.
  if params.stack_version_formatted and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.stack_version_formatted):
    return "{0}/current/{1}/bin/hdfs".format(params.stack_root, distro_component_name)
  return "hdfs"
def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False):
  """
  Get the dfsadmin base command constructed using hdfs_binary path and passing
  namenode address as explicit -fs argument.
  :param hdfs_binary: path to hdfs binary to use
  :param use_specific_namenode: flag if set and Namenode HA is enabled, then the
  dfsadmin command will use current namenode's address
  :return: the constructed dfsadmin base command
  """
  import params
  # In HA mode a caller may need to target this host's own NameNode RPC
  # address instead of the logical nameservice address.
  if params.dfs_ha_enabled and use_specific_namenode:
    return format("{hdfs_binary} dfsadmin -fs hdfs://{params.namenode_rpc}")
  return format("{hdfs_binary} dfsadmin -fs {params.namenode_address}")
def set_up_zkfc_security(params):
  """ Sets up security for accessing ZooKeeper on secure clusters """

  # Secure ZNode ACLs need stack support (HDP 2.6+).
  if params.stack_supports_zk_security is False:
    Logger.info("Skipping setting up secure ZNode ACL for HFDS as it's supported only for HDP 2.6 and above.")
    return

  # check if the namenode is HA
  if params.dfs_ha_enabled is False:
    Logger.info("Skipping setting up secure ZNode ACL for HFDS as it's supported only for NameNode HA mode.")
    return

  # check if the cluster is secure (skip otherwise)
  if params.security_enabled is False:
    Logger.info("Skipping setting up secure ZNode ACL for HFDS as it's supported only for secure clusters.")
    return

  # process the JAAS template so ZKFC can authenticate to ZooKeeper
  File(os.path.join(params.hadoop_conf_secure_dir, 'hdfs_jaas.conf'),
       owner=params.hdfs_user,
       group=params.user_group,
       mode=0644,
       content=Template("hdfs_jaas.conf.j2")
  )
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# @name : Infoga - Email Information Gathering
# @url : http://github.com/m4ll0k
# @author : Momo Outaadi (m4ll0k)
from lib.output import *
from lib.request import *
from lib.parser import *
class Google(Request):
    """Collect email addresses for a target domain from Google web search."""

    def __init__(self, target):
        Request.__init__(self)
        self.target = target

    def search(self):
        """Scrape up to 7 result pages (50 results each); return unique emails."""
        test('Searching "%s" in Google...' % (self.target))
        base_url = 'https://www.google.com/search?q=intext:%22%40{target}%22&num=50'.format(
            target=self.target)
        mails = []
        # First 350 results (page 0 to 6). With num=50, the "start" offset
        # must advance by the page size: the previous code advanced it by 1,
        # so every request returned almost the same first page of results.
        for page in range(0, 7):
            url = base_url + "&start=" + str(page * 50)
            try:
                resp = self.send(
                    method='GET',
                    url=url
                )
                # Stop as soon as Google starts rate-limiting us.
                if "detected unusual traffic" in resp.text:
                    break
                for email in self.getemail(resp.content, self.target):
                    if email not in mails:
                        mails.append(email)
            except Exception:
                # Best-effort scraping: skip pages that fail to fetch/parse.
                pass
        return mails

    def getemail(self, content, target):
        """Delegate email extraction from raw HTML to the parser helper."""
        return parser(content, target).email()
|
import datetime
import functools
import hashlib
import random
import bitly_api
import jinja2
from django.conf import settings
from django.template.loader import get_template
from django.utils.encoding import smart_str
from django.utils.translation import ugettext, ungettext
from django_jinja import library
from taggit.models import TaggedItem
from kuma.core.cache import memcache
from kuma.core.urlresolvers import reverse
from kuma.core.utils import bitly
from .. import DEMO_LICENSES, DEMOS_CACHE_NS_KEY, TAG_DESCRIPTIONS
from ..models import Submission
# Seconds to cache rendered inclusion-tag output; overridable via the
# TEMPLATE_INCLUDE_CACHE_EXPIRES Django setting (default: 5 minutes).
TEMPLATE_INCLUDE_CACHE_EXPIRES = getattr(settings,
                                         'TEMPLATE_INCLUDE_CACHE_EXPIRES', 300)
def new_context(context, **kw):
    """Return a plain-dict copy of *context* updated with **kw overrides."""
    merged = dict(context.items())
    merged.update(kw)
    return merged
# TODO:liberate ?
def register_cached_inclusion_tag(template, key_fn=None,
                                  expires=TEMPLATE_INCLUDE_CACHE_EXPIRES):
    """Decorator for inclusion tags with output caching.

    Accepts a string or function to generate a cache key based on the incoming
    parameters, along with an expiration time configurable as
    INCLUDE_CACHE_EXPIRES or an explicit parameter"""

    if key_fn is None:
        # Default: the template path itself is the key (one shared entry).
        key_fn = template

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kw):
            if type(key_fn) is str:
                cache_key = key_fn
            else:
                cache_key = key_fn(*args, **kw)

            out = memcache.get(cache_key)
            if out is None:
                # Cache miss: render the template with the tag's context.
                context = f(*args, **kw)
                t = get_template(template).render(context)
                out = jinja2.Markup(t)
                memcache.set(cache_key, out, expires)

            return out
        return library.global_function(wrapper)
    return decorator
def submission_key(prefix):
    """Produce a cache key function with a prefix, which generates the rest of
    the key based on a submission ID and last-modified timestamp."""
    def build_key(*args, **kw):
        submission = args[0]
        return 'submission:%s:%s:%s' % (prefix,
                                        submission.id,
                                        submission.modified)
    return build_key
# TODO: All of these inclusion tags could probably be generated & registered
# from a dict of function names and inclusion tag args, since the method bodies
# are all identical. Might be astronaut architecture, though.
@library.global_function
@library.render_with('demos/elements/demos_head.html')
def demos_head(request):
    # Render extra <head> markup for demos pages; locals() passes `request`
    # through as the template context.
    return locals()
@library.global_function
@library.render_with('demos/elements/submission_creator.html')
def submission_creator(submission):
    # Render the creator byline for a submission.
    return locals()
@library.global_function
@library.render_with('demos/elements/user_link.html')
def user_link(user, show_gravatar=False, gravatar_size=48,
              gravatar_default='mm'):
    # Render a profile link for `user`, optionally with a gravatar image.
    return locals()
@library.global_function
@library.render_with('demos/elements/submission_thumb.html')
def submission_thumb(submission, extra_class=None, thumb_width="200",
                     thumb_height="150", is_homepage=False):
    """Render a submission thumbnail, decorated with any award flags."""
    # Renamed from `vars`, which shadowed the `vars` builtin.
    context = locals()

    flags = submission.get_flags()

    # Dict of metadata associated with flags for demos
    # TODO: Move to a constant or DB table? Too much view stuff here?
    flags_meta = {
        # flag name thumb class flag description
        'firstplace': ('first-place', ugettext('First Place')),
        'secondplace': ('second-place', ugettext('Second Place')),
        'thirdplace': ('third-place', ugettext('Third Place')),
        'finalist': ('finalist', ugettext('Finalist')),
        'featured': ('featured', ugettext('Featured')),
    }

    # If there are any flags, pass them onto the template. Special treatment
    # for the first flag, which takes priority over all others for display in
    # the thumb.
    main_flag = (len(flags) > 0) and flags[0] or None
    context['all_flags'] = flags
    context['main_flag'] = main_flag
    if main_flag in flags_meta:
        context['main_flag_class'] = flags_meta[main_flag][0]
        context['main_flag_description'] = flags_meta[main_flag][1]
    context['is_homepage'] = is_homepage

    return context
def submission_listing_cache_key(*args, **kw):
    """Cache key for a submission listing: namespace counter plus MD5 of the
    request path and username (args[0] is the request)."""
    ns_key = memcache.get(DEMOS_CACHE_NS_KEY)
    if ns_key is None:
        # First use (or namespace was invalidated): start a fresh namespace.
        ns_key = random.randint(1, 10000)
        memcache.set(DEMOS_CACHE_NS_KEY, ns_key)
    full_path = args[0].get_full_path()
    username = args[0].user.username
    return 'demos_%s:%s' % (
        ns_key,
        hashlib.md5(full_path + username).hexdigest())
@register_cached_inclusion_tag('demos/elements/submission_listing.html',
                               submission_listing_cache_key)
def submission_listing(request, submission_list, is_paginated, paginator,
                       page_obj, feed_title, feed_url,
                       cols_per_row=3, pagination_base_url='', show_sorts=True,
                       show_submit=False):
    # Cached inclusion tag: render a grid of submissions with pagination.
    return locals()
@library.global_function
@library.render_with('demos/elements/tech_tags_list.html')
def tech_tags_list():
    # Render the list of known technology tags (no context needed).
    return locals()
# Not cached, because it's small and changes based on
# current search query string
@library.global_function
@library.render_with('demos/elements/search_form.html')
@jinja2.contextfunction
def search_form(context):
    # Re-expose the calling template's context to the search-form partial.
    return new_context(**locals())
@library.global_function
def devderby_tag_to_date_url(tag):
    """Turn a devderby tag like challenge:2011:june into a date-based URL"""
    # HACK: Not super happy with this, but it works for now
    if not tag:
        return ''
    # Last two segments are year and month, e.g. "challenge:2011:june".
    parts = tag.split(':')
    return reverse('demos_devderby_by_date', args=(parts[-2], parts[-1]))
@library.global_function
def license_link(license_name):
    """URL for a known demo license; unknown names pass through unchanged."""
    if license_name not in DEMO_LICENSES:
        return license_name
    return DEMO_LICENSES[license_name]['link']
@library.global_function
def license_title(license_name):
    """Display title for a known demo license; unknown names pass through."""
    if license_name not in DEMO_LICENSES:
        return license_name
    return DEMO_LICENSES[license_name]['title']
@library.global_function
def tag_title(tag):
    """Human-readable title for a tag or tag name; falls back to the name."""
    if not tag:
        return ''
    # Accept either a raw name string or a taggit Tag object.
    name = (isinstance(tag, basestring)) and tag or tag.name
    if name not in TAG_DESCRIPTIONS:
        return name
    return TAG_DESCRIPTIONS[name]['title']
@library.global_function
def tag_description(tag):
    """Long description for a tag or tag name; falls back to the name."""
    if not tag:
        return ''
    # Accept either a raw name string or a taggit Tag object.
    name = (isinstance(tag, basestring)) and tag or tag.name
    details = TAG_DESCRIPTIONS.get(name, {})
    if 'description' in details:
        return details['description']
    return name
@library.global_function
def tag_learn_more(tag):
    """'Learn more' links for a tag object; empty list when none exist."""
    if not tag:
        return ''
    details = TAG_DESCRIPTIONS.get(tag.name, {})
    return details.get('learn_more', [])
@library.global_function
def tag_meta(tag, other_name):
    """Get metadata for a tag or tag name."""
    # TODO: Replace usage of tag_{title,description,learn_more}?
    if not tag:
        return ''
    # Accept either a raw name string or a taggit Tag object.
    name = tag if isinstance(tag, basestring) else tag.name
    details = TAG_DESCRIPTIONS.get(name, {})
    return details.get(other_name, '')
@library.global_function
def tags_for_object(obj):
    """All taggit tags attached to *obj*."""
    return obj.taggit_tags.all()
@library.global_function
def tech_tags_for_object(obj):
    # Only tags in the "tech" namespace.
    return obj.taggit_tags.all_ns('tech')
@library.global_function
def tags_used_for_submissions():
    # Every tag currently attached to at least one Submission.
    return TaggedItem.tags_for(Submission)
@library.filter
def date_diff(timestamp, to=None):
    """Humanize the difference between *timestamp* and *to* (default: today).

    Returns "today"/"yesterday"/"tomorrow" for 0/-1/+1 days, otherwise
    "in N <unit>" or "N <unit> ago" using the largest fitting unit.
    """
    if not timestamp:
        return ""

    compare_with = to or datetime.date.today()
    delta = timestamp - compare_with

    if delta.days == 0:
        return u"today"
    elif delta.days == -1:
        return u"yesterday"
    elif delta.days == 1:
        return u"tomorrow"

    # Largest-first unit table: (days per unit, pluralizing unit name).
    chunks = (
        (365.0, lambda n: ungettext('year', 'years', n)),
        (30.0, lambda n: ungettext('month', 'months', n)),
        (7.0, lambda n: ungettext('week', 'weeks', n)),
        (1.0, lambda n: ungettext('day', 'days', n)),
    )

    # abs(delta.days) >= 2 at this point, so the 1.0 chunk always matches
    # and `count`/`name` are guaranteed to be bound after the loop.
    for i, (chunk, name) in enumerate(chunks):
        if abs(delta.days) >= chunk:
            count = abs(round(delta.days / chunk, 0))
            break

    date_str = ugettext('%(number)d %(type)s') % {'number': count,
                                                  'type': name(count)}

    if delta.days > 0:
        return "in " + date_str
    else:
        return date_str + " ago"
# Note: Deprecated. Only used in kuma/demos/.
@library.filter
def bitly_shorten(url):
    """Attempt to shorten a given URL through bit.ly / mzl.la"""
    cache_key = 'bitly:%s' % hashlib.md5(smart_str(url)).hexdigest()
    short_url = memcache.get(cache_key)
    if short_url is None:
        try:
            short_url = bitly.shorten(url)['url']
            # Cache for ~a year; shortened URLs are stable once created.
            memcache.set(cache_key, short_url, 60 * 60 * 24 * 30 * 12)
        except (bitly_api.BitlyError, KeyError):
            # Just in case the bit.ly service fails or the API key isn't
            # configured, fall back to using the original URL.
            return url
    return short_url
|
# -*- coding: utf-8 -*-
import re
from django.db.models import Count, FieldDoesNotExist
from tcms.management.models import Product
# Operation selectors for distinct_m2m_rows(): count rows vs. return QuerySet.
COUNT_DISTINCT = 0
QUERY_DISTINCT = 1
ACCEPTABLE_BOOL_VALUES = ('0', '1', 0, 1, True, False)


def parse_bool_value(value):
    """Normalize an acceptable boolean-ish value.

    '0' maps to False and '1' to True; other acceptable values (0, 1,
    True, False) are returned unchanged.

    Raises ValueError for anything outside ACCEPTABLE_BOOL_VALUES.
    """
    if value not in ACCEPTABLE_BOOL_VALUES:
        raise ValueError('Unacceptable bool value.')
    # Compare by equality, not identity: `value is '0'` only worked because
    # CPython interns short string literals, which is not guaranteed.
    if value == '0':
        return False
    if value == '1':
        return True
    return value
def pre_check_product(values):
    """Resolve a Product from a dict (key 'product'), an id, or a name.

    Returns None when a dict is given without a truthy 'product' entry.
    Raises ValueError for unsupported input types.
    """
    if isinstance(values, dict):
        if not values.get('product'):
            return
        product_str = values['product']
    else:
        product_str = values

    if not (isinstance(product_str, str) or isinstance(product_str, int)):
        raise ValueError('The type of product is not recognizable.')

    # Numeric input is treated as a product id; anything else as a name.
    try:
        product_id = int(product_str)
        return Product.objects.get(id=product_id)
    except ValueError:
        return Product.objects.get(name=product_str)
def pre_process_ids(value):
    """Coerce a list, comma-separated string, or single int into an id list."""
    if isinstance(value, list):
        # Falsy entries (0, '', None) are dropped before conversion.
        return [item if isinstance(item, int) else int(item.strip())
                for item in value if item]
    if isinstance(value, str):
        return [int(piece.strip()) for piece in value.split(',') if piece]
    if isinstance(value, int):
        return [value]
    raise TypeError('Unrecognizable type of ids')
def compare_list(src_list, dest_list):
    """Elements of src_list absent from dest_list (set semantics, unordered)."""
    return list(set(src_list).difference(dest_list))
def _lookup_fields_in_model(cls, fields):
    """Lookup ManyToMany fields in current table and related tables. For
    distinct duplicate rows when using inner join

    @param cls: table model class
    @type cls: subclass of django.db.models.Model
    @param fields: fields in where condition.
    @type fields: list
    @return: whether use distinct or not
    @rtype: bool

    Example:
        cls is TestRun (<class 'tcms.testruns.models.TestRun'>)
        fields is 'plan__case__is_automated'
                    |     |         |----- Normal Field in TestCase
                    |     |--------------- ManyToManyKey in TestPlan
                    |--------------------- ForeignKey in TestRun

    1. plan is a ForeignKey field of TestRun and it will trigger getting the
    related model TestPlan by django orm framework.
    2. case is a ManyToManyKey field of TestPlan and it will trigger using
    INNER JOIN to join TestCase, here will be many duplicated rows.
    3. is_automated is a local field of TestCase only filter the rows (where
    condition).

    So this method will find out that case is a m2m field and notice the
    outter method use distinct to avoid duplicated rows.
    """
    for field in fields:
        try:
            # get_field_by_name returns (field, model, direct, m2m);
            # the last element flags a ManyToMany relation.
            field_info = cls._meta.get_field_by_name(field)
            if field_info[-1]:
                yield True
            else:
                # Follow relational fields into the related model so the
                # next path segment is looked up on that model.
                if getattr(field_info[0], 'related', None):
                    cls = field_info[0].related.parent_model
        except FieldDoesNotExist:
            # Unknown path segments are simply skipped.
            pass
def _need_distinct_m2m_rows(cls, fields):
    """Return True when the field path crosses a ManyToMany relation.

    @param cls: table model class
    @type cls: subclass of django.db.models.Model
    @param fields: fields in where condition.
    @type fields: list
    @return: whether use distinct or not
    @rtype: bool
    """
    if not fields:
        return False
    # First m2m hop found (or False when the generator yields nothing).
    return next(_lookup_fields_in_model(cls, fields), False)
def distinct_m2m_rows(cls, values, op_type):
    """By django model field looking up syntax, loop values and check the
    condition if there is a multi-tables query.

    @param cls: table model class
    @type cls: subclass of django.db.models.Model
    @param values: fields in where condition.
    @type values: dict
    @param op_type: COUNT_DISTINCT or QUERY_DISTINCT
    @return: QuerySet
    @rtype: django.db.models.query.QuerySet
    """
    flag = False
    # Any m2m hop in a multi-table lookup can duplicate joined rows,
    # in which case DISTINCT must be applied.
    for field in values.iterkeys():
        if '__' in field:
            if _need_distinct_m2m_rows(cls, field.split('__')):
                flag = True
                break

    qs = cls.objects.filter(**values)
    if op_type == COUNT_DISTINCT:
        return qs.aggregate(Count('pk', distinct=True))['pk__count'] if flag \
            else qs.count()
    elif op_type == QUERY_DISTINCT:
        return qs.distinct() if flag else qs
    else:
        raise TypeError('Not implement op type %s' % op_type)
def distinct_count(cls, values):
    # Distinct row count matching *values* (deduplicates m2m joins).
    return distinct_m2m_rows(cls, values, op_type=COUNT_DISTINCT)
def distinct_filter(cls, values):
    # Distinct QuerySet matching *values* (deduplicates m2m joins).
    return distinct_m2m_rows(cls, values, op_type=QUERY_DISTINCT)
class Comment(object):
    """Bulk-add one comment to several objects of a single content type."""

    def __init__(self, request, content_type, object_pks, comment=None):
        # request: current HttpRequest (source of user and REMOTE_ADDR)
        # content_type: dotted "app_label.model" string
        # object_pks: iterable of target primary keys
        # comment: the comment text to post
        self.request = request
        self.content_type = content_type
        self.object_pks = object_pks
        self.comment = comment

    def add(self):
        """Post self.comment to every target; return form errors if invalid."""
        import time
        from django.contrib import comments
        from django.contrib.comments import signals
        from django.db import models

        comment_form = comments.get_form()
        try:
            model = models.get_model(*self.content_type.split('.', 1))
            targets = model._default_manager.filter(pk__in=self.object_pks)
        except:
            raise
        for target in targets.iterator():
            d_form = comment_form(target)
            timestamp = str(time.time()).split('.')[0]
            object_pk = str(target.pk)
            data = {
                'content_type': self.content_type,
                'object_pk': object_pk,
                'timestamp': timestamp,
                'comment': self.comment
            }
            # The security hash proves the form data matches the target;
            # it must be generated from the same fields the form submits.
            security_hash_dict = {
                'content_type': self.content_type,
                'object_pk': object_pk,
                'timestamp': timestamp
            }
            data['security_hash'] = d_form.generate_security_hash(
                **security_hash_dict)
            form = comment_form(target, data=data)

            # Response the errors if got
            if not form.is_valid():
                return form.errors

            # Otherwise create the comment
            comment = form.get_comment_object()
            comment.ip_address = self.request.META.get("REMOTE_ADDR", None)
            if self.request.user.is_authenticated():
                comment.user = self.request.user

            # Signal that the comment is about to be saved
            signals.comment_will_be_posted.send(
                sender=comment.__class__,
                comment=comment,
                request=self.request
            )

            # Save the comment and signal that it was saved
            comment.save()
            signals.comment_was_posted.send(
                sender=comment.__class__,
                comment=comment,
                request=self.request
            )
        return
# Matches the canonical duration form: optional day/hour/minute/second parts.
estimated_time_re = re.compile(r'^(\d+[d])?(\d+[h])?(\d+[m])?(\d+[s])?$')


def pre_process_estimated_time(value):
    '''Pre-process estimated_time.

    Accepts either the xdxhxmxs form (returned unchanged) or HH:MM:SS
    (converted to xhxmxs).

    Raises ValueError for non-strings and unrecognized formats.
    (Error message spelling fixed: was "Invaild".)
    '''
    if not isinstance(value, str):
        raise ValueError('Invalid estimated_time format.')

    if estimated_time_re.match(value.replace(' ', '')):
        # Already in the canonical xdxhxmxs form.
        return value

    # FIXME: missed validation to invalid value in HH:MM:SS format.
    # for example: sfsdfs:sfwerwe:rerwerwe
    parts = value.split(':')
    if len(parts) != 3:
        raise ValueError('Invalid estimated_time format.')
    hours, minutes, seconds = parts
    return '{0}h{1}m{2}s'.format(hours, minutes, seconds)
|
import concurrent.futures as futures
import grpc
import time
import ProtoConfig
import generated.proto_out.dao_pb2 as dao_pb2
import generated.proto_out.dao_pb2_grpc as dao_grpc
from pylibs.Database import Mongo
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class Dao(dao_grpc.DaoServicer):
    """gRPC servicer answering Select queries from a Mongo-backed sensor DB."""

    def __init__(self, sensor_db):
        super().__init__()
        # Handle to the Mongo wrapper used for all queries.
        self.sensor_db = sensor_db

    def Select(self, request, context):
        """Fetch the requested columns and return them as a SelectReply."""
        table = request.table
        limit = request.limit
        cols = request.cols
        print('Got request {\n%s}\n' % (request))
        colNames = [col.name for col in cols]
        findResult = self.sensor_db.Find(table=table, columns=colNames, limit=limit)
        allColValues = {col.name: [] for col in cols} # Col name to list of vals
        # Pivot row-oriented documents into per-column value lists.
        for doc in findResult:
            for col in cols:
                # print('%s added to %s' % (doc[col.name], col.name))
                allColValues[col.name].append(doc[col.name])
        dataColumns = [self._NewDataColumn(colName, vals) for (colName, vals)
                       in allColValues.items()]
        return dao_pb2.SelectReply(columns=dataColumns)

    def _NewDataColumn(self, columnName, values):
        """Build a DataColumn proto, choosing int/string field by the type
        of the first value; empty or unknown-typed columns stay valueless."""
        datacolumn = dao_pb2.DataColumn(name=columnName)
        if not values:
            print("Warning: No values found.")
        elif type(values[0]) is int:
            datacolumn.intValues.extend(values)
        elif type(values[0]) is str:
            datacolumn.stringValues.extend(values)
        else:
            print("ERROR: Unknown Type!")
        return datacolumn
def serve():
    """Start the Dao gRPC server and block until interrupted."""
    protoConfig = ProtoConfig.getConfig()
    sensor_db = Mongo()
    sensor_db.GetClient() # initialize the DB connection
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    dao_grpc.add_DaoServicer_to_server(Dao(sensor_db), server)
    port = protoConfig.ports.daoPort
    server.add_insecure_port('[::]:%s' % port)
    server.start()
    print('Started Dao Server on Port %s ' % port)
    # grpc serves from background threads; keep the main thread alive
    # until Ctrl-C, then stop the server.
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)
if __name__ == '__main__':
serve()
|
"""
__name__ = settings.py
__author__ = Yash Patel
__description__ = Settings file for the accuracy predictor plot
"""
# NOTE: All the below paths are relative to the starting directory of
# gem5, since this is the default behavior for gem5 execution
# --------------------------- Sim Params ---------------------------- #
# gem5 branch-predictor classes to sweep over.
BP_NAMES = [
    "LocalBP",     # simple local history branch predictor
    "TournamentBP", # mixed global/local history predictor
    "BiModeBP",     # 2-bit history mode predictor
    "LTAGE",        # often best-performing current mainstream predictor

    "AlwaysBP",    # always true branch predictor (static)
    "NeuroBP",     # single perceptron neural branch predictor
    "NeuroPathBP"  # neural path branch predictor
]

# Benchmark executables, indexed by position.
EXEC_NAMES = [
    "ConnCompSmall",  # 0
    "ConnCompMedium", # 1
    "Bubblesort",     # 2
    "IntMM",       # 3
    "Oscar",       # 4
    "Perm",        # 5
    "Puzzle",      # 6
    "Queens",      # 7
    "Quicksort",   # 8
    "RealMM",      # 9
    "Towers",      # 10
    "Treesort",    # 11
]

# --------------------------- Input Specs ---------------------------- #
# location of the final results dump file
INPUT_FILE = "m5out/stats.txt"

# --------------------------- Output Specs ---------------------------- #
# outputs are by convention specified by executable
OUTPUT_DIR = "m5cached"

# outputs are by convention specified by executable; {} is the executable name
FIGURE_DIR = "m5cached/{}/figures"
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime, date, timedelta, time
from pandas.compat import map, zip, range, lrange, lzip, long
from pandas import compat
from numpy import nan
from numpy.random import randn
import pytest
import numpy as np
import pandas.core.common as com
from pandas import (DataFrame, Index, Series, notna, isna,
MultiIndex, DatetimeIndex, Timestamp,
date_range)
import pandas as pd
from pandas._libs.tslib import iNaT
from pandas.tseries.offsets import BDay
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer,
is_scalar)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
from pandas.core.indexing import IndexingError
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameIndexing(TestData):
    def test_getitem(self):
        # Slicing
        sl = self.frame[:20]
        assert len(sl.index) == 20

        # Column access
        for _, series in compat.iteritems(sl):
            assert len(series.index) == 20
            assert tm.equalContents(series.index, sl.index)

        for key, _ in compat.iteritems(self.frame._series):
            assert self.frame[key] is not None

        # Missing labels raise KeyError naming the label.
        assert 'random' not in self.frame
        with tm.assert_raises_regex(KeyError, 'random'):
            self.frame['random']

        # Columns whose names are not identifiers ('$10', '@awesome_domain')
        # must still round-trip through __getitem__.
        df = self.frame.copy()
        df['$10'] = randn(len(df))

        ad = randn(len(df))
        df['@awesome_domain'] = ad

        # A quoted expression is not a column label.
        with pytest.raises(KeyError):
            df.__getitem__('df["$10"]')

        res = df['@awesome_domain']
        tm.assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
try:
df[['baf']]
except KeyError:
pass
else:
self.fail("Dataframe failed to raise KeyError")
    def test_get(self):
        # .get mirrors dict.get: existing column, missing -> None or default.
        b = self.frame.get('B')
        assert_series_equal(b, self.frame['B'])

        assert self.frame.get('foo') is None
        assert_series_equal(self.frame.get('foo', self.frame['B']),
                            self.frame['B'])

        # None
        # GH 5652
        # .get(None) must return None even for empty / column-only frames.
        for df in [DataFrame(), DataFrame(columns=list('AB')),
                   DataFrame(columns=list('AB'), index=range(3))]:
            result = df.get(None)
            assert result is None
def test_getitem_iterator(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
    def test_getitem_list(self):
        # List selection preserves order and the columns' name attribute.
        self.frame.columns.name = 'foo'

        result = self.frame[['B', 'A']]
        result2 = self.frame[Index(['B', 'A'])]

        expected = self.frame.loc[:, ['B', 'A']]
        expected.columns.name = 'foo'

        assert_frame_equal(result, expected)
        assert_frame_equal(result2, expected)

        assert result.columns.name == 'foo'

        # Any missing label in the list raises KeyError.
        with tm.assert_raises_regex(KeyError, 'not in index'):
            self.frame[['B', 'A', 'food']]
        with tm.assert_raises_regex(KeyError, 'not in index'):
            self.frame[Index(['B', 'A', 'foo'])]

        # tuples
        df = DataFrame(randn(8, 3),
                       columns=Index([('foo', 'bar'), ('baz', 'qux'),
                                      ('peek', 'aboo')], name=['sth', 'sth2']))

        result = df[[('foo', 'bar'), ('baz', 'qux')]]
        expected = df.iloc[:, :2]
        assert_frame_equal(result, expected)
        assert result.columns.names == ['sth', 'sth2']
    def test_getitem_callable(self):
        # GH 12533
        # A callable key is evaluated against the frame to produce the key:
        # scalar label, label list, or boolean mask.
        result = self.frame[lambda x: 'A']
        tm.assert_series_equal(result, self.frame.loc[:, 'A'])

        result = self.frame[lambda x: ['A', 'B']]
        tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])

        df = self.frame[:3]
        result = df[lambda x: [True, False, True]]
        tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
    def test_setitem_list(self):
        # Assigning a frame to a column list matches positionally, so this
        # swaps columns A and B.
        self.frame['E'] = 'foo'
        data = self.frame[['A', 'B']]
        self.frame[['B', 'A']] = data

        assert_series_equal(self.frame['B'], data['A'], check_names=False)
        assert_series_equal(self.frame['A'], data['B'], check_names=False)

        # Shape mismatches are rejected with informative errors.
        with tm.assert_raises_regex(ValueError,
                                    'Columns must be same length as key'):
            data[['A']] = self.frame[['A', 'B']]

        with tm.assert_raises_regex(ValueError, 'Length of values '
                                    'does not match '
                                    'length of index'):
            data['A'] = range(len(data.index) - 1)

        # Row-wise list assignment via .loc, for int then str dtypes.
        df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
        df.loc[1, ['tt1', 'tt2']] = [1, 2]

        result = df.loc[df.index[1], ['tt1', 'tt2']]
        expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
        assert_series_equal(result, expected)

        df['tt1'] = df['tt2'] = '0'
        df.loc[df.index[1], ['tt1', 'tt2']] = ['1', '2']
        result = df.loc[df.index[1], ['tt1', 'tt2']]
        expected = Series(['1', '2'], df.columns, name=1)
        assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index, name='tuples')
assert_series_equal(result, expected)
    def test_setitem_mulit_index(self):
        # NOTE(review): method name has a typo ("mulit"); kept as-is so that
        # test selection by name elsewhere is not broken.
        # GH7655, test that assigning to a sub-frame of a frame
        # with multi-index columns aligns both rows and columns
        it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
            ['left', 'center', 'right']

        cols = MultiIndex.from_product(it)
        index = pd.date_range('20141006', periods=20)
        vals = np.random.randint(1, 1000, (len(index), len(cols)))
        df = pd.DataFrame(vals, columns=cols, index=index)

        # Shuffle the row order (and later the sub-column order) to prove
        # assignment aligns on labels, not on position.
        i, j = df.index.values.copy(), it[-1][:]

        np.random.shuffle(i)
        df['jim'] = df['jolie'].loc[i, ::-1]
        assert_frame_equal(df['jim'], df['jolie'])

        np.random.shuffle(j)
        df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
        assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])

        np.random.shuffle(j)
        df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
        assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_setitem_callable(self):
# GH 12533
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]})
df[lambda x: 'A'] = [11, 12, 13, 14]
exp = pd.DataFrame({'A': [11, 12, 13, 14], 'B': [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
df = pd.DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = pd.DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
def test_getitem_boolean(self):
    """Boolean row indexing: ndarray/object/Series masks, length checks, df[df > 0]."""
    # boolean indexing
    d = self.tsframe.index[10]
    indexer = self.tsframe.index > d
    indexer_obj = indexer.astype(object)
    subindex = self.tsframe.index[indexer]
    subframe = self.tsframe[indexer]
    tm.assert_index_equal(subindex, subframe.index)
    # a mask of the wrong length is rejected
    with tm.assert_raises_regex(ValueError, 'Item wrong length'):
        self.tsframe[indexer[:-1]]
    subframe_obj = self.tsframe[indexer_obj]
    assert_frame_equal(subframe_obj, subframe)
    # a non-boolean DataFrame key is rejected
    with tm.assert_raises_regex(ValueError, 'boolean values only'):
        self.tsframe[self.tsframe]
    # test that Series work
    indexer_obj = Series(indexer_obj, self.tsframe.index)
    subframe_obj = self.tsframe[indexer_obj]
    assert_frame_equal(subframe_obj, subframe)
    # test that Series indexers reindex
    # we are producing a warning that since the passed boolean
    # key is not the same as the given index, we will reindex
    # not sure this is really necessary
    with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
        indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
        subframe_obj = self.tsframe[indexer_obj]
        assert_frame_equal(subframe_obj, subframe)
    # test df[df > 0]
    for df in [self.tsframe, self.mixed_frame,
               self.mixed_float, self.mixed_int]:
        data = df._get_numeric_data()
        bif = df[df > 0]
        # build the expected result column-by-column with np.where
        bifw = DataFrame(dict([(c, np.where(data[c] > 0, data[c], np.nan))
                               for c in data.columns]),
                         index=data.index, columns=data.columns)
        # add back other columns to compare
        for c in df.columns:
            if c not in bifw:
                bifw[c] = df[c]
        bifw = bifw.reindex(columns=df.columns)
        assert_frame_equal(bif, bifw, check_dtype=False)
        for c in df.columns:
            if bif[c].dtype != bifw[c].dtype:
                assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self):
    """df[df > 0] keeps integer dtypes when the mask introduces no NaNs."""
    # don't upcast if we don't need to
    df = self.tsframe.copy()
    df['E'] = 1
    df['E'] = df['E'].astype('int32')
    df['E1'] = df['E'].copy()
    df['F'] = 1
    df['F'] = df['F'].astype('int64')
    df['F1'] = df['F'].copy()
    casted = df[df > 0]
    result = casted.get_dtype_counts()
    expected = Series({'float64': 4, 'int32': 2, 'int64': 2})
    assert_series_equal(result, expected)
    # int block splitting
    df.loc[df.index[1:3], ['E1', 'F1']] = 0
    casted = df[df > 0]
    result = casted.get_dtype_counts()
    expected = Series({'float64': 6, 'int32': 1, 'int64': 1})
    assert_series_equal(result, expected)
    # where dtype conversions
    # GH 3733
    df = DataFrame(data=np.random.randn(100, 50))
    df = df.where(df > 0)  # create nans
    bools = df > 0
    mask = isna(df)
    expected = bools.astype(float).mask(mask)
    result = bools.mask(mask)
    assert_frame_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns=['A', 'B', 'C', 'D', 'E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
    """Integer labels on a mixed index: iloc is positional, .ix/[[...]] are label-based."""
    df = DataFrame(np.random.randn(4, 3),
                   index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
    result = df.iloc[:-1]
    expected = df.loc[df.index[:-1]]
    assert_frame_equal(result, expected)
    with catch_warnings(record=True):
        # deprecated .ix: int labels on a mixed index are label-based
        result = df.ix[[1, 10]]
        expected = df.ix[Index([1, 10], dtype=object)]
    assert_frame_equal(result, expected)
    # 11320
    df = pd.DataFrame({"rna": (1.5, 2.2, 3.2, 4.5),
                       -1000: [11, 21, 36, 40],
                       0: [10, 22, 43, 34],
                       1000: [0, 10, 20, 30]},
                      columns=['rna', -1000, 0, 1000])
    # negative/large int column labels select by label, never by position
    result = df[[1000]]
    expected = df.iloc[:, [3]]
    assert_frame_equal(result, expected)
    result = df[[-1000]]
    expected = df.iloc[:, [1]]
    assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
    """Deprecated .ix with negative ints: positional on non-int axes, label-based otherwise."""
    with catch_warnings(record=True):
        result = self.frame.ix[:, -1]
    assert_series_equal(result, self.frame['D'])
    with catch_warnings(record=True):
        result = self.frame.ix[:, [-1]]
    assert_frame_equal(result, self.frame[['D']])
    with catch_warnings(record=True):
        result = self.frame.ix[:, [-1, -2]]
    assert_frame_equal(result, self.frame[['D', 'C']])
    with catch_warnings(record=True):
        self.frame.ix[:, [-1]] = 0
    assert (self.frame['D'] == 0).all()
    df = DataFrame(np.random.randn(8, 4))
    with catch_warnings(record=True):
        # integer column labels: -1 is not present, so .ix reindexes
        # and the resulting column is all-NaN
        assert isna(df.ix[:, [-1]].values).all()
    # #1942
    a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
    with catch_warnings(record=True):
        a.ix[-1] = a.ix[-2]
    with catch_warnings(record=True):
        assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
        assert a.ix[-1].name == 'T'
        assert a.ix[-2].name == 'S'
def test_getattr(self):
    """Columns are reachable as attributes; unknown names raise AttributeError."""
    assert_series_equal(self.frame.A, self.frame['A'])
    with pytest.raises(AttributeError):
        getattr(self.frame, 'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self):
    """Column __setitem__: Series alignment, scalars, ndarrays, SettingWithCopy."""
    # not sure what else to do here
    series = self.frame['A'][::2]
    self.frame['col5'] = series
    assert 'col5' in self.frame
    assert len(series) == 15
    assert len(self.frame) == 30
    # the half-length series is aligned on the index; the gaps become NaN
    exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
    exp = Series(exp, index=self.frame.index, name='col5')
    tm.assert_series_equal(self.frame['col5'], exp)
    series = self.frame['A']
    self.frame['col6'] = series
    tm.assert_series_equal(series, self.frame['col6'], check_names=False)
    # an ndarray is not a valid column key
    with pytest.raises(KeyError):
        self.frame[randn(len(self.frame) + 1)] = 1
    # set ndarray
    arr = randn(len(self.frame))
    self.frame['col9'] = arr
    assert (self.frame['col9'] == arr).all()
    self.frame['col7'] = 5
    assert((self.frame['col7'] == 5).all())
    self.frame['col0'] = 3.14
    assert((self.frame['col0'] == 3.14).all())
    self.frame['col8'] = 'foo'
    assert((self.frame['col8'] == 'foo').all())
    # this is partially a view (e.g. some blocks are view)
    # so raise/warn
    smaller = self.frame[:2]
    def f():
        smaller['col10'] = ['1', '2']
    pytest.raises(com.SettingWithCopyError, f)
    # the assignment itself still took effect on the slice
    assert smaller['col10'].dtype == np.object_
    assert (smaller['col10'] == ['1', '2']).all()
    # with a dtype
    for dtype in ['int32', 'int64', 'float32', 'float64']:
        self.frame[dtype] = np.array(arr, dtype=dtype)
        assert self.frame[dtype].dtype.name == dtype
    # dtype changing GH4204
    df = DataFrame([[0, 0]])
    df.iloc[0] = np.nan
    expected = DataFrame([[np.nan, np.nan]])
    assert_frame_equal(df, expected)
    df = DataFrame([[0, 0]])
    df.loc[0] = np.nan
    assert_frame_equal(df, expected)
def test_setitem_tuple(self):
    """A tuple key creates a single column named by the tuple and round-trips."""
    self.frame['A', 'B'] = self.frame['A']
    result = self.frame['A', 'B']
    assert_series_equal(result, self.frame['A'], check_names=False)
def test_setitem_always_copy(self):
    """Assigned Series data is copied; later writes to the column don't leak back."""
    s = self.frame['A'].copy()
    self.frame['E'] = s
    # chained write into the new column must not mutate the source series
    self.frame['E'][5:10] = nan
    assert notna(s[5:10]).all()
def test_setitem_boolean(self):
    """Boolean-mask __setitem__ matches equivalent ndarray masking, incl. alignment."""
    df = self.frame.copy()
    values = self.frame.values
    df[df['A'] > 0] = 4
    values[values[:, 0] > 0] = 4
    assert_almost_equal(df.values, values)
    # test that column reindexing works
    series = df['A'] == 4
    series = series.reindex(df.index[::-1])
    df[series] = 1
    values[values[:, 0] == 4] = 1
    assert_almost_equal(df.values, values)
    df[df > 0] = 5
    values[values > 0] = 5
    assert_almost_equal(df.values, values)
    df[df == 5] = 0
    values[values == 5] = 0
    assert_almost_equal(df.values, values)
    # a df that needs alignment first
    df[df[:-1] < 0] = 2
    np.putmask(values[:-1], values[:-1] < 0, 2)
    assert_almost_equal(df.values, values)
    # indexed with same shape but rows-reversed df
    df[df[::-1] == 2] = 3
    values[values == 2] = 3
    assert_almost_equal(df.values, values)
    # a non-boolean DataFrame key must be rejected
    with tm.assert_raises_regex(TypeError, 'Must pass '
                                'DataFrame with '
                                'boolean values only'):
        df[df * 0] = 2
    # index with DataFrame
    mask = df > np.abs(df)
    expected = df.copy()
    df[df > np.abs(df)] = nan
    expected.values[mask.values] = nan
    assert_frame_equal(df, expected)
    # set from DataFrame
    expected = df.copy()
    df[df > np.abs(df)] = df * 2
    np.putmask(expected.values, mask.values, df.values * 2)
    assert_frame_equal(df, expected)
def test_setitem_cast(self):
    """Dtype behavior when replacing existing columns with scalars and arrays."""
    self.frame['D'] = self.frame['D'].astype('i8')
    assert self.frame['D'].dtype == np.int64
    # #669, should not cast?
    # this is now set to int64, which means a replacement of the column to
    # the value dtype (and nothing to do with the existing dtype)
    self.frame['B'] = 0
    assert self.frame['B'].dtype == np.int64
    # cast if pass array of course
    self.frame['B'] = np.arange(len(self.frame))
    assert issubclass(self.frame['B'].dtype.type, np.integer)
    self.frame['foo'] = 'bar'
    self.frame['foo'] = 0
    assert self.frame['foo'].dtype == np.int64
    self.frame['foo'] = 'bar'
    self.frame['foo'] = 2.5
    assert self.frame['foo'].dtype == np.float64
    self.frame['something'] = 0
    assert self.frame['something'].dtype == np.int64
    self.frame['something'] = 2
    assert self.frame['something'].dtype == np.int64
    self.frame['something'] = 2.5
    assert self.frame['something'].dtype == np.float64
    # GH 7704
    # dtype conversion on setting
    df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
    df['event'] = np.nan
    df.loc[10, 'event'] = 'foo'
    result = df.get_dtype_counts().sort_values()
    expected = Series({'float64': 3, 'object': 1}).sort_values()
    assert_series_equal(result, expected)
    # Test that data type is preserved . #5782
    df = DataFrame({'one': np.arange(6, dtype=np.int8)})
    df.loc[1, 'one'] = 6
    assert df.dtypes.one == np.dtype(np.int8)
    df.one = np.int8(7)
    assert df.dtypes.one == np.dtype(np.int8)
def test_setitem_boolean_column(self):
    """.loc with a boolean row mask writes only the masked rows of one column."""
    expected = self.frame.copy()
    mask = self.frame['A'] > 0
    expected.values[mask.values, 1] = 0
    self.frame.loc[mask, 'B'] = 0
    assert_frame_equal(self.frame, expected)
def test_setitem_corner(self):
    """Corner cases: re-adding deleted columns, non-string keys, empty frames."""
    # corner case
    df = DataFrame({'B': [1., 2., 3.],
                    'C': ['a', 'b', 'c']},
                   index=np.arange(3))
    del df['B']
    df['B'] = [1., 2., 3.]
    assert 'B' in df
    assert len(df.columns) == 2
    df['A'] = 'beginning'
    df['E'] = 'foo'
    df['D'] = 'bar'
    # datetime objects are legal (non-string) column keys
    df[datetime.now()] = 'date'
    df[datetime.now()] = 5.
    # what to do when empty frame with index
    dm = DataFrame(index=self.frame.index)
    dm['A'] = 'foo'
    dm['B'] = 'bar'
    assert len(dm.columns) == 2
    assert dm.values.dtype == np.object_
    # upcast
    dm['C'] = 1
    assert dm['C'].dtype == np.int64
    dm['E'] = 1.
    assert dm['E'].dtype == np.float64
    # set existing column
    dm['A'] = 'bar'
    assert 'bar' == dm['A'][0]
    dm = DataFrame(index=np.arange(3))
    dm['A'] = 1
    dm['foo'] = 'bar'
    del dm['foo']
    dm['foo'] = 'bar'
    assert dm['foo'].dtype == np.object_
    dm['coercable'] = ['1', '2', '3']
    assert dm['coercable'].dtype == np.object_
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.loc[ix, ['title']] = 'foobar'
df.loc[ix, ['cruft']] = 0
assert df.loc[1, 'title'] == 'foobar'
assert df.loc[1, 'cruft'] == 0
def test_setitem_ambig(self):
    """Object columns: numeric-coercible (Decimal) vs uncoercible (str) values."""
    # Difficulties with mixed-type data
    from decimal import Decimal
    # Created as float type
    dm = DataFrame(index=lrange(3), columns=lrange(3))
    coercable_series = Series([Decimal(1) for _ in range(3)],
                              index=lrange(3))
    uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
    dm[0] = np.ones(3)
    assert len(dm.columns) == 3
    dm[1] = coercable_series
    assert len(dm.columns) == 3
    dm[2] = uncoercable_series
    assert len(dm.columns) == 3
    # strings cannot be coerced, so the column stays object dtype
    assert dm[2].dtype == np.object_
def test_setitem_clear_caches(self):
    """.loc assignment invalidates the cached column Series (gh-304)."""
    # see gh-304
    df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
                   index=[0, 1, 2, 3])
    df.insert(2, 'z', np.nan)
    # cache it
    foo = df['z']
    df.loc[df.index[2:], 'z'] = 42
    expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
    # the stale cached Series must have been dropped after the write
    assert df['z'] is not foo
    tm.assert_series_equal(df['z'], expected)
def test_setitem_None(self):
    """None is a legal column label for both set and get (GH 766)."""
    # GH #766
    self.frame[None] = self.frame['A']
    assert_series_equal(
        self.frame.iloc[:, -1], self.frame['A'], check_names=False)
    assert_series_equal(self.frame.loc[:, None], self.frame[
        'A'], check_names=False)
    assert_series_equal(self.frame[None], self.frame[
        'A'], check_names=False)
    # repr must not choke on the None label
    repr(self.frame)
def test_setitem_empty(self):
# GH 9596
df = pd.DataFrame({'a': ['1', '2', '3'],
'b': ['11', '22', '33'],
'c': ['111', '222', '333']})
result = df.copy()
result.loc[result.b.isna(), 'a'] = result.a
assert_frame_equal(result, df)
def test_setitem_empty_frame_with_boolean(self):
# Test for issue #10126
for dtype in ('float', 'int64'):
for df in [
pd.DataFrame(dtype=dtype),
pd.DataFrame(dtype=dtype, index=[1]),
pd.DataFrame(dtype=dtype, columns=['A']),
]:
df2 = df.copy()
df[df > df2] = 47
assert_frame_equal(df, df2)
def test_setitem_scalars_no_index(self):
# GH16823 / 17894
df = DataFrame()
df['foo'] = 1
expected = DataFrame(columns=['foo']).astype(np.int64)
assert_frame_equal(df, expected)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue #11859
df = pd.DataFrame()
df2 = df[df > 0]
assert_frame_equal(df, df2)
def test_delitem_corner(self):
    """del removes a column; deleting a missing column raises KeyError."""
    f = self.frame.copy()
    del f['D']
    assert len(f.columns) == 3
    with pytest.raises(KeyError):
        del f['D']
    del f['B']
    assert len(f.columns) == 2
def test_getitem_fancy_2d(self):
    """Deprecated .ix 2-D getting: column lists, row subsets, slices, and views."""
    f = self.frame
    with catch_warnings(record=True):
        assert_frame_equal(f.ix[:, ['B', 'A']],
                           f.reindex(columns=['B', 'A']))
    subidx = self.frame.index[[5, 4, 1]]
    with catch_warnings(record=True):
        assert_frame_equal(f.ix[subidx, ['B', 'A']],
                           f.reindex(index=subidx, columns=['B', 'A']))
    # slicing rows, etc.
    with catch_warnings(record=True):
        assert_frame_equal(f.ix[5:10], f[5:10])
        assert_frame_equal(f.ix[5:10, :], f[5:10])
        assert_frame_equal(f.ix[:5, ['A', 'B']],
                           f.reindex(index=f.index[:5],
                                     columns=['A', 'B']))
    # slice rows with labels, inclusive!
    with catch_warnings(record=True):
        expected = f.ix[5:11]
        result = f.ix[f.index[5]:f.index[10]]
        assert_frame_equal(expected, result)
    # slice columns
    with catch_warnings(record=True):
        assert_frame_equal(f.ix[:, :2], f.reindex(columns=['A', 'B']))
    # get view
    with catch_warnings(record=True):
        exp = f.copy()
        # writing through the slice's values mutates the parent frame
        f.ix[5:10].values[:] = 5
        exp.values[5:10] = 5
        assert_frame_equal(f, exp)
    with catch_warnings(record=True):
        # a boolean DataFrame is not a valid .ix key
        pytest.raises(ValueError, f.ix.__getitem__, f > 0.5)
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.loc[52195.1:52196.5]
assert len(s1) == 2
s1 = df.loc[52195.1:52196.6]
assert len(s1) == 2
s1 = df.loc[52195.1:52198.9]
assert len(s1) == 3
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
    """iloc slices are positional; .loc slices on a non-monotonic int index raise."""
    df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
    # this is OK
    cp = df.copy()
    cp.iloc[4:10] = 0
    assert (cp.iloc[4:10] == 0).values.all()
    # so is this
    cp = df.copy()
    cp.iloc[3:11] = 0
    assert (cp.iloc[3:11] == 0).values.all()
    # positional iloc and label-based loc reach the same rows here
    result = df.iloc[2:6]
    result2 = df.loc[3:11]
    expected = df.reindex([4, 6, 8, 10])
    assert_frame_equal(result, expected)
    assert_frame_equal(result2, expected)
    # non-monotonic, raise KeyError
    df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
    pytest.raises(KeyError, df2.loc.__getitem__, slice(3, 11))
    pytest.raises(KeyError, df2.loc.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
    """Deprecated .ix 2-D setting: column lists, row subsets, slices, booleans."""
    # case 1
    frame = self.frame.copy()
    expected = frame.copy()
    with catch_warnings(record=True):
        frame.ix[:, ['B', 'A']] = 1
    expected['B'] = 1.
    expected['A'] = 1.
    assert_frame_equal(frame, expected)
    # case 2
    frame = self.frame.copy()
    frame2 = self.frame.copy()
    expected = frame.copy()
    subidx = self.frame.index[[5, 4, 1]]
    values = randn(3, 2)
    with catch_warnings(record=True):
        frame.ix[subidx, ['B', 'A']] = values
        frame2.ix[[5, 4, 1], ['B', 'A']] = values
        expected['B'].ix[subidx] = values[:, 0]
        expected['A'].ix[subidx] = values[:, 1]
    assert_frame_equal(frame, expected)
    assert_frame_equal(frame2, expected)
    # case 3: slicing rows, etc.
    frame = self.frame.copy()
    with catch_warnings(record=True):
        expected1 = self.frame.copy()
        frame.ix[5:10] = 1.
        expected1.values[5:10] = 1.
    assert_frame_equal(frame, expected1)
    with catch_warnings(record=True):
        expected2 = self.frame.copy()
        arr = randn(5, len(frame.columns))
        frame.ix[5:10] = arr
        expected2.values[5:10] = arr
    assert_frame_equal(frame, expected2)
    # case 4
    with catch_warnings(record=True):
        frame = self.frame.copy()
        frame.ix[5:10, :] = 1.
        assert_frame_equal(frame, expected1)
        frame.ix[5:10, :] = arr
    assert_frame_equal(frame, expected2)
    # case 5
    with catch_warnings(record=True):
        frame = self.frame.copy()
        frame2 = self.frame.copy()
        expected = self.frame.copy()
        values = randn(5, 2)
        frame.ix[:5, ['A', 'B']] = values
        expected['A'][:5] = values[:, 0]
        expected['B'][:5] = values[:, 1]
    assert_frame_equal(frame, expected)
    with catch_warnings(record=True):
        frame2.ix[:5, [0, 1]] = values
    assert_frame_equal(frame2, expected)
    # case 6: slice rows with labels, inclusive!
    with catch_warnings(record=True):
        frame = self.frame.copy()
        expected = self.frame.copy()
        frame.ix[frame.index[5]:frame.index[10]] = 5.
        expected.values[5:11] = 5
    assert_frame_equal(frame, expected)
    # case 7: slice columns
    with catch_warnings(record=True):
        frame = self.frame.copy()
        frame2 = self.frame.copy()
        expected = self.frame.copy()
        # slice indices
        frame.ix[:, 1:3] = 4.
        expected.values[:, 1:3] = 4.
        assert_frame_equal(frame, expected)
        # slice with labels
        frame.ix[:, 'B':'C'] = 4.
        assert_frame_equal(frame, expected)
    # new corner case of boolean slicing / setting
    frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
                      columns=['a', 'b'])
    lst = [100]
    lst.extend([np.nan] * 4)
    expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
                         columns=['a', 'b'])
    frame[frame['a'] == 2] = 100
    assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
    """Column-slice views: writing through a single-block slice raises SettingWithCopy."""
    sliced = self.mixed_frame.iloc[:, -3:]
    assert sliced['D'].dtype == np.float64
    # get view with single block
    # setting it triggers setting with copy
    sliced = self.frame.iloc[:, -3:]
    def f():
        sliced['C'] = 4.
    pytest.raises(com.SettingWithCopyError, f)
    # the write reached the parent frame before the error was raised
    assert (self.frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
    """Deprecated .ix setting with integer labels on an integer index."""
    # integer index defers to label-based indexing
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
    with catch_warnings(record=True):
        tmp = df.copy()
        exp = df.copy()
        # labels 0, 2, 4 are the first three positions of the index
        tmp.ix[[0, 2, 4]] = 5
        exp.values[:3] = 5
    assert_frame_equal(tmp, exp)
    with catch_warnings(record=True):
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[6] = 5
        exp.values[3] = 5
    assert_frame_equal(tmp, exp)
    with catch_warnings(record=True):
        tmp = df.copy()
        exp = df.copy()
        tmp.ix[:, 2] = 5
        # tmp correctly sets the dtype
        # so match the exp way
        exp[2] = 5
    assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
    """Deprecated .ix getting with integer labels on an integer index."""
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
    with catch_warnings(record=True):
        result = df.ix[[4, 2, 0], [2, 0]]
        expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
    assert_frame_equal(result, expected)
    with catch_warnings(record=True):
        result = df.ix[[4, 2, 0]]
        expected = df.reindex(index=[4, 2, 0])
    assert_frame_equal(result, expected)
    with catch_warnings(record=True):
        result = df.ix[4]
        expected = df.xs(4)
    assert_series_equal(result, expected)
    with catch_warnings(record=True):
        result = df.ix[:, 3]
        expected = df[3]
    assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
    """Deprecated .ix setting with labels absent from the axis raises KeyError."""
    df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
    with catch_warnings(record=True):
        # labels that aren't contained
        pytest.raises(KeyError, df.ix.__setitem__,
                      ([0, 1, 2], [2, 3, 4]), 5)
        # try to set indices not contained in frame
        pytest.raises(KeyError, self.frame.ix.__setitem__,
                      ['foo', 'bar', 'baz'], 1)
        pytest.raises(KeyError, self.frame.ix.__setitem__,
                      (slice(None, None), ['E']), 1)
    # partial setting now allows this GH2578
    # pytest.raises(KeyError, self.frame.ix.__setitem__,
    #               (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
    """Deprecated .ix 2-D setting on a mixed-dtype frame; whole-row set/copy."""
    with catch_warnings(record=True):
        self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
        result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
        assert (result.values == 5).all()
        self.mixed_frame.ix[5] = np.nan
        assert isna(self.mixed_frame.ix[5]).all()
        self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
        assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
                            check_names=False)
    # #1432
    with catch_warnings(record=True):
        df = DataFrame({1: [1., 2., 3.],
                        2: [3, 4, 5]})
        assert df._is_mixed_type
        # setting a list into one row preserves the per-column dtypes
        df.ix[1] = [5, 10]
        expected = DataFrame({1: [1., 5., 3.],
                              2: [3, 10, 5]})
        assert_frame_equal(df, expected)
def test_ix_align(self):
    """Deprecated .ix setting from a Series aligns on labels, not position."""
    # the source series has a shuffled (sorted-by-value) index
    b = Series(randn(10), name=0).sort_values()
    df_orig = DataFrame(randn(10, 4))
    df = df_orig.copy()
    with catch_warnings(record=True):
        df.ix[:, 0] = b
        assert_series_equal(df.ix[:, 0].reindex(b.index), b)
    with catch_warnings(record=True):
        dft = df_orig.T
        dft.ix[0, :] = b
        assert_series_equal(dft.ix[0, :].reindex(b.index), b)
    with catch_warnings(record=True):
        df = df_orig.copy()
        df.ix[:5, 0] = b
        s = df.ix[:5, 0]
        assert_series_equal(s, b.reindex(s.index))
    with catch_warnings(record=True):
        dft = df_orig.T
        dft.ix[0, :5] = b
        s = dft.ix[0, :5]
        assert_series_equal(s, b.reindex(s.index))
    with catch_warnings(record=True):
        df = df_orig.copy()
        idx = [0, 1, 3, 5]
        df.ix[idx, 0] = b
        s = df.ix[idx, 0]
        assert_series_equal(s, b.reindex(s.index))
    with catch_warnings(record=True):
        dft = df_orig.T
        dft.ix[0, idx] = b
        s = dft.ix[0, idx]
        assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
    """Deprecated .ix setting from a DataFrame aligns on the row index."""
    b = DataFrame(np.random.randn(3, 4))
    df_orig = DataFrame(randn(10, 4))
    df = df_orig.copy()
    with catch_warnings(record=True):
        df.ix[:3] = b
        out = b.ix[:3]
        assert_frame_equal(out, b)
    b.sort_index(inplace=True)
    with catch_warnings(record=True):
        df = df_orig.copy()
        df.ix[[0, 1, 2]] = b
        out = df.ix[[0, 1, 2]].reindex(b.index)
        assert_frame_equal(out, b)
    with catch_warnings(record=True):
        df = df_orig.copy()
        df.ix[:3] = b
        out = df.ix[:3]
        assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
    """Label slicing via .loc and plain [] matches positional slicing, get and set."""
    df = tm.makeTimeDataFrame()
    start, end = df.index[[5, 10]]
    # getting: label slices are inclusive of the end label
    expected = df[5:11]
    assert_frame_equal(df.loc[start:end], expected)
    assert_frame_equal(df[start:end], expected)
    # setting through either form writes the same rows
    expected = df.copy()
    expected[5:11] = 0
    result = df.copy()
    result.loc[start:end] = 0
    assert_frame_equal(result, expected)
    result2 = df.copy()
    result2[start:end] = 0
    assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
""" #1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
assert_frame_equal(rs, xp)
"""
def test_ix_multi_take_nonint_index(self):
    """Deprecated .ix with int lists on a non-int axis takes positionally."""
    df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                   columns=['a', 'b'])
    with catch_warnings(record=True):
        rs = df.ix[[0], [0]]
    xp = df.reindex(['x'], columns=['a'])
    assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
    """Deprecated .ix positional take with MultiIndex columns."""
    df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
                   columns=[['a', 'b'], ['1', '2']])
    with catch_warnings(record=True):
        rs = df.ix[[0], [0]]
    xp = df.reindex(['x'], columns=[('a', '1')])
    assert_frame_equal(rs, xp)
def test_ix_dup(self):
    """Deprecated .ix label slicing with duplicate labels includes all duplicates."""
    idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
    df = DataFrame(np.random.randn(len(idx), 3), idx)
    with catch_warnings(record=True):
        sub = df.ix[:'d']
        assert_frame_equal(sub, df)
    with catch_warnings(record=True):
        sub = df.ix['a':'c']
        assert_frame_equal(sub, df.ix[0:4])
    with catch_warnings(record=True):
        sub = df.ix['b':'d']
        assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
    """Deprecated .ix 1-D getting: cross-sections, column slices, and views."""
    f = self.frame
    # return self if no slicing...for now
    with catch_warnings(record=True):
        assert f.ix[:, :] is f
    # low dimensional slice
    with catch_warnings(record=True):
        xs1 = f.ix[2, ['C', 'B', 'A']]
    xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
    tm.assert_series_equal(xs1, xs2)
    with catch_warnings(record=True):
        ts1 = f.ix[5:10, 2]
    ts2 = f[f.columns[2]][5:10]
    tm.assert_series_equal(ts1, ts2)
    # positional xs
    with catch_warnings(record=True):
        xs1 = f.ix[0]
    xs2 = f.xs(f.index[0])
    tm.assert_series_equal(xs1, xs2)
    with catch_warnings(record=True):
        xs1 = f.ix[f.index[5]]
    xs2 = f.xs(f.index[5])
    tm.assert_series_equal(xs1, xs2)
    # single column
    with catch_warnings(record=True):
        assert_series_equal(f.ix[:, 'A'], f['A'])
    # return view
    with catch_warnings(record=True):
        exp = f.copy()
        exp.values[5] = 4
        # writing through the row view mutates the parent frame
        f.ix[5][:] = 4
    tm.assert_frame_equal(exp, f)
    with catch_warnings(record=True):
        exp.values[:, 1] = 6
        f.ix[:, 1][:] = 6
    tm.assert_frame_equal(exp, f)
    # slice of mixed-frame
    with catch_warnings(record=True):
        xs = self.mixed_frame.ix[5]
    exp = self.mixed_frame.xs(self.mixed_frame.index[5])
    tm.assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
    """Deprecated .ix 1-D setting: cross-sections, column sections, full rows."""
    # case 1: set cross-section for indices
    frame = self.frame.copy()
    expected = self.frame.copy()
    with catch_warnings(record=True):
        frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
    expected['C'][2] = 1.
    expected['B'][2] = 2.
    expected['A'][2] = 3.
    assert_frame_equal(frame, expected)
    with catch_warnings(record=True):
        frame2 = self.frame.copy()
        # positional column list gives the same result as the label list
        frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
    assert_frame_equal(frame, expected)
    # case 2, set a section of a column
    frame = self.frame.copy()
    expected = self.frame.copy()
    with catch_warnings(record=True):
        vals = randn(5)
        expected.values[5:10, 2] = vals
        frame.ix[5:10, 2] = vals
    assert_frame_equal(frame, expected)
    with catch_warnings(record=True):
        frame2 = self.frame.copy()
        frame2.ix[5:10, 'B'] = vals
    assert_frame_equal(frame, expected)
    # case 3: full xs
    frame = self.frame.copy()
    expected = self.frame.copy()
    with catch_warnings(record=True):
        frame.ix[4] = 5.
        expected.values[4] = 5.
    assert_frame_equal(frame, expected)
    with catch_warnings(record=True):
        frame.ix[frame.index[4]] = 6.
        expected.values[4] = 6.
    assert_frame_equal(frame, expected)
    # single column
    frame = self.frame.copy()
    expected = self.frame.copy()
    with catch_warnings(record=True):
        frame.ix[:, 'A'] = 7.
    expected['A'] = 7.
    assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
    """Scalar .loc[row, col] lookups agree with column-Series lookups."""
    f = self.frame
    for col in f.columns:
        column = f[col]
        # spot-check every fifth row label
        for idx in f.index[::5]:
            assert f.loc[idx, col] == column[idx]
def test_setitem_fancy_scalar(self):
    """Scalar .loc assignment writes exactly one cell."""
    f = self.frame
    expected = self.frame.copy()
    for j, col in enumerate(f.columns):
        # spot-check every fifth row label in each column
        for idx in f.index[::5]:
            i = f.index.get_loc(idx)
            val = randn()
            expected.values[i, j] = val
            f.loc[idx, col] = val
    assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
    """Boolean masks in .loc select rows and/or columns like reindex."""
    f = self.frame
    # boolean column mask
    result = f.loc[:, [False, True, False, True]]
    assert_frame_equal(result, f.reindex(columns=['B', 'D']))
    # row subset combined with a boolean column mask
    result = f.loc[f.index[5:10], [False, True, False, True]]
    assert_frame_equal(result,
                       f.reindex(index=f.index[5:10], columns=['B', 'D']))
    # boolean row vector
    boolvec = f.index > f.index[7]
    expected = f.reindex(index=f.index[boolvec])
    assert_frame_equal(f.loc[boolvec], expected)
    assert_frame_equal(f.loc[boolvec, :], expected)
    result = f.loc[boolvec, f.columns[2:]]
    assert_frame_equal(result,
                       f.reindex(index=f.index[boolvec],
                                 columns=['C', 'D']))
def test_setitem_fancy_boolean(self):
    """Boolean-mask .loc assignment: whole rows and a column subset."""
    # from 2d, set with booleans
    frame = self.frame.copy()
    expected = self.frame.copy()
    mask = frame['A'] > 0
    expected.values[mask.values] = 0.
    frame.loc[mask] = 0.
    assert_frame_equal(frame, expected)
    frame = self.frame.copy()
    expected = self.frame.copy()
    expected.values[mask.values, :2] = 0.
    frame.loc[mask, ['A', 'B']] = 0.
    assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
    """iloc with integer lists equals label selection through index/columns."""
    rows = self.frame.iloc[[1, 4, 7]]
    assert_frame_equal(rows, self.frame.loc[self.frame.index[[1, 4, 7]]])
    cols = self.frame.iloc[:, [2, 0, 1]]
    assert_frame_equal(cols,
                       self.frame.loc[:, self.frame.columns[[2, 0, 1]]])
def test_getitem_setitem_fancy_exceptions(self):
    """More indexers than axes raise IndexingError for both get and set."""
    ix = self.frame.iloc
    with tm.assert_raises_regex(IndexingError, 'Too many indexers'):
        ix[:, :, :]
    with pytest.raises(IndexingError):
        ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
    """A misaligned boolean Series is realigned on the index before use."""
    # boolean index misaligned labels
    reversed_mask = self.frame['A'][::-1] > 1
    # getting: the reversed mask selects the same rows as the aligned one
    result = self.frame.loc[reversed_mask]
    expected = self.frame.loc[reversed_mask[::-1]]
    assert_frame_equal(result, expected)
    # setting: both frames realign the mask, so the results agree
    modified = self.frame.copy()
    expected = self.frame.copy()
    modified.loc[reversed_mask] = 0
    expected.loc[reversed_mask] = 0
    assert_frame_equal(modified, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]),
np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
    """Float-index slicing: label-based, end-inclusive; iloc rejects float bounds."""
    index = Index([1.5, 2, 3, 4, 5])
    df = DataFrame(np.random.randn(5, 5), index=index)
    result = df.loc[1.5:4]
    expected = df.reindex([1.5, 2, 3, 4])
    assert_frame_equal(result, expected)
    assert len(result) == 4
    result = df.loc[4:5]
    expected = df.reindex([4, 5])  # reindex with int
    assert_frame_equal(result, expected, check_index_type=False)
    assert len(result) == 2
    result = df.loc[4:5]
    expected = df.reindex([4.0, 5.0])  # reindex with float
    assert_frame_equal(result, expected)
    assert len(result) == 2
    # loc_float changes this to work properly
    result = df.loc[1:2]
    expected = df.iloc[0:2]
    assert_frame_equal(result, expected)
    df.loc[1:2] = 0
    result = df[1:2]
    assert (result == 0).all().all()
    # #2727
    index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
    df = DataFrame(np.random.randn(5, 5), index=index)
    # positional slicing only via iloc!
    pytest.raises(TypeError, lambda: df.iloc[1.0:5])
    result = df.iloc[4:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 1
    cp = df.copy()
    def f():
        cp.iloc[1.0:5] = 0
    pytest.raises(TypeError, f)
    def f():
        result = cp.iloc[1.0:5] == 0  # noqa
    pytest.raises(TypeError, f)
    # NOTE(review): both f() calls raise before assigning, so 'result' below
    # is still df.iloc[4:5] from above.
    assert result.values.all()
    assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
    cp = df.copy()
    cp.iloc[4:5] = 0
    assert (cp.iloc[4:5] == 0).values.all()
    assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
    # float slicing
    result = df.loc[1.0:5]
    expected = df
    assert_frame_equal(result, expected)
    assert len(result) == 5
    result = df.loc[1.1:5]
    expected = df.reindex([2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 4
    result = df.loc[4.51:5]
    expected = df.reindex([5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 1
    result = df.loc[1.0:5.0]
    expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
    assert_frame_equal(result, expected)
    assert len(result) == 5
    cp = df.copy()
    cp.loc[1.0:5.0] = 0
    result = cp.loc[1.0:5.0]
    assert (result == 0).values.all()
def test_setitem_single_column_mixed(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['str'] = 'qux'
df.loc[df.index[::2], 'str'] = nan
expected = np.array([nan, 'qux', nan, 'qux', nan], dtype=object)
assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
    """Assigning iNaT/nan into a datetime64 column via .loc yields missing values."""
    df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
                   columns=['foo', 'bar', 'baz'])
    df['timestamp'] = Timestamp('20010102')
    # check our dtypes
    result = df.get_dtype_counts()
    expected = Series({'float64': 3, 'datetime64[ns]': 1})
    assert_series_equal(result, expected)
    # set an allowable datetime64 type
    df.loc['b', 'timestamp'] = iNaT
    assert isna(df.loc['b', 'timestamp'])
    # allow this syntax
    df.loc['c', 'timestamp'] = nan
    assert isna(df.loc['c', 'timestamp'])
    # allow this syntax
    df.loc['d', :] = nan
    assert not isna(df.loc['c', :]).all()
    # as of GH 3216 this will now work!
    # try to set with a list like item
    # pytest.raises(
    #     Exception, df.loc.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
    """loc-assignment of DataFrame/ndarray pieces: aligned, row-unaligned,
    key-unaligned, raw ndarray, and dtype-upcasting cases (GH 3216)."""
    piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
    self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
    result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
    expected = piece.values
    assert_almost_equal(result, expected)

    # GH 3216

    # already aligned
    f = self.mixed_frame.copy()
    piece = DataFrame([[1., 2.], [3., 4.]],
                      index=f.index[0:2], columns=['A', 'B'])
    key = (slice(None, 2), ['A', 'B'])
    f.loc[key] = piece
    assert_almost_equal(f.loc[f.index[0:2], ['A', 'B']].values,
                        piece.values)

    # rows unaligned: only the overlapping labels should be written
    f = self.mixed_frame.copy()
    piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
                      index=list(f.index[0:2]) + ['foo', 'bar'],
                      columns=['A', 'B'])
    key = (slice(None, 2), ['A', 'B'])
    f.loc[key] = piece
    assert_almost_equal(f.loc[f.index[0:2:], ['A', 'B']].values,
                        piece.values[0:2])

    # key is unaligned with values: missing column 'B' fills with NaN
    f = self.mixed_frame.copy()
    piece = f.loc[f.index[:2], ['A']]
    piece.index = f.index[-2:]
    key = (slice(-2, None), ['A', 'B'])
    f.loc[key] = piece
    piece['B'] = np.nan
    assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
                        piece.values)

    # ndarray: positional assignment, no alignment
    f = self.mixed_frame.copy()
    piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
    key = (slice(-2, None), ['A', 'B'])
    f.loc[key] = piece.values
    assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
                        piece.values)

    # needs upcasting: int columns receive float values
    df = DataFrame([[1, 2, 'foo'], [3, 4, 'bar']], columns=['A', 'B', 'C'])
    df2 = df.copy()
    df2.loc[:, ['A', 'B']] = df.loc[:, ['A', 'B']] + 0.5
    expected = df.reindex(columns=['A', 'B'])
    expected += 0.5
    expected['C'] = df['C']
    assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
    """A DataFrame piece assigned through .loc aligns on its (relabelled) index."""
    piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
    piece.index = self.frame.index[-2:]
    piece.columns = ['A', 'B']
    self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
    # the written region must now hold exactly the piece's values
    written = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
    assert_almost_equal(written, piece.values)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc['foo']
expected = df[:2]
assert_frame_equal(result, expected)
result = df.loc['bar']
expected = df.iloc[[2, 4]]
assert_frame_equal(result, expected)
result = df.loc['baz']
expected = df.iloc[3]
assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc[['bar']]
exp = df.iloc[[2, 4]]
assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
    """Boolean scalars are not valid .loc labels on an int index (GH #2199)."""
    # #2199
    df = DataFrame({'a': [1, 2, 3]})

    # getitem: True/False must not silently coerce to 1/0
    pytest.raises(KeyError, df.loc.__getitem__, False)
    pytest.raises(KeyError, df.loc.__getitem__, True)

    # setitem: same rule applies when writing
    pytest.raises(KeyError, df.loc.__setitem__, False, 0)
    pytest.raises(KeyError, df.loc.__setitem__, True, 0)
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
assert result.columns.name == 'foo'
expected = df.iloc[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
    """Deprecated DataFrame.get_value agrees with plain label lookup."""
    for idx in self.frame.index:
        for col in self.frame.columns:
            # get_value is deprecated, hence the FutureWarning capture
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result = self.frame.get_value(idx, col)
            expected = self.frame[col][idx]
            assert result == expected
def test_lookup(self):
    """DataFrame.lookup fancy (row, col) extraction vs per-cell get_value."""
    def alt(df, rows, cols, dtype):
        # reference implementation: one deprecated get_value call per pair
        result = []
        for r, c in zip(rows, cols):
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                result.append(df.get_value(r, c))
        return np.array(result, dtype=dtype)

    def testit(df):
        # full cartesian-ish coverage of row/column label pairs
        rows = list(df.index) * len(df.columns)
        cols = list(df.columns) * len(df.index)
        result = df.lookup(rows, cols)
        expected = alt(df, rows, cols, dtype=np.object_)
        tm.assert_almost_equal(result, expected, check_dtype=False)

    testit(self.mixed_frame)
    testit(self.frame)

    # column labels computed per-row from another column
    df = DataFrame({'label': ['a', 'b', 'a', 'c'],
                    'mask_a': [True, True, False, True],
                    'mask_b': [True, False, False, False],
                    'mask_c': [False, True, False, True]})
    df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
    exp_mask = alt(df, df.index, 'mask_' + df['label'], dtype=np.bool_)
    tm.assert_series_equal(df['mask'], pd.Series(exp_mask, name='mask'))
    assert df['mask'].dtype == np.bool_

    # unknown row label
    with pytest.raises(KeyError):
        self.frame.lookup(['xyz'], ['A'])

    # unknown column label
    with pytest.raises(KeyError):
        self.frame.lookup([self.frame.index[0]], ['xyz'])

    # mismatched argument lengths
    with tm.assert_raises_regex(ValueError, 'same size'):
        self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
    """Deprecated DataFrame.set_value writes a scalar at (label, column)."""
    for idx in self.frame.index:
        for col in self.frame.columns:
            # set_value is deprecated, hence the FutureWarning capture
            with tm.assert_produces_warning(FutureWarning,
                                            check_stacklevel=False):
                self.frame.set_value(idx, col, 1)
            assert self.frame[col][idx] == 1
def test_set_value_resize(self):
    """set_value on a missing label enlarges the frame in place; value dtype
    drives the new column's dtype (object for str/bool, float for int)."""
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        res = self.frame.set_value('foobar', 'B', 0)
    # enlargement mutates and returns the same object
    assert res is self.frame
    assert res.index[-1] == 'foobar'
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        assert res.get_value('foobar', 'B') == 0

    # .loc enlargement of a brand-new column behaves the same way
    self.frame.loc['foobar', 'qux'] = 0
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        assert self.frame.get_value('foobar', 'qux') == 0

    # a string value forces the new column to object dtype
    res = self.frame.copy()
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        res3 = res.set_value('foobar', 'baz', 'sam')
    assert res3['baz'].dtype == np.object_

    # a bool value also lands as object
    res = self.frame.copy()
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        res3 = res.set_value('foobar', 'baz', True)
    assert res3['baz'].dtype == np.object_

    # an int value upcasts to float (other rows are NaN)
    res = self.frame.copy()
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        res3 = res.set_value('foobar', 'baz', 5)
    assert is_float_dtype(res3['baz'])
    assert isna(res3['baz'].drop(['foobar'])).all()
    # writing a string into the now-float column must fail
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
    """Enlargement via set_value / .loc grows the index (and columns)."""
    df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))

    # this is actually ambiguous as the 2 is interpreted as a positional
    # so column is not created
    df = df_orig.copy()
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        df.set_value('C', 2, 1.0)
    assert list(df.index) == list(df_orig.index) + ['C']
    # assert list(df.columns) == list(df_orig.columns) + [2]

    df = df_orig.copy()
    df.loc['C', 2] = 1.0
    assert list(df.index) == list(df_orig.index) + ['C']
    # assert list(df.columns) == list(df_orig.columns) + [2]

    # create both new
    df = df_orig.copy()
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        df.set_value('C', 'D', 1.0)
    assert list(df.index) == list(df_orig.index) + ['C']
    assert list(df.columns) == list(df_orig.columns) + ['D']

    df = df_orig.copy()
    df.loc['C', 'D'] = 1.0
    assert list(df.index) == list(df_orig.index) + ['C']
    assert list(df.columns) == list(df_orig.columns) + ['D']
def test_get_set_value_no_partial_indexing(self):
    """get_value must not partially match a MultiIndex row label."""
    # partial w/ MultiIndex raise exception
    index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
    df = DataFrame(index=index, columns=lrange(4))
    with tm.assert_produces_warning(FutureWarning,
                                    check_stacklevel=False):
        pytest.raises(KeyError, df.get_value, 0, 1)
def test_single_element_ix_dont_upcast(self):
    """Scalar lookup of an integer column must stay integer (no float upcast)."""
    self.frame['E'] = 1
    assert issubclass(self.frame['E'].dtype.type, (int, np.integer))

    # .ix is deprecated; catch_warnings silences its FutureWarning
    with catch_warnings(record=True):
        result = self.frame.ix[self.frame.index[5], 'E']
        assert is_integer(result)

    result = self.frame.loc[self.frame.index[5], 'E']
    assert is_integer(result)

    # GH 11617
    df = pd.DataFrame(dict(a=[1.23]))
    df["b"] = 666

    with catch_warnings(record=True):
        result = df.ix[0, "b"]
    assert is_integer(result)
    result = df.loc[0, "b"]
    assert is_integer(result)

    # list-of-labels lookup returns an int Series, not float
    expected = Series([666], [0], name='b')
    with catch_warnings(record=True):
        result = df.ix[[0], "b"]
    assert_series_equal(result, expected)
    result = df.loc[[0], "b"]
    assert_series_equal(result, expected)
def test_iloc_row(self):
    """Positional row indexing: scalar, slice (view semantics), int list."""
    df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))

    result = df.iloc[1]
    exp = df.loc[2]
    assert_series_equal(result, exp)

    result = df.iloc[2]
    exp = df.loc[4]
    assert_series_equal(result, exp)

    # slice
    result = df.iloc[slice(4, 8)]
    expected = df.loc[8:14]
    assert_frame_equal(result, expected)

    # verify slice is view
    # setting it makes it raise/warn
    def f():
        result[2] = 0.
    pytest.raises(com.SettingWithCopyError, f)
    # the write above still went through to the parent frame
    exp_col = df[2].copy()
    exp_col[4:8] = 0.
    assert_series_equal(df[2], exp_col)

    # list of integers
    result = df.iloc[[1, 2, 4, 6]]
    expected = df.reindex(df.index[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_iloc_col(self):
    """Positional column indexing: scalar, slice (view semantics), int list."""
    df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))

    result = df.iloc[:, 1]
    exp = df.loc[:, 2]
    assert_series_equal(result, exp)

    result = df.iloc[:, 2]
    exp = df.loc[:, 4]
    assert_series_equal(result, exp)

    # slice
    result = df.iloc[:, slice(4, 8)]
    expected = df.loc[:, 8:14]
    assert_frame_equal(result, expected)

    # verify slice is view
    # and that we are setting a copy
    def f():
        result[8] = 0.
    pytest.raises(com.SettingWithCopyError, f)
    # the write above still went through to the parent frame
    assert (df[8] == 0).all()

    # list of integers
    result = df.iloc[:, [1, 2, 4, 6]]
    expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
    assert_frame_equal(result, expected)
def test_iloc_duplicates(self):
    """iloc agrees with deprecated .ix on duplicate labels and MultiIndexes."""
    df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
                   index=list('aab'))

    result = df.iloc[0]
    with catch_warnings(record=True):
        result2 = df.ix[0]
    assert isinstance(result, Series)
    assert_almost_equal(result.values, df.values[0])
    assert_series_equal(result, result2)

    with catch_warnings(record=True):
        result = df.T.iloc[:, 0]
        result2 = df.T.ix[:, 0]
    assert isinstance(result, Series)
    assert_almost_equal(result.values, df.values[0])
    assert_series_equal(result, result2)

    # multiindex
    df = DataFrame(np.random.randn(3, 3),
                   columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
                   index=[['i', 'i', 'j'], ['X', 'X', 'Y']])

    with catch_warnings(record=True):
        rs = df.iloc[0]
        xp = df.ix[0]
    assert_series_equal(rs, xp)

    with catch_warnings(record=True):
        rs = df.iloc[:, 0]
        xp = df.T.ix[0]
    assert_series_equal(rs, xp)

    with catch_warnings(record=True):
        rs = df.iloc[:, [0]]
        xp = df.ix[:, [0]]
    assert_frame_equal(rs, xp)

    # #2259
    df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
    result = df.iloc[:, [0]]
    expected = df.take([0], axis=1)
    assert_frame_equal(result, expected)
def test_iloc_sparse_propegate_fill_value(self):
    """iloc on a SparseDataFrame must propagate the fill value, so that
    fill-valued cells stay out of sp_values.
    NOTE(review): "propegate" in the name is a typo for "propagate"; renaming
    would change the collected test id, so it is left as-is here."""
    from pandas.core.sparse.api import SparseDataFrame
    df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
    assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
def test_iat(self):
    """iat positional scalar access agrees with at label access everywhere."""
    for row_pos, row_label in enumerate(self.frame.index):
        for col_pos, col_label in enumerate(self.frame.columns):
            by_position = self.frame.iat[row_pos, col_pos]
            by_label = self.frame.at[row_label, col_label]
            assert by_position == by_label
def test_nested_exception(self):
    """repr of a pathological frame must not surface an UnboundLocalError
    (i.e. the original exception must not be clobbered while re-raising)."""
    # Ignore the strange way of triggering the problem
    # (which may get fixed), it's just a way to trigger
    # the issue or reraising an outer exception without
    # a named argument
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6],
                    "c": [7, 8, 9]}).set_index(["a", "b"])
    # corrupt the index with an unhashable (list) element
    l = list(df.index)
    l[0] = ["a", "b"]
    df.index = l

    try:
        repr(df)
    except Exception as e:
        # any error is acceptable except the masked-exception symptom
        assert type(e) != UnboundLocalError
def test_reindex_methods(self):
    """reindex with nearest/pad/backfill, scalar and per-label tolerance,
    reversed targets, and direction-switching on a reversed frame."""
    df = pd.DataFrame({'x': list(range(5))})
    target = np.array([-0.1, 0.9, 1.1, 1.5])

    for method, expected_values in [('nearest', [0, 1, 1, 2]),
                                    ('pad', [np.nan, 0, 1, 1]),
                                    ('backfill', [0, 1, 2, 2])]:
        expected = pd.DataFrame({'x': expected_values}, index=target)
        actual = df.reindex(target, method=method)
        assert_frame_equal(expected, actual)

        # zero tolerance against itself is a no-op
        actual = df.reindex_like(df, method=method, tolerance=0)
        assert_frame_equal(df, actual)
        actual = df.reindex_like(df, method=method, tolerance=[0, 0, 0, 0])
        assert_frame_equal(df, actual)

        actual = df.reindex(target, method=method, tolerance=1)
        assert_frame_equal(expected, actual)
        actual = df.reindex(target, method=method, tolerance=[1, 1, 1, 1])
        assert_frame_equal(expected, actual)

        # reversed and arbitrarily reordered targets
        e2 = expected[::-1]
        actual = df.reindex(target[::-1], method=method)
        assert_frame_equal(e2, actual)

        new_order = [3, 0, 2, 1]
        e2 = expected.iloc[new_order]
        actual = df.reindex(target[new_order], method=method)
        assert_frame_equal(e2, actual)

        # a monotonically-decreasing frame needs the opposite fill direction
        switched_method = ('pad' if method == 'backfill'
                           else 'backfill' if method == 'pad'
                           else method)
        actual = df[::-1].reindex(target, method=switched_method)
        assert_frame_equal(expected, actual)

    # out-of-tolerance targets come back as NaN
    expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=target)
    actual = df.reindex(target, method='nearest', tolerance=0.2)
    assert_frame_equal(expected, actual)

    expected = pd.DataFrame({'x': [0, np.nan, 1, np.nan]}, index=target)
    actual = df.reindex(target, method='nearest',
                        tolerance=[0.5, 0.01, 0.4, 0.1])
    assert_frame_equal(expected, actual)
def test_reindex_frame_add_nat(self):
    """Reindexing past the end fills a datetime column with NaT, not NaN."""
    rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
    df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})

    result = df.reindex(lrange(15))
    # dtype must remain datetime64[ns] despite the introduced missing values
    assert np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))

    mask = com.isna(result)['B']
    assert mask[-5:].all()
    assert not mask[:-5].any()
def test_set_dataframe_column_ns_dtype(self):
    """datetime.datetime values land in a frame column as datetime64[ns]."""
    frame = DataFrame([datetime.now(), datetime.now()])
    assert frame[0].dtype == np.dtype('M8[ns]')
def test_non_monotonic_reindex_methods(self):
    """Fill-based reindex methods require a monotonic index; otherwise
    they must raise ValueError rather than silently mis-fill."""
    dr = pd.date_range('2013-08-01', periods=6, freq='B')
    data = np.random.randn(6, 1)
    df = pd.DataFrame(data, index=dr, columns=list('A'))
    df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
                          columns=list('A'))
    # index is not monotonic increasing or decreasing
    pytest.raises(ValueError, df_rev.reindex, df.index, method='pad')
    pytest.raises(ValueError, df_rev.reindex, df.index, method='ffill')
    pytest.raises(ValueError, df_rev.reindex, df.index, method='bfill')
    pytest.raises(ValueError, df_rev.reindex, df.index, method='nearest')
def test_reindex_level(self):
    """reindex(..., level=...) on a 3-level MultiIndex, checked against a
    hand-built positional indexer for both the first and a middle level."""
    from itertools import permutations
    icol = ['jim', 'joe', 'jolie']

    def verify_first_level(df, level, idx, check_index_type=True):
        # expected row order: positions of each requested label, in idx order
        f = lambda val: np.nonzero(df[level] == val)[0]
        i = np.concatenate(list(map(f, idx)))
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[i].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)

    def verify(df, level, idx, indexer, check_index_type=True):
        # same check, but against an explicitly supplied positional indexer
        left = df.set_index(icol).reindex(idx, level=level)
        right = df.iloc[indexer].set_index(icol)
        assert_frame_equal(left, right, check_index_type=check_index_type)

    df = pd.DataFrame({'jim': list('B' * 4 + 'A' * 2 + 'C' * 3),
                       'joe': list('abcdeabcd')[::-1],
                       'jolie': [10, 20, 30] * 3,
                       'joline': np.random.randint(0, 1000, 9)})

    target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
              ['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
              ['A', 'B'], ['B', 'A', 'C']]

    for idx in target:
        verify_first_level(df, 'jim', idx)

    # reindex by these causes different MultiIndex levels
    for idx in [['D', 'F'], ['A', 'C', 'B']]:
        verify_first_level(df, 'jim', idx, check_index_type=False)

    verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
    verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
    verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
    verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
    verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
    verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
    verify(df, 'joe', list('edwq'), [0, 4, 5])
    verify(df, 'joe', list('wq'), [], check_index_type=False)

    df = DataFrame({'jim': ['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
                    'joe': ['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
                           ['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
                           ['3rd'] * 3 + ['2nd'] * 2,
                    # this needs to be jointly unique with jim and joe or
                    # reindexing will fail ~1.5% of the time, this works
                    # out to needing unique groups of same size as joe
                    'jolie': np.concatenate([
                        np.random.choice(1000, x, replace=False)
                        for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
                    'joline': np.random.randn(20).round(3) * 10})

    for idx in permutations(df['jim'].unique()):
        for i in range(3):
            verify_first_level(df, 'jim', idx[:i + 1])

    i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10,
         11, 12, 13, 14, 18, 19, 15, 16, 17]
    verify(df, 'joe', ['1st', '2nd', '3rd'], i)

    i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6,
         7, 8, 9, 15, 16, 17, 18, 19, 13, 14]
    verify(df, 'joe', ['3rd', '2nd', '1st'], i)

    i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]
    verify(df, 'joe', ['2nd', '3rd'], i)

    i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]
    verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
    """Duplicated float index labels: .loc/.ix return all matching rows,
    for contiguous and non-contiguous duplicate positions."""
    df = pd.DataFrame(np.random.randn(3, 3),
                      index=[0.1, 0.2, 0.2], columns=list('abc'))
    expect = df.iloc[1:]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[1:, 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)

    # same duplicates with a mixed int/float index
    df.index = [1, 0.2, 0.2]
    expect = df.iloc[1:]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[1:, 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)

    df = pd.DataFrame(np.random.randn(4, 3),
                      index=[1, 0.2, 0.2, 1], columns=list('abc'))
    expect = df.iloc[1:-1]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[1:-1, 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)

    # duplicates that are not adjacent
    df.index = [0.1, 0.2, 2, 0.2]
    expect = df.iloc[[1, -1]]
    assert_frame_equal(df.loc[0.2], expect)
    with catch_warnings(record=True):
        assert_frame_equal(df.ix[0.2], expect)

    expect = df.iloc[[1, -1], 0]
    assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
    """Assigning a SparseSeries as a new column keeps it sparse (GH8131)."""
    # GH8131
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
    df['new_column'] = sp_series
    assert_series_equal(df['new_column'], sp_series, check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
    """A sparse column assignment aligns on index before being stored."""
    df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
    # reversed index -> values must be realigned to 0, 1, 2 order
    sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
                 .to_sparse(fill_value=0))
    df['new_column'] = sp_series
    exp = pd.Series([1, 0, 0], name='new_column')
    assert_series_equal(df['new_column'], exp)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
    """tz-aware datetime assignment realigns without losing the timezone."""
    # GH 12981
    # Assignment of unaligned offset-aware datetime series.
    # Make sure timezone isn't lost
    column = pd.Series(pd.date_range('2015-01-01', periods=3, tz='utc'),
                       name='dates')
    # plain column assignment
    df = pd.DataFrame({'dates': column})
    df['dates'] = column[[1, 0, 2]]
    assert_series_equal(df['dates'], column)

    # the same through .loc
    df = pd.DataFrame({'dates': column})
    df.loc[[0, 1, 2], 'dates'] = column[[1, 0, 2]]
    assert_series_equal(df['dates'], column)
def test_setitem_datetime_coercion(self):
# gh-1048
df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})
df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
assert pd.Timestamp('2008-08-08') == df.loc[0, 'c']
assert pd.Timestamp('2008-08-08') == df.loc[1, 'c']
df.loc[2, 'c'] = date(2005, 5, 5)
assert pd.Timestamp('2005-05-05') == df.loc[2, 'c']
def test_setitem_datetimelike_with_inference(self):
    """Assigning timedelta/datetime arrays (incl. partial rows and NaT)
    must infer timedelta64[ns] / datetime64[ns] column dtypes (GH 7592)."""
    # GH 7592
    # assignment of timedeltas with NaT

    one_hour = timedelta(hours=1)
    df = DataFrame(index=date_range('20130101', periods=4))
    df['A'] = np.array([1 * one_hour] * 4, dtype='m8[ns]')
    df.loc[:, 'B'] = np.array([2 * one_hour] * 4, dtype='m8[ns]')
    # partial-column assignments: remaining rows become NaT
    df.loc[:3, 'C'] = np.array([3 * one_hour] * 3, dtype='m8[ns]')
    df.loc[:, 'D'] = np.array([4 * one_hour] * 4, dtype='m8[ns]')
    df.loc[df.index[:3], 'E'] = np.array([5 * one_hour] * 3,
                                         dtype='m8[ns]')
    df['F'] = np.timedelta64('NaT')
    df.loc[df.index[:-1], 'F'] = np.array([6 * one_hour] * 3,
                                          dtype='m8[ns]')
    df.loc[df.index[-3]:, 'G'] = date_range('20130101', periods=3)
    df['H'] = np.datetime64('NaT')
    result = df.dtypes
    expected = Series([np.dtype('timedelta64[ns]')] * 6 +
                      [np.dtype('datetime64[ns]')] * 2,
                      index=list('ABCDEFGH'))
    assert_series_equal(result, expected)
def test_at_time_between_time_datetimeindex(self):
    """at_time/between_time and time-keyed .loc get/set agree with the
    equivalent positional selections on a 30-minute DatetimeIndex."""
    index = date_range("2012-01-01", "2012-01-05", freq='30min')
    df = DataFrame(randn(len(index), 5), index=index)
    akey = time(12, 0, 0)
    bkey = slice(time(13, 0, 0), time(14, 0, 0))
    # positional equivalents of the time selections above
    ainds = [24, 72, 120, 168]
    binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]

    result = df.at_time(akey)
    expected = df.loc[akey]
    expected2 = df.iloc[ainds]
    assert_frame_equal(result, expected)
    assert_frame_equal(result, expected2)
    assert len(result) == 4

    result = df.between_time(bkey.start, bkey.stop)
    expected = df.loc[bkey]
    expected2 = df.iloc[binds]
    assert_frame_equal(result, expected)
    assert_frame_equal(result, expected2)
    assert len(result) == 12

    # setitem with a time key
    result = df.copy()
    result.loc[akey] = 0
    result = result.loc[akey]
    expected = df.loc[akey].copy()
    expected.loc[:] = 0
    assert_frame_equal(result, expected)

    # round-trip: zero out, then restore from the positional rows
    result = df.copy()
    result.loc[akey] = 0
    result.loc[akey] = df.iloc[ainds]
    assert_frame_equal(result, df)

    # same two checks with a time-slice key
    result = df.copy()
    result.loc[bkey] = 0
    result = result.loc[bkey]
    expected = df.loc[bkey].copy()
    expected.loc[:] = 0
    assert_frame_equal(result, expected)

    result = df.copy()
    result.loc[bkey] = 0
    result.loc[bkey] = df.iloc[binds]
    assert_frame_equal(result, df)
def test_xs(self):
    """Cross-section by row label, mixed-dtype rows, missing keys, and
    column xs (axis=1) including its view semantics."""
    idx = self.frame.index[5]
    xs = self.frame.xs(idx)
    for item, value in compat.iteritems(xs):
        if np.isnan(value):
            assert np.isnan(self.frame[item][idx])
        else:
            assert value == self.frame[item][idx]

    # mixed-type xs
    test_data = {
        'A': {'1': 1, '2': 2},
        'B': {'1': '1', '2': '2', '3': '3'},
    }
    frame = DataFrame(test_data)
    xs = frame.xs('1')
    # mixed row comes back as object dtype, values untouched
    assert xs.dtype == np.object_
    assert xs['A'] == 1
    assert xs['B'] == '1'

    # a label not in the index raises
    with pytest.raises(KeyError):
        self.tsframe.xs(self.tsframe.index[0] - BDay())

    # xs get column
    series = self.frame.xs('A', axis=1)
    expected = self.frame['A']
    assert_series_equal(series, expected)

    # view is returned if possible
    series = self.frame.xs('A', axis=1)
    series[:] = 5
    assert (expected == 5).all()
def test_xs_corner(self):
    """xs corner cases: mixed-dtype single-row frame and a frame with
    no columns at all."""
    # pathological mixed-type reordering case
    df = DataFrame(index=[0])
    df['A'] = 1.
    df['B'] = 'foo'
    df['C'] = 2.
    df['D'] = 'bar'
    df['E'] = 3.

    xs = df.xs(0)
    # column order must be preserved despite alternating dtypes
    exp = pd.Series([1., 'foo', 2., 'bar', 3.],
                    index=list('ABCDE'), name=0)
    tm.assert_series_equal(xs, exp)

    # no columns but Index(dtype=object)
    df = DataFrame(index=['a', 'b', 'c'])
    result = df.xs('a')
    expected = Series([], name='a', index=pd.Index([], dtype=object))
    assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
    """xs with drop_level=False retains the selected level(s) in the result."""
    df = (DataFrame({'day': {0: 'sat', 1: 'sun'},
                     'flavour': {0: 'strawberry', 1: 'strawberry'},
                     'sales': {0: 10, 1: 12},
                     'year': {0: 2008, 1: 2008}})
          .set_index(['year', 'flavour', 'day']))
    result = df.xs('sat', level='day', drop_level=False)
    expected = df[:1]
    assert_frame_equal(result, expected)

    # multi-level selection keeps both selected levels as well
    result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
    assert_frame_equal(result, expected)
def test_xs_view(self):
    """xs may return a view; writes through it must reach the parent frame."""
    # in 0.14 this will return a view if possible a copy otherwise, but
    # this is numpy dependent
    dm = DataFrame(np.arange(20.).reshape(4, 5),
                   index=lrange(4), columns=lrange(5))

    dm.xs(2)[:] = 10
    assert (dm.xs(2) == 10).all()
def test_index_namedtuple(self):
    """namedtuple labels in an Index work with tupleize_cols=False
    (they must stay plain labels, not become a MultiIndex)."""
    from collections import namedtuple
    IndexType = namedtuple("IndexType", ["a", "b"])
    idx1 = IndexType("foo", "bar")
    idx2 = IndexType("baz", "bof")
    index = Index([idx1, idx2],
                  name="composite_index", tupleize_cols=False)
    df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])

    # deprecated .ix lookup by namedtuple label
    with catch_warnings(record=True):
        result = df.ix[IndexType("foo", "bar")]["A"]
    assert result == 1

    # .loc equivalent
    result = df.loc[IndexType("foo", "bar")]["A"]
    assert result == 1
def test_boolean_indexing(self):
    """Boolean-frame setitem replaces matching cells; a mask of the wrong
    length must raise ValueError."""
    idx = lrange(3)
    cols = ['A', 'B', 'C']
    df1 = DataFrame(index=idx, columns=cols,
                    data=np.array([[0.0, 0.5, 1.0],
                                   [1.5, 2.0, 2.5],
                                   [3.0, 3.5, 4.0]],
                                  dtype=float))
    df2 = DataFrame(index=idx, columns=cols,
                    data=np.ones((len(idx), len(cols))))

    expected = DataFrame(index=idx, columns=cols,
                         data=np.array([[0.0, 0.5, 1.0],
                                        [1.5, 2.0, -1],
                                        [-1, -1, -1]], dtype=float))

    df1[df1 > 2.0 * df2] = -1
    assert_frame_equal(df1, expected)
    with tm.assert_raises_regex(ValueError, 'Item wrong length'):
        df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
    """Boolean setitem works on an int/float mix but must raise TypeError
    once an object (string) column is present."""
    df = DataFrame({
        long(0): {35: np.nan, 40: np.nan, 43: np.nan,
                  49: np.nan, 50: np.nan},
        long(1): {35: np.nan,
                  40: 0.32632316859446198,
                  43: np.nan,
                  49: 0.32632316859446198,
                  50: 0.39114724480578139},
        long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987,
                  49: np.nan, 50: np.nan},
        long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan,
                  50: np.nan},
        long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan,
                  49: np.nan, 50: np.nan},
        'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})

    # mixed int/float ok
    df2 = df.copy()
    df2[df2 > 0.3] = 1
    expected = df.copy()
    # only these four cells exceed 0.3 (NaN comparisons are False)
    expected.loc[40, 1] = 1
    expected.loc[49, 1] = 1
    expected.loc[50, 1] = 1
    expected.loc[35, 4] = 1
    assert_frame_equal(df2, expected)

    df['foo'] = 'test'
    with tm.assert_raises_regex(TypeError, 'boolean setting '
                                'on mixed-type'):
        df[df > 0.3] = 1
def test_where(self):
    """DataFrame.where: getting, dtype upcasting, alignment against frame /
    ndarray / scalar `other`, invalid conditions, and inplace behavior."""
    default_frame = DataFrame(np.random.randn(5, 3),
                              columns=['A', 'B', 'C'])

    def _safe_add(df):
        # only add to the numeric items
        def is_ok(s):
            return (issubclass(s.dtype.type, (np.integer, np.floating)) and
                    s.dtype != 'uint8')
        return DataFrame(dict([(c, s + 1) if is_ok(s) else (c, s)
                               for c, s in compat.iteritems(df)]))

    def _check_get(df, cond, check_dtypes=True):
        # where(cond, other) must equal a per-column np.where, and accept
        # either a boolean frame or its raw ndarray as the condition
        other1 = _safe_add(df)
        rs = df.where(cond, other1)
        rs2 = df.where(cond.values, other1)
        for k, v in rs.iteritems():
            exp = Series(
                np.where(cond[k], df[k], other1[k]), index=v.index)
            assert_series_equal(v, exp, check_names=False)
        assert_frame_equal(rs, rs2)

        # dtypes
        if check_dtypes:
            assert (rs.dtypes == df.dtypes).all()

    # check getting
    for df in [default_frame, self.mixed_frame,
               self.mixed_float, self.mixed_int]:
        cond = df > 0
        _check_get(df, cond)

    # upcasting case (GH # 2794)
    df = DataFrame(dict([(c, Series([1] * 3, dtype=c))
                         for c in ['int64', 'int32',
                                   'float32', 'float64']]))
    df.iloc[1, :] = 0
    result = df.where(df >= 0).get_dtype_counts()

    # when we don't preserve boolean casts
    #
    # expected = Series({ 'float32' : 1, 'float64' : 3 })

    expected = Series({'float32': 1, 'float64': 1, 'int32': 1, 'int64': 1})
    assert_series_equal(result, expected)

    # aligning
    def _check_align(df, cond, other, check_dtypes=True):
        # `other` may be a frame, an ndarray, or a scalar; each column of
        # the result must match a manually-aligned np.where
        rs = df.where(cond, other)
        for i, k in enumerate(rs.columns):
            result = rs[k]
            d = df[k].values
            c = cond[k].reindex(df[k].index).fillna(False).values

            if is_scalar(other):
                o = other
            else:
                if isinstance(other, np.ndarray):
                    o = Series(other[:, i], index=result.index).values
                else:
                    o = other[k].values

            new_values = d if c.all() else np.where(c, d, o)
            expected = Series(new_values, index=result.index, name=k)

            # since we can't always have the correct numpy dtype
            # as numpy doesn't know how to downcast, don't check
            assert_series_equal(result, expected, check_dtype=False)

        # dtypes
        # can't check dtype when other is an ndarray
        if check_dtypes and not isinstance(other, np.ndarray):
            assert (rs.dtypes == df.dtypes).all()

    for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
        # other is a frame
        cond = (df > 0)[1:]
        _check_align(df, cond, _safe_add(df))

        # check other is ndarray
        cond = df > 0
        _check_align(df, cond, (_safe_add(df).values))

        # integers are upcast, so don't check the dtypes
        cond = df > 0
        check_dtypes = all([not issubclass(s.type, np.integer)
                            for s in df.dtypes])
        _check_align(df, cond, np.nan, check_dtypes=check_dtypes)

    # invalid conditions
    df = default_frame
    err1 = (df + 1).values[0:2, :]
    pytest.raises(ValueError, df.where, cond, err1)

    err2 = cond.iloc[:2, :].values
    other1 = _safe_add(df)
    pytest.raises(ValueError, df.where, err2, other1)
    pytest.raises(ValueError, df.mask, True)
    pytest.raises(ValueError, df.mask, 0)

    # where inplace
    def _check_set(df, cond, check_dtypes=True):
        # inplace where(cond, nan) must equal mask(~cond) on a copy
        dfi = df.copy()
        econd = cond.reindex_like(df).fillna(True)
        expected = dfi.mask(~econd)

        dfi.where(cond, np.nan, inplace=True)
        assert_frame_equal(dfi, expected)

        # dtypes (and confirm upcasts)
        if check_dtypes:
            for k, v in compat.iteritems(df.dtypes):
                if issubclass(v.type, np.integer) and not cond[k].all():
                    v = np.dtype('float64')
                assert dfi[k].dtype == v

    for df in [default_frame, self.mixed_frame, self.mixed_float,
               self.mixed_int]:
        cond = df > 0
        _check_set(df, cond)

        cond = df >= 0
        _check_set(df, cond)

        # aligning
        cond = (df >= 0)[1:]
        _check_set(df, cond)

    # GH 10218
    # test DataFrame.where with Series slicing
    df = DataFrame({'a': range(3), 'b': range(4, 7)})
    result = df.where(df['a'] == 1)
    expected = df[df['a'] == 1].reindex(df.index)
    assert_frame_equal(result, expected)
def test_where_array_like(self):
    """where accepts list/tuple/ndarray boolean conditions (gh-15414)."""
    # see gh-15414
    klasses = [list, tuple, np.array]

    df = DataFrame({'a': [1, 2, 3]})
    cond = [[False], [True], [True]]
    expected = DataFrame({'a': [np.nan, 2, 3]})

    for klass in klasses:
        result = df.where(klass(cond))
        assert_frame_equal(result, expected)

    # same with a two-column frame and a 2-D condition
    df['b'] = 2
    expected['b'] = [2, np.nan, 2]
    cond = [[False, True], [True, False], [True, True]]

    for klass in klasses:
        result = df.where(klass(cond))
        assert_frame_equal(result, expected)
def test_where_invalid_input(self):
    """Non-boolean conditions passed to where must raise ValueError
    (gh-15414), for single- and multi-column frames alike."""
    # see gh-15414: only boolean arrays accepted
    df = DataFrame({'a': [1, 2, 3]})
    msg = "Boolean array expected for the condition"

    # ints, strings, and Timestamps are all invalid condition elements
    conds = [
        [[1], [0], [1]],
        Series([[2], [5], [7]]),
        DataFrame({'a': [2, 5, 7]}),
        [["True"], ["False"], ["True"]],
        [[Timestamp("2017-01-01")],
         [pd.NaT], [Timestamp("2017-01-02")]]
    ]

    for cond in conds:
        with tm.assert_raises_regex(ValueError, msg):
            df.where(cond)

    df['b'] = 2
    conds = [
        [[0, 1], [1, 0], [1, 1]],
        Series([[0, 2], [5, 0], [4, 7]]),
        [["False", "True"], ["True", "False"],
         ["True", "True"]],
        DataFrame({'a': [2, 5, 7], 'b': [4, 8, 9]}),
        [[pd.NaT, Timestamp("2017-01-01")],
         [Timestamp("2017-01-02"), pd.NaT],
         [Timestamp("2017-01-03"), Timestamp("2017-01-03")]]
    ]

    for cond in conds:
        with tm.assert_raises_regex(ValueError, msg):
            df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
result = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(result, expected)
# this *does* align, though has no matching columns
cond.columns = ["a", "b", "c"]
result = df.where(cond)
expected = DataFrame(np.nan, index=df.index, columns=df.columns)
tm.assert_frame_equal(result, expected)
def test_where_ndframe_align(self):
    """Raw array conditions must match the frame's shape exactly, while
    Series conditions are aligned (extra labels ignored, missing -> False)."""
    msg = "Array conditional must be same shape as self"
    df = DataFrame([[1, 2, 3], [4, 5, 6]])

    cond = [True]
    with tm.assert_raises_regex(ValueError, msg):
        df.where(cond)

    expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])

    out = df.where(Series(cond))
    tm.assert_frame_equal(out, expected)

    cond = np.array([False, True, False, True])
    with tm.assert_raises_regex(ValueError, msg):
        df.where(cond)

    expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])

    out = df.where(Series(cond))
    tm.assert_frame_equal(out, expected)
def test_where_bug(self):
    """where/mask regressions: int-to-float upcasting (GH 2793) and the
    transpositional alignment issue (GH 7506)."""
    # GH 2793

    df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [
        4.0, 3.0, 2.0, 1.0]}, dtype='float64')
    expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [
        4.0, 3.0, np.nan, np.nan]}, dtype='float64')
    result = df.where(df > 2, np.nan)
    assert_frame_equal(result, expected)

    result = df.copy()
    result.where(result > 2, np.nan, inplace=True)
    assert_frame_equal(result, expected)

    # mixed: every int dtype must upcast to float64 when NaN is introduced
    for dtype in ['int16', 'int8', 'int32', 'int64']:
        df = DataFrame({'a': np.array([1, 2, 3, 4], dtype=dtype),
                        'b': np.array([4.0, 3.0, 2.0, 1.0],
                                      dtype='float64')})

        expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0],
                              'b': [4.0, 3.0, np.nan, np.nan]},
                             dtype='float64')

        result = df.where(df > 2, np.nan)
        assert_frame_equal(result, expected)

        result = df.copy()
        result.where(result > 2, np.nan, inplace=True)
        assert_frame_equal(result, expected)

    # transpositional issue
    # GH7506
    a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
    b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
    do_not_replace = b.isna() | (a > b)

    expected = a.copy()
    expected[~do_not_replace] = b

    result = a.where(do_not_replace, b)
    assert_frame_equal(result, expected)

    a = DataFrame({0: [4, 6], 1: [1, 0]})
    b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
    do_not_replace = b.isna() | (a > b)

    expected = a.copy()
    expected[~do_not_replace] = b

    result = a.where(do_not_replace, b)
    assert_frame_equal(result, expected)
    def test_where_datetime(self):
        # GH 3311: boolean indexing (df[df > stamp]) on a frame mixing two
        # datetime64 columns with a float column. Only the 'A' entries on
        # or before the stamp get masked; 'B' starts after the stamp, and
        # the float column 'C' is expected to come through unmasked.
        df = DataFrame(dict(A=date_range('20130102', periods=5),
                            B=date_range('20130104', periods=5),
                            C=np.random.randn(5)))
        stamp = datetime(2013, 1, 3)
        result = df[df > stamp]
        expected = df.copy()
        # A's first two entries (Jan 2, Jan 3 midnight) are not strictly
        # greater than the stamp, hence masked
        expected.loc[[0, 1], 'A'] = np.nan
        assert_frame_equal(result, expected)
    def test_where_none(self):
        # GH 4667
        # setting with None changes dtype
        # on a float column None is coerced to NaN and dtype stays float64
        df = DataFrame({'series': Series(range(10))}).astype(float)
        df[df > 7] = None
        expected = DataFrame(
            {'series': Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])})
        assert_frame_equal(df, expected)
        # GH 7656
        # out-of-place where() accepts None even on a mixed object frame
        # (the result itself is not checked here) ...
        df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {
            'A': np.nan, 'B': 'Test', 'C': np.nan}])
        expected = df.where(~isna(df), None)
        # ... but the inplace variant must raise on mixed-type blocks
        with tm.assert_raises_regex(TypeError, 'boolean setting '
                                    'on mixed-type'):
            df.where(~isna(df), None, inplace=True)
    def test_where_align(self):
        def create():
            # 10x3 float frame with staggered runs of NaN, one per column
            df = DataFrame(np.random.randn(10, 3))
            df.iloc[3:5, 0] = np.nan
            df.iloc[4:6, 1] = np.nan
            df.iloc[5:8, 2] = np.nan
            return df
        # series
        # where(..., other=df.mean(), axis='columns') must fill NaNs with
        # the per-column means, i.e. match fillna(df.mean())
        df = create()
        expected = df.fillna(df.mean())
        result = df.where(pd.notna(df), df.mean(), axis='columns')
        assert_frame_equal(result, expected)
        df.where(pd.notna(df), df.mean(), inplace=True, axis='columns')
        assert_frame_equal(df, expected)
        # axis='index' broadcasts the Series row-wise; 'rows' is an alias
        df = create().fillna(0)
        expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
        result = df.where(df > 0, df[0], axis='index')
        assert_frame_equal(result, expected)
        result = df.where(df > 0, df[0], axis='rows')
        assert_frame_equal(result, expected)
        # frame
        # a DataFrame `other` aligns on both axes
        df = create()
        expected = df.fillna(1)
        result = df.where(pd.notna(df), DataFrame(
            1, index=df.index, columns=df.columns))
        assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame(
[[1 + 1j, 2], [np.nan, 4 + 1j]], columns=['a', 'b'])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df, expected)
    def test_where_axis(self):
        # GH 9736: a Series `other` must broadcast along the requested axis
        df = DataFrame(np.random.randn(2, 2))
        mask = DataFrame([[False, False], [False, False]])
        s = Series([0, 1])
        # all-False mask -> result is just `s` broadcast; axis='index'
        # replicates s down each column
        expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
        result = df.where(mask, s, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        # axis='columns' replicates s across each row
        expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
        result = df.where(mask, s, axis='columns')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        # Upcast needed
        # int64 frame + NaN in `other`: axis='index' upcasts everything to
        # float64; axis='columns' upcasts per column
        df = DataFrame([[1, 2], [3, 4]], dtype='int64')
        mask = DataFrame([[False, False], [False, False]])
        s = Series([0, np.nan])
        expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
        result = df.where(mask, s, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        expected = DataFrame([[0, np.nan], [0, np.nan]])
        result = df.where(mask, s, axis='columns')
        assert_frame_equal(result, expected)
        # inplace with axis='columns' keeps column 0 as int64
        expected = DataFrame({0: np.array([0, 0], dtype='int64'),
                              1: np.array([np.nan, np.nan], dtype='float64')})
        result = df.copy()
        result.where(mask, s, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        # Multiple dtypes (=> multiple Blocks)
        df = pd.concat([
            DataFrame(np.random.randn(10, 2)),
            DataFrame(np.random.randint(0, 10, size=(10, 2)), dtype='int64')],
            ignore_index=True, axis=1)
        mask = DataFrame(False, columns=df.columns, index=df.index)
        s1 = Series(1, index=df.columns)
        s2 = Series(2, index=df.index)
        # int64 columns stay int64 when the replacement is an exact integer
        result = df.where(mask, s1, axis='columns')
        expected = DataFrame(1.0, columns=df.columns, index=df.index)
        expected[2] = expected[2].astype('int64')
        expected[3] = expected[3].astype('int64')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s1, axis='columns', inplace=True)
        assert_frame_equal(result, expected)
        result = df.where(mask, s2, axis='index')
        expected = DataFrame(2.0, columns=df.columns, index=df.index)
        expected[2] = expected[2].astype('int64')
        expected[3] = expected[3].astype('int64')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, s2, axis='index', inplace=True)
        assert_frame_equal(result, expected)
        # DataFrame vs DataFrame
        # a frame `other` missing a row leaves NaN there after alignment,
        # regardless of the axis argument or inplace
        d1 = df.copy().drop(1, axis=0)
        expected = df.copy()
        expected.loc[1, :] = np.nan
        result = df.where(mask, d1)
        assert_frame_equal(result, expected)
        result = df.where(mask, d1, axis='index')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d1, inplace=True)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d1, inplace=True, axis='index')
        assert_frame_equal(result, expected)
        # same for a missing column
        d2 = df.copy().drop(1, axis=1)
        expected = df.copy()
        expected.loc[:, 1] = np.nan
        result = df.where(mask, d2)
        assert_frame_equal(result, expected)
        result = df.where(mask, d2, axis='columns')
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d2, inplace=True)
        assert_frame_equal(result, expected)
        result = df.copy()
        result.where(mask, d2, inplace=True, axis='columns')
        assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
def test_mask_inplace(self):
# GH8801
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rdf = df.copy()
rdf.where(cond, inplace=True)
assert_frame_equal(rdf, df.where(cond))
assert_frame_equal(rdf, df.mask(~cond))
rdf = df.copy()
rdf.where(cond, -df, inplace=True)
assert_frame_equal(rdf, df.where(cond, -df))
assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
def test_mask_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.mask(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df > 4, df + 1))
# return ndarray and scalar
result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))
# chain
result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).mask((df + 2) > 8, (df + 2) + 10))
    def test_head_tail(self):
        # head()/tail() must behave as pure positional slices, including
        # zero and negative counts
        assert_frame_equal(self.frame.head(), self.frame[:5])
        assert_frame_equal(self.frame.tail(), self.frame[-5:])
        assert_frame_equal(self.frame.head(0), self.frame[0:0])
        assert_frame_equal(self.frame.tail(0), self.frame[0:0])
        assert_frame_equal(self.frame.head(-1), self.frame[:-1])
        assert_frame_equal(self.frame.tail(-1), self.frame[1:])
        assert_frame_equal(self.frame.head(1), self.frame[:1])
        assert_frame_equal(self.frame.tail(1), self.frame[-1:])
        # with a float index
        # positional semantics must hold even when labels are floats,
        # hence the iloc-based expectations
        df = self.frame.copy()
        df.index = np.arange(len(self.frame)) + 0.1
        assert_frame_equal(df.head(), df.iloc[:5])
        assert_frame_equal(df.tail(), df.iloc[-5:])
        assert_frame_equal(df.head(0), df[0:0])
        assert_frame_equal(df.tail(0), df[0:0])
        assert_frame_equal(df.head(-1), df.iloc[:-1])
        assert_frame_equal(df.tail(-1), df.iloc[1:])
        # test empty dataframe
        empty_df = DataFrame()
        assert_frame_equal(empty_df.tail(), empty_df)
        assert_frame_equal(empty_df.head(), empty_df)
    def test_type_error_multiindex(self):
        # See gh-12218
        # __getitem__ with a raw (slice, scalar) key on MultiIndex columns
        # must raise a helpful TypeError; .loc and plain tuple keys are
        # the supported spellings
        df = DataFrame(columns=['i', 'c', 'x', 'y'],
                       data=[[0, 0, 1, 2], [1, 0, 3, 4],
                             [0, 1, 1, 2], [1, 1, 3, 4]])
        dg = df.pivot_table(index='i', columns='c',
                            values=['x', 'y'])
        # dg[:, 0] is ambiguous on a frame -> invalid key
        with tm.assert_raises_regex(TypeError, "is an invalid key"):
            str(dg[:, 0])
        index = Index(range(2), name='i')
        columns = MultiIndex(levels=[['x', 'y'], [0, 1]],
                             labels=[[0, 1], [0, 0]],
                             names=[None, 'c'])
        expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
        # .loc with (slice(None), scalar) is the supported equivalent
        result = dg.loc[:, (slice(None), 0)]
        assert_frame_equal(result, expected)
        name = ('x', 0)
        index = Index(range(2), name='i')
        expected = Series([1, 3], index=index, name=name)
        # a plain tuple key selects a single column as a Series
        result = dg['x', 0]
        assert_series_equal(result, expected)
class TestDataFrameIndexingDatetimeWithTZ(TestData):
    """Indexing/setitem tests for tz-aware datetime64 columns."""

    def setup_method(self, method):
        # tz-aware index named 'foo' plus a naive date range
        self.idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
                         name='foo')
        self.dr = date_range('20130110', periods=3)
        self.df = DataFrame({'A': self.idx, 'B': self.dr})

    def test_setitem(self):
        df = self.df
        idx = self.idx
        # setitem of a tz-aware index creates a tz-aware column
        df['C'] = idx
        assert_series_equal(df['C'], Series(idx, name='C'))
        # overwriting an object column with tz-aware values works too
        df['D'] = 'foo'
        df['D'] = idx
        assert_series_equal(df['D'], Series(idx, name='D'))
        del df['D']
        # assert that A & C are not sharing the same base (e.g. they
        # are copies)
        b1 = df._data.blocks[1]
        b2 = df._data.blocks[2]
        assert b1.values.equals(b2.values)
        assert id(b1.values.values.base) != id(b2.values.values.base)
        # with nan: inserting NaT must not change the column dtypes
        df2 = df.copy()
        df2.iloc[1, 1] = pd.NaT
        df2.iloc[1, 2] = pd.NaT
        result = df2['B']
        assert_series_equal(notna(result), Series(
            [True, False, True], name='B'))
        assert_series_equal(df2.dtypes, df.dtypes)

    def test_set_reset(self):
        idx = self.idx
        # set/reset round trip must preserve the tz-aware dtype.
        # BUG FIX: the original line was
        #     assert result['foo'].dtype, 'M8[ns, US/Eastern'
        # i.e. an assert-with-message that only checked the dtype was
        # truthy (always passes); compare against the dtype explicitly.
        df = DataFrame({'A': [0, 1, 2]}, index=idx)
        result = df.reset_index()
        assert result['foo'].dtype == 'datetime64[ns, US/Eastern]'
        df = result.set_index('foo')
        tm.assert_index_equal(df.index, idx)

    def test_transpose(self):
        # transposing mixes dtypes, so values collapse to a common type,
        # but the layout must be preserved
        result = self.df.T
        expected = DataFrame(self.df.values.T)
        expected.index = ['A', 'B']
        assert_frame_equal(result, expected)
class TestDataFrameIndexingUInt64(TestData):
    # Indexing/setitem tests for uint64 columns holding values above the
    # int64 range (2**63 and up).
    def setup_method(self, method):
        self.ir = Index(np.arange(3), dtype=np.uint64)
        self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
        self.df = DataFrame({'A': self.idx, 'B': self.ir})
    def test_setitem(self):
        df = self.df
        idx = self.idx
        # setitem
        # assigning a uint64 Index creates a uint64 column
        df['C'] = idx
        assert_series_equal(df['C'], Series(idx, name='C'))
        # overwriting an object column with uint64 values works too
        df['D'] = 'foo'
        df['D'] = idx
        assert_series_equal(df['D'], Series(idx, name='D'))
        del df['D']
        # With NaN: because uint64 has no NaN element,
        # the column should be cast to object.
        df2 = df.copy()
        df2.iloc[1, 1] = pd.NaT
        df2.iloc[1, 2] = pd.NaT
        result = df2['B']
        assert_series_equal(notna(result), Series(
            [True, False, True], name='B'))
        assert_series_equal(df2.dtypes, Series([np.dtype('uint64'),
                                                np.dtype('O'), np.dtype('O')],
                                               index=['A', 'B', 'C']))
    def test_set_reset(self):
        idx = self.idx
        # set/reset
        # round trip through reset_index/set_index keeps the uint64 dtype
        df = DataFrame({'A': [0, 1, 2]}, index=idx)
        result = df.reset_index()
        assert result['foo'].dtype == np.dtype('uint64')
        df = result.set_index('foo')
        tm.assert_index_equal(df.index, idx)
    def test_transpose(self):
        # transposing mixes dtypes, so values collapse to a common type,
        # but the layout must be preserved
        result = self.df.T
        expected = DataFrame(self.df.values.T)
        expected.index = ['A', 'B']
        assert_frame_equal(result, expected)
|
# coding: utf8
{
'!langcode!': 'hu',
'!langname!': 'Magyar',
'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN',
'%s %%{row} deleted': '%s sorok törlődtek',
'%s %%{row} updated': '%s sorok frissítődtek',
'%s selected': '%s kiválasztott',
'%Y-%m-%d': '%Y.%m.%d.',
'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S',
'About': 'About',
'Access Control': 'Access Control',
'Administrative Interface': 'Administrative Interface',
'Administrative interface': 'az adminisztrációs felületért kattints ide',
'Ajax Recipes': 'Ajax Recipes',
'appadmin is disabled because insecure channel': 'az appadmin a biztonságtalan csatorna miatt letiltva',
'Are you sure you want to delete this object?': 'Are you sure you want to delete this object?',
'Available Databases and Tables': 'Elérhető adatbázisok és táblák',
'Buy this book': 'Buy this book',
'cache': 'gyorsítótár',
'Cache': 'Cache',
'Cache Keys': 'Cache Keys',
'Cannot be empty': 'Nem lehet üres',
'change password': 'jelszó megváltoztatása',
'Check to delete': 'Törléshez válaszd ki',
'Clear CACHE?': 'Clear CACHE?',
'Clear DISK': 'Clear DISK',
'Clear RAM': 'Clear RAM',
'Client IP': 'Client IP',
'Community': 'Community',
'Components and Plugins': 'Components and Plugins',
'Controller': 'Controller',
'Copyright': 'Copyright',
'Current request': 'Jelenlegi lekérdezés',
'Current response': 'Jelenlegi válasz',
'Current session': 'Jelenlegi folyamat',
'customize me!': 'változtass meg!',
'data uploaded': 'adat feltöltve',
'Database': 'adatbázis',
'Database %s select': 'adatbázis %s kiválasztás',
'db': 'db',
'DB Model': 'DB Model',
'Delete:': 'Töröl:',
'Demo': 'Demo',
'Deployment Recipes': 'Deployment Recipes',
'Description': 'Description',
'design': 'design',
'DISK': 'DISK',
'Disk Cache Keys': 'Disk Cache Keys',
'Disk Cleared': 'Disk Cleared',
'Documentation': 'Documentation',
"Don't know what to do?": "Don't know what to do?",
'done!': 'kész!',
'Download': 'Download',
'E-mail': 'E-mail',
'Edit': 'Szerkeszt',
'Edit current record': 'Aktuális bejegyzés szerkesztése',
'edit profile': 'profil szerkesztése',
'Edit This App': 'Alkalmazást szerkeszt',
'Email and SMS': 'Email and SMS',
'Errors': 'Errors',
'export as csv file': 'exportál csv fájlba',
'FAQ': 'FAQ',
'First name': 'First name',
'Forms and Validators': 'Forms and Validators',
'Free Applications': 'Free Applications',
'Group ID': 'Group ID',
'Groups': 'Groups',
'Hello World': 'Hello Világ',
'Home': 'Home',
'How did you get here?': 'How did you get here?',
'import': 'import',
'Import/Export': 'Import/Export',
'Index': 'Index',
'insert new': 'új beillesztése',
'insert new %s': 'új beillesztése %s',
'Internal State': 'Internal State',
'Introduction': 'Introduction',
'Invalid email': 'Invalid email',
'Invalid Query': 'Hibás lekérdezés',
'invalid request': 'hibás kérés',
'Key': 'Key',
'Last name': 'Last name',
'Layout': 'Szerkezet',
'Layout Plugins': 'Layout Plugins',
'Layouts': 'Layouts',
'Live Chat': 'Live Chat',
'login': 'belép',
'logout': 'kilép',
'lost password': 'elveszett jelszó',
'Lost Password': 'Lost Password',
'Main Menu': 'Főmenü',
'Manage Cache': 'Manage Cache',
'Menu Model': 'Menü model',
'My Sites': 'My Sites',
'Name': 'Name',
'New Record': 'Új bejegyzés',
'new record inserted': 'új bejegyzés felvéve',
'next 100 rows': 'következő 100 sor',
'No databases in this application': 'Nincs adatbázis ebben az alkalmazásban',
'Online examples': 'online példákért kattints ide',
'or import from csv file': 'vagy betöltés csv fájlból',
'Origin': 'Origin',
'Other Plugins': 'Other Plugins',
'Other Recipes': 'Other Recipes',
'Overview': 'Overview',
'Password': 'Password',
'Plugins': 'Plugins',
'Powered by': 'Powered by',
'Preface': 'Preface',
'previous 100 rows': 'előző 100 sor',
'Python': 'Python',
'Query:': 'Lekérdezés:',
'Quick Examples': 'Quick Examples',
'RAM': 'RAM',
'RAM Cache Keys': 'RAM Cache Keys',
'Ram Cleared': 'Ram Cleared',
'Recipes': 'Recipes',
'Record': 'bejegyzés',
'record does not exist': 'bejegyzés nem létezik',
'Record ID': 'Record ID',
'Record id': 'bejegyzés id',
'Register': 'Register',
'register': 'regisztráció',
'Registration key': 'Registration key',
'Reset Password key': 'Reset Password key',
'Role': 'Role',
'Rows in Table': 'Sorok a táblában',
'Rows selected': 'Kiválasztott sorok',
'Semantic': 'Semantic',
'Services': 'Services',
'Size of cache:': 'Size of cache:',
'state': 'állapot',
'Statistics': 'Statistics',
'Stylesheet': 'Stylesheet',
'submit': 'submit',
'Support': 'Support',
'Sure you want to delete this object?': 'Biztos törli ezt az objektumot?',
'Table': 'tábla',
'Table name': 'Table name',
'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.',
'The Core': 'The Core',
'The output of the file is a dictionary that was rendered by the view %s': 'The output of the file is a dictionary that was rendered by the view %s',
'The Views': 'The Views',
'This App': 'This App',
'Time in Cache (h:m:s)': 'Time in Cache (h:m:s)',
'Timestamp': 'Timestamp',
'Twitter': 'Twitter',
'unable to parse csv file': 'nem lehet a csv fájlt beolvasni',
'Update:': 'Frissít:',
'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.',
'User ID': 'User ID',
'Videos': 'Videos',
'View': 'Nézet',
'Welcome %s': 'Welcome %s',
'Welcome to web2py': 'Isten hozott a web2py-ban',
'Welcome to web2py!': 'Welcome to web2py!',
'Which called the function %s located in the file %s': 'Which called the function %s located in the file %s',
'You are successfully running web2py': 'You are successfully running web2py',
'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs',
'You visited the url %s': 'You visited the url %s',
}
|
from flask.ext.wtf import Form, TextField, BooleanField, TextAreaField
from flask.ext.wtf import Required, Length
from flask.ext.babel import gettext
from frmwk.model.mdUser import User
class EditForm(Form):
    """Profile-edit form validating nickname format and uniqueness."""

    nickname = TextField('nickname', validators=[Required()])
    about_me = TextAreaField('about_me', validators=[Length(min=0, max=140)])

    def __init__(self, original_nickname, *args, **kwargs):
        # Remember the user's current nickname so an unchanged value can
        # skip the format and uniqueness checks in validate().
        Form.__init__(self, *args, **kwargs)
        self.original_nickname = original_nickname

    def validate(self):
        """Run the standard field validators, then enforce nickname rules.

        Returns True when valid; otherwise appends a translated message
        to ``self.nickname.errors`` and returns False.
        """
        if not Form.validate(self):
            return False
        # Unchanged nickname: nothing more to check.
        if self.nickname.data == self.original_nickname:
            return True
        # make_valid_nickname normalizes the string; a mismatch means the
        # input contained characters outside the allowed set.
        if self.nickname.data != User.make_valid_nickname(self.nickname.data):
            self.nickname.errors.append(gettext('This nickname has invalid characters. Please use letters, numbers, dots and underscores only.'))
            return False
        # BUG FIX: identity comparison with None ("!= None" -> "is not None")
        user = User.query.filter_by(nickname=self.nickname.data).first()
        if user is not None:
            self.nickname.errors.append(gettext('This nickname is already in use. Please choose another one.'))
            return False
        return True
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - Thread monitor action
Shows the current traceback of all threads.
@copyright: 2006 MoinMoin:AlexanderSchremmer
@license: GNU GPL, see COPYING for details.
"""
import os, time
from StringIO import StringIO
from MoinMoin import Page, wikiutil
from MoinMoin.util import thread_monitor
def execute_fs(pagename, request):
    """Dump the tracebacks of all threads to a log file in the data dir.

    Superuser-only; everyone else gets an error message and the normal
    page. The HTML response reports where the dump was written
    ("nowhere" when the thread-monitor hook is disabled).
    """
    _ = request.getText
    # check for superuser
    if not request.user.isSuperUser():
        request.theme.add_msg(_('You are not allowed to use this action.'), "error")
        return Page.Page(request, pagename).send_page()
    if thread_monitor.hook_enabled():
        s = StringIO()
        thread_monitor.trigger_dump(s)
        time.sleep(5)  # allow for all threads to dump to request
        data = s.getvalue()
        timestamp = time.time()
        dump_fname = os.path.join(request.cfg.data_dir, "tm_%d.log" % timestamp)
        # BUG FIX: use open() as a context manager so the handle is closed
        # even if the write fails (the original used the py2-only file()
        # builtin and leaked the handle on error).
        with open(dump_fname, "w") as f:
            f.write(data)
    else:
        dump_fname = "nowhere"
    request.write('<html><body>A dump has been saved to %s.</body></html>' % dump_fname)
def execute_wiki(pagename, request):
    """Render the thread dump inline as an escaped <pre> wiki page.

    Superuser-only, like execute_fs; writes the dump directly into the
    response instead of a log file.
    """
    _ = request.getText
    # be extra paranoid in dangerous actions: this exposes interpreter
    # internals, so only superusers may run it
    # (removed the unused local `actname` the original computed here)
    if not request.user.isSuperUser():
        request.theme.add_msg(_('You are not allowed to use this action.'), "error")
        return Page.Page(request, pagename).send_page()
    request.theme.send_title("Thread monitor")
    request.write('<pre>')
    if not thread_monitor.hook_enabled():
        request.write("Hook is not enabled.")
    else:
        s = StringIO()
        thread_monitor.trigger_dump(s)
        time.sleep(5)  # allow for all threads to dump to request
        request.write(wikiutil.escape(s.getvalue()))
    request.write('</pre>')
    request.theme.send_footer(pagename)
    request.theme.send_closing_html()
execute = execute_fs  # exported MoinMoin action entry point: the file-dumping variant
|
import os
from tests.base import mock
import testtools
from jenkins_jobs import utils
def fake_os_walk(paths):
    """Build an os.walk() stand-in backed by a static path table.

    ``paths`` is an iterable of ``(top, (dirs, nondirs))`` pairs. The
    returned callable walks the table depth-first and, like the real
    os.walk(), honours in-place edits the caller makes to the yielded
    ``dirs`` list before recursing.
    """
    table = dict(paths)

    def os_walk(top, topdown=True):
        subdirs, files = table[top]
        yield top, subdirs, files
        # '/' is hard-coded so the test fixtures can stay simple strings
        # and the helper behaves identically on platforms where
        # os.path.sep differs.
        for child in subdirs:
            for entry in os_walk("/".join((top, child)), topdown):
                yield entry

    return os_walk
# Testing the utils module can sometimes result in the CacheStorage class
# attempting to create the cache directory multiple times as the tests
# are run in parallel. Stub out the CacheStorage to ensure that each
# test can safely create the object without effect.
@mock.patch('jenkins_jobs.builder.CacheStorage', mock.MagicMock)
class CmdRecursePath(testtools.TestCase):
    # Tests for utils.recurse_path() exclude handling. os.walk is mocked
    # via fake_os_walk(); fixture entries whose value is None mark the
    # paths the exclude list is expected to prune from the walk.
    @mock.patch('jenkins_jobs.utils.os.walk')
    def test_recursive_path_option_exclude_pattern(self, oswalk_mock):
        """
        Test paths returned by the recursive processing when using pattern
        excludes.
        testing paths
        /jjb_configs/dir1/test1/
        /jjb_configs/dir1/file
        /jjb_configs/dir2/test2/
        /jjb_configs/dir3/bar/
        /jjb_configs/test3/bar/
        /jjb_configs/test3/baz/
        """
        os_walk_paths = [
            ('/jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
            ('/jjb_configs/dir1', (['test1'], ('file'))),  # NOTE(review): ('file') is a plain str, not a 1-tuple -- presumably harmless since nondirs is only yielded; confirm intent
            ('/jjb_configs/dir2', (['test2'], ())),
            ('/jjb_configs/dir3', (['bar'], ())),
            ('/jjb_configs/dir3/bar', ([], ())),
            ('/jjb_configs/test3/bar', None),
            ('/jjb_configs/test3/baz', None)
        ]
        # expected result: every fixture path that was not pruned (None)
        paths = [k for k, v in os_walk_paths if v is not None]
        oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
        self.assertEqual(paths, utils.recurse_path('/jjb_configs', ['test*']))
    @mock.patch('jenkins_jobs.utils.os.walk')
    def test_recursive_path_option_exclude_absolute(self, oswalk_mock):
        """
        Test paths returned by the recursive processing when using absolute
        excludes.
        testing paths
        /jjb_configs/dir1/test1/
        /jjb_configs/dir1/file
        /jjb_configs/dir2/test2/
        /jjb_configs/dir3/bar/
        /jjb_configs/test3/bar/
        /jjb_configs/test3/baz/
        """
        os_walk_paths = [
            ('/jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
            ('/jjb_configs/dir1', None),
            ('/jjb_configs/dir2', (['test2'], ())),
            ('/jjb_configs/dir3', (['bar'], ())),
            ('/jjb_configs/test3', (['bar', 'baz'], ())),
            ('/jjb_configs/dir2/test2', ([], ())),
            ('/jjb_configs/dir3/bar', ([], ())),
            ('/jjb_configs/test3/bar', ([], ())),
            ('/jjb_configs/test3/baz', ([], ()))
        ]
        # expected result: every fixture path that was not pruned (None)
        paths = [k for k, v in os_walk_paths if v is not None]
        oswalk_mock.side_effect = fake_os_walk(os_walk_paths)
        self.assertEqual(paths, utils.recurse_path('/jjb_configs',
                                                   ['/jjb_configs/dir1']))
    @mock.patch('jenkins_jobs.utils.os.walk')
    def test_recursive_path_option_exclude_relative(self, oswalk_mock):
        """
        Test paths returned by the recursive processing when using relative
        excludes.
        testing paths
        ./jjb_configs/dir1/test/
        ./jjb_configs/dir1/file
        ./jjb_configs/dir2/test/
        ./jjb_configs/dir3/bar/
        ./jjb_configs/test3/bar/
        ./jjb_configs/test3/baz/
        """
        os_walk_paths = [
            ('jjb_configs', (['dir1', 'dir2', 'dir3', 'test3'], ())),
            ('jjb_configs/dir1', (['test'], ('file'))),  # NOTE(review): ('file') is a plain str, not a 1-tuple -- see note above in this class
            ('jjb_configs/dir2', (['test2'], ())),
            ('jjb_configs/dir3', (['bar'], ())),
            ('jjb_configs/test3', (['bar', 'baz'], ())),
            ('jjb_configs/dir1/test', ([], ())),
            ('jjb_configs/dir2/test2', ([], ())),
            ('jjb_configs/dir3/bar', ([], ())),
            ('jjb_configs/test3/bar', None),
            ('jjb_configs/test3/baz', ([], ()))
        ]
        # relative fixtures are made absolute against the current dir so
        # they can be compared with recurse_path()'s output
        rel_os_walk_paths = [
            (os.path.abspath(
                os.path.join(os.path.curdir, k)), v) for k, v in os_walk_paths]
        paths = [k for k, v in rel_os_walk_paths if v is not None]
        oswalk_mock.side_effect = fake_os_walk(rel_os_walk_paths)
        self.assertEqual(paths, utils.recurse_path('jjb_configs',
                                                   ['jjb_configs/test3/bar']))
|
from setuptools import setup, Extension
classifiers = ['Development Status :: 3 - Alpha',
               'Operating System :: POSIX :: Linux',
               'License :: OSI Approved :: Apache Software License',
               'Intended Audience :: Developers',
               'Programming Language :: Python :: 2.7',
               'Programming Language :: Python :: 3',
               'Topic :: Software Development',
               'Topic :: Home Automation',
               'Topic :: System :: Hardware']

# BUG FIX: read the long description with a context manager so the file
# handle is closed deterministically (the inline open(...).read() relied
# on garbage collection to close it).
with open('../doc/README') as readme:
    long_description = readme.read()

setup(name='WebIOPi',
      version='0.7.1',
      author='Eric PTAK',
      author_email='trouch@trouch.com',
      description='A package to control Raspberry Pi GPIO from the web',
      long_description=long_description,
      license='Apache',
      keywords='RaspberryPi GPIO Python REST',
      url='http://webiopi.trouch.com/',
      classifiers=classifiers,
      packages=['_webiopi',
                "webiopi",
                "webiopi.utils",
                "webiopi.clients",
                "webiopi.protocols",
                "webiopi.server",
                "webiopi.decorators",
                "webiopi.devices",
                "webiopi.devices.digital",
                "webiopi.devices.analog",
                "webiopi.devices.sensor",
                "webiopi.devices.clock",
                "webiopi.devices.memory",
                "webiopi.devices.shield"
                ],
      # native extension bridging Python to the Pi's GPIO registers
      ext_modules=[Extension(name='_webiopi.GPIO',
                             sources=['native/bridge.c', 'native/gpio.c',
                                      'native/cpuinfo.c', 'native/pwm.c'],
                             include_dirs=['native/'])],
      headers=['native/cpuinfo.h', 'native/gpio.h', 'native/pwm.h'],
      )
|
################################################################
#
# Albow - Tab Panel
#
################################################################
from pygame import Rect
from widget import Widget
from theme import ThemeProperty, FontProperty
from utils import brighten
class TabPanel(Widget):
    """Widget showing one of several pages at a time, selectable via a
    row of tabs along its top edge.

    Attributes:
        pages        -- [Widget] managed pages, in tab order
        current_page -- Widget currently shown, or None
    """

    tab_font = FontProperty('tab_font')
    tab_height = ThemeProperty('tab_height')
    tab_border_width = ThemeProperty('tab_border_width')
    tab_spacing = ThemeProperty('tab_spacing')
    tab_margin = ThemeProperty('tab_margin')
    tab_fg_color = ThemeProperty('tab_fg_color')
    default_tab_bg_color = ThemeProperty('default_tab_bg_color')
    tab_area_bg_color = ThemeProperty('tab_area_bg_color')
    tab_dimming = ThemeProperty('tab_dimming')
    #use_page_bg_color_for_tabs = ThemeProperty('use_page_bg_color_for_tabs')

    def __init__(self, pages = None, **kwds):
        # `pages` is an optional sequence of (title, page) pairs; the
        # panel sizes itself to the largest page and shows the first one.
        Widget.__init__(self, **kwds)
        self.pages = []
        self.current_page = None
        if pages:
            w = h = 0
            for title, page in pages:
                w = max(w, page.width)
                h = max(h, page.height)
                self._add_page(title, page)
            self.size = (w, h)
            self.show_page(pages[0][1])

    def content_size(self):
        """Size of the page area (everything below the tab strip)."""
        return (self.width, self.height - self.tab_height)

    def content_rect(self):
        """Rect of the page area in local coordinates."""
        return Rect((0, self.tab_height), self.content_size())

    def page_height(self):
        return self.height - self.tab_height

    def add_page(self, title, page):
        """Append a page; it becomes current if none is shown yet."""
        self._add_page(title, page)
        if not self.current_page:
            self.show_page(page)

    def _add_page(self, title, page):
        page.tab_title = title
        page.anchor = 'ltrb'
        self.pages.append(page)

    def remove_page(self, page):
        """Remove a page; clears the display if it was the current one.

        Removing a page not managed by this panel is a no-op.
        """
        try:
            i = self.pages.index(page)
            del self.pages[i]
        except ValueError:
            # BUG FIX: list.index raises ValueError (not IndexError) when
            # the value is absent, so the original except clause never
            # fired and an unknown page crashed this method.
            pass
        if page is self.current_page:
            self.show_page(None)

    def show_page(self, page):
        """Make `page` (or None) the visible page and give it focus."""
        if self.current_page:
            self.remove(self.current_page)
        self.current_page = page
        if page:
            th = self.tab_height
            page.rect = Rect(0, th, self.width, self.height - th)
            self.add(page)
            page.focus()

    def draw(self, surf):
        self.draw_tab_area_bg(surf)
        self.draw_tabs(surf)

    def draw_tab_area_bg(self, surf):
        # fill the strip behind the tabs, if a colour is themed
        bg = self.tab_area_bg_color
        if bg:
            surf.fill(bg, (0, 0, self.width, self.tab_height))

    def draw_tabs(self, surf):
        """Draw each tab: background, borders and centred title text."""
        font = self.tab_font
        fg = self.tab_fg_color
        b = self.tab_border_width
        if b:
            # baseline under the whole tab row
            surf.fill(fg, (0, self.tab_height - b, self.width, b))
        for i, title, page, selected, rect in self.iter_tabs():
            x0 = rect.left
            w = rect.width
            h = rect.height
            r = rect
            if not selected:
                # unselected tabs stop short of the baseline
                r = Rect(r)
                r.bottom -= b
            self.draw_tab_bg(surf, page, selected, r)
            if b:
                # left, top and right borders of the tab
                surf.fill(fg, (x0, 0, b, h))
                surf.fill(fg, (x0 + b, 0, w - 2 * b, b))
                surf.fill(fg, (x0 + w - b, 0, b, h))
            buf = font.render(title, True, page.fg_color or fg)
            r = buf.get_rect()
            r.center = (x0 + w // 2, h // 2)
            surf.blit(buf, r)

    def iter_tabs(self):
        """Yield (index, title, page, selected, rect) per tab, dividing
        the available width evenly between the tabs."""
        pages = self.pages
        current_page = self.current_page
        n = len(pages)
        b = self.tab_border_width
        s = self.tab_spacing
        h = self.tab_height
        m = self.tab_margin
        width = self.width - 2 * m + s - b
        x0 = m
        for i, page in enumerate(pages):
            x1 = m + (i + 1) * width // n  # right boundary of tab i
            selected = page is current_page
            yield i, page.tab_title, page, selected, Rect(x0, 0, x1 - x0 - s + b, h)
            x0 = x1

    def draw_tab_bg(self, surf, page, selected, rect):
        # unselected tabs are drawn in a dimmed version of their colour
        bg = self.tab_bg_color_for_page(page)
        if not selected:
            bg = brighten(bg, self.tab_dimming)
        surf.fill(bg, rect)

    def tab_bg_color_for_page(self, page):
        # explicit per-page tab colour beats the page bg, beats the theme
        return getattr(page, 'tab_bg_color', None) \
            or page.bg_color \
            or self.default_tab_bg_color

    def mouse_down(self, e):
        # a click inside the tab strip switches pages
        x, y = e.local
        if y < self.tab_height:
            i = self.tab_number_containing_x(x)
            if i is not None:
                self.show_page(self.pages[i])

    def tab_number_containing_x(self, x):
        """Return the tab index under local x, or None if outside."""
        n = len(self.pages)
        m = self.tab_margin
        width = self.width - 2 * m + self.tab_spacing - self.tab_border_width
        i = (x - m) * n // width
        if 0 <= i < n:
            return i
        return None
|
import csv
import datetime
import os
import random
import shutil
import string
import zipfile
from typing import Iterable, List, Dict
from cleo import Command
from lxml import etree
from kerapu.style.KerapuStyle import KerapuStyle
class TestShredderCommand(Command):
"""
Converteert XML-bestand met de testset naar een CSV-bestand
kerapu:test-shredder
{testset-zip : ZIP-bestand met de testset}
{testset-csv : Path waar het CSV-bestand met de tests moeten worden opgeslagen}
"""
# ------------------------------------------------------------------------------------------------------------------
    def __extract_zip_file(self, zip_filename: str, tmp_dir: str) -> None:
        """
        Extracts the ZIP file with the test set into a folder.

        :param str zip_filename: The path to the ZIP file with the test set.
        :param str tmp_dir: The path to the extraction folder.
        """
        self.output.writeln('Uitpakken van <fso>{}</fso> in <fso>{}</fso>'.format(zip_filename, tmp_dir))
        with zipfile.ZipFile(zip_filename, 'r') as zip_ref:
            zip_ref.extractall(tmp_dir)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def ordinal(path: str) -> int:
"""
Geeft het volgnummer van een test.
:param str path: Het path naar het XML-bestand met de test case.
"""
parts = os.path.basename(path).split('_')
return int(parts[6])
# ------------------------------------------------------------------------------------------------------------------
def __lees_test_cases_lijst(self, folder: str) -> List:
"""
Geeft een lijst met alle bestanden in een folder.
:param str folder: Het path naar de folder.
"""
entries = os.listdir(folder)
filenames = list()
for entry in entries:
path = os.path.join(folder, entry)
if os.path.isfile(path):
filenames.append(path)
self.output.writeln('Aantal gevonden test cases: {}'.format(len(filenames)))
return sorted(filenames, key=TestShredderCommand.ordinal)
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __maak_xpath(parts: Iterable) -> str:
"""
Maakt een string met een xpath.
:param tuple parts: The onderdelen van het xpath.
:rtype: str
"""
xpath = ''
for part in parts:
if xpath:
xpath += '/'
xpath += 'xmlns:' + part
return xpath
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __convert_date(date: str) -> str:
    """
    Convert a date from YYYYMMDD format to YYYY-MM-DD format.

    :param str date: The date in YYYYMMDD format.

    :rtype: str
    """
    return '-'.join((date[:4], date[4:6], date[6:8]))
# ------------------------------------------------------------------------------------------------------------------
@staticmethod
def __leeftijd_geboorte_datum(date: str, leeftijd: int) -> str:
    """
    Return the date of birth given a reference date and an age (assuming the
    person's birthday is not on the reference date).

    :param str date: The reference date in YYYY-MM-DD format.
    :param int leeftijd: The age in years.

    :rtype: str
    """
    # Fix: the original docstring claimed ':rtype: int' while the function
    # returns an ISO-formatted date string; the annotation (str) is correct.
    #
    # Subtract the age in years, then step one day back so the person has not
    # yet celebrated a birthday on the reference date.
    # NOTE(review): raises ValueError when the reference date is Feb 29 and the
    # target year is not a leap year -- confirm this cannot occur in test sets.
    geboorte_datum = datetime.date(int(date[:4]) - leeftijd, int(date[5:7]), int(date[8:10]))
    geboorte_datum -= datetime.timedelta(days=1)

    return geboorte_datum.isoformat()
# ------------------------------------------------------------------------------------------------------------------
def __shred_xml_bestand(self, filename: str) -> Dict:
    """
    Read the relevant data from an XML file (a SOAP FICR_IN900101NL04 message)
    with a single test case and return it as a flat dict.

    :param str filename: The file name of the XML file.

    :rtype: dict
    """
    doc = etree.parse(filename)
    # All queries start from the message root inside the SOAP body.
    xpath = '/soapenv:Envelope/soapenv:Body/xmlns:FICR_IN900101NL04'
    namespaces = {'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/',
                  'xmlns': 'urn:hl7-org:v3'}
    # Read the declaration code.
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'id')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    declaratie_code = elements[0].get('extension')
    # Read the specialism code.
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'derivedFrom',
             'zorgtraject', 'responsibleParty', 'assignedPerson', 'code')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    specialisme_code = elements[0].get('code')
    # Read the diagnosis code.
    parts = (
        'ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'pertinentInformation1',
        'typerendeDiagnose', 'value')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    diagnose_code = elements[0].get('code')
    # Read the care type code (optional element).
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'code')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    zorg_type_code = elements[0].get('code') if elements else None
    # Read the care demand code (optional element).
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'derivedFrom',
             'zorgtraject', 'reason', 'zorgvraag', 'value')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    zorg_vraag_code = elements[0].get('code') if elements else None
    # Read the start date (optional element).
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'effectiveTime', 'low')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    begin_datum = self.__convert_date(elements[0].get('value')) if elements else None
    # Derive the patient's date of birth from the age and the start date.
    # NOTE(review): assumes both the start date and the age element are present;
    # __leeftijd_geboorte_datum would fail on None -- confirm for all test sets.
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'subject', 'patient', 'subjectOf', 'leeftijd',
             'value')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    leeftijd = int(elements[0].get('value')) if elements else None
    geboorte_datum = self.__leeftijd_geboorte_datum(begin_datum, leeftijd)
    # Read the patient's gender code (optional element).
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'subject', 'patient', 'patientPerson',
             'administrativeGenderCode')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    geslacht_code = elements[0].get('code') if elements else None
    # Read the AGB code of the care institution (optional element).
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'author', 'assignedOrganization', 'id')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    zorg_instelling_code = elements[0].get('extension') if elements else None
    # Read all care activities as (code, count) pairs.
    zorg_activiteiten = list()
    parts = ('ControlActProcess', 'subject', 'Declaratiedataset', 'component', 'subtraject', 'debit',
             'zorgactiviteit')
    elements = doc.xpath(xpath + '/' + self.__maak_xpath(parts), namespaces=namespaces)
    for element in elements:
        path = 'xmlns:code'
        sub_elements = element.xpath(path, namespaces=namespaces)
        zorg_activiteit_code = sub_elements[0].get('code') if sub_elements else None
        path = 'xmlns:repeatNumber'
        sub_elements = element.xpath(path, namespaces=namespaces)
        aantal = int(sub_elements[0].get('value')) if sub_elements else None
        zorg_activiteiten.append((zorg_activiteit_code, aantal))
    return {'subtraject_nummer': os.path.basename(filename),
            'declaratie_code': declaratie_code,
            'specialisme_code': specialisme_code,
            'diagnose_code': diagnose_code,
            'zorg_type_code': zorg_type_code,
            'zorg_vraag_code': zorg_vraag_code,
            'begin_datum': begin_datum,
            'geboorte_datum': geboorte_datum,
            'geslacht_code': geslacht_code,
            'zorg_instelling_code': zorg_instelling_code,
            'zorg_activiteiten': zorg_activiteiten}
# ----------------------------------------------------------------------------------------------------------------------
@staticmethod
def __write_subtraject(writer, subtraject: Dict) -> None:
    """
    Write a subtraject and all its care activities to a CSV file.

    The first row holds the subtraject details; each care activity follows
    on its own (code, count) row.

    :param writer: The handle to the CSV writer.
    :param dict subtraject: The details of the subtraject.
    """
    keys = ('subtraject_nummer',
            'specialisme_code',
            'diagnose_code',
            'zorg_type_code',
            'zorg_vraag_code',
            'begin_datum',
            'geboorte_datum',
            'geslacht_code',
            'zorg_instelling_code',
            'declaratie_code')
    writer.writerow(tuple(subtraject[key] for key in keys))

    for zorg_activiteit_code, aantal in subtraject['zorg_activiteiten']:
        writer.writerow((zorg_activiteit_code, aantal))
# ----------------------------------------------------------------------------------------------------------------------
def __extract_files(self, writer, filenames: List) -> None:
    """
    Extract the data from a list of XML files with test cases and write that
    data to a CSV file.

    :param writer: The handle to the CSV writer.
    :param list filenames: The file names of the XML files with test cases.
    """
    for xml_filename in filenames:
        self.__write_subtraject(writer, self.__shred_xml_bestand(xml_filename))
# ------------------------------------------------------------------------------------------------------------------
def handle(self) -> int:
    """
    Executes the command.

    :rtype: int The exit status (0 on success).
    """
    self.output = KerapuStyle(self.input, self.output)

    zip_filename = self.argument('testset-zip')
    csv_filename = self.argument('testset-csv')

    tmp_dir = '.kerapu-' + ''.join(random.choices(string.ascii_lowercase, k=12))
    os.mkdir(tmp_dir)
    try:
        self.__extract_zip_file(zip_filename, tmp_dir)
        files = self.__lees_test_cases_lijst(tmp_dir)
        # Fix: newline='' is required by the csv module for file objects,
        # otherwise \r\r\n line endings are written on Windows.
        with open(csv_filename, 'w', encoding='utf-8', newline='') as handle:
            csv_writer = csv.writer(handle, dialect=csv.unix_dialect)
            self.__extract_files(csv_writer, files)
    finally:
        # Fix: always remove the scratch folder, also when shredding fails
        # halfway (the original leaked tmp_dir on any exception).
        shutil.rmtree(tmp_dir)

    return 0
# ----------------------------------------------------------------------------------------------------------------------
|
# coding: utf-8
"""
EVE Swagger Interface
An OpenAPI for EVE Online
OpenAPI spec version: 0.4.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class GetCharactersCharacterIdMailLabelsForbidden(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, error=None):
        """
        GetCharactersCharacterIdMailLabelsForbidden - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Maps attribute name -> Swagger type string.
        self.swagger_types = {
            'error': 'str'
        }

        # Maps attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'error': 'error'
        }

        self._error = error

    @property
    def error(self):
        """
        Gets the error of this GetCharactersCharacterIdMailLabelsForbidden.
        Forbidden message

        :return: The error of this GetCharactersCharacterIdMailLabelsForbidden.
        :rtype: str
        """
        return self._error

    @error.setter
    def error(self, error):
        """
        Sets the error of this GetCharactersCharacterIdMailLabelsForbidden.
        Forbidden message

        :param error: The error of this GetCharactersCharacterIdMailLabelsForbidden.
        :type: str
        """
        self._error = error

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # Fix: iterate with dict.items() instead of the Python 2-only
        # six.iteritems(); behaviour is identical and removes the six
        # dependency from this method.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, GetCharactersCharacterIdMailLabelsForbidden):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
import os
import spyral
from .sprites import sprite
from . import collision
# Scene dimensions in pixels.
WIDTH = 1200
HEIGHT = 900
WHITE = (255, 255, 255)
SIZE = (WIDTH, HEIGHT)
# RGB color constants.
GREEN = (60, 179, 113)
RED = (255, 0, 0)
BLACKBLUE = (19, 15, 48)
BG_COLOR = BLACKBLUE
# Alien grid layout and movement parameters (pixels); presumably consumed by
# the sprite module -- confirm against sprites/sprite.py.
ENEMYGAP = 30
XMARGIN = 175
YMARGIN = 100
MOVEX = 15
MOVEY = 20
ENEMYSIDE = 50
# Background image shipped with the game assets.
BACKGROUND = os.path.join("game", "graphics", "spacebackground.png")
class Level1(spyral.Scene):
    """First level scene: background, the player and a grid of aliens."""

    def __init__(self):
        spyral.Scene.__init__(self, SIZE)

        # Scale the space backdrop to fill the whole scene.
        self.space = spyral.Image(filename=BACKGROUND)
        self.background = self.space.scale((1200, 900))

        self.collision_handler = collision.CollisionHandler(self)
        self.player = sprite.Player(self, 'left', self.collision_handler)
        self.alien_list = self.make_aliens(6, 3)

        # Register the actors with the collision handler.
        self.collision_handler.add_player(self.player)
        self.collision_handler.add_aliens(self.alien_list)

        # Wire up quit, per-frame updates and the 'q' shortcut.
        spyral.event.register("system.quit", spyral.director.pop)
        spyral.event.register("director.update", self.update)
        spyral.event.register("input.keyboard.down.q", spyral.director.pop)

    def update(self, delta):
        """Per-frame hook; this scene has no scene-wide update logic."""
        pass

    def make_aliens(self, columns, rows):
        """Create a columns x rows grid of aliens and return them as a list."""
        return [sprite.Alien(self, row, column)
                for column in range(columns)
                for row in range(rows)]
|
from .riq_obj import RIQObject
from .riq_base import RIQBase
from .listitems import ListItems
from .listitem import ListItem
# TODO: Add version, externalId, category
# TODO: Payload exception if missing required fields
# TODO: Add version, externalId, category
# TODO: Payload exception if missing required fields
class List(RIQObject, RIQBase):
    """
    A RelateIQ List resource.

    Accessors follow the project's hybrid convention: ``obj.title()`` reads
    the value, ``obj.title('x')`` writes it and returns the new value.
    """

    # Object Attributes
    _id = None
    _modifiedDate = None
    _title = None
    _listType = None
    _fields = None
    _size = None

    def __init__(self, _id=None, title=None, modifiedDate=None, fields=None, data=None):
        if data is not None:
            self.parse(data)
        elif self.id(_id) is not None:
            # An id without a payload: fetch the list from the API.
            self.get()
        self.title(title)
        self.modifiedDate(modifiedDate)
        self.fields(fields)
        self.ListItems = ListItems(self)

    @classmethod
    def node(cls):
        """Return the API endpoint node for lists."""
        return 'lists'

    def parse(self, data):
        """Populate this object from an API response dict and return self."""
        self.id(data.get('id', None))
        self.modifiedDate(data.get('modifiedDate', None))
        self.title(data.get('title', None))
        self.listType(data.get('listType', None))
        self.fields(data.get('fields', None))
        self.size(data.get('size', None))
        return self

    # Data Payload
    def payload(self):
        """Return the JSON-serializable payload for create/update calls."""
        payload = {
            'title': self.title(),
            'fields': self.fields()
        }
        if self.id() is not None:
            payload['id'] = self.id()
        return payload

    # Hybrid getter/setters
    def id(self, value=None):
        if value is not None:
            self._id = value
        return self._id

    def modifiedDate(self, value=None):
        if value is not None:
            self._modifiedDate = value
        return self._modifiedDate

    def title(self, value=None):
        if value is not None:
            self._title = value
        return self._title

    def listType(self, value=None):
        if value is not None:
            self._listType = value
        return self._listType

    def fields(self, value=None):
        if value is not None:
            self._fields = value
        # Fall back to an empty list so callers can always iterate.
        return self._fields or []

    def size(self, value=None):
        if value is not None:
            self._size = value
        return self._size

    # Sub Endpoints
    def ListItem(self, *args, **kwargs):
        kwargs['parent'] = self
        return ListItem(*args, **kwargs)

    # Lookup Functions
    # Convert a field name to a field key (eg "Status" --> "0")
    def fieldKey(self, name):
        # If the "name" is already a key, just return it.
        for field in self.fields():
            if field.get('id', None) == name:
                return name
        # Otherwise, find the field whose "name" is name and return its id.
        for field in self.fields():
            if field.get('name', None) == name:
                return field.get('id', name)
        # print "[WARN] Field is a Linked Field and has no Schema in List: " + name
        return name

    # NOTE(review): fieldValue and fieldOption are identical and both ignore
    # their `value` parameter -- possibly unfinished; confirm intended behaviour.
    def fieldValue(self, key, value=None):
        for field in self.fields():
            if field.get('id', None) == key:
                return key
        for field in self.fields():
            if field.get('display', None) == key:
                return field.get('id', key)
        return key

    def fieldOption(self, key, value=None):
        for field in self.fields():
            if field.get('id', None) == key:
                return key
        for field in self.fields():
            if field.get('display', None) == key:
                return field.get('id', key)
        return key

    def fieldMap(self):
        """Return a mapping of field id -> field definition."""
        # Fix: iterate fields() (which falls back to []) instead of _fields
        # directly, so a list without fields yields {} instead of raising
        # TypeError on None.
        return {field["id"]: field for field in self.fields()}

    def fetchListSize(self):
        """Refresh this list with its size included and return the size."""
        self.get({"includeSize": True})
        return self.size()
|
import pytest
import re
def test_java(host):
    """`java -version` must run and report major version 11."""
    cmd = host.run('. /etc/profile && java -version')
    assert cmd.rc == 0
    # `java -version` prints to stderr, not stdout.
    match = re.search('(?:java|openjdk) version "([0-9]+)', cmd.stderr)
    assert match is not None
    assert match.group(1) == '11'
def test_javac(host):
    """`javac -version` must run and report major version 11."""
    cmd = host.run('. /etc/profile && javac -version')
    assert cmd.rc == 0
    match = re.search('javac ([0-9]+)', cmd.stdout)
    assert match is not None
    assert match.group(1) == '11'
@pytest.mark.parametrize('version_dir_pattern', [
    'jdk-11(\\.[0-9]+){,3}(\\+[0-9]+)?$'
])
def test_java_installed(host, version_dir_pattern):
    """The JDK must be unpacked under /opt/java/ with a root-owned java binary."""
    java_home = host.check_output('find %s | grep --color=never -E %s',
                                  '/opt/java/',
                                  version_dir_pattern)
    java_exe = host.file(java_home + '/bin/java')
    assert java_exe.exists
    assert java_exe.is_file
    assert (java_exe.user, java_exe.group) == ('root', 'root')
    assert oct(java_exe.mode) == '0o755'
@pytest.mark.parametrize('fact_group_name', [
    'java'
])
def test_facts_installed(host, fact_group_name):
    """A custom Ansible facts file must exist, be root-owned and mode 0644."""
    fact_file = host.file('/etc/ansible/facts.d/' + fact_group_name + '.fact')
    assert fact_file.exists
    assert fact_file.is_file
    assert (fact_file.user, fact_file.group) == ('root', 'root')
    assert oct(fact_file.mode) == '0o644'
|
# This file is part of the Juju GUI, which lets users view and manage Juju
# environments within a graphical interface (https://launchpad.net/juju-gui).
# Copyright (C) 2013 Canonical Ltd.
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License version 3, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranties of MERCHANTABILITY,
# SATISFACTORY QUALITY, or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Juju GUI server entry point.
Arguments example:
--apiurl="wss://ec2-75-101-177-185.compute-1.example.com:17070"
--apiversion="go"
--sslpath="/etc/ssl/juju-gui"
--tests_root="/var/lib/juju/agents/unit-juju-gui-0/charm/juju-gui/test/"
--insecure
--sandbox
--logging=debug|info|warning|error
--charmworldurl="https://manage.jujucharms.com/"
The --sslpath option is ignored if --insecure is set.
The --apiurl and --apiversion options are ignored if --sandbox is set.
"""
from guiserver import manage
if __name__ == '__main__':
    # Configure the server from the command-line options, then start it.
    manage.setup()
    manage.run()
|
# Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import unittest
from mock import patch
from pytz import UTC
from script_wrapper.tasks.gpsvis_db import GpsVisDB
class TestClassification(unittest.TestCase):
    """Tests for the GpsVisDB task."""

    def test_matlab_version(self):
        task = GpsVisDB()
        self.assertEqual(task.matlab_version, '2012b')

    def test_convert_colors_valids(self):
        """Every supported hex color maps to its 1-based palette index."""
        task = GpsVisDB()
        colors = {'FFFF50': 1,
                  'F7E8AA': 2,
                  'FFA550': 3,
                  '5A5AFF': 4,
                  'BEFFFF': 5,
                  '8CFF8C': 6,
                  'FF8CFF': 7,
                  'AADD96': 8,
                  'FFD3AA': 9,
                  'C6C699': 10,
                  'E5BFC6': 11,
                  'DADADA': 12,
                  'C6B5C4': 13,
                  'C1D1BF': 14,
                  '000000': 15
                  }
        # Fix: dict.items() instead of the Python 2-only dict.iteritems(),
        # which raises AttributeError on Python 3.
        for (code, index) in colors.items():
            result = task.convert_colors({'id': 1, 'color': code,
                                          'size': 'small', 'speed': 1})
            # Fix: assertEqual instead of the deprecated assertEquals alias.
            self.assertEqual(result, index)

    def test_convert_colors_notfound(self):
        """An unknown color name must raise ValueError."""
        task = GpsVisDB()
        with self.assertRaises(ValueError):
            task.convert_colors({'id': 1, 'color': 'blue',
                                 'size': 'small', 'speed': 1})

    @patch('script_wrapper.tasks.gpsvis_db.getGPSCount')
    def test_formfields2taskargs(self, gps_count):
        """Form fields are converted to task args with colors mapped to indices."""
        gps_count.return_value = 1000
        task = GpsVisDB()
        trackers = [{'id': 1, 'color': 'DADADA',
                     'size': 'small', 'speed': 1}]
        formquery = {'trackers': trackers,
                     'start': '2013-01-01T00:00:00',
                     'end': '2013-10-10T00:00:00',
                     'alt': 'clampToGround',
                     }
        taskargs = task.formfields2taskargs(formquery,
                                            'postgresql://localhost')
        etrackers = [{'id': 1, 'color': 12,
                      'size': 'small', 'speed': 1}]
        etaskargs = {'db_url': 'postgresql://localhost',
                     'start': '2013-01-01T00:00:00',
                     'end': '2013-10-10T00:00:00',
                     'alt': 'clampToGround',
                     'trackers': etrackers,
                     }
        self.assertEqual(taskargs, etaskargs)
|
# words.py
def main():
    """Read a word list and print, grouped and sorted by length, the words that
    can be built from a fixed set of target characters ('.' matches anything)."""
    try:
        txt = open("test_4.txt")
    except OSError:
        # Fix: without this early return, `txt` below would be unbound and the
        # function would crash with NameError right after reporting the error.
        print("OSError: Cannot find the file.")
        return
    target = "NSCRT - oooe+*"

    # Lower-case the target; every character (including spaces and symbols)
    # counts as allowed, matching the original behaviour.
    allowed = target.lower()

    with txt:
        # Collect the unique lower-cased words; a single trailing non-letter
        # (punctuation) is stripped from each word.
        all_words = []
        for line in txt:
            for word in line.strip().split():
                if not word[-1].isalpha():
                    word = word[:-1]
                word = word.lower()
                if word not in all_words:
                    all_words.append(word)
    all_words.sort()

    # Group buildable words by length. A '.' in a word matches any character;
    # the original's redundant `and char` check is dropped (always truthy).
    is_in = {}
    for word in all_words:
        if all(char == '.' or char in allowed for char in word):
            is_in.setdefault(len(word), []).append(word)

    for length, words in sorted(is_in.items()):
        print("Words of length {:d} built from these characters, in lexicographic order:".format(length))
        for word in words:
            print('\t', word)


if __name__ == '__main__':
    main()
|
from .. import TwoHaloTerm, DampedGalaxyPowerTerm
from . import SOCorrection
class PcAcB_2h(TwoHaloTerm):
    """
    The 2-halo term for `PcAcB`
    """
    name = 'PcAcB_2h'

    def __init__(self, model):
        # 'b1_cA' and 'b1_cB' name the two linear bias parameters entering
        # this cross term.
        super(PcAcB_2h, self).__init__(model, 'b1_cA', 'b1_cB')
class PcAcB(DampedGalaxyPowerTerm):
    """
    The cross power spectrum between centrals with/without
    satellites in the same halo ('cenA' x 'cenB')
    """
    name = "PcAcB"

    def __init__(self, model):
        # Damping is driven by the centrals' dispersion parameter 'sigma_c'.
        super(PcAcB, self).__init__(model, PcAcB_2h, sigma1='sigma_c')

    @property
    def coefficient(self):
        # Pair-count weight 2 * (1 - fcB) * fcB for the cenA x cenB cross term.
        return 2*(1-self.model.fcB)*self.model.fcB

    def __call__(self, k, mu):
        # Evaluate the damped power term inside the SOCorrection context.
        with SOCorrection(self.model):
            return super(PcAcB, self).__call__(k, mu)

    def derivative_k(self, k, mu):
        # Derivative with respect to k, under the same SOCorrection context.
        with SOCorrection(self.model):
            return super(PcAcB, self).derivative_k(k, mu)

    def derivative_mu(self, k, mu):
        # Derivative with respect to mu, under the same SOCorrection context.
        with SOCorrection(self.model):
            return super(PcAcB, self).derivative_mu(k, mu)
|
from django.db import migrations
import utilities.fields
import utilities.ordering
def _update_model_names(model):
    """Backfill the `_name` natural-ordering column for all rows of *model*."""
    # Update each unique field value in bulk
    for name in model.objects.values_list('name', flat=True).order_by('name').distinct():
        model.objects.filter(name=name).update(_name=utilities.ordering.naturalize(name, max_length=100))
def naturalize_consoleports(apps, schema_editor):
    """Forward data migration: populate ConsolePort._name."""
    _update_model_names(apps.get_model('dcim', 'ConsolePort'))
def naturalize_consoleserverports(apps, schema_editor):
    """Forward data migration: populate ConsoleServerPort._name."""
    _update_model_names(apps.get_model('dcim', 'ConsoleServerPort'))
def naturalize_powerports(apps, schema_editor):
    """Forward data migration: populate PowerPort._name."""
    _update_model_names(apps.get_model('dcim', 'PowerPort'))
def naturalize_poweroutlets(apps, schema_editor):
    """Forward data migration: populate PowerOutlet._name."""
    _update_model_names(apps.get_model('dcim', 'PowerOutlet'))
def naturalize_frontports(apps, schema_editor):
    """Forward data migration: populate FrontPort._name."""
    _update_model_names(apps.get_model('dcim', 'FrontPort'))
def naturalize_rearports(apps, schema_editor):
    """Forward data migration: populate RearPort._name."""
    _update_model_names(apps.get_model('dcim', 'RearPort'))
def naturalize_devicebays(apps, schema_editor):
    """Forward data migration: populate DeviceBay._name."""
    _update_model_names(apps.get_model('dcim', 'DeviceBay'))
class Migration(migrations.Migration):
    """
    Add a hidden `_name` natural-ordering field to the dcim component models,
    switch their default ordering to it, and backfill existing rows.
    """

    dependencies = [
        ('dcim', '0092_fix_rack_outer_unit'),
    ]

    operations = [
        # 1) Order each component model by the naturalized `_name` column.
        migrations.AlterModelOptions(
            name='consoleport',
            options={'ordering': ('device', '_name')},
        ),
        migrations.AlterModelOptions(
            name='consoleserverport',
            options={'ordering': ('device', '_name')},
        ),
        migrations.AlterModelOptions(
            name='devicebay',
            options={'ordering': ('device', '_name')},
        ),
        migrations.AlterModelOptions(
            name='frontport',
            options={'ordering': ('device', '_name')},
        ),
        migrations.AlterModelOptions(
            name='inventoryitem',
            options={'ordering': ('device__id', 'parent__id', '_name')},
        ),
        migrations.AlterModelOptions(
            name='poweroutlet',
            options={'ordering': ('device', '_name')},
        ),
        migrations.AlterModelOptions(
            name='powerport',
            options={'ordering': ('device', '_name')},
        ),
        migrations.AlterModelOptions(
            name='rearport',
            options={'ordering': ('device', '_name')},
        ),
        # 2) Schema change: add the `_name` shadow field that stores the
        #    naturalized form of `name`.
        migrations.AddField(
            model_name='consoleport',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='consoleserverport',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='devicebay',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='frontport',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='inventoryitem',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='poweroutlet',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='powerport',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        migrations.AddField(
            model_name='rearport',
            name='_name',
            field=utilities.fields.NaturalOrderingField('name', blank=True, max_length=100, naturalize_function=utilities.ordering.naturalize),
        ),
        # 3) Data migration: backfill `_name` for existing rows; reversing the
        #    migration is a no-op because dropping the column discards the data.
        migrations.RunPython(
            code=naturalize_consoleports,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RunPython(
            code=naturalize_consoleserverports,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RunPython(
            code=naturalize_powerports,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RunPython(
            code=naturalize_poweroutlets,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RunPython(
            code=naturalize_frontports,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RunPython(
            code=naturalize_rearports,
            reverse_code=migrations.RunPython.noop
        ),
        migrations.RunPython(
            code=naturalize_devicebays,
            reverse_code=migrations.RunPython.noop
        ),
    ]
|
import os.path
import shutil
import textwrap
import pytest
from pip._internal.cli.status_codes import ERROR
from tests.lib.path import Path
def fake_wheel(data, wheel_path):
    """Copy the generic 'simple.dist' test wheel into data.packages under *wheel_path*."""
    shutil.copy(
        data.packages.joinpath('simple.dist-0.1-py2.py3-none-any.whl'),
        data.packages.joinpath(wheel_path),
    )
@pytest.mark.network
def test_download_if_requested(script):
    """`pip download -d` downloads into the given dir and installs nothing."""
    result = script.pip('download', '-d', 'pip_downloads', 'INITools==0.1')
    downloaded = Path('scratch') / 'pip_downloads' / 'INITools-0.1.tar.gz'
    assert downloaded in result.files_created
    assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_setuptools(script):
    """Downloading setuptools creates an archive in the scratch path."""
    result = script.pip('download', 'setuptools')
    prefix = str(Path('scratch') / 'setuptools')
    assert any(name.startswith(prefix) for name in result.files_created)
def test_download_wheel(script, data):
    """`pip download` can fetch a *.whl archive from a local find-links dir."""
    result = script.pip(
        'download', '--no-index', '-f', data.packages, '-d', '.', 'meta',
    )
    downloaded = Path('scratch') / 'meta-1.0-py2.py3-none-any.whl'
    assert downloaded in result.files_created
    assert script.site_packages / 'piptestpackage' not in result.files_created
@pytest.mark.network
def test_single_download_from_requirements_file(script):
    """Downloading from a requirements file works and installs nothing."""
    req_file = script.scratch_path.joinpath("test-req.txt")
    req_file.write_text(textwrap.dedent("""
        INITools==0.1
        """))
    result = script.pip(
        'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
    )
    assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
    assert script.site_packages / 'initools' not in result.files_created
@pytest.mark.network
def test_basic_download_should_download_dependencies(script):
    """Dependencies (python-openid) are downloaded alongside Paste[openid]."""
    result = script.pip('download', 'Paste[openid]==1.7.5.1', '-d', '.')
    created = result.files_created
    assert Path('scratch') / 'Paste-1.7.5.1.tar.gz' in created
    openid_prefix = str(Path('scratch') / 'python-openid-')
    assert any(name.startswith(openid_prefix) for name in created)
    assert script.site_packages / 'openid' not in created
def test_download_wheel_archive(script, data):
    """A direct wheel-archive path can be downloaded with -d."""
    wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
    wheel_path = '/'.join((data.find_links, wheel_filename))
    result = script.pip('download', wheel_path, '-d', '.', '--no-deps')
    assert Path('scratch') / wheel_filename in result.files_created
def test_download_should_download_wheel_deps(script, data):
    """Downloading a wheel also downloads its declared dependencies."""
    wheel_filename = 'colander-0.9.9-py2.py3-none-any.whl'
    dep_filename = 'translationstring-1.1.tar.gz'
    wheel_path = '/'.join((data.find_links, wheel_filename))
    result = script.pip(
        'download', wheel_path,
        '-d', '.', '--find-links', data.find_links, '--no-index',
    )
    for expected in (wheel_filename, dep_filename):
        assert Path('scratch') / expected in result.files_created
@pytest.mark.network
def test_download_should_skip_existing_files(script):
    """
    It should not download files already existing in the scratch dir
    """
    script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
        INITools==0.1
        """))
    # First run: INITools is downloaded into the scratch dir.
    result = script.pip(
        'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
    )
    assert Path('scratch') / 'INITools-0.1.tar.gz' in result.files_created
    assert script.site_packages / 'initools' not in result.files_created
    # adding second package to test-req.txt
    script.scratch_path.joinpath("test-req.txt").write_text(textwrap.dedent("""
        INITools==0.1
        python-openid==2.2.5
        """))
    # only the second package should be downloaded
    result = script.pip(
        'download', '-r', script.scratch_path / 'test-req.txt', '-d', '.',
    )
    openid_tarball_prefix = str(Path('scratch') / 'python-openid-')
    assert any(
        path.startswith(openid_tarball_prefix) for path in result.files_created
    )
    # INITools was fetched by the first run, so the second run must not
    # re-create it, and nothing may end up installed.
    assert Path('scratch') / 'INITools-0.1.tar.gz' not in result.files_created
    assert script.site_packages / 'initools' not in result.files_created
    assert script.site_packages / 'openid' not in result.files_created
@pytest.mark.network
def test_download_vcs_link(script):
    """-d works for VCS links (regression test for issue #798)."""
    result = script.pip(
        'download', '-d', '.', 'git+git://github.com/pypa/pip-test-package.git'
    )
    archive = Path('scratch') / 'pip-test-package-0.1.1.zip'
    assert archive in result.files_created
    assert script.site_packages / 'piptestpackage' not in result.files_created
def test_only_binary_set_then_download_specific_platform(script, data):
    """--platform is accepted when --only-binary=:all: is set."""
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.', '--platform', 'linux_x86_64', 'fake',
    )
    universal_wheel = Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
    assert universal_wheel in result.files_created
def test_no_deps_set_then_download_specific_platform(script, data):
    """--platform is accepted when --no-deps is set."""
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--no-deps',
        '--dest', '.', '--platform', 'linux_x86_64', 'fake',
    )
    universal_wheel = Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
    assert universal_wheel in result.files_created
def test_download_specific_platform_fails(script, data):
    """--platform without --no-deps or --only-binary=:all: must error out."""
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    outcome = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--dest', '.', '--platform', 'linux_x86_64', 'fake',
        expect_error=True,
    )
    assert '--only-binary=:all:' in outcome.stderr
def test_no_binary_set_then_download_specific_platform_fails(script, data):
    """--platform requires --only-binary=:all: with no --no-binary carve-outs."""
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    outcome = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--no-binary=fake',
        '--dest', '.', '--platform', 'linux_x86_64', 'fake',
        expect_error=True,
    )
    assert '--only-binary=:all:' in outcome.stderr
def test_download_specify_platform(script, data):
    """
    Test using "pip download --platform" to download a .whl archive
    supported for a specific platform
    """
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    # Confirm that universal wheels are returned even for specific
    # platforms.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'linux_x86_64',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
        in result.files_created
    )
    # The same universal wheel also satisfies a mac platform tag.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'macosx_10_9_x86_64',
        'fake'
    )
    data.reset()
    fake_wheel(data, 'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl')
    fake_wheel(data, 'fake-2.0-py2.py3-none-linux_x86_64.whl')
    # A newer OSX platform tag (10.10) accepts an older-tagged wheel (10.9).
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'macosx_10_10_x86_64',
        'fake'
    )
    assert (
        Path('scratch') /
        'fake-1.0-py2.py3-none-macosx_10_9_x86_64.whl'
        in result.files_created
    )
    # OSX platform wheels are not backward-compatible.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'macosx_10_8_x86_64',
        'fake',
        expect_error=True,
    )
    # No linux wheel provided for this version.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'linux_x86_64',
        'fake==1',
        expect_error=True,
    )
    # fake 2.0 does ship a linux wheel, so this one succeeds.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--platform', 'linux_x86_64',
        'fake==2'
    )
    assert (
        Path('scratch') / 'fake-2.0-py2.py3-none-linux_x86_64.whl'
        in result.files_created
    )
class TestDownloadPlatformManylinuxes(object):
    """
    "pip download --platform" downloads a .whl archive supported for
    manylinux platforms.
    """

    @pytest.mark.parametrize("platform", [
        "linux_x86_64",
        "manylinux1_x86_64",
        "manylinux2010_x86_64",
    ])
    def test_download_universal(self, platform, script, data):
        """
        Universal wheels are returned even for specific platforms.
        """
        fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
        result = script.pip(
            'download', '--no-index', '--find-links', data.find_links,
            '--only-binary=:all:',
            '--dest', '.',
            '--platform', platform,
            'fake',
        )
        assert (
            Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
            in result.files_created
        )

    @pytest.mark.parametrize("wheel_abi,platform", [
        ("manylinux1_x86_64", "manylinux1_x86_64"),
        ("manylinux1_x86_64", "manylinux2010_x86_64"),
        ("manylinux2010_x86_64", "manylinux2010_x86_64"),
    ])
    def test_download_compatible_manylinuxes(
        self, wheel_abi, platform, script, data,
    ):
        """
        Earlier manylinuxes are compatible with later manylinuxes.
        """
        wheel = 'fake-1.0-py2.py3-none-{}.whl'.format(wheel_abi)
        fake_wheel(data, wheel)
        result = script.pip(
            'download', '--no-index', '--find-links', data.find_links,
            '--only-binary=:all:',
            '--dest', '.',
            '--platform', platform,
            'fake',
        )
        assert Path('scratch') / wheel in result.files_created

    def test_explicit_platform_only(self, data, script):
        """
        When specifying the platform, manylinux1 needs to be the
        explicit platform--it won't ever be added to the compatible
        tags.
        """
        fake_wheel(data, 'fake-1.0-py2.py3-none-linux_x86_64.whl')
        # Succeeds because 'linux_x86_64' is requested literally; no
        # manylinux fallback is applied to an explicit --platform.
        script.pip(
            'download', '--no-index', '--find-links', data.find_links,
            '--only-binary=:all:',
            '--dest', '.',
            '--platform', 'linux_x86_64',
            'fake',
        )
def test_download__python_version(script, data):
    """
    Test using "pip download --python-version" to download a .whl archive
    supported for a specific interpreter
    """
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    # A universal (py2.py3) wheel satisfies any requested interpreter,
    # whether given as a major version or as major+minor (e.g. '27').
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '2',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
        in result.files_created
    )
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '3',
        'fake'
    )
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '27',
        'fake'
    )
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '33',
        'fake'
    )
    data.reset()
    fake_wheel(data, 'fake-1.0-py2-none-any.whl')
    fake_wheel(data, 'fake-2.0-py3-none-any.whl')
    # No py3 provided for version 1.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '3',
        'fake==1.0',
        expect_error=True,
    )
    # For python 2 only the py2-tagged 1.0 wheel is eligible.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '2',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-py2-none-any.whl'
        in result.files_created
    )
    # '26' (2.6) still matches the py2 wheel.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '26',
        'fake'
    )
    # For python 3 the py3-only 2.0 wheel is selected.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '3',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-2.0-py3-none-any.whl'
        in result.files_created
    )
def make_wheel_with_python_requires(script, package_name, python_requires):
    """
    Build a universal wheel whose metadata carries *python_requires*.

    :return: the path to the built wheel file.
    """
    pkg_dir = script.scratch_path / package_name
    pkg_dir.mkdir()
    setup_py = textwrap.dedent("""\
        from setuptools import setup
        setup(name='{}',
              python_requires='{}',
              version='1.0')
        """).format(package_name, python_requires)
    pkg_dir.joinpath('setup.py').write_text(setup_py)
    script.run(
        'python', 'setup.py', 'bdist_wheel', '--universal', cwd=pkg_dir,
    )
    wheel_name = '{}-1.0-py2.py3-none-any.whl'.format(package_name)
    return pkg_dir / 'dist' / wheel_name
def test_download__python_version_used_for_python_requires(
    script, data, with_wheel,
):
    """
    Test that --python-version is used for the Requires-Python check.
    """
    wheel_path = make_wheel_with_python_requires(
        script, 'mypackage', python_requires='==3.2',
    )
    wheel_dir = os.path.dirname(wheel_path)

    def make_args(python_version):
        # Shared download invocation; only the interpreter version varies.
        return [
            'download', '--no-index', '--find-links', wheel_dir,
            '--only-binary=:all:',
            '--dest', '.',
            '--python-version', python_version,
            'mypackage==1.0',
        ]

    # 3.3 violates the wheel's Requires-Python of ==3.2.
    args = make_args('33')
    result = script.pip(*args, expect_error=True)
    expected_err = (
        "ERROR: Package 'mypackage' requires a different Python: "
        "3.3.0 not in '==3.2'"
    )
    assert expected_err in result.stderr, 'stderr: {}'.format(result.stderr)
    # Now try with a --python-version that satisfies the Requires-Python.
    args = make_args('32')
    script.pip(*args)  # no exception
def test_download_specify_abi(script, data):
    """
    Test using "pip download --abi" to download a .whl archive
    supported for a specific abi
    """
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    # A 'none'-abi universal wheel matches any requested abi.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        '--abi', 'fake_abi',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
        in result.files_created
    )
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        '--abi', 'none',
        'fake'
    )
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--abi', 'cp27m',
        'fake',
    )
    data.reset()
    fake_wheel(data, 'fake-1.0-fk2-fakeabi-fake_platform.whl')
    # A fully-pinned version/implementation/platform/abi quadruple matches
    # the corresponding wheel tag exactly.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--python-version', '2',
        '--implementation', 'fk',
        '--platform', 'fake_platform',
        '--abi', 'fakeabi',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-fk2-fakeabi-fake_platform.whl'
        in result.files_created
    )
    # abi 'none' does not match the 'fakeabi'-tagged wheel.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        '--platform', 'fake_platform',
        '--abi', 'none',
        'fake',
        expect_error=True,
    )
def test_download_specify_implementation(script, data):
    """
    Test using "pip download --implementation" to download a .whl archive
    supported for a specific implementation
    """
    fake_wheel(data, 'fake-1.0-py2.py3-none-any.whl')
    # A universal wheel matches any requested implementation.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-py2.py3-none-any.whl'
        in result.files_created
    )
    data.reset()
    fake_wheel(data, 'fake-1.0-fk2.fk3-none-any.whl')
    # An implementation-specific (fk2.fk3) wheel matches when the same
    # implementation is requested.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-fk2.fk3-none-any.whl'
        in result.files_created
    )
    data.reset()
    fake_wheel(data, 'fake-1.0-fk3-none-any.whl')
    # An fk3-only wheel is acceptable for implementation 'fk' + python 3 ...
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        '--python-version', '3',
        'fake'
    )
    assert (
        Path('scratch') / 'fake-1.0-fk3-none-any.whl'
        in result.files_created
    )
    # ... but not for implementation 'fk' + python 2.
    result = script.pip(
        'download', '--no-index', '--find-links', data.find_links,
        '--only-binary=:all:',
        '--dest', '.',
        '--implementation', 'fk',
        '--python-version', '2',
        'fake',
        expect_error=True,
    )
def test_download_exit_status_code_when_no_requirements(script):
    """
    Test download exit status code when no requirements specified
    """
    result = script.pip('download', expect_error=True)
    expected_msg = "You must give at least one requirement to download"
    assert expected_msg in result.stderr
    assert result.returncode == ERROR
def test_download_exit_status_code_when_blank_requirements_file(script):
    """
    Test download exit status code when blank requirements file specified
    """
    blank_req = script.scratch_path.joinpath("blank.txt")
    blank_req.write_text("\n")
    # Must succeed (status 0) even though there is nothing to download.
    script.pip('download', '-r', 'blank.txt')
def test_download_prefer_binary_when_tarball_higher_than_wheel(script, data):
    """With --prefer-binary, the older wheel wins over a newer sdist."""
    fake_wheel(data, 'source-0.8-py2.py3-none-any.whl')
    result = script.pip(
        'download',
        '--prefer-binary',
        '--no-index',
        '-f', data.packages,
        '-d', '.', 'source'
    )
    created = result.files_created
    assert Path('scratch') / 'source-0.8-py2.py3-none-any.whl' in created
    assert Path('scratch') / 'source-1.0.tar.gz' not in created
def test_download_prefer_binary_when_wheel_doesnt_satisfy_req(script, data):
    """--prefer-binary falls back to the sdist when no wheel satisfies the req."""
    fake_wheel(data, 'source-0.8-py2.py3-none-any.whl')
    req_file = script.scratch_path.joinpath("test-req.txt")
    req_file.write_text(textwrap.dedent("""
        source>0.9
        """))
    result = script.pip(
        'download',
        '--prefer-binary',
        '--no-index',
        '-f', data.packages,
        '-d', '.',
        '-r', script.scratch_path / 'test-req.txt'
    )
    created = result.files_created
    assert Path('scratch') / 'source-1.0.tar.gz' in created
    assert Path('scratch') / 'source-0.8-py2.py3-none-any.whl' not in created
def test_download_prefer_binary_when_only_tarball_exists(script, data):
    """--prefer-binary still downloads the sdist when no wheel exists at all."""
    result = script.pip(
        'download',
        '--prefer-binary',
        '--no-index',
        '-f', data.packages,
        '-d', '.', 'source'
    )
    assert Path('scratch') / 'source-1.0.tar.gz' in result.files_created
|
from flask import Flask, render_template, redirect, url_for, flash, abort, send_file
import json
from datetime import datetime
import subprocess
from picamera.exc import PiCameraMMALError
from .robotdriver import DirError
from .utils import sys_info
from .camera import capture_img
# Module-level Flask application; `robot`, `streamer` and `is_stream` are
# attached to it by the launcher before the server starts (they are read
# by the views below but never assigned in this module).
app = Flask(__name__)
# NOTE(review): hard-coded secret key -- fine for a LAN toy, but should
# come from configuration if this is ever exposed more widely.
app.secret_key = 'some_secret'
@app.route('/')
def index():
    """Render the landing page with the stream state and system info."""
    return render_template(
        'index.html',
        is_stream=app.is_stream,
        sys_info=sys_info(),
    )
@app.route('/move/<direction>/')
@app.route('/move/<direction>/<period>/')
def move(direction, period=0.5):
    """Drive the robot in *direction* for *period* seconds.

    *period* arrives as a URL string (or the float default); a bad
    direction or a non-numeric period aborts with 406 Not Acceptable.
    """
    try:
        # One try block on purpose: float() may raise TypeError/ValueError
        # and robot.move may raise DirError -- all map to 406.
        app.robot.move(direction, float(period))
    except (DirError, TypeError, ValueError):
        abort(406)
    flash('Moving {} for {}s'.format(direction, period))
    return redirect(url_for('index'))
@app.route('/info/')
def info():
    """Return current system information as a JSON string."""
    sinfo = sys_info()
    return json.dumps(sinfo)
@app.route('/start_stream/')
def start_stream():
    """Start the video streamer (no-op with a flash message in --no-stream mode)."""
    if app.streamer is not None:
        app.streamer.start_stream()
        app.is_stream = True
        flash('Starting stream')
    else:
        # Typo fixed: message previously read 'Runing'.
        flash('Running in --no-stream mode')
    return redirect(url_for('index'))
@app.route('/stop_stream/')
def stop_stream():
    """Stop the video streamer (no-op with a flash message in --no-stream mode)."""
    if app.streamer is not None:
        app.streamer.stop_stream()
        app.is_stream = False
        flash('Stopping stream')
    else:
        # Typo fixed: message previously read 'Runing'.
        flash('Running in --no-stream mode')
    return redirect(url_for('index'))
@app.route('/capture/')
@app.route('/capture/<vflip>/')
def capture(vflip='true'):
    """Capture a still image and send it back as a JPEG.

    *vflip* ('true' or '1') flips the image vertically.  If the camera is
    busy streaming, redirect to the streamer's snapshot endpoint instead.
    """
    filename = '/tmp/{}.jpg'.format(str(datetime.now()))
    bvflip = vflip in ['true', '1']
    try:
        capture_img(filename, vflip=bvflip)
    except PiCameraMMALError:
        # Typo fixed: message previously read 'prgoress'.
        print('CAMERA: Streaming in progress, redirecting to snapshot...')
        return redirect('http://raspberrypi.local:8080/?action=snapshot')
    # 'image/jpeg' is the registered MIME type; 'image/jpg' is not.
    return send_file(filename, mimetype='image/jpeg')
if __name__ == '__main__':
    # Dev entry point; in normal operation the launcher imports `app` and
    # attaches robot/streamer attributes before serving.
    app.run()
|
#!/usr/bin/env python
import time
import sys
import threading
import queue
import collections
from foos.bus import Bus
class Pattern:
    """One step of an LED animation: hold *leds* lit for *time* seconds."""

    def __init__(self, time, leds=None):
        # Bug fix: the default used to be a shared mutable list (leds=[]),
        # so mutating one Pattern's leds would leak into later defaults.
        self.time = time
        self.leds = leds if leds is not None else []
def flatten(l):
    """Yield the elements of arbitrarily nested iterables in order.

    Non-iterable items are yielded as-is; iterables are recursed into.
    (Not intended for strings, which would recurse indefinitely.)
    """
    # Bug fix: collections.Iterable was removed in Python 3.10; the ABC
    # lives in collections.abc.  Imported locally to keep this block
    # self-contained.
    from collections.abc import Iterable
    for el in l:
        if isinstance(el, Iterable):
            for sub in flatten(el):
                yield sub
        else:
            yield el
class Plugin:
    """Drives the LED strip: translates bus events into LED blink patterns.

    Patterns are played on a daemon thread; putting a new mode on the
    queue preempts the pattern currently playing.
    """

    def __init__(self, bus):
        self.queue = queue.Queue()
        self.bus = bus
        # Map bus events to the pattern they trigger; tv_on clears the LEDs.
        fmap = {'score_goal': lambda d: self.setMode(pat_goal),
                'upload_ok': lambda d: self.setMode(pat_ok),
                'tv_standby': lambda d: self.setMode(pat_standby, loop=True),
                'tv_on': lambda d: self.setMode([]),
                'button_will_upload': lambda d: self.setMode(pat_upload_feedback),
                'upload_error': lambda d: self.setMode(pat_error)}
        self.bus.subscribe_map(fmap)
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        """Worker loop: play each queued pattern, replaying it while looping."""
        while True:
            loop, m = self.queue.get()
            first = True
            # Play at least once; keep replaying while `loop` is set and no
            # newer mode has been queued behind us.
            while first or loop:
                first = False
                for p in flatten(m):
                    if self.__canRun():
                        self.setLeds(p.leds)
                        self.__safeSleep(p.time)
                    else:
                        # A new mode arrived -- abandon this pattern.
                        loop = False
                        break
            # reset leds
            self.setLeds()

    def __safeSleep(self, t):
        # Sleep ~t seconds in 50 ms slices so a newly queued mode can
        # interrupt the pause early.
        start = time.time()
        while (time.time() < start + t) and self.__canRun():
            time.sleep(0.05)

    def __canRun(self):
        # The current pattern may continue only while no newer mode waits.
        return self.queue.empty()

    def setLeds(self, leds=[]):
        """Publish the set of LEDs that should be lit (default: all off)."""
        self.bus.notify("leds_enabled", leds)

    def setMode(self, mode, loop=False):
        """Queue *mode* (a nested list of Patterns) for playback.

        NOTE(review): self.stop is written here but never read anywhere in
        this class -- looks vestigial; confirm before removing.
        """
        self.stop = True
        self.queue.put((loop, mode))
# Named LED animations (lists of Pattern steps; nested lists are flattened
# at playback).  LED identifiers BI/BD/YI/YD/OK match the names the bus
# consumers use.
# Double flash of the four score LEDs, repeated three times.
pat_reset = 3 * [Pattern(0.2, ["BI", "BD", "YI", "YD"]),
                 Pattern(0.1),
                 Pattern(0.2, ["BI", "BD", "YI", "YD"]),
                 Pattern(1)]
# Slow 1s on / 1s off heartbeat on OK (queued with loop=True on tv_standby).
pat_standby = [Pattern(1, ["OK"]),
               Pattern(1)]
# Goal celebration: a quick sweep, then three fast all-LED flashes.
pat_goal = [[Pattern(0.1, ["BD", "YD"]),
             Pattern(0.1, ["OK"]),
             Pattern(0.1, ["BI", "YI"])],
            3 * [Pattern(0.1),
                 Pattern(0.1, ["BI", "BD", "OK", "YI", "YD"])]]
# Single short OK flash after a successful upload.
pat_ok = [Pattern(0.3, ["OK"])]
# Two quick OK blinks acknowledging the upload button press.
pat_upload_feedback = 2 * [Pattern(0.1, ["OK"]), Pattern(0.1)]
# Two slow blinks on YD+BD to signal an upload error.
pat_error = 2 * [Pattern(0.3, ["YD", "BD"]),
                 Pattern(0.3)]
# Walks each LED in turn; handy for manual testing.
pat_demo = [Pattern(1, ["BD"]),
            Pattern(1, ["BI"]),
            Pattern(1, ["YD"]),
            Pattern(1, ["YI"]),
            Pattern(1, ["OK"])]
if __name__ == "__main__":
    # Demo/debug mode: render the LED state as a line of '0' marks on
    # stdout (carriage return keeps everything on one line).
    def write_data(led_event):
        leds = led_event.data
        print("\r", end="")
        for led in ["BD", "BI", "OK", "YI", "YD"]:
            print("0" if led in leds else " ", end=" ")
        sys.stdout.flush()

    bus = Bus()
    bus.subscribe(write_data, thread=True)
    controller = Plugin(bus)
    # Play the looping standby pattern for 5s, then a goal celebration.
    controller.setMode(pat_standby, loop=True)
    time.sleep(5)
    controller.setMode(pat_goal)
    time.sleep(5)
|
import pandas as pd
import folium.folium as folium
import itertools
import numpy as np
import logging
import geojson as gj
import copy
import attrdict as ad
from functional import seq
# import emission.analysis.classification.cleaning.location_smoothing as ls
import bson.json_util as bju
import emission.storage.decorations.location_queries as lq
import emission.storage.decorations.trip_queries as esdt
import emission.storage.decorations.place_queries as esdp
import emission.storage.decorations.stop_queries as esds
import emission.storage.decorations.section_queries as esdsc
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.stop as ecws
import emission.core.wrapper.section as ecwsc
import emission.analysis.plotting.geojson.geojson_feature_converter as gfc
import emission.analysis.plotting.leaflet_osm.folium_geojson_plugin as fgjp
import emission.net.usercache.abstract_usercache as enua
import emission.net.api.usercache as enau
# Color palettes used to distinguish trips/sections on a map; the last
# entry of sel_color_list is reserved for stops / joining lines below.
# NOTE(review): 'fuschia' is not a valid CSS color name ('fuchsia' is) --
# confirm how folium handles it before relying on that entry.
all_color_list = ['black', 'brown', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'navy', 'pink', 'purple', 'red', 'snow', 'yellow']
sel_color_list = ['black', 'blue', 'chocolate', 'cyan', 'fuschia', 'green', 'lime', 'magenta', 'pink', 'purple', 'red', 'yellow']
def df_to_string_list(df):
    """
    Convert the input df into a list of strings, suitable for using as
    popups in a map.  This is a utility function.
    """
    records = df.to_dict(orient='records')
    return [str(record) for record in records]
def get_maps_for_range(user_id, start_ts, end_ts):
    """Build one folium map per trip for *user_id* between the two timestamps.

    Delegates to get_maps_for_geojson_list on the geojson for the range.
    """
    # (Removed an unused local accumulator that was never read.)
    geojson_list = gfc.get_geojson_for_ts(user_id, start_ts, end_ts)
    return get_maps_for_geojson_list(geojson_list)
def get_maps_for_usercache(user_id):
    """Build maps for every diary/trips document queued for the user's phone.

    Returns a list with one entry per day; each entry is the list of maps
    for that day's trips.
    """
    data_to_phone = seq(enau.sync_server_to_phone(user_id))
    logging.debug("Before pipeline, trips to phone list has length %d" % len(data_to_phone.to_list()))
    logging.debug("keys are %s" % data_to_phone.map(lambda e: ad.AttrDict(e).metadata.key))
    # Keep only the diary/trips entries and strip the usercache envelope
    # down to the payload.
    trips_to_phone = data_to_phone.map(lambda e: ad.AttrDict(e))\
        .filter(lambda e: e.metadata.key.startswith("diary/trips")) \
        .map(lambda e: e.data)
    logging.debug("After pipeline, trips to phone list has length %d" % len(trips_to_phone.to_list()))
    # logging.debug("trips_to_phone = %s" % trips_to_phone)
    maps_for_day = []
    for day in trips_to_phone:
        maps_for_day.append(get_maps_for_geojson_list(day))
    return maps_for_day
def get_maps_for_geojson_list(trip_geojson_list):
    """Build one folium map per trip, centered between its start and end,
    with the trip's geojson overlaid as a plugin."""
    def flipped_midpoint(p1, p2):
        # geojson stores (lon, lat); folium wants (lat, lon) -- hence the flip.
        return [(p1.coordinates[1] + p2.coordinates[1]) / 2,
                (p1.coordinates[0] + p2.coordinates[0]) / 2]

    map_list = []
    for trip_doc in trip_geojson_list:
        # logging.debug(trip_doc)
        trip_geojson = ad.AttrDict(trip_doc)
        logging.debug("centering based on start = %s, end = %s " % (trip_geojson.features[0], trip_geojson.features[1]))
        # Bug fix: the original used a Python-2-only tuple-unpacking lambda
        # (`lambda(p1, p2): ...`), which is a SyntaxError on Python 3.
        curr_map = folium.Map(flipped_midpoint(trip_geojson.features[0].geometry,
                                               trip_geojson.features[1].geometry))
        curr_plugin = fgjp.FoliumGeojsonPlugin(dict(trip_geojson))
        curr_map.add_plugin(curr_plugin)
        map_list.append(curr_map)
    return map_list
def flipped(coord):
    """Swap a (lon, lat) pair into (lat, lon) order (or vice versa)."""
    first, second = coord[0], coord[1]
    return (second, first)
def get_center_for_map(coords):
    """Return a (lat, lon) center for a list of (lon, lat) coords, or None.

    Empty input has no center; a single point is its own center; otherwise
    the midpoint of the first and last coordinates is used.
    """
    # logging.debug(trip_geojson)
    if len(coords) == 0:
        return None
    if len(coords) == 1:
        # Bug fix: the original passed the whole list to flipped(), which
        # raised IndexError for a single-point list; the single coordinate
        # itself is what must be flipped.
        return flipped(coords[0])
    logging.debug("Getting midpoint of %s and %s" % (coords[0], coords[-1]))
    # The original used a Python-2-only tuple-unpacking lambda here;
    # computed inline instead.
    midpoint = [(coords[0][0] + coords[-1][0]) / 2,
                (coords[0][1] + coords[-1][1]) / 2]
    return flipped(midpoint)
def get_maps_for_geojson_unsectioned(feature_list):
    """Build one centered folium map per feature, overlaying the feature."""
    maps = []
    for feature in feature_list:
        # logging.debug("Getting map for feature %s" % bju.dumps(feature))
        feature_coords = list(get_coords(feature))
        curr_map = folium.Map(get_center_for_map(feature_coords))
        curr_map.add_plugin(fgjp.FoliumGeojsonPlugin(dict(feature)))
        maps.append(curr_map)
    return maps
def get_coords(feature):
    """Collect the coordinates of a geojson feature, recursing into
    FeatureCollections."""
    if feature["type"] != "FeatureCollection":
        return gj.utils.coords(feature)
    collected = []
    for sub_feature in feature["features"]:
        collected.extend(get_coords(sub_feature))
    return collected
def get_maps_for_range_old(user_id, start_ts, end_ts):
    """Older map builder that reads trips/places/stops/sections directly.

    NOTE(review): uses a Python-2-only tuple-unpacking lambda below, so
    this function cannot run under Python 3 as written.
    """
    # First, get the timeline for that range.
    ts = esta.TimeSeries.get_time_series(user_id)
    trip_list = esdt.get_trips(user_id, enua.UserCache.TimeQuery("start_ts", start_ts, end_ts))
    # TODO: Should the timeline support random access as well?
    # If it did, we wouldn't need this additional map
    # I think that it would be good to support a doubly linked list, i.e. prev and next in addition
    # to the iteration interface
    place_list = esdp.get_places(user_id, enua.UserCache.TimeQuery("exit_ts", start_ts, end_ts))
    place_list = place_list + (esdp.get_places(user_id, enua.UserCache.TimeQuery("enter_ts", start_ts, end_ts)))
    place_map = dict([(p.get_id(), p) for p in place_list])
    map_list = []
    # Convert (lon, lat) geometry pairs to the (lat, lon) order folium wants.
    flipped_midpoint = lambda(p1, p2): [(p1.coordinates[1] + p2.coordinates[1])/2,
                                        (p1.coordinates[0] + p2.coordinates[0])/2]
    for i, trip in enumerate(trip_list):
        logging.debug("-" * 20 + trip.start_fmt_time + "=>" + trip.end_fmt_time
                      + "(" + str(trip.end_ts - trip.start_ts) + ")")
        # Skip trips with no internal structure: nothing to draw.
        if (len(esdt.get_sections_for_trip(user_id, trip.get_id())) == 0 and
                len(esdt.get_stops_for_trip(user_id, trip.get_id())) == 0):
            logging.debug("Skipping trip because it has no stops and no sections")
            continue
        start_point = gj.GeoJSON.to_instance(trip.start_loc)
        end_point = gj.GeoJSON.to_instance(trip.end_loc)
        curr_map = folium.Map(flipped_midpoint((start_point, end_point)))
        map_list.append(curr_map)
        logging.debug("About to display places %s and %s" % (trip.start_place, trip.end_place))
        update_place(curr_map, trip.start_place, place_map, marker_color='green')
        update_place(curr_map, trip.end_place, place_map, marker_color='red')
        # TODO: Should get_timeline_for_trip work on a trip_id or on a trip object
        # it seems stupid to convert trip object -> id -> trip object
        curr_trip_timeline = esdt.get_timeline_for_trip(user_id, trip.get_id())
        for i, trip_element in enumerate(curr_trip_timeline):
            # logging.debug("Examining element %s of type %s" % (trip_element, type(trip_element)))
            if type(trip_element) == ecws.Stop:
                # Stops are drawn in the reserved last palette color.
                time_query = esds.get_time_query_for_stop(trip_element.get_id())
                logging.debug("time_query for stop %s = %s" % (trip_element, time_query))
                stop_points_df = ts.get_data_df("background/filtered_location", time_query)
                # logging.debug("stop_points_df.head() = %s" % stop_points_df.head())
                if len(stop_points_df) > 0:
                    update_line(curr_map, stop_points_df, line_color = sel_color_list[-1],
                                popup="%s -> %s" % (trip_element.enter_fmt_time, trip_element.exit_fmt_time))
            else:
                # Sections are colored by their sensed transportation mode.
                assert(type(trip_element) == ecwsc.Section)
                time_query = esdsc.get_time_query_for_section(trip_element.get_id())
                logging.debug("time_query for section %s = %s" %
                              (trip_element, "[%s,%s,%s]" % (time_query.timeType, time_query.startTs, time_query.endTs)))
                section_points_df = ts.get_data_df("background/filtered_location", time_query)
                logging.debug("section_points_df.tail() = %s" % section_points_df.tail())
                if len(section_points_df) > 0:
                    update_line(curr_map, section_points_df, line_color = sel_color_list[trip_element.sensed_mode.value],
                                popup="%s (%s -> %s)" % (trip_element.sensed_mode, trip_element.start_fmt_time,
                                                         trip_element.end_fmt_time))
                else:
                    logging.warn("found no points for section %s" % trip_element)
    return map_list
def update_place(curr_map, place_id, place_map, marker_color='blue'):
    """Drop a marker for the place identified by *place_id* on *curr_map*.

    Silently logs and returns when the place is unknown or has no location.
    """
    if place_id is None or place_id not in place_map:
        logging.warn("place not mapped because place_id = %s and place_id in place_map = %s" % (place_id, place_id in place_map))
        return
    place = place_map[place_id]
    logging.debug("Retrieved place %s" % place)
    if not hasattr(place, "location"):
        logging.debug("starting place has no location, skipping")
        return
    # Coordinates are stored (lon, lat); reverse a copy into (lat, lon).
    coords = copy.copy(place.location.coordinates)
    coords.reverse()
    logging.debug("Displaying place at %s" % coords)
    curr_map.simple_marker(location=coords, popup=str(place), marker_color=marker_color)
def update_line(currMap, line_points, line_color=None, popup=None):
    """Draw *line_points* (a latitude/longitude dataframe) on *currMap* as
    per-point div markers plus a connecting line."""
    # Bug fix: DataFrame.as_matrix() was removed in pandas 0.25; .values
    # is the long-supported equivalent.  Computed once and reused.
    latlng = line_points[['latitude', 'longitude']].values.tolist()
    currMap.div_markers(latlng, df_to_string_list(line_points), marker_size=5)
    currMap.line(latlng,
                 line_color=line_color,
                 popup=popup)
##########################
# Everything below this line is from the time when we were evaluating
# segmentation and can potentially be deleted. It is also likely to have bitrotted.
# Let's hold off a bit on that until we have the replacement, though
##########################
def get_map_list(df, potential_splits):
    """Build one map per candidate trip slice of *df*, skipping slices that
    are too short to contain an interior point."""
    mapList = []
    potential_splits_list = list(potential_splits)
    for start, end in zip(potential_splits_list, potential_splits_list[1:]):
        trip = df[start:end]
        # Bug fix: Python-2 print statements converted to the function
        # form so this module at least parses under Python 3.
        print("Considering trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[start], df.formatted_time.loc[end], start, end))
        if end - start < 4:
            # If there are only 3 entries, that means that there is only one
            # point other than the start and the end, bail
            print("Ignoring trip from %s to %s because start = %d and end = %d" % (df.formatted_time.loc[start], df.formatted_time.loc[end], start, end))
            continue
        mapList.append(get_map(trip))
    return mapList
def get_map_list_after_segmentation(section_map, outlier_algo = None, filter_algo = None):
    """Build one map per trip, drawing each segmented section in the color
    of its activity and joining consecutive sections with a line.

    NOTE(review): depends on `ls` (location_smoothing), whose import at the
    top of this file is commented out -- this raises NameError as-is.
    """
    mapList = []
    for trip, section_list in section_map:
        logging.debug("%s %s -> %s %s" % ("=" * 20, trip.start_time, trip.end_time, "=" * 20))
        trip_df = lq.get_points_for_section(trip)
        # Center the map on the mean of the trip's points.
        curr_map = folium.Map([trip_df.mLatitude.mean(), trip_df.mLongitude.mean()])
        last_section_end = None
        for (i, section) in enumerate(section_list):
            logging.debug("%s %s: %s -> %s %s" %
                          ("-" * 20, i, section.start_time, section.end_time, "-" * 20))
            # Restrict the trip's points to this section's time window.
            raw_section_df = trip_df[np.logical_and(trip_df.mTime >= section.start_ts,
                                                    trip_df.mTime <= section.end_ts)]
            section_df = ls.filter_points(raw_section_df, outlier_algo, filter_algo)
            if section_df.shape[0] == 0:
                logging.info("Found empty df! skipping...")
                continue
            logging.debug("for section %s, section_df.shape = %s, formatted_time.head() = %s" %
                          (section, section_df.shape, section_df["formatted_time"].head()))
            update_map(curr_map, section_df, line_color = sel_color_list[section.activity.value],
                       popup = "%s" % (section.activity))
            if section_df.shape[0] > 0:
                curr_section_start = section_df.iloc[0]
                if i != 0 and last_section_end is not None:
                    # We want to join this to the previous section.
                    curr_map.line([[last_section_end.mLatitude, last_section_end.mLongitude],
                                   [curr_section_start.mLatitude, curr_section_start.mLongitude]],
                                  line_color = sel_color_list[-1],
                                  popup = "%s -> %s" % (section_list[i-1].activity, section.activity))
                last_section_end = section_df.iloc[-1]
        mapList.append(curr_map)
    return mapList
def get_map(section_points, line_color=None, popup=None):
    """Create a folium map centered on the points' mean and draw them on it."""
    center = [section_points.mLatitude.mean(), section_points.mLongitude.mean()]
    new_map = folium.Map(center)
    update_map(new_map, section_points, line_color, popup)
    return new_map
def update_map(currMap, section_points, line_color=None, popup=None):
    """Draw *section_points* (an mLatitude/mLongitude dataframe) on *currMap*
    as per-point div markers plus a connecting line."""
    # Bug fix: DataFrame.as_matrix() was removed in pandas 0.25; .values
    # is the long-supported equivalent.  Computed once and reused.
    latlng = section_points[['mLatitude', 'mLongitude']].values.tolist()
    currMap.div_markers(latlng, df_to_string_list(section_points), marker_size=5)
    currMap.line(latlng,
                 line_color=line_color,
                 popup=popup)
def evaluate_filtering(section_list, outlier_algos, filtering_algos):
    """
    TODO: Is this the best place for this? If not, what is?
    It almost seems like we need to have a separate evaluation module that is
    separate from the plotting and the calculation modules.
    But then, what is the purpose of this module?
    """
    # NOTE(review): `ls` (location_smoothing) is referenced here but its
    # import at the top of the file is commented out -- this function will
    # raise NameError as the module stands.  Confirm before use.
    # Produces an nRows x nCols grid of maps: per section, the raw points,
    # the default filtering, then one map per (outlier, filter) combination.
    nCols = 2 + len(outlier_algos) * len(filtering_algos)
    nRows = len(section_list)
    map_list = []
    for section in section_list:
        curr_compare_list = []
        section_df = ls.get_section_points(section)
        # Column 1: raw points; column 2: filtering with no algorithms.
        curr_compare_list.append(get_map(section_df))
        curr_compare_list.append(get_map(ls.filter_points(section_df, None, None)))
        for (oa, fa) in itertools.product(outlier_algos, filtering_algos):
            curr_filtered_df = ls.filter_points(section_df, oa, fa)
            print ("After filtering with %s, %s, size is %s" % (oa, fa, curr_filtered_df.shape))
            if "activity" in section:
                # Color by the section's activity when it is known.
                curr_compare_list.append(get_map(curr_filtered_df,
                                                 line_color = sel_color_list[section.activity.value],
                                                 popup = "%s" % (section.activity)))
            else:
                curr_compare_list.append(get_map(curr_filtered_df))
        assert(len(curr_compare_list) == nCols)
        map_list.append(curr_compare_list)
    assert(len(map_list) == nRows)
    return map_list
|
#-------------------------------------------------------------------------------
# $Id$
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2011 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
import logging
from eoxserver.core import Component, implements
from eoxserver.core.config import get_eoxserver_config
from eoxserver.contrib import mapserver as ms
from eoxserver.resources.coverages.crss import CRSsConfigReader
from eoxserver.services.result import (
result_set_from_raw_data, get_content_type
)
from eoxserver.services.exceptions import RenderException
from eoxserver.services.ows.wms.exceptions import InvalidCRS, InvalidFormat
from eoxserver.services.ows.wms.interfaces import (
WMSCapabilitiesRendererInterface
)
logger = logging.getLogger(__name__)
class MapServerCapabilitiesRenderer(Component):
    """ Base class for various WMS render components using MapServer.
    """
    implements(WMSCapabilitiesRendererInterface)

    def render(self):
        """Render a WMS 1.3.0 GetCapabilities document via MapServer.

        Returns a (result_set, content_type) tuple built from the raw
        MapServer response.
        """
        # NOTE(review): section name "wmm" looks like a possible typo for
        # "wms" -- confirm against the EOxServer configuration schema.
        mapfile_path = get_eoxserver_config().get("wmm", "mapfile")
        map_ = ms.mapObj(mapfile_path) #TODO: path to map
        map_.setMetaData("ows_enable_request", "*")
        map_.setProjection("EPSG:4326")
        map_.imagecolor.setRGB(0, 0, 0)
        # set supported CRSs
        decoder = CRSsConfigReader(get_eoxserver_config())
        crss_string = " ".join(
            map(lambda crs: "EPSG:%d" % crs, decoder.supported_crss_wms)
        )
        # Advertise the same CRS list under both metadata keys.
        map_.setMetaData("ows_srs", crss_string)
        map_.setMetaData("wms_srs", crss_string)
        ms_request = ms.create_request((
            ("service", "WMS"),
            ("version", "1.3.0"),
            ("request", "GetCapabilities"),
        ))
        raw_result = map_.dispatch(ms_request)
        result = result_set_from_raw_data(raw_result)
        return result, get_content_type(result)
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the nullable 'email' column to pyconkr_speaker."""
        # Adding field 'Speaker.email'
        db.add_column(u'pyconkr_speaker', 'email',
                      self.gf('django.db.models.fields.EmailField')(max_length=255, null=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Speaker.email'
db.delete_column(u'pyconkr_speaker', 'email')
models = {
u'pyconkr.announcement': {
'Meta': {'ordering': "['-id']", 'object_name': 'Announcement'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'})
},
u'pyconkr.jobfair': {
'Meta': {'object_name': 'Jobfair'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'sponsor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pyconkr.Sponsor']", 'null': 'True'})
},
u'pyconkr.program': {
'Meta': {'object_name': 'Program'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pyconkr.ProgramCategory']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pyconkr.ProgramDate']"}),
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'rooms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['pyconkr.Room']", 'null': 'True', 'blank': 'True'}),
'slide_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'speakers': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pyconkr.Speaker']", 'symmetrical': 'False', 'blank': 'True'}),
'times': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['pyconkr.ProgramTime']", 'symmetrical': 'False'})
},
u'pyconkr.programcategory': {
'Meta': {'object_name': 'ProgramCategory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'})
},
u'pyconkr.programdate': {
'Meta': {'object_name': 'ProgramDate'},
'day': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'pyconkr.programtime': {
'Meta': {'object_name': 'ProgramTime'},
'begin': ('django.db.models.fields.TimeField', [], {}),
'end': ('django.db.models.fields.TimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pyconkr.room': {
'Meta': {'object_name': 'Room'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'pyconkr.speaker': {
'Meta': {'ordering': "['name']", 'object_name': 'Speaker'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'info': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'})
},
u'pyconkr.sponsor': {
'Meta': {'object_name': 'Sponsor'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['pyconkr.SponsorLevel']", 'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'pyconkr.sponsorlevel': {
'Meta': {'object_name': 'SponsorLevel'},
'desc': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '1'})
}
}
complete_apps = ['pyconkr']
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from operator import attrgetter
import re
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import osv, fields
from openerp.tools import ustr
from openerp.tools.translate import _
from openerp import exceptions
_logger = logging.getLogger(__name__)
class res_config_module_installation_mixin(object):
    """Mixin providing batch installation of addon modules."""

    def _install_modules(self, cr, uid, modules, context):
        """Install the requested modules and return the next action to run.

        ``modules`` is a list of ``(module_name, browse_record | None)``
        tuples; a ``None`` record marks a module that could not be found.
        """
        ir_module = self.pool.get('ir.module.module')
        missing_names = [name for name, record in modules if not record]
        pending_ids = [record.id for name, record in modules
                       if record and record.state == 'uninstalled']

        action = None
        if pending_ids:
            action = ir_module.button_immediate_install(cr, uid, pending_ids, context=context)
        # FIXME: if action is not None, the corresponding todo will be
        # skipped because it was just marked done
        if missing_names:
            # hand the unknown names over to the Apps client so the user
            # can locate and install them there
            return {
                'type': 'ir.actions.client',
                'tag': 'apps',
                'params': {'modules': missing_names},
            }
        return action
class res_config_configurable(osv.osv_memory):
    ''' Base classes for new-style configuration items

    Configuration items should inherit from this class, implement
    the execute method (and optionally the cancel one) and have
    their view inherit from the related res_config_view_base view.
    '''
    _name = 'res.config'

    def _next_action(self, cr, uid, context=None):
        """Return the next open 'automatic' todo visible to the user, or None."""
        Todos = self.pool['ir.actions.todo']
        _logger.info('getting next %s', Todos)
        active_todos = Todos.browse(cr, uid,
            Todos.search(cr, uid, ['&', ('type', '=', 'automatic'), ('state','=','open')]),
            context=context)
        # ids of the groups the current user belongs to
        user_groups = set(map(
            lambda g: g.id,
            self.pool['res.users'].browse(cr, uid, [uid], context=context)[0].groups_id))
        # a todo with no groups is visible to everyone; otherwise the user
        # must share at least one group with the todo
        valid_todos_for_user = [
            todo for todo in active_todos
            if not todo.groups_id or bool(user_groups.intersection((
                group.id for group in todo.groups_id)))
        ]
        if valid_todos_for_user:
            return valid_todos_for_user[0]
        return None

    def _next(self, cr, uid, context=None):
        """Launch the next todo action, or reload the client when none is left."""
        _logger.info('getting next operation')
        next = self._next_action(cr, uid, context=context)
        _logger.info('next action is %s', next)
        if next:
            res = next.action_launch(context=context)
            res['nodestroy'] = False
            return res
        # reload the client; open the first available root menu
        menu_obj = self.pool['ir.ui.menu']
        menu_ids = menu_obj.search(cr, uid, [('parent_id', '=', False)], context=context)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
            'params': {'menu_id': menu_ids and menu_ids[0] or False},
        }

    def start(self, cr, uid, ids, context=None):
        # entry point of the configuration sequence; simply delegates to next()
        return self.next(cr, uid, ids, context)

    def next(self, cr, uid, ids, context=None):
        """ Returns the next todo action to execute (using the default
        sort order)
        """
        return self._next(cr, uid, context=context)

    def execute(self, cr, uid, ids, context=None):
        """ Method called when the user clicks on the ``Next`` button.

        Execute *must* be overloaded unless ``action_next`` is overloaded
        (which is something you generally don't need to do).

        If ``execute`` returns an action dictionary, that action is executed
        rather than just going to the next configuration item.
        """
        raise NotImplementedError(
            'Configuration items need to implement execute')

    def cancel(self, cr, uid, ids, context=None):
        """ Method called when the user click on the ``Skip`` button.

        ``cancel`` should be overloaded instead of ``action_skip``. As with
        ``execute``, if it returns an action dictionary that action is
        executed in stead of the default (going to the next configuration item)

        The default implementation is a NOOP.

        ``cancel`` is also called by the default implementation of
        ``action_cancel``.
        """
        pass

    def action_next(self, cr, uid, ids, context=None):
        """ Action handler for the ``next`` event.

        Sets the status of the todo the event was sent from to
        ``done``, calls ``execute`` and -- unless ``execute`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.execute(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_skip(self, cr, uid, ids, context=None):
        """ Action handler for the ``skip`` event.

        Sets the status of the todo the event was sent from to
        ``skip``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)

    def action_cancel(self, cr, uid, ids, context=None):
        """ Action handler for the ``cancel`` event. That event isn't
        generated by the res.config.view.base inheritable view, the
        inherited view has to overload one of the buttons (or add one
        more).

        Sets the status of the todo the event was sent from to
        ``cancel``, calls ``cancel`` and -- unless ``cancel`` returned
        an action dictionary -- executes the action provided by calling
        ``next``.
        """
        next = self.cancel(cr, uid, ids, context=context)
        if next: return next
        return self.next(cr, uid, ids, context=context)
class res_config_installer(osv.osv_memory, res_config_module_installation_mixin):
    """ New-style configuration base specialized for addons selection
    and installation.

    Basic usage
    -----------

    Subclasses can simply define a number of _columns as
    fields.boolean objects. The keys (column names) should be the
    names of the addons to install (when selected). Upon action
    execution, selected boolean fields (and those only) will be
    interpreted as addons to install, and batch-installed.

    Additional addons
    -----------------

    It is also possible to require the installation of an additional
    addon set when a specific preset of addons has been marked for
    installation (in the basic usage only, additionals can't depend on
    one another).

    These additionals are defined through the ``_install_if``
    property. This property is a mapping of a collection of addons (by
    name) to a collection of addons (by name) [#]_, and if all the *key*
    addons are selected for installation, then the *value* ones will
    be selected as well. For example::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
        }

    This will install the ``sale_crm`` addon if and only if both the
    ``sale`` and ``crm`` addons are selected for installation.

    You can define as many additionals as you wish, and additionals
    can overlap in key and value. For instance::

        _install_if = {
            ('sale','crm'): ['sale_crm'],
            ('sale','project'): ['project_mrp'],
        }

    will install both ``sale_crm`` and ``project_mrp`` if all of
    ``sale``, ``crm`` and ``project`` are selected for installation.

    Hook methods
    ------------

    Subclasses might also need to express dependencies more complex
    than that provided by additionals. In this case, it's possible to
    define methods of the form ``_if_%(name)s`` where ``name`` is the
    name of a boolean field. If the field is selected, then the
    corresponding module will be marked for installation *and* the
    hook method will be executed.

    Hook methods take the usual set of parameters (cr, uid, ids,
    context) and can return a collection of additional addons to
    install (if they return anything, otherwise they should not return
    anything, though returning any "falsy" value such as None or an
    empty collection will have the same effect).

    Complete control
    ----------------

    The last hook is to simply overload the ``modules_to_install``
    method, which implements all the mechanisms above. This method
    takes the usual set of parameters (cr, uid, ids, context) and
    returns a ``set`` of addons to install (addons selected by the
    above methods minus addons from the *basic* set which are already
    installed) [#]_ so an overloader can simply manipulate the ``set``
    returned by ``res_config_installer.modules_to_install`` to add or
    remove addons.

    Skipping the installer
    ----------------------

    Unless it is removed from the view, installers have a *skip*
    button which invokes ``action_skip`` (and the ``cancel`` hook from
    ``res.config``). Hooks and additionals *are not run* when skipping
    installation, even for already installed addons.

    Again, setup your hooks accordingly.

    .. [#] note that since a mapping key needs to be hashable, it's
           possible to use a tuple or a frozenset, but not a list or a
           regular set

    .. [#] because the already-installed modules are only pruned at
           the very end of ``modules_to_install``, additionals and
           hooks depending on them *are guaranteed to execute*. Setup
           your hooks accordingly.
    """
    _name = 'res.config.installer'
    _inherit = 'res.config'

    # mapping of tuple-of-addon-names -> collection-of-addon-names; see
    # "Additional addons" in the class docstring
    _install_if = {}

    def already_installed(self, cr, uid, context=None):
        """ For each module, check if it's already installed and if it
        is return its name

        :returns: a list of the already installed modules in this
                  installer
        :rtype: [str]
        """
        return map(attrgetter('name'),
                   self._already_installed(cr, uid, context=context))

    def _already_installed(self, cr, uid, context=None):
        """ For each module (boolean fields in a res.config.installer),
        check if it's already installed (either 'to install', 'to upgrade'
        or 'installed') and if it is return the module's browse_record

        :returns: a list of all installed modules in this installer
        :rtype: [browse_record]
        """
        modules = self.pool['ir.module.module']
        # only boolean columns of the installer are module selectors
        selectable = [field for field in self._columns
                      if type(self._columns[field]) is fields.boolean]
        return modules.browse(
            cr, uid,
            modules.search(cr, uid,
                           [('name','in',selectable),
                            ('state','in',['to install', 'installed', 'to upgrade'])],
                           context=context),
            context=context)

    def modules_to_install(self, cr, uid, ids, context=None):
        """ selects all modules to install:

        * checked boolean fields
        * return values of hook methods. Hook methods are of the form
          ``_if_%(addon_name)s``, and are called if the corresponding
          addon is marked for installation. They take the arguments
          cr, uid, ids and context, and return an iterable of addon
          names
        * additionals, additionals are setup through the ``_install_if``
          class variable. ``_install_if`` is a dict of {iterable:iterable}
          where key and value are iterables of addon names.

          If all the addons in the key are selected for installation
          (warning: addons added through hooks don't count), then the
          addons in the value are added to the set of modules to install
        * not already installed
        """
        # base set: boolean installer fields that are checked
        base = set(module_name
                   for installer in self.read(cr, uid, ids, context=context)
                   for module_name, to_install in installer.iteritems()
                   if module_name != 'id'
                   if type(self._columns[module_name]) is fields.boolean
                   if to_install)

        # hooks: run _if_<name> for each selected addon and collect extras
        hooks_results = set()
        for module in base:
            hook = getattr(self, '_if_%s'% module, None)
            if hook:
                hooks_results.update(hook(cr, uid, ids, context=None) or set())

        # additionals: consequences whose requirements are fully selected
        additionals = set(
            module for requirements, consequences \
                       in self._install_if.iteritems()
                   if base.issuperset(requirements)
                   for module in consequences)

        # prune already-installed modules only at the very end (see docstring)
        return (base | hooks_results | additionals).difference(
            self.already_installed(cr, uid, context))

    def default_get(self, cr, uid, fields_list, context=None):
        ''' If an addon is already installed, check it by default
        '''
        defaults = super(res_config_installer, self).default_get(
            cr, uid, fields_list, context=context)

        return dict(defaults,
                    **dict.fromkeys(
                        self.already_installed(cr, uid, context=context),
                        True))

    def fields_get(self, cr, uid, fields=None, context=None, write_access=True):
        """ If an addon is already installed, set it to readonly as
        res.config.installer doesn't handle uninstallations of already
        installed addons
        """
        fields = super(res_config_installer, self).fields_get(
            cr, uid, fields, context, write_access)

        for name in self.already_installed(cr, uid, context=context):
            if name not in fields:
                continue
            fields[name].update(
                readonly=True,
                help= ustr(fields[name].get('help', '')) +
                    _('\n\nThis addon is already installed on your system'))

        return fields

    def execute(self, cr, uid, ids, context=None):
        """Install every selected module and return the resulting action."""
        to_install = list(self.modules_to_install(
            cr, uid, ids, context=context))
        _logger.info('Selecting addons %s to install', to_install)

        ir_module = self.pool.get('ir.module.module')
        modules = []
        # pair each name with its browse_record (or None when missing) so
        # the mixin can decide between install and the Apps client action
        for name in to_install:
            mod_ids = ir_module.search(cr, uid, [('name', '=', name)])
            record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
            modules.append((name, record))

        return self._install_modules(cr, uid, modules, context=context)
class res_config_settings(osv.osv_memory, res_config_module_installation_mixin):
    """ Base configuration wizard for application settings. It provides support for setting
    default values, assigning groups to employee users, and installing modules.

    To make such a 'settings' wizard, define a model like::

        class my_config_wizard(osv.osv_memory):
            _name = 'my.settings'
            _inherit = 'res.config.settings'
            _columns = {
                'default_foo': fields.type(..., default_model='my.model'),
                'group_bar': fields.boolean(..., group='base.group_user', implied_group='my.group'),
                'module_baz': fields.boolean(...),
                'other_field': fields.type(...),
            }

    The method ``execute`` provides some support based on a naming convention:

    * For a field like 'default_XXX', ``execute`` sets the (global) default value of
      the field 'XXX' in the model named by ``default_model`` to the field's value.

    * For a boolean field like 'group_XXX', ``execute`` adds/removes 'implied_group'
      to/from the implied groups of 'group', depending on the field's value.
      By default 'group' is the group Employee. Groups are given by their xml id.

    * For a boolean field like 'module_XXX', ``execute`` triggers the immediate
      installation of the module named 'XXX' if the field has value ``True``.

    * For the other fields, the method ``execute`` invokes all methods with a name
      that starts with 'set_'; such methods can be defined to implement the effect
      of those fields.

    The method ``default_get`` retrieves values that reflect the current status of the
    fields like 'default_XXX', 'group_XXX' and 'module_XXX'. It also invokes all methods
    with a name that starts with 'get_default_'; such methods can be defined to provide
    current values for other fields.
    """
    _name = 'res.config.settings'

    def copy(self, cr, uid, id, values, context=None):
        # settings wizards are transient snapshots; duplicating one is meaningless
        raise osv.except_osv(_("Cannot duplicate configuration!"), "")

    def _get_classified_fields(self, cr, uid, context=None):
        """ return a dictionary with the fields classified by category::

                {   'default': [('default_foo', 'model', 'foo'), ...],
                    'group':   [('group_bar', browse_group, browse_implied_group), ...],
                    'module':  [('module_baz', browse_module), ...],
                    'other':   ['other_field', ...],
                }
        """
        ir_model_data = self.pool['ir.model.data']
        ir_module = self.pool['ir.module.module']

        def ref(xml_id):
            # resolve "module.xml_id" into a browse_record
            mod, xml = xml_id.split('.', 1)
            return ir_model_data.get_object(cr, uid, mod, xml, context)

        defaults, groups, modules, others = [], [], [], []
        for name, field in self._columns.items():
            # classification is driven purely by the column name prefix
            if name.startswith('default_') and hasattr(field, 'default_model'):
                defaults.append((name, field.default_model, name[8:]))
            elif name.startswith('group_') and isinstance(field, fields.boolean) and hasattr(field, 'implied_group'):
                # 'group' attribute is optional and falls back to Employee
                field_group = getattr(field, 'group', 'base.group_user')
                groups.append((name, ref(field_group), ref(field.implied_group)))
            elif name.startswith('module_') and isinstance(field, fields.boolean):
                mod_ids = ir_module.search(cr, uid, [('name', '=', name[7:])])
                record = ir_module.browse(cr, uid, mod_ids[0], context) if mod_ids else None
                modules.append((name, record))
            else:
                others.append(name)

        return {'default': defaults, 'group': groups, 'module': modules, 'other': others}

    def default_get(self, cr, uid, fields, context=None):
        """Populate the wizard with the current state of each classified field."""
        ir_values = self.pool['ir.values']
        classified = self._get_classified_fields(cr, uid, context)

        res = super(res_config_settings, self).default_get(cr, uid, fields, context)

        # defaults: take the corresponding default value they set
        for name, model, field in classified['default']:
            value = ir_values.get_default(cr, uid, model, field)
            if value is not None:
                res[name] = value

        # groups: which groups are implied by the group Employee
        for name, group, implied_group in classified['group']:
            res[name] = implied_group in group.implied_ids

        # modules: which modules are installed/to install
        for name, module in classified['module']:
            res[name] = module and module.state in ('installed', 'to install', 'to upgrade')

        # other fields: call all methods that start with 'get_default_'
        for method in dir(self):
            if method.startswith('get_default_'):
                res.update(getattr(self, method)(cr, uid, fields, context))

        return res

    def execute(self, cr, uid, ids, context=None):
        """Apply the wizard values: defaults, groups, custom setters, modules."""
        ir_values = self.pool['ir.values']
        ir_module = self.pool['ir.module.module']
        classified = self._get_classified_fields(cr, uid, context)

        config = self.browse(cr, uid, ids[0], context)

        # default values fields
        for name, model, field in classified['default']:
            ir_values.set_default(cr, uid, model, field, config[name])

        # group fields: modify group / implied groups
        for name, group, implied_group in classified['group']:
            if config[name]:
                group.write({'implied_ids': [(4, implied_group.id)]})
            else:
                group.write({'implied_ids': [(3, implied_group.id)]})
                # also drop the group's members from the no-longer-implied group
                implied_group.write({'users': [(3, u.id) for u in group.users]})

        # other fields: execute all methods that start with 'set_'
        for method in dir(self):
            if method.startswith('set_'):
                getattr(self, method)(cr, uid, ids, context)

        # module fields: install/uninstall the selected modules
        to_install = []
        to_uninstall_ids = []
        lm = len('module_')
        for name, module in classified['module']:
            if config[name]:
                to_install.append((name[lm:], module))
            else:
                if module and module.state in ('installed', 'to upgrade'):
                    to_uninstall_ids.append(module.id)

        if to_uninstall_ids:
            ir_module.button_immediate_uninstall(cr, uid, to_uninstall_ids, context=context)

        action = self._install_modules(cr, uid, to_install, context=context)

        if action:
            return action

        # After the uninstall/install calls, the self.pool is no longer valid.
        # So we reach into the RegistryManager directly.
        res_config = openerp.modules.registry.RegistryManager.get(cr.dbname)['res.config']
        config = res_config.next(cr, uid, [], context=context) or {}
        if config.get('type') not in ('ir.actions.act_window_close',):
            return config

        # force client-side reload (update user menu and current view)
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }

    def cancel(self, cr, uid, ids, context=None):
        # ignore the current record, and send the action to reopen the view
        act_window = self.pool['ir.actions.act_window']
        action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)])
        if action_ids:
            return act_window.read(cr, uid, action_ids[0], [], context=context)
        return {}

    def name_get(self, cr, uid, ids, context=None):
        """ Override name_get method to return an appropriate configuration wizard
        name, and not the generated name."""
        if not ids:
            return []
        # name_get may receive int id instead of an id list
        if isinstance(ids, (int, long)):
            ids = [ids]

        act_window = self.pool['ir.actions.act_window']
        action_ids = act_window.search(cr, uid, [('res_model', '=', self._name)], context=context)
        name = self._name
        if action_ids:
            name = act_window.read(cr, uid, action_ids[0], ['name'], context=context)['name']
        return [(record.id, name) for record in self.browse(cr, uid , ids, context=context)]

    def get_option_path(self, cr, uid, menu_xml_id, context=None):
        """
        Fetch the path to a specified configuration view and the action id to access it.

        :param string menu_xml_id: the xml id of the menuitem where the view is located,
            structured as follows: module_name.menuitem_xml_id (e.g.: "base.menu_sale_config")
        :return tuple:
            - t[0]: string: full path to the menuitem (e.g.: "Settings/Configuration/Sales")
            - t[1]: int or long: id of the menuitem's action
        """
        module_name, menu_xml_id = menu_xml_id.split('.')
        dummy, menu_id = self.pool['ir.model.data'].get_object_reference(cr, uid, module_name, menu_xml_id)
        ir_ui_menu = self.pool['ir.ui.menu'].browse(cr, uid, menu_id, context=context)

        return (ir_ui_menu.complete_name, ir_ui_menu.action.id)

    def get_option_name(self, cr, uid, full_field_name, context=None):
        """
        Fetch the human readable name of a specified configuration option.

        :param string full_field_name: the full name of the field, structured as follows:
            model_name.field_name (e.g.: "sale.config.settings.fetchmail_lead")
        :return string: human readable name of the field (e.g.: "Create leads from incoming mails")
        """
        model_name, field_name = full_field_name.rsplit('.', 1)

        return self.pool[model_name].fields_get(cr, uid, allfields=[field_name], context=context)[field_name]['string']

    def get_config_warning(self, cr, msg, context=None):
        """
        Helper: return a Warning exception with the given message where the %(field:xxx)s
        and/or %(menu:yyy)s are replaced by the human readable field's name and/or menuitem's
        full path.

        Usage:
        ------
        Just include in your error message %(field:model_name.field_name)s to obtain the human
        readable field's name, and/or %(menu:module_name.menuitem_xml_id)s to obtain the menuitem's
        full path.

        Example of use:
        ---------------
        from openerp.addons.base.res.res_config import get_warning_config
        raise get_warning_config(cr, _("Error: this action is prohibited. You should check the field %(field:sale.config.settings.fetchmail_lead)s in %(menu:base.menu_sale_config)s."), context=context)

        This will return an exception containing the following message:
            Error: this action is prohibited. You should check the field Create leads from incoming mails in Settings/Configuration/Sales.

        What if there is another substitution in the message already?
        -------------------------------------------------------------
        You could have a situation where the error message you want to upgrade already contains a substitution. Example:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.
        What you want to do here is simply to replace the path by %menu:account.menu_account_config)s, and leave the rest alone.
        In order to do that, you can use the double percent (%%) to escape your new substitution, like so:
            Cannot find any account journal of %s type for this company.\n\nYou can create one in the %%(menu:account.menu_account_config)s.
        """
        # self.pool may be stale here; resolve a fresh registry from the cursor
        res_config_obj = openerp.registry(cr.dbname)['res.config.settings']
        regex_path = r'%\(((?:menu|field):[a-z_\.]*)\)s'

        # Process the message
        # 1/ find the menu and/or field references, put them in a list
        references = re.findall(regex_path, msg, flags=re.I)

        # 2/ fetch the menu and/or field replacement values (full path and
        #    human readable field's name) and the action_id if any
        values = {}
        action_id = None
        for item in references:
            ref_type, ref = item.split(':')
            if ref_type == 'menu':
                values[item], action_id = res_config_obj.get_option_path(cr, SUPERUSER_ID, ref, context=context)
            elif ref_type == 'field':
                values[item] = res_config_obj.get_option_name(cr, SUPERUSER_ID, ref, context=context)

        # 3/ substitute and return the result
        if (action_id):
            return exceptions.RedirectWarning(msg % values, action_id, _('Go to the configuration panel'))
        return exceptions.Warning(msg % values)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
#!/usr/bin/env python
"""Packaging script for django-mptt."""
from setuptools import find_packages, setup

# Trove classifiers advertised on PyPI.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Environment :: Web Environment',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python',
    "Programming Language :: Python :: 2",
    "Programming Language :: Python :: 2.7",
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.2",
    "Programming Language :: Python :: 3.3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: Implementation :: CPython",
    "Programming Language :: Python :: Implementation :: PyPy",
    'Topic :: Utilities',
]

INSTALL_REQUIRES = [
    'Django>=1.8',
]

TESTS_REQUIRE = [
    'mock-django>=0.6.7',
    'mock>=1.3',
]

setup(
    name='django-mptt',
    # version is read from the package itself to keep a single source of truth
    version=__import__('mptt').__version__,
    description='''Utilities for implementing Modified Preorder Tree Traversal
    with your Django Models and working with trees of Model instances.''',
    author='Craig de Stigter',
    author_email='craig.ds@gmail.com',
    url='http://github.com/django-mptt/django-mptt',
    license='MIT License',
    packages=find_packages(),
    include_package_data=True,
    install_requires=INSTALL_REQUIRES,
    tests_require=TESTS_REQUIRE,
    classifiers=CLASSIFIERS,
)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 Camptocamp (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import account_fiscalyear
from . import ir_sequence_type
from . import ir_sequence
from . import account
from . import account_move
from . import ir_sequence_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from collections import defaultdict
import json
from django.conf import settings
from django.db import connection
from specifyweb.specify.models import (
Splocalecontainer as Container,
Splocalecontaineritem as Item,
Splocaleitemstr as SpString)
# Per-process cache of serialized schema localizations, keyed by
# (discipline, schematype).  Never invalidated; assumes the localization
# tables are static for the lifetime of the server process.
schema_localization_cache = {}

def get_schema_localization(collection, schematype):
    """Return the schema localization for *collection*'s discipline as a JSON string.

    Builds a mapping of container name -> container info (including an
    ``items`` sub-mapping keyed by lower-cased item name) straight from the
    ``splocalecontainer``/``splocalecontaineritem``/``splocaleitemstr``
    tables, memoizing the serialized result per (discipline, schematype).
    """
    disc = collection.discipline
    if (disc, schematype) in schema_localization_cache:
        return schema_localization_cache[(disc, schematype)]

    lang = settings.SCHEMA_LANGUAGE
    cursor = connection.cursor()
    # container-level rows: the name/desc strings are joined in the
    # configured schema language
    cursor.execute("""
    select name, format, ishidden!=0, isuiformatter, picklistname, type, aggregator, defaultui, n.text, d.text
    from splocalecontainer
    left outer join splocaleitemstr n on n.splocalecontainernameid = splocalecontainerid and n.language = %s
    left outer join splocaleitemstr d on d.splocalecontainerdescid = splocalecontainerid and d.language = %s
    where schematype = %s and disciplineid = %s;
    """, [lang, lang, schematype, disc.id])

    # order must match the select list above (row[0] is the container name)
    cfields = ('format', 'ishidden', 'isuiformatter', 'picklistname', 'type', 'aggregator', 'defaultui', 'name', 'desc')

    containers = {
        row[0]: dict(items={}, **{field: row[i+1] for i, field in enumerate(cfields)})
        for row in cursor.fetchall()
    }

    # item-level rows, attached to their parent container by name
    cursor.execute("""
    select container.name, item.name,
    item.format, item.ishidden!=0, item.isuiformatter, item.picklistname,
    item.type, item.isrequired, item.weblinkname, n.text, d.text
    from splocalecontainer container
    inner join splocalecontaineritem item on item.splocalecontainerid = container.splocalecontainerid
    left outer join splocaleitemstr n on n.splocalecontaineritemnameid = item.splocalecontaineritemid and n.language = %s
    left outer join splocaleitemstr d on d.splocalecontaineritemdescid = item.splocalecontaineritemid and d.language = %s
    where schematype = %s and disciplineid = %s;
    """, [lang, lang, schematype, disc.id])

    # order must match the select list above (row[0]/row[1] are names)
    ifields = ('format', 'ishidden', 'isuiformatter', 'picklistname', 'type', 'isrequired', 'weblinkname', 'name', 'desc')

    for row in cursor.fetchall():
        containers[row[0]]['items'][row[1].lower()] = {field: row[i+2] for i, field in enumerate(ifields)}

    # cache the serialized form and return it
    sl = schema_localization_cache[(disc, schematype)] = json.dumps(containers)
    return sl
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright (C) 2006-2008 Inmaculada Luengo Merino, Néstor Arocha Rodríguez
#This file is part of pyrqt.
#
#pyrqt is free software; you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation; either version 2 of the License, or
#(at your option) any later version.
#
#pyrqt is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with pyrqt; if not, write to the Free Software
#Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Anova"""
#TODO: pasar a formato nuevo
nombre = u"Anova Simple"
#tipo = "Variable"
tipo = "Casos" #FIXME: Tipo incorrecto
etiquetas = ["Otros"]
factor = {"nombre":"Factor", "tipo":"Factores"}
widget = {"tipo":"Variable", "opciones":[factor]}
def funcion(dato, variable, caso, opciones):
    """Run a one-way ANOVA of `variable` grouped by the selected factor.

    Builds an R data frame with the numeric response ("Variable") and the
    stringified grouping factor ("Factor"), fits `Variable ~ Factor` with
    R's aov() via rpy, and returns R's summary of the fitted model.
    """
    import rpy #pylint: disable=import-error
    diccionario = {}
    r_data = {"Variable":[], "Factor":[]}
    # Numeric response values.
    for x in dato.query(variable, caso = caso):
        r_data["Variable"].append(float(x))
    # Grouping-factor levels, passed to R as strings.
    for x in dato.query(opciones["Factor"], caso = caso):
        r_data["Factor"].append(repr(x))
#    lista=[float(x) for x in dato.getCol(variable,caso=caso)]
#    agrupacion=[x for x in dato.getCasos(opciones["Factor"])]
#    agrupacion2=[x for x in dato.getCol(opciones["Factor"],caso=caso)]
#    mifuncion=lambda f:agrupacion.index(f)
#    agrupacionfinal=map(mifuncion,agrupacion2)
    # NO_CONVERSION keeps the data frame and the fitted model as R references
    # instead of converting them back into Python objects between calls.
    r_data_table = rpy.with_mode(rpy.NO_CONVERSION, rpy.r.data_frame)(r_data)
    modelo = rpy.r("Variable ~ Factor")
    aov = rpy.with_mode(rpy.NO_CONVERSION, rpy.r.aov)(modelo, r_data_table)
    diccionario = rpy.r.summary(aov)
    return diccionario
def initresultado(resultado, opciones):
    """Prepare the result object: add the table this analysis will fill in
    and set its title and column headers."""
    resultado.addTablaSimple("resultado")
    tabla = resultado["resultado"]
    tabla.titulo = u"Anova"
    # Prepend a "Caso" column only when the analysis is run per case.
    cabeceras = ["Caso"] if opciones["caso"] else []
    cabeceras.append(u"Resultado en bruto")
    tabla.settitulo(cabeceras)
def interfazresultado(resultado, listaopciones, floatrender = None):
    """Write one row into the result table: the raw R summary for `variable`.

    `listaopciones` is [variable, caso, diccionario] as produced by the
    analysis pipeline; `floatrender` is accepted for interface compatibility
    but unused.
    """
    variable = listaopciones[0]
    diccionario = listaopciones[2]
    # NOTE(review): the original built a `lista` holding the case name but
    # never used it (dead code, removed here). If the "Caso" column created by
    # initresultado() was meant to be filled, the row should also include
    # listaopciones[1] — confirm against the table API before changing.
    resultado["resultado"].set(variable, [str(diccionario)])
def comprobarentrada(opciones):
    """Validate the user's options: a grouping factor must be selected,
    otherwise OpcionesIncorrectaException is raised."""
    if opciones["Factor"]:
        return
    from pyrqt.excepciones import OpcionesIncorrectaException
    raise OpcionesIncorrectaException
def funcionprincipal():
    """Main hook; this analysis does all of its work in `funcion`."""
    pass

def funcionchequeocondiciones(interfazdato):
    """Whether the analysis applies to the current data; always False here."""
    return False

def funcionchequeoentradausuario(opciones):
    """Whether user input alone enables the analysis; always False here."""
    return False

# Result layout definition (empty: the raw R summary is shown instead).
definicionresultado = []
|
# -*- coding: utf-8 -*-
'''
Task Coach - Your friendly task manager
Copyright (C) 2004-2010 Task Coach developers <developers@taskcoach.org>
Copyright (C) 2008 Rob McMullen <rob.mcmullen@gmail.com>
Copyright (C) 2008 Thomas Sonne Olesen <tpo@sonnet.dk>
Task Coach is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Task Coach is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import wx
from taskcoachlib import patterns, command, widgets
from taskcoachlib.domain import category
from taskcoachlib.i18n import _
from taskcoachlib.gui import uicommand, menu, dialog, render
import base, mixin
class BaseCategoryViewer(mixin.AttachmentDropTargetMixin,
                         mixin.SortableViewerForCategoriesMixin,
                         mixin.SearchableViewerMixin,
                         mixin.NoteColumnMixin, mixin.AttachmentColumnMixin,
                         base.SortableViewerWithColumns, base.TreeViewer):
    """Tree viewer showing the categories of the task file as a checkable
    tree, with optional description, attachments and notes columns."""
    SorterClass = category.CategorySorter
    defaultTitle = _('Categories')
    defaultBitmap = 'folder_blue_arrow_icon'
    def __init__(self, *args, **kwargs):
        kwargs.setdefault('settingsSection', 'categoryviewer')
        super(BaseCategoryViewer, self).__init__(*args, **kwargs)
        # Refresh the display whenever any shown category attribute changes.
        for eventType in (category.Category.subjectChangedEventType(),
                          category.Category.filterChangedEventType(),
                          category.Category.foregroundColorChangedEventType(),
                          category.Category.backgroundColorChangedEventType(),
                          category.Category.fontChangedEventType(),
                          category.Category.iconChangedEventType(),
                          category.Category.selectedIconChangedEventType(),
                          category.Category.exclusiveSubcategoriesChangedEventType()):
            patterns.Publisher().registerObserver(self.onAttributeChanged,
                                                  eventType)
    def onEveryMinute(self, event):
        # Categories have no time-dependent display, so nothing to update.
        pass
    def domainObjectsToView(self):
        """The domain objects shown by this viewer: the task file's categories."""
        return self.taskFile.categories()
    def curselectionIsInstanceOf(self, class_):
        return class_ == category.Category
    def createWidget(self):
        """Create and return the check-tree control that displays the categories."""
        imageList = self.createImageList() # Has side-effects
        self._columns = self._createColumns()
        itemPopupMenu = self.createCategoryPopupMenu()
        columnPopupMenu = menu.ColumnPopupMenu(self)
        self._popupMenus.extend([itemPopupMenu, columnPopupMenu])
        widget = widgets.CheckTreeCtrl(self, self._columns,
            self.onSelect, self.onCheck,
            uicommand.CategoryEdit(viewer=self, categories=self.presentation()),
            uicommand.CategoryDragAndDrop(viewer=self, categories=self.presentation()),
            uicommand.EditSubject(viewer=self),
            itemPopupMenu, columnPopupMenu,
            **self.widgetCreationKeywordArguments())
        widget.AssignImageList(imageList) # pylint: disable-msg=E1101
        return widget
    def createCategoryPopupMenu(self, localOnly=False):
        """Return the context menu shown for category items."""
        return menu.CategoryPopupMenu(self.parent, self.settings, self.taskFile,
                                      self, localOnly)
    def _createColumns(self):
        """Build the column descriptors: subject, description, attachments and
        (when the notes feature is enabled) notes."""
        # pylint: disable-msg=W0142
        kwargs = dict(renderDescriptionCallback=lambda category: category.description(),
                      resizeCallback=self.onResizeColumn)
        columns = [widgets.Column('subject', _('Subject'),
                       category.Category.subjectChangedEventType(),
                       sortCallback=uicommand.ViewerSortByCommand(viewer=self,
                           value='subject'),
                       imageIndexCallback=self.subjectImageIndex,
                       width=self.getColumnWidth('subject'),
                       **kwargs),
                   widgets.Column('description', _('Description'),
                       category.Category.descriptionChangedEventType(),
                       sortCallback=uicommand.ViewerSortByCommand(viewer=self,
                           value='description'),
                       renderCallback=lambda category: category.description(),
                       width=self.getColumnWidth('description'),
                       **kwargs),
                   widgets.Column('attachments', '',
                       category.Category.attachmentsChangedEventType(), # pylint: disable-msg=E1101
                       width=self.getColumnWidth('attachments'),
                       alignment=wx.LIST_FORMAT_LEFT,
                       imageIndexCallback=self.attachmentImageIndex,
                       headerImageIndex=self.imageIndex['paperclip_icon'],
                       renderCallback=lambda category: '', **kwargs)]
        if self.settings.getboolean('feature', 'notes'):
            columns.append(widgets.Column('notes', '',
                category.Category.notesChangedEventType(), # pylint: disable-msg=E1101
                width=self.getColumnWidth('notes'),
                alignment=wx.LIST_FORMAT_LEFT,
                imageIndexCallback=self.noteImageIndex,
                headerImageIndex=self.imageIndex['note_icon'],
                renderCallback=lambda category: '', **kwargs))
        return columns
    def getImageIndices(self, category):
        """Return (normal, selected) image-list indices for the category's
        icon, falling back to the normal icon when no selected icon is set."""
        bitmap = category.icon(recursive=True)
        bitmap_selected = category.selectedIcon(recursive=True) or bitmap
        return self.imageIndex[bitmap] if bitmap else -1, self.imageIndex[bitmap_selected] if bitmap_selected else -1
    def subjectImageIndex(self, category, which):
        """Return the icon index for the subject column, depending on whether
        the tree item is expanded."""
        normalImageIndex, expandedImageIndex = self.getImageIndices(category)
        expanded = which in [wx.TreeItemIcon_Expanded,
                             wx.TreeItemIcon_SelectedExpanded]
        return expandedImageIndex if expanded else normalImageIndex
    def createToolBarUICommands(self):
        """Add the category-specific commands (new, new subcategory, edit,
        delete) to the base viewer's toolbar commands."""
        commands = super(BaseCategoryViewer, self).createToolBarUICommands()
        # Slice-insert just before the last two standard commands.
        commands[-2:-2] = [None,
                           uicommand.CategoryNew(categories=self.presentation(),
                                                 settings=self.settings),
                           uicommand.CategoryNewSubCategory(categories=self.presentation(),
                                                            viewer=self),
                           uicommand.CategoryEdit(categories=self.presentation(),
                                                  viewer=self),
                           uicommand.CategoryDelete(categories=self.presentation(),
                                                    viewer=self)]
        return commands
    def createColumnUICommands(self):
        """Return the commands for the column show/hide menu."""
        commands = [\
            uicommand.ToggleAutoColumnResizing(viewer=self,
                                               settings=self.settings),
            None,
            uicommand.ViewColumn(menuText=_('&Description'),
                helpText=_('Show/hide description column'),
                setting='description', viewer=self),
            uicommand.ViewColumn(menuText=_('&Attachments'),
                helpText=_('Show/hide attachments column'),
                setting='attachments', viewer=self)]
        if self.settings.getboolean('feature', 'notes'):
            commands.append(uicommand.ViewColumn(menuText=_('&Notes'),
                helpText=_('Show/hide notes column'),
                setting='notes', viewer=self))
        return commands
    def onAttributeChanged(self, event):
        """React to category attribute changes; exclusive-subcategory changes
        need a wider refresh than the default handling."""
        if category.Category.exclusiveSubcategoriesChangedEventType() in event.types():
            # We need to refresh the children of the changed item as well
            # because they have to use radio buttons instead of checkboxes, or
            # vice versa:
            items = event.sources()
            for item in items.copy():
                items |= set(item.children())
            self.widget.RefreshItems(*items)
        else:
            super(BaseCategoryViewer, self).onAttributeChanged(event)
    def onCheck(self, event):
        """Toggle filtering on the (un)checked category."""
        categoryToFilter = self.widget.GetItemPyData(event.GetItem())
        categoryToFilter.setFiltered(event.GetItem().IsChecked())
        self.onSelect(event) # Notify status bar
    def getIsItemChecked(self, item):
        # Only categories can be checked; their checkbox mirrors the filter state.
        if isinstance(item, category.Category):
            return item.isFiltered()
        return False
    def getItemParentHasExclusiveChildren(self, item):
        # Determines whether the item is drawn with a radio button (exclusive
        # subcategories) instead of a checkbox.
        parent = item.parent()
        return parent and parent.hasExclusiveSubcategories()
    def isShowingCategories(self):
        return True
    def statusMessages(self):
        """Return the two status bar messages: selection counts and filter count."""
        status1 = _('Categories: %d selected, %d total')%\
            (len(self.curselection()), len(self.presentation()))
        filteredCategories = self.presentation().filteredCategories()
        status2 = _('Status: %d filtered')%len(filteredCategories)
        return status1, status2
    # Factory methods telling the base viewer which dialog and commands to use:
    def itemEditorClass(self):
        return dialog.editor.CategoryEditor
    def newItemCommandClass(self):
        return command.NewCategoryCommand
    def editItemCommandClass(self):
        return command.EditCategoryCommand
    def newSubItemCommandClass(self):
        return command.NewSubCategoryCommand
    def deleteItemCommandClass(self):
        return command.DeleteCategoryCommand
class CategoryViewer(BaseCategoryViewer):
    """Category viewer with an extra toolbar choice control for selecting
    whether filtering requires any or all checked categories to match."""
    def __init__(self, *args, **kwargs):
        super(CategoryViewer, self).__init__(*args, **kwargs)
        # NOTE(review): self.filterUICommand is created in
        # getToolBarUICommands(), which the base class apparently invokes
        # during construction above — confirm.
        self.filterUICommand.setChoice(self.settings.getboolean('view',
            'categoryfiltermatchall'))
    def getToolBarUICommands(self):
        ''' UI commands to put on the toolbar of this viewer. '''
        toolBarUICommands = super(CategoryViewer, self).getToolBarUICommands()
        toolBarUICommands.insert(-2, None) # Separator
        # pylint: disable-msg=W0201
        self.filterUICommand = \
            uicommand.CategoryViewerFilterChoice(settings=self.settings)
        toolBarUICommands.insert(-2, self.filterUICommand)
        return toolBarUICommands
|
#!/usr/bin/env python
# -*- coding: ISO-8859-1 -*-
##This file is part of PyVot
#############################################################################
#############################################################################
## ##
## PyVot ##
## ##
#############################################################################
#############################################################################
## Copyright (C) 2006-2009 Cédrick FAURY
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import wx
import Icones
import sys, os, getpass
##import psyco
##psyco.log()
##psyco.full()
import globdef
from globdef import *
#import sys, os, time, traceback, types
import FenPrincipale
#import wx.aui
#import wx.html
#import images
# For debugging
##wx.Trap();
##print "wx.VERSION_STRING = %s (%s)" % (wx.VERSION_STRING, wx.USE_UNICODE and 'unicode' or 'ansi')
##print "pid:", os.getpid()
##raw_input("Press Enter...")
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
class MySplashScreen(wx.SplashScreen):
    """Splash screen shown at startup; schedules the main window to open
    after 2 seconds (or immediately if the splash is closed earlier)."""
    def __init__(self):
        bmp = Icones.getLogoSplashBitmap()
        wx.SplashScreen.__init__(self, bmp,
                                 wx.SPLASH_CENTRE_ON_SCREEN | wx.SPLASH_TIMEOUT,
                                 5000, None, -1,
                                 style = wx.BORDER_NONE|wx.FRAME_NO_TASKBAR)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        # Open the main frame 2000 ms from now.
        self.fc = wx.FutureCall(2000, self.ShowMain)
    def OnClose(self, evt):
        # Make sure the default handler runs too so this window gets
        # destroyed
        evt.Skip()
        self.Hide()
        # if the timer is still running then go ahead and show the
        # main frame now
        if self.fc.IsRunning():
            self.fc.Stop()
            self.ShowMain()
    def ShowMain(self):
        """Create and show the main window, optionally opening the file
        passed on the command line."""
        NomFichier = None
        if len(sys.argv)>1: # a parameter was passed on the command line
            parametre=sys.argv[1]
            # check that the file given as parameter actually exists
            if os.path.isfile(parametre):
                NomFichier = parametre
        frame = FenPrincipale.wxPyVot(None, "PyVot", NomFichier)
        frame.Show()
        if self.fc.IsRunning():
            self.Raise()
# wx.CallAfter(frame.ShowTip)
#---------------------------------------------------------------------------
class PyVotApp(wx.App):
    def OnInit(self):
        """
        Create and show the splash screen. It will then create and show
        the main frame when it is time to do so.
        """
        self.version = VERSION
#        try:
        # NOTE(review): `unicode` is Python 2 only; decodes the login name
        # assuming Windows 'cp1252' encoding.
        self.auteur = unicode(getpass.getuser(),'cp1252')
#        except:
#            self.auteur = ""
        wx.SystemOptions.SetOptionInt("mac.window-plain-transition", 1)
        self.SetAppName("PyVot")
        # For debugging
        #self.SetAssertMode(wx.PYAPP_ASSERT_DIALOG)
        # Normally when using a SplashScreen you would create it, show
        # it and then continue on with the application's
        # initialization, finally creating and showing the main
        # application window(s).  In this case we have nothing else to
        # do so we'll delay showing the main frame until later (see
        # ShowMain above) so the users can see the SplashScreen effect.
        splash = MySplashScreen()
        splash.Show()
        return True
#---------------------------------------------------------------------------
def main():
    """Application entry point: create the wx application object (without
    stdout/stderr redirection) and run its event loop."""
    application = PyVotApp(False)
    application.MainLoop()
# def PyVotRunning():
# #
# # Cette fonction teste si PyVot.exe est déjà lancé, auquel cas on arrete tout.
# #
# if not HAVE_WMI:
# return False
# else:
# nb_instances=0
# try:
# controler=wmi.WMI()
# for elem in controler.Win32_Process():
# if "PyVot.exe"==elem.Caption:
# nb_instances=nb_instances+1
# if nb_instances>=2:
# sys.exit(0)
# except:
# pass
#----------------------------------------------------------------------------
#----------------------------------------------------------------------------
# from customLogTarget import *
if __name__ == '__main__':
#    __name__ = 'Main'
    #
    # Check whether PyVot is already running
    #
#    PyVotRunning()
    #
    # Processing-speed improvement using psyco
    #
#    if USE_PSYCO:
#        try:
#            import psyco
#            HAVE_PSYCO=True
#        except ImportError:
#            HAVE_PSYCO=False
#        if HAVE_PSYCO:
#            print "Psyco !!!!!"
#            psyco.full()
    main()
#----------------------------------------------------------------------------
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base target assigner module.
The job of a TargetAssigner is, for a given set of anchors (bounding boxes) and
groundtruth detections (bounding boxes), to assign classification and regression
targets to each anchor as well as weights to each anchor (specifying, e.g.,
which anchors should not contribute to training loss).
It assigns classification/regression targets by performing the following steps:
1) Computing pairwise similarity between anchors and groundtruth boxes using a
provided RegionSimilarity Calculator
2) Computing a matching based on the similarity matrix using a provided Matcher
3) Assigning regression targets based on the matching and a provided BoxCoder
4) Assigning classification targets based on the matching and groundtruth labels
Note that TargetAssigners only operate on detections from a single
image at a time, so any logic for applying a TargetAssigner to multiple
images must be handled externally.
"""
import tensorflow as tf
from object_detection.box_coders import faster_rcnn_box_coder
from object_detection.box_coders import mean_stddev_box_coder
from object_detection.core import box_coder as bcoder
from object_detection.core import box_list
from object_detection.core import matcher as mat
from object_detection.core import region_similarity_calculator as sim_calc
from object_detection.core import standard_fields as fields
from object_detection.matchers import argmax_matcher
from object_detection.matchers import bipartite_matcher
from object_detection.utils import shape_utils
class TargetAssigner(object):
  """Target assigner to compute classification and regression targets."""
  def __init__(self,
               similarity_calc,
               matcher,
               box_coder,
               negative_class_weight=1.0):
    """Construct Object Detection Target Assigner.
    Args:
      similarity_calc: a RegionSimilarityCalculator
      matcher: an object_detection.core.Matcher used to match groundtruth to
        anchors.
      box_coder: an object_detection.core.BoxCoder used to encode matching
        groundtruth boxes with respect to anchors.
      negative_class_weight: classification weight to be associated to negative
        anchors (default: 1.0). The weight must be in [0., 1.].
    Raises:
      ValueError: if similarity_calc is not a RegionSimilarityCalculator or
        if matcher is not a Matcher or if box_coder is not a BoxCoder
    """
    # Fail fast on mis-typed collaborators; the rest of the class depends on
    # their interfaces.
    if not isinstance(similarity_calc, sim_calc.RegionSimilarityCalculator):
      raise ValueError('similarity_calc must be a RegionSimilarityCalculator')
    if not isinstance(matcher, mat.Matcher):
      raise ValueError('matcher must be a Matcher')
    if not isinstance(box_coder, bcoder.BoxCoder):
      raise ValueError('box_coder must be a BoxCoder')
    self._similarity_calc = similarity_calc
    self._matcher = matcher
    self._box_coder = box_coder
    self._negative_class_weight = negative_class_weight
  @property
  def box_coder(self):
    return self._box_coder
  # TODO(rathodv): move labels, scores, and weights to groundtruth_boxes fields.
  def assign(self,
             anchors,
             groundtruth_boxes,
             groundtruth_labels=None,
             unmatched_class_label=None,
             groundtruth_weights=None):
    """Assign classification and regression targets to each anchor.
    For a given set of anchors and groundtruth detections, match anchors
    to groundtruth_boxes and assign classification and regression targets to
    each anchor as well as weights based on the resulting match (specifying,
    e.g., which anchors should not contribute to training loss).
    Anchors that are not matched to anything are given a classification target
    of self._unmatched_cls_target which can be specified via the constructor.
    Args:
      anchors: a BoxList representing N anchors
      groundtruth_boxes: a BoxList representing M groundtruth boxes
      groundtruth_labels:  a tensor of shape [M, d_1, ... d_k]
        with labels for each of the ground_truth boxes. The subshape
        [d_1, ... d_k] can be empty (corresponding to scalar inputs).  When set
        to None, groundtruth_labels assumes a binary problem where all
        ground_truth boxes get a positive label (of 1).
      unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
        which is consistent with the classification target for each
        anchor (and can be empty for scalar targets).  This shape must thus be
        compatible with the groundtruth labels that are passed to the "assign"
        function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
        If set to None, unmatched_cls_target is set to be [0] for each anchor.
      groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors match to a particular groundtruth box. The weights
        must be in [0., 1.]. If None, all weights are set to 1. Generally no
        groundtruth boxes with zero weight match to any anchors as matchers are
        aware of groundtruth weights. Additionally, `cls_weights` and
        `reg_weights` are calculated using groundtruth weights as an added
        safety.
    Returns:
      cls_targets: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
        where the subshape [d_1, ..., d_k] is compatible with groundtruth_labels
        which has shape [num_gt_boxes, d_1, d_2, ... d_k].
      cls_weights: a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k],
        representing weights for each element in cls_targets.
      reg_targets: a float32 tensor with shape [num_anchors, box_code_dimension]
      reg_weights: a float32 tensor with shape [num_anchors]
      match: a matcher.Match object encoding the match between anchors and
        groundtruth boxes, with rows corresponding to groundtruth boxes
        and columns corresponding to anchors.
    Raises:
      ValueError: if anchors or groundtruth_boxes are not of type
        box_list.BoxList
    """
    if not isinstance(anchors, box_list.BoxList):
      raise ValueError('anchors must be an BoxList')
    if not isinstance(groundtruth_boxes, box_list.BoxList):
      raise ValueError('groundtruth_boxes must be an BoxList')
    if unmatched_class_label is None:
      unmatched_class_label = tf.constant([0], tf.float32)
    if groundtruth_labels is None:
      # Binary problem: give every groundtruth box a positive label of 1.
      groundtruth_labels = tf.ones(tf.expand_dims(groundtruth_boxes.num_boxes(),
                                                  0))
      groundtruth_labels = tf.expand_dims(groundtruth_labels, -1)
    # Graph-time assertions that label shapes agree with the unmatched target
    # and with the number of groundtruth boxes.
    unmatched_shape_assert = shape_utils.assert_shape_equal(
        shape_utils.combined_static_and_dynamic_shape(groundtruth_labels)[1:],
        shape_utils.combined_static_and_dynamic_shape(unmatched_class_label))
    labels_and_box_shapes_assert = shape_utils.assert_shape_equal(
        shape_utils.combined_static_and_dynamic_shape(
            groundtruth_labels)[:1],
        shape_utils.combined_static_and_dynamic_shape(
            groundtruth_boxes.get())[:1])
    if groundtruth_weights is None:
      num_gt_boxes = groundtruth_boxes.num_boxes_static()
      if not num_gt_boxes:
        num_gt_boxes = groundtruth_boxes.num_boxes()
      groundtruth_weights = tf.ones([num_gt_boxes], dtype=tf.float32)
    # set scores on the gt boxes
    # NOTE(review): this assumes column 0 of groundtruth_labels encodes the
    # background class (so the score is 1 for any real class) — confirm.
    scores = 1 - groundtruth_labels[:, 0]
    groundtruth_boxes.add_field(fields.BoxListFields.scores, scores)
    with tf.control_dependencies(
        [unmatched_shape_assert, labels_and_box_shapes_assert]):
      match_quality_matrix = self._similarity_calc.compare(groundtruth_boxes,
                                                           anchors)
      # Rows whose groundtruth weight is zero are excluded from matching.
      match = self._matcher.match(match_quality_matrix,
                                  valid_rows=tf.greater(groundtruth_weights, 0))
      reg_targets = self._create_regression_targets(anchors,
                                                    groundtruth_boxes,
                                                    match)
      cls_targets = self._create_classification_targets(groundtruth_labels,
                                                        unmatched_class_label,
                                                        match)
      reg_weights = self._create_regression_weights(match, groundtruth_weights)
      cls_weights = self._create_classification_weights(match,
                                                        groundtruth_weights)
      # convert cls_weights from per-anchor to per-class.
      class_label_shape = tf.shape(cls_targets)[1:]
      weights_shape = tf.shape(cls_weights)
      weights_multiple = tf.concat(
          [tf.ones_like(weights_shape), class_label_shape],
          axis=0)
      for _ in range(len(cls_targets.get_shape()[1:])):
        cls_weights = tf.expand_dims(cls_weights, -1)
      cls_weights = tf.tile(cls_weights, weights_multiple)
    num_anchors = anchors.num_boxes_static()
    if num_anchors is not None:
      # Pin the static first dimension when the anchor count is known.
      reg_targets = self._reset_target_shape(reg_targets, num_anchors)
      cls_targets = self._reset_target_shape(cls_targets, num_anchors)
      reg_weights = self._reset_target_shape(reg_weights, num_anchors)
      cls_weights = self._reset_target_shape(cls_weights, num_anchors)
    return cls_targets, cls_weights, reg_targets, reg_weights, match
  def _reset_target_shape(self, target, num_anchors):
    """Sets the static shape of the target.
    Args:
      target: the target tensor. Its first dimension will be overwritten.
      num_anchors: the number of anchors, which is used to override the target's
        first dimension.
    Returns:
      A tensor with the shape info filled in.
    """
    target_shape = target.get_shape().as_list()
    target_shape[0] = num_anchors
    target.set_shape(target_shape)
    return target
  def _create_regression_targets(self, anchors, groundtruth_boxes, match):
    """Returns a regression target for each anchor.
    Args:
      anchors: a BoxList representing N anchors
      groundtruth_boxes: a BoxList representing M groundtruth_boxes
      match: a matcher.Match object
    Returns:
      reg_targets: a float32 tensor with shape [N, box_code_dimension]
    """
    matched_gt_boxes = match.gather_based_on_match(
        groundtruth_boxes.get(),
        unmatched_value=tf.zeros(4),
        ignored_value=tf.zeros(4))
    matched_gt_boxlist = box_list.BoxList(matched_gt_boxes)
    # Carry keypoints along so box coders that encode keypoints can use them.
    if groundtruth_boxes.has_field(fields.BoxListFields.keypoints):
      groundtruth_keypoints = groundtruth_boxes.get_field(
          fields.BoxListFields.keypoints)
      matched_keypoints = match.gather_based_on_match(
          groundtruth_keypoints,
          unmatched_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]),
          ignored_value=tf.zeros(groundtruth_keypoints.get_shape()[1:]))
      matched_gt_boxlist.add_field(fields.BoxListFields.keypoints,
                                   matched_keypoints)
    matched_reg_targets = self._box_coder.encode(matched_gt_boxlist, anchors)
    match_results_shape = shape_utils.combined_static_and_dynamic_shape(
        match.match_results)
    # Zero out the unmatched and ignored regression targets.
    unmatched_ignored_reg_targets = tf.tile(
        self._default_regression_target(), [match_results_shape[0], 1])
    matched_anchors_mask = match.matched_column_indicator()
    reg_targets = tf.where(matched_anchors_mask,
                           matched_reg_targets,
                           unmatched_ignored_reg_targets)
    return reg_targets
  def _default_regression_target(self):
    """Returns the default target for anchors to regress to.
    Default regression targets are set to zero (though in
    this implementation what these targets are set to should
    not matter as the regression weight of any box set to
    regress to the default target is zero).
    Returns:
      default_target: a float32 tensor with shape [1, box_code_dimension]
    """
    return tf.constant([self._box_coder.code_size*[0]], tf.float32)
  def _create_classification_targets(self, groundtruth_labels,
                                     unmatched_class_label, match):
    """Create classification targets for each anchor.
    Assign a classification target of for each anchor to the matching
    groundtruth label that is provided by match.  Anchors that are not matched
    to anything are given the target self._unmatched_cls_target
    Args:
      groundtruth_labels:  a tensor of shape [num_gt_boxes, d_1, ... d_k]
        with labels for each of the ground_truth boxes. The subshape
        [d_1, ... d_k] can be empty (corresponding to scalar labels).
      unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
        which is consistent with the classification target for each
        anchor (and can be empty for scalar targets).  This shape must thus be
        compatible with the groundtruth labels that are passed to the "assign"
        function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.
    Returns:
      a float32 tensor with shape [num_anchors, d_1, d_2 ... d_k], where the
      subshape [d_1, ..., d_k] is compatible with groundtruth_labels which has
      shape [num_gt_boxes, d_1, d_2, ... d_k].
    """
    return match.gather_based_on_match(
        groundtruth_labels,
        unmatched_value=unmatched_class_label,
        ignored_value=unmatched_class_label)
  def _create_regression_weights(self, match, groundtruth_weights):
    """Set regression weight for each anchor.
    Only positive anchors are set to contribute to the regression loss, so this
    method returns a weight of 1 for every positive anchor and 0 for every
    negative anchor.
    Args:
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.
      groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors match to a particular groundtruth box.
    Returns:
      a float32 tensor with shape [num_anchors] representing regression weights.
    """
    return match.gather_based_on_match(
        groundtruth_weights, ignored_value=0., unmatched_value=0.)
  def _create_classification_weights(self,
                                     match,
                                     groundtruth_weights):
    """Create classification weights for each anchor.
    Positive (matched) anchors are associated with a weight of
    positive_class_weight and negative (unmatched) anchors are associated with
    a weight of negative_class_weight. When anchors are ignored, weights are set
    to zero. By default, both positive/negative weights are set to 1.0,
    but they can be adjusted to handle class imbalance (which is almost always
    the case in object detection).
    Args:
      match: a matcher.Match object that provides a matching between anchors
        and groundtruth boxes.
      groundtruth_weights: a float tensor of shape [M] indicating the weight to
        assign to all anchors match to a particular groundtruth box.
    Returns:
      a float32 tensor with shape [num_anchors] representing classification
      weights.
    """
    return match.gather_based_on_match(
        groundtruth_weights,
        ignored_value=0.,
        unmatched_value=self._negative_class_weight)
  def get_box_coder(self):
    """Get BoxCoder of this TargetAssigner.
    Returns:
      BoxCoder object.
    """
    return self._box_coder
# TODO(rathodv): This method pulls in all the implementation dependencies into
# core. Therefore its best to have this factory method outside of core.
def create_target_assigner(reference, stage=None,
                           negative_class_weight=1.0, use_matmul_gather=False):
  """Factory function for creating standard target assigners.

  Args:
    reference: string referencing the type of TargetAssigner.
    stage: string denoting stage: {proposal, detection}.
    negative_class_weight: classification weight to be associated to negative
      anchors (default: 1.0)
    use_matmul_gather: whether to use matrix multiplication based gather which
      are better suited for TPUs.

  Returns:
    TargetAssigner: desired target assigner.

  Raises:
    ValueError: if combination reference+stage is invalid.
  """
  config = (reference, stage)
  if config == ('Multibox', 'proposal'):
    similarity_calc = sim_calc.NegSqDistSimilarity()
    matcher = bipartite_matcher.GreedyBipartiteMatcher()
    box_coder = mean_stddev_box_coder.MeanStddevBoxCoder()
  elif config == ('FasterRCNN', 'proposal'):
    similarity_calc = sim_calc.IouSimilarity()
    matcher = argmax_matcher.ArgMaxMatcher(
        matched_threshold=0.7,
        unmatched_threshold=0.3,
        force_match_for_each_row=True,
        use_matmul_gather=use_matmul_gather)
    box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
        scale_factors=[10.0, 10.0, 5.0, 5.0])
  elif config == ('FasterRCNN', 'detection'):
    similarity_calc = sim_calc.IouSimilarity()
    # Uses all proposals with IOU < 0.5 as candidate negatives.
    matcher = argmax_matcher.ArgMaxMatcher(
        matched_threshold=0.5,
        negatives_lower_than_unmatched=True,
        use_matmul_gather=use_matmul_gather)
    box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder(
        scale_factors=[10.0, 10.0, 5.0, 5.0])
  elif reference == 'FastRCNN':
    # FastRCNN ignores the stage argument.
    similarity_calc = sim_calc.IouSimilarity()
    matcher = argmax_matcher.ArgMaxMatcher(
        matched_threshold=0.5,
        unmatched_threshold=0.1,
        force_match_for_each_row=False,
        negatives_lower_than_unmatched=False,
        use_matmul_gather=use_matmul_gather)
    box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder()
  else:
    raise ValueError('No valid combination of reference and stage.')
  return TargetAssigner(similarity_calc, matcher, box_coder,
                        negative_class_weight=negative_class_weight)
def batch_assign_targets(target_assigner,
                         anchors_batch,
                         gt_box_batch,
                         gt_class_targets_batch,
                         unmatched_class_label=None,
                         gt_weights_batch=None):
  """Batched assignment of classification and regression targets.

  Args:
    target_assigner: a target assigner.
    anchors_batch: BoxList representing N box anchors or list of BoxList objects
      with length batch_size representing anchor sets.
    gt_box_batch: a list of BoxList objects with length batch_size
      representing groundtruth boxes for each image in the batch
    gt_class_targets_batch: a list of tensors with length batch_size, where
      each tensor has shape [num_gt_boxes_i, classification_target_size] and
      num_gt_boxes_i is the number of boxes in the ith boxlist of
      gt_box_batch.
    unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
      which is consistent with the classification target for each
      anchor (and can be empty for scalar targets). This shape must thus be
      compatible with the groundtruth labels that are passed to the "assign"
      function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
    gt_weights_batch: A list of 1-D tf.float32 tensors of shape
      [num_boxes] containing weights for groundtruth boxes.

  Returns:
    batch_cls_targets: a tensor with shape [batch_size, num_anchors,
      num_classes],
    batch_cls_weights: a tensor with shape [batch_size, num_anchors,
      num_classes],
    batch_reg_targets: a tensor with shape [batch_size, num_anchors,
      box_code_dimension]
    batch_reg_weights: a tensor with shape [batch_size, num_anchors],
    match_list: a list of matcher.Match objects encoding the match between
      anchors and groundtruth boxes for each image of the batch,
      with rows of the Match objects corresponding to groundtruth boxes
      and columns corresponding to anchors.

  Raises:
    ValueError: if input list lengths are inconsistent, i.e.,
      batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
      and batch_size == len(anchors_batch) unless anchors_batch is a single
      BoxList.
  """
  # A single BoxList of anchors is shared across the entire batch.
  if not isinstance(anchors_batch, list):
    anchors_batch = [anchors_batch] * len(gt_box_batch)
  if any(not isinstance(anchors, box_list.BoxList)
         for anchors in anchors_batch):
    raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
  batch_size = len(anchors_batch)
  if not batch_size == len(gt_box_batch) == len(gt_class_targets_batch):
    raise ValueError('batch size incompatible with lengths of anchors_batch, '
                     'gt_box_batch and gt_class_targets_batch.')
  if gt_weights_batch is None:
    gt_weights_batch = [None] * len(gt_class_targets_batch)
  cls_targets_list = []
  cls_weights_list = []
  reg_targets_list = []
  reg_weights_list = []
  match_list = []
  # Assign targets image by image; results are stacked on the batch axis.
  for anchors, gt_boxes, gt_class_targets, gt_weights in zip(
      anchors_batch, gt_box_batch, gt_class_targets_batch, gt_weights_batch):
    (cls_targets, cls_weights, reg_targets,
     reg_weights, match) = target_assigner.assign(
         anchors, gt_boxes, gt_class_targets, unmatched_class_label,
         gt_weights)
    cls_targets_list.append(cls_targets)
    cls_weights_list.append(cls_weights)
    reg_targets_list.append(reg_targets)
    reg_weights_list.append(reg_weights)
    match_list.append(match)
  return (tf.stack(cls_targets_list),
          tf.stack(cls_weights_list),
          tf.stack(reg_targets_list),
          tf.stack(reg_weights_list),
          match_list)
def batch_assign_confidences(target_assigner,
                             anchors_batch,
                             gt_box_batch,
                             gt_class_confidences_batch,
                             gt_weights_batch=None,
                             unmatched_class_label=None,
                             include_background_class=True,
                             implicit_class_weight=1.0):
  """Batched assignment of classification and regression targets.

  The differences between batch_assign_confidences and batch_assign_targets
  are:
   - 'batch_assign_targets' supports scalar (agnostic), vector (multiclass) and
     tensor (high-dimensional) targets. 'batch_assign_confidences' only support
     scalar (agnostic) and vector (multiclass) targets.
   - 'batch_assign_targets' assumes the input class tensor using the binary
     one/K-hot encoding. 'batch_assign_confidences' takes the class confidence
     scores as the input, where 1 means positive classes, 0 means implicit
     negative classes, and -1 means explicit negative classes.
   - 'batch_assign_confidences' assigns the targets in the similar way as
     'batch_assign_targets' except that it gives different weights for implicit
     and explicit classes. This allows user to control the negative gradients
     pushed differently for implicit and explicit examples during the training.

  Args:
    target_assigner: a target assigner.
    anchors_batch: BoxList representing N box anchors or list of BoxList objects
      with length batch_size representing anchor sets.
    gt_box_batch: a list of BoxList objects with length batch_size
      representing groundtruth boxes for each image in the batch
    gt_class_confidences_batch: a list of tensors with length batch_size, where
      each tensor has shape [num_gt_boxes_i, classification_target_size] and
      num_gt_boxes_i is the number of boxes in the ith boxlist of
      gt_box_batch. Note that in this tensor, 1 means explicit positive class,
      -1 means explicit negative class, and 0 means implicit negative class.
    gt_weights_batch: A list of 1-D tf.float32 tensors of shape
      [num_gt_boxes_i] containing weights for groundtruth boxes.
    unmatched_class_label: a float32 tensor with shape [d_1, d_2, ..., d_k]
      which is consistent with the classification target for each
      anchor (and can be empty for scalar targets). This shape must thus be
      compatible with the groundtruth labels that are passed to the "assign"
      function (which have shape [num_gt_boxes, d_1, d_2, ..., d_k]).
    include_background_class: whether or not gt_class_confidences_batch includes
      the background class.
    implicit_class_weight: the weight assigned to implicit examples.

  Returns:
    batch_cls_targets: a tensor with shape [batch_size, num_anchors,
      num_classes],
    batch_cls_weights: a tensor with shape [batch_size, num_anchors,
      num_classes],
    batch_reg_targets: a tensor with shape [batch_size, num_anchors,
      box_code_dimension]
    batch_reg_weights: a tensor with shape [batch_size, num_anchors],
    match_list: a list of matcher.Match objects encoding the match between
      anchors and groundtruth boxes for each image of the batch,
      with rows of the Match objects corresponding to groundtruth boxes
      and columns corresponding to anchors.

  Raises:
    ValueError: if input list lengths are inconsistent, i.e.,
      batch_size == len(gt_box_batch) == len(gt_class_targets_batch)
      and batch_size == len(anchors_batch) unless anchors_batch is a single
      BoxList, or if any element in gt_class_confidences_batch has rank > 2.
  """
  # A single BoxList of anchors is shared across the whole batch.
  if not isinstance(anchors_batch, list):
    anchors_batch = len(gt_box_batch) * [anchors_batch]
  if not all(
      isinstance(anchors, box_list.BoxList) for anchors in anchors_batch):
    raise ValueError('anchors_batch must be a BoxList or list of BoxLists.')
  if not (len(anchors_batch)
          == len(gt_box_batch)
          == len(gt_class_confidences_batch)):
    raise ValueError('batch size incompatible with lengths of anchors_batch, '
                     'gt_box_batch and gt_class_confidences_batch.')
  cls_targets_list = []
  cls_weights_list = []
  reg_targets_list = []
  reg_weights_list = []
  match_list = []
  if gt_weights_batch is None:
    gt_weights_batch = [None] * len(gt_class_confidences_batch)
  for anchors, gt_boxes, gt_class_confidences, gt_weights in zip(
      anchors_batch, gt_box_batch, gt_class_confidences_batch,
      gt_weights_batch):
    # Only scalar (agnostic) or vector (multiclass) confidences are supported.
    if (gt_class_confidences is not None and
        len(gt_class_confidences.get_shape().as_list()) > 2):
      raise ValueError('The shape of the class target is not supported. ',
                       gt_class_confidences.get_shape())
    # The assigner's own weights are discarded ('_'); classification and
    # regression weights are rebuilt below so implicit vs. explicit
    # negatives can be weighted differently.
    cls_targets, _, reg_targets, _, match = target_assigner.assign(
        anchors, gt_boxes, gt_class_confidences, unmatched_class_label,
        groundtruth_weights=gt_weights)
    # Strip off the leading background column so only real classes are
    # examined for positive/negative confidence signs.
    if include_background_class:
      cls_targets_without_background = tf.slice(
          cls_targets, [0, 1], [-1, -1])
    else:
      cls_targets_without_background = cls_targets
    # Confidences > 0 are explicit positives, < 0 explicit negatives;
    # exactly 0 is an implicit negative.
    positive_mask = tf.greater(cls_targets_without_background, 0.0)
    negative_mask = tf.less(cls_targets_without_background, 0.0)
    explicit_example_mask = tf.logical_or(positive_mask, negative_mask)
    positive_anchors = tf.reduce_any(positive_mask, axis=-1)
    # Only anchors with at least one explicit positive class contribute to
    # the regression loss; their targets are zeroed otherwise.
    regression_weights = tf.to_float(positive_anchors)
    regression_targets = (
        reg_targets * tf.expand_dims(regression_weights, axis=-1))
    regression_weights_expanded = tf.expand_dims(regression_weights, axis=-1)
    # Explicit negatives (-1) get a classification target of 0, same as
    # implicit negatives -- they differ only through the weight below.
    cls_targets_without_background = (
        cls_targets_without_background * (1 - tf.to_float(negative_mask)))
    # Explicit examples get weight 1.0; implicit ones get
    # implicit_class_weight.
    cls_weights_without_background = (
        (1 - implicit_class_weight) * tf.to_float(explicit_example_mask)
        + implicit_class_weight)
    if include_background_class:
      # Background target/weight is derived from whether the anchor has any
      # positive class: background target is 1 iff no positive class.
      cls_weights_background = (
          (1 - implicit_class_weight) * regression_weights_expanded
          + implicit_class_weight)
      classification_weights = tf.concat(
          [cls_weights_background, cls_weights_without_background], axis=-1)
      cls_targets_background = 1 - regression_weights_expanded
      classification_targets = tf.concat(
          [cls_targets_background, cls_targets_without_background], axis=-1)
    else:
      classification_targets = cls_targets_without_background
      classification_weights = cls_weights_without_background
    cls_targets_list.append(classification_targets)
    cls_weights_list.append(classification_weights)
    reg_targets_list.append(regression_targets)
    reg_weights_list.append(regression_weights)
    match_list.append(match)
  batch_cls_targets = tf.stack(cls_targets_list)
  batch_cls_weights = tf.stack(cls_weights_list)
  batch_reg_targets = tf.stack(reg_targets_list)
  batch_reg_weights = tf.stack(reg_weights_list)
  return (batch_cls_targets, batch_cls_weights, batch_reg_targets,
          batch_reg_weights, match_list)
|
import jupytext
import nbformat
import papermill as pm
import pytest
def save_and_execute_notebook(nb_str, path):
    """Convert ``nb_str`` (py:light format) into a notebook, write it to
    ``path`` and execute it in place with papermill.

    Returns the executed notebook's path as a ``str``.
    """
    notebook = jupytext.reads(nb_str, fmt='py:light')
    kernelspec = {
        'name': 'python3',
        'language': 'python',
        'display_name': 'Python 3',
    }
    notebook.metadata['kernelspec'] = kernelspec
    nbformat.write(notebook, path)
    path_str = str(path)
    pm.execute_notebook(path_str, path_str)
    return path_str
@pytest.fixture
def nb_literals():
    """Executed notebook with int/list/dict outputs tagged 'int'/'list'/'dict'.

    Returns the path of the executed notebook, consistent with
    nb_invalid_output (the other fixtures previously returned None).
    """
    content = """
# + tags=["int"]
int_ = 1
print(int_)
# + tags=["list"]
list_ = [1, 2, 3]
print(list_)
# + tags=["dict"]
dict_ = {'x': 1, 'y': 2}
dict_
"""
    # Return the path so tests can locate the executed notebook.
    return save_and_execute_notebook(content, 'nb_literals.ipynb')
@pytest.fixture
def nb_other_literals():
    """Executed notebook with alternative int/list/dict literal outputs.

    Returns the path of the executed notebook, consistent with
    nb_invalid_output (this fixture previously returned None).
    """
    content = """
# + tags=["int"]
int_ = 2
print(int_)
# + tags=["list"]
list_ = [2, 3, 4]
print(list_)
# + tags=["dict"]
dict_ = {'x': 2, 'y': 3}
dict_
"""
    # Return the path so tests can locate the executed notebook.
    return save_and_execute_notebook(content, 'nb_other_literals.ipynb')
@pytest.fixture
def nb_plot():
    """Executed notebook producing a matplotlib plot (tag 'a') and an int (tag 'b').

    Returns the path of the executed notebook, consistent with
    nb_invalid_output (this fixture previously returned None).
    """
    content = """
import matplotlib.pyplot as plt

# + tags=["a"]
plt.plot([1, 2, 3], [1, 2, 3])

# + tags=["b"]
42
"""
    # Return the path so tests can locate the executed notebook.
    return save_and_execute_notebook(content, 'nb_plot.ipynb')
@pytest.fixture
def nb_table():
    """Executed notebook producing a pandas DataFrame (tag 'a') and an int (tag 'b').

    Returns the path of the executed notebook, consistent with
    nb_invalid_output (this fixture previously returned None).
    """
    content = """
import pandas as pd

# + tags=["a"]
pd.DataFrame({'a': [1,2 ,3]})

# + tags=["b"]
42
"""
    # Return the path so tests can locate the executed notebook.
    return save_and_execute_notebook(content, 'nb_table.ipynb')
@pytest.fixture
def nb_no_output():
    """Executed notebook whose tagged cell produces no output.

    Returns the path of the executed notebook, consistent with
    nb_invalid_output (this fixture previously returned None).
    """
    content = """
import pandas as pd

# + tags=["a"]
x = 1
"""
    # Return the path so tests can locate the executed notebook.
    return save_and_execute_notebook(content, 'nb_no_output.ipynb')
@pytest.fixture
def nb_invalid_output():
    """Executed notebook whose tagged cell outputs a numpy array.

    Returns the path of the executed notebook.
    """
    source = """
import numpy as np

# + tags=["numpy_array"]
np.array([1, 2, 3])
"""
    path = save_and_execute_notebook(source, 'nb_invalid_output.ipynb')
    return path
|
#joliebulle 3.6
#Copyright (C) 2010-2016 Pierre Tavares
#Copyright (C) 2012-2015 joliebulle's authors
#See AUTHORS file.
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation; either version 3
#of the License, or (at your option) any later version.
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from PyQt5 import QtGui
from base import ImportBase
from view.yeastview import *
def getFermentablesQtModel():
    """Build a QStandardItemModel listing all known fermentables.

    Each item displays the fermentable's name and carries the fermentable
    object itself under MODEL_DATA_ROLE.
    """
    model = QtGui.QStandardItemModel()
    for fermentable in ImportBase().listeFermentables:
        item = QtGui.QStandardItem(fermentable.name)
        item.setData(fermentable, view.constants.MODEL_DATA_ROLE)
        model.appendRow(item)
    return model
def getHopsQtModel():
    """Build a QStandardItemModel listing all known hops.

    Each item displays the hop's name and carries the hop object itself
    under MODEL_DATA_ROLE.
    """
    model = QtGui.QStandardItemModel()
    for hop in ImportBase().listeHops:
        item = QtGui.QStandardItem(hop.name)
        item.setData(hop, view.constants.MODEL_DATA_ROLE)
        model.appendRow(item)
    return model
def getMiscsQtModel():
    """Build a QStandardItemModel listing all known misc ingredients.

    Each item displays the ingredient's name and carries the ingredient
    object itself under MODEL_DATA_ROLE.
    """
    model = QtGui.QStandardItemModel()
    for misc in ImportBase().listeMiscs:
        item = QtGui.QStandardItem(misc.name)
        item.setData(misc, view.constants.MODEL_DATA_ROLE)
        model.appendRow(item)
    return model
def getYeastsQtModel():
    """Build a QStandardItemModel listing all known yeasts.

    Each item displays the yeast's detail string (rendered through
    YeastView) and carries the yeast object itself under MODEL_DATA_ROLE.
    """
    model = QtGui.QStandardItemModel()
    for yeast in ImportBase().listeYeasts:
        item = QtGui.QStandardItem(YeastView(yeast).yeastDetailDisplay())
        item.setData(yeast, view.constants.MODEL_DATA_ROLE)
        model.appendRow(item)
    return model
|
"""
Implementation of the Colella 2nd order unsplit Godunov scheme. This
is a 2-dimensional implementation only. We assume that the grid is
uniform, but it is relatively straightforward to relax this
assumption.
There are several different options for this solver (they are all
discussed in the Colella paper).
limiter = 0 to use no limiting
= 1 to use the 2nd order MC limiter
= 2 to use the 4th order MC limiter
riemann = HLLC to use the HLLC solver
= CGF to use the Colella, Glaz, and Ferguson solver
use_flattening = 1 to use the multidimensional flattening
algorithm at shocks
delta, z0, z1 these are the flattening parameters. The default
are the values listed in Colella 1990.
j+3/2--+---------+---------+---------+
| | | |
j+1 _| | | |
| | | |
| | | |
j+1/2--+---------XXXXXXXXXXX---------+
| X X |
j _| X X |
| X X |
| X X |
j-1/2--+---------XXXXXXXXXXX---------+
| | | |
j-1 _| | | |
| | | |
| | | |
j-3/2--+---------+---------+---------+
| | | | | | |
i-1 i i+1
i-3/2 i-1/2 i+1/2 i+3/2
We wish to solve
U_t + F^x_x + F^y_y = H
we want U_{i+1/2}^{n+1/2} -- the interface values that are input to
the Riemann problem through the faces for each zone.
Taylor expanding yields
n+1/2 dU dU
U = U + 0.5 dx -- + 0.5 dt --
i+1/2,j,L i,j dx dt
dU dF^x dF^y
= U + 0.5 dx -- - 0.5 dt ( ---- + ---- - H )
i,j dx dx dy
dU dF^x dF^y
= U + 0.5 ( dx -- - dt ---- ) - 0.5 dt ---- + 0.5 dt H
i,j dx dx dy
dt dU dF^y
= U + 0.5 dx ( 1 - -- A^x ) -- - 0.5 dt ---- + 0.5 dt H
i,j dx dx dy
dt _ dF^y
= U + 0.5 ( 1 - -- A^x ) DU - 0.5 dt ---- + 0.5 dt H
i,j dx dy
+----------+-----------+ +----+----+ +---+---+
| | |
this is the monotonized this is the source term
central difference term transverse
flux term
There are two components, the central difference in the normal to the
interface, and the transverse flux difference. This is done for the
left and right sides of all 4 interfaces in a zone, which are then
used as input to the Riemann problem, yielding the 1/2 time interface
values,
n+1/2
U
i+1/2,j
Then, the zone average values are updated in the usual finite-volume
way:
n+1 n dt x n+1/2 x n+1/2
U = U + -- { F (U ) - F (U ) }
i,j i,j dx i-1/2,j i+1/2,j
dt y n+1/2 y n+1/2
+ -- { F (U ) - F (U ) }
dy i,j-1/2 i,j+1/2
Updating U_{i,j}:
-- We want to find the state to the left and right (or top and
bottom) of each interface, ex. U_{i+1/2,j,[lr]}^{n+1/2}, and use
them to solve a Riemann problem across each of the four
interfaces.
-- U_{i+1/2,j,[lr]}^{n+1/2} is comprised of two parts, the
computation of the monotonized central differences in the normal
direction (eqs. 2.8, 2.10) and the computation of the transverse
derivatives, which requires the solution of a Riemann problem in
the transverse direction (eqs. 2.9, 2.14).
-- the monotonized central difference part is computed using
the primitive variables.
-- We compute the central difference part in both directions
before doing the transverse flux differencing, since for the
high-order transverse flux implementation, we use these as
the input to the transverse Riemann problem.
"""
import numpy
import vars
import eos
import mesh.reconstruction_f as reconstruction_f
from util import runparams
from util import profile
import interface_f
def unsplitFluxes(myData, dt):
    """
    unsplitFluxes returns the fluxes through the x and y interfaces by
    doing an unsplit reconstruction of the interface values and then
    solving the Riemann problem through all the interfaces at once.

    Currently we assume a gamma-law EOS.

    Parameters
    ----------
    myData : patch data object
        provides the grid (.grid attribute) and the cell-centered
        conserved variables ("density", "x-momentum", "y-momentum",
        "energy") via getVarPtr().
    dt : float
        the timestep over which the half-time interface states are
        predicted.

    Returns
    -------
    F_x, F_y : ndarray
        the conservative fluxes through the x and y interfaces.  Fluxes
        are stored on the left/bottom edge of each zone: F_x[i,j,:] is
        the flux through the i-1/2,j interface.

    Notes
    -----
    The gravitational acceleration in the y-direction is read from the
    runtime parameter "compressible.grav" and applied as a source term
    to the predicted interface states.
    """
    pf = profile.timer("unsplitFluxes")
    pf.begin()
    myg = myData.grid
    #=========================================================================
    # compute the primitive variables
    #=========================================================================
    # Q = (rho, u, v, p)
    dens = myData.getVarPtr("density")
    xmom = myData.getVarPtr("x-momentum")
    ymom = myData.getVarPtr("y-momentum")
    ener = myData.getVarPtr("energy")
    r = dens
    # get the velocities
    u = xmom/dens
    v = ymom/dens
    # get the pressure
    # specific internal energy: e = (E - 0.5*rho*|U|^2)/rho
    e = (ener - 0.5*(xmom**2 + ymom**2)/dens)/dens
    p = eos.pres(dens, e)
    smallp = 1.e-10
    p = p.clip(smallp)   # apply a floor to the pressure
    #=========================================================================
    # compute the flattening coefficients
    #=========================================================================
    # there is a single flattening coefficient (xi) for all directions
    use_flattening = runparams.getParam("compressible.use_flattening")
    if (use_flattening):
        smallp = 1.e-10
        delta = runparams.getParam("compressible.delta")
        z0 = runparams.getParam("compressible.z0")
        z1 = runparams.getParam("compressible.z1")
        # directional flattening coefficients, then combined multi-D one
        xi_x = reconstruction_f.flatten(1, p, u, myg.qx, myg.qy, myg.ng, smallp, delta, z0, z1)
        xi_y = reconstruction_f.flatten(2, p, v, myg.qx, myg.qy, myg.ng, smallp, delta, z0, z1)
        xi = reconstruction_f.flatten_multid(xi_x, xi_y, p, myg.qx, myg.qy, myg.ng)
    else:
        xi = 1.0
    #=========================================================================
    # x-direction
    #=========================================================================
    # monotonized central differences in x-direction
    pfa = profile.timer("limiting")
    pfa.begin()
    limiter = runparams.getParam("compressible.limiter")
    if (limiter == 0):
        limitFunc = reconstruction_f.nolimit
    elif (limiter == 1):
        limitFunc = reconstruction_f.limit2
    else:
        limitFunc = reconstruction_f.limit4
    # limited slopes of the primitive variables, scaled by the flattening
    ldelta_r = xi*limitFunc(1, r, myg.qx, myg.qy, myg.ng)
    ldelta_u = xi*limitFunc(1, u, myg.qx, myg.qy, myg.ng)
    ldelta_v = xi*limitFunc(1, v, myg.qx, myg.qy, myg.ng)
    ldelta_p = xi*limitFunc(1, p, myg.qx, myg.qy, myg.ng)
    pfa.end()
    # left and right primitive variable states
    pfb = profile.timer("interfaceStates")
    pfb.begin()
    gamma = runparams.getParam("eos.gamma")
    # NOTE(review): these zero arrays are immediately rebound by the return
    # value of interface_f.states below -- the preallocation looks redundant.
    V_l = numpy.zeros((myg.qx, myg.qy, vars.nvar), dtype=numpy.float64)
    V_r = numpy.zeros((myg.qx, myg.qy, vars.nvar), dtype=numpy.float64)
    (V_l, V_r) = interface_f.states(1, myg.qx, myg.qy, myg.ng, myg.dx, dt,
                                    vars.nvar,
                                    gamma,
                                    r, u, v, p,
                                    ldelta_r, ldelta_u, ldelta_v, ldelta_p)
    pfb.end()
    # transform interface states back into conserved variables
    U_xl = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
    U_xr = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
    U_xl[:,:,vars.idens] = V_l[:,:,vars.irho]
    U_xl[:,:,vars.ixmom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iu]
    U_xl[:,:,vars.iymom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iv]
    # total energy: rho*e(p) + kinetic energy
    U_xl[:,:,vars.iener] = eos.rhoe(V_l[:,:,vars.ip]) + \
        0.5*V_l[:,:,vars.irho]*(V_l[:,:,vars.iu]**2 + V_l[:,:,vars.iv]**2)
    U_xr[:,:,vars.idens] = V_r[:,:,vars.irho]
    U_xr[:,:,vars.ixmom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iu]
    U_xr[:,:,vars.iymom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iv]
    U_xr[:,:,vars.iener] = eos.rhoe(V_r[:,:,vars.ip]) + \
        0.5*V_r[:,:,vars.irho]*(V_r[:,:,vars.iu]**2 + V_r[:,:,vars.iv]**2)
    #=========================================================================
    # y-direction
    #=========================================================================
    # monotonized central differences in y-direction
    pfa.begin()
    ldelta_r = xi*limitFunc(2, r, myg.qx, myg.qy, myg.ng)
    ldelta_u = xi*limitFunc(2, u, myg.qx, myg.qy, myg.ng)
    ldelta_v = xi*limitFunc(2, v, myg.qx, myg.qy, myg.ng)
    ldelta_p = xi*limitFunc(2, p, myg.qx, myg.qy, myg.ng)
    pfa.end()
    # left and right primitive variable states
    pfb.begin()
    (V_l, V_r) = interface_f.states(2, myg.qx, myg.qy, myg.ng, myg.dy, dt,
                                    vars.nvar,
                                    gamma,
                                    r, u, v, p,
                                    ldelta_r, ldelta_u, ldelta_v, ldelta_p)
    pfb.end()
    # transform interface states back into conserved variables
    U_yl = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
    U_yr = numpy.zeros((myg.qx, myg.qy, myData.nvar), dtype=numpy.float64)
    U_yl[:,:,vars.idens] = V_l[:,:,vars.irho]
    U_yl[:,:,vars.ixmom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iu]
    U_yl[:,:,vars.iymom] = V_l[:,:,vars.irho]*V_l[:,:,vars.iv]
    U_yl[:,:,vars.iener] = eos.rhoe(V_l[:,:,vars.ip]) + \
        0.5*V_l[:,:,vars.irho]*(V_l[:,:,vars.iu]**2 + V_l[:,:,vars.iv]**2)
    U_yr[:,:,vars.idens] = V_r[:,:,vars.irho]
    U_yr[:,:,vars.ixmom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iu]
    U_yr[:,:,vars.iymom] = V_r[:,:,vars.irho]*V_r[:,:,vars.iv]
    U_yr[:,:,vars.iener] = eos.rhoe(V_r[:,:,vars.ip]) + \
        0.5*V_r[:,:,vars.irho]*(V_r[:,:,vars.iu]**2 + V_r[:,:,vars.iv]**2)
    #=========================================================================
    # apply source terms
    #=========================================================================
    # gravity acts in the y-direction: it adds to the y-momentum and, via
    # rho*v*g, to the energy of the predicted interface states
    grav = runparams.getParam("compressible.grav")
    # ymom_xl[i,j] += 0.5*dt*dens[i-1,j]*grav
    U_xl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
        0.5*dt*dens[myg.ilo-2:myg.ihi+1,myg.jlo-1:myg.jhi+2]*grav
    U_xl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
        0.5*dt*ymom[myg.ilo-2:myg.ihi+1,myg.jlo-1:myg.jhi+2]*grav
    # ymom_xr[i,j] += 0.5*dt*dens[i,j]*grav
    U_xr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
        0.5*dt*dens[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
    U_xr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
        0.5*dt*ymom[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
    # ymom_yl[i,j] += 0.5*dt*dens[i,j-1]*grav
    U_yl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
        0.5*dt*dens[myg.ilo-1:myg.ihi+2,myg.jlo-2:myg.jhi+1]*grav
    U_yl[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
        0.5*dt*ymom[myg.ilo-1:myg.ihi+2,myg.jlo-2:myg.jhi+1]*grav
    # ymom_yr[i,j] += 0.5*dt*dens[i,j]*grav
    U_yr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iymom] += \
        0.5*dt*dens[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
    U_yr[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2,vars.iener] += \
        0.5*dt*ymom[myg.ilo-1:myg.ihi+2,myg.jlo-1:myg.jhi+2]*grav
    #=========================================================================
    # compute transverse fluxes
    #=========================================================================
    pfc = profile.timer("riemann")
    pfc.begin()
    riemann = runparams.getParam("compressible.riemann")
    if (riemann == "HLLC"):
        riemannFunc = interface_f.riemann_hllc
    elif (riemann == "CGF"):
        riemannFunc = interface_f.riemann_cgf
    else:
        # NOTE(review): 'msg' is not imported in this module, so this error
        # path raises NameError rather than reporting the failure cleanly --
        # confirm whether util's msg module should be imported here.
        msg.fail("ERROR: Riemann solver undefined")
    F_x = riemannFunc(1, myg.qx, myg.qy, myg.ng,
                      vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
                      gamma, U_xl, U_xr)
    F_y = riemannFunc(2, myg.qx, myg.qy, myg.ng,
                      vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
                      gamma, U_yl, U_yr)
    pfc.end()
    #=========================================================================
    # construct the interface values of U now
    #=========================================================================
    """
    finally, we can construct the state perpendicular to the interface
    by adding the central difference part to the trasverse flux
    difference.
    The states that we represent by indices i,j are shown below
    (1,2,3,4):
      j+3/2--+----------+----------+----------+
             |          |          |          |
             |          |          |          |
        j+1 -+          |          |          |
             |          |          |          |
             |          |          |          |    1: U_xl[i,j,:] = U
      j+1/2--+----------XXXXXXXXXXXX----------+                      i-1/2,j,L
             |          X          X          |
             |          X          X          |
          j -+        1 X 2        X          |    2: U_xr[i,j,:] = U
             |          X          X          |                      i-1/2,j,R
             |          X    4     X          |
      j-1/2--+----------XXXXXXXXXXXX----------+
             |          |    3     |          |    3: U_yl[i,j,:] = U
             |          |          |          |                      i,j-1/2,L
        j-1 -+          |          |          |
             |          |          |          |
             |          |          |          |    4: U_yr[i,j,:] = U
      j-3/2--+----------+----------+----------+                      i,j-1/2,R
             |    |     |    |     |    |     |
                 i-1         i         i+1
           i-3/2      i-1/2      i+1/2      i+3/2
    remember that the fluxes are stored on the left edge, so
    F_x[i,j,:] = F_x
                    i-1/2, j
    F_y[i,j,:] = F_y
                    i, j-1/2
    """
    pfd = profile.timer("transverse flux addition")
    pfd.begin()
    # U_xl[i,j,:] = U_xl[i,j,:] - 0.5*dt/dy * (F_y[i-1,j+1,:] - F_y[i-1,j,:])
    U_xl[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
        - 0.5*dt/myg.dy * (F_y[myg.ilo-3:myg.ihi+1,myg.jlo-1:myg.jhi+3,:] - \
                           F_y[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2,:])
    # U_xr[i,j,:] = U_xr[i,j,:] - 0.5*dt/dy * (F_y[i,j+1,:] - F_y[i,j,:])
    U_xr[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
        - 0.5*dt/myg.dy * (F_y[myg.ilo-2:myg.ihi+2,myg.jlo-1:myg.jhi+3,:] - \
                           F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:])
    # U_yl[i,j,:] = U_yl[i,j,:] - 0.5*dt/dx * (F_x[i+1,j-1,:] - F_x[i,j-1,:])
    U_yl[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
        - 0.5*dt/myg.dx * (F_x[myg.ilo-1:myg.ihi+3,myg.jlo-3:myg.jhi+1,:] - \
                           F_x[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1,:])
    # U_yr[i,j,:] = U_yr[i,j,:] - 0.5*dt/dx * (F_x[i+1,j,:] - F_x[i,j,:])
    U_yr[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:] += \
        - 0.5*dt/myg.dx * (F_x[myg.ilo-1:myg.ihi+3,myg.jlo-2:myg.jhi+2,:] - \
                           F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,:])
    pfd.end()
    #=========================================================================
    # construct the fluxes normal to the interfaces
    #=========================================================================
    # up until now, F_x and F_y stored the transverse fluxes, now we
    # overwrite with the fluxes normal to the interfaces
    pfc.begin()
    F_x = riemannFunc(1, myg.qx, myg.qy, myg.ng,
                      vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
                      gamma, U_xl, U_xr)
    F_y = riemannFunc(2, myg.qx, myg.qy, myg.ng,
                      vars.nvar, vars.idens, vars.ixmom, vars.iymom, vars.iener,
                      gamma, U_yl, U_yr)
    pfc.end()
    #=========================================================================
    # apply artificial viscosity
    #=========================================================================
    cvisc = runparams.getParam("compressible.cvisc")
    (avisco_x, avisco_y) = interface_f.artificial_viscosity( \
        myg.qx, myg.qy, myg.ng, myg.dx, myg.dy, \
        cvisc, u, v)
    # F_x = F_x + avisco_x * (U(i-1,j) - U(i,j))
    F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.idens] += \
        avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (dens[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
         dens[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.ixmom] += \
        avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (xmom[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
         xmom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iymom] += \
        avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (ymom[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
         ymom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    F_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iener] += \
        avisco_x[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (ener[myg.ilo-3:myg.ihi+1,myg.jlo-2:myg.jhi+2] - \
         ener[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    # F_y = F_y + avisco_y * (U(i,j-1) - U(i,j))
    F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.idens] += \
        avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (dens[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
         dens[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.ixmom] += \
        avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (xmom[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
         xmom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iymom] += \
        avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (ymom[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
         ymom[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    F_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2,vars.iener] += \
        avisco_y[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2]* \
        (ener[myg.ilo-2:myg.ihi+2,myg.jlo-3:myg.jhi+1] - \
         ener[myg.ilo-2:myg.ihi+2,myg.jlo-2:myg.jhi+2])
    pf.end()
    return F_x, F_y
|
"""Support for the Locative platform."""
import logging
from homeassistant.core import callback
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import (
DeviceTrackerEntity
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN as LT_DOMAIN, TRACKER_UPDATE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
    """Configure a dispatcher connection based on a config entry."""

    @callback
    def _receive_data(device, location, location_name):
        """Add a new entity the first time a device reports a location."""
        known_devices = hass.data[LT_DOMAIN]['devices']
        if device in known_devices:
            # Updates for already-known devices are handled by the
            # entity's own dispatcher subscription.
            return
        known_devices.add(device)
        entity = LocativeEntity(device, location, location_name)
        async_add_entities([entity])

    unsub = async_dispatcher_connect(hass, TRACKER_UPDATE, _receive_data)
    hass.data[LT_DOMAIN]['unsub_device_tracker'][entry.entry_id] = unsub
    return True
class LocativeEntity(DeviceTrackerEntity):
    """A device tracked via the Locative platform."""

    def __init__(self, device, location, location_name):
        """Set up Locative entity."""
        self._name = device
        self._location = location
        self._location_name = location_name
        self._unsub_dispatcher = None

    @property
    def name(self):
        """Name of the tracked device."""
        return self._name

    @property
    def should_poll(self):
        """Updates are pushed via the dispatcher; no polling needed."""
        return False

    @property
    def source_type(self):
        """Source type of the device (GPS)."""
        return SOURCE_TYPE_GPS

    @property
    def latitude(self):
        """Latitude of the device's last reported location."""
        return self._location[0]

    @property
    def longitude(self):
        """Longitude of the device's last reported location."""
        return self._location[1]

    @property
    def location_name(self):
        """Name of the device's current location, if any."""
        return self._location_name

    async def async_added_to_hass(self):
        """Subscribe to location updates once the entity is registered."""
        self._unsub_dispatcher = async_dispatcher_connect(
            self.hass, TRACKER_UPDATE, self._async_receive_data)

    async def async_will_remove_from_hass(self):
        """Unsubscribe from location updates before removal."""
        self._unsub_dispatcher()

    @callback
    def _async_receive_data(self, device, location, location_name):
        """Store the newly reported location and push the state."""
        self._location = location
        self._location_name = location_name
        self.async_write_ha_state()
|
# -*- coding: utf-8 -*-
# Copyright 2016 OpenSynergy Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from .base import BaseCase
class HrExpensePayableAccount(BaseCase):
    """Tests for the payable account chosen by ``onchange_account_id``.

    Each test exercises a different combination of journal / employee
    configuration and checks which account ends up in ``account_id``.
    """

    # Check value account_id
    # Condition :
    # journal_id == True
    # default_credit_account == True
    # home_address == True
    # property_account_payable == True
    def test_onchange_journal_id_1(self):
        """Journal has a default credit account: that account is chosen."""
        with self.env.do_in_onchange():
            new = self.obj_hr_expense.new()
            new.journal_id = self.journal_1.id
            new.employee_id = self.employee_1.id
            new.onchange_account_id()
            self.assertEqual(
                self.journal_1.default_credit_account_id.id,
                new.account_id.id)

    # Check value account_id
    # Condition :
    # journal_id == True
    # default_credit_account == False
    # home_address == True
    # property_account_payable == True
    def test_onchange_journal_id_2(self):
        """Journal without a default credit account: fall back to the
        account returned by ``_get_partner_account`` (presumably the
        employee partner's payable account -- see class comments)."""
        with self.env.do_in_onchange():
            new = self.obj_hr_expense.new()
            new.journal_id = self.journal_2.id
            new.employee_id = self.employee_1.id
            property_account_payable =\
                new._get_partner_account()
            new.onchange_account_id()
            self.assertEqual(
                property_account_payable,
                new.account_id.id)

    # Check value account_id
    # Condition :
    # journal_id == True
    # default_credit_account == False
    # home_address == False
    # property_account_payable == False
    def test_onchange_journal_id_3(self):
        """No default credit account and no fallback available:
        ``account_id`` stays empty (False)."""
        with self.env.do_in_onchange():
            new = self.obj_hr_expense.new()
            new.journal_id = self.journal_2.id
            new.employee_id = self.employee_2.id
            new.onchange_account_id()
            self.assertEqual(
                False,
                new.account_id.id)

    # Check value account_id
    # Condition :
    # journal_id == False
    # default_credit_account == False
    # home_address == True
    # property_account_payable == True
    def test_onchange_journal_id_4(self):
        """No journal at all: use the ``_get_partner_account`` result."""
        with self.env.do_in_onchange():
            new = self.obj_hr_expense.new()
            new.journal_id = False
            new.employee_id = self.employee_1.id
            property_account_payable =\
                new._get_partner_account()
            new.onchange_account_id()
            self.assertEqual(
                property_account_payable,
                new.account_id.id)
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as _plt
class DraggableLegend:
    """Make a matplotlib legend interactively draggable.

    Mouse actions on the legend:
      * left-click + drag -- move the legend
      * right-click       -- toggle the legend's visibility
      * scroll up / down  -- grow / shrink the legend font
      * middle-click      -- deliberately a no-op

    NOTE(review): scroll handling compares ``button`` against the strings
    'up'/'down'; confirm this matches the matplotlib version in use.
    """

    def __init__(self, legend):
        """Connect the event handlers to the canvas of *legend*'s figure."""
        self.legend = legend
        self.gotLegend = False  # True while a drag is in progress
        legend.figure.canvas.mpl_connect('motion_notify_event', self.on_motion)
        legend.figure.canvas.mpl_connect('pick_event', self.on_picker)
        legend.figure.canvas.mpl_connect('button_release_event', self.on_release)
        legend.set_picker(self.my_legend_picker)

    #----------------------------------------------------#
    # Connected event handlers
    def on_motion(self, event):
        """While a drag is active, move the legend to follow the mouse."""
        if not self.gotLegend:
            return
        dx = event.x - self.mouse_x
        dy = event.y - self.mouse_y
        loc_in_canvas = self.legend_x + dx, self.legend_y + dy
        # Convert canvas (pixel) coordinates to normalized axes
        # coordinates, which is what Legend._loc is set to below.
        loc_in_norm_axes = self.legend.parent.transAxes.inverted().transform_point(loc_in_canvas)
        self.legend._loc = tuple(loc_in_norm_axes)
        self.legend.figure.canvas.draw()

    def my_legend_picker(self, legend, event):
        """Custom picker: hit-test against the legend's background patch."""
        return self.legend.legendPatch.contains(event)

    def on_picker(self, event):
        """Dispatch a pick on the legend to the matching mouse action."""
        if event.artist != self.legend:
            return
        button = event.mouseevent.button
        # The buttons are mutually exclusive per event, so dispatch
        # with an elif chain instead of independent ifs.
        if button == 1:          # left-click: start dragging
            self._move_legend(event)
        elif button == 3:        # right-click: toggle visibility
            self._hideLegend()
        elif button == 'up':     # scroll up: larger font
            self._scaleUpLegendFont()
        elif button == 'down':   # scroll down: smaller font
            self._scaleDownLegendFont()
        # button == 2 (middle-click) intentionally does nothing

    def on_release(self, event):
        """End any drag in progress when a mouse button is released."""
        if self.gotLegend:
            self.gotLegend = False

    #----------------------------------------------------#
    # Utility functions
    def _move_legend(self, event):
        """Record the drag start point (mouse position and legend corner)."""
        bbox = self.legend.get_window_extent()
        self.mouse_x = event.mouseevent.x
        self.mouse_y = event.mouseevent.y
        self.legend_x = bbox.xmin
        self.legend_y = bbox.ymin
        self.gotLegend = True  # was "= 1"; keep it a real bool for consistency

    def _scaleUpLegendFont(self, size_step=4):
        """Increase the legend font size by *size_step* points."""
        size = self.legend.get_texts()[0].get_fontsize() + size_step
        _plt.setp(self.legend.get_texts(), fontsize=size)  # legend 'list' fontsize
        self.legend.figure.canvas.draw()

    def _scaleDownLegendFont(self, size_step=4):
        """Decrease the legend font size by *size_step* points."""
        size = self.legend.get_texts()[0].get_fontsize() - size_step
        _plt.setp(self.legend.get_texts(), fontsize=size)  # legend 'list' fontsize
        self.legend.figure.canvas.draw()

    def _hideLegend(self):
        """Toggle the legend's visibility and redraw."""
        self.legend.set_visible(not self.legend.get_visible())
        self.legend.figure.canvas.draw()
def _demo():
    """Show a demo scatter plot whose legend is draggable."""
    figure = _plt.figure()
    ax = figure.add_subplot(111)
    ax.scatter(np.random.randn(100), np.random.randn(100), label='hi')
    # Keep a reference for the lifetime of show(): matplotlib holds only
    # weak references to bound-method callbacks, so the DraggableLegend
    # instance must stay alive -- TODO confirm for the mpl version in use.
    draggable = DraggableLegend(ax.legend())
    _plt.show()


# Guard the demo so importing this module does not open a plot window.
if __name__ == "__main__":
    _demo()
|
# import_export_batches/controllers_batch_process.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import create_batch_row_actions, import_data_from_batch_row_actions
from .models import ACTIVITY_NOTICE_PROCESS, API_REFRESH_REQUEST, \
AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT, \
BatchDescription, BatchManager, BatchProcessManager, \
CALCULATE_ORGANIZATION_DAILY_METRICS, \
CALCULATE_ORGANIZATION_ELECTION_METRICS, \
CALCULATE_SITEWIDE_DAILY_METRICS, \
CALCULATE_SITEWIDE_ELECTION_METRICS, \
CALCULATE_SITEWIDE_VOTER_METRICS, \
IMPORT_CREATE, IMPORT_DELETE, \
RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, \
REFRESH_BALLOT_ITEMS_FROM_VOTERS, SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE
from activity.controllers import process_activity_notice_seeds_triggered_by_batch_process
from analytics.controllers import calculate_sitewide_daily_metrics, \
process_one_analytics_batch_process_augment_with_election_id, \
process_one_analytics_batch_process_augment_with_first_visit, process_sitewide_voter_metrics, \
retrieve_analytics_processing_next_step
from analytics.models import AnalyticsManager
from api_internal_cache.models import ApiInternalCacheManager
from ballot.models import BallotReturnedListManager
from datetime import timedelta
from django.utils.timezone import now
from election.models import ElectionManager
from exception.models import handle_exception
from import_export_twitter.controllers import fetch_number_of_candidates_needing_twitter_search, \
retrieve_possible_twitter_handles_in_bulk
from issue.controllers import update_issue_statistics
import json
from voter_guide.controllers import voter_guides_upcoming_retrieve_for_api
import wevote_functions.admin
from wevote_functions.functions import positive_value_exists
from wevote_settings.models import fetch_batch_process_system_on, fetch_batch_process_system_activity_notices_on, \
fetch_batch_process_system_api_refresh_on, fetch_batch_process_system_ballot_items_on, \
fetch_batch_process_system_calculate_analytics_on, fetch_batch_process_system_search_twitter_on
logger = wevote_functions.admin.get_logger(__name__)
# Kind-of-data identifiers for batches. Not referenced in this chunk;
# presumably consumed by the batch import code elsewhere -- TODO confirm.
CANDIDATE = 'CANDIDATE'
CONTEST_OFFICE = 'CONTEST_OFFICE'
ELECTED_OFFICE = 'ELECTED_OFFICE'
IMPORT_BALLOT_ITEM = 'IMPORT_BALLOT_ITEM'
IMPORT_VOTER = 'IMPORT_VOTER'
MEASURE = 'MEASURE'
POLITICIAN = 'POLITICIAN'

# Note that as of Sept 2020 we are running 6 API servers. Each API server can be running up to
# 7 processes simultaneously. Since each new batch processes could be started on any of these 6 servers,
# in the worst case, all of these NUMBER_OF_SIMULTANEOUS_BATCH_PROCESSES processes could get bunched up
# on only one server. Since incoming API calls might get routed to the API server with the bunched up processes,
# we could see voter-driven API calls rejected. That is why we keep the NUMBER_OF_SIMULTANEOUS_BATCH_PROCESSES
# relatively low.
NUMBER_OF_SIMULTANEOUS_BATCH_PROCESSES = 4  # Four processes at a time
NUMBER_OF_SIMULTANEOUS_BALLOT_ITEM_BATCH_PROCESSES = 4  # Four processes at a time
NUMBER_OF_SIMULTANEOUS_GENERAL_MAINTENANCE_BATCH_PROCESSES = 1  # Maintenance work runs one at a time
# WE HAVE DEPRECATED THIS WHOLE FUNCTION
# def batch_process_next_steps():
# success = True
# status = ""
# batch_process_manager = BatchProcessManager()
#
# # If we have more than NUMBER_OF_SIMULTANEOUS_BATCH_PROCESSES batch_processes that are still active,
# # don't start a new import ballot item batch_process
# total_active_batch_processes = batch_process_manager.count_active_batch_processes()
# status += "TOTAL_ACTIVE_BATCH_PROCESSES: " + str(total_active_batch_processes) + ", "
#
# total_checked_out_batch_processes = batch_process_manager.count_checked_out_batch_processes()
# status += "CHECKED_OUT_BATCH_PROCESSES: " + str(total_checked_out_batch_processes) + ", "
#
# if not fetch_batch_process_system_on():
# status += "BATCH_PROCESS_SYSTEM_TURNED_OFF "
# results = {
# 'success': success,
# 'status': status,
# }
# return results
#
# # Retrieve list of active BatchProcess
# results = batch_process_manager.retrieve_batch_process_list(process_active=True, process_queued=False)
# if not positive_value_exists(results['success']):
# success = False
# batch_process_manager.create_batch_process_log_entry(
# critical_failure=True,
# status=results['status'],
# )
# status += results['status']
# results = {
# 'success': success,
# 'status': status,
# }
# return results
#
# # We only want to process one batch_process at a time. The next time this script runs, the next one will be
# # picked up and processed.
# batch_process_list = []
# batch_process_list_count = 0
# if positive_value_exists(results['batch_process_list_found']):
# full_batch_process_list = results['batch_process_list']
# # How many processes currently running?
# batch_process_list_count = len(full_batch_process_list)
# if positive_value_exists(batch_process_list_count):
# batch_process_list.append(full_batch_process_list[0])
# status += "BATCH_PROCESS_COUNT: " + str(batch_process_list_count) + ", "
#
# # ############################
# # Are there any Api's that need to have their internal cache updated?
# api_internal_cache_manager = ApiInternalCacheManager()
# results = api_internal_cache_manager.retrieve_next_api_refresh_request()
# if positive_value_exists(results['api_refresh_request_found']):
# api_refresh_request = results['api_refresh_request']
# results = batch_process_manager.create_batch_process(
# kind_of_process=API_REFRESH_REQUEST,
# api_name=api_refresh_request.api_name,
# election_id_list_serialized=api_refresh_request.election_id_list_serialized)
# status += results['status']
# success = results['success']
# if results['batch_process_saved']:
# # Increase these counters so the code below can react correctly
# batch_process_list_count += 1
# total_active_batch_processes += 1
# batch_process = results['batch_process']
# batch_process_list.append(batch_process)
# status += "SCHEDULED_API_REFRESH_REQUEST "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=batch_process.id,
# kind_of_process=batch_process.kind_of_process,
# status=status,
# )
#
# # Now mark api_refresh_request as checked out
# try:
# api_refresh_request.date_checked_out = now()
# api_refresh_request.save()
# except Exception as e:
# status += "COULD_NOT_MARK_API_REFRESH_REQUEST_WITH_DATE_CHECKED_OUT " + str(e) + " "
# else:
# status += "FAILED_TO_SCHEDULE-" + str(API_REFRESH_REQUEST) + " "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=0,
# kind_of_process=API_REFRESH_REQUEST,
# status=status,
# )
#
# # ############################
# # Twitter Search - Turned off temporarily
# # number_of_candidates_to_analyze = fetch_number_of_candidates_needing_twitter_search()
# # if positive_value_exists(number_of_candidates_to_analyze):
# # results = batch_process_manager.create_batch_process(
# # kind_of_process=SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE)
# # status += results['status']
# # success = results['success']
# # if results['batch_process_saved']:
# # batch_process = results['batch_process']
# # batch_process_list.append(batch_process)
# # status += "SCHEDULED_SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE "
# # batch_process_manager.create_batch_process_log_entry(
# # batch_process_id=batch_process.id,
# # kind_of_process=batch_process.kind_of_process,
# # status=status,
# # )
# # else:
# # status += "FAILED_TO_SCHEDULE-" + str(SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE) + " "
# # batch_process_manager.create_batch_process_log_entry(
# # batch_process_id=0,
# # kind_of_process=SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE,
# # status=status,
# # )
#
# # ############################
# # Processing Analytics
# analytics_process_is_currently_running = batch_process_manager.is_analytics_process_currently_running()
# if not analytics_process_is_currently_running:
# analytics_processing_status = retrieve_analytics_processing_next_step()
# kind_of_process = None
# analytics_date_as_integer = 0
# status += analytics_processing_status['status']
# if not analytics_processing_status['success']:
# status += "FAILURE_TRYING_TO_RETRIEVE_ANALYTICS_PROCESSING_NEXT_STEP "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=0,
# kind_of_process=kind_of_process,
# status=status,
# )
# elif analytics_processing_status['analytics_processing_status_found']:
# analytics_date_as_integer = analytics_processing_status['analytics_date_as_integer']
# if analytics_processing_status['augment_analytics_action_with_election_id']:
# kind_of_process = AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID
# elif analytics_processing_status['augment_analytics_action_with_first_visit']:
# kind_of_process = AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT
# elif analytics_processing_status['calculate_sitewide_voter_metrics']:
# kind_of_process = CALCULATE_SITEWIDE_VOTER_METRICS
# elif analytics_processing_status['calculate_sitewide_daily_metrics']:
# kind_of_process = CALCULATE_SITEWIDE_DAILY_METRICS
# elif analytics_processing_status['calculate_sitewide_election_metrics']:
# kind_of_process = CALCULATE_SITEWIDE_ELECTION_METRICS
# elif analytics_processing_status['calculate_organization_daily_metrics']:
# kind_of_process = CALCULATE_ORGANIZATION_DAILY_METRICS
# elif analytics_processing_status['calculate_organization_election_metrics']:
# kind_of_process = CALCULATE_ORGANIZATION_ELECTION_METRICS
# if kind_of_process:
# results = batch_process_manager.create_batch_process(
# kind_of_process=kind_of_process,
# analytics_date_as_integer=analytics_date_as_integer)
# status += results['status']
# success = results['success']
# if results['batch_process_saved']:
# batch_process = results['batch_process']
# try:
# batch_process.date_started = now()
# batch_process.save()
# batch_process_list.append(batch_process)
# status += "SCHEDULED_PROCESS: " + str(kind_of_process) + " "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=batch_process.id,
# kind_of_process=batch_process.kind_of_process,
# status=status,
# )
# except Exception as e:
# status += "BATCH_PROCESS_ANALYTICS-CANNOT_SAVE_DATE_STARTED " + str(e) + " "
# handle_exception(e, logger=logger, exception_message=status)
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=batch_process.id,
# kind_of_process=kind_of_process,
# status=status,
# )
# else:
# status += "FAILED_TO_SCHEDULE-" + str(kind_of_process) + " "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=0,
# kind_of_process=kind_of_process,
# status=status,
# )
#
# # ##################################
# # Generate or Update ActivityNotice entries from ActivityNoticeSeed entries
# activity_notice_process_is_currently_running = \
# batch_process_manager.is_activity_notice_process_currently_running()
# if not activity_notice_process_is_currently_running:
# results = batch_process_manager.create_batch_process(kind_of_process=ACTIVITY_NOTICE_PROCESS)
# status += results['status']
# success = results['success']
# if results['batch_process_saved']:
# batch_process = results['batch_process']
# batch_process_list.insert(0, batch_process) # Put at the start of the list
# status += "SCHEDULED_ACTIVITY_NOTICE_PROCESS "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=batch_process.id,
# kind_of_process=batch_process.kind_of_process,
# status=status,
# )
# else:
# status += "FAILED_TO_SCHEDULE-" + str(ACTIVITY_NOTICE_PROCESS) + " "
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=0,
# kind_of_process=ACTIVITY_NOTICE_PROCESS,
# status=status,
# )
#
# # ############################
# # Processing Ballot Items
# # If less than NUMBER_OF_SIMULTANEOUS_BATCH_PROCESSES total active processes,
# # and we aren't working on a current process chunk,
# # then add a new batch_process (importing ballot items) to the current queue
# status += "TOTAL_ACTIVE_BATCH_PROCESSES-BEFORE_RETRIEVE: " + str(total_active_batch_processes) + " "
# if total_active_batch_processes < NUMBER_OF_SIMULTANEOUS_BATCH_PROCESSES:
# results = batch_process_manager.retrieve_batch_process_list(process_active=False, process_queued=True)
# if not positive_value_exists(results['success']):
# success = False
# batch_process_manager.create_batch_process_log_entry(
# critical_failure=True,
# status=results['status'],
# )
# status += results['status']
# results = {
# 'success': success,
# 'status': status,
# }
# return results
#
# if positive_value_exists(results['batch_process_list_found']):
# new_batch_process_list = results['batch_process_list']
# new_batch_process_list_count = len(new_batch_process_list)
# status += "NEW_BATCH_PROCESS_LIST_COUNT: " + str(new_batch_process_list_count) + ", ADDING ONE "
# for new_batch in new_batch_process_list:
# # Bring the batch_process_list up by 1 item
# kind_of_process = ""
# try:
# kind_of_process = new_batch.kind_of_process
# new_batch.date_started = now()
# new_batch.save()
# batch_process_list.append(new_batch)
# total_active_batch_processes += 1
# except Exception as e:
# status += "BATCH_PROCESS-CANNOT_SAVE_DATE_STARTED " + str(e) + " "
# handle_exception(e, logger=logger, exception_message=status)
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=new_batch.id,
# kind_of_process=kind_of_process,
# status=status,
# )
# break
# status += "TOTAL_ACTIVE_BATCH_PROCESSES_BEFORE_PROCESS_LOOP: " + str(total_active_batch_processes) + " "
#
# for batch_process in batch_process_list:
# if batch_process.kind_of_process in \
# [REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
# RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]:
# results = process_one_ballot_item_batch_process(batch_process)
# status += results['status']
#
# # When a batch_process is running, we mark when it was "taken off the shelf" to be worked on.
# # When the process is complete, we should reset this to "NULL"
# try:
# # Before saving batch_process, make sure we have the latest version. (For example, it might have been
# # paused since it was first retrieved.)
# batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
# if positive_value_exists(batch_process_results['batch_process_found']):
# batch_process = batch_process_results['batch_process']
#
# batch_process.date_checked_out = None
# batch_process.save()
# except Exception as e:
# status += "COULD_NOT_SET_CHECKED_OUT_TIME_TO_NULL " + str(e) + " "
# handle_exception(e, logger=logger, exception_message=status)
# batch_process_manager.create_batch_process_log_entry(
# batch_process_id=batch_process.id,
# google_civic_election_id=batch_process.google_civic_election_id,
# kind_of_process=batch_process.kind_of_process,
# state_code=batch_process.state_code,
# status=status,
# )
# elif batch_process.kind_of_process in [ACTIVITY_NOTICE_PROCESS]:
# results = process_activity_notice_batch_process(batch_process)
# status += results['status']
# elif batch_process.kind_of_process in [API_REFRESH_REQUEST]:
# results = process_one_api_refresh_request_batch_process(batch_process)
# status += results['status']
# elif batch_process.kind_of_process in [
# AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
# CALCULATE_SITEWIDE_VOTER_METRICS,
# CALCULATE_SITEWIDE_ELECTION_METRICS, CALCULATE_ORGANIZATION_DAILY_METRICS,
# CALCULATE_ORGANIZATION_ELECTION_METRICS]:
# results = process_one_analytics_batch_process(batch_process)
# status += results['status']
# elif batch_process.kind_of_process in [CALCULATE_SITEWIDE_DAILY_METRICS]:
# results = process_one_sitewide_daily_analytics_batch_process(batch_process)
# status += results['status']
# elif batch_process.kind_of_process in [SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE]: # Turned off
# # results = process_one_search_twitter_batch_process(batch_process)
# # status += results['status']
# status += "SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE-TURNED_OFF "
# else:
# status += "KIND_OF_PROCESS_NOT_RECOGNIZED "
#
# results = {
# 'success': success,
# 'status': status,
# }
# return results
def process_next_activity_notices():
    """Schedule and run the next ACTIVITY_NOTICE_PROCESS batch, if any.

    Flow: exit early when the batch-process system (or the
    activity-notice subsystem) is switched off; otherwise make sure an
    ACTIVITY_NOTICE_PROCESS is queued (creating one only when none is
    queued or currently running), then re-retrieve the queue and run
    the first batch found.

    :return: dict with 'success' (bool) and the accumulated 'status' (str)
    """
    success = True
    status = ""
    batch_process_manager = BatchProcessManager()
    kind_of_process_list = [ACTIVITY_NOTICE_PROCESS]
    # DALE 2020-10-15 This just creates load on the database
    # # Count all processes (checked out or not)
    # results = batch_process_manager.count_next_steps(
    #     kind_of_process_list=kind_of_process_list,
    #     is_in_upcoming_queue=True)
    # if not results['success']:
    #     # Exit out -- we have database problem
    #     status += "PROBLEM_COUNTING_NEXT_STEPS-IN_UPCOMING_QUEUE: "
    #     status += results['status']
    #     batch_process_manager.create_batch_process_log_entry(
    #         critical_failure=True,
    #         status=status,
    #     )
    #     results = {
    #         'success': success,
    #         'status': status,
    #     }
    #     return results
    # total_active_batch_processes = results['batch_process_count']
    # status += "BATCH_PROCESSES_IN_UPCOMING_QUEUE: " + str(total_active_batch_processes) + ", "
    # DALE 2020-10-15 This just creates load on the database
    # results = batch_process_manager.count_next_steps(
    #     kind_of_process_list=kind_of_process_list,
    #     is_checked_out=True)
    # if not results['success']:
    #     # Exit out -- we have database problem
    #     status += "PROBLEM_COUNTING_NEXT_STEPS_FOR_COMPLETED: "
    #     status += results['status']
    #     batch_process_manager.create_batch_process_log_entry(
    #         critical_failure=True,
    #         status=status,
    #     )
    #     results = {
    #         'success': success,
    #         'status': status,
    #     }
    #     return results
    # total_checked_out_batch_processes = results['batch_process_count']
    # status += "CHECKED_OUT_BATCH_PROCESSES: " + str(total_checked_out_batch_processes) + ", "
    if not fetch_batch_process_system_on():
        status += "BATCH_PROCESS_SYSTEM_TURNED_OFF-ACTIVITY_NOTICES "
        results = {
            'success': success,
            'status': status,
        }
        return results
    if not fetch_batch_process_system_activity_notices_on():
        status += "BATCH_PROCESS_SYSTEM_ACTIVITY_NOTICES_TURNED_OFF "
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Retrieve list of all active ActivityNotice BatchProcess so we can decide what new batches to schedule
    # NOTE: We do not run directly from this list below
    results = batch_process_manager.retrieve_batch_process_list(
        kind_of_process_list=kind_of_process_list,
        process_needs_to_be_run=True,
        for_upcoming_elections=False)
    if not positive_value_exists(results['success']):
        success = False
        batch_process_manager.create_batch_process_log_entry(
            critical_failure=True,
            status=results['status'],
        )
        status += results['status']
        results = {
            'success': success,
            'status': status,
        }
        return results
    # We only want to process one batch_process at a time. The next time this script runs, the next one will be
    # picked up and processed.
    batch_process_list_for_analysis = []
    if positive_value_exists(results['batch_process_list_found']):
        batch_process_list_for_analysis = results['batch_process_list']
        status += "BATCH_PROCESS_LIST_QUEUED_AT_START_COUNT: " + str(len(batch_process_list_for_analysis)) + ", "
    # ##################################
    # Generate or Update ActivityNotice entries from ActivityNoticeSeed entries
    # We only want one ActivityNotice process to be queued at a time
    # Check to see if one of the existing batches is an ActivityNotice process. If so, skip creating a new one.
    activity_notice_process_is_already_in_queue = False
    for batch_process in batch_process_list_for_analysis:
        if batch_process.kind_of_process in kind_of_process_list:
            activity_notice_process_is_already_in_queue = True
    if not activity_notice_process_is_already_in_queue:
        activity_notice_process_is_currently_running = \
            batch_process_manager.is_activity_notice_process_currently_running()
        if not activity_notice_process_is_currently_running:
            results = batch_process_manager.create_batch_process(
                kind_of_process=ACTIVITY_NOTICE_PROCESS)
            status += results['status']
            success = results['success']
            if results['batch_process_saved']:
                batch_process = results['batch_process']
                status += "SCHEDULED_ACTIVITY_NOTICE_PROCESS "
                batch_process_manager.create_batch_process_log_entry(
                    batch_process_id=batch_process.id,
                    kind_of_process=batch_process.kind_of_process,
                    status=status,
                )
            else:
                status += "FAILED_TO_SCHEDULE-" + str(ACTIVITY_NOTICE_PROCESS) + " "
                batch_process_manager.create_batch_process_log_entry(
                    batch_process_id=0,
                    kind_of_process=ACTIVITY_NOTICE_PROCESS,
                    status=status,
                )
    # Finally, retrieve the ActivityNotice BatchProcess to run, and only use the first one returned
    results = batch_process_manager.retrieve_batch_process_list(
        kind_of_process_list=kind_of_process_list,
        process_needs_to_be_run=True,
        for_upcoming_elections=False)
    if not positive_value_exists(results['success']):
        success = False
        batch_process_manager.create_batch_process_log_entry(
            critical_failure=True,
            status=results['status'],
        )
        status += results['status']
        results = {
            'success': success,
            'status': status,
        }
        return results
    batch_process = None
    batch_process_found = False
    batch_process_full_list = []
    if positive_value_exists(results['batch_process_list_found']):
        batch_process_found = True
        batch_process_full_list = results['batch_process_list']
        # Only use the first one
        batch_process = batch_process_full_list[0]
        status += "BATCH_PROCESS_LIST_NEEDS_TO_BE_RUN_COUNT: " + str(len(batch_process_full_list)) + ", "
    # We should only run one per minute
    if batch_process_found:
        if batch_process.kind_of_process in [ACTIVITY_NOTICE_PROCESS]:
            results = process_activity_notice_batch_process(batch_process)
            status += results['status']
        else:
            status += "KIND_OF_PROCESS_NOT_RECOGNIZED "
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_next_ballot_items():
    """Restart or schedule ballot-item import/refresh batch processes.

    Flow: exit early when the batch-process system (or the ballot-item
    subsystem) is switched off. First restart any already-started ballot
    item batches that need to be worked on; if any were restarted, exit.
    Otherwise, when fewer than
    NUMBER_OF_SIMULTANEOUS_BALLOT_ITEM_BATCH_PROCESSES are running, pull
    one queued batch off the queue, mark it started, and process it.

    :return: dict with 'success' (bool) and the accumulated 'status' (str)
    """
    success = True
    status = ""
    if not fetch_batch_process_system_on():
        status += "BATCH_PROCESS_SYSTEM_TURNED_OFF-BALLOT_ITEMS "
        results = {
            'success': success,
            'status': status,
        }
        return results
    if not fetch_batch_process_system_ballot_items_on():
        status += "BATCH_PROCESS_SYSTEM_BALLOT_ITEMS_TURNED_OFF "
        results = {
            'success': success,
            'status': status,
        }
        return results
    batch_process_manager = BatchProcessManager()
    # If we have more than NUMBER_OF_SIMULTANEOUS_BALLOT_ITEM_BATCH_PROCESSES batch_processes that are still active,
    # don't start a new import ballot item batch_process
    kind_of_process_list = [
        REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS,
        REFRESH_BALLOT_ITEMS_FROM_VOTERS,
        RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]
    # Retrieve list of all ballot item BatchProcesses which have been started but not completed so we can decide
    # our next steps
    results = batch_process_manager.retrieve_batch_process_list(
        kind_of_process_list=kind_of_process_list,
        process_active=True,
        for_upcoming_elections=True)
    if not positive_value_exists(results['success']):
        success = False
        batch_process_manager.create_batch_process_log_entry(
            critical_failure=True,
            status=results['status'],
        )
        status += results['status']
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Note this batch_process_list does not include checked out items that haven't timed out
    # These are all batch processes that need to be worked on
    batch_process_list = []
    if positive_value_exists(results['batch_process_list_found']):
        batch_process_list = results['batch_process_list']
        status += "BATCH_PROCESSES_TO_BE_RESTARTED: " + str(len(batch_process_list)) + ", "
    # If there are any started processes that are not checked out, or checked out but timed out
    process_restarted = False
    if batch_process_list:
        for batch_process in batch_process_list:
            if batch_process.kind_of_process in \
                    [REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
                     RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]:
                process_restarted = True
                # When a batch_process is running, we set date_checked_out to show it is being worked on
                results = process_one_ballot_item_batch_process(batch_process)
                status += results['status']
                # Now that the process is complete, we reset date_checked_out to "NULL"
                try:
                    # Before saving batch_process, make sure we have the latest version, since there were
                    # updates in process_one_ballot_item_batch_process
                    batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
                    if positive_value_exists(batch_process_results['batch_process_found']):
                        batch_process = batch_process_results['batch_process']
                    batch_process.date_checked_out = None
                    batch_process.save()
                except Exception as e:
                    status += "ERROR-COULD_NOT_SET_CHECKED_OUT_TIME_TO_NULL " + str(e) + " "
                    handle_exception(e, logger=logger, exception_message=status)
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=batch_process.id,
                        google_civic_election_id=batch_process.google_civic_election_id,
                        kind_of_process=batch_process.kind_of_process,
                        state_code=batch_process.state_code,
                        status=status,
                    )
            else:
                status += "KIND_OF_PROCESS_NOT_RECOGNIZED "
    # If a process was started immediately above, exit
    if process_restarted:
        status += "BATCH_PROCESS_STARTED_PREVIOUSLY_WAS_RESTARTED "
        results = {
            'success': success,
            'status': status,
        }
        return results
    # ############################
    # Processing Ballot Items
    results = batch_process_manager.count_next_steps(
        kind_of_process_list=kind_of_process_list,
        is_active=True)
    if not results['success']:
        # Exit out -- we have database problem
        status += "PROBLEM_COUNTING_BATCH_PROCESSES-RUNNING: "
        status += results['status']
        batch_process_manager.create_batch_process_log_entry(
            critical_failure=True,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    batch_processes_running_count = results['batch_process_count']
    status += "BATCH_PROCESSES_RUNNING_COUNT: " + str(batch_processes_running_count) + ", "
    # If less than NUMBER_OF_SIMULTANEOUS_BALLOT_ITEM_BATCH_PROCESSES total active processes,
    # then add a new batch_process (importing ballot items) to the current queue
    if batch_processes_running_count < NUMBER_OF_SIMULTANEOUS_BALLOT_ITEM_BATCH_PROCESSES:
        results = batch_process_manager.retrieve_batch_process_list(
            kind_of_process_list=kind_of_process_list,
            process_active=False,
            process_queued=True,
            for_upcoming_elections=True)
        if not positive_value_exists(results['success']):
            success = False
            batch_process_manager.create_batch_process_log_entry(
                critical_failure=True,
                status=results['status'],
            )
            status += results['status']
            results = {
                'success': success,
                'status': status,
            }
            return results
        if positive_value_exists(results['batch_process_list_found']):
            new_batch_process_list = results['batch_process_list']
            new_batch_process_list_count = len(new_batch_process_list)
            status += "NEW_BATCH_PROCESS_LIST_COUNT: " + str(new_batch_process_list_count) + ", ADDING ONE "
            for batch_process in new_batch_process_list:
                # Bring the batch_process_list up by 1 item
                batch_process_started = False
                kind_of_process = ""
                try:
                    kind_of_process = batch_process.kind_of_process
                    batch_process.date_started = now()
                    batch_process.save()
                    batch_process_started = True
                except Exception as e:
                    status += "ERROR-BATCH_PROCESS-CANNOT_SAVE_DATE_STARTED " + str(e) + " "
                    handle_exception(e, logger=logger, exception_message=status)
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=batch_process.id,
                        kind_of_process=kind_of_process,
                        status=status,
                    )
                if batch_process_started:
                    if batch_process.kind_of_process in \
                            [REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
                             RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]:
                        # Now process the batch
                        results = process_one_ballot_item_batch_process(batch_process)
                        status += results['status']
                        # Before a batch_process runs, we set `date_checked_out`, like you check out a library book
                        # When the process is complete, we reset `date_checked_out` to "NULL"
                        try:
                            # Before saving batch_process, make sure we have the latest version.
                            # (For example, it might have been paused since it was first retrieved.)
                            batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
                            if positive_value_exists(batch_process_results['batch_process_found']):
                                batch_process = batch_process_results['batch_process']
                            batch_process.date_checked_out = None
                            batch_process.save()
                        except Exception as e:
                            status += "ERROR-COULD_NOT_SET_CHECKED_OUT_TIME_TO_NULL " + str(e) + " "
                            handle_exception(e, logger=logger, exception_message=status)
                            batch_process_manager.create_batch_process_log_entry(
                                batch_process_id=batch_process.id,
                                google_civic_election_id=batch_process.google_civic_election_id,
                                kind_of_process=batch_process.kind_of_process,
                                state_code=batch_process.state_code,
                                status=status,
                            )
                    else:
                        status += "KIND_OF_PROCESS_NOT_RECOGNIZED "
                # Only start one new batch per invocation
                break
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_next_general_maintenance():
    """
    Schedule, then run, the next "General Maintenance" BatchProcess.

    General maintenance covers three families of work, each gated by its own
    on/off switch:
      * API internal-cache refreshes (API_REFRESH_REQUEST)
      * Twitter searches for candidate handles (SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE)
      * Analytics calculations (AUGMENT_* / CALCULATE_* kinds of process)

    For each family this function first makes sure at most one process of that
    family is in the upcoming queue (creating a new BatchProcess row where
    needed), then re-retrieves the queue and executes ONLY the first
    BatchProcess returned.  The expectation is that this function is invoked
    repeatedly (roughly once per minute) so the queue drains over time.

    :return: dict with 'success' (bool) and 'status' (accumulated log string)
    """
    success = True
    status = ""
    batch_process_manager = BatchProcessManager()
    # Only include the processes if the process system is turned on
    # Build the list of kinds of process this run is allowed to consider,
    # based on which subsystem switches are currently on.
    kind_of_process_list = []
    if fetch_batch_process_system_api_refresh_on():
        api_refresh_process_list = [API_REFRESH_REQUEST]
        kind_of_process_list = kind_of_process_list + api_refresh_process_list
    if fetch_batch_process_system_calculate_analytics_on():
        analytics_process_list = [
            AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID,
            AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
            CALCULATE_SITEWIDE_VOTER_METRICS,
            CALCULATE_SITEWIDE_DAILY_METRICS,
            CALCULATE_SITEWIDE_ELECTION_METRICS,
            CALCULATE_ORGANIZATION_DAILY_METRICS,
            CALCULATE_ORGANIZATION_ELECTION_METRICS]
        kind_of_process_list = kind_of_process_list + analytics_process_list
    if fetch_batch_process_system_search_twitter_on():
        search_twitter_process_list = [SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE]
        kind_of_process_list = kind_of_process_list + search_twitter_process_list
    # DALE 2020-10-15 This just creates load on the database
    # # Count all processes (checked out or not)
    # results = batch_process_manager.count_next_steps(
    #     kind_of_process_list=kind_of_process_list,
    #     is_in_upcoming_queue=True)
    # if not results['success']:
    #     # Exit out -- we have database problem
    #     status += "PROBLEM_COUNTING_NEXT_STEPS-IN_UPCOMING_QUEUE: "
    #     status += results['status']
    #     batch_process_manager.create_batch_process_log_entry(
    #         critical_failure=True,
    #         status=status,
    #     )
    #     results = {
    #         'success': success,
    #         'status': status,
    #     }
    #     return results
    # batch_processes_in_upcoming_queue = results['batch_process_count']
    # status += "BATCH_PROCESSES_IN_UPCOMING_QUEUE: " + str(batch_processes_in_upcoming_queue) + ", "
    # DALE 2020-10-15 This just creates load on the database
    # # Count active processes which are already checked out
    # results = batch_process_manager.count_next_steps(
    #     kind_of_process_list=kind_of_process_list,
    #     is_checked_out=True)
    # if not results['success']:
    #     # Exit out -- we have database problem
    #     status += "PROBLEM_COUNTING_NEXT_STEPS_FOR_COMPLETED: "
    #     status += results['status']
    #     batch_process_manager.create_batch_process_log_entry(
    #         critical_failure=True,
    #         status=status,
    #     )
    #     results = {
    #         'success': success,
    #         'status': status,
    #     }
    #     return results
    # total_checked_out_batch_processes = results['batch_process_count']
    # status += "CHECKED_OUT_BATCH_PROCESSES: " + str(total_checked_out_batch_processes) + ", "
    # Master switch: if the whole batch process system is off, do nothing.
    if not fetch_batch_process_system_on():
        status += "BATCH_PROCESS_SYSTEM_TURNED_OFF-GENERAL "
        results = {
            'success': success,
            'status': status,
        }
        return results
    # If every subsystem switch above was off, there is nothing to schedule.
    if not positive_value_exists(len(kind_of_process_list)):
        status += "ALL_BATCH_PROCESS_SYSTEM_KINDS_TURNED_OFF "
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Retrieve list of all active General Maintenance BatchProcess so we can decide what new batches to schedule
    # NOTE: We do not run directly from this list below
    results = batch_process_manager.retrieve_batch_process_list(
        kind_of_process_list=kind_of_process_list,
        process_needs_to_be_run=True,
        for_upcoming_elections=False)
    if not positive_value_exists(results['success']):
        # Database problem -- log as a critical failure and bail out.
        success = False
        batch_process_manager.create_batch_process_log_entry(
            critical_failure=True,
            status=results['status'],
        )
        status += results['status']
        results = {
            'success': success,
            'status': status,
        }
        return results
    batch_process_list_for_analysis = []
    if positive_value_exists(results['batch_process_list_found']):
        batch_process_list_for_analysis = results['batch_process_list']
    status += "BATCH_PROCESS_LIST_QUEUED_AT_START_COUNT: " + str(len(batch_process_list_for_analysis)) + ", "
    # ############################
    # Are there any API's that need to have their internal cache updated?
    if not fetch_batch_process_system_api_refresh_on():
        status += "BATCH_PROCESS_SYSTEM_API_REFRESH_TURNED_OFF "
    else:
        # We only want one API Refresh process to be running at a time
        # Check to see if one of the existing batches is for API Refresh. If so, skip creating a new one.
        api_refresh_process_is_already_in_queue = False
        for batch_process in batch_process_list_for_analysis:
            if batch_process.kind_of_process in [API_REFRESH_REQUEST]:
                api_refresh_process_is_already_in_queue = True
        if not api_refresh_process_is_already_in_queue:
            api_internal_cache_manager = ApiInternalCacheManager()
            results = api_internal_cache_manager.retrieve_next_api_refresh_request()
            if positive_value_exists(results['api_refresh_request_found']):
                api_refresh_request = results['api_refresh_request']
                results = batch_process_manager.create_batch_process(
                    kind_of_process=API_REFRESH_REQUEST,
                    api_name=api_refresh_request.api_name,
                    election_id_list_serialized=api_refresh_request.election_id_list_serialized)
                status += results['status']
                success = results['success']
                if results['batch_process_saved']:
                    batch_process = results['batch_process']
                    status += "SCHEDULED_API_REFRESH_REQUEST "
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=batch_process.id,
                        kind_of_process=batch_process.kind_of_process,
                        status=status,
                    )
                    # Now mark api_refresh_request as checked out
                    try:
                        api_refresh_request.date_checked_out = now()
                        api_refresh_request.save()
                    except Exception as e:
                        # NOTE(review): best-effort save -- on failure we only note it in
                        # status; the refresh request stays un-checked-out.
                        status += "ERROR-COULD_NOT_MARK_API_REFRESH_REQUEST_WITH_DATE_CHECKED_OUT " + str(e) + " "
                else:
                    status += "FAILED_TO_SCHEDULE-" + str(API_REFRESH_REQUEST) + " "
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=0,
                        kind_of_process=API_REFRESH_REQUEST,
                        status=status,
                    )
    # ############################
    # Twitter Search
    if not fetch_batch_process_system_search_twitter_on():
        status += "BATCH_PROCESS_SYSTEM_SEARCH_TWITTER_TURNED_OFF "
    else:
        # We only want one Search Twitter process to be running at a time
        # Check to see if one of the existing batches is for Search Twitter. If so, skip creating a new one.
        local_batch_process_id = 0
        search_twitter_process_is_already_in_queue = False
        for batch_process in batch_process_list_for_analysis:
            if batch_process.kind_of_process in [SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE]:
                local_batch_process_id = batch_process.id
                status += "SEARCH_TWITTER_ALREADY_RUNNING(" + str(local_batch_process_id) + ") "
                search_twitter_process_is_already_in_queue = True
                if batch_process.date_checked_out is not None:
                    status += "SEARCH_TWITTER_TIMED_OUT_AND_BEING_RE_PROCESSED "  # See SEARCH_TWITTER_TIMED_OUT
        if search_twitter_process_is_already_in_queue:
            status += "DO_NOT_CREATE_SEARCH_TWITTER-ALREADY_RUNNING "
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=local_batch_process_id,
                kind_of_process=SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE,
                status=status,
            )
        else:
            # Only schedule a new Twitter search when there is actual work waiting.
            number_of_candidates_to_analyze = fetch_number_of_candidates_needing_twitter_search()
            if positive_value_exists(number_of_candidates_to_analyze):
                results = batch_process_manager.create_batch_process(
                    kind_of_process=SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE)
                status += results['status']
                success = results['success']
                if results['batch_process_saved']:
                    batch_process = results['batch_process']
                    status += "SCHEDULED_NEW_SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE "
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=batch_process.id,
                        kind_of_process=batch_process.kind_of_process,
                        status=status,
                    )
                else:
                    status += "FAILED_TO_SCHEDULE-" + str(SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE) + " "
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=0,
                        kind_of_process=SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE,
                        status=status,
                    )
    # ############################
    # Processing Analytics - Generate Next BatchProcess to run
    if not fetch_batch_process_system_calculate_analytics_on():
        status += "BATCH_PROCESS_SYSTEM_CALCULATE_ANALYTICS_TURNED_OFF "
    else:
        # We only want one analytics process to be running at a time
        # Check to see if one of the existing batches is for analytics. If so,
        # skip creating a new one.
        # NOTE(review): this membership list does NOT include
        # CALCULATE_SITEWIDE_DAILY_METRICS (which IS part of
        # kind_of_process_list above and IS dispatched below) -- confirm
        # whether that omission is intentional.
        analytics_process_is_already_in_queue = False
        for batch_process in batch_process_list_for_analysis:
            if batch_process.kind_of_process in [
                    AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
                    CALCULATE_ORGANIZATION_DAILY_METRICS, CALCULATE_ORGANIZATION_ELECTION_METRICS,
                    CALCULATE_SITEWIDE_ELECTION_METRICS, CALCULATE_SITEWIDE_VOTER_METRICS]:
                analytics_process_is_already_in_queue = True
        if not analytics_process_is_already_in_queue:
            analytics_process_is_currently_running = batch_process_manager.is_analytics_process_currently_running()
            if not analytics_process_is_currently_running:
                # Ask the analytics subsystem which calculation should run next.
                analytics_processing_status = retrieve_analytics_processing_next_step()
                kind_of_process = None
                analytics_date_as_integer = 0
                status += analytics_processing_status['status']
                if not analytics_processing_status['success']:
                    status += "FAILURE_TRYING_TO_RETRIEVE_ANALYTICS_PROCESSING_NEXT_STEP "
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=0,
                        kind_of_process=kind_of_process,
                        status=status,
                    )
                elif analytics_processing_status['analytics_processing_status_found']:
                    # Map the first True flag (in priority order) to the
                    # kind_of_process that should be scheduled.
                    analytics_date_as_integer = analytics_processing_status['analytics_date_as_integer']
                    if analytics_processing_status['augment_analytics_action_with_election_id']:
                        kind_of_process = AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID
                    elif analytics_processing_status['augment_analytics_action_with_first_visit']:
                        kind_of_process = AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT
                    elif analytics_processing_status['calculate_sitewide_voter_metrics']:
                        kind_of_process = CALCULATE_SITEWIDE_VOTER_METRICS
                    elif analytics_processing_status['calculate_sitewide_daily_metrics']:
                        kind_of_process = CALCULATE_SITEWIDE_DAILY_METRICS
                    elif analytics_processing_status['calculate_sitewide_election_metrics']:
                        kind_of_process = CALCULATE_SITEWIDE_ELECTION_METRICS
                    elif analytics_processing_status['calculate_organization_daily_metrics']:
                        kind_of_process = CALCULATE_ORGANIZATION_DAILY_METRICS
                    elif analytics_processing_status['calculate_organization_election_metrics']:
                        kind_of_process = CALCULATE_ORGANIZATION_ELECTION_METRICS
                if kind_of_process:
                    results = batch_process_manager.create_batch_process(
                        kind_of_process=kind_of_process,
                        analytics_date_as_integer=analytics_date_as_integer)
                    status += results['status']
                    success = results['success']
                    if results['batch_process_saved']:
                        batch_process = results['batch_process']
                        try:
                            batch_process.date_started = now()
                            batch_process.save()
                            status += "SCHEDULED_PROCESS: " + str(kind_of_process) + " "
                            batch_process_manager.create_batch_process_log_entry(
                                batch_process_id=batch_process.id,
                                kind_of_process=batch_process.kind_of_process,
                                status=status,
                            )
                        except Exception as e:
                            status += "ERROR-BATCH_PROCESS_ANALYTICS-CANNOT_SAVE_DATE_STARTED " + str(e) + " "
                            handle_exception(e, logger=logger, exception_message=status)
                            batch_process_manager.create_batch_process_log_entry(
                                batch_process_id=batch_process.id,
                                kind_of_process=kind_of_process,
                                status=status,
                            )
                    else:
                        status += "FAILED_TO_SCHEDULE-" + str(kind_of_process) + " "
                        batch_process_manager.create_batch_process_log_entry(
                            batch_process_id=0,
                            kind_of_process=kind_of_process,
                            status=status,
                        )
    # Finally, retrieve the General Maintenance BatchProcess to run, and only use the first one returned
    results = batch_process_manager.retrieve_batch_process_list(
        kind_of_process_list=kind_of_process_list,
        process_needs_to_be_run=True,
        for_upcoming_elections=False)
    if not positive_value_exists(results['success']):
        success = False
        status += "FAILED_TO_RETRIEVE_BATCH_PROCESS_LIST: "
        status += results['status']
        batch_process_manager.create_batch_process_log_entry(
            critical_failure=True,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    batch_process = None
    batch_process_found = False
    batch_process_full_list = []
    if positive_value_exists(results['batch_process_list_found']):
        batch_process_found = True
        batch_process_full_list = results['batch_process_list']
        status += "KINDS_OF_BATCH_PROCESSES_IN_QUEUE [ "
        for temp_batch in batch_process_full_list:
            if temp_batch.kind_of_process:
                status += str(temp_batch.kind_of_process) + " "
        status += "] (ONLY_USING_FIRST) "
        # Only use the first one
        batch_process = batch_process_full_list[0]
    status += "BATCH_PROCESS_LIST_NEEDS_TO_BE_RUN_COUNT: " + str(len(batch_process_full_list)) + ", "
    # We should only run one per minute
    # Dispatch the single chosen batch_process to its family-specific runner.
    if batch_process_found:
        if batch_process.kind_of_process in [API_REFRESH_REQUEST]:
            results = process_one_api_refresh_request_batch_process(batch_process)
            status += results['status']
        elif batch_process.kind_of_process in [
                AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID, AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT,
                CALCULATE_ORGANIZATION_DAILY_METRICS, CALCULATE_ORGANIZATION_ELECTION_METRICS,
                CALCULATE_SITEWIDE_ELECTION_METRICS, CALCULATE_SITEWIDE_VOTER_METRICS]:
            results = process_one_analytics_batch_process(batch_process)
            status += results['status']
        elif batch_process.kind_of_process in [CALCULATE_SITEWIDE_DAILY_METRICS]:
            results = process_one_sitewide_daily_analytics_batch_process(batch_process)
            status += results['status']
        elif batch_process.kind_of_process in [SEARCH_TWITTER_FOR_CANDIDATE_TWITTER_HANDLE]:
            results = process_one_search_twitter_batch_process(batch_process, status=status)
            status = results['status']  # Not additive since we pass status into function
        else:
            status += "KIND_OF_PROCESS_NOT_RECOGNIZED "
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_one_analytics_batch_process(batch_process):
    """
    Run one analytics BatchProcess in chunk-sized steps.

    Lifecycle per call:
      1. Mark batch_process checked out (date_checked_out = now).
      2. Find (or create) the BatchProcessAnalyticsChunk that is not completed.
      3. If that chunk was previously started but never finished, mark it
         timed_out/completed with the number of rows that did get processed,
         free the batch_process (date_checked_out = None), and return -- the
         NEXT call will create a fresh chunk and continue.
      4. Otherwise mark the chunk started and dispatch to the kind-specific
         worker (augment-with-election-id, augment-with-first-visit,
         sitewide voter metrics); the not-yet-implemented kinds are simply
         marked completed.
      5. Re-fetch the latest batch_process row and reset date_checked_out
         (and date_completed when appropriate).

    NOTE(review): most error paths below return with success still True --
    callers only accumulate 'status', so this may be deliberate; confirm.

    :param batch_process: BatchProcess row for one of the analytics kinds
    :return: dict with 'success' (bool) and 'status' (accumulated log string)
    """
    from import_export_batches.models import BatchProcessManager
    batch_process_manager = BatchProcessManager()
    status = ""
    success = True
    # When a batch_process is running, we mark when it was "taken off the shelf" to be worked on.
    # When the process is complete, we should reset this to "NULL"
    try:
        batch_process.date_checked_out = now()
        batch_process.save()
    except Exception as e:
        status += "ERROR-ANALYTICS_BATCH-CHECKED_OUT_TIME_NOT_SAVED " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Retrieve any existing BatchProcessAnalyticsChunk that has not completed
    # (this could include ones that haven't started yet)
    results = batch_process_manager.retrieve_analytics_action_chunk_not_completed(batch_process_id=batch_process.id)
    if not results['success']:
        status += "RETRIEVE_ANALYTICS_ACTION_CHUNK-NOT_SUCCESSFUL: "
        status += results['status']
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            analytics_date_as_integer=batch_process.analytics_date_as_integer,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    if results['batch_process_analytics_chunk_found']:
        batch_process_analytics_chunk = results['batch_process_analytics_chunk']
        status += "ANALYTICS_ACTION_CHUNK_FOUND "
    else:
        # We need to create a new batch_process_analytics_chunk here.
        # We don't consider a batch_process completed until
        # a batch_process_analytics_chunk reports that there are no more items retrieved
        results = batch_process_manager.create_batch_process_analytics_chunk(batch_process=batch_process)
        if results['batch_process_analytics_chunk_created']:
            batch_process_analytics_chunk = results['batch_process_analytics_chunk']
            status += "ANALYTICS_ACTION_CHUNK_CREATED "
        else:
            status += "UNABLE_TO_CREATE_ANALYTICS_CHUNK: "
            status += results['status']
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=batch_process.kind_of_process,
                analytics_date_as_integer=batch_process.analytics_date_as_integer,
                status=status,
            )
            # NOTE(review): results['status'] has already been appended above,
            # so it appears twice in the returned status -- confirm intended.
            status += results['status']
            results = {
                'success': success,
                'status': status,
            }
            return results
    # If the batch_process_analytics_chunk has been started but not completed, figure out how many got
    # processed before failing
    analytics_manager = AnalyticsManager()
    if batch_process_analytics_chunk.date_started is not None and batch_process_analytics_chunk.date_completed is None:
        status += "ANALYTICS_CHUNK_PREVIOUSLY_STARTED_BUT_NOT_FINISHED "
        results = analytics_manager.retrieve_analytics_processed_list(
            batch_process_id=batch_process.id,
            batch_process_analytics_chunk_id=batch_process_analytics_chunk.id)
        analytics_processed_count = 0
        if results['analytics_processed_list_found']:
            # Exclude the voters already processed for analytics_date_as_integer
            analytics_processed_list = results['analytics_processed_list']
            analytics_processed_count = len(analytics_processed_list)
        else:
            status += results['status']
        # Close out the stalled chunk as timed out, recording how far it got.
        try:
            batch_process_analytics_chunk.number_of_rows_successfully_reviewed = analytics_processed_count
            batch_process_analytics_chunk.timed_out = True
            batch_process_analytics_chunk.date_completed = now()
            batch_process_analytics_chunk.save()
            status += "BATCH_PROCESS_ANALYTICS_CHUNK_TIMED_OUT, ROWS_REVIEWED: " + str(analytics_processed_count) + " "
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=batch_process.kind_of_process,
                status=status,
            )
        except Exception as e:
            status += "ERROR-BATCH_PROCESS_ANALYTICS_CHUNK_TIMED_OUT-DATE_COMPLETED_TIME_NOT_SAVED: " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=batch_process.kind_of_process,
                status=status,
            )
            results = {
                'success': success,
                'status': status,
            }
            return results
        # Now free up the batch_process to process in the next loop
        try:
            # Before saving batch_process, make sure we have the latest version. (For example, it might have been
            # paused since it was first retrieved.)
            batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
            if positive_value_exists(batch_process_results['batch_process_found']):
                batch_process = batch_process_results['batch_process']
                batch_process.date_checked_out = None
                batch_process.save()
                batch_process_manager.create_batch_process_log_entry(
                    batch_process_id=batch_process.id,
                    kind_of_process=batch_process.kind_of_process,
                    status=status,
                )
                results = {
                    'success': success,
                    'status': status,
                }
                return results
            # NOTE(review): if the batch_process is NOT found here, execution
            # falls through and continues with the processing below -- confirm
            # whether an early return was intended in that case.
        except Exception as e:
            status += "ERROR-TIMED_OUT-DATE_COMPLETED_TIME_NOT_SAVED: " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=batch_process.kind_of_process,
                status=status,
            )
            results = {
                'success': success,
                'status': status,
            }
            return results
    # Mark the chunk as started before doing the actual analytics work.
    try:
        batch_process_analytics_chunk.date_started = now()
        batch_process_analytics_chunk.save()
    except Exception as e:
        status += "ERROR-ANALYTICS_CHUNK_DATE_STARTED_TIME_NOT_SAVED " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Dispatch to the worker for this kind of analytics process.
    mark_as_completed = False
    if batch_process.kind_of_process in [AUGMENT_ANALYTICS_ACTION_WITH_ELECTION_ID]:
        results = process_one_analytics_batch_process_augment_with_election_id(
            batch_process, batch_process_analytics_chunk)
        status += results['status']
    elif batch_process.kind_of_process in [AUGMENT_ANALYTICS_ACTION_WITH_FIRST_VISIT]:
        results = process_one_analytics_batch_process_augment_with_first_visit(
            batch_process, batch_process_analytics_chunk)
        status += results['status']
    elif batch_process.kind_of_process in [CALCULATE_SITEWIDE_VOTER_METRICS]:
        results = process_sitewide_voter_metrics(batch_process, batch_process_analytics_chunk)
        status += results['status']
    # elif batch_process.kind_of_process in [CALCULATE_SITEWIDE_DAILY_METRICS]:
    #     # Should not be here
    #     pass
    elif batch_process.kind_of_process in [CALCULATE_SITEWIDE_ELECTION_METRICS]:
        # Not implemented yet -- mark as completed
        mark_as_completed = True
    elif batch_process.kind_of_process in [CALCULATE_ORGANIZATION_DAILY_METRICS]:
        # Not implemented yet -- mark as completed
        mark_as_completed = True
    elif batch_process.kind_of_process in [CALCULATE_ORGANIZATION_ELECTION_METRICS]:
        # Not implemented yet -- mark as completed
        mark_as_completed = True
    else:
        status += "MISSING_KIND_OF_PROCESS "
    try:
        # Before saving batch_process as completed, make sure we have the latest version.
        # (For example, it might have been paused since it was first retrieved.)
        batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
        if positive_value_exists(batch_process_results['batch_process_found']):
            batch_process = batch_process_results['batch_process']
        if mark_as_completed:
            # Not implemented yet -- mark as completed
            batch_process.date_completed = now()
        batch_process.date_checked_out = None
        batch_process.save()
    except Exception as e:
        status += "ERROR-CHECKED_OUT_TIME_NOT_RESET: " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            analytics_date_as_integer=batch_process.analytics_date_as_integer,
            status=status,
        )
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_one_api_refresh_request_batch_process(batch_process):
    """
    Execute one API_REFRESH_REQUEST BatchProcess: re-run the named API and
    store its JSON response in the ApiInternalCache.

    Currently only the 'voterGuidesUpcoming' api_name is supported. On a
    successful retrieve-and-cache:
      * the batch_process row is marked date_completed and checked back in
        (date_checked_out = None),
      * prior cache entries for the same api_name + election list are marked
        replaced,
      * prior refresh requests for the same api_name + election list are
        marked completed.
    On any failure a log entry is written and 'success' is returned False.

    :param batch_process: BatchProcess row whose kind_of_process is
        API_REFRESH_REQUEST; reads api_name and election_id_list_serialized
    :return: dict with 'success' (bool) and 'status' (accumulated log string)
    """
    status = ""
    success = True
    api_internal_cache_manager = ApiInternalCacheManager()
    batch_process_manager = BatchProcessManager()
    kind_of_process = batch_process.kind_of_process
    # When a batch_process is running, we mark when it was "taken off the shelf" to be worked on.
    # When the process is complete, we should reset this to "NULL"
    try:
        batch_process.date_started = now()
        batch_process.date_checked_out = now()
        batch_process.save()
    except Exception as e:
        status += "ERROR-API_REFRESH_REQUEST-CHECKED_OUT_TIME_NOT_SAVED " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        success = False
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=kind_of_process,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    api_internal_cache_id = 0
    api_internal_cache_saved = False
    api_results_retrieved = False
    if batch_process.api_name == 'voterGuidesUpcoming':
        status += "STARTING_PROCESS_ONE_API_REFRESH_REQUESTED-voterGuidesUpcoming-" \
                  "(" + str(batch_process.election_id_list_serialized) + ") "
        # The election id list is stored serialized on the batch_process row
        google_civic_election_id_list = json.loads(batch_process.election_id_list_serialized)
        results = voter_guides_upcoming_retrieve_for_api(google_civic_election_id_list=google_civic_election_id_list)
        status += results['status']
        api_results_retrieved = results['success']
        json_data = results['json_data']
        if json_data['success'] and api_results_retrieved:
            # Save the json in the cache
            status += "NEW_API_RESULTS_RETRIEVED-CREATING_API_INTERNAL_CACHE "
            cached_api_response_serialized = json.dumps(json_data)
            results = api_internal_cache_manager.create_api_internal_cache(
                api_name=batch_process.api_name,
                cached_api_response_serialized=cached_api_response_serialized,
                election_id_list_serialized=batch_process.election_id_list_serialized,
            )
            status += results['status']
            api_internal_cache_saved = results['success']
            api_internal_cache_id = results['api_internal_cache_id']
        else:
            status += "NEW_API_RESULTS_RETRIEVE_FAILED "
    else:
        status += "API_NAME_NOT_RECOGNIZED: " + str(batch_process.api_name) + " "
    if api_results_retrieved and api_internal_cache_saved:
        try:
            batch_process.completion_summary = status
            batch_process.date_checked_out = None
            batch_process.date_completed = now()
            batch_process.save()
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=kind_of_process,
                status=status,
            )
        except Exception as e:
            status += "ERROR-DATE_COMPLETED_TIME_NOT_SAVED " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            # Bug fix: report the failure, consistent with the checked-out-time
            # handler above. The batch_process row is left checked out, so the
            # caller must not treat this run as successful.
            success = False
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=kind_of_process,
                status=status,
            )
            results = {
                'success': success,
                'status': status,
            }
            return results
        # Mark all prior cache entries for this api_name + election list as replaced
        if positive_value_exists(api_internal_cache_id):
            results = api_internal_cache_manager.mark_prior_api_internal_cache_entries_as_replaced(
                api_name=batch_process.api_name,
                election_id_list_serialized=batch_process.election_id_list_serialized,
                excluded_api_internal_cache_id=api_internal_cache_id)
            status += results['status']
            # Mark all refresh requests prior to now as satisfied
            results = api_internal_cache_manager.mark_refresh_completed_for_prior_api_refresh_requested(
                api_name=batch_process.api_name,
                election_id_list_serialized=batch_process.election_id_list_serialized)
            status += results['status']
    else:
        status += "API_REFRESH_REQUEST_FAILED "
        success = False
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=kind_of_process,
            status=status,
        )
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_one_ballot_item_batch_process(batch_process):
status = ""
success = True
batch_manager = BatchManager()
batch_process_manager = BatchProcessManager()
election_manager = ElectionManager()
retrieve_time_out_duration = 20 * 60 # 20 minutes * 60 seconds
analyze_time_out_duration = 30 * 60 # 30 minutes
create_time_out_duration = 20 * 60 # 20 minutes
kind_of_process = batch_process.kind_of_process
google_civic_election_id = None
if positive_value_exists(batch_process.google_civic_election_id):
google_civic_election_id = batch_process.google_civic_election_id
state_code = None
if positive_value_exists(batch_process.state_code):
state_code = batch_process.state_code
# When a batch_process is running, we mark when it was "taken off the shelf" to be worked on.
# When the process is complete, we should reset this to "NULL"
try:
# Before saving batch_process, make sure we have the latest version. (For example, it might have been
# paused since it was first retrieved.)
# DALE 2020-October after transition to three batches roots, we can get rid of retrieving this again
batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
if positive_value_exists(batch_process_results['batch_process_found']):
batch_process = batch_process_results['batch_process']
batch_process.date_checked_out = now()
batch_process.save()
except Exception as e:
status += "ERROR-CHECKED_OUT_TIME_NOT_SAVED: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# Retrieve BatchProcessBallotItemChunk that has started but not completed
results = batch_process_manager.retrieve_active_ballot_item_chunk_not_completed(
batch_process_id=batch_process.id)
if not results['success']:
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=results['status'],
)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
if results['batch_process_ballot_item_chunk_found']:
batch_process_ballot_item_chunk = results['batch_process_ballot_item_chunk']
else:
# We need to create a new batch_process_ballot_item_chunk here.
# We don't consider a batch_process completed until
# a batch_process_ballot_item_chunk reports that there are no more items retrieved
results = \
batch_process_manager.create_batch_process_ballot_item_chunk(batch_process_id=batch_process.id)
if results['batch_process_ballot_item_chunk_created']:
batch_process_ballot_item_chunk = results['batch_process_ballot_item_chunk']
else:
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=results['status'],
)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
# If here, we have a batch_process_ballot_item_chunk to work on
if batch_process_ballot_item_chunk.retrieve_date_started is None:
# Kick off retrieve
retrieve_success = False
retrieve_row_count = 0
batch_set_id = 0
try:
# If here, we are about to retrieve ballot items
batch_process_ballot_item_chunk.retrieve_date_started = now()
batch_process_ballot_item_chunk.save()
status += "RETRIEVE_DATE_STARTED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-RETRIEVE_DATE_STARTED-CANNOT_SAVE_RETRIEVE_DATE_STARTED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
if batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS:
from import_export_batches.views_admin import \
retrieve_ballots_for_polling_locations_api_v4_internal_view
results = retrieve_ballots_for_polling_locations_api_v4_internal_view(
google_civic_election_id=batch_process.google_civic_election_id,
state_code=batch_process.state_code,
refresh_ballot_returned=True,
date_last_updated_should_not_exceed=batch_process.date_started,
batch_process_ballot_item_chunk=batch_process_ballot_item_chunk,
use_ballotpedia=batch_process.use_ballotpedia,
use_ctcl=batch_process.use_ctcl,
use_vote_usa=batch_process.use_vote_usa,
)
retrieve_success = positive_value_exists(results['success'])
batch_set_id = results['batch_set_id']
retrieve_row_count = results['retrieve_row_count']
status += results['status']
if 'batch_process_ballot_item_chunk' in results:
if results['batch_process_ballot_item_chunk'] and \
hasattr(results['batch_process_ballot_item_chunk'], 'batch_set_id'):
batch_process_ballot_item_chunk = results['batch_process_ballot_item_chunk']
elif batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_VOTERS:
# Retrieving ballot items and cache in import_export_batches tables
from import_export_batches.views_admin import refresh_ballots_for_voters_api_v4_internal_view
results = refresh_ballots_for_voters_api_v4_internal_view(
google_civic_election_id=batch_process.google_civic_election_id,
state_code=batch_process.state_code,
date_last_updated_should_not_exceed=batch_process.date_started,
batch_process_ballot_item_chunk=batch_process_ballot_item_chunk,
use_ballotpedia=batch_process.use_ballotpedia,
use_ctcl=batch_process.use_ctcl,
use_vote_usa=batch_process.use_vote_usa,
)
retrieve_success = positive_value_exists(results['success'])
batch_set_id = results['batch_set_id']
retrieve_row_count = results['retrieve_row_count']
status += results['status']
if 'batch_process_ballot_item_chunk' in results:
if results['batch_process_ballot_item_chunk'] and \
hasattr(results['batch_process_ballot_item_chunk'], 'batch_set_id'):
batch_process_ballot_item_chunk = results['batch_process_ballot_item_chunk']
elif batch_process.kind_of_process == RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS:
from import_export_batches.views_admin import \
retrieve_ballots_for_polling_locations_api_v4_internal_view
# Steve, Oct 2020: This line took 35 seconds to execute on my local, in the debugger
results = retrieve_ballots_for_polling_locations_api_v4_internal_view(
google_civic_election_id=batch_process.google_civic_election_id,
state_code=batch_process.state_code,
refresh_ballot_returned=False,
batch_process_ballot_item_chunk=batch_process_ballot_item_chunk,
use_ballotpedia=batch_process.use_ballotpedia,
use_ctcl=batch_process.use_ctcl,
use_vote_usa=batch_process.use_vote_usa,
)
retrieve_success = positive_value_exists(results['success'])
batch_set_id = results['batch_set_id']
retrieve_row_count = results['retrieve_row_count']
status += results['status']
if 'batch_process_ballot_item_chunk' in results:
if results['batch_process_ballot_item_chunk'] and \
hasattr(results['batch_process_ballot_item_chunk'], 'batch_set_id'):
batch_process_ballot_item_chunk = results['batch_process_ballot_item_chunk']
if batch_process.kind_of_process in \
[REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]:
pass
if retrieve_success: # I think this is exclusively Ballot Items
if positive_value_exists(batch_set_id):
try:
# If here, then ballots were retrieved, so we can set retrieve_date_completed
batch_process_ballot_item_chunk.batch_set_id = batch_set_id
batch_process_ballot_item_chunk.retrieve_row_count = retrieve_row_count
batch_process_ballot_item_chunk.retrieve_date_completed = now()
batch_process_ballot_item_chunk.save()
status += "RETRIEVE_DATE_STARTED-RETRIEVE_DATE_COMPLETED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-RETRIEVE_DATE_STARTED-CANNOT_SAVE_RETRIEVE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
if not positive_value_exists(retrieve_row_count):
if batch_process.kind_of_process == RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS \
or batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS \
or batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_VOTERS:
# If no batch rows were found, we know the entire batch_process is finished.
# Update batch_process.date_completed to now
status += "RETRIEVE_DATE_STARTED-NO_RETRIEVE_VALUES_FOUND-BATCH_IS_COMPLETE "
results = mark_batch_process_as_complete(batch_process, batch_process_ballot_item_chunk,
batch_set_id=batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
else:
status += "RETRIEVE_DATE_STARTED-NO_BATCH_SET_ID_FOUND-BATCH_IS_COMPLETE "
results = mark_batch_process_as_complete(batch_process, batch_process_ballot_item_chunk,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
else:
if not positive_value_exists(batch_set_id):
# Reset the retrieve_date_started to None
try:
status += results['status']
batch_process_ballot_item_chunk.retrieve_date_started = None
batch_process_ballot_item_chunk.save()
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_set_id,
critical_failure=True,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=results['status'],
)
except Exception as e:
status += "ERROR-CANNOT_SAVE_RETRIEVE_DATE_STARTED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
else:
try:
status += results['status']
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_set_id,
critical_failure=True,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-CANNOT_WRITE_TO_BATCH_PROCESS_LOG: " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
elif batch_process_ballot_item_chunk.retrieve_date_completed is None:
# Check to see if retrieve process has timed out
date_when_retrieve_has_timed_out = \
batch_process_ballot_item_chunk.retrieve_date_started + timedelta(seconds=retrieve_time_out_duration)
if now() > date_when_retrieve_has_timed_out:
# If so, set retrieve_date_completed to now and set retrieve_timed_out to True
# But first, see if any rows were found
# Were there batches created in the batch set from the retrieve?
number_of_batches = 0
if positive_value_exists(batch_process_ballot_item_chunk.batch_set_id):
number_of_batches = batch_manager.count_number_of_batches_in_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id)
if not positive_value_exists(number_of_batches):
if batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS or \
batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_VOTERS:
# If no batch rows were found, we know the entire batch_process is finished.
# Update batch_process.date_completed to now
status += "ANALYZE_DATE_STARTED-NO_RETRIEVE_VALUES_FOUND-BATCH_IS_COMPLETE "
results = mark_batch_process_as_complete(
batch_process, batch_process_ballot_item_chunk,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
else:
status += "PROBLEM-BATCH_SET_ID_IS_MISSING_FROM_BALLOT_ITEM_CHUNK "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
# But proceed so we can mark the retrieve part of batch_process_ballot_item_chunk as complete
try:
if not positive_value_exists(batch_process_ballot_item_chunk.retrieve_row_count):
# Make sure to store the retrieve_row_count if it wasn't already stored
batch_process_ballot_item_chunk.retrieve_row_count = number_of_batches
batch_process_ballot_item_chunk.retrieve_date_completed = now()
batch_process_ballot_item_chunk.retrieve_timed_out = True
batch_process_ballot_item_chunk.save()
except Exception as e:
status += "ERROR-RETRIEVE_DATE_COMPLETED-CANNOT_SAVE_RETRIEVE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
else:
# Wait
results = {
'success': success,
'status': status,
}
return results
elif batch_process_ballot_item_chunk.analyze_date_started is None:
# ###################
# This is the first pass through ANALYZE
status += "STARTING_ANALYZE_WITH_ANALYZE_DATE_STARTED_NONE "
if not positive_value_exists(batch_process_ballot_item_chunk.batch_set_id):
status += "MISSING_BALLOT_ITEM_CHUNK_BATCH_SET_ID "
try:
batch_process_ballot_item_chunk.analyze_date_started = now()
batch_process_ballot_item_chunk.analyze_date_completed = now()
batch_process_ballot_item_chunk.save()
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-ANALYZE_DATE_STARTED-CANNOT_SAVE_ANALYZE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# If here, we know that the retrieve_date_completed has a value
number_of_batches = 0
try:
# If here we know we have batches that need to be analyzed
if not positive_value_exists(batch_process_ballot_item_chunk.retrieve_row_count):
# Were there batches created in the batch set from the retrieve?
number_of_batches = batch_manager.count_number_of_batches_in_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id)
# Were there batches created in the batch set from the retrieve?
batch_process_ballot_item_chunk.retrieve_row_count = number_of_batches
batch_process_ballot_item_chunk.analyze_date_started = now()
batch_process_ballot_item_chunk.save()
status += "ANALYZE_DATE_STARTED-ANALYZE_DATE_STARTED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-ANALYZE_DATE_STARTED-CANNOT_SAVE_ANALYZE_DATE_STARTED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# Now analyze the batch that was stored in the "refresh_ballotpedia_ballots..." function
results = process_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id, analyze_all=True)
analyze_row_count = results['batch_rows_analyzed']
status += results['status']
if positive_value_exists(results['success']):
if not positive_value_exists(analyze_row_count):
if batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_VOTERS:
# If no batch rows were found, we know the entire batch_process is finished.
# Update batch_process.date_completed to now
status += "ANALYZE_DATE_STARTED-REFRESH_BALLOT_ITEMS_FROM_VOTERS-ANALYZE_ROW_COUNT_ZERO "
results = mark_batch_process_as_complete(batch_process, batch_process_ballot_item_chunk,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
else:
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
critical_failure=True,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
try:
batch_process_ballot_item_chunk.analyze_row_count = analyze_row_count
batch_process_ballot_item_chunk.analyze_date_completed = now()
batch_process_ballot_item_chunk.save()
status += "ANALYZE_DATE_STARTED-ANALYZE_DATE_COMPLETED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-ANALYZE_DATE_STARTED-CANNOT_SAVE_ANALYZE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
elif batch_process_ballot_item_chunk.analyze_date_completed is None:
# ###################
# This is an ANALYZE process that failed part way through
status += "RESTARTING_FAILED_ANALYZE_PROCESS "
if not positive_value_exists(batch_process_ballot_item_chunk.batch_set_id):
status += "MISSING_BALLOT_ITEM_CHUNK_BATCH_SET_ID "
try:
batch_process_ballot_item_chunk.analyze_date_completed = now()
batch_process_ballot_item_chunk.save()
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-ANALYZE_DATE_COMPLETED-CANNOT_SAVE_ANALYZE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# Check to see if analyze process has timed out
date_when_analyze_has_timed_out = \
batch_process_ballot_item_chunk.analyze_date_started + timedelta(seconds=analyze_time_out_duration)
if now() > date_when_analyze_has_timed_out:
# Continue processing where we left off
# We have time for this to run before the time out check above is run again,
# since we have this batch checked out
results = process_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id, analyze_all=True)
status += results['status']
if positive_value_exists(results['success']):
not_analyzed_row_count = batch_manager.count_number_of_batches_in_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id, batch_row_analyzed=False)
if not positive_value_exists(not_analyzed_row_count):
if batch_process.kind_of_process == REFRESH_BALLOT_ITEMS_FROM_VOTERS:
# If no batch rows were found, we know the entire batch_process is finished.
# Update batch_process.date_completed to now
status += "ANALYZE_DATE_STARTED-REFRESH_BALLOT_ITEMS_FROM_VOTERS-ANALYZE_ROW_COUNT_ZERO "
results = mark_batch_process_as_complete(
batch_process, batch_process_ballot_item_chunk,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status)
status += results['status']
results = {
'success': success,
'status': status,
}
return results
if positive_value_exists(not_analyzed_row_count):
try:
status += "RESTARTED_FAILED_ANALYZE_PROCESS-STILL_HAS_ROWS_TO_ANALYZE "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-RESTARTED_FAILED_ANALYZE_PROCESS-CANNOT_SAVE_ANALYZE_DATE_COMPLETED " \
"" + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
}
return results
else:
# All batches in set have been analyzed
try:
analyze_row_count = batch_manager.count_number_of_batches_in_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id, batch_row_analyzed=True)
batch_process_ballot_item_chunk.analyze_row_count = analyze_row_count
batch_process_ballot_item_chunk.analyze_date_completed = now()
batch_process_ballot_item_chunk.save()
status += "ANALYZE_DATE_COMPLETED-ANALYZE_DATE_COMPLETED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-ANALYZE_DATE_STARTED-CANNOT_SAVE_ANALYZE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
results = {
'success': success,
'status': status,
}
return results
else:
status += "PROCESS_BATCH_SET-FALSE "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
critical_failure=True,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
else:
# Wait
results = {
'success': success,
'status': status,
}
return results
elif batch_process_ballot_item_chunk.create_date_started is None:
try:
# If here, we know that the analyze_date_completed has a value
batch_process_ballot_item_chunk.create_date_started = now()
batch_process_ballot_item_chunk.save()
status += "CREATE_DATE_STARTED-CREATE_DATE_STARTED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-CREATE_DATE_STARTED-CANNOT_SAVE_CREATE_DATE_STARTED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# Process the create entries
results = process_batch_set(batch_set_id=batch_process_ballot_item_chunk.batch_set_id, create_all=True)
create_row_count = results['batch_rows_created']
status += results['status']
if not positive_value_exists(results['success']):
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
critical_failure=True,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# Process the delete entries
results = process_batch_set(batch_set_id=batch_process_ballot_item_chunk.batch_set_id, delete_all=True)
status += results['status']
if not positive_value_exists(results['success']):
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
critical_failure=True,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
# If here, we know that the process_batch_set has run
try:
batch_process_ballot_item_chunk.create_row_count = create_row_count
batch_process_ballot_item_chunk.create_date_completed = now()
batch_process_ballot_item_chunk.save()
if positive_value_exists(google_civic_election_id):
results = election_manager.retrieve_election(
google_civic_election_id=google_civic_election_id, read_only=False)
if results['election_found']:
election_on_stage = results['election']
if election_on_stage and hasattr(election_on_stage, 'state_code_list_raw'):
ballot_returned_list_manager = BallotReturnedListManager()
results = \
ballot_returned_list_manager.retrieve_state_codes_in_election(google_civic_election_id)
if results['success']:
state_code_list = results['state_code_list']
try:
state_code_list_raw = ','.join(state_code_list)
election_on_stage.state_code_list_raw = state_code_list_raw
election_on_stage.save()
except Exception as e:
status += "ERROR-COULD_NOT_SAVE_ELECTION: " + str(e) + " "
else:
status += results['status']
else:
status += results['status']
status += "CREATE_DATE_STARTED-CREATE_DATE_COMPLETED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-CREATE_DATE_STARTED-CANNOT_SAVE_CREATE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
elif batch_process_ballot_item_chunk.create_date_completed is None:
date_when_create_has_timed_out = \
batch_process_ballot_item_chunk.create_date_started + timedelta(seconds=create_time_out_duration)
if now() > date_when_create_has_timed_out:
if not positive_value_exists(batch_process_ballot_item_chunk.create_row_count):
# Were there batches created in the batch set from the retrieve?
if positive_value_exists(batch_process_ballot_item_chunk.batch_set_id):
batch_process_ballot_item_chunk.create_row_count = \
batch_manager.count_number_of_batches_in_batch_set(
batch_set_id=batch_process_ballot_item_chunk.batch_set_id, batch_row_created=True)
try:
# If here, set create_date_completed to now and set create_timed_out to True
batch_process_ballot_item_chunk.create_date_completed = now()
batch_process_ballot_item_chunk.create_timed_out = True
batch_process_ballot_item_chunk.save()
if positive_value_exists(google_civic_election_id):
results = election_manager.retrieve_election(
google_civic_election_id=google_civic_election_id, read_only=False)
if results['election_found']:
election_on_stage = results['election']
if election_on_stage and hasattr(election_on_stage, 'state_code_list_raw'):
ballot_returned_list_manager = BallotReturnedListManager()
results = \
ballot_returned_list_manager.retrieve_state_codes_in_election(google_civic_election_id)
if results['success']:
state_code_list = results['state_code_list']
try:
state_code_list_raw = ','.join(state_code_list)
election_on_stage.state_code_list_raw = state_code_list_raw
election_on_stage.save()
except Exception as e:
status += "ERROR-COULD_NOT_SAVE_ELECTION: " + str(e) + " "
else:
status += results['status']
else:
status += results['status']
status += "CREATE_DATE_STARTED-CREATE_DATE_COMPLETED_SAVED "
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
except Exception as e:
status += "ERROR-CREATE_DATE_STARTED-CANNOT_SAVE_CREATE_DATE_COMPLETED " + str(e) + " "
handle_exception(e, logger=logger, exception_message=status)
batch_process_manager.create_batch_process_log_entry(
batch_process_id=batch_process.id,
batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk.id,
batch_set_id=batch_process_ballot_item_chunk.batch_set_id,
google_civic_election_id=google_civic_election_id,
kind_of_process=kind_of_process,
state_code=state_code,
status=status,
)
results = {
'success': success,
'status': status,
}
return results
else:
# Wait
results = {
'success': success,
'status': status,
}
return results
else:
# All steps have been completed
pass
results = {
'success': success,
'status': status,
}
return results
def process_activity_notice_batch_process(batch_process):
    """
    Run one ACTIVITY_NOTICE batch process: check it out, process the activity
    notice seeds, and either delete the batch_process (when there was nothing
    to do) or mark it completed.

    :param batch_process: the BatchProcess row to work on
    :return: dict with 'success' (bool) and 'status' (accumulated status text)
    """
    status = ""
    success = True
    batch_process_manager = BatchProcessManager()
    kind_of_process = batch_process.kind_of_process
    process_now = False
    # Please also see: longest_activity_notice_processing_run_time_allowed & checked_out_expiration_time
    # We adjust timeout for ACTIVITY_NOTICE_PROCESS in retrieve_batch_process_list
    activity_notice_processing_time_out_duration = 270  # 4.5 minutes * 60 seconds
    if batch_process.date_started is None:
        # When a batch_process is running, we mark it "taken off the shelf" to be worked on ("date_checked_out").
        # When the process is complete, we should reset this to "NULL"
        process_now = True
        try:
            batch_process.date_started = now()
            batch_process.date_checked_out = now()
            batch_process.save()
        except Exception as e:
            status += "ERROR-ACTIVITY_NOTICE-CHECKED_OUT_TIME_NOT_SAVED " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            success = False
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=kind_of_process,
                status=status,
            )
            results = {
                'success': success,
                'status': status,
            }
            return results
    elif batch_process.date_completed is None:
        # Check to see if process has timed out
        date_when_timed_out = \
            batch_process.date_started + timedelta(seconds=activity_notice_processing_time_out_duration)
        if now() > date_when_timed_out:
            # Update batch_process.date_completed to now
            status += "ACTIVITY_NOTICE-TIMED_OUT "
            results = mark_batch_process_as_complete(
                batch_process,
                kind_of_process=kind_of_process,
                status=status)
            status += results['status']
    if process_now:
        activity_notice_results = process_activity_notice_seeds_triggered_by_batch_process()
        status += activity_notice_results['status']
        if activity_notice_results['success']:
            activity_notice_seed_count = activity_notice_results['activity_notice_seed_count']
            activity_notice_count = activity_notice_results['activity_notice_count']
            try:
                if activity_notice_seed_count == 0 and activity_notice_count == 0:
                    # We don't want to leave a bunch of empty batch_processes around
                    batch_process.delete()
                else:
                    batch_process.completion_summary = "ACTIVITY_NOTICE_RESULTS, " \
                        "activity_notice_seed_count: {activity_notice_seed_count} " \
                        "activity_notice_count: {activity_notice_count}" \
                        "".format(activity_notice_seed_count=activity_notice_seed_count,
                                  activity_notice_count=activity_notice_count)
                    # Only update/save when the row still exists. (Calling save() after
                    # delete() would re-insert the row we just removed, because Django
                    # clears the instance pk on delete.)
                    batch_process.date_checked_out = None
                    batch_process.date_completed = now()
                    batch_process.save()
                    status += "BATCH_PROCESS_SAVED "
                if positive_value_exists(activity_notice_seed_count) or positive_value_exists(activity_notice_count):
                    batch_process_manager.create_batch_process_log_entry(
                        batch_process_id=batch_process.id,
                        kind_of_process=kind_of_process,
                        status=status,
                    )
            except Exception as e:
                status += "ERROR-ACTIVITY_NOTICE-DATE_COMPLETED_TIME_NOT_SAVED: " + str(e) + " "
                handle_exception(e, logger=logger, exception_message=status)
                batch_process_manager.create_batch_process_log_entry(
                    batch_process_id=batch_process.id,
                    kind_of_process=kind_of_process,
                    status=status,
                )
            results = {
                'success': success,
                'status': status,
            }
            return results
        else:
            status += "CREATE_OR_UPDATE_ACTIVITY_NOTICES_FAILED "
            success = False
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=kind_of_process,
                status=status,
            )
            results = {
                'success': success,
                'status': status,
            }
            return results
    # Fall-through (e.g. timed-out or already-completed process): previously this
    # path returned None implicitly; callers expect a results dict, so return one.
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_one_search_twitter_batch_process(batch_process, status=""):
    """
    Check out a SEARCH_TWITTER batch process, run the bulk possible-Twitter-handle
    retrieval, and record the outcome (completion summary, completion date, log entry)
    on the batch_process row.

    :param batch_process: the BatchProcess row to work on
    :param status: accumulated status text from the caller
    :return: dict with 'success' (bool) and 'status' (accumulated status text)
    """
    success = True
    batch_process_manager = BatchProcessManager()
    kind_of_process = batch_process.kind_of_process
    # Mark when this batch_process was "taken off the shelf" to be worked on.
    # "date_checked_out" gets reset to NULL once the process is complete.
    try:
        if batch_process.date_started is None:
            batch_process.date_started = now()
        batch_process.date_checked_out = now()
        batch_process.save()
    except Exception as e:
        status += "ERROR-CHECKED_OUT_TIME_NOT_SAVED: " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        success = False
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=kind_of_process,
            status=status,
        )
        return {'success': success, 'status': status}

    retrieve_results = retrieve_possible_twitter_handles_in_bulk()
    status += retrieve_results['status']

    if not retrieve_results['success']:
        # The retrieval failed: still mark the batch_process completed so it
        # isn't retried endlessly, and record the failure in the summary.
        status += "RETRIEVE_POSSIBLE_TWITTER_HANDLES_FAILED-MARKED_COMPLETED "
        success = False
        try:
            completion_summary = \
                "retrieve_possible_twitter_handles_in_bulk FAILED: {status}" \
                "".format(status=status)
            status += completion_summary + " "
            batch_process.completion_summary = completion_summary
            batch_process.date_checked_out = None
            batch_process.date_completed = now()
            batch_process.save()
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=kind_of_process,
                status=status,
            )
        except Exception as e:
            status += "ERROR-COMPLETION_SUMMARY_NOT_SAVED: " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=kind_of_process,
                status=status,
            )
        return {'success': success, 'status': status}

    # Success path: summarize how many candidates were analyzed and close out the row.
    candidates_analyzed = retrieve_results['candidates_analyzed']
    candidates_to_analyze = retrieve_results['candidates_to_analyze']
    try:
        completion_summary = \
            "Candidates Analyzed: {candidates_analyzed} " \
            "out of {candidates_to_analyze}" \
            "".format(candidates_analyzed=candidates_analyzed,
                      candidates_to_analyze=candidates_to_analyze)
        status += completion_summary + " "
        batch_process.completion_summary = completion_summary
        batch_process.date_checked_out = None
        batch_process.date_completed = now()
        batch_process.save()
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=kind_of_process,
            status=status,
        )
    except Exception as e:
        status += "ERROR-DATE_COMPLETED_TIME_NOT_SAVED: " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=kind_of_process,
            status=status,
        )
    return {'success': success, 'status': status}
def process_one_sitewide_daily_analytics_batch_process(batch_process):
    """Run one sitewide-daily-analytics batch process from start to finish.

    Checks the process out (timestamps it), refreshes issue statistics,
    calculates and saves the sitewide daily metrics for
    batch_process.analytics_date_as_integer, then checks the process back in,
    records a completion summary and writes a log entry.

    :param batch_process: BatchProcess model instance to work on
    :return: dict with 'success' (bool) and 'status' (accumulated status text)
    """
    from import_export_batches.models import BatchProcessManager
    analytics_manager = AnalyticsManager()
    batch_process_manager = BatchProcessManager()
    status = ""
    success = True
    # When a batch_process is running, we mark when it was "taken off the shelf" to be worked on.
    # When the process is complete, we should reset this to "NULL"
    try:
        if batch_process.date_started is None:
            batch_process.date_started = now()
        batch_process.date_checked_out = now()
        batch_process.save()
    except Exception as e:
        # Could not check the process out -- log it and bail.
        # NOTE(review): success is still True on this early exit; callers can
        # only distinguish it from a completed run via the status text.
        status += "ERROR-CHECKED_OUT_TIME_NOT_SAVED-SITEWIDE_DAILY: " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Refresh issue statistics before calculating the daily metrics.
    update_results = update_issue_statistics()
    status += update_results['status']
    daily_metrics_calculated = False
    results = calculate_sitewide_daily_metrics(batch_process.analytics_date_as_integer)
    status += results['status']
    if positive_value_exists(results['success']):
        sitewide_daily_metrics_values = results['sitewide_daily_metrics_values']
        update_results = analytics_manager.save_sitewide_daily_metrics_values(sitewide_daily_metrics_values)
        status += update_results['status']
        if positive_value_exists(update_results['success']):
            daily_metrics_calculated = True
        else:
            # Metrics calculated but could not be saved -- log and bail.
            status += "SAVE_SITEWIDE_DAILY_METRICS-FAILED_TO_SAVE "
            success = False
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process.id,
                kind_of_process=batch_process.kind_of_process,
                status=status,
            )
            results = {
                'success': success,
                'status': status,
            }
            return results
    else:
        # Calculation itself failed -- log and bail.
        status += "SAVE_SITEWIDE_DAILY_METRICS-FAILED_TO_CALCULATE "
        success = False
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            status=status,
        )
        results = {
            'success': success,
            'status': status,
        }
        return results
    # Check the process back in and record how it went.
    try:
        if daily_metrics_calculated:
            batch_process.date_completed = now()
            batch_process.completion_summary = "Sitewide daily metrics SAVED"
        else:
            batch_process.completion_summary = "Sitewide daily metrics NOT saved"
        batch_process.date_checked_out = None
        batch_process.save()
    except Exception as e:
        status += "ERROR-CHECKED_OUT_TIME_NOT_RESET: " + str(e) + " "
        handle_exception(e, logger=logger, exception_message=status)
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process.id,
            kind_of_process=batch_process.kind_of_process,
            analytics_date_as_integer=batch_process.analytics_date_as_integer,
            status=status,
        )
    if daily_metrics_calculated:
        # If here, there aren't any more sitewide_daily_metrics to process for this date
        defaults = {
            'finished_calculate_sitewide_daily_metrics': True,
        }
        status_results = analytics_manager.save_analytics_processing_status(
            batch_process.analytics_date_as_integer,
            defaults=defaults)
        status += status_results['status']
    else:
        status += "COULD_NOT_CALCULATE_SITEWIDE_DAILY_METRICS "
    batch_process_manager.create_batch_process_log_entry(
        batch_process_id=batch_process.id,
        kind_of_process=batch_process.kind_of_process,
        status=status,
    )
    results = {
        'success': success,
        'status': status,
    }
    return results
def process_batch_set(batch_set_id=0, analyze_all=False, create_all=False, delete_all=False):
    """
    Run one action (analyze, create, or delete) over every BatchDescription in a batch set.

    Exactly one of analyze_all / create_all / delete_all should be truthy; they
    are checked in that order and only the first one found runs.

    :param batch_set_id: id of the batch set whose descriptions are processed (required)
    :param analyze_all: create batch row actions for not-yet-analyzed descriptions
    :param create_all: import (create) data from analyzed descriptions
    :param delete_all: delete data belonging to analyzed descriptions
    :return: dict with 'success', 'status', 'batch_rows_analyzed',
             'batch_rows_created' and 'batch_rows_deleted'
    """
    status = ""
    success = True
    batch_rows_analyzed = 0
    batch_rows_created = 0
    batch_rows_deleted = 0
    start_each_batch_time_tracker = []  # Array of times
    summary_of_create_batch_row_action_time_tracker = []  # Array of arrays
    if not positive_value_exists(batch_set_id):
        status += "BATCH_SET_ID_REQUIRED "
        success = False
        results = {
            'success': success,
            'status': status,
            'batch_rows_analyzed': batch_rows_analyzed,
            'batch_rows_created': batch_rows_created,
            # Bug fix: this key was missing on the early-return path, so any
            # caller reading 'batch_rows_deleted' would hit a KeyError here.
            'batch_rows_deleted': batch_rows_deleted,
        }
        return results

    # Store static data in memory so we don't have to use the database
    election_objects_dict = {}
    office_objects_dict = {}
    measure_objects_dict = {}
    if positive_value_exists(analyze_all):
        batch_rows_analyzed = 0
        batch_header_id_created_list = []
        batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
        batch_description_query = batch_description_query.exclude(batch_description_analyzed=True)
        # Note that this needs to be read_only=False
        batch_list = list(batch_description_query)
        batch_description_rows_reviewed = 0
        for one_batch_description in batch_list:
            start_each_batch_time_tracker.append(now().strftime("%H:%M:%S:%f"))
            results = create_batch_row_actions(
                one_batch_description.batch_header_id,
                batch_description=one_batch_description,
                election_objects_dict=election_objects_dict,
                measure_objects_dict=measure_objects_dict,
                office_objects_dict=office_objects_dict,
            )
            batch_description_rows_reviewed += 1
            if results['batch_actions_created']:
                batch_rows_analyzed += 1
                batch_header_id_created_list.append(one_batch_description.batch_header_id)
            if not results['success']:
                status += results['status']
            # Carry the object caches forward so the next description reuses them
            election_objects_dict = results['election_objects_dict']
            measure_objects_dict = results['measure_objects_dict']
            office_objects_dict = results['office_objects_dict']
            start_create_batch_row_action_time_tracker = results['start_create_batch_row_action_time_tracker']
            summary_of_create_batch_row_action_time_tracker.append(start_create_batch_row_action_time_tracker)
        status += "CREATE_BATCH_ROW_ACTIONS_BATCH_ROWS_ANALYZED: " + str(batch_rows_analyzed) + \
                  " OUT_OF " + str(batch_description_rows_reviewed) + ", "
    elif positive_value_exists(create_all):
        batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
        batch_description_query = batch_description_query.filter(batch_description_analyzed=True)
        batch_list = list(batch_description_query)
        batch_rows_created = 0
        batch_rows_not_created = 0
        for one_batch_description in batch_list:
            results = import_data_from_batch_row_actions(
                one_batch_description.kind_of_batch, IMPORT_CREATE, one_batch_description.batch_header_id)
            if results['number_of_table_rows_created']:
                batch_rows_created += 1
            else:
                batch_rows_not_created += 1
                # Cap the status noise: only report the first few failures
                if batch_rows_not_created < 10:
                    status += results['status']
            if not positive_value_exists(results['success']) and len(status) < 1024:
                status += results['status']
        status += "BATCH_ROWS_CREATED: " + str(batch_rows_created) + ", "
        if positive_value_exists(batch_rows_not_created):
            status += "BATCH_ROWS_NOT_CREATED: " + str(batch_rows_not_created) + ", "
    elif positive_value_exists(delete_all):
        batch_description_query = BatchDescription.objects.filter(batch_set_id=batch_set_id)
        batch_description_query = batch_description_query.filter(batch_description_analyzed=True)
        batch_list = list(batch_description_query)
        batch_rows_deleted = 0
        for one_batch_description in batch_list:
            results = import_data_from_batch_row_actions(
                one_batch_description.kind_of_batch, IMPORT_DELETE, one_batch_description.batch_header_id)
            if results['number_of_table_rows_deleted']:
                batch_rows_deleted += 1
            if not positive_value_exists(results['success']) and len(status) < 1024:
                status += results['status']
        status += "BATCH_ROWS_DELETED: " + str(batch_rows_deleted) + ", "
    else:
        status += "MUST_SPECIFY_ANALYZE_CREATE_OR_DELETE "
        # Bug fix: report failure when no action flag was supplied, matching
        # the batch_set_id guard above (previously success stayed True).
        success = False
    results = {
        'success': success,
        'status': status,
        'batch_rows_analyzed': batch_rows_analyzed,
        'batch_rows_created': batch_rows_created,
        'batch_rows_deleted': batch_rows_deleted,
    }
    return results
def mark_batch_process_as_complete(batch_process=None,
                                   batch_process_ballot_item_chunk=None,
                                   batch_set_id=0,
                                   google_civic_election_id=None,
                                   kind_of_process="",
                                   state_code=None,
                                   status=""):
    """
    Mark a batch process and/or its ballot item chunk as finished.

    The batch_process is re-fetched from the database before saving so a pause
    applied since it was first retrieved is not clobbered; any missing
    started/completed timestamps are filled in with now().

    :return: dict with 'success', 'status', the (possibly re-fetched)
             'batch_process', 'batch_process_updated',
             'batch_process_ballot_item_chunk' and
             'batch_process_ballot_item_chunk_updated'
    """
    success = True
    batch_process_updated = False
    batch_process_ballot_item_chunk_updated = False
    batch_process_manager = BatchProcessManager()
    batch_process_id = 0
    batch_process_ballot_item_chunk_id = 0
    if batch_process:
        try:
            # Before saving batch_process, make sure we have the latest version. (For example, it might have been
            # paused since it was first retrieved.)
            batch_process_results = batch_process_manager.retrieve_batch_process(batch_process.id)
            if positive_value_exists(batch_process_results['batch_process_found']):
                batch_process = batch_process_results['batch_process']
            batch_process_id = batch_process.id
            if batch_process.date_started is None:
                batch_process.date_started = now()
            if batch_process.date_completed is None:
                batch_process.date_checked_out = None
                batch_process.date_completed = now()
            batch_process.save()
            batch_process_updated = True
            status += "BATCH_PROCESS_MARKED_COMPLETE "
        except Exception as e:
            success = False
            status += "ERROR-CANNOT_MARK_BATCH_PROCESS_AS_COMPLETE " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process_id,
                batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
                batch_set_id=batch_set_id,
                google_civic_election_id=google_civic_election_id,
                kind_of_process=kind_of_process,
                state_code=state_code,
                status=status,
            )
    if batch_process_ballot_item_chunk:
        try:
            batch_process_ballot_item_chunk_id = batch_process_ballot_item_chunk.id
            # Backfill every missing stage timestamp so the chunk reads as fully done
            if batch_process_ballot_item_chunk.retrieve_date_started is None:
                batch_process_ballot_item_chunk.retrieve_date_started = now()
            if batch_process_ballot_item_chunk.retrieve_date_completed is None:
                batch_process_ballot_item_chunk.retrieve_date_completed = now()
            if batch_process_ballot_item_chunk.analyze_date_started is None:
                batch_process_ballot_item_chunk.analyze_date_started = now()
            if batch_process_ballot_item_chunk.analyze_date_completed is None:
                batch_process_ballot_item_chunk.analyze_date_completed = now()
            if batch_process_ballot_item_chunk.create_date_started is None:
                batch_process_ballot_item_chunk.create_date_started = now()
            if batch_process_ballot_item_chunk.create_date_completed is None:
                batch_process_ballot_item_chunk.create_date_completed = now()
            batch_process_ballot_item_chunk.save()
            batch_process_ballot_item_chunk_updated = True
            status += "BATCH_PROCESS_BALLOT_ITEM_CHUNK_MARKED_COMPLETE "
        except Exception as e:
            success = False
            status += "ERROR-CANNOT_MARK_BATCH_PROCESS_BALLOT_ITEM_CHUNK_AS_COMPLETE " + str(e) + " "
            handle_exception(e, logger=logger, exception_message=status)
            batch_process_manager.create_batch_process_log_entry(
                batch_process_id=batch_process_id,
                batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
                batch_set_id=batch_set_id,
                google_civic_election_id=google_civic_election_id,
                kind_of_process=kind_of_process,
                state_code=state_code,
                status=status,
            )
    # Bug fix: the original tested batch_process_ballot_item_chunk_updated
    # twice, so completing only the batch_process never wrote this log entry.
    if batch_process_updated or batch_process_ballot_item_chunk_updated:
        batch_process_manager.create_batch_process_log_entry(
            batch_process_id=batch_process_id,
            batch_process_ballot_item_chunk_id=batch_process_ballot_item_chunk_id,
            batch_set_id=batch_set_id,
            google_civic_election_id=google_civic_election_id,
            kind_of_process=kind_of_process,
            state_code=state_code,
            status=status,
        )
    results = {
        'success':                                  success,
        'status':                                   status,
        'batch_process':                            batch_process,
        'batch_process_updated':                    batch_process_updated,
        'batch_process_ballot_item_chunk':          batch_process_ballot_item_chunk,
        'batch_process_ballot_item_chunk_updated':  batch_process_ballot_item_chunk_updated,
    }
    return results
def schedule_retrieve_ballots_for_polling_locations_api_v4(
        google_civic_election_id="",
        state_code="",
        refresh_ballot_returned=False,
        use_ballotpedia=False,
        use_ctcl=False,
        use_vote_usa=False):
    """Queue a batch process that retrieves (or refreshes) ballot items from
    polling locations for one election, and log whether scheduling worked.

    :return: dict with 'success' and 'status'
    """
    status = ""
    # [REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
    #  RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]
    # Refresh existing ballot items, or retrieve them fresh.
    kind_of_process = REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS \
        if positive_value_exists(refresh_ballot_returned) \
        else RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS
    status += "SCHEDULING: " + str(kind_of_process) + " "

    process_manager = BatchProcessManager()
    create_results = process_manager.create_batch_process(
        google_civic_election_id=google_civic_election_id,
        kind_of_process=kind_of_process,
        state_code=state_code,
        use_ballotpedia=use_ballotpedia,
        use_ctcl=use_ctcl,
        use_vote_usa=use_vote_usa)
    status += create_results['status']
    success = create_results['success']

    if create_results['batch_process_saved']:
        scheduled_process = create_results['batch_process']
        status += "BATCH_PROCESS_SAVED "
        process_manager.create_batch_process_log_entry(
            batch_process_id=scheduled_process.id,
            google_civic_election_id=scheduled_process.google_civic_election_id,
            kind_of_process=scheduled_process.kind_of_process,
            state_code=scheduled_process.state_code,
            status=status,
        )
    else:
        status += "FAILED_TO_SCHEDULE-" + str(kind_of_process) + " "
        process_manager.create_batch_process_log_entry(
            batch_process_id=0,
            google_civic_election_id=google_civic_election_id,
            kind_of_process=kind_of_process,
            state_code=state_code,
            status=status,
        )
    return {
        'success': success,
        'status': status,
    }
def schedule_refresh_ballots_for_voters_api_v4(
        google_civic_election_id="",
        state_code="",
        voter_id=0,
        use_ballotpedia=False,
        use_ctcl=False,
        use_vote_usa=False):
    """Queue a batch process that refreshes voters' ballot items for one
    election, and log whether scheduling worked.

    :return: dict with 'success' and 'status'
    """
    status = ""
    # [REFRESH_BALLOT_ITEMS_FROM_POLLING_LOCATIONS, REFRESH_BALLOT_ITEMS_FROM_VOTERS,
    #  RETRIEVE_BALLOT_ITEMS_FROM_POLLING_LOCATIONS]
    process_manager = BatchProcessManager()
    create_results = process_manager.create_batch_process(
        google_civic_election_id=google_civic_election_id,
        kind_of_process=REFRESH_BALLOT_ITEMS_FROM_VOTERS,
        state_code=state_code,
        voter_id=voter_id,
        use_ballotpedia=use_ballotpedia,
        use_ctcl=use_ctcl,
        use_vote_usa=use_vote_usa)
    status += create_results['status']
    success = create_results['success']

    if create_results['batch_process_saved']:
        scheduled_process = create_results['batch_process']
        status += "SCHEDULED_REFRESH_BALLOTS_FOR_VOTERS "
        process_manager.create_batch_process_log_entry(
            batch_process_id=scheduled_process.id,
            google_civic_election_id=scheduled_process.google_civic_election_id,
            kind_of_process=scheduled_process.kind_of_process,
            state_code=scheduled_process.state_code,
            status=status,
        )
    else:
        status += "FAILED_TO_SCHEDULE_REFRESH_BALLOTS_FOR_VOTERS "
        process_manager.create_batch_process_log_entry(
            batch_process_id=0,
            google_civic_election_id=google_civic_election_id,
            kind_of_process=REFRESH_BALLOT_ITEMS_FROM_VOTERS,
            state_code=state_code,
            status=status,
        )
    return {
        'success': success,
        'status': status,
    }
|
class ResumeXmlHandler(xml.sax.ContentHandler):
    """SAX content handler that extracts candidate-profile fields from an
    RChilli resume-parse XML response.

    Collected values land in self.params, keyed by the lower-cased element
    name (with a few renames such as ZipCode -> 'pincode').
    """
    def __init__(self):
        self.node = ''      # tag name of the element currently being parsed
        self.params = {}    # collected field name -> cleaned value
        self.error = None

    def startElement(self, name, attrs):
        self.node = name

    def endElement(self, name):
        self.node = ''

    def characters(self, content):
        """Accumulate/clean the text content of the current element."""
        if self.node == 'Summary':
            # Strip the section headings the parser sometimes leaves embedded
            # in the summary text (the misspelled 'Work Sumamry' entry is
            # kept as-is -- it matches what arrives in the data).
            for dirty in ['Work Sumamry','WORK SUMMARY','Summary','Professional Summary','PROFESSIONAL','Professional:','PROFESSIONAL:','PROFESSIONAL :','PROFESSIONAL SUMMARY:','Professional summary','SUMMARY','PROFESSIONAL SUMMARY','SUMMARY:','EXECUTIVE SUMMARY']:
                if dirty in content:
                    content = content.replace(dirty, '')
            # Summary text may arrive over several characters() calls, so
            # append rather than overwrite.  (Bug fix: dict.has_key() was
            # removed in Python 3; use the 'in' operator instead.)
            if self.node.lower() in self.params:
                self.params[self.node.lower()] += _my_unescape(content)
            else:
                self.params[self.node.lower()] = _my_unescape(content)
        else:
            content = content.strip()
            if self.node == 'ResumeFileName':
                self.params['resumefilename'] = _my_unescape(content)
            elif self.node == 'FirstName':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'LastName':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'Email':
                # Drop label prefixes such as 'E-mail-' before the address
                for dirty in ['E-mail-','-',':-',':',' ']:
                    if dirty in content:
                        content = content.split(dirty)[1]
                self.params[self.node.lower()] = _my_unescape(content.strip())
            elif self.node == 'Phone' or self.node == 'Mobile' or self.node == 'FormattedPhone' or self.node == 'FormattedMobile':
                content = content.replace(' ', '')
                if len(content) > 10:
                    # Keep only the last 10 digits (drops any country code)
                    self.params[self.node.lower()] = _my_unescape(content)[-10:]
                elif len(content) < 10:
                    # Too short to be a usable number; skip it
                    pass
                else:
                    self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'Address':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'City':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'State':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'ZipCode':
                self.params['pincode'] = _my_unescape(content)
            elif self.node == 'Skill':
                skill_list = _my_unescape(content).replace('\n', ',').replace('\r', '').split(',')
                # Category labels emitted alongside real skills; filtered out
                labels = ['language', 'frameworks', 'tools', 'operating systems','training','analysis','module','user interface','programming','trouble shooting','capability','specifications','platforms','modules','cleanliness','assembly']
                self.params['claimed_skills'] = ''
                for skill in skill_list:
                    skill = skill.strip()
                    if skill.lower() not in labels:
                        self.params['claimed_skills'] += 's_%s,' % skill
            # candidate profile fields
            elif self.node == 'Gender':
                gender_dict = {'Female': 'F',
                               'Male': 'M',
                               'Other': 'O'
                               }
                gender = _my_unescape(content)
                if gender in gender_dict:
                    self.params[self.node.lower()] = gender_dict[gender]
            elif self.node == 'DateOfBirth':
                self.params[self.node.lower()] = _parse_date(_my_unescape(content))
            elif self.node == 'LicenseNo':
                self.params[self.node.lower()] = _my_unescape(content)
            elif self.node == 'Nationality':
                self.params[self.node.lower()] = _my_unescape(content)
#End of ResumeXmlHandler()
def _parse_date(data):
if data.count('/') ==2:
DOB = data.split('/')
date_of_birth = '%s-%s-%s'%(DOB[2],DOB[1],DOB[0])
else:
date_of_birth =''
return date_of_birth
class ProfileFieldsSanitizer(object):
    """Validate parsed candidate-profile fields against per-field rules.

    Each validator dict describes one field: 'type' ('str'/'int'/'float'),
    a 'regex' the string form must match, and either 'maxlen' (strings) or
    'maxval'/'minval' (numbers).  Fields that fail are collected in
    self.invalid_fields and written as CSV to self.filename by
    validate_all_fields().

    Bug fixes relative to the original:
    - no longer subclasses the Python 2 ``file`` builtin (it never used any
      file behavior, and ``file`` does not exist in Python 3);
    - the length/range checks compared ``type(value)`` against the strings
      'string'/'int', which is always False, so maxlen/maxval/minval were
      never enforced; they are now;
    - _DEGREEYEARVALIDATOR had its min/max swapped (maxval 1947, minval
      2100), which would reject every year once range checks work.
    """
    def __init__(self, file):
        self.invalid_fields = {}    # field name -> rejected value
        self.filename = file        # CSV report path for invalid fields

    _NAMEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 150,
    }
    _EMAILVALIDATOR = {
        'type': 'str',
        'regex': '(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)',
        'maxlen': 30,
    }
    _PHONEVALIDATOR = {
        'type': 'str',
        'regex': '^\+?1?\d{9,15}$',
        'maxlen': 15,
    }
    _ADDRVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 128,
    }
    _CITYVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 64,
    }
    _STATEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 64,
    }
    _PINVALIDATOR = {
        'type': 'int',
        'regex': '\d{5,6}',
        'maxval': 999999,
        'minval': 0,
    }
    _CLAIMEDSKILLVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 8192,
    }
    _GENDERVALIDATOR = {
        'type': 'str',
        'regex': '[FMO]',
        'maxlen': 1,
    }
    _DOBVALIDATOR = {
        'type': 'str',
        'regex': '\d{4}\-(0?[1-9]|1[012])\-(0?[1-9]|[12][0-9]|3[01])',
        'maxlen': 10,
    }
    _LICENSEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 512,
    }
    _SUMMARYVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 4096,
    }
    _CTCVALIDATOR = {
        'type': 'float',
        'regex': '.*',
        'maxval': 9999999999.99,
        'minval': 0,
    }
    _NPVALIDATOR = {
        'type': 'int',
        'regex': '.*',
        'maxval': 12,
        'minval': -1,
    }
    _DEGREEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 16,
    }
    _BRANCHVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 32,
    }
    _COLLEGEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 128,
    }
    _UNIVERSITYVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 128,
    }
    # Bug fix: min/max were swapped in the original (maxval 1947, minval 2100)
    _DEGREEYEARVALIDATOR = {
        'type': 'int',
        'regex': '.*',
        'maxval': 2100,
        'minval': 1947,
    }
    _PERFORMANCEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 128,
    }
    _EMPLOYERVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 64,
    }
    _ROLEVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 1024,
    }
    _JOBLOCATIONVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 64,
    }
    _JOBDATEVALIDATOR = {
        'type': 'str',
        'regex': '\d{4}\-(0?[1-9]|1[012])\-(0?[1-9]|[12][0-9]|3[01])',
        'maxlen': 10,
    }
    _JOBPERIODVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 64,
    }
    _JDVALIDATOR = {
        'type': 'str',
        'regex': '.*',
        'maxlen': 8192,
    }
    # Map of incoming field name -> validator.  Unlisted fields pass as-is.
    _validators = {
        'firstname': _NAMEVALIDATOR,
        'lastname': _NAMEVALIDATOR,
        'email': _EMAILVALIDATOR,
        'phone': _PHONEVALIDATOR,
        'address': _ADDRVALIDATOR,
        'address1': _ADDRVALIDATOR,
        'address2': _ADDRVALIDATOR,
        'city': _CITYVALIDATOR,
        'state': _STATEVALIDATOR,
        'pincode': _PINVALIDATOR,
        'claimed_skills': _CLAIMEDSKILLVALIDATOR,
        'gender': _GENDERVALIDATOR,
        'dateofbirth': _DOBVALIDATOR,
        'birthdate': _DOBVALIDATOR,
        'licenseno': _LICENSEVALIDATOR,
        'dl_number': _LICENSEVALIDATOR,
        'summary': _SUMMARYVALIDATOR,
        'annualCtc': _CTCVALIDATOR,
        'annualctc': _CTCVALIDATOR,
        'expectedCtc': _CTCVALIDATOR,
        'expectedctc': _CTCVALIDATOR,
        'curSalary': _CTCVALIDATOR,
        'expSalary': _CTCVALIDATOR,
        'noticePeriod': _NPVALIDATOR,
        'degree': _DEGREEVALIDATOR,
        'branch': _BRANCHVALIDATOR,
        'college': _COLLEGEVALIDATOR,
        'university': _UNIVERSITYVALIDATOR,
        'year': _DEGREEYEARVALIDATOR,
        'joiningYear': _DEGREEYEARVALIDATOR,
        'graduationYear': _DEGREEYEARVALIDATOR,
        'performance': _PERFORMANCEVALIDATOR,
        'aggregate': _PERFORMANCEVALIDATOR,
        'employer': _EMPLOYERVALIDATOR,
        'jobprofile': _ROLEVALIDATOR,
        'joblocation': _JOBLOCATIONVALIDATOR,
        'startdate': _JOBDATEVALIDATOR,
        'enddate': _JOBDATEVALIDATOR,
        'jobperiod': _JOBPERIODVALIDATOR,
        'jobdescritption': _JDVALIDATOR,
    }

    def _validate(self, validator, key, value):
        """Run one validator; record failures in self.invalid_fields.

        :return: 'valid' or 'invalid'
        """
        numeric_value = None
        if validator['type'] == 'str':
            try:
                value = str(value)
            except Exception:
                self.invalid_fields[key] = value
                return 'invalid'
        if validator['type'] == 'int':
            try:
                numeric_value = int(value)
                value = str(value)
            except Exception:
                self.invalid_fields[key] = value
                return 'invalid'
        if validator['type'] == 'float':
            try:
                numeric_value = float(value)
                value = str(value)
            except Exception:
                self.invalid_fields[key] = value
                return 'invalid'
        if not re.match(validator['regex'], value):
            self.invalid_fields[key] = value
            return 'invalid'
        # Bug fix: the original compared type(value) against the strings
        # 'string'/'int', which never matched, so these limits were dead code.
        if numeric_value is None:
            # String field: enforce the maximum length
            if validator['maxlen'] < len(value):
                self.invalid_fields[key] = value
                return 'invalid'
        else:
            # Numeric field: enforce the allowed range
            if validator['maxval'] < numeric_value:
                self.invalid_fields[key] = value
                return 'invalid'
            if validator['minval'] > numeric_value:
                self.invalid_fields[key] = value
                return 'invalid'
        return 'valid'

    def validate(self, name, value):
        """Return False when a known field fails its validator, else True."""
        validity = 'didnotvalidate'
        # Bug fix: dict.has_key() was removed in Python 3
        if name in self._validators:
            validator = self._validators[name]
            validity = self._validate(validator, name, value)
        if validity == 'invalid':
            return False
        else:
            return True

    def validate_all_fields(self, dict_in):
        """Validate every (possibly nested one level) field in dict_in.

        Invalid fields are set to None in place, and a CSV report of them is
        written to self.filename; when everything is valid the (pre-created)
        report file is removed instead.
        """
        for k in dict_in.keys():
            value = dict_in[k]
            if type(value) is not dict:
                if not self.validate(k, value):
                    dict_in[k] = None
            else:
                for i in value:
                    if not self.validate(i, value[i]):
                        dict_in[k][i] = None
        if self.invalid_fields:
            with open(self.filename, 'w') as f:
                f.write('Field,Value\r\n')
                for field in self.invalid_fields:
                    try:
                        f.write('%s,%s\r\n' % (field, self.invalid_fields[field]))
                    except Exception:
                        # Value could not be formatted; record the field alone
                        f.write('%s\r\n' % (field))
        else:
            os.remove(self.filename)
# End of ProfileFieldsSanitizer()
#Resume parser using RChilli API
def _resume_parser(content_type,resume_data,err_resp=''):
    """Parse a base64-encoded resume through the RChilli shell wrapper.

    :param content_type: MIME type of the uploaded resume
    :param resume_data: base64 payload; anything after the first '#' is ignored
    :param err_resp: XML error fragment that errors are appended to
    :return: (dict of parsed candidate fields, err_resp string)
    """
    logr.info("Incoming request to parser resume using RChilli API")
    errors = ''
    valid_content_types = {'application/pdf': 'pdf', 'application/msword': 'doc' , 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx', 'application/vnd.openxmlformats-officedocument.wordprocessingml.template':'dotx', 'application/vnd.ms-word.document.macroEnabled.12':'docm', 'application/vnd.ms-word.template.macroEnabled.12':'dotm'}
    if content_type not in valid_content_types:
        logr.info("Invalid content type-%s"%content_type)
        errors += 'Invalid value (%s) for the field %s'%(content_type,"Content Type")
        err_resp += '<errors>\r\n'
        err_resp += _add_xml_field('errorStr',errors)
        err_resp += '</errors>\r\n'
        return ({},err_resp)
    pid = os.getpid()
    filename = 'resume_%s.%s' % (pid, valid_content_types[content_type])
    data = resume_data.split('#')[0]
    decoded_data = b64decode(data)
    # Bug fix: the decoded payload is binary, so write in 'wb' (text mode
    # would break on Python 3); 'with' guarantees the handle is closed.
    with open('/tmp/%s' % filename, 'wb') as f:
        f.write(decoded_data)
    # Dictionary for storing candidate info
    # Key : model fields
    # Value: the data
    cand_res_data = {}
    cand_res_data['filename'] = filename
    retval = call(['/var/floLearning/RChilliParser/parser.sh',filename])
    if retval != 0:
        errors += "Error while parsing %s"%filename
        err_resp += _add_xml_field("errorStr",errors)
        return (cand_res_data,err_resp)
    output_fn = '/content/candidates/resume_responses/xml_resp_%s.txt'%filename
    with open(output_fn,'r') as f_name:
        xmldata = f_name.read()
    xmlhandler = ResumeXmlHandler()
    # Pre-create the per-request error report the sanitizer may fill in
    error_file = 'errors_%s.txt'%pid
    retval = call(['touch','/content/candidates/resume_responses/errors/%s'%error_file])
    sanitizer = ProfileFieldsSanitizer('/content/candidates/resume_responses/errors/%s'%error_file)
    try:
        xml.sax.parseString(xmldata,xmlhandler)
    except Exception:
        errors += 'Error while parsing (SAX) %s\r\n'%filename
        err_resp += _add_xml_field('errorStr',errors)
        return (cand_res_data,err_resp)
    cand_resume_data = xmlhandler
    # Null out any field that fails validation before handing data onward
    sanitizer.validate_all_fields(cand_resume_data.params)
    cand_resume_data.params['error_file'] = error_file
    cand_resume_data.params['filename'] = filename
    cand_resume_data.params['rchillie_resp_file'] = 'xml_resp_%s.txt'%filename
    logr.info(cand_resume_data.params)
    return (cand_resume_data.params,err_resp)
#end of _resume_parser
def _update_parsed_candidate_profile_helper(candidate_resume_data,candidate):
    """Copy resume-parser output onto the candidate's address, skills and profile.

    Each value is applied only when it is truthy, and each model object
    (candidate, CurrentAddress, Profile) is saved at most once, and only when
    one of its fields actually changed.

    input: dictionary of resume parsed info (keys match model fields)
    output: the updated rows are saved to the database; nothing is returned
    """
    logr.info("Incoming data to update the candidate profile from resume parser output")
    cand_addr, ignore = CurrentAddress.objects.get_or_create(candidate_id = candidate.id)
    # Current address: (parser key, CurrentAddress attribute)
    addr_dirty = False
    for parsed_key, model_attr in (('address', 'address1'),
                                   ('city', 'city'),
                                   ('state', 'state'),
                                   ('pincode', 'pin_code')):
        parsed_value = candidate_resume_data.get(parsed_key, 0)
        if parsed_value:
            addr_dirty = True
            setattr(cand_addr, model_attr, parsed_value)
    claimed_skills = candidate_resume_data.get('claimed_skills', 0)
    if claimed_skills:
        candidate.claimed_skills = claimed_skills
        candidate.save()
    if addr_dirty:
        cand_addr.save()
    # Saving candidate profile
    candidate_profile = Profile.objects.get(candidate_id=candidate.id)
    profile_dirty = False
    for parsed_key, model_attr in (('gender', 'gender'),
                                   ('dateofbirth', 'birthdate'),
                                   ('licenseno', 'dl_number'),
                                   ('nationality', 'nationality'),
                                   ('summary', 'summary')):
        parsed_value = candidate_resume_data.get(parsed_key, 0)
        if parsed_value:
            profile_dirty = True
            setattr(candidate_profile, model_attr, parsed_value)
    if profile_dirty:
        candidate_profile.save()
# End of _update_parsed_candidate_profile_helper
def _response_builder_from_parsed_resume_data(candidate,candidate_resume_data,resp):
    """
    Build the <newProfile> XML block from the parsed resume info dictionary.

    Fields present in candidate_resume_data are emitted with their values;
    the remaining profile fields are emitted as empty elements so the client
    always receives the full profile shape.

    :param candidate: candidate model instance (id/user fields are echoed)
    :param candidate_resume_data: dict of fields from the resume parser
    :param resp: XML string built so far; returned with <newProfile> appended
    """
    indent =3
    resp += '\t\t<newProfile>\r\n'
    resp += _add_xml_field("id", candidate.id,indent)
    resp += _add_xml_field("email", candidate.user.email,indent)
    resp += _add_xml_field("firstName", candidate.user.first_name,indent)
    resp += _add_xml_field("middleName", candidate.middle_name,indent)
    resp += _add_xml_field("lastName", candidate.user.last_name,indent)
    resp += _add_xml_field("mobileNumber", candidate.phone_number,indent)
    addr = candidate_resume_data.get('address',0)
    city = candidate_resume_data.get('city',0)
    state = candidate_resume_data.get('state',0)
    pincode = candidate_resume_data.get('pincode',0)
    # NOTE(review): claimed_skills is fetched here but never emitted below
    claimed_skills= candidate_resume_data.get('claimed_skills',0)
    gender = candidate_resume_data.get('gender',0)
    dateofbirth= candidate_resume_data.get('dateofbirth',0)
    licenseno = candidate_resume_data.get('licenseno',0)
    summary = candidate_resume_data.get('summary',0)
    nationality = candidate_resume_data.get('nationality',0)
    # Indentation prefix shared by most elements in this block
    indent_majority = ''
    for i in range(indent):
        indent_majority += '\t'
    # Mock the current address field from the parsed resume dictionary
    resp += indent_majority + '<currentAddress>\r\n'
    if addr:
        resp += _add_xml_field("address1",addr, indent+1)
    else:
        resp += _add_xml_field("address1","", indent+1)
    resp += _add_xml_field("address2","", indent+1)
    if city:
        resp += _add_xml_field("city", city, indent+1)
    else:
        resp += _add_xml_field("city", '', indent+1)
    if state:
        resp += _add_xml_field("state", state, indent+1)
    else:
        resp += _add_xml_field("state", '', indent+1)
    if pincode:
        resp += _add_xml_field("pinCode", pincode, indent+1)
    else:
        resp += _add_xml_field("pinCode", '', indent+1)
    resp += indent_majority + '</currentAddress>\r\n'
    # Permenant Address fields are always blank in the newProfile fields after the resume parser
    resp += indent_majority +'<permenantAddress>\r\n'
    addr_fields = _get_required_object_fields("address")
    (response, flag) = _add_fields(addr_fields, PermenantAddress(), indent+1, True)
    resp += response
    resp += indent_majority +'</permenantAddress>\r\n'
    # Build candidate profile response
    if gender:
        resp += _add_xml_field("gender", gender, indent)
    else:
        resp += indent_majority + '<gender/>\r\n'
    if dateofbirth:
        resp += _add_xml_field("birthdate", dateofbirth, indent)
    else:
        resp += indent_majority + '<birthdate/>\r\n'
    resp += indent_majority + '<dl_type/>\r\n'
    if licenseno:
        resp += _add_xml_field("dl_number", licenseno, indent)
    else:
        resp += indent_majority + '<dl_number/>\r\n'
    if nationality:
        resp += _add_xml_field("nationality", nationality, indent)
    else:
        resp += indent_majority + '<nationality/>\r\n'
    resp += indent_majority + '<dl_expiry/>\r\n'
    resp += indent_majority + '<height/>\r\n'
    resp += indent_majority + '<weight/>\r\n'
    # NOTE(review): <dl_expiry/> is emitted a second time here (also three
    # lines above) -- confirm whether the client expects the duplicate
    resp += indent_majority + '<dl_expiry/>\r\n'
    resp += indent_majority + '<dl_registered_state/>\r\n'
    resp += indent_majority + '<interested_jobs/>\r\n'
    resp += indent_majority + '<dreamJob/>\r\n'
    resp += indent_majority + '<personalityStrengths/>\r\n'
    resp += indent_majority + '<personalityWeaknesses/>\r\n'
    resp += indent_majority + '<totalExperience/>\r\n'
    resp += indent_majority + '<annualCtc/>\r\n'
    resp += indent_majority + '<curSalary/>\r\n'
    resp += indent_majority + '<curSalaryFreq/>\r\n'
    resp += indent_majority + '<curSalaryCurrency/>\r\n'
    resp += indent_majority + '<expectedRaise/>\r\n'
    resp += indent_majority + '<expSalary/>\r\n'
    resp += indent_majority + '<expSalaryFreq/>\r\n'
    resp += indent_majority + '<expSalaryCurrency/>\r\n'
    resp += indent_majority + '<noticePeriod/>\r\n'
    resp += indent_majority + '<relocation/>\r\n'
    resp += indent_majority + '<linkedInProfile/>\r\n'
    resp += indent_majority + '<gitHubProfile/>\r\n'
    resp += indent_majority + '<queryHomeProfile/>\r\n'
    resp += _add_xml_field('status','Looking for job',indent)
    resp += indent_majority + '<reference/>\r\n'
    if summary:
        resp += _add_xml_field("summary",summary, indent)
    else:
        resp += indent_majority +'<summary/>\r\n'
    resp += indent_majority + '<Degree/>\r\n'
    resp += indent_majority + '<WorkExperience/>\r\n'
    resp += indent_majority + '<uniqueId/>\r\n'
    resp += _add_xml_field("ownTwoWheeler", False, indent)
    resp += '\t\t</newProfile>\r\n'
    return resp
#end of _response_builder_from_parser_resume_data
def _inline_response_builder(resp,request,candidate,cand_resume_data,res_fn):
    """Assemble the <resume> XML response after a resume has been parsed.

    Backfills the candidate's phone number and last name from the parsed
    resume when they are missing, renames the stored response files to carry
    the candidate id, then emits the existing and newly-parsed profile blocks.

    :param resp: XML string built so far; returned with the <resume> block appended
    :param request: HTTP request (passed through to _fetch_profile_helper)
    :param candidate: candidate model instance being updated
    :param cand_resume_data: dict of fields produced by the resume parser
    :param res_fn: original resume file name, echoed in the response
    """
    cid=candidate.id
    _change_filename(cand_resume_data,cid)
    if not int(candidate.phone_number):
        # NOTE(review): no break in this loop, so when several phone fields
        # are present the LAST matching one wins -- confirm that is intended
        for phone_field in ['phone','mobile','formattedphone','formattedmobile']:
            if cand_resume_data.get(phone_field,0):
                candidate.phone_number=cand_resume_data[phone_field]
    if not candidate.user.last_name:
        if cand_resume_data.get('lastname',0):
            candidate.user.last_name=cand_resume_data['lastname']
    # NOTE(review): candidate.save() presumably does not persist the related
    # user object, so the last_name assignment above may also need
    # candidate.user.save() -- verify against the model definitions
    candidate.save()
    resp +='<resume>\r\n'
    resp += _add_xml_field("filename", res_fn)
    resp += '\t<candidate>\r\n'
    existing_resp =_fetch_profile_helper(request,cid)
    if not existing_resp:
        # No existing profile: emit an empty tag and seed the profile from
        # the parsed resume data
        resp += '\t\t<existingProfile/>\r\n'
        _update_parsed_candidate_profile_helper(cand_resume_data,candidate)
    else:
        resp += existing_resp
    response = _response_builder_from_parsed_resume_data(candidate,cand_resume_data,'')
    resp += response
    resp += '\t</candidate>\r\n'
    return resp
def _change_filename(cand_resume_data, cid):
    """Rename the resume-parser response and error files so their names
    carry the candidate id, updating *cand_resume_data* in place."""
    # Response file: truncate the name to 15 chars, then append '_<cid>.txt'.
    old_resp = cand_resume_data['rchillie_resp_file']
    new_resp = old_resp[:15] + '_%s.txt' % cid
    cand_resume_data['rchillie_resp_file'] = new_resp
    moved = call(['mv',
                  '/content/candidates/resume_responses/%s' % old_resp,
                  '/content/candidates/resume_responses/%s' % new_resp])
    if moved != 0:
        logr.info("resume response file is not getting saved")
    # Error file: same idea, truncated to 6 chars; only moved if it exists.
    old_err = cand_resume_data['error_file']
    new_err = cand_resume_data['error_file'] = old_err[:6] + '_%s.txt' % cid
    old_err_path = '/content/candidates/resume_responses/errors/%s' % old_err
    if os.path.exists(old_err_path):
        moved = call(['mv', old_err_path,
                      '/content/candidates/resume_responses/errors/%s' % new_err])
        if moved != 0:
            logr.info("resume error file is not getting saved")
#End of _change_filename()
# Upload resume helper function
# Upload resume helper function
def _upload_resume(request, candidate, is_tmp_stored = False):
    """Validate, decode and store a base64 data-URI resume upload.

    Returns an XML fragment (string) describing success or errors. When
    *is_tmp_stored* is true the file goes to /tmp and no candidate state
    is touched; otherwise it is saved under /content/candidates/resume and
    candidate.resume_filename is updated. Job seekers additionally get the
    resume parsed, at most once per year.
    """
    logr.info("Incoming request to upload resume; user: %s" % request.user.email)
    resp = ''
    errors = ''
    # Reject uploads larger than ~2 MB (limit checked via the raw header).
    cont_len = request.META['CONTENT_LENGTH']
    if (int(cont_len) > 2048575):
        errors += 'Invalid value (%s) for the field %s'%(cont_len,"Content-Length header")
        resp += '<errors>\r\n'
        resp += _add_xml_field('errorStr',errors)
        resp += '</errors>\r\n'
        return resp
    # Read the request body: a data URI of the form "data:<type>;base64,<payload>".
    header, data = request.body.split(',', 1)
    header = header[5:] # Header starts with "data:", so skip first 5 bytes
    cont_type, encoding = header.split(';', 1)  # NOTE: 'encoding' is unused
    # Map accepted MIME types to the file extension we will store.
    valid_content_types = {'application/pdf': 'pdf', 'application/msword': 'doc' , 'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx', 'application/vnd.openxmlformats-officedocument.wordprocessingml.template':'dotx', 'application/vnd.ms-word.document.macroEnabled.12':'docm', 'application/vnd.ms-word.template.macroEnabled.12':'dotm'}
    if cont_type not in valid_content_types.keys():
        errors += 'Invalid Content Type'
        resp += _add_xml_field('errors',errors)
        return resp
    else:
        # Fabricate our own resume file name - to avoid conflicts such as two people having file name as "resume.doc"
        res_fn = 'resume_%s.%s' % (candidate.id, valid_content_types[cont_type])
        if is_tmp_stored:
            # Temporary storage only: decode and write to /tmp, no DB changes.
            path = '/tmp/%s' % res_fn
            try:
                with open(path, 'wb+') as destination:
                    # Payload may carry a '#'-suffixed fragment; keep only the b64 part.
                    destination.write(b64decode(data.split('#')[0]))
                resp += _add_xml_field("success", res_fn)
                return resp
            except Exception as e:
                logr.info('Exception: %s' % str(e))
                errors += 'Error while saving the resume'
                resp +='<errors>\r\n'
                resp += _add_xml_field('errorStr',errors)
                resp +='</errors>\r\n'
                return resp
        else:
            path = '/content/candidates/resume/%s' % res_fn
            resumeUrl = "./images/content/s3content/candidates/resume/%s" % res_fn  # NOTE: unused
            if _isJobSeeker(request.user):
                # Latest parse result (if any) decides whether we re-parse.
                parsed_data_set = ParsedResumeData.objects.filter(candidate_id = candidate.id).order_by('-create_time')
                if len(parsed_data_set) > 0:
                    parsed_data = parsed_data_set[0]
                else:
                    parsed_data = None
                year_ago = timezone.now() - timedelta(days = 365)
                if not parsed_data:
                    # Never parsed before: parse now.
                    (cand_resume_data,error_resp) = _resume_parser(cont_type,data,'')
                    if error_resp != '':
                        resp +='<resume>\r\n'
                        resp += error_resp
                        resp +='</resume>\r\n'
                        return resp
                    else:
                        # NOTE(review): resp is '' here, so passing it in and
                        # also appending the return value is benign — but the
                        # pattern duplicates resp if it ever becomes non-empty.
                        resp += _inline_response_builder(resp,request,candidate,cand_resume_data,res_fn)
                elif parsed_data.create_time < year_ago:
                    # Last parse is older than a year: parse again.
                    (cand_resume_data,error_resp) = _resume_parser(cont_type,data,'')
                    if error_resp != '':
                        resp +='<resume>\r\n'
                        resp += error_resp
                        resp +='</resume>\r\n'
                        return resp
                    else:
                        resp += _inline_response_builder(resp,request,candidate,cand_resume_data,res_fn)
                else:
                    # Parsed within the last year: refuse the upload.
                    resp +='<errors>\r\n'
                    resp += _add_xml_field('errorStr','Resume upload allowed only once per year')
                    resp += _add_xml_field('filename','%s'%candidate.resume_filename)
                    resp +='</errors>\r\n'
                    return resp
            # If there's an existing resume URL set in candidate object, delete the old file
            if candidate.resume_filename:
                old_filename=candidate.resume_filename
                old_path = '/content/candidates/resume/%s' % old_filename
                try:
                    os.remove(old_path)
                except Exception:
                    pass  # best-effort delete; a missing old file is fine
            try:
                with open(path, 'wb+') as destination:
                    destination.write(b64decode(data.split('#')[0]))
                candidate.resume_filename = res_fn
                candidate.save()
                resp += _add_xml_field("success", res_fn)
                # Job-seeker responses were opened with <resume> upstream; close it.
                if _isJobSeeker(request.user):
                    resp +='</resume>\r\n'
                return resp
            except Exception as e:
                logr.info('Exception: %s' % str(e))
                errors += 'Error while saving file'
                resp += _add_xml_field('errors',errors)
                if _isJobSeeker(request.user):
                    resp +='</resume>\r\n'
                return resp
#End of _upload_resume()
def uploadResume(request):
    """Django view: accept a POSTed resume upload and return an XML response."""
    logr.info("Incoming request to upload resume; user: %s" % request.user.email)
    # Uploads are only valid over POST.
    if request.method != 'POST':
        return HttpResponse(INVALID_METHOD, content_type="text/xml")
    candidate = Candidate.objects.get(user_id=request.user.id)
    body = '<?xml version="1.0" encoding="UTF-8"?>\r\n' + _upload_resume(request, candidate)
    return HttpResponse(body, content_type="text/xml")
#End of uploadResume
|
from .protocol import *
import PyQt5
class Camera:
    """Lightweight view over the camera dict sent by the driver protocol."""

    def __init__(self, camera_dict):
        # 'n' carries the display name, 'a' the driver-side address.
        self.name, self.address = camera_dict['n'], camera_dict['a']

    def __str__(self):
        return '{} [{}]'.format(self.name, self.address)

    # repr intentionally matches the human-readable form.
    __repr__ = __str__
# NOTE(review): the @protocol decorator (from .protocol) presumably injects a
# ``packet_<name>`` attribute for every packet listed below — confirm against
# the protocol module.
@protocol(area='Driver', packets=['CameraList', 'CameraListReply', 'GetCameraName', 'GetCameraNameReply', 'ConnectCamera', 'ConnectCameraReply', \
          'CloseCamera', 'signalDisconnected', 'signalCameraConnected', 'signalFPS', 'signalTemperature', 'signalControlChanged', \
          'GetControls', 'GetControlsReply', 'GetProperties', 'GetPropertiesReply', 'StartLive', 'StartLiveReply', 'SetControl', \
          'SetROI', 'ClearROI'])
class DriverProtocol:
    """Client-side API for the 'Driver' protocol area (camera control).

    Request methods either fire-and-forget (``self.client.send``) or block on
    a reply (``self.client.round_trip``); ``on_*`` methods register callbacks
    for asynchronous signal packets.
    """

    def camera_list(self):
        # Ask the driver for its cameras and wrap each returned dict in Camera.
        return [Camera(x) for x in self.client.round_trip(self.packet_cameralist.packet(), self.packet_cameralistreply).variant]

    def connect_camera(self, camera):
        # Connect by address; no reply is awaited here (signal arrives later).
        self.client.send(self.packet_connectcamera.packet(variant=camera.address))

    def close_camera(self):
        self.client.send(self.packet_closecamera.packet())

    def get_camera_name(self):
        return self.client.round_trip(self.packet_getcameraname.packet(), self.packet_getcameranamereply).variant

    def get_controls(self):
        return self.client.round_trip(self.packet_getcontrols.packet(), self.packet_getcontrolsreply).variant

    def set_control(self, control):
        self.client.send(self.packet_setcontrol.packet(variant=control))

    def set_roi(self, x, y, width, height):
        # Region of interest is transported as a QRect.
        self.client.send(self.packet_setroi.packet(variant=PyQt5.QtCore.QRect(x, y, width, height)))

    def clear_roi(self):
        self.client.send(self.packet_clearroi.packet())

    def get_properties(self):
        return self.client.round_trip(self.packet_getproperties.packet(), self.packet_getpropertiesreply).variant

    def start_live(self):
        return self.client.round_trip(self.packet_startlive.packet(), self.packet_startlivereply)

    # --- asynchronous signal subscriptions -------------------------------

    def on_signal_fps(self, callback):
        # Callback receives the FPS value carried in the packet.
        def dispatch(packet): callback(packet.variant)
        Protocol.register_packet_handler(self.client, self.packet_signalfps, dispatch)

    def on_camera_connected(self, callback):
        # Callback takes no arguments; the packet payload is ignored.
        def dispatch(_): callback()
        Protocol.register_packet_handler(self.client, self.packet_signalcameraconnected, dispatch)

    def on_camera_disconnected(self, callback):
        def dispatch(_): callback()
        Protocol.register_packet_handler(self.client, self.packet_signaldisconnected, dispatch)

    def on_signal_temperature(self, callback):
        def dispatch(packet): callback(packet.variant)
        Protocol.register_packet_handler(self.client, self.packet_signaltemperature, dispatch)

    def on_control_changed(self, callback):
        def dispatch(packet): callback(packet.variant)
        Protocol.register_packet_handler(self.client, self.packet_signalcontrolchanged, dispatch)
|
from __future__ import annotations
from collections.abc import Callable, Iterable, Iterator, Mapping
from typing import TypeVar, Generic, Optional, Final
from pkg_resources import iter_entry_points, DistributionNotFound
__all__ = ['PluginT', 'Plugin']

#: The plugin type variable.
PluginT = TypeVar('PluginT')


class Plugin(Generic[PluginT], Iterable[tuple[str, PluginT]]):
    """Plugin system, typically loaded from :mod:`pkg_resources` `entry points
    <https://packaging.python.org/guides/creating-and-discovering-plugins/#using-package-metadata>`_.

    >>> example: Plugin[type[Example]] = Plugin('plugins.example')
    >>> example.add('two', ExampleTwo)
    >>> example.registered
    {'one': <class 'examples.ExampleOne'>,
     'two': <class 'examples.ExampleTwo'>}

    Note:
        Plugins registered from *group* entry points are lazy-loaded. This is
        to avoid cyclic imports.

    Args:
        group: The entry point group to load.
        default: The name of the default plugin, if any.

    """

    # PEP 484 fix: the keyword argument accepts None, so the annotation must
    # be Optional[str] rather than the implicit-Optional ``str = None``.
    def __init__(self, group: str, *, default: Optional[str] = None) -> None:
        super().__init__()
        self.group: Final = group
        self._default = default
        # Lazy cache of entry-point plugins; None means "not scanned yet".
        self._loaded: Optional[dict[str, PluginT]] = None
        # Plugins registered programmatically via add()/register().
        self._added: dict[str, PluginT] = {}

    def __iter__(self) -> Iterator[tuple[str, PluginT]]:
        return iter(self.registered.items())

    @property
    def registered(self) -> Mapping[str, PluginT]:
        """A mapping of the registered plugins, keyed by name."""
        loaded = self._load()
        # Programmatic additions shadow entry-point plugins of the same name.
        return {**loaded, **self._added}

    @property
    def default(self) -> PluginT:
        """The default plugin implementation.

        This property may also be assigned a new string value to change the
        name of the default plugin.

        >>> example: Plugin[type[Example]] = Plugin('plugins.example',
        ...                                         default='one')
        >>> example.default
        <class 'examples.ExampleOne'>
        >>> example.default = 'two'
        >>> example.default
        <class 'examples.ExampleTwo'>

        Raises:
            KeyError: The default plugin name was not registered.

        """
        if self._default is None:
            raise KeyError(f'{self.group!r} has no default plugin')
        else:
            return self.registered[self._default]

    @default.setter
    def default(self, default: Optional[str]) -> None:
        self._default = default

    def _load(self) -> Mapping[str, PluginT]:
        """Scan (once) and cache the entry points for this group."""
        loaded = self._loaded
        if loaded is None:
            loaded = {}
            for entry_point in iter_entry_points(self.group):
                try:
                    plugin: PluginT = entry_point.load()
                except DistributionNotFound:
                    pass  # optional dependencies not installed
                else:
                    loaded[entry_point.name] = plugin
            self._loaded = loaded
        return loaded

    def add(self, name: str, plugin: PluginT) -> None:
        """Add a new plugin by name.

        Args:
            name: The identifying name of the plugin.
            plugin: The plugin object.

        """
        self._added[name] = plugin

    def register(self, name: str) -> Callable[[PluginT], PluginT]:
        """Decorates a plugin implementation.

        Args:
            name: The identifying name of the plugin.

        """
        def deco(plugin: PluginT) -> PluginT:
            self.add(name, plugin)
            return plugin
        return deco

    def __repr__(self) -> str:
        return f'Plugin({self.group!r})'
|
import numpy as np
import tensorflow as tf
# Declare list of features, we only have one real-valued feature
def model(features, labels, mode):
    """Model function for a tf.contrib.learn Estimator (TF 1.x graph mode).

    Fits y = W * x + b by minimizing the sum of squared residuals with
    plain gradient descent.

    NOTE(review): tf.contrib was removed in TensorFlow 2.x; this requires
    a TF 1.x installation.
    """
    # Build a linear model and predict values
    W = tf.get_variable("W", [1], dtype=tf.float64)
    b = tf.get_variable("b", [1], dtype=tf.float64)
    y = W*features['x'] + b
    # Loss sub-graph: sum of squared residuals.
    loss = tf.reduce_sum(tf.square(y - labels))
    # Training sub-graph
    global_step = tf.train.get_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    # Group the weight update with the global_step increment so both run per step.
    train = tf.group(optimizer.minimize(loss),
                     tf.assign_add(global_step, 1))
    # ModelFnOps connects subgraphs we built to the
    # appropriate functionality.
    return tf.contrib.learn.ModelFnOps(
        mode=mode, predictions=y,
        loss=loss,
        train_op=train)
# Build the estimator around the custom model_fn above.
estimator = tf.contrib.learn.Estimator(model_fn=model)
# define our data sets (ground truth relationship is y = -x + 1)
x_train = np.array([1., 2., 3., 4.])
y_train = np.array([0., -1., -2., -3.])
x_eval = np.array([2., 5., 8., 1.])
y_eval = np.array([-1.01, -4.1, -7, 0.])
# Feed the full 4-sample batch, cycling for up to 1000 epochs.
input_fn = tf.contrib.learn.io.numpy_input_fn({"x": x_train}, y_train, 4, num_epochs=1000)
eval_input_fn = tf.contrib.learn.io.numpy_input_fn(
    {"x": x_eval}, y_eval, batch_size=4, num_epochs=1000)
# train
estimator.fit(input_fn=input_fn, steps=1000)
# Here we evaluate how well our model did.
train_loss = estimator.evaluate(input_fn=input_fn)
eval_loss = estimator.evaluate(input_fn=eval_input_fn)
print("train loss: %r"% train_loss)
print("eval loss: %r"% eval_loss)
|
#!/usr/bin/python
#Force Totumduino Reset
import RPi.GPIO as GPIO
import time,sys
import serial
import ConfigParser
import logging
# Read the macro configuration to find where trace/response output goes.
config = ConfigParser.ConfigParser()
config.read('/var/www/fabui/python/config.ini')
trace_file = config.get('macro', 'trace_file')
response_file = config.get('macro', 'response_file')
# Log bare messages to the trace file, truncating any previous run's output.
logging.basicConfig(filename=trace_file, level=logging.INFO, format='%(message)s')
open(trace_file, 'w').close()  # reset trace file
def trace(string):
    """Write *string* to the macro trace file via the root logger."""
    logging.info(string)
trace("Start reset controller...")
# Release any prior pin state, then address pins by physical board number.
GPIO.cleanup()
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
def reset():
    """Pulse the Totumduino reset line low for ~120 ms, then release it."""
    # Physical pin 11 (GPIO.BOARD numbering) drives the reset line.
    pin = 11
    GPIO.setup(pin, GPIO.OUT)
    GPIO.output(pin, GPIO.HIGH)
    time.sleep(0.12)
    GPIO.output(pin, GPIO.LOW)   # assert reset
    time.sleep(0.12)
    GPIO.output(pin, GPIO.HIGH)  # release reset
reset()
# Reopen the serial link and drop any bytes the controller emitted on boot.
serial_port = config.get('serial', 'port')
serial_baud = config.get('serial', 'baud')
# NOTE: rebinds the module name 'serial' to the port instance for the rest
# of the script.
serial = serial.Serial(serial_port, serial_baud, timeout=0.5)
serial.flushInput()
serial.flush()
serial.close()
GPIO.cleanup()
trace("Controller ready")
sys.exit()
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the 'Translation' and 'Language' models
    to the blogs app. The ``models`` dict below is South's frozen snapshot of
    the ORM at the time this migration was generated — do not edit by hand.
    """

    def forwards(self, orm):
        """Apply the migration: create the two new tables."""
        # Adding model 'Translation'
        db.create_table('blogs_translation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('origin_blog', self.gf('django.db.models.fields.related.ForeignKey')(related_name='Translation_origin_blog', null=True, to=orm['blogs.Blog'])),
            ('translated_blog', self.gf('django.db.models.fields.related.ForeignKey')(related_name='Translation_translated_blog', null=True, to=orm['blogs.Blog'])),
        ))
        db.send_create_signal('blogs', ['Translation'])

        # Adding model 'Language'
        db.create_table('blogs_language', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('language_name', self.gf('django.db.models.fields.CharField')(max_length=40)),
            ('language_code', self.gf('django.db.models.fields.CharField')(max_length=5)),
        ))
        db.send_create_signal('blogs', ['Language'])

    def backwards(self, orm):
        """Reverse the migration: drop the two tables again."""
        # Deleting model 'Translation'
        db.delete_table('blogs_translation')

        # Deleting model 'Language'
        db.delete_table('blogs_language')

    # Frozen ORM snapshot used by South to materialize ``orm`` above.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'blogs.blog': {
            'Meta': {'object_name': 'Blog'},
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'})
        },
        'blogs.category': {
            'Meta': {'object_name': 'Category'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'}),
            'top_level_cat': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'})
        },
        'blogs.comment': {
            'Meta': {'object_name': 'Comment'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.info_email': {
            'Meta': {'object_name': 'Info_email'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'})
        },
        'blogs.language': {
            'Meta': {'object_name': 'Language'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
            'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
        },
        'blogs.page': {
            'Meta': {'object_name': 'Page'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
        },
        'blogs.post': {
            'Meta': {'object_name': 'Post'},
            'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
            'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
            'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
            'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
            'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
            'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
            'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
            'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
            'youtube_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
        },
        'blogs.subscription': {
            'Meta': {'object_name': 'Subscription'},
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'blogs.tag': {
            'Meta': {'object_name': 'Tag'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
            'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
        },
        'blogs.translation': {
            'Meta': {'object_name': 'Translation'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
            'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['blogs']
|
from __future__ import absolute_import, print_function, division
import unittest
from pony.orm.core import *
# In-memory SQLite database shared by all tests in this module.
db = Database('sqlite', ':memory:')


class Male(db.Entity):
    # Symmetric one-to-one pair; the FK column 'wife' lives on the male table.
    name = Required(unicode)
    wife = Optional('Female', column='wife')


class Female(db.Entity):
    name = Required(unicode)
    husband = Optional('Male')


db.generate_mapping(create_tables=True)
class TestOneToOne(unittest.TestCase):
def setUp(self):
with db_session:
db.execute('delete from male')
db.execute('delete from female')
db.insert(Female, id=1, name='F1')
db.insert(Female, id=2, name='F2')
db.insert(Female, id=3, name='F3')
db.insert(Male, id=1, name='M1', wife=1)
db.insert(Male, id=2, name='M2', wife=2)
db.insert(Male, id=3, name='M3', wife=None)
db_session.__enter__()
def tearDown(self):
db_session.__exit__()
def test_1(self):
Male[3].wife = Female[3]
self.assertEqual(Male[3]._vals_[Male.wife], Female[3])
self.assertEqual(Female[3]._vals_[Female.husband], Male[3])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([1, 2, 3], wives)
def test_2(self):
Female[3].husband = Male[3]
self.assertEqual(Male[3]._vals_[Male.wife], Female[3])
self.assertEqual(Female[3]._vals_[Female.husband], Male[3])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([1, 2, 3], wives)
def test_3(self):
Male[1].wife = None
self.assertEqual(Male[1]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([None, 2, None], wives)
def test_4(self):
Female[1].husband = None
self.assertEqual(Male[1]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([None, 2, None], wives)
def test_5(self):
Male[1].wife = Female[3]
self.assertEqual(Male[1]._vals_[Male.wife], Female[3])
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[3]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([3, 2, None], wives)
def test_6(self):
Female[3].husband = Male[1]
self.assertEqual(Male[1]._vals_[Male.wife], Female[3])
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[3]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([3, 2, None], wives)
def test_7(self):
Male[1].wife = Female[2]
self.assertEqual(Male[1]._vals_[Male.wife], Female[2])
self.assertEqual(Male[2]._vals_[Male.wife], None)
self.assertEqual(Female[1]._vals_[Female.husband], None)
self.assertEqual(Female[2]._vals_[Female.husband], Male[1])
commit()
wives = db.select('wife from Male order by Male.id')
self.assertEqual([2, None, None], wives)
    def test_8(self):
        # Mirror of test_7, performed from the reverse (Female) side.
        Female[2].husband = Male[1]
        self.assertEqual(Male[1]._vals_[Male.wife], Female[2])
        self.assertEqual(Male[2]._vals_[Male.wife], None)
        self.assertEqual(Female[1]._vals_[Female.husband], None)
        self.assertEqual(Female[2]._vals_[Female.husband], Male[1])
        commit()
        wives = db.select('wife from Male order by Male.id')
        self.assertEqual([2, None, None], wives)
    def test_to_dict_1(self):
        # to_dict() serializes a linked Male with the wife's id, not the object.
        m = Male[1]
        d = m.to_dict()
        self.assertEqual(d, dict(id=1, name='M1', wife=1))
    def test_to_dict_2(self):
        # An unlinked Male serializes with wife=None.
        m = Male[3]
        d = m.to_dict()
        self.assertEqual(d, dict(id=3, name='M3', wife=None))
    def test_to_dict_3(self):
        # The reverse side serializes symmetrically: husband's id is included.
        f = Female[1]
        d = f.to_dict()
        self.assertEqual(d, dict(id=1, name='F1', husband=1))
    def test_to_dict_4(self):
        # An unlinked Female serializes with husband=None.
        f = Female[3]
        d = f.to_dict()
        self.assertEqual(d, dict(id=3, name='F3', husband=None))
if __name__ == '__main__':
    # Discover and run all TestCase classes in this module.
    unittest.main()
|
from django import template
from django.contrib.auth.models import Group
from organization.models import GroupCategory, Role
import re
register = template.Library()
def roles_for_user_in_group(user, group):
    """Return the Role queryset the user holds via memberships in *group*."""
    lookup = dict(membership__user=user, membership__group=group)
    return Role.objects.filter(**lookup)
def phone_number_format(number):
    """Format an 8-digit phone number string for display.

    Numbers starting with 4 or 9 are grouped "XXX XX XXX"; any other
    8-digit number is grouped "XX XX XX XX".  Anything else — including
    falsy values such as None or "" — is returned unchanged.  (The
    original implicitly returned None for falsy input.)
    """
    if not number:
        # Return the value as-is instead of falling off the end of the
        # function (which silently yielded None).
        return number
    m = re.search(r'^((?:4|9)\d{2})(\d{2})(\d{3})$', number)
    if m:
        return "%s %s %s" % m.groups()
    n = re.search(r'^(\d{2})(\d{2})(\d{2})(\d{2})$', number)
    if n:
        return "%s %s %s %s" % n.groups()
    # Not a recognized 8-digit number: leave it untouched.
    return number
@register.simple_tag
def list_groups(request, group_name, groupcategory_name):
    """Give a group and a not related group category.
    Lists all groups in groupcategory, filtered on users in the given group.

    Returns raw HTML (<ul> of group tables).  NOTE(review): user-provided
    names/emails are interpolated without HTML escaping — confirm upstream
    sanitization or escape here.
    """
    group = Group.objects.get(name__iexact=group_name)
    groupcategory = GroupCategory.objects.get(name=groupcategory_name)
    #TODO: Add 404 on exceptions
    ret = '<ul class="reset">'
    for groupprofile in groupcategory.groupprofile_set.all():
        ret += "<li>"
        ret += "<h2>" + groupprofile.group.name + "</h2>"
        ret += "<table>"
        for u in groupprofile.group.user_set.all():
            # groupprofile.group.user_set.filter(groups=group) is too eager
            #if u.groups.filter(id=group.id).exists():
            # Only show users with an active-enough profile status (< 4).
            if u.userprofile_set.filter(status__lt=4):
                ret += "<tr>"
                # Members of the organization's group get a profile link.
                if request.organization.group in request.user.groups.all():
                    ret += "<td class=\"col4\"><a href=\"" + u.get_absolute_url() +"\">" + u.get_full_name() + "</a></td>"
                else:
                    ret += "<td class=\"col4\">" + u.get_full_name() + "</td>"
                ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, group) ]) + "</td>"
                # Contact details are only rendered for members of *group*.
                if request.user.groups.filter(id=group.id):
                    ret += "<td class=\"col2\">%s</td>" % (phone_number_format(u.get_profile().cellphone) or "",)
                    ret += "<td class=\"col5\">%s</td>" % (u.email,)
                ret += "<td>" + ", ".join([ role.name for role in roles_for_user_in_group(u, groupprofile.group) ]) + "</td>"
                ret += "</tr>"
        ret += "</table>"
        ret += "</li>"
    ret += "</ul>"
    return ret
|
#!/usr/bin/python
# Find examples of a movement
# Copyright 2015 Daniel McDonald, All rights reserved
# ============================================================================
from __future__ import print_function
from __future__ import division
import csv
import datetime
import glob
import numpy as np
import os
import os.path
import re
import sys
def print_err(*args, **kwargs):
    """Like print(), but writes to standard error instead of stdout."""
    print(*args, file=sys.stderr, **kwargs)
class TimeUnit(object):
    """A validated time-window unit: 'd' (days) or 'w' (weeks)."""
    def __init__(self, unit):
        """Store *unit*, rejecting anything but the supported codes."""
        if unit not in ('d', 'w'):
            raise Exception("Invalid time unit: " + unit)
        self.unit_type = unit
    def __str__(self):
        """Render as the single-letter unit code."""
        return self.unit_type
class Window(object):
    """A time window parsed from a spec such as '1d' or '2w'."""
    def __init__(self, arg):
        """Parse *arg* of the form "<count><unit>" (e.g. "3d", "2w").

        Raises ValueError on a malformed spec; the original raised a
        cryptic AttributeError from the failed regex match.
        """
        m = re.match(r'^([1-9][0-9]*)([dw])$', arg)
        if m is None:
            raise ValueError("Invalid window specification: " + str(arg))
        self.duration = int(m.group(1))
        self.unit = TimeUnit(m.group(2))
    def _timedelta(self):
        """Return the window length as a datetime.timedelta.

        Centralizes the unit dispatch that was previously duplicated in
        accommodates() (addressing its TODO).
        """
        if str(self.unit) == 'd':
            return datetime.timedelta(days=self.duration)
        if str(self.unit) == 'w':
            return datetime.timedelta(weeks=self.duration)
        raise Exception("Invalid window unit: " + str(self.unit))
    def accommodates(self, start_date, end_date):
        ''' Determine if the two dates are within a time window of each other. '''
        return end_date - start_date <= self._timedelta()
def print_usage_and_exit():
    """Print CLI usage to stderr and terminate with exit status 1."""
    print_err("\n" + sys.argv[0] + " <quote_dir> <amount> <window>")
    print_err("\nquote_dir\tdirectory containing quote files")
    print_err("amount\t\tamount of movement (e.g., 2, -2)")
    print_err("window\t\ttime for movement to occur (e.g., 1d, 2w)")
    # sys.exit instead of the site-provided exit() builtin, which is not
    # guaranteed to exist when the script runs without the site module.
    sys.exit(1)
def is_number(s):
    """Return True when *s* parses as a float, False otherwise."""
    try:
        float(s)
    except ValueError:
        return False
    return True
def check_command_line_args():
    """Validate sys.argv and return (quote_path, movement, Window)."""
    if len(sys.argv) < 4:
        print_usage_and_exit()
    quote_path, amount, window_spec = sys.argv[1], sys.argv[2], sys.argv[3]
    if not os.path.exists(quote_path):
        print_err("ERROR: " + quote_path + " is not a file or directory")
        print_usage_and_exit()
    if not is_number(amount):
        print_err("ERROR: " + amount + " is not a number")
        print_usage_and_exit()
    if not re.match(r'^[1-9][0-9]*[dw]$', window_spec):
        print_err("ERROR: " + window_spec + " is not a time window")
        print_usage_and_exit()
    return quote_path, float(amount), Window(window_spec)
def load_quotes(quote_path):
    """Recursively load every quote CSV under *quote_path*.

    Returns a dict mapping symbol -> list of parsed quote dicts.
    """
    symbol_histories = {}
    add_quotes_from_folder(symbol_histories, quote_path)
    return symbol_histories
def add_quotes_from_folder(quotes, quote_path):
    """Walk *quote_path*, loading each .csv file found into *quotes*."""
    for entry in glob.iglob(os.path.join(quote_path, "*")):
        if os.path.isdir(entry):
            # Descend into subdirectories.
            add_quotes_from_folder(quotes, entry)
        elif entry.endswith(".csv"):
            add_quotes_from_file(quotes, entry)
def add_quotes_from_file(quotes, quote_path):
    """Parse one quote CSV file and store its history under its symbol.

    The symbol is taken from the file name ("..._<symbol>.csv"); files
    whose name does not encode a symbol are reported and skipped.
    """
    # Test the match explicitly instead of wrapping .group(1) in a bare
    # except:, which also swallowed unrelated errors.
    match = re.match(r'.*_([a-z.]+?)\.csv', quote_path)
    if match is None:
        print_err("ERROR: Cannot determine symbol from file name: " + quote_path)
        return
    symbol = match.group(1)
    # Text mode so csv.reader receives str rows on Python 3 (the original
    # 'rb' mode breaks there; 'r' also works on Python 2).
    with open(quote_path, 'r') as csvfile:
        quote_reader = csv.reader(csvfile, delimiter=',')
        quote_history = []
        for quote_line in quote_reader:
            quote_history.append(parse_quote_line(quote_line))
        quotes[symbol] = quote_history
def parse_quote_line(quote_line):
    """Convert one CSV row (date, time, OHLC, volume strings) into a quote dict."""
    date_str, time_str, open_str, high_str, low_str, close_str, volume_str = quote_line
    # time_str is unpacked from the row but intentionally unused.
    quote = {"date": datetime.datetime.strptime(date_str, "%Y%m%d").date()}
    for key, text in (("open", open_str), ("high", high_str),
                      ("low", low_str), ("close", close_str),
                      ("volume", volume_str)):
        quote[key] = float(text)
    return quote
def find_movements(quotes, movement, window):
    """Find price movements of at least *movement* within *window*.

    For each quote, looks ahead through all later quotes that fall inside
    the window and records {"symbol", "date"} whenever some pair of closes
    in that span rises (movement > 0) or falls (movement < 0) by the
    requested amount.
    """
    movements = []
    for symbol in quotes.keys():
        history = quotes[symbol]
        for i, quote in enumerate(history):
            # j becomes the index of the last quote still inside the window.
            j = i
            for k, q in enumerate(history[i + 1:]):
                if window.accommodates(quote["date"], q["date"]):
                    j = i + 1 + k
                else:
                    break
            if j > i:
                close = [q["close"] for q in history[i:j + 1]]
                # All forward differences inside the span.  range() instead
                # of the Python-2-only xrange so this also runs on Python 3.
                moves = [close[v] - close[u]
                         for u in range(0, len(close) - 1)
                         for v in range(u + 1, len(close))]
                if (movement > 0 and max(moves) >= movement) or (movement < 0 and min(moves) <= movement):
                    movements.append({"symbol": symbol, "date": quote["date"]})
    return movements
if __name__ == "__main__":
quote_path, movement, window = check_command_line_args()
# load data from files
quotes = load_quotes(quote_path)
# find movement matches
movements = find_movements(quotes, movement, window)
# write movements
for m in movements:
print(m["symbol"], m["date"])
|
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from biohub.core.plugins import plugins
from biohub.utils.http import basicauth
from .serializers import PluginSerializer
from .user_plugin_manager import RepositoryRequest, Repository, get_requests
@api_view(['GET'])
def plugins_list(request):
    """Return serialized configs of every registered plugin."""
    configs = plugins.plugin_configs.values()
    serializer = PluginSerializer(configs, many=True)
    return Response(serializer.data)
@basicauth('admin', 'biohub')
def plugin_admin(request):
    """Minimal HTML admin page for pending plugin repository requests.

    GET renders one form per pending request; POST applies the chosen
    action (approve / reject / delete) and redirects back to this page.

    NOTE(review): credentials are hard-coded in the decorator above —
    move them to settings before exposing this endpoint.
    """
    if request.method.lower() == 'post':
        # 'target' encodes the request; the pressed submit button selects
        # which action to perform on it.
        req = RepositoryRequest.from_string(request.POST['target'])
        message = request.POST.get('message', '')
        if 'approve' in request.POST:
            req.approve(message)
        elif 'reject' in request.POST:
            req.reject(message)
        elif 'delete' in request.POST:
            req.delete()
        # Redirect-after-POST so a browser refresh cannot repeat the action.
        return HttpResponseRedirect(reverse('api:plugins:admin'))
    # Build the page: one <form> per pending request, separated by <hr>.
    template = """
<html>
<head>
<title>plugin admin</title>
</head>
<body>
{}
</body>
</html>
""".format(
'<hr>'.join(
"""
<form method="post" action="">
<a href="https://github.com/{request.username}/{request.repo}/commit/{request.commit}" target="_blank">{request.value}</a>
<input type="hidden" name="target" value="{request.value}">
<textarea name="message"></textarea>
<input type="submit" name="approve" value="approve">
<input type="submit" name="reject" value="reject">
<input type="submit" name="delete" value="delete">
</form>
""".format(request=RepositoryRequest.from_string(r))
for r in get_requests()
)
)
    return HttpResponse(template)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def init_plugin(request):
    """Create the current user's plugin repository from posted username/repo."""
    username = request.data.get('username')
    repo = request.data.get('repo')
    Repository(request.user).init(username, repo)
    return Response('OK')
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def upgrade_plugin(request):
    """Queue an upgrade request for the current user's plugin repository."""
    repository = Repository(request.user)
    repository.request_upgrade()
    return Response('OK')
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def remove_plugin(request):
    """Delete the current user's plugin repository."""
    repository = Repository(request.user)
    repository.remove()
    return Response('OK')
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def plugin_info(request):
    """Return the serialized state of the current user's plugin repository."""
    repository = Repository(request.user)
    return Response(repository.serialize())
|
""""""
from typing import Dict, List, Set
from copy import copy
from collections import defaultdict
from vnpy.trader.object import (
LogData, ContractData, TickData,
OrderData, TradeData,
SubscribeRequest, OrderRequest
)
from vnpy.event import Event, EventEngine
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import (
EVENT_TRADE, EVENT_TICK, EVENT_CONTRACT,
EVENT_TIMER, EVENT_ORDER, EVENT_POSITION
)
from vnpy.trader.constant import (
Product, Offset, Direction, OrderType
)
from vnpy.trader.converter import OffsetConverter
from vnpy.trader.utility import round_to, save_json, load_json
from .base import (
APP_NAME, CHAIN_UNDERLYING_MAP,
EVENT_OPTION_NEW_PORTFOLIO,
EVENT_OPTION_ALGO_PRICING, EVENT_OPTION_ALGO_TRADING,
EVENT_OPTION_ALGO_STATUS, EVENT_OPTION_ALGO_LOG,
InstrumentData, PortfolioData
)
try:
    from .pricing import black_76_cython as black_76
    from .pricing import binomial_tree_cython as binomial_tree
    from .pricing import black_scholes_cython as black_scholes
except ImportError:
    # Fall back to the pure-Python pricing models when the compiled
    # Cython extensions are not available.
    from .pricing import (
        black_76, binomial_tree, black_scholes
    )
    print("Failed to import cython option pricing model, please rebuild with cython in cmd.")
from .algo import ElectronicEyeAlgo
# Display name (shown in the UI, Chinese) -> pricing model module.
PRICING_MODELS = {
    "Black-76 欧式期货期权": black_76,
    "Black-Scholes 欧式股票期权": black_scholes,
    "二叉树 美式期货期权": binomial_tree
}
class OptionEngine(BaseEngine):
    """Option-master main engine.

    Builds option portfolios from contract data, routes market/order/
    trade/position events to them, and persists per-portfolio settings
    and pricing data as JSON files.
    """
    setting_filename = "option_master_setting.json"
    data_filename = "option_master_data.json"
    def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
        """Create sub-engines, load persisted settings and hook up events."""
        super().__init__(main_engine, event_engine, APP_NAME)
        self.portfolios: Dict[str, PortfolioData] = {}
        self.instruments: Dict[str, InstrumentData] = {}  # vt_symbol: instrument
        self.active_portfolios: Dict[str, PortfolioData] = {}
        self.timer_count: int = 0
        self.timer_trigger: int = 60  # timer events between ATM-price recalculations
        self.offset_converter: OffsetConverter = OffsetConverter(main_engine)
        self.get_position_holding = self.offset_converter.get_position_holding
        self.hedge_engine: OptionHedgeEngine = OptionHedgeEngine(self)
        self.algo_engine: OptionAlgoEngine = OptionAlgoEngine(self)
        self.setting: Dict = {}
        self.load_setting()
        self.register_event()
    def close(self) -> None:
        """Persist settings and data on shutdown."""
        self.save_setting()
        self.save_data()
    def load_setting(self) -> None:
        """Load engine settings from the JSON setting file."""
        self.setting = load_json(self.setting_filename)
    def save_setting(self) -> None:
        """
        Save underlying adjustment.
        """
        save_json(self.setting_filename, self.setting)
    def load_data(self) -> None:
        """Restore chain adjustments and pricing impvs for active portfolios."""
        data = load_json(self.data_filename)
        for portfolio in self.active_portfolios.values():
            portfolio_name = portfolio.name
            # Load underlying adjustment from setting
            chain_adjustments = data.get("chain_adjustments", {})
            chain_adjustment_data = chain_adjustments.get(portfolio_name, {})
            if chain_adjustment_data:
                for chain in portfolio.chains.values():
                    chain.underlying_adjustment = chain_adjustment_data.get(
                        chain.chain_symbol, 0
                    )
            # Load pricing impv from setting
            pricing_impvs = data.get("pricing_impvs", {})
            pricing_impv_data = pricing_impvs.get(portfolio_name, {})
            if pricing_impv_data:
                for chain in portfolio.chains.values():
                    for index in chain.indexes:
                        key = f"{chain.chain_symbol}_{index}"
                        pricing_impv = pricing_impv_data.get(key, 0)
                        if pricing_impv:
                            # Same impv applies to the call and the put at a strike.
                            call = chain.calls[index]
                            call.pricing_impv = pricing_impv
                            put = chain.puts[index]
                            put.pricing_impv = pricing_impv
    def save_data(self) -> None:
        """Persist chain adjustments and pricing impvs of active portfolios."""
        chain_adjustments = {}
        pricing_impvs = {}
        for portfolio in self.active_portfolios.values():
            chain_adjustment_data = {}
            pricing_impv_data = {}
            for chain in portfolio.chains.values():
                chain_adjustment_data[chain.chain_symbol] = chain.underlying_adjustment
                # Saving the call side is sufficient: load_data() applies the
                # stored impv to both the call and the put of each strike.
                for call in chain.calls.values():
                    key = f"{chain.chain_symbol}_{call.chain_index}"
                    pricing_impv_data[key] = call.pricing_impv
            chain_adjustments[portfolio.name] = chain_adjustment_data
            pricing_impvs[portfolio.name] = pricing_impv_data
        data = {
            "chain_adjustments": chain_adjustments,
            "pricing_impvs": pricing_impvs
        }
        save_json(self.data_filename, data)
    def register_event(self) -> None:
        """Subscribe to every event type the engine consumes."""
        self.event_engine.register(EVENT_TICK, self.process_tick_event)
        self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_POSITION, self.process_position_event)
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
    def process_tick_event(self, event: Event) -> None:
        """Forward a tick to the owning portfolio of a tracked instrument."""
        tick: TickData = event.data
        instrument = self.instruments.get(tick.vt_symbol, None)
        if not instrument:
            return
        portfolio = instrument.portfolio
        if not portfolio:
            return
        portfolio.update_tick(tick)
    def process_order_event(self, event: Event) -> None:
        """Keep the offset converter in sync with order updates."""
        order: OrderData = event.data
        self.offset_converter.update_order(order)
    def process_trade_event(self, event: Event) -> None:
        """Update the offset converter and the owning portfolio with a trade."""
        trade: TradeData = event.data
        self.offset_converter.update_trade(trade)
        instrument = self.instruments.get(trade.vt_symbol, None)
        if not instrument:
            return
        portfolio = instrument.portfolio
        if not portfolio:
            return
        portfolio.update_trade(trade)
    def process_contract_event(self, event: Event) -> None:
        """Add incoming option contracts to their (possibly new) portfolio."""
        contract: ContractData = event.data
        if contract.product == Product.OPTION:
            exchange_name = contract.exchange.value
            portfolio_name = f"{contract.option_portfolio}.{exchange_name}"
            # Only portfolios with a configured underlying mapping are supported.
            if portfolio_name not in CHAIN_UNDERLYING_MAP:
                return
            portfolio = self.get_portfolio(portfolio_name)
            portfolio.add_option(contract)
    def process_position_event(self, event: Event) -> None:
        """Keep the offset converter in sync with position updates."""
        position = event.data
        self.offset_converter.update_position(position)
    def process_timer_event(self, event: Event) -> None:
        """Recalculate ATM prices of active portfolios every timer_trigger events."""
        self.timer_count += 1
        if self.timer_count < self.timer_trigger:
            return
        self.timer_count = 0
        for portfolio in self.active_portfolios.values():
            portfolio.calculate_atm_price()
    def get_portfolio(self, portfolio_name: str) -> PortfolioData:
        """Return the named portfolio, creating and announcing it on first use."""
        portfolio = self.portfolios.get(portfolio_name, None)
        if not portfolio:
            portfolio = PortfolioData(portfolio_name)
            self.portfolios[portfolio_name] = portfolio
            event = Event(EVENT_OPTION_NEW_PORTFOLIO, portfolio_name)
            self.event_engine.put(event)
        return portfolio
    def subscribe_data(self, vt_symbol: str) -> None:
        """Subscribe to market data for the given contract."""
        contract = self.main_engine.get_contract(vt_symbol)
        req = SubscribeRequest(contract.symbol, contract.exchange)
        self.main_engine.subscribe(req, contract.gateway_name)
    def update_portfolio_setting(
        self,
        portfolio_name: str,
        model_name: str,
        interest_rate: float,
        chain_underlying_map: Dict[str, str],
        inverse: bool = False,
        precision: int = 0
    ) -> None:
        """Apply pricing/underlying settings to a portfolio and persist them."""
        portfolio = self.get_portfolio(portfolio_name)
        for chain_symbol, underlying_symbol in chain_underlying_map.items():
            contract = self.main_engine.get_contract(underlying_symbol)
            portfolio.set_chain_underlying(chain_symbol, contract)
        portfolio.set_interest_rate(interest_rate)
        pricing_model = PRICING_MODELS[model_name]
        portfolio.set_pricing_model(pricing_model)
        portfolio.set_inverse(inverse)
        portfolio.set_precision(precision)
        # Mirror the applied values into the persisted setting dict.
        portfolio_settings = self.setting.setdefault("portfolio_settings", {})
        portfolio_settings[portfolio_name] = {
            "model_name": model_name,
            "interest_rate": interest_rate,
            "chain_underlying_map": chain_underlying_map,
            "inverse": inverse,
            "precision": precision
        }
        self.save_setting()
    def get_portfolio_setting(self, portfolio_name: str) -> Dict:
        """Return the stored settings for a portfolio ({} when none saved)."""
        portfolio_settings = self.setting.setdefault("portfolio_settings", {})
        return portfolio_settings.get(portfolio_name, {})
    def init_portfolio(self, portfolio_name: str) -> bool:
        """Activate a portfolio: subscribe data, sync holdings, load data.

        Returns False when the portfolio is already active.
        """
        # Add to active dict
        if portfolio_name in self.active_portfolios:
            return False
        portfolio = self.get_portfolio(portfolio_name)
        self.active_portfolios[portfolio_name] = portfolio
        # Subscribe market data
        for underlying in portfolio.underlyings.values():
            self.instruments[underlying.vt_symbol] = underlying
            self.subscribe_data(underlying.vt_symbol)
        for option in portfolio.options.values():
            # Ignore options with no underlying set
            if not option.underlying:
                continue
            self.instruments[option.vt_symbol] = option
            self.subscribe_data(option.vt_symbol)
        # Update position volume
        for instrument in self.instruments.values():
            holding = self.offset_converter.get_position_holding(
                instrument.vt_symbol
            )
            if holding:
                instrument.update_holding(holding)
        portfolio.calculate_pos_greeks()
        # Load chain adjustment and pricing impv data
        self.load_data()
        return True
    def get_portfolio_names(self) -> List[str]:
        """Return the names of all portfolios seen so far."""
        return list(self.portfolios.keys())
    def get_underlying_symbols(self, portfolio_name: str) -> List[str]:
        """Return sorted non-option vt_symbols matching the portfolio's prefix."""
        underlying_prefix = CHAIN_UNDERLYING_MAP[portfolio_name]
        underlying_symbols = []
        contracts = self.main_engine.get_all_contracts()
        for contract in contracts:
            if contract.product == Product.OPTION:
                continue
            if contract.symbol.startswith(underlying_prefix):
                underlying_symbols.append(contract.vt_symbol)
        underlying_symbols.sort()
        return underlying_symbols
    def get_instrument(self, vt_symbol: str) -> InstrumentData:
        """Return the tracked instrument for vt_symbol (KeyError if unknown)."""
        instrument = self.instruments[vt_symbol]
        return instrument
    def set_timer_trigger(self, timer_trigger: int) -> None:
        """Set how many timer events elapse between ATM recalculations."""
        self.timer_trigger = timer_trigger
class OptionHedgeEngine:
    """Periodically delta-hedges a portfolio using a chosen instrument."""
    def __init__(self, option_engine: OptionEngine):
        """Store engine references and subscribe to order/timer events."""
        self.option_engine: OptionEngine = option_engine
        self.main_engine: MainEngine = option_engine.main_engine
        self.event_engine: EventEngine = option_engine.event_engine
        # Hedging parameters
        self.portfolio_name: str = ""
        self.vt_symbol: str = ""          # instrument used to hedge delta
        self.timer_trigger: int = 5       # timer events between hedge checks
        self.delta_target: int = 0        # desired portfolio delta
        self.delta_range: int = 0         # tolerated deviation around the target
        self.hedge_payup: int = 1         # priceticks beyond best quote when ordering
        self.active: bool = False
        self.active_orderids: Set[str] = set()  # outstanding hedge orders
        self.timer_count: int = 0
        self.register_event()
    def register_event(self) -> None:
        """Subscribe to order and timer events."""
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
    def process_order_event(self, event: Event) -> None:
        """Drop finished hedge orders from the active set."""
        order: OrderData = event.data
        if order.vt_orderid not in self.active_orderids:
            return
        if not order.is_active():
            self.active_orderids.remove(order.vt_orderid)
    def process_timer_event(self, event: Event) -> None:
        """Run one hedge pass every timer_trigger events while active."""
        if not self.active:
            return
        self.timer_count += 1
        if self.timer_count < self.timer_trigger:
            return
        self.timer_count = 0
        self.run()
    def start(
        self,
        portfolio_name: str,
        vt_symbol: str,
        timer_trigger: int,
        delta_target: int,
        delta_range: int,
        hedge_payup: int
    ) -> None:
        """Arm the hedger with its parameters; no-op if already running."""
        if self.active:
            return
        self.portfolio_name = portfolio_name
        self.vt_symbol = vt_symbol
        self.timer_trigger = timer_trigger
        self.delta_target = delta_target
        self.delta_range = delta_range
        self.hedge_payup = hedge_payup
        self.active = True
    def stop(self) -> None:
        """Disarm the hedger and reset the timer."""
        if not self.active:
            return
        self.active = False
        self.timer_count = 0
    def run(self) -> None:
        """One hedge pass: cancel stale orders, or re-hedge if delta drifted."""
        if not self.check_order_finished():
            self.cancel_all()
            return
        delta_max = self.delta_target + self.delta_range
        delta_min = self.delta_target - self.delta_range
        # Do nothing if portfolio delta is in the allowed range
        portfolio = self.option_engine.get_portfolio(self.portfolio_name)
        if delta_min <= portfolio.pos_delta <= delta_max:
            return
        # Calculate volume of contract to hedge
        delta_to_hedge = self.delta_target - portfolio.pos_delta
        instrument = self.option_engine.get_instrument(self.vt_symbol)
        hedge_volume = delta_to_hedge / instrument.cash_delta
        # Send hedge orders
        tick = self.main_engine.get_tick(self.vt_symbol)
        contract = self.main_engine.get_contract(self.vt_symbol)
        holding = self.option_engine.get_position_holding(self.vt_symbol)
        # Check if hedge volume meets contract minimum trading volume
        if abs(hedge_volume) < contract.min_volume:
            return
        if hedge_volume > 0:
            # Buying: cross the spread by hedge_payup ticks above the ask.
            price = tick.ask_price_1 + contract.pricetick * self.hedge_payup
            direction = Direction.LONG
            if holding:
                available = holding.short_pos - holding.short_pos_frozen
            else:
                available = 0
        else:
            # Selling: cross the spread by hedge_payup ticks below the bid.
            price = tick.bid_price_1 - contract.pricetick * self.hedge_payup
            direction = Direction.SHORT
            if holding:
                available = holding.long_pos - holding.long_pos_frozen
            else:
                available = 0
        order_volume = abs(hedge_volume)
        req = OrderRequest(
            symbol=contract.symbol,
            exchange=contract.exchange,
            direction=direction,
            type=OrderType.LIMIT,
            volume=order_volume,
            price=round_to(price, contract.pricetick),
        )
        # Close position if opposite available is enough
        if available > order_volume:
            req.offset = Offset.CLOSE
            vt_orderid = self.main_engine.send_order(req, contract.gateway_name)
            self.active_orderids.add(vt_orderid)
        # Open position if no opposite available
        elif not available:
            req.offset = Offset.OPEN
            vt_orderid = self.main_engine.send_order(req, contract.gateway_name)
            self.active_orderids.add(vt_orderid)
        # Else close all opposite available and open left volume
        else:
            close_req = copy(req)
            close_req.offset = Offset.CLOSE
            close_req.volume = available
            close_orderid = self.main_engine.send_order(close_req, contract.gateway_name)
            self.active_orderids.add(close_orderid)
            open_req = copy(req)
            open_req.offset = Offset.OPEN
            open_req.volume = order_volume - available
            open_orderid = self.main_engine.send_order(open_req, contract.gateway_name)
            self.active_orderids.add(open_orderid)
    def check_order_finished(self) -> bool:
        """Return True when no hedge orders are outstanding.

        (Annotation corrected from -> None: this method returns a bool.)
        """
        if self.active_orderids:
            return False
        else:
            return True
    def cancel_all(self) -> None:
        """Cancel every outstanding hedge order."""
        for vt_orderid in self.active_orderids:
            order: OrderData = self.main_engine.get_order(vt_orderid)
            req = order.create_cancel_request()
            self.main_engine.cancel_order(req, order.gateway_name)
class OptionAlgoEngine:
    """Manages electronic-eye algos: one per option, plus event routing."""
    def __init__(self, option_engine: OptionEngine):
        """Store engine references and subscribe to global events."""
        self.option_engine = option_engine
        self.main_engine = option_engine.main_engine
        self.event_engine = option_engine.event_engine
        self.algos: Dict[str, ElectronicEyeAlgo] = {}        # option vt_symbol: algo
        self.active_algos: Dict[str, ElectronicEyeAlgo] = {}
        # Annotation fixed: each underlying maps to the LIST of algos priced
        # off it (the value is a defaultdict(list), not a single algo).
        self.underlying_algo_map: Dict[str, List[ElectronicEyeAlgo]] = defaultdict(list)
        self.order_algo_map: Dict[str, ElectronicEyeAlgo] = {}  # vt_orderid: algo
        self.register_event()
    def init_engine(self, portfolio_name: str) -> None:
        """Create one algo per option of the portfolio (idempotent)."""
        if self.algos:
            return
        portfolio = self.option_engine.get_portfolio(portfolio_name)
        for option in portfolio.options.values():
            algo = ElectronicEyeAlgo(self, option)
            self.algos[option.vt_symbol] = algo
    def register_event(self) -> None:
        """Subscribe to order, trade and timer events."""
        self.event_engine.register(EVENT_ORDER, self.process_order_event)
        self.event_engine.register(EVENT_TRADE, self.process_trade_event)
        self.event_engine.register(EVENT_TIMER, self.process_timer_event)
    def process_underlying_tick_event(self, event: Event) -> None:
        """Fan an underlying tick out to every algo priced off it."""
        tick: TickData = event.data
        for algo in self.underlying_algo_map[tick.vt_symbol]:
            # Bug fix: pass the tick payload; the original passed the algo
            # itself (every sibling handler forwards the event payload).
            algo.on_underlying_tick(tick)
    def process_option_tick_event(self, event: Event) -> None:
        """Forward an option tick to its own algo."""
        tick: TickData = event.data
        algo = self.algos[tick.vt_symbol]
        # Bug fix: pass the tick payload (the original passed the algo).
        algo.on_option_tick(tick)
    def process_order_event(self, event: Event) -> None:
        """Route an order update to the algo that sent the order."""
        order: OrderData = event.data
        algo = self.order_algo_map.get(order.vt_orderid, None)
        if algo:
            algo.on_order(order)
    def process_trade_event(self, event: Event) -> None:
        """Route a trade to the algo that sent the originating order."""
        trade: TradeData = event.data
        algo = self.order_algo_map.get(trade.vt_orderid, None)
        if algo:
            algo.on_trade(trade)
    def process_timer_event(self, event: Event) -> None:
        """Tick every active algo."""
        for algo in self.active_algos.values():
            algo.on_timer()
    def start_algo_pricing(self, vt_symbol: str, params: dict) -> None:
        """Start pricing for one algo and wire up its tick event handlers."""
        algo = self.algos[vt_symbol]
        result = algo.start_pricing(params)
        if not result:
            return
        self.underlying_algo_map[algo.underlying.vt_symbol].append(algo)
        self.event_engine.register(
            EVENT_TICK + algo.option.vt_symbol,
            self.process_option_tick_event
        )
        self.event_engine.register(
            EVENT_TICK + algo.underlying.vt_symbol,
            self.process_underlying_tick_event
        )
    def stop_algo_pricing(self, vt_symbol: str) -> None:
        """Stop pricing for one algo and unhook its tick event handlers."""
        algo = self.algos[vt_symbol]
        result = algo.stop_pricing()
        if not result:
            return
        self.event_engine.unregister(
            EVENT_TICK + vt_symbol,
            self.process_option_tick_event
        )
        # Only drop the underlying subscription when no other algo uses it.
        buf = self.underlying_algo_map[algo.underlying.vt_symbol]
        buf.remove(algo)
        if not buf:
            self.event_engine.unregister(
                EVENT_TICK + algo.underlying.vt_symbol,
                self.process_underlying_tick_event
            )
    def start_algo_trading(self, vt_symbol: str, params: dict) -> None:
        """Enable automatic trading for one algo."""
        algo = self.algos[vt_symbol]
        algo.start_trading(params)
    def stop_algo_trading(self, vt_symbol: str) -> None:
        """Disable automatic trading for one algo."""
        algo = self.algos[vt_symbol]
        algo.stop_trading()
    def send_order(
        self,
        algo: ElectronicEyeAlgo,
        vt_symbol: str,
        direction: Direction,
        offset: Offset,
        price: float,
        volume: int
    ) -> str:
        """Send a limit order on behalf of *algo*; returns the vt_orderid."""
        contract = self.main_engine.get_contract(vt_symbol)
        req = OrderRequest(
            contract.symbol,
            contract.exchange,
            direction,
            OrderType.LIMIT,
            volume,
            price,
            offset
        )
        vt_orderid = self.main_engine.send_order(req, contract.gateway_name)
        # Remember the sender so order/trade events can be routed back.
        self.order_algo_map[vt_orderid] = algo
        return vt_orderid
    def cancel_order(self, vt_orderid: str) -> None:
        """Cancel a previously sent order."""
        order = self.main_engine.get_order(vt_orderid)
        req = order.create_cancel_request()
        self.main_engine.cancel_order(req, order.gateway_name)
    def write_algo_log(self, algo: ElectronicEyeAlgo, msg: str) -> None:
        """Emit a log event tagged with the algo's vt_symbol."""
        msg = f"[{algo.vt_symbol}] {msg}"
        log = LogData(APP_NAME, msg)
        event = Event(EVENT_OPTION_ALGO_LOG, log)
        self.event_engine.put(event)
    def put_algo_pricing_event(self, algo: ElectronicEyeAlgo) -> None:
        """Publish the algo's latest pricing state."""
        event = Event(EVENT_OPTION_ALGO_PRICING, algo)
        self.event_engine.put(event)
    def put_algo_trading_event(self, algo: ElectronicEyeAlgo) -> None:
        """Publish the algo's latest trading state."""
        event = Event(EVENT_OPTION_ALGO_TRADING, algo)
        self.event_engine.put(event)
    def put_algo_status_event(self, algo: ElectronicEyeAlgo) -> None:
        """Publish the algo's latest status."""
        event = Event(EVENT_OPTION_ALGO_STATUS, algo)
        self.event_engine.put(event)
|
from theHarvester.lib.core import *
import re
class TakeOver:
    """Active-recon check for subdomain takeover fingerprints.

    Fetches each candidate host and scans the response body for known
    provider-specific "unclaimed resource" strings.
    """
    def __init__(self, hosts):
        # NOTE THIS MODULE IS ACTIVE RECON
        self.hosts = hosts
        self.results = ""
        self.totalresults = ""
        self.proxy = False
        # Thank you to https://github.com/EdOverflow/can-i-take-over-xyz for these fingerprints
        self.fingerprints = {"'Trying to access your account?'": 'Campaign Monitor',
                             '404 Not Found': 'Fly.io',
                             '404 error unknown site!': 'Pantheon',
                             'Do you want to register *.wordpress.com?': 'Wordpress',
                             'Domain uses DO name serves with no records in DO.': 'Digital Ocean',
                             "It looks like you may have taken a wrong turn somewhere. Don't worry...it happens to all of us.": 'LaunchRock',
                             'No Site For Domain': 'Kinsta',
                             'No settings were found for this company:': 'Help Scout',
                             'Project doesnt exist... yet!': 'Readme.io',
                             'Repository not found': 'Bitbucket',
                             'The feed has not been found.': 'Feedpress',
                             'No such app': 'Heroku',
                             'The specified bucket does not exist': 'AWS/S3',
                             'The thing you were looking for is no longer here, or never was': 'Ghost',
                             "There isn't a Github Pages site here.": 'Github',
                             'This UserVoice subdomain is currently available!': 'UserVoice',
                             "Uh oh. That page doesn't exist.": 'Intercom',
                             "We could not find what you're looking for.": 'Help Juice',
                             "Whatever you were looking for doesn't currently exist at this address": 'Tumblr',
                             'is not a registered InCloud YouTrack': 'JetBrains',
                             'page not found': 'Uptimerobot',
                             'project not found': 'Surge.sh'}
        # Compile the fingerprint alternation once instead of on every
        # check() call; the lookahead also catches overlapping matches.
        self.fingerprint_regex = re.compile(
            "(?=(" + "|".join(map(re.escape, list(self.fingerprints.keys()))) + "))")
    async def check(self, url, resp):
        # Simple function that takes response and checks if any fingerprints exists
        # If a fingerprint exists figures out which one and prints it out
        matches = self.fingerprint_regex.findall(resp)
        for match in matches:
            print(f'\t\033[91m Takeover detected: {url}\033[1;32;40m')
            if match in self.fingerprints.keys():
                # Sanity check as to not error out
                print(f'\t\033[91m Type of takeover is: {self.fingerprints[match]}\033[1;32;40m')
    async def do_take(self):
        # Fetch every host and scan the non-empty responses; failures are
        # reported but deliberately not fatal (best-effort recon).
        try:
            if len(self.hosts) > 0:
                tup_resps: list = await AsyncFetcher.fetch_all(self.hosts, takeover=True, proxy=self.proxy)
                # Returns a list of tuples in this format: (url, response)
                tup_resps = [tup for tup in tup_resps if tup[1] != '']
                # Filter out responses whose responses are empty strings (indicates errored)
                for url, resp in tup_resps:
                    await self.check(url, resp)
            else:
                return
        except Exception as e:
            print(e)
    async def process(self, proxy=False):
        # Entry point: remember the proxy flag, then run the scan.
        self.proxy = proxy
        await self.do_take()
|
from __future__ import division, unicode_literals, absolute_import
import ctypes
from tabulate import tabulate
#from ctypes import *
from ctypes import cdll
from ctypes import c_float, byref
import numpy as np
import traceback, os, sys, datetime, glob, copy
from siman import header
from siman.header import print_and_log, printlog, geo_folder, runBash
from siman.classes import CalculationVasp, Structure
from siman.set_functions import InputSet
from siman.functions import return_atoms_to_cell, element_name_inv
from siman.inout import write_xyz
from siman.geo import local_surrounding, local_surrounding2, xcart2xred, xred2xcart
# Load the compiled pore-search shared library shipped next to this module.
lib = cdll.LoadLibrary(os.path.dirname(__file__)+'/libfindpores.so')
def create_c_array(pylist, ctype):
    """Convert *pylist* into a ctypes array for passing to the C++ library.

    Only ctype == float is supported (mapped to c_float).  The original
    silently returned None for any other ctype; raise instead so misuse
    fails loudly at the call site.
    """
    if ctype == float:
        return (ctypes.c_float * len(pylist))(*pylist)
    raise ValueError("Unsupported ctype for create_c_array: %s" % (ctype,))
def find_pores(st_in, r_matrix=1.4, r_impurity = 0.6, step_dec = 0.05, fine = 0.3, prec = 0.1, calctype = 'central', gbpos = 0,
    find_close_to = (), check_pore_vol = 0):
    """
    Scan the cell of *st_in* on a regular mesh and locate voids (pores) able
    to host an impurity sphere; the heavy lifting is done by the compiled
    routine lib.findpores() from libfindpores.so.

    st_in - input Structure() object
    r_impurity (A)- all pores smaller than this radius will be found
    r_matrix (A) - radius of matrix atoms disregarding to their type
    step_dec - scanning step of the cell in Angstroms
    fine - allows to change density of local points; local_step = step_dec/fine
    prec - precision of pore center determination
    calctype - which pores to return: 'central', 'gb', 'grain_vol',
               'all_local' or 'all_pores'
    gbpos - cartesian position of the grain boundary along rprimd[0];
            used only for 'gb' and 'grain_vol'
    check_pore_vol - allows to estimate volume of pores; has problems for big cells
    'find_close_to' - works in the cases of gb and grain_vol; allows to ignore all and find pore close to provided three reduced coordinates

    return - instance of Structure() class with coordinates of pores. Number and type of included pores depend on the argument of 'calctype'.
    """
    xred = st_in.xred
    natom = len(xred)
    rprimd = st_in.rprimd
    name = st_in.name
    #print xred

    """Additional values"""
    # check_pore_vol = 1
    #if calctype in ("pore_vol","gb","grain_vol","all_local" ): check_pore_vol = 1 #something wrong with this function, especially for big cells

    """----Conversions of types for C++"""
    # Lattice vectors and reduced coordinates are marshalled as flat
    # single-precision arrays, one array per component.
    r1 = create_c_array(rprimd[0], float)
    r2 = create_c_array(rprimd[1], float)
    r3 = create_c_array(rprimd[2], float)

    xred1 = (c_float * len(xred))(*[x[0] for x in xred])
    xred2 = (c_float * len(xred))(*[x[1] for x in xred])
    xred3 = (c_float * len(xred))(*[x[2] for x in xred])

    # Fixed-size output buffers filled by the C routine.
    max_npores = 10000;
    ntot = ctypes.c_int32(); npores = ctypes.c_int32()
    l_pxred1 = (c_float * max_npores)(0) #make static arrays fol local points
    l_pxred2 = (c_float * max_npores)(0)
    l_pxred3 = (c_float * max_npores)(0)
    l_npores = (ctypes.c_int32 * max_npores)(0)

    pxred1 = (c_float * max_npores)(0) #make static arrays natoms + npore
    pxred2 = (c_float * max_npores)(0)
    pxred3 = (c_float * max_npores)(0)

    """----Run C++ function"""
    print_and_log("Starting C++ function lib.findpores()...\n")
    # print(r_matrix, r_impurity, step_dec, fine, prec)
    lib.findpores ( check_pore_vol, \
                    max_npores, \
                    byref(ntot), l_pxred1, l_pxred2, l_pxred3, l_npores, \
                    byref(npores), pxred1, pxred2, pxred3, \
                    natom, xred1, xred2, xred3, \
                    c_float(r_matrix), c_float(r_impurity), c_float(step_dec), c_float(fine), c_float(prec), \
                    r1, r2, r3 )

    print_and_log( "ntot is ", ntot.value)
    print_and_log( "l_npores[0] is ",l_npores[0])

    # Unpack the flat local-point buffers into one list of points per pore;
    # l_npores[i] gives how many local mesh points belong to pore i.
    v = np.zeros((3))
    l_pxred = []
    shift1 = 0; shift2 = 0
    for i_por in range(npores.value):
        l_pxred.append( [] )
        shift2+=l_npores[i_por]

        for i in range(shift1, shift2):
            v[0] = l_pxred1[i]; v[1] = l_pxred2[i]; v[2] = l_pxred3[i]

            l_pxred[i_por].append( v.copy() )

        shift1 = shift2

    if shift2 != ntot.value:
        print_and_log( "Error! final shift2 not equal to ntot")

    #print l_pxred[0]

    pxred = [] # only coordinates of pores

    #print pxred1[natom]
    for i in range(npores.value):
        v[0] = pxred1[i+natom]; v[1]= pxred2[i+natom]; v[2] = pxred3[i+natom] #with shift, because first natom elements are coordinates of atoms

        pxred.append( v.copy() )

    #print pxred

    """----End of C++; result is two lists: lpxred - local geometry of all pores, pxred - coordinates of all pores"""

    """ Analyse of pores """
    # st_result = Structure()
    st_result = st_in.new()

    st_result.rprimd = rprimd

    targetp = np.array((0.,0.,0.))
    if find_close_to:
        targetp = np.asarray(find_close_to) #targer point
        print_and_log( "Target point is ",targetp)

    a = step_dec/fine #the side of little cube formed by the mesh which is used to find spheres inside the pore.
    aaa = a*a*a

    #find most central pore
    if calctype == 'central': #return coordinates of the most central pore
        st_result.name = "central_pore_from "+name
        center = np.array((0.5,0.5,0.5))#center of cell
        d_min = 100
        # Restrict to the octant x,y,z <= 0.5 so symmetry-equivalent pores
        # are not double-counted.
        for x in pxred:
            d = np.linalg.norm(x - center)
            #print x, x-center, d
            if d < d_min and x[0] <= 0.5 and x[1] <= 0.5 and x[2] <= 0.5:
                d_min = d
                x_min = x

        print_and_log( "The closest pore to the center has coordinates",x_min)
        st_result.xred.append( x_min )

    elif calctype == 'gb': #add impurity at gb
        st_result.name = "gb_pore_from "+name
        d_min = 100; #d2_min = 100
        dt_min =100
        i_min = 0; x_min = np.zeros((3))

        for i, x in enumerate(pxred):
            #print "l_npores ",l_npores[i]
            # Distance of the pore from the grain-boundary plane (reduced x).
            d = abs(x[0] - gbpos/rprimd[0][0]) #
            #print x[0], d

            if find_close_to: closer = (np.linalg.norm(targetp - x) < dt_min)
            else: closer = ( d < d_min ) # and x[1]>0.3 and x[2]>0.3:

            if closer:
                x_pre = x_min
                i_pre = i_min
                d_min = d
                dt_min = np.linalg.norm(targetp - x)
                x_min = x
                i_min = i

            #find and add impurity in bulk
            #d2 = abs( x[0] - (gbpos/rprimd[0][0] - 0.25) )
            #if d2 < d2_min:
            #    d2_min = d2
            #    x2_min = x
            #    i2_min = i

        #print "rprimd[0][0]", rprimd[0][0]
        print_and_log( "Position of boundary is ",gbpos/rprimd[0][0])

        #x_min[0] = gbpos/rprimd[0][0]
        if find_close_to: print_and_log( "The closest pore to the target point is [ %.2f %.2f %.2f ]"%(x_min[0], x_min[1], x_min[2]))
        else: print_and_log( "The closest pore to the gb has coordinates",x_min)
        st_result.xred.append( x_min )
        #st_result.xred.append( x_pre )

        #Calculate volume of the pore using local balls:
        print_and_log( "The number of pore is ",i_min," ; It has ",l_npores[i_min], "local balls")
        print_and_log( "Volume of pore is ", l_npores[i_min] * a*a*a, " A^3")
        #st_result.xred.extend( l_pxred[i_min] )
        #st_result.xred.extend( l_pxred[i_pre] )

        #print "The closest pore to the center of bulk has coordinates",x2_min
        #st_result.xred.append( x2_min )
        #Calculate volume of the pore using local balls:
        #print "The number of bulk pore is ",i2_min," ; It has ",l_npores[i2_min], "local balls"
        #print "Volume of pore is ", l_npores[i2_min] * a*a*a, " A^3";
        #st_result.xred.extend( l_pxred[i2_min] )

    elif calctype == 'grain_vol': #add impurity to volume of grain
        st_result.name = "grain_volume_pore_from "+name
        d2_min = 100
        dt_min = 100
        i_min = 0; x_min = np.zeros((3))

        for i, x in enumerate(pxred):
            #find and add impurity to the bulk
            # 0.25 (reduced) away from the boundary, i.e. mid-grain assuming
            # two equal grains per cell.
            d2 = abs( x[0] - (gbpos/rprimd[0][0] - 0.25) )

            if find_close_to: closer = (np.linalg.norm(targetp - x) < dt_min)
            else: closer = ( d2 < d2_min ) # and x[1]>0.3 and x[2]>0.3:

            if closer:
                dt_min = np.linalg.norm(targetp - x)
                d2_min = d2
                x2_min = x
                i2_min = i

        if find_close_to: print_and_log( "The closest pore to the target point is [ %.2f %.2f %.2f ]"%(x2_min[0], x2_min[1], x2_min[2]))
        else: print_and_log( "The closest pore to the center of bulk has coordinates",x2_min)
        st_result.xred.append( x2_min )

        #Calculate volume of the pore using local balls:
        print_and_log( "The number of bulk pore is ",i2_min," ; It has ",l_npores[i2_min], "local balls")
        print_and_log( "Volume of pore is ", l_npores[i2_min] * a*a*a, " A^3")
        st_result.xred.extend( l_pxred[i2_min] )

    elif calctype == 'all_local':
        st_result.name = "all_local_points_from "+name
        v_max = 0
        i_max = 0
        for i in range(npores.value):
            v_pore = l_npores[i] * aaa
            print_and_log( "Volume of pore is ", l_npores[i] * aaa, " A^3")
            if v_pore > v_max: v_max = v_pore; i_max = i
        print_and_log( "Pore number ", i_max,"has the largest volume ", v_max," A^3")
        # st_result.xred = l_pxred[i_max] # here coordinates of all local points to show geometry of pore with largerst volume
        st_result.xred = [x for group in l_pxred for x in group ] # all pores

    elif calctype == 'all_pores':
        st_result.name = "all_local_pores_from "+name
        st_result.xred = pxred

    # Pores are exported as a pseudo-element with fictitious Z=200 so that
    # they can be written and visualised like atoms.
    st_result.rprimd = rprimd
    st_result.xred2xcart()
    st_result.typat = [1 for x in st_result.xred]
    st_result.ntypat = 1
    st_result.natom = len(st_result.typat)
    st_result.znucl = [200]
    # NOTE(review): 'st_ntypat' below binds a throwaway local; it looks like
    # a typo for st_result.ntypat (already set above).  Left unchanged.
    st_ntypat = 1

    return st_result
def add_impurity(it_new, impurity_type = None, addtype = 'central', calc = [], r_pore = 0.5,
    it_to = '', ise_to = '', verlist_to = [], copy_geo_from = "", find_close_to = (),add_to_version = 0,
    write_geo = True, only_version = None, fine = 4, put_exactly_to = None, check_pore_vol = 0, replace_atom = None, override = False):
    """
    Add impurities in pores.

    Input:
    it_new - name of new structure with impurity
    impurity_type - name of impurity from Mendeley table, for example 'C'
    addtype - type of adding: ['central',]; 'central' means that impurity
    will be placed as close to the geometrical center of cell as possible.
    it_to , ise_to , verlist_to - completed calculations in which impurity
    will be added
    if 'verlist_to' is empty, function will try to find geometry files in 'geo_folder + struct_des[it_to].sfolder' folder;
    even if 'it_to' is empty it will try to find files in 'geo_folder + struct_des[it_new].sfolder+'/from' ' folder.
    'ise_to' also can be empty

    if 'copy_geo_from' is not empty, then programm copy all files from folder 'copy_geo_from' to
    folder 'geo_folder + struct_des[it_to].sfolder+"/"+it_to' or 'geo_folder + struct_des[it_new].sfolder+"/from" '

    'find_close_to' is tuple of three reduced coordinates of point close to which you want to find impurity. If empty - ignored;

    'add_to_version' is integer number added to each 'verlist_to' number to produce ver_new.

    'only_version' - if == [v,], then instertion will be provided only for v. If None insertion will be made in all found versions

    If you want to add impurity to relaxed structure ...

    'fine' - integer number; allows to reduce number of small steps for defining center

    Possible addtype's:
    'central' - add one atom to the pore which is most close to the center of the cell but with reduced coordinates less than 0.5 0.5 0.5
    'all_pore'  - add atoms in every found pore
    'all_local' - add atoms to every local point which allows to visualise topology of pores.
    'gb' - uses self.gbpos and places atom close to this value assuming that it will be at gb
    'grain_vol' - uses self.gbpos and assuming that cell contains two gb and two equal grains, places atom close to the centre of grain; y and z can be arbiratry

    put_exactly_to - will add impurity to this point
    find_close_to - will try to find closest void and insert pore here.
    check_pore_vol - allows to estimate volume of pores; has problems for big cells
    replace_atom - if not None, than the specified atom is substituted

    Side effects: creates new geometry folder with input structures;
    """
    struct_des = header.struct_des

    def test_adding_of_impurities(added, init, v):
        """
        Can be used only inside add_impurity()
        Replicates the structure and find again pores
        """
        # natoms_v1 caches the atom count of version 1 so later versions can
        # be compared against it (see the check at the bottom).
        global natoms_v1
        if added == None: return
        if v == 1: #TEST
            natoms_v1 = len(added.init.xcart) # for test
            st_rep_after = added.init.replic( (1,2,1) )

            rep = copy.deepcopy(init)
            rep.init = rep.init.replic( (1,2,1) );
            #print rep
            rep = add(znucl, "", rep, write_geo = False)
            #print rep
            #print "xcart of replic after adding ", st_rep_after.xcart
            #print "xcart of adding to replic ", rep.init.xcart
            if len(st_rep_after.xcart) != len(rep.init.xcart): raise RuntimeError
            # Every atom of add(replic(...)) must also exist in
            # replic(add(...)) up to rounding precision p.
            p = 0
            #for x2 in st_rep_after.xcart:
            #    print x2
            for x in rep.init.xcart:
                a = any( ( np.around(x2, p) == np.around(x, p) ).all() for x2 in st_rep_after.xcart )
                #b = any( ( np.ceil(x2, p) == np.ceil(x, p) ).all() for x2 in st_rep_after.xcart )
                #c = any( ( np.floor(x2, p) == np.floor(x, p) ).all() for x2 in st_rep_after.xcart )
                #print a, b, c
                #np.concatenate(a, b, c):
                if not a:
                    print_and_log( "Error! Can't find ", np.around(x,3), "in replic ")
                    raise RuntimeError

            #assert all([ all( np.around(v1, 8) == np.around(v2, 8) ) for (v1, v2) in zip(st_rep_after.xcart, rep.init.xcart) ])
            print_and_log( "add_impurity: test succesfully done")

        if natoms_v1 != len(added.init.xcart): print_and_log("You have different number of pores in different versions\n"); raise RuntimeError
        return

    def add(znucl, xyzpath = "", new = None, write_geo = True, put_exactly_to = None):
        "if put_exactly_to is True, then atom just added and nothing are searched"
        # Do not overwrite an existing geometry file unless override is set.
        if write_geo and os.path.exists(new.path["input_geo"]) and not override:
            print_and_log("add: File '"+new.path["input_geo"]+"' already exists; continue\n", imp = 'Y');
            return new

        #new.init = return_atoms_to_cell(new.init)
        if replace_atom:
            #atom substitution
            if znucl not in new.init.znucl:
                new.init.znucl.append(znucl)
                new.init.ntypat+=1
                new.init.typat[replace_atom] = new.init.ntypat
            else:
                ind = new.init.znucl.index(znucl)
                new.init.typat[replace_atom] = ind + 1
            # Rebuild per-type atom counts after the substitution.
            new.init.nznucl = []
            for typ in range(1, new.init.ntypat+1):
                new.init.nznucl.append(new.init.typat.count(typ) )

        else:
            new_before = copy.deepcopy(new)

            # new.init.xcart[-2][0]-=0.9 #was made once manually for c1gCOi10.1
            # new.init.xcart[-2][2]+=0.2
            # new.init.xred    = xcart2xred(new.init.xcart, new.init.rprimd)
            write_xyz(new.init)
            #step = 0.042
            step = 0.06
            #r_pore = 0.56
            #fine = 0.3 # for visualisation of pores
            #fine = 4 #controls small steps; the steps are smaller for larger numbers
            #r_pore = 0.54
            prec = 0.004 # precision of center Angs
            # Matrix-atom radius: either a generic value or derived from the
            # hexagonal lattice parameter when it is known.
            if new.hex_a == None:
                r_mat = 1.48 -step
            else:
                r_mat = new.hex_a / 2 - step

            if put_exactly_to:
                pores_xred = [np.array(put_exactly_to),]
                print_and_log( 'Inmpurity just put in ', pores_xred, imp = 'Y')
            else:
                pores = find_pores(new.init, r_mat, r_pore, step, fine, prec, addtype, new.gbpos, find_close_to, check_pore_vol) #octahedral
                pores_xred = pores.xred

            npores = len(pores_xred)

            st = new.init

            #delete last oxygen; was made once manually for c1gCOi10.1
            # st.natom-=1
            # del st.xred[-1]
            # del st.typat[-1]

            st.natom += npores
            st.xred.extend( pores_xred )

            if znucl in st.znucl:
                print_and_log( "znucl of added impurity is already in cell")
                ind = st.znucl.index(znucl)
                typat = ind+1
                st.nznucl[ind]+=npores
            else:
                st.ntypat +=1
                typat = st.ntypat
                st.znucl.append( znucl )
                st.nznucl.append( npores )

            for i in range( npores ):
                st.typat.append( typat )

            st.xred2xcart()

            new.init = st

            #print "Add impurity: len(xred ", len(new.init.xred)
            #print "natom", new.init.natom

            #For automatisation of fit: bookkeeping of which atoms were added.
            try:
                #new.build
                if new.build.nadded == None: new.build.nadded=npores
                else: new.build.nadded+=npores
                if new.build.listadded == [None]: new.build.listadded = range(new.natom - npores, new.natom) #list of atoms which were added
                else: new.build.listadded.extend( range(new.natom - npores, new.natom) )
                #print "Warning!!! Information about added impurities rewritten"
            except AttributeError:
                pass

        #new.init.znucl = new.znucl
        #new.init.typat = new.typat

        #write_xyz(replic(new.init, (2,1,2))  , xyzpath)

        #test_adding_of_impurities(new, new_before, v)

        print_and_log("Impurity with Z="+str(znucl)+" has been added to the found pore in "+new.name+"\n\n")

        if write_geo:
            write_xyz(new.init , xyzpath)
            new.write_geometry("init",new.des, override = override)

        print_and_log( "\n")

        return new

    """0.Begin----------------------------------------------------------------------------"""
    znucl = element_name_inv(impurity_type)

    if impurity_type != 'octa' and impurity_type not in it_new:
        print_and_log("add_impurity: Your name 'it_new' is incorrect!\n\n")
        raise RuntimeError
    #del header.history[-2]
    #
    #hstring = ("add_impurity('%s', '%s', '%s', calc, %.3f, '%s', '%s', %s, '%s') #at %s" %
    #    (it_new, impurity_type, addtype, r_pore,
    #    it_to, ise_to, verlist_to, copy_geo_from,
    #    datetime.date.today() ) )
    # Record this call in the session history (skip if identical to last).
    hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
    if hstring != header.history[-1]: header.history.append( hstring )

    #geo_exists =

    """1. The case of insertion to existing calculations--------------------------------------------------"""
    if verlist_to:

        for v in verlist_to:
            if only_version and v not in only_version: continue # only_version = None works for all versions

            id = (it_to, ise_to, v)
            new = copy.deepcopy(calc[id])

            new.init = new.end #replace init structure by the end structure

            new.version = v+add_to_version
            new.name = it_new#+'.'+id[1]+'.'+str(id[2])
            new.des = 'Obtained from '+str(id)+' by adding '+impurity_type+' impurity '
            path_new_geo = struct_des[it_new].sfolder+"/"+it_new+"/"+it_new+'.imp.'+addtype+'.'+str(new.version)+'.'+'geo'
            new.init.name = it_new+".init."+str(new.version)
            xyzpath = struct_des[it_new].sfolder+"/"+it_new

            new.path["input_geo"] = geo_folder+path_new_geo

            print_and_log("File '"+new.path["input_geo"] +"' with impurity will be created\n");
            #new.init.name = 'test_before_add_impurity'

            new = add(znucl, xyzpath, new, write_geo, put_exactly_to = put_exactly_to)

        """2. The case of insertion to geo files------------------------------------------------------------"""
    else:
        """ Please rewrite using new functions """

        print_and_log("You does not set 'id' of relaxed calculation. I try to find geometry files in "+it_new+" folder\n")

        if it_to: geo_path = geo_folder + struct_des[it_to].sfolder + "/" + it_to
        else: geo_path = geo_folder + struct_des[it_new].sfolder + "/" + it_new+'/from'
        if copy_geo_from:
            print_and_log("You asked to copy geo files from "+copy_geo_from+" to " +geo_path+ " folder\n")
            #if not os.path.exists(os.path.dirname(geo_path)):
            runBash( "mkdir -p "+geo_path )
            runBash( "cp "+copy_geo_from+"/* "+geo_path )

        if os.path.exists(geo_path):
            print_and_log("Folder '"+geo_path +"' was found. Trying to add impurity\n");
        else:
            print_and_log("Error! Folder "+geo_path+" does not exist\n"); raise RuntimeError

        #geofilelist = glob.glob(geo_path+'/*.geo*') #Find input_geofile
        #geofilelist = runBash('find '+geo_path+' -name "*grainA*.geo*" ').splitlines()
        #geofilelist = runBash('find '+geo_path+' -name "*.geo*" ').splitlines()
        geofilelist = glob.glob(geo_path+'/*.geo*')
        print_and_log( "There are several files here already: ", geofilelist, imp = 'y' )
        #print 'find '+geo_path+' -name "*.geo*" ',geofilelist
        #return
        for input_geofile in geofilelist:
            # Version number is parsed from the "version" line of the file.
            v = int( runBash("grep version "+str(input_geofile) ).split()[1] )

            if only_version and v not in only_version: continue # only_version = None works for all versions

            new = CalculationVasp()
            new.version = v

            new.name = input_geofile

            new.read_geometry(input_geofile)

            init = copy.deepcopy(new)

            igl = input_geofile.split("/")
            #new.name = igl[-3]+'/'+igl[-3] #+input_geofile
            new.name = struct_des[it_new].sfolder+"/"+it_new+"/"+it_new
            print_and_log( "New path and part of name of file is ", new.name, imp = 'Y')
            #return
            new.des = 'Obtained from '+input_geofile+' by adding '+impurity_type+' impurity '
            #new.init.xred = new.xred
            #new.init.rprimd = new.rprimd

            #print new.rprimd
            new.init.name = new.name+'.imp.'+addtype+'.'+str(new.version)
            #new.path["input_geo"] = geo_folder+it_new+"/"+new.end.name+'.'+'geo'
            new.path["input_geo"] = geo_folder+"/"+new.init.name+'.'+'geo'

            #new.init.name = 'test_before_add_impurity'
            new = add(znucl, "", new, write_geo, put_exactly_to = put_exactly_to)

    return new.path["input_geo"] #return for last version
def insert_cluster(insertion, i_center, matrix, m_center):
    """
    Insert the atoms of *insertion* into *matrix* so that i_center coincides
    with m_center; matrix atoms closer than 1 A to an inserted atom are
    replaced instead of duplicated.

    Take care of orientation; typat should be consistent
    Input:
    insertion -  object of class Structure(), which is supposed to be inserted in matrix
    in such a way that i_center will be combined with m_center.
    matrix - object of class Structure().
    i_center, m_center - numpy arrays (3) cartesian coordinates

    Returns the modified matrix Structure (atoms returned to the cell,
    POSCAR written as a side effect).
    """
    ins = copy.deepcopy(insertion)
    mat = copy.deepcopy(matrix)
    r = mat.rprimd
    hproj = [ (r[0][i]+r[1][i]+r[2][i]) * 0.5 for i in (0,1,2) ] #projection of vectors on three axis
    if 1:
        # Shift both structures so the two anchor points sit at the origin.
        for i, x in enumerate(ins.xcart):
            ins.xcart[i] = x - i_center
        for i, x in enumerate(mat.xcart):
            mat.xcart[i] = x - m_center

    max_dis = 1
    for i_x, ix in enumerate(ins.xcart):
        dv_min = max_dis
        print_and_log( "Insertion atom ",ix,)
        if 1:
            # Find the nearest matrix atom under periodic boundary conditions.
            for j, mx in enumerate(mat.xcart):
                dv = mx - ix
                for i in 0,1,2:
                    if dv[i] > hproj[i]: dv = dv - mat.rprimd[i] #periodic boundary conditions - can be not correct (in the sense that closest image can lie not 100 % in the neighbourhood image cell ) for oblique cells and large absolute values of dv
                    if dv[i] < -hproj[i]: dv = dv + mat.rprimd[i]
                len1 = np.linalg.norm(dv)
                len2, second_len2 = mat.image_distance(mx, ix, r, 1) #check len1
                #print "Lengths calculated with two methods ", len1, len2
                len1 = len2 #just use second method
                #assert np.around(len1,1) == np.around(len2,1)
                if len1 < dv_min:
                    dv_min = len1;
                    j_r = j # number of matrix atom to replace
        if 1:
            #Modify to replace overlapping atoms
            if dv_min == max_dis:
                # No matrix atom within max_dis: simply add the new atom.
                print_and_log( " is more far away from any matrix atom than ",dv_min," A; I insert it")
                # mat.xcart.append( ix )
                # print_and_log( 'type of added atom is ', ins.typat[i_x])
                # mat.typat.append( ins.typat[i_x] )
                mat = mat.add_atom(xc = ix, element = ins.get_elements()[i_x] )
            else:
                # Overlap: keep the matrix atom slot but take the inserted
                # atom's position.
                print_and_log( "will replace martix atom", mat.xcart[j_r] )
                mat.xcart[j_r] = ix.copy()

    mat.rprimd = r
    mat.xcart2xred()
    mat.natom = len(mat.xcart)
    mat.name = 'test_of_insert'
    st = mat
    # print(st.natom, len(st.xcart), len(st.typat), len(st.znucl), max(st.typat) )
    # write_xyz(mat)
    mat = mat.return_atoms_to_cell()
    mat.write_poscar()
    return mat
    #write_xyz(mat)
def make_interface(main_slab, m_xc, second_slab, s_xc):
    """
    Make interfaces
    Both slabs should have close sizes along x and y and should be oriented correctly

    Input:
    main_slab (Structure) - slab
    second_slab (Structure) - slab, scaled to coincide with the main slab
    m_xc, s_xc (array(3)) - cartesian coordinates of points in main_slab and secondary slab to be combined

    Return Slab with interface and scaled second slab
    """
    ins = copy.deepcopy(second_slab)
    mat = copy.deepcopy(main_slab)

    if 1:
        #scale insertion: stretch the second slab's in-plane lattice vectors
        #so they match the main slab's lengths.
        mr = mat.rprimd_len()
        ir = ins.rprimd_len()
        print('Matrix vlength', mr)
        print('Insert vlength', ir)
        x_scale = mr[0]/ ir[0]
        y_scale = mr[1]/ ir[1]
        print('Scaling factors', x_scale, y_scale)
        # print('i_center', i_center)
        ins.rprimd[0] = ins.rprimd[0]*x_scale
        ins.rprimd[1] = ins.rprimd[1]*y_scale
        ir = ins.rprimd_len()
        # Anchor point is carried through the scaling via reduced coordinates.
        s_xred = xcart2xred([s_xc], ins.rprimd)[0]
        print('Insert vlength after scaling', ir)
        ins.update_xcart()
        # ins.xcart2xred()
        ins_sc = ins.copy()
        ins_sc.name+='_scaled'
        s_xc = xred2xcart([s_xred], ins.rprimd)[0]
        # print('i_center', i_center)

    if 1:
        # Shift both slabs so the two anchor points sit at the origin.
        for i, x in enumerate(ins.xcart):
            ins.xcart[i] = x - s_xc
        for i, x in enumerate(mat.xcart):
            mat.xcart[i] = x - m_xc

    # Merge: every atom of the scaled second slab is added to the main slab.
    for i_x, ix in enumerate(ins.xcart):
        mat = mat.add_atom(xc = ix, element = ins.get_elements()[i_x] )

    mat.xcart2xred()
    mat.natom = len(mat.xcart)
    # NOTE(review): 'inteface' is a typo but it is a runtime name suffix;
    # kept as-is to avoid changing produced file names.
    mat.name += 'inteface'
    mat = mat.return_atoms_to_cell()
    mat = mat.shift_atoms([0,0,0.5])
    return mat, ins_sc
def insert(it_ins, ise_ins, mat_path, it_new, calc, type_of_insertion = "xcart" ):
    """For insertion of atoms to cells with changed lateral sizes
    Input:
    'type_of_insertion = xred' used to add xred coordinates
    mat_path - path to geo files which are supposed to be changed
    it_ins - already existed calculation; xred will be used from this calculation.
    it_new - new folder in geo folder for obtained structure

    This function finds version of calculation in folder mat_path and tries to use the same version of it_ins
    """
    if not os.path.exists(mat_path):
        print_and_log("Error! Path "+mat_path+" does not exist\n\n")
        raise RuntimeError

    if it_ins not in mat_path and it_ins not in it_new:
        print_and_log('Cells are', it_ins, mat_path, it_new)
        print_and_log("Error! you are trying to insert coordinates from cell with different name\n\n")
        #raise RuntimeError

    # Record this call in the session history (skip if identical to last).
    hstring = ("%s #on %s"% (traceback.extract_stack(None, 2)[0][3], datetime.date.today() ) )
    if hstring != header.history[-1]: header.history.append( hstring )

    # Prefer geo files from the 'target' subfolder; fall back to the root.
    geofilelist = runBash('find '+mat_path+'/target -name "*.geo*" ').splitlines()

    if geofilelist == []:
        print_and_log("Warning! Target folder is empty. Trying to find in root folder ...")
        geofilelist = runBash('find '+mat_path+'/ -name "*.geo*" ').splitlines()

    ins = None
    for mat_geofile in geofilelist:
        mat = CalculationVasp()
        mat.name = mat_geofile
        mat.read_geometry(mat_geofile)
        #step = 0.27
        #r_pore = 0.56
        #r_mat = mat.hex_a / 2 - step
        #pores = find_pores(mat.init, r_mat, r_pore, step, 0.3, 'central') #octahedral
        #mat.xcart.append ( pores.xcart[0] )
        #mat.typat.append(1)
        try:
            # Keep the last successfully-resolved source calculation as a
            # fallback for versions missing from 'calc'.
            # NOTE(review): if the very first lookup fails, 'ins' stays None
            # and the code below will crash on attribute access -- confirm
            # this is acceptable for the intended workflows.
            ins_working = ins
            ins = calc[(it_ins, ise_ins, mat.version)]
        except KeyError:
            print_and_log( "No key", (it_ins, ise_ins, mat.version), "I use previous working version !!!", imp = 'y' )
            ins = ins_working
            #return
        #ins.end.znucl = ins.znucl
        #ins.end.nznucl = ins.nznucl
        #ins.end.ntypat = ins.ntypat
        #ins.end.typat = ins.typat
        #print ins.xcart[-1]
        # NOTE(review): 'struct_des' is not defined locally; presumably it is
        # a module-level alias of header.struct_des -- confirm.
        mat_geopath = geo_folder+struct_des[it_new].sfolder + '/'

        if type_of_insertion == "xcart":
            #Please update here!
            mat_filename = '/'+it_new+"."+"inserted."+str(mat.version)+'.'+'geo'

            v = np.zeros(3)
            # Combine the end structure of the source calculation with the
            # matrix geometry around the origin.
            result = insert_cluster(ins.end, v, mat.init, v )
            mat.end = result
            mat.init = result
            # mat.znucl = mat.end.znucl
            # mat.nznucl = mat.end.nznucl
            # mat.ntypat = mat.end.ntypat
            # mat.typat = mat.end.typat
            # mat.natom = len(mat.end.xred)
            #mat.version = ins.version
            des = ins.name+" was inserted to "+mat_geofile

        elif type_of_insertion == "xred":
            mat_filename = '/from/'+it_new+".xred."+str(mat.version)+'.'+'geo'

            #mat.end.rprimd = mat.rprimd
            #mat.init.xred = copy.deepcopy(ins.end.xred)
            #mat.init.typat = copy.deepcopy(ins.end.)
            #print ins.end.xcart
            # Copy the whole relaxed structure but keep the matrix lattice;
            # cartesian positions are recomputed from the copied xred.
            rprimd = copy.deepcopy(mat.init.rprimd)
            #build = mat.build
            mat.init = copy.deepcopy(ins.end)
            #mat.build = build
            mat.init.rprimd = rprimd #return initial rprimd
            mat.init.xred2xcart() #calculate xcart with new rprimd
            des = "atoms with reduced coord. from "+ins.name+" was fully copied to "+mat_geofile
            mat.init.name = 'test_insert_xred'+str(mat.version)
            write_xyz(mat.init)

        mat.path["input_geo"] = mat_geopath + it_new + mat_filename
        if not mat.write_geometry("init",des): continue
        print_and_log("Xred from "+it_ins+" was inserted in "+mat_geofile+" and saved as "+mat_filename+" \n\n")

    return
def determine_voids(st, r_impurity, fine = 1, step_dec = 0.05):
    """Locate candidate voids in *st* and characterise their surroundings.

    Runs find_pores() in 'all_pores' mode, writes xyz visualisations of the
    candidate positions, and for every void computes the summed distance to
    its six nearest atoms plus a distortion measure.

    Returns (st_pores, sums, avds) where sums and avds are numpy arrays
    aligned with st_pores.xcart.
    """
    if not r_impurity:
        printlog('add_neb(): Error!, Please provide *r_impurity* (1.6 A?)')

    printlog('Searching for voids', important = 'y')
    st_pores = find_pores(st, r_matrix = 0.5, r_impurity = r_impurity, step_dec = step_dec, fine = fine, calctype = 'all_pores')

    printlog('List of found voids:\n', np.array(st_pores.xcart) )
    # Visualise candidate positions as hydrogen atoms (also 2x2x2 replicated).
    write_xyz(st.add_atoms(st_pores.xcart, 'H'), file_name = st.name+'_possible_positions')
    write_xyz(st.add_atoms(st_pores.xcart, 'H'), replications = (2,2,2), file_name = st.name+'_possible_positions_replicated')

    # One (sum, av_dev) pair per void; av_dev comes back as a 1-element seq.
    surroundings = [
        local_surrounding2(xc, st, n_neighbours = 6, control = 'sum_av_dev', periodic = True)
        for xc in st_pores.xcart
    ]
    sums = np.array([pair[0] for pair in surroundings])
    avds = np.array([pair[1][0] for pair in surroundings]).round(0)

    print_and_log('Sum of distances to 6 neighboring atoms for each void (A):\n', sums, imp ='y')
    print_and_log('Distortion of voids (0 - is symmetrical):\n', avds, imp ='y')

    return st_pores, sums, avds
def determine_unique_voids(st_pores, sums, avds):
    """Pick one representative void per crudely-unique coordination sum.

    Voids whose neighbour-distance sums agree to 0.1 A are treated as
    equivalent; the first occurrence of each unique sum is kept.  A summary
    table is printed and the list of representative cartesian positions is
    returned.
    """
    crude_prec = 1 # number of signs after 0
    rounded = sums.round(crude_prec)
    sums_crude = np.unique(rounded)

    print_and_log('The unique voids based on the sums:',
        '\nwith 0.01 A prec:',np.unique(sums.round(2)),
        '\nwith 0.1 A prec:',sums_crude,
        imp ='y')

    print_and_log('Based on crude criteria only', len(sums_crude),'types of void are relevant', imp = 'y')

    insert_positions = []
    rows = []
    for void_no, crude_sum in enumerate(sums_crude):
        first = np.where(rounded == crude_sum)[0][0]
        rows.append([void_no, st_pores.xcart[first].round(2), first,
                     avds[first], sums[first]])
        insert_positions.append(st_pores.xcart[first])

    print_and_log( tabulate(rows, headers = ['void #', 'Cart.', 'Index', 'Dev.', 'Sum'], tablefmt='psql'), imp = 'Y' )

    return insert_positions
def insert_atom(st, el, i_void = None, i_void_list = None, r_imp = 1.6, ):
    """Simple Wrapper for inserting atoms.

    Determines the symmetry-unique voids of *st* and inserts element *el*
    into the chosen one(s).

    st (Structure) - host structure
    el (str) - element name to insert
    i_void (int) - index of the void from the printed table; has higher
                   priority than i_void_list
    i_void_list (list of int) - several void indices; if both i_void and
                   i_void_list are None, every unique void is filled
    r_imp (float) - impurity radius used for the void search

    return st_new, i_add, sts_by_one
        st_new - all positions are filled
        i_add - the number of last inserted atom (None when nothing inserted)
        sts_by_one - list of structures with only one inserted atom in all found positions
    """
    r_impurity = r_imp
    st_pores, sums, avds = determine_voids(st, r_impurity)

    insert_positions = determine_unique_voids(st_pores, sums, avds)

    printlog('To continue please choose *i_void* from the list above', imp = 'y')

    # st.name = st.name.split('+')[0]

    # BUGFIX: the original test was `if i_void:`, which silently discarded
    # the perfectly valid index 0 and fell through to inserting into *all*
    # voids.  Compare against None explicitly.
    if i_void is not None:
        i_void_list = [i_void]

    if i_void_list is None:
        i_void_list = list(range(len(insert_positions)))
        printlog('No i_void was provided, I insert all', imp = 'y')

    st_new = st.copy()
    sts_by_one = []
    i_add = None  # was a NameError when i_void_list ended up empty

    for i in i_void_list:
        xc = insert_positions[i]

        st_new, i_add = st_new.add_atoms([xc], el, return_ins = True)

        # Also keep a variant with only this single insertion.
        st_one, _ = st.add_atoms([xc], el, return_ins = True)
        st_one.name+='+'+el+str(i)
        sts_by_one.append(st_one)

        st_new.name+='+'+el+str(i)
        st_new.des+=';Atom '+el+' added to '+ str(xc)

    # NOTE(review): this logs the *original* structure's description, while
    # st_new.des is the one just updated -- confirm which is intended.
    printlog(st.des, imp = 'y')

    st_new.write_poscar()
    st_new.magmom = [None]

    return st_new, i_add, sts_by_one
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/3D_viewer/B1 case
from paravistest import *
from presentations import *
from pvsimple import *
import pvserver as paravis
import math
# 1. First viewer creation
view1 = GetRenderView()

# 2. Second viewer creation
view2 = CreateRenderView()

# 3. Change view size: split the combined width 70/30 between the two views,
# keeping each view's original height.
size1 = view1.ViewSize
size2 = view2.ViewSize
w = size1[0] + size2[0]
w1 = math.trunc(w * 0.7)
w2 = w - w1
view1.ViewSize = [w1, size1[1]]
view2.ViewSize = [w2, size2[1]]

# 4. Change view position
# NOTE(review): the assignments below overwrite the 70/30 widths computed in
# step 3 and appear to swap width/height ([h//2, w]); also `view1.ViewPosition`
# is assigned twice while view2's position is never set -- the second
# assignment was probably meant to be `view2.ViewPosition`.  Confirm against
# the reference case /visu/3D_viewer/B1 before changing behavior.
h = view1.ViewSize[1]
view1.ViewSize = [h//2, w]
view2.ViewSize = [h//2, w]
view1.ViewPosition = [0, h//2]
view1.ViewPosition = [0, 0]
Render(view1)
Render(view2)
|
from twisted.words.protocols import irc
from txircd.modbase import Command
from txircd.utils import irc_lower
from fnmatch import fnmatch
class ListCommand(Command):
    """IRC LIST command: one RPL_LIST reply per channel (optionally filtered
    by user-supplied glob masks), terminated by RPL_LISTEND."""

    def onUse(self, user, data):
        chancache = []
        masks = data["chanfilter"]
        for channame, channel in self.ircd.channels.iteritems():
            # When masks were given, keep only channels matching at least one.
            if masks is not None and not any(fnmatch(channame, mask) for mask in masks):
                continue
            chancache.append({
                "channel": channel,
                "name": channel.name,
                "users": len(channel.users),
                "topic": channel.topic if channel.topic else ""
            })
        # Let other modules filter or augment the listing before it is sent.
        if "listdata" in self.ircd.actions:
            for action in self.ircd.actions["listdata"]:
                chancache = action(user, chancache)
                if not chancache:
                    break
        for cdata in chancache:
            user.sendMessage(irc.RPL_LIST, cdata["name"], str(cdata["users"]), ":[{}] {}".format(cdata["channel"].modeString(user), cdata["topic"]))
        user.sendMessage(irc.RPL_LISTEND, ":End of channel list")

    def processParams(self, user, params):
        # registered counts down remaining registration steps; >0 means the
        # user has not finished registering yet.
        if user.registered > 0:
            user.sendMessage(irc.ERR_NOTREGISTERED, "LIST", ":You have not registered")
            return {}
        chanFilter = irc_lower(params[0]).split(",") if params else None
        return {
            "user": user,
            "chanfilter": chanFilter
        }
class Spawner(object):
    """Module factory: wires the LIST command into the IRC daemon."""

    def __init__(self, ircd):
        self.ircd = ircd

    def spawn(self):
        # Expose the commands this module provides.
        commands = {"LIST": ListCommand()}
        return {"commands": commands}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for vectorization of array kernels."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.compat import compat
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops.parallel_for import control_flow_ops as pfor_control_flow_ops
from tensorflow.python.ops.parallel_for.test_util import PForTestCase
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ArrayTest(PForTestCase):
  def test_gather(self):
    # Static-shape input plus a dynamic-shape alias of the same tensor, so
    # both the known- and unknown-shape vectorization paths are exercised.
    x = random_ops.random_uniform([3, 3, 3])
    x2 = array_ops.placeholder_with_default(x, shape=None)  # Has dynamic shape.

    def loop_fn(i):
      outputs = []
      x_i = array_ops.gather(x, i)
      # x_i is the loop-variant slice: only axis 0 is tested for it, while
      # the loop-invariant tensors are tested on several axes incl. -1.
      for y in [x, x2, x_i]:
        axes = [0] if y is x_i else [0, 2, -1]
        for axis in axes:
          # Cover loop-invariant scalar, loop-variant scalar, and 1-D / 2-D
          # index tensors mixing loop-variant and constant indices.
          outputs.append(array_ops.gather(y, 2, axis=axis))
          outputs.append(array_ops.gather(y, i, axis=axis))
          outputs.append(array_ops.gather(y, [i], axis=axis))
          outputs.append(array_ops.gather(y, [i, 2], axis=axis))
          outputs.append(array_ops.gather(y, [[2, i], [i, 1]], axis=axis))
      return outputs

    self._test_loop_fn(loop_fn, 3)
def test_gather_nd(self):
x = random_ops.random_uniform([3, 3, 3])
def loop_fn(i):
outputs = []
x_i = array_ops.gather(x, i)
outputs.append(array_ops.gather_nd(x_i, [0], batch_dims=0))
outputs.append(array_ops.gather_nd(x_i, [i], batch_dims=0))
outputs.append(array_ops.gather_nd(x_i, [[i], [i], [i]], batch_dims=1))
return outputs
self._test_loop_fn(loop_fn, 3)
def test_shape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.shape(x_i), array_ops.shape(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3)
def test_size(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.size(x_i), array_ops.size(x_i, out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3)
def test_rank(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.rank(x_i)
self._test_loop_fn(loop_fn, 3)
def test_shape_n(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([3])
def loop_fn(i):
x_i = array_ops.gather(x, i)
y_i = array_ops.gather(y, i)
return array_ops.shape_n([x_i, x, y, y_i]), array_ops.shape_n(
[x_i, x, y, y_i], out_type=dtypes.int64)
self._test_loop_fn(loop_fn, 3)
def test_reshape(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.reshape(x1, [-1]), array_ops.reshape(x1, [1, 3, 1, -1])
self._test_loop_fn(loop_fn, 3)
def test_broadcast_to(self):
x = random_ops.random_uniform([3, 2, 1, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.broadcast_to(x1, [2, 2, 3]),
array_ops.broadcast_to(x1, [1, 2, 1, 3]))
self._test_loop_fn(loop_fn, 3)
def test_expand_dims(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.expand_dims(
x1, axis=-1), array_ops.expand_dims(
x1, axis=1)
self._test_loop_fn(loop_fn, 3)
def test_one_hot(self):
indices = random_ops.random_uniform(
[3, 2, 3], minval=0, maxval=4, dtype=dtypes.int32)
def loop_fn(i):
indices_i = array_ops.gather(indices, i)
return (array_ops.one_hot(indices_i, depth=4, on_value=2., off_value=-2.),
array_ops.one_hot(indices_i, depth=4, axis=1))
self._test_loop_fn(loop_fn, 3)
def test_searchsorted(self):
sorted_inputs = math_ops.cumsum(random_ops.random_uniform([3, 2, 4]),
axis=-1)
values = random_ops.random_uniform([2, 3], minval=-1, maxval=4.5)
def loop_fn(i):
inputs_i = array_ops.gather(sorted_inputs, i)
return [array_ops.searchsorted(inputs_i, values, out_type=dtypes.int32,
side="left"), # creates LowerBound op.
array_ops.searchsorted(inputs_i, values, out_type=dtypes.int64,
side="right")] # creates UpperBound op.
self._test_loop_fn(loop_fn, 3)
def test_slice(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.slice(x1, begin=(0, 1), size=(2, 1))
self._test_loop_fn(loop_fn, 3)
def test_tile(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [2, 1])
self._test_loop_fn(loop_fn, 3)
def test_tile_loop_dependent(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.tile(x1, [i, 1])
with self.assertRaisesRegexp(ValueError, "expected to be loop invariant"):
pfor_control_flow_ops.pfor(loop_fn, 2)
def test_pack(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.stack([x1, y], axis=-1)
self._test_loop_fn(loop_fn, 1)
def test_unpack(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x_i = array_ops.gather(x, i)
return array_ops.unstack(
x_i, 4, axis=-1), array_ops.unstack(
x_i, 3, axis=1)
self._test_loop_fn(loop_fn, 3)
def test_pad(self):
x = random_ops.random_uniform([3, 2, 3])
padding = constant_op.constant([[1, 2], [3, 4]])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.pad(x1, padding, mode="CONSTANT")
self._test_loop_fn(loop_fn, 3)
def test_split(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.split(x1, 2, axis=0), array_ops.split(x1, 3, axis=-1)
self._test_loop_fn(loop_fn, 3)
def test_split_v(self):
x = random_ops.random_uniform([3, 6, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.split(x1, [2, 1, 3], axis=0),
array_ops.split(x1, [3], axis=-1))
self._test_loop_fn(loop_fn, 3)
def test_squeeze(self):
x = random_ops.random_uniform([5, 1, 2, 1])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return (array_ops.squeeze(x1, axis=0),
array_ops.squeeze(x1, axis=-1),
array_ops.squeeze(x1))
self._test_loop_fn(loop_fn, 3)
def test_transpose(self):
x = random_ops.random_uniform([3, 2, 3, 4])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.transpose(x1, [2, 1, 0])
self._test_loop_fn(loop_fn, 3)
def test_zeros_like(self):
x = random_ops.random_uniform([3, 2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
z = array_ops.zeros_like(x1),
return z, z + x1
self._test_loop_fn(loop_fn, 3)
def test_concat_v2(self):
x = random_ops.random_uniform([3, 2, 3])
y = random_ops.random_uniform([2, 3])
def loop_fn(i):
x1 = array_ops.gather(x, i)
return array_ops.concat(
[x1, x1, y], axis=0), array_ops.concat(
[x1, x1, y], axis=-1)
self._test_loop_fn(loop_fn, 3)
def test_unary_cwise_ops(self):
for op in [array_ops.identity, array_ops.stop_gradient]:
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 5])
g.watch(x)
# pylint: disable=cell-var-from-loop
def loop_fn(i):
with g:
x1 = array_ops.gather(x, i)
y = op(x1) + x1
loss = nn.l2_loss(y)
return op(x), y, g.gradient(loss, x1)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_identity_n(self):
x = random_ops.random_uniform([3, 4])
def loop_fn(i):
return array_ops.identity_n([x, array_ops.gather(x, i)])
self._test_loop_fn(loop_fn, 3)
def test_matrix_band_part(self):
x = random_ops.random_uniform([3, 4, 2, 2])
for num_lower, num_upper in ((0, -1), (-1, 0), (1, 1)):
# pylint: disable=cell-var-from-loop
def loop_fn(i):
return array_ops.matrix_band_part(
array_ops.gather(x, i),
num_lower=num_lower,
num_upper=num_upper)
# pylint: enable=cell-var-from-loop
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag(self):
x = random_ops.random_uniform([3, 2, 4])
def loop_fn(i):
diagonal = array_ops.gather(x, i)
if compat.forward_compatible(2019, 10, 31):
return array_ops.matrix_diag(diagonal, k=(0, 1), num_rows=4, num_cols=5)
return array_ops.matrix_diag(diagonal)
self._test_loop_fn(loop_fn, 3)
def test_matrix_diag_part(self):
x = random_ops.random_uniform([3, 4, 6])
def loop_fn(i):
input = array_ops.gather(x, i) # pylint: disable=redefined-builtin
if compat.forward_compatible(2019, 10, 31):
return array_ops.matrix_diag_part(input, k=(-2, 0), padding_value=3)
return array_ops.matrix_diag_part(input)
self._test_loop_fn(loop_fn, 3)
def test_matrix_set_diag(self):
matrices = random_ops.random_uniform([3, 4, 4])
diags = random_ops.random_uniform([3, 4])
if compat.forward_compatible(2019, 10, 31):
bands = random_ops.random_uniform([3, 3, 4])
def loop_fn(i):
matrix_i = array_ops.gather(matrices, i)
diag_i = array_ops.gather(diags, i)
results = [
array_ops.matrix_set_diag(matrix_i, diag_i),
array_ops.matrix_set_diag(matrices[0, ...], diag_i),
array_ops.matrix_set_diag(matrix_i, diags[0, ...])
]
if compat.forward_compatible(2019, 10, 31):
k = (-1, 1)
band_i = array_ops.gather(bands, i)
results.extend([
array_ops.matrix_set_diag(matrix_i, band_i, k=k),
array_ops.matrix_set_diag(matrices[0, ...], band_i, k=k),
array_ops.matrix_set_diag(matrix_i, bands[0, ...], k=k)
])
return results
self._test_loop_fn(loop_fn, 3)
def test_strided_slice(self):
with backprop.GradientTape(persistent=True) as g:
x = random_ops.random_uniform([3, 3, 4, 4, 2, 2, 2])
g.watch(x)
def loop_fn(i):
with g:
x_i = array_ops.gather(x, i)
y = x_i[:2, ::2, 1::3, ..., array_ops.newaxis, 1]
loss = nn.l2_loss(y)
return y, g.gradient(loss, x_i)
self._test_loop_fn(loop_fn, 3)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
  test.main()
|
# isomeasurer.py
# ALS 2017/06/01
import os
import astropy.units as u
from astropy.io import fits
import numpy as np
import astropy.table as at
import pickle
import scipy.ndimage as simg
from ..measurer import Measurer
from ... import tabtools
from . import polytools
from . import plottools
class isoMeasurer(Measurer):
    """Measurer subclass performing isophotal (contour-based) measurements."""

    def __init__(self, **kwargs):
        """
        child of Measurer
        do isophotal measurements
        """
        super(isoMeasurer, self).__init__(**kwargs)
        # Tag used to build measurement file names (msr_iso*).
        self.msrtype = 'iso'

    def get_fp_contours(self, imgtag='OIII5008_I', onlycenter=False, suffix=''):
        """Return the contour pickle file path.

        e.g., msr_iso-OIII5008_I{suffix}_contours.pkl
        or msr_iso-OIII5008_I{suffix}_contours-ctr.pkl (center-only)
        """
        if onlycenter:
            ctrtag = '-ctr'
        else:
            ctrtag = ''
        fp_root = self.get_fp_msrtagroot(imgtag=imgtag, suffix=suffix)
        return fp_root+'_contours{ctrtag}.pkl'.format(ctrtag=ctrtag)

    def make_measurements(self, imgtag='OIII5008_I', isocut=3.e-15*u.Unit('erg / (arcsec2 cm2 s)'), minarea=5, onlycenter=True, centerradius=5.*u.arcsec, plotsuffix='', savecontours=False, plotmsr=False, msrsuffix='', overwrite=False, append=False):
        """
        make measurements on a map and write to msr_iso.csv.
        if imgtag='OIII5008_I' then measure 'stamp-OIII5008_I.fits'

        Params
        ------
        self
        imgtag='OIII5008_I'
        isocut=1.e-15*u.Unit('erg / (arcsec2 cm2 s)'):
            isophote cut
        minarea=0:
            connected contour area (# pix) above the area is counted as part of the isophote measurement
        onlycenter=False:
            whether to consider only the center contours
        centerradius=2.*u.arcsec
        plotsuffix = '':
            plotsuffix label to be attach to the end of the plot or contour file names.
        savecontours=False
        plotmsr=False
        msrsuffix='':
            suffix label in the end of the measurement csv file: msr_iso_{msrsuffix}.csv.
        overwrite=False
        append=False

        Return
        ------
        status (bool)

        Write Output
        ------------
        e.g., msr_iso.csv
        """
        fn = self.get_fp_msr(msrsuffix=msrsuffix)
        # Row-identifying parameters: skip work if this row already exists.
        condi = {'imgtag': imgtag, 'isocut': isocut, 'minarea': minarea, 'onlycenter': onlycenter, 'centerradius': centerradius}
        if append or overwrite or (not tabtools.fn_has_row(fn, condi)):
            print("[isomeasurer] making measurement")
            img = self.get_stamp_img(imgtag=imgtag, wunit=True)
            xc, yc = self._get_xc_yc(img)
            # calc: fall back to an all-NaN row if the map has NaNs
            if np.all(~np.isnan(img)):
                contours = self._get_contours_from_img(img=img, isocut=isocut, xc=xc, yc=yc, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius)
                tab_msr = self._get_tab_measurements_from_contours(contours=contours, xc=xc, yc=yc)
            else:
                contours = []
                tab_msr = self._get_tab_measurements_nan()
            tab_params = self._get_tab_params(imgtag=imgtag, isocut=isocut, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius)
            tabout = at.hstack([tab_params, tab_msr])
            # output
            tabtools.write_row(fn=fn, row=tabout, condi=condi, overwrite=overwrite, append=append)
            # optional output
            if savecontours:
                fn_contours = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=plotsuffix)
                write_pickle(contours, fn_contours, overwrite=overwrite)
            if plotmsr:
                fn_plot = self.get_fp_msrplot(imgtag=imgtag, suffix=plotsuffix)
                plottools.make_plot_img_w_contours(fn_plot=fn_plot, img=img, contours=contours)
        else:
            print("[isomeasurer] skip making measurement as files exist")
        return os.path.isfile(fn)

    def make_visualpanel(self, fn=None, compo_bands ='gri', imgtag='OIII5008_I', onlycenter=True, minarea=5, centerradius=5.*u.arcsec, tocolorbar=True, totitle=True, fontsize=12, overwrite=False):
        """
        make panel figure to visualize the composit and the iso measurements
        saved to e.g., 'msr_iso-OIII5008_I_panel.pdf'

        Params
        ------
        fn = None: default: msr_iso_{imgtag}_panel.pdf
        compo_bands ='gri', imgtag='OIII5008_I', overwrite=False

        Return
        ------
        status
        """
        if fn is None:
            fn = self.get_fp_msrplot(imgtag=imgtag, suffix='_panel')
        else:
            fn = self.dir_obj+fn
        if not os.path.isfile(fn) or overwrite:
            print("[isomeasurer] making visual panel")
            # get files ready
            self.make_colorimg(bands=compo_bands, img_type='stamp', overwrite=False)
            # access data
            # NOTE(review): scipy.ndimage.imread was removed in scipy>=1.2;
            # may need migration to imageio.imread -- confirm pinned scipy version.
            img_compo = simg.imread(self.dir_obj+'color_stamp-{}.png'.format(compo_bands))
            img_map = self.get_stamp_img(imgtag=imgtag, wunit=False)
            # contours at the 3e-15 isophote (re-measure if missing)
            suffix = '_3e-15'
            isocut = 3.e-15*u.Unit('erg / (arcsec2 cm2 s)')
            fn_contours3 = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=suffix)
            if not os.path.isfile(fn_contours3):
                print("[isomeasurer] re-doing measurements to make contours required for visual panel plots")
                self.make_measurements(imgtag=imgtag, isocut=isocut, plotsuffix=suffix, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius, overwrite=True, savecontours=True, plotmsr=False)
            contours3 = read_pickle(fn_contours3)
            # contours at the 1e-15 isophote (re-measure if missing)
            suffix = '_1e-15'
            isocut = 1.e-15*u.Unit('erg / (arcsec2 cm2 s)')
            fn_contours1 = self.get_fp_contours(imgtag=imgtag, onlycenter=onlycenter, suffix=suffix)
            if not os.path.isfile(fn_contours1):
                print("[isomeasurer] re-doing measurements to make contours required for visual panel plots")
                self.make_measurements(imgtag=imgtag, isocut=isocut, plotsuffix=suffix, minarea=minarea, onlycenter=onlycenter, centerradius=centerradius, overwrite=True, savecontours=True, plotmsr=False)
            contours1 = read_pickle(fn_contours1)
            z = self.z
            pixsize = self.pixsize.to_value(u.arcsec)
            legend_suffix = ' at 3'
            name = self.obj.name[4:]
            # raw strings: LaTeX backslashes must not be treated as escapes
            title_compo = '${}~{}~{}~$'.format(compo_bands[0], compo_bands[1], compo_bands[2])+r'$\mathrm{Composite}$'
            title_map = r'$\mathrm{[OIII]\lambda 5007~Intensity}$'
            label_cbar = r'$I~[10^{-15}~\mathrm{erg~s^{-1}~cm^{-2}~arcsec^{-2}}]$'
            plottools.make_iso_visual_panel(fn, img_compo, img_map, contours1, contours3, z, pixsize, legend_suffix, name, title_compo, title_map, label_cbar, tocolorbar=tocolorbar, totitle=totitle, fontsize=fontsize)
        else:
            print("[isomeasurer] skip making visual panel as files exist")
        return os.path.isfile(fn)

    def _get_tab_params(self, imgtag, isocut, minarea, onlycenter, centerradius):
        """
        return a one row table of the measurement params
        """
        tab = at.Table([[imgtag], [str(isocut)], [minarea], [onlycenter], [str(centerradius)], ], names=['imgtag', 'isocut', 'minarea', 'onlycenter', 'centerradius', ])
        return tab

    def _get_tab_measurements_from_contours(self, contours, xc, yc):
        """
        calculate iso measurements from contours, return a table with pixel,
        arcsec, and proper-kpc versions of each shape parameter.
        """
        tab = polytools.ShapeParamsTab_from_contours(contours, xc, yc)
        # unit conversion: pixels -> arcsec
        area_ars = tab['area_pix'][0]*(self.pixsize/u.arcsec)**2
        dmax_ars = self._pix_to_theta(tab['dmax_pix'][0], wunit=False)
        rmax_ars = self._pix_to_theta(tab['rmax_pix'][0], wunit=False)
        dper_ars = self._pix_to_theta(tab['dper_pix'][0], wunit=False)
        # arcsec -> proper kpc at the object's redshift
        kpc_per_arcsec = np.array(self._get_kpc_proper_per_arcsec())
        area_kpc = area_ars * kpc_per_arcsec**2
        dmax_kpc = dmax_ars * kpc_per_arcsec
        rmax_kpc = rmax_ars * kpc_per_arcsec
        dper_kpc = dper_ars * kpc_per_arcsec
        tab_converted = at.Table(names=['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', ])
        tab_converted.add_row([area_kpc, dmax_kpc, rmax_kpc, dper_kpc, area_ars, dmax_ars, rmax_ars, dper_ars, ])
        tabout = at.hstack([tab_converted, tab])
        return tabout

    def _get_tab_measurements_nan(self):
        """
        return a tab measurement just like _get_tab_measurements_from_contours() but with entries all nan.
        """
        names = ['area_kpc', 'dmax_kpc', 'rmax_kpc', 'dper_kpc', 'area_ars', 'dmax_ars', 'rmax_ars', 'dper_ars', 'area_pix', 'dmax_pix', 'rmax_pix', 'dper_pix', 'theta_dmax', 'theta_rmax', 'theta_dper', 'aspectr']
        tabout = at.Table(names=names)
        tabout.add_row([np.nan for i in range(len(names))])
        return tabout

    def _get_contours_from_img(self, img, isocut, xc, yc, minarea=0., onlycenter=False, centerradius=2.*u.arcsec):
        """
        make contour at isocut of image as python pickle file (fn_contours)
        always overwrite

        Params
        ------
        self
        img (array)
        isocut (float or quantity):
            has to be of the same type of unit as image
        minarea (float):
            minimum area (pix) to be considered as contour patch
        onlycenter (bool):
            whether to take only center patches as patches (they all have to pass minarea test as well)
        centerradius (angular quantity):
            if onlycenter = True, then it sets the radius of the center area. only patches overlapping with that area will be considered.
        """
        # prep: normalize image by the cut so the threshold is 1.
        try:
            img.unit
        except AttributeError:
            # plain ndarray: assumes isocut is already in the image's units
            img_nparr = img/isocut
        else:
            img_nparr = np.array((img/isocut).to(u.dimensionless_unscaled))
        # find contours -- satisfy minarea
        contours = polytools.find_largecontours(img=img_nparr, threshold=1., minarea=minarea)
        if onlycenter:  # select only those at the center
            centerradius_pix = self._theta_to_pix(centerradius)
            contours = polytools.select_center_contours(contours, xc, yc, radius=centerradius_pix)
        return contours
def read_pickle(fn):
    """Load and return the object pickled in file `fn`."""
    with open(fn, 'rb') as handle:
        return pickle.load(handle)
def write_pickle(result, fn, overwrite=False):
    """Pickle `result` to file `fn`; leave an existing file untouched unless `overwrite`."""
    if overwrite or not os.path.isfile(fn):
        with open(fn, 'wb') as handle:
            pickle.dump(result, handle)
|
from numpy import *
from functions import sigmoid
set_printoptions(precision=4)  # print NumPy arrays with 4 decimal places
class RTF():
    '''
    Recurrent Basis/Transformation Function
    ---------------------------------------
    Turn x into phi(x) in a recurrent manner: a sparse random reservoir
    (echo-state-network style) whose recurrent weights are rescaled to a
    spectral radius of 1.
    '''

    W_hh = None  # hidden-to-hidden (recurrent) weights, shape (N_h-1, N_h-1)
    W_ih = None  # input-to-hidden weights, shape (N_i, N_h-1)
    z = None     # hidden state; z[0] is a constant bias node

    def __init__(self, N_i, N_h, f=sigmoid, density=0.1):
        '''
        N_i: number of inputs; N_h: number of hidden nodes (incl. bias);
        f: node non-linearity; density: connection probability per weight.
        '''
        self.f = f  # non-linearity
        self.N_i = N_i  # inputs
        # Generate nodes
        self.z = zeros(N_h)  # nodes
        self.z[0] = 1.  # output bias node
        # Generate sparse random weights (each entry kept with probability `density`)
        self.W_ih = random.randn(N_i, N_h-1) * 1.0 * (random.rand(N_i, N_h-1) <= density)
        self.W_hh = random.randn(N_h-1, N_h-1) * 1.0 * (random.rand(N_h-1, N_h-1) <= density)
        # Calculate the eigenvalues (V) of W_hh; U holds the eigenvectors.
        V, U = linalg.eig(self.W_hh)
        # Check that we won't be dividing by 0
        if max(absolute(V)) <= 0.:
            V = V + 0.01
        # Scale the initial weights to a spectral radius of 1.
        self.W_hh = self.W_hh / max(absolute(V))

    def store_y(self, y):
        # Placeholder: subclasses store the previous output for feedback.
        # (Converted from a Python-2 print statement; message typo fixed.)
        print("we can store y (the PREVIOUS output) so as to use it in the transformation")

    def phi(self, x):
        # Recurrent update of the non-bias nodes; z[0] stays as the bias.
        # No input bias term: deemed unnecessary for an ESN reservoir.
        self.z[1:] = self.f(dot(self.W_hh, self.z[1:]) + dot(self.W_ih.T, x))
        return self.z

    def reset(self):
        # Zero the state but keep the bias node active.
        self.z = self.z * 0.
        self.z[0] = 1.
class RTFv2(RTF):
    '''
    Like RTF, but includes (@TODO)
        - output feedback loop
        - regularization (noise added to the hidden state)
        - efficient sparse solution (each node is connected to exactly N other nodes) -- similar to Markov Chain code for Jaakko's seminar course.
    '''
    # W_oh: output-to-hidden feedback weights; y: previous output;
    # v: state-noise scale.
    W_oh = None
    y = None
    v = None
    def __init__(self, N_i, N_h, N_o, f=sigmoid, density=0.1, state_noise=0.01):
        # N_o: number of outputs; state_noise: std of noise added in phi().
        RTF.__init__(self,N_i,N_h,f,density)
        self.N_o = N_o # outputs
        self.W_oh = random.randn(N_o,N_h-1) * 1.0 * (random.rand(N_o,N_h-1) <= density) # NEW
        self.v = state_noise
    def store_y(self,y):
        # Remember the previous output so phi() can feed it back.
        self.y = y
    def phi(self,x):
        # Recurrent update with output feedback plus Gaussian state noise.
        # NOTE(review): this updates z[0:-1] while the base class updates
        # z[1:] and reset() keeps z[0] as the bias node -- the bias element
        # appears to be overwritten here; confirm which index is the bias.
        self.z[0:-1] = self.f( dot(self.W_hh, self.z[0:-1]) + dot(self.W_ih.T, x) + dot(self.W_oh.T, self.y)) + random.randn(len(self.z)-1) * self.v
        return self.z
def demo():
D = 2
H = 10
N = 100
rtf = RTF(D,H,f=sigmoid,density=0.2)
#print rtf.W
X = random.randn(N,D) #(random.rand(N,D) > 0.5) * 1.
X[:,0] = 1.
X[10:20,:] = 0.
X[40:60,:] = 0.
X[80:100,:] = 0.
Z = zeros((N,H))
for i in range(N):
Z[i] = rtf.phi(X[i])
import matplotlib
matplotlib.use('Qt4Agg')
from matplotlib.pyplot import *
fig = figure()
ax = fig.add_subplot(111)
ax.set_xlim([0,N])
ax.set_ylim([-0.1,1.1])
lines = [None for i in range(H+D)]
for j in range(D):
lines[j], = ax.plot([0,0],"k:",label=""+str(j),linewidth=2)
for j in range(D,H+D):
lines[j], = ax.plot([0,0],label=""+str(j),linewidth=2)
ion()
for lim in range(1,N):
for j in range(D):
lines[j].set_data(range(0,lim),X[0:lim,j])
for j in range(H):
lines[j].set_data(range(0,lim),Z[0:lim,j])
pause(0.1)
grid(True)
legend()
show()
ioff()
# Run the interactive demo when this module is executed as a script.
if __name__ == '__main__':
    demo()
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import decimal
import unittest
import pytz
from pyflink.table import DataTypes, Row
from pyflink.table.tests.test_udf import SubtractOne
from pyflink.table.udf import udf
from pyflink.testing import source_sink_utils
from pyflink.testing.test_case_utils import PyFlinkStreamTableTestCase, \
PyFlinkBlinkBatchTableTestCase, PyFlinkBlinkStreamTableTestCase
class PandasUDFTests(unittest.TestCase):
    """Unit tests for pandas UDF declaration-time validation."""

    def test_non_exist_udf_type(self):
        # Declaring a UDF with an unknown udf_type must fail fast.
        expected_message = 'The udf_type must be one of \'general, pandas\''
        with self.assertRaisesRegex(ValueError, expected_message):
            udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT(), udf_type="non-exist")
class PandasUDFITTests(object):
    """Planner-independent integration tests for (vectorized) pandas UDFs.

    Mixed into the concrete TableTestCase subclasses below, which provide
    ``self.t_env`` and ``assert_equals``.
    """

    def test_basic_functionality(self):
        """Mixes pandas UDFs and a general Python UDF in one query."""
        # pandas UDF
        self.t_env.register_function(
            "add_one",
            udf(lambda i: i + 1, DataTypes.BIGINT(), DataTypes.BIGINT(), udf_type="pandas"))
        self.t_env.register_function("add", add)
        # general Python UDF
        self.t_env.register_function(
            "subtract_one", udf(SubtractOne(), DataTypes.BIGINT(), DataTypes.BIGINT()))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd'],
            [DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT(), DataTypes.BIGINT()])
        self.t_env.register_table_sink("Results", table_sink)
        t = self.t_env.from_elements([(1, 2, 3), (2, 5, 6), (3, 1, 9)], ['a', 'b', 'c'])
        t.where("add_one(b) <= 3") \
            .select("a, b + 1, add(a + 1, subtract_one(c)) + 2, add(add_one(a), 1L)") \
            .insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual, ["1,3,6,3", "3,2,14,5"])

    def test_all_data_types(self):
        """Checks the pandas representation of every supported data type.

        Each helper below asserts that its column arrives as a pd.Series
        of the expected element type, then returns it unchanged.
        """
        import pandas as pd
        import numpy as np

        def tinyint_func(tinyint_param):
            assert isinstance(tinyint_param, pd.Series)
            assert isinstance(tinyint_param[0], np.int8), \
                'tinyint_param of wrong type %s !' % type(tinyint_param[0])
            return tinyint_param

        def smallint_func(smallint_param):
            assert isinstance(smallint_param, pd.Series)
            assert isinstance(smallint_param[0], np.int16), \
                'smallint_param of wrong type %s !' % type(smallint_param[0])
            assert smallint_param[0] == 32767, 'smallint_param of wrong value %s' % smallint_param
            return smallint_param

        def int_func(int_param):
            assert isinstance(int_param, pd.Series)
            assert isinstance(int_param[0], np.int32), \
                'int_param of wrong type %s !' % type(int_param[0])
            assert int_param[0] == -2147483648, 'int_param of wrong value %s' % int_param
            return int_param

        def bigint_func(bigint_param):
            assert isinstance(bigint_param, pd.Series)
            assert isinstance(bigint_param[0], np.int64), \
                'bigint_param of wrong type %s !' % type(bigint_param[0])
            return bigint_param

        def boolean_func(boolean_param):
            assert isinstance(boolean_param, pd.Series)
            assert isinstance(boolean_param[0], np.bool_), \
                'boolean_param of wrong type %s !' % type(boolean_param[0])
            return boolean_param

        def float_func(float_param):
            assert isinstance(float_param, pd.Series)
            assert isinstance(float_param[0], np.float32), \
                'float_param of wrong type %s !' % type(float_param[0])
            return float_param

        def double_func(double_param):
            assert isinstance(double_param, pd.Series)
            assert isinstance(double_param[0], np.float64), \
                'double_param of wrong type %s !' % type(double_param[0])
            return double_param

        def varchar_func(varchar_param):
            assert isinstance(varchar_param, pd.Series)
            assert isinstance(varchar_param[0], str), \
                'varchar_param of wrong type %s !' % type(varchar_param[0])
            return varchar_param

        def varbinary_func(varbinary_param):
            assert isinstance(varbinary_param, pd.Series)
            assert isinstance(varbinary_param[0], bytes), \
                'varbinary_param of wrong type %s !' % type(varbinary_param[0])
            return varbinary_param

        def decimal_func(decimal_param):
            assert isinstance(decimal_param, pd.Series)
            assert isinstance(decimal_param[0], decimal.Decimal), \
                'decimal_param of wrong type %s !' % type(decimal_param[0])
            return decimal_param

        def date_func(date_param):
            assert isinstance(date_param, pd.Series)
            assert isinstance(date_param[0], datetime.date), \
                'date_param of wrong type %s !' % type(date_param[0])
            return date_param

        def time_func(time_param):
            assert isinstance(time_param, pd.Series)
            assert isinstance(time_param[0], datetime.time), \
                'time_param of wrong type %s !' % type(time_param[0])
            return time_param

        timestamp_value = datetime.datetime(1970, 1, 1, 0, 0, 0, 123000)

        def timestamp_func(timestamp_param):
            assert isinstance(timestamp_param, pd.Series)
            assert isinstance(timestamp_param[0], datetime.datetime), \
                'timestamp_param of wrong type %s !' % type(timestamp_param[0])
            assert timestamp_param[0] == timestamp_value, \
                'timestamp_param is wrong value %s, should be %s!' % (timestamp_param[0],
                                                                     timestamp_value)
            return timestamp_param

        def array_func(array_param):
            assert isinstance(array_param, pd.Series)
            assert isinstance(array_param[0], np.ndarray), \
                'array_param of wrong type %s !' % type(array_param[0])
            return array_param

        def nested_array_func(nested_array_param):
            assert isinstance(nested_array_param, pd.Series)
            assert isinstance(nested_array_param[0], np.ndarray), \
                'nested_array_param of wrong type %s !' % type(nested_array_param[0])
            # flatten: return the inner array as the result series
            return pd.Series(nested_array_param[0])

        def row_func(row_param):
            assert isinstance(row_param, pd.Series)
            assert isinstance(row_param[0], dict), \
                'row_param of wrong type %s !' % type(row_param[0])
            return row_param

        # register one validating pandas UDF per data type
        self.t_env.register_function(
            "tinyint_func",
            udf(tinyint_func, [DataTypes.TINYINT()], DataTypes.TINYINT(), udf_type="pandas"))
        self.t_env.register_function(
            "smallint_func",
            udf(smallint_func, [DataTypes.SMALLINT()], DataTypes.SMALLINT(), udf_type="pandas"))
        self.t_env.register_function(
            "int_func",
            udf(int_func, [DataTypes.INT()], DataTypes.INT(), udf_type="pandas"))
        self.t_env.register_function(
            "bigint_func",
            udf(bigint_func, [DataTypes.BIGINT()], DataTypes.BIGINT(), udf_type="pandas"))
        self.t_env.register_function(
            "boolean_func",
            udf(boolean_func, [DataTypes.BOOLEAN()], DataTypes.BOOLEAN(), udf_type="pandas"))
        self.t_env.register_function(
            "float_func",
            udf(float_func, [DataTypes.FLOAT()], DataTypes.FLOAT(), udf_type="pandas"))
        self.t_env.register_function(
            "double_func",
            udf(double_func, [DataTypes.DOUBLE()], DataTypes.DOUBLE(), udf_type="pandas"))
        self.t_env.register_function(
            "varchar_func",
            udf(varchar_func, [DataTypes.STRING()], DataTypes.STRING(), udf_type="pandas"))
        self.t_env.register_function(
            "varbinary_func",
            udf(varbinary_func, [DataTypes.BYTES()], DataTypes.BYTES(), udf_type="pandas"))
        self.t_env.register_function(
            "decimal_func",
            udf(decimal_func, [DataTypes.DECIMAL(38, 18)], DataTypes.DECIMAL(38, 18),
                udf_type="pandas"))
        self.t_env.register_function(
            "date_func",
            udf(date_func, [DataTypes.DATE()], DataTypes.DATE(), udf_type="pandas"))
        self.t_env.register_function(
            "time_func",
            udf(time_func, [DataTypes.TIME()], DataTypes.TIME(), udf_type="pandas"))
        self.t_env.register_function(
            "timestamp_func",
            udf(timestamp_func, [DataTypes.TIMESTAMP(3)], DataTypes.TIMESTAMP(3),
                udf_type="pandas"))
        self.t_env.register_function(
            "array_str_func",
            udf(array_func, [DataTypes.ARRAY(DataTypes.STRING())],
                DataTypes.ARRAY(DataTypes.STRING()), udf_type="pandas"))
        self.t_env.register_function(
            "array_timestamp_func",
            udf(array_func, [DataTypes.ARRAY(DataTypes.TIMESTAMP(3))],
                DataTypes.ARRAY(DataTypes.TIMESTAMP(3)), udf_type="pandas"))
        self.t_env.register_function(
            "array_int_func",
            udf(array_func, [DataTypes.ARRAY(DataTypes.INT())],
                DataTypes.ARRAY(DataTypes.INT()), udf_type="pandas"))
        self.t_env.register_function(
            "nested_array_func",
            udf(nested_array_func, [DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))],
                DataTypes.ARRAY(DataTypes.STRING()), udf_type="pandas"))
        row_type = DataTypes.ROW(
            [DataTypes.FIELD("f1", DataTypes.INT()),
             DataTypes.FIELD("f2", DataTypes.STRING()),
             DataTypes.FIELD("f3", DataTypes.TIMESTAMP(3)),
             DataTypes.FIELD("f4", DataTypes.ARRAY(DataTypes.INT()))])
        self.t_env.register_function(
            "row_func",
            udf(row_func, [row_type], row_type, udf_type="pandas"))
        table_sink = source_sink_utils.TestAppendSink(
            ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
             'r', 's', 't', 'u'],
            [DataTypes.TINYINT(), DataTypes.SMALLINT(), DataTypes.INT(), DataTypes.BIGINT(),
             DataTypes.BOOLEAN(), DataTypes.BOOLEAN(), DataTypes.FLOAT(), DataTypes.DOUBLE(),
             DataTypes.STRING(), DataTypes.STRING(), DataTypes.BYTES(), DataTypes.DECIMAL(38, 18),
             DataTypes.DECIMAL(38, 18), DataTypes.DATE(), DataTypes.TIME(), DataTypes.TIMESTAMP(3),
             DataTypes.ARRAY(DataTypes.STRING()), DataTypes.ARRAY(DataTypes.TIMESTAMP(3)),
             DataTypes.ARRAY(DataTypes.INT()),
             DataTypes.ARRAY(DataTypes.STRING()), row_type])
        self.t_env.register_table_sink("Results", table_sink)
        # one source row exercising every column type at once
        t = self.t_env.from_elements(
            [(1, 32767, -2147483648, 1, True, False, 1.0, 1.0, 'hello', '中文',
              bytearray(b'flink'), decimal.Decimal('1000000000000000000.05'),
              decimal.Decimal('1000000000000000000.05999999999999999899999999999'),
              datetime.date(2014, 9, 13), datetime.time(hour=1, minute=0, second=1),
              timestamp_value, ['hello', '中文', None], [timestamp_value], [1, 2],
              [['hello', '中文', None]], Row(1, 'hello', timestamp_value, [1, 2]))],
            DataTypes.ROW(
                [DataTypes.FIELD("a", DataTypes.TINYINT()),
                 DataTypes.FIELD("b", DataTypes.SMALLINT()),
                 DataTypes.FIELD("c", DataTypes.INT()),
                 DataTypes.FIELD("d", DataTypes.BIGINT()),
                 DataTypes.FIELD("e", DataTypes.BOOLEAN()),
                 DataTypes.FIELD("f", DataTypes.BOOLEAN()),
                 DataTypes.FIELD("g", DataTypes.FLOAT()),
                 DataTypes.FIELD("h", DataTypes.DOUBLE()),
                 DataTypes.FIELD("i", DataTypes.STRING()),
                 DataTypes.FIELD("j", DataTypes.STRING()),
                 DataTypes.FIELD("k", DataTypes.BYTES()),
                 DataTypes.FIELD("l", DataTypes.DECIMAL(38, 18)),
                 DataTypes.FIELD("m", DataTypes.DECIMAL(38, 18)),
                 DataTypes.FIELD("n", DataTypes.DATE()),
                 DataTypes.FIELD("o", DataTypes.TIME()),
                 DataTypes.FIELD("p", DataTypes.TIMESTAMP(3)),
                 DataTypes.FIELD("q", DataTypes.ARRAY(DataTypes.STRING())),
                 DataTypes.FIELD("r", DataTypes.ARRAY(DataTypes.TIMESTAMP(3))),
                 DataTypes.FIELD("s", DataTypes.ARRAY(DataTypes.INT())),
                 DataTypes.FIELD("t", DataTypes.ARRAY(DataTypes.ARRAY(DataTypes.STRING()))),
                 DataTypes.FIELD("u", row_type)]))
        t.select("tinyint_func(a),"
                 "smallint_func(b),"
                 "int_func(c),"
                 "bigint_func(d),"
                 "boolean_func(e),"
                 "boolean_func(f),"
                 "float_func(g),"
                 "double_func(h),"
                 "varchar_func(i),"
                 "varchar_func(j),"
                 "varbinary_func(k),"
                 "decimal_func(l),"
                 "decimal_func(m),"
                 "date_func(n),"
                 "time_func(o),"
                 "timestamp_func(p),"
                 "array_str_func(q),"
                 "array_timestamp_func(r),"
                 "array_int_func(s),"
                 "nested_array_func(t),"
                 "row_func(u)") \
            .insert_into("Results")
        self.t_env.execute("test")
        actual = source_sink_utils.results()
        self.assert_equals(actual,
                           ["1,32767,-2147483648,1,true,false,1.0,1.0,hello,中文,"
                            "[102, 108, 105, 110, 107],1000000000000000000.050000000000000000,"
                            "1000000000000000000.059999999999999999,2014-09-13,01:00:01,"
                            "1970-01-01 00:00:00.123,[hello, 中文, null],[1970-01-01 00:00:00.123],"
                            "[1, 2],[hello, 中文, null],1,hello,1970-01-01 00:00:00.123,[1, 2]"])
class BlinkPandasUDFITTests(object):

    def test_data_types_only_supported_in_blink_planner(self):
        """Check that TIMESTAMP_WITH_LOCAL_TIME_ZONE round-trips through a
        chained pandas UDF on the blink planner."""
        import pandas as pd

        tz_name = self.t_env.get_config().get_local_timezone()
        expected_ts = pytz.timezone(tz_name).localize(
            datetime.datetime(1970, 1, 1, 0, 0, 0, 123000))

        def local_zoned_timestamp_func(series):
            # The pandas runner hands each column in as a pd.Series.
            assert isinstance(series, pd.Series)
            assert isinstance(series[0], datetime.datetime), \
                'local_zoned_timestamp_param of wrong type %s !' % type(
                    series[0])
            assert series[0] == expected_ts, \
                'local_zoned_timestamp_param is wrong value %s, %s!' % \
                (series[0], expected_ts)
            return series

        self.t_env.register_function(
            "local_zoned_timestamp_func",
            udf(local_zoned_timestamp_func,
                [DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)],
                DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3),
                udf_type="pandas"))

        sink = source_sink_utils.TestAppendSink(
            ['a'], [DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3)])
        self.t_env.register_table_sink("Results", sink)

        source = self.t_env.from_elements(
            [(expected_ts,)],
            DataTypes.ROW([DataTypes.FIELD("a", DataTypes.TIMESTAMP_WITH_LOCAL_TIME_ZONE(3))]))
        source.select("local_zoned_timestamp_func(local_zoned_timestamp_func(a))") \
            .insert_into("Results")
        self.t_env.execute("test")

        actual = source_sink_utils.results()
        self.assert_equals(actual, ["1970-01-01T00:00:00.123Z"])
class StreamPandasUDFITTests(PandasUDFITTests,
                             PyFlinkStreamTableTestCase):
    """Run the pandas UDF integration tests on the legacy stream planner."""
    pass
class BlinkBatchPandasUDFITTests(PandasUDFITTests,
                                 BlinkPandasUDFITTests,
                                 PyFlinkBlinkBatchTableTestCase):
    """Run the pandas UDF integration tests on the blink batch planner."""
    pass
class BlinkStreamPandasUDFITTests(PandasUDFITTests,
                                  BlinkPandasUDFITTests,
                                  PyFlinkBlinkStreamTableTestCase):
    """Run the pandas UDF integration tests on the blink stream planner."""
    pass
@udf(input_types=[DataTypes.BIGINT(), DataTypes.BIGINT()], result_type=DataTypes.BIGINT(),
     udf_type='pandas')
def add(i, j):
    """Pandas scalar UDF: element-wise sum of two BIGINT columns."""
    return i + j
if __name__ == '__main__':
    import unittest

    try:
        # Prefer xmlrunner when available so CI can collect XML test reports.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        # Fall back to the default text runner.
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
#!/bin/env python
from __future__ import absolute_import
__author__ = "Gina Haeussge <osd@foosel.net>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The RaionPi Project - Released under terms of the AGPLv3 License"
import errno
import subprocess
import sys
def _get_git_executables():
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
return GITS
def _git(args, cwd, hide_stderr=False, verbose=False, git_executable=None):
if git_executable is not None:
commands = [git_executable]
else:
commands = _get_git_executables()
for c in commands:
try:
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % args[0])
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return p.returncode, stdout
def _python(args, cwd, python_executable, sudo=False):
command = [python_executable] + args
if sudo:
command = ["sudo"] + command
try:
p = subprocess.Popen(command, cwd=cwd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except:
return None, None
stdout = p.communicate()[0].strip()
if sys.version >= "3":
stdout = stdout.decode()
return p.returncode, stdout
def update_source(git_executable, folder, target, force=False):
    """Update the git checkout in ``folder``.

    Rescues any local working-tree changes into a timestamped patch file,
    hard-resets the tree, pulls, and optionally resets to ``target``.

    :param git_executable: explicit git binary or None for autodetection
    :param folder: checkout directory to update
    :param target: commit/tag to reset to when ``force`` is set
    :param force: pin the checkout to exactly ``target`` (nothing newer)
    :raise RuntimeError: if any git invocation fails
    """
    print(">>> Running: git diff --shortstat")
    returncode, stdout = _git(["diff", "--shortstat"], folder, git_executable=git_executable)
    if returncode != 0:
        raise RuntimeError("Could not update, \"git diff\" failed with returncode %d: %s" % (returncode, stdout))
    if stdout and stdout.strip():
        # we got changes in the working tree, maybe from the user, so we'll now rescue those into a patch
        import time
        import os
        timestamp = time.strftime("%Y%m%d%H%M")
        patch = os.path.join(folder, "%s-preupdate.patch" % timestamp)
        # Fix: report the actual patch path, not just the bare timestamp.
        print(">>> Running: git diff and saving output to %s" % patch)
        returncode, stdout = _git(["diff"], folder, git_executable=git_executable)
        if returncode != 0:
            raise RuntimeError("Could not update, installation directory was dirty and state could not be persisted as a patch to %s" % patch)
        with open(patch, "wb") as f:
            # Fix: on Python 3 _git returns str, which cannot be written to
            # a binary file -- encode first; keep bytes untouched on Python 2.
            if isinstance(stdout, bytes):
                f.write(stdout)
            else:
                f.write(stdout.encode("utf-8"))
        print(">>> Running: git reset --hard")
        returncode, stdout = _git(["reset", "--hard"], folder, git_executable=git_executable)
        if returncode != 0:
            raise RuntimeError("Could not update, \"git reset --hard\" failed with returncode %d: %s" % (returncode, stdout))
    print(">>> Running: git pull")
    returncode, stdout = _git(["pull"], folder, git_executable=git_executable)
    if returncode != 0:
        raise RuntimeError("Could not update, \"git pull\" failed with returncode %d: %s" % (returncode, stdout))
    print(stdout)

    if force:
        reset_command = ["reset"]
        reset_command += [target]

        print(">>> Running: git %s" % " ".join(reset_command))
        returncode, stdout = _git(reset_command, folder, git_executable=git_executable)
        if returncode != 0:
            raise RuntimeError("Error while updating, \"git %s\" failed with returncode %d: %s" % (" ".join(reset_command), returncode, stdout))
        print(stdout)
def install_source(python_executable, folder, user=False, sudo=False):
    """Run ``setup.py clean`` followed by ``setup.py install`` in ``folder``.

    A failed clean is only reported; a failed install raises RuntimeError.
    """
    print(">>> Running: python setup.py clean")
    rc, out = _python(["setup.py", "clean"], folder, python_executable)
    if rc != 0:
        # Clean failures are non-fatal -- report and keep going.
        print("\"python setup.py clean\" failed with returncode %d: %s" % (rc, out))
        print("Continuing anyways")
    print(out)

    print(">>> Running: python setup.py install")
    install_args = ["setup.py", "install"] + (["--user"] if user else [])
    rc, out = _python(install_args, folder, python_executable, sudo=sudo)
    if rc != 0:
        raise RuntimeError("Could not update, \"python setup.py install\" failed with returncode %d: %s" % (rc, out))
    print(out)
def parse_arguments():
    """Parse and return the command line arguments for the updater."""
    import argparse

    argparser = argparse.ArgumentParser(prog="update-octoprint.py")

    # Options first ...
    argparser.add_argument("--git", action="store", type=str, dest="git_executable",
                           help="Specify git executable to use")
    argparser.add_argument("--python", action="store", type=str, dest="python_executable",
                           help="Specify python executable to use")
    argparser.add_argument("--force", action="store_true", dest="force",
                           help="Set this to force the update to only the specified version (nothing newer)")
    argparser.add_argument("--sudo", action="store_true", dest="sudo",
                           help="Install with sudo")
    argparser.add_argument("--user", action="store_true", dest="user",
                           help="Install to the user site directory instead of the general site directory")

    # ... then the two required positionals.
    argparser.add_argument("folder", type=str,
                           help="Specify the base folder of the RaionPi installation to update")
    argparser.add_argument("target", type=str,
                           help="Specify the commit or tag to which to update")

    return argparser.parse_args()
def main():
    """Entry point: parse arguments, check the folder, update and reinstall."""
    import os

    args = parse_arguments()

    # Fall back to autodetection / the running interpreter when the
    # corresponding option was not given.
    git_executable = args.git_executable or None
    python_executable = args.python_executable or sys.executable

    folder = args.folder
    target = args.target

    if not os.access(folder, os.W_OK):
        raise RuntimeError("Could not update, base folder is not writable")

    update_source(git_executable, folder, target, force=args.force)
    install_source(python_executable, folder, user=args.user, sudo=args.sudo)
# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
|
"""
Adapt/extend autotest.client.test.test for Docker test sub-framework
Implement docker subtest base to avoid circular dependences in dockertest core
modules.
"""
# Pylint runs from a different directory, it's fine to import this way
# pylint: disable=W0403
import logging
import traceback
from xceptions import DockerTestFail
from config import get_as_list
class SubBase(object):

    """
    Methods/attributes common to Subtest & SubSubtest classes

    :note: This class is indirectly referenced by the control-file
           so it cannot contain anything dockertest-implementation
           specific.
    """

    #: Configuration section for subclass, auto-generated by ``__init__``.
    config_section = None

    #: Configuration dictionary (read-only for instances)
    config = None

    #: Unique temporary directory for this instance (automatically cleaned up)
    #: **warning**: DO NOT ASSUME DIRECTORY WILL BE EMPTY!!!
    tmpdir = None

    #: Number of additional space/tab characters to prefix when logging
    n_spaces = 16  # date/timestamp length

    #: Number of additional space/tab characters to prefix when logging
    n_tabs = 1     # one-level

    #: Default message logged when entering each standard test step
    step_log_msgs = {
        "initialize": "initialize()",
        "run_once": "run_once()",
        "postprocess": "postprocess()",
        "cleanup": "cleanup()"
    }

    def __init__(self, *args, **dargs):
        super(SubBase, self).__init__(*args, **dargs)
        # Copy the class-level dict so per-instance edits don't leak into
        # other tests sharing the class attribute.
        self.step_log_msgs = self.step_log_msgs.copy()

    def initialize(self):
        """
        Called every time the test is run.
        """
        self.log_step_msg('initialize')
        # Issue warnings for failed to customize suggested options
        not_customized = self.config.get('__example__', None)
        # Fix: was ``is not ''`` -- an identity comparison with a string
        # literal that only worked by accident of CPython interning.
        if not_customized is not None and not_customized != '':
            self.logdebug("WARNING: Recommended options not customized:")
            for nco in get_as_list(not_customized):
                self.logdebug("WARNING: %s" % nco)
            self.logwarning("WARNING: Test results may be externally "
                            "dependent! (See debug log for details)")
        msg = "%s configuration:\n" % self.__class__.__name__
        for key, value in self.config.items():
            if key == '__example__' or key.startswith('envcheck'):
                continue
            msg += '\t\t%s = "%s"\n' % (key, value)
        self.logdebug(msg)

    def run_once(self):
        """
        Called once only to exercise subject of sub-subtest
        """
        self.log_step_msg('run_once')

    def postprocess(self):
        """
        Called to process results of subject
        """
        self.log_step_msg('postprocess')

    def cleanup(self):
        """
        Always called, before any exceptions thrown are re-raised.
        """
        self.log_step_msg('cleanup')

    def log_step_msg(self, stepname):
        """
        Send message stored in ``step_log_msgs`` key ``stepname`` to loginfo
        """
        msg = self.step_log_msgs.get(stepname)
        if msg:
            self.loginfo(msg)

    @staticmethod
    def failif(condition, reason=None):
        """
        Convenience method for subtests to avoid importing ``TestFail``
        exception

        :param condition: Boolean condition, fail test if True.
        :param reason: Helpful text describing why the test failed
        :raise DockerTestFail: If condition evaluates ``True``
        """
        if reason is None:
            reason = "Failed test condition"
        if bool(condition):
            raise DockerTestFail(reason)

    @staticmethod
    def failif_ne(actual, expected, reason=None):
        """
        Convenience method for subtests to compare two values and
        fail if they differ. Failure message will include the expected
        and actual values for ease of debugging.

        :param actual: value being tested
        :param expected: value to which we compare.
        :param reason: Helpful text describing why the test failed
        :raise DockerTestFail: If actual != expected
        """
        if actual == expected:
            return
        if reason is None:
            reason = "Failed test condition"
        # By default, quote each value. This is especially helpful when
        # actual or expected is the empty string or a string with spaces.
        # But if both are numeric types the quotes distract, so remove them.
        arg = "'{}'"
        # NOTE(review): ``long`` exists only on Python 2; this module targets
        # the py2 autotest environment, so it is intentionally kept.
        if all(isinstance(x, (int, long, float)) for x in [actual, expected]):
            arg = "{}"
        spec = "{}: expected " + arg + "; got " + arg
        raise DockerTestFail(spec.format(reason, expected, actual))

    @staticmethod
    def failif_not_in(needle, haystack, description=None):
        """
        Convenience method for subtests to test for an expected substring
        being contained in a larger string, e.g. to look for XYZ in a
        command's stdout/stderr.

        :param needle: the string you're looking for
        :param haystack: the actual string, e.g stdout results from a command
        :param description: description of haystack, e.g. 'stdout from foo'
        :raise DockerTestFail: if needle is not found in haystack
        """
        if description is None:
            description = 'string'
        if needle in haystack:
            return
        raise DockerTestFail("Expected string '%s' not in %s '%s'"
                             % (needle, description, haystack))

    @classmethod
    def log_x(cls, lvl, msg, *args):
        """
        Send ``msg`` & ``args`` through to logging module function with
        name ``lvl``
        """
        meth = getattr(logging, lvl)
        testname = cls.__name__
        return meth("%s%s: %s" % ("\t" * cls.n_tabs, testname, msg), *args)

    @classmethod
    def log_xn(cls, lvl, msg, *args):
        """
        Multiline-split and send msg & args through to logging module

        :param lvl: logging method name (``'debug'``, ``'info'``, etc.)
        :param msg: Message format-string
        """
        # date, loglevel, this module offset
        newline = '\n' + ' ' * cls.n_spaces + '\t' * cls.n_tabs
        newline += " " * (len(cls.__name__) + 2)  # cls name + ': '
        try:
            msg = (str(msg) % args).replace('\n', newline)
        except TypeError:
            # Fix: was ``args is tuple()`` -- an identity check that only
            # worked because CPython interns the empty tuple; use equality.
            if args == tuple():
                cls.logwarning("Following message contains format strings but "
                               "has no arguments:")
                msg = str(msg).replace('\n', newline)
            else:
                raise TypeError("Not all arguments converted during formatting"
                                ": msg='%s', args=%s" % (msg, args))
        return cls.log_x(lvl, msg)

    @classmethod
    def logdebug(cls, message, *args):
        r"""
        Log a DEBUG level message to the controlling terminal **only**

        :param message: Same as ``logging.debug()``
        :param \*args: Same as ``logging.debug()``
        """
        # Never split over multiple lines
        cls.log_x('debug', message, *args)

    @classmethod
    def loginfo(cls, message, *args):
        r"""
        Log a INFO level message to the controlling terminal **only**

        :param message: Same as ``logging.info()``
        :param \*args: Same as ``logging.info()``
        """
        cls.log_xn('info', message, *args)

    @classmethod
    def logwarning(cls, message, *args):
        r"""
        Log a WARNING level message to the controlling terminal **only**

        :param message: Same as ``logging.warning()``
        :param \*args: Same as ``logging.warning()``
        """
        # NOTE(review): 'warn' is the deprecated alias of 'warning' in the
        # logging module; kept for py2 autotest compatibility.
        cls.log_xn('warn', message, *args)

    @classmethod
    def logerror(cls, message, *args):
        r"""
        Log a ERROR level message to the controlling terminal **only**

        :param message: Same as ``logging.error()``
        :param \*args: Same as ``logging.error()``
        """
        cls.log_xn('error', message, *args)

    @classmethod
    def logtraceback(cls, name, exc_info, error_source, detail):
        r"""
        Log error to error, traceback to debug, of controlling terminal
        **only**
        """
        error_head = ("%s failed to %s\n%s\n%s" % (name, error_source,
                                                   detail.__class__.__name__,
                                                   detail))
        error_tb = traceback.format_exception(exc_info[0],
                                              exc_info[1],
                                              exc_info[2])
        error_tb = "".join(error_tb).strip() + "\n"
        cls.logerror(error_head)
        cls.logdebug(error_tb)
|
# coding: utf-8
from sqlalchemy import Column, Integer, String, Boolean, Text, text
from sqlalchemy.ext.declarative import declarative_base
# Declarative base class and shared metadata for all ORM models below.
Base = declarative_base()
metadata = Base.metadata
class User(Base):
    """ORM model for the ``users`` table (utf8mb4 collation)."""
    __tablename__ = 'users'

    Id = Column(Integer, primary_key=True)
    # Login credentials and session tokens
    Username = Column(String(12, u'utf8mb4_unicode_ci'), nullable=False, unique=True)
    Password = Column(String(128, u'utf8mb4_unicode_ci'), nullable=False)
    Swid = Column(String(39, u'utf8mb4_unicode_ci'), nullable=False)
    LoginKey = Column(String(32, u'utf8mb4_unicode_ci'))
    ConfirmationHash = Column(String(128, u'utf8mb4_unicode_ci'))
    # Avatar sprite id plus a JSON blob of rendering attributes
    Avatar = Column(Integer, nullable=False)
    AvatarAttributes = Column(String(98, u'utf8mb4_unicode_ci'), nullable=False,server_default=text(
        """'{"spriteScale":100,"spriteSpeed":100,"ignoresBlockLayer":false,"invisible":false,"floating":false}'"""))
    Coins = Column(Integer, nullable=False, server_default=text("'10000'"))
    Moderator = Column(Boolean, nullable=False, default=False)
    # Serialized inventory contents
    Inventory = Column(Text(collation=u'utf8mb4_unicode_ci'), nullable=False)
    # Currently equipped item ids per slot (0 = nothing equipped)
    Color = Column(Integer, nullable=False, server_default=text("'1'"))
    Head = Column(Integer, nullable=False, server_default=text("'0'"))
    Face = Column(Integer, nullable=False, server_default=text("'0'"))
    Neck = Column(Integer, nullable=False, server_default=text("'0'"))
    Body = Column(Integer, nullable=False, server_default=text("'0'"))
    Hands = Column(Integer, nullable=False, server_default=text("'0'"))
    Feet = Column(Integer, nullable=False, server_default=text("'0'"))
    Photo = Column(Integer, nullable=False, server_default=text("'0'"))
    Pin = Column(Integer, nullable=False, server_default=text("'0'"))
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
from PIL import Image, ImageDraw, ImageFont
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from braces.views import LoginRequiredMixin
from easy_thumbnails.files import get_thumbnailer
from .models import User
class UserBadgeJPEG(LoginRequiredMixin, DetailView):
    """Render a participant's event badge as an image.

    NOTE(review): despite the class name saying JPEG, the badge has always
    been produced and served as PNG; kept as-is so URLs/callers don't break.
    """
    model = User
    slug_field = "username"
    slug_url_kwarg = "username"

    def get(self, request, username):
        """Compose the badge from the event's configured badge fields and
        return it as a PNG response."""
        participant = self.get_object()
        event = participant.event
        img = Image.new('RGBA', (event.badge_size_x, event.badge_size_y), event.badge_color)
        draw = ImageDraw.Draw(img)
        # Values that text badge fields may reference by name
        match = {
            'event': event.name,
            'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0] ),
            'first_name': participant.first_name,
            'last_name': participant.last_name,
            'profession': participant.profession,
            'country': participant.country.name,
            'type': participant.type,
            'email': participant.email,
        }
        for field in event.eventbadge_set.all():
            x = field.x
            y = field.y
            size = field.size
            if field.field == 'logo':
                if participant.event.logo:
                    logo = Image.open(participant.event.logo.file.file)
                    logo.thumbnail((size,size))
                    img.paste(logo, (x,y))
            elif field.field == 'photo':
                if participant.photo:
                    photo = Image.open(participant.photo)
                    photo.thumbnail((size,size))
                    img.paste(photo, (x,y))
            else:
                if field.field == 'text':
                    content = field.format
                else:
                    content = match[field.field]
                fnt = ImageFont.truetype(field.font.filename, size)
                color = field.color
                draw.text((x,y), ("%s") % (content), font=fnt, fill=color)
        # Fix: return the PNG-bearing response directly instead of wrapping
        # it in a second HttpResponse (which re-iterated the first response).
        response = HttpResponse(content_type="image/png")
        img.save(response, "PNG")
        return response
class UserBadgeView(LoginRequiredMixin, DetailView):
    """Display the HTML badge page for a user (login required)."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
    template_name = 'users/user_badge.html'
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single user's detail page (login required)."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the logged-in user to their own detail page."""
    # Temporary (302) redirect so browsers don't cache it
    permanent = False

    def get_redirect_url(self):
        """Build the detail URL for the requesting user."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile fields."""

    fields = ['first_name', 'last_name', 'phone', 'activities' ] #FIXME : add all needed fields

    # we already imported User in the view code above, remember?
    model = User

    # send the user back to their own page after a successful update
    def get_success_url(self):
        """Return the requesting user's own detail page URL."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})

    def get_object(self):
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all users (login required)."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import logging
import os
import base64
import tornado.ioloop
import tornado.web
class MainHandler(tornado.web.RequestHandler):
    """Serves the upload sample page and accepts base64/multipart uploads."""

    def get(self):
        """Render the upload demo page."""
        self.render("sample.html")

    def post(self):
        """Store an uploaded image under ``uploads/`` and write back its URL.

        The image may arrive either as a base64 data-URL in the ``data``
        form field or as a multipart file upload under the same name.
        """
        image_name = self.get_argument("name", "")
        image_data = self.get_argument("data", "")
        if image_data == "":
            # Multipart upload: body is raw bytes, not a data-URL string.
            image_data = self.request.files['data'][0]['body']

        # decode image data: strip the "data:image/...;base64," prefix.
        # Fix: use the matching separator type -- splitting bytes with a
        # str separator raises TypeError on Python 3.
        sep = b"," if isinstance(image_data, bytes) else ","
        data = image_data.split(sep)
        body = ""
        # sometimes when body data is white
        try:
            body = data[1]
        except Exception:
            pass

        try:
            os.stat("uploads")
        except Exception:
            os.mkdir("uploads")

        # Security fix: only keep the basename so a crafted "name" argument
        # (e.g. "../../etc/x") cannot escape the uploads directory.
        image_name = os.path.basename(image_name)
        # Fix: b64decode accepts str and bytes; base64.decodestring was
        # deprecated and removed in Python 3.9. Also close the file reliably.
        with open(os.path.join("uploads", image_name), "wb") as f:
            f.write(base64.b64decode(body))

        # return image url
        # self.write({ "thumb":"/static/sample/uploads/" + image_name})
        self.write("/static/sample/uploads/" + image_name)
class CDNSample(tornado.web.RequestHandler):
    """sample connecting to Loadingplay's CDN"""

    def get(self):
        """Render the CDN demo page."""
        self.render("sample_cdn.html")
# Application wiring: "/" serves the upload sample, "/cdn" the CDN sample.
application = tornado.web.Application([
    (r"/", MainHandler),
    (r"/cdn", CDNSample)
    ],
    template_path=os.path.dirname(__file__),
    static_path=os.path.join('..'),
    debug=True
)

if __name__ == "__main__":
    application.listen(8888)
    # NOTE(review): with no prior logging configuration this info record is
    # dropped by the root logger's default WARNING level -- confirm intent.
    logging.info("Running server on port 8888")
    tornado.ioloop.IOLoop.current().start()
|
"""MagicHelper - dockable widget showing magic commands for the MainWindow
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib imports
import json
import re
import sys
# System library imports
from IPython.external.qt import QtGui, QtCore
from IPython.core.magic import magic_escapes
class MagicHelper(QtGui.QDockWidget):
    """MagicHelper - dockable widget for convenient search and running of
       magic command for IPython QtConsole.
    """

    #-------------------------------------------------------------------------
    # signals
    #-------------------------------------------------------------------------

    pasteRequested = QtCore.Signal(str, name='pasteRequested')
    """This signal is emitted when user wants to paste selected magic
       command into the command line.
    """

    runRequested = QtCore.Signal(str, name='runRequested')
    """This signal is emitted when user wants to execute selected magic command
    """

    readyForUpdate = QtCore.Signal(name='readyForUpdate')
    """This signal is emitted when MagicHelper is ready to be populated.
       Since kernel querying mechanisms are out of scope of this class,
       it expects its owner to invoke MagicHelper.populate_magic_helper()
       as a reaction on this event.
    """

    #-------------------------------------------------------------------------
    # constructor
    #-------------------------------------------------------------------------

    def __init__(self, name, parent):
        super(MagicHelper, self).__init__(name, parent)

        self.data = None

        class MinListWidget(QtGui.QListWidget):
            """Temp class to overide the default QListWidget size hint
               in order to make MagicHelper narrow
            """
            def sizeHint(self):
                """Prefer a width just wide enough for the first column."""
                s = QtCore.QSize()
                s.setHeight(super(MinListWidget, self).sizeHint().height())
                s.setWidth(self.sizeHintForColumn(0))
                return s

        # construct content
        self.frame = QtGui.QFrame()
        self.search_label = QtGui.QLabel("Search:")
        self.search_line = QtGui.QLineEdit()
        self.search_class = QtGui.QComboBox()
        self.search_list = MinListWidget()
        self.paste_button = QtGui.QPushButton("Paste")
        self.run_button = QtGui.QPushButton("Run")

        # layout all the widgets
        main_layout = QtGui.QVBoxLayout()
        search_layout = QtGui.QHBoxLayout()
        search_layout.addWidget(self.search_label)
        search_layout.addWidget(self.search_line, 10)
        main_layout.addLayout(search_layout)
        main_layout.addWidget(self.search_class)
        main_layout.addWidget(self.search_list, 10)
        action_layout = QtGui.QHBoxLayout()
        action_layout.addWidget(self.paste_button)
        action_layout.addWidget(self.run_button)
        main_layout.addLayout(action_layout)

        self.frame.setLayout(main_layout)
        self.setWidget(self.frame)

        # connect all the relevant signals to handlers
        self.visibilityChanged[bool].connect(self._update_magic_helper)
        self.search_class.activated[int].connect(
            self.class_selected
        )
        self.search_line.textChanged[str].connect(
            self.search_changed
        )
        self.search_list.itemDoubleClicked[QtGui.QListWidgetItem].connect(
            self.paste_requested
        )
        self.paste_button.clicked[bool].connect(
            self.paste_requested
        )
        self.run_button.clicked[bool].connect(
            self.run_requested
        )

    #-------------------------------------------------------------------------
    # implementation
    #-------------------------------------------------------------------------

    def _update_magic_helper(self, visible):
        """Start update sequence.
        This method is called when MagicHelper becomes visible. It clears
        the content and emits readyForUpdate signal. The owner of the
        instance is expected to invoke populate_magic_helper() when magic
        info is available.
        """
        if not visible or self.data is not None:
            return
        self.data = {}
        self.search_class.clear()
        self.search_class.addItem("Populating...")
        self.search_list.clear()
        self.readyForUpdate.emit()

    def populate_magic_helper(self, data):
        """Expects data returned by lsmagics query from kernel.
        Populates the search_class and search_list with relevant items.
        """
        self.search_class.clear()
        self.search_list.clear()

        # Fix: the fallback default must be a JSON *string* -- json.loads
        # raises TypeError when handed a dict.
        self.data = json.loads(
            data['data'].get('application/json', '{}')
        )

        self.search_class.addItem('All Magics', 'any')
        classes = set()

        for mtype in sorted(self.data):
            subdict = self.data[mtype]
            for name in sorted(subdict):
                classes.add(subdict[name])

        for cls in sorted(classes):
            # Fix: raw strings -- "\g" is an invalid escape sequence in a
            # normal string literal (DeprecationWarning, future SyntaxError).
            label = re.sub(r"([a-zA-Z]+)([A-Z][a-z])", r"\g<1> \g<2>", cls)
            self.search_class.addItem(label, cls)

        self.filter_magic_helper('.', 'any')

    def class_selected(self, index):
        """Handle search_class selection changes
        """
        item = self.search_class.itemData(index)
        regex = self.search_line.text()
        self.filter_magic_helper(regex=regex, cls=item)

    def search_changed(self, search_string):
        """Handle search_line text changes.
        The text is interpreted as a regular expression
        """
        item = self.search_class.itemData(
            self.search_class.currentIndex()
        )
        self.filter_magic_helper(regex=search_string, cls=item)

    def _get_current_search_item(self, item=None):
        """Retrieve magic command currently selected in the search_list
        """
        text = None
        if not isinstance(item, QtGui.QListWidgetItem):
            item = self.search_list.currentItem()
        # Fix: guard against an empty list -- currentItem() may return None,
        # which previously raised AttributeError on .text().
        if item is not None:
            text = item.text()
        return text

    def paste_requested(self, item=None):
        """Emit pasteRequested signal with currently selected item text
        """
        text = self._get_current_search_item(item)
        if text is not None:
            self.pasteRequested.emit(text)

    def run_requested(self, item=None):
        """Emit runRequested signal with currently selected item text
        """
        text = self._get_current_search_item(item)
        if text is not None:
            self.runRequested.emit(text)

    def filter_magic_helper(self, regex, cls):
        """Update search_list with magic commands whose text match
        regex and class match cls.
        If cls equals 'any' - any class matches.
        """
        if regex == "" or regex is None:
            regex = '.'
        if cls is None:
            cls = 'any'

        self.search_list.clear()
        for mtype in sorted(self.data):
            subdict = self.data[mtype]
            prefix = magic_escapes[mtype]
            for name in sorted(subdict):
                mclass = subdict[name]
                pmagic = prefix + name
                if (re.match(regex, name) or re.match(regex, pmagic)) and \
                   (cls == 'any' or cls == mclass):
                    self.search_list.addItem(pmagic)
|
"""
Implement CourseTab
"""
from abc import ABCMeta
import logging
from xblock.fields import List
from openedx.core.lib.api.plugins import PluginError
# Module-level logger shared by all tab classes in this file.
log = logging.getLogger("edx.courseware")

# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
class CourseTab(object):
"""
The Course Tab class is a data abstraction for all tabs (i.e., course navigation links) within a course.
It is an abstract class - to be inherited by various tab types.
Derived classes are expected to override methods as needed.
When a new tab class is created, it should define the type and add it in this class' factory method.
"""
__metaclass__ = ABCMeta
# Class property that specifies the type of the tab. It is generally a constant value for a
# subclass, shared by all instances of the subclass.
type = ''
icon = ''
# The title of the tab, which should be internationalized using
# ugettext_noop since the user won't be available in this context.
title = None
# Class property that specifies whether the tab can be hidden for a particular course
is_hideable = False
# Class property that specifies whether the tab is hidden for a particular course
is_hidden = False
# The relative priority of this view that affects the ordering (lower numbers shown first)
priority = None
# Class property that specifies whether the tab can be moved within a course's list of tabs
is_movable = True
# Class property that specifies whether the tab is a collection of other tabs
is_collection = False
# True if this tab is dynamically added to the list of tabs
is_dynamic = False
# True if this tab is a default for the course (when enabled)
is_default = True
# True if this tab can be included more than once for a course.
allow_multiple = False
# If there is a single view associated with this tab, this is the name of it
view_name = None
def __init__(self, tab_dict):
"""
Initializes class members with values passed in by subclasses.
Args:
tab_dict (dict) - a dictionary of parameters used to build the tab.
"""
self.name = tab_dict.get('name', self.title)
self.tab_id = tab_dict.get('tab_id', getattr(self, 'tab_id', self.type))
self.link_func = tab_dict.get('link_func', link_reverse_func(self.view_name))
self.is_hidden = tab_dict.get('is_hidden', False)
@classmethod
def is_enabled(cls, course, user=None):
"""Returns true if this course tab is enabled in the course.
Args:
course (CourseDescriptor): the course using the feature
user (User): an optional user interacting with the course (defaults to None)
"""
raise NotImplementedError()
def get(self, key, default=None):
"""
Akin to the get method on Python dictionary objects, gracefully returns the value associated with the
given key, or the default if key does not exist.
"""
try:
return self[key]
except KeyError:
return default
def __getitem__(self, key):
"""
This method allows callers to access CourseTab members with the d[key] syntax as is done with
Python dictionary objects.
"""
if key == 'name':
return self.name
elif key == 'type':
return self.type
elif key == 'tab_id':
return self.tab_id
elif key == 'is_hidden':
return self.is_hidden
else:
raise KeyError('Key {0} not present in tab {1}'.format(key, self.to_json()))
def __setitem__(self, key, value):
"""
This method allows callers to change CourseTab members with the d[key]=value syntax as is done with
Python dictionary objects. For example: course_tab['name'] = new_name
Note: the 'type' member can be 'get', but not 'set'.
"""
if key == 'name':
self.name = value
elif key == 'tab_id':
self.tab_id = value
elif key == 'is_hidden':
self.is_hidden = value
else:
raise KeyError('Key {0} cannot be set in tab {1}'.format(key, self.to_json()))
def __eq__(self, other):
"""
Overrides the equal operator to check equality of member variables rather than the object's address.
Also allows comparison with dict-type tabs (needed to support callers implemented before this class
was implemented).
"""
if isinstance(other, dict) and not self.validate(other, raise_error=False):
# 'other' is a dict-type tab and did not validate
return False
# allow tabs without names; if a name is required, its presence was checked in the validator.
name_is_eq = (other.get('name') is None or self.name == other['name'])
# only compare the persisted/serialized members: 'type' and 'name'
return self.type == other.get('type') and name_is_eq
def __ne__(self, other):
"""
Overrides the not equal operator as a partner to the equal operator.
"""
return not self == other
@classmethod
def validate(cls, tab_dict, raise_error=True):
"""
Validates the given dict-type tab object to ensure it contains the expected keys.
This method should be overridden by subclasses that require certain keys to be persisted in the tab.
"""
return key_checker(['type'])(tab_dict, raise_error)
@classmethod
def load(cls, type_name, **kwargs):
"""
Constructs a tab of the given type_name.
Args:
type_name (str) - the type of tab that should be constructed
**kwargs - any other keyword arguments needed for constructing this tab
Returns:
an instance of the CourseTab subclass that matches the type_name
"""
json_dict = kwargs.copy()
json_dict['type'] = type_name
return cls.from_json(json_dict)
def to_json(self):
    """
    Serialize this tab to a json-friendly dict.

    Subclasses with more persisted members extend the returned dict.

    Returns:
        A dict with the tab's 'type' and 'name'; 'is_hidden' is included
        only when the tab is actually hidden.
    """
    serialized = {'type': self.type, 'name': self.name}
    if self.is_hidden:
        serialized['is_hidden'] = True
    return serialized
@staticmethod
def from_json(tab_dict):
    """
    Deserialize a CourseTab from a json-like dict representation.

    The value of the 'type' key selects which CourseTab subclass gets
    instantiated, and the dict is validated by that subclass before the
    object is built. An unknown or missing type is logged and yields None
    instead of raising, so a course remains usable even when one of its
    tabs is broken.

    Args:
        tab_dict: a dictionary with keys for the properties of the tab.

    Raises:
        InvalidTabsException: if the given tab doesn't have the right keys.
    """
    # TODO: don't import openedx capabilities from common
    from openedx.core.lib.course_tabs import CourseTabPluginManager
    plugin_type = tab_dict.get('type')
    if plugin_type is None:
        log.error('No type included in tab_dict: %r', tab_dict)
        return None
    try:
        tab_class = CourseTabPluginManager.get_plugin(plugin_type)
    except PluginError:
        log.exception(
            "Unknown tab type %r Known types: %r.",
            plugin_type,
            CourseTabPluginManager.get_tab_types()
        )
        return None
    tab_class.validate(tab_dict)
    return tab_class(tab_dict=tab_dict)
class StaticTab(CourseTab):
    """
    An author-defined custom tab whose content is addressed by a url_slug.
    """
    type = 'static_tab'
    icon = 'fa fa-circle'
    is_default = False  # A static tab is never added to a course by default
    allow_multiple = True

    def __init__(self, tab_dict=None, name=None, url_slug=None):
        def link_func(course, reverse_func):
            """ Returns a url for a given course and reverse function. """
            return reverse_func(self.type, args=[course.id.to_deprecated_string(), self.url_slug])

        # Prefer a slug from the dict representation; fall back to the kwarg.
        self.url_slug = tab_dict.get('url_slug') if tab_dict else url_slug
        if tab_dict is None:
            tab_dict = {}
        if name is not None:
            tab_dict['name'] = name
        # Populate the derived members before delegating to the base class.
        tab_dict['link_func'] = link_func
        tab_dict['tab_id'] = 'static_tab_{0}'.format(self.url_slug)
        super(StaticTab, self).__init__(tab_dict)

    @classmethod
    def is_enabled(cls, course, user=None):
        """ Static tabs are visible to everyone, including anonymous users. """
        return True

    @classmethod
    def validate(cls, tab_dict, raise_error=True):
        """ A valid static tab must carry both a 'name' and a 'url_slug'. """
        if not super(StaticTab, cls).validate(tab_dict, raise_error):
            return False
        return key_checker(['name', 'url_slug'])(tab_dict, raise_error)

    def __getitem__(self, key):
        if key == 'url_slug':
            return self.url_slug
        return super(StaticTab, self).__getitem__(key)

    def __setitem__(self, key, value):
        if key == 'url_slug':
            self.url_slug = value
            return
        super(StaticTab, self).__setitem__(key, value)

    def to_json(self):
        """ Serialize this tab, including its 'url_slug', to a dict. """
        serialized = super(StaticTab, self).to_json()
        serialized['url_slug'] = self.url_slug
        return serialized

    def __eq__(self, other):
        if not super(StaticTab, self).__eq__(other):
            return False
        return self.url_slug == other.get('url_slug')
class CourseTabList(List):
    """
    An XBlock field class that encapsulates a collection of Tabs in a course.
    It is automatically created and can be retrieved through a CourseDescriptor object: course.tabs
    """

    # TODO: Ideally, we'd like for this list of tabs to be dynamically
    # generated by the tabs plugin code. For now, we're leaving it like this to
    # preserve backwards compatibility.
    @staticmethod
    def initialize_default(course):
        """
        An explicit initialize method is used to set the default values, rather than implementing an
        __init__ method. This is because the default values are dependent on other information from
        within the course.
        """
        # Courseware and course info always come first, in this order --
        # validate_tabs below enforces the same invariant.
        course.tabs.extend([
            CourseTab.load('courseware'),
            CourseTab.load('course_info')
        ])
        # Presence of syllabus tab is indicated by a course attribute
        if hasattr(course, 'syllabus_present') and course.syllabus_present:
            course.tabs.append(CourseTab.load('syllabus'))
        # If the course has a discussion link specified, use that even if we feature
        # flag discussions off. Disabling that is mostly a server safety feature
        # at this point, and we don't need to worry about external sites.
        if course.discussion_link:
            discussion_tab = CourseTab.load(
                'external_discussion', name=_('External Discussion'), link=course.discussion_link
            )
        else:
            discussion_tab = CourseTab.load('discussion')
        course.tabs.extend([
            CourseTab.load('textbooks'),
            discussion_tab,
            CourseTab.load('wiki'),
            CourseTab.load('progress'),
        ])

    @staticmethod
    def get_discussion(course):
        """
        Returns the discussion tab for the given course. It can be either of type 'discussion'
        or 'external_discussion'. The returned tab object is self-aware of the 'link' that it corresponds to.
        Returns None when the course has no discussion tab of either type.
        """
        # the discussion_link setting overrides everything else, even if there is a discussion tab in the course tabs
        if course.discussion_link:
            return CourseTab.load(
                'external_discussion', name=_('External Discussion'), link=course.discussion_link
            )
        # find one of the discussion tab types in the course tabs
        for tab in course.tabs:
            if tab.type == 'discussion' or tab.type == 'external_discussion':
                return tab
        return None

    @staticmethod
    def get_tab_by_slug(tab_list, url_slug):
        """
        Look for a tab with the specified 'url_slug'. Returns the tab or None if not found.
        """
        return next((tab for tab in tab_list if tab.get('url_slug') == url_slug), None)

    @staticmethod
    def get_tab_by_type(tab_list, tab_type):
        """
        Look for a tab with the specified type. Returns the first matching tab, or None.
        """
        return next((tab for tab in tab_list if tab.type == tab_type), None)

    @staticmethod
    def get_tab_by_id(tab_list, tab_id):
        """
        Look for a tab with the specified tab_id. Returns the first matching tab, or None.
        """
        return next((tab for tab in tab_list if tab.tab_id == tab_id), None)

    @staticmethod
    def iterate_displayable(course, user=None, inline_collections=True):
        """
        Generator method for iterating through all tabs that can be displayed for the given course and
        the given user with the provided access settings.

        Hidden tabs are suppressed only when a user is given; anonymous
        traversal (user=None) still yields hidden tabs.
        """
        for tab in course.tabs:
            if tab.is_enabled(course, user=user) and not (user and tab.is_hidden):
                if tab.is_collection:
                    # If rendering inline then add each item in the collection,
                    # else just show the tab itself as long as it is not empty.
                    if inline_collections:
                        for item in tab.items(course):
                            yield item
                    elif len(list(tab.items(course))) > 0:
                        yield tab
                else:
                    yield tab

    @classmethod
    def validate_tabs(cls, tabs):
        """
        Check that the tabs set for the specified course is valid. If it
        isn't, raise InvalidTabsException with the complaint.
        Specific rules checked:
        - if no tabs specified, that's fine
        - if tabs specified, first two must have type 'courseware' and 'course_info', in that order.
        - tab types whose plugin declares allow_multiple=False may appear at most once.
        """
        if tabs is None or len(tabs) == 0:
            return
        if len(tabs) < 2:
            raise InvalidTabsException("Expected at least two tabs. tabs: '{0}'".format(tabs))
        if tabs[0].get('type') != 'courseware':
            raise InvalidTabsException(
                "Expected first tab to have type 'courseware'. tabs: '{0}'".format(tabs))
        if tabs[1].get('type') != 'course_info':
            raise InvalidTabsException(
                "Expected second tab to have type 'course_info'. tabs: '{0}'".format(tabs))
        # the following tabs should appear only once
        # TODO: don't import openedx capabilities from common
        from openedx.core.lib.course_tabs import CourseTabPluginManager
        for tab_type in CourseTabPluginManager.get_tab_types():
            if not tab_type.allow_multiple:
                cls._validate_num_tabs_of_type(tabs, tab_type.type, 1)

    @staticmethod
    def _validate_num_tabs_of_type(tabs, tab_type, max_num):
        """
        Check that the number of times that the given 'tab_type' appears in 'tabs' is less than or equal to 'max_num'.

        Raises InvalidTabsException when the limit is exceeded.
        """
        count = sum(1 for tab in tabs if tab.get('type') == tab_type)
        if count > max_num:
            msg = (
                "Tab of type '{type}' appears {count} time(s). "
                "Expected maximum of {max} time(s)."
            ).format(
                type=tab_type, count=count, max=max_num,
            )
            raise InvalidTabsException(msg)

    def to_json(self, values):
        """
        Overrides the to_json method to serialize all the CourseTab objects to a json-serializable representation.

        Values that are neither CourseTab instances nor dicts are silently
        dropped from the serialized list.
        """
        json_data = []
        if values:
            for val in values:
                if isinstance(val, CourseTab):
                    json_data.append(val.to_json())
                elif isinstance(val, dict):
                    json_data.append(val)
                else:
                    continue
        return json_data

    def from_json(self, values):
        """
        Overrides the from_json method to de-serialize the CourseTab objects from a json-like representation.

        Tab dicts that CourseTab.from_json cannot resolve (unknown type)
        come back as None and are omitted from the result.
        """
        self.validate_tabs(values)
        tabs = []
        for tab_dict in values:
            tab = CourseTab.from_json(tab_dict)
            if tab:
                tabs.append(tab)
        return tabs
# Validators
# A validator takes a dict and raises InvalidTabsException if required fields are missing or otherwise wrong.
# (e.g. "is there a 'name' field?). Validators can assume that the type field is valid.
def key_checker(expected_keys):
    """
    Build a validator that requires certain keys in a dict.

    Args:
        expected_keys: iterable of key names that must all be present.

    Returns:
        A function ``check(actual_dict, raise_error=True)`` returning True
        when every expected key is present; otherwise it raises
        InvalidTabsException, or returns False when raise_error is False.
    """
    def check(actual_dict, raise_error=True):
        """ Report whether actual_dict contains every expected key. """
        if set(expected_keys).issubset(actual_dict.keys()):
            return True
        if raise_error:
            raise InvalidTabsException(
                "Expected keys '{0}' are not present in the given dict: {1}".format(expected_keys, actual_dict)
            )
        return False
    return check
def link_reverse_func(reverse_name):
    """
    Build a link function for a CourseTab without importing Django's reverse.

    Returns a callable taking (course, reverse_url_func) that invokes
    reverse_url_func with the given reverse_name and the course's
    deprecated string id.
    """
    def link(course, reverse_url_func):
        return reverse_url_func(reverse_name, args=[course.id.to_deprecated_string()])
    return link
def need_name(dictionary, raise_error=True):
    """
    Check that the given dictionary carries a 'name' key.

    Delegates to key_checker: a missing name either raises
    InvalidTabsException or returns False, depending on raise_error.
    """
    check = key_checker(['name'])
    return check(dictionary, raise_error)
class InvalidTabsException(Exception):
    """
    Raised when a course's tab configuration fails validation.
    """
class UnequalTabsException(Exception):
    """
    Raised when two tab lists that should match turn out to be unequal.
    """
|
"""
@created_at 2014-07-19
@author Exequiel Fuentes <efulet@gmail.com>
@author Brian Keith <briankeithn@gmail.com>
"""
# Se recomienda seguir los siguientes estandares:
# 1. Para codificacion: PEP 8 - Style Guide for Python Code (http://legacy.python.org/dev/peps/pep-0008/)
# 2. Para documentacion: PEP 257 - Docstring Conventions (http://legacy.python.org/dev/peps/pep-0257/)
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.lda import LDA
from sklearn.naive_bayes import GaussianNB
from pca_lda_exception import FKSkLearnException
class FKSkLearn():
    """Wraps scikit-learn routines to build a PCA+LDA subspace and train a
    Gaussian Naive Bayes classifier on the projected data.

    NOTE(review): this is Python 2 code (print statement, xrange) and relies
    on the deprecated sklearn.cross_validation and sklearn.lda modules.
    """

    def __init__(self, db_path):
        """Create a PCA_LDA instance.

        :param db_path: path to the database file; np.load must yield an
            archive containing the arrays 'data' and 'labels'.
        """
        d = np.load(db_path)
        self.data = d['data']
        self.labels = d['labels']
        # Number of original feature dimensions (columns of data).
        self._dimensions = self.data.shape[1]
        # Train/test splits and all derived results start out unset; the
        # fk_* methods populate them step by step.
        self._x_train, self._x_test, self._y_train, self._y_test = (None,)*4
        self._lda_train, self._lda_test = (None,)*2
        self._y_pred, self._y_prob = (None,)*2
        self._gnb = None
        # 30% of the samples are held out for testing; the fixed random
        # state keeps the split reproducible across runs.
        self._test_size = 0.3
        self._random_state = 0

    def fk_get_lda_train(self):
        """Return lda_train (training data projected into the LDA subspace)."""
        return self._lda_train

    def fk_get_lda_test(self):
        """Return lda_test (test data projected into the LDA subspace)."""
        return self._lda_test

    def fk_get_y_train(self):
        """Return y_train (training labels)."""
        return self._y_train

    def fk_get_y_test(self):
        """Return y_test (test labels)."""
        return self._y_test

    def fk_get_y_pred(self):
        """Return y_pred (labels predicted by the Bayes classifier)."""
        return self._y_pred

    def fk_get_y_prob(self):
        """Return y_prob (class probabilities from the Bayes classifier)."""
        return self._y_prob

    def fk_train_test_split(self):
        """Wrap sklearn's train_test_split, storing the four resulting arrays."""
        self._x_train, self._x_test, self._y_train, self._y_test = train_test_split(self.data, self.labels, test_size=self._test_size, random_state=self._random_state)

    def fk_pca_lda(self):
        """Build the PCA and LDA subspaces, searching for the optimal PCA
        dimension first (exhaustive scan -- see _find_optimal_dimension)."""
        k_opt = self._find_optimal_dimension()
        print "Dimension Optima:", k_opt
        self._lda_train, self._lda_test = self._pca_lda(k_opt)

    def fk_bayes_classifier(self):
        """Fit a Gaussian Naive Bayes classifier on the LDA projections.

        Raises FKSkLearnException when the LDA projections or training
        labels have not been computed yet.
        """
        if self._lda_train is None:
            raise FKSkLearnException("lda_train no fue calculada.")
        if self._lda_test is None:
            raise FKSkLearnException("lda_test no fue calculada.")
        if self._y_train is None:
            raise FKSkLearnException("y_train no fue calculada.")
        self._gnb = GaussianNB()
        self._gnb.fit(self._lda_train, self._y_train)
        self._y_pred = self._gnb.predict(self._lda_test)
        # NOTE(review): predictions are made on the test projection but the
        # probabilities come from the *training* projection -- possibly this
        # was meant to be predict_proba(self._lda_test); confirm with callers.
        self._y_prob = self._gnb.predict_proba(self._lda_train)

    def fk_score(self):
        """Wrap GaussianNB.score, returning mean accuracy on the test set.

        Raises FKSkLearnException when the classifier or its inputs are
        not available yet.
        """
        if self._gnb is None:
            raise FKSkLearnException("Objecto GaussianNB es nulo.")
        if self._lda_test is None:
            raise FKSkLearnException("lda_test no fue calculada.")
        if self._y_test is None:
            raise FKSkLearnException("y_test no fue calculada.")
        return self._gnb.score(self._lda_test, self._y_test)

    def _pca_lda(self, components):
        """Build the PCA and LDA subspaces for a given PCA dimensionality.

        :param components: integer number of PCA components to keep.
        :returns: (lda_train, lda_test) projections.

        Raises FKSkLearnException on a non-integer component count or when
        the train/test split has not been computed yet.
        """
        if components is None or not isinstance(components, int):
            raise FKSkLearnException("components no puede ser nulo o diferente de entero.")
        if self._x_train is None:
            raise FKSkLearnException("x_train no fue calculada.")
        if self._x_test is None:
            raise FKSkLearnException("x_test no fue calculada.")
        if self._y_train is None:
            raise FKSkLearnException("y_train no fue calculada.")
        # Build the PCA subspace with the requested number of components.
        pca = PCA(n_components=components)
        # Fit PCA on the training set only.
        pca.fit(self._x_train)
        pca_train = pca.transform(self._x_train)
        # Project the test data into the same PCA subspace.
        pca_test = pca.transform(self._x_test)
        # Build the LDA subspace.
        clf = LDA()
        # Fit LDA on the PCA-projected training set.
        clf.fit(pca_train, self._y_train)
        lda_train = clf.transform(pca_train)
        # Project the PCA-projected test data into the LDA subspace.
        lda_test = clf.transform(pca_test)
        return lda_train, lda_test

    def _find_optimal_dimension(self):
        """Find the optimal PCA dimension. Based on the algorithm created
        by Juan Bekios-Calfa <juan.bekios@ucn.cl>

        Exhaustively tries every dimensionality from 1 to the number of
        original features and keeps the one with the best Bayes accuracy
        on the test set.
        """
        if self._y_train is None:
            raise FKSkLearnException("y_train no fue calculada.")
        if self._y_test is None:
            raise FKSkLearnException("y_test no fue calculada.")
        r = 0  # Classification rate
        k = 1  # Number of components
        for n_components in xrange(1, self._dimensions + 1):
            # Train PCA+LDA with the given number of components.
            lda_train, lda_test = self._pca_lda(n_components)
            # Score a Bayes classifier on this projection.
            gnb = GaussianNB()
            gnb.fit(lda_train, self._y_train)
            r_i = gnb.score(lda_test, self._y_test)
            if r_i > r:
                r = r_i
                k = n_components
        return k
|
# tk/graphics.py
# by John M. Zelle, Ph.D
# edited by Rushy Panchal
"""Simple object oriented graphics library
The library is designed to make it very easy for novice programmers to
experiment with computer graphics in an object oriented fashion. It is
written by John Zelle for use with the book "Python Programming: An
Introduction to Computer Science" (Franklin, Beedle & Associates).
LICENSE: This is open-source software released under the terms of the
GPL (http://www.gnu.org/licenses/gpl.html).
PLATFORMS: The package is a wrapper around Tkinter and should run on
any platform where Tkinter is available.
INSTALLATION: Put this file somewhere where Python can see it.
OVERVIEW: There are two kinds of objects in the library. The GraphWin
class implements a window where drawing can be done and various
GraphicsObjects are provided that can be drawn into a GraphWin. As a
simple example, here is a complete program to draw a circle of radius
10 centered in a 100x100 window:
--------------------------------------------------------------------
from graphics import *
try:
from Tkinter import *
except ImportError:
from tkinter import *
def main():
root = Tk()
win = GraphWin(root, 100, 100)
c = Circle(Point(50,50), 10)
c.draw(win)
win.getMouse() # Pause window
main()
--------------------------------------------------------------------
GraphWin objects support coordinate transformation through the
setCoords method and pointer-based input through getMouse.
The library provides the following graphical objects:
Point
Line
Circle
Oval
Rectangle
Polygon
Text
Entry (for text-based input)
Image
Various attributes of graphical objects can be set such as
outline-color, fill-color and line-width. Graphical objects also
support moving and hiding for animation effects.
The library also provides a very simple class for pixel-based image
manipulation, Pixmap. A pixmap can be loaded from a file and displayed
using an Image object. Both getPixel and setPixel methods are provided
for manipulating the image.
DOCUMENTATION: For complete documentation, see Chapter 4 of "Python
Programming: An Introduction to Computer Science" by John Zelle,
published by Franklin, Beedle & Associates. Also see
http://mcsp.wartburg.edu/zelle/python for a quick reference"""
'''
Changes made to create a Tkinter-friendly 'graphics.py' Version 5.0:
- In the main body frame, get rid of the '_root = Tk(); _root.withdraw()'
- create a 'master' parameter in GraphWin() AND get rid of the 'title' parameter --- the Tk() is already titled in the application ('Tk(className = _title_)')
- create a global variable _root (it is used elsewhere but was originally initialized in the main body of graphics.py)
- set _root to master
- get rid of the master.title(title) command
- delete the master.resizable(0, 0) command
- create global _root instances in EVERY function it is mentioned in:
* main code structure: update
* class GraphWin: __init__, __autoflush
* class GraphicsObject: draw, undraw, move, _reconfig
* class Entry:
__init__
* class Image: __init__
- in class GraphWin, change __autoflush to:
def __autoflush(self):
global _root
if self.autoflush:
try:
_root.update()
except tkinter.TclError:
pass
* The function must be changed because if the window is closed using _root.destroy(), you get an error if you try to update it
Notes:
- I noticed that graphics.py creates a Tkinter window ('_root = Tk()') as _root in the beginning, so I figured if I deleted that and then replaced it with my own window,
I could add whatever I wanted to the window since it becomes a Tkinter instance, not solely a 'graphics.py' instance
- The geometry manager is 'grid':
the Canvas itself is gridded at (row = 0, column = 0) by default
'''
# Version 5.1 12/23/2013
# * Allows saving of window using the Python Imaging Library (PIL) to an image file
# - This is set within the constructor: GraphWin(..., save_image = True). By default, it is False.
# * Currently, does not support undrawing of objects from the image
# Version 5.0 4/6/2013
# * Allows mixing with Tkinter
#
# Version 4.2 5/26/2011
# * Modified Image to allow multiple undraws like other GraphicsObjects
# Version 4.1 12/29/2009
# * Merged Pixmap and Image class. Old Pixmap removed, use Image.
# Version 4.0.1 10/08/2009
# * Modified the autoflush on GraphWin to default to True
# * Autoflush check on close, setBackground
# * Fixed getMouse to flush pending clicks at entry
# Version 4.0 08/2009
# * Reverted to non-threaded version. The advantages (robustness,
# efficiency, ability to use with other Tk code, etc.) outweigh
# the disadvantage that interactive use with IDLE is slightly more
# cumbersome.
# * Modified to run in either Python 2.x or 3.x (same file).
# * Added Image.getPixmap()
# * Added update() -- stand alone function to cause any pending
# graphics changes to display.
#
# Version 3.4 10/16/07
# Fixed GraphicsError to avoid "exploded" error messages.
# Version 3.3 8/8/06
# Added checkMouse method to GraphWin
# Version 3.2.3
# Fixed error in Polygon init spotted by Andrew Harrington
# Fixed improper threading in Image constructor
# Version 3.2.2 5/30/05
# Cleaned up handling of exceptions in Tk thread. The graphics package
# now raises an exception if attempt is made to communicate with
# a dead Tk thread.
# Version 3.2.1 5/22/05
# Added shutdown function for tkinter thread to eliminate race-condition
# error "chatter" when main thread terminates
# Renamed various private globals with _
# Version 3.2 5/4/05
# Added Pixmap object for simple image manipulation.
# Version 3.1 4/13/05
# Improved the Tk thread communication so that most Tk calls
# do not have to wait for synchronization with the Tk thread.
# (see _tkCall and _tkExec)
# Version 3.0 12/30/04
# Implemented Tk event loop in separate thread. Should now work
# interactively with IDLE. Undocumented autoflush feature is
# no longer necessary. Its default is now False (off). It may
# be removed in a future version.
# Better handling of errors regarding operations on windows that
# have been closed.
# Addition of an isClosed method to GraphWindow class.
# Version 2.2 8/26/04
# Fixed cloning bug reported by Joseph Oldham.
# Now implements deep copy of config info.
# Version 2.1 1/15/04
# Added autoflush option to GraphWin. When True (default) updates on
# the window are done after each action. This makes some graphics
# intensive programs sluggish. Turning off autoflush causes updates
# to happen during idle periods or when flush is called.
# Version 2.0
# Updated Documentation
# Made Polygon accept a list of Points in constructor
# Made all drawing functions call TK update for easier animations
# and to make the overall package work better with
# Python 2.3 and IDLE 1.0 under Windows (still some issues).
# Removed vestigial turtle graphics.
# Added ability to configure font for Entry objects (analogous to Text)
# Added setTextColor for Text as an alias of setFill
# Changed to class-style exceptions
# Fixed cloning of Text objects
# Version 1.6
# Fixed Entry so StringVar uses _root as master, solves weird
# interaction with shell in Idle
# Fixed bug in setCoords. X and Y coordinates can increase in
# "non-intuitive" direction.
# Tweaked wm_protocol so window is not resizable and kill box closes.
# Version 1.5
# Fixed bug in Entry. Can now define entry before creating a
# GraphWin. All GraphWins are now toplevel windows and share
# a fixed root (called _root).
# Version 1.4
# Fixed Garbage collection of Tkinter images bug.
# Added ability to set text attributes.
# Added Entry boxes.
import time, os, sys
try:
# import as appropriate for 2.x vs. 3.x
import tkinter
except ImportError:
import Tkinter as tkinter
try:
import Image as PILImage
import ImageDraw
HAS_PIL = True
except ImportError:
HAS_PIL = False
##########################################################################
# Module Exceptions
class GraphicsError(Exception):
"""Generic error class for graphics module exceptions."""
pass
OBJ_ALREADY_DRAWN = "Object currently drawn"
UNSUPPORTED_METHOD = "Object doesn't support operation"
BAD_OPTION = "Illegal option value"
DEAD_THREAD = "Graphics thread quit unexpectedly"
### Other Constants
LOCAL = "local"
GLOBAL = "global"
def update():
global _root
_root.update()
############################################################################
# Graphics classes start here
class GraphWin(tkinter.Canvas):
    """A GraphWin is a toplevel window for displaying graphics.

    Subclasses tkinter.Canvas so it can be mixed with ordinary Tkinter
    layouts. Constructing one rebinds the module-global _root to the
    master window, which the drawing code uses to flush updates.
    """

    def __init__(self, master = None, width = 200, height = 200, autoflush = True, row = None, column = None, padx = None, pady = None, title = "Graphics Window", save_image = False):
        global _root
        # Without an explicit master, create a standalone Tk root; the
        # title is passed via className (leading space keeps Tk from
        # lower-casing the first letter).
        if master is None:
            master = tkinter.Tk(className = ' ' + title)
        _root = master
        self.width, self.height = width, height
        tkinter.Canvas.__init__(self, master, width=width, height=height)
        # NOTE(review): an explicit 0 passed for row/column/padx/pady is
        # falsy, so gridding is skipped entirely -- confirm this is intended.
        if row or column or padx or pady:
            if not row:
                row = 0
            if not column:
                column = 0
            if not padx:
                padx = 0
            if not pady:
                pady = 5
            self.grid(row = row, column = column, padx = padx, pady = pady)
        # Mirror drawing into a PIL image only when PIL is importable.
        if not HAS_PIL:
            self.save_image = False
        else:
            self.save_image = save_image
        if self.save_image:
            self.image = PILImage.new('RGBA', (self.width, self.height))
            self.drawing_image = ImageDraw.Draw(self.image)
            self.image_path = 'tk_graphTools_Graph_image.jpg'
        self.foreground = "black"
        self.items = []
        # Last unconsumed click position; None means "no pending click".
        self.mouseX = None
        self.mouseY = None
        self.bind("<Button-1>", self._onClick)
        self.height = height
        self.width = width
        self.autoflush = autoflush
        self._mouseCallback = None
        # Optional world<->screen coordinate transform (set by setCoords).
        self.trans = None
        self.closed = False
        # Maps Tk item id -> GraphicsObject for everything drawn here.
        self.all_objects = {}
        master.lift()
        if autoflush:
            _root.update()

    def __checkOpen(self):
        # Raise rather than silently operate on a destroyed window.
        if self.closed:
            raise GraphicsError("window is closed")

    def setBackground(self, color):
        """Set background color of the window"""
        self.__checkOpen()
        self.config(bg=color)
        self.__autoflush()

    def title(self, name):
        '''Set the title of the enclosing master window.'''
        self.master.title(name)

    def setCoords(self, x1, y1, x2, y2):
        """Set coordinates of window to run from (x1,y1) in the
        lower-left corner to (x2,y2) in the upper-right corner."""
        # NOTE(review): 'center' is set to the coordinate span (x2-x1, y2-y1),
        # not the midpoint -- confirm downstream users expect this.
        self.center = Point(x2 - x1, y2 - y1)
        self.xMin, self.yMin, self.xMax, self.yMax = x1, y1, x2, y2
        self.trans = Transform(self.width, self.height, x1, y1, x2, y2)

    def clear(self, *items):
        '''Delete every canvas item, undraw the given items plus all tracked
        objects, and reset the PIL mirror image when one is kept.'''
        self.delete(tkinter.ALL)
        items = list(items) + list(self.all_objects.values())
        undrawAll(*items)
        if self.save_image:
            self.image = PILImage.new('RGBA', (self.width, self.height))
            self.drawing_image = ImageDraw.Draw(self.image)

    def close(self):
        """Close the window"""
        # Idempotent: a second close is a no-op.
        if self.closed:
            return
        self.closed = True
        self.master.destroy()
        self.__autoflush()

    def isClosed(self):
        '''Return True when the window has been closed.'''
        return self.closed

    def isOpen(self):
        '''Return True while the window is still open.'''
        return not self.closed

    def __autoflush(self):
        global _root
        if self.autoflush:
            # update() raises TclError after _root.destroy(); swallow it so
            # autoflush after close() stays harmless.
            try:
                _root.update()
            except tkinter.TclError:
                pass

    def plot(self, x, y, color="black"):
        """Set pixel (x,y) to the given color"""
        self.__checkOpen()
        xs,ys = self.toScreen(x,y)
        self.create_line(xs,ys,xs+1,ys+1, fill=color)
        self.__autoflush()
        if self.save_image:
            self.drawing_image.point((xs, ys), color)

    def plotPixel(self, x, y, color="black"):
        """Set pixel raw (independent of window coordinates) pixel
        (x,y) to color"""
        self.__checkOpen()
        self.create_line(x,y,x+1,y+1, fill=color)
        self.__autoflush()
        if self.save_image:
            self.drawing_image.point((x, y), color)

    def flush(self):
        """Update drawing to the window"""
        self.__checkOpen()
        self.update_idletasks()

    def getMouse(self):
        """Wait for mouse click and return Point object representing
        the click"""
        self.update()  # flush any prior clicks
        self.mouseX = None
        self.mouseY = None
        # Busy-wait (with a short sleep) until _onClick records a click.
        while self.mouseX == None or self.mouseY == None:
            self.update()
            if self.isClosed():
                raise GraphicsError("getMouse in closed window")
            time.sleep(.1)  # give up thread
        x,y = self.toWorld(self.mouseX, self.mouseY)
        # Consume the click so checkMouse/getMouse won't see it again.
        self.mouseX = None
        self.mouseY = None
        return Point(x,y)

    def checkMouse(self):
        """Return last mouse click or None if mouse has
        not been clicked since last call"""
        if self.isClosed():
            raise GraphicsError("checkMouse in closed window")
        self.update()
        if self.mouseX != None and self.mouseY != None:
            x,y = self.toWorld(self.mouseX, self.mouseY)
            self.mouseX = None
            self.mouseY = None
            return Point(x,y)
        else:
            return None

    def getHeight(self):
        """Return the height of the window"""
        return self.height

    def getWidth(self):
        """Return the width of the window"""
        return self.width

    def save(self, filepath = "graphwin.jpg"):
        '''Saves the drawn image under the given filepath'''
        # Only possible when PIL is installed and mirroring was requested.
        if HAS_PIL and self.save_image:
            self.image.save(filepath)

    def saveImage(self, filepath = "graphwin.jpg"):
        '''Deprecated (but maintained for backwards compatibility), please use GraphWin.save'''
        self.save(filepath)

    def translate(self, x, y, mode = LOCAL):
        '''Translates the (x, y) pixel coordinate to the custom coordinates.
        GLOBAL mode first converts screen-absolute pixels to window-relative.'''
        if mode == GLOBAL:
            x -= self.winfo_rootx()
            y -= self.winfo_rooty()
        return self.toWorld(x, y)

    def translateCustom(self, x, y, mode = LOCAL):
        '''Translates custom coordinates to pixel coordinates.
        GLOBAL mode returns screen-absolute rather than window-relative pixels.'''
        x, y = self.toScreen(x, y)
        if mode == GLOBAL:
            x += self.winfo_rootx()
            y += self.winfo_rooty()
        return (x, y)

    def toScreen(self, x, y):
        '''Returns pixel coordinates (identity when setCoords was never called).'''
        trans = self.trans
        if trans:
            return self.trans.screen(x,y)
        else:
            return x,y

    def toWorld(self, x, y):
        '''Returns custom coordinates (identity when setCoords was never called).'''
        trans = self.trans
        if trans:
            return self.trans.world(x,y)
        else:
            return x,y

    def setMouseHandler(self, func):
        '''Register a callback invoked with a Point on every left-click.'''
        self._mouseCallback = func

    def _onClick(self, e):
        # Record the click for getMouse/checkMouse, then fire the callback.
        self.mouseX = e.x
        self.mouseY = e.y
        if self._mouseCallback:
            self._mouseCallback(Point(e.x, e.y))
class Transform:
    """Internal helper mapping between world (user) coordinates and window
    pixel coordinates."""

    def __init__(self, w, h, xlow, ylow, xhigh, yhigh):
        # w, h: window size in pixels.
        # (xlow, ylow): world coordinates of the lower-left pixel (0, h-1).
        # (xhigh, yhigh): world coordinates of the upper-right pixel (w-1, 0).
        self.xbase = xlow
        self.ybase = yhigh
        self.xscale = (xhigh - xlow) / float(w)
        self.yscale = (yhigh - ylow) / float(h)

    def screen(self, x, y):
        """Convert world (x, y) to integer window coordinates."""
        col = (x - self.xbase) / self.xscale
        row = (self.ybase - y) / self.yscale
        # Round-half-up to the nearest pixel.
        return int(col + 0.5), int(row + 0.5)

    def world(self, xs, ys):
        """Convert window (xs, ys) back to world coordinates."""
        wx = self.xbase + xs * self.xscale
        wy = self.ybase - ys * self.yscale
        return wx, wy
# Default values for various item configuration options. Only a subset of
# keys may be present in the configuration dictionary for a given item
DEFAULT_CONFIG = {"fill":"",
"outline":"black",
"width":"1",
"arrow":"none",
"text":"",
"justify":"center",
"font": ("helvetica", 12, "normal")}
class GraphicsObject:
"""Generic base class for all of the drawable objects"""
# A subclass of GraphicsObject should override _draw and
# and _move methods.
def __init__(self, options):
# options is a list of strings indicating which options are
# legal for this object.
# When an object is drawn, canvas is set to the GraphWin(canvas)
# object where it is drawn and id is the TK identifier of the
# drawn shape.
self.canvas = None
self.id = None
# config is the dictionary of configuration options for the widget.
config = {}
for option in options:
config[option] = DEFAULT_CONFIG[option]
self.config = config
def setFill(self, color):
"""Set interior color to color"""
self._reconfig("fill", color)
def setOutline(self, color):
"""Set outline color to color"""
self._reconfig("outline", color)
def setWidth(self, width):
"""Set line weight to width"""
self._reconfig("width", width)
def draw(self, graphwin):
global _root
"""Draw the object in graphwin, which should be a GraphWin
object. A GraphicsObject may only be drawn into one
window. Raises an error if attempt made to draw an object that
is already visible."""
if self.canvas and not self.canvas.isClosed():
raise GraphicsError(OBJ_ALREADY_DRAWN)
if graphwin.isClosed():
raise GraphicsError("Can't draw to closed window")
self.canvas = graphwin
self.id = self._draw(graphwin, self.config)
if graphwin.autoflush:
_root.update()
graphwin.all_objects[self.id] = self
def undraw(self):
global _root
"""Undraw the object (i.e. hide it). Returns silently if the
object is not currently drawn."""
if not self.canvas:
return
if not self.canvas.isClosed():
self.canvas.delete(self.id)
if self.canvas.autoflush:
_root.update()
try:
del self.canvas.all_objects[self.id]
except (AttributeError, KeyError):
pass
self.canvas = None
self.id = None
def move(self, dx, dy):
global _root
"""move object dx units in x direction and dy units in y
direction"""
self._move(dx,dy)
canvas = self.canvas
if canvas and not canvas.isClosed():
trans = canvas.trans
if trans:
x = dx/ trans.xscale
y = -dy / trans.yscale
else:
x = dx
y = dy
self.canvas.move(self.id, x, y)
if canvas.autoflush:
_root.update()
def _reconfig(self, option, setting):
global _root
# Internal method for changing configuration of the object
# Raises an error if the option does not exist in the config
# dictionary for this object
if option not in self.config:
raise GraphicsError(UNSUPPORTED_METHOD)
options = self.config
options[option] = setting
if self.canvas and not self.canvas.isClosed():
self.canvas.itemconfig(self.id, options)
if self.canvas.autoflush:
_root.update()
def getColor(self, attribute):
'''Gets the color'''
color = self.config[attribute]
if not color:
if isinstance(self, (Line, Point)):
return 'black'
return 'black' if attribute == 'outline' else (0, 0, 0, 0)
return color
    def _draw(self, canvas, options):
        """draws appropriate figure on canvas with options provided
        Returns Tk id of item drawn"""
        # Abstract hook: each concrete shape (Point, Line, ...) overrides this.
        pass # must override in subclass
    def _move(self, dx, dy):
        """updates internal state of object to move it dx,dy units"""
        # Abstract hook: subclasses shift their own coordinates here; the
        # public move() takes care of the on-screen update.
        pass # must override in subclass
class Point(GraphicsObject):
    """A single pixel-sized point at (x, y)."""

    def __init__(self, x, y):
        GraphicsObject.__init__(self, ["outline", "fill"])
        # A point has no interior distinct from its outline.
        self.setFill = self.setOutline
        self.x = x
        self.y = y

    def _draw(self, canvas, options):
        x, y = canvas.toScreen(self.x, self.y)
        # Bug fix: ``save_image`` and ``drawing_image`` live on the canvas,
        # not on the Point, and ``color`` was an undefined name (NameError
        # whenever image saving was enabled).
        if canvas.save_image:
            canvas.drawing_image.point((x, y), self.getColor('outline'))
        return canvas.create_rectangle(x, y, x + 1, y + 1, options)

    def _move(self, dx, dy):
        self.x = self.x + dx
        self.y = self.y + dy

    def clone(self):
        """Return an undrawn copy of this point."""
        other = Point(self.x, self.y)
        other.config = self.config.copy()
        return other

    def getX(self): return self.x

    def getY(self): return self.y
class _BBox(GraphicsObject):
    # Internal base class for objects represented by bounding box
    # (opposite corners) Line segment is a degenerate case.

    def __init__(self, p1, p2, options=None):
        """*p1*/*p2* are opposite corners; *options* lists the supported
        Tk config keys (defaults to outline/width/fill)."""
        # Bug fix: the default was a shared mutable list; use the None
        # sentinel so every call gets a fresh list.
        if options is None:
            options = ["outline", "width", "fill"]
        GraphicsObject.__init__(self, options)
        # Clone so later mutation of the caller's points cannot move us.
        self.p1 = p1.clone()
        self.p2 = p2.clone()

    def _move(self, dx, dy):
        self.p1.x = self.p1.x + dx
        self.p1.y = self.p1.y + dy
        self.p2.x = self.p2.x + dx
        self.p2.y = self.p2.y + dy

    def getP1(self): return self.p1.clone()

    def getP2(self): return self.p2.clone()

    def getCenter(self):
        """Return the midpoint of the bounding box as a new Point."""
        p1 = self.p1
        p2 = self.p2
        return Point((p1.x + p2.x) / 2.0, (p1.y + p2.y) / 2.0)
class Rectangle(_BBox):
    """An axis-aligned rectangle defined by two opposite corners."""

    def __init__(self, p1, p2):
        _BBox.__init__(self, p1, p2)

    def _draw(self, canvas, options):
        x1, y1 = canvas.toScreen(self.p1.x, self.p1.y)
        x2, y2 = canvas.toScreen(self.p2.x, self.p2.y)
        if canvas.save_image:
            canvas.drawing_image.rectangle((x1, y2, x2 + 1, y1 + 1),
                                           self.getColor('fill'),
                                           self.getColor('outline'))
        return canvas.create_rectangle(x1, y1, x2, y2, options)

    def clone(self):
        """Return an undrawn copy of this rectangle."""
        duplicate = Rectangle(self.p1, self.p2)
        duplicate.config = self.config.copy()
        return duplicate
class Oval(_BBox):
    """An ellipse inscribed in the bounding box p1..p2."""

    def __init__(self, p1, p2):
        _BBox.__init__(self, p1, p2)

    def clone(self):
        """Return an undrawn copy of this oval."""
        duplicate = Oval(self.p1, self.p2)
        duplicate.config = self.config.copy()
        return duplicate

    def _draw(self, canvas, options):
        x1, y1 = canvas.toScreen(self.p1.x, self.p1.y)
        x2, y2 = canvas.toScreen(self.p2.x, self.p2.y)
        if canvas.save_image:
            canvas.drawing_image.ellipse((x1, y2, x2, y1),
                                         self.getColor('fill'),
                                         self.getColor('outline'))
        return canvas.create_oval(x1, y1, x2, y2, options)
class Circle(Oval):
    """An Oval constrained to equal width and height."""

    def __init__(self, center, radius):
        corner1 = Point(center.x - radius, center.y - radius)
        corner2 = Point(center.x + radius, center.y + radius)
        Oval.__init__(self, corner1, corner2)
        self.radius = radius

    def clone(self):
        """Return an undrawn copy of this circle."""
        duplicate = Circle(self.getCenter(), self.radius)
        duplicate.config = self.config.copy()
        return duplicate

    def getRadius(self):
        return self.radius
class Line(_BBox):
    """A line segment from p1 to p2."""

    def __init__(self, p1, p2):
        _BBox.__init__(self, p1, p2, ["arrow", "fill", "width"])
        # Tk draws lines with "fill", so fill doubles as the outline color.
        self.setFill(DEFAULT_CONFIG['outline'])
        self.setOutline = self.setFill

    def clone(self):
        """Return an undrawn copy of this line."""
        other = Line(self.p1, self.p2)
        other.config = self.config.copy()
        return other

    def _draw(self, canvas, options):
        p1 = self.p1
        p2 = self.p2
        x1, y1 = canvas.toScreen(p1.x, p1.y)
        x2, y2 = canvas.toScreen(p2.x, p2.y)
        if canvas.save_image:
            canvas.drawing_image.line((x1, y1, x2, y2), self.getColor('fill'))
        return canvas.create_line(x1, y1, x2, y2, options)

    def setArrow(self, option):
        """Set arrowheads: one of 'first', 'last', 'both' or 'none'."""
        # PEP 8 idiom: ``x not in y`` instead of ``not x in y``.
        if option not in ["first", "last", "both", "none"]:
            raise GraphicsError(BAD_OPTION)
        self._reconfig("arrow", option)
class Polygon(GraphicsObject):
    """A closed polygon through an arbitrary sequence of Points."""

    def __init__(self, *points):
        # Accept either Polygon(p1, p2, ...) or Polygon([p1, p2, ...]).
        # isinstance is the idiomatic (and subclass-friendly) type test.
        if len(points) == 1 and isinstance(points[0], list):
            points = points[0]
        self.points = list(map(Point.clone, points))
        GraphicsObject.__init__(self, ["outline", "width", "fill"])

    def clone(self):
        """Return an undrawn copy of this polygon."""
        other = Polygon(*self.points)
        other.config = self.config.copy()
        return other

    def getPoints(self):
        """Return defensive copies of the vertices."""
        return list(map(Point.clone, self.points))

    def _move(self, dx, dy):
        for p in self.points:
            p.move(dx, dy)

    def _draw(self, canvas, options):
        args = [canvas]
        image_args = []
        for p in self.points:
            x, y = canvas.toScreen(p.x, p.y)
            args.append(x)
            args.append(y)
            image_args.append((x, y))
        args.append(options)
        if canvas.save_image:
            # A PIL polygon needs at least three vertices.
            if len(image_args) > 2:
                canvas.drawing_image.polygon(image_args, self.getColor('fill'), self.getColor('outline'))
        # NOTE(review): unbound-method call with canvas as self; equivalent to
        # canvas.create_polygon(...) unless a subclass overrides it -- confirm.
        return GraphWin.create_polygon(*args)
class Text(GraphicsObject):
    # Text displayed at an anchor point; the "fill" option is the text color.
    def __init__(self, p, text):
        GraphicsObject.__init__(self, ["justify","fill","text","font"])
        self.setText(text)
        self.anchor = p.clone()
        # Text has no interior/outline distinction, so alias the setters.
        self.setFill(DEFAULT_CONFIG['outline'])
        self.setOutline = self.setFill
    def _draw(self, canvas, options):
        p = self.anchor
        x,y = canvas.toScreen(p.x,p.y)
        if canvas.save_image:
            # NOTE(review): the PIL mirror ignores self.config['font'] and
            # draws with the default font -- confirm this is intentional.
            canvas.drawing_image.text((x, y), self.text, font = None)
        return canvas.create_text(x,y,options)
    def _move(self, dx, dy):
        self.anchor.move(dx,dy)
    def clone(self):
        """Return an undrawn copy of this text object."""
        other = Text(self.anchor, self.config['text'])
        other.config = self.config.copy()
        return other
    def setText(self,text):
        # Kept both on the instance (for _draw's PIL path) and in config.
        self.text = text
        self._reconfig("text", text)
    def getText(self):
        return self.config["text"]
    def getAnchor(self):
        return self.anchor.clone()
    def setFace(self, face):
        """Set the font family; raises GraphicsError for unknown faces."""
        if face in ['helvetica','arial','courier','times roman', 'comic sans']:
            f,s,b = self.config['font']
            self._reconfig("font",(face,s,b))
        else:
            raise GraphicsError(BAD_OPTION)
    def setSize(self, size):
        """Set the point size; only 5..36 is accepted."""
        if 5 <= size <= 36:
            f,s,b = self.config['font']
            self._reconfig("font", (f,size,b))
        else:
            raise GraphicsError(BAD_OPTION)
    def setStyle(self, style):
        """Set the font style: bold/normal/italic/bold italic."""
        if style in ['bold','normal','italic', 'bold italic']:
            f,s,b = self.config['font']
            self._reconfig("font", (f,s,style))
        else:
            raise GraphicsError(BAD_OPTION)
    def setTextColor(self, color):
        # For text, color and fill are the same thing.
        self.setFill(color)
class Entry(GraphicsObject):
    # A Tk Entry widget embedded in the canvas at an anchor point.  Unlike
    # the shape classes it keeps its appearance in plain attributes (fill,
    # color, font) rather than in self.config.
    def __init__(self, p, width):
        global _root
        GraphicsObject.__init__(self, [])
        self.anchor = p.clone()
        #print self.anchor
        self.width = width
        # StringVar holds the editable text; bound to _root's interpreter.
        self.text = tkinter.StringVar(_root)
        self.text.set("")
        self.fill = "gray"
        self.color = "black"
        self.font = DEFAULT_CONFIG['font']
        # The real Tk widget is created lazily in _draw().
        self.entry = None
    def _draw(self, canvas, options):
        p = self.anchor
        x,y = canvas.toScreen(p.x,p.y)
        frm = tkinter.Frame(canvas.master)
        self.entry = tkinter.Entry(frm,
                                   width=self.width,
                                   textvariable=self.text,
                                   bg = self.fill,
                                   fg = self.color,
                                   font=self.font)
        self.entry.pack()
        #self.setFill(self.fill)
        return canvas.create_window(x,y,window=frm)
    def getText(self):
        """Return the current contents of the entry field."""
        return self.text.get()
    def _move(self, dx, dy):
        self.anchor.move(dx,dy)
    def getAnchor(self):
        return self.anchor.clone()
    def clone(self):
        """Return an undrawn copy with the same text and appearance."""
        other = Entry(self.anchor, self.width)
        other.config = self.config.copy()
        other.text = tkinter.StringVar()
        other.text.set(self.text.get())
        other.fill = self.fill
        return other
    def setText(self, t):
        self.text.set(t)
    def setFill(self, color):
        # Update the stored color and, if the widget exists, the live widget.
        self.fill = color
        if self.entry:
            self.entry.config(bg=color)
    def _setFontComponent(self, which, value):
        # Font is a (face, size, style) tuple; rebuild it with one slot changed.
        font = list(self.font)
        font[which] = value
        self.font = tuple(font)
        if self.entry:
            self.entry.config(font=self.font)
    def setFace(self, face):
        """Set the font family; raises GraphicsError for unknown faces."""
        if face in ['helvetica','arial','courier','times roman']:
            self._setFontComponent(0, face)
        else:
            raise GraphicsError(BAD_OPTION)
    def setSize(self, size):
        """Set the point size; only 5..36 is accepted."""
        if 5 <= size <= 36:
            self._setFontComponent(1,size)
        else:
            raise GraphicsError(BAD_OPTION)
    def setStyle(self, style):
        """Set the font style: bold/normal/italic/bold italic."""
        if style in ['bold','normal','italic', 'bold italic']:
            self._setFontComponent(2,style)
        else:
            raise GraphicsError(BAD_OPTION)
    def setTextColor(self, color):
        self.color=color
        if self.entry:
            self.entry.config(fg=color)
class Image(GraphicsObject):
    """A raster image anchored at a point.

    Construct from a file, ``Image(p, filename)``, or as a blank image of a
    given size, ``Image(p, width, height)``.
    """

    idCount = 0
    imageCache = {} # tkinter photoimages go here to avoid GC while drawn

    def __init__(self, p, *pixmap):
        # Bug fix: this used ``global root``, which never matched the
        # module-level ``_root`` actually referenced below.
        global _root
        GraphicsObject.__init__(self, [])
        self.anchor = p.clone()
        self.imageId = Image.idCount
        Image.idCount = Image.idCount + 1
        if len(pixmap) == 1:
            # file name provided
            self.img = tkinter.PhotoImage(file=pixmap[0], master=_root)
        else:
            # width and height provided
            width, height = pixmap
            self.img = tkinter.PhotoImage(master=_root, width=width, height=height)

    def _draw(self, canvas, options):
        p = self.anchor
        x, y = canvas.toScreen(p.x, p.y)
        self.imageCache[self.imageId] = self.img # save a reference
        if canvas.save_image:
            canvas.drawing_image.bitmap((x, y), self.img)
        return canvas.create_image(x, y, image=self.img)

    def _move(self, dx, dy):
        self.anchor.move(dx, dy)

    def undraw(self):
        try:
            del self.imageCache[self.imageId] # allow gc of tkinter photoimage
        except KeyError:
            pass
        GraphicsObject.undraw(self)

    def getAnchor(self):
        return self.anchor.clone()

    def clone(self):
        """Return an undrawn deep copy of this image."""
        other = Image(Point(0, 0), 0, 0)
        other.img = self.img.copy()
        other.anchor = self.anchor.clone()
        other.config = self.config.copy()
        return other

    def getWidth(self):
        """Returns the width of the image in pixels"""
        return self.img.width()

    def getHeight(self):
        """Returns the height of the image in pixels"""
        return self.img.height()

    def getPixel(self, x, y):
        """Returns a list [r,g,b] with the RGB color values for pixel (x,y)
        r,g,b are in range(256)
        """
        value = self.img.get(x, y)
        # Grayscale PhotoImages can return a single int; use isinstance
        # (idiomatic) rather than comparing type objects.
        if isinstance(value, int):
            return [value, value, value]
        else:
            return list(map(int, value.split()))

    def setPixel(self, x, y, color):
        """Sets pixel (x,y) to the given color"""
        self.img.put("{" + color + "}", (x, y))

    def save(self, filename):
        """Saves the pixmap image to filename.
        The format for the saved image is determined from the filename extension."""
        path, name = os.path.split(filename)
        ext = name.split(".")[-1]
        self.img.write(filename, format=ext)
def color_rgb(r, g, b):
    """r,g,b are intensities of red, green, and blue in range(256)
    Returns color specifier string for the resulting color"""
    return "#%02x%02x%02x" % (r, g, b)
def drawAll(window, *itemsToDraw):
    """Draw every item in *itemsToDraw* into *window*."""
    if not isinstance(window, GraphWin):
        raise TypeError('Window must be a GraphWin object')
    for drawable in itemsToDraw:
        drawable.draw(window)
def undrawAll(*itemsToUndraw):
    """Undraw every item passed in, in order."""
    for drawable in itemsToUndraw:
        drawable.undraw()
def redrawAll(window, *itemsToRedraw):
    """Undraw then draw every item in *itemsToRedraw* on *window*."""
    if not isinstance(window, GraphWin):
        raise TypeError('Window must be a GraphWin object')
    undrawAll(*itemsToRedraw)
    drawAll(window, *itemsToRedraw)
def test():
    # Interactive smoke test: exercises text, polygons, entries, styling and
    # movement.  Each win.getMouse() waits for a click before continuing.
    win = GraphWin(tkinter.Tk())
    win.setCoords(0,0,10,10)
    win.grid()
    t = Text(Point(5,5), "Centered Text")
    t.draw(win)
    p = Polygon(Point(1,1), Point(5,3), Point(2,7))
    p.draw(win)
    e = Entry(Point(5,6), 10)
    e.draw(win)
    win.getMouse()
    # Restyle the polygon and echo the entry's text into the label.
    p.setFill("red")
    p.setOutline("blue")
    p.setWidth(2)
    s = ""
    for pt in p.getPoints():
        s = s + "(%0.1f,%0.1f) " % (pt.getX(), pt.getY())
    t.setText(e.getText())
    e.setFill("green")
    e.setText("Spam!")
    e.move(2,0)
    win.getMouse()
    # Move the polygon and show its new vertex coordinates.
    p.move(2,3)
    s = ""
    for pt in p.getPoints():
        s = s + "(%0.1f,%0.1f) " % (pt.getX(), pt.getY())
    t.setText(s)
    win.getMouse()
    p.undraw()
    e.undraw()
    # Cycle through the supported font styles, sizes and faces.
    t.setStyle("bold")
    win.getMouse()
    t.setStyle("normal")
    win.getMouse()
    t.setStyle("italic")
    win.getMouse()
    t.setStyle("bold italic")
    win.getMouse()
    t.setSize(14)
    win.getMouse()
    t.setFace("arial")
    t.setSize(20)
    win.getMouse()
    win.close()
if __name__ == "__main__":
    # Run the interactive smoke test when executed as a script.
    test()
|
"""Module provides higher level system tests."""
# Copyright (C) 2014 Bradley Alan Smith
import unittest,time,datetime
import sys
import config_cloud as cfg
sys.path.append('../')
from core.pg.datatypes import *
# Select the backend under test: 'local' talks to core.cloud in-process,
# 'rest' exercises the HTTP client against a server on localhost:5000.
target = 'rest'
if target == 'local':
    import core.cloud as cc
    cloud = cc.Cloud(cfg.user)
elif target == 'rest':
    sys.path.append('../client')
    import pyclient as cc
    cloud = cc.connect("http://localhost:5000",cfg.user)
#@unittest.skip('skip')
class TestObjectDatabase(unittest.TestCase):
    """Tests the creation, population and destruction of a basic database."""

    cloud = cloud

    def setUp(self):
        # Each test runs against a freshly created database named 'Test'.
        args = ('Test',)
        self.db_args = args
        self.db = self.cloud.create_database(*args)

    def tearDown(self):
        """Drop the test database."""
        self.db.drop()

    def test_object(self):
        """Can we put data objects in and get them back out? No PG 9.3"""
        # Modernized: assertEquals is a deprecated alias of assertEqual
        # (removed in Python 3.12).
        db = cloud.get_byid(self.db.objid)
        self.assertEqual(db.info['name'], 'Test')
        self.assertEqual(db.info['owner'], cfg.user)
        db = cloud.Database('Test')
        self.assertEqual(db.info['name'], 'Test')
        self.assertEqual(db.info['owner'], cfg.user)
        # Creating a database that already exists must raise.
        with self.assertRaises(cc.RelationExists):
            self.cloud.create_database('Test')
        cols = [
            {'name': 'pk', 'datatype': Text},
            {'name': 'a', 'datatype': Integer},
            {'name': 'b', 'datatype': DataObject},
        ]
        table = self.db.create_table('testTable', cols)
        table = self.db.Table('testTable')
        values = [
            {'pk': 'frank', 'a': 2, 'b': {'j': 1, 'k': 2}},
            {'pk': 'jerry', 'a': 5, 'b': {'l': 3, 'm': 4}},
            {'pk': 'ann', 'a': 7, 'b': {'n': 5, 'o': 6}},
            {'pk': 'francine', 'a': 9, 'b': {'p': 7, 'q': 8}},
        ]
        table.insert(values)
        results = self.cloud.select(table, order_by='a')
        self.assertEqual(results[0]['b']['j'], 1)
        self.assertEqual(results[2]['b']['o'], 6)
        # Object-column predicates: contains / containedby / haskey /
        # hasall / hasany.
        pk, a, b = table.columns()
        where = b.contains({'k': 2})
        results = self.cloud.select(table, where=where, order_by='a')
        self.assertEqual(len(results), 1)
        where = b.containedby({'a': 'charlie', 'k': 2, 'j': 1})
        results = self.cloud.select(table, where=where, order_by='a')
        self.assertEqual(len(results), 1)
        where = b.haskey('k')
        results = self.cloud.select(table, where=where, order_by='a')
        self.assertEqual(len(results), 1)
        where = b.hasall(['n', 'o'])
        results = self.cloud.select(table, where=where, order_by='a')
        self.assertEqual(len(results), 1)
        where = b.hasany(['k', 'm', 'o'])
        results = self.cloud.select(table, where=where, order_by='a')
        self.assertEqual(len(results), 3)
def getTests(cls):
return unittest.TestLoader().loadTestsFromTestCase(cls)
def suite():
    """Assemble the full test suite for this module."""
    all_tests = unittest.TestSuite()
    all_tests.addTest(getTests(TestObjectDatabase))
    return all_tests
if __name__ == '__main__':
    # Allow running this module's tests directly from the command line.
    unittest.main()
|
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2014-2016 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .. import ivi
from .. import pwrmeter
import time
# Measurement units accepted by _set_channel_units ('dBm' -> LG mode,
# 'Watts' -> LN mode on the 437B).
Units = set(['dBm', 'Watts'])
class agilent437B(ivi.Driver, pwrmeter.Base, pwrmeter.ManualRange,
                pwrmeter.DutyCycleCorrection, pwrmeter.AveragingCount,
                pwrmeter.ZeroCorrection, pwrmeter.Calibration,
                pwrmeter.ReferenceOscillator):
    "Agilent 437B RF power meter"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', '437B')

        super(agilent437B, self).__init__(*args, **kwargs)

        self._channel_count = 1

        self._identity_description = "Agilent 437B RF power meter driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Agilent Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 3
        self._identity_specification_minor_version = 0
        self._identity_supported_instrument_models = ['437B']

        self._init_channels()

    def _initialize(self, resource=None, id_query=False, reset=False, **keywargs):
        "Opens an I/O session to the instrument."
        super(agilent437B, self)._initialize(resource, id_query, reset, **keywargs)

        # interface clear
        if not self._driver_operation_simulate:
            self._clear()

        # check ID
        if id_query and not self._driver_operation_simulate:
            # Renamed local (was ``id``, shadowing the builtin).
            inst_id = self.identity.instrument_model
            id_check = self._instrument_id
            id_short = inst_id[:len(id_check)]
            if id_short != id_check:
                # Bug fix: the message was never formatted (logging-style
                # extra args on Exception); interpolate explicitly.
                raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))

        # reset
        if reset:
            self.utility_reset()

    def _load_id_string(self):
        # Populate the identity fields from *IDN?, or placeholders when
        # simulating.
        if self._driver_operation_simulate:
            self._identity_instrument_manufacturer = "Not available while simulating"
            self._identity_instrument_model = "Not available while simulating"
            self._identity_instrument_firmware_revision = "Not available while simulating"
        else:
            lst = self._ask("*IDN?").split(",")
            self._identity_instrument_manufacturer = lst[0]
            self._identity_instrument_model = lst[1]
            self._identity_instrument_firmware_revision = lst[3]
            self._set_cache_valid(True, 'identity_instrument_manufacturer')
            self._set_cache_valid(True, 'identity_instrument_model')
            self._set_cache_valid(True, 'identity_instrument_firmware_revision')

    def _get_identity_instrument_manufacturer(self):
        if self._get_cache_valid():
            return self._identity_instrument_manufacturer
        self._load_id_string()
        return self._identity_instrument_manufacturer

    def _get_identity_instrument_model(self):
        if self._get_cache_valid():
            return self._identity_instrument_model
        self._load_id_string()
        return self._identity_instrument_model

    def _get_identity_instrument_firmware_revision(self):
        if self._get_cache_valid():
            return self._identity_instrument_firmware_revision
        self._load_id_string()
        return self._identity_instrument_firmware_revision

    def _utility_disable(self):
        pass

    def _utility_error_query(self):
        error_code = 0
        error_message = "No error"
        #if not self._driver_operation_simulate:
        #    error_code, error_message = self._ask(":system:error?").split(',')
        #    error_code = int(error_code)
        #    error_message = error_message.strip(' "')
        return (error_code, error_message)

    def _utility_lock_object(self):
        pass

    def _utility_reset(self):
        if not self._driver_operation_simulate:
            self._write("*RST")
            self._clear()
            self.driver_operation.invalidate_all_attributes()

    def _utility_reset_with_defaults(self):
        self._utility_reset()

    def _utility_self_test(self):
        # Bug fix: an unconditional ``raise ivi.OperationNotSupportedException()``
        # followed the return statement and was unreachable dead code; removed.
        code = 0
        message = "Self test passed"
        if not self._driver_operation_simulate:
            code = int(self._ask("*TST?"))
            if code != 0:
                message = "Self test failed"
        return (code, message)

    def _utility_unlock_object(self):
        pass

    def _init_channels(self):
        try:
            super(agilent437B, self)._init_channels()
        except AttributeError:
            pass

        self._channel_name = list()
        self._channel_averaging_count_auto = list()
        self._channel_correction_frequency = list()
        self._channel_offset = list()
        self._channel_range_auto = list()
        self._channel_units = list()
        for i in range(self._channel_count):
            self._channel_name.append("channel%d" % (i+1))
            self._channel_averaging_count_auto.append(True)
            self._channel_correction_frequency.append(50e6)
            self._channel_offset.append(0.0)
            self._channel_range_auto.append(True)
            self._channel_units.append('dBm')

        self.channels._set_list(self._channel_name)

    def _get_channel_averaging_count_auto(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_averaging_count_auto[index]

    def _set_channel_averaging_count_auto(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = bool(value)
        # The 437B only supports automatic averaging.
        if not value:
            raise ivi.ValueNotSupportedException()
        self._channel_averaging_count_auto[index] = value

    def _get_channel_correction_frequency(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_correction_frequency[index]

    def _set_channel_correction_frequency(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = float(value)
        if not self._driver_operation_simulate:
            self._write("FR%eEN" % (value))
        self._channel_correction_frequency[index] = value
        self._set_cache_valid(index=index)

    def _get_channel_offset(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_offset[index]

    def _set_channel_offset(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = float(value)
        if not self._driver_operation_simulate:
            self._write("OS%eEN" % (value))
        self._channel_offset[index] = value
        self._set_cache_valid(index=index)

    def _get_channel_range_auto(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_range_auto[index]

    def _set_channel_range_auto(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = bool(value)
        self._channel_range_auto[index] = value

    def _get_channel_units(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_units[index]

    def _set_channel_units(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        if value not in Units:
            raise ivi.ValueNotSupportedException()
        if not self._driver_operation_simulate:
            # LG selects log (dBm) display, LN selects linear (Watts).
            if value == 'dBm':
                self._write("LG")
            elif value == 'Watts':
                self._write("LN")
        self._channel_units[index] = value
        self._set_cache_valid(index=index)

    def _get_measurement_measurement_state(self):
        return self._measurement_measurement_state

    def _measurement_abort(self):
        self._clear()

    def _measurement_configure(self, operator, operand1, operand2):
        pass

    def _measurement_fetch(self):
        if self._driver_operation_simulate:
            return
        val = self._read()
        return float(val)

    def _measurement_initiate(self):
        if self._driver_operation_simulate:
            return
        self._write("TR1")

    def _measurement_read(self, maximum_time):
        self._measurement_initiate()
        return self._measurement_fetch()

    def _get_channel_range_lower(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_range_lower[index]

    def _set_channel_range_lower(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = float(value)
        self._channel_range_lower[index] = value

    def _get_channel_range_upper(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_range_upper[index]

    def _set_channel_range_upper(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = float(value)
        self._channel_range_upper[index] = value

    def _get_channel_duty_cycle_enabled(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_duty_cycle_enabled[index]

    def _set_channel_duty_cycle_enabled(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = bool(value)
        if not self._driver_operation_simulate:
            self._write("DC%d" % int(value))
        self._channel_duty_cycle_enabled[index] = value
        self._set_cache_valid(index=index)

    def _get_channel_duty_cycle_value(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_duty_cycle_value[index]

    def _set_channel_duty_cycle_value(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = float(value)
        if not self._driver_operation_simulate:
            self._write("DY%eEN" % (value))
        self._channel_duty_cycle_value[index] = value
        self._set_cache_valid(index=index)

    def _get_channel_averaging_count(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_averaging_count[index]

    def _set_channel_averaging_count(self, index, value):
        index = ivi.get_index(self._channel_name, index)
        value = int(value)
        if not self._driver_operation_simulate:
            self._write("FM%eEN" % (value))
        self._channel_averaging_count[index] = value
        self._set_cache_valid(index=index)

    def _get_channel_zero_state(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_zero_state[index]

    def _channel_zero(self, index):
        # Zero the sensor; poll the status byte until done, error, or timeout.
        index = ivi.get_index(self._channel_name, index)
        if self._driver_operation_simulate:
            return
        self._write("CS")
        self._write("ZE")
        it = 0
        while True:
            val = self._read_stb()
            if val & 2:
                break
            if val & 8 or it > 20:
                return
            # Bug fix: the counter was never incremented, so the 20-iteration
            # timeout above could never trigger.
            it += 1
            time.sleep(0.5)
        self._channel_zero_state[index] = 'complete'

    def _get_channel_calibration_state(self, index):
        index = ivi.get_index(self._channel_name, index)
        return self._channel_calibration_state[index]

    def _channel_calibrate(self, index):
        # Run sensor calibration; poll the status byte until done, error, or
        # timeout (same protocol as _channel_zero).
        index = ivi.get_index(self._channel_name, index)
        if self._driver_operation_simulate:
            return
        self._write("CS")
        self._write("CLEN")
        it = 0
        while True:
            val = self._read_stb()
            if val & 2:
                break
            if val & 8 or it > 20:
                return
            # Bug fix: counter was never incremented (see _channel_zero).
            it += 1
            time.sleep(0.5)
        self._channel_calibration_state[index] = 'complete'

    def _get_reference_oscillator_enabled(self):
        return self._reference_oscillator_enabled

    def _set_reference_oscillator_enabled(self, value):
        value = bool(value)
        if not self._driver_operation_simulate:
            self._write("OC%d" % int(value))
        self._reference_oscillator_enabled = value
        self._set_cache_valid()

    def _get_reference_oscillator_frequency(self):
        return self._reference_oscillator_frequency

    def _set_reference_oscillator_frequency(self, value):
        value = float(value)
        value = 50e6 # fixed at 50 MHz
        self._reference_oscillator_frequency = value

    def _get_reference_oscillator_level(self):
        return self._reference_oscillator_level

    def _set_reference_oscillator_level(self, value):
        value = float(value)
        value = 0.0 # fixed at 1.00 mW (0 dBm)
        self._reference_oscillator_level = value
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Automated tests for checking transformation algorithms (the models package).
"""
from __future__ import with_statement
import logging
import unittest
import os
from six.moves import zip as izip
from collections import namedtuple
from testfixtures import log_capture
import numpy as np
from gensim import utils
from gensim.models import doc2vec, keyedvectors
from gensim.test.utils import datapath, get_tmpfile, common_texts as raw_sentences
class DocsLeeCorpus(object):
    """Iterate over the Lee background corpus as TaggedDocuments.

    Tags are plain ints by default, '_*N' strings when *string_tags* is
    set, and non-ASCII unicode strings when *unicode_tags* is set.
    """

    def __init__(self, string_tags=False, unicode_tags=False):
        self.string_tags = string_tags
        self.unicode_tags = unicode_tags

    def _tag(self, i):
        # unicode wins over string wins over int when several flags are set
        if self.unicode_tags:
            return u'_\xa1_%d' % i
        if self.string_tags:
            return '_*%d' % i
        return i

    def __iter__(self):
        with open(datapath('lee_background.cor')) as f:
            for i, line in enumerate(f):
                words = utils.simple_preprocess(line)
                yield doc2vec.TaggedDocument(words, [self._tag(i)])
# Materialized corpora reused across the tests below: the full Lee corpus
# (int tags) and the small common_texts corpus as TaggedDocuments.
list_corpus = list(DocsLeeCorpus())
sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
def load_on_instance():
    """Save a model, then load it via an *instance* rather than the class."""
    # Save and load a Doc2Vec Model on instance for test
    tmpf = get_tmpfile('gensim_doc2vec.tst')
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
    model.save(tmpf)
    model = doc2vec.Doc2Vec()  # should fail at this point
    return model.load(tmpf)
class TestDoc2VecModel(unittest.TestCase):
    def test_persistence(self):
        """Test storing/loading the entire model."""
        tmpf = get_tmpfile('gensim_doc2vec.tst')
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
        model.save(tmpf)
        # The save/load round trip must preserve all model state.
        self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
    def testPersistenceWord2VecFormat(self):
        """Test storing the entire model in word2vec format."""
        model = doc2vec.Doc2Vec(DocsLeeCorpus(), min_count=1)
        # test saving both document and word embedding
        test_doc_word = get_tmpfile('gensim_doc2vec.dw')
        model.save_word2vec_format(test_doc_word, doctag_vec=True, word_vec=True, binary=True)
        binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc_word, binary=True)
        # combined export: one vector per word plus one per doctag
        self.assertEqual(len(model.wv.vocab) + len(model.docvecs), len(binary_model_dv.vocab))
        # test saving document embedding only
        test_doc = get_tmpfile('gensim_doc2vec.d')
        model.save_word2vec_format(test_doc, doctag_vec=True, word_vec=False, binary=True)
        binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_doc, binary=True)
        self.assertEqual(len(model.docvecs), len(binary_model_dv.vocab))
        # test saving word embedding only
        test_word = get_tmpfile('gensim_doc2vec.w')
        model.save_word2vec_format(test_word, doctag_vec=False, word_vec=True, binary=True)
        binary_model_dv = keyedvectors.KeyedVectors.load_word2vec_format(test_word, binary=True)
        self.assertEqual(len(model.wv.vocab), len(binary_model_dv.vocab))
    def test_unicode_in_doctag(self):
        """Test storing document vectors of a model with unicode titles."""
        model = doc2vec.Doc2Vec(DocsLeeCorpus(unicode_tags=True), min_count=1)
        tmpf = get_tmpfile('gensim_doc2vec.tst')
        # Non-ASCII doctags must survive the binary word2vec export.
        try:
            model.save_word2vec_format(tmpf, doctag_vec=True, word_vec=True, binary=True)
        except UnicodeEncodeError:
            self.fail('Failed storing unicode title.')
    def test_load_mmap(self):
        """Test storing/loading the model with separately stored, mmap-able arrays."""
        model = doc2vec.Doc2Vec(sentences, min_count=1)
        tmpf = get_tmpfile('gensim_doc2vec.tst')
        # test storing the internal arrays into separate files
        model.save(tmpf, sep_limit=0)
        self.models_equal(model, doc2vec.Doc2Vec.load(tmpf))
        # make sure mmaping the arrays back works, too
        self.models_equal(model, doc2vec.Doc2Vec.load(tmpf, mmap='r'))
    def test_int_doctags(self):
        """Test doc2vec doctag alternatives"""
        corpus = DocsLeeCorpus()
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # the Lee corpus yields 300 documents tagged 0..299
        self.assertEqual(len(model.docvecs.doctag_syn0), 300)
        self.assertEqual(model.docvecs[0].shape, (100,))
        # numpy integer keys must behave like plain ints
        self.assertEqual(model.docvecs[np.int64(0)].shape, (100,))
        # string lookups go through the word vocab, so this key is unknown
        self.assertRaises(KeyError, model.__getitem__, '_*0')
    def test_missing_string_doctag(self):
        """Test doc2vec doctag alternatives"""
        corpus = list(DocsLeeCorpus(True))
        # force duplicated tags
        corpus = corpus[0:10] + corpus
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # unknown string tags must raise, not silently return something
        self.assertRaises(KeyError, model.docvecs.__getitem__, 'not_a_tag')
    def test_string_doctags(self):
        """Test doc2vec doctag alternatives"""
        corpus = list(DocsLeeCorpus(True))
        # force duplicated tags
        corpus = corpus[0:10] + corpus
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # duplicated tags must not create extra doctag vectors
        self.assertEqual(len(model.docvecs.doctag_syn0), 300)
        self.assertEqual(model.docvecs[0].shape, (100,))
        self.assertEqual(model.docvecs['_*0'].shape, (100,))
        # int index and its string tag must address the same vector
        self.assertTrue(all(model.docvecs['_*0'] == model.docvecs[0]))
        self.assertTrue(max(d.offset for d in model.docvecs.doctags.values()) < len(model.docvecs.doctags))
        self.assertTrue(
            max(model.docvecs._int_index(str_key) for str_key in model.docvecs.doctags.keys())
            < len(model.docvecs.doctag_syn0)
        )
        # verify docvecs.most_similar() returns string doctags rather than indexes
        self.assertEqual(model.docvecs.offset2doctag[0], model.docvecs.most_similar([model.docvecs[0]])[0][0])
    def test_empty_errors(self):
        """Empty or fully-filtered input must raise RuntimeError."""
        # no input => "RuntimeError: you must first build vocabulary before training the model"
        self.assertRaises(RuntimeError, doc2vec.Doc2Vec, [])
        # input not empty, but rather completely filtered out
        self.assertRaises(RuntimeError, doc2vec.Doc2Vec, list_corpus, min_count=10000)
    def test_similarity_unseen_docs(self):
        """Test similarity of out of training sentences"""
        rome_str = ['rome', 'italy']
        car_str = ['car']
        corpus = list(DocsLeeCorpus(True))
        model = doc2vec.Doc2Vec(min_count=1)
        model.build_vocab(corpus)
        # related word lists should infer closer vectors than unrelated ones
        self.assertTrue(
            model.docvecs.similarity_unseen_docs(model, rome_str, rome_str) >
            model.docvecs.similarity_unseen_docs(model, rome_str, car_str)
        )
    def model_sanity(self, model, keep_training=True):
        """Any non-trivial model on DocsLeeCorpus can pass these sanity checks"""
        fire1 = 0  # doc 0 sydney fires
        fire2 = np.int64(8)  # doc 8 sydney fires
        tennis1 = 6  # doc 6 tennis
        # inferred vector should be top10 close to bulk-trained one
        doc0_inferred = model.infer_vector(list(DocsLeeCorpus())[0].words)
        sims_to_infer = model.docvecs.most_similar([doc0_inferred], topn=len(model.docvecs))
        f_rank = [docid for docid, sim in sims_to_infer].index(fire1)
        self.assertLess(f_rank, 10)
        # fire2 should be top30 close to fire1
        sims = model.docvecs.most_similar(fire1, topn=len(model.docvecs))
        f2_rank = [docid for docid, sim in sims].index(fire2)
        self.assertLess(f2_rank, 30)
        # same sims should appear in lookup by vec as by index
        doc0_vec = model.docvecs[fire1]
        sims2 = model.docvecs.most_similar(positive=[doc0_vec], topn=21)
        # NOTE(review): local `id` here shadows the builtin; harmless but
        # worth renaming in a future cleanup.
        sims2 = [(id, sim) for id, sim in sims2 if id != fire1]  # ignore the doc itself
        sims = sims[:20]
        self.assertEqual(list(zip(*sims))[0], list(zip(*sims2))[0])  # same doc ids
        self.assertTrue(np.allclose(list(zip(*sims))[1], list(zip(*sims2))[1]))  # close-enough dists
        # sim results should be in clip range if given
        clip_sims = \
            model.docvecs.most_similar(fire1, clip_start=len(model.docvecs) // 2, clip_end=len(model.docvecs) * 2 // 3)
        sims_doc_id = [docid for docid, sim in clip_sims]
        for s_id in sims_doc_id:
            self.assertTrue(len(model.docvecs) // 2 <= s_id <= len(model.docvecs) * 2 // 3)
        # tennis doc should be out-of-place among fire news
        self.assertEqual(model.docvecs.doesnt_match([fire1, tennis1, fire2]), tennis1)
        # fire docs should be closer than fire-tennis
        self.assertTrue(model.docvecs.similarity(fire1, fire2) > model.docvecs.similarity(fire1, tennis1))
        # keep training after save
        if keep_training:
            tmpf = get_tmpfile('gensim_doc2vec.tst')
            model.save(tmpf)
            loaded = doc2vec.Doc2Vec.load(tmpf)
            loaded.train(sentences, total_examples=loaded.corpus_count, epochs=loaded.iter)
def test_training(self):
    """Test doc2vec training."""
    corpus = DocsLeeCorpus()
    model = doc2vec.Doc2Vec(size=100, min_count=2, iter=20, workers=1)
    model.build_vocab(corpus)
    # Lee corpus has 300 documents; vectors are size=100
    self.assertEqual(model.docvecs.doctag_syn0.shape, (300, 100))
    model.train(corpus, total_examples=model.corpus_count, epochs=model.iter)
    self.model_sanity(model)
    # build vocab and train in one step; must be the same as above
    model2 = doc2vec.Doc2Vec(corpus, size=100, min_count=2, iter=20, workers=1)
    self.models_equal(model, model2)
def test_dbow_hs(self):
    """DBOW (dm=0) training with hierarchical softmax should give a sane model."""
    params = dict(dm=0, hs=1, negative=0, min_count=2, iter=20)
    self.model_sanity(doc2vec.Doc2Vec(list_corpus, **params))
def test_dmm_hs(self):
    """Test DM/mean doc2vec training."""
    # distributed-memory mode with context vectors averaged (dm_mean=1),
    # hierarchical softmax, no negative sampling
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=1, size=24, window=4,
        hs=1, negative=0, alpha=0.05, min_count=2, iter=20
    )
    self.model_sanity(model)
def test_dms_hs(self):
    """Test DM/sum doc2vec training."""
    # dm_mean=0 sums (rather than averages) the context vectors
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=1,
        negative=0, alpha=0.05, min_count=2, iter=20
    )
    self.model_sanity(model)
def test_dmc_hs(self):
    """Test DM/concatenate doc2vec training."""
    # dm_concat=1 concatenates context vectors instead of combining them
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_concat=1, size=24, window=4,
        hs=1, negative=0, alpha=0.05, min_count=2, iter=20
    )
    self.model_sanity(model)
def test_dbow_neg(self):
    """DBOW (dm=0) training with negative sampling should give a sane model."""
    params = dict(dm=0, hs=0, negative=10, min_count=2, iter=20)
    self.model_sanity(doc2vec.Doc2Vec(list_corpus, **params))
def test_dmm_neg(self):
    """Test DM/mean doc2vec training."""
    # negative sampling (hs=0, negative=10) instead of hierarchical softmax
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0,
        negative=10, alpha=0.05, min_count=2, iter=20
    )
    self.model_sanity(model)
def test_dms_neg(self):
    """Test DM/sum doc2vec training."""
    # dm_mean=0 sums the context vectors; negative sampling variant
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=0, size=24, window=4, hs=0,
        negative=10, alpha=0.05, min_count=2, iter=20
    )
    self.model_sanity(model)
def test_dmc_neg(self):
    """Test DM/concatenate doc2vec training."""
    # dm_concat=1 concatenates context vectors; negative sampling variant
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_concat=1, size=24, window=4, hs=0,
        negative=10, alpha=0.05, min_count=2, iter=20
    )
    self.model_sanity(model)
def test_parallel(self):
    """Doc2vec training should still produce a sane model with multiple workers."""
    if doc2vec.FAST_VERSION < 0:
        # plain NumPy implementation is too slow to exercise parallelism
        return
    big_corpus = utils.RepeatCorpus(DocsLeeCorpus(), 10000)
    for n_workers in (2, 4):
        self.model_sanity(doc2vec.Doc2Vec(big_corpus, workers=n_workers))
def test_deterministic_hs(self):
    """Identical RNG seeds must yield identical models (hierarchical softmax)."""
    def build():
        # single worker keeps update order deterministic
        return doc2vec.Doc2Vec(DocsLeeCorpus(), seed=42, workers=1)
    self.models_equal(build(), build())
def test_deterministic_neg(self):
    """Test doc2vec results identical with identical RNG seed."""
    # negative-sampling variant; single worker keeps update order deterministic
    model = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
    model2 = doc2vec.Doc2Vec(DocsLeeCorpus(), hs=0, negative=3, seed=42, workers=1)
    self.models_equal(model, model2)
def test_deterministic_dmc(self):
    """Test doc2vec results identical with identical RNG seed."""
    # bigger model, DM/concatenate mode, both hs and negative sampling active
    model = doc2vec.Doc2Vec(
        DocsLeeCorpus(), dm=1, dm_concat=1, size=24,
        window=4, hs=1, negative=3, seed=42, workers=1
    )
    model2 = doc2vec.Doc2Vec(
        DocsLeeCorpus(), dm=1, dm_concat=1, size=24,
        window=4, hs=1, negative=3, seed=42, workers=1
    )
    self.models_equal(model, model2)
def test_mixed_tag_types(self):
    """Ensure alternating int/string tags don't share indexes in doctag_syn0"""
    # each doc tagged with both its int index and its first word (a string)
    mixed_tag_corpus = [doc2vec.TaggedDocument(words, [i, words[0]]) for i, words in enumerate(raw_sentences)]
    model = doc2vec.Doc2Vec()
    model.build_vocab(mixed_tag_corpus)
    # int tags occupy plain offsets; string tags get separate doctag entries
    expected_length = len(sentences) + len(model.docvecs.doctags)  # 9 sentences, 7 unique first tokens
    self.assertEqual(len(model.docvecs.doctag_syn0), expected_length)
def models_equal(self, model, model2):
    """Assert two Doc2Vec models have identical vocab, weights and docvecs."""
    # check words/hidden-weights
    self.assertEqual(len(model.wv.vocab), len(model2.wv.vocab))
    self.assertTrue(np.allclose(model.wv.syn0, model2.wv.syn0))
    if model.hs:
        self.assertTrue(np.allclose(model.syn1, model2.syn1))
    if model.negative:
        self.assertTrue(np.allclose(model.syn1neg, model2.syn1neg))
    # check docvecs
    self.assertEqual(len(model.docvecs.doctags), len(model2.docvecs.doctags))
    self.assertEqual(len(model.docvecs.offset2doctag), len(model2.docvecs.offset2doctag))
    self.assertTrue(np.allclose(model.docvecs.doctag_syn0, model2.docvecs.doctag_syn0))
def test_delete_temporary_training_data(self):
    """Test doc2vec model after delete_temporary_training_data"""
    # exercise all four hs/negative combinations
    for i in [0, 1]:
        for j in [0, 1]:
            model = doc2vec.Doc2Vec(sentences, size=5, min_count=1, window=4, hs=i, negative=j)
            if i:
                self.assertTrue(hasattr(model, 'syn1'))
            if j:
                self.assertTrue(hasattr(model, 'syn1neg'))
            self.assertTrue(hasattr(model, 'syn0_lockf'))
            model.delete_temporary_training_data(keep_doctags_vectors=False, keep_inference=False)
            # NOTE(review): assertTrue(x, y) treats y as the failure *message*,
            # so these two only check truthiness — probably meant assertEqual.
            # Confirm the expected values before changing; assertEqual may fail.
            self.assertTrue(len(model['human']), 10)
            self.assertTrue(model.wv.vocab['graph'].count, 5)
            self.assertTrue(not hasattr(model, 'syn1'))
            self.assertTrue(not hasattr(model, 'syn1neg'))
            self.assertTrue(not hasattr(model, 'syn0_lockf'))
            self.assertTrue(model.docvecs and not hasattr(model.docvecs, 'doctag_syn0'))
            self.assertTrue(model.docvecs and not hasattr(model.docvecs, 'doctag_syn0_lockf'))
    # keeping doctag vectors and inference data must leave the model usable
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=1,
        negative=0, alpha=0.05, min_count=2, iter=20
    )
    model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)
    self.assertTrue(model.docvecs and hasattr(model.docvecs, 'doctag_syn0'))
    self.assertTrue(hasattr(model, 'syn1'))
    self.model_sanity(model, keep_training=False)
    model = doc2vec.Doc2Vec(
        list_corpus, dm=1, dm_mean=1, size=24, window=4, hs=0,
        negative=1, alpha=0.05, min_count=2, iter=20
    )
    model.delete_temporary_training_data(keep_doctags_vectors=True, keep_inference=True)
    self.model_sanity(model, keep_training=False)
    self.assertTrue(hasattr(model, 'syn1neg'))
def test_word_vec_non_writeable(self):
    """Vectors returned by a loaded KeyedVectors must be read-only."""
    kv = keyedvectors.KeyedVectors.load_word2vec_format(datapath('word2vec_pre_kv_c'))
    word_vector = kv['says']
    with self.assertRaises(ValueError):
        word_vector *= 0
@log_capture()
def testBuildVocabWarning(self, l):
    """Test if logger warning is raised on non-ideal input to a doc2vec model"""
    # plain strings instead of token lists — each "words" is a str, which
    # should trigger the build_vocab warning captured in `l`
    raw_sentences = ['human', 'machine']
    sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
    model = doc2vec.Doc2Vec()
    model.build_vocab(sentences)
    warning = "Each 'words' should be a list of words (usually unicode strings)."
    self.assertTrue(warning in str(l))
@log_capture()
def testTrainWarning(self, l):
    """Test if warning is raised if alpha rises during subsequent calls to train()"""
    raw_sentences = [['human'],
                     ['graph', 'trees']]
    sentences = [doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(raw_sentences)]
    model = doc2vec.Doc2Vec(alpha=0.025, min_alpha=0.025, min_count=1, workers=8, size=5)
    model.build_vocab(sentences)
    for epoch in range(10):
        model.train(sentences, total_examples=model.corpus_count, epochs=model.iter)
        model.alpha -= 0.002
        model.min_alpha = model.alpha
        if epoch == 5:
            # deliberately raise alpha mid-way to provoke the warning
            model.alpha += 0.05
    warning = "Effective 'alpha' higher than previous training cycles"
    self.assertTrue(warning in str(l))
def testLoadOnClassError(self):
    """Loading a doc2vec model on an instance must raise AttributeError."""
    with self.assertRaises(AttributeError):
        load_on_instance()
# endclass TestDoc2VecModel

if not hasattr(TestDoc2VecModel, 'assertLess'):
    # workaround for python 2.6, whose unittest.TestCase lacks assertLess
    def assertLess(self, a, b, msg=None):
        self.assertTrue(a < b, msg="%s is not less than %s" % (a, b))

    setattr(TestDoc2VecModel, 'assertLess', assertLess)
# following code is useful for reproducing paragraph-vectors paper sentiment experiments
class ConcatenatedDoc2Vec(object):
    """
    Concatenation of multiple models for reproducing the Paragraph Vectors paper.
    Models must have exactly-matching vocabulary and document IDs. (Models should
    be trained separately; this wrapper just returns concatenated results.)
    """

    def __init__(self, models):
        self.models = models
        if hasattr(models[0], 'docvecs'):
            self.docvecs = ConcatenatedDocvecs([m.docvecs for m in models])

    def __getitem__(self, token):
        # word vector = concatenation of each sub-model's vector for the token
        parts = [m[token] for m in self.models]
        return np.concatenate(parts)

    def infer_vector(self, document, alpha=0.1, min_alpha=0.0001, steps=5):
        inferred = [m.infer_vector(document, alpha, min_alpha, steps) for m in self.models]
        return np.concatenate(inferred)

    def train(self, *ignore_args, **ignore_kwargs):
        """No-op: the wrapped sub-models are trained individually."""
        pass
class ConcatenatedDocvecs(object):
    """Document-vector lookup that concatenates results across several models."""

    def __init__(self, models):
        self.models = models

    def __getitem__(self, token):
        vectors = [m[token] for m in self.models]
        return np.concatenate(vectors)
# Record for one labeled review: token list, tag list, dataset split, sentiment score
SentimentDocument = namedtuple('SentimentDocument', 'words tags split sentiment')
def read_su_sentiment_rotten_tomatoes(dirname, lowercase=True):
    """
    Read and return documents from the Stanford Sentiment Treebank
    corpus (Rotten Tomatoes reviews), from http://nlp.Stanford.edu/sentiment/

    Initialize the corpus from a given directory, where
    http://nlp.stanford.edu/~socherr/stanfordSentimentTreebank.zip
    has been expanded. It's not too big, so compose entirely into memory.

    Returns a list of SentimentPhrase records; entries that correspond to a
    full sentence carry a non-None sentence_id and a train/test/dev split.
    """
    logging.info("loading corpus from %s", dirname)

    # many mangled chars in sentences (datasetSentences.txt)
    chars_sst_mangled = [
        'à', 'á', 'â', 'ã', 'æ', 'ç', 'è', 'é', 'í',
        'í', 'ï', 'ñ', 'ó', 'ô', 'ö', 'û', 'ü'
    ]
    # the corpus stores these as UTF-8 bytes mis-decoded as latin1; map back
    sentence_fixups = [(char.encode('utf-8').decode('latin1'), char) for char in chars_sst_mangled]
    # more junk, and the replace necessary for sentence-phrase consistency
    sentence_fixups.extend([
        ('Â', ''),
        ('\xa0', ' '),
        ('-LRB-', '('),
        ('-RRB-', ')'),
    ])
    # only this junk in phrases (dictionary.txt)
    phrase_fixups = [('\xa0', ' ')]

    # sentence_id and split are only positive for the full sentences
    # read sentences to temp {sentence -> (id,split) dict, to correlate with dictionary.txt
    info_by_sentence = {}
    with open(os.path.join(dirname, 'datasetSentences.txt'), 'r') as sentences:
        with open(os.path.join(dirname, 'datasetSplit.txt'), 'r') as splits:
            next(sentences)  # legend
            next(splits)  # legend
            # izip: Python-2 itertools; the two files are line-aligned by id
            for sentence_line, split_line in izip(sentences, splits):
                (id, text) = sentence_line.split('\t')
                id = int(id)
                text = text.rstrip()
                for junk, fix in sentence_fixups:
                    text = text.replace(junk, fix)
                (id2, split_i) = split_line.split(',')
                assert id == int(id2)
                if text not in info_by_sentence:  # discard duplicates
                    info_by_sentence[text] = (id, int(split_i))

    # read all phrase text
    phrases = [None] * 239232  # known size of phrases
    with open(os.path.join(dirname, 'dictionary.txt'), 'r') as phrase_lines:
        for line in phrase_lines:
            (text, id) = line.split('|')
            for junk, fix in phrase_fixups:
                text = text.replace(junk, fix)
            phrases[int(id)] = text.rstrip()  # for 1st pass just string

    SentimentPhrase = namedtuple('SentimentPhrase', SentimentDocument._fields + ('sentence_id',))
    # add sentiment labels, correlate with sentences
    with open(os.path.join(dirname, 'sentiment_labels.txt'), 'r') as sentiments:
        next(sentiments)  # legend
        for line in sentiments:
            (id, sentiment) = line.split('|')
            id = int(id)
            sentiment = float(sentiment)
            text = phrases[id]
            words = text.split()
            if lowercase:
                words = [word.lower() for word in words]
            # full sentences get their (sentence_id, split); bare phrases get (None, 0)
            (sentence_id, split_i) = info_by_sentence.get(text, (None, 0))
            split = [None, 'train', 'test', 'dev'][split_i]
            phrases[id] = SentimentPhrase(words, [id], split, sentiment, sentence_id)

    assert len([phrase for phrase in phrases if phrase.sentence_id is not None]) == len(info_by_sentence)  # all
    # counts don't match 8544, 2210, 1101 because 13 TRAIN and 1 DEV sentences are duplicates
    assert len([phrase for phrase in phrases if phrase.split == 'train']) == 8531  # 'train'
    assert len([phrase for phrase in phrases if phrase.split == 'test']) == 2210  # 'test'
    assert len([phrase for phrase in phrases if phrase.split == 'dev']) == 1100  # 'dev'

    logging.info(
        "loaded corpus with %i sentences and %i phrases from %s",
        len(info_by_sentence), len(phrases), dirname
    )
    return phrases
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    # FAST_VERSION < 0 means the slow plain-NumPy code paths are in use
    logging.info("using optimization %s", doc2vec.FAST_VERSION)
    unittest.main()
|
"""Script that formats substitution matrices obtained from NCBI
This folder contains substitution matrices (mostly PAM and BLOSUM) used by
NCBI's BLAST tool. Data was obtained from
ftp://ftp.ncbi.nih.gov/blast/matrices/ on 2/14/16.
* PAM matrices - in two formats: PAM{n} and PAM{n}.cdi. N ranges from 10 to
500 in increments of 10 for the first. Scales range from ln(2)/2 for
PAM10 to ln(2)/7 for PAM500. The .cdi variants only exist for a few N
values and seem to be in units of 10ths of a bit.
* BLOSUM matrices - from BLOSUM30 to 100 in increments of 5. There's also a
BLOSUMN matrix which I am unsure about. Each has a .50 variant which is
the exact same file (by sha1 checksum).
* Nucleotide - NUC.4.4 looks useful, NUC.4.2 is non-rectangular?
* Identity - IDENTITY and MATCH can be replaced by
pyalign.matrix.identity_matrix.
* Others - DAYHOFF and GONNET seem to be slightly different PAM250 matrices.
Don't see these being useful.
"""
import os
from cStringIO import StringIO
from cPickle import Pickler, HIGHEST_PROTOCOL
import numpy as np
import pandas as pd
from pyalign import SubstitutionMatrix
ln2 = float(np.log(2))


def _pam_invbits(dist):
    """Scale (in inverse bits) used by NCBI for the PAM<dist> matrix file."""
    for limit, bits in ((160, 2), (260, 3), (340, 4), (400, 5), (460, 6)):
        if dist <= limit:
            return bits
    return 7


# Names of all PAM matrix files along with their mutation distances and scales
# in inverse bits: PAM10..PAM500 in steps of 10, followed by the .cdi variants
# (which are in units of 10ths of a bit).
pam_files = (
    [('PAM{}'.format(d), d, _pam_invbits(d)) for d in range(10, 501, 10)]
    + [('PAM{}.cdi'.format(d), d, 10) for d in (40, 80, 120, 160, 200, 250)]
)
# Names of all BLOSUM matrix files along with their percent values and scales
# in inverse bits (scales are irregular, so they are listed explicitly)
blosum_files = [
    ('BLOSUM30', 30, 5),
    ('BLOSUM35', 35, 4),
    ('BLOSUM40', 40, 4),
    ('BLOSUM45', 45, 3),
    ('BLOSUM50', 50, 3),
    ('BLOSUM55', 55, 3),
    ('BLOSUM60', 60, 2),
    ('BLOSUM62', 62, 2),
    ('BLOSUM65', 65, 2),
    ('BLOSUM70', 70, 2),
    ('BLOSUM75', 75, 2),
    ('BLOSUM80', 80, 3),
    ('BLOSUM85', 85, 2),
    ('BLOSUM90', 90, 2),
    ('BLOSUM100', 100, 3),
    ('BLOSUMN', None, 2)  # percent unknown for BLOSUMN (see module docstring)
]

# Names of additional matrix files in format (name, ncbi_file, attrs)
# (get_matrices/create_matrices read the second element as the file to open)
addl_files = [
    ('NUC', 'NUC.4.4', {'type': 'NUC'})
]
def parse_ncbi_matrix(lines):
    """Parses a matrix file in the format obtained from NCBI.

    Comment lines (leading '#') form the description; the remainder is a
    whitespace-separated table whose row labels must equal its column labels.

    Args:
        lines: iterable of text lines (e.g. an open file handle).

    Returns:
        a tuple of (matrix ndarray, list of symbols, description string)
    """
    # Buffer to store uncommented lines, re-parsed as a table below
    table_buffer = StringIO()
    # Store description as well
    desc_lines = []
    # Split lines into description/non-description
    for line in lines:
        if line.startswith('#'):
            # strip the '#' and at most one following space
            desc_line = line[1:]
            if desc_line.startswith(' '):
                desc_line = desc_line[1:]
            if desc_line:
                desc_lines.append(desc_line)
        elif line.strip():
            table_buffer.write(line)
    # Parse table. read_csv with a whitespace separator replaces the
    # deprecated pd.read_table (removed in pandas 2.0) — same parser, same
    # result: the extra leading field in data rows becomes the index.
    table_buffer.seek(0)
    table = pd.read_csv(table_buffer, sep=r'\s+')
    # Should have identical row/column labels
    assert table.columns.equals(table.index)
    return table.values, list(table.columns), ''.join(desc_lines).strip()
def get_matrices():
    """(name, ncbi_file, extra_attrs) for each matrix to be formatted."""
    result = list(addl_files)
    # PAM matrices: name encodes both distance and inverse-bits scale
    for fname, dist, bits in pam_files:
        result.append((
            'PAM{}_{}'.format(dist, bits),
            fname,
            {
                'type': 'PAM',
                'scale': ln2 / bits,
                'scale_invbits': bits,
                'dist': dist,
            },
        ))
    # BLOSUM matrices: file name doubles as the matrix name
    for fname, pct, bits in blosum_files:
        result.append((
            fname,
            fname,
            {
                'type': 'BLOSUM',
                'scale': ln2 / bits,
                'scale_invbits': bits,
                'percent': pct,
            },
        ))
    return result
def create_matrices(indir='.'):
    """Creates SubstitutionMatrix instances from NCBI matrices in directory.

    Also gives dict of matrix attributes by name.

    Args:
        indir: directory containing the raw NCBI matrix files.

    Returns:
        dict (matrices), dict (attributes)
    """
    matrices = dict()
    # Stores attributes for each matrix
    matrix_attrs = dict()
    # For each matrix
    for name, ncbi_file, extra_attrs in get_matrices():
        # Parse the file
        fpath = os.path.join(indir, ncbi_file)
        with open(fpath) as fh:
            values, symbols, description = parse_ncbi_matrix(fh)
        # Create the matrix object
        matrix = SubstitutionMatrix(symbols, values)
        matrices[name] = matrix
        # Attributes common to every matrix, then type-specific extras
        attrs = {
            'ncbi_file': ncbi_file,
            'description': description,
            'range': (np.min(values), np.max(values))  # (min score, max score)
        }
        attrs.update(extra_attrs)
        matrix_attrs[name] = attrs
    return matrices, matrix_attrs
def pickle_matrices(matrices, outdir='.'):
    """Pickles dictionary of matrices output by create_matrices.

    Each matrix is written to <outdir>/<name>.pickle.

    Args:
        matrices: dict mapping matrix name -> matrix object.
        outdir: destination directory (must exist).
    """
    # .items() behaves identically on Python 2 (the original .iteritems()
    # was Python-2-only) and keeps the code Python-3 ready.
    for name, matrix in matrices.items():
        fpath = os.path.join(outdir, name + '.pickle')
        # `with` guarantees the file handle is flushed and closed
        with open(fpath, 'wb') as fh:
            pickler = Pickler(fh, HIGHEST_PROTOCOL)
            pickler.dump(matrix)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 18 20:12:11 2020
@author: dougl
"""
import os
import pandas as pd
from utils import col_alpha_to_num, col_num_to_alpha
def parse_data(data, feature_cols=None, unique_col=None):
    ''' Ensures we're dealing with a pandas DataFrame, and cuts the frame down
    to only the specified data. If no unique_col argument is provided, a new
    column at 'A' will be created called 'ID'.

    Args:
        data (pd.DataFrame/str): Data to work with. Can be a DataFrame or a filepath to a .csv.
        feature_cols (str/list): Either a list of column strings like ['A', 'B', 'AB'], or 'all'.
        unique_col (str): Optional. The index of the column containing unique keys, to be omitted from any mathematical operations when feature_cols='all' is used.

    Returns:
        (pd.DataFrame): The trimmed DataFrame, with the unique column in 'A'.

    Raises:
        ValueError: if feature_cols is missing, or data is neither a path nor a DataFrame.
        AssertionError: if a path is given that does not exist or is not a .csv.
    '''
    if feature_cols is None:
        raise ValueError('Please provide either a list of feature column indices, '\
                         'such as ["B", "C", "AD"], or pass the string "all" to '\
                         'treat every column as numeric data. Pass a unique_col '\
                         'if you want to retain an ID-column, without it affecting '\
                         'feature calculations (usually column "A").')

    # Check the input data is a .csv file or DataFrame
    if type(data) == str:
        assert os.path.exists(data),\
            'Parsing data: File not found: "{}"'.format(data)
        assert data.lower().endswith('.csv'),\
            'Parsing data: Requires a .csv file'
        # dtype=object keeps every cell as-read (no numeric coercion)
        data = pd.read_csv(data, dtype=object)
    elif not isinstance(data, pd.DataFrame):
        raise ValueError('Parsing data: "data" arg is not a filepath or pd.DataFrame ({})'\
                         .format(type(data)))

    # If passed 'all' for feature_cols, figure out the column indices to use
    if feature_cols == 'all':
        feature_cols = [col_num_to_alpha(i) for i in list(range(len(data.columns)))]
        if unique_col is not None:
            feature_cols.remove(unique_col)

    # Shave the DataFrame into only salient data, the unique column forced to 'A'
    column_data_list = []

    # Fill the unique 'A' column
    if unique_col is None:
        # synthesize a string row-number ID column
        id_df = pd.DataFrame([str(i) for i in list(range(0, len(data)))], columns=['ID'], dtype=str)
        column_data_list.append(id_df['ID'])
    else:
        n = data.columns[col_alpha_to_num(unique_col)]
        # NOTE(review): id_df (str-converted copy) is built but the *original*
        # column data[n] is appended — the conversion is unused. Confirm
        # whether id_df[n] was intended here.
        id_df = pd.DataFrame([str(i) for i in list(data[n])], columns=[n], dtype=str)
        column_data_list.append(data[n])

    # Fill the numeric columns
    for feat_col in feature_cols:
        idx = col_alpha_to_num(feat_col)
        col_name = data.columns[idx]
        column_data_list.append(data[col_name])

    return pd.DataFrame(column_data_list, dtype=object).transpose()
|
"""
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import os.path
import xml.etree.ElementTree as ET
from programy.processors.processing import ProcessorLoader
from programy.config.brain import BrainConfiguration
from programy.mappings.denormal import DenormalCollection
from programy.mappings.gender import GenderCollection
from programy.mappings.maps import MapCollection
from programy.mappings.normal import NormalCollection
from programy.mappings.person import PersonCollection
from programy.mappings.predicates import PredicatesCollection
from programy.mappings.pronouns import PronounsCollection
from programy.mappings.properties import PropertiesCollection
from programy.mappings.sets import SetCollection
from programy.mappings.triples import TriplesCollection
from programy.parser.aiml_parser import AIMLParser
from programy.utils.services.service import ServiceFactory
from programy.utils.text.text import TextUtils
class Brain(object):
    """Holds the AIML parser plus every mapping collection and processor
    pipeline a bot needs in order to answer questions.

    All collections are loaded eagerly from the supplied configuration at
    construction time via :meth:`load`.
    """

    def __init__(self, configuration: BrainConfiguration):
        self._configuration = configuration
        self._aiml_parser = AIMLParser(stop_on_invalid=True)

        # Word/phrase mapping collections
        self._denormal_collection = DenormalCollection()
        self._normal_collection = NormalCollection()
        self._gender_collection = GenderCollection()
        self._person_collection = PersonCollection()
        self._person2_collection = PersonCollection()
        self._predicates_collection = PredicatesCollection()
        self._pronouns_collection = PronounsCollection()
        self._triples_collection = TriplesCollection()
        self._sets_collection = SetCollection()
        self._maps_collection = MapCollection()
        self._properties_collection = PropertiesCollection()

        # Pre/post processing pipelines
        self._preprocessors = ProcessorLoader()
        self._postprocessors = ProcessorLoader()

        self.load(self._configuration)

    @property
    def configuration(self):
        return self._configuration

    @property
    def aiml_parser(self):
        return self._aiml_parser

    @property
    def denormals(self):
        return self._denormal_collection

    @property
    def normals(self):
        return self._normal_collection

    @property
    def genders(self):
        return self._gender_collection

    @property
    def persons(self):
        return self._person_collection

    @property
    def person2s(self):
        return self._person2_collection

    @property
    def predicates(self):
        return self._predicates_collection

    @property
    def pronouns(self):
        return self._pronouns_collection

    @property
    def pronounds(self):
        # Deprecated misspelling kept for backward compatibility with
        # existing callers; prefer the `pronouns` property.
        return self._pronouns_collection

    @property
    def triples(self):
        return self._triples_collection

    @property
    def sets(self):
        return self._sets_collection

    @property
    def maps(self):
        return self._maps_collection

    @property
    def properties(self):
        return self._properties_collection

    @property
    def preprocessors(self):
        return self._preprocessors

    @property
    def postprocessors(self):
        return self._postprocessors

    def load(self, brain_configuration: BrainConfiguration):
        """Load AIML grammars, all mapping collections, and services."""
        self._aiml_parser.load_aiml(brain_configuration)
        self.load_collections(brain_configuration)
        self.load_services(brain_configuration)

    # Each _load_* helper follows the same pattern: load the collection from
    # its configured file if a setting is present, otherwise log a warning.

    def _load_denormals(self, brain_configuration):
        if brain_configuration.denormal is not None:
            total = self._denormal_collection.load_from_filename(brain_configuration.denormal)
            logging.info("Loaded a total of %d denormalisations", total)
        else:
            logging.warning("No configuration setting for denormal")

    def _load_normals(self, brain_configuration):
        if brain_configuration.normal is not None:
            total = self._normal_collection.load_from_filename(brain_configuration.normal)
            logging.info("Loaded a total of %d normalisations", total)
        else:
            logging.warning("No configuration setting for normal")

    def _load_genders(self, brain_configuration):
        if brain_configuration.gender is not None:
            total = self._gender_collection.load_from_filename(brain_configuration.gender)
            logging.info("Loaded a total of %d genderisations", total)
        else:
            logging.warning("No configuration setting for gender")

    def _load_persons(self, brain_configuration):
        if brain_configuration.person is not None:
            total = self._person_collection.load_from_filename(brain_configuration.person)
            logging.info("Loaded a total of %d persons", total)
        else:
            logging.warning("No configuration setting for person")

    def _load_person2s(self, brain_configuration):
        if brain_configuration.person2 is not None:
            total = self._person2_collection.load_from_filename(brain_configuration.person2)
            logging.info("Loaded a total of %d person2s", total)
        else:
            logging.warning("No configuration setting for person2")

    def _load_predicates(self, brain_configuration):
        if brain_configuration.predicates is not None:
            total = self._predicates_collection.load_from_filename(brain_configuration.predicates)
            logging.info("Loaded a total of %d predicates", total)
        else:
            logging.warning("No configuration setting for predicates")

    def _load_pronouns(self, brain_configuration):
        if brain_configuration.pronouns is not None:
            total = self._pronouns_collection.load_from_filename(brain_configuration.pronouns)
            logging.info("Loaded a total of %d pronouns", total)
        else:
            logging.warning("No configuration setting for pronouns")

    def _load_properties(self, brain_configuration):
        if brain_configuration.properties is not None:
            total = self._properties_collection.load_from_filename(brain_configuration.properties)
            logging.info("Loaded a total of %d properties", total)
        else:
            logging.warning("No configuration setting for properties")

    def _load_triples(self, brain_configuration):
        if brain_configuration.triples is not None:
            # BUG FIX: previously loaded the triples file into
            # self._properties_collection, leaving the triples collection
            # empty and corrupting the properties collection.
            total = self._triples_collection.load_from_filename(brain_configuration.triples)
            logging.info("Loaded a total of %d triples", total)
        else:
            logging.warning("No configuration setting for triples")

    def _load_sets(self, brain_configuration):
        if brain_configuration.set_files is not None:
            total = self._sets_collection.load(brain_configuration.set_files)
            logging.info("Loaded a total of %d sets files", total)
        else:
            logging.warning("No configuration setting for set files")

    def _load_maps(self, brain_configuration):
        if brain_configuration.map_files is not None:
            total = self._maps_collection.load(brain_configuration.map_files)
            logging.info("Loaded a total of %d maps files", total)
        else:
            logging.warning("No configuration setting for map files")

    def _load_preprocessors(self, brain_configuration):
        if brain_configuration.preprocessors is not None:
            total = self._preprocessors.load(brain_configuration.preprocessors)
            logging.info("Loaded a total of %d pre processors", total)
        else:
            logging.warning("No configuration setting for pre processors")

    def _load_postprocessors(self, brain_configuration):
        if brain_configuration.postprocessors is not None:
            total = self._postprocessors.load(brain_configuration.postprocessors)
            logging.info("Loaded a total of %d post processors", total)
        else:
            logging.warning("No configuration setting for post processors")

    def load_collections(self, brain_configuration):
        """Load every mapping collection from the configuration."""
        self._load_denormals(brain_configuration)
        self._load_normals(brain_configuration)
        self._load_genders(brain_configuration)
        self._load_persons(brain_configuration)
        self._load_person2s(brain_configuration)
        self._load_predicates(brain_configuration)
        self._load_pronouns(brain_configuration)
        self._load_properties(brain_configuration)
        self._load_triples(brain_configuration)
        self._load_sets(brain_configuration)
        self._load_maps(brain_configuration)
        self._load_preprocessors(brain_configuration)
        self._load_postprocessors(brain_configuration)

    def load_services(self, brain_configuration):
        """Pre-load the external services declared in the configuration."""
        ServiceFactory.preload_services(brain_configuration.services)

    def pre_process_question(self, bot, clientid, question):
        """Run the raw question through the pre-processor pipeline."""
        return self.preprocessors.process(bot, clientid, question)

    def ask_question(self, bot, clientid, sentence) -> str:
        """Match `sentence` against the AIML grammar and resolve its template.

        Uses the conversation's current `topic` predicate and the previous
        response (`that`) as match context, each defaulting to '*'.
        Returns the resolved response string, or None when nothing matches.
        """
        conversation = bot.get_conversation(clientid)

        topic_pattern = conversation.predicate("topic")
        if topic_pattern is None:
            logging.debug("No Topic pattern default to [*]")
            topic_pattern = "*"
        else:
            logging.debug("Topic pattern = [%s]", topic_pattern)

        try:
            that_question = conversation.nth_question(2)
            that_sentence = that_question.current_sentence()
            # If the last response was valid, i.e not none and not empty string, then use
            # that as the that_pattern, otherwise we default to '*' as pattern
            if that_sentence.response is not None and that_sentence.response != '':
                that_pattern = TextUtils.strip_all_punctuation(that_sentence.response)
                logging.debug("That pattern = [%s]", that_pattern)
            else:
                logging.debug("That pattern, no response, default to [*]")
                that_pattern = "*"
        except Exception:
            logging.debug("No That pattern default to [*]")
            that_pattern = "*"

        match_context = self._aiml_parser.match_sentence(bot, clientid,
                                                         sentence,
                                                         topic_pattern=topic_pattern,
                                                         that_pattern=that_pattern)
        if match_context is not None:
            template_node = match_context.template_node()
            logging.debug("AIML Parser evaluating template [%s]", template_node.to_string())
            response = template_node.template.resolve(bot, clientid)
            return response
        return None

    def post_process_response(self, bot, clientid, response: str):
        """Run the resolved response through the post-processor pipeline."""
        return self.postprocessors.process(bot, clientid, response)

    def dump_tree(self):
        """Dump the AIML pattern tree to the log (debugging aid)."""
        self._aiml_parser.pattern_parser.root.dump(tabs="")

    def write_learnf_to_file(self, bot, clientid, pattern, topic, that, template):
        """Append a learnf category to the learnf AIML file, creating it if absent."""
        learnf_path = "%s/learnf%s" % (self._configuration.aiml_files.files, self._configuration.aiml_files.extension)
        logging.debug("Writing learnf to %s", learnf_path)

        if os.path.isfile(learnf_path) is False:
            # seed an empty aiml document so ET.parse below succeeds
            with open(learnf_path, "w+") as file:
                file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
                file.write('<aiml>\n')
                file.write('</aiml>\n')

        tree = ET.parse(learnf_path)
        root = tree.getroot()

        # Add our new element
        child = ET.Element("category")
        child.append(pattern)
        child.append(topic)
        child.append(that)
        child.append(template.xml_tree(bot, clientid))
        root.append(child)

        tree.write(learnf_path, method="xml")
|
#!/usr/bin/env python

from time import sleep

import sys
# ROOT inspects sys.argv at import time; '-b' forces batch (headless) mode
# so no GUI windows are opened — must be appended before `import ROOT`.
sys.argv.append( '-b' )

import tutils
import ROOT as r
import IPython
import argparse
import os
import fnmatch
from configobj import ConfigObj
import eval_string
from tqdm import tqdm
from tabulate import tabulate
from string import atoi
from string import atof
def dump_example():
    """Write an annotated example configuration to ./tdraw_example.cfg."""
    # NOTE: the heredoc below is written verbatim to the file — do not edit
    # its content without updating quick_check_section's expected options.
    sexample = '''
[options]
libs =

[histogram]
# will draw only if varexp defined (here or in the parent tree)
input_dir =
active = True
output_file = default_output.root
input_file = job3/Tree_AnalysisResults.root
tree_name = t
varexp = muons.Phi()
selection =
option = e
nentries =
firstentry =
x = -PI,PI
nbinsx = 100
x_title = '#varphi (rad)'
y_title = counts
title = muons phi
name = muons_phi
[[another]]
selection = (pt>10)
[[another1]]
selection = +(pt<20)

[special]
# this will copy all the features of the [another]
# but change only the one specified here (note: copy IS RECURSIVE - will copy tree of sections)
copy = another
nbinsx = 20

[histogram_from_dir]
active = True
output_file = +_output
input_file = Tree_AnalysisResults.root
input_dir = .
tree_name = t
varexp = muons.Phi()
selection =
option = e
nentries =
firstentry =
x = -PI,PI
nbinsx = 2*PI*11
x_title = '#varphi (rad)'
y_title = counts
title = muons phi
name = muons_phi
    '''
    with open('tdraw_example.cfg', 'w') as f:
        print >> f, sexample
    print '[i] tdraw_example.cfg written.'
def get_value(s, op=None, vdefault=None):
    """Evaluate *s* as a numeric expression and optionally coerce the result.

    s: value to evaluate; non-strings are first formatted to a string so the
       NumericStringParser can handle them.
    op: optional coercion - `int` applies round-half-up semantics, `bool`
        plain truthiness conversion.
    vdefault: value returned when evaluation fails; when None, the failure is
        reported on stderr and 0 (the initial retval) is returned instead.
    """
    if type(s) != str:
        s = '{}'.format(s)
    retval = 0
    try:
        np = eval_string.NumericStringParser()
        retval = np.eval(s)
    except:
        # evaluation failed: either report the bad input or fall back silently
        if vdefault is None:
            print >> sys.stderr, '[e] unable to convert to a value:[',s,']',type(s), len(s)
        else:
            retval = vdefault
    if op != None:
        if op == int:
            # int() truncates, so add 1 when the fractional part exceeds 0.5
            # (round-half-up to the nearest integer)
            rest = retval - op(retval)
            if rest > 0.5:
                rest = int(1)
            else:
                rest = 0
            retval = op(retval) + rest
        if op == bool:
            retval = op(retval)
    return retval
def find_files(rootdir='.', pattern='*'):
    """Recursively collect paths of files under *rootdir* whose basename
    matches the fnmatch *pattern*."""
    matches = []
    for dirpath, _dirnames, filenames in os.walk(rootdir):
        for fname in filenames:
            if fnmatch.fnmatch(fname, pattern):
                matches.append(os.path.join(dirpath, fname))
    return matches
def quick_check_section(s, sname):
    """Verify that config section *s* defines every expected option.

    Missing options are reported on stderr (the section name *sname* is only
    used for the message).  Returns False when at least one option is absent,
    True otherwise.  Options may be blank, but must be present.
    """
    once_per_section = 0
    opts = ['active', 'output_file', 'input_file', 'input_dir', 'tree_name', 'varexp', 'selection', 'option', 'nentries', 'firstentry', 'x', 'nbinsx', 'x_title', 'y_title', 'title', 'name']
    retval = True
    for o in opts:
        try:
            s[o]
        except:
            print >> sys.stderr, '[e] option [', o, '] missing in section [', sname, ']'
            # print the hint only once per section, not once per missing key
            if once_per_section == 0:
                once_per_section = 1
                print ' note: some options can be blank but present anyhow'
            retval = False
    return retval
def section_has_setting(what, section, recursive=True):
    """Look up setting *what* in *section*, optionally walking up the
    section's parent chain (configobj-style: the top-level section has a
    falsy parent name).  Returns the value, or None when not found."""
    try:
        value = section[what]
    except:
        # missing key (or malformed section): treat as not set here
        value = None
    if value is not None:
        return value
    if recursive is True and section.parent.name:
        return section_has_setting(what, section.parent, recursive)
    return None
class TDrawEntry(object):
    """One histogram-drawing job built from a configobj section.

    Resolves every draw parameter (varexp, selection, binning, titles, I/O
    files) from the section, falling back to the parent sections and then to
    hard-coded defaults.  Selections starting with '+' are ANDed with the
    parent's selection.
    """
    def __init__(self, section):
        # attribute names that participate in the [special] copy mechanism
        self.fields = ['name', 'title', 'active', 'input_dir',
                       'input_file', 'tree_name', 'varexp',
                       'selection', 'nentries', 'firstentry',
                       'x', 'nbinsx', 'x_title', 'y_title',
                       'option', 'output_file']
        self.section = section
        self.parents = self.get_parents()
        self.title = self.setting('title', section, '')
        self.active = get_value(str(self.setting('active', section, True)), bool, 1)
        self.input_dir = self.setting('input_dir', section, '')
        if '$' in self.input_dir:
            # allow environment variables in the input directory
            self.input_dir = os.path.expandvars(self.input_dir)
        self.input_file = self.setting('input_file', section, '')
        self.output_file = self.setting('output_file', section, 'tdraw_out.root')
        self.tree_name = self.setting('tree_name', section, 't')
        self.varexp = self.setting('varexp', section, '')
        self.selection = self.setting('selection', section, '')
        self.option = self.setting('option', section, 'e')
        self.nentries = self.setting('nentries', section, 1000000000)
        self.firstentry = self.setting('firstentry', section, 0)
        self.nbinsx = self.setting('nbinsx', section, 10)
        self.nbinsy = self.setting('nbinsy', section, 10)
        self.x_title = self.setting('x_title', section, 'default x title')
        self.y_title = self.setting('y_title', section, 'default y title')
        self.name = self.make_name(section)  # section.name
        self.x = []
        # re-resolve selection with the '+'-prefix parent-chaining rule
        self.selection = self.get_selection(section)
        self.x.append(get_value(self.setting('x', section, [-1, 1])[0], float))
        self.x.append(get_value(self.setting('x', section, [-1, 1])[1], float))
        self.y = []
        self.y.append(get_value(self.setting('y', section, [-1, 1])[0], float))
        self.y.append(get_value(self.setting('y', section, [-1, 1])[1], float))
        if not self.title:
            # self.title = self.name
            if len(self.selection) > 1:
                self.title = '{} w/ {}'.format(self.varexp, self.selection)
            else:
                self.title = '{}'.format(self.varexp)
    def copy_fields(self, t):
        """Copy every field listed in self.fields from entry *t* onto self."""
        for f in self.fields:
            self.__setattr__(f, t.__getattribute__(f))
        if len(self.title) < 1:
            if len(self.selection) > 1:
                self.title = '{} w/ {}'.format(self.varexp, self.selection)
            else:
                self.title = '{}'.format(self.varexp)
    def get_selection(self, section):
        """Resolve the selection string; a leading '+' means "AND with the
        parent section's selection"."""
        sel = self.setting('selection', section, '')
        if len(sel) > 0:
            if sel[0] == '+':
                if len(sel) > 1:
                    if len(self.get_selection(section.parent)) > 0:
                        sel = self.get_selection(section.parent) + ' && ' + sel[1:]
                    else:
                        sel = sel[1:]
                else:
                    # a bare '+' simply inherits the parent selection
                    sel = self.get_selection(section.parent)
        return sel
    def is_iterable(self, o):
        """Return True when *o* supports iteration (including strings)."""
        retval = False
        try:
            iter(o)
            retval = True
        except TypeError:
            retval = False
        return retval
    def _setting(self, what, section):
        """Raw lookup of *what*, recursing into parent sections; None if absent."""
        retval = None
        try:
            retval = section[what]
        except:
            # check the parent whether setting exists
            retval = None
        if retval is None:
            if section.parent.name:
                retval = self._setting(what, section.parent)
        return retval
    def _setting_self(self, what, section):
        """Raw lookup of *what* in this section only (no parent fallback)."""
        retval = None
        try:
            retval = section[what]
        except:
            retval = None
        return retval
    def setting(self, what, section, vdefault):
        """Look up *what* and coerce it to the type of *vdefault*.

        List defaults cause comma-splitting of string values and element-wise
        coercion to the type of vdefault[0]; scalar defaults coerce the whole
        value via get_value.
        """
        retval = self._setting(what, section)
        if retval is None:
            if vdefault is None:
                retval = ''
            else:
                retval = vdefault
        else:
            if vdefault is None:
                pass
            else:
                if self.is_iterable(vdefault):
                    if type(vdefault) == str:
                        pass
                    else:
                        if type(retval) == str:
                            retval = retval.split(',')
                        if self.is_iterable(vdefault) and len(vdefault) > 0:
                            if type(vdefault[0]) == int:
                                retval = [int(get_value(x, int, vdefault)) for x in retval]
                            if type(vdefault[0]) == float:
                                retval = [float(get_value(x, float, vdefault)) for x in retval]
                            if type(vdefault[0]) == bool:
                                retval = [bool(get_value(x, bool, vdefault)) for x in retval]
                else:
                    if type(vdefault) == int:
                        retval = int(get_value(retval, int, vdefault))
                    if type(vdefault) == float:
                        retval = float(get_value(retval, float, vdefault))
                    if type(vdefault) == bool:
                        retval = bool(get_value(retval, bool, vdefault))
        return retval
    def make_name(self, section):
        """Build a unique name by joining all ancestor section names with '_'."""
        s = section
        name = [section.name]
        while s:
            if s.parent.name:
                name.append(s.parent.name)
            else:
                break
            s = s.parent
        name.reverse()
        return '_'.join(name)
    def get_parents(self):
        """Space-joined chain of ancestor section names (root first)."""
        s = self.section
        name = [self.section.name]
        while s:
            if s.parent.name:
                name.append(s.parent.name)
            else:
                break
            s = s.parent
        name.reverse()
        return ' '.join(name)
    def row_full(self):
        # full tabulate row; keep in sync with row_head_full
        return [self.name, self.title, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection, self.nentries, self.firstentry, str(self.x), self.nbinsx, self.x_title, self.y_title, self.option, self.output_file]
    def row_head_full(self):
        return ['name', 'title', 'active', 'input_dir', 'input_file', 'tree_name', 'varexp', 'selection', 'nentries', 'firstentry', 'x-range', 'nbinsx', 'x_title', 'y_title', 'option', 'output_file']
    def row_more(self):
        return [self.val_and_type(x) for x in [self.name, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection, self.x, self.nentries, self.option, self.output_file]]
    def row_head_more(self):
        return ['name', 'active', 'in_dir', 'in_file', 'tree', 'varexp', 'sel.', 'x-range', 'NE', 'opt', 'output_file']
    def row(self):
        return [self.val_and_type(x) for x in [self.name, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection]]
    def row_head(self):
        return ['name', 'active', 'dir', 'in_file', 'tree', 'varexp', 'sel.']
    def row_commented(self):
        # row variant prefixed with '#' so the dump can be re-read as config
        return [x for x in ['#', self.name, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection]]
    def row_head_commented(self):
        return ['#', 'name', 'active', 'dir', 'in_file', 'tree', 'varexp', 'sel.']
    def val_and_type(self, x):
        """Quote strings so empty values stay visible in the table."""
        if type(x) == str:
            return '"{}"'.format(x)
        else:
            return str(x)
    def __repr__(self):
        return self.parents + '\n' + ' | '.join([self.val_and_type(x) for x in [self.name, self.title, self.active, self.input_dir, self.input_file, self.tree_name, self.varexp, self.selection, self.nentries, self.firstentry, self.x, self.nbinsx, self.x_title, self.y_title, self.option, self.output_file]])
class TDrawConfig(object):
def __init__(self, fname, opts=None):
self.fname = fname
self.config = ConfigObj(fname, raise_errors=True)
self.recreate = False
self.clean = True
if opts:
self.recreate = opts.recreate
self.clean = opts.clean
self.cleaned_files = []
self.entries = []
self.copies = []
self.process()
def process_section(self, section):
if section.name == 'config':
return
if len(section.sections):
for s in section.sections:
self.process_section(section[s])
if section_has_setting('varexp', section, recursive=True):
tde = TDrawEntry(section)
self.entries.append(tde)
else:
if self.is_copy(section):
self.process_copy(section)
else:
if section_has_setting('varexp', section, recursive=True):
tde = TDrawEntry(section)
self.entries.append(tde)
def is_copy(self, s):
try:
if len(s['copy']) > 0:
return True
except:
return False
def process_copy(self, s):
scopy = s['copy']
copy_names = []
if type(scopy) is str:
copy_names.append(scopy)
else:
for scp in scopy:
copy_names.append(scp)
#print 'to copy..',copy_names
for scopy in copy_names:
model = TDrawEntry(s)
current_entries = list(self.entries)
for se in current_entries:
docopy = False
#if type(se.parents) is str:
# if scopy in se.parents.split(' '):
# docopy = True
#else:
# if scopy in se.parents:
# docopy = True
if scopy == se.name[:len(scopy)]:
docopy = True
#print 'copy: ', scopy, se.name, docopy, type(se.parents), se.parents
if docopy:
# print '[i] use for copy:', se.name
newtde = TDrawEntry(se.section)
newtde.copy_fields(se)
#newtde.name = se.name
#newtde.parents = se.parents
for sf in model.fields:
setting = model._setting_self(sf, model.section)
if setting:
if sf == 'selection':
if setting[0] == '+':
if len(setting.strip()) > 1:
setting = '({}) && ({})'.format(newtde.selection, setting[1:])
newtde.__setattr__(sf, setting)
newtde.parents = '{} {}'.format(model.name, newtde.parents)
newtde.name = '{}_{}'.format(model.name, newtde.name)
newtde.title = '{} {}'.format(newtde.title, s.name)
#print 'new name:', newtde.name
#print
#self.copies.append(newtde)
self.entries.append(newtde)
def load_lib(self, libpath):
#sexplib = r.gSystem.ExpandPathName(libpath.strip())
sexplib = r.gSystem.DynamicPathName(libpath.strip())
sexplib_lib = os.path.basename(sexplib)
sexplib_dir = os.path.dirname(sexplib)
sexplib_fullpath = os.path.join(sexplib_dir, sexplib_lib)
#s = r.TString(sexplib_fullpath)
#sp = r.gSystem.FindDynamicLibrary(s)
#print sp
print '[i] loading', sexplib_fullpath
r.gSystem.AddDynamicPath(sexplib_dir)
retval = r.gSystem.Load(sexplib_lib)
print ' status', retval
def process(self):
for s in self.config.sections:
if s == 'options':
try:
slibs = self.config[s]['libs']
if type(slibs) == list:
for slib in slibs:
self.load_lib(slib)
else:
self.load_lib(slibs)
except:
pass
continue
#if self.is_copy(self.config[s]):
# #print '[i]', s, 'is a copy'
# continue
self.process_section(self.config[s])
# now add copies
# for s in self.config.sections:
# if s != 'options':
# if self.is_copy(self.config[s]):
# self.process_copy(s)
#for e in self.copies:
# self.entries.append(e)
for e in self.entries:
if len(e.input_file) < 1:
e.active = False
if len(e.output_file) < 1:
e.active = False
if len(e.varexp) < 1:
e.active = False
def __repr__(self):
#return '\n'.join(['[i] {} {}'.format(i, str(s)) for i,s in enumerate(self.entries)])
return tabulate([e.row() for e in self.entries], headers=self.entries[0].row_head())
def tab_comment(self):
print tabulate([e.row_commented() for e in self.entries], headers=self.entries[0].row_head_commented(), tablefmt='plain')
#print tabulate([e.row_commented() for e in self.entries])
def dump_class_config(self, fout):
outs = sys.stdout
sys.stdout = fout
self.tab_comment()
for e in self.entries:
print e.name,'=',e.name
print '{}_file = {}'.format(e.name, e.output_file)
print '{}_title = {}'.format(e.name, e.title)
print '{}_varexp = {}'.format(e.name, e.varexp)
print '{}_selection = {}'.format(e.name, e.selection)
print 'histograms = {}'.format(','.join([e.name for e in self.entries]))
print 'files = {}'.format(','.join([e.output_file for e in self.entries]))
print 'titles = {}'.format(','.join([e.title for e in self.entries]))
sys.stdout = outs
def run(self):
print '[i] run...'
cleaned = []
errors = []
errors.append('[e] errors:')
if len(self.entries)<1:
print '[i] no entries?'
return
pbare = tqdm(self.entries, desc=' entry')
for e in pbare:
# pbare.set_description(' {}:{}'.format(pbare.n, e.name))
# pbare.update(0)
if not e.active:
continue
foutname = e.output_file
if not foutname:
foutname = '+out'
if e.input_dir:
input_files = find_files(e.input_dir, pattern=e.input_file)
#print ' e.input_dir:',e.input_dir, 'input_file:',e.input_file
else:
input_files = [e.input_file]
pbar = tqdm(input_files, desc=' file')
for fn in pbar:
ifn = input_files.index(fn)
#pbar.set_description(' file #{}'.format(pbar.n))
sfn = fn
if len(fn) > 40:
sfn = fn[:18] + '..' + fn[len(fn)-20:]
if foutname[0] == '+':
sfoutname = fn.replace('.root', foutname[1:].replace('.root', '') + '.root')
else:
if (len(input_files) > 1):
sfoutname = foutname.replace('.root', '_{}.root'.format(ifn))
else:
sfoutname = foutname
#if sfoutname in cleaned:
# pbar.set_description(' {} : {}'.format(e.name, sfn))
#else:
# if self.clean:
# pbar.set_description(' {} : (c:{}) {}'.format(e.name, sfoutname, sfn))
# else:
# pbar.set_description(' {} : (o:{}) {}'.format(e.name, sfoutname, sfn))
fin = r.TFile(fn)
if not fin:
continue
errors.append('[e] file {} unable to open'.format(fn))
dopt = e.option
if 'norange' in dopt:
hstring = 'htmp'
dopt = e.option.replace('norange', '')
else:
# check if drawing in 2D
_varexp_tmp = e.varexp.replace('::', '__static__')
# if ':' in e.varexp:
if ':' in _varexp_tmp:
hstring = 'htmp({0},{1},{2},{3},{4},{5})'.format(e.nbinsx, e.x[0], e.x[1], e.nbinsy, e.y[0], e.y[1])
else:
hstring = 'htmp({0},{1},{2})'.format(e.nbinsx, e.x[0], e.x[1])
#print e.name, dopt, e.option
t = fin.Get(e.tree_name)
hout = None
if t:
# print e.varexp, e.selection, e.option, e.nentries, e.firstentry
nentr = t.Draw(e.varexp + '>>{}'.format(hstring), e.selection, dopt, e.nentries, e.firstentry)
# print '[i] number of entries drawn:',nentr
hout = r.gDirectory.Get('htmp')
if hout:
hout.SetDirectory(0)
hout.SetName(e.name)
hout.SetTitle(e.title)
hout.GetXaxis().SetTitle(e.x_title)
hout.GetYaxis().SetTitle(e.y_title)
else:
errors.append('[e] histogram {} does not exist; tree {} Nentries={}'.format('htmp', t.GetName(), t.GetEntries()))
else:
errors.append('[e] tree {} not found - file {}'.format(e.tree_name, fn))
continue
if hout:
if self.clean is True:
if sfoutname in cleaned:
pass
else:
# print '[i] clean', sfoutname, 'requested'
try:
os.remove(sfoutname)
except:
pass
if sfoutname not in cleaned:
cleaned.append(sfoutname)
fout = r.TFile(sfoutname, 'UPDATE')
fout.cd()
hout.Write()
fout.Purge()
fout.Close()
fin.Close()
else:
errors.append('[e] output histogram {} {} not made'.format(e.name, hstring))
print
print '[i] output files:'
for fn in cleaned:
print ' '+fn
if len(errors) > 1:
for i, er in enumerate(errors):
if i > 0:
print er.replace('[e] ', ' ')
else:
print er
print '[i] done.'
def tdraw_from_file(fname, recreate=False, clean_first=False):
    """Legacy driver (used via --old): draw each config section directly.

    fname: path to the config file; recreate: open outputs with RECREATE
    instead of UPDATE; clean_first: truncate each output file once before
    the first write.
    """
    cleaned = []
    smode = 'UPDATE'
    if recreate == True:
        smode = 'RECREATE'
    if fname == None:
        return
    print '[i] file write mode is:',smode
    print '[i] config file:', fname
    config = ConfigObj(fname, raise_errors = True)
    for s in config.sections:
        if s == 'options':
            # [options] libs may list ROOT libraries to load up front
            try:
                slibs = config[s]['libs']
                if type(slibs) == list:
                    for slib in slibs:
                        sexplib = r.gSystem.ExpandPathName(slib.strip())
                        print '[i] loading',sexplib
                        r.gSystem.Load(sexplib)
                else:
                    sexplib = r.gSystem.ExpandPathName(slibs)
                    print '[i] loading',sexplib
                    r.gSystem.Load(sexplib)
            except:
                pass
            continue
        if quick_check_section(config[s], s) == False:
            continue
        if get_value(config[s]['active']) == 0:
            continue
        print '[i] section [',s,']'
        input_fname = config[s]['input_file']
        foutname = config[s]['output_file']
        if not foutname:
            foutname = '+out'
        sdir = config[s]['input_dir']
        if sdir:
            input_files = find_files(sdir, pattern=input_fname)
            print ' sdir is:',sdir
        else:
            input_files = [input_fname]
        print ' tdraw:', config[s]['name'], ';'.join([config[s]['title'], config[s]['x_title'], config[s]['y_title']])
        pbar = tqdm(input_files)
        for fn in pbar:
            # pbar.set_description(' processing file: %s' % fn)
            nchars = 0  # unused; kept for compatibility
            sfn = fn
            if len(fn) > 40:
                # shorten long paths for the progress-bar label
                sfn = fn[:18] + '..' + fn[len(fn)-20:]
            pbar.set_description(' {} : {}'.format(s, sfn))
            if foutname[0] == '+':
                # '+suffix' output name: derive from the input file name
                sfoutname = fn.replace('.root', foutname[1:] + '.root')
            else:
                sfoutname = foutname
            #print ' output:',sfoutname
            fin = r.TFile(fn)
            if not fin:
                continue
            hstring = 'htmp({0},{1},{2})'.format(int(get_value(config[s]['nbinsx'])), get_value(config[s]['x'][0]), get_value(config[s]['x'][1]))
            t = fin.Get(config[s]['tree_name'])
            if t:
                #t.MakeClass('Correlations')
                nentries = config[s]['nentries']
                if not nentries:
                    nentries = '1000000000'
                firstentry = config[s]['firstentry']
                if not firstentry:
                    firstentry = '0'
                t.Draw(config[s]['varexp'] + '>>{}'.format(hstring), config[s]['selection'], config[s]['option'], int(get_value(nentries)), int(get_value(firstentry)))
                hout = r.gDirectory.Get('htmp')
                hout.SetDirectory(0)
                hout.SetName(config[s]['name'])
                hout.SetTitle(config[s]['title'])
                hout.GetXaxis().SetTitle(config[s]['x_title'])
                hout.GetYaxis().SetTitle(config[s]['y_title'])
                if clean_first == True:
                    if sfoutname in cleaned:
                        pass
                    else:
                        # truncate the output once, then reopen in smode below
                        fout = r.TFile(sfoutname, 'recreate')
                        fout.Close()
                        cleaned.append(sfoutname)
                fout = r.TFile(sfoutname, smode)
                fout.cd()
                hout.Write()
                fout.Purge()
                fout.Close()
                # NOTE(review): fin is only closed when the tree was found;
                # on the missing-tree path the input file is leaked - confirm
                # this is acceptable for this legacy driver.
                fin.Close()
        print ' done.'
if __name__=="__main__":
    # Command-line driver: parse options, then process each config file given.
    parser = argparse.ArgumentParser(description='execute tdraw based on the config file', prog=os.path.basename(__file__))
    #parser.add_argument('-w', '--write', help='dump the contents', action='store_true')
    #parser.add_argument('-f', '--fname', help='file name to operate on', type=str)
    #parser.add_argument('-r', '--read', help='read a file', type=str)
    parser.add_argument('-b', '--batch', help='batchmode - do not end with IPython prompt', action='store_true')
    parser.add_argument('-i', '--ipython', help='end with IPython prompt', action='store_true')
    parser.add_argument('-g', '--example', help='dump an example file and exit', action='store_true')
    parser.add_argument('--recreate', help='write files with "recreate" instead of "update"', action='store_true')
    parser.add_argument('--clean', help='remove output file - once before start...', action='store_true')
    parser.add_argument('fname', type=str, nargs='*')
    parser.add_argument('--old', help='old implementation', action='store_true')
    parser.add_argument('--test', help='show what we get from the config...', action='store_true')
    parser.add_argument('--configobj', help='show what we get from the config...', action='store_true')
    args = parser.parse_args()
    if args.example:
        dump_example()
        sys.exit(0)
    tutils.setup_basic_root()
    if args.fname:
        tc = r.TCanvas('ctmp', 'ctmp')
        for fn in args.fname:
            tc.cd()
            if args.old:
                # legacy per-section driver
                tdraw_from_file(fn, args.recreate, args.clean)
            else:
                cfg = TDrawConfig(fn, args)
                if args.configobj:
                    # NOTE(review): dump_class_config is called without a file
                    # argument here - confirm the method provides a default
                    # output stream, otherwise this call raises TypeError.
                    cfg.dump_class_config()
                else:
                    print cfg
                    if not args.test:
                        cfg.run()
                        # write the summary next to the config file
                        fconfobj = fn.replace('.cfg', '_out.confobj')
                        with open(fconfobj, 'w') as f:
                            cfg.dump_class_config(f)
                        print '[i]',fconfobj,'written.'
    if args.ipython:
        IPython.embed()
|
import inspect
from inspect import Parameter
import logging
from ray.utils import is_cython
# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# This dummy type is also defined in ArgumentsBuilder.java. Please keep it
# synced.
DUMMY_TYPE = b"__RAY_DUMMY__"
def get_signature(func):
    """Return the ``inspect.Signature`` of *func*.

    Cython functions are handled by copying the relevant dunder attributes
    onto a throwaway plain-Python function and inspecting that instead. This
    is somewhat brittle (it relies on inspect internals staying stable), but
    Cython offers no direct compatibility with the inspect module.

    Args:
        func: The function whose signature should be checked. May be a
            Cython function or instance method.

    Returns:
        An ``inspect.Signature`` with the parameter names and defaults.

    Raises:
        TypeError: If the (Cython) function lacks the attributes needed to
            reconstruct a signature.
    """
    if is_cython(func):
        attrs = [
            "__code__", "__annotations__", "__defaults__", "__kwdefaults__"
        ]
        # Guard clause: without these attributes we cannot fake a signature.
        if not all(hasattr(func, attr) for attr in attrs):
            raise TypeError(
                f"{func!r} is not a Python function we can process")
        cython_func = func

        def func():
            return

        for attr in attrs:
            setattr(func, attr, getattr(cython_func, attr))
    return inspect.signature(func)
def extract_signature(func, ignore_first=False):
    """Return the list of ``Parameter`` objects describing *func*.

    Args:
        func: The function whose signature should be extracted.
        ignore_first: When True, drop the first parameter (use for methods,
            where the first parameter is ``self``).

    Returns:
        List of Parameter objects representing the function signature.

    Raises:
        ValueError: If ignore_first is set but the function has no parameters.
    """
    params = list(get_signature(func).parameters.values())
    if not ignore_first:
        return params
    if not params:
        raise ValueError("Methods must take a 'self' argument, but the "
                         f"method '{func.__name__}' does not have one.")
    return params[1:]
def flatten_args(signature_parameters, args, kwargs):
    """Validate arguments against the signature and flatten them into a list.

    The flat list is a serializable format: each argument contributes two
    entries - ``[DUMMY_TYPE, value]`` for a positional argument and
    ``[keyword, value]`` for a keyword argument. ``recover_args`` restores
    the original args/kwargs from this representation.

    Args:
        signature_parameters (list): Parameter objects describing the
            function signature (see `extract_signature`).
        args: The non-keyword arguments passed into the function.
        kwargs: The keyword arguments passed into the function.

    Returns:
        Flat list of interleaved markers/keywords and values.

    Raises:
        TypeError: If the arguments do not fit the function signature.

    Example:
        >>> flatten_args(params, [1, 2, 3], {"a": 4})
        [DUMMY_TYPE, 1, DUMMY_TYPE, 2, DUMMY_TYPE, 3, "a", 4]
    """
    reconstructed_signature = inspect.Signature(
        parameters=signature_parameters)
    try:
        # Bind only to validate; the bound object itself is not needed.
        reconstructed_signature.bind(*args, **kwargs)
    except TypeError as exc:  # capture a friendlier stacktrace
        raise TypeError(str(exc)) from None
    flat = []
    for positional in args:
        flat.extend((DUMMY_TYPE, positional))
    for keyword, value in kwargs.items():
        flat.extend((keyword, value))
    return flat
def recover_args(flattened_args):
    """Recreate ``args`` and ``kwargs`` from the flattened argument list.

    Args:
        flattened_args: Output of `flatten_args`: an even-length list of
            interleaved markers/keywords and values.

    Returns:
        args: The non-keyword arguments passed into the function.
        kwargs: The keyword arguments passed into the function.
    """
    assert len(flattened_args) % 2 == 0, (
        "Flattened arguments need to be even-numbered. See `flatten_args`.")
    args, kwargs = [], {}
    pairs = iter(flattened_args)
    # zip(it, it) yields consecutive (marker/keyword, value) pairs.
    for name, value in zip(pairs, pairs):
        if name == DUMMY_TYPE:
            args.append(value)
        else:
            kwargs[name] = value
    return args, kwargs
def _convert_from_parameter_kind(kind):
if kind == Parameter.POSITIONAL_ONLY:
return 0
if kind == Parameter.POSITIONAL_OR_KEYWORD:
return 1
if kind == Parameter.VAR_POSITIONAL:
return 2
if kind == Parameter.KEYWORD_ONLY:
return 3
if kind == Parameter.VAR_KEYWORD:
return 4
def _convert_to_parameter_kind(value):
if value == 0:
return Parameter.POSITIONAL_ONLY
if value == 1:
return Parameter.POSITIONAL_OR_KEYWORD
if value == 2:
return Parameter.VAR_POSITIONAL
if value == 3:
return Parameter.KEYWORD_ONLY
if value == 4:
return Parameter.VAR_KEYWORD
|
# Copyright (c) 2015 Thales Services SAS
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
from designateclient import exceptions
from designateclient.tests import base
from designateclient import utils
# Canned list() response: 'abcd' is unique, 'baba' appears twice so a lookup
# by that name must raise NoUniqueMatch.
LIST_MOCK_RESPONSE = [
    {'id': '13579bdf-0000-0000-abcd-000000000001', 'name': 'abcd'},
    {'id': '13579bdf-0000-0000-baba-000000000001', 'name': 'baba'},
    {'id': '13579bdf-0000-0000-baba-000000000002', 'name': 'baba'},
]
class UtilsTestCase(base.TestCase):
    """Tests for utils.find_resourceid_by_name_or_id."""

    def _find_resourceid_by_name_or_id(self, name_or_id, by_name=False):
        """Run the lookup against a mocked resource client.

        by_name: whether the client's list() endpoint is expected to be hit,
        i.e. the input was a name rather than a UUID.
        """
        resource_client = mock.Mock()
        resource_client.list.return_value = LIST_MOCK_RESPONSE
        resourceid = utils.find_resourceid_by_name_or_id(
            resource_client, name_or_id)
        self.assertEqual(by_name, resource_client.list.called)
        return resourceid

    def test_find_resourceid_with_hyphen_uuid(self):
        # a hyphenated UUID is passed through untouched, without listing
        expected = str(uuid.uuid4())
        observed = self._find_resourceid_by_name_or_id(expected)
        self.assertEqual(expected, observed)

    def test_find_resourceid_with_nonhyphen_uuid(self):
        # a bare 32-char hex UUID is normalised to the hyphenated form
        expected = str(uuid.uuid4())
        fakeid = expected.replace('-', '')
        observed = self._find_resourceid_by_name_or_id(fakeid)
        self.assertEqual(expected, observed)

    def test_find_resourceid_with_unique_resource(self):
        # a unique name resolves to the id of the single match
        observed = self._find_resourceid_by_name_or_id('abcd', by_name=True)
        self.assertEqual('13579bdf-0000-0000-abcd-000000000001', observed)

    def test_find_resourceid_with_nonexistent_resource(self):
        self.assertRaises(exceptions.ResourceNotFound,
                          self._find_resourceid_by_name_or_id,
                          'taz', by_name=True)

    def test_find_resourceid_with_multiple_resources(self):
        self.assertRaises(exceptions.NoUniqueMatch,
                          self._find_resourceid_by_name_or_id,
                          'baba', by_name=True)
|
'''
Test functions for handling star formation rates
'''
import numpy as np
from scipy.integrate import odeint
import sfrs as SFR
def IntegrationTest():
    ''' Simple test the integration
    '''
    # logSFR(M*, t) = log10(t^2); the analytic integral of t^2 is t^3/3, so
    # the two printed columns should agree for every upper limit tt.
    logsfr = lambda mstar, t: np.log10(t**2)
    for tt in np.arange(1., 11., 1.):
        M_int = SFR.integSFR(logsfr, np.array([0.]),
                             np.array([0.]), np.array([tt]),
                             mass_dict={'type': 'rk4', 'f_retain': 1e-9, 't_step': 0.01})
        # left: integrated mass (undo the log/offset); right: analytic value
        print np.log10(10**M_int[0] - 1.), np.log10(tt**3/3.)
    return None
def Integration_ScipyComp():
    ''' Simple test the integration
    '''
    # integrate dy/dt = t from t=0 to t=10 with the hand-rolled Euler and
    # RK4 steppers and compare against scipy.integrate.odeint
    dydt = lambda y, t: t
    M_euler = SFR.ODE_Euler(dydt, np.array([0.]), np.array([0.,10.]), 0.001)
    M_RK4 = SFR.ODE_RK4(dydt, np.array([0.]), np.array([0.,10.]), 0.1)
    M_scipy = odeint(dydt, np.array([0.]), np.array([0.,10.]))
    print M_euler
    print M_RK4
    print M_scipy
    return None
if __name__=='__main__':
    # compare the hand-rolled integrators against scipy's odeint
    #IntegrationTest()
    Integration_ScipyComp()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ---------------------------------------------------
# Copyright (c) 2013 Pablo Caro. All Rights Reserved.
# Pablo Caro <me@pcaro.es> - http://pcaro.es/
# ProgressBar.py
# ---------------------------------------------------
import sys
class ProgressBar:
    """Carriage-return text progress bar, e.g. ``[=====     ] 42%``.

    charset is a 3-character string: left cap, fill character, right cap.
    update() reports whether the integer percentage changed, so callers can
    redraw only when show() would produce new output.
    """

    def __init__(self, min=0, max=100, width=60, charset='[=]'):
        # `min`/`max` shadow the builtins but are part of the public
        # signature and are kept for compatibility.
        self.min = min
        self.max = max
        self.width = width
        self.current = min
        self.percent = 0.0
        self.int_percent = 0
        # fall back to the default charset on malformed input
        self.charset = charset if len(charset) == 3 else '[=]'
        self.bar = ''
        self.used = -1
        self.int_percent_change = False

    def update(self, current):
        """Advance to *current*; return True iff the integer percent changed."""
        self.current = current
        span = self.max - self.min
        self.percent = (float(self.current - self.min) / span) * 100.0
        new_int_percent = int(self.percent)
        if new_int_percent != self.int_percent:
            self.int_percent_change = True
            self.int_percent = new_int_percent
            self.__generate_bar__()
        if not self.int_percent_change:
            return False
        self.int_percent_change = False
        return True

    def show(self):
        """Write the bar to stdout (no newline - '\\r' rewinds the line)."""
        sys.stdout.write(str(self))
        sys.stdout.flush()

    def __str__(self):
        return self.bar

    def __generate_bar__(self):
        # 6 columns are reserved for the caps and the " NNN%" suffix
        fraction = float(self.current - self.min) / (self.max - self.min)
        self.used = int(fraction * (self.width - 6))
        filled = self.charset[1] * self.used
        self.bar = (self.charset[0] + filled + self.charset[2]
                    + ' ' + str(self.int_percent) + '%' + '\r')
def main():
    # The module is meant to be imported; running it directly is a no-op.
    pass

if __name__ == '__main__':
    main()
|
# Project Euler Problem 22
# Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names, begin by sorting it into alphabetical order. Then working out the alphabetical value for each name, multiply this value by its alphabetical position in the list to obtain a name score.
# For example, when the list is sorted into alphabetical order, COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN would obtain a score of 938 × 53 = 49714.
# What is the total of all the name scores in the file?
import math,sys,os
def alphabetScore(sLetter):
    """Return the 1-based alphabet position of a letter: 'a'/'A' -> 1 ...
    'z'/'Z' -> 26.  Non-letter or multi-character input yields None (same as
    the original dict.get lookup)."""
    letter = sLetter.lower()
    if len(letter) == 1 and 'a' <= letter <= 'z':
        return ord(letter) - ord('a') + 1
    return None
def wordScore(sWord):
    """Sum of alphabetScore over every character of *sWord* (0 for '')."""
    return sum(alphabetScore(letter) for letter in sWord)
def scoreArray(arrayScores):
    """Return the sum of value * 1-based position over *arrayScores*.

    The leftover debug prints of fixed indices 937/938 (from checking the
    COLIN example) were removed: they crashed with IndexError for any list
    shorter than 939 entries and wrote noise to stdout.
    """
    total = 0
    for rank, value in enumerate(arrayScores, 1):
        total += value * rank
    return total
def main(lineArray):
    """Sort the names, strip surrounding quotes, and print the total of all
    positional name scores (Project Euler 22)."""
    rankArray = []
    lineArray.sort()
    for item in lineArray:
        # names arrive as "NAME" - drop the quotes before scoring
        item = item.replace('\"','')
        rankArray.append(wordScore(item))
    print scoreArray(rankArray)
#testArray = [123,23,58,90]
#print scoreArray(testArray)
# Entry point: expects the names file (a single comma-separated line of
# quoted names) as the first command-line argument.
if len(sys.argv) < 2:
    print "NEED A FILENAME"
else:
    FILENAME = sys.argv[1]
    fObject = open(FILENAME,'r')
    # the whole file is one line of comma-separated quoted names
    LINE = fObject.readline()
    fObject.close()
    nameArray = LINE.split(',')
    main(nameArray)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.