| code (string, lengths 3–1.05M) | repo_name (string, lengths 5–104) | path (string, lengths 4–251) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64, 3–1.05M) |
|---|---|---|---|---|---|
import serial
import rospy
class GroveO2:
"""
Class that represents a Grove O2 sensor instance and provides functions
for interfacing with the sensor.
"""
def __init__(self, analog_port=0, serial_port='/dev/serial/by-id/usb-Numato_Systems_Pvt._Ltd._Numato_Lab_8_Channel_USB_GPIO_Module-if00', pseudo=False):
self.analog_port = analog_port
self.serial_port = serial_port
self.pseudo = pseudo
self.o2 = None
self.sensor_is_connected = True
self.connect()
    def __del__(self):
        # self.serial is never created in pseudo mode or when the first connect fails
        if getattr(self, 'serial', None):
            self.serial.close()
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, exception_traceback):
        if getattr(self, 'serial', None):
            self.serial.close()
        return False  # a truthy return value here would silently suppress exceptions
def connect(self):
if self.pseudo:
rospy.loginfo('Connected to pseudo sensor')
return
try:
self.serial = serial.Serial(self.serial_port, 19200, timeout=1)
rospy.logdebug("self.serial.isOpen() = {}".format(self.serial.isOpen()))
if not self.sensor_is_connected:
self.sensor_is_connected = True
rospy.loginfo('Connected to sensor')
        except Exception:
if self.sensor_is_connected:
self.sensor_is_connected = False
rospy.logwarn('Unable to connect to sensor')
def poll(self):
if self.pseudo:
self.o2 = 19.3
return
if self.sensor_is_connected:
try:
self.serial.write(('adc read {}\r'.format(self.analog_port)).encode())
response = self.serial.read(25)
voltage = float(response[10:-3]) * 5 / 1024
if voltage == 0:
return
self.o2 = voltage * 0.21 / 2.0 * 100 # percent
rospy.logdebug('o2 = {}'.format(self.o2))
            except Exception:
rospy.logwarn("O2 SENSOR> Failed to read value during poll")
self.o2 = None
self.sensor_is_connected = False
else:
self.connect()
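# Usage sketch (illustrative, not part of the original module; pseudo=True
# avoids the need for real hardware, and assumes a ROS logging context):
#
#     with GroveO2(pseudo=True) as sensor:
#         sensor.poll()
#         print(sensor.o2)  # O2 concentration in percent, or None on failure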
| davoclavo/openag_brain | src/openag_brain/peripherals/grove_o2.py | Python | gpl-3.0 | 2,153 |
""" Sahana Eden Module Automated Tests - HRM006 Add Staff To Office
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from tests.web2unittest import SeleniumUnitTest
class AddStaffToOffice(SeleniumUnitTest):
def test_hrm006_add_staff_to_office(self):
"""
@case: HRM006
        @description: Add a premade staff member to an Office
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
browser = self.browser
config = self.config
self.login(account="admin", nexturl="org/office")
self.dt_filter("AP Zone")
self.dt_action()
url = browser.current_url
url_parts = url.split("/")
        try:
            org_id = int(url_parts[-2])
        except ValueError:
            org_id = int(url_parts[-1])
browser.get("%s/org/office/%s/human_resource" % (config.url, org_id))
self.browser.find_element_by_id("show-add-btn").click()
self.browser.find_element_by_id("select_from_registry").click()
self.create("hrm_human_resource",
[
( "person_id",
"Beatriz de Carvalho",
"autocomplete")
]
)
| vgupta6/Project-2 | modules/tests/hrm/add_staff_to_office.py | Python | mit | 2,514 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('questions', '0003_question_owner'),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('answer_text', models.TextField(verbose_name=b'answer')),
('created_on', models.DateTimeField(auto_now_add=True)),
('answers_question', models.ForeignKey(to='questions.Question')),
],
options={
'db_table': 'answers',
},
bases=(models.Model,),
),
migrations.RemoveField(
model_name='comments',
name='comments_question',
),
migrations.DeleteModel(
name='Comments',
),
]
| Kuzenkov/SimpleAnalogueStackOverflow | questions/migrations/0004_auto_20150211_0703.py | Python | mit | 997 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('commerce', '0002_commerceconfiguration'),
]
operations = [
migrations.AlterModelOptions(
name='commerceconfiguration',
options={},
),
]
| edx/edx-platform | lms/djangoapps/commerce/migrations/0003_auto_20160329_0709.py | Python | agpl-3.0 | 303 |
from flask import render_template
from app import app
@app.route('/')
@app.route('/index')
def index():
user = { 'nickname': 'Miguel' } # fake user
posts = [ # fake array of posts
{
'author': { 'nickname': 'John' },
'body': 'Beautiful day in Portland!'
},
{
'author': { 'nickname': 'Susan' },
'body': 'The Avengers movie was so cool!'
}
]
return render_template("index.html",
title = 'Home',
user = user,
posts = posts)
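# Usage sketch (illustrative; the tutorial convention is a separate runner
# script, often named run.py):
#
#     from app import app
#     app.run(debug=True)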
| pugong/microblog | app/index.py | Python | bsd-3-clause | 541 |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import racer
import matplotlib.pyplot as plt
speed = 2
wait_time = 0.1
start = racer.Point(2, 0)
goal = racer.Point(2, 4)
fig = plt.figure()
ax_dr = fig.add_subplot(111)
ag = racer.Agent(
racer.model.SinModel(2, 1, 1, 2),
racer.model.LinearModel(0, 1))
ag2 = racer.Agent(
racer.model.SinModel(-2, 2, 1, 2),
racer.model.LinearModel(0, 1.3))
ag3 = racer.Agent(
racer.model.SinModel(2, 2, 1, 2),
racer.model.LinearModel(0, 2))
strg = racer.RoadmapGenerator(
start=start, width=4, height=4, max_dist=0.5)
rm = strg.generate(300)
search = racer.Search(rm, speed, wait_time)
path, tree = search.get_path(start, goal, ag, ag2, ag3)
dr = racer.Drawer(fig, ax_dr)
for i, p in enumerate(path[1:]):
# ax_dr.set_xlabel("X Position [m]")
# ax_dr.set_ylabel("Y Position [m]")
# ax_dr.set_zlabel("Time [s]")
dr.draw_temporal_nodes(tree, 0, p.t)
dr.draw_temporal_edges(tree, 0, p.t)
dr.draw_path(path[:(i + 2)])
dr.draw_agent(ag, p.t)
dr.draw_agent(ag2, p.t)
dr.draw_agent(ag3, p.t)
plt.xlim([-0.1, 4.1])
plt.ylim([-0.1, 4.1])
plt.savefig("figs/tree_{}.pdf".format(i), bbox_inches="tight")
dr.clear()
| wallarelvo/racer | tests/search_test.py | Python | apache-2.0 | 1,261 |
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.db import portbindings_base
from neutron.extensions import portbindings
class PortBindingPort(model_base.BASEV2):
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
host = sa.Column(sa.String(255), nullable=False)
port = orm.relationship(
models_v2.Port,
backref=orm.backref("portbinding",
lazy='joined', uselist=False,
cascade='delete'))
class PortBindingMixin(portbindings_base.PortBindingBaseMixin):
extra_binding_dict = None
def _port_model_hook(self, context, original_model, query):
query = query.outerjoin(PortBindingPort,
(original_model.id ==
PortBindingPort.port_id))
return query
def _port_result_filter_hook(self, query, filters):
values = filters and filters.get(portbindings.HOST_ID, [])
if not values:
return query
query = query.filter(PortBindingPort.host.in_(values))
return query
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port,
"portbindings_port",
'_port_model_hook',
None,
'_port_result_filter_hook')
def _process_portbindings_create_and_update(self, context, port_data,
port):
binding_profile = port.get(portbindings.PROFILE)
binding_profile_set = attributes.is_attr_set(binding_profile)
if not binding_profile_set and binding_profile is not None:
del port[portbindings.PROFILE]
binding_vnic = port.get(portbindings.VNIC_TYPE)
binding_vnic_set = attributes.is_attr_set(binding_vnic)
if not binding_vnic_set and binding_vnic is not None:
del port[portbindings.VNIC_TYPE]
# REVISIT(irenab) Add support for vnic_type for plugins that
# can handle more than one type.
# Currently implemented for ML2 plugin that does not use
# PortBindingMixin.
host = port_data.get(portbindings.HOST_ID)
host_set = attributes.is_attr_set(host)
with context.session.begin(subtransactions=True):
bind_port = context.session.query(
PortBindingPort).filter_by(port_id=port['id']).first()
if host_set:
if not bind_port:
context.session.add(PortBindingPort(port_id=port['id'],
host=host))
else:
bind_port.host = host
else:
host = bind_port.host if bind_port else None
self._extend_port_dict_binding_host(port, host)
def get_port_host(self, context, port_id):
with context.session.begin(subtransactions=True):
bind_port = context.session.query(
PortBindingPort).filter_by(port_id=port_id).first()
return bind_port.host if bind_port else None
def _extend_port_dict_binding_host(self, port_res, host):
super(PortBindingMixin, self).extend_port_dict_binding(
port_res, None)
port_res[portbindings.HOST_ID] = host
def extend_port_dict_binding(self, port_res, port_db):
host = port_db.portbinding.host if port_db.portbinding else None
self._extend_port_dict_binding_host(port_res, host)
def _extend_port_dict_binding(plugin, port_res, port_db):
if not isinstance(plugin, PortBindingMixin):
return
plugin.extend_port_dict_binding(port_res, port_db)
# Register dict extend functions for ports
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, [_extend_port_dict_binding])
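# Usage sketch (illustrative, not from this file): a concrete plugin mixes
# PortBindingMixin into the base DB plugin and invokes the hook on create/update:
#
#     class MyPlugin(db_base_plugin_v2.NeutronDbPluginV2, PortBindingMixin):
#         def create_port(self, context, port):
#             port_res = super(MyPlugin, self).create_port(context, port)
#             self._process_portbindings_create_and_update(
#                 context, port['port'], port_res)
#             return port_res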
| waltBB/neutron_read | neutron/db/portbindings_db.py | Python | apache-2.0 | 4,645 |
# coding:utf-8
"""
Created by 捡龙眼
3/4/2016
"""
from __future__ import absolute_import, unicode_literals, print_function
import public.special_exception
LOG_THREAD = "log_thread"
TIME_THREAD = "time_thread"
GLOBAL_THREAD = {}
def add_thread(key, thread_object):
    if key in GLOBAL_THREAD:
        raise public.special_exception.KeyExistError("%s already exists" % key)
    GLOBAL_THREAD[key] = thread_object
def get_thread(key):
return GLOBAL_THREAD.get(key)
def clear_thread():
for thread_object in GLOBAL_THREAD.values():
try:
thread_object.stop_thread()
except BaseException as e:
print(e)
HTTP_REQUEST_MANAGER = "http_request_manager"
WAITE_CONNECT_MANAGER = "waite_client_manager"
AUTH_CONNECT_MANAGER = "auth_connect_manager"
GLOBAL_OBJECT = {}
def add_object(key, obj):
    if key in GLOBAL_OBJECT:
        raise public.special_exception.KeyExistError("%s already exists" % key)
    GLOBAL_OBJECT[key] = obj
def get_object(key):
return GLOBAL_OBJECT.get(key)
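# Usage sketch (illustrative; assumes a thread object implementing stop_thread()):
#
#     add_thread(LOG_THREAD, my_log_thread)   # register once at startup
#     get_thread(LOG_THREAD)                  # look it up elsewhere
#     clear_thread()                          # stop every registered thread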
| yanjianlong/server_cluster | public/global_manager.py | Python | bsd-3-clause | 1,046 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('topics', '0005_auto_20150109_1605'),
]
operations = [
migrations.AlterModelOptions(
name='storypointer',
options={'ordering': ('-pub_date',)},
),
]
| texastribune/txlege84 | txlege84/topics/migrations/0006_auto_20150309_1140.py | Python | mit | 380 |
#!/usr/bin/python
#-*- coding: utf-8 -*-
"""
(c) 2012 - Copyright Pierre-Yves Chibon <pingou@pingoured.fr>
Distributed under License GPLv3 or later
You can find a copy of this license on the website
http://www.gnu.org/licenses/gpl.html
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA.
This script is meant to be run as a cron job to send the reminders for
each meeting that asked for it.
"""
from __future__ import unicode_literals, absolute_import
import smtplib
import warnings
import logging
from email.mime.text import MIMEText
import fedocal
import fedocal.fedocallib as fedocallib
import fedocal.fedocallib.fedmsgshim as fedmsg
_log = logging.getLogger(__name__)
def fedmsg_publish(meeting, meeting_id):
""" Publish the meeting.reminder messages on fedora-messaging.
:arg meeting: a Meeting object from fedocallib.model
:arg meeting_id: an int representing the meeting identifier in the
database
"""
_log.info('Publishing a message for meeting: %s', meeting)
print('Publishing a message for meeting: %s' % meeting)
meeting_dict = meeting.to_json()
meeting_dict['meeting_id'] = meeting_id
message = dict(
meeting=meeting_dict,
calendar=meeting.calendar.to_json()
)
fedmsg.publish(topic='meeting.reminder', msg=message)
def send_reminder_meeting(meeting, meeting_id):
""" This function sends the actual reminder of a given meeting.
:arg meeting: a Meeting object from fedocallib.model
:arg meeting_id: an int representing the meeting identifier in the
database
"""
if not meeting.reminder_id:
return
_log.info("Sending email reminder about meeting: %s", meeting)
print("Sending email reminder about meeting: %s" % meeting)
location = ''
if meeting.meeting_location:
location = 'At %s' % meeting.meeting_location
string = u"""Dear all,
You are kindly invited to the meeting:
%(name)s on %(date)s from %(time_start)s to %(time_stop)s %(timezone)s
%(location)s
The meeting will be about:
%(description)s
Source: %(host)s/meeting/%(id)s/
""" % ({
'name': u'%s' % meeting.meeting_name,
'date': meeting.meeting_date,
'time_start': meeting.meeting_time_start,
'time_stop': meeting.meeting_time_stop,
'timezone': meeting.meeting_timezone,
'location': u'%s' % location,
'description': u'%s' % meeting.meeting_information,
'id': meeting_id,
'host': fedocal.APP.config['SITE_URL'],
})
if meeting.reminder.reminder_text:
string = string + u"""
Please note:
%s""" % meeting.reminder.reminder_text
msg = MIMEText(string.encode('utf-8'), 'plain', 'utf-8')
msg['Subject'] = '[Fedocal] Reminder meeting : %s' % meeting.meeting_name
from_email = meeting.meeting_manager[0]
from_email = '%s@fedoraproject.org' % from_email
msg['From'] = meeting.reminder.reminder_from or from_email
msg['To'] = meeting.reminder.reminder_to.replace(',', ', ')
# Send the message via our own SMTP server, but don't include the
# envelope header.
smtp = smtplib.SMTP(fedocal.APP.config['SMTP_SERVER'])
smtp.sendmail(
from_email,
meeting.reminder.reminder_to.split(','),
msg.as_string())
smtp.quit()
return msg
def send_reminder():
""" Retrieve all the meeting for which we should send a reminder and
do it.
"""
db_url = fedocal.APP.config['DB_URL']
session = fedocallib.create_session(db_url)
meetings = fedocallib.retrieve_meeting_to_remind(
session, offset=int(fedocal.APP.config['CRON_FREQUENCY']))
msgs = []
for meeting in meetings:
meeting_id = meeting.meeting_id
meeting = fedocallib.update_date_rec_meeting(meeting, action='next')
_log.info("Processing meeting: %s", meeting)
print("Processing meeting: %s" % meeting)
msgs.append(send_reminder_meeting(meeting, meeting_id))
fedmsg_publish(meeting, meeting_id)
return msgs
if __name__ == '__main__':
send_reminder()
| fedora-infra/fedocal | fedocal_cron.py | Python | gpl-3.0 | 4,708 |
# -*- coding: utf-8 -*-
"""
Catch-up TV & More
Copyright (C) 2018 SylvainCecchetto
This file is part of Catch-up TV & More.
Catch-up TV & More is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Catch-up TV & More is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with Catch-up TV & More; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
# The unicode_literals import only has
# an effect on Python 2.
# It makes string literals unicode, as in Python 3
from __future__ import unicode_literals
from codequick import Route, Resolver, Listitem, utils, Script
from resources.lib import web_utils
from resources.lib import resolver_proxy
from resources.lib.menu_utils import item_post_treatment
import re
import urlquick
# TO DO
# Add more button
# test videos to see if there is other video hoster
URL_ROOT = 'https://becurious.ch'
URL_VIDEOS = URL_ROOT + '/?infinity=scrolling'
@Route.register
def list_categories(plugin, item_id, **kwargs):
"""
Build categories listing
- Tous les programmes
- Séries
- Informations
- ...
"""
resp = urlquick.get(URL_ROOT,
headers={'User-Agent': web_utils.get_random_ua()})
root = resp.parse("ul", attrs={"class": "sub-menu"})
for category_datas in root.iterfind(".//li"):
category_title = category_datas.find('.//a').text
category_url = category_datas.find('.//a').get('href')
item = Listitem()
item.label = category_title
item.set_callback(list_videos,
item_id=item_id,
category_url=category_url)
item_post_treatment(item)
yield item
@Route.register
def list_videos(plugin, item_id, category_url, **kwargs):
resp = urlquick.get(category_url)
root = resp.parse()
for video_datas in root.iterfind(".//article"):
video_title = video_datas.find('.//a').get('title')
video_url = video_datas.find('.//a').get('href')
video_image = video_datas.find('.//img').get('src')
item = Listitem()
item.label = video_title
item.art['thumb'] = item.art['landscape'] = video_image
item.set_callback(get_video_url,
item_id=item_id,
video_url=video_url)
item_post_treatment(item, is_playable=True, is_downloadable=True)
yield item
@Resolver.register
def get_video_url(plugin,
item_id,
video_url,
download_mode=False,
**kwargs):
resp = urlquick.get(video_url)
root = resp.parse()
stream_datas = root.find('.//iframe').get('src')
# Case Youtube
if 'youtube' in stream_datas:
video_id = re.compile('www.youtube.com/embed/(.*?)[\?\"\&]').findall(
stream_datas)[0]
return resolver_proxy.get_stream_youtube(plugin, video_id,
download_mode)
# Case Vimeo
elif 'vimeo' in stream_datas:
video_id = re.compile('player.vimeo.com/video/(.*?)[\?\"]').findall(
stream_datas)[0]
return resolver_proxy.get_stream_vimeo(plugin, video_id, download_mode)
else:
# Add Notification
return False
| SylvainCecchetto/plugin.video.catchuptvandmore | plugin.video.catchuptvandmore/resources/lib/channels/ch/becurioustv.py | Python | gpl-2.0 | 3,792 |
import logging
import requests
import string
import urllib.parse as parse
from pyquery import PyQuery as pq
class SoundcloudApi:
TRANS_TABLE = str.maketrans('', '', string.punctuation)
def __init__(self):
self.logger = logging.getLogger(__name__)
self.query_url = 'https://soundcloud.com/search?q={0}+{1}'
def get(self, url):
d = pq(url=url)
music_type = "album" if url.split('/')[4] == "sets" else "track"
info = d("h1 > a")
title = info.eq(0).html()
artist = info.eq(1).html()
artwork = d("img").eq(1).attr("src")
self.logger.info('title: {0} artist: {1}'.format(title, artist))
        return {
            'type': music_type,
            'title': title,
            'artist': artist,
            'art': artwork
        }
def search(self, title, artist):
clean_title = title.replace(' & ', '+')
clean_artist = artist.replace(' & ', '+')
query = self.query_url.format(clean_title, clean_artist).replace(' ', '+')
self.logger.info('query: {0}'.format(query))
d = pq(url=query)
expected_artist = clean_artist.lower()
expected_title = clean_title.lower()
results = d("ul:last > li > h2 > a")
for item in results.items():
text = item.text().lower()
self.logger.info('text: {0}'.format(text))
self.logger.info('title: {0}'.format(title))
if title.lower() in text:
href = 'https://soundcloud.com{0}'.format(item.attr.href)
return href
return None
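# Usage sketch (illustrative; both calls perform live HTTP requests against
# soundcloud.com, so the scraped markup may have changed since this was written):
#
#     api = SoundcloudApi()
#     url = api.search('Some Title', 'Some Artist')
#     if url:
#         info = api.get(url)  # {'type': ..., 'title': ..., 'artist': ..., 'art': ...}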
| nick41496/Beatnik | beatnik/api_manager/clients/soundcloud_api.py | Python | gpl-3.0 | 1,622 |
# -*- coding: utf-8 -*-
##Copyright (C) [2003] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cuon.Databases.SingleData import SingleData
import logging
import threading
import threading
import string
class SingleScheduling(SingleData):
def __init__(self, allTables):
SingleData.__init__(self)
# tables.dbd and address
self.sNameOfTable = "partner_schedul"
self.xmlTableDef = 0
self.loadTable(allTables)
#self.athread = threading.Thread(target = self.loadTable())
#self.athread.start()
self.listHeader['names'] = ['name', 'zip', 'city', 'Street', 'ID']
self.listHeader['size'] = [25,10,25,25,10]
self.out( "number of Columns ")
self.out( len(self.table.Columns))
#
self.partnerId = 0
self.liSchedulTime = None
def readNonWidgetEntries(self, dicValues):
dicValues['partnerid'] = [self.partnerId, 'int']
# print 'dicValues Schedul = ', dicValues['schedul_time_begin'], dicValues['schedul_time_end']
# print '2-->', self.liSchedulTime
# sSql = "select fct_getSchedulTime(" + `dicValues['schedul_time_begin'][0]` + ", " + `dicValues['schedul_time_end'][0]`+ ", array "+`self.liSchedulTime`+" )"
# result = self.rpc.callRP('Database.executeNormalQuery', sSql, self.dicUser)
return dicValues
def getPartnerID(self):
id = 0
if self.firstRecord.has_key('partnerid'):
id = self.firstRecord['partnerid']
return id
def getShortRemark(self):
s = None
if self.firstRecord.has_key('short_remark'):
s = self.firstRecord['short_remark']
return s
def getNotes(self):
s = None
if self.firstRecord.has_key('notes'):
s = self.firstRecord['notes']
return s
def fillExtraEntries(self, oneRecord):
if oneRecord.has_key('schedul_datetime'):
print '-----------------------------------------------------'
print 'Schedul-Time: ', oneRecord['schedul_datetime']
liDate = string.split(oneRecord['schedul_datetime'])
if liDate:
try:
assert len(liDate) == 2
eDate = self.getWidget('eDate')
eTime = self.getWidget('eTime')
eDate.set_text(liDate[0])
eTime.set_text(liDate[1])
#(liDate[1])
except:
print 'error in Date'
else :
print `oneRecord`
| CuonDeveloper/cuon | cuon_client/cuon/bin/cuon/Addresses/SingleScheduling.py | Python | gpl-3.0 | 3,563 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import ok_
from nose.tools import raises
import os
from py_reporter.utilities import get_named_range
from py_reporter.utilities import get_workbook
import unittest
class TestUtilities(unittest.TestCase):
def setUp(self):
self.cwd = os.path.dirname(os.path.realpath(__file__))
self.test_excel_file = os.path.join(self.cwd, 'data', 'test.xlsx')
def test_get_workbook_with_valid_excel_file(self):
workbook = get_workbook(self.test_excel_file)
ok_(workbook is not None)
@raises(IOError)
def test_get_workbook_with_invalid_excel_file(self):
get_workbook('/v/a/b/x/c/d')
def test_get_named_range_with_valid_workbook(self):
workbook = get_workbook(self.test_excel_file)
ok_(len(get_named_range(workbook, 'test')) > 0)
# vim: filetype=python
| ryankanno/py-reporter | tests/test_utilities.py | Python | mit | 881 |
# * *************************************************************
# *
# * Soft Active Mater on Surfaces (SAMoS)
# *
# * Author: Rastko Sknepnek
# *
# * Division of Physics
# * School of Engineering, Physics and Mathematics
# * University of Dundee
# *
# * (c) 2013, 2014
# *
# * School of Science and Engineering
# * School of Life Sciences
# * University of Dundee
# *
# * (c) 2015
# *
# * Author: Silke Henkes
# *
# * Department of Physics
# * Institute for Complex Systems and Mathematical Biology
# * University of Aberdeen
# *
# * (c) 2014, 2015
# *
# * This program cannot be used, copied, or modified without
# * explicit written permission of the authors.
# *
# * ***************************************************************
# Utility code for converting standard data output (dat) of the simulation
# into the VTP format suitable for visualization with ParaView
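# Example invocation (illustrative):
#   python dat2vtp.py -i data/run_ -o vtp_out --connected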
import sys
import argparse
from read_data import *
import numpy as np
import numpy.linalg as lin
from datetime import *
from glob import glob
from scipy.spatial import ConvexHull
import math as m
import vtk
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", type=str, help="input file (base name)")
parser.add_argument("-o", "--output", type=str, help="output directory")
parser.add_argument("-s", "--skip", type=int, default=0, help="skip this many samples")
parser.add_argument("-E", "--end_sample", type=int, default=None, help="last sample")
parser.add_argument("-S", "--step", type=int, default=1, help="step between samples")
parser.add_argument("-C", "--contact", type=str, default=None, help="contact network data file")
parser.add_argument("-e", "--exclude", type=float, default=None, help="exclude all contact line that are longer than this value")
parser.add_argument("--connected", action='store_true', default=False, help="Include Delaunay triangulation data")
parser.add_argument("--nematic", action='store_true', default=False, help="Shift n vectors such that particle is in the middle of director.")
parser.add_argument("-l", "--length", type=float, default=1.0, help="rod length")
parser.add_argument("-b", "--bonds", type=str, default=None, help="bond file")
args = parser.parse_args()
print
print "\tActive Particles on Curved Spaces (APCS)"
print "\tConverts dat files to VTP files"
print
print "\tRastko Sknepnek"
print "\tUniversity of Dundee"
print "\t(c) 2014"
print "\t----------------------------------------------"
print
print "\tInput files : ", args.input
print "\tOutput files : ", args.output
print "\tSkip frames : ", args.skip
if args.contact != None:
print "\tContact network data file : ", args.contact
if args.exclude != None:
print "\tExclude all contact lines that are longer than : ", args.exclude
print
start = datetime.now()
if args.end_sample == None:
files = sorted(glob(args.input+'*.dat'))[args.skip::args.step]
if len(files) == 0:
files = sorted(glob(args.input+'*.dat.gz'))[args.skip::args.step]
else:
files = sorted(glob(args.input+'*.dat'))[args.skip:args.end_sample:args.step]
if len(files) == 0:
files = sorted(glob(args.input+'*.dat.gz'))[args.skip:args.end_sample:args.step]
if args.contact != None:
cont_files = sorted(glob(args.contact+'*.con'))[args.skip:]
if len(files) != len(cont_files):
print "There has to be same number of data and contact files."
sys.exit(1)
# read bonds
bonds = []
if args.bonds != None:
with open(args.bonds,'r') as bond_file:
lines = bond_file.readlines()
#print lines
#lines = lines.split('\n')
lines = map(lambda x: x.strip(), lines)
for line in lines:
b = line.split()
bonds.append((int(b[2]),int(b[3])))
u=0
for f in files:
print "Processing file : ", f
Points = vtk.vtkPoints()
has_v = False
has_n = False
data = ReadData(f)
if not (data.keys.has_key('x') and data.keys.has_key('y') and data.keys.has_key('z')):
raise "Particle coordinate not specified in the input data."
x = np.array(data.data[data.keys['x']])
y = np.array(data.data[data.keys['y']])
z = np.array(data.data[data.keys['z']])
Lx, Ly, Lz = np.max(x) - np.min(x), np.max(y) - np.min(y), np.max(z) - np.min(z)
if (data.keys.has_key('vx') or data.keys.has_key('vy') or data.keys.has_key('vz')):
vx = np.array(data.data[data.keys['vx']])
vy = np.array(data.data[data.keys['vy']])
vz = np.array(data.data[data.keys['vz']])
has_v = True
if (data.keys.has_key('nx') or data.keys.has_key('ny') or data.keys.has_key('nz')):
nx = np.array(data.data[data.keys['nx']])
ny = np.array(data.data[data.keys['ny']])
nz = np.array(data.data[data.keys['nz']])
has_n = True
if (data.keys.has_key('radius')):
r = np.array(data.data[data.keys['radius']])
else:
r = np.ones(len(x))
if (data.keys.has_key('type')):
tp = np.array(data.data[data.keys['type']])
else:
tp = np.ones(len(x))
if (data.keys.has_key('flag')):
flag = np.array(data.data[data.keys['flag']])
else:
flag = np.arange(len(x))
Radii = vtk.vtkDoubleArray()
Radii.SetNumberOfComponents(1)
Radii.SetName('Radius')
Types = vtk.vtkDoubleArray()
Types.SetNumberOfComponents(1)
Types.SetName('Type')
Flags = vtk.vtkDoubleArray()
Flags.SetNumberOfComponents(1)
Flags.SetName('Flag')
if has_v:
Velocities = vtk.vtkDoubleArray()
Velocities.SetNumberOfComponents(3)
Velocities.SetName("Velocity")
if has_n:
Directors = vtk.vtkDoubleArray()
Directors.SetNumberOfComponents(3)
Directors.SetName("Directors")
        # Negative directors to mimic nematic (silly but should work)
if args.nematic:
NDirectors = vtk.vtkDoubleArray()
NDirectors.SetNumberOfComponents(3)
NDirectors.SetName("NDirectors")
for (xx,yy,zz,rr,t,f) in zip(x,y,z,r,tp,flag):
Points.InsertNextPoint(xx,yy,zz)
Radii.InsertNextValue(rr)
Types.InsertNextValue(t)
Flags.InsertNextValue(f)
if has_v:
for (vvx,vvy,vvz) in zip(vx,vy,vz):
Velocities.InsertNextTuple3(vvx,vvy,vvz)
if has_n:
rod_len = args.length
for (nnx,nny,nnz) in zip(nx,ny,nz):
if args.nematic:
Directors.InsertNextTuple3(0.5*rod_len*nnx,0.5*rod_len*nny,0.5*rod_len*nnz)
NDirectors.InsertNextTuple3(-0.5*rod_len*nnx,-0.5*rod_len*nny,-0.5*rod_len*nnz)
else:
Directors.InsertNextTuple3(rod_len*nnx,rod_len*nny,rod_len*nnz)
if args.contact != None:
if args.connected:
print "Error! You cannot use --connected flag with the contact network data"
sys.exit(1)
Lines = vtk.vtkCellArray()
Line = vtk.vtkLine()
contact = open(cont_files[u],'r')
con_lines = contact.readlines()
con_lines = map(lambda x: x.strip().split(),con_lines)
edges = []
for line in con_lines:
if not line[0] == '#':
i, j = int(line[1]), int(line[2])
if args.exclude != None:
dr = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
if dr < args.exclude:
edges.append((i,j))
else:
edges.append((i,j))
contact.close()
for (i,j) in edges:
Line.GetPointIds().SetId(0,i)
Line.GetPointIds().SetId(1,j)
Lines.InsertNextCell(Line)
if args.bonds != None:
Lines = vtk.vtkCellArray()
Line = vtk.vtkLine()
Lengths = vtk.vtkDoubleArray()
Lengths.SetNumberOfComponents(1)
Lengths.SetName('BondLength')
for (i,j) in bonds:
dr = np.sqrt((x[i]-x[j])**2 + (y[i]-y[j])**2 + (z[i]-z[j])**2)
if dr < 0.5*max([Lx,Ly,Lz]):
Line.GetPointIds().SetId(0,i)
Line.GetPointIds().SetId(1,j)
Lines.InsertNextCell(Line)
Lengths.InsertNextValue(dr)
if args.connected:
Lines = vtk.vtkCellArray()
Line = vtk.vtkLine()
Lengths = vtk.vtkDoubleArray()
Lengths.SetNumberOfComponents(1)
Lengths.SetName('Length')
NNeighs = vtk.vtkDoubleArray()
NNeighs.SetNumberOfComponents(1)
NNeighs.SetName('NNeigh')
Areas = vtk.vtkDoubleArray()
Areas.SetNumberOfComponents(1)
Areas.SetName('Area')
Faces = vtk.vtkCellArray()
Polygon = vtk.vtkPolygon()
points = np.column_stack((x,y,z))
hull = ConvexHull(points)
edges = []
nneighs = [0 for i in xrange(len(x))]
for h in hull.simplices:
i, j, k = h
if not sorted([i,j]) in edges: edges.append(sorted([i,j]))
if not sorted([i,k]) in edges: edges.append(sorted([i,k]))
if not sorted([j,k]) in edges: edges.append(sorted([j,k]))
#a1 = points[j]-points[i]
#a2 = points[k]-points[i]
#area = 0.5*lin.norm(np.cross(a1,a2))
#Areas.InsertNextValue(area)
#Polygon.GetPointIds().SetNumberOfIds(3)
#Polygon.GetPointIds().SetId(0, i)
#Polygon.GetPointIds().SetId(1, j)
#Polygon.GetPointIds().SetId(2, k)
#Faces.InsertNextCell(Polygon)
for (i,j) in edges:
Line.GetPointIds().SetId(0,i)
Line.GetPointIds().SetId(1,j)
Lines.InsertNextCell(Line)
nneighs[i] += 1
nneighs[j] += 1
dx, dy, dz = x[i]-x[j], y[i]-y[j], z[i]-z[j]
Lengths.InsertNextValue(m.sqrt(dx*dx + dy*dy + dz*dz))
for n in nneighs:
NNeighs.InsertNextValue(n)
polydata = vtk.vtkPolyData()
polydata.SetPoints(Points)
if args.connected:
polydata.GetPointData().AddArray(NNeighs)
polydata.SetLines(Lines)
polydata.GetCellData().AddArray(Lengths)
#polydata.SetPolys(Faces)
#polydata.GetCellData().AddArray(Areas)
if args.contact != None:
polydata.SetLines(Lines)
if args.bonds != None:
polydata.SetLines(Lines)
polydata.GetCellData().AddArray(Lengths)
polydata.GetPointData().AddArray(Radii)
polydata.GetPointData().AddArray(Types)
polydata.GetPointData().AddArray(Flags)
if has_v:
polydata.GetPointData().AddArray(Velocities)
if has_n:
polydata.GetPointData().AddArray(Directors)
if args.nematic:
polydata.GetPointData().AddArray(NDirectors)
polydata.Modified()
writer = vtk.vtkXMLPolyDataWriter()
#outname = '.'.join(f.split('.')[:-1])
#print outname
outname='frame_%09d' % u
u+=1
writer.SetFileName(args.output+'/'+outname+'.vtp')
if vtk.VTK_MAJOR_VERSION <= 5:
writer.SetInput(polydata)
else:
writer.SetInputData(polydata)
writer.SetDataModeToAscii()
writer.Write()
end = datetime.now()
total = end - start
print
print " *** Completed in ", total.total_seconds(), " seconds *** "
print
| sknepneklab/SAMoS | utils/RastkoVisu/dat2vtp.py | Python | gpl-3.0 | 10,485 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import easy_thumbnails.fields
from django.conf import settings
import userena.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('presenters', '0002_auto_20151117_1721'),
]
operations = [
migrations.CreateModel(
name='OAuth',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=128)),
('uid', models.CharField(max_length=128)),
('expired', models.DateTimeField()),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mugshot', easy_thumbnails.fields.ThumbnailerImageField(help_text='A personal image displayed in your profile.', upload_to=userena.models.upload_to_mugshot, verbose_name='mugshot', blank=True)),
('privacy', models.CharField(default=b'registered', help_text='Designates who can view your profile.', max_length=15, verbose_name='privacy', choices=[(b'open', 'Open'), (b'registered', 'Registered'), (b'closed', 'Closed')])),
('favourite_snack', models.CharField(max_length=5, verbose_name='favourite snack')),
('follows', models.ManyToManyField(to='presenters.Presenter')),
],
options={
'abstract': False,
'permissions': (('view_profile', 'Can view profile'),),
},
),
migrations.CreateModel(
name='UserSource',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('flag', models.IntegerField()),
('name', models.CharField(max_length=50)),
],
),
migrations.AddField(
model_name='userprofile',
name='source',
field=models.ForeignKey(to='accounts.UserSource', null=True),
),
migrations.AddField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='my_profile', verbose_name='user', to=settings.AUTH_USER_MODEL),
),
]
| infinity1207/thirtylol | src/accounts/migrations/0001_initial.py | Python | mit | 2,612 |
# Generated by Django 2.2.1 on 2019-05-18 10:46
from django.db import migrations
def remove_empty(apps, schema_editor):
Certificate = apps.get_model('django_ca', 'Certificate')
Certificate.objects.filter(revoked_reason='').update(revoked_reason='unspecified')
CertificateAuthority = apps.get_model('django_ca', 'CertificateAuthority')
CertificateAuthority.objects.filter(revoked_reason='').update(revoked_reason='unspecified')
def noop(apps, schema_editor):
    pass
class Migration(migrations.Migration):
dependencies = [
('django_ca', '0013_certificateauthority_crl_number'),
]
operations = [
migrations.RunPython(remove_empty, noop),
]
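# Usage note (illustrative): noop is passed as the reverse_code, so the
# migration can be unapplied without restoring the empty reason strings, e.g.:
#   ./manage.py migrate django_ca 0013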
| mathiasertl/django-ca | ca/django_ca/migrations/0014_auto_20190518_1046.py | Python | gpl-3.0 | 677 |
from rect import Rect
class QuadTree(object):
""" QuadTree data structure of simulated objects
"""
#def __init__(self, xywh):
# self.rect = Rect(xywh)
def __init__(self, items=None, depth=8, bounding_rect=None):
"""Creates a quad-tree.
@param items:
A sequence of items to store in the quad-tree.
Note that these items must be of SimObject.
@param depth:
The maximum recursion depth.
@param bounding_rect:
The bounding rectangle of all of the items in the quad-tree.
Type of Rect or (x,y,w,h) of the rectangle
For internal use only.
"""
# The sub-quadrants are empty to start with.
self.nw = self.ne = self.se = self.sw = None
self.depth = depth
# Find this quadrant's centre.
if bounding_rect:
bounding_rect = Rect(bounding_rect)
else:
# If there isn't a bounding rect, then calculate it from the items.
if items:
bounding_rect = Rect(items[0].get_bounding_rect())
for item in items[1:]:
bounding_rect.add(Rect(item.get_bounding_rect()))
else:
# in case there are no items, assume a big rect (100x100 meters)
bounding_rect = Rect((0.0,0.0,100.0,100.0))
self.rect = bounding_rect
self.items = []
# Insert items
self.insert_items(items)
#print("QuadTree:", self, self.items)
def insert_items(self, items):
""" Insert a list of SimObject items
"""
# nothing to do if the list is empty or None
if not items:
return
rect_items = [(item, Rect(item.get_bounding_rect()))
for item in items]
# If we've reached the maximum depth then insert all items into
# this quadrant.
if self.depth <= 0 or not items:
self.items += rect_items
return
cx, cy = self.rect.center
nw_items = []
ne_items = []
se_items = []
sw_items = []
for item, item_rect in rect_items:
# Which of the sub-quadrants does the item overlap?
in_nw = item_rect.left <= cx and item_rect.top >= cy
in_sw = item_rect.left <= cx and item_rect.bottom <= cy
in_ne = item_rect.right >= cx and item_rect.top >= cy
in_se = item_rect.right >= cx and item_rect.bottom <= cy
# If it overlaps all 4 quadrants then insert it at the current
# depth, otherwise append it to a list to be inserted under every
# quadrant that it overlaps.
if in_nw and in_ne and in_se and in_sw:
self.items.append((item, item_rect))
else:
if in_nw: nw_items.append(item)
if in_ne: ne_items.append(item)
if in_se: se_items.append(item)
if in_sw: sw_items.append(item)
# Create the sub-quadrants, recursively.
if nw_items:
self.nw = QuadTree(nw_items, self.depth-1,
(self.rect.left, cy,
self.rect.width/2, self.rect.height/2))
if ne_items:
self.ne = QuadTree(ne_items, self.depth-1,
(cx, cy,
self.rect.width/2, self.rect.height/2))
if se_items:
self.se = QuadTree(se_items, self.depth-1,
(cx, self.rect.bottom,
self.rect.width/2, self.rect.height/2))
if sw_items:
self.sw = QuadTree(sw_items, self.depth-1,
(self.rect.left, self.rect.bottom,
self.rect.width/2, self.rect.height/2))
def find_items(self, xywh):
"""Returns the items that overlap a bounding rectangle.
Returns the set of all items in the quad-tree that overlap with a
bounding rectangle.
@param xywh:
The bounding rectangle being tested against the quad-tree.
"""
rect = Rect(xywh)
def overlaps(other):
return rect.right >= other.left and rect.left <= other.right and \
rect.bottom <= other.top and rect.top >= other.bottom
# Find the hits at the current level.
hits = [item for item, item_rect in self.items
if overlaps(item_rect)]
# Recursively check the lower quadrants.
cx, cy = self.rect.center
if self.nw and rect.left <= cx and rect.top >= cy:
hits += self.nw.find_items(rect)
if self.sw and rect.left <= cx and rect.bottom <= cy:
hits += self.sw.find_items(rect)
if self.ne and rect.right >= cx and rect.top >= cy:
hits += self.ne.find_items(rect)
if self.se and rect.right >= cx and rect.bottom <= cy:
hits += self.se.find_items(rect)
return set(hits)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.rect)
| ZhuangER/robot_path_planning | scripts/quadtree.py | Python | mit | 5,344 |
"""
Copyright 2014 Michael Seiler
Boston University
miseiler@gmail.com
This file is part of Crosstalker.
Crosstalker is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Crosstalker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Crosstalker. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import clustio, scripts
import numpy as N
import multiprocessing as mp
from itertools import combinations as comb
from itertools import combinations_with_replacement as combr
from auc import roc, mutual, auc
from ctalk import get_sa, mutualize
MP_MAX_QUEUE = 16
def findpathwaysizes(fn1, fn2, pathway_dict, sizes, threshold):
mat1 = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn1)
mat2 = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn2)
assert (mat1.gene_names == mat2.gene_names).all()
mutualize(mat1)
mutualize(mat2)
M = N.abs(mat1.M - mat2.M)
res = []
nd = {}
for i in xrange(len(sizes)):
nd[sizes[i]] = i
for i, j in comb(xrange(len(M)), 2):
if M[i][j] >= threshold:
res.append(list(sorted([ nd[len(pathway_dict[mat1.gene_names[i]])], nd[len(pathway_dict[mat1.gene_names[j]])] ])))
#return list(set([ tuple(x) for x in res if (N.array(x) > 16).all() and (N.array(x) < 350).all() ]))
return list(set([ tuple(x) for x in res ]))
def permcomp_by_size(fn1, fn2, fln, p1size, p2size, iter=1000):
sa1 = get_sa(fn1)
sa2 = get_sa(fn2)
assert set(sa1.keys()) == set(sa2.keys())
results = []
seedmat = N.random.rand(2, 2)
Q1 = mp_auc_matrix(fln, [p1size, p2size], sa1, similarity=True, seedmat = seedmat, iter=iter)
Q2 = mp_auc_matrix(fln, [p1size, p2size], sa2, similarity=True, seedmat = seedmat, iter=iter)
return Q1, Q2
def permcomp(fn1, fn2, fln, pathway_dict, sizes, threshold=0.0, procs=mp.cpu_count(), iter=100):
sa1 = get_sa(fn1)
sa2 = get_sa(fn2)
assert set(sa1.keys()) == set(sa2.keys())
# Dec 30 2014 currently on hold while we work out how this is going to go
#pairs = findpathwaysizes(fn1, fn2, pathway_dict, sizes, threshold)
#print('Calculating permutations for %s/%s possible pairs' % (len(pairs), (len(sizes) * (len(sizes) + 1)) / 2))
seed_dict = dict(zip(sizes, N.random.rand(len(sizes))))
result1 = mp_auc_matrix(fln, sizes, sa1, similarity=True, procs=procs, iter=iter, seed_dict=seed_dict)
result2 = mp_auc_matrix(fln, sizes, sa2, similarity=True, procs=procs, iter=iter, seed_dict=seed_dict)
return result1, result2
def predictability_perm_roc(s, size1, size2, overlap, sa, iter, similarity, seed):
"""
Calculate ROC of predictability for pathway sizes size1 and size2
given s, an sdata object that is assumed to be a distance matrix normalized between 0 and 1
if similarity is True, the matrix is assumed to be a similarity matrix instead
"""
try:
assert (s.gene_names == s.sample_ids).all()
except:
raise ValueError, 'sdata object is not a distance/similarity matrix'
try:
assert not (s.M < 0).any()
assert not (s.M > 1).any()
except:
raise ValueError, 'Unnormalized matrix; data found which is outside [0,1] bound'
if not similarity:
Q = (1 - s.M.copy()) # Convert to similarity matrix
else:
Q = s.M.copy()
# Create a new s object for convenience, certainly not efficient
c = clustio.parsers.NullParser()
c.samples = [ clustio.parsers.SampleData(sample_id=x) for x in s.gene_names ]
c.gene_names = s.gene_names.copy()
c.M = Q
import random
random.seed(seed)
from random import sample
res = []
for _ in xrange(iter):
# Take a random sample without replacement of total necessary size
gene_pool = sample(c.gene_names, size1 + size2 - overlap)
# Find splits
div1 = size1 - overlap
div2 = size2 - overlap + div1
# Engineer two sets of size size1 and size2 with exactly overlap genes overlapping
gl1 = gene_pool[:div1] + gene_pool[div2:]
gl2 = gene_pool[div1:div2] + gene_pool[div2:]
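        # Worked example (illustrative): size1=5, size2=6, overlap=2 draws a
        # pool of 5 + 6 - 2 = 9 genes; div1 = 3 and div2 = 7, so
        # gl1 = gene_pool[:3] + gene_pool[7:] has 5 genes,
        # gl2 = gene_pool[3:7] + gene_pool[7:] has 6 genes,
        # and they share exactly gene_pool[7:], i.e. 2 genes.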
#Sens = TP / (TP + FN)
#Spec = FP / (TN + FP)
#Plot sens vs 1 - spec
roc1 = roc(c, gl1, gl2, sa)
roc2 = roc(c, gl2, gl1, sa)
res.append(mutual(roc1, roc2))
return res
def _pr(q, rq, ns, num, sa):
#print('Worker started')
res = []
while True:
v = q.get()
if v is None:
#print('Worker received termination request! Stopping...')
break
psize1, psize2, overlap = v
#if i % 100 == 0 and j == i+1:
# print('Current job status: %s' % i)
seed = ns.seed_dict[(psize1, psize2, overlap)]
perms = predictability_perm_roc(ns.s, psize1, psize2, overlap, sa, ns.iter, ns.similarity, seed)
res.append((psize1, psize2, overlap, perms))
#res.append((psize2, psize1, overlap, perms))
#print('Worker %s writing to file...' % num)
#clustio.write_list(['\t'.join([str(elem) for elem in x]) for x in res], 'results_%s.txt' % num)
#print('Queuing result (%s)' % num)
rq.put(res)
def mp_auc_matrix(s, pathwaysizes, sa, similarity=False, iter=1000, procs=mp.cpu_count(), seed_dict=None):
"""
Expects a list of lists of pathways (groups of elements) found in s.gene_names
Returns an asymmetric matrix of AUC values where M[i][j] is the predictive value of pathway i for pathway j
"""
if seed_dict is None:
print('Random seeds for paired perm tests not given, generating new random seeds')
seed_dict = dict(zip(pathwaysizes, N.random.rand(len(pathwaysizes))))
assert len(seed_dict) == len(pathwaysizes)
print('Performing permutation tests for %s sets of pathway sizes and overlaps' % len(pathwaysizes))
result_dict = {}
ns = mp.Manager()
q = mp.Queue()
rq = mp.Queue()
ns.s = s
ns.similarity = similarity
ns.iter = iter
ns.seed_dict = seed_dict
# Set up queue
qsplit = [ pathwaysizes[MP_MAX_QUEUE*i:MP_MAX_QUEUE*(i+1)] for i in xrange(len(pathwaysizes)/MP_MAX_QUEUE + 1) ] # Splits the queue up into sizes MP_MAX_QUEUE, plus a remainder list. I don't know why it works.
for combs in qsplit:
for c in combs:
q.put(c)
# Start workers
#print('Worker count: %s' % procs)
workers = {}
for i in xrange(procs):
workers[i] = mp.Process(target=_pr, args=(q, rq, ns, i, sa))
workers[i].start()
    # each worker needs its own shutdown sentinel, or all but one will block forever
    for _ in xrange(procs):
        q.put(None)
# If we ask the workers to join, the results queue fills and the threads will block while waiting for the queue to unfill
# Wait for workers
for k in workers:
res = rq.get() # Blocks until results queue completes, which is also when the workers terminate
for i, j, k, v in res:
result_dict[(i,j,k)] = v
result_dict[(j,i,k)] = v
return result_dict
| miseiler/crosstalker | auc_perm.py | Python | gpl-3.0 | 7,525 |
import logging
import re
import subprocess
from operator import itemgetter
from shutil import which
from streamlink import logger
from streamlink.exceptions import StreamError
from streamlink.stream.streamprocess import StreamProcess
from streamlink.utils import escape_librtmp, rtmpparse
log = logging.getLogger(__name__)
class RTMPStream(StreamProcess):
"""RTMP stream using rtmpdump.
*Attributes:*
- :attr:`params` A :class:`dict` containing parameters passed to rtmpdump
"""
__shortname__ = "rtmp"
logging_parameters = ("quiet", "verbose", "debug", "q", "V", "z")
def __init__(self, session, params, redirect=False, **kwargs):
StreamProcess.__init__(self, session, params=params, **kwargs)
self.timeout = self.session.options.get("rtmp-timeout")
self.redirect = redirect
# set rtmpdump logging level
if self.session.options.get("subprocess-errorlog-path") or \
self.session.options.get("subprocess-errorlog"):
# disable any current logging level
for p in self.logging_parameters:
self.parameters.pop(p, None)
if logger.root.level == logging.DEBUG:
self.parameters["debug"] = True
else:
self.parameters["verbose"] = True
@property
def cmd(self):
return self.session.options.get("rtmp-rtmpdump")
def __repr__(self):
return "<RTMPStream({0!r}, redirect={1!r}>".format(self.parameters,
self.redirect)
def __json__(self):
return dict(type=RTMPStream.shortname(),
args=self.arguments,
params=self.parameters)
def open(self):
if self.session.options.get("rtmp-proxy"):
if not self._supports_param("socks"):
raise StreamError("Installed rtmpdump does not support --socks argument")
self.parameters["socks"] = self.session.options.get("rtmp-proxy")
if "jtv" in self.parameters and not self._supports_param("jtv"):
raise StreamError("Installed rtmpdump does not support --jtv argument")
if "weeb" in self.parameters and not self._supports_param("weeb"):
raise StreamError("Installed rtmpdump does not support --weeb argument")
if self.redirect:
self._check_redirect()
self.parameters["flv"] = "-"
return StreamProcess.open(self)
def _check_redirect(self, timeout=20):
params = self.parameters.copy()
# remove any existing logging parameters
for p in self.logging_parameters:
params.pop(p, None)
# and explicitly set verbose
params["verbose"] = True
log.debug("Attempting to find tcURL redirect")
process = self.spawn(params, timeout=timeout, stderr=subprocess.PIPE)
self._update_redirect(process.stderr.read())
def _update_redirect(self, stderr):
tcurl, redirect = None, None
stderr = str(stderr, "utf8")
m = re.search(r"DEBUG: Property: <Name:\s+redirect,\s+STRING:\s+(\w+://.+?)>", stderr)
if m:
redirect = m.group(1)
if redirect:
log.debug(f"Found redirect tcUrl: {redirect}")
if "rtmp" in self.parameters:
tcurl, playpath = rtmpparse(self.parameters["rtmp"])
if playpath:
rtmp = "{redirect}/{playpath}".format(redirect=redirect, playpath=playpath)
else:
rtmp = redirect
self.parameters["rtmp"] = rtmp
if "tcUrl" in self.parameters:
self.parameters["tcUrl"] = redirect
def _supports_param(self, param, timeout=5.0):
try:
rtmpdump = self.spawn(dict(help=True), timeout=timeout, stderr=subprocess.PIPE)
except StreamError as err:
raise StreamError("Error while checking rtmpdump compatibility: {0}".format(err.message))
for line in rtmpdump.stderr.readlines():
m = re.match(r"^--(\w+)", str(line, "ascii"))
if not m:
continue
if m.group(1) == param:
return True
return False
@classmethod
def is_usable(cls, session):
cmd = session.options.get("rtmp-rtmpdump")
return which(cmd) is not None
def to_url(self):
stream_params = dict(self.params)
params = [stream_params.pop("rtmp", "")]
if "swfVfy" in self.params:
stream_params["swfUrl"] = self.params["swfVfy"]
stream_params["swfVfy"] = True
if "swfhash" in self.params:
stream_params["swfVfy"] = True
stream_params.pop("swfhash", None)
stream_params.pop("swfsize", None)
# sort the keys for stability of output
for key, value in sorted(stream_params.items(), key=itemgetter(0)):
if isinstance(value, list):
for svalue in value:
params.append("{0}={1}".format(key, escape_librtmp(svalue)))
else:
params.append("{0}={1}".format(key, escape_librtmp(value)))
return " ".join(params)
| beardypig/streamlink | src/streamlink/stream/rtmpdump.py | Python | bsd-2-clause | 5,251 |
#!/usr/bin/env python
from __future__ import unicode_literals
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import copy
from test.helper import FakeYDL, assertRegexpMatches
from youtube_dl import YoutubeDL
from youtube_dl.extractor import YoutubeIE
from youtube_dl.postprocessor.common import PostProcessor
class YDL(FakeYDL):
def __init__(self, *args, **kwargs):
super(YDL, self).__init__(*args, **kwargs)
self.downloaded_info_dicts = []
self.msgs = []
def process_info(self, info_dict):
self.downloaded_info_dicts.append(info_dict)
def to_screen(self, msg):
self.msgs.append(msg)
def _make_result(formats, **kwargs):
res = {
'formats': formats,
'id': 'testid',
'title': 'testttitle',
'extractor': 'testex',
}
res.update(**kwargs)
return res
class TestFormatSelection(unittest.TestCase):
def test_prefer_free_formats(self):
# Same resolution => download webm
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 460, 'url': 'x'},
{'ext': 'mp4', 'height': 460, 'url': 'y'},
]
info_dict = _make_result(formats)
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'webm')
# Different resolution => download best quality (mp4)
ydl = YDL()
ydl.params['prefer_free_formats'] = True
formats = [
{'ext': 'webm', 'height': 720, 'url': 'a'},
{'ext': 'mp4', 'height': 1080, 'url': 'b'},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
# No prefer_free_formats => prefer mp4 and flv for greater compatibility
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'webm', 'height': 720, 'url': '_'},
{'ext': 'mp4', 'height': 720, 'url': '_'},
{'ext': 'flv', 'height': 720, 'url': '_'},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'mp4')
ydl = YDL()
ydl.params['prefer_free_formats'] = False
formats = [
{'ext': 'flv', 'height': 720, 'url': '_'},
{'ext': 'webm', 'height': 720, 'url': '_'},
]
info_dict['formats'] = formats
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['ext'], 'flv')
def test_format_limit(self):
formats = [
{'format_id': 'meh', 'url': 'http://example.com/meh', 'preference': 1},
{'format_id': 'good', 'url': 'http://example.com/good', 'preference': 2},
{'format_id': 'great', 'url': 'http://example.com/great', 'preference': 3},
{'format_id': 'excellent', 'url': 'http://example.com/exc', 'preference': 4},
]
info_dict = _make_result(formats)
ydl = YDL()
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'excellent')
ydl = YDL({'format_limit': 'good'})
assert ydl.params['format_limit'] == 'good'
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'good')
ydl = YDL({'format_limit': 'great', 'format': 'all'})
ydl.process_ie_result(info_dict.copy())
self.assertEqual(ydl.downloaded_info_dicts[0]['format_id'], 'meh')
self.assertEqual(ydl.downloaded_info_dicts[1]['format_id'], 'good')
self.assertEqual(ydl.downloaded_info_dicts[2]['format_id'], 'great')
self.assertTrue('3' in ydl.msgs[0])
ydl = YDL()
ydl.params['format_limit'] = 'excellent'
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'excellent')
def test_format_selection(self):
formats = [
{'format_id': '35', 'ext': 'mp4', 'preference': 1, 'url': '_'},
{'format_id': '45', 'ext': 'webm', 'preference': 2, 'url': '_'},
{'format_id': '47', 'ext': 'webm', 'preference': 3, 'url': '_'},
{'format_id': '2', 'ext': 'flv', 'preference': 4, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': '20/47'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '20/71/worst'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
ydl = YDL()
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '2')
ydl = YDL({'format': 'webm/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '47')
ydl = YDL({'format': '3gp/40/mp4'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], '35')
def test_format_selection_audio(self):
formats = [
{'format_id': 'audio-low', 'ext': 'webm', 'preference': 1, 'vcodec': 'none', 'url': '_'},
{'format_id': 'audio-mid', 'ext': 'webm', 'preference': 2, 'vcodec': 'none', 'url': '_'},
{'format_id': 'audio-high', 'ext': 'flv', 'preference': 3, 'vcodec': 'none', 'url': '_'},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 4, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-high')
ydl = YDL({'format': 'worstaudio'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'audio-low')
formats = [
{'format_id': 'vid-low', 'ext': 'mp4', 'preference': 1, 'url': '_'},
{'format_id': 'vid-high', 'ext': 'mp4', 'preference': 2, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestaudio/worstaudio/best'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'vid-high')
def test_format_selection_audio_exts(self):
formats = [
{'format_id': 'mp3-64', 'ext': 'mp3', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'ogg-64', 'ext': 'ogg', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-64', 'ext': 'aac', 'abr': 64, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'mp3-32', 'ext': 'mp3', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
{'format_id': 'aac-32', 'ext': 'aac', 'abr': 32, 'url': 'http://_', 'vcodec': 'none'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'best'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'aac-64')
ydl = YDL({'format': 'mp3'})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'mp3-64')
ydl = YDL({'prefer_free_formats': True})
ie = YoutubeIE(ydl)
ie._sort_formats(info_dict['formats'])
ydl.process_ie_result(copy.deepcopy(info_dict))
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'ogg-64')
def test_format_selection_video(self):
formats = [
{'format_id': 'dash-video-low', 'ext': 'mp4', 'preference': 1, 'acodec': 'none', 'url': '_'},
{'format_id': 'dash-video-high', 'ext': 'mp4', 'preference': 2, 'acodec': 'none', 'url': '_'},
{'format_id': 'vid', 'ext': 'mp4', 'preference': 3, 'url': '_'},
]
info_dict = _make_result(formats)
ydl = YDL({'format': 'bestvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-high')
ydl = YDL({'format': 'worstvideo'})
ydl.process_ie_result(info_dict.copy())
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'dash-video-low')
def test_youtube_format_selection(self):
order = [
'38', '37', '46', '22', '45', '35', '44', '18', '34', '43', '6', '5', '36', '17', '13',
# Apple HTTP Live Streaming
'96', '95', '94', '93', '92', '132', '151',
# 3D
'85', '84', '102', '83', '101', '82', '100',
# Dash video
'137', '248', '136', '247', '135', '246',
'245', '244', '134', '243', '133', '242', '160',
# Dash audio
'141', '172', '140', '171', '139',
]
for f1id, f2id in zip(order, order[1:]):
f1 = YoutubeIE._formats[f1id].copy()
f1['format_id'] = f1id
f1['url'] = 'url:' + f1id
f2 = YoutubeIE._formats[f2id].copy()
f2['format_id'] = f2id
f2['url'] = 'url:' + f2id
info_dict = _make_result([f1, f2], extractor='youtube')
ydl = YDL()
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
info_dict = _make_result([f2, f1], extractor='youtube')
ydl = YDL()
yie = YoutubeIE(ydl)
yie._sort_formats(info_dict['formats'])
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], f1id)
def test_format_filtering(self):
formats = [
{'format_id': 'A', 'filesize': 500, 'width': 1000},
{'format_id': 'B', 'filesize': 1000, 'width': 500},
{'format_id': 'C', 'filesize': 1000, 'width': 400},
{'format_id': 'D', 'filesize': 2000, 'width': 600},
{'format_id': 'E', 'filesize': 3000},
{'format_id': 'F'},
{'format_id': 'G', 'filesize': 1000000},
]
for f in formats:
f['url'] = 'http://_/'
f['ext'] = 'unknown'
info_dict = _make_result(formats)
ydl = YDL({'format': 'best[filesize<3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'D')
ydl = YDL({'format': 'best[filesize<=3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': 'best[filesize <= ? 3000]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'F')
ydl = YDL({'format': 'best [filesize = 1000] [width>450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'B')
ydl = YDL({'format': 'best [filesize = 1000] [width!=450]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'C')
ydl = YDL({'format': '[filesize>?1]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
ydl = YDL({'format': '[filesize<1M]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'E')
ydl = YDL({'format': '[filesize<1MiB]'})
ydl.process_ie_result(info_dict)
downloaded = ydl.downloaded_info_dicts[0]
self.assertEqual(downloaded['format_id'], 'G')
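        # A hedged reading of the two size filters above: '1M' parses as a
        # decimal megabyte (10**6 bytes), so format G (filesize 1000000) fails
        # 'filesize<1M' but passes 'filesize<1MiB', since '1MiB' is a binary
        # mebibyte (2**20 = 1048576 bytes).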
def test_add_extra_info(self):
test_dict = {
'extractor': 'Foo',
}
extra_info = {
'extractor': 'Bar',
'playlist': 'funny videos',
}
YDL.add_extra_info(test_dict, extra_info)
self.assertEqual(test_dict['extractor'], 'Foo')
self.assertEqual(test_dict['playlist'], 'funny videos')
def test_prepare_filename(self):
info = {
'id': '1234',
'ext': 'mp4',
'width': None,
}
def fname(templ):
ydl = YoutubeDL({'outtmpl': templ})
return ydl.prepare_filename(info)
self.assertEqual(fname('%(id)s.%(ext)s'), '1234.mp4')
self.assertEqual(fname('%(id)s-%(width)s.%(ext)s'), '1234-NA.mp4')
# Replace missing fields with 'NA'
self.assertEqual(fname('%(uploader_date)s-%(id)s.%(ext)s'), 'NA-1234.mp4')
def test_format_note(self):
ydl = YoutubeDL()
self.assertEqual(ydl._format_note({}), '')
assertRegexpMatches(self, ydl._format_note({
'vbr': 10,
        }), r'^\s*10k$')
def test_postprocessors(self):
filename = 'post-processor-testfile.mp4'
audiofile = filename + '.mp3'
class SimplePP(PostProcessor):
def run(self, info):
with open(audiofile, 'wt') as f:
f.write('EXAMPLE')
                info['filepath']  # touch the key so a missing filepath raises KeyError
return False, info
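        # A hedged note on the contract exercised here: PostProcessor.run()
        # returns a (keep_video, info) pair; returning False marks the input
        # file as deletable, so it is removed after post-processing unless
        # the 'keepvideo' option is set (asserted below).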
def run_pp(params):
with open(filename, 'wt') as f:
f.write('EXAMPLE')
ydl = YoutubeDL(params)
ydl.add_post_processor(SimplePP())
ydl.post_process(filename, {'filepath': filename})
run_pp({'keepvideo': True})
self.assertTrue(os.path.exists(filename), '%s doesn\'t exist' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(filename)
os.unlink(audiofile)
run_pp({'keepvideo': False})
self.assertFalse(os.path.exists(filename), '%s exists' % filename)
self.assertTrue(os.path.exists(audiofile), '%s doesn\'t exist' % audiofile)
os.unlink(audiofile)
if __name__ == '__main__':
unittest.main()
|
marado/youtube-dl
|
test/test_YoutubeDL.py
|
Python
|
unlicense
| 15,690
|
# -*- coding: utf-8 -*-
import os, logging
from celery import Celery
from redis import StrictRedis, exceptions
from flask import Flask
app = Flask(__name__, instance_relative_config=True)
app.config.from_object('config')
app.config.from_pyfile('config.py')
# configure logging
logging.basicConfig(
filename=app.config['BREEDCAFS_LOG'],
level=logging.DEBUG,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S'
)
# celery for scheduling large uploads
celery = Celery(
    app.import_name,  # Celery's first argument is a name string, not the Flask app object
backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL']
)
celery.conf.update(
task_serializer='pickle',
result_serializer='pickle',
event_serializer='pickle',
accept_content=['pickle', 'json']
)
# and also use redis (not just with celery) for basic local data store like login attempts and caching
redis_store = StrictRedis(host='localhost', port=app.config['REDIS_PORT'], db=0)
redis_exceptions = exceptions
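# A minimal sketch (hypothetical key name, not part of this app) of the kind
# of bookkeeping redis_store is used for, e.g. counting login attempts with
# an automatic reset:
#
#     def record_login_attempt(username):
#         key = 'login_attempts:' + username
#         attempts = redis_store.incr(key)   # atomic per-user counter
#         redis_store.expire(key, 600)       # reset the counter after 10 minutes
#         return attempts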
from neo4j import GraphDatabase, ServiceUnavailable, TransactionError
from neo4j.exceptions import SecurityError
#from neo4j import watch
from app import views
# these are the variable view rules for retrieving lists:
app.add_url_rule(
'/location/countries/',
view_func=views.ListCountries.as_view('countries'),
methods=['GET']
)
app.add_url_rule(
'/location/<country>/',
view_func=views.ListRegions.as_view('regions'),
methods=['GET']
)
app.add_url_rule(
'/location/<country>/<region>/',
view_func=views.ListFarms.as_view('farms'),
methods=['GET']
)
app.add_url_rule(
'/location/<country>/<region>/<farm>/',
view_func=views.ListFields.as_view('fields'),
methods=['GET']
)
app.add_url_rule(
'/location/blocks/<field_uid>/',
view_func=views.ListBlocks.as_view('blocks'),
methods=['GET']
)
app.add_url_rule(
'/location/treecount/<uid>/',
view_func=views.TreeCount.as_view('treecount'),
methods=['GET']
)
|
marcusmchale/breedcafs
|
app/__init__.py
|
Python
|
gpl-3.0
| 1,878
|
# -*- test-case-name: twisted.positioning.test.test_base,twisted.positioning.test.test_sentence -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Generic positioning base classes.
@since: 14.0
"""
from __future__ import absolute_import, division
from functools import partial
from operator import attrgetter
from zope.interface import implementer
from constantly import Names, NamedConstant
from twisted.python.util import FancyEqMixin
from twisted.positioning import ipositioning
MPS_PER_KNOT = 0.5144444444444444
MPS_PER_KPH = 0.27777777777777777
METERS_PER_FOOT = 0.3048
class Angles(Names):
"""
The types of angles.
@cvar LATITUDE: Angle representing a latitude of an object.
@type LATITUDE: L{NamedConstant}
@cvar LONGITUDE: Angle representing the longitude of an object.
@type LONGITUDE: L{NamedConstant}
@cvar HEADING: Angle representing the heading of an object.
@type HEADING: L{NamedConstant}
@cvar VARIATION: Angle representing a magnetic variation.
@type VARIATION: L{NamedConstant}
"""
LATITUDE = NamedConstant()
LONGITUDE = NamedConstant()
HEADING = NamedConstant()
VARIATION = NamedConstant()
class Directions(Names):
"""
The four cardinal directions (north, east, south, west).
"""
NORTH = NamedConstant()
EAST = NamedConstant()
SOUTH = NamedConstant()
WEST = NamedConstant()
@implementer(ipositioning.IPositioningReceiver)
class BasePositioningReceiver(object):
"""
A base positioning receiver.
This class would be a good base class for building positioning
receivers. It implements the interface (so you don't have to) with stub
methods.
People who want to implement positioning receivers should subclass this
class and override the specific callbacks they want to handle.
"""
def timeReceived(self, time):
"""
Implements L{IPositioningReceiver.timeReceived} stub.
"""
def headingReceived(self, heading):
"""
Implements L{IPositioningReceiver.headingReceived} stub.
"""
def speedReceived(self, speed):
"""
Implements L{IPositioningReceiver.speedReceived} stub.
"""
def climbReceived(self, climb):
"""
Implements L{IPositioningReceiver.climbReceived} stub.
"""
def positionReceived(self, latitude, longitude):
"""
Implements L{IPositioningReceiver.positionReceived} stub.
"""
def positionErrorReceived(self, positionError):
"""
Implements L{IPositioningReceiver.positionErrorReceived} stub.
"""
def altitudeReceived(self, altitude):
"""
Implements L{IPositioningReceiver.altitudeReceived} stub.
"""
def beaconInformationReceived(self, beaconInformation):
"""
Implements L{IPositioningReceiver.beaconInformationReceived} stub.
"""
class InvalidSentence(Exception):
"""
An exception raised when a sentence is invalid.
"""
class InvalidChecksum(Exception):
"""
An exception raised when the checksum of a sentence is invalid.
"""
class Angle(FancyEqMixin, object):
"""
An object representing an angle.
@cvar _RANGE_EXPRESSIONS: A collection of expressions for the allowable
range for the angular value of a particular coordinate value.
@type _RANGE_EXPRESSIONS: C{dict} of L{Angles} constants to callables
@cvar _ANGLE_TYPE_NAMES: English names for angle types.
@type _ANGLE_TYPE_NAMES: C{dict} of L{Angles} constants to C{str}
"""
_RANGE_EXPRESSIONS = {
Angles.LATITUDE: lambda latitude: -90.0 < latitude < 90.0,
Angles.LONGITUDE: lambda longitude: -180.0 < longitude < 180.0,
Angles.HEADING: lambda heading: 0 <= heading < 360,
Angles.VARIATION: lambda variation: -180 < variation <= 180,
}
_ANGLE_TYPE_NAMES = {
Angles.LATITUDE: "Latitude",
Angles.LONGITUDE: "Longitude",
Angles.VARIATION: "Variation",
Angles.HEADING: "Heading",
}
compareAttributes = 'angleType', 'inDecimalDegrees'
def __init__(self, angle=None, angleType=None):
"""
Initializes an angle.
@param angle: The value of the angle in decimal degrees. (L{None} if
unknown).
@type angle: C{float} or L{None}
@param angleType: A symbolic constant describing the angle type. Should
            be one of L{Angles} or L{None} if unknown.
@raises ValueError: If the angle type is not the default argument,
but it is an unknown type (not in C{Angle._RANGE_EXPRESSIONS}),
or it is a known type but the supplied value was out of the
allowable range for said type.
"""
if angleType is not None and angleType not in self._RANGE_EXPRESSIONS:
raise ValueError("Unknown angle type")
if angle is not None and angleType is not None:
rangeExpression = self._RANGE_EXPRESSIONS[angleType]
if not rangeExpression(angle):
template = "Angle {0} not in allowed range for type {1}"
raise ValueError(template.format(angle, angleType))
self.angleType = angleType
self._angle = angle
@property
def inDecimalDegrees(self):
"""
The value of this angle in decimal degrees. This value is immutable.
@return: This angle expressed in decimal degrees, or L{None} if the
angle is unknown.
@rtype: C{float} (or L{None})
"""
return self._angle
@property
def inDegreesMinutesSeconds(self):
"""
The value of this angle as a degrees, minutes, seconds tuple. This
value is immutable.
@return: This angle expressed in degrees, minutes, seconds. L{None} if
the angle is unknown.
@rtype: 3-C{tuple} of C{int} (or L{None})
"""
if self._angle is None:
return None
degrees = abs(int(self._angle))
fractionalDegrees = abs(self._angle - int(self._angle))
decimalMinutes = 60 * fractionalDegrees
minutes = int(decimalMinutes)
fractionalMinutes = decimalMinutes - int(decimalMinutes)
decimalSeconds = 60 * fractionalMinutes
return degrees, minutes, int(decimalSeconds)
def setSign(self, sign):
"""
Sets the sign of this angle.
@param sign: The new sign. C{1} for positive and C{-1} for negative
signs, respectively.
@type sign: C{int}
@raise ValueError: If the C{sign} parameter is not C{-1} or C{1}.
"""
if sign not in (-1, 1):
raise ValueError("bad sign (got %s, expected -1 or 1)" % sign)
self._angle = sign * abs(self._angle)
def __float__(self):
"""
Returns this angle as a float.
@return: The float value of this angle, expressed in degrees.
@rtype: C{float}
"""
return self._angle
def __repr__(self):
"""
Returns a string representation of this angle.
@return: The string representation.
@rtype: C{str}
"""
return "<{s._angleTypeNameRepr} ({s._angleValueRepr})>".format(s=self)
@property
def _angleValueRepr(self):
"""
Returns a string representation of the angular value of this angle.
This is a helper function for the actual C{__repr__}.
@return: The string representation.
@rtype: C{str}
"""
if self.inDecimalDegrees is not None:
return "%s degrees" % round(self.inDecimalDegrees, 2)
else:
return "unknown value"
@property
def _angleTypeNameRepr(self):
"""
Returns a string representation of the type of this angle.
This is a helper function for the actual C{__repr__}.
@return: The string representation.
@rtype: C{str}
"""
try:
return self._ANGLE_TYPE_NAMES[self.angleType]
except KeyError:
return "Angle of unknown type"
class Heading(Angle):
"""
The heading of a mobile object.
@ivar variation: The (optional) magnetic variation.
The sign of the variation is positive for variations towards the east
(clockwise from north), and negative for variations towards the west
(counterclockwise from north).
If the variation is unknown or not applicable, this is L{None}.
@type variation: C{Angle} or L{None}.
    @ivar correctedHeading: The heading, corrected for variation. If the
        variation is unknown (L{None}), this is L{None} as well. This
        attribute is read-only (its value is determined by the angle and
        variation attributes). The value is coerced to being between 0
        (inclusive) and 360 (exclusive).
"""
def __init__(self, angle=None, variation=None):
"""
Initializes an angle with an optional variation.
"""
Angle.__init__(self, angle, Angles.HEADING)
self.variation = variation
@classmethod
def fromFloats(cls, angleValue=None, variationValue=None):
"""
Constructs a Heading from the float values of the angle and variation.
@param angleValue: The angle value of this heading.
@type angleValue: C{float}
@param variationValue: The value of the variation of this heading.
@type variationValue: C{float}
        @return: A C{Heading} with the given values.
"""
variation = Angle(variationValue, Angles.VARIATION)
return cls(angleValue, variation)
@property
def correctedHeading(self):
"""
Corrects the heading by the given variation. This is sometimes known as
the true heading.
@return: The heading, corrected by the variation. If the variation or
the angle are unknown, returns L{None}.
        @rtype: L{Angle} or L{None}
"""
if self._angle is None or self.variation is None:
return None
angle = (self.inDecimalDegrees - self.variation.inDecimalDegrees) % 360
return Angle(angle, Angles.HEADING)
def setSign(self, sign):
"""
Sets the sign of the variation of this heading.
@param sign: The new sign. C{1} for positive and C{-1} for negative
signs, respectively.
@type sign: C{int}
@raise ValueError: If the C{sign} parameter is not C{-1} or C{1}.
"""
if self.variation.inDecimalDegrees is None:
raise ValueError("can't set the sign of an unknown variation")
self.variation.setSign(sign)
compareAttributes = list(Angle.compareAttributes) + ["variation"]
def __repr__(self):
"""
Returns a string representation of this angle.
@return: The string representation.
@rtype: C{str}
"""
if self.variation is None:
variationRepr = "unknown variation"
else:
variationRepr = repr(self.variation)
return "<%s (%s, %s)>" % (
self._angleTypeNameRepr, self._angleValueRepr, variationRepr)
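# A minimal usage sketch (hypothetical values): a variation of -5 degrees
# (towards the west) corrects a 10 degree heading to a true heading of 15.
#
#     >>> h = Heading.fromFloats(10.0, variationValue=-5.0)
#     >>> float(h.correctedHeading)
#     15.0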
class Coordinate(Angle):
"""
A coordinate.
@ivar angle: The value of the coordinate in decimal degrees, with the usual
rules for sign (northern and eastern hemispheres are positive, southern
and western hemispheres are negative).
@type angle: C{float}
"""
def __init__(self, angle, coordinateType=None):
"""
Initializes a coordinate.
@param angle: The angle of this coordinate in decimal degrees. The
hemisphere is determined by the sign (north and east are positive).
If this coordinate describes a latitude, this value must be within
-90.0 and +90.0 (exclusive). If this value describes a longitude,
this value must be within -180.0 and +180.0 (exclusive).
@type angle: C{float}
@param coordinateType: The coordinate type. One of L{Angles.LATITUDE},
L{Angles.LONGITUDE} or L{None} if unknown.
"""
if coordinateType not in [Angles.LATITUDE, Angles.LONGITUDE, None]:
raise ValueError("coordinateType must be one of Angles.LATITUDE, "
"Angles.LONGITUDE or None, was {!r}"
.format(coordinateType))
Angle.__init__(self, angle, coordinateType)
@property
def hemisphere(self):
"""
Gets the hemisphere of this coordinate.
@return: A symbolic constant representing a hemisphere (one of
L{Angles})
"""
if self.angleType is Angles.LATITUDE:
if self.inDecimalDegrees < 0:
return Directions.SOUTH
else:
return Directions.NORTH
elif self.angleType is Angles.LONGITUDE:
if self.inDecimalDegrees < 0:
return Directions.WEST
else:
return Directions.EAST
else:
raise ValueError("unknown coordinate type (cant find hemisphere)")
class Altitude(FancyEqMixin, object):
"""
An altitude.
@ivar inMeters: The altitude represented by this object, in meters. This
attribute is read-only.
@type inMeters: C{float}
@ivar inFeet: As above, but expressed in feet.
@type inFeet: C{float}
"""
compareAttributes = 'inMeters',
def __init__(self, altitude):
"""
Initializes an altitude.
@param altitude: The altitude in meters.
@type altitude: C{float}
"""
self._altitude = altitude
@property
def inFeet(self):
"""
Gets the altitude this object represents, in feet.
@return: The altitude, expressed in feet.
@rtype: C{float}
"""
return self._altitude / METERS_PER_FOOT
@property
def inMeters(self):
"""
Returns the altitude this object represents, in meters.
        @return: The altitude, expressed in meters.
@rtype: C{float}
"""
return self._altitude
def __float__(self):
"""
Returns the altitude represented by this object expressed in meters.
@return: The altitude represented by this object, expressed in meters.
@rtype: C{float}
"""
return self._altitude
def __repr__(self):
"""
Returns a string representation of this altitude.
@return: The string representation.
@rtype: C{str}
"""
return "<Altitude (%s m)>" % (self._altitude,)
class _BaseSpeed(FancyEqMixin, object):
"""
An object representing the abstract concept of the speed (rate of
movement) of a mobile object.
This primarily has behavior for converting between units and comparison.
"""
compareAttributes = 'inMetersPerSecond',
def __init__(self, speed):
"""
Initializes a speed.
@param speed: The speed that this object represents, expressed in
meters per second.
@type speed: C{float}
@raises ValueError: Raised if value was invalid for this particular
kind of speed. Only happens in subclasses.
"""
self._speed = speed
@property
def inMetersPerSecond(self):
"""
The speed that this object represents, expressed in meters per second.
This attribute is immutable.
@return: The speed this object represents, in meters per second.
@rtype: C{float}
"""
return self._speed
@property
def inKnots(self):
"""
Returns the speed represented by this object, expressed in knots. This
attribute is immutable.
@return: The speed this object represents, in knots.
@rtype: C{float}
"""
return self._speed / MPS_PER_KNOT
def __float__(self):
"""
Returns the speed represented by this object expressed in meters per
second.
@return: The speed represented by this object, expressed in meters per
second.
@rtype: C{float}
"""
return self._speed
def __repr__(self):
"""
Returns a string representation of this speed object.
@return: The string representation.
@rtype: C{str}
"""
speedValue = round(self.inMetersPerSecond, 2)
return "<%s (%s m/s)>" % (self.__class__.__name__, speedValue)
class Speed(_BaseSpeed):
"""
The speed (rate of movement) of a mobile object.
"""
def __init__(self, speed):
"""
Initializes a L{Speed} object.
@param speed: The speed that this object represents, expressed in
meters per second.
@type speed: C{float}
@raises ValueError: Raised if C{speed} is negative.
"""
if speed < 0:
raise ValueError("negative speed: %r" % (speed,))
_BaseSpeed.__init__(self, speed)
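# A minimal usage sketch (hypothetical values): speeds convert to knots via
# MPS_PER_KNOT, and negative speeds are rejected.
#
#     >>> round(Speed(10.0).inKnots, 2)
#     19.44
#     >>> Speed(-1.0)
#     Traceback (most recent call last):
#         ...
#     ValueError: negative speed: -1.0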
class Climb(_BaseSpeed):
"""
The climb ("vertical speed") of an object.
"""
def __init__(self, climb):
"""
Initializes a L{Climb} object.
@param climb: The climb that this object represents, expressed in
meters per second.
@type climb: C{float}
"""
_BaseSpeed.__init__(self, climb)
class PositionError(FancyEqMixin, object):
"""
Position error information.
@cvar _ALLOWABLE_THRESHOLD: The maximum allowable difference between PDOP
and the geometric mean of VDOP and HDOP. That difference is supposed
to be zero, but can be non-zero because of rounding error and limited
reporting precision. You should never have to change this value.
@type _ALLOWABLE_THRESHOLD: C{float}
    @cvar _DOP_EXPRESSIONS: A mapping of DOP types (C{'pdop'}, C{'hdop'},
        C{'vdop'}) to a list of callables that take self and return that DOP
        type, or raise C{TypeError}. This allows a DOP value to either be
        returned directly if it's known, or computed from other DOP types if
        it isn't.
@type _DOP_EXPRESSIONS: C{dict} of C{str} to callables
@ivar pdop: The position dilution of precision. L{None} if unknown.
@type pdop: C{float} or L{None}
@ivar hdop: The horizontal dilution of precision. L{None} if unknown.
@type hdop: C{float} or L{None}
@ivar vdop: The vertical dilution of precision. L{None} if unknown.
@type vdop: C{float} or L{None}
"""
compareAttributes = 'pdop', 'hdop', 'vdop'
def __init__(self, pdop=None, hdop=None, vdop=None, testInvariant=False):
"""
Initializes a positioning error object.
@param pdop: The position dilution of precision. L{None} if unknown.
@type pdop: C{float} or L{None}
@param hdop: The horizontal dilution of precision. L{None} if unknown.
@type hdop: C{float} or L{None}
@param vdop: The vertical dilution of precision. L{None} if unknown.
@type vdop: C{float} or L{None}
@param testInvariant: Flag to test if the DOP invariant is valid or
            not. If C{True}, the invariant (PDOP = (HDOP**2 + VDOP**2)**0.5) is
checked at every mutation. By default, this is false, because the
vast majority of DOP-providing devices ignore this invariant.
@type testInvariant: c{bool}
"""
self._pdop = pdop
self._hdop = hdop
self._vdop = vdop
self._testInvariant = testInvariant
self._testDilutionOfPositionInvariant()
    _ALLOWABLE_THRESHOLD = 0.01
def _testDilutionOfPositionInvariant(self):
"""
Tests if this positioning error object satisfies the dilution of
        position invariant (PDOP = (HDOP**2 + VDOP**2)**0.5), unless the
        C{self._testInvariant} instance variable is C{False}.
        @return: L{None} if the invariant was satisfied or not tested.
@raises ValueError: Raised if the invariant was tested but not
satisfied.
"""
if not self._testInvariant:
return
for x in (self.pdop, self.hdop, self.vdop):
if x is None:
return
delta = abs(self.pdop - (self.hdop**2 + self.vdop**2)**.5)
        if delta > self._ALLOWABLE_THRESHOLD:
raise ValueError("invalid combination of dilutions of precision: "
"position: %s, horizontal: %s, vertical: %s"
% (self.pdop, self.hdop, self.vdop))
_DOP_EXPRESSIONS = {
'pdop': [
lambda self: float(self._pdop),
lambda self: (self._hdop**2 + self._vdop**2)**.5,
],
'hdop': [
lambda self: float(self._hdop),
lambda self: (self._pdop**2 - self._vdop**2)**.5,
],
'vdop': [
lambda self: float(self._vdop),
lambda self: (self._pdop**2 - self._hdop**2)**.5,
],
}
def _getDOP(self, dopType):
"""
Gets a particular dilution of position value.
@param dopType: The type of dilution of position to get. One of
('pdop', 'hdop', 'vdop').
@type dopType: C{str}
@return: The DOP if it is known, L{None} otherwise.
@rtype: C{float} or L{None}
"""
for dopExpression in self._DOP_EXPRESSIONS[dopType]:
try:
return dopExpression(self)
except TypeError:
continue
def _setDOP(self, dopType, value):
"""
Sets a particular dilution of position value.
@param dopType: The type of dilution of position to set. One of
('pdop', 'hdop', 'vdop').
@type dopType: C{str}
@param value: The value to set the dilution of position type to.
@type value: C{float}
If this position error tests dilution of precision invariants,
it will be checked. If the invariant is not satisfied, the
assignment will be undone and C{ValueError} is raised.
"""
attributeName = "_" + dopType
oldValue = getattr(self, attributeName)
setattr(self, attributeName, float(value))
try:
self._testDilutionOfPositionInvariant()
except ValueError:
setattr(self, attributeName, oldValue)
raise
pdop = property(fget=lambda self: self._getDOP('pdop'),
fset=lambda self, value: self._setDOP('pdop', value))
hdop = property(fget=lambda self: self._getDOP('hdop'),
fset=lambda self, value: self._setDOP('hdop', value))
vdop = property(fget=lambda self: self._getDOP('vdop'),
fset=lambda self, value: self._setDOP('vdop', value))
_REPR_TEMPLATE = "<PositionError (pdop: %s, hdop: %s, vdop: %s)>"
def __repr__(self):
"""
Returns a string representation of positioning information object.
@return: The string representation.
@rtype: C{str}
"""
return self._REPR_TEMPLATE % (self.pdop, self.hdop, self.vdop)
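# A minimal usage sketch (hypothetical values): a DOP that was not supplied
# is derived from the others via PDOP = (HDOP**2 + VDOP**2)**0.5.
#
#     >>> pe = PositionError(hdop=3.0, vdop=4.0)
#     >>> pe.pdop
#     5.0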
class BeaconInformation(object):
"""
Information about positioning beacons (a generalized term for the reference
objects that help you determine your position, such as satellites or cell
towers).
@ivar seenBeacons: A set of visible beacons. Note that visible beacons are not
necessarily used in acquiring a positioning fix.
@type seenBeacons: C{set} of L{IPositioningBeacon}
@ivar usedBeacons: A set of the beacons that were used in obtaining a
positioning fix. This only contains beacons that are actually used, not
beacons for which it is unknown if they are used or not.
@type usedBeacons: C{set} of L{IPositioningBeacon}
"""
def __init__(self, seenBeacons=()):
"""
Initializes a beacon information object.
@param seenBeacons: A collection of beacons that are currently seen.
@type seenBeacons: iterable of L{IPositioningBeacon}s
"""
self.seenBeacons = set(seenBeacons)
self.usedBeacons = set()
def __repr__(self):
"""
Returns a string representation of this beacon information object.
The beacons are sorted by their identifier.
@return: The string representation.
@rtype: C{str}
"""
sortedBeacons = partial(sorted, key=attrgetter("identifier"))
usedBeacons = sortedBeacons(self.usedBeacons)
unusedBeacons = sortedBeacons(self.seenBeacons - self.usedBeacons)
template = ("<BeaconInformation ("
"used beacons ({numUsed}): {usedBeacons}, "
"unused beacons: {unusedBeacons})>")
formatted = template.format(numUsed=len(self.usedBeacons),
usedBeacons=usedBeacons,
unusedBeacons=unusedBeacons)
return formatted
@implementer(ipositioning.IPositioningBeacon)
class PositioningBeacon(object):
"""
A positioning beacon.
@ivar identifier: The unique identifier for this beacon. This is usually
an integer. For GPS, this is also known as the PRN.
@type identifier: Pretty much anything that can be used as a unique
identifier. Depends on the implementation.
"""
def __init__(self, identifier):
"""
Initializes a positioning beacon.
@param identifier: The identifier for this beacon.
@type identifier: Can be pretty much anything (see ivar documentation).
"""
self.identifier = identifier
def __hash__(self):
"""
Returns the hash of the identifier for this beacon.
@return: The hash of the identifier. (C{hash(self.identifier)})
@rtype: C{int}
"""
return hash(self.identifier)
def __repr__(self):
"""
Returns a string representation of this beacon.
@return: The string representation.
@rtype: C{str}
"""
return "<Beacon ({s.identifier})>".format(s=self)
class Satellite(PositioningBeacon):
"""
A satellite.
@ivar azimuth: The azimuth of the satellite. This is the heading (positive
angle relative to true north) where the satellite appears to be to the
device.
@ivar elevation: The (positive) angle above the horizon where this
satellite appears to be to the device.
@ivar signalToNoiseRatio: The signal to noise ratio of the signal coming
from this satellite.
"""
def __init__(self,
identifier,
azimuth=None,
elevation=None,
signalToNoiseRatio=None):
"""
Initializes a satellite object.
@param identifier: The PRN (unique identifier) of this satellite.
@type identifier: C{int}
@param azimuth: The azimuth of the satellite (see instance variable
documentation).
@type azimuth: C{float}
@param elevation: The elevation of the satellite (see instance variable
documentation).
@type elevation: C{float}
@param signalToNoiseRatio: The signal to noise ratio of the connection
to this satellite (see instance variable documentation).
@type signalToNoiseRatio: C{float}
"""
PositioningBeacon.__init__(self, int(identifier))
self.azimuth = azimuth
self.elevation = elevation
self.signalToNoiseRatio = signalToNoiseRatio
def __repr__(self):
"""
Returns a string representation of this Satellite.
@return: The string representation.
@rtype: C{str}
"""
template = ("<Satellite ({s.identifier}), "
"azimuth: {s.azimuth}, "
"elevation: {s.elevation}, "
"snr: {s.signalToNoiseRatio}>")
return template.format(s=self)
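# A minimal usage sketch (hypothetical values): satellites are hashable
# beacons, so the seen/used sets in BeaconInformation can track them
# directly.
#
#     >>> sat = Satellite(19, azimuth=110.0, elevation=45.0)
#     >>> info = BeaconInformation([sat])
#     >>> info.usedBeacons.add(sat)
#     >>> (len(info.seenBeacons), len(info.usedBeacons))
#     (1, 1)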
__all__ = [
'Altitude',
'Angle',
'Angles',
'BasePositioningReceiver',
'BeaconInformation',
'Climb',
'Coordinate',
'Directions',
'Heading',
'InvalidChecksum',
'InvalidSentence',
'METERS_PER_FOOT',
'MPS_PER_KNOT',
'MPS_PER_KPH',
'PositionError',
'PositioningBeacon',
'Satellite',
'Speed'
]
|
EricMuller/mynotes-backend
|
requirements/twisted/Twisted-17.1.0/src/twisted/positioning/base.py
|
Python
|
mit
| 28,410
|
from __future__ import absolute_import, print_function
from knit.conf import get_host_port, infer_extra_params, DEFAULTS, get_config
def test_get_host_port():
host, port = get_host_port('hdfs://foo.bar.com:8080')
assert host == 'foo.bar.com'
assert port == 8080
host, port = get_host_port('foo.bar.com:8080')
assert host == 'foo.bar.com'
assert port == 8080
host, port = get_host_port('foo.bar.com')
assert host == 'foo.bar.com'
assert port is None
def test_infer_extra_params():
# == defaults ==
extra = infer_extra_params({})
assert extra == DEFAULTS
# == replication_factor ==
extra = infer_extra_params({'dfs.replication': 10})
assert extra['replication_factor'] == 10
# == resourcemanager and port ==
# take port from webapp.address if provided
config = {'yarn.resourcemanager.webapp.address': 'priority1.com:1111',
'yarn.resourcemanager.hostname': 'priority2.com'}
extra = infer_extra_params(config)
assert extra['rm'] == 'priority1.com'
assert extra['rm_port'] == 1111
# Fallback to hostname and default port
config = {'yarn.resourcemanager.hostname': 'priority2.com'}
extra = infer_extra_params(config)
assert extra['rm'] == 'priority2.com'
assert extra['rm_port'] == DEFAULTS['rm_port']
# == resourcemanager https port ==
config = {'yarn.resourcemanager.webapp.https.address': 'address.com:1111'}
extra = infer_extra_params(config)
assert extra['rm_port_https'] == 1111
def test_get_config():
kwargs = dict(rm="e", rm_port=27182, replication_factor=1)
config = get_config(autodetect=False, **kwargs)
for k, v in kwargs.items():
assert config[k] == v
# Just specified kwargs
assert 'user' not in config
config = get_config(**kwargs)
for k, v in kwargs.items():
assert config[k] == v
# Not just specified kwargs
assert 'user' in config
|
blaze/knit
|
knit/tests/test_conf.py
|
Python
|
bsd-3-clause
| 1,942
|
'''
Created on Oct 7, 2013
@author: lakmal
'''
import uno,wap,unohelper
from com.sun.star.awt import XActionListener
from PIL import Image
import FileHandler as fh
class MyActionListener( unohelper.Base, XActionListener ):
def __init__(self, eventObject):
self.eventObject = eventObject #save the parent dialog of the button object
def actionPerformed(self, actionEvent):
oControl = actionEvent.Source #get the name of the object which created the event
name = oControl.getModel().getPropertyValue("Name")
if(name=="SelectFormulaButton"): #if the event is from the select formula button
query = self.eventObject.getControl("selectFormulaList").getSelectedItem()
query = query.split("\n")[0] #get the formula selected
txtList = FormulaListDialog().textDetailsAboutFormula(query)
#get the text details related to the formula
imgList = FormulaListDialog().imageDetailsAboutFormula(query)
#get the image details related to the formula
if not (txtList[0][0]=="N"): #if an exception is not thrown
oDialog = FormulaListDialog().createAvailableResourcesDialog(query,txtList, imgList)
oDialog.execute() #execute the available resources dialog
self.eventObject.endExecute() #finish the dialog
elif(name=="GetResourcesButton"): #if the event is created by the get resources button
count=1
selectedItems=[] #initially the selected items is null
selectedImages=[] #initially the selected images list is null
query = self.eventObject.getControl("formulaLabel").getModel().getPropertyValue("Label")
#retrieve the query name from the parent dialog
while (True): #loop until the checkboxes are finished
control =self.eventObject.getControl("text"+`count`)
if(control==None): #if no check box is found break
break
if(control.getState()==1): #if a selected checkbox is found append as true
selectedItems.append(True)
else:
selectedItems.append(False) #if a non selected check box is found append as false
count+=1
count=0
while (True): #do the same thing for image check boxes
control =self.eventObject.getControl("image"+`count`)
if(control==None):
break
if(control.getState()==1):
selectedImages.append(True)
else:
selectedImages.append(False)
count+=1
fh.FileHandler().addDetailsFileData(query, selectedItems, selectedImages)
#add details to the file
self.eventObject.endExecute() #finish executing the dialog
class FormulaListDialog():
def actionpppp(self,event):
print(event.source)
def createWaitingMessageBox(self):
#localContext = uno.getComponentContext()
#resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext )
#ctx = resolver.resolve( "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext" )
ctx=uno.getComponentContext()
smgr = ctx.ServiceManager
desktop = smgr.createInstanceWithContext( "com.sun.star.frame.Desktop",ctx)
doc = desktop.getCurrentComponent() #get the current active document
if not hasattr(doc, "Text"):
doc = desktop.loadComponentFromURL( "private:factory/swriter","_blank", 0, () )
parentwin = doc.CurrentController.Frame.ContainerWindow
vclAttribute = uno.getConstantByName("com.sun.star.awt.VclWindowPeerAttribute.OK")
windowClass = uno.Enum("com.sun.star.awt.WindowClass","MODALTOP")
rectangle = self.createRectangle(50, 100, 300, 200) #create dialog rectangle
windowService ="messbox" #type of the dialog
msgbox = self.createMessageBox(ctx, smgr,windowClass, parentwin, rectangle, windowService, vclAttribute)
#create dialog according to the attributes given
msgbox.setMessageText("Fetching Data from Wolfram Alpha Math Engine")
msgbox.setCaptionText("In Progress") #set title and message
return msgbox
def createSelectTextMessageBox(self): #create the select text message box
#localContext = uno.getComponentContext()
#resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext )
#ctx = resolver.resolve( "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext" )
ctx=uno.getComponentContext()
smgr = ctx.ServiceManager
desktop = smgr.createInstanceWithContext( "com.sun.star.frame.Desktop",ctx)
doc = desktop.getCurrentComponent() #get the current active component
if not hasattr(doc, "Text"):
doc = desktop.loadComponentFromURL( "private:factory/swriter","_blank", 0, () )
parentwin = doc.CurrentController.Frame.ContainerWindow
vclAttribute = uno.getConstantByName("com.sun.star.awt.VclWindowPeerAttribute.OK")
windowClass = uno.Enum("com.sun.star.awt.WindowClass","MODALTOP")
rectangle = self.createRectangle(50, 100, 300, 200)
windowService ="messbox"
msgbox = self.createMessageBox(ctx, smgr,windowClass, parentwin, rectangle, windowService, vclAttribute)
msgbox.setMessageText(" Select A Text and Press Button")
msgbox.setCaptionText("No Text")
return msgbox
def createFormulaListDialog(self,formulaList): #create formula list dialog
#localContext = uno.getComponentContext()
#resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext )
#ctx = resolver.resolve( "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext" )
ctx=uno.getComponentContext()
smgr = ctx.ServiceManager
desktop = smgr.createInstanceWithContext( "com.sun.star.frame.Desktop",ctx)
doc = desktop.getCurrentComponent() #get the active document
if not hasattr(doc, "Text"):
doc = desktop.loadComponentFromURL( "private:factory/swriter","_blank", 0, () )
oDialog = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialog",ctx)
oDialogModel = self.createDialog(ctx,smgr,100,True,"DialogFormulaList",100,100,0,0,"Formula List",100)
self.createDialogButton(oDialogModel, "SelectFormulaButton", 15, 50, 25, 80, "Select Formula")
self.createListBox(oDialogModel, "selectFormulaList", 60, 80, 10, 10)
#create the list box to add formulas
oDialog.setModel(oDialogModel)
oDialog.setVisible(False)
oButton = oDialog.getControl("SelectFormulaButton")
#oButton.setActionCommand("commanded")
oButton.addActionListener(MyActionListener(oDialog))
oToolkit = smgr.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx)
oDialog.createPeer(oToolkit,oToolkit.getDesktopWindow())
self.addFormulasToList(formulaList, oDialog) #add formulas to list box
return oDialog
def createAvailableResourcesDialog(self,query,textList,imagesList):
#localContext = uno.getComponentContext()
#resolver = localContext.ServiceManager.createInstanceWithContext("com.sun.star.bridge.UnoUrlResolver", localContext )
#ctx = resolver.resolve( "uno:socket,host=localhost,port=2002;urp;StarOffice.ComponentContext" )
ctx=uno.getComponentContext()
smgr = ctx.ServiceManager
desktop = smgr.createInstanceWithContext( "com.sun.star.frame.Desktop",ctx)
doc = desktop.getCurrentComponent()
if not hasattr(doc, "Text"):
doc = desktop.loadComponentFromURL( "private:factory/swriter","_blank", 0, () )
oDialog = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialog",ctx)
oDialogModel = self.createDialog(ctx,smgr,120,True,"DialogResourceList",100,100,0,0,"Resource List",200)
self.createLabel(oDialogModel, "formulaLabel", query, 10, 100, 5, 5)
oDialog.setVisible(False)
self.createDialogButton(oDialogModel, "GetResourcesButton", 15, 50, 25, 100, "Get Resources")
txtEnd = self.addTextCheckBoxesToDialog(textList, oDialogModel) #get the end of the text
imgEnd = self.addImageTextBoxesToDialog(query,txtEnd,imagesList, oDialogModel) #get the end of the images
oDialogModel.Width =imgEnd #set the dialog width
oDialogModel.Height = max(100,txtEnd+25) #set the dialog height
oDialog.setModel(oDialogModel)
oButton1 = oDialog.getControl("GetResourcesButton")
oButton1.getModel().PositionY = txtEnd+5 #reposition the button
oButton1.setActionCommand("commanded")
oButton1.addActionListener(MyActionListener(oDialog))
oToolkit = smgr.createInstanceWithContext("com.sun.star.awt.Toolkit", ctx)
oDialog.createPeer(oToolkit,oToolkit.getDesktopWindow())
return oDialog
def createDialog(self,ctx,smgr,height,moveable,name,posX,posY,step,tabIndex,title,width):
#smgr=ctx.ServiceManger #set attributes of the dialog
oDialogModel = smgr.createInstanceWithContext("com.sun.star.awt.UnoControlDialogModel",ctx)
oDialogModel.Height = height
oDialogModel.Moveable = moveable
oDialogModel.Name=name
oDialogModel.PositionX = posX
oDialogModel.PositionY = posY
oDialogModel.Step = step
oDialogModel.TabIndex= tabIndex
oDialogModel.Title = title
oDialogModel.Width = width
return oDialogModel
def createMessageBox(self,ctx,smgr,windowClass,parentwin,rectangle,windowService,vclAttribute):
#set the attributes of the messagebox
aDescriptor = uno.createUnoStruct("com.sun.star.awt.WindowDescriptor")
# aDescriptor = WindowDescriptor
aDescriptor.Type = windowClass
aDescriptor.WindowServiceName = windowService
aDescriptor.ParentIndex = -1
aDescriptor.Parent = parentwin
aDescriptor.Bounds = rectangle
aDescriptor.WindowAttributes=vclAttribute
tk = aDescriptor.Parent.getToolkit()
msgbox = tk.createWindow(aDescriptor)
return msgbox
def createRectangle(self,height,width,xPos,yPos): #create a rectangle according to the parameters
Rectangle =uno.createUnoStruct("com.sun.star.awt.Rectangle")
Rectangle.Width =width
Rectangle.Height=height
Rectangle.X=xPos
Rectangle.Y=yPos
return Rectangle
def createDialogButton(self,oDialogModel,buttonName,height,width,posX,posY,label):
#create dialog button according to the given parameters
#oButton = oDialogModel.createInstance("com.sun.star.awt.UnoControlButton")
oButtonModel = oDialogModel.createInstance("com.sun.star.awt.UnoControlButtonModel")
#oButton.setModel(oButtonModel)
oDialogModel.insertByName(buttonName,oButtonModel)
oButtonModel.Name =buttonName
oButtonModel.Height =height
oButtonModel.Width =width
oButtonModel.PositionX =posX
oButtonModel.PositionY =posY
oButtonModel.Label =label
def createListBox(self,oDialogModel,listBoxName,height,width,posX,posY):
#create a list box according to the given parameters
oListBoxModel = oDialogModel.createInstance("com.sun.star.awt.UnoControlListBoxModel")
oDialogModel.insertByName(listBoxName,oListBoxModel)
oListBoxModel.Height =height
oListBoxModel.Width =width
oListBoxModel.PositionX =posX
oListBoxModel.PositionY =posY
def createCheckBox(self,oDialogModel,checkBoxName,label,height,width,posX,posY):
#create the checkbox according to the parameters
oCheckBoxModel = oDialogModel.createInstance("com.sun.star.awt.UnoControlCheckBoxModel")
oDialogModel.insertByName(checkBoxName,oCheckBoxModel)
oCheckBoxModel.Height =height
oCheckBoxModel.Width =width
oCheckBoxModel.PositionX =posX
oCheckBoxModel.PositionY =posY
oCheckBoxModel.Name = checkBoxName
oCheckBoxModel.Label = label
def createLabel(self,oDialogModel,labelName,label,height,width,posX,posY):
#create the label according to the given parameters
oFixedTextModel = oDialogModel.createInstance("com.sun.star.awt.UnoControlFixedTextModel")
oDialogModel.insertByName(labelName,oFixedTextModel)
oFixedTextModel.Height =height
oFixedTextModel.Width =width
oFixedTextModel.PositionX =posX
oFixedTextModel.PositionY =posY
oFixedTextModel.Name = labelName
oFixedTextModel.Label = label
def addFormulasToList(self,formulaList,oDialog): #add formulas to the formula list list box
listBox = oDialog.getControl("selectFormulaList")
count =len(formulaList)
for formula in formulaList:
print(formula)
listBox.addItem(formula,count)
count+=1
def addTextCheckBoxesToDialog(self,textList,oDialogModel):
        y=15 #add plain text checkboxes to the dialog
x=5
count=1
for txt in textList:
name = "text"+`count`
print(name)
self.createCheckBox(oDialogModel,name , txt, 10, 100, x, y)
y+=15
count+=1
return y
def addImageTextBoxesToDialog(self,query,txtEnd,imageList,oDialogModel):
y=15 #add image checkboxes to the dialog
count=1
x=105
for img in imageList:
print(img)
imageFilePath = fh.FileHandler().createImageFilePathForTheQuery(query, img)
imageURL = "file:///"+imageFilePath
thmbFilePath=fh.FileHandler().createThumbImageFilePathForTheQuery(query, img)
thumbImageURL = "file:///"+thmbFilePath
print(imageURL)
im = Image.open(imageFilePath)
dim =150
size=(dim,dim) #create thumbnails
im.thumbnail(size, Image.ANTIALIAS)
im.save(thmbFilePath)
imtb = Image.open(thmbFilePath)
self.createCheckBox(oDialogModel,img , img, 150, 75, x, y)
self.setCheckBoxImageURL(oDialogModel, img, thumbImageURL)
x+=80
count+=1
return x
def setCheckBoxImageURL(self,oDialogModel,name,imageURL): #set the imageurl to a checkbox
imgChkBox = oDialogModel.getByName(name)
imgChkBox.ImageURL = imageURL
    def textDetailsAboutFormula(self,query): #get the formula text details from file
fileName = fh.FileHandler().createFilePathForTheQuery(query)
try:
f = open(fileName, 'r')
resArray =f.readlines()
res = ''.join(resArray)
f.close()
textList=[]
waeqr = wap.WolframAlphaQueryResult(res) #get from titles
for pod in waeqr.Pods():
podObject = wap.Pod(pod)
for subPod in podObject.Subpods():
subPodObject = wap.Subpod(subPod)
if(subPodObject.Plaintext()!=[[]]):
title = subPodObject.Title()
if(title[0]!=''):
textList.append(title[0])
else:
textList.append(podObject.Title()[0])
return textList
except Exception:
textList=["No Text Found"]
print("exeption thrown")
return textList
def imageDetailsAboutFormula(self,query): #get the formula image details from file
fileName = fh.FileHandler().createFilePathForTheQuery(query)
try:
f = open(fileName, 'r')
resArray =f.readlines()
res = ''.join(resArray)
f.close()
imageList=[]
waeqr = wap.WolframAlphaQueryResult(res) #get from images
count = 0
for pod in waeqr.Pods():
podObject = wap.Pod(pod)
for subPod in podObject.Subpods():
subPodObject = wap.Subpod(subPod)
if(subPodObject.Img()!=[[]]):
imageList.append("image"+`count`)
count +=1
return imageList
except Exception:
imageList=["No Text Found"]
print("exeption thrown")
return imageList
|
tmtlakmal/EasyTuteLO
|
src/pythonpaths/FormulaListDialog.py
|
Python
|
lgpl-3.0
| 18,611
|
# Copyright 2012 IBM Corp.
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from unittest import mock
import ddt
from oslo_utils import timeutils
from manila.api.v2 import services
from manila import context
from manila import db
from manila import exception
from manila import policy
from manila import test
from manila.tests.api import fakes
fake_services_list = [
{
'binary': 'manila-scheduler',
'host': 'host1',
'availability_zone': {'name': 'manila1'},
'id': 1,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
},
{
'binary': 'manila-share',
'host': 'host1',
'availability_zone': {'name': 'manila1'},
'id': 2,
'disabled': True,
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27)},
{
'binary': 'manila-scheduler',
'host': 'host2',
'availability_zone': {'name': 'manila2'},
'id': 3,
'disabled': False,
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28)},
{
'binary': 'manila-share',
'host': 'host2',
'availability_zone': {'name': 'manila2'},
'id': 4,
'disabled': True,
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
},
]
fake_response_service_list = {'services': [
{
'id': 1,
'binary': 'manila-scheduler',
'host': 'host1',
'zone': 'manila1',
'status': 'disabled', 'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
},
{
'id': 2,
'binary': 'manila-share',
'host': 'host1',
'zone': 'manila1',
'status': 'disabled',
'state': 'up',
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
},
{
'id': 3,
'binary': 'manila-scheduler',
'host': 'host2',
'zone': 'manila2',
'status': 'enabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
},
{
'id': 4,
'binary': 'manila-share',
'host': 'host2',
'zone': 'manila2',
'status': 'disabled',
'state': 'down',
'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
},
]}
def fake_service_get_all(context):
return fake_services_list
def fake_service_get_by_host_binary(context, host, binary):
for service in fake_services_list:
if service['host'] == host and service['binary'] == binary:
return service
return None
def fake_service_get_by_id(value):
for service in fake_services_list:
if service['id'] == value:
return service
return None
def fake_service_update(context, service_id, values):
service = fake_service_get_by_id(service_id)
if service is None:
raise exception.ServiceNotFound(service_id=service_id)
else:
        return {'host': 'host1', 'binary': 'manila-share',
                'disabled': values['disabled']}
def fake_utcnow():
return datetime.datetime(2012, 10, 29, 13, 42, 11)
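# A hedged note on the fixtures above: with utcnow() pinned to
# 2012-10-29 13:42:11, services 1 and 2 (updated seconds earlier) report
# state 'up', while services 3 and 4 (updated weeks earlier) report 'down',
# which is exactly what fake_response_service_list encodes.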
@ddt.ddt
class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
self.mock_object(db, "service_get_all", fake_service_get_all)
self.mock_object(timeutils, "utcnow", fake_utcnow)
self.mock_object(db, "service_get_by_args",
fake_service_get_by_host_binary)
self.mock_object(db, "service_update", fake_service_update)
self.context = context.get_admin_context()
self.controller = services.ServiceController()
self.controller_legacy = services.ServiceControllerLegacy()
self.resource_name = self.controller.resource_name
self.mock_policy_check = self.mock_object(
policy, 'check_policy', mock.Mock(return_value=True))
@ddt.data(
('os-services', '1.0', services.ServiceControllerLegacy),
('os-services', '2.6', services.ServiceControllerLegacy),
('services', '2.7', services.ServiceController),
)
@ddt.unpack
def test_services_list(self, url, version, controller):
req = fakes.HTTPRequest.blank('/%s' % url, version=version)
req.environ['manila.context'] = self.context
res_dict = controller().index(req)
self.assertEqual(fake_response_service_list, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
def test_services_list_with_host(self):
req = fakes.HTTPRequest.blank('/services?host=host1', version='2.7')
req.environ['manila.context'] = self.context
res_dict = self.controller.index(req)
response = {'services': [
fake_response_service_list['services'][0],
fake_response_service_list['services'][1],
]}
self.assertEqual(response, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
def test_services_list_with_binary(self):
req = fakes.HTTPRequest.blank(
'/services?binary=manila-share', version='2.7')
req.environ['manila.context'] = self.context
res_dict = self.controller.index(req)
response = {'services': [
fake_response_service_list['services'][1],
fake_response_service_list['services'][3],
]}
self.assertEqual(response, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
def test_services_list_with_zone(self):
req = fakes.HTTPRequest.blank('/services?zone=manila1', version='2.7')
req.environ['manila.context'] = self.context
res_dict = self.controller.index(req)
response = {'services': [
fake_response_service_list['services'][0],
fake_response_service_list['services'][1],
]}
self.assertEqual(response, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
def test_services_list_with_status(self):
req = fakes.HTTPRequest.blank(
'/services?status=enabled', version='2.7')
req.environ['manila.context'] = self.context
res_dict = self.controller.index(req)
response = {'services': [
fake_response_service_list['services'][2],
]}
self.assertEqual(response, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
def test_services_list_with_state(self):
req = fakes.HTTPRequest.blank('/services?state=up', version='2.7')
req.environ['manila.context'] = self.context
res_dict = self.controller.index(req)
response = {'services': [
fake_response_service_list['services'][0],
fake_response_service_list['services'][1],
]}
self.assertEqual(response, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
def test_services_list_with_host_binary(self):
req = fakes.HTTPRequest.blank(
"/services?binary=manila-share&state=up", version='2.7')
req.environ['manila.context'] = self.context
res_dict = self.controller.index(req)
response = {'services': [fake_response_service_list['services'][1], ]}
self.assertEqual(response, res_dict)
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'index')
@ddt.data(
('os-services', '1.0', services.ServiceControllerLegacy),
('os-services', '2.6', services.ServiceControllerLegacy),
('services', '2.7', services.ServiceController),
)
@ddt.unpack
def test_services_enable(self, url, version, controller):
body = {'host': 'host1', 'binary': 'manila-share'}
req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version)
res_dict = controller().update(req, "enable", body)
self.assertFalse(res_dict['disabled'])
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'update')
@ddt.data(
('os-services', '1.0', services.ServiceControllerLegacy),
('os-services', '2.6', services.ServiceControllerLegacy),
('services', '2.7', services.ServiceController),
)
@ddt.unpack
def test_services_disable(self, url, version, controller):
req = fakes.HTTPRequest.blank(
'/fooproject/%s/disable' % url, version=version)
body = {'host': 'host1', 'binary': 'manila-share'}
res_dict = controller().update(req, "disable", body)
self.assertTrue(res_dict['disabled'])
self.mock_policy_check.assert_called_once_with(
req.environ['manila.context'], self.resource_name, 'update')
@ddt.data(
('os-services', '2.7', services.ServiceControllerLegacy),
('services', '2.6', services.ServiceController),
('services', '1.0', services.ServiceController),
)
@ddt.unpack
def test_services_update_legacy_url_2_dot_7_api_not_found(self, url,
version,
controller):
req = fakes.HTTPRequest.blank(
'/fooproject/%s/fake' % url, version=version)
body = {'host': 'host1', 'binary': 'manila-share'}
self.assertRaises(
exception.VersionNotFoundForAPIMethod,
controller().update,
req, "disable", body,
)
@ddt.data(
('os-services', '2.7', services.ServiceControllerLegacy),
('services', '2.6', services.ServiceController),
('services', '1.0', services.ServiceController),
)
@ddt.unpack
def test_services_list_api_not_found(self, url, version, controller):
req = fakes.HTTPRequest.blank('/fooproject/%s' % url, version=version)
self.assertRaises(
exception.VersionNotFoundForAPIMethod, controller().index, req)
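    def test_services_list_with_zone_and_state(self):
        # Editor's illustrative sketch (an added example, not part of the
        # original suite): query-string filters combine as a logical AND, as
        # the binary+state test above demonstrates. Given the fixture data,
        # zone 'manila1' and state 'up' each select the first two services.
        req = fakes.HTTPRequest.blank(
            '/services?zone=manila1&state=up', version='2.7')
        req.environ['manila.context'] = self.context
        res_dict = self.controller.index(req)
        response = {'services': [
            fake_response_service_list['services'][0],
            fake_response_service_list['services'][1],
        ]}
        self.assertEqual(response, res_dict)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'index')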
|
openstack/manila
|
manila/tests/api/v2/test_services.py
|
Python
|
apache-2.0
| 11,187
|
"""DNS Authenticator for Google Cloud DNS."""
import json
import logging
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from googleapiclient import discovery
from googleapiclient import errors as googleapiclient_errors
import httplib2
from oauth2client.service_account import ServiceAccountCredentials
from certbot import errors
from certbot.plugins import dns_common
logger = logging.getLogger(__name__)
ACCT_URL = 'https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount'
PERMISSIONS_URL = 'https://cloud.google.com/dns/access-control#permissions_and_roles'
METADATA_URL = 'http://metadata.google.internal/computeMetadata/v1/'
METADATA_HEADERS = {'Metadata-Flavor': 'Google'}
class Authenticator(dns_common.DNSAuthenticator):
"""DNS Authenticator for Google Cloud DNS
This Authenticator uses the Google Cloud DNS API to fulfill a dns-01 challenge.
"""
description = ('Obtain certificates using a DNS TXT record (if you are using Google Cloud DNS '
'for DNS).')
ttl = 60
@classmethod
def add_parser_arguments(cls, add: Callable[..., None],
default_propagation_seconds: int = 60) -> None:
        super().add_parser_arguments(
            add, default_propagation_seconds=default_propagation_seconds)
add('credentials',
            help=('Path to Google Cloud DNS service account JSON file. (See {0} for ' +
                  'information about creating a service account and {1} for information about the ' +
                  'required permissions.)').format(ACCT_URL, PERMISSIONS_URL),
default=None)
def more_info(self) -> str:
return 'This plugin configures a DNS TXT record to respond to a dns-01 challenge using ' + \
'the Google Cloud DNS API.'
def _setup_credentials(self) -> None:
if self.conf('credentials') is None:
try:
# use project_id query to check for availability of google metadata server
# we won't use the result but know we're not on GCP when an exception is thrown
_GoogleClient.get_project_id()
except (ValueError, httplib2.ServerNotFoundError):
raise errors.PluginError('Unable to get Google Cloud Metadata and no credentials'
' specified. Automatic credential lookup is only '
'available on Google Cloud Platform. Please configure'
' credentials using --dns-google-credentials <file>')
else:
self._configure_file('credentials',
'path to Google Cloud DNS service account JSON file')
dns_common.validate_file_permissions(self.conf('credentials'))
def _perform(self, domain: str, validation_name: str, validation: str) -> None:
self._get_google_client().add_txt_record(domain, validation_name, validation, self.ttl)
def _cleanup(self, domain: str, validation_name: str, validation: str) -> None:
self._get_google_client().del_txt_record(domain, validation_name, validation, self.ttl)
def _get_google_client(self) -> '_GoogleClient':
return _GoogleClient(self.conf('credentials'))
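# Editor's illustrative sketch (not part of the plugin): during a dns-01
# challenge, certbot drives the hooks above roughly like this, where
# `authenticator` is a configured instance of the class and the domain and
# token values are hypothetical:
#
#     authenticator._setup_credentials()
#     authenticator._perform('example.com',
#                            '_acme-challenge.example.com', 'token-digest')
#     # ... certbot waits for propagation, validates, then cleans up ...
#     authenticator._cleanup('example.com',
#                            '_acme-challenge.example.com', 'token-digest')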
class _GoogleClient:
"""
Encapsulates all communication with the Google Cloud DNS API.
"""
def __init__(self, account_json: Optional[str] = None,
dns_api: Optional[discovery.Resource] = None) -> None:
scopes = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
if account_json is not None:
try:
credentials = ServiceAccountCredentials.from_json_keyfile_name(account_json, scopes)
with open(account_json) as account:
self.project_id = json.load(account)['project_id']
except Exception as e:
raise errors.PluginError(
"Error parsing credentials file '{}': {}".format(account_json, e))
else:
credentials = None
self.project_id = self.get_project_id()
if not dns_api:
self.dns = discovery.build('dns', 'v1',
credentials=credentials,
cache_discovery=False)
else:
self.dns = dns_api
def add_txt_record(self, domain: str, record_name: str, record_content: str,
record_ttl: int) -> None:
"""
Add a TXT record using the supplied information.
:param str domain: The domain to use to look up the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:param int record_ttl: The record TTL (number of seconds that the record may be cached).
:raises certbot.errors.PluginError: if an error occurs communicating with the Google API
"""
zone_id = self._find_managed_zone_id(domain)
record_contents = self.get_existing_txt_rrset(zone_id, record_name)
if record_contents is None:
# If it wasn't possible to fetch the records at this label (missing .list permission),
# assume there aren't any (#5678). If there are actually records here, this will fail
# with HTTP 409/412 API errors.
record_contents = {"rrdatas": []}
add_records = record_contents["rrdatas"][:]
if "\""+record_content+"\"" in record_contents["rrdatas"]:
# The process was interrupted previously and validation token exists
return
add_records.append(record_content)
data = {
"kind": "dns#change",
"additions": [
{
"kind": "dns#resourceRecordSet",
"type": "TXT",
"name": record_name + ".",
"rrdatas": add_records,
"ttl": record_ttl,
},
],
}
if record_contents["rrdatas"]:
# We need to remove old records in the same request
data["deletions"] = [
{
"kind": "dns#resourceRecordSet",
"type": "TXT",
"name": record_name + ".",
"rrdatas": record_contents["rrdatas"],
"ttl": record_contents["ttl"],
},
]
changes = self.dns.changes()
try:
request = changes.create(project=self.project_id, managedZone=zone_id, body=data)
response = request.execute()
status = response['status']
change = response['id']
while status == 'pending':
request = changes.get(project=self.project_id, managedZone=zone_id, changeId=change)
response = request.execute()
status = response['status']
except googleapiclient_errors.Error as e:
logger.error('Encountered error adding TXT record: %s', e)
raise errors.PluginError('Error communicating with the Google Cloud DNS API: {0}'
.format(e))
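    # Editor's illustrative sketch: for a hypothetical record
    # '_acme-challenge.example.com' that already holds one TXT value, the
    # change body assembled above looks roughly like this (note that the
    # pre-existing rrdatas come back quoted from the API, while the new
    # record_content is appended as-is):
    #
    #     {
    #         "kind": "dns#change",
    #         "additions": [{
    #             "kind": "dns#resourceRecordSet",
    #             "type": "TXT",
    #             "name": "_acme-challenge.example.com.",
    #             "rrdatas": ['"old-token"', 'new-token'],
    #             "ttl": 60,
    #         }],
    #         "deletions": [{
    #             "kind": "dns#resourceRecordSet",
    #             "type": "TXT",
    #             "name": "_acme-challenge.example.com.",
    #             "rrdatas": ['"old-token"'],
    #             "ttl": 60,
    #         }],
    #     }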
def del_txt_record(self, domain: str, record_name: str, record_content: str,
record_ttl: int) -> None:
"""
Delete a TXT record using the supplied information.
:param str domain: The domain to use to look up the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:param str record_content: The record content (typically the challenge validation).
:param int record_ttl: The record TTL (number of seconds that the record may be cached).
:raises certbot.errors.PluginError: if an error occurs communicating with the Google API
"""
try:
zone_id = self._find_managed_zone_id(domain)
except errors.PluginError:
logger.warning('Error finding zone. Skipping cleanup.')
return
record_contents = self.get_existing_txt_rrset(zone_id, record_name)
if record_contents is None:
# If it wasn't possible to fetch the records at this label (missing .list permission),
# assume there aren't any (#5678). If there are actually records here, this will fail
# with HTTP 409/412 API errors.
record_contents = {"rrdatas": ["\"" + record_content + "\""], "ttl": record_ttl}
data = {
"kind": "dns#change",
"deletions": [
{
"kind": "dns#resourceRecordSet",
"type": "TXT",
"name": record_name + ".",
"rrdatas": record_contents["rrdatas"],
"ttl": record_contents["ttl"],
},
],
}
# Remove the record being deleted from the list
readd_contents = [r for r in record_contents["rrdatas"]
if r != "\"" + record_content + "\""]
if readd_contents:
# We need to remove old records in the same request
data["additions"] = [
{
"kind": "dns#resourceRecordSet",
"type": "TXT",
"name": record_name + ".",
"rrdatas": readd_contents,
"ttl": record_contents["ttl"],
},
]
changes = self.dns.changes()
try:
request = changes.create(project=self.project_id, managedZone=zone_id, body=data)
request.execute()
except googleapiclient_errors.Error as e:
logger.warning('Encountered error deleting TXT record: %s', e)
def get_existing_txt_rrset(self, zone_id: str, record_name: str) -> Optional[Dict[str, Any]]:
"""
Get existing TXT records from the RRset for the record name.
If an error occurs while requesting the record set, it is suppressed
and None is returned.
:param str zone_id: The ID of the managed zone.
:param str record_name: The record name (typically beginning with '_acme-challenge.').
:returns: The resourceRecordSet corresponding to `record_name` or None
:rtype: `resourceRecordSet <https://cloud.google.com/dns/docs/reference/v1/resourceRecordSets#resource>` or `None` # pylint: disable=line-too-long
"""
rrs_request = self.dns.resourceRecordSets()
# Add dot as the API returns absolute domains
record_name += "."
request = rrs_request.list(project=self.project_id, managedZone=zone_id, name=record_name,
type="TXT")
try:
response = request.execute()
except googleapiclient_errors.Error:
logger.info("Unable to list existing records. If you're "
"requesting a wildcard certificate, this might not work.")
logger.debug("Error was:", exc_info=True)
else:
if response and response["rrsets"]:
return response["rrsets"][0]
return None
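    # Editor's illustrative sketch: a successful list() call above returns a
    # payload whose first rrset is handed back to the callers, e.g.
    #
    #     {
    #         "kind": "dns#resourceRecordSet",
    #         "name": "_acme-challenge.example.com.",
    #         "type": "TXT",
    #         "ttl": 60,
    #         "rrdatas": ['"some-validation-token"'],
    #     }
    #
    # (the domain and token here are hypothetical; only the "rrdatas" and
    # "ttl" keys are consumed by add_txt_record/del_txt_record).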
def _find_managed_zone_id(self, domain: str) -> str:
"""
Find the managed zone for a given domain.
:param str domain: The domain for which to find the managed zone.
:returns: The ID of the managed zone, if found.
:rtype: str
:raises certbot.errors.PluginError: if the managed zone cannot be found.
"""
zone_dns_name_guesses = dns_common.base_domain_name_guesses(domain)
mz = self.dns.managedZones()
for zone_name in zone_dns_name_guesses:
try:
request = mz.list(project=self.project_id, dnsName=zone_name + '.')
response = request.execute()
zones = response['managedZones']
except googleapiclient_errors.Error as e:
raise errors.PluginError('Encountered error finding managed zone: {0}'
.format(e))
for zone in zones:
zone_id = zone['id']
if 'privateVisibilityConfig' not in zone:
logger.debug('Found id of %s for %s using name %s', zone_id, domain, zone_name)
return zone_id
raise errors.PluginError('Unable to determine managed zone for {0} using zone names: {1}.'
.format(domain, zone_dns_name_guesses))
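    # Editor's illustrative sketch (an assumption about
    # dns_common.base_domain_name_guesses): it walks from the most to the
    # least specific candidate zone, so for 'foo.bar.example.com' the loop
    # above would try, in order,
    #
    #     ['foo.bar.example.com', 'bar.example.com', 'example.com', 'com']
    #
    # and return the id of the first public managed zone whose dnsName
    # matches.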
@staticmethod
def get_project_id() -> str:
"""
Query the google metadata service for the current project ID
This only works on Google Cloud Platform
:raises ServerNotFoundError: Not running on Google Compute or DNS not available
:raises ValueError: Server is found, but response code is not 200
:returns: project id
"""
url = '{0}project/project-id'.format(METADATA_URL)
# Request an access token from the metadata server.
http = httplib2.Http()
r, content = http.request(url, headers=METADATA_HEADERS)
if r.status != 200:
            raise ValueError("Invalid status code: {0}".format(r.status))
if isinstance(content, bytes):
return content.decode()
return content
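# Editor's illustrative sketch (an addition, not part of the shipped module):
# the metadata probe above can be exercised standalone. On GCE it prints the
# project id; anywhere else it raises httplib2.ServerNotFoundError (or
# ValueError for a non-200 response), which is exactly the signal that
# _setup_credentials relies on.
if __name__ == '__main__':  # pragma: no cover
    try:
        print(_GoogleClient.get_project_id())
    except (ValueError, httplib2.ServerNotFoundError) as exc:
        print('Not running on GCP: {0}'.format(exc))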
|
letsencrypt/letsencrypt
|
certbot-dns-google/certbot_dns_google/_internal/dns_google.py
|
Python
|
apache-2.0
| 13,492
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import warnings
import numpy as np
from tensorflow.contrib.data.python.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
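  # Editor's illustrative sketch: conceptually the helper above builds
  #
  #     TensorSliceDataset -> MapDataset(square_3) -> RepeatDataset(count)
  #
  # so for a slice component [1, 2, 3] and count = 2, the iterator yields the
  # element-wise squares 1, 4, 9 twice over (one of the three tuple
  # components shown).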
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_threads,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(
_map_fn, num_threads=num_threads, output_buffer_size=output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_threads = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(components, count, num_threads,
output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_threads_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_threads: num_threads_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_threads_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_threads_val, output_buffer_size_val)
def _testDisposeParallelMapDataset(self, explicit_dispose):
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
if explicit_dispose:
dispose_op = iterator.dispose_op()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
if explicit_dispose:
sess.run(dispose_op)
def testExplicitDisposeParallelMapDataset(self):
self._testDisposeParallelMapDataset(True)
def testImplicitDisposeParallelMapDataset(self):
self._testDisposeParallelMapDataset(False)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedThreads(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
with warnings.catch_warnings(record=True) as w:
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
output_buffer_size=2))
self.assertTrue(len(w) >= 1)
self.assertTrue(
("Dataset.map() is ignoring output_buffer_size since the argument "
"num_threads was not set. To buffer elements, set num_threads >= 1")
in [str(x.message) for x in w])
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapIgnoreError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_threads=2, output_buffer_size=2)
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for x in [1., 2., 3., 5.]:
self.assertEqual(x, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReadFileIgnoreError(self):
def write_string_to_file(value, filename):
with open(filename, "w") as f:
f.write(value)
filenames = [os.path.join(self.get_temp_dir(), "file_%d.txt" % i)
for i in range(5)]
    for filename in filenames:
      # Each file's content is its own path, which makes the reads below easy
      # to verify against the filename list.
      write_string_to_file(filename, filename)
dataset = (dataset_ops.Dataset.from_tensor_slices(filenames)
.map(io_ops.read_file, num_threads=2, output_buffer_size=2)
.ignore_errors())
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# All of the files are present.
sess.run(init_op)
for filename in filenames:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Delete one of the files.
os.remove(filenames[0])
# Attempting to read filenames[0] will fail, but ignore_errors()
# will catch the error.
sess.run(init_op)
for filename in filenames[1:]:
self.assertEqual(compat.as_bytes(filename), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
print(sess.run(get_next))
print(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
with self.test_session() as sess:
with self.assertRaisesRegexp(errors.FailedPreconditionError,
"Failed to capture resource"):
sess.run(init_op)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
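  # Editor's illustrative note on the arithmetic above: with
  # set_event_during_invocation = 5 and buffer_size = 2, the prefetch buffer
  # keeps 2 mapped elements ready ahead of the consumer, so `ev` fires only
  # after 5 - 2 + 1 = 4 elements have been pulled from the iterator.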
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
|
mavenlin/tensorflow
|
tensorflow/contrib/data/python/kernel_tests/map_dataset_op_test.py
|
Python
|
apache-2.0
| 22,580
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015 Dariusz Suchojad <dsuch at zato.io>
Licensed under LGPLv3, see LICENSE.txt for terms and conditions.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# stdlib
from contextlib import closing
# Zato
from zato.common.broker_message import CHANNEL
from zato.common.odb.model import ChannelSTOMP, Service
from zato.common.odb.query import channel_stomp_list
from zato.server.connection.stomp import create_stomp_session
from zato.server.service.internal import AdminService, ChangePasswordBase
from zato.server.service.meta import CreateEditMeta, DeleteMeta, GetListMeta, PingMeta
elem = 'stomp_channel'
model = ChannelSTOMP
label = 'a STOMP channel'
broker_message = CHANNEL
broker_message_prefix = 'STOMP_'
output_optional_extra = ['service_name']
create_edit_input_required_extra = ['service_name']
create_edit_rewrite = ['service_name']
list_func = channel_stomp_list
def instance_hook(self, input, instance, attrs):
# So they are not stored as None/NULL
instance.username = input.username or ''
instance.password = input.password or ''
with closing(self.odb.session()) as session:
instance.service_id = session.query(Service).\
filter(Service.name==input.service_name).\
filter(Service.cluster_id==input.cluster_id).\
one().id
class GetList(AdminService):
__metaclass__ = GetListMeta
class Create(AdminService):
__metaclass__ = CreateEditMeta
class Edit(AdminService):
__metaclass__ = CreateEditMeta
class Delete(AdminService):
__metaclass__ = DeleteMeta
class ChangePassword(ChangePasswordBase):
""" Changes the password of a STOMP channel.
"""
password_required = False
class SimpleIO(ChangePasswordBase.SimpleIO):
request_elem = 'zato_channel_stomp_change_password_request'
response_elem = 'zato_channel_stomp_change_password_response'
def handle(self):
def _auth(instance, password):
instance.password = password
return self._handle(ChannelSTOMP, _auth, CHANNEL.STOMP_CHANGE_PASSWORD.value)
class Ping(AdminService):
__metaclass__ = PingMeta
def ping(self, config):
session = create_stomp_session(config)
session.disconnect()
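# Editor's illustrative sketch (not part of the service): instance_hook above
# coerces missing credentials to empty strings so the ODB columns are never
# stored as NULL, e.g.
#
#     input.username = None
#     instance.username = input.username or ''   # -> ''
#     input.password = 'secret'
#     instance.password = input.password or ''   # -> 'secret'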
|
ivaano/zato
|
code/zato-server/src/zato/server/service/internal/channel/stomp.py
|
Python
|
gpl-3.0
| 2,297
|
"""
Test LMS Notes
"""
import random
from datetime import datetime
from unittest import skip
from uuid import uuid4
from flaky import flaky
from nose.plugins.attrib import attr
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc
from common.test.acceptance.fixtures.edxnotes import EdxNotesFixture, Note, Range
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.edxnotes import EdxNotesPage, EdxNotesPageNoContent, EdxNotesUnitPage
from common.test.acceptance.tests.helpers import EventsTestMixin, UniqueCourseTest
class EdxNotesTestMixin(UniqueCourseTest):
"""
Creates a course with initial data and contains useful helper methods.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EdxNotesTestMixin, self).setUp()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.note_unit_page = EdxNotesUnitPage(self.browser, self.course_id)
self.notes_page = EdxNotesPage(self.browser, self.course_id)
self.username = str(uuid4().hex)[:5]
self.email = "{}@email.com".format(self.username)
self.selector = "annotate-id"
self.edxnotes_fixture = EdxNotesFixture()
self.course_fixture = CourseFixture(
self.course_info["org"], self.course_info["number"],
self.course_info["run"], self.course_info["display_name"]
)
self.course_fixture.add_advanced_settings({
u"edxnotes": {u"value": True}
})
self.course_fixture.add_children(
XBlockFixtureDesc("chapter", "Test Section 1").add_children(
XBlockFixtureDesc("sequential", "Test Subsection 1").add_children(
XBlockFixtureDesc("vertical", "Test Unit 1").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 1",
data="""
<p><span class="{}">Annotate this!</span></p>
<p>Annotate this</p>
""".format(self.selector)
),
XBlockFixtureDesc(
"html",
"Test HTML 2",
data="""<p><span class="{}">Annotate this!</span></p>""".format(self.selector)
),
),
XBlockFixtureDesc("vertical", "Test Unit 2").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 3",
data="""<p><span class="{}">Annotate this!</span></p>""".format(self.selector)
),
),
),
XBlockFixtureDesc("sequential", "Test Subsection 2").add_children(
XBlockFixtureDesc("vertical", "Test Unit 3").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 4",
data="""
<p><span class="{}">Annotate this!</span></p>
""".format(self.selector)
),
),
),
),
XBlockFixtureDesc("chapter", "Test Section 2").add_children(
XBlockFixtureDesc("sequential", "Test Subsection 3").add_children(
XBlockFixtureDesc("vertical", "Test Unit 4").add_children(
XBlockFixtureDesc(
"html",
"Test HTML 5",
data="""
<p><span class="{}">Annotate this!</span></p>
""".format(self.selector)
),
XBlockFixtureDesc(
"html",
"Test HTML 6",
data="""<p><span class="{}">Annotate this!</span></p>""".format(self.selector)
),
),
),
)).install()
self.addCleanup(self.edxnotes_fixture.cleanup)
AutoAuthPage(self.browser, username=self.username, email=self.email, course_id=self.course_id).visit()
def _add_notes(self):
xblocks = self.course_fixture.get_nested_xblocks(category="html")
notes_list = []
for index, xblock in enumerate(xblocks):
notes_list.append(
Note(
user=self.username,
usage_id=xblock.locator,
course_id=self.course_fixture._course_key,
ranges=[Range(startOffset=index, endOffset=index + 5)]
)
)
self.edxnotes_fixture.create_notes(notes_list)
self.edxnotes_fixture.install()
@attr(shard=4)
class EdxNotesDefaultInteractionsTest(EdxNotesTestMixin):
"""
Tests for creation, editing, deleting annotations inside annotatable components in LMS.
"""
def create_notes(self, components, offset=0):
self.assertGreater(len(components), 0)
index = offset
for component in components:
for note in component.create_note(".{}".format(self.selector)):
note.text = "TEST TEXT {}".format(index)
index += 1
def edit_notes(self, components, offset=0):
self.assertGreater(len(components), 0)
index = offset
for component in components:
self.assertGreater(len(component.notes), 0)
for note in component.edit_note():
note.text = "TEST TEXT {}".format(index)
index += 1
def edit_tags_in_notes(self, components, tags):
self.assertGreater(len(components), 0)
index = 0
for component in components:
self.assertGreater(len(component.notes), 0)
for note in component.edit_note():
note.tags = tags[index]
index += 1
self.assertEqual(index, len(tags), "Number of supplied tags did not match components")
def remove_notes(self, components):
self.assertGreater(len(components), 0)
for component in components:
self.assertGreater(len(component.notes), 0)
component.remove_note()
def assert_notes_are_removed(self, components):
for component in components:
self.assertEqual(0, len(component.notes))
def assert_text_in_notes(self, notes):
actual = [note.text for note in notes]
expected = ["TEST TEXT {}".format(i) for i in xrange(len(notes))]
self.assertEqual(expected, actual)
def assert_tags_in_notes(self, notes, expected_tags):
actual = [note.tags for note in notes]
expected = [expected_tags[i] for i in xrange(len(notes))]
self.assertEqual(expected, actual)
def test_can_create_notes(self):
"""
Scenario: User can create notes.
Given I have a course with 3 annotatable components
And I open the unit with 2 annotatable components
When I add 2 notes for the first component and 1 note for the second
Then I see that notes were correctly created
When I change sequential position to "2"
And I add note for the annotatable component on the page
Then I see that note was correctly created
When I refresh the page
Then I see that note was correctly stored
When I change sequential position to "1"
Then I see that notes were correctly stored on the page
"""
self.note_unit_page.visit()
components = self.note_unit_page.components
self.create_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(2)
components = self.note_unit_page.components
self.create_notes(components)
components = self.note_unit_page.refresh()
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_text_in_notes(self.note_unit_page.notes)
def test_can_edit_notes(self):
"""
Scenario: User can edit notes.
Given I have a course with 3 components with notes
And I open the unit with 2 annotatable components
When I change text in the notes
Then I see that notes were correctly changed
When I change sequential position to "2"
And I change the note on the page
Then I see that note was correctly changed
When I refresh the page
Then I see that edited note was correctly stored
When I change sequential position to "1"
Then I see that edited notes were correctly stored on the page
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.edit_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(2)
components = self.note_unit_page.components
self.edit_notes(components)
self.assert_text_in_notes(self.note_unit_page.notes)
components = self.note_unit_page.refresh()
self.assert_text_in_notes(self.note_unit_page.notes)
self.courseware_page.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_text_in_notes(self.note_unit_page.notes)
def test_can_delete_notes(self):
"""
Scenario: User can delete notes.
Given I have a course with 3 components with notes
And I open the unit with 2 annotatable components
When I remove all notes on the page
Then I do not see any notes on the page
When I change sequential position to "2"
And I remove all notes on the page
Then I do not see any notes on the page
When I refresh the page
Then I do not see any notes on the page
When I change sequential position to "1"
Then I do not see any notes on the page
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.remove_notes(components)
self.assert_notes_are_removed(components)
self.courseware_page.go_to_sequential_position(2)
components = self.note_unit_page.components
self.remove_notes(components)
self.assert_notes_are_removed(components)
components = self.note_unit_page.refresh()
self.assert_notes_are_removed(components)
self.courseware_page.go_to_sequential_position(1)
components = self.note_unit_page.components
self.assert_notes_are_removed(components)
@flaky # TODO: fix this, see TNL-6494
def test_can_create_note_with_tags(self):
"""
Scenario: a user of notes can define one with tags
Given I have a course with 3 annotatable components
And I open the unit with 2 annotatable components
When I add a note with tags for the first component
And I refresh the page
Then I see that note was correctly stored with its tags
"""
self.note_unit_page.visit()
components = self.note_unit_page.components
for note in components[0].create_note(".{}".format(self.selector)):
note.tags = ["fruit", "tasty"]
self.note_unit_page.refresh()
self.assertEqual(["fruit", "tasty"], self.note_unit_page.notes[0].tags)
def test_can_change_tags(self):
"""
Scenario: a user of notes can edit tags on notes
Given I have a course with 3 components with notes
When I open the unit with 2 annotatable components
And I edit tags on the notes for the 2 annotatable components
Then I see that the tags were correctly changed
And I again edit tags on the notes for the 2 annotatable components
And I refresh the page
Then I see that the tags were correctly changed
"""
self._add_notes()
self.note_unit_page.visit()
components = self.note_unit_page.components
self.edit_tags_in_notes(components, [["hard"], ["apple", "pear"]])
self.assert_tags_in_notes(self.note_unit_page.notes, [["hard"], ["apple", "pear"]])
self.edit_tags_in_notes(components, [[], ["avocado"]])
self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])
self.note_unit_page.refresh()
self.assert_tags_in_notes(self.note_unit_page.notes, [[], ["avocado"]])
def test_sr_labels(self):
"""
Scenario: screen reader labels exist for text and tags fields
Given I have a course with 3 components with notes
When I open the unit with 2 annotatable components
And I open the editor for each note
Then the text and tags fields both have screen reader labels
"""
self._add_notes()
self.note_unit_page.visit()
# First note is in the first annotatable component, will have field indexes 0 and 1.
for note in self.note_unit_page.components[0].edit_note():
self.assertTrue(note.has_sr_label(0, 0, "Note"))
self.assertTrue(note.has_sr_label(1, 1, "Tags (space-separated)"))
# Second note is in the second annotatable component, will have field indexes 2 and 3.
for note in self.note_unit_page.components[1].edit_note():
self.assertTrue(note.has_sr_label(0, 2, "Note"))
self.assertTrue(note.has_sr_label(1, 3, "Tags (space-separated)"))
@attr(shard=4)
class EdxNotesPageTest(EventsTestMixin, EdxNotesTestMixin):
"""
Tests for Notes page.
"""
def _add_notes(self, notes_list):
self.edxnotes_fixture.create_notes(notes_list)
self.edxnotes_fixture.install()
def _add_default_notes(self, tags=None, extra_notes=0):
"""
        Creates 5 test notes by default; additional notes are created if extra_notes is specified.
        If tags are not specified, the notes are populated with some test tag data.
If tags are specified, they will be used for each of the 3 notes that have tags.
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
# pylint: disable=attribute-defined-outside-init
self.raw_note_list = [
Note(
usage_id=xblocks[4].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="First note",
quote="Annotate this",
updated=datetime(2011, 1, 1, 1, 1, 1, 1).isoformat(),
),
Note(
usage_id=xblocks[2].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="",
quote=u"Annotate this",
updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["Review", "cool"] if tags is None else tags
),
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Third note",
quote="Annotate this",
updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=18)],
tags=["Cool", "TODO"] if tags is None else tags
),
Note(
usage_id=xblocks[3].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Fourth note",
quote="",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["review"] if tags is None else tags
),
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Fifth note",
quote="Annotate this",
updated=datetime(2015, 1, 1, 1, 1, 1, 1).isoformat()
),
]
if extra_notes > 0:
for __ in range(extra_notes):
self.raw_note_list.append(
Note(
usage_id=xblocks[random.choice([0, 1, 2, 3, 4, 5])].locator,
user=self.username,
course_id=self.course_fixture._course_key, # pylint: disable=protected-access
text="Fourth note",
quote="",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=["review"] if tags is None else tags
)
)
self._add_notes(self.raw_note_list)
def assertNoteContent(self, item, text=None, quote=None, unit_name=None, time_updated=None, tags=None):
""" Verifies the expected properties of the note. """
self.assertEqual(text, item.text)
if item.quote is not None:
self.assertIn(quote, item.quote)
else:
self.assertIsNone(quote)
self.assertEqual(unit_name, item.unit_name)
self.assertEqual(time_updated, item.time_updated)
self.assertEqual(tags, item.tags)
def assertChapterContent(self, item, title=None, subtitles=None):
"""
Verifies the expected title and subsection titles (subtitles) for the given chapter.
"""
self.assertEqual(item.title, title)
self.assertEqual(item.subtitles, subtitles)
def assertGroupContent(self, item, title=None, notes=None):
"""
Verifies the expected title and child notes for the given group.
"""
self.assertEqual(item.title, title)
self.assertEqual(item.notes, notes)
def assert_viewed_event(self, view=None):
"""
Verifies that the correct view event was captured for the Notes page.
"""
# There will always be an initial event for "Recent Activity" because that is the default view.
# If view is something besides "Recent Activity", expect 2 events, with the second one being
# the view name passed in.
if view == 'Recent Activity':
view = None
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.notes_page_viewed'},
number_of_matches=1 if view is None else 2
)
expected_events = [{'event': {'view': 'Recent Activity'}}]
if view:
expected_events.append({'event': {'view': view}})
self.assert_events_match(expected_events, actual_events)
def assert_unit_link_event(self, usage_id, view):
"""
Verifies that the correct used_unit_link event was captured for the Notes page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.used_unit_link'},
number_of_matches=1
)
expected_events = [
{'event': {'component_usage_id': usage_id, 'view': view}}
]
self.assert_events_match(expected_events, actual_events)
def assert_search_event(self, search_string, number_of_results):
"""
Verifies that the correct searched event was captured for the Notes page.
"""
actual_events = self.wait_for_events(
event_filter={'event_type': 'edx.course.student_notes.searched'},
number_of_matches=1
)
expected_events = [
{'event': {'search_string': search_string, 'number_of_results': number_of_results}}
]
self.assert_events_match(expected_events, actual_events)
def _verify_pagination_info(
self,
notes_count_on_current_page,
header_text,
previous_button_enabled,
next_button_enabled,
current_page_number,
total_pages
):
"""
Verify pagination info
"""
self.assertEqual(self.notes_page.count(), notes_count_on_current_page)
self.assertEqual(self.notes_page.get_pagination_header_text(), header_text)
if total_pages > 1:
self.assertEqual(self.notes_page.footer_visible, True)
self.assertEqual(self.notes_page.is_previous_page_button_enabled(), previous_button_enabled)
self.assertEqual(self.notes_page.is_next_page_button_enabled(), next_button_enabled)
self.assertEqual(self.notes_page.get_current_page_number(), current_page_number)
self.assertEqual(self.notes_page.get_total_pages, total_pages)
else:
self.assertEqual(self.notes_page.footer_visible, False)
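    # Editor's illustrative sketch: a hypothetical call for the first of two
    # result pages might look like this (the header text shown is made up):
    #
    #     self._verify_pagination_info(
    #         notes_count_on_current_page=25,
    #         header_text='Showing 1-25 out of 27 items',
    #         previous_button_enabled=False,
    #         next_button_enabled=True,
    #         current_page_number=1,
    #         total_pages=2,
    #     )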
def search_and_verify(self):
"""
Add, search and verify notes.
"""
self._add_default_notes(extra_notes=22)
self.notes_page.visit()
# Run the search
self.notes_page.search("note")
# No error message appears
self.assertFalse(self.notes_page.is_error_visible)
self.assertIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(self.notes_page.get_total_pages, 2)
def test_no_content(self):
"""
Scenario: User can see `No content` message.
Given I have a course without notes
When I open Notes page
        Then I see only the "You have not made any notes in this course yet" message
"""
notes_page_empty = EdxNotesPageNoContent(self.browser, self.course_id)
notes_page_empty.visit()
self.assertIn(
"You have not made any notes in this course yet. Other students in this course are using notes to:",
notes_page_empty.no_content_text)
def test_notes_works_correctly_with_xss(self):
"""
Scenario: Note text & tags should be HTML and JS escaped
Given I am enrolled in a course with notes enabled
When I visit the Notes page, with a Notes text and tag containing HTML characters like < and >
Then the text and tags appear as expected due to having been properly escaped
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
self._add_notes([
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key, # pylint: disable=protected-access
text='<script>alert("XSS")</script>',
quote="quote",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
tags=['<script>alert("XSS")</script>']
),
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key, # pylint: disable=protected-access
text='<b>bold</b>',
quote="quote",
updated=datetime(2014, 2, 1, 1, 1, 1, 1).isoformat(),
tags=['<i>bold</i>']
)
])
self.notes_page.visit()
notes = self.notes_page.notes
self.assertEqual(len(notes), 2)
self.assertNoteContent(
notes[0],
quote=u"quote",
text='<b>bold</b>',
unit_name="Test Unit 1",
time_updated="Feb 01, 2014 at 01:01 UTC",
tags=['<i>bold</i>']
)
self.assertNoteContent(
notes[1],
quote=u"quote",
text='<script>alert("XSS")</script>',
unit_name="Test Unit 1",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=['<script>alert("XSS")</script>']
)
def test_recent_activity_view(self):
"""
Scenario: User can view all notes by recent activity.
Given I have a course with 5 notes
When I open Notes page
Then I see 5 notes sorted by the updated date
And I see correct content in the notes
And an event has fired indicating that the Recent Activity view was selected
"""
self._add_default_notes()
self.notes_page.visit()
notes = self.notes_page.notes
self.assertEqual(len(notes), 5)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[2],
quote="Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event()
def test_course_structure_view(self):
"""
Scenario: User can view all notes by location in Course.
Given I have a course with 5 notes
When I open Notes page
And I switch to "Location in Course" view
Then I see 2 groups, 3 sections and 5 notes
And I see correct content in the notes and groups
And an event has fired indicating that the Location in Course view was selected
"""
self._add_default_notes()
self.notes_page.visit().switch_to_tab("structure")
notes = self.notes_page.notes
groups = self.notes_page.chapter_groups
sections = self.notes_page.subsection_groups
self.assertEqual(len(notes), 5)
self.assertEqual(len(groups), 2)
self.assertEqual(len(sections), 3)
self.assertChapterContent(
groups[0],
title=u"Test Section 1",
subtitles=[u"Test Subsection 1", u"Test Subsection 2"]
)
self.assertGroupContent(
sections[0],
title=u"Test Subsection 1",
notes=[u"Fifth note", u"Third note", None]
)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
quote=u"Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[2],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
self.assertGroupContent(
sections[1],
title=u"Test Subsection 2",
notes=[u"Fourth note"]
)
self.assertNoteContent(
notes[3],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertChapterContent(
groups[1],
title=u"Test Section 2",
subtitles=[u"Test Subsection 3"],
)
self.assertGroupContent(
sections[2],
title=u"Test Subsection 3",
notes=[u"First note"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Location in Course')
def test_tags_view(self):
"""
Scenario: User can view all notes by associated tags.
Given I have a course with 5 notes and I am viewing the Notes page
When I switch to the "Tags" view
Then I see 4 tag groups
And I see correct content in the notes and groups
And an event has fired indicating that the Tags view was selected
"""
self._add_default_notes()
self.notes_page.visit().switch_to_tab("tags")
notes = self.notes_page.notes
groups = self.notes_page.tag_groups
self.assertEqual(len(notes), 7)
self.assertEqual(len(groups), 4)
# Tag group "cool"
self.assertGroupContent(
groups[0],
title=u"cool (2)",
notes=[u"Third note", None]
)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[1],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
# Tag group "review"
self.assertGroupContent(
groups[1],
title=u"review (2)",
notes=[u"Fourth note", None]
)
self.assertNoteContent(
notes[2],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this",
unit_name="Test Unit 2",
time_updated="Jan 01, 2012 at 01:01 UTC",
tags=["Review", "cool"]
)
# Tag group "todo"
self.assertGroupContent(
groups[2],
title=u"todo (1)",
notes=["Third note"]
)
self.assertNoteContent(
notes[4],
quote=u"Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
# Notes with no tags
self.assertGroupContent(
groups[3],
title=u"[no tags] (2)",
notes=["Fifth note", "First note"]
)
self.assertNoteContent(
notes[5],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[6],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Tags')
def test_easy_access_from_notes_page(self):
"""
Scenario: Ensure that the link to the Unit works correctly.
Given I have a course with 5 notes
When I open Notes page
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I switch to "Location in Course" view
And I click on the second unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I switch to "Tags" view
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
        When I go back to the Notes page
And I run the search with "Fifth" query
And I click on the first unit link
Then I see correct text on the unit page and a unit link event was fired
"""
def assert_page(note, usage_id, view):
""" Verify that clicking on the unit link works properly. """
quote = note.quote
note.go_to_unit()
self.courseware_page.wait_for_page()
self.assertIn(quote, self.courseware_page.xblock_component_html_content())
self.assert_unit_link_event(usage_id, view)
self.reset_event_tracking()
self._add_default_notes()
self.notes_page.visit()
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[4]['usage_id'], "Recent Activity")
self.notes_page.visit()
self.notes_page.switch_to_tab("structure")
note = self.notes_page.notes[1]
assert_page(note, self.raw_note_list[2]['usage_id'], "Location in Course")
self.notes_page.visit()
self.notes_page.switch_to_tab("tags")
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[2]['usage_id'], "Tags")
self.notes_page.visit()
self.notes_page.search("Fifth")
self.notes_page.wait_for_ajax()
note = self.notes_page.notes[0]
assert_page(note, self.raw_note_list[4]['usage_id'], "Search Results")
def test_search_behaves_correctly(self):
"""
Scenario: Searching behaves correctly.
Given I have a course with 5 notes
When I open Notes page
When I run the search with " " query
Then I see the following error message "Please enter a term in the search field."
And I do not see "Search Results" tab
When I run the search with "note" query
Then I see that error message disappears
And I see that "Search Results" tab appears with 4 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
"""
self._add_default_notes()
self.notes_page.visit()
# Run the search with whitespaces only
self.notes_page.search(" ")
# Displays error message
self.assertTrue(self.notes_page.is_error_visible)
self.assertEqual(self.notes_page.error_text, u"Please enter a term in the search field.")
# Search results tab does not appear
self.assertNotIn(u"Search Results", self.notes_page.tabs)
# Run the search with correct query
self.notes_page.search("note")
# Error message disappears
self.assertFalse(self.notes_page.is_error_visible)
self.assertIn(u"Search Results", self.notes_page.tabs)
notes = self.notes_page.notes
self.assertEqual(len(notes), 4)
self.assertNoteContent(
notes[0],
quote=u"Annotate this",
text=u"Fifth note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2015 at 01:01 UTC"
)
self.assertNoteContent(
notes[1],
text=u"Fourth note",
unit_name="Test Unit 3",
time_updated="Jan 01, 2014 at 01:01 UTC",
tags=["review"]
)
self.assertNoteContent(
notes[2],
quote="Annotate this",
text=u"Third note",
unit_name="Test Unit 1",
time_updated="Jan 01, 2013 at 01:01 UTC",
tags=["Cool", "TODO"]
)
self.assertNoteContent(
notes[3],
quote=u"Annotate this",
text=u"First note",
unit_name="Test Unit 4",
time_updated="Jan 01, 2011 at 01:01 UTC"
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 4)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_recent_activity(self):
"""
Scenario: Can scroll to a tag group from the Recent Activity view (default view)
Given I have a course with 5 notes and I open the Notes page
When I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit()
self._scroll_to_tag_and_verify("pear", 3)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_course_structure(self):
"""
Scenario: Can scroll to a tag group from the Course Structure view
Given I have a course with 5 notes and I open the Notes page and select the Course Structure view
When I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().switch_to_tab("structure")
self._scroll_to_tag_and_verify("squash", 5)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_search(self):
"""
Scenario: Can scroll to a tag group from the Search Results view
Given I have a course with 5 notes and I open the Notes page and perform a search
Then the Search view tab opens and gets focus
And when I click on a tag associated with a note
Then the Tags view tab gets focus and I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().search("note")
self._scroll_to_tag_and_verify("pumpkin", 4)
@skip("scroll to tag functionality is disabled")
def test_scroll_to_tag_from_tag_view(self):
"""
Scenario: Can scroll to a tag group from the Tags view
Given I have a course with 5 notes and I open the Notes page and select the Tag view
When I click on a tag associated with a note
Then I scroll to the section of notes associated with that tag
"""
self._add_default_notes(["apple", "banana", "kiwi", "pear", "pumpkin", "squash", "zucchini"])
self.notes_page.visit().switch_to_tab("tags")
self._scroll_to_tag_and_verify("kiwi", 2)
def _scroll_to_tag_and_verify(self, tag_name, group_index):
""" Helper method for all scroll to tag tests """
self.notes_page.notes[1].go_to_tag(tag_name)
# Because all the notes (with tags) have the same tags, they will end up ordered alphabetically.
        tag_group = self.notes_page.tag_groups[group_index]
        self.assertEqual(tag_name + " (3)", tag_group.title)
        self.assertTrue(tag_group.scrolled_to_top(group_index))
def test_tabs_behaves_correctly(self):
"""
Scenario: Tabs behaves correctly.
Given I have a course with 5 notes
When I open Notes page
Then I see only "Recent Activity", "Location in Course", and "Tags" tabs
When I run the search with "note" query
And I see that "Search Results" tab appears with 4 notes found
Then I switch to "Recent Activity" tab
And I see all 5 notes
Then I switch to "Location in Course" tab
And I see all 2 groups and 5 notes
When I switch back to "Search Results" tab
Then I can still see 4 notes found
When I close "Search Results" tab
Then I see that "Recent Activity" tab becomes active
And "Search Results" tab disappears
And I see all 5 notes
"""
self._add_default_notes()
self.notes_page.visit()
# We're on Recent Activity tab.
self.assertEqual(len(self.notes_page.tabs), 3)
self.assertEqual([u"Recent Activity", u"Location in Course", u"Tags"], self.notes_page.tabs)
self.notes_page.search("note")
# We're on Search Results tab
self.assertEqual(len(self.notes_page.tabs), 4)
self.assertIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(len(self.notes_page.notes), 4)
# We can switch on Recent Activity tab and back.
self.notes_page.switch_to_tab("recent")
self.assertEqual(len(self.notes_page.notes), 5)
self.notes_page.switch_to_tab("structure")
self.assertEqual(len(self.notes_page.chapter_groups), 2)
self.assertEqual(len(self.notes_page.notes), 5)
self.notes_page.switch_to_tab("search")
self.assertEqual(len(self.notes_page.notes), 4)
# Can close search results page
self.notes_page.close_tab()
self.assertEqual(len(self.notes_page.tabs), 3)
self.assertNotIn(u"Search Results", self.notes_page.tabs)
self.assertEqual(len(self.notes_page.notes), 5)
@flaky # TODO: fix this, see TNL-6493
def test_open_note_when_accessed_from_notes_page(self):
"""
Scenario: Ensure that the link to the Unit opens a note only once.
Given I have a course with 2 sequentials that contain respectively one note and two notes
When I open Notes page
And I click on the first unit link
Then I see the note opened on the unit page
        When I switch to the second sequential
        Then I do not see any note opened
        When I switch back to the first sequential
        Then I do not see any note opened
"""
xblocks = self.course_fixture.get_nested_xblocks(category="html")
self._add_notes([
Note(
usage_id=xblocks[1].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Third note",
quote="Annotate this",
updated=datetime(2012, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=14)],
),
Note(
usage_id=xblocks[2].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="Second note",
quote="Annotate this",
updated=datetime(2013, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=14)],
),
Note(
usage_id=xblocks[0].locator,
user=self.username,
course_id=self.course_fixture._course_key,
text="First note",
quote="Annotate this",
updated=datetime(2014, 1, 1, 1, 1, 1, 1).isoformat(),
ranges=[Range(startOffset=0, endOffset=14)],
),
])
self.notes_page.visit()
item = self.notes_page.notes[0]
item.go_to_unit()
self.courseware_page.wait_for_page()
note = self.note_unit_page.notes[0]
self.assertTrue(note.is_visible)
note = self.note_unit_page.notes[1]
self.assertFalse(note.is_visible)
self.courseware_page.go_to_sequential_position(2)
note = self.note_unit_page.notes[0]
self.assertFalse(note.is_visible)
self.courseware_page.go_to_sequential_position(1)
self.courseware_page.wait_for_ajax()
note = self.note_unit_page.notes[0]
self.assertFalse(note.is_visible)
def test_page_size_limit(self):
"""
Scenario: Verify that we can't get notes more than default page size.
Given that I am a registered user
        And I have a course with 26 notes
When I open Notes page
        Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see disabled previous button
And I should also see enabled next button
"""
self._add_default_notes(extra_notes=21)
self.notes_page.visit()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_single_page(self):
"""
Scenario: Notes list pagination works as expected for single page
Given that I am a registered user
And I have a course with 5 notes
When I open Notes page
Then I can see notes list contains 5 items
And I should see paging header and footer with correct data
And I should see disabled previous and next buttons
"""
self._add_default_notes()
self.notes_page.visit()
self._verify_pagination_info(
notes_count_on_current_page=5,
header_text='Showing 1-5 out of 5 total',
previous_button_enabled=False,
next_button_enabled=False,
current_page_number=1,
total_pages=1
)
def test_next_and_previous_page_button(self):
"""
Scenario: Next & Previous buttons are working as expected for notes list pagination
Given that I am a registered user
And I have a course with 26 notes
When I open Notes page
Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see disabled previous button
And I should see enabled next button
When I click on next page button in footer
Then I should be navigated to second page
And I should see a list with 1 item
And I should see paging header and footer with correct info
And I should see enabled previous button
And I should also see disabled next button
When I click on previous page button in footer
Then I should be navigated to first page
And I should see a list with 25 items
And I should see paging header and footer with correct info
And I should see disabled previous button
And I should also see enabled next button
"""
self._add_default_notes(extra_notes=21)
self.notes_page.visit()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.notes_page.press_next_page_button()
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
self.notes_page.press_previous_page_button()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_pagination_with_valid_and_invalid_page_number(self):
"""
Scenario: Notes list pagination works as expected for valid & invalid page number
Given that I am a registered user
And I have a course with 26 notes
When I open Notes page
Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see total page value is 2
When I enter 2 in the page number input
Then I should be navigated to page 2
When I enter 3 in the page number input
Then I should not be navigated away from page 2
"""
self._add_default_notes(extra_notes=21)
self.notes_page.visit()
self.assertEqual(self.notes_page.get_total_pages, 2)
# test pagination with valid page number
self.notes_page.go_to_page(2)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
# test pagination with invalid page number
self.notes_page.go_to_page(3)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
def test_search_behaves_correctly_with_pagination(self):
"""
Scenario: Searching behaves correctly with pagination.
Given that I am a registered user
And I have a course with 27 notes
When I open Notes page
Then I can see notes list with 25 items
And I should see paging header and footer with correct data
And previous button is disabled
And next button is enabled
When I run the search with "note" query
Then I see no error message
And I see that "Search Results" tab appears with 26 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
"""
self.search_and_verify()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 26)
def test_search_with_next_and_prev_page_button(self):
"""
Scenario: Next & Previous buttons are working as expected for search
Given that I am a registered user
And I have a course with 27 notes
When I open Notes page
Then I can see notes list with 25 items
And I should see paging header and footer with correct data
And previous button is disabled
And next button is enabled
When I run the search with "note" query
Then I see that "Search Results" tab appears with 26 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
When I click on next page button in footer
Then I should be navigated to second page
And I should see a list with 1 item
And I should see paging header and footer with correct info
And I should see enabled previous button
And I should also see disabled next button
When I click on previous page button in footer
Then I should be navigated to first page
And I should see a list with 25 items
And I should see paging header and footer with correct info
And I should see disabled previous button
And I should also see enabled next button
"""
self.search_and_verify()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
self.assert_viewed_event('Search Results')
self.assert_search_event('note', 26)
self.notes_page.press_next_page_button()
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
self.notes_page.press_previous_page_button()
self._verify_pagination_info(
notes_count_on_current_page=25,
header_text='Showing 1-25 out of 26 total',
previous_button_enabled=False,
next_button_enabled=True,
current_page_number=1,
total_pages=2
)
def test_search_with_valid_and_invalid_page_number(self):
"""
Scenario: Notes list pagination works as expected for valid & invalid page number
Given that I am a registered user
And I have a course with 27 notes
When I open Notes page
Then I can see notes list contains 25 items
And I should see paging header and footer with correct data
And I should see total page value is 2
When I run the search with "note" query
Then I see that "Search Results" tab appears with 26 notes found
And an event has fired indicating that the Search Results view was selected
And an event has fired recording the search that was performed
When I enter 2 in the page number input
Then I should be navigated to page 2
When I enter 3 in the page number input
Then I should not be navigated away from page 2
"""
self.search_and_verify()
# test pagination with valid page number
self.notes_page.go_to_page(2)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
# test pagination with invalid page number
self.notes_page.go_to_page(3)
self._verify_pagination_info(
notes_count_on_current_page=1,
header_text='Showing 26-26 out of 26 total',
previous_button_enabled=True,
next_button_enabled=False,
current_page_number=2,
total_pages=2
)
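# Illustrative sketch (not part of the original suite): _verify_pagination_info,
# used by the pagination tests above, is assumed to compare the page state
# against the expected values roughly as follows; apart from get_total_pages
# (used directly above), the page-object property names are assumptions.
#
#     def _verify_pagination_info(self, notes_count_on_current_page, header_text,
#                                 previous_button_enabled, next_button_enabled,
#                                 current_page_number, total_pages):
#         self.assertEqual(len(self.notes_page.notes), notes_count_on_current_page)
#         self.assertEqual(self.notes_page.header_text, header_text)
#         self.assertEqual(self.notes_page.is_previous_button_enabled, previous_button_enabled)
#         self.assertEqual(self.notes_page.is_next_button_enabled, next_button_enabled)
#         self.assertEqual(self.notes_page.current_page_number, current_page_number)
#         self.assertEqual(self.notes_page.get_total_pages, total_pages)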
@attr(shard=4)
class EdxNotesToggleSingleNoteTest(EdxNotesTestMixin):
"""
Tests for toggling single annotation.
"""
def setUp(self):
super(EdxNotesToggleSingleNoteTest, self).setUp()
self._add_notes()
self.note_unit_page.visit()
def test_can_toggle_by_clicking_on_highlighted_text(self):
"""
Scenario: User can toggle a single note by clicking on highlighted text.
Given I have a course with components with notes
When I click on highlighted text
And I move mouse out of the note
Then I see that the note is still shown
When I click outside the note
        Then I see that the note is closed
"""
note = self.note_unit_page.notes[0]
note.click_on_highlight()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note.is_visible)
self.note_unit_page.click("body")
self.assertFalse(note.is_visible)
def test_can_toggle_by_clicking_on_the_note(self):
"""
Scenario: User can toggle a single note by clicking on the note.
Given I have a course with components with notes
When I click on the note
And I move mouse out of the note
Then I see that the note is still shown
When I click outside the note
        Then I see that the note is closed
"""
note = self.note_unit_page.notes[0]
note.show().click_on_viewer()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note.is_visible)
self.note_unit_page.click("body")
self.assertFalse(note.is_visible)
def test_interaction_between_notes(self):
"""
Scenario: Interactions between notes works well.
Given I have a course with components with notes
When I click on highlighted text in the first component
And I move mouse out of the note
Then I see that the note is still shown
When I click on highlighted text in the second component
Then I see that the new note is shown
"""
note_1 = self.note_unit_page.notes[0]
note_2 = self.note_unit_page.notes[1]
note_1.click_on_highlight()
self.note_unit_page.move_mouse_to("body")
self.assertTrue(note_1.is_visible)
note_2.click_on_highlight()
self.assertFalse(note_1.is_visible)
self.assertTrue(note_2.is_visible)
@attr(shard=4)
class EdxNotesToggleNotesTest(EdxNotesTestMixin):
"""
Tests for toggling visibility of all notes.
"""
def setUp(self):
super(EdxNotesToggleNotesTest, self).setUp()
self._add_notes()
self.note_unit_page.visit()
def test_can_disable_all_notes(self):
"""
Scenario: User can disable all notes.
Given I have a course with components with notes
And I open the unit with annotatable components
When I click on "Show notes" checkbox
Then I do not see any notes on the sequential position
When I change sequential position to "2"
Then I still do not see any notes on the sequential position
When I go to "Test Subsection 2" subsection
Then I do not see any notes on the subsection
"""
# Disable all notes
self.note_unit_page.toggle_visibility()
self.assertEqual(len(self.note_unit_page.notes), 0)
self.courseware_page.go_to_sequential_position(2)
self.assertEqual(len(self.note_unit_page.notes), 0)
self.course_home_page.visit()
self.course_home_page.outline.go_to_section(u"Test Section 1", u"Test Subsection 2")
self.assertEqual(len(self.note_unit_page.notes), 0)
def test_can_reenable_all_notes(self):
"""
Scenario: User can toggle notes visibility.
Given I have a course with components with notes
And I open the unit with annotatable components
When I click on "Show notes" checkbox
Then I do not see any notes on the sequential position
When I click on "Show notes" checkbox again
Then I see that all notes appear
When I change sequential position to "2"
Then I still can see all notes on the sequential position
When I go to "Test Subsection 2" subsection
Then I can see all notes on the subsection
"""
# Disable notes
self.note_unit_page.toggle_visibility()
self.assertEqual(len(self.note_unit_page.notes), 0)
# Enable notes to make sure that I can enable notes without refreshing
# the page.
self.note_unit_page.toggle_visibility()
self.assertGreater(len(self.note_unit_page.notes), 0)
self.courseware_page.go_to_sequential_position(2)
self.assertGreater(len(self.note_unit_page.notes), 0)
self.course_home_page.visit()
self.course_home_page.outline.go_to_section(u"Test Section 1", u"Test Subsection 2")
self.assertGreater(len(self.note_unit_page.notes), 0)
|
fintech-circle/edx-platform
|
common/test/acceptance/tests/lms/test_lms_edxnotes.py
|
Python
|
agpl-3.0
| 59,950
|
# Copyright (c) 2014 - 2016 townhallpinball.org
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from pin.lib import p
from . import mm3
def init():
p.load_modes((
"extra.mm3",
))
def load():
mm3.load()
|
town-hall-pinball/project-omega
|
pin/extra/__init__.py
|
Python
|
mit
| 1,234
|
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
CASSANDRA_ADDRESS = 'cassandra.cloudbrain.rocks'
RABBITMQ_ADDRESS = 'rabbitmq.cloudbrain.rocks'
WEBSERVER_ADDRESS = 'webserver.cloudbrain.rocks'
WEBSERVER_PORT = 8080
MOCK_DEVICE_ID = "mock"
# Metric metadata of wearable devices accepted by CloudBrain.
DEVICE_METADATA = [
{'device_name': 'openbci',
'device_type': 'eeg_headset',
'metrics':
[
{
'metric_name': 'eeg',
'num_channels': 8,
'metric_description': 'Raw eeg data coming from the OpenBCI channels'
}
]
},
{
'device_name': 'muse',
'device_type': 'eeg_headset',
'metrics':
[
{
'metric_name': 'eeg',
'num_channels': 4,
'metric_description': 'Raw eeg data coming from the 4 channels of the Muse'
},
{
'metric_name': 'horseshoe',
'num_channels': 4,
'metric_description': 'Status indicator for each channel (1 = good, 2 = ok, >=3 bad)'
},
{
'metric_name': 'concentration',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'mellow',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'acc',
'num_channels': 3,
'metric_description': None
},
{
'metric_name': 'delta_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'theta_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'beta_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'alpha_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'gamma_absolute',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'delta_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'theta_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'beta_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'alpha_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'gamma_relative',
'num_channels': 4,
'metric_description': None
},
{
'metric_name': 'is_good',
'num_channels': 4,
'metric_description': 'Strict data quality indicator for each channel, 0= bad, 1 = good.'
},
{
'metric_name': 'blink',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'jaw_clench',
'num_channels': 1,
'metric_description': None
},
]
},
{
'device_name': 'neurosky',
'device_type': 'eeg_headset',
'metrics': [
{
'metric_name': 'concentration',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'meditation',
'num_channels': 1,
'metric_description': None
},
{
'metric_name': 'signal_strength',
'num_channels': 1,
'metric_description': None
},
]
},
{
'device_name': 'pulsesensor',
'device_type': 'heart_rate_monitor',
'metrics': [
{
'metric_name': 'raw',
'num_channels': 1,
'metric_description': None
}
]
}
]
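# Illustrative helper (not part of the original settings module): the nested
# DEVICE_METADATA structure above is typically consumed by looking up a device
# entry by name. The helper name and behavior are assumptions for demonstration.
def get_metrics_for_device(device_name):
    """Return the list of metric dicts for device_name, or None if unknown."""
    for device in DEVICE_METADATA:
        if device['device_name'] == device_name:
            return device['metrics']
    return None

# Example: get_metrics_for_device('muse')[0]['metric_name'] == 'eeg'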
|
prescottprue/cloudbrain
|
cloudbrain/settings.py
|
Python
|
agpl-3.0
| 3,785
|
import os
import mock
from uuid import uuid4
from urllib.request import urlopen
import datetime
from django.test import TestCase
from django.core.files.base import ContentFile
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from boto.s3.key import Key
from storages.backends import s3boto
__all__ = (
'ParseTsExtendedCase',
'SafeJoinTest',
'S3BotoStorageTests',
#'S3BotoStorageFileTests',
)
class ParseTsExtendedCase(TestCase):
def test_normal(self):
value = s3boto.parse_ts_extended("Wed, 13 Mar 2013 12:45:49 GMT")
self.assertEquals(value, datetime.datetime(2013, 3, 13, 12, 45, 49))
class S3BotoTestCase(TestCase):
@mock.patch('storages.backends.s3boto.S3Connection')
def setUp(self, S3Connection):
self.storage = s3boto.S3BotoStorage()
self.storage._bucket = mock.MagicMock()
class SafeJoinTest(TestCase):
def test_normal(self):
path = s3boto.safe_join("", "path/to/somewhere", "other", "path/to/somewhere")
self.assertEquals(path, "path/to/somewhere/other/path/to/somewhere")
def test_with_dot(self):
path = s3boto.safe_join("", "path/./somewhere/../other", "..",
".", "to/./somewhere")
self.assertEquals(path, "path/to/somewhere")
def test_base_url(self):
path = s3boto.safe_join("base_url", "path/to/somewhere")
self.assertEquals(path, "base_url/path/to/somewhere")
def test_base_url_with_slash(self):
path = s3boto.safe_join("base_url/", "path/to/somewhere")
self.assertEquals(path, "base_url/path/to/somewhere")
def test_suspicious_operation(self):
self.assertRaises(ValueError,
s3boto.safe_join, "base", "../../../../../../../etc/passwd")
class S3BotoStorageTests(S3BotoTestCase):
def test_storage_save(self):
"""
Test saving a file
"""
name = 'test_storage_save.txt'
content = ContentFile('new content')
self.storage.save(name, content)
self.storage.bucket.get_key.assert_called_once_with(name)
key = self.storage.bucket.get_key.return_value
key.set_metadata.assert_called_with('Content-Type', 'text/plain')
key.set_contents_from_file.assert_called_with(
content,
headers={'Content-Type': 'text/plain'},
policy=self.storage.default_acl,
reduced_redundancy=self.storage.reduced_redundancy,
rewind=True
)
def test_storage_save_gzip(self):
"""
Test saving a file with gzip enabled.
"""
if not s3boto.S3BotoStorage.gzip: # Gzip not available.
return
name = 'test_storage_save.css'
content = ContentFile("I should be gzip'd")
self.storage.save(name, content)
key = self.storage.bucket.get_key.return_value
key.set_metadata.assert_called_with('Content-Type', 'text/css')
key.set_contents_from_file.assert_called_with(
content,
headers={'Content-Type': 'text/css', 'Content-Encoding': 'gzip'},
policy=self.storage.default_acl,
reduced_redundancy=self.storage.reduced_redundancy,
rewind=True,
)
def test_compress_content_len(self):
"""
Test that file returned by _compress_content() is readable.
"""
if not s3boto.S3BotoStorage.gzip: # Gzip not available.
return
content = ContentFile("I should be gzip'd")
content = self.storage._compress_content(content)
self.assertTrue(len(content.read()) > 0)
def test_storage_open_write(self):
"""
Test opening a file in write mode
"""
name = 'test_open_for_writing.txt'
content = 'new content'
# Set the ACL header used when creating/writing data.
self.storage.bucket.connection.provider.acl_header = 'x-amz-acl'
# Set the mocked key's bucket
self.storage.bucket.get_key.return_value.bucket = self.storage.bucket
# Set the name of the mock object
self.storage.bucket.get_key.return_value.name = name
file = self.storage.open(name, 'w')
self.storage.bucket.get_key.assert_called_with(name)
file.write(content)
self.storage.bucket.initiate_multipart_upload.assert_called_with(
name,
headers={'x-amz-acl': 'public-read'},
reduced_redundancy=self.storage.reduced_redundancy,
)
# Save the internal file before closing
_file = file.file
file.close()
file._multipart.upload_part_from_file.assert_called_with(
_file, 1, headers=self.storage.headers,
)
file._multipart.complete_upload.assert_called_once()
#def test_storage_exists_and_delete(self):
# # show file does not exist
# name = self.prefix_path('test_exists.txt')
# self.assertFalse(self.storage.exists(name))
#
# # create the file
# content = 'new content'
# file = self.storage.open(name, 'w')
# file.write(content)
# file.close()
#
# # show file exists
# self.assertTrue(self.storage.exists(name))
#
# # delete the file
# self.storage.delete(name)
#
# # show file does not exist
# self.assertFalse(self.storage.exists(name))
def test_storage_listdir_base(self):
file_names = ["some/path/1.txt", "2.txt", "other/path/3.txt", "4.txt"]
self.storage.bucket.list.return_value = []
for p in file_names:
key = mock.MagicMock(spec=Key)
key.name = p
self.storage.bucket.list.return_value.append(key)
dirs, files = self.storage.listdir("")
self.assertEqual(len(dirs), 2)
for directory in ["some", "other"]:
self.assertTrue(directory in dirs,
""" "%s" not in directory list "%s".""" % (
directory, dirs))
self.assertEqual(len(files), 2)
for filename in ["2.txt", "4.txt"]:
self.assertTrue(filename in files,
""" "%s" not in file list "%s".""" % (
filename, files))
def test_storage_listdir_subdir(self):
file_names = ["some/path/1.txt", "some/2.txt"]
self.storage.bucket.list.return_value = []
for p in file_names:
key = mock.MagicMock(spec=Key)
key.name = p
self.storage.bucket.list.return_value.append(key)
dirs, files = self.storage.listdir("some/")
self.assertEqual(len(dirs), 1)
self.assertTrue('path' in dirs,
""" "path" not in directory list "%s".""" % (dirs,))
self.assertEqual(len(files), 1)
self.assertTrue('2.txt' in files,
""" "2.txt" not in files list "%s".""" % (files,))
#def test_storage_size(self):
# name = self.prefix_path('test_storage_size.txt')
# content = 'new content'
# f = ContentFile(content)
# self.storage.save(name, f)
# self.assertEqual(self.storage.size(name), f.size)
#
#def test_storage_url(self):
# name = self.prefix_path('test_storage_size.txt')
# content = 'new content'
# f = ContentFile(content)
# self.storage.save(name, f)
# self.assertEqual(content, urlopen(self.storage.url(name)).read())
#class S3BotoStorageFileTests(S3BotoTestCase):
# def test_multipart_upload(self):
# nparts = 2
# name = self.prefix_path("test_multipart_upload.txt")
# mode = 'w'
# f = s3boto.S3BotoStorageFile(name, mode, self.storage)
# content_length = 1024 * 1024# 1 MB
# content = 'a' * content_length
#
# bytes = 0
# target = f._write_buffer_size * nparts
# while bytes < target:
# f.write(content)
# bytes += content_length
#
# # make the buffer roll over so f._write_counter
# # is incremented
# f.write("finished")
#
# # verify upload was multipart and correctly partitioned
# self.assertEqual(f._write_counter, nparts)
#
# # complete the upload
# f.close()
#
# # verify that the remaining buffered bytes were
# # uploaded when the file was closed.
# self.assertEqual(f._write_counter, nparts+1)
|
peterseymour/django-storages-1.1.8
|
storages/tests/s3boto.py
|
Python
|
bsd-3-clause
| 8,692
|
# Convert a single layer from a netCDF variable to a flt file.
# It is assumed that the netCDF variable dimensions are longitude,
# latitude and, optionally, a third dimension such as time or
# channel/waveength
import sys
import os.path
import numpy
import netCDF3
import nc3_handler as nh
def nctoflt(ncfile, fltstem, varname, iz=0):
"""Main function to process a netCDF file to binary flt
Output files have the stem name and suffix .flt and .hdr
If varname is 3D, then iz is the index of the first dimension used
to extract a 2D slice.
If the latitude runs south to north, then the grid is flipped before
being written
"""
ncobj = nh.nc3_open(ncfile,'r')
a = ncobj.variables[varname]
    # Copy out into a numpy array of type float32; 3D data is sliced
    # down to a single 2D layer below.
b = numpy.float32(ncobj.variables[varname])
if len(b.shape) < 2 or len(b.shape) > 3:
        raise ValueError("Only 2D and 3D data allowed (not %dD)" % len(b.shape))
if len(b.shape) == 3:
b = numpy.float32(b[iz,::,::].reshape(b.shape[1], b.shape[2]))
fillValue = numpy.float32(ncobj.variables[varname]._FillValue)
latvec = ncobj.variables['latitude']
lonvec = ncobj.variables['longitude']
lat1 = latvec[0]
lat2 = latvec[len(latvec)-1]
# Reverse if latitude runs South to North
if lat1 < lat2:
x = lat2
lat2 = lat1
lat1 = x
b = b[::-1,]
lon1 = lonvec[0]
lon2 = lonvec[len(lonvec)-1]
dlat = abs(lat1-lat2)/(len(latvec)-1)
dlon = abs(lon2-lon1)/(len(lonvec)-1)
xll = lon1-dlon*0.5
yll = lat2-dlat*0.5
fltname = fltstem+'.flt'
if os.path.exists(fltname): os.unlink(fltname)
b.tofile(fltname)
    f = open(fltstem+".hdr","w")
f.write("ncols %d\n" % b.shape[1])
f.write("nrows %d\n" % b.shape[0])
f.write("xllcorner %f\n" % xll)
f.write("yllcorner %f\n" % yll)
f.write("cellsize %f\n" % dlon)
f.write("NODATA_value %f\n" % fillValue)
if sys.byteorder == "little":
f.write("byteorder LSBFIRST\n")
else:
f.write("byteorder LSBLAST\n")
f.close()
attr = nh.nc3_get_attributes(ncobj)
nh.nc3_close(ncobj)
return attr
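# Illustrative read-back sketch (not part of the original module): the .flt
# file written by nctoflt is a raw float32 grid whose shape is recorded in the
# accompanying .hdr file, so it can be reloaded with numpy alone (assuming the
# file was written with the machine's native byte order). The function name is
# an assumption for demonstration purposes.
def fltread(fltstem):
    """Return the 2D float32 grid written by nctoflt."""
    hdr = {}
    f = open(fltstem + ".hdr")
    for line in f:
        key, value = line.split()
        hdr[key] = value
    f.close()
    data = numpy.fromfile(fltstem + ".flt", dtype=numpy.float32)
    return data.reshape(int(hdr["nrows"]), int(hdr["ncols"]))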
if __name__ == '__main__':
if 0:
ncfile = 'test/20110421_Etot.nc'
varname = 'Etot'
else:
ncfile = 'test/C-bawap.D1-20110101.D2-20110101.I-P1D.V-rain_day.P-raw.DC-20110630T075313.DM-20111207T222219.nc'
varname = 'rain_day'
try:
nctoflt(ncfile, 'testflt', varname)
except ValueError as e:
print "Wrong number of dimensions for variable=%s: %s" % (varname, e)
|
KimberleyOpie/common-tools
|
nc_to_formats/nctoflt.py
|
Python
|
apache-2.0
| 2,664
|
#!/usr/bin/env python
#
# Copyright 2004,2005 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from gnuradio import gr, atsc
import math
def main():
fg = gr.flow_graph()
u = gr.file_source(gr.sizeof_float,"/tmp/atsc_pipe_2")
input_rate = 19.2e6
IF_freq = 5.75e6
# 1/2 as wide because we're designing lp filter
symbol_rate = atsc.ATSC_SYMBOL_RATE/2.
NTAPS = 279
tt = gr.firdes.root_raised_cosine (1.0, input_rate, symbol_rate, .115, NTAPS)
# heterodyne the low pass coefficients up to the specified bandpass
# center frequency. Note that when we do this, the filter bandwidth
# is effectively twice the low pass (2.69 * 2 = 5.38) and hence
# matches the diagram in the ATSC spec.
arg = 2. * math.pi * IF_freq / input_rate
t=[]
for i in range(len(tt)):
t += [tt[i] * 2. * math.cos(arg * i)]
rrc = gr.fir_filter_fff(1, t)
fpll = atsc.fpll()
pilot_freq = IF_freq - 3e6 + 0.31e6
lower_edge = 6e6 - 0.31e6
upper_edge = IF_freq - 3e6 + pilot_freq
transition_width = upper_edge - lower_edge
lp_coeffs = gr.firdes.low_pass (1.0,
input_rate,
(lower_edge + upper_edge) * 0.5,
transition_width,
gr.firdes.WIN_HAMMING);
lp_filter = gr.fir_filter_fff (1,lp_coeffs)
alpha = 1e-5
iir = gr.single_pole_iir_filter_ff(alpha)
remove_dc = gr.sub_ff()
out = gr.file_sink(gr.sizeof_float,"/tmp/atsc_pipe_3")
# out = gr.file_sink(gr.sizeof_float,"/mnt/sata/atsc_data_float")
fg.connect(u, fpll, lp_filter)
fg.connect(lp_filter, iir)
fg.connect(lp_filter, (remove_dc,0))
fg.connect(iir, (remove_dc,1))
fg.connect(remove_dc, out)
fg.run()
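# Illustrative sketch (not part of the original script): the lowpass-to-bandpass
# heterodyne applied to the RRC taps above can be factored into a plain helper;
# only the math module is needed for the tap arithmetic.
def heterodyne_taps(lowpass_taps, center_freq, sample_rate):
    """Shift real lowpass taps up to a bandpass centered at center_freq."""
    arg = 2. * math.pi * center_freq / sample_rate
    return [2. * tap * math.cos(arg * i) for i, tap in enumerate(lowpass_taps)]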
if __name__ == '__main__':
    main()
|
trnewman/VT-USRP-daughterboard-drivers_python
|
gr-atsc/src/python/fpll.py
|
Python
|
gpl-3.0
| 2,429
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2013 Yury Konovalov <YKonovalov@gmail.com>
#
# This file is part of SSP.
#
# SSP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SSP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SSP. If not, see <http://www.gnu.org/licenses/>.
"""Tools to scan for WBEM devices"""
import logging
import subprocess
import time
import re
from ssp.chassis.netconfig import get_global_to_local_networks_projection, get_all_global_to_local_networks_projection, get_attrset_of_networks
from ssp.chassis.common.scanner import COM_SCANNER
from socket import gethostname,getaddrinfo
HWCONTROLS = [ "WBEM_SCANNER" ]
__all__ = HWCONTROLS
LOG = logging.getLogger("ssp.chassis.wbem.scanner")
LOG.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
ch.setFormatter(formatter)
LOG.addHandler(ch)
def Request_decorator(func):
"""Logs all requests."""
def decorator(self, *args, **kwargs):
if kwargs:
LOG.info("%s(%s)%s called.", func.func_name, args, kwargs)
else:
LOG.info("%s(%s) called.", func.func_name, args)
return func(self, *args, **kwargs)
return decorator
class WBEM_SCANNER(COM_SCANNER):
"""Represents an generic WBEM device scanner using SLP protocol."""
name="WBEM_SCANNER"
scanspec=[('SLP' ,'service:service-agent'),
('WBEM','service:wbem')]
""" Scan for specific service in form <TAG>,<SLP service name>
example: [('IBMBC','service:management-hardware.IBM:management-module')]"""
@Request_decorator
def _scan(self, networks={}):
"""Scan for IPMI devices on restricted set of networks."""
s = self.__scan_specific_ifaces_by_slptool(networks)
return s
@Request_decorator
def _scan_blindly(self):
"""Scan for IPMI devices on all networks"""
s = self.__scan_blindly_by_slptool()
return s
@Request_decorator
def __scan_specific_ifaces_by_slptool(self, networks={}):
"""Scan for SLP devices with rmcp_ping on restricted set of networks."""
if_ips=set()
if networks.keys():
for ifip in get_attrset_of_networks(networks,'iface_ip_and_preffix'):
if_ips.add(ifip.split('/')[0])
ifspec=",".join(if_ips)
s = self.__slptool(ifs=ifspec)
return s
@Request_decorator
def __scan_blindly_by_slptool(self):
"""Scan for IPMI devices with rmcp_ping without specifing interfaces"""
s = self.__slptool()
return s
@Request_decorator
def __slptool(self, ifs=None, scantype="SLP"):
"""Run slptool on IP and return list of hosts. scantype is 'WBEM' or 'SLP'
slptool can restrict """
hosts=[]
myhost=gethostname()
icmd=["slptool"]
if ifs:
icmd.append("-i")
icmd.append(ifs)
LOG.debug("scanning via " + ifs)
icmd.append("findsrvs")
for scantype,scanservice in self.scanspec:
cmd=list(icmd)
cmd.append(scanservice)
LOG.debug(str(cmd))
try:
s = subprocess.Popen(cmd, stderr=open('/dev/null', 'w'), stdout=subprocess.PIPE).communicate()[0]
                LOG.debug(str(s))
for r in re.finditer(r"(?m)^.*//(?P<host>[^\s/,]+),+.*$",s):
h=r.groupdict()
h['type']=scantype
h['scanner']={'host': myhost, 'iface_ip': ifs, 'scanner': 'slptool', 'scanproto': scantype}
hosts.append(h)
except:
LOG.error("Cannot run slptool command.")
raise
return hosts
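# Illustrative example (not part of the original module): the regex in
# __slptool extracts the host portion of slptool "findsrvs" output lines.
# The sample line below is an assumption, not captured slptool output.
# >>> sample = "service:wbem:https://10.0.0.42:5989,65535"
# >>> re.search(r"(?m)^.*//(?P<host>[^\s/,]+),+.*$", sample).group("host")
# '10.0.0.42:5989'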
|
mdcic/ssp
|
ssp/chassis/wbem/scanner.py
|
Python
|
gpl-3.0
| 3,776
|
from compmod.models import RingCompression
from abapy import materials
from abapy.misc import load
import matplotlib.pyplot as plt
import numpy as np
import pickle, copy
import platform
# PARAMETERS
inner_radius, outer_radius = 45.18 , 50.36
Nt, Nr, Na = 20, 4, 8
Ne = Nt * Nr * Na
disp = 10
nFrames = 100
thickness = 20.02
E = 120000. * np.ones(Ne) # Young's modulus
nu = .3 * np.ones(Ne) # Poisson's ratio
Ssat =1000 * np.ones(Ne)
n = 200 * np.ones(Ne)
sy_mean = 200.
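# The mean of a Rayleigh distribution is scale * sqrt(pi/2) ~= 1.253314 * scale,
# so dividing the target mean yield stress by 1.253314 gives the Rayleigh scale.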
ray_param = sy_mean/1.253314
sy = np.random.rayleigh(ray_param, Ne)
labels = ['mat_{0}'.format(i+1) for i in xrange(len(sy))]
material = [materials.Bilinear(labels = labels[i], E = E[i], nu = nu[i], Ssat = Ssat[i], n=n[i], sy = sy[i]) for i in xrange(Ne)]
workdir = "D:\donnees_pyth/workdir/"
label = "ringCompression3DCompart"
elType = "CPE4"
cpus = 1
node = platform.node()
if node == 'lcharleux': abqlauncher = '/opt/Abaqus/6.9/Commands/abaqus' # Ludovic
if node == 'serv2-ms-symme': abqlauncher = '/opt/abaqus/Commands/abaqus' # Linux
if node == 'epua-pd47':
abqlauncher = 'C:/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe' # Local machine configuration
if node == 'SERV3-MS-SYMME':
abqlauncher = '"C:/Program Files (x86)/SIMULIA/Abaqus/6.11-2/exec/abq6112.exe"' # Local machine configuration
if node == 'epua-pd45':
abqlauncher = 'C:\SIMULIA/Abaqus/Commands/abaqus'
# TASKS
run_sim = True
plot = True
# MODEL DEFINITION
m = RingCompression( material = material,inner_radius = inner_radius,
outer_radius = outer_radius,
disp = disp/2,
thickness = thickness,
nFrames = nFrames,
Nr = Nr,
Nt = Nt,
Na = Na,
workdir = "D:\donnees_pyth/workdir/",
label = label,
elType = elType,
abqlauncher = abqlauncher,
cpus = 1,
compart = True,
is_3D = True)
# SIMULATION
m.MakeMesh()
if run_sim:
m.MakeInp()
m.Run()
m.PostProc()
# SOME PLOTS
mesh = m.mesh
outputs = load(workdir + label + '.pckl')
if outputs['completed']:
# Fields
def field_func(outputs, step):
"""
A function that defines the scalar field you want to plot
"""
return outputs['field']['S'][step].vonmises()
"""
def plot_mesh(ax, mesh, outputs, step, field_func =None, zone = 'upper right', cbar = True, cbar_label = 'Z', cbar_orientation = 'horizontal', disp = True):
A function that plots the deformed mesh with a given field on it.
mesh2 = copy.deepcopy(mesh)
if disp:
U = outputs['field']['U'][step]
mesh2.nodes.apply_displacement(U)
X,Y,Z,tri = mesh2.dump2triplot()
xb,yb,zb = mesh2.get_border()
xe, ye, ze = mesh2.get_edges()
if zone == "upper right": kx, ky = 1., 1.
if zone == "upper left": kx, ky = -1., 1.
if zone == "lower right": kx, ky = 1., -1.
if zone == "lower left": kx, ky = -1., -1.
ax.plot(kx * xb, ky * yb,'k-', linewidth = 2.)
ax.plot(kx * xe, ky * ye,'k-', linewidth = .5)
if field_func != None:
field = field_func(outputs, step)
grad = ax.tricontourf(kx * X, ky * Y, tri, field.data)
if cbar :
bar = plt.colorbar(grad, orientation = cbar_orientation)
bar.set_label(cbar_label)
fig = plt.figure("Fields")
plt.clf()
ax = fig.add_subplot(1, 1, 1)
ax.set_aspect('equal')
plt.grid()
plot_mesh(ax, mesh, outputs, 0, field_func, cbar_label = '$\sigma_{eq}$')
plot_mesh(ax, mesh, outputs, 0, field_func = None, cbar = False, disp = False)
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.savefig(workdir + label + '_fields.pdf')
"""
# Load vs disp
force = -2. * outputs['history']['force']
disp = -2. * outputs['history']['disp']
fig = plt.figure('Load vs. disp')
plt.clf()
plt.plot(disp.data[0], force.data[0], 'ro-', label = 'Loading', linewidth = 2.)
plt.plot(disp.data[1], force.data[1], 'bv-', label = 'Unloading', linewidth = 2.)
plt.legend(loc="upper left")
plt.grid()
plt.xlabel('Displacement, $U$')
plt.ylabel('Force, $F$')
plt.savefig(workdir + label + '_load-vs-disp.pdf')
|
lcharleux/compmod-doc
|
doc/example_code/models/ring_compression_3D_compart.py
|
Python
|
gpl-2.0
| 3,987
|
#!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
import operator
import os
OUT_CPP="src/qt/iQcoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
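# Illustrative usage (not part of the original script): parse_po turns an
# xgettext 'po' fragment into (msgid, msgstr) tuples of raw quoted lines, e.g.
# parse_po('msgid "Hello"\nmsgstr ""\n') == [(['"Hello"'], ['""'])]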
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *iQcoin_strings[] = {\n')
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("iQcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
iQcoin/iQcoin
|
share/qt/extract_strings_qt.py
|
Python
|
mit
| 1,897
|
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for custom landing pages."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.tests import test_utils
import feconf
class FractionLandingRedirectPageTest(test_utils.GenericTestBase):
"""Test for redirecting landing page for fractions."""
def test_old_fractions_landing_url_without_viewer_type(self):
"""Test to validate the old Fractions landing url without viewerType
redirects to the new Fractions landing url.
"""
response = self.get_html_response(
feconf.FRACTIONS_LANDING_PAGE_URL, expected_status_int=302)
self.assertEqual(
'http://localhost/math/fractions',
response.headers['location'])
def test_old_fraction_landing_url_with_viewer_type(self):
"""Test to validate the old Fractions landing url with viewerType
redirects to the new Fractions landing url.
"""
response = self.get_html_response(
'%s?viewerType=student' % feconf.FRACTIONS_LANDING_PAGE_URL,
expected_status_int=302)
self.assertEqual(
'http://localhost/math/fractions',
response.headers['location'])
class TopicLandingRedirectPageTest(test_utils.GenericTestBase):
"""Test for redirecting the old landing page URL to the new one."""
def test_old_topic_url_redirect(self):
response = self.get_html_response(
'/learn/maths/fractions', expected_status_int=302)
self.assertEqual(
'http://localhost/math/fractions', response.headers['location'])
class TopicLandingPageTest(test_utils.GenericTestBase):
"""Test for showing landing pages."""
def test_valid_subject_and_topic_loads_correctly(self):
response = self.get_html_response('/math/fractions')
response.mustcontain('<topic-landing-page></topic-landing-page>')
class StewardsLandingPageTest(test_utils.GenericTestBase):
"""Test for showing the landing page for stewards (parents, teachers,
volunteers, or NGOs).
"""
def test_nonprofits_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_NONPROFITS_LANDING_PAGE_URL)
response.mustcontain(
'<stewards-landing-page></stewards-landing-page>')
def test_parents_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_PARENTS_LANDING_PAGE_URL)
response.mustcontain(
'<stewards-landing-page></stewards-landing-page>')
def test_teachers_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_TEACHERS_LANDING_PAGE_URL)
response.mustcontain('<stewards-landing-page></stewards-landing-page>')
def test_volunteers_landing_page(self):
response = self.get_html_response(
feconf.CUSTOM_VOLUNTEERS_LANDING_PAGE_URL)
response.mustcontain('<stewards-landing-page></stewards-landing-page>')
|
prasanna08/oppia
|
core/controllers/custom_landing_pages_test.py
|
Python
|
apache-2.0
| 3,649
|
# coding: utf-8
from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import boto3
from boto3.dynamodb.conditions import Attr
import os
import sys
import uuid
import re
# Path to modules needed to package local lambda function for upload
currentdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(os.path.join(currentdir, "./vendored"))
# Modules downloaded into the vendored directory
# Logging for Serverless
log = logging.getLogger()
log.setLevel(logging.DEBUG)
# Initializing AWS services
sns = boto3.client('sns')
dynamodb = boto3.resource('dynamodb')
awslambda = boto3.client('lambda')
sts = boto3.client('sts')
def handler(event, context):
log.debug("Received event {}".format(json.dumps(event)))
cbInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_CBINFO'])
accountInfo = dynamodb.Table(os.environ['TAILOR_TABLENAME_ACCOUNTINFO'])
try:
print('context:resource-path', event['context']['resource-path'] == '/vpcflowlogs')
        print('body-json:region', re.match("^us-[a-z]{4}-[12]$", event['body-json']['region']))
        print('body-json:stackName', re.match("^[0-9a-z_-]{4,35}$", event['body-json']['stackName']))
print('body-json:accountId', re.match("^[0-9]{12}$", event['body-json']['accountId']))
print('header:accountCbAlias', event['params']['header']['accountCbAlias'])
except Exception as e:
print(e)
print("regex not matching any values passed in request")
raise Exception({"code": "4000", "message": "ERROR: Bad request"})
# VPC Flow Logs logic
if event['context']['resource-path'] == '/vpcflowlogs' and \
re.match("^[0-9a-z|_-]{4,35}$", event['body-json']['stackName']) and \
re.match("^us-[a-z]{4}-[1|2]$", event['body-json']['region']) and \
re.match("^[0-9]{12}$", event['body-json']['accountId']) and \
re.match("^[a-z-]{4,15}$", event['params']['header']['accountCbAlias']):
requestId = str(uuid.uuid4())
region = event['body-json']['region']
accountId = event['body-json']['accountId']
stackName = event['body-json']['stackName']
accountCbAlias = event['params']['header']['accountCbAlias']
stage = event['stage-variables']['stage']
# Check if account already exists
getAccountId = accountInfo.scan(
ProjectionExpression='accountId, accountEmailAddress',
FilterExpression=Attr('accountId').eq(accountId)
)
if getAccountId['Count'] == 0:
print("Account not found")
raise Exception({"code": "4040", "message": "ERROR: Not found"})
elif int(getAccountId['Count']) > 0:
# Update accountInfo with new requestId
updateAccountInfo = accountInfo.update_item(
Key={
'accountEmailAddress': getAccountId['Items'][0]['accountEmailAddress']
},
UpdateExpression='SET #requestId = :val1',
ExpressionAttributeNames={'#requestId': "requestId"},
ExpressionAttributeValues={':val1': requestId}
)
# Lookup payer account number
getCbInfo = cbInfo.get_item(
Key={
'accountCbAlias': accountCbAlias
}
)
accountCbId = getCbInfo['Item']['accountCbId']
# Initialize credentials for linked account
la_aws_access_key_id, la_aws_secret_access_key, la_aws_session_token = \
initialize_la_services(account_cb_id=accountCbId, la_account_id=accountId)
# Lookup stackId
laCfn = boto3.client(
'cloudformation',
region_name=region,
aws_access_key_id=la_aws_access_key_id,
aws_secret_access_key=la_aws_secret_access_key,
aws_session_token=la_aws_session_token,
)
try:
describeStack = laCfn.describe_stacks(
StackName=stackName
)
stackId = describeStack['Stacks'][0]['StackId']
except Exception as e:
print(e)
print("Stack not found")
raise Exception({"code": "4040", "message": "ERROR: Not found"})
        # Build a Lambda invoke payload that mimics the SNS notification CloudFormation publishes on stack events
message = "StackId='" + stackId + "'\nLogicalResourceId='core'\nNamespace='" + accountId + "'\nPhysicalResourceId='" + stackId + "'\nResourceStatus='CREATE_COMPLETE'\nStackName='" + stackName + "'\n"
payload = {"Records": [{"Sns": {"Message": message}}]}
# Call Lambda
awslambda.invoke(
FunctionName='talr-vpcflowlogs-' + stage,
InvocationType='Event',
Payload=json.dumps(payload)
)
return {"code": "2020", "message": "Request Accepted", "requestId": requestId}
else:
raise Exception({"code": "4000", "message": "ERROR: Bad request"})
def initialize_la_services(account_cb_id, la_account_id):
# Payer account credentials
payerAssumeRole = sts.assume_role(
RoleArn="arn:aws:iam::" + account_cb_id + ":role/tailor",
RoleSessionName="talrIamPayerAssumeRole"
)
payerCredentials = payerAssumeRole['Credentials']
payer_aws_access_key_id = payerCredentials['AccessKeyId']
payer_aws_secret_access_key = payerCredentials['SecretAccessKey']
payer_aws_session_token = payerCredentials['SessionToken']
# Linked account credentials
laSts = boto3.client(
'sts',
aws_access_key_id=payer_aws_access_key_id,
aws_secret_access_key=payer_aws_secret_access_key,
aws_session_token=payer_aws_session_token,
)
laAssumeRole = laSts.assume_role(
RoleArn="arn:aws:iam::" + la_account_id + ":role/PayerAccountAccessRole",
RoleSessionName="talrIamLaAssumeRole"
)
laCredentials = laAssumeRole['Credentials']
la_aws_access_key_id = laCredentials['AccessKeyId']
la_aws_secret_access_key = laCredentials['SecretAccessKey']
la_aws_session_token = laCredentials['SessionToken']
# Initialize IAM client with Linked Account credentials
laIam = boto3.client(
'iam',
aws_access_key_id=la_aws_access_key_id,
aws_secret_access_key=la_aws_secret_access_key,
aws_session_token=la_aws_session_token,
)
return (la_aws_access_key_id, la_aws_secret_access_key, la_aws_session_token)
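# Illustrative check (not part of the original handler): the character classes
# used for request validation above accept the intended values and reject the
# stray pipe that a class like '[1|2]' would have allowed. Sample inputs are
# assumptions for demonstration.
# >>> bool(re.match("^us-[a-z]{4}-[12]$", "us-east-1"))
# True
# >>> bool(re.match("^us-[a-z]{4}-[12]$", "us-east-|"))
# False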
|
alanwill/aws-tailor
|
sam/functions/talr-accountupdate-vpcflowlogs/handler.py
|
Python
|
gpl-3.0
| 6,571
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Investigator.location'
db.delete_column(u'survey_investigator', 'location_id')
def backwards(self, orm):
# Adding field 'Investigator.location'
db.add_column(u'survey_investigator', 'location',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['locations.Location'], null=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.aboutus': {
'Meta': {'object_name': 'AboutUs'},
'content': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_rule'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rule'", 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_max_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_min_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answer_rule'", 'null': 'True', 'to': "orm['survey.QuestionOption']"}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.backend': {
'Meta': {'object_name': 'Backend'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'})
},
'survey.batch': {
'Meta': {'unique_together': "(('survey', 'name'),)", 'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'non_response': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.batchquestionorder': {
'Meta': {'object_name': 'BatchQuestionOrder'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_question_order'", 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_batch_order'", 'to': "orm['survey.Question']"})
},
'survey.enumerationarea': {
'Meta': {'object_name': 'EnumerationArea'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enumeration_area'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.formula': {
'Meta': {'object_name': 'Formula'},
'count': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_count'", 'null': 'True', 'to': "orm['survey.Question']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'denominator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_denominator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'denominator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'denominator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"}),
'groups': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'as_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'formula'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'numerator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'as_numerator'", 'null': 'True', 'to': "orm['survey.Question']"}),
'numerator_options': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'numerator_options'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['survey.QuestionOption']"})
},
'survey.groupcondition': {
'Meta': {'unique_together': "(('value', 'attribute', 'condition'),)", 'object_name': 'GroupCondition'},
'attribute': ('django.db.models.fields.CharField', [], {'default': "'AGE'", 'max_length': '20'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'EQUALS'", 'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'conditions'", 'symmetrical': 'False', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_code': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'random_sample_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'survey_household'", 'null': 'True', 'to': "orm['survey.Survey']"}),
'uid': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batch_completion_completed_households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead', '_ormbases': ['survey.HouseholdMember']},
u'householdmember_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['survey.HouseholdMember']", 'unique': 'True', 'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'})
},
'survey.householdmember': {
'Meta': {'object_name': 'HouseholdMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'household_member'", 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'survey.householdmemberbatchcompletion': {
'Meta': {'object_name': 'HouseholdMemberBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_member_batches'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdmembergroup': {
'Meta': {'object_name': 'HouseholdMemberGroup'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'unique': 'True', 'max_length': '5'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.CharField', [], {'default': "'Percentage'", 'max_length': '255'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicator'", 'to': "orm['survey.QuestionModule']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'backend': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Backend']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'ea': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enumeration_area'", 'null': 'True', 'to': "orm['survey.EnumerationArea']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'weights': ('django.db.models.fields.FloatField', [], {'default': '0'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.locationcode': {
'Meta': {'object_name': 'LocationCode'},
'code': ('django.db.models.fields.CharField', [], {'default': '0', 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'code'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.locationtypedetails': {
'Meta': {'object_name': 'LocationTypeDetails'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'has_code': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_code': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'location_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'details'", 'to': u"orm['locations.LocationType']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'unique': 'True', 'null': 'True', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.locationweight': {
'Meta': {'object_name': 'LocationWeight'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'weight'", 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'selection_probability': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_weight'", 'to': "orm['survey.Survey']"})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'multichoiceanswer'", 'null': 'True', 'to': "orm['survey.Question']"}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'numericalanswer'", 'null': 'True', 'to': "orm['survey.Question']"}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'batches': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_group'", 'null': 'True', 'to': "orm['survey.HouseholdMemberGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'module': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'module_question'", 'null': 'True', 'to': "orm['survey.QuestionModule']"}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.questionmodule': {
'Meta': {'object_name': 'QuestionModule'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'survey.questionoption': {
'Meta': {'ordering': "['order']", 'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'survey.randomhouseholdselection': {
'Meta': {'object_name': 'RandomHouseHoldSelection'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'no_of_households': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'selected_households': ('django.db.models.fields.TextField', [], {}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'random_household'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'}),
'has_sampling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'sample_size': ('django.db.models.fields.PositiveIntegerField', [], {'default': '10', 'max_length': '2'}),
'type': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Batch']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Household']"}),
'householdmember': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'is_old': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'textanswer'", 'null': 'True', 'to': "orm['survey.Question']"}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.unknowndobattribute': {
'Meta': {'object_name': 'UnknownDOBAttribute'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household_member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'unknown_dob_attribute'", 'to': "orm['survey.HouseholdMember']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
'survey.uploaderrorlog': {
'Meta': {'object_name': 'UploadErrorLog'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'row_number': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'survey.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'userprofile'", 'unique': 'True', 'to': u"orm['auth.User']"})
}
}
complete_apps = ['survey']
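# Usage note (an illustrative sketch, not part of the generated file): with
# South installed, a migration like this is generated and applied from the
# project root, e.g.:
#
#   python manage.py schemamigration survey --auto   # generate the migration
#   python manage.py migrate survey                  # apply forwards()
#   python manage.py migrate survey 0125             # roll back to the prior step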
|
antsmc2/mics
|
survey/migrations/0126_auto__del_field_investigator_location.py
|
Python
|
bsd-3-clause
| 38,033
|
# coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
class V1PersistentVolumeClaimList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Swagger model
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'kind': 'str',
'api_version': 'str',
'metadata': 'V1ListMeta',
'items': 'list[V1PersistentVolumeClaim]'
}
self.attribute_map = {
'kind': 'kind',
'api_version': 'apiVersion',
'metadata': 'metadata',
'items': 'items'
}
self._kind = None
self._api_version = None
self._metadata = None
self._items = None
@property
def kind(self):
"""
Gets the kind of this V1PersistentVolumeClaimList.
kind of object, in CamelCase; cannot be updated; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#types-kinds
:return: The kind of this V1PersistentVolumeClaimList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1PersistentVolumeClaimList.
kind of object, in CamelCase; cannot be updated; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#types-kinds
:param kind: The kind of this V1PersistentVolumeClaimList.
:type: str
"""
self._kind = kind
@property
def api_version(self):
"""
Gets the api_version of this V1PersistentVolumeClaimList.
version of the schema the object should have; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#resources
:return: The api_version of this V1PersistentVolumeClaimList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1PersistentVolumeClaimList.
version of the schema the object should have; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#resources
:param api_version: The api_version of this V1PersistentVolumeClaimList.
:type: str
"""
self._api_version = api_version
@property
def metadata(self):
"""
Gets the metadata of this V1PersistentVolumeClaimList.
standard list metadata; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#types-kinds
:return: The metadata of this V1PersistentVolumeClaimList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1PersistentVolumeClaimList.
standard list metadata; see http://releases.k8s.io/v1.0.4/docs/api-conventions.md#types-kinds
:param metadata: The metadata of this V1PersistentVolumeClaimList.
:type: V1ListMeta
"""
self._metadata = metadata
@property
def items(self):
"""
Gets the items of this V1PersistentVolumeClaimList.
a list of persistent volume claims; see http://releases.k8s.io/v1.0.4/docs/persistent-volumes.md#persistentvolumeclaims
:return: The items of this V1PersistentVolumeClaimList.
:rtype: list[V1PersistentVolumeClaim]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1PersistentVolumeClaimList.
a list of persistent volume claims; see http://releases.k8s.io/v1.0.4/docs/persistent-volumes.md#persistentvolumeclaims
:param items: The items of this V1PersistentVolumeClaimList.
:type: list[V1PersistentVolumeClaim]
"""
self._items = items
def to_dict(self):
"""
Return model properties dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
else:
result[attr] = value
return result
def to_str(self):
"""
Return model properties str
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
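# --- Hypothetical usage sketch (not part of the generated model) ---
# The generated models are plain attribute containers: a caller populates
# them through the property setters and serializes with to_dict()/to_str().
#
#   claims = V1PersistentVolumeClaimList()
#   claims.kind = 'PersistentVolumeClaimList'
#   claims.api_version = 'v1'
#   claims.items = []          # would hold V1PersistentVolumeClaim objects
#   print(claims.to_dict())    # {'kind': 'PersistentVolumeClaimList', ...}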
|
eshijia/magnum
|
magnum/common/pythonk8sclient/swagger_client/models/v1_persistent_volume_claim_list.py
|
Python
|
apache-2.0
| 5,511
|
from struct import pack, unpack
from operator import itemgetter
class Sentinel(object):
pass
class Message(tuple):
__slots__ = ()
_initialized = False
def __new__(_cls, *args, **kwargs):
if not _cls._initialized:
for i, x in enumerate(_cls._fields):
setattr(_cls, x, property(itemgetter(i)))
_cls._initialized = True
a = tuple([kwargs.pop(x, Sentinel()) for x in _cls._fields])
if kwargs:
raise Exception('Unknown fields: %s'%\
', '.join(list(kwargs.keys())))
def setdef(a, d, t):
if isinstance(a, Sentinel):
return t(d)
else:
return t(a)
        ax = [setdef(x, d, t) for x, d, t in zip(a, _cls._defaults, _cls._types)]
ret = tuple.__new__(_cls, ax)
return ret
def get_bytes(self):
msg = b''.join([x.get_bytes() for x in self])
l = len(msg) + 5
hdr = pack('!BHH', (l >> 16) & 0xff, l & 0xffff, self._msgtype)
#hdr = b''.join((chr((l >> 16) & 0xff),
# chr((l >> 8) & 0xff),
# chr(l & 0xff),
# chr((self._msgtype >> 8) & 0xff),
# chr(self._msgtype & 0xff)))
return hdr + msg
def __repr__(self):
        s = ['%s=%r' % (k, v) for k, v in zip(self._fields, self)]
return self.__class__.__name__ + '(%s)'%', '.join(s)
def __str__(self):
return self.__repr__()
@classmethod
def frombytes(_cls, b):
args = dict()
#print _cls, '%r'%len(b)
for n, t, d in zip(_cls._fields, _cls._types, _cls._defaults):
#print _cls, t, n, len(b)
(val, sz) = t.frombytes(b, d)
args[n] = val
b = b[sz:]
return _cls(**args)
class TInt(int):
def get_bytes(self):
return pack('!Bi', 1, self)
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
tv, v = unpack('!Bi', b[:5])
assert(tv == 1)
return (_cls(v), 5)
class TBool(int):
    def get_bytes(self):
        # The original packed booleans with the integer layout ('!Bi', tag 1),
        # which frombytes() below cannot parse back; use the boolean layout.
        return pack('!BB', 2, self)
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
tv, v = unpack('!BB', b[:2])
assert(tv == 2)
return (_cls(v), 2)
class TIntArray(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
(tv, nr) = unpack('!BH', b[:3])
#print 'IntArray', tv, nr // 4, len(b)
assert(tv == 5)
ofs = 3
out = list()
for i in range(nr // 4):
s = TInt(unpack('!i', b[ofs:ofs+4])[0])
out.append(s)
ofs += 4
return (_cls(out), ofs)
class TIntVector(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
(tv, nr) = unpack('!BH', b[:3])
assert(tv == 8)
ofs = 3
out = list()
for i in range(nr // 4):
s = TInt(unpack('!i', b[ofs:ofs+4])[0])
out.append(s)
ofs += 4
return (_cls(out), ofs)
class TInt2Array(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
(tv, tot_len, nr) = unpack('!BHB', b[:4])
assert(tv == 11)
if tot_len > len(b):
print('EVIL %d > %d'%(tot_len, len(b)))
return (tuple(), len(b))
assert(tot_len <= len(b))
ofs = 4
out = list()
for i in range(nr):
(s, sz) = TIntArray.frombytes(b[ofs:], '')
out.append(s)
ofs += sz
return (_cls(out), ofs)
class TInt3Array(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
(tv, tot_len, nr) = unpack('!BHB', b[:4])
assert(tv == 16)
ofs = 4
out = list()
for i in range(nr):
(s, sz) = TInt2Array.frombytes(b[ofs:], '')
out.append(s)
ofs += sz
return (_cls(out), ofs)
class TStr(str):
def fencode(self, text):
ret = bytearray()
s = text.encode('iso-8859-1')
for c in s:
if c >= 1 and c <= 127:
ret.append(c)
else:
ret.append(0xc0 | ((c >> 6) & 0x1f))
ret.append(0x80 | ((c & 0x3f)))
return bytes(ret)
def get_bytes(self):
e = self.fencode(self)
return pack('!BH', 3, len(e)) + e
@classmethod
def fdecode(_cls, s):
ret = b''
a = 0
cnt = 1
for c in s:
if (c & 0xe0) == 0xe0:
assert(cnt == 1)
a = (c & 0x0f)
cnt = 3
elif (c & 0xc0) == 0xc0:
assert(cnt == 1)
a = (c & 0x1f)
cnt = 2
elif c & 0x80:
assert(cnt == 2 or cnt == 1)
a <<= 6
a |= (c & 0x3f)
else:
a <<= 6
a |= c
cnt -= 1
if not cnt:
if a > 255:
x = bytearray(((a >> 8) & 0xff, a & 0xff))
ret += x
else:
ret += bytearray((a,))
a = 0
cnt = 1
dec = ret.decode('iso-8859-1')
#print('inp ', repr(s)[1:])
#print('ret ', repr(ret)[1:])
#print('dec', 'u' + repr(dec))
#print('enc ', repr(dec))
#print()
return dec
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
tv, sz = unpack('!BH', b[:3])
assert(tv == 3)
return (_cls(_cls.fdecode(b[3:3 + sz])), sz + 3)
class TStrArray(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
tv, totlen, nr = unpack('!BHH', b[:5])
assert(tv == 6)
ofs = 5
out = list()
for i in range(nr):
(s, sz) = TStr.frombytes(b[ofs:], '')
out.append(s)
ofs += sz
return (_cls(out), ofs)
class TStr2Array(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
tv, totlen_hi, totlen, nr = unpack('!BBHH', b[:6])
assert(tv == 9)
ofs = 6
out = list()
for i in range(nr):
(s, sz) = TStrArray.frombytes(b[ofs:], '')
out.append(s)
ofs += sz
return (_cls(out), ofs)
class TBoolArray(tuple):
def get_bytes(self):
raise NotImplementedError
@classmethod
def frombytes(_cls, b, d):
if not b:
return (_cls(d), 0)
(tv, nr) = unpack('!BH', b[:3])
assert(tv == 10)
ofs = 3
out = list()
for i in range(nr):
s = TBool(b[ofs])
out.append(s)
ofs += 1
return (_cls(out), ofs)
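if __name__ == '__main__':
    # Hypothetical self-test (not part of the original module): define a
    # concrete message type and round-trip it through the wire format.
    # The field names and _msgtype value are made up for illustration.
    class Ping(Message):
        _fields = ('seq', 'label')
        _types = (TInt, TStr)
        _defaults = (0, '')
        _msgtype = 0x0001

    p = Ping(seq=7, label='hello')
    wire = p.get_bytes()           # 5-byte length/type header + field body
    q = Ping.frombytes(wire[5:])   # frombytes() parses the body only
    assert (q.seq, q.label) == (7, 'hello')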
|
giannitedesco/funky
|
funky/message.py
|
Python
|
gpl-3.0
| 7,453
|
'''
Copyleft Sep 05, 2016 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 40;
pd.options.display.expand_frame_repr = False
import os;
home = os.path.expanduser('~') + '/'
import Utils.Util as utl
import Utils.Plots as pplt
import CLEAR.Libs.Markov as mkv
scores = pd.read_pickle(utl.outpath + 'real/HMM/h50.df')[0.5].sort_values('s', ascending=False)
CD = pd.read_pickle(utl.outpath + 'real/CD.df')  # load count data before its first use below
i = ('3L', 8724429)
a = pd.concat([CD.loc[i].xs(0, level='TIME'), CD.loc[i].xs(37, level='TIME')], axis=1)
b = a.groupby(level=0).apply(lambda x: x.values.T.reshape(-1))
x = np.append(np.append(b[0], b[1]), b[2])
i = ('3L', 8035620)
CD.loc[i]
cd = pd.DataFrame(pd.read_pickle(utl.outpath + 'real/CDEidx.df').loc[i]).T
pplt.plotSiteReal(CD.loc[i])
E = pd.read_pickle(utl.outpath + 'real/Emissions.df');
cd
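# Grid-search the selection coefficient s over [-0.5, 0.5] and plot the
# resulting likelihood curve for the site loaded above (h fixed at 0.5).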
S = np.arange(-0.5, 0.501, 0.01)
df = pd.concat(map(lambda s: mkv.computeLikelihoodReal((cd, E, s, 0.5, 0)), S));
df.index = S;
df.plot()
|
airanmehr/bio
|
Scripts/TimeSeriesPaper/Plot/likelihood.py
|
Python
|
mit
| 1,120
|
# This file is part of the Minecraft Overviewer.
#
# Minecraft Overviewer is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# Minecraft Overviewer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with the Overviewer. If not, see <http://www.gnu.org/licenses/>.
import codecs
import json
import logging
import os
import time
import traceback
from PIL import Image
from . import world
from . import util
from .files import FileReplacer, mirror_dir, get_fs_caps
class AssetManager:
"""\
    These objects provide an interface to metadata and persistent data and, at the
    same time, control the generated javascript files in the output directory.
    There should only be one instance of these per execution.
"""
def __init__(self, outputdir, custom_assets_dir=None):
"""\
Initializes the AssetManager with the top-level output directory.
It can read/parse and write/dump the overviewerConfig.js file into this
top-level directory.
"""
self.outputdir = outputdir
self.custom_assets_dir = custom_assets_dir
self.renders = dict()
self.fs_caps = get_fs_caps(self.outputdir)
# look for overviewerConfig in self.outputdir
config_loc = os.path.join(self.outputdir, "overviewerConfig.js")
try:
with open(config_loc) as c:
ovconf_str = "{" + "\n".join(c.readlines()[1:-1]) + "}"
self.overviewerConfig = json.loads(ovconf_str)
except Exception as e:
if os.path.exists(config_loc):
logging.warning("A previous overviewerConfig.js was found, "
"but I couldn't read it for some reason."
"Continuing with a blank config")
logging.debug(traceback.format_exc())
self.overviewerConfig = dict(tilesets=dict())
def get_tileset_config(self, name):
"Return the correct dictionary from the parsed overviewerConfig.js"
for conf in self.overviewerConfig['tilesets']:
if conf['path'] == name:
return conf
return dict()
def initialize(self, tilesets):
"""Similar to finalize() but calls the tilesets' get_initial_data()
instead of get_persistent_data() to compile the generated javascript
config.
"""
self._output_assets(tilesets, True)
def finalize(self, tilesets):
"""Called to output the generated javascript and all static files to
the output directory
"""
self._output_assets(tilesets, False)
def _output_assets(self, tilesets, initial):
if not initial:
def get_data(tileset):
return tileset.get_persistent_data()
else:
def get_data(tileset):
return tileset.get_initial_data()
# dictionary to hold the overviewerConfig.js settings that we will dump
# to JSON using dumps
dump = dict()
dump['CONST'] = dict(tileSize=384)
dump['CONST']['image'] = {
'defaultMarker': 'signpost.png',
'signMarker': 'signpost_icon.png',
'bedMarker': 'bed.png',
'spawnMarker': 'markers/marker_home.png',
'spawnMarker2x': 'markers/marker_home_2x.png',
'queryMarker': 'markers/marker_location.png',
'queryMarker2x': 'markers/marker_location_2x.png'
}
dump['CONST']['mapDivId'] = 'mcmap'
dump['CONST']['UPPERLEFT'] = world.UPPER_LEFT
dump['CONST']['UPPERRIGHT'] = world.UPPER_RIGHT
dump['CONST']['LOWERLEFT'] = world.LOWER_LEFT
dump['CONST']['LOWERRIGHT'] = world.LOWER_RIGHT
dump['CONST']['image']['compass'] = {
world.UPPER_LEFT: 'compass_upper-left.png',
world.UPPER_RIGHT: 'compass_upper-right.png',
world.LOWER_LEFT: 'compass_lower-left.png',
world.LOWER_RIGHT: 'compass_lower-right.png'
}
# based on the tilesets we have, group them by worlds
worlds = []
for tileset in tilesets:
full_name = get_data(tileset)['world']
if full_name not in worlds:
worlds.append(full_name)
dump['worlds'] = worlds
dump['map'] = dict()
dump['map']['debug'] = False
dump['map']['cacheTag'] = str(int(time.time()))
dump['map']['north_direction'] = 'lower-left' # only temporary
dump['map']['controls'] = {
'pan': True,
'zoom': True,
'spawn': True,
'compass': True,
'mapType': True,
'overlays': True,
'coordsBox': True,
}
dump['tilesets'] = []
for tileset in tilesets:
dump['tilesets'].append(get_data(tileset))
# write a blank image
blank = Image.new("RGBA", (1, 1), tileset.options.get('bgcolor'))
if tileset.options.get('imgformat') != 'png':
blank = blank.convert("RGB")
blank.save(os.path.join(self.outputdir, tileset.options.get('name'),
"blank." + tileset.options.get('imgformat')))
# write out config
jsondump = json.dumps(dump, indent=4)
with FileReplacer(os.path.join(self.outputdir, "overviewerConfig.js"),
capabilities=self.fs_caps) as tmpfile:
with codecs.open(tmpfile, 'w', encoding='UTF-8') as f:
f.write("var overviewerConfig = " + jsondump + ";\n")
# Copy assets, modify index.html
self.output_noconfig()
def output_noconfig(self):
# copy web assets into destdir:
global_assets = os.path.join(util.get_program_path(),
"overviewer_core", "data", "web_assets")
if not os.path.isdir(global_assets):
global_assets = os.path.join(util.get_program_path(), "web_assets")
mirror_dir(global_assets, self.outputdir, capabilities=self.fs_caps, force_writable=True)
if self.custom_assets_dir:
# We could have done something fancy here rather than just
# overwriting the global files, but apparently this what we used to
# do pre-rewrite.
mirror_dir(self.custom_assets_dir, self.outputdir, capabilities=self.fs_caps,
force_writable=True)
# symlink old icons dir because apache sux
iconsdir = os.path.join(self.outputdir, "icons")
if (os.name == "posix" and os.symlink in os.supports_dir_fd and
not os.path.islink(iconsdir) and not os.path.isdir(iconsdir)):
od_fd = os.open(self.outputdir, os.O_DIRECTORY)
try:
os.symlink("markers", "icons", target_is_directory=True, dir_fd=od_fd)
finally:
os.close(od_fd)
# write a dummy baseMarkers.js if none exists
basemarkers_path = os.path.join(self.outputdir, "baseMarkers.js")
if not os.path.exists(basemarkers_path):
with open(basemarkers_path, "w") as f:
f.write("// if you wants signs, please see genPOI.py\n")
# create overviewer.js from the source js files
js_src = os.path.join(util.get_program_path(),
"overviewer_core", "data", "js_src")
if not os.path.isdir(js_src):
js_src = os.path.join(util.get_program_path(), "js_src")
with FileReplacer(os.path.join(self.outputdir, "overviewer.js"),
capabilities=self.fs_caps) as tmpfile:
with open(tmpfile, "w") as fout:
# first copy in js_src/overviewer.js
with open(os.path.join(js_src, "overviewer.js"), 'r') as f:
fout.write(f.read())
# now copy in the rest
for js in os.listdir(js_src):
if not js.endswith("overviewer.js") and js.endswith(".js"):
with open(os.path.join(js_src, js)) as f:
fout.write(f.read())
# Add time and version in index.html
indexpath = os.path.join(self.outputdir, "index.html")
index = codecs.open(indexpath, 'r', encoding='UTF-8').read()
index = index.replace("{title}", "Minecraft Overviewer")
index = index.replace("{time}", time.strftime("%a, %d %b %Y %H:%M:%S %Z", time.localtime()))
versionstr = "%s (%s)" % (util.findGitVersion(),
util.findGitHash()[:7])
index = index.replace("{version}", versionstr)
with FileReplacer(indexpath, capabilities=self.fs_caps) as indexpath:
with codecs.open(indexpath, 'w', encoding='UTF-8') as output:
output.write(index)
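# --- Hypothetical usage sketch (not part of the original module) ---
# The render pipeline is expected to drive an AssetManager roughly like
# this, where `tilesets` is a list of objects exposing get_initial_data()
# and get_persistent_data():
#
#   mgr = AssetManager('/path/to/output')
#   mgr.initialize(tilesets)   # dump config with initial data, pre-render
#   ...                        # render tiles
#   mgr.finalize(tilesets)     # dump final config and static web assets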
|
CounterPillow/Minecraft-Overviewer
|
overviewer_core/assetmanager.py
|
Python
|
gpl-3.0
| 9,291
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
from rna_prop_ui import PropertyPanel
class MESH_MT_vertex_group_specials(Menu):
bl_label = "Vertex Group Specials"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
layout.operator("object.vertex_group_sort", icon='SORTALPHA', text="Sort by Name").sort_type = 'NAME'
layout.operator("object.vertex_group_sort", icon='ARMATURE_DATA', text="Sort by Bone Hierarchy").sort_type = 'BONE_HIERARCHY'
layout.operator("object.vertex_group_copy", icon='COPY_ID')
layout.operator("object.vertex_group_copy_to_linked", icon='LINK_AREA')
layout.operator("object.vertex_group_copy_to_selected", icon='LINK_AREA')
layout.operator("object.vertex_group_mirror", icon='ARROW_LEFTRIGHT').use_topology = False
layout.operator("object.vertex_group_mirror", text="Mirror Vertex Group (Topology)", icon='ARROW_LEFTRIGHT').use_topology = True
layout.operator("object.vertex_group_remove_from", icon='X', text="Remove from All Groups").use_all_groups = True
layout.operator("object.vertex_group_remove_from", icon='X', text="Clear Active Group").use_all_verts = True
layout.operator("object.vertex_group_remove", icon='X', text="Delete All Groups").all = True
layout.separator()
layout.operator("object.vertex_group_lock", icon='LOCKED', text="Lock All").action = 'LOCK'
layout.operator("object.vertex_group_lock", icon='UNLOCKED', text="UnLock All").action = 'UNLOCK'
layout.operator("object.vertex_group_lock", icon='LOCKED', text="Lock Invert All").action = 'INVERT'
class MESH_MT_shape_key_specials(Menu):
bl_label = "Shape Key Specials"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
layout.operator("object.shape_key_transfer", icon='COPY_ID') # icon is not ideal
layout.operator("object.join_shapes", icon='COPY_ID') # icon is not ideal
layout.operator("object.shape_key_mirror", icon='ARROW_LEFTRIGHT').use_topology = False
layout.operator("object.shape_key_mirror", text="Mirror Shape Key (Topology)", icon='ARROW_LEFTRIGHT').use_topology = True
layout.operator("object.shape_key_add", icon='ZOOMIN', text="New Shape From Mix").from_mix = True
layout.operator("object.shape_key_remove", icon='X', text="Delete All Shapes").all = True
layout.operator("object.shape_key_move", icon='TRIA_UP_BAR', text="Move To Top").type = 'TOP'
layout.operator("object.shape_key_move", icon='TRIA_DOWN_BAR', text="Move To Bottom").type = 'BOTTOM'
class MESH_UL_vgroups(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
# assert(isinstance(item, bpy.types.VertexGroup))
vgroup = item
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.prop(vgroup, "name", text="", emboss=False, icon_value=icon)
icon = 'LOCKED' if vgroup.lock_weight else 'UNLOCKED'
layout.prop(vgroup, "lock_weight", text="", icon=icon, emboss=False)
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class MESH_UL_shape_keys(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
# assert(isinstance(item, bpy.types.ShapeKey))
obj = active_data
# key = data
key_block = item
if self.layout_type in {'DEFAULT', 'COMPACT'}:
split = layout.split(0.66, False)
split.prop(key_block, "name", text="", emboss=False, icon_value=icon)
row = split.row(align=True)
if key_block.mute or (obj.mode == 'EDIT' and not (obj.use_shape_key_edit_mode and obj.type == 'MESH')):
row.active = False
if not item.id_data.use_relative:
row.prop(key_block, "frame", text="", emboss=False)
elif index > 0:
row.prop(key_block, "value", text="", emboss=False)
else:
row.label(text="")
row.prop(key_block, "mute", text="", emboss=False)
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class MESH_UL_uvmaps_vcols(UIList):
def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
# assert(isinstance(item, (bpy.types.MeshTexturePolyLayer, bpy.types.MeshLoopColorLayer)))
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.prop(item, "name", text="", emboss=False, icon_value=icon)
icon = 'RESTRICT_RENDER_OFF' if item.active_render else 'RESTRICT_RENDER_ON'
layout.prop(item, "active_render", text="", icon=icon, emboss=False)
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class MeshButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "data"
@classmethod
def poll(cls, context):
engine = context.scene.render.engine
return context.mesh and (engine in cls.COMPAT_ENGINES)
class DATA_PT_context_mesh(MeshButtonsPanel, Panel):
bl_label = ""
bl_options = {'HIDE_HEADER'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
ob = context.object
mesh = context.mesh
space = context.space_data
if ob:
layout.template_ID(ob, "data")
elif mesh:
layout.template_ID(space, "pin_id")
class DATA_PT_normals(MeshButtonsPanel, Panel):
bl_label = "Normals"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
mesh = context.mesh
split = layout.split()
col = split.column()
col.prop(mesh, "use_auto_smooth")
sub = col.column()
sub.active = mesh.use_auto_smooth and not mesh.has_custom_normals
sub.prop(mesh, "auto_smooth_angle", text="Angle")
split.prop(mesh, "show_double_sided")
class DATA_PT_texture_space(MeshButtonsPanel, Panel):
bl_label = "Texture Space"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
mesh = context.mesh
layout.prop(mesh, "texture_mesh")
layout.separator()
layout.prop(mesh, "use_auto_texspace")
row = layout.row()
row.column().prop(mesh, "texspace_location", text="Location")
row.column().prop(mesh, "texspace_size", text="Size")
class DATA_PT_vertex_groups(MeshButtonsPanel, Panel):
bl_label = "Vertex Groups"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
engine = context.scene.render.engine
obj = context.object
return (obj and obj.type in {'MESH', 'LATTICE'} and (engine in cls.COMPAT_ENGINES))
def draw(self, context):
layout = self.layout
ob = context.object
group = ob.vertex_groups.active
rows = 2
if group:
rows = 4
row = layout.row()
row.template_list("MESH_UL_vgroups", "", ob, "vertex_groups", ob.vertex_groups, "active_index", rows=rows)
col = row.column(align=True)
col.operator("object.vertex_group_add", icon='ZOOMIN', text="")
col.operator("object.vertex_group_remove", icon='ZOOMOUT', text="").all = False
col.menu("MESH_MT_vertex_group_specials", icon='DOWNARROW_HLT', text="")
if group:
col.separator()
col.operator("object.vertex_group_move", icon='TRIA_UP', text="").direction = 'UP'
col.operator("object.vertex_group_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
if ob.vertex_groups and (ob.mode == 'EDIT' or (ob.mode == 'WEIGHT_PAINT' and ob.type == 'MESH' and ob.data.use_paint_mask_vertex)):
row = layout.row()
sub = row.row(align=True)
sub.operator("object.vertex_group_assign", text="Assign")
sub.operator("object.vertex_group_remove_from", text="Remove")
sub = row.row(align=True)
sub.operator("object.vertex_group_select", text="Select")
sub.operator("object.vertex_group_deselect", text="Deselect")
layout.prop(context.tool_settings, "vertex_group_weight", text="Weight")
class DATA_PT_shape_keys(MeshButtonsPanel, Panel):
bl_label = "Shape Keys"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
@classmethod
def poll(cls, context):
engine = context.scene.render.engine
obj = context.object
return (obj and obj.type in {'MESH', 'LATTICE', 'CURVE', 'SURFACE'} and (engine in cls.COMPAT_ENGINES))
def draw(self, context):
layout = self.layout
ob = context.object
key = ob.data.shape_keys
kb = ob.active_shape_key
enable_edit = ob.mode != 'EDIT'
enable_edit_value = False
if ob.show_only_shape_key is False:
if enable_edit or (ob.type == 'MESH' and ob.use_shape_key_edit_mode):
enable_edit_value = True
row = layout.row()
rows = 2
if kb:
rows = 4
row.template_list("MESH_UL_shape_keys", "", key, "key_blocks", ob, "active_shape_key_index", rows=rows)
col = row.column()
sub = col.column(align=True)
sub.operator("object.shape_key_add", icon='ZOOMIN', text="").from_mix = False
sub.operator("object.shape_key_remove", icon='ZOOMOUT', text="").all = False
sub.menu("MESH_MT_shape_key_specials", icon='DOWNARROW_HLT', text="")
if kb:
col.separator()
sub = col.column(align=True)
sub.operator("object.shape_key_move", icon='TRIA_UP', text="").type = 'UP'
sub.operator("object.shape_key_move", icon='TRIA_DOWN', text="").type = 'DOWN'
split = layout.split(percentage=0.4)
row = split.row()
row.enabled = enable_edit
row.prop(key, "use_relative")
row = split.row()
row.alignment = 'RIGHT'
sub = row.row(align=True)
sub.label() # XXX, for alignment only
subsub = sub.row(align=True)
subsub.active = enable_edit_value
subsub.prop(ob, "show_only_shape_key", text="")
sub.prop(ob, "use_shape_key_edit_mode", text="")
sub = row.row()
if key.use_relative:
sub.operator("object.shape_key_clear", icon='X', text="")
else:
sub.operator("object.shape_key_retime", icon='RECOVER_LAST', text="")
if key.use_relative:
if ob.active_shape_key_index != 0:
row = layout.row()
row.active = enable_edit_value
row.prop(kb, "value")
split = layout.split()
col = split.column(align=True)
col.active = enable_edit_value
col.label(text="Range:")
col.prop(kb, "slider_min", text="Min")
col.prop(kb, "slider_max", text="Max")
col = split.column(align=True)
col.active = enable_edit_value
col.label(text="Blend:")
col.prop_search(kb, "vertex_group", ob, "vertex_groups", text="")
col.prop_search(kb, "relative_key", key, "key_blocks", text="")
else:
layout.prop(kb, "interpolation")
row = layout.column()
row.active = enable_edit_value
row.prop(key, "eval_time")
class DATA_PT_uv_texture(MeshButtonsPanel, Panel):
bl_label = "UV Maps"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
me = context.mesh
row = layout.row()
col = row.column()
col.template_list("MESH_UL_uvmaps_vcols", "uvmaps", me, "uv_textures", me.uv_textures, "active_index", rows=1)
col = row.column(align=True)
col.operator("mesh.uv_texture_add", icon='ZOOMIN', text="")
col.operator("mesh.uv_texture_remove", icon='ZOOMOUT', text="")
class DATA_PT_vertex_colors(MeshButtonsPanel, Panel):
bl_label = "Vertex Colors"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
me = context.mesh
row = layout.row()
col = row.column()
col.template_list("MESH_UL_uvmaps_vcols", "vcols", me, "vertex_colors", me.vertex_colors, "active_index", rows=1)
col = row.column(align=True)
col.operator("mesh.vertex_color_add", icon='ZOOMIN', text="")
col.operator("mesh.vertex_color_remove", icon='ZOOMOUT', text="")
class DATA_PT_customdata(MeshButtonsPanel, Panel):
bl_label = "Geometry Data"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
def draw(self, context):
layout = self.layout
obj = context.object
me = context.mesh
col = layout.column()
col.operator("mesh.customdata_mask_clear", icon='X')
col.operator("mesh.customdata_skin_clear", icon='X')
if me.has_custom_normals:
col.operator("mesh.customdata_custom_splitnormals_clear", icon='X')
else:
col.operator("mesh.customdata_custom_splitnormals_add", icon='ZOOMIN')
col = layout.column()
col.enabled = (obj.mode != 'EDIT')
col.prop(me, "use_customdata_vertex_bevel")
col.prop(me, "use_customdata_edge_bevel")
col.prop(me, "use_customdata_edge_crease")
class DATA_PT_custom_props_mesh(MeshButtonsPanel, PropertyPanel, Panel):
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
_context_path = "object.data"
_property_type = bpy.types.Mesh
if __name__ == "__main__": # only for live edit.
bpy.utils.register_module(__name__)
|
Microvellum/Fluid-Designer
|
win64-vc/2.78/scripts/startup/bl_ui/properties_data_mesh.py
|
Python
|
gpl-3.0
| 15,183
|
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import json
from mock.mock import MagicMock, call, patch
from stacks.utils.RMFTestCase import *
import resource_management.core.source
from test_storm_base import TestStormBase
class TestStormNimbus(TestStormBase):
CONFIG_OVERRIDES = {"serviceName":"STORM", "role":"NIMBUS"}
def test_configure_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "configure",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertNoMoreResources()
def test_start_default(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "start",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_default()
self.assertResourceCalled('Execute', 'supervisorctl start storm-nimbus',
wait_for_finish = False,
)
self.assertNoMoreResources()
@patch("os.path.exists")
def test_stop_default(self, path_exists_mock):
# Last bool is for the pid file
path_exists_mock.side_effect = [False, False, True]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "stop",
config_file="default.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'supervisorctl stop storm-nimbus',
wait_for_finish = False,
)
self.assertNoMoreResources()
def test_configure_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "configure",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertNoMoreResources()
def test_start_secured(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "start",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assert_configure_secured()
self.assertResourceCalled('Execute', 'supervisorctl start storm-nimbus',
wait_for_finish = False,
)
self.assertNoMoreResources()
@patch("os.path.exists")
def test_stop_secured(self, path_exists_mock):
# Last bool is for the pid file
path_exists_mock.side_effect = [False, False, True]
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "stop",
config_file="secured.json",
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES
)
self.assertResourceCalled('Execute', 'supervisorctl stop storm-nimbus',
wait_for_finish = False,
)
self.assertNoMoreResources()
def test_pre_upgrade_restart(self):
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "pre_upgrade_restart",
config_file="default.json",
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.2.1.0-2067'), sudo=True)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-nimbus', '2.2.1.0-2067'), sudo=True)
self.assertNoMoreResources()
def test_pre_upgrade_restart_23(self):
config_file = self.get_src_folder()+"/test/python/stacks/2.1/configs/default.json"
with open(config_file, "r") as f:
json_content = json.load(f)
version = '2.3.0.0-1234'
json_content['commandParams']['version'] = version
mocks_dict = {}
self.executeScript(self.COMMON_SERVICES_PACKAGE_DIR + "/scripts/nimbus_prod.py",
classname = "Nimbus",
command = "pre_upgrade_restart",
config_dict = json_content,
config_overrides = self.CONFIG_OVERRIDES,
stack_version = self.STACK_VERSION,
target = RMFTestCase.TARGET_COMMON_SERVICES,
mocks_dict = mocks_dict)
self.assertResourceCalledIgnoreEarlier("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-client', '2.3.0.0-1234'), sudo=True)
self.assertResourceCalled("Execute", ('ambari-python-wrap', '/usr/bin/hdp-select', 'set', 'storm-nimbus', '2.3.0.0-1234'), sudo=True)
|
arenadata/ambari
|
ambari-server/src/test/python/stacks/2.1/STORM/test_storm_nimbus_prod.py
|
Python
|
apache-2.0
| 6,401
|
"""
Generated by 'django-admin startproject' using Django 1.9.12.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Application definition
INSTALLED_APPS = [
# ... all django defaults
'webpack_loader',
'webpack_helper',
# ... all your remaining apps
'livereload' # ensures that django refreshes on any changes
]
MIDDLEWARE_CLASSES = [
# ... all your other middleware
'livereload.middleware.LiveReloadScript'
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# NOTE: All the settings below are already set to sensible
# defaults
WEBPACK_HELPER = {}
# The location of your node_modules (will be used to find the webpack binary)
# Can be a relative or absolute path
WEBPACK_HELPER['NODE_MODULES'] = 'node_modules'
# the base output directory for webpack
# Can be a relative or absolute path
WEBPACK_HELPER['BASE_DIR'] = os.path.join(BASE_DIR, 'webpack')
# --- paths relative to WEBPACK_HELPER['BASE_DIR'], the folder that will be
# used to collect all webpack-related output
# statistics generated by `webpack-bundle-tracker` (node app)
WEBPACK_HELPER['STATS_DIR'] = 'stats/'
# static files (all files in here will be exposed to the internet by django)
WEBPACK_HELPER['STATIC_DIR'] = 'static/'
# webpack_helper generated configuration files go in here as `appname.json`
WEBPACK_HELPER['CONFIG_DIR'] = 'config/'
# --- relative to STATIC_DIR!
# this is where webpack places your compiled bundles
WEBPACK_HELPER['BUNDLE_DIR'] = 'bundles/'
# Where will the bundles live when we SERVE the website?
# Where will the contents of `static/bundles/` become visible?
# it can be a full URL
WEBPACK_HELPER['PUBLIC_PATH_BASE'] = STATIC_URL
# We do this so that django's collectstatic copies our bundles to the STATIC_ROOT
# or syncs them to whatever storage we use.
STATICFILES_DIRS = [
# ... all your other directories
] + [os.path.join(WEBPACK_HELPER['BASE_DIR'], WEBPACK_HELPER['STATIC_DIR'])]
# NOTE You must add an entry below for each app you
# wish to include.
# See django-webpack-loader's documentation for more details
WEBPACK_LOADER = {
'DEFAULT': {
# bundles go here (relative to our static apps directory)
        # i.e. `bundles/` is inside the staticfiles dir {WEBPACK_BASE_DIR}/static/
'BUNDLE_DIR_NAME': 'bundles/',
# NOTE the stats file path is built as follows: {WEBPACK_BASE_DIR}/{WEBPACK_STATS_DIR}/{applabel}.json
        'STATS_FILE': os.path.join(WEBPACK_HELPER['BASE_DIR'], WEBPACK_HELPER['STATS_DIR'], 'applabel.json'),
}
}
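# Worked example of the path layout above (illustrative; assumes the project
# lives at /srv/app, so BASE_DIR == '/srv/app'):
#   WEBPACK_HELPER['BASE_DIR']  -> /srv/app/webpack
#   stats for an app 'shop'     -> /srv/app/webpack/stats/shop.json
#   compiled bundles on disk    -> /srv/app/webpack/static/bundles/
#   public URL of a bundle      -> /static/bundles/<name>.js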
|
chriscz/django-webpack-helper
|
settings.py
|
Python
|
mpl-2.0
| 2,881
|
from decimal import *
class PI:
    # Sets decimal to 1000 digits of precision
getcontext().prec = 1000
@staticmethod
def factorial(n):
# if n<1:
# return 1
# else:
# return n * PI.factorial(n-1)
result = 1
for i in xrange(2, n+1):
result *= i
return result
@staticmethod
def plouffBig(n): #http://en.wikipedia.org/wiki/Bailey%E2%80%93Borwein%E2%80%93Plouffe_formula
pi = Decimal(0)
k = 0
while k < n:
pi += (Decimal(1)/(16**k))*((Decimal(4)/(8*k+1))-(Decimal(2)/(8*k+4))-(Decimal(1)/(8*k+5))-(Decimal(1)/(8*k+6)))
k += 1
return pi
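    # Note on plouffBig above: the loop evaluates the BBP series
    #   pi = sum_{k>=0} 16**-k * (4/(8k+1) - 2/(8k+4) - 1/(8k+5) - 1/(8k+6)),
    # so each extra term adds roughly one hexadecimal (~1.2 decimal) digit.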
@staticmethod
def bellardBig(n): #http://en.wikipedia.org/wiki/Bellard%27s_formula
pi = Decimal(0)
k = 0
while k < n:
pi += (Decimal(-1)**k/(1024**k))*( Decimal(256)/(10*k+1) + Decimal(1)/(10*k+9) - Decimal(64)/(10*k+3) - Decimal(32)/(4*k+1) - Decimal(4)/(10*k+5) - Decimal(4)/(10*k+7) -Decimal(1)/(4*k+3))
k += 1
pi = pi * 1/(2**6)
return pi
@staticmethod
def chudnovskyBig(n): #http://en.wikipedia.org/wiki/Chudnovsky_algorithm
pi = Decimal(0)
k = 0
while k < n:
pi += (Decimal(-1)**k)*(Decimal(PI.factorial(6*k))/((PI.factorial(k)**3)*(PI.factorial(3*k)))* (13591409+545140134*k)/(640320**(3*k)))
k += 1
pi = pi * Decimal(10005).sqrt()/4270934400
pi = pi**(-1)
return pi
@staticmethod
def calculate():
return PI.bellardBig(1000)
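# Minimal usage sketch (not part of the original module; the import path is
# taken from this file's location, chunsabot/pi.py):
#   from chunsabot.pi import PI
#   print(str(PI.calculate())[:12])   # -> '3.1415926535'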
|
susemeee/Chunsabot-framework
|
chunsabot/pi.py
|
Python
|
mit
| 1,587
|
import sys, socket, re, time, os.path, os
#from pyrrd.rrd import DataSource, RRA, RRD
#from pyrrd.graph import DEF, LINE, GPRINT, Graph
RRD_DB_LOCATION = '/www/rrdtool'
RRD_IMAGES_LOCATION = '/www/rrdtool'
def get_energy():
tx_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
tx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
tx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
tx_sock.settimeout(2.0)
rx_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rx_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
rx_sock.settimeout(2.0)
rx_sock.bind(('0.0.0.0', 9761))
msg = '100,@?W'
tx_sock.sendto(msg, ('255.255.255.255', 9760))
data = rx_sock.recv(1024)
valid = re.compile(r'^\d{1,3},\?W=([0-9,]+)\r\n$')
match = valid.match(data)
if match:
power = match.group(1).split(',')
return {
'current': power[0],
'max_today': power[1],
'total_today': power[2],
'total_yesterday': power[3],
}
return None
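# Example reply accepted by the regex above (values and units are an
# assumption; the split() fields are returned as strings):
#   '100,?W=201,1804,5230,6110\r\n'
#   -> {'current': '201', 'max_today': '1804',
#       'total_today': '5230', 'total_yesterday': '6110'}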
def get_rrd_database():
rrd_db = '/www/rrdtool/power.rrd'
if not os.path.isfile(rrd_db):
print ("Creating RRD database for power")
os.system("rrdtool create %s \
-s 60 \
DS:power:GAUGE:120:U:U \
RRA:AVERAGE:0.5:1:10080 \
RRA:AVERAGE:0.5:60:720 \
RRA:AVERAGE:0.5:180:480 \
RRA:AVERAGE:0.5:1440:730" % rrd_db)
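    # Archive layout implied by the arguments above (step = 60 s):
    # 1-minute averages for 7 days (10080 rows), hourly for 30 days (720),
    # 3-hourly for 60 days (480) and daily for about two years (730).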
return rrd_db
def process_energy():
energy = get_energy()
print ("Current energy usage: %s" % energy)
rrd = get_rrd_database()
# insert value into rrd
os.system("rrdtool update %s \
-t power \
N:%s" % (rrd, energy['current']))
# create graphs
create_graph(rrd, 'hour')
create_graph(rrd, 'day')
create_graph(rrd, 'week')
create_graph(rrd, 'month')
create_graph(rrd, 'year')
def create_graph(rrd, interval):
os.system("rrdtool graph '/www/rrdtool/power-%s.png' \
--lazy \
-s -1%s \
-t 'Power Usage (last %s)' \
-h 200 \
-w 900 \
-a PNG \
-v Watts \
-l 0 \
DEF:power=%s:power:AVERAGE \
AREA:power#0000FF:Power \
GPRINT:power:MIN:\" Min\\: %%2.lf\" \
GPRINT:power:MAX:\" Max\\: %%2.lf\" \
GPRINT:power:AVERAGE:\" Avg\\: %%4.1lf\" \
GPRINT:power:LAST:\" Current\\: %%2.lf Watts\\n\" \
" % (interval, interval, interval, rrd))
if __name__ == "__main__":
process_energy()
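# Scheduling note (an assumption, not stated in the original source): with a
# 60-second RRD step and a 120-second heartbeat, the script is intended to be
# run about once per minute, e.g. from cron:
#   * * * * * python /path/to/energymonitor.py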
|
araines/energymonitor
|
energymonitor.py
|
Python
|
mit
| 2,340
|
# Copyright (c) 2013-2014 Parallels, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import shlex
import subprocess
from pcsnovadriver.pcs import prlsdkapi_proxy
pc = prlsdkapi_proxy.consts
def compress_ploop(src, dst):
cmd1 = ['tar', 'cO', '-C', src, '.']
cmd2 = ['prlcompress', '-p']
dst_file = open(dst, 'w')
    try:
        p1 = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    except Exception:
        # Close the destination and re-raise; without the raise, a failed
        # Popen would leave p1 undefined for the code below.
        dst_file.close()
        raise
try:
p2 = subprocess.Popen(cmd2, stdin=p1.stdout, stdout=dst_file)
except Exception:
p1.kill()
p1.wait()
raise
finally:
dst_file.close()
p1.stdout.close()
ret1 = p1.wait()
ret2 = p2.wait()
msg = ""
if ret1:
msg = '%r returned %d' % (cmd1, ret1)
if ret2:
msg += ', %r returned %d' % (cmd2, ret2)
if msg:
raise Exception(msg)
def uncompress_ploop(src_path, dst_path, src_file=None, root_helper=""):
cmd1 = ['prlcompress', '-u']
cmd2 = shlex.split(root_helper) + ['tar', 'x', '-C', dst_path]
if src_file is None:
src_file = open(src_path)
try:
p1 = subprocess.Popen(cmd1, stdin=src_file, stdout=subprocess.PIPE)
finally:
src_file.close()
try:
p2 = subprocess.Popen(cmd2, stdin=p1.stdout)
except Exception:
p1.kill()
p1.wait()
raise
p1.stdout.close()
ret1 = p1.wait()
ret2 = p2.wait()
msg = ""
if ret1:
msg = '%r returned %d' % (cmd1, ret1)
if ret2:
msg += ', %r returned %d' % (cmd2, ret2)
if msg:
raise Exception(msg)
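# Illustrative pairing of the two helpers above (paths are hypothetical):
#   compress_ploop('/vz/private/101/root.hdd', '/tmp/root.tar.prl')
#   uncompress_ploop('/tmp/root.tar.prl', '/vz/private/102/root.hdd',
#                    root_helper='sudo')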
def _get_ct_boot_disk(ve):
"Get first disk in config."
hdd_count = ve.get_devs_count_by_type(pc.PDE_HARD_DISK)
if hdd_count < 1:
raise Exception("There are no hard disks in VE.")
return ve.get_dev_by_type(pc.PDE_HARD_DISK, 0)
def _get_vm_boot_disk(ve):
"Get first hard disk from the boot devices list."
n = ve.get_boot_dev_count()
for i in xrange(n):
bootdev = ve.get_boot_dev(i)
if bootdev.get_type() != pc.PDE_HARD_DISK:
continue
hdd = ve.get_dev_by_type(pc.PDE_HARD_DISK,
bootdev.get_index())
return hdd
else:
raise Exception("Can't find boot hard disk.")
def get_boot_disk(ve):
if ve.get_vm_type() == pc.PVT_VM:
return _get_vm_boot_disk(ve)
else:
return _get_ct_boot_disk(ve)
def getstatusoutput(cmd):
"""getstatusoutput from commands module supports only string
commands, which isn't convenient.
"""
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
out = p.stdout.read()
ret = p.wait()
return ret, out
def system_exc(cmd):
"""Run command and raise exception in case of non-zero
exit code.
"""
ret = subprocess.call(cmd)
if ret:
raise Exception("'%r' returned %d" % (cmd, ret))
def convert_image(src, dst, disk_format, root_helper=''):
"""Convert image from ploop format to any, that qemu-img supports.
src: path to directory with ploop
dst: path to output file name
disk_format: disk format string
"""
dd_path = os.path.join(src, 'DiskDescriptor.xml')
cmd = shlex.split(root_helper) + ['ploop', 'mount', dd_path]
ret, out = getstatusoutput(cmd)
try:
        ro = re.search(r'dev=(\S+)', out)
if not ro:
raise Exception('Invalid output from %r: %s' % (cmd, out))
ploop_dev = ro.group(1)
system_exc(shlex.split(root_helper) + ['qemu-img', 'convert',
'-f', 'raw', '-O', disk_format, ploop_dev, dst])
finally:
system_exc(shlex.split(root_helper) + ['ploop', 'umount', dd_path])
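# Example invocation (hypothetical paths; any output format qemu-img
# supports should work):
#   convert_image('/vz/private/101/root.hdd', '/tmp/disk.qcow2', 'qcow2',
#                 root_helper='sudo')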
class CPloopUploader(object):
def __init__(self, hdd_path):
self.hdd_path = hdd_path
def start(self):
self.cmd1 = ['tar', 'cO', '-C', self.hdd_path, '.']
self.cmd2 = ['prlcompress', '-p']
self.p1 = subprocess.Popen(self.cmd1, stdout=subprocess.PIPE)
try:
self.p2 = subprocess.Popen(self.cmd2, stdin=self.p1.stdout,
stdout=subprocess.PIPE)
except Exception:
self.p1.kill()
self.p1.wait()
raise
self.p1.stdout.close()
return self.p2.stdout
def wait(self):
ret1 = self.p1.wait()
ret2 = self.p2.wait()
msg = ""
if ret1:
msg = '%r returned %d' % (self.cmd1, ret1)
if ret2:
msg += ', %r returned %d' % (self.cmd2, ret2)
if msg:
raise Exception(msg)
class PloopMount(object):
"""This class is for mounting ploop devices using with statement:
with PloopMount('/parallels/my-vm/harddisk.hdd') as dev_path:
# do something
:param path: A path to parallels harddisk dir
:param chown: If true, chown device to nova:nova
:param root_helper: root_helper
"""
def __init__(self, path, chown=False, root_helper=""):
self.path = path
self.root_helper = root_helper
self.chown = chown
def __enter__(self):
self.dd_path = os.path.join(self.path, 'DiskDescriptor.xml')
cmd = (shlex.split(self.root_helper) +
['ploop', 'mount', self.dd_path])
ret, out = getstatusoutput(cmd)
if ret:
raise Exception("Can't mount ploop %s" % self.path)
        ro = re.search(r'dev=(\S+)', out)
if not ro:
raise Exception('Invalid output from %r: %s' % (cmd, out))
self.ploop_dev = ro.group(1)
if self.chown:
cmd = (shlex.split(self.root_helper) +
['chown', 'nova:nova', self.ploop_dev])
ret, out = getstatusoutput(cmd)
if ret:
self._umount()
raise Exception("chown failed with code %d" % ret)
return self.ploop_dev
def _umount(self):
system_exc(shlex.split(self.root_helper) +
['ploop', 'umount', self.dd_path])
def __exit__(self, type, value, traceback):
self._umount()
|
CloudServer/pcs-nova-driver
|
pcsnovadriver/pcs/utils.py
|
Python
|
apache-2.0
| 6,694
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Facilities for creating multiple test combinations.
Here is an example of testing various optimizers in Eager and Graph mode:
class AdditionExample(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(mode=["graph", "eager"],
optimizer=[AdamOptimizer(),
GradientDescentOptimizer()]))
def testOptimizer(self, optimizer):
... f(optimizer)...
This will run `testOptimizer` 4 times with the specified optimizers: 2 in
Eager and 2 in Graph mode.
The test will be provided with arguments that match the arguments of combine
by name. It is necessary to request all arguments, except for `mode`, which is
optional.
`combine()` function is available for creating a cross product of various
options. `times()` function exists for creating a product of N `combine()`-ed
results. See below.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import sys
import types
import unittest
from absl.testing import parameterized
import six
from tensorflow.contrib.cluster_resolver import TPUClusterResolver
from tensorflow.contrib.distribute.python import mirrored_strategy as mirrored_lib
from tensorflow.contrib.distribute.python import one_device_strategy as one_device_lib
from tensorflow.contrib.distribute.python import tpu_strategy as tpu_lib
from tensorflow.contrib.optimizer_v2 import adagrad as adagrad_v2
from tensorflow.contrib.optimizer_v2 import adam as adam_v2
from tensorflow.contrib.optimizer_v2 import gradient_descent as gradient_descent_v2
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import rmsprop
from tensorflow.python.util import tf_inspect
GPU_TEST = "test_gpu" in sys.argv[0]
TPU_TEST = "test_tpu" in sys.argv[0]
def generate(combinations):
"""A decorator for generating test cases of a test method or a test class.
Args:
combinations: a list of dictionaries created using combine() and times().
Restrictions:
-- the "mode" argument can be either "eager" or "graph". It's "graph" by
default.
-- arguments of the test method must match by name to get the corresponding
value of the combination. Tests must accept all arguments except the
"mode", "required_tpu" and "required_gpus".
-- "distribution" argument is special and optional. It is meant for passing
instances of DistributionStrategy. Each instance is to be passed as via
`NamedDistribution`. If using "distribution", "required_gpus" and
"required_tpu" should be specified via the NamedDistribution instance,
rather than as separate arguments.
-- "required_tpu" argument is special and optional. If not `None`, then the
test will be skipped if TPUs aren't available.
-- "required_gpus" argument is special and optional. If not `None`, then the
test will be skipped if the specified number of GPUs aren't available.
Returns:
a decorator that will cause the test method or the test class to be run
under the specified conditions.
Raises:
ValueError - if "mode" argument wasn't either "eager" or "graph" or if other
arguments were not accepted by the test method.
"""
def decorator(test_method_or_class):
"""The decorator to be returned."""
# Generate good test names that can be used with --test_filter.
named_combinations = []
for combination in combinations:
# We use OrderedDicts in `combine()` and `times()` to ensure stable
# order of keys in each dictionary.
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format(
"".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) + [("testcase_name",
"_test{}".format(name))]))
if isinstance(test_method_or_class, type):
class_object = test_method_or_class
class_object._test_method_ids = test_method_ids = {}
for name, test_method in six.iteritems(class_object.__dict__.copy()):
if (name.startswith(unittest.TestLoader.testMethodPrefix) and
isinstance(test_method, types.FunctionType)):
delattr(class_object, name)
methods = {}
parameterized._update_class_dict_for_param_test_case(
class_object.__name__, methods, test_method_ids, name,
parameterized._ParameterizedTestIter(
_augment_with_special_arguments(test_method),
named_combinations, parameterized._NAMED, name))
for method_name, method in six.iteritems(methods):
setattr(class_object, method_name, method)
return class_object
else:
test_method = _augment_with_special_arguments(test_method_or_class)
return parameterized.named_parameters(*named_combinations)(test_method)
return decorator
def _augment_with_special_arguments(test_method):
def decorated(self, **kwargs):
"""A wrapped test method that treats some arguments in a special way."""
mode = kwargs.pop("mode", "graph")
distribution = kwargs.get("distribution", None)
required_tpu = kwargs.pop("required_tpu", False)
required_gpus = kwargs.pop("required_gpus", None)
if distribution:
assert required_gpus is None, (
"Do not use `required_gpus` and `distribution` together.")
assert required_tpu is False, (
"Do not use `required_tpu` and `distribution` together.")
required_gpus = distribution.required_gpus
required_tpu = distribution.required_tpu
if required_tpu and not TPU_TEST:
self.skipTest("Test requires a TPU, but it's not available.")
if not required_tpu and TPU_TEST:
self.skipTest("Test that doesn't require a TPU.")
if not required_gpus:
if GPU_TEST:
self.skipTest("Test that doesn't require GPUs.")
elif context.num_gpus() < required_gpus:
# TODO(priyag): Consider allowing tests in graph mode using soft
# placement.
self.skipTest(
"{} GPUs are not available for this test. {} GPUs are available".
format(required_gpus, context.num_gpus()))
# At this point, `kwargs` doesn't have `required_gpus` or `required_tpu`
# that the user might have specified. `kwargs` still has `mode`, which
# the test is allowed to accept or ignore.
requested_arguments = tf_inspect.getfullargspec(test_method).args
missing_arguments = set(list(kwargs.keys()) + ["self"]).difference(
set(requested_arguments + ["mode"]))
if missing_arguments:
raise ValueError("The test is missing arguments {} .".format(
missing_arguments))
kwargs_to_pass = {}
for arg in requested_arguments:
if arg == "self":
kwargs_to_pass[arg] = self
else:
kwargs_to_pass[arg] = kwargs[arg]
if mode == "eager":
with context.eager_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
elif mode == "graph":
with ops.Graph().as_default(), context.graph_mode():
if distribution:
kwargs_to_pass["distribution"] = distribution.strategy
test_method(**kwargs_to_pass)
else:
raise ValueError(
"'mode' has to be either 'eager' or 'graph' and not {}".format(
mode))
return decorated
def combine(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]`
or `option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
if not kwargs:
return [OrderedDict()]
sort_by_key = lambda k: k[0][0]
kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
first = list(kwargs.items())[0]
rest = dict(list(kwargs.items())[1:])
rest_combined = combine(**rest)
key = first[0]
values = first[1]
if not isinstance(values, list):
values = [values]
return [
OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
for v in values
for combined in rest_combined
]
def times(*combined):
"""Generate a product of N sets of combinations.
times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])
Args:
*combined: N lists of dictionaries that specify combinations.
Returns:
a list of dictionaries for each combination.
Raises:
ValueError: if some of the inputs have overlapping keys.
"""
assert combined
if len(combined) == 1:
return combined[0]
first = combined[0]
rest_combined = times(*combined[1:])
combined_results = []
for a in first:
for b in rest_combined:
if set(a.keys()).intersection(set(b.keys())):
raise ValueError("Keys need to not overlap: {} vs {}".format(
a.keys(), b.keys()))
combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
return combined_results
class NamedObject(object):
"""A class that translates an object into a good test name."""
def __init__(self, name, obj):
self._name = name
self._obj = obj
def __getattr__(self, name):
return getattr(self._obj, name)
def __call__(self, *args, **kwargs):
return self._obj(*args, **kwargs)
def __repr__(self):
return self._name
class NamedDistribution(object):
"""Translates DistributionStrategy and its data into a good name."""
def __init__(self, name, distribution_fn, required_gpus=None,
required_tpu=False):
self._distribution_fn = distribution_fn
self._name = name
self._required_gpus = required_gpus
self._required_tpu = required_tpu
def __repr__(self):
return self._name
@property
def strategy(self):
return self._distribution_fn()
@property
def required_gpus(self):
return self._required_gpus
@property
def required_tpu(self):
return self._required_tpu
# pylint: disable=g-long-lambda
default_strategy = NamedDistribution(
"Default",
distribution_strategy_context._get_default_distribution_strategy, # pylint: disable=protected-access
required_gpus=None)
one_device_strategy = NamedDistribution(
"OneDeviceCPU", lambda: one_device_lib.OneDeviceStrategy("/cpu:0"),
required_gpus=None)
tpu_strategy = NamedDistribution(
"TPU", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=2),
required_tpu=True)
tpu_strategy_one_step = NamedDistribution(
"TPUOneStep", lambda: tpu_lib.TPUStrategy(
TPUClusterResolver(""), steps_per_run=1),
required_tpu=True)
mirrored_strategy_with_one_cpu = NamedDistribution(
"Mirrored1CPU",
lambda: mirrored_lib.MirroredStrategy(["/cpu:0"]))
mirrored_strategy_with_one_gpu = NamedDistribution(
"Mirrored1GPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0"]),
required_gpus=1)
mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"MirroredCPUAndGPU",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
mirrored_strategy_with_two_gpus = NamedDistribution(
"Mirrored2GPUs",
lambda: mirrored_lib.MirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
core_mirrored_strategy_with_one_cpu = NamedDistribution(
"CoreMirrored1CPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/cpu:0"]))
core_mirrored_strategy_with_one_gpu = NamedDistribution(
"CoreMirrored1GPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_gpu_and_cpu = NamedDistribution(
"CoreMirroredCPUAndGPU",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/cpu:0"]),
required_gpus=1)
core_mirrored_strategy_with_two_gpus = NamedDistribution(
"CoreMirrored2GPUs",
lambda: mirrored_lib.CoreMirroredStrategy(["/gpu:0", "/gpu:1"]),
required_gpus=2)
gradient_descent_optimizer_v1_fn = NamedObject(
"GradientDescentV1", lambda: gradient_descent.GradientDescentOptimizer(0.2))
adagrad_optimizer_v1_fn = NamedObject(
"AdagradV1", lambda: adagrad.AdagradOptimizer(0.001))
adam_optimizer_v1_fn = NamedObject("AdamV1",
lambda: adam.AdamOptimizer(0.001, epsilon=1))
rmsprop_optimizer_v1_fn = NamedObject(
"RmsPropV1", lambda: rmsprop.RMSPropOptimizer(0.001))
optimizers_v1 = [gradient_descent_optimizer_v1_fn, adagrad_optimizer_v1_fn]
gradient_descent_optimizer_v2_fn = NamedObject(
"GradientDescentV2",
lambda: gradient_descent_v2.GradientDescentOptimizer(0.2))
adagrad_optimizer_v2_fn = NamedObject(
"AdagradV2", lambda: adagrad_v2.AdagradOptimizer(0.001))
adam_optimizer_v2_fn = NamedObject(
"AdamV2", lambda: adam_v2.AdamOptimizer(0.001, epsilon=1))
optimizers_v2 = [gradient_descent_optimizer_v2_fn, adagrad_optimizer_v2_fn]
graph_and_eager_modes = ["graph", "eager"]
def distributions_and_v1_optimizers():
"""A common set of combination with DistributionStrategies and Optimizers."""
return combine(
distribution=[
one_device_strategy,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
core_mirrored_strategy_with_gpu_and_cpu,
core_mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v1)
def distributions_and_v2_optimizers():
"""DistributionStrategies and V2 Optimizers."""
return combine(
distribution=[
one_device_strategy,
mirrored_strategy_with_gpu_and_cpu,
mirrored_strategy_with_two_gpus,
core_mirrored_strategy_with_gpu_and_cpu,
core_mirrored_strategy_with_two_gpus,
],
optimizer_fn=optimizers_v2)
|
asimshankar/tensorflow
|
tensorflow/contrib/distribute/python/combinations.py
|
Python
|
apache-2.0
| 15,137
|
from __future__ import print_function
import os
from bs4 import BeautifulSoup
import requests
import sys
import pkgutil
import pprint
def parse(content, no_request):
soup = BeautifulSoup(content, 'lxml')
data = {'global': {
'title': '',
'desc': '',
'picture_url': '',
'picture_width': '',
'picture_height': '',
'video_url': '',
'video_width': '',
'video_height': ''
}}
pkgpath = os.path.join(os.path.dirname(__file__), 'backends')
for _, name, _ in pkgutil.iter_modules([pkgpath]):
mod = __import__(".".join(['embeder.backends', name]),
fromlist=['parse', 'MAPPING'])
data[name] = {}
backend_data = mod.parse(soup, no_request)
data[name] = backend_data
for key, value in backend_data.items():
if key in data['global']:
data['global'][key] = value
if hasattr(mod, 'MAPPING'):
for global_key, backend_key in mod.MAPPING.items():
if backend_key in backend_data:
data['global'][global_key] = backend_data[backend_key]
return data
def get(link, no_request=False):
response = requests.get(link)
return parse(response.text, no_request)
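# Expected backend module shape (an assumption inferred from parse() above):
# every module under embeder/backends/ exposes
#   def parse(soup, no_request): ...    # returns a dict of extracted values
# and may optionally define
#   MAPPING = {'title': 'og_title'}     # maps global keys to backend keys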
if __name__ == '__main__':
try:
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(get(sys.argv[1]))
except IndexError:
print("USAGE: python", __file__, "<url>", file=sys.stderr)
|
fgaudin/Embeder
|
embeder/embed.py
|
Python
|
lgpl-3.0
| 1,554
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import is_string, py2to3, PY3
from .comments import Comment
if PY3:
unicode = str
@py2to3
class Setting(object):
def __init__(self, setting_name, parent=None, comment=None):
self.setting_name = setting_name
self.parent = parent
self._set_initial_value()
self._set_comment(comment)
self._populated = False
def _set_initial_value(self):
self.value = []
def _set_comment(self, comment):
self.comment = Comment(comment)
def reset(self):
self.__init__(self.setting_name, self.parent)
@property
def source(self):
return self.parent.source if self.parent is not None else None
@property
def directory(self):
return self.parent.directory if self.parent is not None else None
def populate(self, value, comment=None):
"""Mainly used at parsing time, later attributes can be set directly."""
if not self._populated:
self._populate(value)
self._set_comment(comment)
self._populated = True
else:
self._set_initial_value()
self._set_comment(None)
self.report_invalid_syntax("Setting '%s' used multiple times."
% self.setting_name, 'ERROR')
def _populate(self, value):
self.value = value
def is_set(self):
return bool(self.value)
def is_for_loop(self):
return False
def report_invalid_syntax(self, message, level='ERROR'):
self.parent.report_invalid_syntax(message, level)
def _string_value(self, value):
return value if is_string(value) else ' '.join(value)
def _concat_string_with_value(self, string, value):
if string:
return string + ' ' + self._string_value(value)
return self._string_value(value)
def as_list(self):
return self._data_as_list() + self.comment.as_list()
def _data_as_list(self):
ret = [self.setting_name]
if self.value:
ret.extend(self.value)
return ret
def __nonzero__(self):
return self.is_set()
def __iter__(self):
return iter(self.value or ())
def __unicode__(self):
return unicode(self.value or '')
class StringValueJoiner(object):
def __init__(self, separator):
self._separator = separator
def join_string_with_value(self, string, value):
if string:
return string + self._separator + self.string_value(value)
return self.string_value(value)
def string_value(self, value):
if is_string(value):
return value
return self._separator.join(value)
class Documentation(Setting):
def _set_initial_value(self):
self.value = ''
def _populate(self, value):
self.value = self._concat_string_with_value(self.value, value)
def _string_value(self, value):
return value if is_string(value) else ''.join(value)
def _data_as_list(self):
return [self.setting_name, self.value]
class Template(Setting):
def _set_initial_value(self):
self.value = None
def _populate(self, value):
self.value = self._concat_string_with_value(self.value, value)
def is_set(self):
return self.value is not None
def is_active(self):
return self.value and self.value.upper() != 'NONE'
def _data_as_list(self):
ret = [self.setting_name]
if self.value:
ret.append(self.value)
return ret
class Fixture(Setting):
# `keyword`, `is_comment` and `assign` make the API compatible with Step.
@property
def keyword(self):
return self.name or ''
def is_comment(self):
return False
def _set_initial_value(self):
self.name = None
self.args = []
self.assign = ()
def _populate(self, value):
if not self.name:
self.name = value[0] if value else ''
value = value[1:]
self.args.extend(value)
def is_set(self):
return self.name is not None
def is_active(self):
return self.name and self.name.upper() != 'NONE'
def _data_as_list(self):
ret = [self.setting_name]
if self.name or self.args:
ret.append(self.name or '')
if self.args:
ret.extend(self.args)
return ret
class Timeout(Setting):
def _set_initial_value(self):
self.value = None
self.message = ''
def _populate(self, value):
if not self.value:
self.value = value[0] if value else ''
value = value[1:]
self.message = self._concat_string_with_value(self.message, value)
def is_set(self):
return self.value is not None
def _data_as_list(self):
ret = [self.setting_name]
if self.value or self.message:
ret.append(self.value or '')
if self.message:
ret.append(self.message)
return ret
class Tags(Setting):
def _set_initial_value(self):
self.value = None
def _populate(self, value):
self.value = (self.value or []) + value
def is_set(self):
return self.value is not None
def __add__(self, other):
if not isinstance(other, Tags):
raise TypeError('Tags can only be added with tags')
tags = Tags('Tags')
tags.value = (self.value or []) + (other.value or [])
return tags
class Arguments(Setting):
pass
class Return(Setting):
pass
class Metadata(Setting):
setting_name = 'Metadata'
def __init__(self, parent, name, value, comment=None, joined=False):
self.parent = parent
self.name = name
joiner = StringValueJoiner('' if joined else ' ')
self.value = joiner.join_string_with_value('', value)
self._set_comment(comment)
def reset(self):
pass
def is_set(self):
return True
def _data_as_list(self):
return [self.setting_name, self.name, self.value]
class _Import(Setting):
def __init__(self, parent, name, args=None, alias=None, comment=None):
self.parent = parent
self.name = name
self.args = args or []
self.alias = alias
self._set_comment(comment)
def reset(self):
pass
@property
def type(self):
return type(self).__name__
def is_set(self):
return True
def _data_as_list(self):
return [self.type, self.name] + self.args
def report_invalid_syntax(self, message, level='ERROR', parent=None):
parent = parent or getattr(self, 'parent', None)
if parent:
parent.report_invalid_syntax(message, level)
else:
from robot.api import logger
logger.write(message, level)
class Library(_Import):
def __init__(self, parent, name, args=None, alias=None, comment=None):
if args and not alias:
args, alias = self._split_alias(args, parent)
_Import.__init__(self, parent, name, args, alias, comment)
def _split_alias(self, args, parent):
if len(args) > 1 and is_string(args[-2]):
with_name = args[-2]
if with_name.upper() == 'WITH NAME':
# TODO: Require all uppercase 'WITH NAME' in RF 3.1.
# https://github.com/robotframework/robotframework/issues/2263
if with_name != 'WITH NAME':
self._deprecation_warning(with_name, parent)
return args[:-2], args[-1]
return args, None
def _deprecation_warning(self, with_name, parent):
message = ("Using 'WITH NAME' syntax when importing libraries case "
"insensitively like '%s' is deprecated. Use all upper case "
"format 'WITH NAME' instead." % with_name)
self.report_invalid_syntax(message, 'WARN', parent)
def _data_as_list(self):
data = ['Library', self.name] + self.args
if self.alias:
data += ['WITH NAME', self.alias]
return data
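    # Illustrative round-trip (not from the original source): the table row
    #   Library    SeleniumLibrary    timeout=5    WITH NAME    Browser
    # populates name='SeleniumLibrary', args=['timeout=5'], alias='Browser',
    # and _data_as_list() reproduces the same row.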
class Resource(_Import):
def __init__(self, parent, name, invalid_args=None, comment=None):
if invalid_args:
name += ' ' + ' '.join(invalid_args)
_Import.__init__(self, parent, name, comment=comment)
class Variables(_Import):
def __init__(self, parent, name, args=None, comment=None):
_Import.__init__(self, parent, name, args, comment=comment)
class _DataList(object):
def __init__(self, parent):
self._parent = parent
self.data = []
def add(self, meta):
self._add(meta)
def _add(self, meta):
self.data.append(meta)
def _parse_name_and_value(self, value):
name = value[0] if value else ''
return name, value[1:]
def __getitem__(self, index):
return self.data[index]
def __setitem__(self, index, item):
self.data[index] = item
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
class ImportList(_DataList):
def populate_library(self, data, comment):
self._populate(Library, data, comment)
def populate_resource(self, data, comment):
self._populate(Resource, data, comment)
def populate_variables(self, data, comment):
self._populate(Variables, data, comment)
def _populate(self, item_class, data, comment):
name, value = self._parse_name_and_value(data)
self._add(item_class(self._parent, name, value, comment=comment))
class MetadataList(_DataList):
def populate(self, name, value, comment):
self._add(Metadata(self._parent, name, value, comment, joined=True))
|
jaloren/robotframework
|
src/robot/parsing/settings.py
|
Python
|
apache-2.0
| 10,412
|
import sys
import types
import unittest2
if sys.version_info[:2] == (2,3):
from sets import Set as set
from sets import ImmutableSet as frozenset
class Test_TestLoader(unittest2.TestCase):
### Tests for TestLoader.loadTestsFromTestCase
################################################################
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
def test_loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure it does the right thing even if no tests were found
def test_loadTestsFromTestCase__no_matches(self):
class Foo(unittest2.TestCase):
def foo_bar(self): pass
empty_suite = unittest2.TestSuite()
loader = unittest2.TestLoader()
self.assertEqual(loader.loadTestsFromTestCase(Foo), empty_suite)
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# What happens if loadTestsFromTestCase() is given an object
# that isn't a subclass of TestCase? Specifically, what happens
# if testCaseClass is a subclass of TestSuite?
#
# This is checked for specifically in the code, so we better add a
# test for it.
def test_loadTestsFromTestCase__TestSuite_subclass(self):
class NotATestCase(unittest2.TestSuite):
pass
loader = unittest2.TestLoader()
try:
loader.loadTestsFromTestCase(NotATestCase)
except TypeError:
pass
else:
self.fail('Should raise TypeError')
# "Return a suite of all tests cases contained in the TestCase-derived
# class testCaseClass"
#
# Make sure loadTestsFromTestCase() picks up the default test method
# name (as specified by TestCase), even though the method name does
# not match the default TestLoader.testMethodPrefix string
def test_loadTestsFromTestCase__default_method_name(self):
class Foo(unittest2.TestCase):
def runTest(self):
pass
loader = unittest2.TestLoader()
# This has to be false for the test to succeed
self.assertFalse('runTest'.startswith(loader.testMethodPrefix))
suite = loader.loadTestsFromTestCase(Foo)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [Foo('runTest')])
################################################################
### /Tests for TestLoader.loadTestsFromTestCase
### Tests for TestLoader.loadTestsFromModule
################################################################
# "This method searches `module` for classes derived from TestCase"
def test_loadTestsFromModule__TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
expected = [loader.suiteClass([MyTestCase('test')])]
self.assertEqual(list(suite), expected)
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (no TestCase instances)?
def test_loadTestsFromModule__no_TestCase_instances(self):
m = types.ModuleType('m')
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "This method searches `module` for classes derived from TestCase"
#
# What happens if no tests are found (TestCases instances, but no tests)?
def test_loadTestsFromModule__no_TestCase_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [loader.suiteClass()])
# "This method searches `module` for classes derived from TestCase"s
#
# What happens if loadTestsFromModule() is given something other
# than a module?
#
# XXX Currently, it succeeds anyway. This flexibility
# should either be documented or loadTestsFromModule() should
# raise a TypeError
#
# XXX Certain people are using this behaviour. We'll add a test for it
def test_loadTestsFromModule__not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(NotAModule)
reference = [unittest2.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# Check that loadTestsFromModule honors (or not) a module
# with a load_tests function.
def test_loadTestsFromModule__load_tests(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
load_tests_args = []
def load_tests(loader, tests, pattern):
self.assertIsInstance(tests, unittest2.TestSuite)
load_tests_args.extend((loader, tests, pattern))
return tests
m.load_tests = load_tests
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest2.TestSuite)
self.assertEquals(load_tests_args, [loader, suite, None])
load_tests_args = []
suite = loader.loadTestsFromModule(m, use_load_tests=False)
self.assertEquals(load_tests_args, [])
def test_loadTestsFromModule__faulty_load_tests(self):
m = types.ModuleType('m')
def load_tests(loader, tests, pattern):
raise TypeError('some failure')
m.load_tests = load_tests
loader = unittest2.TestLoader()
suite = loader.loadTestsFromModule(m)
self.assertIsInstance(suite, unittest2.TestSuite)
self.assertEqual(suite.countTestCases(), 1)
test = list(suite)[0]
self.assertRaisesRegexp(TypeError, "some failure", test.m)
################################################################
### /Tests for TestLoader.loadTestsFromModule()
### Tests for TestLoader.loadTestsFromName()
################################################################
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromName__empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('')
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the name contains invalid characters?
def test_loadTestsFromName__malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromName('abc () //')
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve ... to a
# module"
#
# What happens when a module by that name can't be found?
def test_loadTestsFromName__unknown_module_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf')
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromName failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module is found, but the attribute can't?
def test_loadTestsFromName__unknown_attr_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('unittest2.sdasfasfasdf')
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when we provide the module, but the attribute can't be
# found?
def test_loadTestsFromName__relative_unknown_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('sdasfasfasdf', unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise ValueError when passed an empty
# name relative to a provided module?
#
# XXX Should probably raise a ValueError instead of an AttributeError
def test_loadTestsFromName__relative_empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('', unittest2)
except AttributeError:
pass
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when an impossible name is given, relative to the provided
# `module`?
def test_loadTestsFromName__relative_malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromName('abc () //', unittest2)
except ValueError:
pass
except AttributeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromName raise TypeError when the `module` argument
# isn't a module object?
#
    # XXX Accepts the not-a-module object, ignoring the object's type
# This should raise an exception or the method name should be changed
#
# XXX Some people are relying on this, so keep it for now
def test_loadTestsFromName__relative_not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('test_2', NotAModule)
reference = [MyTestCase('test')]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromName__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('testcase_1', m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may
# resolve either to ... a test case class"
def test_loadTestsFromName__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testcase_1', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
def test_loadTestsFromName__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testsuite = unittest2.TestSuite([MyTestCase('test')])
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testsuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
def test_loadTestsFromName__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does loadTestsFromName() raise the proper exception when trying to
# resolve "a test method within a test case class" that doesn't exist
# for the given name (relative to a provided module)?
def test_loadTestsFromName__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('testcase_1.testfoo', m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromName__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
testcase_2 = unittest2.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest2.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('return_TestSuite', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1, testcase_2])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromName__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
    # ****************************************************************
    # Override the suiteClass attribute to ensure that the suiteClass
    # attribute is used.
def test_loadTestsFromName__callable__TestCase_instance_ProperSuiteClass(self):
class SubTestSuite(unittest2.TestSuite):
pass
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
loader.suiteClass = SubTestSuite
suite = loader.loadTestsFromName('return_TestCase', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [testcase_1])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test method within a test case class"
    # ****************************************************************
    # Override the suiteClass attribute to ensure that the suiteClass
    # attribute is used.
def test_loadTestsFromName__relative_testmethod_ProperSuiteClass(self):
class SubTestSuite(unittest2.TestSuite):
pass
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
loader.suiteClass=SubTestSuite
suite = loader.loadTestsFromName('testcase_1.test', m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [MyTestCase('test')])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens if the callable returns something else?
def test_loadTestsFromName__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest2.TestLoader()
try:
loader.loadTestsFromName('return_wrong', m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromName failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromName__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest2.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest2.TestLoader()
try:
suite = loader.loadTestsFromName(module_name)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
    ### /Tests for TestLoader.loadTestsFromName()
### Tests for TestLoader.loadTestsFromNames()
################################################################
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
#
# What happens if that sequence of names is empty?
def test_loadTestsFromNames__empty_name_list(self):
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames([])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "Similar to loadTestsFromName(), but takes a sequence of names rather
# than a single name."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens if that sequence of names is empty?
#
# XXX Should this raise a ValueError or just return an empty TestSuite?
def test_loadTestsFromNames__relative_empty_name_list(self):
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames([], unittest2)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [])
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Is ValueError raised in response to an empty name?
def test_loadTestsFromNames__empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames([''])
except ValueError, e:
self.assertEqual(str(e), "Empty module name")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when presented with an impossible module name?
def test_loadTestsFromNames__malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise ValueError or ImportError?
try:
loader.loadTestsFromNames(['abc () //'])
except ValueError:
pass
except ImportError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when no module can be found for the given name?
def test_loadTestsFromNames__unknown_module_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'])
except ImportError, e:
self.assertEqual(str(e), "No module named sdasfasfasdf")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ImportError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# What happens when the module can be found, but not the attribute?
def test_loadTestsFromNames__unknown_attr_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['unittest2.sdasfasfasdf', 'unittest2'])
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromNames failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when given an unknown attribute on a specified `module`
# argument?
def test_loadTestsFromNames__unknown_name_relative_1(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['sdasfasfasdf'], unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# Do unknown attributes (relative to a provided module) still raise an
# exception even in the presence of valid attribute names?
def test_loadTestsFromNames__unknown_name_relative_2(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['TestCase', 'sdasfasfasdf'], unittest2)
except AttributeError, e:
self.assertEqual(str(e), "'module' object has no attribute 'sdasfasfasdf'")
else:
self.fail("TestLoader.loadTestsFromName failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when faced with the empty string?
#
# XXX This currently raises AttributeError, though ValueError is probably
# more appropriate
def test_loadTestsFromNames__relative_empty_name(self):
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames([''], unittest2)
except AttributeError:
pass
else:
self.fail("Failed to raise ValueError")
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
# ...
# "The method optionally resolves name relative to the given module"
#
# What happens when presented with an impossible attribute name?
def test_loadTestsFromNames__relative_malformed_name(self):
loader = unittest2.TestLoader()
# XXX Should this raise AttributeError or ValueError?
try:
loader.loadTestsFromNames(['abc () //'], unittest2)
except AttributeError:
pass
except ValueError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise ValueError")
# "The method optionally resolves name relative to the given module"
#
# Does loadTestsFromNames() make sure the provided `module` is in fact
# a module?
#
# XXX This validation is currently not done. This flexibility should
# either be documented or a TypeError should be raised.
def test_loadTestsFromNames__relative_not_a_module(self):
class MyTestCase(unittest2.TestCase):
def test(self):
pass
class NotAModule(object):
test_2 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['test_2'], NotAModule)
reference = [unittest2.TestSuite([MyTestCase('test')])]
self.assertEqual(list(suite), reference)
# "The specifier name is a ``dotted name'' that may resolve either to
# a module, a test case class, a TestSuite instance, a test method
# within a test case class, or a callable object which returns a
# TestCase or TestSuite instance."
#
# Does it raise an exception if the name resolves to an invalid
# object?
def test_loadTestsFromNames__relative_bad_object(self):
m = types.ModuleType('m')
m.testcase_1 = object()
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1'], m)
except TypeError:
pass
else:
self.fail("Should have raised TypeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a test case class"
def test_loadTestsFromNames__relative_TestCase_subclass(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = loader.suiteClass([MyTestCase('test')])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a TestSuite instance"
def test_loadTestsFromNames__relative_TestSuite(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testsuite = unittest2.TestSuite([MyTestCase('test')])
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testsuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [m.testsuite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
def test_loadTestsFromNames__relative_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['testcase_1.test'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([MyTestCase('test')])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to ... a
# test method within a test case class"
#
# Does the method gracefully handle names that initially look like they
# resolve to "a test method within a test case class" but don't?
def test_loadTestsFromNames__relative_invalid_testmethod(self):
m = types.ModuleType('m')
class MyTestCase(unittest2.TestCase):
def test(self):
pass
m.testcase_1 = MyTestCase
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['testcase_1.testfoo'], m)
except AttributeError, e:
self.assertEqual(str(e), "type object 'MyTestCase' has no attribute 'testfoo'")
else:
self.fail("Failed to raise AttributeError")
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a ... TestSuite instance"
def test_loadTestsFromNames__callable__TestSuite(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
testcase_2 = unittest2.FunctionTestCase(lambda: None)
def return_TestSuite():
return unittest2.TestSuite([testcase_1, testcase_2])
m.return_TestSuite = return_TestSuite
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['return_TestSuite'], m)
self.assertIsInstance(suite, loader.suiteClass)
expected = unittest2.TestSuite([testcase_1, testcase_2])
self.assertEqual(list(suite), [expected])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase ... instance"
def test_loadTestsFromNames__callable__TestCase_instance(self):
m = types.ModuleType('m')
testcase_1 = unittest2.FunctionTestCase(lambda: None)
def return_TestCase():
return testcase_1
m.return_TestCase = return_TestCase
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['return_TestCase'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# Are staticmethods handled correctly?
def test_loadTestsFromNames__callable__call_staticmethod(self):
m = types.ModuleType('m')
class Test1(unittest2.TestCase):
def test(self):
pass
testcase_1 = Test1('test')
class Foo(unittest2.TestCase):
def foo():
return testcase_1
foo = staticmethod(foo)
m.Foo = Foo
loader = unittest2.TestLoader()
suite = loader.loadTestsFromNames(['Foo.foo'], m)
self.assertIsInstance(suite, loader.suiteClass)
ref_suite = unittest2.TestSuite([testcase_1])
self.assertEqual(list(suite), [ref_suite])
# "The specifier name is a ``dotted name'' that may resolve ... to
# ... a callable object which returns a TestCase or TestSuite instance"
#
# What happens when the callable returns something else?
def test_loadTestsFromNames__callable__wrong_type(self):
m = types.ModuleType('m')
def return_wrong():
return 6
m.return_wrong = return_wrong
loader = unittest2.TestLoader()
try:
loader.loadTestsFromNames(['return_wrong'], m)
except TypeError:
pass
else:
self.fail("TestLoader.loadTestsFromNames failed to raise TypeError")
# "The specifier can refer to modules and packages which have not been
# imported; they will be imported as a side-effect"
def test_loadTestsFromNames__module_not_loaded(self):
# We're going to try to load this module as a side-effect, so it
# better not be loaded before we try.
#
module_name = 'unittest2.test.dummy'
sys.modules.pop(module_name, None)
loader = unittest2.TestLoader()
try:
suite = loader.loadTestsFromNames([module_name])
self.assertIsInstance(suite, loader.suiteClass)
self.assertEqual(list(suite), [unittest2.TestSuite()])
# module should now be loaded, thanks to loadTestsFromName()
self.assertIn(module_name, sys.modules)
finally:
if module_name in sys.modules:
del sys.modules[module_name]
################################################################
### /Tests for TestLoader.loadTestsFromNames()
### Tests for TestLoader.getTestCaseNames()
################################################################
# "Return a sorted sequence of method names found within testCaseClass"
#
# Test.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames(self):
class Test(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
loader = unittest2.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), ['test_1', 'test_2'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Does getTestCaseNames() behave appropriately if no tests are found?
def test_getTestCaseNames__no_tests(self):
class Test(unittest2.TestCase):
def foobar(self): pass
loader = unittest2.TestLoader()
self.assertEqual(loader.getTestCaseNames(Test), [])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Are not-TestCases handled gracefully?
#
# XXX This should raise a TypeError, not return a list
#
# XXX It's too late in the 2.5 release cycle to fix this, but it should
# probably be revisited for 2.6
def test_getTestCaseNames__not_a_TestCase(self):
class BadCase(int):
def test_foo(self):
pass
loader = unittest2.TestLoader()
names = loader.getTestCaseNames(BadCase)
self.assertEqual(names, ['test_foo'])
# "Return a sorted sequence of method names found within testCaseClass"
#
# Make sure inherited names are handled.
#
# TestP.foobar is defined to make sure getTestCaseNames() respects
# loader.testMethodPrefix
def test_getTestCaseNames__inheritance(self):
class TestP(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foobar(self): pass
class TestC(TestP):
def test_1(self): pass
def test_3(self): pass
loader = unittest2.TestLoader()
names = ['test_1', 'test_2', 'test_3']
self.assertEqual(loader.getTestCaseNames(TestC), names)
################################################################
### /Tests for TestLoader.getTestCaseNames()
### Tests for TestLoader.testMethodPrefix
################################################################
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests_1 = unittest2.TestSuite([Foo('foo_bar')])
tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = [unittest2.TestSuite([Foo('foo_bar')])]
tests_2 = [unittest2.TestSuite([Foo('test_1'), Foo('test_2')])]
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(list(loader.loadTestsFromModule(m)), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest2.TestSuite([Foo('foo_bar')])
tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromName('Foo', m), tests_2)
# "String giving the prefix of method names which will be interpreted as
# test methods"
#
# Implicit in the documentation is that testMethodPrefix is respected by
# all loadTestsFrom* methods.
def test_testMethodPrefix__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests_1 = unittest2.TestSuite([unittest2.TestSuite([Foo('foo_bar')])])
tests_2 = unittest2.TestSuite([Foo('test_1'), Foo('test_2')])
tests_2 = unittest2.TestSuite([tests_2])
loader = unittest2.TestLoader()
loader.testMethodPrefix = 'foo'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_1)
loader.testMethodPrefix = 'test'
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests_2)
# "The default value is 'test'"
def test_testMethodPrefix__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.testMethodPrefix == 'test')
################################################################
### /Tests for TestLoader.testMethodPrefix
### Tests for TestLoader.sortTestMethodsUsing
################################################################
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromTestCase(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromModule(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromModule(m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromName(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = loader.suiteClass([Foo('test_2'), Foo('test_1')])
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames() and all the loadTestsFromX() methods"
def test_sortTestMethodsUsing__loadTestsFromNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
m.Foo = Foo
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
tests = [loader.suiteClass([Foo('test_2'), Foo('test_1')])]
self.assertEqual(list(loader.loadTestsFromNames(['Foo'], m)), tests)
# "Function to be used to compare method names when sorting them in
# getTestCaseNames()"
#
# Does it actually affect getTestCaseNames()?
def test_sortTestMethodsUsing__getTestCaseNames(self):
def reversed_cmp(x, y):
return -cmp(x, y)
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = reversed_cmp
test_names = ['test_2', 'test_1']
self.assertEqual(loader.getTestCaseNames(Foo), test_names)
# "The default value is the built-in cmp() function"
def test_sortTestMethodsUsing__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.sortTestMethodsUsing is cmp)
# "it can be set to None to disable the sort."
#
# XXX How is this different from reassigning cmp? Are the tests returned
# in a random order or something? This behaviour should die
def test_sortTestMethodsUsing__None(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
loader = unittest2.TestLoader()
loader.sortTestMethodsUsing = None
test_names = ['test_2', 'test_1']
self.assertEqual(set(loader.getTestCaseNames(Foo)), set(test_names))
################################################################
### /Tests for TestLoader.sortTestMethodsUsing
### Tests for TestLoader.suiteClass
################################################################
# "Callable object that constructs a test suite from a list of tests."
def test_suiteClass__loadTestsFromTestCase(self):
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromTestCase(Foo), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromModule(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromModule(m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromName(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [Foo('test_1'), Foo('test_2')]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromName('Foo', m), tests)
# It is implicit in the documentation for TestLoader.suiteClass that
# all TestLoader.loadTestsFrom* methods respect it. Let's make sure
def test_suiteClass__loadTestsFromNames(self):
m = types.ModuleType('m')
class Foo(unittest2.TestCase):
def test_1(self): pass
def test_2(self): pass
def foo_bar(self): pass
m.Foo = Foo
tests = [[Foo('test_1'), Foo('test_2')]]
loader = unittest2.TestLoader()
loader.suiteClass = list
self.assertEqual(loader.loadTestsFromNames(['Foo'], m), tests)
# "The default value is the TestSuite class"
def test_suiteClass__default_value(self):
loader = unittest2.TestLoader()
self.assertTrue(loader.suiteClass is unittest2.TestSuite)
if __name__ == '__main__':
unittest2.main()
|
supercheetah/diceroller
|
pyinstaller/PyInstaller/lib/unittest2/test/test_loader.py
|
Python
|
artistic-2.0
| 49,503
|
# [START all]
import argparse
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.client import GoogleCredentials
def main(project_id):
# [START build_service]
# Grab the application's default credentials from the environment.
credentials = GoogleCredentials.get_application_default()
# Construct the service object for interacting with the BigQuery API.
bigquery_service = build('bigquery', 'v2', credentials=credentials)
# [END build_service]
try:
# [START run_query]
query_request = bigquery_service.jobs()
query_data = {
'query':
"""
SELECT a.name, a.Total, a.Female, a.Male,
GREATEST(a.Male, a.Female) as confidence
FROM
(SELECT name, SUM(number) as Total,
SUM(CASE
WHEN gender = "F"
THEN number
ELSE 0
END)/Total as Female,
SUM(CASE
WHEN gender = "M"
THEN number
ELSE 0
END)/Total as Male,
FROM [fh-bigquery:popular_names.usa_1910_2013] GROUP BY name) as a ORDER BY confidence DESC;
"""
}
query_response = query_request.query(
projectId=project_id,
body=query_data).execute()
# [END run_query]
# [START print_results]
file_name="data/name_gender.tsv"
print('Query Results stored in: %s' % file_name)
with open(file_name, "wb+") as fp:
for row in query_response['rows']:
print >> fp, '\t'.join(field['v'] for field in row['f'])
# [END print_results]
except HttpError as err:
print('Error: {}'.format(err.content))
raise err
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('project_id', help='Your Google Cloud Project ID.')
args = parser.parse_args()
main(args.project_id)
# [END all]
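# Example invocation (the project id below is a placeholder):
#
#     python data_download.py my-gcp-project
#
# Each line of the resulting data/name_gender.tsv holds the tab-joined field
# values of one query-result row.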
|
napsternxg/GenderPrediction
|
data_download.py
|
Python
|
gpl-3.0
| 1,931
|
"""
WSGI config for letusorderit project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "letusorderit.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
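# For example, a WSGI server can be pointed at this module; the exact command
# depends on the deployment (gunicorn shown purely as an illustration):
#
#     gunicorn letusorderit.wsgi:application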
|
mfa/letusorderit
|
letusorderit/letusorderit/wsgi.py
|
Python
|
gpl-3.0
| 399
|
import re
# Examines non-YAML text (probably from a J2 template file?) for
# variable references.
class TextAnalyzer(object):
def __init__(self):
self.references = set()
def get_references(self):
return self.references
    def add_text(self, text):
        # Variable interpolations, e.g. "{{ foo }}" or "{{ foo | filter }}".
        refs = re.findall(r'{{\s+([^\s]+)\s+[^}]*}}', text)
        for ref in refs:
            self.references.add(ref)
        # Sequences iterated by for-loops, e.g. "{% for x in foo %}".
        refs = re.findall(r'{% for [^\s]+ in ([^\s]+)\s+%}', text)
        for ref in refs:
            self.references.add(ref)
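# A minimal usage sketch (the template snippet below is illustrative only):
if __name__ == '__main__':
    analyzer = TextAnalyzer()
    analyzer.add_text(
        "Hi {{ user_name }}! {% for host in web_hosts %}{{ host }}{% endfor %}")
    # Both the interpolated names and the for-loop source are collected:
    # the set contains 'user_name', 'host' and 'web_hosts'.
    print(analyzer.get_references())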
|
lostbearlabs/ansible-vars
|
lib/ansiblevars/text_analyzer.py
|
Python
|
unlicense
| 530
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Interpreter version: python 2.7
#
# Imports =====================================================================
# Functions & classes =========================================================
def _by_attr(xdom, attr):
"""
From `xdom` pick element with attributes defined by `attr`.
Args:
xdom (obj): DOM parsed by :mod:`xmltodict`.
        attr (dict): Mapping of attribute names to expected values; a value
            of None matches any value of that attribute.
Returns:
obj: List in case that multiple records were returned, or OrderedDict \
instance in case that there was only one. Blank array in case of \
no matching tag.
"""
    out = []
    for tag in xdom:
        for attr_name, val in attr.iteritems():
            if attr_name not in tag:
                break
            if val is not None and tag[attr_name] != val:
                break
        else:
            # Keep the tag only when every requested attribute matched,
            # i.e. the inner loop finished without hitting a `break`.
            out.append(tag)
    return out[0] if len(out) == 1 else out
class Modes(object):
"""
    Container holding information about the modes which may be used by a
    registrar to register documents.
Attributes:
by_resolver (bool): True if the mode can be used.
by_registrar (bool): True if the mode can be used.
by_reservation (bool): True if the mode can be used.
"""
def __init__(self, by_resolver=False, by_registrar=False,
by_reservation=False):
self.by_resolver = by_resolver
self.by_registrar = by_registrar
self.by_reservation = by_reservation
def __eq__(self, other):
return all([
self.by_resolver == other.by_resolver,
self.by_registrar == other.by_registrar,
self.by_reservation == other.by_reservation,
])
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "Modes(%r%r%r)" % (
self.by_resolver,
self.by_registrar,
self.by_reservation
)
@staticmethod
def from_xmldict(modes_tag):
"""
Parse Modes information from XML.
Args:
            modes_tag (obj): OrderedDict ``<modes>`` tag returned from
:mod:`xmltodict`.
Returns:
obj: :class:`.Modes` instance.
"""
by_resolver = _by_attr(modes_tag, attr={"@name": "BY_RESOLVER"})
by_registrar = _by_attr(modes_tag, attr={"@name": "BY_REGISTRAR"})
by_reservation = _by_attr(modes_tag, attr={"@name": "BY_RESERVATION"})
return Modes(
by_resolver=by_resolver["@enabled"].lower() == "true",
by_registrar=by_registrar["@enabled"].lower() == "true",
by_reservation=by_reservation["@enabled"].lower() == "true",
)
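# A minimal end-to-end sketch (the XML document below is illustrative and
# assumes :mod:`xmltodict` is available):
if __name__ == '__main__':
    import xmltodict
    xml = (
        '<modes>'
        '<mode name="BY_RESOLVER" enabled="true"/>'
        '<mode name="BY_REGISTRAR" enabled="false"/>'
        '<mode name="BY_RESERVATION" enabled="true"/>'
        '</modes>'
    )
    modes_tag = xmltodict.parse(xml)["modes"]["mode"]
    print(Modes.from_xmldict(modes_tag))  # -> Modes(True, False, True)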
|
edeposit/cz-urnnbn-api
|
src/cz_urnnbn_api/api_structures/modes.py
|
Python
|
mit
| 2,772
|
# -*- coding: utf-8 -*-
#
# pyspark documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 28 15:17:47 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shutil
import errno
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Remove previously generated rst files. Ignore errors just in case it stops
# generating whole docs.
shutil.rmtree(
"%s/reference/api" % os.path.dirname(os.path.abspath(__file__)), ignore_errors=True)
shutil.rmtree(
"%s/reference/pyspark.pandas/api" % os.path.dirname(os.path.abspath(__file__)),
ignore_errors=True)
try:
os.mkdir("%s/reference/api" % os.path.dirname(os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.mkdir("%s/reference/pyspark.pandas/api" % os.path.dirname(
os.path.abspath(__file__)))
except OSError as e:
if e.errno != errno.EEXIST:
raise
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.2'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'nbsphinx', # Converts Jupyter Notebook to reStructuredText files for Sphinx.
# For ipython directive in reStructuredText files. It is generated by the notebook.
'IPython.sphinxext.ipython_console_highlighting',
'numpydoc', # handle NumPy documentation formatted docstrings.
'sphinx_plotly_directive', # For visualize plot result
]
# plotly plot directive
plotly_include_source = True
plotly_html_show_formats = False
plotly_html_show_source_link = False
plotly_pre_code = """import numpy as np
import pandas as pd
import pyspark.pandas as ps"""
numpydoc_show_class_members = False
# Links used globally in the RST files.
# These are defined here to allow link substitutions dynamically.
rst_epilog = """
.. |binder| replace:: Live Notebook
.. _binder: https://mybinder.org/v2/gh/apache/spark/{0}?filepath=python%2Fdocs%2Fsource%2Fgetting_started%2Fquickstart.ipynb
.. |examples| replace:: Examples
.. _examples: https://github.com/apache/spark/tree/{0}/examples/src/main/python
.. |downloading| replace:: Downloading
.. _downloading: https://spark.apache.org/docs/{1}/#downloading
.. |building_spark| replace:: Building Spark
.. _building_spark: https://spark.apache.org/docs/{1}/building-spark.html
""".format(
os.environ.get("GIT_HASH", "master"),
os.environ.get("RELEASE_VERSION", "latest"),
)
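# For instance, an .rst page can then write ``|binder|_`` to render a
# "Live Notebook" link whose target is resolved against GIT_HASH above.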
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySpark'
copyright = ''
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'master'
# The full version, including alpha/beta/rc tags.
release = os.environ.get('RELEASE_VERSION', version)
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '.DS_Store', '**.ipynb_checkpoints']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for autodoc --------------------------------------------------
# Look at the first line of the docstring for function and method signatures.
autodoc_docstring_signature = True
autosummary_generate = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'pydata_sphinx_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../../../docs/img/spark-logo-reverse.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'css/pyspark.css',
]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysparkdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pyspark.tex', 'pyspark Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspark', 'pyspark Documentation',
['Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspark', 'pyspark Documentation',
'Author', 'pyspark', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = 'pyspark'
epub_author = 'Author'
epub_publisher = 'Author'
epub_copyright = '2014, Author'
# The basename for the epub file. It defaults to the project name.
#epub_basename = 'pyspark'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
def setup(app):
    # The app.add_javascript() is deprecated; prefer add_js_file() when it is
    # available. Look the fallback up lazily: a plain getattr() default would
    # be evaluated eagerly and raise on Sphinx versions that have dropped
    # add_javascript().
    add_js = getattr(app, "add_js_file", None) or app.add_javascript
    add_js('copybutton.js')
# Skip sample endpoint link (not expected to resolve)
linkcheck_ignore = [r'https://kinesis.us-east-1.amazonaws.com']
|
cloud-fan/spark
|
python/docs/source/conf.py
|
Python
|
apache-2.0
| 12,894
|
# Copyright (c) 2020, Djaodjin Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import re
from django.core.exceptions import ImproperlyConfigured
from django.urls import base
from django.utils.datastructures import MultiValueDict
from django.utils.regex_helper import normalize
from .compat import (RegexURLResolver as DjangoRegexURLResolver,
RegexURLPattern as DjangoRegexURLPattern, lru_cache, six)
from .thread_locals import get_current_site
class SitePrefixPattern(object):
def __init__(self):
self.converters = {}
@staticmethod
def _get_path_prefix():
path_prefix = ""
current_site = get_current_site()
if current_site and current_site.path_prefix:
path_prefix = "%s/" % current_site.path_prefix
return path_prefix
@property
def regex(self):
# This is only used by reverse() and cached in _reverse_dict.
return re.compile(self._get_path_prefix(), re.UNICODE)
def match(self, path):
path_prefix = self._get_path_prefix()
if path.startswith(path_prefix):
return path[len(path_prefix):], (), {}
return None
def check(self):
return []
def describe(self):
return "'{}'".format(self)
def __str__(self):
return self._get_path_prefix()
class BaseRegexURLResolver(DjangoRegexURLResolver):
"""
A URL resolver that always matches the active organization code
as URL prefix.
"""
def __init__(self, regex, urlconf_name, *args, **kwargs):
super(BaseRegexURLResolver, self).__init__(
regex, urlconf_name, *args, **kwargs)
@staticmethod
def _get_path_prefix():
path_prefix = "_"
current_site = get_current_site()
if current_site and current_site.path_prefix:
path_prefix = current_site.path_prefix
return path_prefix
# Implementation Note:
# Copy/Pasted `RegexURLResolver._populate` here because that was the only
# way to override `language_code = get_language()` to use a dynamic path
# prefix `path_prefix = self._get_path_prefix()`.
def _populate(self):
# Short-circuit if called recursively in this thread to prevent
# infinite recursion. Concurrent threads may call this at the same
# time and will need to continue, so set 'populating' on a
# thread-local variable.
#pylint:disable=protected-access,too-many-locals
if getattr(self._local, 'populating', False):
return
try:
self._local.populating = True
lookups = MultiValueDict()
namespaces = {}
apps = {}
path_prefix = self._get_path_prefix()
for url_pattern in reversed(self.url_patterns):
if isinstance(url_pattern, DjangoRegexURLPattern):
self._callback_strs.add(url_pattern.lookup_str)
# could be RegexURLPattern.regex or RegexURLResolver.regex here.
p_pattern = url_pattern.regex.pattern
if p_pattern.startswith('^'):
p_pattern = p_pattern[1:]
if isinstance(url_pattern, DjangoRegexURLResolver):
if url_pattern.namespace:
namespaces[url_pattern.namespace] = (
p_pattern, url_pattern)
if url_pattern.app_name:
apps.setdefault(
url_pattern.app_name, []).append(
url_pattern.namespace)
else:
parent_pat = url_pattern.regex.pattern
for name in url_pattern.reverse_dict:
for _, pat, defaults \
in url_pattern.reverse_dict.getlist(name):
new_matches = normalize(parent_pat + pat)
lookups.appendlist(
name,
(
new_matches,
p_pattern + pat,
dict(defaults,
**url_pattern.default_kwargs),
)
)
for namespace, (prefix, sub_pattern) \
in url_pattern.namespace_dict.items():
namespaces[namespace] = (
p_pattern + prefix, sub_pattern)
for app_name, namespace_list in \
url_pattern.app_dict.items():
apps.setdefault(app_name, []).extend(namespace_list)
if not getattr(url_pattern._local, 'populating', False):
url_pattern._populate()
self._callback_strs.update(url_pattern._callback_strs)
else:
bits = normalize(p_pattern)
lookups.appendlist(url_pattern.callback, (
bits, p_pattern, url_pattern.default_args))
if url_pattern.name is not None:
lookups.appendlist(url_pattern.name, (
bits, p_pattern, url_pattern.default_args))
self._reverse_dict[path_prefix] = lookups
self._namespace_dict[path_prefix] = namespaces
self._app_dict[path_prefix] = apps
self._populated = True
finally:
self._local.populating = False
@property
def reverse_dict(self):
path_prefix = self._get_path_prefix()
if path_prefix not in self._reverse_dict:
self._populate()
return self._reverse_dict[path_prefix]
@property
def namespace_dict(self):
path_prefix = self._get_path_prefix()
if path_prefix not in self._namespace_dict:
self._populate()
return self._namespace_dict[path_prefix]
@property
def app_dict(self):
path_prefix = self._get_path_prefix()
if path_prefix not in self._app_dict:
self._populate()
return self._app_dict[path_prefix]
try:
    from .urlresolvers_py3_django2 import RegexURLResolver
except (ImportError, SyntaxError): # <= Django2
    # `ModuleNotFoundError` is a subclass of `ImportError` on Python 3,
    # so this single clause covers both Python 2 and Python 3; a separate
    # `except ModuleNotFoundError` branch would be unreachable.
    class RegexURLResolver(BaseRegexURLResolver):
        pass
class SiteRegexURLResolver(RegexURLResolver):
"""
A URL resolver that always matches the active organization code
as URL prefix.
"""
def __init__(self, regex, urlconf_name, *args, **kwargs):
super(SiteRegexURLResolver, self).__init__(
regex, urlconf_name, *args, **kwargs)
@property
def regex(self):
path_prefix = self._get_path_prefix()
if path_prefix != '_':
# site will be None when 'manage.py show_urls' is invoked.
return re.compile('^%s/' % path_prefix, re.UNICODE)
return re.compile('^', re.UNICODE)
def site_patterns(*args):
"""
Adds the live organization prefix to every URL pattern within this
function. This may only be used in the root URLconf, not in an included
URLconf.
"""
pattern_list = args
return [SiteRegexURLResolver(SitePrefixPattern(), pattern_list)]
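# A minimal usage sketch (hypothetical root URLconf; `app.urls` and the
# `url`/`include` imports are assumptions, not part of this module):
#
#     from multitier.urlresolvers import site_patterns
#
#     urlpatterns = site_patterns(
#         url(r'^app/', include('app.urls')),
#     )
#
# URLs reversed inside `app.urls` then carry the active organization's
# path prefix.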
try:
from django.urls.resolvers import RegexPattern
@lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(RegexPattern(r'^/'), urlconf)
def url_sites(regex, view, kwargs=None, name=None, prefix=''):
#pylint:disable=unused-argument
if not view:
raise ImproperlyConfigured(
'Empty URL pattern view name not permitted (for pattern %r)'
% regex)
if isinstance(view, (list, tuple)):
# For include(...) processing.
pattern = RegexPattern(regex, is_endpoint=False)
urlconf_module, app_name, namespace = view
return RegexURLResolver(
pattern,
urlconf_module,
kwargs,
app_name=app_name,
namespace=namespace,
)
elif callable(view):
pattern = RegexPattern(regex, name=name, is_endpoint=True)
return DjangoRegexURLPattern(pattern, view, kwargs, name)
else:
raise TypeError('view must be a callable or a list/tuple'\
' in the case of include().')
except ImportError:
def url_sites(regex, view, kwargs=None, name=None, prefix='',
pattern=DjangoRegexURLPattern, resolver=RegexURLResolver):
"""
Modified `django.conf.urls.url` with allows to specify custom
RegexURLPattern and RegexURLResolver classes.
"""
#pylint:disable=too-many-arguments
if isinstance(view, (list, tuple)):
# For include(...) processing.
return resolver(regex, view[0], kwargs, *view[1:])
else:
if isinstance(view, six.string_types):
if not view:
raise ImproperlyConfigured('Empty URL pattern view'\
' name not permitted (for pattern %r)' % regex)
if prefix:
view = prefix + '.' + view
return pattern(regex, view, kwargs, name)
@lru_cache(maxsize=None)
def get_resolver(urlconf=None):
if urlconf is None:
from django.conf import settings
urlconf = settings.ROOT_URLCONF
return RegexURLResolver(r'^/', urlconf)
# Severe monkey patching: without it, calls to the top-level resolver's
# `_reverse_with_prefix` method do not update the caches for *path_prefix*.
base.get_resolver = get_resolver
|
djaodjin/djaodjin-multitier
|
multitier/urlresolvers.py
|
Python
|
bsd-2-clause
| 11,381
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
"""
Creates a country database with ISO 2 and ISO 3 character codes, ISO number,
name and international dialing codes.
The data comes from three sources:
* United Nations statistics
* WorldAtlas.com
* Wikipedia
This code merges data from the three sources, considering the UN data
to be the most up-to-date (demonstrably so at the time of writing), and
applies some changes, such as renaming "Viet Nam" to "Vietnam" and calling
the Vatican "HOLY SEE (VATICAN CITY STATE)" rather than just "Holy See",
a name many might not recognize.
In electing to use UN names where possible it is likely that some may not
be considered politically appropriate in some areas. We have chosen not to
impose our opinion and thus take the UN nomenclature without modification.
Based on screen-scraping, this is almost certainly going to break at some point
in the future. We will attempt to keep it updated.
Usage:
./build.py # output csv to stdout
./build.py -t json # output json to stdout
./build.py -v # as above, but noise comes to stderr
./build.py -i # as above, but ignore entities with no IDC
"""
import csv
import json
import sys
import re
import urllib2
from optparse import OptionParser
from tidylib import tidy_document
from BeautifulSoup import BeautifulSoup
# map countries as in Wikipedia to countries as in UN data
COUNTRY_MAPPINGS = {
# 'WIKIPEDIA NAME': 'OUR DATA NAME (GENERALLY UN SOURCED)',
'UNITED STATES': 'UNITED STATES OF AMERICA',
'SAINT MARTIN (FRANCE)': 'SAINT-MARTIN (FRENCH PART)',
'SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS':
'SOUTH GEORGIA AND SOUTH S.S.',
'CARIBBEAN NETHERLANDS': 'NETHERLANDS ANTILLES',
'LAOS': "LAO PEOPLE'S DEMOCRATIC REPUBLIC",
'BURMA': 'MYANMAR',
'MICRONESIA, FEDERATED STATES OF': 'MICRONESIA (FEDERATED STATES OF)',
'KOREA, NORTH': "DEMOCRATIC PEOPLE'S REPUBLIC OF KOREA",
'KOREA, SOUTH': 'REPUBLIC OF KOREA',
'CONGO, DEMOCRATIC REPUBLIC OF THE (ZAIRE)':
'DEMOCRATIC REPUBLIC OF THE CONGO',
'US VIRGIN ISLANDS': 'UNITED STATES VIRGIN ISLANDS',
'MACAU': 'CHINA, MACAO SPECIAL ADMINISTRATIVE REGION',
'FAROE ISLANDS': 'FAEROE ISLANDS',
'EAST TIMOR': 'TIMOR-LESTE',
'PALESTINIAN TERRITORIES': 'STATE OF PALESTINE',
'VATICAN CITY STATE (HOLY SEE)': 'HOLY SEE (VATICAN CITY STATE)',
u'SAINT BARTHÉLEMY': u'SAINT-BARTHÉLEMY',
'SINT MAARTEN (NETHERLANDS)': 'SINT MAARTEN (DUTCH PART)',
u'SÃO TOMÉ AND PRÍNCIPE': 'SAO TOME AND PRINCIPE',
'SINT EUSTATIUS': 'BONAIRE, SAINT EUSTATIUS AND SABA',
}
# countries that have no ISO3/2 or IDC
IGNORE_COUNTRIES = [
'830', # CHANNEL ISLANDS
'680', # SARK
]
def chunk(s, chunksize):
"""
Return a list of s chunked into chunksize bits. e.g.
chunk("abcdefgh", 2) -> ['ab', 'cd', 'ef', 'gh']
"""
index = 0
length = len(s)
chunks = []
while index < length:
chunks.append(s[index:index+chunksize])
index += chunksize
return chunks
def fix_entities(s):
"""
Process a string to replace HTML entities with their Unicode equivalents
"""
return unicode(BeautifulSoup(s,
convertEntities=BeautifulSoup.HTML_ENTITIES))
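# For example (illustrative): fix_entities('Caf&eacute;') returns u'Café'.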
def download_un_data():
page = urllib2.urlopen(
"http://unstats.un.org/unsd/methods/m49/m49alpha.htm")
# this page has mal-formed tags which break the parse, so let's fix those
page, _ = tidy_document(page.read(), options={'numeric-entities': 1})
soup = BeautifulSoup(page)
table = soup.findAll('table',
attrs={'border': '0',
'cellpadding': '2',
'cellspacing': '0'}
)[0]
rows = table.findAll('tr')[1:]
country_dict = {}
for row in rows:
number, name, iso3 = [x.text for x in row.findAll('td')]
if number in IGNORE_COUNTRIES:
continue
# fix a quirk or two
if name == 'Viet Nam':
name = 'Vietnam'
if name == 'Holy See':
name = 'Holy See (Vatican City State)'
country_dict[iso3] = dict(number=number,
name=fix_entities(name),
iso3=iso3)
return country_dict
def download_worldatlas_data():
page = urllib2.urlopen("http://www.worldatlas.com/aatlas/ctycodes.htm")
soup = BeautifulSoup(page)
table = soup.findAll('table',
attrs={'width': '870',
'cellpadding': '0',
'cellspacing': '0'}
)[0]
iso2_list = chunk(table.findAll('td')[0].text[2:], 2)
iso3_list = chunk(table.findAll('td')[1].text[2:], 3)
number_list = chunk(table.findAll('td')[2].text[3:], 3)
    # getting the countries is a little more tedious
s = table.findAll('td')[3].prettify()
s = s.replace('<br />', '|')
s = s.replace('\n', '')
s = s.replace('</font>', '')
s = s.replace('<font>', '')
s = s[s.find('|'):]
s = s.replace('</td>', '')
name_list = [fix_entities(x.strip()) for x in s.split('|') if x.strip()]
merged = zip(iso2_list, iso3_list, number_list, name_list)
# now make a dict keyed on iso3
keys = ['iso2', 'iso3', 'number', 'name']
country_dict = {}
for row in merged:
country_dict[row[1]] = dict(zip(keys, row))
return country_dict
def download_wikipedia_idc():
"""
Download IDC list from wikipedia.
"""
url = 'http://en.wikipedia.org/wiki/International_dialing_codes'
req = urllib2.Request(url,
headers={'User-Agent':
'Mozilla/5.0 (X11; U; Linux i686)'})
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
rows = soup.findAll('table',
attrs={'class':
'wikitable sortable'})[0].findAll('tr')[1:]
country_codes = {}
for row in rows:
country, numbers = [td.text for td in row.findAll('td')]
country = fix_entities(country)
        numbers = re.sub(r'\[.*\]', '', numbers)
numbers = [n.strip() for n in numbers.split(',')]
numbers = [n[n.find('+'):] for n in numbers if n]
country_codes[country.upper()] = numbers
return country_codes
def reindex(dataset, key):
"""
Take a dict of dicts and return a new dict keyed on the
value of the named key in the data
"""
newdict = {}
for data in dataset.values():
newdict[data[key]] = data
return newdict
def blend_un_wad(und, wad):
"""
    Blend UN data (und) and World Atlas data (wad). WAD is larger and the
    two disagree only in the ISO number, so create a new set from both.
    A discrepancy has been found in the ISO3 code for Romania, where the
    World Atlas appears wrong. There is also a discrepancy with East Timor/
    Timor-Leste.
    Therefore, to merge UN into World Atlas:
    * if both UN and WA data are available for the ISO3 code, and the
      numbers differ, overwrite with the UN number.
    * if there is no UN data for the ISO3 code, check for a record with the
      same ISO number. If found, assign the UN ISO3 and uppercase name.
    We also use UN names rather than WA names.
Next we merge unique entries in the UN data into our blended set.
"""
newdata = wad.copy()
patched = []
isonum = reindex(und, 'number')
for iso3, data in newdata.items():
PATCHED = False
try:
undata = und[iso3]
unname = undata['name'].upper()
if data['number'] != undata['number']:
PATCHED = True
if data['name'] != unname:
PATCHED = True
data['number'] = undata['number']
data['name'] = unname
except KeyError:
# just in case ISO3 differs at the UN, check with the ISO number
try:
undata = isonum[data['number']]
oldiso3 = iso3
iso3 = undata['iso3']
data['iso3'] = iso3
data['name'] = undata['name'].upper()
newdata[iso3] = data
del(newdata[oldiso3])
PATCHED = True
except KeyError:
# OK - there really is no UN entry for this country
pass
if PATCHED:
patched.append(data)
newdata[iso3] = data
uniso3 = set(und.keys())
blendiso3 = set(newdata.keys())
missing_iso3 = list(uniso3 - blendiso3)
for iso3 in missing_iso3:
data = und[iso3]
data['iso2'] = ''
data['name'] = data['name'].upper()
newdata[iso3] = data
patched.append(data)
return newdata, patched
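# Illustrative merge (hypothetical records): a WA entry
#   {'XYZ': {'iso3': 'XYZ', 'iso2': 'XY', 'number': '999', 'name': 'EXAMPLIA'}}
# blended with a UN entry
#   {'XYZ': {'iso3': 'XYZ', 'number': '998', 'name': 'Examplia'}}
# keeps the WA iso2 but takes the UN number '998' and the uppercased UN
# name, and the row is recorded in the returned `patched` list.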
def _split_numbers(numbers):
"""
Receive a list of IDCs and IDC+region code and split into
a dict to augment the country data.
"""
result = {}
# remove '+' prefix and split IDC from area code
numbers = [x.replace('+', '').strip().split(' ') for x in numbers]
idc = numbers[0][0]
result['idc'] = idc
if len(numbers[0]) > 1:
region_codes = [x.pop(1) for x in numbers]
regions = ['region_a', 'region_b', 'region_c', 'region_d']
for rc in region_codes:
result[regions.pop(0)] = rc
return result
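# For example (illustrative):
#   _split_numbers(['+1 340', '+1 284'])
# returns {'idc': '1', 'region_a': '340', 'region_b': '284'}.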
def map_numbers(data, country_codes, verbose=False):
"""
Augments data with IDC codes. Change is in-place; function returns nothing.
"""
byname = reindex(data, 'name')
for country, numbers in country_codes.items():
if country in COUNTRY_MAPPINGS:
country = COUNTRY_MAPPINGS[country]
try:
data = byname[country]
# Vatican is assigned +379 but does not use it, so remove
# this solitary weird point
if country == 'HOLY SEE (VATICAN CITY STATE)':
numbers = ['+39 066']
data.update(_split_numbers(numbers))
except KeyError:
possible_countries = [c for c in byname.keys()
if c.find(country) >= 0]
if len(possible_countries) == 1:
data = byname[possible_countries[0]]
data.update(_split_numbers(numbers))
if verbose:
sys.stderr.write("{} --> {}\n"
.format(country, possible_countries[0]))
elif len(possible_countries) > 1:
if verbose:
sys.stderr.write(u"Cannot find {}, possible matches: {}\n"
.format(country, possible_countries))
else:
if verbose:
sys.stderr.write(u"Cannot find country: {}\n"
.format(country))
def output_csv(data):
"""
Write CSV to stdout
"""
iso3list = data.keys()
iso3list.sort()
columns = ['number', 'iso3', 'iso2', 'name', 'idc',
'region_a', 'region_b', 'region_c', 'region_d']
try:
writer = csv.writer(sys.stdout)
writer.writerow(columns)
for iso3 in iso3list:
entry = data[iso3]
writer.writerow([entry.get(c, '').encode('utf-8')
for c in columns])
finally:
sys.stdout.flush()
def make_dataset(output_format, verbose, ignore):
und = download_un_data()
wad = download_worldatlas_data()
blend, patched = blend_un_wad(und, wad)
country_codes = download_wikipedia_idc()
map_numbers(blend, country_codes, verbose)
if ignore:
# strip out entries with no IDC
iso3list = blend.keys()
for iso3 in iso3list:
if blend[iso3].get('idc', '') == '':
del(blend[iso3])
    if output_format == 'csv':
        output_csv(blend)
    elif output_format == 'json':
print(json.dumps(blend, indent=4))
if verbose:
for data in blend.values():
sys.stderr.write(u"{}\t {}\t {}\n".format(data['iso2'],
data['iso3'],
data['name'],))
sys.stderr.write("{} entities in database\n".format(len(blend)))
sys.stderr.write("{} entities with numbers\n"
.format(len([x for x in blend.keys()
if blend[x].get('idc', None)])))
sys.stderr.write("{} patched entities\n".format(len(patched)))
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-v', '--verbose',
action='store_true',
default=False,
help='Show verbose output')
parser.add_option('-i', '--ignore-no-idc',
action='store_true',
dest='ignore',
default=False,
help='Do not output countries for which we have '
'no IDC')
parser.add_option('-t', '--format',
action='store',
choices=['csv', 'json'],
default='csv',
help='Output format: json or csv (default)')
options, args = parser.parse_args()
make_dataset(options.format, options.verbose, options.ignore)
|
Rethought/globaldict
|
build.py
|
Python
|
bsd-3-clause
| 13,459
|
__author__ = 'kennyjoseph'
from utility_code.run_helpers import *
from utility_code.evaluation import *
from gensim.models.word2vec import Word2Vec
from twitter_dm.utility.general_utils import tab_stringify_newline as tsn
CONLL_FILE = "processed_data/final_all_conll_w_all_features.txt"
model, all_dictionaries, ark_clusters, sets, names = get_init_data('gensim_model/glove_twitter_50_raw_model.txt.gz',
"processed_data/50mpaths2",
"dictionaries/*/*",
BOOTSTRAPPED_DICTIONARY_LOCATION)
features_from_conll_file, dict_for_filter = get_all_features(CONLL_FILE,
all_dictionaries,
ark_clusters,
sets, names)
cutoff_param = .0001
output, models, preds = run_all_on_test_ids(0,
[],
model,
features_from_conll_file,
dict_for_filter,
eval_params = [.45],
cutoff_params=[cutoff_param],
use_filtered_params=[True],
datasets_to_use = ['full'],
regularization_params = [.65])
labels, features, obj_inds,\
word_list, head_word_list,word_minus_one_list,\
last_entity_word_list = configure_features_for_wordvectors_and_remove_twitterner(features_from_conll_file)
count_vec = CountVectorizer(tokenizer=lambda x: x.split("|"),min_df=cutoff_param)
count_vec.fit_transform(features)
orig_feature_names = count_vec.get_feature_names()
print 'extracting features for public test data... '
pub_conll, dict_for_filter_pub = get_all_features("test_data/final_conll_pub.txt",
all_dictionaries,
ark_clusters,
sets,names)
labels_pub, features_pub, obj_inds_pub,\
word_list_pub, head_list_pub, word_minus_one_list_pub,\
last_entity_word_list_pub = configure_features_for_wordvectors_and_remove_twitterner(pub_conll)
cv_pub = CountVectorizer(tokenizer=lambda x: x.split("|"),vocabulary=orig_feature_names)
X_pub = cv_pub.fit_transform(features_pub)
w_vec_pub = get_vector_rep_from_wordlist(word_list_pub, model, 50,True)
head_vec_pub = get_vector_rep_from_wordlist(head_list_pub, model, 50,True)
last_vec_pub = get_vector_rep_from_wordlist(last_entity_word_list_pub, model,50,True)
test_inds, a, stopword_test_inds, b = get_train_test_inds_w_filter([],
dict_for_filter_pub,
obj_inds_pub)
y_pub = np.array(labels_pub)
D = np.concatenate((X_pub.todense(),w_vec_pub,head_vec_pub,last_vec_pub),axis=1)
predicted_prob = models[0].predict_proba(D[test_inds,:])
stopword_test_inds_0 = []
stopword_test_inds_1 = []
for x in stopword_test_inds:
if y_pub[x] == 1:
stopword_test_inds_1.append(x)
else:
stopword_test_inds_0.append(x)
if len(stopword_test_inds):
extra_tn = len(stopword_test_inds_0)
extra_fn = len(stopword_test_inds_1)
y_pub = np.concatenate((y_pub[test_inds],np.array([0]*extra_tn),np.array([1]*extra_fn)),axis=0)
predicted_prob = np.concatenate((predicted_prob,[[1,0]]*(extra_tn+extra_fn)),axis=0)
test_inds = test_inds + stopword_test_inds_0 + stopword_test_inds_1
output_file = open("results/final_model_pub_res.tsv","w")
eval_out = evaluate(.5, y_pub, predicted_prob,obj_inds_pub,test_inds,True,True,True)
output_file.write(tsn(["final_model"] + eval_out[1:]))
output_file.close()
from utility_code.dependency_parse_object import DependencyParseObject
test_data = {DependencyParseObject(x[0]).tweet_id : x for x in
read_grouped_by_newline_file("test_data/final_conll_pub.txt")}
write_out_predictions("results/predictions_pub_data.txt",test_data,obj_inds_pub,test_inds,y_pub,predicted_prob)
|
kennyjoseph/identity_extraction_pub
|
python/12_run_final_model_on_pub_data.py
|
Python
|
mit
| 4,401
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import netaddr
from netaddr.strategy import ipv4
import neutronclient.v2_0.client as nclient
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import retrying
from murano.common import auth_utils
from murano.common import exceptions as exc
from murano.common.i18n import _LI
from murano.dsl import dsl
from murano.dsl import helpers
from murano.dsl import session_local_storage
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@dsl.name('io.murano.system.NetworkExplorer')
class NetworkExplorer(object):
def __init__(self, this, region_name=None):
session = helpers.get_execution_session()
self._project_id = session.project_id
self._settings = CONF.networking
self._available_cidrs = self._generate_possible_cidrs()
self._region = this.find_owner('io.murano.CloudRegion')
self._region_name = region_name
@staticmethod
@session_local_storage.execution_session_memoize
def _get_client(region_name):
neutron_settings = CONF.neutron
return nclient.Client(**auth_utils.get_session_client_parameters(
service_type='network', region=region_name, conf=neutron_settings
))
@property
def _client(self):
region = self._region_name or (
None if self._region is None else self._region['name'])
return self._get_client(region)
# NOTE(starodubcevna): to avoid simultaneous router requests we use retry
# decorator with random delay 1-10 seconds between attempts and maximum
# delay time 30 seconds.
@retrying.retry(retry_on_exception=lambda e: isinstance(e,
exc.RouterInfoException),
wait_random_min=1000, wait_random_max=10000,
stop_max_delay=30000)
def get_default_router(self):
router_name = self._settings.router_name
routers = self._client.list_routers(
tenant_id=self._project_id, name=router_name).get('routers')
if len(routers) == 0:
LOG.debug('Router {name} not found'.format(name=router_name))
if self._settings.create_router:
LOG.debug('Attempting to create Router {router}'.
format(router=router_name))
external_network = self._settings.external_network
kwargs = {'id': external_network} \
if uuidutils.is_uuid_like(external_network) \
else {'name': external_network}
networks = self._client.list_networks(**kwargs).get('networks')
ext_nets = list(filter(lambda n: n['router:external'],
networks))
if len(ext_nets) == 0:
raise KeyError('Router %s could not be created, '
'no external network found' % router_name)
nid = ext_nets[0]['id']
body_data = {
'router': {
'name': router_name,
'external_gateway_info': {
'network_id': nid
},
'admin_state_up': True,
}
}
router = self._client.create_router(
body=body_data).get('router')
LOG.info(_LI('Created router: {id}').format(id=router['id']))
return router['id']
else:
raise KeyError('Router %s was not found' % router_name)
else:
if routers[0]['external_gateway_info'] is None:
raise exc.RouterInfoException('Please set external gateway for'
' the router %s ' % router_name)
router_id = routers[0]['id']
return router_id
def get_available_cidr(self, router_id, net_id):
"""Uses hash of network IDs to minimize the collisions
Different nets will attempt to pick different cidrs out of available
range.
If the cidr is taken will pick another one.
"""
taken_cidrs = self._get_cidrs_taken_by_router(router_id)
id_hash = hash(net_id)
num_fails = 0
while num_fails < len(self._available_cidrs):
cidr = self._available_cidrs[
(id_hash + num_fails) % len(self._available_cidrs)]
if any(self._cidrs_overlap(cidr, taken_cidr) for taken_cidr in
taken_cidrs):
num_fails += 1
else:
return str(cidr)
return None
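    # Illustrative walk-through (hypothetical values): with three available
    # cidrs and hash(net_id) % 3 == 1, candidates are tried in the order of
    # indexes 1, 2, 0 until one does not overlap a cidr already taken by
    # the router; None is returned only if every candidate overlaps.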
def get_default_dns(self):
return self._settings.default_dns
def get_external_network_id_for_router(self, router_id):
router = self._client.show_router(router_id).get('router')
if not router or 'external_gateway_info' not in router:
return None
return router['external_gateway_info'].get('network_id')
def get_external_network_id_for_network(self, network_id):
network = self._client.show_network(network_id).get('network')
if network.get('router:external', False):
return network_id
# Get router interfaces of the network
router_ports = self._client.list_ports(
**{'device_owner': 'network:router_interface',
'network_id': network_id}).get('ports')
# For each router this network is connected to
# check if the router has external_gateway set
for router_port in router_ports:
            ext_net_id = self.get_external_network_id_for_router(
                router_port.get('device_id'))
if ext_net_id:
return ext_net_id
return None
def _get_cidrs_taken_by_router(self, router_id):
if not router_id:
return []
ports = self._client.list_ports(device_id=router_id)['ports']
subnet_ids = []
for port in ports:
for fixed_ip in port['fixed_ips']:
subnet_ids.append(fixed_ip['subnet_id'])
all_subnets = self._client.list_subnets()['subnets']
filtered_cidrs = [netaddr.IPNetwork(subnet['cidr']) for subnet in
all_subnets if subnet['id'] in subnet_ids]
return filtered_cidrs
@staticmethod
def _cidrs_overlap(cidr1, cidr2):
return (cidr1 in cidr2) or (cidr2 in cidr1)
def _generate_possible_cidrs(self):
bits_for_envs = int(
math.ceil(math.log(self._settings.max_environments, 2)))
bits_for_hosts = int(math.ceil(math.log(self._settings.max_hosts, 2)))
width = ipv4.width
mask_width = width - bits_for_hosts - bits_for_envs
net = netaddr.IPNetwork(
'{0}/{1}'.format(self._settings.env_ip_template, mask_width))
return list(net.subnet(width - bits_for_hosts))
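    # Worked example (hypothetical settings): max_environments=250 and
    # max_hosts=250 give bits_for_envs=8 and bits_for_hosts=8, so
    # mask_width=16; with env_ip_template '10.0.0.0' the pool becomes
    # 10.0.0.0/16, split into 256 /24 subnets, one per environment.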
def list_networks(self):
return self._client.list_networks()['networks']
def list_subnetworks(self):
return self._client.list_subnets()['subnets']
def list_ports(self):
return self._client.list_ports()['ports']
def list_neutron_extensions(self):
return self._client.list_extensions()['extensions']
|
DavidPurcell/murano_temp
|
murano/engine/system/net_explorer.py
|
Python
|
apache-2.0
| 7,838
|
#!/usr/bin/env python
import os, sys, pyexiv2
from pyexiv2.utils import make_fraction
import argparse
from lib.geo import compute_bearing, dms_to_decimal, offset_bearing
from lib.sequence import Sequence
from lib.exifedit import ExifEdit
'''
Interpolates the direction of an image based on the coordinates stored in
the EXIF tag of the next image in a set of consecutive images.
Uses the capture time in EXIF and looks up an interpolated lat, lon, bearing
for each image, and writes the values to the EXIF of the image.
An offset angle relative to the direction of movement may be given as an optional
argument to compensate for a side-looking camera. This angle should be positive for
a clockwise offset, e.g. 90 for a right-looking camera and 270 (or -90) for a left-looking camera.
@attention: Requires pyexiv2; see install instructions at http://tilloy.net/dev/pyexiv2/
@author: mprins
@license: MIT
'''
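# Example invocation (illustrative; assumes a directory of geotagged photos):
#
#     ./interpolate_direction.py /path/to/photos --offset_angle 90
#
# writes interpolated bearings for a right-looking camera.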
def write_direction_to_image(filename, direction):
'''
Write the direction to the exif tag of the photograph.
@param filename: photograph filename
@param direction: direction of view in degrees
'''
exif = ExifEdit(filename)
try:
exif.add_direction(direction, precision=10)
exif.write()
print("Added direction to: {0} ({1} degrees)".format(filename, float(direction)))
except ValueError, e:
print("Skipping {0}: {1}".format(filename, e))
def get_args():
parser = argparse.ArgumentParser(description='Interpolate direction given GPS positions')
parser.add_argument('path', help='path to your photos')
parser.add_argument('--offset_angle',
type=float, help='offset angle relative to camera position', default=0.0)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = get_args()
path = args.path
# offset angle, relative to camera position, clockwise is positive
offset_angle = args.offset_angle
s = Sequence(path)
bearings = s.interpolate_direction(offset_angle)
for image_name, bearing in bearings.iteritems():
write_direction_to_image(image_name, bearing)
|
gisma/uavRmp
|
inst/python/interpolate_direction.py
|
Python
|
gpl-3.0
| 2,118
|
#!/usr/bin/env python
"""
This is a time-sharing example...
The main code of this example is like an operating system that shares time
between the two 'threads' or 'processes'. Each one has its own state, stack and all.
This is a co-operative system: if one of the processes never calls yield, or goes
into a blocking call, then the entire system stalls.
What is this good for?
Handling many pseudo clients in one thread in Python. Just make sure you never do
IO in your code; instead, ask the 'OS' (main code), via the return value of yield,
to do the calls for you. The 'OS' will return the data needed via the 'next()' method.
This way you can handle thousands of connections in one Python thread (this is actually
what happens with Twisted).
"""
def evens():
for x in range(0, 100, 2):
print('evens say ', x)
yield
def odds():
for x in range(10001, 10101, 2):
print('odds say ', x)
yield
c1 = evens()
c1.__next__()
c2 = odds()
c2.__next__()
for x in range(10):
c1.__next__()
c2.__next__()
|
veltzer/demos-python
|
src/examples/short/generators/yield_advanced.py
|
Python
|
gpl-3.0
| 1,038
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import time
from keystoneauth1 import adapter
from keystoneauth1 import discover
from keystoneauth1 import exceptions as ka_exc
from keystoneauth1.identity import v2 as v2_auth
from keystoneauth1.identity import v3 as v3_auth
from keystoneauth1 import session
from oslo_utils import strutils
import six.moves.urllib.parse as urlparse
from . import utils
from . import exc
from .. import monitoring_utils
from .ecl.common.apiclient import auth
from .ecl.common.apiclient import client
from .ecl.common.apiclient import exceptions
def _discover_auth_versions(session, auth_url):
# discover the API versions the server is supporting based on the
# given URL
v2_auth_url = None
v3_auth_url = None
try:
ks_discover = discover.Discover(session=session, url=auth_url)
v2_auth_url = ks_discover.url_for('2.0')
v3_auth_url = ks_discover.url_for('3.0')
except ka_exc.DiscoveryFailure:
raise
except exceptions.ClientException:
# Identity service may not support discovery. In that case,
# try to determine version from auth_url
url_parts = urlparse.urlparse(auth_url)
(scheme, netloc, path, params, query, fragment) = url_parts
path = path.lower()
if path.startswith('/v3'):
v3_auth_url = auth_url
elif path.startswith('/v2'):
v2_auth_url = auth_url
else:
raise exc.CommandError('Unable to determine the Keystone '
'version to authenticate with '
'using the given auth_url.')
return v2_auth_url, v3_auth_url
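# For example (illustrative): an auth_url of 'http://keystone:5000/v2.0'
# on a server that raises a ClientException during discovery falls through
# to the path-prefix check above and yields (auth_url, None).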
def _get_keystone_session(**kwargs):
# TODO(fabgia): the heavy lifting here should be really done by Keystone.
# Unfortunately Keystone does not support a richer method to perform
# discovery and return a single viable URL. A bug against Keystone has
# been filed: https://bugs.launchpad.net/python-keystoneclient/+bug/1330677
# first create a Keystone session
cacert = kwargs.pop('cacert', None)
cert = kwargs.pop('cert', None)
key = kwargs.pop('key', None)
insecure = kwargs.pop('insecure', False)
auth_url = kwargs.pop('auth_url', None)
project_id = kwargs.pop('project_id', None)
project_name = kwargs.pop('project_name', None)
token = kwargs['token']
timeout = kwargs.get('timeout')
if insecure:
verify = False
else:
verify = cacert or True
if cert and key:
# passing cert and key together is deprecated in favour of the
# requests lib form of having the cert and key as a tuple
cert = (cert, key)
# create the keystone client session
ks_session = session.Session(verify=verify, cert=cert, timeout=timeout)
v2_auth_url, v3_auth_url = _discover_auth_versions(ks_session, auth_url)
username = kwargs.pop('username', None)
user_id = kwargs.pop('user_id', None)
user_domain_name = kwargs.pop('user_domain_name', None)
user_domain_id = kwargs.pop('user_domain_id', None)
project_domain_name = kwargs.pop('project_domain_name', None)
project_domain_id = kwargs.pop('project_domain_id', None)
use_domain = (user_domain_id or user_domain_name or
project_domain_id or project_domain_name)
use_v3 = v3_auth_url and (use_domain or (not v2_auth_url))
use_v2 = v2_auth_url and not use_domain
if use_v3 and token:
auth = v3_auth.Token(
v3_auth_url,
token=token,
project_name=project_name,
project_id=project_id,
project_domain_name=project_domain_name,
project_domain_id=project_domain_id)
elif use_v2 and token:
auth = v2_auth.Token(
v2_auth_url,
token=token,
tenant_id=project_id,
tenant_name=project_name)
elif use_v3:
# the auth_url as v3 specified
# e.g. http://no.where:5000/v3
# Keystone will return only v3 as viable option
auth = v3_auth.Password(
v3_auth_url,
username=username,
password=kwargs.pop('password', None),
user_id=user_id,
user_domain_name=user_domain_name,
user_domain_id=user_domain_id,
project_name=project_name,
project_id=project_id,
project_domain_name=project_domain_name,
project_domain_id=project_domain_id)
elif use_v2:
# the auth_url as v2 specified
# e.g. http://no.where:5000/v2.0
# Keystone will return only v2 as viable option
auth = v2_auth.Password(
v2_auth_url,
username,
kwargs.pop('password', None),
tenant_id=project_id,
tenant_name=project_name)
else:
raise exc.CommandError('Unable to determine the Keystone version '
'to authenticate with using the given '
'auth_url.')
ks_session.auth = auth
return ks_session
def _get_endpoint(ks_session, **kwargs):
"""Get an endpoint using the provided keystone session."""
# set service specific endpoint types
endpoint_type = kwargs.get('endpoint_type') or 'publicURL'
service_type = kwargs.get('service_type') or 'monitoring'
endpoint = ks_session.get_endpoint(service_type=service_type,
interface=endpoint_type,
region_name=kwargs.get('region_name'))
return endpoint
class AuthPlugin(auth.BaseAuthPlugin):
opt_names = ['tenant_id', 'region_name', 'auth_token',
'service_type', 'endpoint_type', 'cacert',
'auth_url', 'insecure', 'cert_file', 'key_file',
'cert', 'key', 'tenant_name', 'project_name',
'project_id', 'project_domain_id', 'project_domain_name',
'user_id', 'user_domain_id', 'user_domain_name',
'password', 'username', 'endpoint']
def __init__(self, auth_system=None, **kwargs):
self.opt_names.extend(self.common_opt_names)
super(AuthPlugin, self).__init__(auth_system, **kwargs)
# NOTE(sileht): backward compat
if self.opts.get('auth_token') and not self.opts.get('token'):
self.opts['token'] = self.opts.get('auth_token')
def _do_authenticate(self, http_client):
token = self.opts.get('token')
endpoint = self.opts.get('endpoint')
if not (endpoint and token):
ks_kwargs = self._get_ks_kwargs(http_timeout=http_client.timeout)
ks_session = _get_keystone_session(**ks_kwargs)
if not token:
token = lambda: ks_session.get_token()
if not endpoint:
endpoint = _get_endpoint(ks_session, **ks_kwargs)
self.opts['token'] = token
self.opts['endpoint'] = endpoint
def _get_ks_kwargs(self, http_timeout):
project_id = (self.opts.get('project_id') or
self.opts.get('tenant_id'))
project_name = (self.opts.get('project_name') or
self.opts.get('tenant_name'))
token = self.opts.get('token')
ks_kwargs = {
'username': self.opts.get('username'),
'password': self.opts.get('password'),
'user_id': self.opts.get('user_id'),
'user_domain_id': self.opts.get('user_domain_id'),
'user_domain_name': self.opts.get('user_domain_name'),
'project_id': project_id,
'project_name': project_name,
'project_domain_name': self.opts.get('project_domain_name'),
'project_domain_id': self.opts.get('project_domain_id'),
'auth_url': self.opts.get('auth_url'),
'cacert': self.opts.get('cacert'),
'cert': self.opts.get('cert'),
'key': self.opts.get('key'),
'insecure': strutils.bool_from_string(
self.opts.get('insecure')),
'endpoint_type': self.opts.get('endpoint_type'),
'service_type': self.opts.get('service_type'),
'region_name': self.opts.get('region_name'),
'timeout': http_timeout,
'token': token() if callable(token) else token,
}
return ks_kwargs
def token_and_endpoint(self, endpoint_type, service_type):
token = self.opts.get('token')
if callable(token):
token = token()
return token, self.opts.get('endpoint')
def sufficient_options(self):
"""Check if all required options are present.
:raises: AuthPluginOptionsMissing
"""
has_token = self.opts.get('token')
has_project_domain_or_tenant = (self.opts.get('project_id') or
(self.opts.get('project_name') and
(self.opts.get('user_domain_name') or
self.opts.get('user_domain_id'))) or
(self.opts.get('tenant_id') or
self.opts.get('tenant_name')))
has_credential = (self.opts.get('username')
and has_project_domain_or_tenant
and self.opts.get('password')
and self.opts.get('auth_url'))
missing = not (has_token or has_credential)
if missing:
missing_opts = []
opts = ['token', 'endpoint', 'username', 'password', 'auth_url',
'tenant_id', 'tenant_name']
for opt in opts:
if not self.opts.get(opt):
missing_opts.append(opt)
raise exceptions.AuthPluginOptionsMissing(missing_opts)
def _adjust_kwargs(kwargs):
client_kwargs = {
'username': kwargs.get('os_username'),
'password': kwargs.get('os_password'),
'tenant_id': kwargs.get('os_tenant_id'),
'tenant_name': kwargs.get('os_tenant_name'),
'auth_url': kwargs.get('os_auth_url'),
'region_name': kwargs.get('os_region_name'),
'service_type': kwargs.get('os_service_type'),
'endpoint_type': kwargs.get('os_endpoint_type'),
'insecure': kwargs.get('os_insecure'),
'cacert': kwargs.get('os_cacert'),
'cert_file': kwargs.get('os_cert'),
'key_file': kwargs.get('os_key'),
'token': kwargs.get('os_token') or kwargs.get('os_auth_token'),
'user_domain_name': kwargs.get('os_user_domain_name'),
'user_domain_id': kwargs.get('os_user_domain_id'),
'project_domain_name': kwargs.get('os_project_domain_name'),
'project_domain_id': kwargs.get('os_project_domain_id'),
}
client_kwargs.update(kwargs)
client_kwargs['endpoint_type'] = 'publicURL'
client_kwargs['token'] = (client_kwargs.get('token') or
kwargs.get('token') or
kwargs.get('auth_token'))
timeout = kwargs.get('timeout')
if timeout is not None:
timeout = int(timeout)
if timeout <= 0:
timeout = None
insecure = strutils.bool_from_string(kwargs.get('insecure'))
verify = kwargs.get('verify')
if verify is None:
if insecure:
verify = False
else:
verify = client_kwargs.get('cacert') or True
cert = client_kwargs.get('cert_file')
key = client_kwargs.get('key_file')
if cert and key:
cert = cert, key
client_kwargs.update({'verify': verify, 'cert': cert, 'timeout': timeout})
return client_kwargs
def Client(version, *args, **kwargs):
client_kwargs = _adjust_kwargs(kwargs)
from .v2.client import Client
return Client(*args, **client_kwargs)
# client_kwargs = _adjust_kwargs(kwargs)
# module = utils.import_versioned_module(version, 'client')
# client_class = getattr(module, 'Client')
# return client_class(*args, **client_kwargs)
def get_client(version, **kwargs):
"""Get an authenticated client, based on the credentials in the kwargs.
:param version: the API version to use ('1' or '2')
:param kwargs: keyword args containing credentials, either:
* session: a keystoneauth/keystoneclient session object
* service_type: The default service_type for URL discovery
* service_name: The default service_name for URL discovery
* interface: The default interface for URL discovery
(Default: public)
* region_name: The default region_name for URL discovery
* endpoint_override: Always use this endpoint URL for requests
for this ceiloclient
* auth: An auth plugin to use instead of the session one
* user_agent: The User-Agent string to set
(Default is python-ceilometer-client)
* connect_retries: the maximum number of retries that should be
attempted for connection errors
* logger: A logging object
or (DEPRECATED):
* os_auth_token: (DEPRECATED) pre-existing token to re-use,
use os_token instead
* os_token: pre-existing token to re-use
* ceilometer_url: (DEPRECATED) Ceilometer API endpoint,
use os_endpoint instead
* os_endpoint: Ceilometer API endpoint
or (DEPRECATED):
* os_username: name of user
* os_password: user's password
* os_user_id: user's id
* os_user_domain_id: the domain id of the user
* os_user_domain_name: the domain name of the user
* os_project_id: the user project id
* os_tenant_id: V2 alternative to os_project_id
* os_project_name: the user project name
* os_tenant_name: V2 alternative to os_project_name
* os_project_domain_name: domain name for the user project
* os_project_domain_id: domain id for the user project
* os_auth_url: endpoint to authenticate against
* os_cert|os_cacert: path of CA TLS certificate
* os_key: SSL private key
* os_insecure: allow insecure SSL (no cert verification)
"""
endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')
return Client(version, endpoint, **kwargs)
def get_auth_plugin(endpoint, **kwargs):
auth_plugin = AuthPlugin(
auth_url=kwargs.get('auth_url'),
service_type=kwargs.get('service_type'),
token=kwargs.get('token'),
endpoint_type=kwargs.get('endpoint_type'),
insecure=kwargs.get('insecure'),
region_name=kwargs.get('region_name'),
cacert=kwargs.get('cacert'),
tenant_id=kwargs.get('project_id') or kwargs.get('tenant_id'),
endpoint=endpoint,
username=kwargs.get('username'),
password=kwargs.get('password'),
tenant_name=kwargs.get('tenant_name') or kwargs.get('project_name'),
user_domain_name=kwargs.get('user_domain_name'),
user_domain_id=kwargs.get('user_domain_id'),
project_domain_name=kwargs.get('project_domain_name'),
project_domain_id=kwargs.get('project_domain_id')
)
return auth_plugin
LEGACY_OPTS = ('auth_plugin', 'auth_url', 'token', 'insecure', 'cacert',
'tenant_id', 'project_id', 'username', 'password',
'project_name', 'tenant_name',
'user_domain_name', 'user_domain_id',
'project_domain_name', 'project_domain_id',
'key_file', 'cert_file', 'verify', 'timeout', 'cert')
def _construct_http_client(**kwargs):
kwargs = kwargs.copy()
if kwargs.get('session') is not None:
# Drop legacy options
for opt in LEGACY_OPTS:
kwargs.pop(opt, None)
# Drop aodh_endpoint from kwargs
kwargs.pop('aodh_endpoint', None)
return SessionClient(
session=kwargs.pop('session'),
service_type=kwargs.pop('service_type', 'monitoring') or 'monitoring',
interface=kwargs.pop('interface', kwargs.pop('endpoint_type',
'publicURL')),
region_name=kwargs.pop('region_name', None),
user_agent=kwargs.pop('user_agent', 'monitoringclient'),
auth=kwargs.get('auth'),
timings=kwargs.pop('timings', None),
**kwargs)
else:
return client.BaseClient(client.HTTPClient(
auth_plugin=kwargs.get('auth_plugin'),
region_name=kwargs.get('region_name'),
endpoint_type=kwargs.get('endpoint_type'),
original_ip=kwargs.get('original_ip'),
verify=kwargs.get('verify'),
cert=kwargs.get('cert'),
timeout=kwargs.get('timeout'),
timings=kwargs.get('timings'),
keyring_saver=kwargs.get('keyring_saver'),
debug=kwargs.get('debug'),
user_agent=kwargs.get('user_agent'),
http=kwargs.get('http')
))
@contextlib.contextmanager
def record_time(times, enabled, *args):
"""Record the time of a specific action.
:param times: A list of tuples holds time data.
:type times: list
:param enabled: Whether timing is enabled.
:type enabled: bool
:param args: Other data to be stored besides time data, these args
will be joined to a string.
"""
if not enabled:
yield
else:
start = time.time()
yield
end = time.time()
times.append((' '.join(args), start, end))
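# A minimal usage sketch (hypothetical method and URL):
#
#     times = []
#     with record_time(times, True, 'GET', '/v2/alarms'):
#         pass  # perform the timed action here
#     # times now holds [('GET /v2/alarms', <start>, <end>)]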
class SessionClient(adapter.LegacyJsonAdapter):
def __init__(self, *args, **kwargs):
self.times = []
self.timings = kwargs.pop('timings', False)
super(SessionClient, self).__init__(*args, **kwargs)
def request(self, url, method, **kwargs):
kwargs.setdefault('headers', kwargs.get('headers', {}))
# NOTE(sileht): The standard call raises errors from
# keystoneauth, where we need to raise the monitoringclient errors.
raise_exc = kwargs.pop('raise_exc', True)
with record_time(self.times, self.timings, method, url):
resp, body = super(SessionClient, self).request(url,
method,
raise_exc=False,
**kwargs)
if raise_exc and resp.status_code >= 400:
print(monitoring_utils._print_resp_error(resp.json()['error']))
raise exc.from_response(resp, body)
return resp
|
nttcom/eclcli
|
eclcli/monitoring/monitoringclient/client.py
|
Python
|
apache-2.0
| 19,427
|
#!/usr/bin/python
from mininet.net import Mininet
from mininet.topo import Topo,SingleSwitchTopo
from mininet.cli import CLI
from mininet.node import UserSwitch,RemoteController
from mininet.term import makeTerm
import os, time
######Starting controller
os.system("xterm -e 'ryu-manager ~/ryu/ryu/app/openstate/forwarding_consistency_1_to_many.py'&")
######Starting mininet
mytopo=SingleSwitchTopo(4)
time.sleep(1)
print("\n********************************** HELP *********************************************")
print("\nType \"python ~/ryu/ryu/app/openstate/echo_server.py 200\" in h2's xterm")
print("Type \"python ~/ryu/ryu/app/openstate/echo_server.py 300\" in h3's xterm")
print("Type \"python ~/ryu/ryu/app/openstate/echo_server.py 400\" in h4's xterm")
print("Type \"nc 10.0.0.2 80\" in all h1's xterms\n")
print("In order to test new path selection, close and reopen netcat")
print("\nTo exit type \"ctrl+D\" or exit")
print("*************************************************************************************")
net = Mininet(topo=mytopo,switch=UserSwitch,controller=RemoteController,cleanup=True,autoSetMacs=True,listenPort=6634,autoStaticArp=True)
net.start()
h1,h2,h3,h4 = net.hosts[0], net.hosts[1], net.hosts[2], net.hosts[3]
for i in range(3):
makeTerm(h1)
makeTerm(h2)
makeTerm(h3)
makeTerm(h4)
CLI(net)
net.stop()
os.system("sudo mn -c")
os.system("kill -9 $(pidof -x ryu-manager)")
|
Tesi-Luca-Davide/ryu
|
ryu/app/openstate/playground/start_1_to_many.py
|
Python
|
apache-2.0
| 1,415
|
#!/usr/bin/env python
'''
simple rover simulator core
'''
from aircraft import Aircraft
import util, time, math
from math import degrees, radians, sin, cos, pi, asin
from rotmat import Vector3, Matrix3
class Rover(Aircraft):
'''a simple rover'''
def __init__(self,
max_speed=20,
max_accel=30,
wheelbase=0.335,
wheeltrack=0.296,
max_wheel_turn=35,
turning_circle=1.8,
skid_turn_rate=140, # degrees/sec
skid_steering=False):
Aircraft.__init__(self)
self.max_speed = max_speed
self.max_accel = max_accel
self.turning_circle = turning_circle
self.wheelbase = wheelbase
self.wheeltrack = wheeltrack
self.max_wheel_turn = max_wheel_turn
self.last_time = self.time_now
self.skid_steering = skid_steering
self.skid_turn_rate = skid_turn_rate
if self.skid_steering:
# these are taken from a 6V wild thumper with skid steering,
# with a sabertooth controller
self.max_accel = 14
self.max_speed = 4
def turn_circle(self, steering):
        '''return turning circle (diameter) in meters for a steering
        proportion (a fraction of the maximum 35 degree wheel angle)
        '''
if abs(steering) < 1.0e-6:
return 0
return self.turning_circle * sin(radians(35)) / sin(radians(steering*35))
def yaw_rate(self, steering, speed):
'''return yaw rate in degrees/second given steering_angle and speed'''
if self.skid_steering:
return steering * self.skid_turn_rate
if abs(steering) < 1.0e-6 or abs(speed) < 1.0e-6:
return 0
d = self.turn_circle(steering)
c = pi * d
t = c / speed
rate = 360.0 / t
return rate
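    # Worked example (using the default turning_circle of 1.8 m): full
    # steering (1.0) at 2.0 m/s gives a 1.8 m turn circle, circumference
    # pi * 1.8 ~= 5.65 m, a lap time of ~2.83 s, so yaw_rate ~= 127 deg/s.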
def lat_accel(self, steering_angle, speed):
'''return lateral acceleration in m/s/s'''
yaw_rate = self.yaw_rate(steering_angle, speed)
accel = radians(yaw_rate) * speed
return accel
def lat_accel2(self, steering_angle, speed):
'''return lateral acceleration in m/s/s'''
mincircle = self.wheelbase/sin(radians(35))
steer = steering_angle/35
return steer * (speed**2) * (2/mincircle)
def steering_angle(self, lat_accel, speed):
'''return steering angle to achieve the given lat_accel'''
mincircle = self.wheelbase/sin(radians(35))
steer = 0.5 * lat_accel * mincircle / (speed**2)
return steer * 35
def update(self, state):
# if in skid steering mode the steering and throttle values are used for motor1 and motor2
if self.skid_steering:
motor1 = state.steering # left motor
motor2 = state.throttle # right motor
steering = motor1 - motor2
throttle = 0.5*(motor1 + motor2)
else:
steering = state.steering
throttle = state.throttle
# how much time has passed?
t = self.time_now
delta_time = t - self.last_time
self.last_time = t
# speed in m/s in body frame
velocity_body = self.dcm.transposed() * self.velocity
# speed along x axis, +ve is forward
speed = velocity_body.x
# yaw rate in degrees/s
yaw_rate = self.yaw_rate(steering, speed)
# target speed with current throttle
target_speed = throttle * self.max_speed
# linear acceleration in m/s/s - very crude model
accel = self.max_accel * (target_speed - speed) / self.max_speed
# print('speed=%f throttle=%f steering=%f yaw_rate=%f accel=%f' % (speed, state.throttle, state.steering, yaw_rate, accel))
self.gyro = Vector3(0,0,radians(yaw_rate))
# update attitude
self.dcm.rotate(self.gyro * delta_time)
self.dcm.normalize()
# accel in body frame due to motor
accel_body = Vector3(accel, 0, 0)
# add in accel due to direction change
accel_body.y += radians(yaw_rate) * speed
# now in earth frame
accel_earth = self.dcm * accel_body
accel_earth += Vector3(0, 0, self.gravity)
# if we're on the ground, then our vertical acceleration is limited
# to zero. This effectively adds the force of the ground on the aircraft
accel_earth.z = 0
# work out acceleration as seen by the accelerometers. It sees the kinematic
# acceleration (ie. real movement), plus gravity
self.accel_body = self.dcm.transposed() * (accel_earth + Vector3(0, 0, -self.gravity))
# new velocity vector
self.velocity += accel_earth * delta_time
# new position vector
old_position = self.position.copy()
self.position += self.velocity * delta_time
# update lat/lon/altitude
self.update_position(delta_time)
if __name__ == "__main__":
r = Rover()
d1 = r.turn_circle(r.max_wheel_turn)
print("turn_circle=", d1)
steer = 0.4*35
speed = 2.65
yrate = r.yaw_rate(steer, speed)
yaccel = r.lat_accel(steer, speed)
yaccel2 = r.lat_accel2(steer, speed)
print yaccel, yaccel2
sangle = r.steering_angle(yaccel, speed)
print steer, sangle
yrate2 = degrees(yaccel / speed)
t = 360.0 / yrate2
c = speed * t
d2 = c / pi
steer2 = degrees(asin(r.wheelbase / (d2 - (r.wheeltrack/2))))
print steer, steer2
|
MagicAttacker/APM602
|
Tools/autotest/pysim/rover.py
|
Python
|
gpl-3.0
| 5,483
|
import collections
import raco.algebra
import raco.scheme as scheme
import raco.myrial.myrial_test as myrial_test
from raco import types
class ReachableTest(myrial_test.MyrialTestCase):
edge_table = collections.Counter([
(1, 2),
(2, 3),
(3, 4),
(4, 3),
(3, 5),
(4, 13),
(5, 4),
(1, 9),
(7, 1),
(6, 1),
(10, 11),
(11, 12),
(12, 10),
(13, 4),
(10, 1)])
edge_schema = scheme.Scheme([("src", types.LONG_TYPE),
("dst", types.LONG_TYPE)])
edge_key = "public:adhoc:edges"
def setUp(self):
super(ReachableTest, self).setUp()
self.db.ingest(ReachableTest.edge_key,
ReachableTest.edge_table,
ReachableTest.edge_schema)
def test_reachable(self):
with open('examples/reachable.myl') as fh:
query = fh.read()
expected = collections.Counter([
(1,),
(2,),
(3,),
(4,),
(5,),
(9,),
(13,),
])
self.check_result(query, expected, skip_json=True)
def test_multi_condition_join(self):
query = """
Edge = SCAN(public:adhoc:edges);
Symmetric = [FROM Edge AS E1, Edge AS E2
WHERE E1.src==E2.dst
AND E2.src==E1.dst
AND E1.src < E1.dst
EMIT E1.src AS src, E1.dst AS dst];
STORE(Symmetric, OUTPUT);
"""
table = ReachableTest.edge_table
expected = collections.Counter(
[(a, b) for (a, b) in table for (c, d) in table
if a == d and b == c and a < b])
self.check_result(query, expected)
def test_cross_plus_selection_becomes_join(self):
"""Test that the optimizer compiles away cross-products."""
with open('examples/reachable.myl') as fh:
query = fh.read()
def plan_contains_cross(plan):
def f(op):
if isinstance(op, raco.algebra.CrossProduct) and not \
isinstance(op.left, raco.algebra.SingletonRelation):
yield True
return any(plan.postorder(f))
statements = self.parser.parse(query)
self.processor.evaluate(statements)
lp = self.processor.get_logical_plan()
self.assertTrue(plan_contains_cross(lp))
pp = self.processor.get_physical_plan()
self.assertFalse(plan_contains_cross(pp))
|
uwescience/raco
|
raco/myrial/reachable_tests.py
|
Python
|
bsd-3-clause
| 2,583
|
from datetime import timedelta
from datetime import datetime
from collections import defaultdict
import requests
import os
import urllib
import logging
import arrow
from impactstoryanalytics.widgets.widget import Widget
from impactstoryanalytics.lib import mixpanel_export
import uservoice
logger = logging.getLogger("impactstoryanalytics.widget_api_helpers")
## Utility functions
def get_raw_dataclip_data(query_url):
#example query_url: "https://dataclips.heroku.com/feblvvoknanzuiumyiawutmqdwbo.json"
raw_data = requests.get(query_url).json()
#print raw_data
return raw_data
def perc(num, den, round_to=2):
try:
return round(100 * num / den, round_to)
except ZeroDivisionError:
return None
class Converter():
@classmethod
def from_x_y_format(cls, lines):
events = defaultdict(dict)
for line in lines:
event_name = line["name"]
new_events_dict = cls.events_dict_from_line(line)
events = cls.merge_new_events_dict(events, new_events_dict, event_name)
events_list = cls.events_list_from_dict(events)
return events_list
@classmethod
def events_dict_from_line(cls, line):
ts_values = zip(line["x"], line["y"])
events_dict = {}
for ts_value in ts_values:
timestamp, value = ts_value
events_dict[timestamp] = value
return events_dict
@classmethod
def merge_new_events_dict(cls, old_events_dict, new_events_dict, event_name):
for ts, value in new_events_dict.iteritems():
old_events_dict[ts][event_name] = value
return old_events_dict
@classmethod
def events_list_from_dict(cls, events_dict):
events_list = []
for ts in sorted(events_dict.keys()):
dict_to_add = events_dict[ts]
dict_to_add["start_iso"] = arrow.get(ts).isoformat(" ")
events_list.append(dict_to_add)
return events_list
class Keenio():
def __init__(self, queries, shared_params={}):
default_params = {
"timeframe": "this_30_days",
"interval": "daily",
"timezone": 0,
}
url_roots = {
"context" : "https://api.keen.io/3.0/projects/51df37f0897a2c7fcd000000/queries",
"production": "https://api.keen.io/3.0/projects/51d858213843314922000002/queries"
}
api_keys = {
"context" : "b915f0ca9fcbe1cc4760640adf9f09fa1d330f74c763bfd1aa867d6148f528055a3f97afc6b111e8905ef78bfe7f97d1d2dd2b7ddbb0f9ed8e586fd69d79f12f2215d06298924631d8ccfa7a12845dde94921855ae223c69ad26789dca2ec5fd26296a80af72c3a014df5554948bac8e",
"production": "69023dd079bdb913522954c0f9bb010766be7e87a543674f8ee5d3a66e9b127f5ee641546858bf2c260af4831cd2f7bba4e37c22efb4b21b57bab2a36b9e8e3eccd57db3c75114ba0f788013a08f404738535e9a7eb8a29a30592095e5347e446cf61d50d5508a624934584e17a436ba"
}
self.queries = queries
for query in self.queries:
#set in priority order, highest priority last
self.queries[query]["params"] = dict(default_params.items() + shared_params.items() + queries[query]["params"].items())
#print self.queries[query]["params"]
for query in self.queries:
self.queries[query]["url"] = url_roots[self.queries[query]["project"]]
self.queries[query]["url"] += "/" + self.queries[query]["analysis"]
self.queries[query]["url"] += "?api_key=" + api_keys[self.queries[query]["project"]]
self.queries[query]["url"] += "&" + urllib.urlencode(self.queries[query]["params"])
print self.queries[query]["url"]
self.timebins = defaultdict(dict)
def timebin_extraction_data(self, raw_data):
pans = Widget.get_time_pan_list(100)
for row_from_keen in raw_data:
iso_time = row_from_keen["keen"]["timestamp"]
time = arrow.get(str(iso_time), 'YYYY-MM-DDTHH:mm:ss')
for key in row_from_keen.keys():
if key not in ["keen", "userId"]:
pans.stomp_to_pan(time, key, row_from_keen[key])
return pans.replace_NAs_with_zeroes().as_list()
def limit_to_timeframe(self, response, query_name):
try:
timeframe = self.queries[query_name]["params"]["timeframe"]
except KeyError:
#no timeframe so no change
return response
if ("this" in timeframe):
end_index = None
end_index_int = 0
else:
end_index = -1
end_index_int = -1
if ("30_days" in timeframe):
start_index = -30 - end_index_int
elif ("7_days" in timeframe):
start_index = -7 - end_index_int
response = response[start_index:end_index]
return response
def get_raw_data(self, return_raw_response=False):
response = []
for query_name in self.queries:
print "sending a query to keenio: " + query_name
r = requests.get(self.queries[query_name]["url"])
#print r.text
raw_data = r.json()["result"]
if return_raw_response:
return self.get_raw_raw_data_dict()
if self.queries[query_name]["analysis"] == "extraction":
response = self.timebin_extraction_data(raw_data)
#keenio extraction doesn't respect timeframe so do it ourselves
response = self.limit_to_timeframe(response, query_name)
else:
for row_from_keen in raw_data:
new_row = self.create_row(row_from_keen, query_name)
self.timebins[new_row["start_iso"]].update(new_row)
if not response:
response = self.timebins_as_list()
if "this" in self.queries[self.queries.keys()[0]]["params"]["timeframe"]:
response[-1]["end_iso"] = datetime.utcnow().isoformat()
return response
def get_raw_raw_data_dict(self):
response = {}
for query_name in self.queries:
print "sending a query to keenio: " + query_name
r = requests.get(self.queries[query_name]["url"])
raw_data = r.json()["result"]
response[query_name] = raw_data
return response
def create_row(self, row_from_keen, value_name):
return {
"start_iso": row_from_keen["timeframe"]["start"],
"end_iso": row_from_keen["timeframe"]["end"],
value_name: row_from_keen["value"]
}
def timebins_as_list(self):
ret = []
for k in sorted(self.timebins.keys()):
ret.append(self.timebins[k])
return ret
@classmethod
def ungroup(cls, rows, dict_key, group_by, prepend_group_name=False):
for row in rows:
for groupDict in row[dict_key]:
key = groupDict[group_by]
if prepend_group_name:
key = group_by + "_" + str(key)
val = groupDict["result"]
row[key] = val
del row[dict_key]
return rows
class Mixpanel():
@classmethod
def get_funnel_data(cls, api, funnel, funnel_params):
logger.info("Getting funnel data for " + funnel["name"])
funnel_params["funnel_id"] = funnel["funnel_id"]
funnel_data = api.request(['funnels'], funnel_params)
#print json.dumps(funnel_data, indent=4)
logger.info("found data")
return funnel_data["data"]
@classmethod
def get_funnels(cls, api):
funnels = api.request(['funnels', 'list'], {})
return funnels
@classmethod
def get_data(cls, funnel_name=None):
api = mixpanel_export.Mixpanel(
api_key = os.getenv("MIXPANEL_API_KEY"),
api_secret = os.getenv("MIXPANEL_API_SECRET")
)
funnels = cls.get_funnels(api)
funnel_params = {
# The first date in yyyy-mm-dd format from which a user can begin the first step in the funnel. This date is inclusive.
"to_date": datetime.utcnow().isoformat()[0:10] # today
,"from_date": (datetime.utcnow() - timedelta(days=30)).isoformat()[0:10]
# The number of days each user has to complete the funnel, starting from the time they
# triggered the first step in the funnel. May not be greater than 60 days.
# Note that we will query for events past the end of to_date to look for funnel completions.
#The default value is 14.
,"length": 1
# The number of days you want your results bucketed into. The default value is 1
,"interval": 1
}
response = {}
for funnel in funnels:
if funnel_name:
if (funnel_name != funnel["name"]):
continue
response[funnel["name"]] = cls.get_funnel_data(api, funnel, funnel_params)
return response
class Uservoice():
@classmethod
def get_uservoice_owner(cls):
SUBDOMAIN_NAME = 'impactstory'
API_KEY = os.getenv("USERVOICE_API_KEY")
API_SECRET = os.getenv("USERVOICE_API_SECRET")
client = uservoice.Client(SUBDOMAIN_NAME, API_KEY, API_SECRET)
owner = client.login_as_owner()
return owner
@classmethod
def get_ticket_stats(cls):
logger.info("Getting uservoice ticket stats")
owner = cls.get_uservoice_owner()
api_response = owner.get("/api/v1/reports/agent_backlog.json")
interesting_fields = [
"without_response_count",
"waiting_for_agent_count",
"total_count",
"median_open_time"
]
ticket_dict = dict((field, 0) for field in interesting_fields)
median_open_days = []
for agent in api_response["entries"]:
for field in interesting_fields:
if field == "median_open_time":
median_open_days += [open_time/(60.0*60*24) for open_time in agent["open_times"]]
else:
try:
ticket_dict[field] += agent[field]
except KeyError:
ticket_dict[field] += 0
median_open_days.sort()
try:
median_days = median_open_days[int(len(median_open_days)/2)]
ticket_dict["median_open_days"] = round(median_days, 1)
except IndexError:
ticket_dict["median_open_days"] = 0
logger.info("Found uservoice tickets: {all} total, {user} where a user answered last".format(
all=ticket_dict["total_count"],
user=ticket_dict["waiting_for_agent_count"]))
return ticket_dict
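    # Example shape of the returned dict (illustrative numbers): note that
    # "median_open_time" stays 0 because per-agent open times are folded into
    # the derived "median_open_days" key instead.
    #   {"without_response_count": 2, "waiting_for_agent_count": 5,
    #    "total_count": 9, "median_open_time": 0, "median_open_days": 1.4}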
@classmethod
def get_ticket_details(cls):
logger.info("Getting uservoice ticket details")
owner = cls.get_uservoice_owner()
tickets = owner.get("/api/v1/tickets?state=open&per_page=100")["tickets"]
return tickets
@classmethod
def get_suggestion_counts(cls):
logger.info("Getting uservoice open suggestion count")
owner = cls.get_uservoice_owner()
suggestions_active = owner.get("/api/v1/suggestions?filter=active&per_page=1000")["suggestions"]
suggestions_inbox = owner.get("/api/v1/suggestions?filter=inbox&per_page=1000")["suggestions"]
suggestions = suggestions_active + suggestions_inbox
suggestion_dict = {}
for suggestion in suggestions:
status = "inbox"
if suggestion["status"]:
status = suggestion["status"]["name"]
suggestion_dict[status] = 1 + suggestion_dict.get(status, 0)
logger.info("Found uservoice suggestions: {total} total".format(
total=len(suggestions)))
        return suggestion_dict
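    # Example shape of the returned dict (status names are hypothetical):
    #   {"inbox": 12, "planned": 3, "completed": 7}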
@classmethod
def get_closed_suggestion_count(cls):
logger.info("Getting uservoice closed suggestion count")
owner = cls.get_uservoice_owner()
closed_suggestions = owner.get("/api/v1/suggestions?filter=closed&per_page=1000")["suggestions"]
logger.info("Found uservoice suggestions: {total} total".format(
total=len(closed_suggestions)))
        return closed_suggestions
@classmethod
def get_suggestion_details(cls):
logger.info("Getting uservoice suggestion details")
owner = cls.get_uservoice_owner()
suggestions_active = owner.get("/api/v1/suggestions?filter=active&per_page=1000")["suggestions"]
suggestions_inbox = owner.get("/api/v1/suggestions?filter=inbox&per_page=1000")["suggestions"]
suggestions = suggestions_active + suggestions_inbox
return suggestions
class Couchdb():
@classmethod
def get_view(cls, full_view_name, reduce_state=False):
logger.info("getting view from couch")
(design_doc_name, view_name) = full_view_name.split("/")
logger.info("full_view_name: " + full_view_name)
if reduce_state:
couch_query = "_design/{design_doc_name}/_view/{view_name}?reduce=true&group=true".format(
design_doc_name=design_doc_name,
view_name=view_name)
else:
couch_query = "_design/{design_doc_name}/_view/{view_name}".format(
design_doc_name=design_doc_name,
view_name=view_name)
logger.info("couch_querycouch_query: " + couch_query)
url = "/".join([
os.getenv("CLOUDANT_URL"),
os.getenv("CLOUDANT_DB"),
couch_query
])
logger.info("couchdb url: " + url)
response = requests.get(url).json()
return response["rows"]
|
Impactstory/impactstory-analytics
|
impactstoryanalytics/widgets/widget_api_helpers.py
|
Python
|
mit
| 13,794
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module defining the Django auth backend class for the Keystone API. """
import logging
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from keystoneclient import exceptions as keystone_exceptions
from openstack_auth import exceptions
from openstack_auth import user as auth_user
from openstack_auth import utils
LOG = logging.getLogger(__name__)
KEYSTONE_CLIENT_ATTR = "_keystoneclient"
class KeystoneBackend(object):
"""Django authentication backend class for use with
``django.contrib.auth``.
"""
def check_auth_expiry(self, auth_ref):
if not utils.check_token_expiration(auth_ref):
msg = _("The authentication token issued by the Identity service "
"has expired.")
LOG.warning("The authentication token issued by the Identity "
"service appears to have expired before it was "
"issued. This may indicate a problem with either your "
"server or client configuration.")
raise exceptions.KeystoneAuthException(msg)
return True
def get_user(self, user_id):
"""Returns the current user (if authenticated) based on the user ID
and session data.
Note: this required monkey-patching the ``contrib.auth`` middleware
to make the ``request`` object available to the auth backend class.
"""
if (hasattr(self, 'request') and
user_id == self.request.session["user_id"]):
token = self.request.session['token']
endpoint = self.request.session['region_endpoint']
services_region = self.request.session['services_region']
user = auth_user.create_user_from_token(self.request, token,
endpoint, services_region)
return user
else:
return None
def authenticate(self, request=None, username=None, password=None,
user_domain_name=None, auth_url=None):
"""Authenticates a user via the Keystone Identity API. """
LOG.debug('Beginning user authentication for user "%s".' % username)
insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
ca_cert = getattr(settings, "OPENSTACK_SSL_CACERT", None)
endpoint_type = getattr(
settings, 'OPENSTACK_ENDPOINT_TYPE', 'publicURL')
# keystone client v3 does not support logging in on the v2 url any more
if utils.get_keystone_version() >= 3:
auth_url = auth_url.replace('v2.0', 'v3')
keystone_client = utils.get_keystone_client()
try:
client = keystone_client.Client(
user_domain_name=user_domain_name,
username=username,
password=password,
auth_url=auth_url,
insecure=insecure,
cacert=ca_cert,
debug=settings.DEBUG)
unscoped_auth_ref = client.auth_ref
unscoped_token = auth_user.Token(auth_ref=unscoped_auth_ref)
except (keystone_exceptions.Unauthorized,
keystone_exceptions.Forbidden,
keystone_exceptions.NotFound) as exc:
msg = _('Invalid user name or password.')
LOG.debug(str(exc))
raise exceptions.KeystoneAuthException(msg)
except (keystone_exceptions.ClientException,
keystone_exceptions.AuthorizationFailure) as exc:
msg = _("An error occurred authenticating. "
"Please try again later.")
LOG.debug(str(exc))
raise exceptions.KeystoneAuthException(msg)
# Check expiry for our unscoped auth ref.
self.check_auth_expiry(unscoped_auth_ref)
# Check if token is automatically scoped to default_project
if unscoped_auth_ref.project_scoped:
auth_ref = unscoped_auth_ref
else:
# For now we list all the user's projects and iterate through.
try:
if utils.get_keystone_version() < 3:
projects = client.tenants.list()
else:
client.management_url = auth_url
projects = client.projects.list(
user=unscoped_auth_ref.user_id)
except (keystone_exceptions.ClientException,
keystone_exceptions.AuthorizationFailure) as exc:
msg = _('Unable to retrieve authorized projects.')
raise exceptions.KeystoneAuthException(msg)
# Abort if there are no projects for this user
if not projects:
msg = _('You are not authorized for any projects.')
raise exceptions.KeystoneAuthException(msg)
while projects:
project = projects.pop()
try:
client = keystone_client.Client(
tenant_id=project.id,
token=unscoped_auth_ref.auth_token,
auth_url=auth_url,
insecure=insecure,
cacert=ca_cert,
debug=settings.DEBUG)
auth_ref = client.auth_ref
break
except (keystone_exceptions.ClientException,
keystone_exceptions.AuthorizationFailure):
auth_ref = None
if auth_ref is None:
msg = _("Unable to authenticate to any available projects.")
raise exceptions.KeystoneAuthException(msg)
# Check expiry for our new scoped token.
self.check_auth_expiry(auth_ref)
# If we made it here we succeeded. Create our User!
user = auth_user.create_user_from_token(
request,
auth_user.Token(auth_ref),
client.service_catalog.url_for(endpoint_type=endpoint_type))
if request is not None:
request.session['unscoped_token'] = unscoped_token.id
request.user = user
# Support client caching to save on auth calls.
setattr(request, KEYSTONE_CLIENT_ATTR, client)
LOG.debug('Authentication completed for user "%s".' % username)
return user
def get_group_permissions(self, user, obj=None):
"""Returns an empty set since Keystone doesn't support "groups". """
# Keystone V3 added "groups". The Auth token response includes the
# roles from the user's Group assignment. It should be fine just
# returning an empty set here.
return set()
def get_all_permissions(self, user, obj=None):
"""Returns a set of permission strings that this user has through
his/her Keystone "roles".
The permissions are returned as ``"openstack.{{ role.name }}"``.
"""
if user.is_anonymous() or obj is not None:
return set()
# TODO(gabrielhurley): Integrate policy-driven RBAC
# when supported by Keystone.
role_perms = set(["openstack.roles.%s" % role['name'].lower()
for role in user.roles])
service_perms = set(["openstack.services.%s" % service['type'].lower()
for service in user.service_catalog
if user.services_region in
[endpoint.get('region', None) for endpoint
in service.get('endpoints', [])]])
return role_perms | service_perms
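    # For illustration (hypothetical role and service names): a user holding
    # the Keystone role "Member" with a "compute" service endpoint in their
    # region would get {"openstack.roles.member", "openstack.services.compute"}.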
def has_perm(self, user, perm, obj=None):
"""Returns True if the given user has the specified permission. """
if not user.is_active:
return False
return perm in self.get_all_permissions(user, obj)
def has_module_perms(self, user, app_label):
"""Returns True if user has any permissions in the given app_label.
Currently this matches for the app_label ``"openstack"``.
"""
if not user.is_active:
return False
for perm in self.get_all_permissions(user):
if perm[:perm.index('.')] == app_label:
return True
return False
|
citrix-openstack-build/django_openstack_auth
|
openstack_auth/backend.py
|
Python
|
apache-2.0
| 8,852
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import wizards
from . import reports
|
andhit-r/opnsynid-accounting-report
|
opnsynid_balance_sheet_aeroo_report/__init__.py
|
Python
|
agpl-3.0
| 1,022
|
# -*- coding: utf-8 -*-
# ***********************************
# Author: Pedro Jorge De Los Santos
# E-mail: delossantosmfq@gmail.com
# License: MIT License
# ***********************************
import numpy as np
from nusa.core import *
from nusa.model import *
from nusa.element import *
def test5():
"""
Beer & Johnston. Mechanics of materials
Problem 9.75 , pp. 589.
"""
# Input data
E = 29e6 # psi
b, h = 2.0, 4.0 #in
I = (1/12.0)*b*h**3
w = 1e3/12.0 # lb/in
L1 = 2*12 # in
L2 = 3*12 #in
P1 = -1e3 # lb
P2 = -w*L2/2.0
P3 = -w*L2/2.0
M2 = -w*L2**2/12.0
M3 = w*L2**2/12.0
# Model
m1 = BeamModel("Beam Model")
# Nodes
n1 = Node((0,0))
n2 = Node((L1,0))
n3 = Node((L1+L2,0))
# Elements
e1 = Beam((n1,n2),E,I)
e2 = Beam((n2,n3),E,I)
# Add elements
for nd in (n1,n2,n3): m1.add_node(nd)
for el in (e1,e2): m1.add_element(el)
m1.add_force(n1, (P1,))
m1.add_force(n2, (P2,))
m1.add_force(n3, (P3,))
m1.add_moment(n2, (M2,))
m1.add_moment(n3, (M3,))
m1.add_constraint(n3, ux=0, uy=0, ur=0) # fixed
m1.solve() # Solve model
# Slope and deflection in n1
print("Displacement in node 1: {0}\nSlope in node 1: {1}".format(n1.uy, n1.ur))
if __name__ == '__main__':
test5()
|
JorgeDeLosSantos/nusa
|
examples/beam/beam_5.py
|
Python
|
mit
| 1,354
|
import os
import os.path
import struct
import pytest
from file_manip_toolkit.unfman.CustomFormat import CustomFormat, interleave, deinterleave
# This is probably gonna get deleted/moved to other places
TESTDATA = bytearray.fromhex('0A 0B 0C 0D A0 B0 C0 D0')
TESTDATA2 = bytearray.fromhex('FA FB FC FD AF BF CF DF')
TESTFILE = os.path.normpath('tests/test_data/eswap/vm3.15')
@pytest.mark.parametrize('test_input, nsplit, expected', [
(TESTDATA, 1, ('0A 0C A0 C0', '0B 0D B0 D0')),
(TESTDATA, 2, ('0A 0B A0 B0', '0C 0D C0 D0')),
])
def test_deinterleave(test_input, nsplit, expected):
result = deinterleave(test_input, nsplit, 2)
assert result[0] == bytearray.fromhex(expected[0])
assert result[1] == bytearray.fromhex(expected[1])
#todo - figure out what exception the following cases are raising
#(TESTDATA, 8, 2),
#(TESTDATA, 1, 1),
@pytest.mark.parametrize('test_input, nbytes, nsplit', [
(TESTDATA, 3, 2),
(TESTDATA, 99, 2),
])
def test_deinterleave_exception(test_input, nbytes, nsplit):
with pytest.raises(struct.error):
deinterleave(test_input, nbytes, nsplit)
@pytest.mark.parametrize('test_input, nsplit, expected', [
([TESTDATA, TESTDATA2], 2, '0A 0B FA FB 0C 0D FC FD A0 B0 AF BF C0 D0 CF DF'),
([TESTDATA, TESTDATA2], 4, '0A 0B 0C 0D FA FB FC FD A0 B0 C0 D0 AF BF CF DF'),
])
def test_interleave(test_input, nsplit, expected):
result = interleave(test_input, nsplit)
assert result == bytearray.fromhex(expected)
def test_interleave_exception():
with pytest.raises(struct.error):
interleave([TESTDATA, TESTDATA2], 99)
def test_open_file():
assert CustomFormat.open_file(TESTFILE)
def test_open_file_exception(tmpdir):
fn = tmpdir.mkdir('data')
with pytest.raises(OSError):
CustomFormat.open_file(str(fn))
with pytest.raises(FileNotFoundError):
CustomFormat.open_file("vm.txt")
|
goosechooser/file-manip-toolkit
|
tests/file_manip_test.py
|
Python
|
mit
| 1,899
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Download OSM data covering the area of a slippy-map tile
#
# Features:
# * Recursive (downloads are all at z15, and merged if necessary to get
# a larger area)
# * Cached (all downloads stored in cache/z/x/y.osm)
#
# DON'T RUN THIS ON LARGE AREAS WITHOUT ASKING THE OPERATOR OF THE
# API SERVER. Currently it's limited to downloading a z-13 area or smaller
#----------------------------------------------------------------------------
# Copyright 2008, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from core.tilenames import *
from urllib import *
from OsmMerge import OsmMerge
import os
def GetOsmTileData(z, x, y, AllowSplit=False):
"""Download OSM data for the region covering a slippy-map tile"""
if x < 0 or y < 0 or z < 0 or z > 25:
print("Disallowed %d,%d at %d" % (x, y, z))
return
DownloadLevel = 15 # All primary downloads are done at a particular zoom level
MergeLevels = 2 # How many layers 'below' the download level to go
directory = 'cache/%d/%d' % (z, x)
filename = '%s/%d.osm' % (directory, y)
if not os.path.exists(directory):
os.makedirs(directory)
if z == DownloadLevel:
# Download the data
(S, W, N, E) = tileEdges(x, y, z)
# Which API to use
if 1:
URL = 'http://%s/api/0.5/map?bbox=%f,%f,%f,%f' % ('api.openstreetmap.org', W, S, E, N)
else:
URL = 'http://%s/api/0.5/*[bbox=%f,%f,%f,%f]' % ('www.informationfreeway.org', W, S, E, N)
if not os.path.exists(filename): # TODO: allow expiry of old data
print("Downloading %s\n from %s" % (filename, URL))
try:
urlretrieve(URL, filename)
print("Done")
except:
print("Error downloading " + filename)
# unlink(filename)
return
else:
print("Using cached %s" % filename)
return filename
elif z < DownloadLevel - MergeLevels:
print("Zoom %d not allowed" % z)
return
elif z < DownloadLevel:
# merge smaller tiles
filenames = []
for i in (0, 1):
for j in (0, 1):
lx = x * 2 + i
ly = y * 2 + j
lz = z + 1
print("Downloading subtile %d,%d at %d" % (x, y, z))
# download (or otherwise obtain) each subtile
filenames.append(GetOsmTileData(lz, lx, ly, AllowSplit))
# merge them together
print("Merging tiles together")
OsmMerge(filename, filenames)
return filename
else:
# use larger tile
while z > DownloadLevel:
z -= 1
x = int(x / 2)
y = int(y / 2)
return GetOsmTileData(z, x, y)
if __name__ == "__main__":
"""test mode"""
GetOsmTileData(14, 7788, 6360, True)
|
ryfx/modrana
|
modules/pyrender/OsmTileData.py
|
Python
|
gpl-3.0
| 3,691
|
"""
Representations of the structure of a static directory being served by holtz.
"""
from holtz import compat
class Directory(object):
"""
A directory being served.
"""
def __init__(self):
self.subdirectories = compat.OrderedDict()
self.entries = []
class Entry(object):
"""
An entry in a directory.
"""
def __init__(self, condition, effect):
self.condition = condition
self.effect = effect
|
lvh/holtz
|
holtz/structure.py
|
Python
|
isc
| 460
|
from pyqtcss import available_styles, get_style
if len(available_styles()) == 0:
from setup import compile_css
compile_css()
|
sommerc/pyqt-stylesheets
|
__init__.py
|
Python
|
bsd-3-clause
| 133
|
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio gr-air-modes package. It provides a library and
application for receiving Mode S / ADS-B signals from aircraft. Use
uhd_modes.py as the main application for receiving signals. cpr.py
provides an implementation of Compact Position Reporting. altitude.py
implements Gray-coded altitude decoding. Various plugins exist for SQL,
KML, and PlanePlotter-compliant SBS-1 emulation output. mlat.py provides
an experimental implementation of a multilateration solver.
'''
# ----------------------------------------------------------------
# Temporary workaround for ticket:181 (swig+python problem)
import sys
_RTLD_GLOBAL = 0
try:
from dl import RTLD_GLOBAL as _RTLD_GLOBAL
except ImportError:
try:
from DLFCN import RTLD_GLOBAL as _RTLD_GLOBAL
except ImportError:
pass
if _RTLD_GLOBAL != 0:
_dlopenflags = sys.getdlopenflags()
sys.setdlopenflags(_dlopenflags|_RTLD_GLOBAL)
# ----------------------------------------------------------------
# import swig generated symbols into the gr-air-modes namespace
from air_modes_swig import *
# import any pure python here
#
try:
import zmq
except ImportError:
raise RuntimeError("PyZMQ not found! Please install libzmq and PyZMQ to run gr-air-modes")
from rx_path import rx_path
from zmq_socket import zmq_pubsub_iface
from parse import *
from msprint import output_print
from sql import output_sql
from sbs1 import output_sbs1
from kml import output_kml, output_jsonp
from raw_server import raw_server
from radio import modes_radio
from exceptions import *
from az_map import *
from types import *
from altitude import *
from cpr import cpr_decoder
from html_template import html_template
#this is try/excepted in case the user doesn't have numpy installed
try:
from flightgear import output_flightgear
from Quaternion import *
except ImportError:
print "gr-air-modes warning: numpy+scipy not installed, FlightGear interface not supported"
pass
# ----------------------------------------------------------------
# Tail of workaround
if _RTLD_GLOBAL != 0:
sys.setdlopenflags(_dlopenflags) # Restore original flags
# ----------------------------------------------------------------
|
koppa/gr-air-modes
|
python/__init__.py
|
Python
|
gpl-3.0
| 3,039
|
from os import path
from undine.database.sqlite import SQLiteConnector
from undine.server.driver.base_driver import BaseDriver
from undine.server.information import ConfigInfo, WorkerInfo, InputInfo
from undine.server.information import TaskInfo
from undine.utils.exception import UndineException
class SQLiteDriver(BaseDriver):
_QUERY = {
'fetch': "SELECT tid, cid, iid, wid, reportable FROM task "
"WHERE state = 'R' LIMIT 1",
'config': "SELECT cid, name, config FROM config "
"WHERE cid = ?",
'worker': "SELECT wid, worker_dir, command, arguments, file_input"
" FROM worker WHERE wid = ?",
'input': "SELECT iid, name, items FROM input "
"WHERE iid = ?",
'preempt': "UPDATE task SET state ='I' WHERE tid = ?",
'done': "UPDATE task SET state ='D' WHERE tid = ?",
'cancel': "UPDATE task SET state ='C' WHERE tid = ?",
'fail': "UPDATE task SET state ='F' WHERE tid = ?",
'result': "INSERT INTO result VALUES (:tid, :content)",
'error': "INSERT INTO error VALUES (:tid, :message)",
'count': "SELECT COUNT(tid) FROM task WHERE state ='R'"
}
#
# Constructor & Destructor
#
def __init__(self, config, config_dir):
BaseDriver.__init__(self, config, config_dir)
# 1. Check input parameter is no missing
if 'db_file' not in config:
raise UndineException("'db_file' is not set in driver section")
if not path.isfile(config['db_file']):
raise UndineException("'db_file' is not exists")
self._sqlite = SQLiteConnector(config)
#
# Inherited methods
#
def fetch(self):
row = self._sqlite.fetch_a_tuple(self._QUERY['fetch'])
return TaskInfo(tid=row[0], cid=row[1], iid=row[2], wid=row[3],
reportable=bool(row[4]))
def config(self, cid):
row = self._sqlite.fetch_a_tuple(self._QUERY['config'], cid)
return ConfigInfo(cid=row[0], name=row[1], config=row[2],
dir=self._config_dir,
ext=self._config_ext)
def worker(self, wid):
row = self._sqlite.fetch_a_tuple(self._QUERY['worker'], wid)
return WorkerInfo(wid=row[0], dir=row[1], cmd=row[2],
arguments=row[3], file_input=row[4])
def inputs(self, iid):
row = self._sqlite.fetch_a_tuple(self._QUERY['input'], iid)
return InputInfo(iid=row[0], name=row[1], items=row[2])
def preempt(self, tid):
self._sqlite.execute_single_dml(self._QUERY['preempt'], tid)
return True
def done(self, tid, content, report):
queries = [self._sqlite.sql(self._QUERY['done'], tid)]
if report:
queries.append(self._sqlite.sql(self._QUERY['result'],
tid=tid, content=content))
self._sqlite.execute_multiple_dml(queries)
return True
def cancel(self, tid):
self._sqlite.execute_single_dml(self._QUERY['cancel'], tid)
def fail(self, tid, message):
queries = [self._sqlite.sql(self._QUERY['fail'], tid),
self._sqlite.sql(self._QUERY['error'],
tid=tid, message=message)]
self._sqlite.execute_multiple_dml(queries)
self._error_logging('tid({0})'.format(tid), message)
def is_ready(self):
return bool(self._sqlite.fetch_a_tuple(self._QUERY['count'])[0])
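# A minimal usage sketch, assuming a hypothetical database file and config
# directory (paths and keys are illustrative, not from the original source):
#   driver = SQLiteDriver({'db_file': '/tmp/undine.db'}, '/tmp/configs')
#   task = driver.fetch()                  # next task in state 'R'
#   driver.preempt(task.tid)               # mark it in-progress ('I')
#   driver.done(task.tid, content, True)   # record result and mark done ('D')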
|
Sungup/Undine
|
undine/server/driver/sqlite.py
|
Python
|
mit
| 3,538
|
#(C) Copyright Syd Logan 2019-2020
#(C) Copyright Thousand Smiles Foundation 2019-2020
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
'''
Unit tests for the ENT diagnosis extra application. Assumes the Django server
is up and running on the specified host and port.
'''
import unittest
import getopt, sys
import json
from tschartslib.service.serviceapi import ServiceAPI
from tschartslib.tscharts.tscharts import Login, Logout
from tschartslib.patient.patient import CreatePatient, DeletePatient
from tschartslib.clinic.clinic import CreateClinic, DeleteClinic
from tschartslib.entdiagnosis.entdiagnosis import CreateENTDiagnosis, DeleteENTDiagnosis
class CreateENTDiagnosisExtra(ServiceAPI):
def __init__(self, host, port, token):
super(CreateENTDiagnosisExtra, self).__init__()
self.setHttpMethod("POST")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._payload = {}
self.setPayload(self._payload)
self.setURL("tscharts/v1/entdiagnosisextra/")
def setENTDiagnosis(self, val):
self._payload[u"entdiagnosis"] = val
self.setPayload(self._payload)
def setName(self, val):
self._payload[u"name"] = val
self.setPayload(self._payload)
def setValue(self, val):
self._payload[u"value"] = val
self.setPayload(self._payload)
class GetENTDiagnosisExtra(ServiceAPI):
    def makeURL(self):
        hasQArgs = False
        if self._id is not None:
            base = "tscharts/v1/entdiagnosisextra/{}/".format(self._id)
        else:
            base = "tscharts/v1/entdiagnosisextra/"
        if self._entdiagnosis is not None:
            if not hasQArgs:
                base += "?"
            else:
                base += "&"
            base += "entdiagnosis={}".format(self._entdiagnosis)
            hasQArgs = True
        if self._name is not None:
            if not hasQArgs:
                base += "?"
            else:
                base += "&"
            base += "name={}".format(self._name)
            hasQArgs = True
        if self._value is not None:
            if not hasQArgs:
                base += "?"
            else:
                base += "&"
            base += "value={}".format(self._value)
            hasQArgs = True
        self.setURL(base)
def __init__(self, host, port, token):
super(GetENTDiagnosisExtra, self).__init__()
self.setHttpMethod("GET")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._entdiagnosis = None
self._name = None
self._value = None
self._id = None
self.makeURL()
def setId(self, id):
        self._id = id
self.makeURL()
def setENTDiagnosis(self, val):
self._entdiagnosis = val
self.makeURL()
def setName(self, val):
self._name = val
self.makeURL()
def setValue(self, val):
self._value = val
self.makeURL()
class UpdateENTDiagnosisExtra(ServiceAPI):
def __init__(self, host, port, token, id):
super(UpdateENTDiagnosisExtra, self).__init__()
self.setHttpMethod("PUT")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self._payload = {}
self.setPayload(self._payload)
self.setURL("tscharts/v1/entdiagnosisextra/{}/".format(id))
def setENTDiagnosis(self, val):
self._payload[u"entdiagnosis"] = val
self.setPayload(self._payload)
def setName(self, val):
self._payload[u"name"] = val
self.setPayload(self._payload)
def setValue(self, val):
self._payload[u"value"] = val
self.setPayload(self._payload)
class DeleteENTDiagnosisExtra(ServiceAPI):
def __init__(self, host, port, token, id):
super(DeleteENTDiagnosisExtra, self).__init__()
self.setHttpMethod("DELETE")
self.setHost(host)
self.setPort(port)
self.setToken(token)
self.setURL("tscharts/v1/entdiagnosisextra/{}/".format(id))
class TestTSENTDiagnosisExtra(unittest.TestCase):
def setUp(self):
login = Login(host, port, username, password)
ret = login.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("token" in ret[1])
global token
token = ret[1]["token"]
self.maxDiff = None
def testCreateENTDiagnosisExtra(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "patient@example.com"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "maria.sanchez@example.com"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateENTDiagnosis(host, port, token)
sent = x.generateRandomPayload()
x.setPatient(patientid)
x.setClinic(clinicid)
x.setComment("A comment")
x.setUsername("Gomez")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
diagnosisid = int(ret[1]["id"])
x = CreateENTDiagnosisExtra(host, port, token)
x.setENTDiagnosis(diagnosisid)
x.setName("Somethingitis")
x.setValue("75")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetENTDiagnosisExtra(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertEqual(ret[1]["name"], "Somethingitis")
self.assertEqual(ret[1]["value"], "75")
x = DeleteENTDiagnosisExtra(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetENTDiagnosisExtra(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
# non-existent entdiagnosis
x = CreateENTDiagnosisExtra(host, port, token)
x.setENTDiagnosis(7890)
x.setName("Somethingelseitis")
x.setValue("Some random text")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteENTDiagnosis(host, port, token, diagnosisid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def testDeleteENTDiagnosisExtra(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "patient@example.com"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "maria.sanchez@example.com"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateENTDiagnosis(host, port, token)
sent = x.generateRandomPayload()
x.setPatient(patientid)
x.setClinic(clinicid)
x.setComment("A comment")
x.setUsername("Gomez")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
diagnosisid = int(ret[1]["id"])
x = CreateENTDiagnosisExtra(host, port, token)
x.setENTDiagnosis(diagnosisid)
x.setName("Somethingitis")
x.setValue("75")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = DeleteENTDiagnosisExtra(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetENTDiagnosisExtra(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404) # not found
x = DeleteENTDiagnosisExtra(host, port, token, 9999)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteENTDiagnosisExtra(host, port, token, None)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteENTDiagnosisExtra(host, port, token, "")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 400)
x = DeleteENTDiagnosisExtra(host, port, token, "Hello")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = DeleteENTDiagnosis(host, port, token, diagnosisid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def testUpdateENTDiagnosisExtra(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "patient@example.com"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "maria.sanchez@example.com"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateENTDiagnosis(host, port, token)
sent = x.generateRandomPayload()
x.setComment("A comment")
x.setUsername("Gomez")
x.setPatient(patientid)
x.setClinic(clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
diagnosisid = int(ret[1]["id"])
x = CreateENTDiagnosisExtra(host, port, token)
x.setENTDiagnosis(diagnosisid)
x.setName("Somethingitis")
x.setValue("75")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
id = int(ret[1]["id"])
x = GetENTDiagnosisExtra(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("entdiagnosis" in ret[1])
diagnosisId = int(ret[1]["entdiagnosis"])
self.assertTrue(diagnosisid == diagnosisId)
self.assertTrue("name" in ret[1])
self.assertTrue(ret[1]["name"] == "Somethingitis")
self.assertTrue(ret[1]["value"] == "75")
x = UpdateENTDiagnosisExtra(host, port, token, id)
x.setValue("right is different than left")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetENTDiagnosisExtra(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("entdiagnosis" in ret[1])
diagnosisId = int(ret[1]["entdiagnosis"])
self.assertTrue(diagnosisid == diagnosisId)
self.assertTrue("name" in ret[1])
self.assertTrue(ret[1]["name"] == "Somethingitis")
self.assertTrue(ret[1]["value"] == "right is different than left")
x = UpdateENTDiagnosisExtra(host, port, token, id)
x.setName("xyz")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = GetENTDiagnosisExtra(host, port, token)
x.setId(id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("entdiagnosis" in ret[1])
diagnosisId = int(ret[1]["entdiagnosis"])
self.assertTrue(diagnosisid == diagnosisId)
self.assertTrue("name" in ret[1])
self.assertTrue(ret[1]["name"] == "xyz")
self.assertTrue(ret[1]["value"] == "right is different than left")
x = UpdateENTDiagnosisExtra(host, port, token, None)
x.setName("xyz")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = UpdateENTDiagnosisExtra(host, port, token, 6789)
x.setName("xyz")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 404)
x = UpdateENTDiagnosisExtra(host, port, token, "")
x.setName("xyz")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 400)
x = DeleteENTDiagnosisExtra(host, port, token, id)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteENTDiagnosis(host, port, token, diagnosisid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def testGetAllENTDiagnosisExtra(self):
x = CreateClinic(host, port, token, "Ensenada", "02/05/2016", "02/06/2016")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
self.assertTrue("id" in ret[1])
clinicid = int(ret[1]["id"])
data = {}
data["paternal_last"] = "3abcd1234"
data["maternal_last"] = "yyyyyy"
data["first"] = "zzzzzzz"
data["middle"] = ""
data["suffix"] = "Jr."
data["prefix"] = ""
data["dob"] = "04/01/1962"
data["gender"] = "Female"
data["street1"] = "1234 First Ave"
data["street2"] = ""
data["city"] = "Ensenada"
data["colonia"] = ""
data["state"] = u"Baja California"
data["phone1"] = "1-111-111-1111"
data["phone2"] = ""
data["email"] = "patient@example.com"
data["emergencyfullname"] = "Maria Sanchez"
data["emergencyphone"] = "1-222-222-2222"
data["emergencyemail"] = "maria.sanchez@example.com"
x = CreatePatient(host, port, token, data)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
patientid = int(ret[1]["id"])
x = CreateENTDiagnosis(host, port, token)
sent = x.generateRandomPayload()
x.setPatient(patientid)
x.setClinic(clinicid)
x.setUsername("Gomez")
x.setComment("A comment")
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
entdiagnosisid = int(ret[1]["id"])
delids = []
for i in range(0, 100):
x = CreateENTDiagnosisExtra(host, port, token)
x.setENTDiagnosis(entdiagnosisid)
x.setName("name{}".format(i))
x.setValue("value{}".format(i))
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
delids.append(ret[1]["id"])
x = GetENTDiagnosisExtra(host, port, token)
x.setENTDiagnosis(entdiagnosisid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
rtcs = ret[1]
self.assertTrue(len(rtcs) == 100)
for x in delids:
y = DeleteENTDiagnosisExtra(host, port, token, x)
ret = y.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteENTDiagnosis(host, port, token, entdiagnosisid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeleteClinic(host, port, token, clinicid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
x = DeletePatient(host, port, token, patientid)
ret = x.send(timeout=30)
self.assertEqual(ret[0], 200)
def usage():
print("entdiagnosisextra [-h host] [-p port] [-u username] [-w password]")
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], "h:p:u:w:")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
global host
host = "127.0.0.1"
global port
port = 8000
global username
username = None
global password
password = None
for o, a in opts:
if o == "-h":
host = a
elif o == "-p":
port = int(a)
elif o == "-u":
username = a
elif o == "-w":
password = a
else:
assert False, "unhandled option"
unittest.main(argv=[sys.argv[0]])
if __name__ == "__main__":
main()
|
slogan621/tscharts
|
tschartslib/entdiagnosisextra/entdiagnosisextra.py
|
Python
|
apache-2.0
| 18,912
|
# Copyright 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
import pypowervm.entities as ent
from pypowervm.tasks.hdisk import _rbd as rbd
import pypowervm.tests.test_fixtures as fx
from pypowervm.wrappers import job
class TestRbd(testtools.TestCase):
def setUp(self):
super(TestRbd, self).setUp()
entry = ent.Entry({}, ent.Element('Dummy', None), None)
self.mock_job = job.Job(entry)
self.adpt = self.useFixture(fx.AdapterFx()).adpt
@mock.patch('pypowervm.wrappers.job.Job.create_job_parameter')
@mock.patch('pypowervm.wrappers.job.Job.wrap')
@mock.patch('pypowervm.wrappers.job.Job.run_job')
@mock.patch('pypowervm.wrappers.job.Job.get_job_results_as_dict')
def test_rbd_exists(self, mock_job_res, mock_run_job, mock_job_w,
mock_job_p):
mock_job_w.return_value = self.mock_job
mock_uuid = 'uuid'
mock_name = 'pool/image'
mock_parm = mock.MagicMock()
mock_job_p.return_value = mock_parm
args = ['VirtualIOServer', mock_uuid]
kwargs = {'suffix_type': 'do', 'suffix_parm': ('RBDExists')}
mock_job_res.return_value = {'exists': 'true'}
self.assertTrue(rbd.rbd_exists(self.adpt, mock_uuid, mock_name))
self.adpt.read.assert_called_once_with(*args, **kwargs)
mock_run_job.assert_called_once_with(mock_uuid, job_parms=[mock_parm],
timeout=120)
mock_job_p.assert_any_call('name', mock_name)
self.assertEqual(1, mock_run_job.call_count)
mock_job_res.return_value = {'exists': 'false'}
mock_job_p.return_value = mock_parm
self.assertFalse(rbd.rbd_exists(self.adpt, mock_uuid, mock_name))
|
powervm/pypowervm
|
pypowervm/tests/tasks/hdisk/test_rbd.py
|
Python
|
apache-2.0
| 2,335
|
# Copyright 2015 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
import time
import unittest
import warnings
import mock
import pytest
import six
import google.api_core.exceptions
try:
from google.cloud import bigquery_storage_v1beta1
from google.cloud.bigquery_storage_v1beta1.gapic.transports import (
big_query_storage_grpc_transport,
)
except ImportError: # pragma: NO COVER
bigquery_storage_v1beta1 = None
big_query_storage_grpc_transport = None
try:
import pandas
except (ImportError, AttributeError): # pragma: NO COVER
pandas = None
try:
import pyarrow
import pyarrow.types
except ImportError: # pragma: NO COVER
pyarrow = None
try:
from tqdm import tqdm
except (ImportError, AttributeError): # pragma: NO COVER
tqdm = None
from google.cloud.bigquery.dataset import DatasetReference
def _mock_client():
from google.cloud.bigquery import client
mock_client = mock.create_autospec(client.Client)
mock_client.project = "my-project"
return mock_client
class _SchemaBase(object):
def _verify_field(self, field, r_field):
self.assertEqual(field.name, r_field["name"])
self.assertEqual(field.field_type, r_field["type"])
self.assertEqual(field.mode, r_field.get("mode", "NULLABLE"))
def _verifySchema(self, schema, resource):
r_fields = resource["schema"]["fields"]
self.assertEqual(len(schema), len(r_fields))
for field, r_field in zip(schema, r_fields):
self._verify_field(field, r_field)
class TestEncryptionConfiguration(unittest.TestCase):
KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.table import EncryptionConfiguration
return EncryptionConfiguration
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
encryption_config = self._make_one()
self.assertIsNone(encryption_config.kms_key_name)
def test_ctor_with_key(self):
encryption_config = self._make_one(kms_key_name=self.KMS_KEY_NAME)
self.assertEqual(encryption_config.kms_key_name, self.KMS_KEY_NAME)
class TestTableReference(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.table import TableReference
return TableReference
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor_defaults(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset_ref = DatasetReference("project_1", "dataset_1")
table_ref = self._make_one(dataset_ref, "table_1")
self.assertEqual(table_ref.dataset_id, dataset_ref.dataset_id)
self.assertEqual(table_ref.table_id, "table_1")
def test_to_api_repr(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset_ref = DatasetReference("project_1", "dataset_1")
table_ref = self._make_one(dataset_ref, "table_1")
resource = table_ref.to_api_repr()
self.assertEqual(
resource,
{"projectId": "project_1", "datasetId": "dataset_1", "tableId": "table_1"},
)
def test_from_api_repr(self):
from google.cloud.bigquery.dataset import DatasetReference
from google.cloud.bigquery.table import TableReference
dataset_ref = DatasetReference("project_1", "dataset_1")
expected = self._make_one(dataset_ref, "table_1")
got = TableReference.from_api_repr(
{"projectId": "project_1", "datasetId": "dataset_1", "tableId": "table_1"}
)
self.assertEqual(expected, got)
def test_from_string(self):
cls = self._get_target_class()
got = cls.from_string("string-project.string_dataset.string_table")
self.assertEqual(got.project, "string-project")
self.assertEqual(got.dataset_id, "string_dataset")
self.assertEqual(got.table_id, "string_table")
def test_from_string_w_prefix(self):
cls = self._get_target_class()
got = cls.from_string("google.com:string-project.string_dataset.string_table")
self.assertEqual(got.project, "google.com:string-project")
self.assertEqual(got.dataset_id, "string_dataset")
self.assertEqual(got.table_id, "string_table")
def test_from_string_legacy_string(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string("string-project:string_dataset.string_table")
def test_from_string_w_incorrect_prefix(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string("google.com.string-project.string_dataset.string_table")
def test_from_string_not_fully_qualified(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string("string_table")
with self.assertRaises(ValueError):
cls.from_string("string_dataset.string_table")
with self.assertRaises(ValueError):
cls.from_string("a.b.c.d")
def test_from_string_with_default_project(self):
cls = self._get_target_class()
got = cls.from_string(
"string_dataset.string_table", default_project="default-project"
)
self.assertEqual(got.project, "default-project")
self.assertEqual(got.dataset_id, "string_dataset")
self.assertEqual(got.table_id, "string_table")
def test_from_string_ignores_default_project(self):
cls = self._get_target_class()
got = cls.from_string(
"string-project.string_dataset.string_table",
default_project="default-project",
)
self.assertEqual(got.project, "string-project")
self.assertEqual(got.dataset_id, "string_dataset")
self.assertEqual(got.table_id, "string_table")
def test___eq___wrong_type(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset_ref = DatasetReference("project_1", "dataset_1")
table = self._make_one(dataset_ref, "table_1")
other = object()
self.assertNotEqual(table, other)
self.assertEqual(table, mock.ANY)
def test___eq___project_mismatch(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset = DatasetReference("project_1", "dataset_1")
other_dataset = DatasetReference("project_2", "dataset_1")
table = self._make_one(dataset, "table_1")
other = self._make_one(other_dataset, "table_1")
self.assertNotEqual(table, other)
def test___eq___dataset_mismatch(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset = DatasetReference("project_1", "dataset_1")
other_dataset = DatasetReference("project_1", "dataset_2")
table = self._make_one(dataset, "table_1")
other = self._make_one(other_dataset, "table_1")
self.assertNotEqual(table, other)
def test___eq___table_mismatch(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset = DatasetReference("project_1", "dataset_1")
table = self._make_one(dataset, "table_1")
other = self._make_one(dataset, "table_2")
self.assertNotEqual(table, other)
def test___eq___equality(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset = DatasetReference("project_1", "dataset_1")
table = self._make_one(dataset, "table_1")
other = self._make_one(dataset, "table_1")
self.assertEqual(table, other)
def test___hash__set_equality(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset = DatasetReference("project_1", "dataset_1")
table1 = self._make_one(dataset, "table1")
table2 = self._make_one(dataset, "table2")
set_one = {table1, table2}
set_two = {table1, table2}
self.assertEqual(set_one, set_two)
def test___hash__not_equals(self):
from google.cloud.bigquery.dataset import DatasetReference
dataset = DatasetReference("project_1", "dataset_1")
table1 = self._make_one(dataset, "table1")
table2 = self._make_one(dataset, "table2")
set_one = {table1}
set_two = {table2}
self.assertNotEqual(set_one, set_two)
def test___repr__(self):
dataset = DatasetReference("project1", "dataset1")
table1 = self._make_one(dataset, "table1")
expected = (
"TableReference(DatasetReference('project1', 'dataset1'), " "'table1')"
)
self.assertEqual(repr(table1), expected)
class TestTable(unittest.TestCase, _SchemaBase):
PROJECT = "prahj-ekt"
DS_ID = "dataset-name"
TABLE_NAME = "table-name"
KMS_KEY_NAME = "projects/1/locations/us/keyRings/1/cryptoKeys/1"
@staticmethod
def _get_target_class():
from google.cloud.bigquery.table import Table
return Table
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _setUpConstants(self):
import datetime
from google.cloud._helpers import UTC
self.WHEN_TS = 1437767599.006
self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(tzinfo=UTC)
self.ETAG = "ETAG"
self.TABLE_FULL_ID = "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME)
self.RESOURCE_URL = "http://example.com/path/to/resource"
self.NUM_BYTES = 12345
self.NUM_ROWS = 67
self.NUM_EST_BYTES = 1234
self.NUM_EST_ROWS = 23
def _make_resource(self):
self._setUpConstants()
return {
"creationTime": self.WHEN_TS * 1000,
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_NAME,
},
"schema": {
"fields": [
{"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "age", "type": "INTEGER", "mode": "REQUIRED"},
]
},
"etag": "ETAG",
"id": self.TABLE_FULL_ID,
"lastModifiedTime": self.WHEN_TS * 1000,
"location": "US",
"selfLink": self.RESOURCE_URL,
"numRows": self.NUM_ROWS,
"numBytes": self.NUM_BYTES,
"type": "TABLE",
"streamingBuffer": {
"estimatedRows": str(self.NUM_EST_ROWS),
"estimatedBytes": str(self.NUM_EST_BYTES),
"oldestEntryTime": self.WHEN_TS * 1000,
},
"externalDataConfiguration": {
"sourceFormat": "CSV",
"csvOptions": {"allowJaggedRows": True, "encoding": "encoding"},
},
"labels": {"x": "y"},
}
def _verifyReadonlyResourceProperties(self, table, resource):
if "creationTime" in resource:
self.assertEqual(table.created, self.WHEN)
else:
self.assertIsNone(table.created)
if "etag" in resource:
self.assertEqual(table.etag, self.ETAG)
else:
self.assertIsNone(table.etag)
if "numRows" in resource:
self.assertEqual(table.num_rows, self.NUM_ROWS)
else:
self.assertIsNone(table.num_rows)
if "numBytes" in resource:
self.assertEqual(table.num_bytes, self.NUM_BYTES)
else:
self.assertIsNone(table.num_bytes)
if "selfLink" in resource:
self.assertEqual(table.self_link, self.RESOURCE_URL)
else:
self.assertIsNone(table.self_link)
if "streamingBuffer" in resource:
self.assertEqual(table.streaming_buffer.estimated_rows, self.NUM_EST_ROWS)
self.assertEqual(table.streaming_buffer.estimated_bytes, self.NUM_EST_BYTES)
self.assertEqual(table.streaming_buffer.oldest_entry_time, self.WHEN)
else:
self.assertIsNone(table.streaming_buffer)
self.assertEqual(table.full_table_id, self.TABLE_FULL_ID)
self.assertEqual(
table.table_type, "TABLE" if "view" not in resource else "VIEW"
)
def _verifyResourceProperties(self, table, resource):
self._verifyReadonlyResourceProperties(table, resource)
if "expirationTime" in resource:
self.assertEqual(table.expires, self.EXP_TIME)
else:
self.assertIsNone(table.expires)
self.assertEqual(table.description, resource.get("description"))
self.assertEqual(table.friendly_name, resource.get("friendlyName"))
self.assertEqual(table.location, resource.get("location"))
if "view" in resource:
self.assertEqual(table.view_query, resource["view"]["query"])
self.assertEqual(
table.view_use_legacy_sql, resource["view"].get("useLegacySql", True)
)
else:
self.assertIsNone(table.view_query)
self.assertIsNone(table.view_use_legacy_sql)
if "schema" in resource:
self._verifySchema(table.schema, resource)
else:
self.assertEqual(table.schema, [])
if "externalDataConfiguration" in resource:
edc = table.external_data_configuration
self.assertEqual(edc.source_format, "CSV")
self.assertEqual(edc.options.allow_jagged_rows, True)
if "labels" in resource:
self.assertEqual(table.labels, {"x": "y"})
else:
self.assertEqual(table.labels, {})
if "encryptionConfiguration" in resource:
self.assertIsNotNone(table.encryption_configuration)
self.assertEqual(
table.encryption_configuration.kms_key_name,
resource["encryptionConfiguration"]["kmsKeyName"],
)
else:
self.assertIsNone(table.encryption_configuration)
def test_ctor(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
self.assertEqual(table.table_id, self.TABLE_NAME)
self.assertEqual(table.project, self.PROJECT)
self.assertEqual(table.dataset_id, self.DS_ID)
self.assertEqual(table.reference.table_id, self.TABLE_NAME)
self.assertEqual(table.reference.project, self.PROJECT)
self.assertEqual(table.reference.dataset_id, self.DS_ID)
self.assertEqual(
table.path,
"/projects/%s/datasets/%s/tables/%s"
% (self.PROJECT, self.DS_ID, self.TABLE_NAME),
)
self.assertEqual(table.schema, [])
self.assertIsNone(table.created)
self.assertIsNone(table.etag)
self.assertIsNone(table.modified)
self.assertIsNone(table.num_bytes)
self.assertIsNone(table.num_rows)
self.assertIsNone(table.self_link)
self.assertIsNone(table.full_table_id)
self.assertIsNone(table.table_type)
self.assertIsNone(table.description)
self.assertIsNone(table.expires)
self.assertIsNone(table.friendly_name)
self.assertIsNone(table.location)
self.assertIsNone(table.view_query)
self.assertIsNone(table.view_use_legacy_sql)
self.assertIsNone(table.external_data_configuration)
self.assertEqual(table.labels, {})
self.assertIsNone(table.encryption_configuration)
self.assertIsNone(table.time_partitioning)
self.assertIsNone(table.clustering_fields)
def test_ctor_w_schema(self):
from google.cloud.bigquery.schema import SchemaField
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
table = self._make_one(table_ref, schema=[full_name, age])
self.assertEqual(table.schema, [full_name, age])
def test_ctor_string(self):
table = self._make_one("some-project.some_dset.some_tbl")
self.assertEqual(table.project, "some-project")
self.assertEqual(table.dataset_id, "some_dset")
self.assertEqual(table.table_id, "some_tbl")
def test_ctor_tablelistitem(self):
from google.cloud.bigquery.table import Table, TableListItem
import datetime
from google.cloud._helpers import _millis, UTC
self.WHEN_TS = 1437767599.125
self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
project = "test-project"
dataset_id = "test_dataset"
table_id = "coffee_table"
resource = {
"creationTime": self.WHEN_TS * 1000,
"expirationTime": _millis(self.EXP_TIME),
"kind": "bigquery#table",
"id": "{}:{}.{}".format(project, dataset_id, table_id),
"tableReference": {
"projectId": project,
"datasetId": dataset_id,
"tableId": table_id,
},
"friendlyName": "Mahogany Coffee Table",
"type": "TABLE",
"timePartitioning": {
"type": "DAY",
"field": "mycolumn",
"expirationMs": "10000",
},
"labels": {"some-stuff": "this-is-a-label"},
"clustering": {"fields": ["string"]},
}
table_list_item = TableListItem(resource)
table = Table(table_list_item)
self.assertIsNone(table.created)
self.assertEqual(table.reference.project, project)
self.assertEqual(table.reference.dataset_id, dataset_id)
self.assertEqual(table.reference.table_id, table_id)
def test_ctor_string_wo_project_id(self):
with pytest.raises(ValueError):
# Project ID is missing.
self._make_one("some_dset.some_tbl")
def test_num_bytes_getter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
# Check with no value set.
self.assertIsNone(table.num_bytes)
num_bytes = 1337
# Check with integer value set.
table._properties = {"numBytes": num_bytes}
self.assertEqual(table.num_bytes, num_bytes)
# Check with a string value set.
table._properties = {"numBytes": str(num_bytes)}
self.assertEqual(table.num_bytes, num_bytes)
# Check with invalid int value.
table._properties = {"numBytes": "x"}
with self.assertRaises(ValueError):
getattr(table, "num_bytes")
def test_num_rows_getter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
# Check with no value set.
self.assertIsNone(table.num_rows)
num_rows = 42
# Check with integer value set.
table._properties = {"numRows": num_rows}
self.assertEqual(table.num_rows, num_rows)
# Check with a string value set.
table._properties = {"numRows": str(num_rows)}
self.assertEqual(table.num_rows, num_rows)
# Check with invalid int value.
table._properties = {"numRows": "x"}
with self.assertRaises(ValueError):
getattr(table, "num_rows")
def test_schema_setter_non_sequence(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(TypeError):
table.schema = object()
def test_schema_setter_invalid_field(self):
from google.cloud.bigquery.schema import SchemaField
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
with self.assertRaises(ValueError):
table.schema = [full_name, object()]
def test_schema_setter_valid_fields(self):
from google.cloud.bigquery.schema import SchemaField
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
table.schema = [full_name, age]
self.assertEqual(table.schema, [full_name, age])
def test_schema_setter_invalid_mapping_representation(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
full_name = {"name": "full_name", "type": "STRING", "mode": "REQUIRED"}
invalid_field = {"name": "full_name", "typeooo": "STRING", "mode": "REQUIRED"}
with self.assertRaises(Exception):
table.schema = [full_name, invalid_field]
def test_schema_setter_valid_mapping_representation(self):
from google.cloud.bigquery.schema import SchemaField
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
full_name = {"name": "full_name", "type": "STRING", "mode": "REQUIRED"}
job_status = {
"name": "is_employed",
"type": "STRUCT",
"mode": "NULLABLE",
"fields": [
{"name": "foo", "type": "DATE", "mode": "NULLABLE"},
{"name": "bar", "type": "BYTES", "mode": "REQUIRED"},
],
}
table.schema = [full_name, job_status]
expected_schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField(
"is_employed",
"STRUCT",
mode="NULLABLE",
fields=[
SchemaField("foo", "DATE", mode="NULLABLE"),
SchemaField("bar", "BYTES", mode="REQUIRED"),
],
),
]
self.assertEqual(table.schema, expected_schema)
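
    # NOTE: as exercised above, plain mappings assigned to ``schema`` are
    # coerced into ``SchemaField`` objects, including nested ``fields`` for
    # STRUCT/RECORD columns, so dict-based and SchemaField-based schemas
    # compare equal after assignment.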
def test_props_set_by_server(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _millis
CREATED = datetime.datetime(2015, 7, 29, 12, 13, 22, tzinfo=UTC)
MODIFIED = datetime.datetime(2015, 7, 29, 14, 47, 15, tzinfo=UTC)
TABLE_FULL_ID = "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME)
URL = "http://example.com/projects/%s/datasets/%s/tables/%s" % (
self.PROJECT,
self.DS_ID,
self.TABLE_NAME,
)
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table._properties["creationTime"] = _millis(CREATED)
table._properties["etag"] = "ETAG"
table._properties["lastModifiedTime"] = _millis(MODIFIED)
table._properties["numBytes"] = 12345
table._properties["numRows"] = 66
table._properties["selfLink"] = URL
table._properties["id"] = TABLE_FULL_ID
table._properties["type"] = "TABLE"
self.assertEqual(table.created, CREATED)
self.assertEqual(table.etag, "ETAG")
self.assertEqual(table.modified, MODIFIED)
self.assertEqual(table.num_bytes, 12345)
self.assertEqual(table.num_rows, 66)
self.assertEqual(table.self_link, URL)
self.assertEqual(table.full_table_id, TABLE_FULL_ID)
self.assertEqual(table.table_type, "TABLE")
def test_description_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.description = 12345
def test_description_setter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.description = "DESCRIPTION"
self.assertEqual(table.description, "DESCRIPTION")
def test_expires_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.expires = object()
def test_expires_setter(self):
import datetime
from google.cloud._helpers import UTC
WHEN = datetime.datetime(2015, 7, 28, 16, 39, tzinfo=UTC)
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.expires = WHEN
self.assertEqual(table.expires, WHEN)
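
    # NOTE: timezone-aware datetimes (as used here) are the safe choice for
    # ``expires``; the value is serialized as a millisecond "expirationTime"
    # in the API resource. Illustrative sketch (``client`` is a hypothetical
    # ``bigquery.Client``, not part of this file):
    #
    #     table.expires = datetime.datetime.now(tz=UTC) + datetime.timedelta(days=1)
    #     client.update_table(table, ["expires"])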
def test_friendly_name_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.friendly_name = 12345
def test_friendly_name_setter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.friendly_name = "FRIENDLY"
self.assertEqual(table.friendly_name, "FRIENDLY")
def test_view_query_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.view_query = 12345
def test_view_query_setter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.view_query = "select * from foo"
self.assertEqual(table.view_query, "select * from foo")
self.assertEqual(table.view_use_legacy_sql, False)
table.view_use_legacy_sql = True
self.assertEqual(table.view_use_legacy_sql, True)
def test_view_query_deleter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.view_query = "select * from foo"
del table.view_query
self.assertIsNone(table.view_query)
self.assertIsNone(table.view_use_legacy_sql)
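
    # NOTE: deleting ``view_query`` removes the whole "view" sub-resource,
    # which is why ``view_use_legacy_sql`` also reads back as None afterwards.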
def test_view_use_legacy_sql_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.view_use_legacy_sql = 12345
def test_view_use_legacy_sql_setter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.view_use_legacy_sql = True
table.view_query = "select * from foo"
self.assertEqual(table.view_use_legacy_sql, True)
self.assertEqual(table.view_query, "select * from foo")
def test_external_data_configuration_setter(self):
from google.cloud.bigquery.external_config import ExternalConfig
external_config = ExternalConfig("CSV")
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.external_data_configuration = external_config
self.assertEqual(
table.external_data_configuration.source_format,
external_config.source_format,
)
def test_external_data_configuration_setter_none(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.external_data_configuration = None
self.assertIsNone(table.external_data_configuration)
def test_external_data_configuration_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.external_data_configuration = 12345
def test_labels_update_in_place(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
        del table._properties["labels"]  # don't start with an existing dict
labels = table.labels
labels["foo"] = "bar" # update in place
self.assertEqual(table.labels, {"foo": "bar"})
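
    # NOTE: ``labels`` exposes the live dict stored in ``_properties``, so
    # in-place mutation is visible on re-read; assigning a non-dict raises
    # ValueError (see the next test).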
def test_labels_setter_bad_value(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.labels = 12345
def test_from_string(self):
cls = self._get_target_class()
got = cls.from_string("string-project.string_dataset.string_table")
self.assertEqual(got.project, "string-project")
self.assertEqual(got.dataset_id, "string_dataset")
self.assertEqual(got.table_id, "string_table")
def test_from_string_legacy_string(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string("string-project:string_dataset.string_table")
def test_from_string_not_fully_qualified(self):
cls = self._get_target_class()
with self.assertRaises(ValueError):
cls.from_string("string_dataset.string_table")
def test_from_api_repr_missing_identity(self):
self._setUpConstants()
RESOURCE = {}
klass = self._get_target_class()
with self.assertRaises(KeyError):
klass.from_api_repr(RESOURCE)
def test_from_api_repr_bare(self):
self._setUpConstants()
RESOURCE = {
"id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME),
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_NAME,
},
"type": "TABLE",
}
klass = self._get_target_class()
table = klass.from_api_repr(RESOURCE)
self.assertEqual(table.table_id, self.TABLE_NAME)
self._verifyResourceProperties(table, RESOURCE)
def test_from_api_repr_w_properties(self):
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _millis
RESOURCE = self._make_resource()
RESOURCE["view"] = {"query": "select fullname, age from person_ages"}
RESOURCE["type"] = "VIEW"
RESOURCE["location"] = "EU"
self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
RESOURCE["expirationTime"] = _millis(self.EXP_TIME)
klass = self._get_target_class()
table = klass.from_api_repr(RESOURCE)
self._verifyResourceProperties(table, RESOURCE)
def test_from_api_with_encryption(self):
self._setUpConstants()
RESOURCE = {
"id": "%s:%s.%s" % (self.PROJECT, self.DS_ID, self.TABLE_NAME),
"tableReference": {
"projectId": self.PROJECT,
"datasetId": self.DS_ID,
"tableId": self.TABLE_NAME,
},
"encryptionConfiguration": {"kmsKeyName": self.KMS_KEY_NAME},
"type": "TABLE",
}
klass = self._get_target_class()
table = klass.from_api_repr(RESOURCE)
self._verifyResourceProperties(table, RESOURCE)
def test_to_api_repr_w_custom_field(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table._properties["newAlphaProperty"] = "unreleased property"
resource = table.to_api_repr()
exp_resource = {
"tableReference": table_ref.to_api_repr(),
"labels": {},
"newAlphaProperty": "unreleased property",
}
self.assertEqual(resource, exp_resource)
def test__build_resource_w_custom_field(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table._properties["newAlphaProperty"] = "unreleased property"
resource = table._build_resource(["newAlphaProperty"])
exp_resource = {"newAlphaProperty": "unreleased property"}
self.assertEqual(resource, exp_resource)
def test__build_resource_w_custom_field_not_in__properties(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table = self._make_one(dataset.table(self.TABLE_NAME))
table.bad = "value"
with self.assertRaises(ValueError):
table._build_resource(["bad"])
def test_range_partitioning(self):
from google.cloud.bigquery.table import RangePartitioning
from google.cloud.bigquery.table import PartitionRange
table = self._make_one("proj.dset.tbl")
assert table.range_partitioning is None
table.range_partitioning = RangePartitioning(
field="col1", range_=PartitionRange(start=-512, end=1024, interval=128)
)
assert table.range_partitioning.field == "col1"
assert table.range_partitioning.range_.start == -512
assert table.range_partitioning.range_.end == 1024
assert table.range_partitioning.range_.interval == 128
table.range_partitioning = None
assert table.range_partitioning is None
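
    # Illustrative sketch of creating a range-partitioned table (hedged:
    # ``client`` is a hypothetical ``bigquery.Client`` and the schema import
    # is assumed; neither is part of this test):
    #
    #     table = Table("proj.dset.tbl", schema=[SchemaField("col1", "INT64")])
    #     table.range_partitioning = RangePartitioning(
    #         field="col1", range_=PartitionRange(start=0, end=100, interval=10)
    #     )
    #     table = client.create_table(table)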
def test_range_partitioning_w_wrong_type(self):
object_under_test = self._make_one("proj.dset.tbl")
with pytest.raises(ValueError, match="RangePartitioning"):
object_under_test.range_partitioning = object()
def test_require_partitioning_filter(self):
table = self._make_one("proj.dset.tbl")
assert table.require_partition_filter is None
table.require_partition_filter = True
assert table.require_partition_filter
table.require_partition_filter = False
assert table.require_partition_filter is not None
assert not table.require_partition_filter
table.require_partition_filter = None
assert table.require_partition_filter is None
def test_time_partitioning_getter(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table._properties["timePartitioning"] = {
"type": "DAY",
"field": "col1",
"expirationMs": "123456",
"requirePartitionFilter": False,
}
self.assertIsInstance(table.time_partitioning, TimePartitioning)
self.assertEqual(table.time_partitioning.type_, TimePartitioningType.DAY)
self.assertEqual(table.time_partitioning.field, "col1")
self.assertEqual(table.time_partitioning.expiration_ms, 123456)
with warnings.catch_warnings(record=True) as warned:
self.assertFalse(table.time_partitioning.require_partition_filter)
assert len(warned) == 1
self.assertIs(warned[0].category, PendingDeprecationWarning)
def test_time_partitioning_getter_w_none(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table._properties["timePartitioning"] = None
self.assertIsNone(table.time_partitioning)
del table._properties["timePartitioning"]
self.assertIsNone(table.time_partitioning)
def test_time_partitioning_getter_w_empty(self):
from google.cloud.bigquery.table import TimePartitioning
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
# Even though there are required properties according to the API
# specification, sometimes time partitioning is populated as an empty
# object. See internal bug 131167013.
table._properties["timePartitioning"] = {}
self.assertIsInstance(table.time_partitioning, TimePartitioning)
self.assertIsNone(table.time_partitioning.type_)
self.assertIsNone(table.time_partitioning.field)
self.assertIsNone(table.time_partitioning.expiration_ms)
with warnings.catch_warnings(record=True) as warned:
self.assertIsNone(table.time_partitioning.require_partition_filter)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_time_partitioning_setter(self):
from google.cloud.bigquery.table import TimePartitioning
from google.cloud.bigquery.table import TimePartitioningType
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
time_partitioning = TimePartitioning(type_=TimePartitioningType.DAY)
table.time_partitioning = time_partitioning
self.assertEqual(table.time_partitioning.type_, TimePartitioningType.DAY)
# Both objects point to the same properties dict
self.assertIs(
table._properties["timePartitioning"], time_partitioning._properties
)
time_partitioning.expiration_ms = 10000
# Changes to TimePartitioning object are reflected in Table properties
self.assertEqual(
table.time_partitioning.expiration_ms, time_partitioning.expiration_ms
)
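
    # NOTE: the setter stores the TimePartitioning object's own ``_properties``
    # dict in the table resource, so later mutations of the TimePartitioning
    # instance (e.g. ``expiration_ms``) are reflected in the table without
    # re-assignment, as asserted above.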
def test_time_partitioning_setter_bad_type(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with self.assertRaises(ValueError):
table.time_partitioning = {"timePartitioning": {"type": "DAY"}}
def test_time_partitioning_setter_none(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.time_partitioning = None
self.assertIsNone(table.time_partitioning)
def test_partitioning_type_setter(self):
from google.cloud.bigquery.table import TimePartitioningType
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with warnings.catch_warnings(record=True) as warned:
self.assertIsNone(table.partitioning_type)
table.partitioning_type = TimePartitioningType.DAY
self.assertEqual(table.partitioning_type, "DAY")
self.assertEqual(len(warned), 3)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_partitioning_type_setter_w_time_partitioning_set(self):
from google.cloud.bigquery.table import TimePartitioning
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.time_partitioning = TimePartitioning()
with warnings.catch_warnings(record=True) as warned:
table.partitioning_type = "NEW_FAKE_TYPE"
self.assertEqual(table.partitioning_type, "NEW_FAKE_TYPE")
self.assertEqual(len(warned), 2)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_partitioning_expiration_setter_w_time_partitioning_set(self):
from google.cloud.bigquery.table import TimePartitioning
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.time_partitioning = TimePartitioning()
with warnings.catch_warnings(record=True) as warned:
table.partition_expiration = 100000
self.assertEqual(table.partition_expiration, 100000)
self.assertEqual(len(warned), 2)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_partition_expiration_setter(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
with warnings.catch_warnings(record=True) as warned:
self.assertIsNone(table.partition_expiration)
table.partition_expiration = 100
self.assertEqual(table.partition_expiration, 100)
# defaults to 'DAY' when expiration is set and type is not set
self.assertEqual(table.partitioning_type, "DAY")
self.assertEqual(len(warned), 4)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_clustering_fields_setter_w_fields(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
fields = ["email", "phone"]
table.clustering_fields = fields
self.assertEqual(table.clustering_fields, fields)
self.assertEqual(table._properties["clustering"], {"fields": fields})
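
    # NOTE: clustering fields are serialized as {"clustering": {"fields": [...]}}
    # in the API resource; assigning None removes the key entirely (see below).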
def test_clustering_fields_setter_w_none(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
fields = ["email", "phone"]
table._properties["clustering"] = {"fields": fields}
table.clustering_fields = None
        self.assertIsNone(table.clustering_fields)
        self.assertNotIn("clustering", table._properties)
def test_clustering_fields_setter_w_none_noop(self):
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
table.clustering_fields = None
        self.assertIsNone(table.clustering_fields)
        self.assertNotIn("clustering", table._properties)
def test_encryption_configuration_setter(self):
# Previously, the EncryptionConfiguration class was in the table module, not the
# encryption_configuration module. It was moved to support models encryption.
        # This test imports from the table module to ensure that the previous location
# continues to function as an alias.
from google.cloud.bigquery.table import EncryptionConfiguration
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = self._make_one(table_ref)
encryption_configuration = EncryptionConfiguration(
kms_key_name=self.KMS_KEY_NAME
)
table.encryption_configuration = encryption_configuration
self.assertEqual(table.encryption_configuration.kms_key_name, self.KMS_KEY_NAME)
table.encryption_configuration = None
self.assertIsNone(table.encryption_configuration)
def test___repr__(self):
from google.cloud.bigquery.table import TableReference
dataset = DatasetReference("project1", "dataset1")
table1 = self._make_one(TableReference(dataset, "table1"))
expected = (
"Table(TableReference("
"DatasetReference('project1', 'dataset1'), "
"'table1'))"
)
self.assertEqual(repr(table1), expected)
class Test_row_from_mapping(unittest.TestCase, _SchemaBase):
PROJECT = "prahj-ekt"
DS_ID = "dataset-name"
TABLE_NAME = "table-name"
def _call_fut(self, mapping, schema):
from google.cloud.bigquery.table import _row_from_mapping
return _row_from_mapping(mapping, schema)
def test__row_from_mapping_wo_schema(self):
from google.cloud.bigquery.table import Table, _TABLE_HAS_NO_SCHEMA
MAPPING = {"full_name": "Phred Phlyntstone", "age": 32}
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
table = Table(table_ref)
with self.assertRaises(ValueError) as exc:
self._call_fut(MAPPING, table.schema)
self.assertEqual(exc.exception.args, (_TABLE_HAS_NO_SCHEMA,))
def test__row_from_mapping_w_invalid_schema(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
MAPPING = {
"full_name": "Phred Phlyntstone",
"age": 32,
"colors": ["red", "green"],
"bogus": "WHATEVER",
}
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
colors = SchemaField("colors", "DATETIME", mode="REPEATED")
bogus = SchemaField("joined", "STRING", mode="BOGUS")
table = Table(table_ref, schema=[full_name, age, colors, bogus])
with self.assertRaises(ValueError) as exc:
self._call_fut(MAPPING, table.schema)
self.assertIn("Unknown field mode: BOGUS", str(exc.exception))
def test__row_from_mapping_w_schema(self):
from google.cloud.bigquery.schema import SchemaField
from google.cloud.bigquery.table import Table
MAPPING = {
"full_name": "Phred Phlyntstone",
"age": 32,
"colors": ["red", "green"],
"extra": "IGNORED",
}
dataset = DatasetReference(self.PROJECT, self.DS_ID)
table_ref = dataset.table(self.TABLE_NAME)
full_name = SchemaField("full_name", "STRING", mode="REQUIRED")
age = SchemaField("age", "INTEGER", mode="REQUIRED")
colors = SchemaField("colors", "DATETIME", mode="REPEATED")
joined = SchemaField("joined", "STRING", mode="NULLABLE")
table = Table(table_ref, schema=[full_name, age, colors, joined])
self.assertEqual(
self._call_fut(MAPPING, table.schema),
("Phred Phlyntstone", 32, ["red", "green"], None),
)
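
    # NOTE: per the assertions above, _row_from_mapping emits one value per
    # schema field in schema order: REQUIRED/NULLABLE fields take the mapped
    # value (None when absent), REPEATED fields take the mapped list, and keys
    # not in the schema (like "extra") are silently ignored.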
class TestTableListItem(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.bigquery.table import TableListItem
return TableListItem
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _setUpConstants(self):
import datetime
from google.cloud._helpers import UTC
self.WHEN_TS = 1437767599.125
        self.WHEN = datetime.datetime.utcfromtimestamp(self.WHEN_TS).replace(
            tzinfo=UTC
        )
self.EXP_TIME = datetime.datetime(2015, 8, 1, 23, 59, 59, tzinfo=UTC)
def test_ctor(self):
from google.cloud._helpers import _millis
self._setUpConstants()
project = "test-project"
dataset_id = "test_dataset"
table_id = "coffee_table"
resource = {
"creationTime": self.WHEN_TS * 1000,
"expirationTime": _millis(self.EXP_TIME),
"kind": "bigquery#table",
"id": "{}:{}.{}".format(project, dataset_id, table_id),
"tableReference": {
"projectId": project,
"datasetId": dataset_id,
"tableId": table_id,
},
"friendlyName": "Mahogany Coffee Table",
"type": "TABLE",
"timePartitioning": {
"type": "DAY",
"field": "mycolumn",
"expirationMs": "10000",
},
"labels": {"some-stuff": "this-is-a-label"},
"clustering": {"fields": ["string"]},
}
table = self._make_one(resource)
self.assertEqual(table.created, self.WHEN)
self.assertEqual(table.expires, self.EXP_TIME)
self.assertEqual(table.project, project)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.table_id, table_id)
self.assertEqual(
table.full_table_id, "{}:{}.{}".format(project, dataset_id, table_id)
)
self.assertEqual(table.reference.project, project)
self.assertEqual(table.reference.dataset_id, dataset_id)
self.assertEqual(table.reference.table_id, table_id)
self.assertEqual(table.friendly_name, "Mahogany Coffee Table")
self.assertEqual(table.table_type, "TABLE")
self.assertEqual(table.time_partitioning.type_, "DAY")
self.assertEqual(table.time_partitioning.expiration_ms, 10000)
self.assertEqual(table.time_partitioning.field, "mycolumn")
self.assertEqual(table.labels["some-stuff"], "this-is-a-label")
self.assertIsNone(table.view_use_legacy_sql)
self.assertEqual(table.clustering_fields, ["string"])
with warnings.catch_warnings(record=True) as warned:
self.assertEqual(table.partitioning_type, "DAY")
self.assertEqual(table.partition_expiration, 10000)
self.assertEqual(len(warned), 2)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_ctor_view(self):
project = "test-project"
dataset_id = "test_dataset"
table_id = "just_looking"
resource = {
"kind": "bigquery#table",
"id": "{}:{}.{}".format(project, dataset_id, table_id),
"tableReference": {
"projectId": project,
"datasetId": dataset_id,
"tableId": table_id,
},
"type": "VIEW",
}
table = self._make_one(resource)
self.assertEqual(table.project, project)
self.assertEqual(table.dataset_id, dataset_id)
self.assertEqual(table.table_id, table_id)
self.assertEqual(
table.full_table_id, "{}:{}.{}".format(project, dataset_id, table_id)
)
self.assertEqual(table.reference.project, project)
self.assertEqual(table.reference.dataset_id, dataset_id)
self.assertEqual(table.reference.table_id, table_id)
self.assertEqual(table.table_type, "VIEW")
# Server default for useLegacySql is True.
self.assertTrue(table.view_use_legacy_sql)
def test_ctor_missing_properties(self):
resource = {
"tableReference": {
"projectId": "testproject",
"datasetId": "testdataset",
"tableId": "testtable",
}
}
table = self._make_one(resource)
self.assertEqual(table.project, "testproject")
self.assertEqual(table.dataset_id, "testdataset")
self.assertEqual(table.table_id, "testtable")
self.assertIsNone(table.created)
self.assertIsNone(table.expires)
self.assertIsNone(table.clustering_fields)
self.assertIsNone(table.full_table_id)
self.assertIsNone(table.friendly_name)
self.assertIsNone(table.table_type)
self.assertIsNone(table.time_partitioning)
self.assertEqual(table.labels, {})
self.assertIsNone(table.view_use_legacy_sql)
with warnings.catch_warnings(record=True) as warned:
self.assertIsNone(table.partitioning_type)
self.assertIsNone(table.partition_expiration)
self.assertEqual(len(warned), 2)
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_ctor_wo_project(self):
resource = {
"tableReference": {"datasetId": "testdataset", "tableId": "testtable"}
}
with self.assertRaises(ValueError):
self._make_one(resource)
def test_ctor_wo_dataset(self):
resource = {
"tableReference": {"projectId": "testproject", "tableId": "testtable"}
}
with self.assertRaises(ValueError):
self._make_one(resource)
def test_ctor_wo_table(self):
resource = {
"tableReference": {"projectId": "testproject", "datasetId": "testdataset"}
}
with self.assertRaises(ValueError):
self._make_one(resource)
def test_ctor_wo_reference(self):
with self.assertRaises(ValueError):
self._make_one({})
def test_labels_update_in_place(self):
resource = {
"tableReference": {
"projectId": "testproject",
"datasetId": "testdataset",
"tableId": "testtable",
}
}
table = self._make_one(resource)
labels = table.labels
labels["foo"] = "bar" # update in place
self.assertEqual(table.labels, {"foo": "bar"})
class TestRow(unittest.TestCase):
def test_row(self):
from google.cloud.bigquery.table import Row
VALUES = (1, 2, 3)
row = Row(VALUES, {"a": 0, "b": 1, "c": 2})
self.assertEqual(row.a, 1)
self.assertEqual(row[1], 2)
self.assertEqual(row["c"], 3)
self.assertEqual(len(row), 3)
self.assertEqual(row.values(), VALUES)
self.assertEqual(set(row.keys()), set({"a": 1, "b": 2, "c": 3}.keys()))
self.assertEqual(set(row.items()), set({"a": 1, "b": 2, "c": 3}.items()))
self.assertEqual(row.get("a"), 1)
self.assertEqual(row.get("d"), None)
self.assertEqual(row.get("d", ""), "")
self.assertEqual(row.get("d", default=""), "")
self.assertEqual(repr(row), "Row((1, 2, 3), {'a': 0, 'b': 1, 'c': 2})")
self.assertFalse(row != row)
self.assertFalse(row == 3)
with self.assertRaises(AttributeError):
row.z
with self.assertRaises(KeyError):
row["z"]
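
    # NOTE: Row supports attribute, index, and key access over the same
    # values tuple. Minimal sketch:
    #
    #     row = Row((1, 2), {"a": 0, "b": 1})
    #     assert row.a == row[0] == row["a"] == 1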
class Test_EmptyRowIterator(unittest.TestCase):
def _make_one(self):
from google.cloud.bigquery.table import _EmptyRowIterator
return _EmptyRowIterator()
def test_total_rows_eq_zero(self):
row_iterator = self._make_one()
self.assertEqual(row_iterator.total_rows, 0)
@mock.patch("google.cloud.bigquery.table.pyarrow", new=None)
def test_to_arrow_error_if_pyarrow_is_none(self):
row_iterator = self._make_one()
with self.assertRaises(ValueError):
row_iterator.to_arrow()
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_arrow(self):
row_iterator = self._make_one()
tbl = row_iterator.to_arrow()
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 0)
@mock.patch("google.cloud.bigquery.table.pandas", new=None)
def test_to_dataframe_error_if_pandas_is_none(self):
row_iterator = self._make_one()
with self.assertRaises(ValueError):
row_iterator.to_dataframe()
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe(self):
row_iterator = self._make_one()
df = row_iterator.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 0) # verify the number of rows
class TestRowIterator(unittest.TestCase):
def _class_under_test(self):
from google.cloud.bigquery.table import RowIterator
return RowIterator
def _make_one(
self,
client=None,
api_request=None,
path=None,
schema=None,
table=None,
**kwargs
):
from google.cloud.bigquery.table import TableReference
if client is None:
client = _mock_client()
if api_request is None:
api_request = mock.sentinel.api_request
if path is None:
path = "/foo"
if schema is None:
schema = []
if table is None:
table = TableReference.from_string("my-project.my_dataset.my_table")
return self._class_under_test()(
client, api_request, path, schema, table=table, **kwargs
)
def test_constructor(self):
from google.cloud.bigquery.table import _item_to_row
from google.cloud.bigquery.table import _rows_page_start
client = _mock_client()
path = "/some/path"
iterator = self._make_one(client=client, path=path)
# Objects are set without copying.
self.assertIs(iterator.client, client)
self.assertIs(iterator.item_to_value, _item_to_row)
self.assertIs(iterator._page_start, _rows_page_start)
        # Properties have the expected value.
self.assertEqual(iterator.extra_params, {})
self.assertEqual(iterator._items_key, "rows")
self.assertIsNone(iterator.max_results)
self.assertEqual(iterator.path, path)
self.assertFalse(iterator._started)
self.assertIsNone(iterator.total_rows)
# Changing attributes.
self.assertEqual(iterator.page_number, 0)
self.assertIsNone(iterator.next_page_token)
self.assertEqual(iterator.num_results, 0)
def test_constructor_with_table(self):
from google.cloud.bigquery.table import Table
table = Table("proj.dset.tbl")
table._properties["numRows"] = 100
iterator = self._make_one(table=table)
self.assertIs(iterator._table, table)
self.assertEqual(iterator.total_rows, 100)
def test_constructor_with_dict_schema(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
{"name": "full_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "age", "type": "INT64", "mode": "NULLABLE"},
]
iterator = self._make_one(schema=schema)
expected_schema = [
SchemaField("full_name", "STRING", mode="REQUIRED"),
SchemaField("age", "INT64", mode="NULLABLE"),
]
self.assertEqual(iterator.schema, expected_schema)
def test_iterate(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
self.assertEqual(row_iterator.num_results, 0)
rows_iter = iter(row_iterator)
val1 = six.next(rows_iter)
self.assertEqual(val1.name, "Phred Phlyntstone")
self.assertEqual(row_iterator.num_results, 1)
val2 = six.next(rows_iter)
self.assertEqual(val2.name, "Bharney Rhubble")
self.assertEqual(row_iterator.num_results, 2)
with self.assertRaises(StopIteration):
six.next(rows_iter)
api_request.assert_called_once_with(method="GET", path=path, query_params={})
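
    # NOTE: iteration is lazy: the mocked ``api_request`` is only called once
    # the iterator is consumed, and ``num_results`` advances row by row.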
def test_page_size(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(
_mock_client(), api_request, path, schema, page_size=4
)
row_iterator._get_next_page_response()
api_request.assert_called_once_with(
method="GET",
path=path,
query_params={"maxResults": row_iterator._page_size},
)
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_arrow(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField(
"child",
"RECORD",
mode="REPEATED",
fields=[
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
],
),
]
rows = [
{
"f": [
{"v": "Bharney Rhubble"},
{"v": "33"},
{
"v": [
{"v": {"f": [{"v": "Whamm-Whamm Rhubble"}, {"v": "3"}]}},
{"v": {"f": [{"v": "Hoppy"}, {"v": "1"}]}},
]
},
]
},
{
"f": [
{"v": "Wylma Phlyntstone"},
{"v": "29"},
{
"v": [
{"v": {"f": [{"v": "Bepples Phlyntstone"}, {"v": "0"}]}},
{"v": {"f": [{"v": "Dino"}, {"v": "4"}]}},
]
},
]
},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
tbl = row_iterator.to_arrow()
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 2)
# Check the schema.
self.assertEqual(tbl.schema[0].name, "name")
self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
self.assertEqual(tbl.schema[1].name, "age")
self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
child_field = tbl.schema[2]
self.assertEqual(child_field.name, "child")
self.assertTrue(pyarrow.types.is_list(child_field.type))
self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))
self.assertEqual(child_field.type.value_type[0].name, "name")
self.assertEqual(child_field.type.value_type[1].name, "age")
# Check the data.
tbl_data = tbl.to_pydict()
names = tbl_data["name"]
ages = tbl_data["age"]
children = tbl_data["child"]
self.assertEqual(names, ["Bharney Rhubble", "Wylma Phlyntstone"])
self.assertEqual(ages, [33, 29])
self.assertEqual(
children,
[
[
{"name": "Whamm-Whamm Rhubble", "age": 3},
{"name": "Hoppy", "age": 1},
],
[{"name": "Bepples Phlyntstone", "age": 0}, {"name": "Dino", "age": 4}],
],
)
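
    # NOTE: as checked above, a REPEATED RECORD column maps to an Arrow
    # list<struct<...>> type, and ``to_pydict`` round-trips nested rows into
    # plain Python lists of dicts.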
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_arrow_w_nulls(self):
from google.cloud.bigquery.schema import SchemaField
schema = [SchemaField("name", "STRING"), SchemaField("age", "INTEGER")]
rows = [
{"f": [{"v": "Donkey"}, {"v": 32}]},
{"f": [{"v": "Diddy"}, {"v": 29}]},
{"f": [{"v": "Dixie"}, {"v": None}]},
{"f": [{"v": None}, {"v": 111}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
tbl = row_iterator.to_arrow()
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 4)
# Check the schema.
self.assertEqual(tbl.schema[0].name, "name")
self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
self.assertEqual(tbl.schema[1].name, "age")
self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
# Check the data.
tbl_data = tbl.to_pydict()
names = tbl_data["name"]
ages = tbl_data["age"]
self.assertEqual(names, ["Donkey", "Diddy", "Dixie", None])
self.assertEqual(ages, [32, 29, None, 111])
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_arrow_w_unknown_type(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField("sport", "UNKNOWN_TYPE", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}, {"v": "volleyball"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}, {"v": "basketball"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
tbl = row_iterator.to_arrow()
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 2)
# Check the schema.
self.assertEqual(tbl.schema[0].name, "name")
self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
self.assertEqual(tbl.schema[1].name, "age")
self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
self.assertEqual(tbl.schema[2].name, "sport")
# Check the data.
tbl_data = tbl.to_pydict()
names = tbl_data["name"]
ages = tbl_data["age"]
sports = tbl_data["sport"]
self.assertEqual(names, ["Bharney Rhubble", "Wylma Phlyntstone"])
self.assertEqual(ages, [33, 29])
self.assertEqual(sports, ["volleyball", "basketball"])
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_arrow_w_empty_table(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
SchemaField(
"child",
"RECORD",
mode="REPEATED",
fields=[
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
],
),
]
rows = []
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
tbl = row_iterator.to_arrow()
self.assertIsInstance(tbl, pyarrow.Table)
self.assertEqual(tbl.num_rows, 0)
# Check the schema.
self.assertEqual(tbl.schema[0].name, "name")
self.assertTrue(pyarrow.types.is_string(tbl.schema[0].type))
self.assertEqual(tbl.schema[1].name, "age")
self.assertTrue(pyarrow.types.is_int64(tbl.schema[1].type))
child_field = tbl.schema[2]
self.assertEqual(child_field.name, "child")
self.assertTrue(pyarrow.types.is_list(child_field.type))
self.assertTrue(pyarrow.types.is_struct(child_field.type.value_type))
self.assertEqual(child_field.type.value_type[0].name, "name")
self.assertEqual(child_field.type.value_type[1].name, "age")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_arrow_w_bqstorage(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
from google.cloud.bigquery_storage_v1beta1 import reader
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.transport = mock.create_autospec(
big_query_storage_grpc_transport.BigQueryStorageGrpcTransport
)
streams = [
            # Use two streams so we can check that frames are read from each stream.
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
]
session = bigquery_storage_v1beta1.types.ReadSession(streams=streams)
arrow_schema = pyarrow.schema(
[
pyarrow.field("colA", pyarrow.int64()),
# Not alphabetical to test column order.
pyarrow.field("colC", pyarrow.float64()),
pyarrow.field("colB", pyarrow.string()),
]
)
session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
bqstorage_client.create_read_session.return_value = session
mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
bqstorage_client.read_rows.return_value = mock_rowstream
mock_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_rowstream.rows.return_value = mock_rows
expected_num_rows = 2
expected_num_columns = 3
page_items = [
pyarrow.array([1, -1]),
pyarrow.array([2.0, 4.0]),
pyarrow.array(["abc", "def"]),
]
mock_page = mock.create_autospec(reader.ReadRowsPage)
mock_page.to_arrow.return_value = pyarrow.RecordBatch.from_arrays(
page_items, arrow_schema
)
mock_pages = (mock_page, mock_page, mock_page)
type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
schema = [
schema.SchemaField("colA", "INTEGER"),
schema.SchemaField("colC", "FLOAT"),
schema.SchemaField("colB", "STRING"),
]
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
schema,
table=mut.TableReference.from_string("proj.dset.tbl"),
selected_fields=schema,
)
actual_tbl = row_iterator.to_arrow(bqstorage_client=bqstorage_client)
# Are the columns in the expected order?
self.assertEqual(actual_tbl.num_columns, expected_num_columns)
self.assertEqual(actual_tbl.schema[0].name, "colA")
self.assertEqual(actual_tbl.schema[1].name, "colC")
self.assertEqual(actual_tbl.schema[2].name, "colB")
        # Do we have the expected number of rows?
total_pages = len(streams) * len(mock_pages)
total_rows = expected_num_rows * total_pages
self.assertEqual(actual_tbl.num_rows, total_rows)
# Don't close the client if it was passed in.
bqstorage_client.transport.channel.close.assert_not_called()
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_arrow_w_bqstorage_creates_client(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
mock_client = _mock_client()
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.transport = mock.create_autospec(
big_query_storage_grpc_transport.BigQueryStorageGrpcTransport
)
mock_client._create_bqstorage_client.return_value = bqstorage_client
session = bigquery_storage_v1beta1.types.ReadSession()
bqstorage_client.create_read_session.return_value = session
row_iterator = mut.RowIterator(
mock_client,
None, # api_request: ignored
None, # path: ignored
[
schema.SchemaField("colA", "STRING"),
schema.SchemaField("colC", "STRING"),
schema.SchemaField("colB", "STRING"),
],
table=mut.TableReference.from_string("proj.dset.tbl"),
)
row_iterator.to_arrow(create_bqstorage_client=True)
mock_client._create_bqstorage_client.assert_called_once()
bqstorage_client.transport.channel.close.assert_called_once()
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_arrow_w_bqstorage_no_streams(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession()
arrow_schema = pyarrow.schema(
[
pyarrow.field("colA", pyarrow.string()),
# Not alphabetical to test column order.
pyarrow.field("colC", pyarrow.string()),
pyarrow.field("colB", pyarrow.string()),
]
)
session.arrow_schema.serialized_schema = arrow_schema.serialize().to_pybytes()
bqstorage_client.create_read_session.return_value = session
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
[
schema.SchemaField("colA", "STRING"),
schema.SchemaField("colC", "STRING"),
schema.SchemaField("colB", "STRING"),
],
table=mut.TableReference.from_string("proj.dset.tbl"),
)
actual_table = row_iterator.to_arrow(bqstorage_client=bqstorage_client)
self.assertEqual(actual_table.num_columns, 3)
self.assertEqual(actual_table.num_rows, 0)
self.assertEqual(actual_table.schema[0].name, "colA")
self.assertEqual(actual_table.schema[1].name, "colC")
self.assertEqual(actual_table.schema[2].name, "colB")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
@unittest.skipIf(tqdm is None, "Requires `tqdm`")
@mock.patch("tqdm.tqdm_gui")
@mock.patch("tqdm.tqdm_notebook")
@mock.patch("tqdm.tqdm")
def test_to_arrow_progress_bar(self, tqdm_mock, tqdm_notebook_mock, tqdm_gui_mock):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
progress_bars = (
("tqdm", tqdm_mock),
("tqdm_notebook", tqdm_notebook_mock),
("tqdm_gui", tqdm_gui_mock),
)
for progress_bar_type, progress_bar_mock in progress_bars:
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
tbl = row_iterator.to_arrow(progress_bar_type=progress_bar_type)
progress_bar_mock.assert_called()
progress_bar_mock().update.assert_called()
progress_bar_mock().close.assert_called_once()
self.assertEqual(tbl.num_rows, 4)
@mock.patch("google.cloud.bigquery.table.pyarrow", new=None)
def test_to_arrow_w_pyarrow_none(self):
schema = []
rows = []
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
with self.assertRaises(ValueError):
row_iterator.to_arrow()
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_iterable(self):
from google.cloud.bigquery.schema import SchemaField
import types
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
path = "/foo"
api_request = mock.Mock(
side_effect=[
{
"rows": [{"f": [{"v": "Bengt"}, {"v": "32"}]}],
"pageToken": "NEXTPAGE",
},
{"rows": [{"f": [{"v": "Sven"}, {"v": "33"}]}]},
]
)
row_iterator = self._make_one(
_mock_client(), api_request, path, schema, page_size=1, max_results=5
)
dfs = row_iterator.to_dataframe_iterable()
self.assertIsInstance(dfs, types.GeneratorType)
df_1 = next(dfs)
self.assertIsInstance(df_1, pandas.DataFrame)
self.assertEqual(df_1.name.dtype.name, "object")
self.assertEqual(df_1.age.dtype.name, "int64")
self.assertEqual(len(df_1), 1) # verify the number of rows
self.assertEqual(
df_1["name"][0], "Bengt"
) # verify the first value of 'name' column
self.assertEqual(df_1["age"][0], 32) # verify the first value of 'age' column
df_2 = next(dfs)
self.assertEqual(len(df_2), 1) # verify the number of rows
self.assertEqual(df_2["name"][0], "Sven")
self.assertEqual(df_2["age"][0], 33)
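
    # Illustrative page-wise processing sketch (hedged: ``row_iterator`` would
    # come from a real query/list_rows call, and ``process`` is a hypothetical
    # per-page handler, neither part of this test):
    #
    #     for page_df in row_iterator.to_dataframe_iterable():
    #         process(page_df)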
@mock.patch("google.cloud.bigquery.table.pandas", new=None)
def test_to_dataframe_iterable_error_if_pandas_is_none(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
with pytest.raises(ValueError, match="pandas"):
row_iterator.to_dataframe_iterable()
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
df = row_iterator.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 4) # verify the number of rows
self.assertEqual(list(df), ["name", "age"]) # verify the column names
self.assertEqual(df.name.dtype.name, "object")
self.assertEqual(df.age.dtype.name, "int64")
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(tqdm is None, "Requires `tqdm`")
@mock.patch("tqdm.tqdm_gui")
@mock.patch("tqdm.tqdm_notebook")
@mock.patch("tqdm.tqdm")
def test_to_dataframe_progress_bar(
self, tqdm_mock, tqdm_notebook_mock, tqdm_gui_mock
):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
progress_bars = (
("tqdm", tqdm_mock),
("tqdm_notebook", tqdm_notebook_mock),
("tqdm_gui", tqdm_gui_mock),
)
for progress_bar_type, progress_bar_mock in progress_bars:
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
df = row_iterator.to_dataframe(progress_bar_type=progress_bar_type)
progress_bar_mock.assert_called()
progress_bar_mock().update.assert_called()
progress_bar_mock().close.assert_called_once()
self.assertEqual(len(df), 4)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@mock.patch("google.cloud.bigquery.table.tqdm", new=None)
def test_to_dataframe_no_tqdm_no_progress_bar(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
with warnings.catch_warnings(record=True) as warned:
df = row_iterator.to_dataframe()
self.assertEqual(len(warned), 0)
self.assertEqual(len(df), 4)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@mock.patch("google.cloud.bigquery.table.tqdm", new=None)
def test_to_dataframe_no_tqdm(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
with warnings.catch_warnings(record=True) as warned:
df = row_iterator.to_dataframe(progress_bar_type="tqdm")
self.assertEqual(len(warned), 1)
for warning in warned:
self.assertIs(warning.category, UserWarning)
# Even though the progress bar won't show, downloading the dataframe
# should still work.
self.assertEqual(len(df), 4)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(tqdm is None, "Requires `tqdm`")
@mock.patch("tqdm.tqdm_gui", new=None) # will raise TypeError on call
@mock.patch("tqdm.tqdm_notebook", new=None) # will raise TypeError on call
@mock.patch("tqdm.tqdm", new=None) # will raise TypeError on call
def test_to_dataframe_tqdm_error(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
for progress_bar_type in ("tqdm", "tqdm_notebook", "tqdm_gui"):
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
with warnings.catch_warnings(record=True) as warned:
df = row_iterator.to_dataframe(progress_bar_type=progress_bar_type)
self.assertEqual(len(df), 4) # all should be well
# Warn that a progress bar was requested, but creating the tqdm
# progress bar failed.
for warning in warned:
self.assertIs(warning.category, UserWarning)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_w_empty_results(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
api_request = mock.Mock(return_value={"rows": []})
row_iterator = self._make_one(_mock_client(), api_request, schema=schema)
df = row_iterator.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 0) # verify the number of rows
self.assertEqual(list(df), ["name", "age"]) # verify the column names
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_logs_tabledata_list(self):
from google.cloud.bigquery.table import Table
mock_logger = mock.create_autospec(logging.Logger)
api_request = mock.Mock(return_value={"rows": []})
row_iterator = self._make_one(
_mock_client(), api_request, table=Table("debug-proj.debug_dset.debug_tbl")
)
with mock.patch("google.cloud.bigquery.table._LOGGER", mock_logger):
row_iterator.to_dataframe()
mock_logger.debug.assert_any_call(
"Started reading table 'debug-proj.debug_dset.debug_tbl' with tabledata.list."
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_w_various_types_nullable(self):
import datetime
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("start_timestamp", "TIMESTAMP"),
SchemaField("seconds", "INT64"),
SchemaField("miles", "FLOAT64"),
SchemaField("payment_type", "STRING"),
SchemaField("complete", "BOOL"),
SchemaField("date", "DATE"),
]
row_data = [
[None, None, None, None, None, None],
["1.4338368E9", "420", "1.1", "Cash", "true", "1999-12-01"],
["1.3878117E9", "2580", "17.7", "Cash", "false", "1953-06-14"],
["1.3855653E9", "2280", "4.4", "Credit", "true", "1981-11-04"],
]
rows = [{"f": [{"v": field} for field in row]} for row in row_data]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
df = row_iterator.to_dataframe()
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 4) # verify the number of rows
exp_columns = [field.name for field in schema]
self.assertEqual(list(df), exp_columns) # verify the column names
for index, row in df.iterrows():
if index == 0:
self.assertTrue(row.isnull().all())
else:
self.assertIsInstance(row.start_timestamp, pandas.Timestamp)
self.assertIsInstance(row.seconds, float)
self.assertIsInstance(row.payment_type, str)
self.assertIsInstance(row.complete, bool)
self.assertIsInstance(row.date, datetime.date)
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_column_dtypes(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("start_timestamp", "TIMESTAMP"),
SchemaField("seconds", "INT64"),
SchemaField("miles", "FLOAT64"),
SchemaField("km", "FLOAT64"),
SchemaField("payment_type", "STRING"),
SchemaField("complete", "BOOL"),
SchemaField("date", "DATE"),
]
row_data = [
["1.4338368E9", "420", "1.1", "1.77", "Cash", "true", "1999-12-01"],
["1.3878117E9", "2580", "17.7", "28.5", "Cash", "false", "1953-06-14"],
["1.3855653E9", "2280", "4.4", "7.1", "Credit", "true", "1981-11-04"],
]
rows = [{"f": [{"v": field} for field in row]} for row in row_data]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
df = row_iterator.to_dataframe(dtypes={"km": "float16"})
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 3) # verify the number of rows
exp_columns = [field.name for field in schema]
self.assertEqual(list(df), exp_columns) # verify the column names
self.assertEqual(df.start_timestamp.dtype.name, "datetime64[ns, UTC]")
self.assertEqual(df.seconds.dtype.name, "int64")
self.assertEqual(df.miles.dtype.name, "float64")
self.assertEqual(df.km.dtype.name, "float16")
self.assertEqual(df.payment_type.dtype.name, "object")
self.assertEqual(df.complete.dtype.name, "bool")
self.assertEqual(df.date.dtype.name, "object")
@mock.patch("google.cloud.bigquery.table.pandas", new=None)
def test_to_dataframe_error_if_pandas_is_none(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = self._make_one(_mock_client(), api_request, path, schema)
with self.assertRaises(ValueError):
row_iterator.to_dataframe()
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_max_results_w_bqstorage_warning(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
bqstorage_client = mock.Mock()
row_iterator = self._make_one(
client=_mock_client(),
api_request=api_request,
path=path,
schema=schema,
max_results=42,
)
with warnings.catch_warnings(record=True) as warned:
row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
matches = [
warning
for warning in warned
if warning.category is UserWarning
and "cannot use bqstorage_client" in str(warning).lower()
and "tabledata.list" in str(warning)
]
self.assertEqual(len(matches), 1, msg="User warning was not emitted.")
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_max_results_w_create_bqstorage_warning(self):
from google.cloud.bigquery.schema import SchemaField
schema = [
SchemaField("name", "STRING", mode="REQUIRED"),
SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
mock_client = _mock_client()
row_iterator = self._make_one(
client=mock_client,
api_request=api_request,
path=path,
schema=schema,
max_results=42,
)
with warnings.catch_warnings(record=True) as warned:
row_iterator.to_dataframe(create_bqstorage_client=True)
matches = [
warning
for warning in warned
if warning.category is UserWarning
and "cannot use bqstorage_client" in str(warning).lower()
and "tabledata.list" in str(warning)
]
self.assertEqual(len(matches), 1, msg="User warning was not emitted.")
mock_client._create_bqstorage_client.assert_not_called()
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_creates_client(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
mock_client = _mock_client()
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.transport = mock.create_autospec(
big_query_storage_grpc_transport.BigQueryStorageGrpcTransport
)
mock_client._create_bqstorage_client.return_value = bqstorage_client
session = bigquery_storage_v1beta1.types.ReadSession()
bqstorage_client.create_read_session.return_value = session
row_iterator = mut.RowIterator(
mock_client,
None, # api_request: ignored
None, # path: ignored
[
schema.SchemaField("colA", "STRING"),
schema.SchemaField("colC", "STRING"),
schema.SchemaField("colB", "STRING"),
],
table=mut.TableReference.from_string("proj.dset.tbl"),
)
row_iterator.to_dataframe(create_bqstorage_client=True)
mock_client._create_bqstorage_client.assert_called_once()
bqstorage_client.transport.channel.close.assert_called_once()
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_no_streams(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession()
bqstorage_client.create_read_session.return_value = session
row_iterator = mut.RowIterator(
_mock_client(),
api_request=None,
path=None,
schema=[
schema.SchemaField("colA", "IGNORED"),
schema.SchemaField("colC", "IGNORED"),
schema.SchemaField("colB", "IGNORED"),
],
table=mut.TableReference.from_string("proj.dset.tbl"),
)
got = row_iterator.to_dataframe(bqstorage_client)
column_names = ["colA", "colC", "colB"]
self.assertEqual(list(got), column_names)
self.assertTrue(got.empty)
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_dataframe_w_bqstorage_logs_session(self):
from google.cloud.bigquery.table import Table
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession()
session.name = "projects/test-proj/locations/us/sessions/SOMESESSION"
bqstorage_client.create_read_session.return_value = session
mock_logger = mock.create_autospec(logging.Logger)
row_iterator = self._make_one(
_mock_client(), table=Table("debug-proj.debug_dset.debug_tbl")
)
with mock.patch("google.cloud.bigquery._pandas_helpers._LOGGER", mock_logger):
row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
mock_logger.debug.assert_any_call(
"Started reading table 'debug-proj.debug_dset.debug_tbl' "
"with BQ Storage API session 'projects/test-proj/locations/us/sessions/SOMESESSION'."
)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_dataframe_w_bqstorage_empty_streams(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
from google.cloud.bigquery_storage_v1beta1 import reader
arrow_fields = [
pyarrow.field("colA", pyarrow.int64()),
# Not alphabetical to test column order.
pyarrow.field("colC", pyarrow.float64()),
pyarrow.field("colB", pyarrow.utf8()),
]
arrow_schema = pyarrow.schema(arrow_fields)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession(
streams=[{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"}],
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
)
bqstorage_client.create_read_session.return_value = session
mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
bqstorage_client.read_rows.return_value = mock_rowstream
mock_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_rowstream.rows.return_value = mock_rows
mock_pages = mock.PropertyMock(return_value=())
type(mock_rows).pages = mock_pages
schema = [
schema.SchemaField("colA", "IGNORED"),
schema.SchemaField("colC", "IGNORED"),
schema.SchemaField("colB", "IGNORED"),
]
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
schema,
table=mut.TableReference.from_string("proj.dset.tbl"),
selected_fields=schema,
)
got = row_iterator.to_dataframe(bqstorage_client)
column_names = ["colA", "colC", "colB"]
self.assertEqual(list(got), column_names)
self.assertTrue(got.empty)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_dataframe_w_bqstorage_nonempty(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
from google.cloud.bigquery_storage_v1beta1 import reader
arrow_fields = [
pyarrow.field("colA", pyarrow.int64()),
# Not alphabetical to test column order.
pyarrow.field("colC", pyarrow.float64()),
pyarrow.field("colB", pyarrow.utf8()),
]
arrow_schema = pyarrow.schema(arrow_fields)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.transport = mock.create_autospec(
big_query_storage_grpc_transport.BigQueryStorageGrpcTransport
)
streams = [
            # Use two streams so we can check that frames are read from each stream.
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
]
session = bigquery_storage_v1beta1.types.ReadSession(
streams=streams,
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
)
bqstorage_client.create_read_session.return_value = session
mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
bqstorage_client.read_rows.return_value = mock_rowstream
mock_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_rowstream.rows.return_value = mock_rows
page_items = [
{"colA": 1, "colB": "abc", "colC": 2.0},
{"colA": -1, "colB": "def", "colC": 4.0},
]
mock_page = mock.create_autospec(reader.ReadRowsPage)
mock_page.to_dataframe.return_value = pandas.DataFrame(
page_items, columns=["colA", "colB", "colC"]
)
mock_pages = (mock_page, mock_page, mock_page)
type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
schema = [
schema.SchemaField("colA", "IGNORED"),
schema.SchemaField("colC", "IGNORED"),
schema.SchemaField("colB", "IGNORED"),
]
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
schema,
table=mut.TableReference.from_string("proj.dset.tbl"),
selected_fields=schema,
)
got = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
# Are the columns in the expected order?
column_names = ["colA", "colC", "colB"]
self.assertEqual(list(got), column_names)
# Have expected number of rows?
total_pages = len(streams) * len(mock_pages)
total_rows = len(page_items) * total_pages
self.assertEqual(len(got.index), total_rows)
# Don't close the client if it was passed in.
bqstorage_client.transport.channel.close.assert_not_called()
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_dataframe_w_bqstorage_multiple_streams_return_unique_index(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
from google.cloud.bigquery_storage_v1beta1 import reader
arrow_fields = [pyarrow.field("colA", pyarrow.int64())]
arrow_schema = pyarrow.schema(arrow_fields)
streams = [
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
]
session = bigquery_storage_v1beta1.types.ReadSession(
streams=streams,
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.create_read_session.return_value = session
mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
bqstorage_client.read_rows.return_value = mock_rowstream
mock_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_rowstream.rows.return_value = mock_rows
page_data_frame = pandas.DataFrame(
[{"colA": 1}, {"colA": -1}], columns=["colA"]
)
mock_page = mock.create_autospec(reader.ReadRowsPage)
mock_page.to_dataframe.return_value = page_data_frame
mock_pages = (mock_page, mock_page, mock_page)
type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
row_iterator = self._make_one(
schema=[schema.SchemaField("colA", "IGNORED")],
table=mut.TableReference.from_string("proj.dset.tbl"),
)
got = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
self.assertEqual(list(got), ["colA"])
total_pages = len(streams) * len(mock_pages)
total_rows = len(page_data_frame) * total_pages
self.assertEqual(len(got.index), total_rows)
self.assertTrue(got.index.is_unique)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
@unittest.skipIf(tqdm is None, "Requires `tqdm`")
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
@mock.patch("tqdm.tqdm")
def test_to_dataframe_w_bqstorage_updates_progress_bar(self, tqdm_mock):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
from google.cloud.bigquery_storage_v1beta1 import reader
# Speed up testing.
mut._PROGRESS_INTERVAL = 0.01
arrow_fields = [pyarrow.field("testcol", pyarrow.int64())]
arrow_schema = pyarrow.schema(arrow_fields)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
streams = [
            # Use two streams so we can check that progress bar updates are
            # sent from each stream.
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
]
session = bigquery_storage_v1beta1.types.ReadSession(
streams=streams,
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
)
bqstorage_client.create_read_session.return_value = session
mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
bqstorage_client.read_rows.return_value = mock_rowstream
mock_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_rowstream.rows.return_value = mock_rows
mock_page = mock.create_autospec(reader.ReadRowsPage)
page_items = [-1, 0, 1]
type(mock_page).num_items = mock.PropertyMock(return_value=len(page_items))
def blocking_to_dataframe(*args, **kwargs):
# Sleep for longer than the waiting interval. This ensures the
# progress_queue gets written to more than once because it gives
# the worker->progress updater time to sum intermediate updates.
time.sleep(2 * mut._PROGRESS_INTERVAL)
return pandas.DataFrame({"testcol": page_items})
mock_page.to_dataframe.side_effect = blocking_to_dataframe
mock_pages = (mock_page, mock_page, mock_page, mock_page, mock_page)
type(mock_rows).pages = mock.PropertyMock(return_value=mock_pages)
schema = [schema.SchemaField("testcol", "IGNORED")]
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
schema,
table=mut.TableReference.from_string("proj.dset.tbl"),
selected_fields=schema,
)
row_iterator.to_dataframe(
bqstorage_client=bqstorage_client, progress_bar_type="tqdm"
)
# Make sure that this test updated the progress bar once per page from
# each stream.
total_pages = len(streams) * len(mock_pages)
expected_total_rows = total_pages * len(page_items)
progress_updates = [
args[0] for args, kwargs in tqdm_mock().update.call_args_list
]
# Should have sent >1 update due to delay in blocking_to_dataframe.
self.assertGreater(len(progress_updates), 1)
self.assertEqual(sum(progress_updates), expected_total_rows)
tqdm_mock().close.assert_called_once()
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
@unittest.skipIf(pyarrow is None, "Requires `pyarrow`")
def test_to_dataframe_w_bqstorage_exits_on_keyboardinterrupt(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
from google.cloud.bigquery_storage_v1beta1 import reader
# Speed up testing.
mut._PROGRESS_INTERVAL = 0.01
arrow_fields = [
pyarrow.field("colA", pyarrow.int64()),
# Not alphabetical to test column order.
pyarrow.field("colC", pyarrow.float64()),
pyarrow.field("colB", pyarrow.utf8()),
]
arrow_schema = pyarrow.schema(arrow_fields)
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
session = bigquery_storage_v1beta1.types.ReadSession(
streams=[
# Use two streams because one will fail with a
# KeyboardInterrupt, and we want to check that the other stream
# ends early.
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/1234"},
{"name": "/projects/proj/dataset/dset/tables/tbl/streams/5678"},
],
arrow_schema={"serialized_schema": arrow_schema.serialize().to_pybytes()},
)
bqstorage_client.create_read_session.return_value = session
def blocking_to_dataframe(*args, **kwargs):
# Sleep for longer than the waiting interval so that we know we're
# only reading one page per loop at most.
time.sleep(2 * mut._PROGRESS_INTERVAL)
return pandas.DataFrame(
{"colA": [1, -1], "colB": ["abc", "def"], "colC": [2.0, 4.0]},
columns=["colA", "colB", "colC"],
)
mock_page = mock.create_autospec(reader.ReadRowsPage)
mock_page.to_dataframe.side_effect = blocking_to_dataframe
mock_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_pages = mock.PropertyMock(return_value=(mock_page, mock_page, mock_page))
type(mock_rows).pages = mock_pages
mock_rowstream = mock.create_autospec(reader.ReadRowsStream)
mock_rowstream.rows.return_value = mock_rows
mock_cancelled_rows = mock.create_autospec(reader.ReadRowsIterable)
mock_cancelled_pages = mock.PropertyMock(side_effect=KeyboardInterrupt)
type(mock_cancelled_rows).pages = mock_cancelled_pages
mock_cancelled_rowstream = mock.create_autospec(reader.ReadRowsStream)
mock_cancelled_rowstream.rows.return_value = mock_cancelled_rows
bqstorage_client.read_rows.side_effect = (
mock_cancelled_rowstream,
mock_rowstream,
)
schema = [
schema.SchemaField("colA", "IGNORED"),
schema.SchemaField("colB", "IGNORED"),
schema.SchemaField("colC", "IGNORED"),
]
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
schema,
table=mut.TableReference.from_string("proj.dset.tbl"),
selected_fields=schema,
)
with pytest.raises(KeyboardInterrupt):
row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
# Should not have fetched the third page of results because exit_early
# should have been set.
self.assertLessEqual(mock_page.to_dataframe.call_count, 2)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_fallback_to_tabledata_list(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.create_read_session.side_effect = google.api_core.exceptions.InternalServerError(
"can't read with bqstorage_client"
)
iterator_schema = [
schema.SchemaField("name", "STRING", mode="REQUIRED"),
schema.SchemaField("age", "INTEGER", mode="REQUIRED"),
]
rows = [
{"f": [{"v": "Phred Phlyntstone"}, {"v": "32"}]},
{"f": [{"v": "Bharney Rhubble"}, {"v": "33"}]},
{"f": [{"v": "Wylma Phlyntstone"}, {"v": "29"}]},
{"f": [{"v": "Bhettye Rhubble"}, {"v": "27"}]},
]
path = "/foo"
api_request = mock.Mock(return_value={"rows": rows})
row_iterator = mut.RowIterator(
_mock_client(),
api_request,
path,
iterator_schema,
table=mut.Table("proj.dset.tbl"),
)
df = row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 4) # verify the number of rows
self.assertEqual(list(df), ["name", "age"]) # verify the column names
self.assertEqual(df.name.dtype.name, "object")
self.assertEqual(df.age.dtype.name, "int64")
@unittest.skipIf(pandas is None, "Requires `pandas`")
def test_to_dataframe_tabledata_list_w_multiple_pages_return_unique_index(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
iterator_schema = [schema.SchemaField("name", "STRING", mode="REQUIRED")]
path = "/foo"
api_request = mock.Mock(
side_effect=[
{"rows": [{"f": [{"v": "Bengt"}]}], "pageToken": "NEXTPAGE"},
{"rows": [{"f": [{"v": "Sven"}]}]},
]
)
row_iterator = mut.RowIterator(
_mock_client(),
api_request,
path,
iterator_schema,
table=mut.Table("proj.dset.tbl"),
)
df = row_iterator.to_dataframe(bqstorage_client=None)
self.assertIsInstance(df, pandas.DataFrame)
self.assertEqual(len(df), 2)
self.assertEqual(list(df), ["name"])
self.assertEqual(df.name.dtype.name, "object")
self.assertTrue(df.index.is_unique)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_raises_auth_error(self):
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
bqstorage_client.create_read_session.side_effect = google.api_core.exceptions.Forbidden(
"TEST BigQuery Storage API not enabled. TEST"
)
path = "/foo"
api_request = mock.Mock(return_value={"rows": []})
row_iterator = mut.RowIterator(
_mock_client(), api_request, path, [], table=mut.Table("proj.dset.tbl")
)
with pytest.raises(google.api_core.exceptions.Forbidden):
row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
@unittest.skipIf(pandas is None, "Requires `pandas`")
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_raises_import_error(self):
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
path = "/foo"
api_request = mock.Mock(return_value={"rows": []})
row_iterator = mut.RowIterator(
_mock_client(), api_request, path, [], table=mut.Table("proj.dset.tbl")
)
with mock.patch.object(mut, "bigquery_storage_v1beta1", None), pytest.raises(
ValueError
) as exc_context:
row_iterator.to_dataframe(bqstorage_client=bqstorage_client)
assert mut._NO_BQSTORAGE_ERROR in str(exc_context.value)
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_partition(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
[schema.SchemaField("colA", "IGNORED")],
table=mut.TableReference.from_string("proj.dset.tbl$20181225"),
)
with pytest.raises(ValueError):
row_iterator.to_dataframe(bqstorage_client)
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_to_dataframe_w_bqstorage_snapshot(self):
from google.cloud.bigquery import schema
from google.cloud.bigquery import table as mut
bqstorage_client = mock.create_autospec(
bigquery_storage_v1beta1.BigQueryStorageClient
)
row_iterator = mut.RowIterator(
_mock_client(),
None, # api_request: ignored
None, # path: ignored
[schema.SchemaField("colA", "IGNORED")],
table=mut.TableReference.from_string("proj.dset.tbl@1234567890000"),
)
with pytest.raises(ValueError):
row_iterator.to_dataframe(bqstorage_client)
class TestPartitionRange(unittest.TestCase):
def _get_target_class(self):
from google.cloud.bigquery.table import PartitionRange
return PartitionRange
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
object_under_test = self._make_one()
assert object_under_test.start is None
assert object_under_test.end is None
assert object_under_test.interval is None
def test_constructor_w_properties(self):
object_under_test = self._make_one(start=1, end=10, interval=2)
assert object_under_test.start == 1
assert object_under_test.end == 10
assert object_under_test.interval == 2
def test_constructor_w_resource(self):
object_under_test = self._make_one(
_properties={"start": -1234567890, "end": 1234567890, "interval": 1000000}
)
assert object_under_test.start == -1234567890
assert object_under_test.end == 1234567890
assert object_under_test.interval == 1000000
def test_repr(self):
object_under_test = self._make_one(start=1, end=10, interval=2)
assert repr(object_under_test) == "PartitionRange(end=10, interval=2, start=1)"
class TestRangePartitioning(unittest.TestCase):
def _get_target_class(self):
from google.cloud.bigquery.table import RangePartitioning
return RangePartitioning
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
object_under_test = self._make_one()
assert object_under_test.field is None
assert object_under_test.range_.start is None
assert object_under_test.range_.end is None
assert object_under_test.range_.interval is None
def test_constructor_w_properties(self):
from google.cloud.bigquery.table import PartitionRange
object_under_test = self._make_one(
range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
)
assert object_under_test.field == "integer_col"
assert object_under_test.range_.start == 1
assert object_under_test.range_.end == 10
assert object_under_test.range_.interval == 2
def test_constructor_w_resource(self):
object_under_test = self._make_one(
_properties={
"field": "some_column",
"range": {"start": -1234567890, "end": 1234567890, "interval": 1000000},
}
)
assert object_under_test.field == "some_column"
assert object_under_test.range_.start == -1234567890
assert object_under_test.range_.end == 1234567890
assert object_under_test.range_.interval == 1000000
def test_range_w_wrong_type(self):
object_under_test = self._make_one()
with pytest.raises(ValueError, match="PartitionRange"):
object_under_test.range_ = object()
def test_repr(self):
from google.cloud.bigquery.table import PartitionRange
object_under_test = self._make_one(
range_=PartitionRange(start=1, end=10, interval=2), field="integer_col"
)
assert (
repr(object_under_test)
== "RangePartitioning(field='integer_col', range_=PartitionRange(end=10, interval=2, start=1))"
)
class TestTimePartitioning(unittest.TestCase):
def _get_target_class(self):
from google.cloud.bigquery.table import TimePartitioning
return TimePartitioning
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
time_partitioning = self._make_one()
self.assertEqual(time_partitioning.type_, "DAY")
self.assertIsNone(time_partitioning.field)
self.assertIsNone(time_partitioning.expiration_ms)
def test_constructor_explicit(self):
from google.cloud.bigquery.table import TimePartitioningType
time_partitioning = self._make_one(
type_=TimePartitioningType.DAY, field="name", expiration_ms=10000
)
self.assertEqual(time_partitioning.type_, "DAY")
self.assertEqual(time_partitioning.field, "name")
self.assertEqual(time_partitioning.expiration_ms, 10000)
def test_require_partition_filter_warns_deprecation(self):
object_under_test = self._make_one()
with warnings.catch_warnings(record=True) as warned:
assert object_under_test.require_partition_filter is None
object_under_test.require_partition_filter = True
assert object_under_test.require_partition_filter
assert len(warned) == 3
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
def test_from_api_repr_empty(self):
klass = self._get_target_class()
# Even though there are required properties according to the API
# specification, sometimes time partitioning is populated as an empty
# object. See internal bug 131167013.
api_repr = {}
time_partitioning = klass.from_api_repr(api_repr)
self.assertIsNone(time_partitioning.type_)
self.assertIsNone(time_partitioning.field)
self.assertIsNone(time_partitioning.expiration_ms)
def test_from_api_repr_minimal(self):
from google.cloud.bigquery.table import TimePartitioningType
klass = self._get_target_class()
api_repr = {"type": "DAY"}
time_partitioning = klass.from_api_repr(api_repr)
self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
self.assertIsNone(time_partitioning.field)
self.assertIsNone(time_partitioning.expiration_ms)
def test_from_api_repr_doesnt_override_type(self):
klass = self._get_target_class()
api_repr = {"type": "HOUR"}
time_partitioning = klass.from_api_repr(api_repr)
self.assertEqual(time_partitioning.type_, "HOUR")
def test_from_api_repr_explicit(self):
from google.cloud.bigquery.table import TimePartitioningType
klass = self._get_target_class()
api_repr = {
"type": "DAY",
"field": "name",
"expirationMs": "10000",
"requirePartitionFilter": True,
}
time_partitioning = klass.from_api_repr(api_repr)
self.assertEqual(time_partitioning.type_, TimePartitioningType.DAY)
self.assertEqual(time_partitioning.field, "name")
self.assertEqual(time_partitioning.expiration_ms, 10000)
with warnings.catch_warnings(record=True) as warned:
self.assertTrue(time_partitioning.require_partition_filter)
self.assertIs(warned[0].category, PendingDeprecationWarning)
def test_to_api_repr_defaults(self):
time_partitioning = self._make_one()
expected = {"type": "DAY"}
self.assertEqual(time_partitioning.to_api_repr(), expected)
def test_to_api_repr_explicit(self):
from google.cloud.bigquery.table import TimePartitioningType
time_partitioning = self._make_one(
type_=TimePartitioningType.DAY, field="name", expiration_ms=10000
)
with warnings.catch_warnings(record=True) as warned:
time_partitioning.require_partition_filter = True
self.assertIs(warned[0].category, PendingDeprecationWarning)
expected = {
"type": "DAY",
"field": "name",
"expirationMs": "10000",
"requirePartitionFilter": True,
}
self.assertEqual(time_partitioning.to_api_repr(), expected)
def test___eq___wrong_type(self):
time_partitioning = self._make_one()
other = object()
self.assertNotEqual(time_partitioning, other)
self.assertEqual(time_partitioning, mock.ANY)
def test___eq___type__mismatch(self):
time_partitioning = self._make_one()
other = self._make_one(type_="HOUR")
self.assertNotEqual(time_partitioning, other)
def test___eq___field_mismatch(self):
time_partitioning = self._make_one(field="foo")
other = self._make_one(field="bar")
self.assertNotEqual(time_partitioning, other)
def test___eq___expiration_ms_mismatch(self):
time_partitioning = self._make_one(field="foo", expiration_ms=100000)
other = self._make_one(field="foo", expiration_ms=200000)
self.assertNotEqual(time_partitioning, other)
def test___eq___require_partition_filter_mismatch(self):
time_partitioning = self._make_one(field="foo", expiration_ms=100000)
other = self._make_one(field="foo", expiration_ms=100000)
with warnings.catch_warnings(record=True) as warned:
time_partitioning.require_partition_filter = True
other.require_partition_filter = False
assert len(warned) == 2
for warning in warned:
self.assertIs(warning.category, PendingDeprecationWarning)
self.assertNotEqual(time_partitioning, other)
def test___eq___hit(self):
time_partitioning = self._make_one(field="foo", expiration_ms=100000)
other = self._make_one(field="foo", expiration_ms=100000)
self.assertEqual(time_partitioning, other)
def test___ne___wrong_type(self):
time_partitioning = self._make_one()
other = object()
self.assertNotEqual(time_partitioning, other)
self.assertEqual(time_partitioning, mock.ANY)
def test___ne___same_value(self):
time_partitioning1 = self._make_one()
time_partitioning2 = self._make_one()
# unittest ``assertEqual`` uses ``==`` not ``!=``.
comparison_val = time_partitioning1 != time_partitioning2
self.assertFalse(comparison_val)
def test___ne___different_values(self):
time_partitioning1 = self._make_one()
time_partitioning2 = self._make_one(type_="HOUR")
self.assertNotEqual(time_partitioning1, time_partitioning2)
def test___hash__set_equality(self):
time_partitioning1 = self._make_one(field="foo")
time_partitioning2 = self._make_one(field="foo")
set_one = {time_partitioning1, time_partitioning2}
set_two = {time_partitioning1, time_partitioning2}
self.assertEqual(set_one, set_two)
def test___hash__not_equals(self):
time_partitioning1 = self._make_one(field="foo")
time_partitioning2 = self._make_one(field="bar")
set_one = {time_partitioning1}
set_two = {time_partitioning2}
self.assertNotEqual(set_one, set_two)
def test___repr___minimal(self):
time_partitioning = self._make_one()
expected = "TimePartitioning(type=DAY)"
self.assertEqual(repr(time_partitioning), expected)
def test___repr___explicit(self):
from google.cloud.bigquery.table import TimePartitioningType
time_partitioning = self._make_one(
type_=TimePartitioningType.DAY, field="name", expiration_ms=10000
)
expected = "TimePartitioning(" "expirationMs=10000," "field=name," "type=DAY)"
self.assertEqual(repr(time_partitioning), expected)
def test_set_expiration_w_none(self):
time_partitioning = self._make_one()
time_partitioning.expiration_ms = None
assert time_partitioning._properties["expirationMs"] is None
@pytest.mark.skipif(
bigquery_storage_v1beta1 is None, reason="Requires `google-cloud-bigquery-storage`"
)
def test_table_reference_to_bqstorage():
from google.cloud.bigquery import table as mut
# Can't use parametrized pytest because bigquery_storage_v1beta1 may not be
# available.
expected = bigquery_storage_v1beta1.types.TableReference(
project_id="my-project", dataset_id="my_dataset", table_id="my_table"
)
cases = (
"my-project.my_dataset.my_table",
"my-project.my_dataset.my_table$20181225",
"my-project.my_dataset.my_table@1234567890",
"my-project.my_dataset.my_table$20181225@1234567890",
)
classes = (mut.TableReference, mut.Table, mut.TableListItem)
for case, cls in itertools.product(cases, classes):
got = cls.from_string(case).to_bqstorage()
assert got == expected
@unittest.skipIf(
bigquery_storage_v1beta1 is None, "Requires `google-cloud-bigquery-storage`"
)
def test_table_reference_to_bqstorage_raises_import_error():
from google.cloud.bigquery import table as mut
classes = (mut.TableReference, mut.Table, mut.TableListItem)
for cls in classes:
with mock.patch.object(mut, "bigquery_storage_v1beta1", None), pytest.raises(
ValueError
) as exc_context:
cls.from_string("my-project.my_dataset.my_table").to_bqstorage()
assert mut._NO_BQSTORAGE_ERROR in str(exc_context.value)
|
tswast/google-cloud-python
|
bigquery/tests/unit/test_table.py
|
Python
|
apache-2.0
| 132,151
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2002-2006 Zuza Software Foundation
#
# This file is part of translate.
# The file was copied from the Python 2.5 source.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# NB! IMPORTANT SEMANTIC DIFFERENCE WITH THE OFFICIAL contextlib.
# In Python 2.5+, if an exception is thrown in a 'with' statement
# which uses a generator-based context manager (that is, a
# context manager created by decorating a generator with
# @contextmanager), the exception will be propagated to the
# generator via the .throw method of the generator.
#
# This does not exist in Python 2.4. Thus, we just naively finish
# off the context manager. This also means that generator-based
# context managers can't deal with exceptions, so be warned.
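#
# A minimal sketch of that consequence (hypothetical generator, not from the
# original source): an except clause inside the generator never fires under
# this backport, because the exception is not thrown into the generator;
# __exit__ simply resumes it with .next() and then re-raises the original.
#
#     @contextmanager
#     def managed():
#         try:
#             yield
#         except ValueError:
#             pass                    # never reached with this backport
#
#     with managed():
#         raise ValueError()          # propagates; never delivered to the generator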
"""Utilities for with-statement contexts. See PEP 343."""
import sys
__all__ = ["contextmanager", "nested", "closing"]
class GeneratorContextManager(object):
"""Helper for @contextmanager decorator."""
def __init__(self, gen):
self.gen = gen
def __enter__(self):
try:
return self.gen.next()
except StopIteration:
raise RuntimeError("generator didn't yield")
def __exit__(self, type, value, tb):
if type is None:
try:
self.gen.next()
except StopIteration:
return
else:
raise RuntimeError("generator didn't stop")
else:
if value is None:
# Need to force instantiation so we can reliably
# tell if we get the same exception back
value = type()
try:
try:
self.gen.next()
except StopIteration:
import traceback
traceback.print_exception(type, value, tb)
raise value
except StopIteration, exc:
# Suppress the exception *unless* it's the same exception that
# was passed to throw(). This prevents a StopIteration
# raised inside the "with" statement from being suppressed
return exc is not value
def contextmanager(func):
"""@contextmanager decorator.
Typical usage:
@contextmanager
def some_generator(<arguments>):
<setup>
try:
yield <value>
finally:
<cleanup>
This makes this:
with some_generator(<arguments>) as <variable>:
<body>
equivalent to this:
<setup>
try:
<variable> = <value>
<body>
finally:
<cleanup>
"""
def helper(*args, **kwds):
return GeneratorContextManager(func(*args, **kwds))
try:
helper.__name__ = func.__name__
helper.__doc__ = func.__doc__
helper.__dict__ = func.__dict__
except:
pass
return helper
@contextmanager
def nested(*managers):
"""Support multiple context managers in a single with-statement.
Code like this:
with nested(A, B, C) as (X, Y, Z):
<body>
is equivalent to this:
with A as X:
with B as Y:
with C as Z:
<body>
"""
exits = []
vars = []
exc = (None, None, None)
# Lambdas are an easy way to create unique objects. We don't want
# this to be None, since our answer might actually be None
undefined = lambda: 42
result = undefined
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
result = vars
except:
exc = sys.exc_info()
# If nothing has gone wrong, then result contains our return value
# and thus it is not equal to 'undefined'. Thus, yield the value.
if result != undefined:
yield result
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
raise exc[0], exc[1], exc[2]
class closing(object):
"""Context to automatically close something at the end of a block.
Code like this:
with closing(<module>.open(<arguments>)) as f:
<block>
is equivalent to this:
f = <module>.open(<arguments>)
try:
<block>
finally:
f.close()
"""
def __init__(self, thing):
self.thing = thing
def __enter__(self):
return self.thing
def __exit__(self, *exc_info):
self.thing.close()
|
jagg81/translate-toolkit
|
translate/misc/contextlib.py
|
Python
|
gpl-2.0
| 5,557
|
__author__ = 'Administrator'
# 1. Prepare the music data in the 'musics' folder; the audio format must be '.wav'.
# 2. Run pkl_music_featuer_lable to pickle the dataset.
# 3. Run DBN.
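#
# A hedged sketch of how the three steps above might be run from a shell
# (script names are inferred from the comments and may differ in this repo):
#
#   python pkl_music_featuer_lable.py   # step 2: pickle features/labels from musics/*.wav
#   python DBN.py                       # step 3: train the DBN on the pickled dataset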
|
daleloogn/BUG-Theano-DBN
|
use_dbn_on_bigset/__init__.py
|
Python
|
gpl-2.0
| 181
|
# $Id: 300_ice_1_1.py 2084 2008-06-27 23:53:00Z bennylp $
#
from inc_cfg import *
# ICE negotiation: both endpoints use ICE
test_param = TestParam(
"Callee=use ICE, caller=use ICE",
[
InstanceParam("callee", "--null-audio --use-ice --max-calls=1", enable_buffer=True),
InstanceParam("caller", "--null-audio --use-ice --max-calls=1", enable_buffer=True)
]
)
|
halex2005/pjproject
|
tests/pjsua/scripts-call/300_ice_1_1.py
|
Python
|
gpl-2.0
| 345
|
from mido import MidiFile, MidiTrack, Message
from keras.layers import LSTM, Dense, Activation, Dropout
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.optimizers import RMSprop
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import mido
########### PROCESS MIDI FILE #############
mid = MidiFile('allegroconspirito.mid') # a Mozart piece
notes = []
time = float(0)
prev = float(0)
for msg in mid:
### this time is in seconds, not ticks
time += msg.time
if not msg.is_meta:
### only interested in piano channel
if msg.channel == 0:
if msg.type == 'note_on':
# note in vector form to train on
note = msg.bytes()
# only interested in the note and velocity. note message is in the form of [type, note, velocity]
note = note[1:3]
note.append(time-prev)
prev = time
notes.append(note)
###########################################
######## SCALE DATA TO BETWEEN 0, 1 #######
t = []
for note in notes:
note[0] = (note[0]-24)/88
note[1] = note[1]/127
t.append(note[2])
max_t = max(t) # scale based on the biggest time of any note
for note in notes:
note[2] = note[2]/max_t
###########################################
############ CREATE DATA, LABELS ##########
X = []
Y = []
n_prev = 30
# n_prev notes to predict the (n_prev+1)th note
for i in range(len(notes)-n_prev):
x = notes[i:i+n_prev]
y = notes[i+n_prev]
X.append(x)
Y.append(y)
# save a seed to do prediction later
seed = notes[0:n_prev]
###########################################
############### BUILD MODEL ###############
print('Build model...')
model = Sequential()
model.add(LSTM(128, input_shape=(n_prev, 3), return_sequences=True))
model.add(Dropout(0.2))
model.add(LSTM(64, input_shape=(n_prev, 3), return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(3))
model.add(Activation('linear'))
optimizer = RMSprop(lr=0.01)
model.compile(loss='mse', optimizer=optimizer)  # use the configured RMSprop instance
model.fit(np.array(X), np.array(Y), batch_size=300, epochs=400, verbose=1)  # convert to numpy arrays for fit
###########################################
############ MAKE PREDICTIONS #############
prediction = []
x = seed
x = np.expand_dims(x, axis=0)
for i in range(3000):
preds = model.predict(x)
print (preds)
x = np.squeeze(x)
x = np.concatenate((x, preds))
x = x[1:]
x = np.expand_dims(x, axis=0)
preds = np.squeeze(preds)
prediction.append(preds)
for pred in prediction:
pred[0] = int(88*pred[0] + 24)
pred[1] = int(127*pred[1])
pred[2] *= max_t
    # clamp values that would otherwise be out of MIDI range
if pred[0] < 24:
pred[0] = 24
elif pred[0] > 102:
pred[0] = 102
if pred[1] < 0:
pred[1] = 0
elif pred[1] > 127:
pred[1] = 127
if pred[2] < 0:
pred[2] = 0
###########################################
###### SAVING TRACK FROM BYTES DATA #######
mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)
for note in prediction:
    # 147 (0x93) is a MIDI note_on status byte (channel 3, zero-based)
note = np.insert(note, 0, 147)
bytes = note.astype(int)
print (note)
msg = Message.from_bytes(bytes[0:3])
time = int(note[3]/0.001025) # to rescale to midi's delta ticks. arbitrary value for now.
msg.time = time
track.append(msg)
mid.save('new_song.mid')
###########################################
|
anandha2017/udacity
|
nd101 Deep Learning Nanodegree Foundation/DockerImages/26_sirajs_text_summarisation/notebooks/02-rudimentary-ai-composer/createmusic.py
|
Python
|
mit
| 3,179
|
"""
Unit tests for DVCS wrapper.
Simplified BSD-license. (c) Kevin Dunn, 2011.
"""
from django.test import TestCase
from scipy_central import utils
# Python imports
import os
import shutil
import tempfile
import scipy_central.filestorage.dvcs_wrapper as dvcs
class DVCS_Tests(TestCase):
def setUp(self):
""" Use a known testing file; write it to a temporary location for
the test.
"""
self.tempdir = tempfile.mkdtemp()
self.local_path = os.path.join(self.tempdir, 'local')
self.remote_path = os.path.join(self.tempdir, 'remote')
def tearDown(self):
""" Remove temporary files. """
shutil.rmtree(self.tempdir)
def test_dvcs(self):
backends = ['hg']
for backend in backends:
# Start from scratch every time
shutil.rmtree(self.tempdir)
utils.ensuredir(self.tempdir)
utils.ensuredir(self.local_path)
utils.ensuredir(self.remote_path)
f = open(os.path.join(self.remote_path, 'index.rst'), 'w')
f.writelines(['Header\n','======\n', '\n', 'Paragraph 1\n', '\n',
'Paragraph 2\n', '\n', 'Paragraph 3\n'])
f.close()
remote_repo = dvcs.DVCSRepo(backend, self.remote_path)
remote_repo.add(['.'])
remote_repo.commit('Initial commit', user="Alan Thompson")
remote_hash = remote_repo.get_revision_info()
## Create, add and commit to the remote repo
## Verify that we cannot expect to query the remote repo:
##self.assertRaises(dvcs.DVCSError, dvcs.get_revision_info, remote=True)
local_repo = remote_repo.clone(self.local_path)
local_hash = local_repo.check_out(rev='tip')
self.assertEqual(local_hash, remote_hash)
# Now, in the local repo, make some changes to test
# Add a comment for paragraph 2; commit
f = open(os.path.join(self.local_path, 'index.rst'), 'w')
f.writelines(['Header\n','======\n', '\n', 'Paragraph 1\n', '\n',
'Paragraph 2\n', '\n', '.. ucomment:: aaaaaa: 11,\n',
'\n', 'Paragraph 3\n'])
f.close()
self.assertRaises(dvcs.DVCSError, local_repo.push)
local_repo.set_remote(self.remote_path)
rev1 = local_repo.update_commit_and_push_updates(message='Para 2')
# Check out an old revision to modify, rather than the latest revision
new_hash = local_repo.check_out(rev=local_hash)
self.assertEqual(new_hash, local_hash)
# Now add a comment to paragraph 3, but from the initial revision
f = open(os.path.join(self.local_path, 'index.rst'), 'w')
f.writelines(['Header\n','======\n', '\n', 'Paragraph 1\n', '\n',
'Paragraph 2\n', '\n', 'Paragraph 3\n', '\n',
'.. ucomment:: bbbbbb: 22,\n'])
f.close()
rev2 = local_repo.update_commit_and_push_updates(message='Para 3')
# Add a comment above on the local repo, again starting from old version
hex_str = local_repo.check_out(rev=local_hash)
# Now add a comment to paragraph 1
f = open(os.path.join(self.local_path, 'index.rst'), 'w')
f.writelines(['Header\n','======\n', '\n', 'Paragraph 1\n', '\n',
'.. ucomment:: cccccc: 33,\n', '\n', 'Paragraph 2\n',
'\n', 'Paragraph 3\n'])
f.close()
rev3 = local_repo.update_commit_and_push_updates(message='Para 1')
f = open(os.path.join(self.local_path, 'index.rst'), 'r')
lines = f.readlines()
f.close()
final_result = ['Header\n', '======\n', '\n', 'Paragraph 1\n',
'\n', '.. ucomment:: cccccc: 33,\n', '\n',
'Paragraph 2\n', '\n',
'.. ucomment:: aaaaaa: 11,\n', '\n',
'Paragraph 3\n', '\n',
'.. ucomment:: bbbbbb: 22,\n']
self.assertEqual(lines, final_result)
# Now test the code in dvcs.pull_update_and_merge(...).
# Handles the basic case when the author makes changes (they are pushed
        # to the remote repo) and they should be imported into the
# local repo without requiring a merge.
final_result.insert(3, 'A new paragraph.\n')
final_result.insert(4, '\n')
with open(os.path.join(self.remote_path, 'index.rst'), 'w') as f_handle:
f_handle.writelines(final_result)
remote_repo.commit(message='Remote update.')
local_repo.pull_update_and_merge()
with open(os.path.join(self.local_path, 'index.rst'), 'r') as f_handle:
local_lines = f_handle.readlines()
self.assertEqual(local_lines, final_result)
|
Srisai85/SciPyCentral
|
scipy_central/filestorage/tests.py
|
Python
|
bsd-3-clause
| 5,122
|
import logging
try:
from configparser import ConfigParser
except ImportError:
# Python 2 support
from ConfigParser import ConfigParser
logger = logging.getLogger("packges.knightos.org")
logger.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
sh.setFormatter(formatter)
logger.addHandler(sh)
# scss logger
logging.getLogger("scss").addHandler(sh)
config = ConfigParser()
config.readfp(open('config.ini'))
env = 'dev'
_cfg = lambda k: config.get(env, k)
_cfgi = lambda k: int(_cfg(k))
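# Usage sketch (the key names below are hypothetical; real keys live in
# config.ini under the [dev] section):
#   debug = _cfg('debug')
#   port = _cfgi('port')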
|
KnightOS/packages.knightos.org
|
packages/config.py
|
Python
|
mit
| 619
|
#---------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#---------------------------------------------------------------------------------------------
import unittest
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
# pylint: disable=line-too-long
from azure.cli.command_modules.resource._validators import (validate_resource_type,
validate_parent,
_resolve_api_version as resolve_api_version)
class TestApiCheck(unittest.TestCase):
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
pass
def tearDown(self):
pass
def test_resolve_api_provider_backup(self):
""" Verifies provider is used as backup if api-version not specified. """
resource_type = validate_resource_type('Mock/test')
self.assertEqual(resolve_api_version(self._get_mock_client(), resource_type), "2016-01-01")
def test_resolve_api_provider_with_parent_backup(self):
""" Verifies provider (with parent) is used as backup if api-version not specified. """
resource_type = validate_resource_type('Mock/bar')
parent = validate_parent('foo/testfoo123')
self.assertEqual(
resolve_api_version(self._get_mock_client(), resource_type, parent),
"1999-01-01"
)
def test_resolve_api_all_previews(self):
""" Verifies most recent preview version returned only if there are no non-preview versions. """
resource_type = validate_resource_type('Mock/preview')
self.assertEqual(
resolve_api_version(self._get_mock_client(), resource_type),
"2005-01-01-preview"
)
def _get_mock_client(self):
client = MagicMock()
provider = MagicMock()
provider.resource_types = [
self._get_mock_resource_type('skip', ['2000-01-01-preview', '2000-01-01']),
self._get_mock_resource_type('test', ['2016-01-01-preview', '2016-01-01']),
self._get_mock_resource_type('foo/bar', ['1999-01-01-preview', '1999-01-01']),
self._get_mock_resource_type('preview', ['2005-01-01-preview', '2004-01-01-preview'])
]
client.providers.get.return_value = provider
return client
def _get_mock_resource_type(self, name, api_versions): #pylint: disable=no-self-use
rt = MagicMock()
rt.resource_type = name
rt.api_versions = api_versions
return rt
if __name__ == '__main__':
unittest.main()
|
BurtBiel/azure-cli
|
src/command_modules/azure-cli-resource/azure/cli/command_modules/resource/tests/test_api_check.py
|
Python
|
mit
| 2,871
|
# app.routing.tasks
import logging, json, re, pytz
from time import sleep
from bson import ObjectId as oid
from flask import g
from dateutil.parser import parse
from datetime import datetime, date, time, timedelta
from app import celery, get_keys, get_group
from app.lib import gcal, gdrive, timer
from app.lib.utils import format_bson
from app.lib.dt import to_local, ddmmyyyy_to_date
from app.lib.timer import Timer
from app.main import parser
from app.main.etapestry import EtapError, get_udf
from .main import add_metadata
from .build import submit_job, get_solution
from . import depots, sheet, routific
log = logging.getLogger(__name__)
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def discover_routes(self, group, within_days=5, **rest):
'''Scans schedule for blocks, adds metadata to db'''
g.group = group
# REMOVE ME
coll = 'new_routes' if g.group == 'vec' else 'routes'
from app.main.socketio import smart_emit
sleep(3)
smart_emit('discover_routes', {'status':'in-progress'})
log.debug('Discovering routes...')
n_found = 0
events = []
service = gcal.gauth(get_keys('google')['oauth'])
cal_ids = get_keys('cal_ids')
for _id in cal_ids:
start = to_local(d=date.today())
events += gcal.get_events(
service,
cal_ids[_id],
start,
start + timedelta(days=within_days))
events = sorted(events, key=lambda k: k['start'].get('date'))
for event in events:
block = parser.get_block(event['summary'])
event_dt = to_local(d=parse(event['start']['date']), t=time(8,0))
if not block:
continue
if not g.db[coll].find_one(
{'date':event_dt.astimezone(pytz.utc), 'block': block, 'group':g.group}
):
try:
meta = add_metadata(block, event_dt, event)
except Exception as e:
log.exception('Error writing route %s metadata', block)
continue
log.debug('discovered %s on %s', block, event_dt.strftime('%b %-d'))
smart_emit('discover_routes', {
'status': 'discovered', 'route': format_bson(meta)},
room=g.group)
            n_found += 1
smart_emit('discover_routes', {'status':'completed'}, room=g.group)
return 'discovered %s routes' % n_found
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def build_scheduled_routes(self, group=None, **rest):
'''Route orders for today's Blocks and build Sheets
'''
groups = [get_keys(group=group)] if group else g.db['groups'].find()
for group_ in groups:
g.group = group_['name']
n_fails = n_success = 0
log.info("Task: Building scheduled routes...")
# REMOVE ME
coll = 'new_routes' if g.group == 'vec' else 'routes'
routes = g.db[coll].find(
{'group':g.group, 'date':to_local(d=date.today(),t=time(8,0))})
discover_routes(g.group)
for route in routes:
try:
build_route(str(route['_id']))
except Exception as e:
log.exception('Error building route %s', route['block'],
extra={'route_id':str(route['_id'])})
                n_fails += 1
continue
n_success += 1
sleep(2)
if n_fails == 0:
log.info('Task completed. Built %s/%s scheduled routes',
n_success, n_success + n_fails)
else:
log.error('Built %s/%s scheduled routes. Click for error details.',
n_success, n_success + n_fails)
return 'success'
#-------------------------------------------------------------------------------
@celery.task(bind=True)
def build_route(self, route_id, job_id=None, **rest):
'''Celery task that routes a Block via Routific and writes orders to a Sheet
    Can take up to a few minutes to run, depending on the size of the route
    and the speed of dependent API services (geocoder, Sheets/Drive API)
@route_id: '_id' of record in 'routes' db collection (str)
@job_id: routific job string. If passed, creates Sheet without re-routing
Returns: db.routes dict on success, False on error
'''
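    # Usage sketch: callers in this module invoke the task directly, e.g.
    # build_route(str(route['_id'])), and with Celery it can also be queued
    # asynchronously via build_route.delay(route_id) or
    # build_route.delay(route_id, job_id=existing_job_id) to skip re-routing.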
from app.lib.gsheets_cls import SS
from app.main.maps import GeocodeError
timer = Timer()
orders = "processing"
route = g.db['routes'].find_one({"_id":oid(route_id)})
if route:
coll = 'routes'
    else:
        route = g.db['new_routes'].find_one({"_id": oid(route_id)})
        coll = 'new_routes'
g.group = route['group']
oauth = get_keys('google')['oauth']
api_key = get_keys('google')['geocode']['api_key']
log.debug('Building route %s...', route['block'],
extra={'route_id':route_id, 'job_id':job_id or None})
if job_id is None:
try:
job_id = submit_job(oid(route_id))
except GeocodeError as e:
log.exception('Geocoding error.', extra={'response':e.response})
raise
while orders == "processing":
log.debug('No solution yet...')
sleep(5)
orders = get_solution(job_id, api_key)
title = '%s: %s (%s)' %(route['date'].strftime('%b %-d'), route['block'], route['routific']['driver']['name'])
ss = sheet.build(gdrive.gauth(oauth), title)
route = g.db[coll].find_one_and_update({'_id':oid(route_id)}, {'$set':{'ss':ss}})
wks_name = get_keys('routing')['gdrive']['template_orders_wks_name']
try:
route_ss = SS(get_keys('google')['oauth'], ss['id'])
orders_wks = route_ss.wks('Orders')
sheet.write_orders(orders_wks, orders)
info_wks = route_ss.wks('Info')
sheet.write_prop(info_wks, route)
# Append orders w/o geolocation
for e in route['routific']['errors']:
order = routific.order(e['acct'], e['acct']['address'], {}, '', '',0)
sheet.append_order(orders_wks, order)
except Exception as e:
log.exception('Error writing to Sheets')
raise
#smart_emit('route_status',{
# 'status':'completed', 'ss_id':ss['id'], 'warnings':route['warnings']})
log.info('Built route %s [Orders=%s]', route['block'], len(orders),
extra={'n_orders':len(orders), 'n_unserved': route['routific']['nUnserved'],
'n_warnings': len(route['routific']['warnings']), 'n_errors': len(route['routific']['errors'])})
return json.dumps({'status':'success', 'route_id':str(route['_id'])})
|
SeanEstey/Bravo
|
app/routing/tasks.py
|
Python
|
gpl-2.0
| 6,613
|
import html.parser
import urllib.parse
import markdown_dictionary as m
class Htmlparser(html.parser.HTMLParser):
    # History of (tag, attrs) pairs so attrs data is available at handle_endtag().
    # Note: defined at class level, so this list is shared across instances.
    pathList = []
# The markdown string which will get build over time
result = ''
# The current data from the current opened tag
currentText = ''
# The host including protocol of the submission, e.g.: https://forum-en.guildwars2.com
host = ''
    # Blockquote indentation level
    lvlBlockquote = 0
    # Text modifier derived from the blockquote indentation level (a suitable number of '>'s)
qm = ''
# State for code tag
is_code = False
# State for a tag
is_a = False
"""
Return the pos item (tag-name or attributes) from the end of the pathlist
"""
def read_pathlist(self, pos, name):
return self.pathList[-pos][name]
"""
Repairs relative and relative/external urls to absolute https urls
"""
def repair_href(self, href):
if href[:12] == '/external?l=':
return urllib.parse.unquote_plus(href[12:])
elif href[:2] == '//':
return 'https:' + href
elif href[:1] == '/':
return self.host + href
elif href[:str(href).find('?')].isdigit():
return self.host + '/forums/en/overwatch/topic/' + href
else:
return href
"""
Formats url and (opt.) visual/url description into markdown syntax
"""
@staticmethod
def mrkd_href(href, visual=''):
if visual == '':
return '{0}{1}{2}{3}{1}{4}'.format(m.lvs, href.strip(), m.lve, m.lhs, m.lhe)
else:
return '{0}{1}{2}{3}{4}{5}'.format(m.lvs, visual.strip(), m.lve, m.lhs, href.strip(), m.lhe)
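    # Illustrative behaviour, assuming markdown_dictionary defines the usual
    # link delimiters (m.lvs='[', m.lve=']', m.lhs='(', m.lhe=')') -- an
    # assumption, not taken from the source:
    #   mrkd_href('http://a.b')          -> '[http://a.b](http://a.b)'
    #   mrkd_href('http://a.b', 'text')  -> '[text](http://a.b)'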
"""
Extracts userfriendly youtube url from embedded url
"""
@staticmethod
def excavate_youtube(url):
        # Currently a pass-through: the embedded url is returned unchanged.
        return url
"""
Appends string including (opt.) blockquote modifier, (opt.) nextline and (opt.) codespacer to final result.
"""
def append(self, string, qm=None, is_nextline=False):
if qm is None:
qm = self.qm
if self.is_code:
string = m.code + str(string)
if is_nextline:
self.result += m.newline2 + qm + str(string)
else:
self.result += str(string)
"""
Appends string to current data
"""
def add(self, string):
self.currentText += string
"""
Returns current data to caller and clears the data afterwards
"""
def get_current_text(self):
t = self.currentText
self.currentText = ''
return t
"""
Handles starttag of html parser
"""
def handle_starttag(self, tag, attrs):
if tag == 'blockquote':
self.lvlBlockquote += 1
if tag == 'p':
self.append('', None, True)
if tag == 'strong':
self.add(m.bold)
if tag == 'em':
self.add(m.itallic)
if tag == 'del':
self.add(m.strike)
if tag == 'h1':
self.add(m.h1)
if tag == 'h2':
self.add(m.h2)
if tag == 'h3':
self.add(m.h3)
if tag == 'h4':
self.add(m.h4)
if tag == 'h5':
self.add(m.h5)
        if tag == 'h6' or tag == 'ins' or (tag == 'span' and attrs and attrs[0][1] == 'underline'):
self.add(m.h6)
if tag == 'ul':
self.add(m.newline2)
if tag == 'li':
self.add(m.li)
if tag == 'a':
self.append(self.get_current_text())
self.is_a = True
if tag == 'code':
self.is_code = True
if attrs is not None:
self.pathList.append({'tag': tag, 'attrs': dict(attrs)})
else:
self.pathList.append({'tag': tag, 'attrs': None})
"""
Handles data of html parser
"""
def handle_data(self, data):
self.currentText += data
"""
Handles endtag of html parser
"""
def handle_endtag(self, tag):
# Following variables are needed to distinguish between links out of blockquotes and links in blockquotes
self.qm = ''
if self.lvlBlockquote > 0:
for i in range(0, self.lvlBlockquote):
self.qm += m.quote
if tag == 'blockquote':
self.lvlBlockquote -= 1
self.append(self.get_current_text(), is_nextline=True)
if tag == 'span' and self.read_pathlist(1, 'attrs') is not None:
for key in self.read_pathlist(1, 'attrs'):
if str(self.read_pathlist(1, 'attrs')[key]).find('bml-quote-date') != -1:
self.append(self.get_current_text() + ' ',is_nextline=True)
if str(self.read_pathlist(1, 'attrs')[key]).find('truncated') != -1:
self.get_current_text()
if str(self.read_pathlist(1, 'attrs')[key]).find('underline') != -1:
self.append(self.get_current_text(), is_nextline=True)
if tag == 'img':
self.add(self.mrkd_href(self.repair_href(self.read_pathlist(1, 'attrs')['src'])))
if tag == 'p':
self.append(self.get_current_text(), is_nextline=True)
if tag == 'strong':
self.add(m.bold)
if tag == 'em':
self.add(m.itallic)
if tag == 'a':
self.append(self.mrkd_href(self.repair_href(self.read_pathlist(1, 'attrs')['href']), self.get_current_text()))
self.is_a = False
if tag == 'del':
self.add(m.strike)
if tag in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'ins']:
self.append(self.get_current_text(), is_nextline=True)
if tag == 'br':
if not self.is_a:
self.append(self.get_current_text(), is_nextline=True)
if tag == 'li':
self.append(self.get_current_text() + m.newline2)
if tag == 'iframe':
self.append(self.mrkd_href(self.excavate_youtube(self.repair_href(self.read_pathlist(1,'attrs')['src']))))
if tag == 'div':
self.append(self.get_current_text(), is_nextline=True)
if tag == 'code':
self.append(self.get_current_text(), is_nextline=True)
self.is_code = False
if len(self.pathList) > 0:
self.pathList.pop()
def parse(source, host):
parser = Htmlparser()
parser.host = host
parser.convert_charrefs = True
parser.feed(source)
return parser.result
if __name__ == '__main__':
print(parse('<strong>boldtest</strong><br /><em>ittalictest</em><br /><span class="underline">underlinetest</span><br /><ul><li>listitem1</li><li>listitem2</li></ul><br />http://<br /><code>codetest <br />us.battle.net/forums/en/overwatch/<br /> code<strong>boldtest</strong></code><br /><blockquote><blockquote><span class="truncated">...</span>quote2</blockquote>quote1</blockquote>helloworld</div>', "https://us.battle.net"))
|
Tinywave/polarbytebot
|
overwatch_html2markdown.py
|
Python
|
mit
| 6,973
|
"""The MIT License (MIT)
Copyright (c) 2016 Robert A. Brown (www.robbtech.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
import math
import pylab as mpl
import numpy as np
import time
def weightVariable(shape,std=1.0,name=None):
# Create a set of weights initialized with truncated normal random values
name = 'weights' if name is None else name
return tf.get_variable(name,shape,initializer=tf.truncated_normal_initializer(stddev=std/math.sqrt(shape[0])))
def biasVariable(shape,bias=0.1,name=None):
# create a set of bias nodes initialized with a constant 0.1
name = 'biases' if name is None else name
return tf.get_variable(name,shape,initializer=tf.constant_initializer(bias))
def conv2d(x,W,strides=[1,1,1,1],name=None):
# return an op that convolves x with W
strides = np.array(strides)
if strides.size == 1:
strides = np.array([1,strides,strides,1])
elif strides.size == 2:
strides = np.array([1,strides[0],strides[1],1])
if np.any(strides < 1):
        strides = np.around(1./strides).astype(np.uint8)
        # Note: tf.nn.conv2d_transpose also requires an output_shape argument,
        # which is not supplied here.
        return tf.nn.conv2d_transpose(x,W,strides=strides.tolist(),padding='SAME',name=name)
else:
return tf.nn.conv2d(x,W,strides=strides.tolist(),padding='SAME',name=name)
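# Illustrative only (hypothetical tensors, not from the original source):
#   W = weightVariable([5,5,1,32])    # 5x5 patches, 1 channel in, 32 features out
#   y = conv2d(x, W)                  # 'SAME'-padded convolution, stride 1
#   y2 = conv2d(x, W, strides=2)      # strided (downsampling) convolution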
def conv3d(x,W,strides=1,name=None):
# return an op that convolves x with W
strides = np.array(strides)
if strides.size == 1:
        strides = np.array([1,strides,strides,strides,1])
elif strides.size == 3:
strides = np.array([1,strides[0],strides[1],strides[2],1])
if np.any(strides < 1):
        strides = np.around(1./strides).astype(np.uint8)
        # Note: tf.nn.conv3d_transpose likewise requires an output_shape argument.
        return tf.nn.conv3d_transpose(x,W,strides=strides.tolist(),padding='SAME',name=name)
else:
return tf.nn.conv3d(x,W,strides=strides.tolist(),padding='SAME',name=name)
def max_pool_2x2(x,name=None):
# return an op that performs max pooling across a 2D image
return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding='SAME',name=name)
def max_pool(x,shape,name=None):
# return an op that performs max pooling across a 2D image
return tf.nn.max_pool(x,ksize=[1]+shape+[1],strides=[1]+shape+[1],padding='SAME',name=name)
def max_pool3d(x,shape,name=None):
# return an op that performs max pooling across a 2D image
return tf.nn.max_pool3d(x,ksize=[1]+shape+[1],strides=[1]+shape+[1],padding='SAME',name=name)
def plotFields(layer,fieldShape=None,channel=None,figOffset=1,cmap=None,padding=0.01):
# Receptive Fields Summary
try:
W = layer.W
except:
W = layer
wp = W.eval().transpose();
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
fields = np.reshape(wp,list(wp.shape[0:-1])+fieldShape)
else: # Convolutional layer already has shape
features, channels, iy, ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fig = mpl.figure(figOffset); mpl.clf()
# Using image grid
from mpl_toolkits.axes_grid1 import ImageGrid
grid = ImageGrid(fig,111,nrows_ncols=(perRow,perColumn),axes_pad=padding,cbar_mode='single')
for i in range(0,np.shape(fields)[0]):
im = grid[i].imshow(fields[i],cmap=cmap);
grid.cbar_axes[0].colorbar(im)
mpl.title('%s Receptive Fields' % layer.name)
# old way
# fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
# tiled = []
# for i in range(0,perColumn*perRow,perColumn):
# tiled.append(np.hstack(fields2[i:i+perColumn]))
#
# tiled = np.vstack(tiled)
# mpl.figure(figOffset); mpl.clf(); mpl.imshow(tiled,cmap=cmap); mpl.title('%s Receptive Fields' % layer.name); mpl.colorbar();
mpl.figure(figOffset+1); mpl.clf(); mpl.imshow(np.sum(np.abs(fields),0),cmap=cmap); mpl.title('%s Total Absolute Input Dependency' % layer.name); mpl.colorbar()
def plotOutput(layer,feed_dict,fieldShape=None,channel=None,figOffset=1,cmap=None):
# Output summary
try:
W = layer.output
except:
W = layer
wp = W.eval(feed_dict=feed_dict);
if len(np.shape(wp)) < 4: # Fully connected layer, has no shape
temp = np.zeros(np.product(fieldShape)); temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
fields = np.reshape(temp,[1]+fieldShape)
else: # Convolutional layer already has shape
wp = np.rollaxis(wp,3,0)
features, channels, iy,ix = np.shape(wp)
if channel is not None:
fields = wp[:,channel,:,:]
else:
fields = np.reshape(wp,[features*channels,iy,ix])
perRow = int(math.floor(math.sqrt(fields.shape[0])))
perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
fields2 = np.vstack([fields,np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
tiled = []
for i in range(0,perColumn*perRow,perColumn):
tiled.append(np.hstack(fields2[i:i+perColumn]))
tiled = np.vstack(tiled)
if figOffset is not None:
mpl.figure(figOffset); mpl.clf();
mpl.imshow(tiled,cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar();
def train(session,trainingData,testingData,input,truth,cost,trainingStep,accuracy,iterations=5000,miniBatch=100,trainDict={},testDict=None,logName=None,initialize=True,addSummaryOps=True):
testDict = trainDict if testDict is None else testDict
if addSummaryOps:
costSummary = tf.summary.scalar("Cost Function", cost)
if accuracy is None:
accuracy = cost
accuracySummary = tf.summary.scalar("accuracy", accuracy)
mergedSummary = tf.summary.merge_all()
if logName is not None:
        writer = tf.summary.FileWriter(logName, session.graph)
if initialize:
tf.global_variables_initializer().run() # Take initial values and actually put them in variables
lastTime = 0; lastIterations = 0
print("Doing {} iterations".format(iterations))
for i in range(iterations): # Do some training
batch = trainingData.next_batch(miniBatch)
if (i%100 == 0) or (time.time()-lastTime > 5):
testDict.update({input:batch[0],truth:batch[1]})
# trainAccuracy = accuracy.eval(feed_dict=testDict)
# Test accuracy for TensorBoard
# testDict.update({input:testingData.images,truth:testingData.labels})
if addSummaryOps:
summary,testAccuracy,testCost = session.run([mergedSummary,accuracy,cost],feed_dict=testDict)
if logName is not None:
writer.add_summary(summary,i)
else:
                testAccuracy,testCost = session.run([accuracy,cost],feed_dict=testDict)
print('At batch {}: accuracy: {} cost: {} ({} samples/s)'.format(i,testAccuracy,testCost,(i-lastIterations)/(time.time()-lastTime)*miniBatch))
lastTime = time.time(); lastIterations = i
trainDict.update({input:batch[0],truth:batch[1]})
trainingStep.run(feed_dict=trainDict)
try:
# Only works with mnist-type data object
testDict.update({input:testingData.images, truth:testingData.labels})
print('Test accuracy: {}'.format(accuracy.eval(feed_dict=testDict)))
except:
pass
class Layer(object):
def __init__(self,input,units,name,std=1.0,bias=0.1):
self.input = input
self.units = units
self.name = name
self.initialize(std=std,bias=bias)
self.setupOutput()
self.setupSummary()
def initialize(self):
pass
def setupOutput(self):
pass
def setupSummary(self):
pass
class UtilityLayer(Layer):
def __init__(self,input,name):
self.input = input
self.name = name
self.initialize()
self.setupOutput()
self.setupSummary()
class Linear(Layer):
def initialize(self,std=1.0,bias=0.1):
with tf.variable_scope(self.name):
self.inputShape = np.product([i.value for i in self.input.get_shape()[1:] if i.value is not None])
self.W = weightVariable([self.inputShape,self.units],std=std)
self.b = biasVariable([self.units],bias=bias)
def setupOutput(self):
if len(self.input.get_shape()) > 2:
input = tf.reshape(self.input,[-1,self.inputShape]) # flatten reduced image into a vector
else:
input = self.input
        self.output = tf.matmul(input,self.W) + self.b  # apply the bias created in initialize()
def setupSummary(self):
        self.WHist = tf.summary.histogram("weights", self.W)
        self.BHist = tf.summary.histogram("biases", self.b)
        self.outputHist = tf.summary.histogram("output", self.output)
class SoftMax(Layer):
def initialize(self,std=1.0,bias=0.1):
with tf.variable_scope(self.name):
self.inputShape = np.product([i.value for i in self.input.get_shape()[1:] if i.value is not None])
self.W = weightVariable([self.inputShape,self.units],std=std)
self.b = biasVariable([self.units],bias=bias)
def setupOutput(self):
if len(self.input.get_shape()) > 2:
input = tf.reshape(self.input,[-1,self.inputShape]) # flatten reduced image into a vector
else:
input = self.input
self.output = tf.nn.softmax(tf.matmul(input,self.W) + self.b)
def setupSummary(self):
self.WHist = tf.summary.histogram("weights", self.W)
self.BHist = tf.summary.histogram("biases", self.b)
self.outputHist = tf.summary.histogram("output", self.output)
class ReLu(SoftMax):
def setupOutput(self):
if len(self.input.get_shape()) > 2:
input = tf.reshape(self.input,[-1,self.inputShape]) # flatten reduced image into a vector
else:
input = self.input
self.output = tf.nn.relu(tf.matmul(input,self.W) + self.b)
class Conv2D(SoftMax):
def __init__(self,input,shape,name,strides=[1,1,1,1],std=1.0,bias=0.1):
self.input = input
self.units = shape[-1]
self.shape = shape
self.strides = strides
self.name = name
self.initialize(std=std,bias=bias)
self.setupOutput()
self.setupSummary()
def initialize(self,std=1.0,bias=0.1):
with tf.variable_scope(self.name):
self.W = weightVariable(self.shape,std=std) # YxX patch, Z contrast, outputs to N neurons
self.b = biasVariable([self.shape[-1]],bias=bias) # N bias variables to go with the N neurons
def setupOutput(self):
self.output = tf.nn.relu(conv2d(self.input,self.W,strides=self.strides) + self.b,name=self.name)
class ConvSoftMax(Conv2D):
def setupOutput(self):
self.output = tf.nn.softmax(conv2d(self.input,self.W) + self.b)
# inputShape = self.input.get_shape()
# convResult = conv2d(self.input,self.W) + self.b
#
# convResult = tf.reshape(convResult,[-1,self.units]) # flatten reduced image into a vector
# softMaxed = tf.nn.softmax(convResult)
# self.output = tf.reshape(softMaxed,[-1] + inputShape[1:3].as_list() + [self.units])
class Conv3D(Conv2D):
def __init__(self,input,shape,name,strides=[1,1,1,1,1],std=1.0,bias=0.1):
super(Conv3D,self).__init__(input,shape,name,strides,std,bias)
def setupOutput(self):
self.output = tf.nn.relu(conv3d(self.input,self.W,strides=self.strides) + self.b,name=self.name)
class Conv3DSoftMax(ConvSoftMax):
def setupOutput(self):
inputShape = self.input.get_shape()
convResult = conv3d(self.input,self.W) + self.b
convResult = tf.reshape(convResult,[-1,self.units]) # flatten reduced image into a vector
softMaxed = tf.nn.softmax(convResult)
self.output = tf.reshape(softMaxed,[-1] + inputShape[1:4].as_list() + [self.units])
class MaxPool2x2(UtilityLayer):
def setupOutput(self):
with tf.variable_scope(self.name):
self.output = max_pool_2x2(self.input)
class MaxPool(UtilityLayer):
def __init__(self,input,shape,name):
self.shape = shape
super(MaxPool,self).__init__(input,name)
def setupOutput(self):
with tf.variable_scope(self.name):
self.output = max_pool(self.input,shape=self.shape)
class MaxPool3D(MaxPool):
def setupOutput(self):
with tf.variable_scope(self.name):
self.output = max_pool3d(self.input,shape=self.shape)
class L2Norm(UtilityLayer):
def __init__(self,input,name):
super(L2Norm,self).__init__(input,name)
def setupOutput(self):
with tf.variable_scope(self.name):
self.output = tf.nn.l2_normalize(self.input,-1)
class Resample(UtilityLayer):
def __init__(self,input,outputShape,name,method=tf.image.ResizeMethod.BICUBIC,alignCorners=True):
self.outputShape = outputShape
self.method = method
self.alignCorners = alignCorners
super(Resample,self).__init__(input,name)
def setupOutput(self):
with tf.variable_scope(self.name):
try:
self.output = tf.image.resize_images(self.input,self.outputShape,method=self.method)#,align_corners=self.alignCorners)
except:
self.output = tf.image.resize_images(self.input,self.outputShape[0],self.outputShape[1],method=self.method)#,align_corners=self.alignCorners)
class Dropout(UtilityLayer):
def __init__(self,input,name):
self.input = input
self.name = name
super(Dropout,self).__init__(input,name)
def initialize(self):
with tf.variable_scope(self.name):
self.keepProb = tf.placeholder('float') # Variable to hold the dropout probability
def setupOutput(self):
with tf.variable_scope(self.name):
self.output = tf.nn.dropout(self.input,self.keepProb)
self.output.get_shape = self.input.get_shape # DEBUG: remove this whenever TensorFlow fixes this bug
#*** Main Part ***
if __name__ == '__main__':
import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
session = tf.InteractiveSession()
x = tf.placeholder('float',shape=[None,784],name='input') # Input tensor
y_ = tf.placeholder('float', shape=[None,10],name='correctLabels') # Correct labels
trainingIterations = 5000
# L1 = ReLu(x,512,'relu1')
# L2 = ReLu(L1.output,128,'relu2')
# L3 = ReLu(L2.output,64,'relu3')
# L4 = SoftMax(x,10,'softmax')
# y = L4.output
# trainDict = {}; testDict = trainDict
# logName = 'logs/softmax'
xImage = tf.reshape(x,[-1,28,28,1]) # Reshape samples to 28x28x1 images
L1 = Conv2D(xImage,[5,5,1,32],'Conv1')
L2 = MaxPool2x2(L1.output,'MaxPool1')
L3 = Conv2D(L2.output,[5,5,32,64],'Conv2')
L4 = MaxPool2x2(L3.output,'MaxPool2')
L5 = ReLu(L4.output,128,'relu1')
L6 = Dropout(L5.output,'dropout')
    L7 = SoftMax(L6.output,10,'softmax')
y = L7.output
kp = 0.5; trainDict = {L6.keepProb:kp}
kp = 1.0; testDict = {L6.keepProb:kp}
logName = 'logs/Conv'
# Training and evaluation
crossEntropy = -tf.reduce_sum(y_*tf.log(y)) # cost function
    #trainStep = tf.train.GradientDescentOptimizer(0.01).minimize(crossEntropy)
    trainStep = tf.train.AdamOptimizer(1e-4).minimize(crossEntropy)
correctPrediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correctPrediction,'float'))
train(session=session,trainingData=mnist.train,testingData=mnist.test,truth=y_,input=x,cost=crossEntropy,trainingStep=trainStep,accuracy=accuracy,iterations=trainingIterations,miniBatch=100,trainDict=trainDict,testDict=testDict,logName=logName)
#plotFields(L1,[28,28],figOffset=1)
|
robb-brown/IntroToDeepLearning
|
tfs/tfs/TensorFlowInterface.py
|
Python
|
mit
| 15,513
|
import functools
import numpy
import chainer
from chainer.backends import cuda
from chainer.testing import attr
class BackendConfig(object):
_props = [
('use_cuda', False),
('use_cudnn', 'never'),
('cudnn_deterministic', False),
('autotune', False),
('use_ideep', 'never'),
]
def __init__(self, params):
if not isinstance(params, dict):
raise TypeError('params must be a dict.')
self._contexts = []
# Default values
for k, v in self._props:
setattr(self, k, v)
# Specified values
for k, v in params.items():
if not hasattr(self, k):
raise ValueError('Parameter {} is not defined'.format(k))
setattr(self, k, v)
@property
def xp(self):
if self.use_cuda:
return cuda.cupy
else:
return numpy
def __enter__(self):
self._contexts = [
chainer.using_config(
'use_cudnn', self.use_cudnn),
chainer.using_config(
'cudnn_deterministic', self.cudnn_deterministic),
chainer.using_config(
'autotune', self.autotune),
chainer.using_config(
'use_ideep', self.use_ideep),
]
for c in self._contexts:
c.__enter__()
return self
def __exit__(self, typ, value, traceback):
for c in reversed(self._contexts):
c.__exit__(typ, value, traceback)
def __repr__(self):
lst = []
for k, _ in self._props:
lst.append('{}={!r}'.format(k, getattr(self, k)))
return '<BackendConfig {}>'.format(' '.join(lst))
def get_func_str(self):
"""Returns a string that can be used in method name"""
lst = []
for k, _ in self._props:
val = getattr(self, k)
if val is True:
val = 'true'
elif val is False:
val = 'false'
else:
val = str(val)
lst.append('{}_{}'.format(k, val))
return '__'.join(lst)
def get_pytest_marks(self):
marks = []
if self.use_cuda:
marks.append(attr.gpu)
if self.use_cudnn != 'never':
marks.append(attr.cudnn)
else:
if self.use_ideep != 'never':
marks.append(attr.ideep)
assert all(callable(_) for _ in marks)
return marks
def _wrap_backend_test_method(impl, param, method_name):
backend_config = BackendConfig(param)
marks = backend_config.get_pytest_marks()
new_method_name = '{}__{}'.format(
method_name, backend_config.get_func_str())
@functools.wraps(impl)
def func(self, *args, **kwargs):
impl(self, backend_config, *args, **kwargs)
func.__name__ = new_method_name
# Apply test marks
for mark in marks:
func = mark(func)
return func, new_method_name
def inject_backend_tests(method_names, params):
if not isinstance(method_names, list):
raise TypeError('method_names must be a list.')
if not isinstance(params, list):
raise TypeError('params must be a list of dicts.')
if not all(isinstance(d, dict) for d in params):
raise TypeError('params must be a list of dicts.')
def wrap(case):
for method_name in method_names:
impl = getattr(case, method_name)
delattr(case, method_name)
for i_param, param in enumerate(params):
new_impl, new_method_name = _wrap_backend_test_method(
impl, param, method_name)
if hasattr(case, new_method_name):
raise RuntimeError(
'Test fixture already exists: {}'.format(
new_method_name))
setattr(case, new_method_name, new_impl)
return case
return wrap
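# --- Hypothetical usage sketch (not part of the original module) ---
# import unittest
#
# @inject_backend_tests(
#     ['test_forward'],
#     [{'use_cuda': False},
#      {'use_cuda': True, 'use_cudnn': 'always'}])
# class TestMyFunction(unittest.TestCase):
#     def test_forward(self, backend_config):
#         xp = backend_config.xp          # numpy or cupy
#         x = xp.ones((2, 3), dtype='float32')
#         ...  # run the function under test with this backend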
|
aonotas/chainer
|
chainer/testing/backend.py
|
Python
|
mit
| 3,973
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TesisApp.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
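# Typical invocations (standard Django management commands, shown for
# illustration only):
#   $ python manage.py migrate
#   $ python manage.py runserver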
|
scairi/Eevee
|
TesisApp/manage.py
|
Python
|
apache-2.0
| 806
|
"""
URL filter and manipulation tools
http://github.com/adbar/courlan
"""
import re
from pathlib import Path
from setuptools import setup
def get_version(package):
"Return package version as listed in `__version__` in `init.py`"
# version = Path(package, '__init__.py').read_text() # Python >= 3.5
with open(str(Path(package, '__init__.py')), 'r', encoding='utf-8') as filehandle:
initfile = filehandle.read()
return re.search('__version__ = [\'"]([^\'"]+)[\'"]', initfile).group(1)
def get_long_description():
"Return the README"
with open('README.rst', 'r', encoding='utf-8') as filehandle:
long_description = filehandle.read()
#long_description += "\n\n"
#with open("CHANGELOG.md", encoding="utf8") as f:
# long_description += f.read()
return long_description
setup(
name='courlan',
version=get_version('courlan'),
description='Clean, filter, normalize, and sample URLs',
long_description=get_long_description(),
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
'Development Status :: 4 - Beta',
#'Development Status :: 5 - Production/Stable',
#'Development Status :: 6 - Mature',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Information Technology',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Text Processing :: Filters',
],
keywords=['urls', 'url-parsing', 'url-manipulation', 'preprocessing', 'validation', 'webcrawling'],
url='http://github.com/adbar/courlan',
author='Adrien Barbaresi',
author_email='barbaresi@bbaw.de',
license='GPLv3+',
packages=['courlan'],
project_urls={
"Source": "https://github.com/adbar/courlan",
"Coverage": "https://codecov.io/github/adbar/courlan",
"Tracker": "https://github.com/adbar/courlan/issues",
},
#package_data={},
include_package_data=True,
python_requires='>=3.5',
install_requires=[
'tldextract; python_version < "3.6"',
'tld; python_version >= "3.6"',
'urllib3>=1.25,<2',
],
#extras_require=extras,
entry_points = {
'console_scripts': ['courlan=courlan.cli:main'],
},
# platforms='any',
tests_require=['pytest', 'tox'],
zip_safe=False,
)
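# Illustrative only (not part of the original setup script): once installed,
# the console_scripts entry point configured above exposes a `courlan` command:
#   $ pip install courlan
#   $ courlan --help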
|
adbar/url-tools
|
setup.py
|
Python
|
gpl-2.0
| 3,075
|
class Scheduler(object):
"""Define a domain."""
def __init__(self, matches, problem):
""".
PARAMETERS TYPE Potential Arguments
-----------------------------------------------
"""
self.matches = matches
schedule = []
self.allSchedules = []
for result in problem.getSolutions():
for k in result.keys():
course = k
local = result[k]
schedule.append((course, local))
self.allSchedules.append(schedule.pop())
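# --- Hypothetical usage sketch (not from the original source), assuming the
# python-constraint package supplies the problem object:
# from constraint import Problem
# problem = Problem()
# problem.addVariable('math101', ['roomA', 'roomB'])
# problem.addVariable('phys102', ['roomA', 'roomB'])
# problem.addConstraint(lambda a, b: a != b, ('math101', 'phys102'))
# s = Scheduler(matches=None, problem=problem)
# print(s.allSchedules)   # e.g. [[('math101', 'roomA'), ('phys102', 'roomB')], ...]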
|
renatorangel/scheduler
|
src/structures/scheduler.py
|
Python
|
apache-2.0
| 566
|
from sourcemash.database import db
class Feed(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(250))
url = db.Column(db.String(2083), index=True, unique=True)
description = db.Column(db.Text)
image_url = db.Column(db.String(2083))
last_updated = db.Column(db.DateTime)
topic = db.Column(db.String(50))
public = db.Column(db.Boolean, default=False)
item_count = db.Column(db.Integer, default=0)
items = db.relationship('Item', backref='feed', lazy='dynamic')
def __repr__(self):
return "<Feed %r (%r)>" % (self.title, self.url)
|
sourcemash/Sourcemash
|
sourcemash/feeds/models.py
|
Python
|
gpl-2.0
| 618
|
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import mock
import netaddr
from neutron.common import exceptions as ex
from quark.db import models
from quark import exceptions as q_ex
from quark.plugin_modules import floating_ips
from quark.tests import test_quark_plugin
class TestRemoveFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None):
flip_model = None
if flip:
flip_model = models.IPAddress()
flip_model.update(flip)
with contextlib.nested(
mock.patch("quark.db.api.floating_ip_find"),
mock.patch("quark.db.api.floating_ip_disassociate_fixed_ip"),
mock.patch("quark.db.api.port_disassociate_ip"),
mock.patch("quark.db.api.ip_address_deallocate"),
mock.patch("quark.ipam.QuarkIpam.deallocate_ip_address"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".remove_floating_ip")
) as (flip_find, db_fixed_ip_disassoc, db_port_disassoc, db_dealloc,
mock_dealloc, mock_remove_flip):
flip_find.return_value = flip_model
yield
def test_delete_floating_by_ip_address_id(self):
flip = dict(id=1, address=3232235876, address_readable="192.168.1.100",
subnet_id=1, network_id=2, version=4, used_by_tenant_id=1,
network=dict(ipam_strategy="ANY"))
with self._stubs(flip=flip):
self.plugin.delete_floatingip(self.context, 1)
def test_delete_floating_by_when_ip_address_does_not_exists_fails(self):
with self._stubs():
with self.assertRaises(q_ex.FloatingIpNotFound):
self.plugin.delete_floatingip(self.context, 1)
class TestFloatingIPUtilityMethods(test_quark_plugin.TestQuarkPlugin):
def test_get_next_available_fixed_ip_with_single_fixed_ip(self):
port = models.Port()
port.update(dict(id=1))
fixed_ip_addr = netaddr.IPAddress('192.168.0.1')
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port.ip_addresses.append(fixed_ip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1')
def test_get_next_available_fixed_ip_with_mult_fixed_ips(self):
port = models.Port()
port.update(dict(id=1))
for ip_addr in ["192.168.0.1", "192.168.0.2", "192.168.0.3"]:
fixed_ip_addr = netaddr.IPAddress(ip_addr)
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port.ip_addresses.append(fixed_ip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip["address_readable"], '192.168.0.1')
def test_get_next_available_fixed_ip_with_no_avail_fixed_ips(self):
port = models.Port()
port.update(dict(id=1))
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
flip_addr = netaddr.IPAddress("10.0.0.1")
flip = models.IPAddress()
flip.update(dict(address_type="floating",
address=int(flip_addr),
version=4,
address_readable=str(flip_addr),
allocated_at=datetime.datetime.now()))
flip.fixed_ip = fixed_ip
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
fixed_ip_addr = netaddr.IPAddress("192.168.0.2")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
flip_addr = netaddr.IPAddress("10.0.0.2")
flip = models.IPAddress()
flip.update(dict(address_type="floating",
address=int(flip_addr),
version=4,
address_readable=str(flip_addr),
allocated_at=datetime.datetime.now()))
flip.fixed_ip = fixed_ip
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip, None)
def test_get_next_available_fixed_ip_with_avail_fixed_ips(self):
port = models.Port()
port.update(dict(id=1))
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
flip_addr = netaddr.IPAddress("10.0.0.1")
flip = models.IPAddress()
flip.update(dict(address_type="floating",
address=int(flip_addr),
version=4,
address_readable=str(flip_addr),
allocated_at=datetime.datetime.now()))
flip.fixed_ip = fixed_ip
port.ip_addresses.append(fixed_ip)
port.ip_addresses.append(flip)
fixed_ip_addr = netaddr.IPAddress("192.168.0.2")
fixed_ip = models.IPAddress()
fixed_ip.update(dict(address_type="fixed",
address=int(fixed_ip_addr),
version=4,
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
        port.ip_addresses.append(fixed_ip)
next_fixed_ip = floating_ips._get_next_available_fixed_ip(port)
self.assertEqual(next_fixed_ip["address_readable"], "192.168.0.2")
class TestCreateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None, port=None, ips=None, network=None):
port_model = None
if port:
port_model = models.Port()
port_model.update(dict(port=port))
if ips:
for ip in ips:
ip_model = models.IPAddress()
ip_model.update(ip)
addr_type = ip.get("address_type")
if addr_type == "floating" and "fixed_ip_addr" in ip:
fixed_ip = models.IPAddress()
fixed_ip.update(next(ip_addr for ip_addr in ips
if (ip_addr["address_readable"] ==
ip["fixed_ip_addr"])))
ip_model.fixed_ip = fixed_ip
port_model.ip_addresses.append(ip_model)
flip_model = None
if flip:
flip_model = models.IPAddress()
flip_model.update(flip)
net_model = None
if network:
net_model = models.Network()
net_model.update(network)
def _alloc_ip(context, new_addr, net_id, port_m, *args, **kwargs):
new_addr.append(flip_model)
def _port_assoc(context, ports, addr, enable_port=None):
addr.ports = ports
return addr
def _flip_fixed_ip_assoc(context, addr, fixed_ip):
addr.fixed_ip = fixed_ip
return addr
with contextlib.nested(
mock.patch("quark.db.api.floating_ip_find"),
mock.patch("quark.db.api.network_find"),
mock.patch("quark.db.api.port_find"),
mock.patch("quark.ipam.QuarkIpam.allocate_ip_address"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".register_floating_ip"),
mock.patch("quark.db.api.port_associate_ip"),
mock.patch("quark.db.api.floating_ip_associate_fixed_ip")
) as (flip_find, net_find, port_find, alloc_ip, mock_reg_flip,
port_assoc, fixed_ip_assoc):
flip_find.return_value = flip_model
net_find.return_value = net_model
port_find.return_value = port_model
alloc_ip.side_effect = _alloc_ip
port_assoc.side_effect = _port_assoc
fixed_ip_assoc.side_effect = _flip_fixed_ip_assoc
yield
def test_create_with_a_port(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(flip=floating_ip, port=port,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip["fixed_ip_address"], "192.168.0.1")
def test_create_without_a_port(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
with self._stubs(flip=floating_ip, port=None,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"], port_id=None)
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip.get("fixed_ip_address"), None)
def test_create_with_fixed_ip_specified(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ips = []
for ip_addr in ["192.168.0.1", "192.168.0.2"]:
fixed_ip_addr = netaddr.IPAddress(ip_addr)
fixed_ips.append(dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now()))
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(flip=floating_ip, port=port,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"],
port_id=port["id"], fixed_ip_address="192.168.0.2")
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip["fixed_ip_address"], "192.168.0.2")
def test_create_with_floating_ip_specified(self):
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(id=1, address=int(floating_ip_addr), version=4,
address_readable=str(floating_ip_addr), subnet_id=1,
network_id=2, used_by_tenant_id=1)
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", address=int(fixed_ip_addr),
version=4, address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
port = dict(id=2)
with self._stubs(flip=floating_ip, port=port,
ips=fixed_ips, network=network):
request = dict(floating_network_id=network["id"],
port_id=port["id"], floating_ip_address="10.0.0.1")
flip = self.plugin.create_floatingip(self.context,
dict(floatingip=request))
self.assertEqual(flip["floating_ip_address"], "10.0.0.1")
self.assertEqual(flip["fixed_ip_address"], "192.168.0.1")
def test_create_without_network_id_fails(self):
with self._stubs():
with self.assertRaises(ex.BadRequest):
request = dict(port_id=2, floating_ip_address="10.0.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_with_invalid_network_fails(self):
with self._stubs():
with self.assertRaises(ex.NetworkNotFound):
request = dict(floating_network_id=123,
port_id=2, floating_ip_address="10.0.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_with_invalid_port_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
with self._stubs(network=network):
with self.assertRaises(ex.PortNotFound):
request = dict(floating_network_id=network["id"],
port_id=2, floating_ip_address="10.0.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_with_invalid_fixed_ip_for_port_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ips = [dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=fixed_ips, network=network):
with self.assertRaises(
q_ex.FixedIpDoesNotExistsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"],
fixed_ip_address="192.168.0.2")
                self.plugin.create_floatingip(self.context,
                                              dict(floatingip=request))
def test_create_with_port_and_fixed_ip_with_existing_flip_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now(),
fixed_ip_addr="192.168.0.1")
ips = [fixed_ip, floating_ip]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=ips, network=network):
with self.assertRaises(
q_ex.PortAlreadyContainsFloatingIp):
request = dict(floating_network_id=network["id"],
port_id=port["id"],
fixed_ip_address="192.168.0.1")
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_when_port_has_no_fixed_ips_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, network=network):
with self.assertRaises(
q_ex.NoAvailableFixedIpsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
def test_create_when_port_has_no_available_fixed_ips_fails(self):
network = dict(id="00000000-0000-0000-0000-000000000000",
ipam_strategy="ANY")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
floating_ip_addr = netaddr.IPAddress("10.0.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now(),
fixed_ip_addr="192.168.0.1")
ips = [fixed_ip, floating_ip]
port = dict(id="abcdefgh-1111-2222-3333-1234567890ab")
with self._stubs(port=port, ips=ips, network=network):
with self.assertRaises(
q_ex.NoAvailableFixedIpsForPort):
request = dict(floating_network_id=network["id"],
port_id=port["id"])
self.plugin.create_floatingip(self.context,
dict(floatingip=request))
class TestUpdateFloatingIPs(test_quark_plugin.TestQuarkPlugin):
@contextlib.contextmanager
def _stubs(self, flip=None, curr_port=None, new_port=None, ips=None):
curr_port_model = None
if curr_port:
curr_port_model = models.Port()
curr_port_model.update(curr_port)
new_port_model = None
if new_port:
new_port_model = models.Port()
new_port_model.update(new_port)
if ips:
for ip in ips:
ip_model = models.IPAddress()
ip_model.update(ip)
addr_type = ip.get("address_type")
if addr_type == "floating" and "fixed_ip_addr" in ip:
fixed_ip = models.IPAddress()
fixed_ip.update(next(ip_addr for ip_addr in ips
if (ip_addr["address_readable"] ==
ip["fixed_ip_addr"])))
ip_model.fixed_ip = fixed_ip
new_port_model.ip_addresses.append(ip_model)
flip_model = None
if flip:
flip_model = models.IPAddress()
flip_model.update(flip)
if curr_port_model:
flip_model.ports = [curr_port_model]
fixed_ip = flip.get("fixed_ip_address")
if fixed_ip:
addr = netaddr.IPAddress(fixed_ip)
fixed_ip_model = models.IPAddress()
fixed_ip_model.update(dict(address_readable=fixed_ip,
address=int(addr), version=4,
address_type="fixed"))
flip_model.fixed_ip = fixed_ip_model
def _find_port(context, id, **kwargs):
return (curr_port_model if (curr_port_model and
id == curr_port_model.id)
else new_port_model)
def _flip_assoc(context, addr, fixed_ip):
addr.fixed_ip = fixed_ip
return addr
def _flip_disassoc(context, addr):
addr.fixed_ip = None
return addr
def _port_assoc(context, ports, addr, enable_ports=None):
addr.ports = ports
return addr
def _port_dessoc(context, ports, addr):
addr.associations = []
addr.ports = []
return addr
with contextlib.nested(
mock.patch("quark.db.api.floating_ip_find"),
mock.patch("quark.db.api.port_find"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".register_floating_ip"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".update_floating_ip"),
mock.patch("quark.drivers.unicorn_driver.UnicornDriver"
".remove_floating_ip"),
mock.patch("quark.db.api.port_associate_ip"),
mock.patch("quark.db.api.port_disassociate_ip"),
mock.patch("quark.db.api.floating_ip_associate_fixed_ip"),
mock.patch("quark.db.api.floating_ip_disassociate_fixed_ip")
) as (flip_find, port_find, reg_flip, update_flip, rem_flip,
port_assoc, port_dessoc, flip_assoc, flip_dessoc):
flip_find.return_value = flip_model
port_find.side_effect = _find_port
port_assoc.side_effect = _port_assoc
port_dessoc.side_effect = _port_dessoc
flip_assoc.side_effect = _flip_assoc
flip_dessoc.side_effect = _flip_disassoc
yield
def test_update_with_new_port_and_no_previous_port(self):
new_port = dict(id="2")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
ips = [fixed_ip]
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port, ips=ips):
content = dict(port_id=new_port["id"])
ret = self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
self.assertEqual(ret["fixed_ip_address"], "192.168.0.1")
self.assertEqual(ret["port_id"], new_port["id"])
def test_update_with_new_port(self):
curr_port = dict(id="1")
new_port = dict(id="2")
fixed_ip_addr = netaddr.IPAddress("192.168.0.1")
fixed_ip = dict(address_type="fixed", version=4,
address=int(fixed_ip_addr),
address_readable=str(fixed_ip_addr),
allocated_at=datetime.datetime.now())
ips = [fixed_ip]
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, curr_port=curr_port,
new_port=new_port, ips=ips):
content = dict(port_id=new_port["id"])
ret = self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
self.assertEqual(ret["fixed_ip_address"], "192.168.0.1")
self.assertEqual(ret["port_id"], new_port["id"])
def test_update_with_no_port(self):
curr_port = dict(id="1")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, curr_port=curr_port):
content = dict(port_id=None)
ret = self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
self.assertEqual(ret.get("fixed_ip_address"), None)
self.assertEqual(ret.get("port_id"), None)
def test_update_with_non_existent_port_should_fail(self):
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip):
with self.assertRaises(ex.PortNotFound):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_port_with_no_fixed_ip_avail_should_fail(self):
new_port = dict(id="123")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port):
with self.assertRaises(q_ex.NoAvailableFixedIpsForPort):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_same_port_should_fail(self):
new_port = dict(id="123")
curr_port = dict(id="123")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port, curr_port=curr_port):
with self.assertRaises(q_ex.PortAlreadyAssociatedToFloatingIp):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_when_port_has_a_different_flip_should_fail(self):
new_port = dict(id="123")
floating_ip_addr = netaddr.IPAddress("192.168.0.1")
floating_ip = dict(address_type="floating", version=4,
address=int(floating_ip_addr),
address_readable=str(floating_ip_addr),
allocated_at=datetime.datetime.now())
ips = [floating_ip]
curr_port = dict(id="456")
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip, new_port=new_port,
curr_port=curr_port, ips=ips):
with self.assertRaises(q_ex.PortAlreadyContainsFloatingIp):
content = dict(port_id="123")
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_no_port_and_no_previous_port_should_fail(self):
addr = netaddr.IPAddress("10.0.0.1")
flip = dict(id="3", fixed_ip_address="172.16.1.1", address=int(addr),
address_readable=str(addr))
with self._stubs(flip=flip):
with self.assertRaises(q_ex.FloatingIpUpdateNoPortIdSupplied):
content = dict(port_id=None)
self.plugin.update_floatingip(self.context, flip["id"],
dict(floatingip=content))
def test_update_with_missing_port_id_param_should_fail(self):
with self._stubs():
with self.assertRaises(ex.BadRequest):
content = {}
self.plugin.update_floatingip(self.context, "123",
dict(floatingip=content))
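# --- Illustrative only (hypothetical command, not from the original source) ---
# These cases extend test_quark_plugin.TestQuarkPlugin and can be collected
# with a standard test runner, e.g.:
#   $ python -m pytest quark/tests/plugin_modules/test_floating_ips.py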
|
alanquillin/quark
|
quark/tests/plugin_modules/test_floating_ips.py
|
Python
|
apache-2.0
| 30,810
|
import sales_order_notes
|
OpusVL/odoo_line_notes_field
|
line_notes_field/__init__.py
|
Python
|
agpl-3.0
| 25
|