#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_backend_test.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2007 Donald N. Allingham
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Package providing filter rules for GRAMPS.
"""
from ._hascitation import HasCitation
from ._allcitations import AllCitations
from ._changedsince import ChangedSince
from ._citationprivate import CitationPrivate
from ._hasgallery import HasGallery
from ._hasidof import HasIdOf
from ._hasnote import HasNote
from ._hasnotematchingsubstringof import HasNoteMatchingSubstringOf
from ._hasnoteregexp import HasNoteRegexp
from ._hasreferencecountof import HasReferenceCountOf
from ._hassource import HasSource
from ._hassourceidof import HasSourceIdOf
from ._hassourcenoteregexp import HasSourceNoteRegexp
from ._matchesfilter import MatchesFilter
from ._matchespagesubstringof import MatchesPageSubstringOf
from ._matchesrepositoryfilter import MatchesRepositoryFilter
from ._matchessourcefilter import MatchesSourceFilter
from ._regexpidof import RegExpIdOf
from ._regexpsourceidof import RegExpSourceIdOf
from ._hastag import HasTag
editor_rule_list = [
HasCitation,
AllCitations,
ChangedSince,
CitationPrivate,
HasGallery,
HasIdOf,
HasNote,
HasNoteRegexp,
HasReferenceCountOf,
HasSource,
HasSourceIdOf,
HasSourceNoteRegexp,
MatchesFilter,
MatchesPageSubstringOf,
MatchesRepositoryFilter,
MatchesSourceFilter,
RegExpIdOf,
RegExpSourceIdOf,
HasTag
]
from django.db import models
from django.test import TestCase
from ..models.compat import YAMLField
class TestYAMLModel(models.Model):
yaml_field = YAMLField()
class TestYAMLField(TestCase):
...
def test_to_python(self):
yaml_data = """
main:
- 1
- 2
- 3
"""
yaml_field = YAMLField()
yaml_field.to_python(yaml_data)
yaml_data = ""
yaml_field = YAMLField()
self.assertEqual(None, yaml_field.to_python(yaml_data))
yaml_data = """`"""
yaml_field = YAMLField()
with self.assertRaises(Exception):
yaml_field.to_python(yaml_data)
def test_get_prep_value(self):
yaml_field = YAMLField()
self.assertEqual("", yaml_field.get_prep_value(None))
yaml_field = YAMLField()
data = {"aaa": "aaa😺",}
self.assertEqual(
"aaa: aa | a😺\n",
yaml_field.get_prep_value(data)
)
#!/usr/bin/env python3
from collections import namedtuple
from pdfrw import PdfName, PdfDict, PdfObject, PdfString
PageLabelTuple = namedtuple("PageLabelScheme",
"startpage style prefix firstpagenum")
defaults = {"style": "arabic", "prefix": '', "firstpagenum": 1}
styles = {"arabic": PdfName('D'),
"roman lowercase": PdfName('r'),
"roman uppercase": PdfName('R'),
"letters lowercase": PdfName('a'),
"letters uppercase": PdfName('A')}
stylecodes = {v: a for a, v in styles.items()}
class PageLabelScheme(PageLabelTuple):
"""Represents a page numbering scheme.
startpage : the index in the pdf (starting from 0) of the
first page the scheme will be applied to.
style : page numbering style (arabic, roman [lowercase|uppercase], letters [lowercase|uppercase])
prefix: a prefix to be prepended to all page labels
firstpagenum : where to start numbering
"""
__slots__ = tuple()
def __new__(cls, startpage,
style=defaults["style"],
prefix=defaults["prefix"],
firstpagenum=defaults["firstpagenum"]):
if style not in styles:
raise ValueError("PageLabel style must be one of %s" % cls.styles())
return super().__new__(cls, int(startpage), style, str(prefix), int(firstpagenum))
@classmethod
def from_pdf(cls, pagenum, opts):
"""Returns a new PageLabel using options from a pdfrw object"""
return cls(pagenum,
style=stylecodes.get(opts.S, defaults["style"]),
prefix=(opts.P and opts.P.decode() or defaults["prefix"]),
firstpagenum=(opts.St or defaults["firstpagenum"]))
@staticmethod
def styles():
"""Li | st of the allowed styles"" | "
return styles.keys()
def pdfobjs(self):
"""Returns a tuple of two elements to insert in the PageLabels.Nums
entry of a pdf"""
page_num = PdfObject(self.startpage)
opts = PdfDict(S=styles[self.style])
if self.prefix != defaults["prefix"]:
opts.P = PdfString.encode(self.prefix)
if self.firstpagenum != defaults["firstpagenum"]:
opts.St = PdfObject(self.firstpagenum)
return page_num, opts
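# A minimal usage sketch (not part of the original module): build a scheme
# and inspect the two objects destined for the PDF's PageLabels.Nums array.
if __name__ == "__main__":
    scheme = PageLabelScheme(3, style="roman lowercase")
    page_num, opts = scheme.pdfobjs()
    # page_num wraps the start index 3; opts is PdfDict(S=PdfName('r')).
    # prefix and firstpagenum are omitted because they equal the defaults.
    print(page_num, opts)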
import operator
from functools import reduce
from collections import namedtuple
from django.db.models import Q
from mi.models import Target
from wins.models import HVC
HVCStruct = namedtuple('HVCStruct', ['campaign_id', 'financial_year'])
def get_all_hvcs_referenced_by_targets(financial_years=None):
"""
Get a list of all hvcs that need to be created that are referenced by Targets
:param financial_years: optional, you can manually define the financial years
instead of getting them from the Target
:type financial_years: List[int]
:returns a list of hvc (campaign_id, financial year) tuples that don't already exist: List[HVCStruct]
"""
hvc_ids_expected_by_targets = Target.objects.all().values_list('campaign_id', flat=True).distinct()
if not financial_years:
financial_years = Target.objects.all().values_list('financial_year', flat=True).distinct()
to_create = [
HVCStruct(campaign_id=campaign_id,
financial_year=int(str(financial_year)[-2:]))
for campaign_id in hvc_ids_expected_by_targets for financial_year in financial_years
]
filter_q = reduce(
operator.or_,
[Q(campaign_id=data.campaign_id, financial_year=data.financial_year)
for data in to_create]
)
already_existing = [
HVCStruct(**data) for data in HVC.objects.filter(filter_q).values('campaign_id', 'financial_year')
]
to_create_without_already_existing = set(to_create) - set(already_existing)
return to_create_without_already_existing
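# Worked example of the financial-year truncation above (illustrative, not
# part of the original module): Target stores full years while HVC stores
# two-digit years, hence the int(str(...)[-2:]) conversion.
#
# >>> int(str(2017)[-2:])
# 17
# >>> HVCStruct(campaign_id='E001', financial_year=17)  # hypothetical id
# HVCStruct(campaign_id='E001', financial_year=17)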
#!/usr/bin/env python
"""
NPR 2017-01-22
www.npr.org/2017/01/22/511046359/youve-got-to-comb-together-to-solve-this-one
The numbers 5,000, 8,000, and 9,000 share a property that only five integers altogether have.
Identify the property and the two other integers that have it.
"""
# The property is that they are supervocalic (one each of aeiou).
# This code will simply try to find the other such numbers.
def is_supervocalic(w):
'''
Determine if a word has one each of a, e, i, o, u
We also want it not to have a 'y'
'''
vowels = 'aeiou'
for vowel in vowels:
if w.lower().count(vowel) != 1:
return False
if 'y' in w.lower():
return False
return True
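# Quick sanity checks for the predicate above (illustrative, not in the
# original script):
assert is_supervocalic("sequoia")        # exactly one of each vowel, no 'y'
assert not is_supervocalic("banana")     # 'a' appears three times
assert not is_supervocalic("obviously")  # repeated 'o', and contains a 'y'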
# Thanks to http://stackoverflow.com/a/19193721
def numToWords(num,join=True):
'''Convert an integer number into words.'''
units = ['','one','two','three','four','five','six','seven','eight','nine']
teens = ['','eleven','twelve','thirteen','fourteen','fifteen','sixteen', \
'seventeen','eighteen','nineteen']
tens = ['','ten','twenty','thirty','forty','fifty','sixty','seventy', \
'eighty','ninety']
thousands = ['','thousand','million','billion','trillion','quadrillion', \
'quintillion','sextillion','septillion','octillion', \
'nonillion','decillion','undecillion','duodecillion', \
'tredecillion','quattuordecillion','sexdecillion', \
'septendecillion','octodecillion','novemdecillion', \
'vigintillion']
words = []
if num==0: words.append('zero')
else:
numStr = '%d'%num
numStrLen = len(numStr)
groups = (numStrLen+2)/3
numStr = numStr.zfill(groups*3)
for i in range(0,groups*3,3):
h,t,u = int(numStr[i]),int(numStr[i+1]),int(numStr[i+2])
g = groups-(i/3+1)
if h>=1:
words.append(units[h])
words.append('hundred')
if t>1:
words.append(tens[t])
if u>=1: words.append(units[u])
elif t==1:
if u>=1: words.append(teens[u])
else: words.append(tens[t])
else:
if u>=1: words.append(units[u])
if (g>=1) and ((h+t+u)>0): words.append(thousands[g])
if join: return ' '.join(words)
return words
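# Example (illustrative, not part of the original script): 5,000 is one of
# the puzzle's given numbers, and its spelling uses each vowel exactly once.
assert numToWords(5000) == 'five thousand'
assert is_supervocalic(numToWords(5000))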
# Note that every integer greater than 100,000 has a repeated vowel
for i in range(100000):
word = numToWords(i)
if is_supervocalic(word):
print i, word
"""
Django settings for testproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n$(okl9n*#au0%^wxgu$c#x(f%lby3v_j)wuti&6q-nx_35uj6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'testproject.urls'
WSGI_APPLICATION = 'testproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rotor sensing parameters."""
from makani.config import mconfig
from makani.control import system_types as m
@mconfig.Config
def MakeParams():
common_rotor_sensor = {
# Calibration for rotor speed and torque. This applies both to
# the speed sensed by and commanded to the motor controllers.
#
# TODO: The sign convention for rotor
# velocity should be reversed both here and in the motor
# controller.
'omega_cal': {'scale': -1.0, 'bias': 0.0, 'bias_count': 0},
'torque_cal': {'scale': -1.0, 'bias': 0.0, 'bias_count': 0},
}
return [common_rotor_sensor for _ in range(m.kNumMotors)]
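# Illustrative note (not part of the original config): assuming the usual
# linear calibration form calibrated = scale * raw + bias, the -1.0 scale
# above simply flips the sign of the sensed rotor speed and torque, which is
# what the TODO about reversing the sign convention refers to:
#
#   def apply_cal(raw, cal):  # hypothetical helper
#       return cal['scale'] * raw + cal['bias']
#
#   apply_cal(100.0, {'scale': -1.0, 'bias': 0.0, 'bias_count': 0})  # -100.0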
from datetime import date, time, datetime, timedelta
# from dateutil.relativedelta import relativedelta
#
# from django.conf import settings
# from django.core.urlresolvers import reverse
# from django.test import TestCase
#
# from eventtools.utils import datetimeify
# from eventtools_testapp.models import *
#
# from _fixture import bigfixture, reload_films
# from _inject_app import TestCaseWithApp as AppTestCase
#
# class TestViews(AppTestCase):
#
# def setUp(self):
# if hasattr(settings, 'OCCURRENCES_PER_PAGE'):
# self._old_OCCURRENCES_PER_PAGE = settings.OCCURRENCES_PER_PAGE
# settings.OCCURRENCES_PER_PAGE = 20
# super(TestViews, self).setUp()
#
# def tearDown(self):
# if hasattr(self, '_old_OCCURRENCES_PER_PAGE'):
# settings.OCCURRENCES_PER_PAGE = self._old_OCCURRENCES_PER_PAGE
# else:
# delattr(settings, 'OCCURRENCES_PER_PAGE')
# super(TestViews, self).tearDown()
#
# def test_purls(self):
# """
# An occurrence has a pURL based on its id.
# You can view a page for an occurrence.
# """
#
# e = self.daily_tour
# o = e.occurrences.all()[0]
#
# #occurrence page
# ourl = reverse('occurrence', args=(o.id,))
# self.assertEqual(o.get_absolute_url(), ourl)
# self.assertTrue(str(o.id) in ourl)
# r1 = self.client.get(ourl)
# self.assertEqual(r1.status_code, 200)
#
# self.assertContains(r1, "Daily Tour")
# self.assertContains(r1, "1 January 2010")
# self.assertNotContains(r1, "00:00")
# self.assertNotContains(r1, "12am")
# self.assertNotContains(r1, "midnight")
#
# e2 = self.weekly_talk
# ourl = reverse('occurrence', args=(e2.occurrences.all()[0].id,))
# r1 = self.client.get(ourl)
# self.assertContains(r1, "Weekly Talk")
# self.assertContains(r1, "1 January 2010, 10am–noon")
#
# def test_list_view(self):
# """
# You can view a paginated list of occurrences for an event qs, following a given day, using ?startdate=2010-10-22&page=2.
# Each page shows n=20 occurrences and paginates by that amount.
# The occurrences are in chronological order.
# The times of all-day events do not appear.
# If there are no events in a given day, the day is not shown.
# The occurrences are grouped by day (and thus a day's occurrences may span several pages - this makes computation easier).
# TODO if a day is unfinished, show 'more on page n+1'..
# If there are no events in a given page, a 'no events match' message is shown.
# """
# url = reverse('occurrence_list',)
# r = self.client.get(url, {'startdate':'2010-01-01'})
# self.assertEqual(r.context['occurrence_pool'].count(), 109)
# self.assertEqual(len(r.context['occurrence_page']), 20)
# self.assertEqual(r.context['occurrence_page'][0].start.date(), date(2010,1,1))
#
# #check results in chrono order
# d = r.context['occurrence_pool'][0].start
# for occ in r.context['occurrence_pool']:
# self.assertTrue(occ.start >= d)
# d = occ.start
#
# #should have some pagination (6 pages)
# self.assertNotContains(r, "Earlier") #it's the first page
# self.assertContains(r, "Later")
# self.assertContains(r, "Showing 1–20 of 109")
#
# self.assertContains(r, "Friday, 1 January 2010", 1) #only print the date once
# self.assertNotContains(r, "Saturday, 2 January 2010") #there are no events
# self.assertContains(r, "Sunday, 3 January 2010", 1) #only print the date once
#
# self.assertContains(r, "10am–​noon")
# self.assertNotContains(r, "12am")# these are all-day
# self.assertNotContains(r, "00:00")# these are all-day
# self.assertNotContains(r, "midnight") # these are all-day
#
# #doesn't matter how far back you go.
# r2 = self.client.get(url, {'startdate':'2000-01-01'})
# self.assertEqual(list(r.context['occurrence_pool']), list(r2.context['occurrence_pool']))
#
# #links
# o = r.context['occurrence_page'][0]
# ourl = reverse('occurrence', args=(o.id,))
# self.assertContains(r, ourl)
#
# #show a 'not found' message
# r = self.client.get(url, {'startdate':'2020-01-01'})
# self.assertEqual(r.context['occurrence_page'].count(), 0)
# self.assertContains(r, "Sorry, no events were found")
# self.assertNotContains(r, "Earlier")
# self.assertNotContains(r, "Later")
# self.assertNotContains(r, "Showing")
# self.assertEqual(r.status_code, 200) #not 404
#
#
# def test_date_range_view(self):
# """
# You can show all occurrences between two days on one page, by adding ?enddate=2010-10-24. Pagination adds or subtracts the difference in days (+1 - consider a single day) to the range.
# For some ranges, pagination is by a different amount:
# TODO: Precisely a month (paginate by month)
# TODO: Precisely a year (paginate by year)
# """
#
# url = reverse('occurrence_list',)
# r = self.client.get(url, {'startdate':'2010-01-01', 'enddate':'2010-01-05'})
# self.assertEqual(r.context['occurrence_pool'].count(), 109)
# self.assertEqual(len(r.context['occurrence_page']), 5)
# self.assertEqual(r.context['occurrence_page'][0].start.date(), date(2010,1,1))
# self.assertEqual(r.context['occurrence_page'].reverse()[0].start.date(), date(2010,1,5))
#
# self.assertContains(r, "Showing 1–5 January 2010")
# self.assertContains(r, '<a href="?startdate=2009-12-27&enddate=2009-12-31">Earlier</a>')
# self.assertContains(r, '<a href="?startdate=2010-01-06&enddate=2010-01-10">Later</a>')
#
# r = self.client.get(url, {'startdate':'2010-01-01', 'enddate':'2010-01-31'})
# self.assertContains(r, "Showing January 2010")
# # self.assertContains(r, '<a href="?datefrom=2009-12-01&dateto=2009-12-31">December 2009</a>')
# # self.assertContains(r, '<a href="?datefrom=2010-02-01&dateto=2010-02-28">February 2010</a>')
#
# def test_event_view(self):
# """
# You can view a paginated list of occurrences for an event.
# """
# #event page
# e = self.daily_tour
# eurl = reverse('event', kwargs={'event_slug': e.slug})
# self.assertEqual(e.get_absolute_url(), eurl)
# r3 = self.client.get(eurl, {'page': 2})
# self.assertEqual(r3.status_code, 200)
#
# #should have some pagination (3 pages)
# self.assertEqual(r3.context['occurrence_page'].count(), 20)
# self.assertContains(r3, "Earlier")
# self.assertContains(r3, "Later")
# self.assertContains(r3, "Showing 21–40 of 49")
#
# def test_ical(self):
# """
# You can view an ical for an occurrence.
# The ical is linked from the occurrence page.
# You can view an ical for a collection of occurrences.
# (TODO: do large icals perform well? If not we might have to make it a feed.)
# """
# e = self.daily_tour
# o = e.occurrences.all()[0]
#
# o_url = reverse('occurrence', kwargs={'occurrence_id': o.id })
# o_ical_url = reverse('occurrence_ical', kwargs={'occurrence_id': o.id })
# r = self.client.get(o_ical_url)
# self.assertEqual(r.status_code, 200)
#
# self.assertContains(r, "BEGIN:VCALENDAR", 1)
# self.assertContains(r, "BEGIN:VEVENT", 1)
#
# self.assertContains(r, "SUMMARY:Daily Tour", 1)
# self.assertContains(r, "DTSTART;VALUE=DATE:20100101", 1)
# self.assertContains(r, "DTEND;VALUE=DATE:20100101", 1)
# self.assertContains(r, "URL:http://testserver%s" % |
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
from __future__ import with_statement
import IECore
import Gaffer
import GafferUI
class CompoundDataPlugValueWidget( GafferUI.CompoundPlugValueWidget ) :
def __init__( self, plug, collapsed=True, label=None, summary=None, editable=True, **kw ) :
GafferUI.CompoundPlugValueWidget.__init__( self, plug, collapsed, label, summary, **kw )
self.__editable = editable
self.__footerWidget = None
def _childPlugWidget( self, childPlug ) :
return _MemberPlugValueWidget( childPlug, self._label( childPlug ) )
def _footerWidget( self ) :
if self.__footerWidget is not None :
return self.__footerWidget
if self.__class__ is CompoundDataPlugValueWidget : # slight hack so that SectionedCompoundDataPlugValueWidget doesn't get a plus button
self.__footerWidget = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
self.__footerWidget.append( GafferUI.Spacer( IECore.V2i( GafferUI.PlugWidget.labelWidth(), 1 ) ) )
self.__footerWidget.append(
GafferUI.MenuButton( image="plus.png", hasFrame=False, menu=GafferUI.Menu( self.__addMenuDefinition() ) )
)
self.__footerWidget.append( GafferUI.Spacer( IECore.V2i( 1 ), IECore.V2i( 999999, 1 ) ), expand = True )
return self.__footerWidget
## May be reimplemented by derived classes to return a suitable label
# for the member represented by childPlug.
def _label( self, childPlug ) :
if not childPlug.getFlags( Gaffer.Plug.Flags.Dynamic ) :
return childPlug["name"].getValue()
return None
def __addMenuDefinition( self ) :
result = IECore.MenuDefinition()
result.append( "/Add/Bool", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.BoolData( False ) ) } )
result.append( "/Add/Float", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.FloatData( 0 ) ) } )
result.append( "/Add/Int", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.IntData( 0 ) ) } )
result.append( "/Add/NumericDivider", { "divider" : True } )
result.append( "/Add/String", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.StringData( "" ) ) } )
result.append( "/Add/StringDivider", { "divider" : True } )
result.append( "/Add/V2i", { "command" : IECore.curry( | Gaffer.WeakMethod( self.__addItem ), "", IECore.V2iData( IECore.V2i( 0 ) ) ) } )
result.append( "/Add/V3i", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3iData( IECore.V3i( 0 ) ) ) } )
result.append( "/Add/V2f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V2fData( IECore.V2f( 0 ) ) ) } )
result.append( "/Add/V3f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.V3fData( IECore.V3f( 0 ) | ) ) } )
result.append( "/Add/VectorDivider", { "divider" : True } )
result.append( "/Add/Color3f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.Color3fData( IECore.Color3f( 0 ) ) ) } )
result.append( "/Add/Color4f", { "command" : IECore.curry( Gaffer.WeakMethod( self.__addItem ), "", IECore.Color4fData( IECore.Color4f( 0, 0, 0, 1 ) ) ) } )
return result
def __addItem( self, name, value ) :
with Gaffer.UndoContext( self.getPlug().ancestor( Gaffer.ScriptNode.staticTypeId() ) ) :
self.getPlug().addOptionalMember( name, value, enabled=True )
class _MemberPlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, childPlug, label=None ) :
self.__row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
GafferUI.PlugValueWidget.__init__( self, self.__row, childPlug )
if label is not None or not childPlug.getFlags( Gaffer.Plug.Flags.Dynamic ) :
nameWidget = GafferUI.LabelPlugValueWidget(
childPlug,
horizontalAlignment = GafferUI.Label.HorizontalAlignment.Right,
verticalAlignment = GafferUI.Label.VerticalAlignment.Top,
)
if label is not None :
nameWidget.label().setText( label )
nameWidget.label()._qtWidget().setFixedWidth( GafferUI.PlugWidget.labelWidth() )
else :
nameWidget = GafferUI.StringPlugValueWidget( childPlug["name"] )
nameWidget.textWidget()._qtWidget().setFixedWidth( GafferUI.PlugWidget.labelWidth() )
self.__row.append( nameWidget )
if "enabled" in childPlug :
self.__row.append(
GafferUI.BoolPlugValueWidget(
childPlug["enabled"],
displayMode = GafferUI.BoolWidget.DisplayMode.Switch
)
)
self.__row.append( GafferUI.PlugValueWidget.create( childPlug["value"] ), expand = True )
self._updateFromPlug()
def setPlug( self, plug ) :
GafferUI.PlugValueWidget.setPlug( self, plug )
if isinstance( self.__row[0], GafferUI.LabelPlugValueWidget ) :
self.__row[0].setPlug( plug )
else :
self.__row[0].setPlug( plug["name"] )
if "enabled" in plug :
self.__row[1].setPlug( plug["enabled"] )
self.__row[-1].setPlug( plug["value"] )
def hasLabel( self ) :
return True
def childPlugValueWidget( self, childPlug, lazy=True ) :
for w in self.__row :
if w.getPlug().isSame( childPlug ) :
return w
return None
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
GafferUI.PlugValueWidget.setReadOnly( self, readOnly )
for w in self.__row :
w.setReadOnly( readOnly )
def _updateFromPlug( self ) :
if "enabled" in self.getPlug() :
with self.getContext() :
enabled = self.getPlug()["enabled"].getValue()
if isinstance( self.__row[0], GafferUI.StringPlugValueWidget ) :
self.__row[0].setEnabled( enabled )
self.__row[-1].setEnabled( enabled )
GafferUI.PlugValueWidget.registerType( Gaffer.CompoundDataPlug.staticTypeId(), CompoundDataPlugValueWidget )
GafferUI.PlugValueWidget.registerType( Gaffer.CompoundDataPlug.MemberPlug.staticTypeId(), _MemberPlugValueWidget )
#!/usr/bin/env python
import datetime
from run_utils import *
class TestSuite(object):
def __init__(self, options, cache):
self.options = options
self.cache = cache
self.nameprefix = "opencv_" + self.options.mode + "_"
self.tests = self.cache.gatherTests(self.nameprefix + "*", self.isTest)
def getOS(self):
return getPlatformVersion() or self.cache.getOS()
def getHardware(self):
res = []
if self.cache.getArch() in ["x86", "x64"] and self.cache.withCuda():
res.append("CUDA")
return res
def getLogName(self, app, timestamp):
app = self.getAlias(app)
rev = self.cache.getGitVersion()
if isinstance(timestamp, datetime.datetime):
timestamp = timestamp.strftime("%Y%m%d-%H%M%S")
if self.options.longname:
small_pieces = [self.getOS(), self.cache.getArch()] + self.cache.getDependencies() + self.getHardware() + [self.cache.getSIMDFeatures()]
big_pieces = [app, str(rev), timestamp, "_".join([p for p in small_pieces if p])]
l = "__".join(big_pieces)
else:
pieces = [app, self.cache.getOS(), self.cache.getArch()] + self.getHardware() + [rev, timestamp]
lname = "_".join([p for p in pieces if p])
lname = re.sub(r'[\(\)\[\]\s,]', '_', lname)
l = re.sub(r'_+', '_', lname)
return l + ".xml"
def listTests(self, short = False, main = False):
if len(self.tests) == 0:
raise Err("No tests found")
for t in self.tests:
if short:
t = self.getAlias(t)
if not main or self.cache.isMainModule(t):
log.info("%s", t)
def getAlias(self, fname):
return sorted(self.getAliases(fname), key = len)[0]
def getAliases(self, fname):
def getCuts(fname, prefix):
# filename w/o extension (opencv_test_core)
noext = re.sub(r"\.(exe|apk)$", '', fname)
# filename w/o prefix (core.exe)
nopref = fname
if fname.startswith(prefix):
nopref = fname[len(prefix):]
# filename w/o prefix and extension (core)
noprefext = noext
if noext.startswith(prefix):
noprefext = noext[len(prefix):]
return noext, nopref, noprefext
# input is full path ('/home/.../bin/opencv_test_core') or 'java'
res = [fname]
fname = os.path.basename(fname)
res.append(fname) # filename (opencv_test_core.exe)
for s in getCuts(fname, self.nameprefix):
res.append(s)
if self.cache.build_type == "Debug" and "Visual Studio" in self.cache.cmake_generator:
res.append(re.sub(r"d$", '', s)) # MSVC debug config, remove 'd' suffix
log.debug("Aliases: %s", set(res))
return set(res)
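# Example (illustrative, not in the original file): with self.nameprefix set
# to "opencv_test_", the path "/build/bin/opencv_test_core.exe" yields the
# aliases {"/build/bin/opencv_test_core.exe", "opencv_test_core.exe",
# "opencv_test_core", "core.exe", "core"}.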
def getTest(self, name):
# return stored test name by provided alias
for t in self.tests:
if name in self.getAliases(t):
return t
raise Err("Can not find test: %s", name)
def getTestList(self, white, black):
res = [t for t in white or self.tests if self.getAlias(t) not in black]
if len(res) == 0:
raise Err("No tests found")
return set(res)
def isTest(self, fullpath):
if fullpath == "java":
return True
if not os.path.isfile(fullpath):
return False
if self.cache.getOS() == "nt" and not fullpath.endswith(".exe"):
return False
return os.access(fullpath, os.X_OK)
def wrapInValgrind(self, cmd = []):
if self.options.valgrind:
res = ['valgrind']
if self.options.valgrind_supp:
res.append("--suppressions=%s" % self.options.valgrind_supp)
res.extend(self.options.valgrind_opt)
return res + cmd
return cmd
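# Example (illustrative, not in the original file): with --valgrind and a
# suppressions file given, ["./opencv_test_core"] becomes
# ["valgrind", "--suppressions=supp.txt", "./opencv_test_core"],
# with any extra options from self.options.valgrind_opt inserted before the
# test binary.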
def runTest(self, path, logfile, workingDir, args = []):
args = args[:]
exe = os.path.abspath(path)
if path == "java":
cmd = [self.cache.ant_executable, "-Dopencv.build.type=%s" % self.cache.build_type, "buildAndTest"]
ret = execute(cmd, cwd = self.cache.java_test_binary_dir + "/.build")
return None, ret
else:
if isColorEnabled(args):
args.append("--gtest_color=yes")
cmd = self.wrapInValgrind([exe] + args)
tempDir = TempEnvDir('OPENCV_TEMP_PATH', "__opencv_temp.")
tempDir.init()
log.warning("Run: %s" % " ".join(cmd))
ret = execute(cmd, cwd = workingDir)
tempDir.clean()
hostlogpath = os.path.join(workingDir, logfile)
if os.path.isfile(hostlogpath):
return hostlogpath, ret
return None, ret
def checkPrerequisites(self):
if self.cache.getArch() == "x64" and hostmachine == "x86":
raise Err("Target architecture is incompatible with current platform")
def runTests(self, tests, black, workingDir, args = []):
self.checkPrerequisites()
args = args[:]
logs = []
test_list = self.getTestList(tests, black)
date = datetime.datetime.now()
if len(test_list) != 1:
args = [a for a in args if not a.startswith("--gtest_output=")]
ret = 0
for test in test_list:
more_args = []
exe = self.getTest(test)
userlog = [a for a in args if a.startswith("--gtest_output=")]
if len(userlog) == 0:
logname = self.getLogName(exe, date)
more_args.append("--gtest_output=xml:" + logname)
else:
logname = userlog[0][userlog[0].find(":")+1:]
log.debug("Running the test: %s (%s) ==> %s in %s", exe, args + more_args, logname, workingDir)
if self.options.dry_run:
logfile, r = None, 0
else:
logfile, r = self.runTest(exe, logname, workingDir, args + more_args)
log.debug("Test returned: %s ==> %s", r, logfile)
if r != 0:
ret = r
if logfile:
logs.append(os.path.relpath(logfile, workingDir))
return logs, ret
#===================================================================================================
if __name__ == "__main__":
log.error("This is utility file, please execute run.py script")
# Copyright (C) 2010-2014 GRNET S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from snf_django.management.commands import SynnefoCommand, CommandError
from optparse import make_option
from synnefo.management import common
from synnefo.plankton.backend import PlanktonBackend
from snf_django.management import utils
class Command(SynnefoCommand):
args = "<snapshot_id>"
help = "Display available information about a snapshot"
option_list = SynnefoCommand.option_list + (
make_option(
'--user',
dest='userid',
default=None,
help="The UUID of the owner of the snapshot. Required"
"if snapsho | t is not public"),
make_option(
'--public',
dest='public',
default=False,
action="store_true",
help="Use this option if the snapshot is public"),
)
@common.convert_api_faults
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("Please provide a snapshot ID")
snapshot_id = args[0]
userid = options["userid"]
public = options["public"]
if (userid is None) and (public is False):
raise CommandError("'user' option or 'public' option is required")
try:
with PlanktonBackend(userid) as backend:
snapshot = backend.get_snapshot(snapshot_id)
except:
raise CommandError("An error occurred, verify that snapshot and "
"user ID are valid")
utils.pprint_table(out=self.stdout, table=[snapshot.values()],
headers=snapshot.keys(), vertical=True)
#!/usr/bin/env python
import re
import sys
snappy_ver = "v3.0"
html_header_string = """\
<html>
<head>
<title>Snappy Assembly</title>
<style>
BODY {
margin-bottom: 200px;
}
TABLE TD {
vertical-align: middle;
}
H2 {
margin-bottom: 5px;
margin-top: 24px;
font-size: 20pt;
}
LI.section {
font-size: 20pt;
font-weight: bold;
}
H3 {
margin-left: -15px;
margin-bottom: 5px;
margin-top: 18px;
font-size: 16pt;
}
LI.step {
padding-left: 15px;
margin-left: 0;
font-size: 16pt;
font-weight: bold;
list-style-type: none;
}
DIV.desc {
margin-bottom: 15px;
font-size: 12pt;
font-weight: normal;
}
OL {
margin-left: 30px;
}
UL {
padding-left: 5px;
}
</style>
</head>
<body>
<h1>Snappy RepRap Assembly Instructions</h1>
<ol>
"""
class GenAssemblyIndex(object):
indexfile = "docs/assembly/index.html"
markdownfile = "wiki/{0}-Assembly.md".format(snappy_ver)
sourcefile = "full_assembly.scad"
modules = []
modinfo = {}
def write_index(self):
with open(self.indexfile, "w") as f:
f.write(html_header_string)
for mod_eng in self.modules:
f.write('<li class="section">')
f.write('<h2>{0}</h2>\n'.format(mod_eng))
stepcnt = len(self.modinfo[mod_eng])
if stepcnt > 1:
f.write('<ul>\n')
for stepinfo in self.modinfo[mod_eng]:
if stepcnt > 1:
f.write('<li class="step">')
f.write('<h3>Step {step}</h3>\n'.format(**stepinfo))
f.write(
'<div class="desc">{desc}</div>\n'
'<table>'
'<tr>'
'<td class="befor">'
'<img src="{module}_before.png">'
'</td>'
'<td class="arrow"><img src="arrow.png"></td>'
'<td class="after"><img src="{module}_after.png"></td>'
'</tr>'
'</table>\n'
.format(**stepinfo)
)
if stepcnt > 1:
f.write('</li>\n')
if stepcnt > 1:
f.write('</ul>\n')
f.write('</li>\n')
f.write('<li class="section">\n')
f.write('<h2>{0}</h2>\n'.format("RAMPS Wiring"))
f.write('<div class="desc">\n')
f.write("<p>Here's a diagram of what needs to be connected where on a RAMPS 1.4 controller board.</p>\n")
f.write('<p><a href="RAMPS_Wiring_For_Snappy.png"><img width="800" height="600" src="RAMPS_Wiring_For_Snappy.png"></a></p>')
f.write('<p>Click on the image to enlarge.</p>\n\n')
f.write('</div>\n')
f.write('</li>\n')
f.write('<li class="section">\n')
f.write('<h2>{0}</h2>\n'.format("Marlin Firmware for RAMPS"))
f.write('<div class="desc">\n')
f.write('You can find Marlin firmware pre-configured for the Snappy with a RAMPS 1.4 controller at\n')
f.write('<a href="https://github.com/revarbat/snappy-reprap/tree/v3.0/firmware">https://github.com/revarbat/snappy-reprap/tree/v3.0/firmware</a>\n')
f.write('</div>\n')
f.write('</li>\n')
f.write('</ol>\n')
f.write('</body>\n')
f.write('</html>\n')
def write_markdown(self):
with open(self.markdownfile, "w") as f:
f.write("# Snappy RepRap Assembly Instructions\n\n")
for mod_eng in self.modules:
f.write('## {0}\n\n'.format(mod_eng))
stepcnt = len(self.modinfo[mod_eng])
for stepinfo in self.modinfo[mod_eng]:
stepinfo['base'] = (
'https://raw.githubusercontent.com/'
'revarbat/snappy-reprap/{0}/docs/assembly/'
).format(snappy_ver)
if stepcnt > 1:
f.write('### Step {step}\n\n'.format(**stepinfo))
f.write(
'{desc}\n\n'
'Before | After\n'
'------ | -----\n'
'![{module} Step {step} Before]'
'({base}{module}_before.png) | '
'![{module} Step {step} After]'
'({base}{module}_after.png)\n\n'
.format(**stepinfo)
)
f.write('## {0}\n\n'.format("RAMPS Wiring"))
f.write("Here's a diagram of what needs to be connected where on a RAMPS 1.4 controller board.\n\n")
f.write('[]({0}-RAMPS_Wiring_For_Snappy.png)\n'.format(snappy_ver))
f.write('Click on the image to enlarge.\n\n')
f.write('## {0}\n\n'.format("Marlin Firmware for RAMPS"))
f.write('You can find Marlin firmware pre-configured for the Snappy with a RAMPS 1.4 controller at\n')
f.write('https://github.com/revarbat/snappy-reprap/tree/{0}/firmware\n'.format(snappy_ver))
def process_module(self, module, desc):
print("module: {0}".format(module))
step = 1
mod_eng = module.replace('_', ' ') \
.title() \
.replace('Xy', 'XY') \
.replace('Yz', 'YZ')
mod_split = mod_eng.split(" ")
if mod_split[-1].isdigit():
step = int(mod_split[-1])
mod_eng = " ".join(mod_split[:-1])
if mod_eng not in self.modules:
self.modules.append(mod_eng)
self.modinfo[mod_eng] = [
{
'module': module,
'step': step,
'desc': desc
},
]
else:
self.modinfo[mod_eng].append(
{
'module': module,
'step': step,
'desc': desc
},
)
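# Example (illustrative, not in the original script): a module named
# "xy_joiner_assembly_2" titles to "XY Joiner Assembly 2"; the trailing
# digit is split off as step 2, so all of its steps are grouped under a
# single "XY Joiner Assembly" section.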
def generate_index(self):
mod_re = re.compile(
r'module *([a-z_][a-z0-9_]*_assembly(_[0-9]+)?) *\('
)
desc_re = re.compile(r'// *desc: *(.*)$')
module = ""
desc = ""
with open(self.sourcefile, "r") as f:
for line in f.readlines():
mod_res = mod_re.search(line)
if mod_res:
if module:
self.process_module(module, desc)
module = mod_res.group(1)
desc = ""
desc_res = desc_re.search(line)
if desc_res:
desc += desc_res.group(1)
if module:
self.process_module(module, desc)
self.write_index()
self.write_markdown()
def main():
genidx = GenAssemblyIndex()
genidx.generate_index()
sys.exit(0)
if __name__ == "__main__":
main()
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
#!/usr/bin/python
"""
skeleton code for k-means clustering mini-project
"""
import pickle
import numpy
import matplotlib.pyplot as plt
import sys
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
def Draw(pred, features, poi, mark_poi=False, name="image.png", f1_name="feature 1", f2_name="feature 2"):
""" some plotting code designed to help you visualize your clusters """
### plot each cluster with a different color--add more colors for
### drawing more than 4 clusters
colors = ["b", "c", "k", "m", "g"]
for ii, pp in enumerate(pred):
plt.scatter(features[ii][0], features[ii][1], color = colors[pred[ii]])
### if you like, place red stars over points that are POIs (just for funsies)
if mark_poi:
for ii, pp in enumerate(pred):
if poi[ii]:
plt.scatter(features[ii][0], features[ii][1], color="r", marker="*")
plt.xlabel(f1_name)
plt.ylabel(f2_name)
plt.savefig(name)
plt.show()
### load in the dict of dicts containing all the data on each person in the dataset
data_dict = pickle.load( open("../final_project/final_project_dataset.pkl", "r") )
### there's an outlier--remove it!
data_dict.pop("TOTAL", 0)
### the input features we want to use
### can be any key in the person-level dictionary (salary, director_fees, etc.)
feature_1 = "salary"
feature_2 = "exercised_stock_options"
poi = "poi"
features_list = [poi, feature_1, feature_2]
data = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data )
### in the "clustering with 3 features" part of the mini-project,
### you'll want to change this line to
### for f1, f2, _ in finance_features:
### (as it's currently written, line below assumes 2 features)
for f1, f2 in finance_features:
plt.scatter( f1, f2 )
plt.show()
from sklearn.cluster import KMeans
features_list = ["poi", feature_1, feature_2]
data2 = featureFormat(data_dict, features_list )
poi, finance_features = targetFeatureSplit( data2 )
clf = KMeans(n_clusters=2)
pred = clf.fit_predict( finance_features )
Draw(pred, finance_features, poi, name="clusters_before_scaling.pdf", f1_name=feature_1, f2_name=feature_2)
### cluster here; create predictions of the cluster labels
### for the data and store them to a list called pred
try:
Draw(pred, finance_features, poi, mark_poi=False, name="clusters.pdf", f1_name=feature_1, f2_name=feature_2)
except NameError:
print "no predictions object named pred found, no clusters to plot"
rname):
# N = getN(username)
# B = getB(username)
# S = getS(username)
# today = datetime.date.today()
# yesterday = today - datetime.timedelta(1)
# day_bf_yest = today - datetime.timedelta(2)
# ns = N.objects.all().order_by('init_date')[:10]
# bs = B.objects.all().order_by('init_date')[:10]
# ss = S.objects.all().order_by('init_date')[:10]
#
# #get groups for this user
# gs = G.objects.filter(members__username=username)
#
# return render_to_response('social/wall.html', {'ns':ns, 'bs':bs, 'ss':ss, 'gs':gs}, context_instance=RequestContext(request))
#===============================================================================
@login_required
def group_index(request, groupid):
return HttpResponseRedirect('/groups/'+groupid+'/snippetbook/notes/')
def get_groups_created_by_self(request, username):
if username == request.user.username:
gs_created_by_self = G.objects.filter(admins__username=username)
else:
gs_created_by_self = G.objects.filter(admins__username=username, private=False)
return gs_created_by_self
def get_groups_following(request, username):
if username == request.user.username:
gs_created_by_self = G.objects.filter(members__username=username).exclude(admins__username=username)
else:
gs_created_by_self = G.objects.filter(members__username=username, private=False).exclude(admins__username=username)
return gs_created_by_self
def get_groups_list(request, username):
gs_created_by_self = get_groups_created_by_self(request, username)
gs_created_by_self_list = [g for g in gs_created_by_self]
gs_following = get_groups_following(request, username)
gs_following_list = [g for g in gs_following]
group_set = set(gs_created_by_self_list).union(set(gs_following_list))
return list(group_set)
def profile(request, username):
gs_created = get_groups_created_by_self(request, username)
gs_following = get_groups_following(request, username)
profile_member = get_object_or_404(Member, username=username)
areas = Area.objects.using(username).filter(private=False)
return render_to_response('social/profile.html', {'gs_created':gs_created, 'gs_following':gs_following, \
'profile_user':User.objects.get(username=username), \
'profile_member':profile_member, 'profile_username':username,\
'areas':areas}, context_instance=RequestContext(request))
@login_required
def friends(request, username):
profile_member = get_object_or_404(Member, username=username)
friends = profile_member.get_friends()
sorted_members = [[m, m.get_public_notes_count()] for m in friends]
sorted_members.sort(key=lambda r:r[1],reverse = True)
if (not request.user.is_anonymous()) and request.user.username == username:
Notice.objects.filter(notice_type__label='friends_add', recipient=request.user).update(unseen=False)
return render_to_response('social/friends.html', { 'profile_username':username, 'friends':sorted_members}, context_instance=RequestContext(request, {}))
@login_required
def friends_notes2(request, username, bookname):
#print 'username:',username
#print 'bookname:',bookname
friends = request.user.member.get_friends()
q = Q(owner__in=friends, private=False)
note_list = getSN(bookname).objects.filter(q)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
#tags = get_group_tags(request, groupname, bookname)
return render_to_response('social/notes/friends_notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'tags':None, 'qstr':qstr, \
'appname':'friends', 'cl':cl, 'profile_username':username},\
context_instance=RequestContext(request, {'book_uri_prefix':'/'+username+'/friends',
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all'),
'pick_lang': __get_lang(request)
}))
#notes of all users
def all_notes(request, bookname):
note_list = getSN(bookname).objects.filter(private=False)
qstr = __getQStr(request)
note_list = getSearchResults(note_list, qstr)
sort, order_type, paged_notes, cl = __get_notes_context(request, note_list)
return render_to_response('social/notes/all_notes.html', {'note_list':paged_notes,'sort':sort, 'bookname':bookname, \
'tags':None, 'qstr':qstr, \
'appname':'all', 'cl':cl},\
context_instance=RequestContext(request, {'book_uri_prefix':'/all',
'note_type':bookname_note_type_dict.get(bookname),
'pick_empty':request.GET.get('pick_empty', 'all'),
'pick_plan':request.GET.get('pick_plan', 'all'),
'pick_lang': __get_lang(request)
}))
def learners(request):
members = Member.objects.filter(is_active=True)#.order_by('-get_public_notes_count')
sorted_members = [[m, m.get_public_notes_count()] for m in members if m.get_public_notes_count() > 10 and m.username not in ['test', 'guest']]
sorted_members.sort(key=lambda r:r[1],reverse = True)
return render_to_response('social/learners.html', {'learners':sorted_members}, \
context_instance=RequestContext(request))
#viewing all people
@user_passes_test(lambda u: u.username=='leon')
@login_required
def people(request):
members = Member.objects.filter(is_active=True)#.order_by('-get_public_notes_count')
sorted_members = [[m, m.get_public_notes_count()] for m in members]
sorted_members.sort(key=lambda r:r[1],reverse = True)
return render_to_response('social/people.html', {'learners':sorted_members}, \
context_instance=RequestContext(request))
def groups(request):
gs = G.objects.filter(private=False).annotate(num_members=Count('members')).order_by('-num_members')
return render_to_response('social/group/groups.html', {'groups':gs}, \
context_instance=RequestContext(request))
#TODO: think about dropping the username arg, since you can get it from the request.
# Also think about getting rid of it in urls.py, changing it to 'personal' or 'my'.
@login_required
def my_groups(request, username):
gs_created_by_self = get_groups_created_by_self(request, username)
gs_following = get_groups_following(request, username)
addGroupForm = AddGroupForm(initial={'admins': [username]})
T = getT(username)
tags = T.objects.filter(private=False)
#posting for adding a group
if request.method == 'POST':
post = request.POST.copy()
#print 'post', post
#TODO: move logic below t
import os.path
import pygame
from pygame.locals import *
from tecles import *
pygame.init()
class Joc (object):
WIDTH = 600
HEIGHT = 400
screen = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
background = pygame.image.load(os.path.join("Imatges","fons.jpg"))
clock = pygame.time.Clock()
dt = 0.05
puntsR = 0
puntsL = 0
font = pygame.font.SysFont("Arial", 20)
quit = False
palaL = None
palaR = None
pilota = None
pales = pygame.sprite.Group()
pilotes = pygame.sprite.Group()
def toggle_quit():
Joc.quit = not Joc.quit
def gol():
for pilota in Joc.pilotes.sprites():
if pilota.posicio[0] > Joc.WIDTH:
Joc.puntsR += 1
print(Joc.puntsL, Joc.puntsR)
pilota.restart()
elif pilota.posicio[0] < 0:
Joc.puntsL += 1
print(Joc.puntsL, Joc.puntsR)
pilota.restart()
def main_loop():
while not Joc.quit:
for event in pygame.event.get():
if event.type == KEYUP or event.type == KEYDOWN:
handle_keys(event,Joc)
elif event.type == QUIT:
Joc.quit = True
Joc.pales.update()
Joc.pilotes.update()
Joc.screen.blit(Joc.background,(0,0))
Joc.pilotes.draw(Joc.screen)
Joc.pales.draw(Joc.screen)
Joc.gol()
pygame.display.update()
Joc.dt = Joc.clock.tick() / 10
"""Produces the catalogue page for the VPHAS website."""
import os
import json
import datetime
#import httplib
from astropy import log
from jinja2 import Environment, FileSystemLoader
from surveytools import SURVEYTOOLS_DATA
# Load the column definitions from the JSON file
filename_columns = os.path.join(SURVEYTOOLS_DATA, 'vphas-columns.json')
columns = json.loads(open(filename_columns).read())
def get_filesize(host, url):
"""Returns the filesize of a remote document."""
return 0
"""
conn = httplib.HTTPConnection(host)
conn.request("HEAD", url)
res = conn.getresponse()
try:
size = float(res.getheader('content-length')) / (1024.*1024.)
except Exception, e:
print 'Failed {0}'.format(url)
print e
if size < 950:
result = '{0:.0f} MB'.format(size)
else:
result = '{0:.01f} GB'.format(size/1024.)
log.info('Filesize of {0} = {1}'.format(url, result))
return result
"""
args = {'host': 'www.vphas.org',
'dir_light': '/data/dr2/light',
'dir_full': '/data/dr2/full',
'last_update': datetime.datetime.utcnow().strftime("%Y-%m-%d"),
'columns': columns}
if __name__ == '__main__':
env = Environment(loader=FileSystemLoader('.'),
trim_blocks=True,
lstrip_blocks=True)
env.globals['get_filesize'] = get_filesize
template = env.get_template('dr2-template.html')
with open("vphas-dr2.shtml", "w") as f:
f.write(template.render(args))
import sys
from indra import reach
from indra.assemblers import GraphAssembler
orig_txt = [ln.strip() for ln in open('ras_pathway.txt', 'rt').readlines()]
correct_txt = [ln.strip() for ln in open('correction.txt', 'rt').readlines()]
for ln in correct_txt:
if ln.startswith('<'):
remove_line = ln[2:]
orig_txt.remove(remove_line)
elif ln.startswith('>'):
add_line = ln[2:]
orig_txt.append(add_line)
txt = ' '.join(orig_txt)
rp = reach.process_text(txt, offline=True)
st = rp.statements
for s in st:
print '%s\t%s' % (s, s.evidence[0].text)
graphpr = {'rankdir': 'TD'}
nodepr = {'fontsize': 12, 'shape': 'plaintext', 'margin': '0,0', 'pad': 0}
ga = GraphAssembler(st, graph_properties=graphpr, node_properties=nodepr)
ga.make_model()
ga.save_dot('rps6ka_correction.dot')
ga.save_pdf('rps6k1_correction.pdf')
def _initialize_processes(self, num):
self._workers = []
for i in range(num):
rslt_q = multiprocessing.Queue()
self._workers.append([None, rslt_q])
self._result_prc = ResultProcess(self._final_q, self._workers)
self._result_prc.start()
def _initialize_notified_handlers(self, handlers):
'''
Clears and initializes the shared notified handlers dict with entries
for each handler in the play, which is an empty array that will contain
inventory hostnames for those hosts triggering the handler.
'''
# Zero the dictionary first by removing any entries there.
# Proxied dicts don't support iteritems, so we have to use keys()
for key in self._notified_handlers.keys():
del self._notified_handlers[key]
def _process_block(b):
temp_list = []
for t in b.block:
if isinstance(t, Block):
temp_list.extend(_process_block(t))
else:
temp_list.append(t)
return temp_list
handler_list = []
for handler_block in handlers:
handler_list.extend(_process_block(handler_block))
# then initialize it with the handler names from the handler list
for handler in handler_list:
self._notified_handlers[handler.get_name()] = []
def load_callbacks(self):
'''
Loads all available callbacks, with the exception of those which
utilize the CALLBACK_TYPE option. When CALLBACK_TYPE is set to 'stdout',
only one such callback plugin will be loaded.
'''
if self._callbacks_loaded:
return
stdout_callback_loaded = False
if self._stdout_callback is None:
self._stdout_callback = C.DEFAULT_STDOUT_CALLBACK
if isinstance(self._stdout_callback, CallbackBase):
stdout_callback_loaded = True
elif isinstance(self._stdout_callback, string_types):
if self._stdout_callback not in callback_loader:
raise AnsibleError("Invalid callback for stdout specified: %s" % self._stdout_callback)
else:
self._stdout_callback = callback_loader.get(self._stdout_callback)
stdout_callback_loaded = True
else:
raise AnsibleError("callback must be an instance of CallbackBase or the name of a callback plugin")
for callback_plugin in callback_loader.all(class_only=True):
if hasattr(callback_plugin, 'CALLBACK_VERSION') and callback_plugin.CALLBACK_VERSION >= 2.0:
# we only allow one callback of type 'stdout' to be loaded, so check
# the name of the current plugin and type to see if we need to skip
# loading this callback plugin
callback_type = getattr(callback_plugin, 'CALLBACK_TYPE', None)
callback_needs_whitelist = getattr(callback_plugin, 'CALLBACK_NEEDS_WHITELIST', False)
(callback_name, _) = os.path.splitext(os.path.basename(callback_plugin._original_path))
if callback_type == 'stdout':
if callback_name != self._stdout_callback or stdout_callback_loaded:
continue
stdout_callback_loaded = True
elif callback_name == 'tree' and self._run_tree:
pass
elif not self._run_additional_callbacks or (callback_needs_whitelist and (C.DEFAULT_CALLBACK_WHITELIST is None or callback_name not in C.DEFAULT_CALLBACK_WHITELIST)):
continue
self._callback_plugins.append(callback_plugin())
self._callbacks_loaded = True
def run(self, play):
'''
Iterates over the roles/tasks in a play, using the given (or default)
strategy for queueing tasks. The default is the linear strategy, which
operates like classic Ansible by keeping all hosts in lock-step with
a given task (meaning no hosts move on to the next task until all hosts
are done with the current task).
'''
if not self._callbacks_loaded:
self.load_callbacks()
all_vars = self._variable_manager.get_vars(loader=self._loader, play=play)
templar = Templar(loader=self._loader, variables=all_vars)
new_play = play.copy()
new_play.post_validate(templar)
self.hostvars = HostVars(
inventory=self._inventory,
variable_manager=self._variable_manager,
loader=self._loader,
)
# Fork # of forks, # of hosts or serial, whichever is lowest
contenders = [self._options.forks, play.serial, len(self._inventory.get_hosts(new_play.hosts))]
contenders = [ v for v in contenders if v is not None and v > 0 ]
self._initialize_processes(min(contenders))
play_context = PlayContext(new_play, self._options, self.passwords, self._connection_lockfile.fileno())
for callback_plugin in self._callback_plugins:
if hasattr(callback_plugin, 'set_play_context'):
callback_plugin.set_play_context(play_context)
self.send_callback('v2_playbook_on_play_start', new_play)
# initialize the shared dictionary containing the notified handlers
self._initialize_notified_handlers(new_play.handlers)
# load the specified strategy (or the default linear one)
strategy = strategy_loader.get(new_play.strategy, self)
if strategy is None:
raise AnsibleError("Invalid play strategy specified: %s" % new_play.strategy, obj=play._ds)
# build the iterator
iterator = PlayIterator(
inventory=self._inventory,
play=new_play,
play_context=play_context,
variable_manager=self._variable_manager,
all_vars=all_vars,
start_at_done = self._start_at_done,
)
# during initialization, the PlayContext will clear the start_at_task
# field to signal that a matching task was found, so check that here
# and remember it so we don't try to skip tasks on future plays
if getattr(self._options, 'start_at_task', None) is not None and play_context.start_at_task is None:
self._start_at_done = True
# and run the play using the strategy and cleanup on way out
play_return = strategy.run(iterator, play_context)
self._cleanup_processes()
return play_return
def cleanup(self):
display.debug("RUNNING CLEANUP")
self.terminate()
self._final_q.close()
self._cleanup_processes()
def _cleanup_processes(self):
if self._result_prc:
self._result_prc.terminate()
for (worker_prc, rslt_q) in self._workers:
rslt_q.close()
if worker_prc and worker_prc.is_alive():
try:
worker_prc.terminate()
except AttributeError:
pass
def clear_failed_hosts(self):
self._failed_hosts = dict()
def get_inventory(self):
return self._inventory
def get_variable_manager(self):
return self._variable_manager
def get_loader(self):
return self._loader
def get_notified_handlers(self):
return self._notified_handlers
def get_workers(self):
return self._workers[:]
def terminate(self):
self._terminated = True
def send_callback(self, method_name, *args, **kwargs):
for callback_plugin in [self._stdout_callback] + self._callback_plugins:
# a plugin that set self.disabled to True will not be called
# see osx_say.py example for such a plugin
if getattr(callback_plugin, 'disabled', False):
continue
# try to find v2 method, fallback to v1 method, ignore callback if no method found
methods = []
for possible in [method_name, 'v2_on_any']:
gotit = getattr(callback_plugin, possib |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 17:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wunderlist', '0011_auto_20151230_1843'),
]
operations = [
migrations.AlterField(
model_name='connection',
name='owner',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connections', to=settings.AUTH_USER_MODEL),
),
]
Router(conf, self.logger)
def _zero_stats(self):
"""Zero out the stats."""
self.stats = {'attempted': 0, 'success': 0, 'failure': 0,
'hashmatch': 0, 'rsync': 0, 'remove': 0,
'start': time.time(), 'failure_nodes': {}}
def _add_failure_stats(self, failure_devs_info):
for node, dev in failure_devs_info:
self.stats['failure'] += 1
failure_devs = self.stats['failure_nodes'].setdefault(node, {})
failure_devs.setdefault(dev, 0)
failure_devs[dev] += 1
def _get_my_replication_ips(self):
my_replication_ips = set()
ips = whataremyips()
for policy in POLICIES:
self.load_object_ring(policy)
for local_dev in [dev for dev in policy.object_ring.devs
if dev and dev['replication_ip'] in ips and
dev['replication_port'] == self.port]:
my_replication_ips.add(local_dev['replication_ip'])
return list(my_replication_ips)
# Just exists for doc anchor point
def sync(self, node, job, suffixes, *args, **kwargs):
"""
Synchronize local suffix directories from a partition with a remote
node.
:param node: the "dev" entry for the remote node to sync with
:param job: information about the partition being synced
:param suffixes: a list of suffixes which need to be pushed
:returns: boolean and dictionary, boolean indicating success or failure
"""
return self.sync_method(node, job, suffixes, *args, **kwargs)
def load_object_ring(self, policy):
"""
Make sure the policy's rings are loaded.
:param policy: the StoragePolicy instance
:returns: appropriate ring object
"""
policy.load_ring(self.swift_dir)
return policy.object_ring
def _rsync(self, args):
"""
Execute the rsync binary to replicate a partition.
:returns: return code of rsync process. 0 is successful
"""
start_time = time.time()
ret_val = None
try:
with Timeout(self.rsync_timeout):
proc = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
results = proc.stdout.read()
ret_val = proc.wait()
except Timeout:
self.logger.error(_("Killing long-running rsync: %s"), str(args))
proc.kill()
return 1 # failure response code
total_time = time.time() - start_time
for result in results.split('\n'):
if result == '':
continue
if result.startswith('cd+'):
continue
if not ret_val:
self.logger.info(result)
else:
self.logger.error(result)
if ret_val:
error_line = _('Bad rsync return code: %(ret)d <- %(args)s') % \
{'args': str(args), 'ret': ret_val}
if self.rsync_error_log_line_length:
error_line = error_line[:self.rsync_error_log_line_length]
self.logger.error(error_line)
else:
log_method = self.logger.info if results else self.logger.debug
log_method(
_("Successful rsync of %(src)s at %(dst)s (%(time).03f)"),
{'src': args[-2], 'dst': args[-1], 'time': total_time})
return ret_val
def rsync(self, node, job, suffixes):
"""
Uses rsync to implement the sync method. This was the first
sync method in Swift.
"""
if not os.path.exists(job['path']):
return False, {}
args = [
'rsync',
'--recursive',
'--whole-file',
'--human-readable',
'--xattrs',
'--itemize-changes',
'--ignore-existing',
'--timeout=%s' % self.rsync_io_timeout,
'--contimeout=%s' % self.rsync_io_timeout,
'--bwlimit=%s' % self.rsync_bwlimit,
'--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
]
if self.rsync_compress and \
job['region'] != node['region']:
# Allow for compression, but only if the remote node is in
# a different region than the local one.
args.append('--compress')
rsync_module = rsync_module_interpolation(self.rsync_module, node)
had_any = False
for suffix in suffixes:
spath = join(job['path'], suffix)
if os.path.exists(spath):
args.append(spath)
had_any = True
if not had_any:
return False, {}
data_dir = get_data_dir(job['policy'])
args.append(join(rsync_module, node['device'],
data_dir, job['partition']))
return self._rsync(args) == 0, {}
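# Illustrative note (not part of the original source): the generated
# --exclude option above skips rsync temp files by repeating the character
# class six times:
#
#   >>> '--exclude=.*.%s' % ''.join('[0-9a-zA-Z]' for i in range(6))
#   '--exclude=.*.[0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z][0-9a-zA-Z]'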
def ssync(self, node, job, suffixes, remote_check_objs=None):
return ssync_sender.Sender(
self, node, job, suffixes, remote_check_objs)()
def check_ring(self, object_ring):
"""
Check to see if the ring has been updated
:param object_ring: the ring to check
:returns: boolean indicating whether or not the ring has changed
"""
if time.time() > self.next_check:
self.next_check = time.time() + self.ring_check_interval
if object_ring.has_changed():
return False
return True
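# Illustrative reading (an assumption, not from the original source): the
# method above throttles the expensive ring inspection to at most one real
# check per ring_check_interval and answers True in between; False means the
# ring has changed, so callers typically abandon the current replication pass:
#
#   if not self.check_ring(policy.object_ring):
#       self.logger.info('Ring change detected. Aborting current pass.')
#       return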
def update_deleted(self, job):
"""
High-level method that replicates a single partition that doesn't
belong on this node.
:param job: a dict containing info about the partition to be replicated
"""
def tpool_get_suffixes(path):
return [suff for suff in os.listdir(path)
if len(suff) == 3 and isdir(join(path, suff))]
self.replication_count += 1
self.logger.increment('partition.delete.count.%s' % (job['device'],))
headers = dict(self.default_headers)
headers['X-Backend-Storage-Policy-Index'] = int(job['policy'])
failure_devs_info = set()
begin = time.time()
handoff_partition_deleted = False
try:
responses = []
suffixes = tpool.execute(tpool_get_suffixes, job['path'])
synced_remote_regions = {}
delete_objs = None
if suffixes:
for node in job['nodes']:
self.stats['rsync'] += 1
kwargs = {}
if node['region'] in synced_remote_regions and \
self.conf.get('sync_method', 'rsync') == 'ssync':
kwargs['remote_check_objs'] = \
synced_remote_regions[node['region']]
# candidates is a dict(hash=>timestamp) of objects
# for deletion
success, candidates = self.sync(
node, job, suffixes, **kwargs)
if success:
with Timeout(self.http_timeout):
conn = http_connect(
node['replication_ip'],
node['replication_port'],
node['device'], job['partition'], 'REPLICATE',
'/' + '-'.join(suffixes), headers=headers)
conn.getresponse().read()
if node['region'] != job['region']:
synced_remote_regions[node['region']] = viewkeys(
candidates)
else:
failure_devs_info.add((node['replication_ip'],
node['device']))
responses.append(success)
for cand_objs in synced_remote_regions.values():
if delete_objs is None:
delete_objs = cand_objs
class FailoverInput(msrest.serialization.Model):
    """Use to provide failover region when requesting manual Failover for a hub.
All required parameters must be populated in order to send to Azure.
:ivar failover_region: Required. Region the hub will be failed over to.
:vartype failover_region: str
"""
_validation = {
'failover_region': {'required': True},
}
_attribute_map = {
'failover_region': {'key': 'failoverRegion', 'type': 'str'},
}
def __init__(
self,
*,
failover_region: str,
**kwargs
):
"""
:keyword failover_region: Required. Region the hub will be failed over to.
:paramtype failover_region: str
"""
super(FailoverInput, self).__init__(**kwargs)
self.failover_region = failover_region
class FallbackRouteProperties(msrest.serialization.Model):
"""The properties of the fallback route. IoT Hub uses these properties when it routes messages to the fallback endpoint.
All required parameters must be populated in order to send to Azure.
:ivar name: The name of the route. The name can only include alphanumeric characters, periods,
underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:vartype name: str
:ivar source: Required. The source to which the routing rule is to be applied to. For example,
DeviceMessages. Possible values include: "Invalid", "DeviceMessages", "TwinChangeEvents",
"DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:vartype source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:ivar condition: The condition which is evaluated in order to apply the fallback route. If the
condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:vartype condition: str
:ivar endpoint_names: Required. The list of endpoints to which the messages that satisfy the
condition are routed to. Currently only 1 endpoint is allowed.
:vartype endpoint_names: list[str]
:ivar is_enabled: Required. Used to specify whether the fallback route is enabled.
:vartype is_enabled: bool
"""
_validation = {
'source': {'required': True},
'endpoint_names': {'required': True, 'max_items': 1, 'min_items': 1},
'is_enabled': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'source': {'key': 'source', 'type': 'str'},
'condition': {'key': 'condition', 'type': 'str'},
'endpoint_names': {'key': 'endpointNames', 'type': '[str]'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
source: Union[str, "RoutingSource"],
endpoint_names: List[str],
is_enabled: bool,
name: Optional[str] = None,
condition: Optional[str] = None,
**kwargs
):
"""
:keyword name: The name of the route. The name can only include alphanumeric characters,
periods, underscores, hyphens, has a maximum length of 64 characters, and must be unique.
:paramtype name: str
:keyword source: Required. The source to which the routing rule is to be applied to. For
example, DeviceMessages. Possible values include: "Invalid", "DeviceMessages",
"TwinChangeEvents", "DeviceLifecycleEvents", "DeviceJobLifecycleEvents".
:paramtype source: str or ~azure.mgmt.iothub.v2019_11_04.models.RoutingSource
:keyword condition: The condition which is evaluated in order to apply the fallback route. If
the condition is not provided it will evaluate to true by default. For grammar, See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-query-language.
:paramtype condition: str
:keyword endpoint_names: Required. The list of endpoints to which the messages that satisfy the
condition are routed to. Currently only 1 endpoint is allowed.
:paramtype endpoint_names: list[str]
:keyword is_enabled: Required. Used to specify whether the fallback route is enabled.
:paramtype is_enabled: bool
"""
super(FallbackRouteProperties, self).__init__(**kwargs)
self.name = name
self.source = source
self.condition = condition
self.endpoint_names = endpoint_names
self.is_enabled = is_enabled
class FeedbackProperties(msrest.serialization.Model):
"""The properties of the feedback queue for cloud-to-device messages.
:ivar lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype lock_duration_as_iso8601: ~datetime.timedelta
:ivar ttl_as_iso8601: The period of time for which a message is available to consume before it
is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype ttl_as_iso8601: ~datetime.timedelta
:ivar max_delivery_count: The number of times the IoT hub attempts to deliver a message on the
feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:vartype max_delivery_count: int
"""
_validation = {
'max_delivery_count': {'maximum': 100, 'minimum': 1},
}
_attribute_map = {
'lock_duration_as_iso8601': {'key': 'lockDurationAsIso8601', 'type': 'duration'},
'ttl_as_iso8601': {'key': 'ttlAsIso8601', 'type': 'duration'},
'max_delivery_count': {'key': 'maxDeliveryCount', 'type': 'int'},
}
def __init__(
self,
*,
lock_duration_as_iso8601: Optional[datetime.timedelta] = None,
ttl_as_iso8601: Optional[datetime.timedelta] = None,
max_delivery_count: Optional[int] = None,
**kwargs
):
"""
:keyword lock_duration_as_iso8601: The lock duration for the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype lock_duration_as_iso8601: ~datetime.timedelta
:keyword ttl_as_iso8601: The period of time for which a message is available to consume before
it is expired by the IoT hub. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype ttl_as_iso8601: ~datetime.timedelta
:keyword max_delivery_count: The number of times the IoT hub attempts to deliver a message on
the feedback queue. See:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-messaging#cloud-to-device-messages.
:paramtype max_delivery_count: int
"""
super(FeedbackProperties, self).__init__(**kwargs)
self.lock_duration_as_iso8601 = lock_duration_as_iso8601
self.ttl_as_iso8601 = ttl_as_iso8601
self.max_delivery_count = max_delivery_count
class ImportDevicesRequest(msrest.serialization.Model):
"""Use to provide parameters when requesting an import of all devices in the hub.
All required parameters must be populated in order to send to Azure.
:ivar input_blob_container_uri: Required. The input blob container URI.
:vartype input_blob_container_uri: str
:ivar output_blob_container_uri: Required. The output blob container URI.
:vartype output_blob_container_uri: str
"""
_validation = {
'input_blob_container_uri': {'required': True},
'output_blob_container_uri': {'required': True},
}
_attribute_map = {
'input_blob_container_uri': {'key': 'inputBlobContainerUri', 'type': 'str'},
'output_blob_container_uri': {'key': 'outputBlobContainerUri', 'type': 'str'},
}
def __init__(
self,
*,
input_blob_container_uri: str,
output_blob_container_uri: str,
**kwargs
):
"""
:keyword input_blob_container_uri: Required. The input blob container URI.
:paramtype input_blob_container_uri: str
:keyword output_blob_container_uri: Required. The output blob container URI.
:paramtype output_blob_container_uri: str
"""
super(ImportDevicesRequest, self).__init__(**kwargs)
self.input_blob_container_uri = input_blob_container_uri
self.output_blob_container_uri = output_blob_container_uri
@property
def socket(self):
    """The running process's socket/port information (or None)."""
return self._socket or self.read_metadata_by_name(self._name, 'socket', self._socket_type)
@classmethod
def get_subprocess_output(cls, *args):
"""Get the output of an executed command.
:param *args: An iterable representing the command to execute (e.g. ['ls', '-al']).
:raises: `ProcessManager.ExecutionError` on `OSError` or `CalledProcessError`.
:returns: The output of the command.
"""
try:
return subprocess.check_output(*args, stderr=subprocess.STDOUT)
except (OSError, subprocess.CalledProcessError) as e:
subprocess_output = getattr(e, 'output', '').strip()
raise cls.ExecutionError(str(e), subprocess_output)
def await_pid(self, timeout):
"""Wait up to a given timeout for a process to write pid metadata."""
return self.await_metadata_by_name(self._name, 'pid', timeout, int)
def await_socket(self, timeout):
"""Wait up to a given timeout for a process to write socket info."""
return self.await_metadata_by_name(self._name, 'socket', timeout, self._socket_type)
def write_pid(self, pid):
    """Write the current process's PID to the pidfile location."""
self.write_metadata_by_name(self._name, 'pid', str(pid))
def write_socket(self, socket_info):
    """Write the local process's socket information (TCP port or UNIX socket)."""
self.write_metadata_by_name(self._name, 'socket', str(socket_info))
def write_named_socket(self, socket_name, socket_info):
"""A multi-tenant, named alternative to ProcessManager.write_socket()."""
self.write_metadata_by_name(self._name, 'socket_{}'.format(socket_name), str(socket_info))
def _as_process(self):
"""Returns a psutil `Process` object wrapping our pid.
NB: Even with a process object in hand, subsequent method calls against it can always raise
`NoSuchProcess`. Care is needed to document the raises in the public API or else trap them and
do something sensible for the API.
:returns: a psutil Process object or else None if we have no pid.
:rtype: :class:`psutil.Process`
:raises: :class:`psutil.NoSuchProcess` if the process identified by our pid has died.
"""
if self._process is None and self.pid:
self._process = psutil.Process(self.pid)
return self._process
def is_dead(self):
"""Return a boolean indicating whether the process is dead or not."""
return not self.is_alive()
def is_alive(self, extended_check=None):
"""Return a boolean indicating whether the process is running or not.
:param func extended_check: An additional callable that will be invoked to perform an extended
liveness check. This callable should take a single argument of a
`psutil.Process` instance representing the context-local process
and return a boolean True/False to indicate alive vs not alive.
"""
try:
process = self._as_process()
return not (
# Can happen if we don't find our pid.
(not process) or
# Check for walkers.
(process.status() == psutil.STATUS_ZOMBIE) or
# Check for stale pids.
(self.process_name and self.process_name != process.name()) or
# Extended checking.
(extended_check and not extended_check(process))
)
except (psutil.NoSuchProcess, psutil.AccessDenied):
# On some platforms, accessing attributes of a zombie'd Process results in NoSuchProcess.
return False
def purge_metadata(self, force=False):
"""Instance-based version of ProcessMetadataManager.purge_metadata_by_name() that checks
for process liveness before purging metadata.
:param bool force: If True, skip process liveness check before purging metadata.
:raises: `ProcessManager.MetadataError` when OSError is encountered on metadata dir removal.
"""
if not force and self.is_alive():
raise self.MetadataError('cannot purge metadata for a running process!')
super(ProcessManager, self).purge_metadata_by_name(self._name)
def _kill(self, kill_sig):
"""Send a signal to the current process."""
if self.pid:
os.kill(self.pid, kill_sig)
def terminate(self, signal_chain=KILL_CHAIN, kill_wait=KILL_WAIT_SEC, purge=True):
"""Ensure a process is terminated by sending a chain of kill signals (SIGTERM, SIGKILL)."""
alive = self.is_alive()
if alive:
logger.debug('terminating {}'.format(self._name))
for signal_type in signal_chain:
pid = self.pid
try:
logger.debug('sending signal {} to pid {}'.format(signal_type, pid))
self._kill(signal_type)
except OSError as e:
logger.warning('caught OSError({e!s}) during attempt to kill -{signal} {pid}!'
.format(e=e, signal=signal_type, pid=pid))
# Wait up to kill_wait seconds to terminate or move onto the next signal.
try:
if self._deadline_until(self.is_dead, kill_wait):
alive = False
logger.debug('successfully terminated pid {}'.format(pid))
break
except self.Timeout:
# Loop to the next kill signal on timeout.
pass
if alive:
raise self.NonResponsiveProcess('failed to kill pid {pid} with signals {chain}'
.format(pid=self.pid, chain=signal_chain))
if purge:
self.purge_metadata(force=True)
def daemonize(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None,
write_pid=True):
"""Perform a double-fork, execute callbacks and write the child pid file.
The double-fork here is necessary to truly daemonize the subprocess such that it can never
take control of a tty. The initial fork and setsid() creates a new, isolated process group
and also makes the first child a session leader (which can still acquire a tty). By forking a
second time, we ensure that the second child can never acquire a controlling terminal because
it's no longer a session leader - but it now has its own separate process group.
Additionally, a normal daemon implementation would typically perform an os.umask(0) to reset
the processes file mode creation mask post-fork. We do not do this here (and in daemon_spawn
below) due to the fact that the daemons that pants would run are typically personal user
daemons. Having a disparate umask from pre-vs-post fork causes files written in each phase to
differ in their permissions without good reason - in this case, we want to inherit the umask.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
os.setsid()
second_pid = os.fork()
if second_pid == 0:
try:
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
try:
if write_pid: self.write_pid(second_pid)
self.post_fork_parent(**post_fork_parent_opts or {})
except Exception:
logging.critical(traceback.format_exc())
os._exit(0)
else:
# This prevents un-reaped, throw-away parent processes from lingering in the process table.
os.waitpid(pid, 0)
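# Minimal double-fork sketch (illustrative, not part of the original source):
# the first fork plus setsid() detaches from the controlling terminal; the
# second fork makes the survivor a non-session-leader, so it can never
# re-acquire a tty. `work` is a hypothetical callable:
#
#   pid = os.fork()
#   if pid != 0:
#       os.waitpid(pid, 0)    # parent: reap the short-lived first child
#   else:
#       os.setsid()           # first child: leader of a fresh session
#       if os.fork() != 0:
#           os._exit(0)       # first child exits immediately
#       work()                # grandchild: fully daemonized
#       os._exit(0)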
def daemon_spawn(self, pre_fork_opts=None, post_fork_parent_opts=None, post_fork_child_opts=None):
"""Perform a single-fork to run a subprocess and write the child pid file.
Use this if your post_fork_child block invokes a subprocess via subprocess.Popen(). In this
case, a second fork such as used in daemonize() is extraneous given that Popen() also forks.
Using this daemonization method vs daemonize() leaves the responsibility of writing the pid
to the caller to allow for library-agnostic flexibility in subprocess execution.
"""
self.purge_metadata()
self.pre_fork(**pre_fork_opts or {})
pid = os.fork()
if pid == 0:
try:
os.setsid()
os.chdir(self._buildroot)
self.post_fork_child(**post_fork_child_opts or {})
except Exception:
    logging.critical(traceback.format_exc())
os._exit(0)
#
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio SAT_OBSERVER module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the sat_observer namespace
try:
# this might fail if the module is python-only
from sat_observer_swig import *
except ImportError:
pass
# import any pure python here
#
from datetime import datetime
import subprocess
import logging
import os
from flask import Flask, current_app
from flask.ext.sqlalchemy import SQLAlchemy
from jinja2 import FileSystemLoader
from werkzeug.local import LocalProxy
import yaml
from flask.ext.cache import Cache
from bitcoinrpc import AuthServiceProxy
root = os.path.abspath(os.path.dirname(__file__) + '/../')
db = SQLAlchemy()
cache = Cache()
coinserv = LocalProxy(
lambda: getattr(current_app, 'rpc_connection', None))
def create_app(config='/config.yml', celery=False):
# initialize our flask application
app = Flask(__name__, static_folder='../static', static_url_path='/static')
# set our template path and configs
app.jinja_loader = FileSystemLoader(os.path.join(root, 'templates'))
config_vars = yaml.load(open(root + config))
# inject all the yaml configs
app.config.update(config_vars)
app.logger.info(app.config)
app.rpc_connection = AuthServiceProxy(
    "http://{0}:{1}@{2}:{3}/"
    .format(app.config['coinserv']['username'],
            app.config['coinserv']['password'],
            app.config['coinserv']['address'],
            app.config['coinserv']['port']),
    pool_kwargs=dict(maxsize=app.config.get('maxsize', 10)))
# add the debug toolbar if we're in debug mode...
if app.config['DEBUG']:
from flask_debugtoolbar import DebugToolbarExtension
DebugToolbarExtension(app)
app.logger.handlers[0].setFormatter(logging.Formatter(
'%(asctime)s %(levelname)s: %(message)s '
'[in %(filename)s:%(lineno)d]'))
# register all our plugins
db.init_app(app)
cache_config = {'CACHE_TYPE': 'redis'}
cache_config.update(app.config.get('main_cache', {}))
cache.init_app(app, config=cache_config)
if not celery:
hdlr = logging.FileHandler(app.config.get('log_file', 'webserver.log'))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
app.logger.addHandler(hdlr)
app.logger.setLevel(logging.INFO)
# try and fetch the git version information
try:
output = subprocess.check_output("git show -s --format='%ci %h'",
shell=True).strip().rsplit(" ", 1)
app.config['hash'] = output[1]
app.config['revdate'] = output[0]
# celery won't work with this, so set some default
except Exception:
app.config['hash'] = ''
app.config['revdate'] = ''
# filters for jinja
@app.template_filter('time_ago')
def pretty_date(time=False):
"""
Get a datetime object or a int() Epoch timestamp and return a
pretty string like 'an hour ago', 'Yesterday', '3 months ago',
'just now', etc
"""
now = datetime.utcnow()
if type(time) is int:
diff = now - datetime.fromtimestamp(time)
elif isinstance(time, datetime):
diff = now - time
elif not time:
diff = now - now
second_diff = diff.seconds
day_diff = diff.days
if day_diff < 0:
return ''
if day_diff == 0:
if second_diff < 60:
return str(second_diff) + " seconds ago"
if second_diff < 120:
return "a minute ago"
if second_diff < 3600:
return str(second_diff / 60) + " minutes ago"
if second_diff < 7200:
return "an hour ago"
if second_diff < 86400:
return str(second_diff / 3600) + " hours ago"
if day_diff == 1:
return "Yesterday"
if day_diff < 7:
return str(day_diff) + " days ago"
if day_diff < 31:
return str(day_diff/7) + " weeks ago"
if day_diff < 365:
return str(day_diff/30) + " months ago"
return str(day_diff/365) + " years ago"
from .tasks import celery
celery.conf.update(app.config)
# Route registration
# =========================================================================
from . import views, models, api, rpc_views
app.register_blueprint(views.main)
app.register_blueprint(api.api, url_prefix='/api')
return app
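# Hypothetical usage of the factory above (illustrative, not part of the
# original source); '/config.yml' mirrors the function's own default:
#
#   application = create_app(config='/config.yml')
#   application.run(debug=application.config.get('DEBUG', False))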
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (http://tiny.be). All Rights Reserved
#
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import avanzosc_french_amortization
import wizard
#!/usr/bin/env python3
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "RankedChoiceRestaurants.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
#!/usr/bin/env python
##########################################################################
# run/ec2-setup/spot.py
#
# Part of Project Thrill - http://project-thrill.org
#
# Copyright (C) 2015 Matthias Stumpp <mstumpp@gmail.com>
#
# All rights reserved. Published under the BSD-2 license in the LICENSE file.
##########################################################################
import boto3
import time
import json
import datetime
import sys
with open('config.json') as data_file:
data = json.load(data_file)
client = boto3.client('ec2')
ec2 = boto3.resource('ec2')
job_id = int(time.time())
blockMappings = [{'DeviceName': '/dev/sda1',
'Ebs': {
'VolumeSize': 8,
'DeleteOnTermination': True,
'VolumeType': 'gp2'
}
}]
if data["VOL_SNAPSHOT_ID"]:
blockMappings.append(
{
'DeviceName': data["DEVICE"],
'Ebs': {
'SnapshotId': data["VOL_SNAPSHOT_ID"],
'DeleteOnTermination': True,
'VolumeType': 'gp2'
}
})
response = client.request_spot_instances(SpotPrice=data["SPOT_PRICE"],
                                         InstanceCount=data["COUNT"],
                                         Type=data["TYPE"],
                                         #ValidFrom=datetime.datetime(2015, 10, 11, 18, 10, 00),
                                         ValidUntil=datetime.datetime(2015, 10, 11, 19, 37, 00),
                                         LaunchSpecification={
                                             'ImageId': data["AMI_ID"],
                                             'KeyName': data["EC2_KEY_HANDLE"],
                                             'InstanceType': data["INSTANCE_TYPE"],
                                             'SecurityGroups': [data["SECGROUP_HANDLE"]],
                                             'Placement': {'AvailabilityZone': data["ZONE"]},
                                             'BlockDeviceMappings': blockMappings
                                         })
request_ids = []
for request in response['SpotInstanceRequests']:
request_ids.append(request['SpotInstanceRequestId'])
fulfilled_instances = []
loop = True;
print "waiting for instances to get fulfilled..."
while loop:
requests = client.describe_spot_instance_requests(SpotInstanceRequestIds=request_ids)
for request in requests['SpotInstanceRequests']:
if request['State'] in ['closed', 'cancelled', 'failed']:
print request['SpotInstanceRequestId'] + " " + request['State']
loop = False
break; # TODO(ms) ensure running instances are terminated
if 'InstanceId' in request and request['InstanceId'] not in fulfilled_instances:
fulfilled_instances.append(request['InstanceId'])
print request['InstanceId'] + " running..."
if len(fulfilled_instances) == int(data["COUNT"]):
print 'all requested instances are fulfilled'
break;
time.sleep(5)
if loop == False:
    print "unable to fulfill all requested instances... aborting..."
    sys.exit()
# add tag to each instance
for instance_id in fulfilled_instances:
    ec2.Instance(instance_id).create_tags(Tags=[{'Key': 'JobId', 'Value': str(job_id)}])
# ensure all instances are running
loop = True
while loop:
    loop = False
    response = client.describe_instance_status(InstanceIds=fulfilled_instances, IncludeAllInstances=True)
    for status in response['InstanceStatuses']:
        if status['InstanceState']['Name'] != 'running':
            loop = True
    if loop:
        time.sleep(5)
print "all instances are running..."
print str(data["COUNT"]) + " instances up and running! JobId: " + str(job_id)
##########################################################################
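# Alternative polling sketch (illustrative, not part of the original script):
# boto3 also ships built-in waiters that can replace the manual sleep loops
# above, given the same `client` and `request_ids`:
#
#   client.get_waiter('spot_instance_request_fulfilled').wait(
#       SpotInstanceRequestIds=request_ids)
#   reqs = client.describe_spot_instance_requests(
#       SpotInstanceRequestIds=request_ids)
#   instance_ids = [r['InstanceId'] for r in reqs['SpotInstanceRequests']]
#   client.get_waiter('instance_running').wait(InstanceIds=instance_ids)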
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_with_repo(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function with repos and:
* no last comment,
* check_request_state returns True
* no errors,
* no namespace
"""
# Set up mock return values and side effects
mock_check_request_state.return_value = True
mock_avatar.return_value = 'dummy_avatar'
mock_get_last_comment.return_value = 'dummy_last_comment'
mock_datetime.utcfromtimestamp.return_value = self.mock_utcfromtimestamp
mock_datetime.strptime.return_value = 'mock_strptime_date'
mock_PagureReview.return_value = '1'
mock_call_api.return_value = mock_pagure.mock_api_call_return_value()
# Call function
response = PagureService().request_reviews(
user_name='dummy_user',
repo_name='dummy_repo'
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/dummy_repo/pull-requests',
ssl_verify=True)
mock_get_last_comment.assert_called_with(
mock_call_api.return_value['requests'][0]
)
mock_datetime.strptime.assert_called_with('mock_date', '%Y-%m-%d %H:%M:%S.%f')
mock_has_new_comments.assert_not_called()
mock_check_request_state.assert_called_with('mock_strptime_date', None)
mock_avatar.assert_called_with(
'dummy_user', ssl_verify=True
)
mock_PagureReview.assert_called_with(
user='dummy_user',
title='dummy_title',
url='https://pagure.io/mock_repo_reference/pull-request/mock_id',
time='mock_strptime_date',
updated_time='mock_strptime_date',
comments=3,
image='dummy_avatar',
last_comment='dummy_last_comment',
project_name='mock_repo_reference',
project_url='https://pagure.io/mock_repo_reference'
)
self.assertEqual(response, ['1'])
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_no_repo(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function without repos and:
* no last comment,
* check_request_state returns True
* _call_api raises a HTTPError,
* no namespace
"""
# Set up mock return values and side effects
mock_call_api.side_effect = requests.exceptions.HTTPError
# Call function
with self.assertRaises(Exception):
PagureService().request_reviews(
user_name='dummy_user'
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/pull-requests',
ssl_verify=True
)
mock_get_last_comment.assert_not_called()
mock_datetime.strptime.assert_not_called()
mock_has_new_comments.assert_not_called()
mock_check_request_state.assert_not_called()
mock_avatar.assert_not_called()
mock_PagureReview.assert_not_called()
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_with_repo_last_comment(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function with repos and:
* with last comment,
* check_request_state returns True
* no errors,
* no namespace
"""
# Set up mock return values and side effects
mock_check_request_state.return_value = True
mock_avatar.return_value = 'dummy_avatar'
self.mock_last_comment.created_at = 'dummy_date'
mock_get_last_comment.return_value = self.mock_last_comment
mock_datetime.utcfromtimestamp.return_value = self.mock_utcfromtimestamp
mock_datetime.strptime.return_value = 'mock_strptime_date'
mock_PagureReview.return_value = '1'
mock_call_api.return_value = mock_pagure.mock_api_call_return_value()
# Call function
response = PagureService().request_reviews(
user_name='dummy_user',
repo_name='dummy_repo',
show_last_comment=True
)
# Validate function calls and response
mock_call_api.assert_called_with(
url='https://pagure.io/api/0/dummy_user/dummy_repo/pull-requests',
ssl_verify=True
)
mock_get_last_comment.assert_called_with(
mock_call_api.return_value['requests'][0]
)
mock_datetime.strptime.assert_called_with('mock_date', '%Y-%m-%d %H:%M:%S.%f')
mock_has_new_comments.assert_called_with(
'dummy_date', True
)
mock_check_request_state.assert_called_with('mock_strptime_date', None)
mock_avatar.assert_not_called()
mock_PagureReview.assert_not_called()
self.assertEqual(response, [])
@patch(PATH + 'PagureService._call_api')
@patch(PATH + 'PagureService.get_last_comment')
@patch(PATH + 'datetime')
@patch(PATH + 'PagureService.check_request_state')
@patch(PATH + 'PagureService.has_new_comments')
@patch(PATH + 'PagureReview')
@patch(PATH + 'PagureService._avatar')
def test_request_reviews_with_repo_with_age(self,
mock_avatar,
mock_PagureReview,
mock_has_new_comments,
mock_check_request_state,
mock_datetime,
mock_get_last_comment,
mock_call_api):
"""
Tests 'request_reviews' function with repos and:
* no last comment,
* check_request_state returns False
* no errors,
* no namespace
"""
# Set up mock return values and side effects
mock_check_request_state.return_value = False
mock_get_last_comment.return_value = 'dummy_last_comment'
mock_datetime.utcfromtimestamp.return_value = self.mock_utcfromtimestamp_error
mock_datetime.strptime.return_value = 'mock_strptime_date'
"""Handle nice json response for error."""
from flask import jsonify
def not_found(e):
"""Send a correct json for 404."""
response = jsonify({'status': 404, 'error': 'Not found',
'message': 'Invalid resource URI'})
response.status_code = 404
return response
def method_not_supported(e):
"""Send a correct json for 405."""
response = jsonify({'status': 405, 'error': 'Method not supported',
'message': 'This method is not supported'})
response.status_code = 405
return response
def internal_server_error(e):
"""Send a correct json for 500."""
response = jsonify({'status': 500, 'error': 'Internal server error',
'message': e.args[0]})
response.status_code = 500
return response
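# Illustrative wiring sketch (an assumption, not shown in this fragment): the
# handlers above are typically attached to the Flask application like so.
def register_error_handlers(app):
    app.register_error_handler(404, not_found)
    app.register_error_handler(405, method_not_supported)
    app.register_error_handler(500, internal_server_error)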
message = ("The problem is (trivially) unbounded "
           "because there are no non-trivial constraints and "
"a) at least one decision variable is unbounded "
"above and its corresponding cost is negative, or "
"b) at least one decision variable is unbounded below "
"and its corresponding cost is positive. ")
else: # test_empty_constraint_2
status = 0
message = ("The solution was determined in presolve as there are "
"no non-trivial constraints.")
complete = True
x[c < 0] = ub_mod[c < 0]
x[c > 0] = lb_mod[c > 0]
# where c is zero, set x to a finite bound or zero
x_zero_c = ub_mod[c == 0]
x_zero_c[np.isinf(x_zero_c)] = lb_mod[c == 0][np.isinf(x_zero_c)]
x_zero_c[np.isinf(x_zero_c)] = 0
x[c == 0] = x_zero_c
# if this is not the last step of presolve, should convert bounds back
# to array and return here
# *sigh* - convert bounds back to their standard form (list of tuples)
# again, in retrospect, numpy array would be standard form
lb[np.equal(lb, -np.inf)] = None
ub[np.equal(ub, np.inf)] = None
bounds = np.hstack((lb[:, np.newaxis], ub[:, np.newaxis]))
bounds = bounds.tolist()
for i, row in enumerate(bounds):
for j, col in enumerate(row):
if str(col) == "nan":
# comparing col to float("nan") and np.nan doesn't work.
# should use np.isnan
bounds[i][j] = None
# remove redundant (linearly dependent) rows from equality constraints
n_rows_A = A_eq.shape[0]
redundancy_warning = ("A_eq does not appear to be of full row rank. To "
"improve performance, check the problem formulation "
"for redundant equality constraints.")
if (sps.issparse(A_eq)):
if rr and A_eq.size > 0: # TODO: Fast sparse rank check?
A_eq, b_eq, status, message = _remove_redundancy_sparse(A_eq, b_eq)
if A_eq.shape[0] < n_rows_A:
warn(redundancy_warning, OptimizeWarning, stacklevel=1)
if status != 0:
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
# This is a wild guess for which redundancy removal algorithm will be
# faster. More testing would be good.
small_nullspace = 5
if rr and A_eq.size > 0:
try: # TODO: instead use results of first SVD in _remove_redundancy
rank = np.linalg.matrix_rank(A_eq)
except Exception: # oh well, we'll have to go with _remove_redundancy_dense
rank = 0
if rr and A_eq.size > 0 and rank < A_eq.shape[0]:
warn(redundancy_warning, OptimizeWarning, stacklevel=3)
dim_row_nullspace = A_eq.shape[0]-rank
if dim_row_nullspace <= small_nullspace:
A_eq, b_eq, status, message = _remove_redundancy(A_eq, b_eq)
if dim_row_nullspace > small_nullspace or status == 4:
A_eq, b_eq, status, message = _remove_redundancy_dense(A_eq, b_eq)
if A_eq.shape[0] < rank:
message = ("Due to numerical issues, redundant equality "
           "constraints could not be removed automatically. "
           "Try providing your constraint matrices as sparse "
           "matrices to activate sparse presolve, try turning "
           "off redundancy removal, or try turning off presolve "
           "altogether.")
status = 4
if status != 0:
complete = True
return (_LPProblem(c, A_ub, b_ub, A_eq, b_eq, bounds, x0),
c0, x, undo, complete, status, message)
def _parse_linprog(lp, options):
"""
Parse the provided linear programming problem
``_parse_linprog`` employs two main steps ``_check_sparse_inputs`` and
``_clean_inputs``. ``_check_sparse_inputs`` checks for sparsity in the
provided constraints (``A_ub`` and ``A_eq``) and whether these match the
provided sparsity-related options.
``_clean_inputs`` validates the provided inputs. If no violations are
identified, the objective vector, upper bound constraints, equality
constraints, and simple bounds are returned in the expected format.
Parameters
----------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
options : dict
A dictionary of solver options. All methods accept the following
generic options:
maxiter : int
Maximum number of iterations to perform.
disp : bool
Set to True to print convergence messages.
For method-specific options, see :func:`show_options('linprog')`.
Returns
-------
lp : A `scipy.optimize._linprog_util._LPProblem` consisting of the following fields:
c : 1D array
The coefficients of the linear objective function to be minimized.
A_ub : 2D array, optional
The inequality constraint matrix. Each row of ``A_ub`` specifies the
coefficients of a linear inequality constraint on ``x``.
b_ub : 1D array, optional
The inequality constraint vector. Each element represents an
upper bound on the corresponding value of ``A_ub @ x``.
A_eq : 2D array, optional
The equality constraint matrix. Each row of ``A_eq`` specifies the
coefficients of a linear equality constraint on ``x``.
b_eq : 1D array, optional
The equality constraint vector. Each element of ``A_eq @ x`` must equal
the corresponding element of ``b_eq``.
bounds : sequence, optional
A sequence of ``(min, max)`` pairs for each element in ``x``, defining
the minimum and maximum values of that decision variable. Use ``None`` to
indicate that there is no bound. By default, bounds are ``(0, None)``
(all decision variables are non-negative).
If a single tuple ``(min, max)`` is provided, then ``min`` and
``max`` will serve as bounds for all decision variables.
x0 : 1D array, optional
Guess values of the decision variables, which will be refined by
the optimization algorithm. This argument is currently used only by the
'revised simplex' method, and can only be used if `x0` represents a
basic feasible solution.
options : dict, optional
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_overrides_reduce(self):
for proto in protocols:
x = REX_three()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, None)
def test_reduce_ex_calls_base(self):
for proto in protocols:
x = REX_four()
self.assertEqual(x._proto, None)
s = self.dumps(x, proto)
self.assertEqual(x._proto, proto)
y = self.loads(s)
self.assertEqual(y._proto, proto)
def test_reduce_calls_base(self):
for proto in protocols:
x = REX_five()
self.assertEqual(x._reduce_called, 0)
s = self.dumps(x, proto)
self.assertEqual(x._reduce_called, 1)
y = self.loads(s)
self.assertEqual(y._reduce_called, 1)
def test_reduce_bad_iterator(self):
# Issue4176: crash when 4th and 5th items of __reduce__()
# are not iterators
class C(object):
def __reduce__(self):
# 4th item is not an iterator
return list, (), None, [], None
class D(object):
def __reduce__(self):
# 5th item is not an iterator
return dict, (), None, None, []
# Protocol 0 is less strict and also accepts iterables.
for proto in protocols:
try:
self.dumps(C(), proto)
except (AttributeError, pickle.PickleError, cPickle.PickleError):
pass
try:
self.dumps(D(), proto)
except (AttributeError, pickle.PickleError, cPickle.PickleError):
pass
def test_many_puts_and_gets(self):
# Test that internal data structures correctly deal with lots of
# puts/gets.
keys = ("aaa" + str(i) for i in xrange(100))
large_dict = dict((k, [4, 5, 6]) for k in keys)
obj = [dict(large_dict), dict(large_dict), dict(large_dict)]
for proto in protocols:
dumped = self.dumps(obj, proto)
loaded = self.loads(dumped)
self.assertEqual(loaded, obj,
"Failed protocol %d: %r != %r"
% (proto, obj, loaded))
def test_attribute_name_interning(self):
# Test that attribute names of pickled objects are interned when
# unpickling.
for proto in protocols:
x = C()
x.foo = 42
x.bar = "hello"
s = self.dumps(x, proto)
y = self.loads(s)
x_keys = sorted(x.__dict__)
y_keys = sorted(y.__dict__)
for x_key, y_key in zip(x_keys, y_keys):
self.assertIs(x_key, y_key)
# Test classes for reduce_ex
class REX_one(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return REX_one, ()
# No __reduce_ex__ here, but inheriting it from object
class REX_two(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
# No __reduce__ here, but inheriting it from object
class REX_three(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return REX_two, ()
def __reduce__(self):
raise TestFailed, "This __reduce__ shouldn't be called"
class REX_four(object):
_proto = None
def __reduce_ex__(self, proto):
self._proto = proto
return object.__reduce_ex__(self, proto)
# Calling base class method should succeed
class REX_five(object):
_reduce_called = 0
def __reduce__(self):
self._reduce_called = 1
return object.__reduce__(self)
# This one used to fail with infinite recursion
# Test classes for newobj
class MyInt(int):
sample = 1
class MyLong(long):
sample = 1L
class MyFloat(float):
sample = 1.0
class MyComplex(complex):
sample = 1.0 + 0.0j
class MyStr(str):
sample = "hello"
class MyUnicode(unicode):
sample = u"hello \u1234"
class MyTuple(tuple):
sample = (1, 2, 3)
class MyList(list):
sample = [1, 2, 3]
class MyDict(dict):
sample = {"a": 1, "b": 2}
myclasses = [MyInt, MyLong, MyFloat,
MyComplex,
MyStr, MyUnicode,
MyTuple, MyList, MyDict]
class SlotList(MyList):
__slots__ = ["foo"]
class SimpleNewObj(object):
def __init__(self, a, b, c):
# raise an error, to make sure this isn't called
raise TypeError("SimpleNewObj.__init__() didn't expect to get called")
class AbstractPickleModuleTests(unittest.TestCase):
def test_dump_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.dump, 123, f)
finally:
os.remove(TESTFN)
def test_load_closed_file(self):
import os
f = open(TESTFN, "w")
try:
f.close()
self.assertRaises(ValueError, self.module.load, f)
finally:
os.remove(TESTFN)
def test_load_from_and_dump_to_file(self):
stream = cStringIO.StringIO()
data = [123, {}, 124]
self.module.dump(data, stream)
stream.seek(0)
unpickled = self.module.load(stream)
self.assertEqual(unpickled, data)
def test_highest_protocol(self):
# Of course this needs to be changed when HIGHEST_PROTOCOL changes.
self.assertEqual(self.module.HIGHEST_PROTOCOL, 2)
def test_callapi(self):
f = cStringIO.StringIO()
# With and without keyword arguments
self.module.dump(123, f, -1)
self.module.dump(123, file=f, protocol=-1)
self.module.dumps(123, -1)
self.module.dumps(123, protocol=-1)
self.module.Pickler(f, -1)
self.module.Pickler(f, protocol=-1)
def test_incomplete_input(self):
s = StringIO.StringIO("X''.")
self.assertRaises(EOFError, self.module.load, s)
def test_restricted(self):
# issue7128: cPickle failed in restricted mode
builtins = {self.module.__name__: self.module,
'__import__': __import__}
d = {}
teststr = "def f(): {0}.dumps(0)".format(self.module.__name__)
exec teststr in {'__builtins__': builtins}, d
d['f']()
def test_bad_input(self):
# Test issue4298
s = '\x58\0\0\0\x54'
self.assertRaises(EOFError, self.module.loads, s)
# Test issue7455
s = '0'
# XXX Why doesn't pickle raise UnpicklingError?
self.assertRaises((IndexError, cPickle.UnpicklingError),
self.module.loads, s)
class AbstractPersistentPicklerTests(unittest.TestCase):
# This class defines persistent_id() and persistent_load()
# functions that should be used by the pickler. All even integers
# are pickled using persistent ids.
def persistent_id(self, object):
if isinstance(object, int) and object % 2 == 0:
self.id_count += 1
return str(object)
else:
return None
def persistent_load(self, oid):
self.load_count += 1
object = int(oid)
assert object % 2 == 0
return object
def test_persistence(self):
self.id_count = 0
self.load_count = 0
L = range(10)
self.assertEqual(self.loads(self.dumps(L)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
def test_bin_persistence(self):
self.id_count = 0
self.load_count = 0
L = range(10)
self.assertEqual(self.loads(self.dumps(L, 1)), L)
self.assertEqual(self.id_count, 5)
self.assertEqual(self.load_count, 5)
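# Illustrative sketch (not part of the original tests): concrete subclasses
# typically wire these hooks up by assigning them onto the (un)pickler, e.g.:
#
#   f = cStringIO.StringIO()
#   p = pickle.Pickler(f, 0)
#   p.persistent_id = self.persistent_id    # even ints become persistent ids
#   p.dump(range(10))
#   u = pickle.Unpickler(cStringIO.StringIO(f.getvalue()))
#   u.persistent_load = self.persistent_load
#   assert u.load() == range(10)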
class AbstractPicklerUnpicklerObjectTests(unittest.TestCase):
elif element.text.lower() in ('off', 'no', 'false'):
return False
else:
raise Exception("Invalid boolean value: %s in %s" % (element.text, element.tag))
else:
return default_value
def _get_elem_email_format_value(element, default_value):
if element is not None:
return EmailFormat[element.text]
else:
return default_value
def _get_elem_email_security_value(element, default_value):
if element is not None:
return EmailSecurity[element.text]
else:
return default_value
def _get_elem_list_value(element, default_value):
if element is not None:
return element.text.split(', ')
else:
return default_value
def _get_elem_tasks_value(element, default_value):
if element is None:
return default_value
tasks = []
for task in element:
if task.tag == 'sourcecontrol':
if task.attrib['type'] in ('svn', 'git'):
url = task.find('./url').text
workingDirectory = _get_elem_str_value(task.find('./workingDirectory'), '')
preCleanWorkingDirectory = _get_elem_bool_value(
task.find('./preCleanWorkingDirectory'),
False)
if task.attrib['type'] == 'svn':
tasks.append(svntask.SvnTask(url, workingDirectory, preCleanWorkingDirectory))
else: # git
tasks.append(gittask.GitTask(url, workingDirectory, preCleanWorkingDirectory))
else:
Logger.warning('Unsupported sourcecontrol type ' + task.attrib['type'])
elif task.tag == 'make':
workingDirectory = _get_elem_str_value(task.find('./workingDirectory'), '')
args = _get_elem_str_value(task.find('./args'), '')
timeout = _get_elem_int_value(task.find('./timeout'), 600)
tasks.append(maketask.MakeTask(workingDirectory, args, timeout))
elif task.tag == 'exec':
executable = task.find('./executable').text
workingDirectory = _get_elem_str_value(task.find('./workingDirectory'), '')
args = _get_elem_str_value(task.find('./args'), '')
timeout = _get_elem_int_value(task.find('./timeout'), 600)
warningExitCode = _get_elem_int_value(task.find('./warningExitCode'), None)
tasks.append(
exectask.ExecTask(
executable,
workingDirectory,
args,
timeout,
warningExitCode))
else:
Logger.warning('Unsupported task ' + task.tag)
return tasks
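# Illustrative configuration fragment (an assumption inferred from the tags
# parsed above, not copied from ccpy's documentation):
#
#   <ccpy>
#     <project name="myproj">
#       <tasks>
#         <sourcecontrol type="git">
#           <url>git://example.org/myproj.git</url>
#           <workingDirectory>myproj</workingDirectory>
#         </sourcecontrol>
#         <make>
#           <workingDirectory>myproj</workingDirectory>
#           <timeout>600</timeout>
#         </make>
#       </tasks>
#     </project>
#   </ccpy>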
class ParseError(Exception):
pass
class Projects:
def __init__(self):
self._projects = []
self.cur = 0
def exists(self, name):
for project in self._projects:
if project['name'] == name:
return True
return False
def append(self, name,
tasks,
emailFrom, emailTo, emailFormat,
emailServerHost, emailServerPort,
emailServerSecurity,
emailServerUsername, emailServerPassword,
emailAttachments,
failOnError):
if self.exists(name):
raise Exception(
"Failed to add project because the project named '%s' already exists" %
name)
if tasks is None:
tasks = []
if emailTo is None:
emailTo = []
self._projects.append({'name': name,
'tasks': tasks,
'emailFrom': emailFrom,
'emailTo': emailTo,
'emailFormat': emailFormat,
'emailServerHost': emailServerHost,
'emailServerPort': emailServerPort,
'emailServerSecurity': emailServerSecurity,
'emailServerUsername': emailServerUsername,
'emailServerPassword': emailServerPassword,
'emailAttachments': emailAttachments,
'failOnError': failOnError})
def addTask(self, name, task):
if not self.exists(name):
raise Exception(
"Failed to add task because the project named '%s' does not exist" %
name)
for project in self._projects:
if project['name'] == name:
project['tasks'].append(task)
def next(self):
if self.cur >= len(self._projects):
self.cur = 0
raise StopIteration
else:
cur = self.cur
self.cur = cur + 1
key = self._projects[cur]['name']
val = deepcopy(self._projects[cur])
val.pop('name')
return key, val
def __next__(self):
# for compatibility between Python 2 that uses next() and Python 3 that uses __next__()
return self.next()
def __iter__(self):
return self
def __getitem__(self, name):
for project in self._projects:
if project['name'] == name:
retVal = deepcopy(project)
retVal.pop('name')
return retVal
raise Exception("Project named '%s' does not exist" % name)
def __len__(self):
return len(self._projects)
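# Illustrative usage (not part of the original module): Projects implements
# the iterator protocol above, yielding (name, settings) pairs in config-file
# order once populated, e.g. by parse() below:
#
#   projects = parse('/etc/ccpy.conf')
#   for name, settings in projects:
#       print("%s has %d task(s)" % (name, len(settings['tasks'])))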
def parse(aCcPyConfigFileName=DefCcPyConfigFileName):
"""Parse ccpy project configuration file
Return an instance of the Projects class.
Projects and tasks within each project are returned in the order they appear in the config file.
Supported tasks are: SvnTask, MakeTask and ExecTask
Throw ParseError
"""
try:
Logger.debug("Reading ccpy configuration from %s..." % aCcPyConfigFileName)
tree = ET.parse(aCcPyConfigFileName)
root = tree.getroot()
if (root.tag != 'ccpy'):
raise Exception('Invalid root tag name: ' + root.tag)
projects = Projects()
for projectElem in root.findall('./project'):
tasks = _get_elem_tasks_value(projectElem.find('./tasks'), None)
emailFrom = _get_elem_str_value(projectElem.find('./emailNotification/from'), "")
emailTo = _get_elem_list_value(projectElem.find('./emailNotification/to'), None)
emailFormat = _get_elem_email_format_value(
projectElem.find('./emailNotification/format'),
EmailFormat.attachment)
emailServerHost = _get_elem_str_value(
projectElem.find('./emailNotification/server'),
'localhost')
emailServerPort = _get_elem_int_value(
projectElem.find('./emailNotification/port'),
25)
emailServerSecurity = _get_elem_email_security_value(
projectElem.find('./emailNotification/security'),
EmailSecurity.none)
emailServerUsername = _get_elem_str_value(
projectElem.find('./emailNotification/username'),
None)
emailServerPassword = _get_elem_str_value(
projectElem.find('./emailNotification/password'),
None)
emailAttachments = []
for emailAttachment in projectElem.findall('./emailNotification/attachment'):
emailAttachments.append(emailAttachment.text)
failOnError = _get_elem_bool_value(projectElem.find('./failOnError'), True)
projects.append(projectElem.attrib['name'],
tasks=tasks,
emailFrom=emailFrom,
emailTo=emailTo,
emailFormat=emailFormat,
emailServerHost=emailServerHost,
emailServerPort=emailServerPort,
emailServerSecurity=emailServerSecurity,
emailServerUsername=emailServerUsername,
emailServerPassword=emailServerPassword,
emailAttachments=emailAttachments,
failOnError=failOnError)
# Source: LMD/models.py
from google.appengine.ext import db
class Users(db.Model):
username = db.StringProperty()
passwd = db.StringProperty()
email = db.StringProperty()
fullname = db.StringProperty()
address = db.TextProperty()
phone = db.StringProperty()
role = db.IntegerProperty(default=99)
livecenter = db.ListProperty(db.Key,default=[])
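# Illustrative usage sketch (an assumption, not in the original file), using
# the classic google.appengine.ext.db API:
#
#   user = Users(username='alice', email='alice@example.com', role=1)
#   key = user.put()
#   admins = Users.all().filter('role =', 1).fetch(10)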
# URL sorted by priority
# If one URL does not work, the next one will be tried
self.po_urls = po_urls
def spawn_runner(self, module, module_file, language, port):
"""Launch a gladerunner instance.
If a running process is attached to this session, it will be replaced.
"""
self.port = port
env = {
"GDK_BACKEND": "broadway",
"UBUNTU_MENUPROXY": "",
"LIBOVERLAY_SCROLLBAR": "0",
}
if self.process is not None and self.process.poll() is None:
self.process.kill()
if language in self.custom_po:
if self.custom_po[language][0] != module:
raise DeckardException(
'"%s" does not exist' % language,
"No such file was registered for the " "%s module." % module,
)
lang_root = os.path.join(self.custom_po[language][1], "LANGS")
# This locale has to be available on your system
language = "%s.UTF-8" % self.custom_po[language][2]
else:
if language != "POSIX":
language = "%s.UTF-8" % language
lang_root = os.path.join(self.content_root, "LANGS")
env["LANG"] = language
# Build the gladerunner command line
args = [
self.gladerunner,
"--suicidal",
"--with-broadwayd",
str(port),
os.path.join(self.content_root, module, module_file),
module,
language,
lang_root,
]
# Should we use a Glade catalog?
if os.path.isfile(self.glade_catalog):
args.extend(("--catalog-path", self.glade_catalog))
# Launch it!
self.process = Popen(args, stdin=PIPE, env=env)
def store_po(self, name, module, fd=None):
"""Store a custom PO file
If fd is None, try to download name from self.po_urls.
Each url of the list will be tried until the file is found.
If a PO file with the same name is already attached to this session,
it will be replaced.
Returns a dictionary, associating all relevant modules with a list of
stored PO files for it on this session, from the oldest to the newest.
"""
# Very basic check, msgfmt will crash anyway if the file is not valid
if not name.lower().endswith(".po"):
raise DeckardException(
"This is not a PO file", "%s is not a PO file." % name
)
lang_root = tempfile.mkdtemp(prefix="deckard_")
po_path = os.path.join(lang_root, "file.po")
po = open(po_path, "bw")
if fd is not None:
# The file was sent by the user
for line in fd:
po.write(line)
po.close()
fd.close()
elif len(self.po_urls) > 0:
# Let's try to download 'name'
response = None
error = None
for url in self.po_urls:
try:
response = urllib.request.urlopen(url % name)
break
except Exception as e:
error = str(e)
if response is None:
# Most likely a '404: not found' error
raise DeckardException("Enable to retrieve the file", error)
res_len = response.length
if res_len > self.max_po_download_size:
response.close()
raise DeckardException(
"File too big",
'The "%s" file is %d long and this app '
"will not retrieve a file bigger than "
"%d bytes." % (name, res_len, self.max_po_download_size),
)
# Let's finally download this file!
po.write(response.read(res_len))
response.close()
po.close()
else:
raise DeckardException(
"Operation not supported",
"The PO download feature is not configured " "on this instance.",
)
# Try to guess the language of this PO file, default is 'en_US'
# This is good to know to later set proper environment variables and so
# load the right GTK translation and reverse the interface if necessary
po_lang = "en_US"
with open(po_path, encoding="utf8") as po:
# Give up if we find nothing in the 50 first lines
for _ in range(50):
line = po.readline()
match = re.match(r'^"Language: (.+)\\n"$', line)
if match:
po_lang = match.group(1)
# The encoding is often wrong, so strip it
po_lang = locale.normalize(po_lang).rsplit(".")[0]
# Test if the detected locale is available on the system
try:
locale.setlocale(locale.LC_ALL, "%s.UTF-8" % po_lang)
except:
# Fallback to a known locale
po_lang = "en_US"
finally:
locale.resetlocale()
break
# create necessary directories
mo_path = os.path.join(lang_root, "LANGS", po_lang, "LC_MESSAGES")
os.makedirs(mo_path)
try:
check_output(
[
"/usr/bin/msgfmt",
"--check",
"--output-file",
os.path.join(mo_path, module) + ".mo",
po_path,
],
stderr=STDOUT,
)
except CalledProcessError as e:
shutil.rmtree(lang_root)
# We don't need to expose the file name in the error message
log = e.output.decode("unicode_escape").replace("%s:" % po_path, "")
raise DeckardException("Error while building the .mo", log)
if name in self.custom_po:
shutil.rmtree(self.custom_po[name][1])
del self.custom_po[name] # drop to re-add at the end of the queue
elif len(self.custom_po) >= self.max_custom_po:
# delete the oldest
shutil.rmtree(self.custom_po.popitem(last=False)[1][1])
self.custom_po[name] = (module, lang_root, po_lang)
res = {}
for item in self.custom_po:
if self.custom_po[item][0] not in res:
res[self.custom_po[item][0]] = [item]
else:
res[self.custom_po[item][0]].append(item)
return res
def keep_process_alive(self):
"""Beg the runner (if any) to stay alive
        Returns True if the message was sent, False if it wasn't (e.g. if there
is no process)."""
if self.process is not None and self.process.poll() is None:
self.process.stdin.write(b"Please stay alive!")
self.process.stdin.flush()
return True
return False
def is_removable(self):
"""State if this Session is removable.
Returns True if no running process is attached to this Session and
if no PO file is stored.
It also returns True if this Session was tagged as removable.
Otherwise, this function will return False.
"""
if self.removable:
return True
elif self.process is None or self.process.poll() is not None:
if len(self.custom_po) == 0:
return True
return False
def __del__(self):
"""Kill the process if it is running and delete any custom PO files"""
if self.process is not None and self.process.poll() is None:
self.process.kill()
for name in self.custom_po:
shutil.rmtree(self.custom_po[name][1])
class SessionsManager:
"""Helper to manage all Deckard sessions."""
def __init__(
self,
gladerunner,
content_root,
max_users=10,
first_port=2019,
max_custom_po_per_session=4,
max_po_download_size=1500000,
        glade_catalog
#!/usr/bin/env python
import os
import sys
import shutil
from autoprocess import autoProcessTV, autoProcessMovie, autoProcessTVSR, sonarr, radarr
from readSettings import ReadSettings
from mkvtomp4 import MkvtoMp4
from deluge_client import DelugeRPCClient
import logging
from logging.config import fileConfig
logpath = '/var/log/sickbeard_mp4_automator'
if os.name == 'nt':
logpath = os.path.dirname(sys.argv[0])
elif not os.path.isdir(logpath):
try:
os.mkdir(logpath)
except:
logpath = os.path.dirname(sys.argv[0])
configPath = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), 'logging.ini')).replace("\\", "\\\\")
logPath = os.path.abspath(os.path.join(logpath, 'index.log')).replace("\\", "\\\\")
fileConfig(configPath, defaults={'logfilename': logPath})
log = logging.getLogger("delugePostProcess")
log.info("Deluge post processing started.")
settings = ReadSettings(os.path.dirname(sys.argv[0]), "autoProcess.ini")
categories = [settings.deluge['sb'], settings.deluge['cp'], settings.deluge['sonarr'], settings.deluge['radarr'], settings.deluge['sr'], settings.deluge['bypass']]
remove = settings.deluge['remove']
if len(sys.argv) < 4:
log.error("Not enough command line parameters present, are you launching this from deluge?")
sys.exit()
path = str(sys.argv[3])
torrent_name = str(sys.argv[2])
torrent_id = str(sys.argv[1])
delete_dir = None
log.debug("Path: %s." % path)
log.debug("Torrent: %s." % torrent_name)
log.debug("Hash: %s." % torrent_id)
client = DelugeRPCClient(host=settings.deluge['host'], port=int(settings.deluge['port']), username=settings.deluge['user'], password=settings.deluge['pass'])
client.connect()
if client.connected:
log.info("Successfully connected to Deluge")
else:
log.error("Failed to connect to Deluge")
sys.exit()
torrent_data = client.call('core.get_torrent_status', torrent_id, ['files', 'label'])
try:
torrent_files = torrent_data[b'files']
category = torrent_data[b'label'].lower().decode()
except:
torrent_files = torrent_data['files']
category = torrent_data['label'].lower()
files = []
log.debug("List of files in torrent:")
for contents in torrent_files:
try:
files.append(contents[b'path'].decode())
log.debug(contents[b'path'].decode())
except:
files.append(contents['path'])
log.debug(contents['path'])
if category.lower() not in categories:
log.error("No valid category detected.")
sys.exit()
if len(categories) != len(set(categories)):
log.error("Duplicate category detected. Category names must be unique.")
sys.exit()
if settings.deluge['convert']:
# Check for custom Deluge output_dir
if settings.deluge['output_dir']:
settings.output_dir = settings.deluge['output_dir']
log.debug("Overriding output_dir to %s." % settings.deluge['output_dir'])
# Perform conversion.
settings.delete = False
if not settings.output_dir:
suffix = "convert"
settings.output_dir = os.path.join(path, ("%s-%s" % (torrent_name, suffix)))
if not os.path.exists(settings.output_dir):
os.mkdir(settings.output_dir)
delete_dir = settings.output_dir
converter = MkvtoMp4(settings)
for filename in files:
inputfile = os.path.join(path, filename)
if MkvtoMp4(settings).validSource(inputfile):
            log.info("Converting file %s at location %s." % (inputfile, settings.output_dir))
try:
output = converter.process(inputfile)
except:
log.exception("Error converting file %s." % inputfile)
path = converter.output_dir
else:
suffix = "copy"
        newpath = os.path.join(path, ("%s-%s" % (torrent_name, suffix)))
if not os.path.exists(newpath):
os.mkdir(newpath)
for filename in files:
inputfile = os.path.join(path, filename)
log.info("Copying file %s to %s." % (inputfile, newpath))
shutil.copy(inputfile, newpath)
path = newpath
delete_dir = newpath
# Send to Sickbeard
if (category == categories[0]):
log.info("Passing %s directory to Sickbeard." % path)
autoProcessTV.processEpisode(path, settings)
# Send to CouchPotato
elif (category == categories[1]):
log.info("Passing %s directory to Couch Potato." % path)
autoProcessMovie.process(path, settings, torrent_name)
# Send to Sonarr
elif (category == categories[2]):
log.info("Passing %s directory to Sonarr." % path)
sonarr.processEpisode(path, settings)
elif (category == categories[3]):
log.info("Passing %s directory to Radarr." % path)
radarr.processMovie(path, settings)
elif (category == categories[4]):
log.info("Passing %s directory to Sickrage." % path)
autoProcessTVSR.processEpisode(path, settings)
elif (category == categories[5]):
log.info("Bypassing any further processing as per category.")
if delete_dir:
if os.path.exists(delete_dir):
try:
os.rmdir(delete_dir)
log.debug("Successfully removed tempoary directory %s." % delete_dir)
except:
log.exception("Unable to delete temporary directory.")
if remove:
try:
client.call('core.remove_torrent', torrent_id, True)
except:
log.exception("Unable to remove torrent from deluge.")
from django.conf.urls import url
from rest_framework.urlpatterns import format_suffix_patterns
from . import views
from django.views.generic.base import View
app_name = 'secapp'
urlpatterns = [
# Index view
    url(r'^$', views.index, name='index'),
# List of events for a Log Source
url(r'^(?P<id_log_source>[0-9]+)/event/$', views.events, name='events'),
# Packet of an event (for a Log Source)
url(r'^(?P<id_log_source>[0-9]+)/event/(?P<id_event>[0-9]+)$', views.event_information,
name='event_information'),
# Additional information about a packet event
    url(r'^(?P<id_log_source>[0-9]+)/event/(?P<id_event>[0-9]+)/additional_info/$',
views.additional_info,
name='additional_info'),
url(r'^api/events/$', views.EventsInformation().events_list, name='events_list'),
url(r'^api/events/by_source/(?P<pk>[0-9]+)/$', views.EventsInformation().events_by_source,
name='events_by_source'),
url(r'^api/events/by_source/(?P<pk>[0-9]+)/(?P<fk>[0-9]+)/$', views.EventsInformation().events_by_source_detail,
name='events_by_source_detail'),
url(r'^api/events/(?P<pk>[0-9]+)/json$', views.EventsInformation().event_detail, name='event_detail'),
url(r'^api/events/(?P<pk>[0-9]+)/$', views.EventsInformation.as_view()),
url(r'^api/events/hour/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_hour,
name='events_source_in_hour'),
url(r'^api/events/day/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_day,
name='events_source_in_day'),
url(r'^api/events/week/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_week,
name='events_source_in_week'),
url(r'^api/events/month/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_month,
name='events_source_in_month'),
url(r'^api/events/year/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_in_year,
name='events_source_in_year'),
url(r'^api/events/last_day/(?P<pk>[0-9]+)/$', views.EventsInformation().events_source_last_day,
name='events_source_last_day'),
]
urlpatterns = format_suffix_patterns(urlpatterns)
#!python
"""
VMController Host - a general purpose host-side virtual machine controller via exposed hypervisors apis.
"""
try:
import os
import sys
import logging
import warnings
import multiprocessing
import time
import inject
from twisted.internet import reactor
from pkg_resources import resource_stream
from ConfigParser import SafeConfigParser
from optparse import OptionParser
from vmcontroller.common import StompProtocolFactory, StompProtocol
from vmcontroller.host.config import init_config, init_config_file, debug_config
from vmcontroller.host.controller import HyperVisorController
from vmcontroller.host.services import HostStompEngine, HostWords
from vmcontroller.host.services.HostServices import Host, HostXMLRPCService
except ImportError, e:
print "Import error in %s : %s" % (__name__, e)
import sys
sys.exit()
logger = logging.getLogger(__name__)
def init_logging(logfile=None, loglevel=logging.INFO):
"""
Sets logging configuration.
@param logfile: File to log messages. Default is None.
@param loglevel: Log level. Default is logging.INFO.
"""
format = '%(asctime)s - [%(threadName)s] %(filename)s:%(lineno)s - (%(levelname)s) %(message)s'
if logfile:
logging.basicConfig(filename=logfile, level=loglevel, format=format)
else:
logging.basicConfig(level=loglevel, format=format)
def init():
"""
Initializes VMController Host package.
First parses command line options. Then, creates config object from default cfg file.
Re-initializes config object if a config file is supplied and sets logger configuration.
Finally, uses dependency injection to bind objects to names.
"""
parser = OptionParser()
parser.add_option("-c", "--config", dest="configfile",
help="Read configuration from FILE. (Overrides default config file.)", metavar="FILE")
parser.add_option("-a", "--host", dest="xmlrpc_host",
help="Listen on specified address for XMLRPC interface (default 127.0.0.1)", metavar="ADDR")
parser.add_option("-p", "--port", dest="xmlrpc_port",
help="Listen on specified port for XMLRPC interface (default 50505)", type="int", metavar="PORT")
parser.add_option("-l", "--logfile", dest="logfile",
help="Log to specified file.", metavar="FILE")
parser.add_option("--debug", action="store_true", dest="debug", default=False,
help="Sets logging to debug (unless logging configured in config file).")
(options, args) = parser.parse_args()
config = init_config()
injector = inject.Injector()
inject.register(injector)
injector.bind('config', to=config)
injector.bind('stompEngine', to=HostStompEngine, scope=inject.appscope)
injector.bind('words', to=HostWords.getWords)
injector.bind('stompProtocol', to=StompProtocol, scope=inject.appscope)
injector.bind('subject', to=Host)
injector.bind('hvController', to=HyperVisorController)
init_config_file(options.configfile)
if options.xmlrpc_host is not None:
config.set('xmlrpc', 'host', options.xmlrpc_host)
if options.xmlrpc_port is not None:
config.set('xmlrpc', 'port', str(options.xmlrpc_port))
level = logging.DEBUG if options.debug else logging.INFO
init_logging(logfile=options.logfile, loglevel=level)
#debug_config(config)
def start_coilmq(config, server_event, tries=-1, delay=1, backoff=1.5):
"""
Starts CoilMQ broker.
@param config: Config for CoilMQ.
@param server_event: Event attached to multiprocessing manager.
@param tries: Maximum retries to start the server. Default -1 (infinite).
@param delay: Time to wait before next try to start broker. Default 1.
@param backoff: Factor to set delay. Default 1.5.
"""
m_tries = tries
m_delay = delay
m_server = None
try:
from coilmq.config import config as broker_config
import coilmq.start
except ImportError, e:
print "Import error: %s\nPlease check." % e
exit()
if config.has_section('broker'):
for (attribute, value) in config.items('broker'):
if attribute != 'name':
broker_config.set('coilmq', attribute, value)
logger.debug("[coilmq] %s = %s" % (attribute, value))
broker_server = None
while True:
try:
broker_server = coilmq.start.server_from_config(broker_config)
logger.info("Stomp server listening on %s:%s" % broker_server.server_address)
server_event.set()
broker_server.serve_forever()
except (KeyboardInterrupt, SystemExit):
logger.info("Stomp server stopped by user interrupt.")
raise SystemExit()
except IOError as ex:
logger.error("Exception while starting coilmq broker: '%s'", ex)
if m_tries != 0:
logger.debug("Retrying coilmq startup in %.1f seconds...", m_delay)
time.sleep(m_delay)
m_delay *= backoff
m_tries -= 1
else:
logger.debug("Ran out of trials (tried %d times) for coilmq startup. Giving up.", tries)
break
except Exception, e:
logger.error("Stomp server stopped due to error: %s" % e)
logger.exception(e)
raise SystemExit()
finally:
if broker_server: broker_server.server_close()
@inject.param('config')
def init_coilmq(config, brokerTimeout=60):
"""
    Initializes and starts the CoilMQ stomp broker as a lightweight (multiprocessing) process.
@param config: Injected config object.
    @param brokerTimeout: Timeout to check whether the broker is running. Default 60s.
"""
manager = multiprocessing.Manager()
server_event = manager.Event()
    broker = multiprocessing.Process(target=start_coilmq, args=(config, server_event))
broker.daemon = False
broker.name = 'VMController-Broker'
broker.start()
server_event.wait(brokerTimeout)
if not server_event.is_set():
logger.fatal("Broker not available after %.1f seconds. Giving up", brokerTimeout)
return -1
@inject.param('config')
def init_morbid(config):
"""
    Starts up the lightweight, Twisted-based MorbidQ stomp broker.
@param config: Injected config object.
"""
try:
import morbid
except ImportError, e:
import sys
print "Import error: %s\nPlease check." % e
sys.exit()
morbid_factory = morbid.StompFactory(verbose=True)
broker_host = config.get('broker', 'host')
broker_port = int(config.get('broker', 'port'))
try:
reactor.listenTCP(broker_port, morbid_factory, interface=broker_host)
except:
logger.fatal("Unable to start Morbid, port may not be free. Exiting.")
import sys
sys.exit()
logger.info("Starting MorbidQ broker %s:%s", broker_host, broker_port)
@inject.param('config')
def start(config):
"""
Starts VMController Host.
@param config: The injected config object.
"""
broker_name = config.get('broker', 'name')
if broker_name == 'morbid':
init_morbid()
elif broker_name == 'coilmq':
init_coilmq()
else:
logger.fatal("No broker found... Exiting")
exit()
stompProtocolFactory = StompProtocolFactory()
xmlrpcService = HostXMLRPCService()
xmlrpcService.makeEngineAccesible()
host = config.get('broker', 'host')
port = int(config.get('broker', 'port'))
reactor.connectTCP(host, port, stompProtocolFactory)
reactor.run()
def main():
"""
Initializes and starts VMController Host.
"""
init()
logger.info("Welcome to VMController Host!")
start()
if __name__ == '__main__':
try:
main()
except (KeyboardInterrupt, SystemExit):
pass
except Exception, e:
logger.error("Server terminated due to error: %s" % e)
logger.exception(e)
import sys
out = sys.stdout
class Colors:
def black (self, fmt='', *args): self('\x1b[1;30m'+fmt+'\x1b[0m', *args)
def red (self, fmt='', *args): self('\x1b[1;31m'+fmt+'\x1b[0m', *args)
def green (self, fmt='', *args): self('\x1b[1;32m'+fmt+'\x1b[0m', *args)
def yellow (self, fmt='', *args): self('\x1b[1;33m'+fmt+'\x1b[0m', *args)
def blue (self, fmt='', *args): self('\x1b[1;34m'+fmt+'\x1b[0m', *args)
def purple (self, fmt='', *args): self('\x1b[1;35m'+fmt+'\x1b[0m', *args)
def cyan (self, fmt='', *args): self('\x1b[1;36m'+fmt+'\x1b[0m', *args)
def white (self, fmt='', *args): self('\x1b[1;37m'+fmt+'\x1b[0m', *args)
class PrintF(Colors):
def __call__(self, fmt='', *args):
out.write(fmt % args)
out.flush()
class WriteLine(Colors):
def __call__(self, fmt='', *args):
out.write(fmt % args)
out.write('\n')
def hexdump(blob, width=16, offset=0):
fmt = '%%.%dx: ' % len('%.x' % (len(blob) - 1))
while blob:
line = blob[:width]
blob = blob[width:]
printf.white(fmt, offset)
        printf.cyan(' '.join('%.2x' % ord(c) for c in line))
printf(' ' * ((width-len(line))*3+1))
for c in line:
if ord(c) < 32 or ord(c) > 126:
printf.black('.')
else:
                printf.white('%c', c)
writeln()
offset += width
__builtins__['printf'] = PrintF()
__builtins__['writeln'] = WriteLine()
__builtins__['hexdump'] = hexdump
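# Illustrative use of the injected helpers once this module has been imported
# (assumed byte string; the module name is hypothetical). hexdump prints an
# offset column, the hex bytes and a printable-ASCII gutter:
#   import hexutils  # installs printf/writeln/hexdump as builtins
#   hexdump('Hello, world!\x00\x01')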
.. versionadded:: 0.7
"""
# TODO move most of this down to the Mongo layer?
# TODO experiment with cursor.batch_size as alternative pagination
# implementation
def parse_aggregation_stage(d, key, value):
for st_key, st_value in d.items():
if isinstance(st_value, dict):
parse_aggregation_stage(st_value, key, value)
if key == st_value:
d[st_key] = value
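    # Illustrative substitution (assumed request): given a pipeline stage
    # {"$match": {"field": "$value"}} and an aggregation request supplying
    # {"$value": 42}, the stage becomes {"$match": {"field": 42}}.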
response = {}
documents = []
req = parse_request(resource)
req_pipeline = copy.deepcopy(pipeline)
if req.aggregation:
try:
query = json.loads(req.aggregation)
except ValueError:
abort(400, description='Aggregation query could not be parsed.')
for key, value in query.items():
if key[0] != '$':
pass
for stage in req_pipeline:
parse_aggregation_stage(stage, key, value)
if req.max_results > 1:
limit = {"$limit": req.max_results}
skip = {"$skip": (req.page - 1) * req.max_results}
req_pipeline.append(skip)
req_pipeline.append(limit)
cursor = app.data.aggregate(resource, req_pipeline, options)
for document in cursor:
documents.append(document)
response[config.ITEMS] = documents
# PyMongo's CommandCursor does not return a count, so we cannot
    # provide pagination/total count info as we do with a normal (non-aggregate)
# GET request.
return response, None, None, 200, []
def _perform_find(resource, lookup):
"""
.. versionadded:: 0.7
"""
documents = []
response = {}
etag = None
req = parse_request(resource)
embedded_fields = resolve_embedded_fields(resource, req)
# continue processing the full request
last_update = epoch()
# If-Modified-Since disabled on collections (#334)
req.if_modified_since = None
cursor = app.data.find(resource, req, lookup)
# If soft delete is enabled, data.find will not include items marked
# deleted unless req.show_deleted is True
for document in cursor:
build_response_document(document, resource, embedded_fields)
documents.append(document)
# build last update for entire response
if document[config.LAST_UPDATED] > last_update:
last_update = document[config.LAST_UPDATED]
status = 200
headers = []
last_modified = last_update if last_update > epoch() else None
response[config.ITEMS] = documents
if config.OPTIMIZE_PAGINATION_FOR_SPEED:
count = None
else:
count = cursor.count(with_limit_and_skip=False)
headers.append((config.HEADER_TOTAL_COUNT, count))
if config.DOMAIN[resource]['hateoas']:
response[config.LINKS] = _pagination_links(resource, req, count)
# add pagination info
if config.DOMAIN[resource]['pagination']:
response[config.META] = _meta_links(req, count)
# notify registered callback functions. Please note that, should the
# functions modify the documents, the last_modified and etag won't be
# updated to reflect the changes (they always reflect the documents
# state on the database.)
getattr(app, "on_fetched_resource")(resource, response)
getattr(app, "on_fetched_resource_%s" % resource)(response)
# the 'extra' cursor field, if present, will be added to the response.
# Can be used by Eve extensions to add extra, custom data to any
# response.
if hasattr(cursor, 'extra'):
getattr(cursor, 'extra')(response)
return response, last_modified, etag, status, headers
@ratelimit()
@requires_auth('item')
@pre_event
def getitem(resource, **lookup):
"""
Default function for handling GET requests to document endpoints, it has
decorators for rate limiting, authentication and for raising pre-request
events. After the decorators are applied forwards to call to
:func:`getitem_internal`
.. versionadded:: 0.6.2
"""
return getitem_internal(resource, **lookup)
def getitem_internal(resource, **lookup):
"""
:param resource: the name of the resource to which the document belongs.
:param **lookup: the lookup query.
.. versionchanged:: 0.6
Handle soft deleted documents
.. versionchanged:: 0.5
Allow ``?version=all`` requests to fire ``on_fetched_*`` events.
Create pagination links for document versions. (#475)
Pagination links reflect current query. (#464)
.. versionchanged:: 0.4
        HATEOAS links contain the business unit value even when
        regexes have been configured for the resource endpoint.
'on_fetched' now returns the whole response (HATEOAS metafields
included.)
Support for document versioning.
Changed ``on_fetch_*`` changed to ``on_fetched_*``.
.. versionchanged:: 0.3
Support for media fields.
When IF_MATCH is disabled, no etag is included in the payload.
.. versionchanged:: 0.1.1
        Support for Embedded Resource Serialization.
.. versionchanged:: 0.1.0
Support for optional HATEOAS.
    .. versionchanged:: 0.0.8
'on_getting_item' event is raised when a document has been read from the
database and is about to be sent to the client.
.. versionchanged:: 0.0.7
Support for Rate-Limiting.
.. versionchanged:: 0.0.6
Support for HEAD requests.
.. versionchanged:: 0.0.6
ETag added to payload.
.. versionchanged:: 0.0.5
Support for user-restricted access to resources.
Support for LAST_UPDATED field missing from documents, because they were
created outside the API context.
.. versionchanged:: 0.0.4
Added the ``requires_auth`` decorator.
.. versionchanged:: 0.0.3
        Superfluous ``response`` container removed. Links wrapped with
``_links``. Links are now properly JSON formatted.
"""
req = parse_request(resource)
resource_def = config.DOMAIN[resource]
    embedded_fields = resolve_embedded_fields(resource, req)
soft_delete_enabled = config.DOMAIN[resource]['soft_delete']
if soft_delete_enabled:
# GET requests should always fetch soft deleted documents from the db
        # They are handled and included in 404 responses below.
req.show_deleted = True
document = app.data.find_one(resource, req, **lookup)
if not document:
abort(404)
response = {}
etag = None
version = request.args.get(config.VERSION_PARAM)
latest_doc = None
cursor = None
# calculate last_modified before get_old_document rolls back the document,
# allowing us to invalidate the cache when _latest_version changes
last_modified = last_updated(document)
# synthesize old document version(s)
if resource_def['versioning'] is True:
latest_doc = document
document = get_old_document(
resource, req, lookup, document, version)
# meld into response document
build_response_document(document, resource, embedded_fields, latest_doc)
if config.IF_MATCH:
etag = document[config.ETAG]
if resource_def['versioning'] is True:
# In order to keep the LATEST_VERSION field up to date in client
# caches, changes to the latest version should invalidate cached
        # copies of previous versions. Incorporate the latest version into
# versioned document ETags on the fly to ensure 'If-None-Match'
# comparisons support this caching behavior.
etag += str(document[config.LATEST_VERSION])
# check embedded fields resolved in build_response_document() for more
# recent last updated timestamps. We don't want to respond 304 if embedded
# fields have changed
for field in embedded_fields:
embedded_document = document.get(field)
if isinstance(embedded_document, dict):
embedded_last_updated = last_updated(embedded_document)
if embedded_last_updated > last_modified:
last_modified = embedded_last_updated
# facilitate client caching by returning a 304 when appropriate
    cache_validator
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class WebofknowledgePipeline(object):
def process_item(self, item, spider):
return item
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from future.moves.urllib.parse import quote
import logging
from jinja2 import TemplateSyntaxError
from flexget import plugin
from flexget.event import event
from flexget.utils.search import normalize_unicode
log = logging.getLogger('search_rss')
class SearchRSS(object):
"""A generic search plugin that can use rss based search feeds. Configure it like rss
plugin, but include {{{search_term}}} in the url where the search term should go."""
schema = {'$ref': '/schema/plugin/rss'}
def search(self, task, entry, config=None):
from flexget.utils.template import environment
search_strings = [quote(normalize_unicode(s).encode('utf-8'))
for s in entry.get('search_strings', [entry['title']])]
rss_plugin = plugin.get_plugin_by_name('rss')
entries = set()
rss_config = rss_plugin.instance.build_config(config)
try:
template = environment.from_string(rss_config['url'])
except TemplateSyntaxError as e:
            raise plugin.PluginError('Invalid jinja template as rss url: %s' % e)
rss_config['all_entries'] = True
for search_string in search_strings:
rss_config['url'] = template.render({'search_term': search_string})
# TODO: capture some other_fields to try to find seed/peer/content_size numbers?
            try:
results = rss_plugin.phase_handlers['input'](task, rss_config)
except plugin.PluginError as e:
log.error('Error attempting to get rss for %s: %s', rss_config['url'], e)
else:
entries.update(results)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(SearchRSS, 'search_rss', groups=['search'], api_ver=2)
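# Illustrative task configuration (hypothetical tracker URL), mirroring the
# rss plugin's schema with the search term templated into the query string:
#   search_rss: http://example.com/rss.php?search={{search_term}}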
from . import visualize_classic_binning
from . import visualize_tree_binning
from . import visualize_llh
from . import visualize_model
__all__ = ('visualize_classic_binning',
           'visualize_llh',
'visualize_tree_binning',
'visualize_model')
# -*- encoding: utf-8 -*-
##############################################################################
#
# Daniel Campos (danielcampos@avanzosc.es) Date: 07/10/2014
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from . import models
from . import wizard
import sys
import struct
import logging
import ibapi
from ibapi.client import EClient
from ibapi.wrapper import EWrapper, iswrapper
import PyQt5.Qt as qt
import PyQt5.QtNetwork as qtnetwork
import tws_async.util as util
util.allowCtrlC()
__all__ = ['TWSClientQt', 'iswrapper']
class TWSClientQt(EWrapper, EClient):
"""
Version of ibapi.client.EClient that integrates with the Qt event loop.
"""
def __init__(self):
EClient.__init__(self, wrapper=self)
self.qApp = qt.QApplication.instance() or qt.QApplication(sys.argv)
self.readyTrigger = Trigger()
self._logger = logging.getLogger(__class__.__name__)
def reset(self):
EClient.reset(self)
self._data = b''
self._reqIdSeq = 0
def run(self):
self.qApp.exec_()
def connect(self, host, port, clientId, asyncConnect=False):
self._logger.info('Connecting to {}:{} with clientId {}...'.
format(host, port, clientId))
self.host = host
self.port = port
self.clientId = clientId
self.conn = TWSConnection(host, port)
self.conn.connect()
self.conn.socket.connected.connect(self._onSocketConnected)
        self.conn.socket.disconnected.connect(self._onSocketDisconnected)
self.conn.socket.readyRead.connect(self._onSocketReadyRead)
self.conn.socket.error.connect(self._onSocketError)
self.setConnState(EClient.CONNECTING)
if not asyncConnect:
self.readyTrigger.wait()
def getReqId(self) -> int:
"""
Get new request ID.
"""
assert self._reqIdSeq
newId = self._reqIdSeq
self._reqIdSeq += 1
return newId
def dataHandlingPre(self):
pass
def dataHandlingPost(self):
pass
def _prefix(self, msg):
# prefix a message with its length
return struct.pack('>I', len(msg)) + msg
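    # Illustrative framing (assumed values): _prefix(b'abc') yields
    # b'\x00\x00\x00\x03abc'; the peer first reads the 4-byte big-endian
    # length, then exactly that many payload bytes (see _onSocketReadyRead).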
def _onSocketConnected(self):
# start handshake
msg = b'API\0'
msg += self._prefix(b'v%d..%d' % (
ibapi.server_versions.MIN_CLIENT_VER,
ibapi.server_versions.MAX_CLIENT_VER))
self.conn.sendMsg(msg)
self.decoder = ibapi.decoder.Decoder(self.wrapper, None)
    def _onSocketDisconnected(self):
EClient.disconnect(self)
    def _onSocketError(self, socketError):
if self.conn.socket:
self._logger.error(self.conn.socket.errorString())
def _onSocketReadyRead(self):
self.dataHandlingPre()
self._data += bytes(self.conn.socket.readAll())
while True:
if len(self._data) <= 4:
break
# 4 byte prefix tells the message length
msgEnd = 4 + struct.unpack('>I', self._data[:4])[0]
if len(self._data) < msgEnd:
# insufficient data for now
break
msg = self._data[4:msgEnd]
self._data = self._data[msgEnd:]
fields = msg.split(b'\0')
fields.pop() # pop off last empty element
if not self.serverVersion_ and len(fields) == 2:
# this concludes the handshake
version, self.connTime = fields
self.serverVersion_ = int(version)
self.decoder.serverVersion = self.serverVersion_
self.setConnState(EClient.CONNECTED)
self.startApi()
self.wrapper.connectAck()
self._logger.info('Logged on to server version {}'.
format(self.serverVersion_))
else:
# snoop for next valid id response,
# it signals readiness of the client
if fields[0] == b'9':
_, _, validId = fields
self._reqIdSeq = int(validId)
self.readyTrigger.go()
# decode and handle the message
self.decoder.interpret(fields)
self.dataHandlingPost()
class TWSConnection:
"""
Replacement for ibapi.connection.Connection that uses a QTcpSocket.
"""
def __init__(self, host, port):
self.host = host
self.port = port
self.socket = None
def connect(self):
self.socket = qtnetwork.QTcpSocket()
# set TCP_NODELAY (disable Nagle's algorithm)
self.socket.setSocketOption(
qtnetwork.QAbstractSocket.LowDelayOption, True)
self.socket.connectToHost(self.host, self.port)
def disconnect(self):
self.socket.close()
self.socket = None
def isConnected(self):
return self.socket is not None
def sendMsg(self, msg):
self.socket.write(msg)
self.socket.flush()
class Trigger(qt.QObject):
"""
Wait synchronously on a trigger.
"""
trigger = qt.pyqtSignal()
def __init__(self):
qt.QObject.__init__(self)
def go(self):
self.trigger.emit()
def wait(self, timeout=5000):
spy = qt.QSignalSpy(self.trigger)
spy.wait(timeout)
class TWS_TestQt(TWSClientQt):
"""
Test to connect to a running TWS or gateway server.
"""
def __init__(self):
TWSClientQt.__init__(self)
@iswrapper
def updateAccountValue(self, key: str, val: str, currency: str,
accountName: str):
print('Account update: {} = {} {}'.format(key, val, currency))
if __name__ == '__main__':
util.logToConsole()
tws = TWS_TestQt()
tws.connect(host='127.0.0.1', port=7497, clientId=1)
tws.reqAccountUpdates(1, '')
tws.run()
expected_text="foo\r\r\r\n", ac | tual_text="foo\n")
tests.add('failures/expected/testharness.html',
actual_text='This is a testharness.js-based test.\nFAIL: assert fired\n.Harness: the test ran to completion.\n\n', expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('failures/expected/text.html', actual_text='text_fail-png')
    tests.add('failures/expected/crash_then_text.html')
tests.add('failures/expected/skip_text.html', actual_text='text diff')
tests.add('failures/flaky/text.html')
tests.add('failures/unexpected/missing_text.html', expected_text=None)
tests.add('failures/unexpected/missing_check.html', expected_image='missing-check-png')
tests.add('failures/unexpected/missing_image.html', expected_image=None)
tests.add('failures/unexpected/missing_render_tree_dump.html', actual_text="""layer at (0,0) size 800x600
RenderView at (0,0) size 800x600
layer at (0,0) size 800x34
RenderBlock {HTML} at (0,0) size 800x34
RenderBody {BODY} at (8,8) size 784x18
RenderText {#text} at (0,0) size 133x18
text run at (0,0) width 133: "This is an image test!"
""", expected_text=None)
tests.add('failures/unexpected/crash.html', crash=True)
tests.add('failures/unexpected/crash-with-stderr.html', crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/web-process-crash-with-stderr.html', web_process_crash=True,
error="mock-std-error-output")
tests.add('failures/unexpected/pass.html')
tests.add('failures/unexpected/text-checksum.html',
actual_text='text-checksum_fail-txt',
actual_checksum='text-checksum_fail-checksum')
tests.add('failures/unexpected/text-image-checksum.html',
actual_text='text-image-checksum_fail-txt',
actual_image='text-image-checksum_fail-pngtEXtchecksum\x00checksum_fail',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/checksum-with-matching-image.html',
actual_checksum='text-image-checksum_fail-checksum')
tests.add('failures/unexpected/skip_pass.html')
tests.add('failures/unexpected/text.html', actual_text='text_fail-txt')
tests.add('failures/unexpected/text_then_crash.html')
tests.add('failures/unexpected/timeout.html', timeout=True)
tests.add('http/tests/passes/text.html')
tests.add('http/tests/passes/image.html')
tests.add('http/tests/ssl/text.html')
tests.add('passes/args.html')
tests.add('passes/error.html', error='stuff going to stderr')
tests.add('passes/image.html')
tests.add('passes/audio.html',
actual_audio=base64.b64encode('audio-wav'), expected_audio='audio-wav',
actual_text=None, expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
tests.add('passes/platform_image.html')
tests.add('passes/checksum_in_image.html',
expected_image='tEXtchecksum\x00checksum_in_image-checksum')
tests.add('passes/skipped/skip.html')
tests.add('passes/testharness.html',
actual_text='This is a testharness.js-based test.\nPASS: assert is fine\nHarness: the test ran to completion.\n\n', expected_text=None,
actual_image=None, expected_image=None,
actual_checksum=None)
# Note that here the checksums don't match but the images do, so this test passes "unexpectedly".
# See https://bugs.webkit.org/show_bug.cgi?id=69444 .
tests.add('failures/unexpected/checksum.html', actual_checksum='checksum_fail-checksum')
# Text output files contain "\r\n" on Windows. This may be
# helpfully filtered to "\r\r\n" by our Python/Cygwin tooling.
tests.add('passes/text.html',
expected_text='\nfoo\n\n', actual_text='\nfoo\r\n\r\r\n')
# For reftests.
tests.add_reftest('passes/reftest.html', 'passes/reftest-expected.html', same_image=True)
# This adds a different virtual reference to ensure that that also works.
tests.add('virtual/passes/reftest-expected.html', actual_checksum='xxx', actual_image='XXX', is_reftest=True)
tests.add_reftest('passes/mismatch.html', 'passes/mismatch-expected-mismatch.html', same_image=False)
tests.add_reftest('passes/svgreftest.svg', 'passes/svgreftest-expected.svg', same_image=True)
tests.add_reftest('passes/xhtreftest.xht', 'passes/xhtreftest-expected.html', same_image=True)
tests.add_reftest('passes/phpreftest.php', 'passes/phpreftest-expected-mismatch.svg', same_image=False)
tests.add_reftest('failures/expected/reftest.html', 'failures/expected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/expected/mismatch.html', 'failures/expected/mismatch-expected-mismatch.html', same_image=True)
tests.add_reftest('failures/unexpected/crash-reftest.html', 'failures/unexpected/crash-reftest-expected.html', same_image=True, crash=True)
tests.add_reftest('failures/unexpected/reftest.html', 'failures/unexpected/reftest-expected.html', same_image=False)
tests.add_reftest('failures/unexpected/mismatch.html', 'failures/unexpected/mismatch-expected-mismatch.html', same_image=True)
tests.add('failures/unexpected/reftest-nopixel.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('failures/unexpected/reftest-nopixel-expected.html', actual_checksum=None, actual_image=None, is_reftest=True)
tests.add('reftests/foo/test.html')
tests.add('reftests/foo/test-ref.html')
tests.add('reftests/foo/multiple-match-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-match-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-mismatch-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-success.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/multiple-both-failure.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/matching-ref.html', actual_checksum='abc', actual_image='abc')
tests.add('reftests/foo/mismatching-ref.html', actual_checksum='def', actual_image='def')
tests.add('reftests/foo/second-mismatching-ref.html', actual_checksum='ghi', actual_image='ghi')
# The following files shouldn't be treated as reftests
tests.add_reftest('reftests/foo/unlistedtest.html', 'reftests/foo/unlistedtest-expected.html', same_image=True)
tests.add('reftests/foo/reference/bar/common.html')
tests.add('reftests/foo/reftest/bar/shared.html')
tests.add('websocket/tests/passes/text.html')
# For testing that we don't run tests under platform/. Note that these don't contribute to TOTAL_TESTS.
tests.add('platform/test-mac-leopard/http/test.html')
tests.add('platform/test-win-win7/http/test.html')
# For testing if perf tests are running in a locked shard.
tests.add('perf/foo/test.html')
tests.add('perf/foo/test-ref.html')
# For testing --pixel-test-directories.
tests.add('failures/unexpected/pixeldir/image_in_pixeldir.html',
actual_image='image_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_in_pixeldir-pngtEXtchecksum\x00checksum-png')
tests.add('failures/unexpected/image_not_in_pixeldir.html',
actual_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum_fail',
expected_image='image_not_in_pixeldir-pngtEXtchecksum\x00checksum-png')
# For testing that virtual test suites don't expand names containing themselves
# See webkit.org/b/97925 and base_unittest.PortTest.test_tests().
tests.add('passes/test-virtual-passes.html')
tests.add('passes/passes/test-virtual-passes.html')
return tests
# Here we use a non-standard location for the layout tests, to ensure that
# this works. The path contains a '.' in the name because we've seen bugs
# related to this before.
LAYOUT_TEST_DIR
import socket, sys,os,re
from struct import *
mymac=sys.argv[1]
rmac=sys.argv[2]
interface=sys.argv[3]
mode=sys.argv[4]
def address (a) :
b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))
return b
try:
s = socket.socket( socket.AF_PACKET , socket.SOCK_RAW , socket.ntohs(0x0003))
except socket.error , msg:
print 'Socket could not be created. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
sys.exit()
while True:
packet = s.recvfrom(65565)
packet = packet[0]
pack_length = 14
    pack_header = packet[:pack_length]
pack = unpack('!6s6sH' , pack_header)
pack_protocol = socket.ntohs(pack[2])
    #print 'Destination MAC : ' + address(packet[0:6]) + ' Source MAC : ' + address(packet[6:12])
#print rmac, interface , mode
router_mac=re.sub(r':',"",rmac)
pc_mac=re.sub(r':',"",mymac)
router_mac= router_mac[:-6]
if mymac == address(packet[0:6]) :
if rmac != address(packet[6:12]) and rmac != "01005e" and rmac != "ffffff" and rmac != "333300":
os.system("bash ./passive.sh '"+rmac+"' '"+interface+"' '"+mode+"' ")
elif mymac == address(packet[6:12]) :
if rmac != address(packet[0:6]) and rmac != "01005e" and rmac != "ffffff" and rmac != "333300":
os.system("bash ./passive.sh '"+rmac+"' '"+interface+"' '"+mode+"' ")
import uuid
import base64
import re
def generate_key():
"""
    generates a uuid, encodes it with base32 and strips its padding;
    this reduces the string size from 32 to 26 chars. the result is then
    lowercased and truncated to the first 12 chars.
"""
return base64.b32encode(uuid.uuid4().bytes).strip('=').lower()[0:12]
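# Illustrative output shape (random per call, hypothetical value):
#   generate_key()  # -> 'mfrggzdfmztw' (12 lowercase base32 characters)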
def thousand_separator(x=0, sep='.', dot=','):
"""
creates a string of number separated by selected delimiters
"""
num, _, frac = str(x).partition(dot)
num = re.sub(r'(\d{3})(?=\d)', r'\1'+sep, num[::-1])[::-1]
if frac:
num += dot + frac
return num
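# Illustrative examples (assumed call sites):
#   thousand_separator(1234567)    # -> '1.234.567'
#   thousand_separator('1234,5')   # -> '1.234,5' (fraction kept after the dot)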
def new_parser(passed_object, request_data):
"""
Maps passed request object from client into expected object.
Use this for creation of new object by passing an instantiated
empty object into the passed_object variable
"""
for item, value in request_data.values.iteritems():
        if hasattr(passed_object, item) and value is not None:
try:
setattr(passed_object, item, value)
except:
setattr(passed_object, item, convert_to_date(value))
    passed_object.id = generate_key()
return passed_object
def edit_parser(passed_object, request_data):
"""
Maps value from passed json object for data edit purposes.
You need to pass in object resulting from query into the
passed_object variable
"""
for item in request_data.values:
if item != "id" and hasattr(passed_object, item) and request_data.values.get(item) != None:
setattr(passed_object, item, request_data.values.get(item))
return passed_object
def convert_to_date(date_string):
from datetime import date
input = date_string.split("-")
return date(int(input[0]),int(input[1]),int(input[2]))
def multikeysort(items, columns):
from operator import itemgetter
comparers = [ ((itemgetter(col[1:].strip()), -1) if col.startswith('-') else (itemgetter(col.strip()), 1)) for col in columns]
    def comparer(left, right):
        for fn, mult in comparers:
            result = cmp(fn(left), fn(right))
            if result:
                return mult * result
        # only report a tie after every sort column has been compared
        return 0
    return sorted(items, cmp=comparer)
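# Illustrative usage (assumed data shape): sort by 'name' ascending, then by
# 'age' descending; a leading '-' flips the order for that column.
#   rows = [{'name': 'a', 'age': 25}, {'name': 'a', 'age': 30}]
#   multikeysort(rows, ['name', '-age'])  # -> age 30 before age 25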
tion: The type of videos to find (one of the globals MOVIES, MUSIC_VIDEOS or TVSHOWS).
:rtype: list
:return: A list of expired videos, along with a number of extra attributes specific to the video type.
"""
# A non-exhaustive list of pre-defined filters to use during JSON-RPC requests
# These are possible conditions that must be met before a video can be deleted
by_playcount = {u"field": u"playcount", u"operator": u"greaterthan", u"value": u"0"}
by_date_played = {u"field": u"lastplayed", u"operator": u"notinthelast", u"value": u"{0:f}".format(get_setting(expire_after))}
by_minimum_rating = {u"field": u"rating", u"operator": u"lessthan", u"value": u"{0:f}".format(get_setting(minimum_rating))}
by_no_rating = {u"field": u"rating", u"operator": u"isnot", u"value": u"0"}
    by_progress = {u"field": u"inprogress", u"operator": u"false", u"value": u""}
    by_exclusion1 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion1)}
by_exclusion2 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion2)}
by_exclusion3 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion3)}
by_exclusion4 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion4)}
by_exclusion5 = {u"field": u"path", u"operator": u"doesnotcontain", u"value": get_setting(exclusion5)}
# link settings and filters together
settings_and_filters = [
(get_setting(enable_expiration), by_date_played),
(get_setting(clean_when_low_rated), by_minimum_rating),
(get_setting(not_in_progress), by_progress),
        (get_setting(exclusion_enabled) and get_setting(exclusion1) != u"", by_exclusion1),
        (get_setting(exclusion_enabled) and get_setting(exclusion2) != u"", by_exclusion2),
        (get_setting(exclusion_enabled) and get_setting(exclusion3) != u"", by_exclusion3),
        (get_setting(exclusion_enabled) and get_setting(exclusion4) != u"", by_exclusion4),
        (get_setting(exclusion_enabled) and get_setting(exclusion5) != u"", by_exclusion5)
]
# Only check not rated videos if checking for video ratings at all
if get_setting(clean_when_low_rated):
settings_and_filters.append((get_setting(ignore_no_rating), by_no_rating))
enabled_filters = [by_playcount]
for s, f in settings_and_filters:
if s and f[u"field"] in self.supported_filter_fields[option]:
enabled_filters.append(f)
debug(u"[{0}] Filters enabled: {1}".format(self.methods[option], enabled_filters))
filters = {u"and": enabled_filters}
request = {
u"jsonrpc": u"2.0",
u"method": self.methods[option],
u"params": {
u"properties": self.properties[option],
u"filter": filters
},
u"id": 1
}
rpc_cmd = json.dumps(request)
response = xbmc.executeJSONRPC(rpc_cmd)
debug(u"[{0}] Response: {1}".format(self.methods[option], response.decode("utf-8")))
result = json.loads(response)
# Check the results for errors
try:
error = result[u"error"]
debug(u"An error occurred. {0}".format(error))
return None
except KeyError as ke:
if u"error" in ke:
pass # no error
else:
raise
debug(u"Building list of expired videos")
expired_videos = []
response = result[u"result"]
try:
debug(u"Found {0:d} watched {1} matching your conditions".format(response[u"limits"][u"total"], option))
debug(u"JSON Response: {0}".format(response))
for video in response[option]:
# Gather all properties and add it to this video's information
temp = []
for p in self.properties[option]:
temp.append(video[p])
expired_videos.append(temp)
except KeyError as ke:
            if option in str(ke):
pass # no expired videos found
else:
debug(u"KeyError: {0} not found".format(ke), xbmc.LOGWARNING)
debug(u"{0}".format(response), xbmc.LOGWARNING)
raise
finally:
debug(u"Expired videos: {0}".format(expired_videos))
return expired_videos
def unstack(self, path):
"""Unstack path if it is a stacked movie. See http://kodi.wiki/view/File_stacking for more info.
:type path: unicode
:param path: The path that should be unstacked.
:rtype: list
:return: A list of paths that are part of the stack. If it is no stacked movie, a one-element list is returned.
"""
if path.startswith(u"stack://"):
debug(u"Unstacking {0}.".format(path))
return path.replace(u"stack://", u"").split(u" , ")
else:
debug(u"Unstacking {0} is not needed.".format(path))
return [path]
def get_stack_bare_title(self, filenames):
"""Find the common title of files part of a stack, minus the volume and file extension.
Example:
["Movie_Title_part1.ext", "Movie_Title_part2.ext"] yields "Movie_Title"
:type filenames: list
:param filenames: a list of file names that are part of a stack. Use unstack() to find these file names.
:rtype: str
:return: common title of file names part of a stack
"""
title = os.path.basename(os.path.commonprefix([f.encode("utf-8") for f in filenames])).decode("utf-8")
for e in self.stacking_indicators:
if title.endswith(e):
title = title[:-len(e)].rstrip(u"._-")
break
return title
def delete_file(self, location):
"""
Delete a file from the file system. Also supports stacked movie files.
Example:
success = delete_file(location)
:type location: unicode
:param location: the path to the file you wish to delete.
:rtype: bool
:return: True if (at least one) file was deleted successfully, False otherwise.
"""
debug(u"Attempting to delete {0}".format(location))
paths = self.unstack(location)
success = []
for p in paths:
if xbmcvfs.exists(p):
success.append(bool(xbmcvfs.delete(p)))
else:
debug(u"File {0} no longer exists.".format(p), xbmc.LOGERROR)
success.append(False)
return any(success)
def delete_empty_folders(self, location):
"""
Delete the folder if it is empty. Presence of custom file extensions can be ignored while scanning.
To achieve this, edit the ignored file types setting in the addon settings.
Example:
success = delete_empty_folders(path)
:type location: unicode
:param location: The path to the folder to be deleted.
:rtype: bool
:return: True if the folder was deleted successfully, False otherwise.
"""
if not get_setting(delete_folders):
debug(u"Deleting of empty folders is disabled.")
return False
folder = self.unstack(location)[0] # Stacked paths should have the same parent, use any
debug(u"Checking if {0} is empty".format(folder))
ignored_file_types = [file_ext.strip() for file_ext in get_setting(ignore_extensions).split(u",")]
debug(u"Ignoring file types {0}".format(ignored_file_types))
subfolders, files = xbmcvfs.listdir(folder)
debug(u"Contents of {dir}:\nSubfolders: {sub}\nFiles: {files}".format(dir=folder, sub=subfolders, files=files))
empty = True
try:
for f in files:
_, ext = os.path.splitext(f)
            if ext and ext not in ignored_file_types: # ensure f is not a folder and its extension is
# file openpyxl/writer/strings.py
# Copyright (c) 2010-2011 openpyxl
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# @license: http://www.opensource.org/licenses/mit-license.php
# @author: see AUTHORS file
"""Write the shared string table."""
# Python stdlib imports
try:
# Python 2
from StringIO import StringIO
BytesIO = StringIO
except ImportError:
# Python 3
from io import BytesIO, StringIO
# package imports
from openpyxl.shared.xmltools import start_tag, end_tag, tag, XMLGenerator
def create_string_table(workbook):
"""Compile the string table for a workbook."""
strings = set()
for sheet in workbook.worksheets:
for cell in sheet.get_cell_collection():
if cell.data_type == cell.TYPE_STRING and cell._value is not None:
strings.add(cell.value)
return dict((key, i) for i, key in enumerate(strings))
def write_string_table(string_table):
"""Write the string table xml."""
temp_buffer = StringIO()
    doc = XMLGenerator(out=temp_buffer, encoding='utf-8')
start_tag(doc, 'sst', {'xmlns':
'http://schemas.openxmlformats.org/spreadsheetml/2006/main',
'uniqueCount': '%d' % len(string_table)})
strings_to_write = sorted(string_table.items(),
key=lambda pair: pair[1])
for key in [pair[0] for pair in strings_to_write]:
        start_tag(doc, 'si')
if key.strip() != key:
attr = {'xml:space': 'preserve'}
else:
attr = {}
tag(doc, 't', attr, key)
end_tag(doc, 'si')
end_tag(doc, 'sst')
string_table_xml = temp_buffer.getvalue()
temp_buffer.close()
return string_table_xml
class StringTableBuilder(object):
def __init__(self):
self.counter = 0
self.dct = {}
def add(self, key):
key = key.strip()
try:
return self.dct[key]
except KeyError:
res = self.dct[key] = self.counter
self.counter += 1
return res
def get_table(self):
return self.dct
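# Minimal illustrative round-trip (assumed inputs): repeated adds of the same
# string share one index, and the resulting dict can be serialized directly.
#   builder = StringTableBuilder()
#   builder.add('hello')   # -> 0
#   builder.add('hello')   # -> 0 (deduplicated)
#   xml = write_string_table(builder.get_table())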
import os
import peewee
from rivr_peewee import Database
DATABASE_URL = os.environ.get('DATABASE_URL')
if DATABASE_URL and DATABASE_URL.startswith('postgres://'):
DATABASE_URL = DATABASE_URL.replace('postgres://', 'postgres+pool://')
# disable auto connection
EXTRA_OPTIONS = 'autoconnect=false'
if '?' in DATABASE_URL:
DATABASE_URL += '&' + EXTRA_OPTIONS
else:
DATABASE_URL += '?' + EXTRA_OPTIONS
os.environ['DATABASE_URL'] = DATABASE_URL
database = Database()
class Device(database.Model):
    apns_token = peewee.CharField(max_length=64, unique=True)
def __repr__(self) -> str:
return '<Device {}>'.format(self.apns_token)
class Token(database.Model):
PUSH_SCOPE = 'push'
ALL_SCOPE = 'all'
device = peewee.ForeignKeyField(Device)
token = peewee.CharField(max_length=64, unique=True, primary_key=True)
scope = peewee.CharField(max_length=10, choices=(PUSH_SCOPE, ALL_SCOPE))
def __repr__(self) -> str:
return '<Token {} ({})>'.format(self.token, self.scope)
@property
def token_last_eight(self) -> str:
return self.token[-8:]
__author__ = "Johannes Köster"
__copyright__ = "Copyright 2015, Johannes Köster"
__email__ = "koester@jimmy.harvard.edu"
__license__ = "MIT"
import os
import mimetypes
import base64
import textwrap
import datetime
import io
from docutils.parsers.rst.directives.images import Image, Figure
from docutils.parsers.rst import directives
from docutils.core import publish_file
from snakemake.utils import format
from snakemake.logging import logger
class EmbeddedMixin(object):
"""
Replaces the URI of a directive with a base64-encoded version.
Useful for embedding images/figures in reports.
"""
def run(self):
"""
        Image.run() handles most of the work; afterwards the URI option is
        replaced with a base64 data URI.
"""
result = Image.run(self)
reference = directives.uri(self.arguments[0])
self.options['uri'] = data_uri(reference)
return result
# Create (and register) new image:: and figure:: directives that use a base64
# data URI instead of pointing to a filename.
class EmbeddedImage(Image, EmbeddedMixin):
pass
directives.register_directive('embeddedimage', EmbeddedImage)
class EmbeddedFigure(Figure, EmbeddedMixin):
pass
directives.register_directive('embeddedfigure', EmbeddedFigure)
def data_uri(file, defaultenc="utf8"):
"""Craft a base64 data URI from file with proper encoding and mimetype."""
mime, encoding = mimetypes.guess_type(file)
if mime is None:
mime = "text/plain"
logger.info("Could not detect mimetype for {}, assuming "
"text/plain.".format(file))
if encoding is None:
encoding = defaultenc
with open(file, "rb") as f:
data = base64.b64encode(f.read())
uri = ("data:{mime};charset={charset};filename={filename};base64,{data}"
"".format(filename=os.path.basename(file),
mime=mime,
charset=encoding,
data=data.decode()))
return uri
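# Illustrative call (hypothetical file name): a PNG becomes a self-contained
# data URI such as "data:image/png;charset=utf8;filename=plot.png;base64,...".
#   uri = data_uri("plot.png")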
def report(text, path,
stylesheet=os.path.join(os.path.dirname(__file__), "report.css"),
defaultenc="utf8",
template=None,
metadata=None, **files):
outmime, _ = mimetypes.guess_type(path)
if outmime != "text/html":
raise ValueError("Path to report output has to be an HTML file.")
definitions = textwrap.dedent("""
.. role:: raw-html(raw)
:format: html
""")
metadata = textwrap.dedent("""
.. container::
:name: metadata
{metadata}{date}
""").format(metadata=metadata + " | " if metadata else "",
date=datetime.date.today().isoformat())
text = format(textwrap.dedent(text), stepout=3)
attachments = [textwrap.dedent("""
.. container::
:name: attachments
""")]
for name, _files in sorted(files.items()):
if not isinstance(_files, list):
_files = [_files]
links = []
for file in _files:
data = data_uri(file)
links.append(':raw-html:`<a href="{data}" download="{filename}" draggable="true">{filename}</a>`'.format(
data=data, filename=os.path.basename(file)))
links = "\n\n ".join(links)
attachments.append('''
.. container::
:name: {name}
{name}:
{links}
'''.format(name=name,
links=links))
text = definitions + text + "\n\n" + "\n\n".join(attachments) + metadata
overrides = dict()
if template is not None:
overrides["template"] = template
if stylesheet is not None:
overrides["stylesheet_path"] = stylesheet
html = open(path, "w")
publish_file(source=io.StringIO(text),
destination=html,
writer_name="html",
settings_overrides=overrides)
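# Hedged usage sketch (file names made up): renders the RST text to HTML and
# embeds fig.png as a downloadable attachment under the name F1.
#
#     report("""
#     Results
#     =======
#
#     See the attached figure F1.
#     """, "report.html", metadata="Author: J. Doe", F1="fig.png")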
|
p://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from neutron_lib.api import validators
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
import six
from neutron._i18n import _
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.extensions import l3
DNS_LABEL_MAX_LEN = 63
DNS_LABEL_REGEX = "[a-z0-9-]{1,%d}$" % DNS_LABEL_MAX_LEN
FQDN_MAX_LEN = 255
DNS_DOMAIN_DEFAULT = 'openstacklocal.'
class DNSDomainNotFound(n_exc.NotFound):
message = _("Domain %(dns_domain)s not found in the external DNS service")
class DuplicateRecordSet(n_exc.Conflict):
message = _("Name %(dns_name)s is duplicated in the external DNS service")
class ExternalDNSDriverNotFound(n_exc.NotFound):
message = _("External DNS driver %(driver)s could not be found.")
class InvalidPTRZoneConfiguration(n_exc.Conflict):
message = _("Value of %(parameter)s has to be multiple of %(number)s, "
"with maximum value of %(maximum)s and minimum value of "
"%(minimum)s")
def _validate_dns_name(data, max_len=FQDN_MAX_LEN):
msg = _validate_dns_format(data, max_len)
if msg:
return msg
request_dns_name = _get_request_dns_name(data)
if request_dns_name:
msg = _validate_dns_name_with_dns_domain(request_dns_name)
if msg:
return msg
def _validate_fip_dns_name(data, max_len=FQDN_MAX_LEN):
msg = validators.validate_string(data)
if msg:
return msg
if not data:
return
if data.endswith('.'):
msg = _("'%s' is a FQDN. It should be a relative domain name") % data
return msg
msg = _validate_dns_format(data, max_len)
if msg:
return msg
length = len(data)
if length > max_len - 3:
msg = _("'%(data)s' contains '%(length)s' characters. Adding a "
"domain name will cause it to exceed the maximum length "
"of a FQDN of '%(max_len)s'") % {"data": data,
"length": length,
"max_len": max_len}
return msg
def _validate_dns_domain(data, max_len=FQDN_MAX_LEN):
msg = validators.validate_string(data)
if msg:
return msg
if not data:
return
if not data.endswith('.'):
msg = _("'%s' is not a FQDN") % data
return msg
msg = _validate_dns_format(data, max_len)
if msg:
return msg
length = len(data)
if length > max_len - 2:
msg = _("'%(data)s' contains '%(length)s' characters. Adding a "
"sub-domain will cause it to exceed the maximum length of a "
"FQDN of '%(max_len)s'") % {"data": data,
"length": length,
"max_len": max_len}
return msg
def _validate_dns_format(data, max_len=FQDN_MAX_LEN):
# NOTE: An individual name regex instead of an entire FQDN was used
# because it's easier to make correct. The logic should validate that the
# dns_name matches RFC 1123 (section 2.1) and RFC 952.
if not data:
return
try:
# Trailing periods are allowed to indicate that a name is fully
# qualified per RFC 1034 (page 7).
trimmed = data if not data.endswith('.') else data[:-1]
if len(trimmed) > 255:
raise TypeError(
_("'%s' exceeds the 255 character FQDN limit") % trimmed)
names = trimmed.split('.')
for name in names:
if not name:
raise TypeError(_("Encountered an empty component."))
if name.endswith('-') or name[0] == '-':
raise TypeError(
_("Name '%s' must not start or end with a hyphen.") % name)
if not re.match(DNS_LABEL_REGEX, name):
raise TypeError(
_("Name '%s' must be 1-63 characters long, each of "
"which can only be alphanumeric or a hyphen.") % name)
# RFC 1123 hints that a TLD can't be all numeric. last is a TLD if
# it's an FQDN.
if len(names) > 1 and re.match("^[0-9]+$", names[-1]):
raise TypeError(_("TLD '%s' must not be all numeric") % names[-1])
except TypeError as e:
msg = _("'%(data)s' not a valid PQDN or FQDN. Reason: %(reason)s") % {
'data': data, 'reason': str(e)}
return msg
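# For illustration (hedged, example names made up): the validator returns None
# on success and a message on failure, e.g.
#   _validate_dns_format('host-1.example.org.')  # -> None (valid FQDN)
#   _validate_dns_format('-bad.example.org')     # -> "... must not start or end with a hyphen ..."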
def _validate_dns_name_with_dns_domain(request_dns_name):
# If a PQDN was passed, make sure the FQDN that will be generated is of
# legal size
dns_domain = _get_dns_domain()
higher_labels = dns_domain
if dns_domain:
higher_labels = '.%s' % dns_domain
higher_labels_len = len(higher_labels)
dns_name_len = len(request_dns_name)
if not request_dns_name.endswith('.'):
if dns_name_len + higher_labels_len > FQDN_MAX_LEN:
msg = _("The dns_name passed is a PQDN and its size is "
"'%(dns_name_len)s'. The dns_domain option in "
"neutron.conf is set to %(dns_domain)s, with a "
"length of '%(higher_labels_len)s'. When the two are "
"concatenated to form a FQDN (with a '.' at the end), "
"the resulting length exceeds the maximum size "
"of '%(fqdn_max_len)s'"
) % {'dns_name_len': dns_name_len,
'dns_domain': cfg.CONF.dns_domain,
'higher_labels_len': higher_labels_len,
'fqdn_max_len': FQDN_MAX_LEN}
return msg
return
# A FQDN was passed
if (dns_name_len <= higher_labels_len or not
request_dns_name.endswith(higher_labels)):
msg = _("The dns_name passed is a FQDN. Its higher level labels "
"must be equal to the dns_domain option in neutron.conf, "
"that has been set to '%(dns_domain)s'. It must also "
"include one or more valid DNS labels to the left "
"of '%(dns_domain)s'") % {'dns_domain':
cfg.CONF.dns_domain}
return msg
def _get_dns_domain():
if not cfg.CONF.dns_domain:
return ''
if cfg.CONF.dns_domain.endswith('.'):
return cfg.CONF.dns_domain
return '%s.' % cfg.CONF.dns_domain
def _get_request_dns_name(data):
dns_domain = _get_dns_domain()
if ((dns_domain and dns_domain != DNS_DOMAIN_DEFAULT)):
return data
return ''
def convert_to_lowercase(data):
if isinstance(data, six.string_types):
return data.lower()
msg = _("'%s' cannot be converted to lowercase string") % data
raise n_exc.InvalidInput(error_message=msg)
validators.add_validator('dns_name', _validate_dns_name)
validators.add_validator('fip_dns_name', _validate_fip_dns_name)
validators.add_validator('dns_domain', _validate_dns_domain)
DNSNAME = 'dns_name'
DNSDOMAIN = 'dns_domain'
DNSASSIGNMENT = 'dns_assignment'
EXTENDED_ATTRIBUTES_2_0 = {
'ports': {
DNSNAME: {'allow_post': True, 'allow_put': True,
'default': '',
'convert_to': convert_to_lowercase,
'validate': {'type:dns_name': FQDN_MAX_LEN},
'is_visible': True},
DNSASSIGNMENT: {'allow_post': False, 'allow_put': False,
'is_visible': True},
},
l3.FLOATINGIPS: {
DNSNAME: {'allow_post': True, 'allow_put': False,
'default': '',
'convert_to': convert_to_lowercase,
'validate': {'type:fip_dns_name': FQDN_MAX_LEN},
'is_visible': True},
DNSDOMAIN: {'allow_post': True, 'allow_put': False,
'default': |
#!/usr/bin/env python
from distutils.core import setup
setup(name = "quasi",
version = "0.87",
description = "A multiple-context Python shell",
author = "Ben Last",
author_email = "ben@benlast.com",
url = "http://quasi-shell.sourceforge.net/",
license = "BSD",
scripts = ["quasi.py"],
data_files = [("share/licenses/quasi", ["LICENSE"])],
extra_path = "quasi",
packages = ["."]
)
|
from serial import Serial
import time
import platform
import socket
serialPort = Serial('COM3' if platform.system() == 'Windows' else '/dev/ttyUSB0', 9600)
time.sleep(2)
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('', 2222))
server.listen(1)
while True:
(client, address) = server.accept()
print('Connected')
while True:
data = client.recv(6)
if b'CLOSE' in data: break
#print(data)
serialPort.write(data)
|
#!/usr/bin/env python3
# Copyright 2015, 2016 Endless Mobile, Inc.
# This file is part of eos-event-recorder-daemon.
#
# eos-event-recorder-daemon is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or (at your
# option) any later version.
#
# eos-event-recorder-daemon is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with eos-event-recorder-daemon. If not, see
# <http://www.gnu.org/licenses/>.
import gzip
import http.server
import sys
class PrintingHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_PUT(self):
print(self.path, flush=True)
content_encoding = self.headers['X-Endless-Content-Encoding']
print(content_encoding, flush=True)
content_length = int(self.headers['Content-Length'])
compressed_request_body = self.rfile.read(content_length)
decompressed_request_body = gzip.decompress(compressed_request_body)
print(len(decompressed_request_body), flush=True)
sys.stdout.buffer.write(decompressed_request_body)
sys.stdout.buffer.flush()
status_code_str = sys.stdin.readline()
status_code = int(status_code_str)
self.send_response(status_code)
self.end_headers()
# A metrics server that simply prints the requests it receives to stdout
class MockServer(http.server.HTTPServer):
def __init__(self):
SERVER_ADDRESS = ('localhost', 0)
super().__init__(SERVER_ADDRESS, PrintingHTTPRequestHandler)
if __name__ == '__main__':
mock_server = MockServer()
print(mock_server.server_port, flush=True)
mock_server.serve_forever()
|
#! /usr/bin/env python
#script that takes optional arguments for the date and target collection and calculates the angular separation and elevation of each target from the Moon.
import ephem, subprocess, operator, argparse
#host & port info
hostName="veritase.sao.arizona.edu"
portNum=""
#hostName="lucifer1.spa.umn.edu"
#portNum=33060
#dict for sorting/writing info
moonlightsources = {}
#setting up ephem observer object for veritas
veritas = ephem.Observer()
veritas.lat = '31:40.51'
veritas.lon = '-110:57.132'
veritas.elevation = 1268
#argument parser
parser = argparse.ArgumentParser(description='Takes optional arguments to specify date and target collection. If no arguments are specified, will calculate angular distances from the Moon at the current time for all moonlight targets')
parser.add_argument('--date',default=veritas.date, help='specify DATE (in UT) in the format "YYYY/MM/DD HH:MM" don\'t forget the quotation marks')
parser.add_argument('--targets',default='moonlight_targets',help='Specifies the collection(s) of targets; multiple collections may be given, comma-separated. Useful values for TARGETS: moonlight_targets,reduced_HV_targets,moonlight_bright,primary_targets,secondary_targets,blazar_filler_targets')
parser.add_argument('--nocuts',help = 'displays results for all targets in the list, even if they fail the moon distance and elevation cuts', action = "store_true")
args = parser.parse_args()
#setting date/time to user-specified value (or default to current date/time)
veritas.date = args.date
#letting user know the date and target collection used.
print
print "Date and time used (in UT): %s" %veritas.date
print
print "Calculating angular distances from the Moon for targets in %s collection..." %args.targets
#MySQL command, runs on command line through subprocess
targetList = args.targets.split(",")
#for collection in args.targets.split(","):
for n in range(0, len(targetList) ):
if n == 0:
execCMD = "SELECT tblObserving_Collection.source_id,ra,decl,epoch FROM tblObserving_Sources JOIN tblObserving_Collection ON tblObserving_Sources.source_id = tblObserving_Collection.source_id WHERE tblObserving_Collection.collection_id='%s'" %targetList[n]
else:
execCMD = execCMD + " OR tblObserving_Collection.collection_id='%s'" %targetList[n]
sqlOut = subprocess.Popen(["mysql","-h","%s" %(hostName),"-P","%s" %(portNum),"-u", "readonly", "-D","VERITAS", "--execute=%s" %(execCMD)], stdout=subprocess.PIPE)
#stores query results
QUERY, err = sqlOut.communicate()
if QUERY == "":
print
print "Query result is empty. Make sure date and target collection provided are valid. Going to crash now :("
#loop through all objects in the bright moonlight list
#calculating and printing out angular separation from moon
for count,source in enumerate(QUERY.rstrip().split("\n")):
#skip header in query results
if count == 0:
continue
#parsing through query results
sourceName=source.split("\t")[0]
sourceRA=source.split("\t")[1]
sourceDEC=source.split("\t")[2]
sourceEpoch=source.split("\t")[3]
#makes sure same epoch is used
veritas.epoch = float(sourceEpoch)
#Define ephem moon object and calculate position (ra, dec) and phase
TheMoon = ephem.Moon(veritas)
TheMoon.compute(veritas)
illum = TheMoon.moon_phase*100.
#Get angular separation of moon and target
degFromMoon = 180./ephem.pi * ephem.separation((TheMoon.ra,TheMoon.dec),(float(sourceRA),float(sourceDEC)))
#Define ephem object for source, to get elevation
sourceobj = ephem.FixedBody()
sourceobj._ra = float(sourceRA)
sourceobj._dec = float(sourceDEC)
sourceobj.compute(veritas)
sourceALT = sourceobj.alt*180./ephem.pi
moonlightsources[sourceName]=[(degFromMoon,sourceALT)]
#end of for loop
sorted_sources = sorted(moonlightsources.iteritems(), key=operator.itemgetter(1), reverse=True)
#print sorted_sources
if not args.nocuts: #printing only targets that pass the cuts
print "Only showing targets with elevation > 20 degrees and moon distance > 10 degrees"
print
print "Source\t\t\tDegrees from Moon\tElevation"
print "--------------------------------------------------------------"
for s in sorted_sources:
if s[1][0][1] > 20 and s[1][0][0] > 10:
if len(s[0]) <=7:
print "%s\t\t\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
elif len(s[0]) <=15:
print "%s\t\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
else:
print "%s\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
else:#printing all targets, when cuts are disabled
print
print "Source\t\t\tDegrees from Moon\tElevation"
print "--------------------------------------------------------------"
for s in sorted_sources:
if len(s[0]) <=7:
print "%s\t\t\t%0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
elif len(s[0]) <=15:
print "%s\t\t %0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
else:
print "%s\t %0.3f\t\t\t%0.3f" %(s[0],s[1][0][0],s[1][0][1])
print "--------------------------------------------------------------"
print "The Moon is %0.2f%% illuminated" % illum
print
|
'''
This script removes directories that contain only xml files.
'''
import os
srcpath = raw_input("Enter the source path : ")
for root, sub, files in os.walk(os.path.abspath(srcpath)):
if files:
files = [f for f in files if not f.endswith('.xml')]
if not files:
fpath = os.path.join(root)
os.system('rm -rf %s' % fpath)
print "removed", fpath
|
# -*- coding: utf-8 -*-
import argparse
import time
import socket
import sys
import json
__prog__ = 'xbmc-command'
PROG = __prog__
__version__ = '0.4.1'
VERSION = __version__
class XBMC(object):
def __init__(self, host, port):
self.address = (host, port)
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__buffer = ""
self.__decode = json.JSONDecoder().raw_decode
self.settimeout(0)
def settimeout(self, timeout):
self.__timeout = timeout
self.__socket.settimeout(timeout if timeout > 0 else None)
def connect(self):
self.__socket.connect(self.address)
def close(self):
if self.__socket:
self.__socket.close()
def shutdown(self):
self.__socket.shutdown(socket.SHUT_RDWR)
def __getattr__(self, key):
return Rpc(self, key)
def send(self, req):
self.__socket.send(bytearray(req, 'utf-8'))
def recv(self, json_rpc_id):
start = time.time()
while True:
if self.__timeout > 0 and time.time() - start > self.__timeout:
raise CommandException("read timeout")
try:
data = self.__socket.recv(1024)
except socket.timeout:
raise CommandException("read timeout")
if not data:
return None
self.__buffer += data.decode('utf-8')
while True:
json_result = None
try:
json_result, index = self.__decode(self.__buffer)
self.__buffer = self.__buffer[index:]
except ValueError:
break
if json_result and 'id' in json_result and \
json_result['id'] == json_rpc_id:
return json_result
return None
class Rpc(object):
__REQ__ = '{"jsonrpc":"2.0", "method":"%s", "params":%s, "id":"%s"}'
def __init__(self, xbmc, method):
self.__xbmc = xbmc
self.__method = method
def __getattr__(self, key):
return Rpc(self.__xbmc, "%s.%s" % (self.__method, key))
def __call__(self, *args, **kwargs):
params = '{}'
ident = str(kwargs['id']) if 'id' in kwargs else self.__method
if args:
params = json.dumps(args[0])
elif 'params' in kwargs:
params = json.dumps(kwargs['params'])
self.__xbmc.send(Rpc.__REQ__ % (self.__method, params, ident))
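# Hedged usage sketch (host/port made up): attribute access chains into the
# JSON-RPC method name, so the call below sends
#   {"jsonrpc":"2.0", "method":"Player.PlayPause", "params":{"playerid": 0}, "id":"Player.PlayPause"}
#
#     xbmc = XBMC('127.0.0.1', 9090)
#     xbmc.connect()
#     xbmc.Player.PlayPause(params={'playerid': 0})
#     reply = xbmc.recv('Player.PlayPause')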
class CommandException(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
def __str__(self):
return self.msg
class Command(object):
def __init__(self):
self.xbmc = None
def call(self, args):
raise NotImplementedError("Please Implement this method")
def run_command(self, args):
try:
self.xbmc.connect()
except socket.timeout:
raise CommandException("Unable to connect to host %s:%s" % \
(self.xbmc.address[0], self.xbmc.address[1]))
except socket.error as err:
self.xbmc.close()
raise CommandException("Could not open socket: " + str(err))
self.call(args)
def get_active_player_id(self):
self.xbmc.Player.GetActivePlayers()
result = self.xbmc.recv('Player.GetActivePlayers')
if not result:
raise CommandException('unable to receive active players')
if len(result['result']) <= 0:
return -1
for player in result['result']:
if player['type'] in ('audio', 'video'):
return player['playerid']
return result['result'][0]['playerid']
@property
def parser(self):
parser = argparse.ArgumentParser(add_help=False)
self.create_parser(parser)
parser.add_argument('--help', action='help',
help='show this help message and exit')
return parser
def create_parser(self, parser):
return parser
def parse_args(self, args):
return self.parser.parse_args(args)
@property
def short_description(self):
return ''
# vim: ft=python ts=8 sts=4 sw=4 et:
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLimma(RPackage):
"""Data analysis, linear models and differential expression
for microarray data."""
homepage = "https://www.bioconductor.org/packages/limma/"
url = "https://git.bioconductor.org/packages/limma"
list_url = homepage
version('3.32.10', git='https://git.bioconductor.org/packages/limma', commit='593edf28e21fe054d64137ae271b8a52ab05bc60')
version('3.32.6', 'df5dc2b85189a24e939efa3a8e6abc41')
depends_on('r@3.4.0:3.4.9', when='@3.32.10')
|
# --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
import unittest
import subprocess
import sys
import isodate
import tempfile
import json
from datetime import date, datetime, timedelta
import os
from os.path import dirname, pardir, join, realpath
cwd = dirname(realpath(__file__))
log_level = int(os.environ.get('PythonLogLevel', 10))
tests = realpath(join(cwd, pardir, "Expected", "AcceptanceTests"))
sys.path.append(join(tests, "Validation"))
from msrest.serialization import Deserializer
from msrest.exceptions import DeserializationError, ValidationError
from auto_rest_validation_test import AutoRestValidationTest
from auto_rest_validation_test.models import (
Product,
ConstantProduct,
ChildProduct)
class ValidationTests(unittest.TestCase):
def test_constant_values(self):
client = AutoRestValidationTest(
"abc123",
base_url="http://localhost:3000")
client.api_version = "12-34-5678"
client.get_with_constant_in_path()
body = Product(child=ChildProduct())
product = client.post_with_constant_in_body(body=body)
self.assertIsNotNone(product)
def test_validation(self):
client = AutoRestValidationTest(
"abc123",
base_url="http://localhost:3000")
client.api_version = "12-34-5678"
try:
client.validation_of_method_parameters("1", 100)
except ValidationError as err:
self.assertEqual(err.rule, "min_length")
self.assertEqual(err.target, "resource_group_name")
try:
client.validation_of_method_parameters("1234567890A", 100)
except ValidationError as err:
self.assertEqual(err.rule, "max_length")
self.assertEqual(err.target, "resource_group_name")
try:
client.validation_of_method_parameters("!@#$", 100)
except ValidationError as err:
self.assertEqual(err.rule, "pattern")
self.assertEqual(err.target, "resource_group_name")
try:
client.validation_of_method_parameters("123", 105)
except ValidationError as err:
self.assertEqual(err.rule, "multiple")
self.assertEqual(err.target, "id")
try:
client.validation_of_method_parameters("123", 0)
except ValidationError as err:
self.assertEqual(err.rule, "minimum")
self.assertEqual(err.target, "id")
try:
client.validation_of_method_parameters("123", 2000)
except ValidationError as err:
self.assertEqual(err.rule, "maximum")
self.assertEqual(err.target, "id")
try:
tempproduct=Product(child=ChildProduct(), capacity=0)
client.validation_of_body("123", 150, tempproduct)
except ValidationError as err:
self.assertEqual(err.rule, "minimum_ex")
self.assertIn("capacity", err.target)
try:
tempproduct=Product(child=ChildProduct(), capacity=100)
client.validation_of_body("123", 150, tempproduct)
except ValidationError as err:
self.assertEqual(err.rule, "maximum_ex")
self.assertIn("capacity", err.target)
try:
tempproduct=Product(child=ChildProduct(),
display_names=["item1","item2","item3","item4","item5","item6","item7"])
client.validation_of_body("123", 150, tempproduct)
except ValidationError as err:
self.assertEqual(err.rule, "max_items")
self.assertIn("display_names", err.target)
client2 = AutoRestValidationTest(
"abc123",
base_url="http://localhost:3000")
client2.api_version = "abc"
try:
client2.validation_of_method_parameters("123", 150)
except ValidationError as err:
self.assertEqual(err.rule, "pattern")
self.assertEqual(err.target, "self.api_version")
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from solidspy.solids_GUI import solids_GUI
solids_GUI()
class Reverb:
pass
class ScatterLocation:
pass
class Chorus:
pass
class Vocoder:
pass
class ShuffleSound:
pass
|
# -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import absolute_import, division, print_function
import collections
import itertools
import logging
from shot_detector.utils.dsl import DslOperatorMixin
from shot_detector.utils.dsl.dsl_kwargs import dsl_kwargs_decorator
class DslFilterMixin(DslOperatorMixin):
"""
Basic filter mixin to build Filter-DSL
"""
__logger = logging.getLogger(__name__)
@staticmethod
def dsl_kwargs_decorator(*dsl_rules):
"""
:param dsl_rules:
:return:
"""
return dsl_kwargs_decorator(*dsl_rules)
def __or__(self, other):
"""
:param Filter other:
:return:
"""
return self.apply_sequence([other])
def __ror__(self, other):
"""
:param Filter other:
:return:
"""
return self.apply_sequence([other])
def apply_sequence(self, others):
"""
:param others:
:return:
"""
filters = self.cast_to_apply_sequence(others)
filter_sequence = self.apply_filter_sequence(filters)
return filter_sequence
def apply_filter_sequence(self, filters):
"""
Extends current own `sequential_filters` with `filters`
or creates a new `FilterSequence`.
:param filters:
:return:
"""
from .filter_sequence import FilterSequence
if isinstance(self, FilterSequence):
self_filters = self.sequential_filters
joined_filters = itertools.chain(self_filters, filters)
filter_sequence = self
else:
joined_filters = itertools.chain([self], filters)
filter_sequence = FilterSequence
joined_filter_list = list(joined_filters)
filter_sequence = filter_sequence(
sequential_filters=joined_filter_list
)
return filter_sequence
# @staticmethod
def cast_to_apply_sequence(self, others):
"""
:param others:
:return:
"""
from .filter_cast_features import FilterCastFeatures
for other in others:
if isinstance(other, tuple):
other = DslFilterMixin.to_tuple(*other)
if not isinstance(other, DslFilterMixin):
other = FilterCastFeatures(
op_func=other,
parallel_filters=[self]
)
yield other
def apply_operator(self,
op_func=None,
others=None,
op_mode=None,
**kwargs):
"""
:param op_func:
:param others:
:param op_mode:
:param kwargs:
:return:
"""
filters = self.cast_to_apply_operator(others)
filter_operator = self.apply_filter_operator(
op_func=op_func,
filters=filters,
op_mode=op_mode,
)
return filter_operator
def apply_filter_operator(self,
op_func=None,
filters=None,
op_mode=None,
**kwargs):
"""
:param op_func:
:param filters:
:param op_mode:
:param kwargs:
:return:
"""
from .filter_operator import FilterOperator, FilterOperatorMode
fo_op_mode = FilterOperatorMode.LEFT
if op_mode is self.Operator.RIGHT:
fo_op_mode = FilterOperatorMode.RIGHT
# joined_filters = itertools.chain([self], filters)
filter_operator = FilterOperator(
op_func=op_func,
op_mode=fo_op_mode,
# parallel_filters=list(joined_filters),
**kwargs
)
if isinstance(self, FilterOperator) and filter_operator == self:
self_filters = self.parallel_filters
joined_filters = itertools.chain(self_filters, filters)
filter_operator = self
else:
joined_filters = itertools.chain([self], filters)
joined_filter_list = list(joined_filters)
filter_operator = filter_operator(
parallel_filters=joined_filter_list
)
return filter_operator
@classmethod
def to_tuple(cls, *args):
"""
:param args:
:return:
"""
from .filter_tuple import FilterTuple
filter_tuple = FilterTuple(
parallel_filters=list(args)
)
return filter_tuple
def cast_to_apply_operator(self, others):
"""
:param others:
:return:
"""
for other in others:
if not isinstance(other, DslFilterMixin):
other = self.scalar_to_filter(
value=other,
)
yield other
def to_filter(self, value):
"""
:param value:
:return:
"""
if isinstance(value, collections.Iterable):
return self.seq_to_filter(value)
return self.scalar_to_filter(value)
@staticmethod
def seq_to_filter(value):
"""
:param value:
:return:
"""
from .filter_cast_seq_value import FilterCastSeqValue
return FilterCastSeqValue(seq=value)
@staticmethod
def scalar_to_filter(value):
"""
:param value:
:return:
"""
from .filter_cast_scalar_value import FilterCastScalarValue
return FilterCastScalarValue(value=value)
def __contains__(self, item):
"""
:param Filter item:
:return:
"""
return self.intersect(item)
def i(self, *args, **kwargs):
"""
:param args:
:param kwargs:
:return:
"""
return self.intersect(*args, **kwargs)
def intersect(self, other, threshold=0):
"""
:param other:
:param threshold:
:return:
"""
from .filter_intersection import FilterIntersection
return FilterIntersection(
parallel_filters=[self, other],
threshold=threshold
)
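# Hedged illustration (the filter instances are stand-ins): the mixin lets
# filters be piped with `|` into a FilterSequence, while plain callables are
# wrapped on the fly, e.g.
#
#     pipeline = norm_filter | mean_filter | (lambda features: features * 2)
#
# Here the lambda is wrapped into a FilterCastFeatures by
# cast_to_apply_sequence() before being chained.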
|
Inc. 2016
"""
import jsonpickle
import re
class APIHelper:
"""A Helper Class for various functions associated with API Calls.
This class contains static methods for operations that need to be
performed during API requests. All of the methods inside this class are
static methods, there is no need to ever initialise an instance of this
class.
"""
@staticmethod
def json_serialize(obj):
"""JSON Serialization of a given object.
Args:
obj (object): The object to serialise.
Returns:
str: The JSON serialized string of the object.
"""
if obj is None:
return None
# Resolve names first if it's one of our objects (or a list of them) that supports it
if isinstance(obj, list):
value = list()
for item in obj:
try:
value.append(item.resolve_names())
except (AttributeError, TypeError):
value.append(item)
obj = value
else:
try:
obj = obj.resolve_names()
except (AttributeError, TypeError):
obj = obj
return jsonpickle.encode(obj, False)
@staticmethod
def json_deserialize(json):
"""JSON Deerialization of a given string.
Args:
json (str): The JSON serialized string to deserialize.
Returns:
dict: A dictionary representing the data contained in the
JSON serialized string.
"""
if json is None:
return None
return jsonpickle.decode(json)
@staticmethod
def append_url_with_template_parameters(url,
parameters):
"""Replaces template parameters in the given url.
Args:
url (str): The query url string to replace the template parameters.
parameters (dict): The parameters to replace in the url.
Returns:
str: Url with replaced parameters.
"""
# Parameter validation
if url is None:
raise ValueError("url is null")
if parameters is None:
return url
# Iterate and replace parameters
for key in parameters:
element = parameters[key]
replace_value = ""
# Load parameter value
if element is None:
replace_value = ""
elif isinstance(element, list):
replace_value = "/".join(element)
else:
replace_value = str(element)
url = url.replace('{{{0}}}'.format(key),str(replace_value))
return url
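# For illustration (hedged, values made up):
#   APIHelper.append_url_with_template_parameters(
#       'http://example.com/users/{id}', {'id': 42})
#   # -> 'http://example.com/users/42'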
@staticmethod
def append_url_with_query_parameters(url,
parameters):
"""Appends the given set of parameters to the given query string.
Args:
url (str): The query url string to append the parameters.
parameters (dict): The parameters to append.
Returns:
str: Url with appended query parameters.
"""
# Perform parameter validation
if url is None:
raise ValueError("url is null")
if parameters is None:
return url
# Does the query string already have parameters?
has_params = '?' in url
# Iterate and replace parameters
for key in parameters:
element = parameters[key]
# Ignore null values
if element is None:
continue
# If already has parameters, use the & to append new parameters
separator = '&' if has_params else '?'
if isinstance(element, list):
url = url + '{0}{1}[]={2}'.format(separator, key, '&{0}[]='.format(key).join(element))
else:
url = url + '{0}{1}={2}'.format(separator, key, str(parameters[key]))
# Indicate the url has params
has_params = True
return url
@staticmethod
def clean_url(url):
"""Validates and processes the given query Url to clean empty slashes.
Args:
url (str): The given query Url to process.
Returns:
str: Clean Url as string.
"""
# Ensure that the urls are absolute
regex = "^https?://[^/]+"
match = re.match(regex, url)
if match is None:
raise ValueError('Invalid Url format.')
# Remove redundant forward slashes
protocol = match.group(0)
query_url = url[len(protocol):]
query_url = re.sub("//+", "/", query_url)
return protocol + query_url
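# For illustration (hedged): redundant slashes after the protocol collapse,
#   APIHelper.clean_url('https://example.com//a//b')  # -> 'https://example.com/a/b'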
@staticmethod
def form_encode(obj,
instanceName):
"""Encodes a model in a form-encoded manner such as person[Name]
Args:
obj (object): The given Object to form encode.
instanceName (string): The base name to appear before each entry
for this object.
Returns:
dict: A dictionary of form encoded properties of the model.
"""
# Resolve the names first
value = APIHelper.resolve_name(obj)
retval = dict()
if value is None:
return None
# Loop through every item we need to send
for item in value:
if isinstance(value[item], list):
# Loop through each item in the list and add it by number
i = 0
for entry in value[item]:
retval.update(APIHelper.form_encode(entry, instanceName + "[" + item + "][" + str(i) + "]"))
i += 1
elif isinstance(value[item], dict):
# Loop through each item in the dictionary and add it
retval.update(APIHelper.form_encode(value[item], instanceName + "[" + item + "]"))
else:
# Add the current item
retval[instanceName + "[" + item + "]"] = value[item]
return retval
@staticmethod
def resolve_names(obj,
names,
retval):
"""Resolves parameters from their Model names to their API names.
Args:
obj (object): The given Object to resolve names for.
names (dict): A dictionary containing a mapping from model name
to API name.
retval (dict): The dictionary to return which may or may not be
empty (but must not be None).
Returns:
dict: A dictionary form of the model with properties in their API
formats.
"""
# Loop through all properties in this model
for name in names:
value = getattr(obj, name)
if isinstance(value, list):
# Loop through each item
retval[names[name]] = list()
for item in value:
retval[names[name]].append(APIHelper.resolve_name(item))
elif isinstance(value, dict):
# Loop through each item
retval[names[name]] = dict()
for key in value:
retval[names[name]][key] = APIHelper.resolve_name(value[key])
else:
retval[names[name]] = APIHelper.resolve_name(value)
# Return the result
return retval
@staticmethod
def resolve_name(value):
"""Resolves name for a given object
If the object needs to be recursively resolved, this method will
|
from setuptools import setup, find_packages
import sys, os
version = '1.3'
long_description = """The raisin.restyler package is a part of Raisin, the web application
used for publishing the summary statistics of Grape, a pipeline used for processing and
analyzing RNA-Seq data."""
setup(name='raisin.restyler',
version=version,
description="A package used in the Raisin web application",
long_description=long_description,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules',
'Operating System :: POSIX :: Linux'],
keywords='RNA-Seq pipeline ngs transcriptome bioinformatics ETL',
author='Maik Roder',
author_email='maikroeder@gmail.com',
url='http://big.crg.cat/services/grape',
license='GPL',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
namespace_packages = ['raisin'],
package_data = {'raisin.restyler':['templates/*.pt']},
include_package_data=True,
zip_safe=False,
install_requires=[
# -*- Extra requirements: -*-
'configobj',
'zope.pagetemplate'
],
entry_points="""
# -*- Entry points: -*-
""",
)
|
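# Hedged note (inferred from the loop below): this sums the numbers on the
# diagonals of a 1001x1001 clockwise number spiral (each ring of side `width`
# contributes four corners spaced `width - 1` apart).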
sum = 1
curr = 3
for width in xrange(3,1002,2):
inc = width - 1
sum = sum + curr #bottom right
curr = curr + inc
sum = sum + curr #bottom left
curr = curr + inc
sum = sum + curr #top left
curr = curr + inc
sum = sum + curr #top right
curr = curr + inc + 2
print sum
|
import copy as pythoncopy
import numpy as np
import scipy.optimize as spopt
class rslc():
"""
A regularly sampled lightcurve, typically obtained by regression.
To make such a rslc from a usual lightcurve object, look at the factory function below.
One idea is that we want to be able to add and subtract those, propagating errors.
There is no "microlensing" or similar stuff -- only time shifts.
"""
def __init__(self, jds, mags, magerrs, pad, pd, timeshift=0.0, name="Name", plotcolour="black"):
self.jds = jds
self.mags = mags
self.magerrs = magerrs
self.plotcolour = plotcolour
self.name = name
self.timeshift = timeshift
self.pad = pad
self.pd = pd
def __str__(self):
retstr = "[RS:%s]" % (self.name)
if self.timeshift != 0.0:
retstr += "(%.3f)" % (self.timeshift)
return retstr
def shifttime(self, timeshift):
self.timeshift += timeshift
def copy(self):
return pythoncopy.deepcopy(self)
def getjds(self):
return self.jds + self.timeshift
def getmags(self):
return self.mags
def getmagerrs(self):
return self.magerrs
def mask(self, maxmagerr = 0.1, target = 20.0):
self.magerrs[self.magerrs > maxmagerr] = target
def wtv(self, method = "weights"):
"""
Return some weighted average variation WAV.
Usually called on a "difference" lightcurve.
"""
#return np.sum(np.fabs(self.mags[1:] - self.mags[:-1]))
#mask = self.magerrs < maxmagerr
if method == "weights":
dys = self.mags[1:] - self.mags[:-1]
dyws = 1.0 / (0.5*(self.magerrs[1:] + self.magerrs[:-1]))
out = np.sum(np.fabs(dys) * dyws) / np.sum(dyws)
if method == "simple":
out = np.sum(np.fabs(self.mags[1:] - self.mags[:-1]))
return out
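# In formula form (a hedged restatement of the "weights" branch of wtv above):
#   WAV = sum_i( |m_{i+1} - m_i| * w_i ) / sum_i( w_i ),
#   with w_i = 1 / ( 0.5 * (magerr_{i+1} + magerr_i) )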
def factory(l, pad=300, pd=2, plotcolour=None,knotstep=20.0, n=None, stab=True,stabext=300.0, stabgap=20.0, stabstep=5.0,
stabmagerr=-2.0, stabrampsize=0, stabrampfact=1.0, bokit=1, bokeps=2.0, boktests=5,
bokwindow=None, k=3, verbose=True):
"""
Give me a lightcurve, I return a regularly sampled light curve, by performing some spline regression.
!!! New: I also return the spline used for the regression
:param pad: the padding, in days
:param pd: the point density, in points per days.
The points live on a regular grid in julian days, 0.0, 0.1, 0.2, 0.3 ...
"""
if plotcolour == None:
plotcolour = l.plotcolour
name = l.object
jds = l.jds.copy()
timeshift = l.timeshift
mags = l.getmags(noml=True)
magerrs = l.getmagerrs()
minjd = np.round(jds[0] - pad)
maxjd = np.round(jds[-1] + pad)
npts = int(maxjd - minjd)*pd
rsjds = np.linspace(minjd, maxjd, npts) # rs for regularly sampled
# The regression itself
mean_mag = np.mean(mags)
def meanprior(query):
return (0.0 * query + mean_mag)
regfct,spline = splreg.splreg(jds, mags, magerrs,knotstep=knotstep, n=n, stab=stab, stabext=stabext, stabgap=stabgap, stabstep=stabstep,
stabmagerr=stabmagerr, stabrampsize=stabrampsize, stabrampfact=stabrampfact, bokit=bokit, bokeps=bokeps,
boktests=boktests,bokwindow=bokwindow, k=k, verbose=verbose)
(rsmags, rsmagerrs) = regfct(rsjds)
return rslc(rsjds, rsmags, rsmagerrs, pad, pd, timeshift=timeshift, name=name, plotcolour=plotcolour),spline
def subtract(rs1, rs2):
"""
I subtract rs2 from rs1.
This means I keep the jds and timeshift of rs1, and only change the mags and magerrs,
interpolating rs2.
I return a brand new rslc object, that has no timeshift (as we do not care about a timeshift, for a difference).
:param rs1:
:type rs1: rslc object
:param rs2:
:type rs2: rslc object
"""
newjds = rs1.getjds()
newmags = rs1.mags.copy()
newmagerrs = rs1.magerrs.copy()
newpad = rs1.pad
newpd = rs1.pd
newname = "%s(%+.1f)-%s(%+.1f)" % (rs1.name, rs1.timeshift, rs2.name, rs2.timeshift)
# We interpolate rs2 at the positions of rs1
newrs2mags = np.interp(rs1.getjds(), rs2.getjds(), rs2.mags, left=np.nan, right=np.nan)
newrs2magerrs = np.interp(rs1.getjds(), rs2.getjds(), rs2.magerrs, left=np.nan, right=np.nan)
# These arrays contain NaN at one of their extrema.
newmags -= newrs2mags
newmagerrs = np.sqrt(rs1.magerrs*rs1.magerrs + newrs2magerrs*newrs2magerrs)
# The NaN are now propagated in newmags and newmagerrs
# We cut them :
nanmask = np.isnan(newmags)
#nnan = np.sum(nanmask)
#print nnan/newpd
newjds = newjds[nanmask == False]
newmags = newmags[nanmask == False]
newmagerrs = newmagerrs[nanmask == False]
return rslc(newjds, newmags, newmagerrs, newpad, newpd, timeshift=0.0, name=newname, plotcolour="black")
def wtvdiff(rs1, rs2, method):
"""
Returns the wtv (weighted TV) of the difference between 2 curves.
This is symmetric (no change if you invert rs1 and rs2), up to some small numerical errors.
"""
out = subtract(rs1, rs2).wtv(method)
#print out
return float(out)
def bruteranges(step, radius, center):
"""
Auxiliary function for brute force exploration.
Prepares the "ranges" parameter to be passed to brute force optimizer
In other words, we draw a cube ...
radius is an int saying how many steps to go left and right of center.
center is an array of the centers; it can be of any length.
You make 2*radius + 1 steps in each direction, so radius=2 means 5 steps, thus 125 calls for 4 curves.
"""
low = - step * radius
up = step * (radius+1)
if center.shape == ():
c = float(center)
return [((c+low),(c+up),step)]
else:
return [((c+low),(c+up),step) for c in center]
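# For illustration (hedged): bruteranges(5, 3, np.array([0.0, 10.0])) returns
#   [(-15.0, 20.0, 5), (-5.0, 30.0, 5)]
# i.e. 2*3 + 1 = 7 grid steps per axis for the brute-force scan.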
def opt_ts(rslcs, method="weights", verbose=True):
"""
I optimize the timeshifts between the rslcs to minimize the wtv between them.
Note that even if the wtvdiff is only about two curves, we cannot split this into optimizing
AB AC AD in a row, as this would never calculate BC, and BC is not contained in AB + AC.
!!! New : I also return a spline to optimise the magshifts
:param rslcs: a list of rslc objects
"""
rslcsc = [rs.copy() for rs in rslcs] # We'll work on copies.
# No need for reverse combis, as wtvdiff is symmetric.
#couplelist = [couple for couple in [[rs1, rs2] for rs1 in rslcsc for rs2 in rslcsc] if couple[0] != couple[1]]
indexes = np.arange(len(rslcsc))
indlist = [c for c in [[i1, i2] for i1 in indexes for i2 in indexes] if c[1] > c[0]]
couplelist = [[rslcsc[i1], rslcsc[i2]] for (i1, i2) in indlist]
# So the elements in couplelist are the SAME as those from rslcsc
inishifts = np.array([rs.timeshift for rs in rslcsc[1:]]) # We won't move the first curve.
def errorfct(timeshifts):
if timeshifts.shape == ():
timeshifts = np.array([timeshifts])
for (rs, timeshift) in zip(rslcsc[1:], timeshifts):
rs.timeshift = timeshift
tvs = np.array([wtvdiff(rs1, rs2, method=method) for (rs1, rs2) in couplelist])
ret = np.sum(tvs)
#if verbose:
# print timeshifts, ret
return ret
if verbose:
print "Starting time shift optimization ..."
print "Initial pars (shifts, not delays) : ", inishifts
# Some brute force exploration, like for the dispersion techniques ...
res = spopt.brute(errorfct, bruteranges(5,3,inishifts), full_output = 0, finish=None)
# This would finish by default with fmin ... we do not want that.
if verbose:
print "Brute 1 shifts : %s" % res
print "Brute 1 errorfct : %f" % errorfct(res)
res = spopt.brute(errorfct, bruteranges(2.5,3,res), full_output = 0, finish=None)
if verbose:
print "Brute 2 shifts : %s" % res
print "Brute 2 errorfct : %f" % errorfct(res)
res = spopt.brute(errorfct, bruteranges(1.25,3,res), full_output = 0, finish=None)
if verbose:
print "Brute 3 shifts : %s" % res
print "Brute 3 errorfct : %f" % errorfct(res)
res = spopt.brute(errorfct, bruteranges(0.5,3,res), full_output = 0, finish=None)
if verbose:
print "Brute 4 shifts : %s" % res
print "Brute 4 errorfct : %f" % errorfct(res)
minout = spopt.fmin_powell(errorfct, res, xtol=0.001, full_output=1, disp=verbose)
#minout = spopt.fmin_bfgs(errorfct, inishifts, maxiter=None, full_output=1, disp=verbose, retall=0, callback=None)
popt = minout[0]
minwtv = errorfct(popt) # This sets popt, and the optimal ML and source.
if verbose:
print "Final shifts : %s" % popt
print "Final errorfct : %f" |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/contrib/lite/toco/toco_flags.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.contrib.lite.toco import types_pb2 as tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/contrib/lite/toco/toco_flags.proto',
package='toco',
syntax='proto2',
serialized_pb=_b('\n-tensorflow/contrib/lite/toco/toco_flags.proto\x12\x04toco\x1a(tensorflow/contrib/lite/toco/types.proto\"\x92\x03\n\tTocoFlags\x12&\n\x0cinput_format\x18\x01 \x01(\x0e\x32\x10.toco.FileFormat\x12\'\n\routput_format\x18\x02 \x01(\x0e\x32\x10.toco.FileFormat\x12.\n\x14inference_input_type\x18\x0b \x01(\x0e\x32\x10.toco.IODataType\x12(\n\x0einference_type\x18\x04 \x01(\x0e\x32\x10.toco.IODataType\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_min\x18\x05 \x01(\x02\x12\x1a\n\x12\x64\x65\x66\x61ult_ranges_max\x18\x06 \x01(\x02\x12\x17\n\x0f\x64rop_fake_quant\x18\x07 \x01(\x08\x12!\n\x19reorder_across_fake_quant\x18\x08 \x01(\x08\x12\x18\n\x10\x61llow_custom_ops\x18\n \x01(\x08\x12\x1f\n\x17\x64rop_control_dependency\x18\x0c \x01(\x08\x12+\n#debug_disable_recurrent_cell_fusion\x18\r \x01(\x08*\\\n\nFileFormat\x12\x17\n\x13\x46ILE_FORMAT_UNKNOWN\x10\x00\x12\x17\n\x13TENSORFLOW_GRAPHDEF\x10\x01\x12\n\n\x06TFLITE\x10\x02\x12\x10\n\x0cGRAPHVIZ_DOT\x10\x03')
,
dependencies=[tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2.DESCRIPTOR,])
_FILEFORMAT = _descriptor.EnumDescriptor(
name='FileFormat',
full_name='toco.FileFormat',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='FILE_FORMAT_UNKNOWN', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TENSORFLOW_GRAPHDEF', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TFLITE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='GRAPHVIZ_DOT', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=502,
serialized_end=594,
)
_sym_db.RegisterEnumDescriptor(_FILEFORMAT)
FileFormat = enum_type_wrapper.EnumTypeWrapper(_FILEFORMAT)
FILE_FORMAT_UNKNOWN = 0
TENSORFLOW_GRAPHDEF = 1
TFLITE = 2
GRAPHVIZ_DOT = 3
_TOCOFLAGS = _descriptor.Descriptor(
name='TocoFlags',
full_name='toco.TocoFlags',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='input_format', full_name='toco.TocoFlags.input_format', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='output_format', full_name='toco.TocoFlags.output_format', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inference_input_type', full_name='toco.TocoFlags.inference_input_type', index=2,
number=11, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='inference_type', full_name='toco.TocoFlags.inference_type', index=3,
number=4, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_ranges_min', full_name='toco.TocoFlags.default_ranges_min', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='default_ranges_max', full_name='toco.TocoFlags.default_ranges_max', index=5,
number=6, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drop_fake_quant', full_name='toco.TocoFlags.drop_fake_quant', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='reorder_across_fake_quant', full_name='toco.TocoFlags.reorder_across_fake_quant', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='allow_custom_ops', full_name='toco.TocoFlags.allow_custom_ops', index=8,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='drop_control_dependency', full_name='toco.TocoFlags.drop_control_dependency', index=9,
number=12, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='debug_disable_recurrent_cell_fusion', full_name='toco.TocoFlags.debug_disable_recurrent_cell_fusion', index=10,
number=13, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=500,
)
_TOCOFLAGS.fields_by_name['input_format'].enum_type = _FILEFORMAT
_TOCOFLAGS.fields_by_name['output_format'].enum_type = _FILEFORMAT
_TOCOFLAGS.fields_by_name['inference_input_type'].enum_type = tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2._IODATATYPE
_TOCOFLAGS.fields_by_name['inference_type'].enum_type = tensorflow_dot_contrib_dot_lite_dot_toco_dot_types__pb2._IODATATYPE
DESCRIPTOR.message_types_by_name['TocoFlags'] = _TOCOFLAGS
DESCRIPTOR.enum_types_by_name['FileFormat'] = _FILEFORMAT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TocoFlags = _reflection.GeneratedProtocolMessageType('TocoFlags', (_message.Message,), dict(
DESCRIPTOR = _TOCOFLAGS,
__module__ = 'tensorflow.contrib.lite.toco.toco_flags_pb2'
# @@protoc_insertion_point(class_scope:toco.TocoFlags)
))
_sym_db.RegisterMessage(TocoFlags)
# @@protoc_insertion_point(module_scope)
|
""" crypto.aes
AES Encryption Algorithm
The AES algorithm is just Rijndael algorithm restricted to the default
blockSize of 128 bits.
Copyright (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
2002-06-01
"""
from crypto.cipher.rijndael import Rijndael
from crypto.cipher.base import BlockCipher, padWithPadLen, noPadding
from crypto.errors import BadKeySizeError
class AES(Rijndael):
""" The AES algorithm is the Rijndael block cipher restricted to block
sizes of 128 bits and key sizes of 128, 192 or 256 bits
"""
def __init__(self, key = None, padding = padWithPadLen(), keySize=16):
""" Initialize AES, keySize is in bytes """
if not (keySize == 16 or keySize == 24 or keySize == 32) :
raise BadKeySizeError, 'Illegal AES key size, must be 16, 24, or 32 bytes'
Rijndael.__init__( self, key, padding=padding, keySize=keySize, blockSize=16 )
self.name = 'AES'
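# Hedged usage sketch (key and plaintext made up; encrypt/decrypt are assumed
# to come from the Rijndael/BlockCipher base classes):
#
#     alg = AES(key='0123456789abcdef', keySize=16)
#     ct = alg.encrypt('attack at dawn')
#     assert alg.decrypt(ct) == 'attack at dawn'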
|
from distutils.core import setup
setup(
name = 'morpheusapi',
packages = ['morpheusapi'],
version = '2.11.1',
description = 'A python wrapper for Morpheus APIs',
author = 'Adam Hicks',
author_email = 'thomas.adam.hicks@gmail.com',
url = 'https://github.com/tadamhicks/morpheus-python',
download_url = 'https://github.com/tadamhicks/morpheus-python/archive/2.11.1.tar.gz',
keywords = ['morpheus', 'api', 'morpheus data'],
classifiers = [],
)
|
-)
#
# (C) 2001-2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: loop://[option[/option...]]
# options:
# - "debug" print diagnostic messages
from serial.serialutil import *
import threading
import time
import logging
# map log level names to constants. used in fromURL()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
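# Hedged usage sketch (relies on pySerial's serial_for_url() URL dispatch):
#
#     import serial
#     s = serial.serial_for_url('loop://logging=debug', timeout=1)
#     s.write('hello')
#     s.read(5)   # -> 'hello', echoed back out of the loop buffer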
class LoopbackSerial(SerialBase):
"""Serial port implementation that simulates a loop back connection in plain software."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
if self._isOpen:
raise SerialException("Port is already open.")
self.logger = None
self.buffer_lock = threading.Lock()
self.loop_buffer = bytearray()
self.cts = False
self.dsr = False
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
# not that there is anything to open, but the function applies the
# options found in the URL
self.fromURL(self.port)
# not that there is anything to configure...
self._reconfigurePort()
# all things set up get, now a clean start
self._isOpen = True
if not self._rtscts:
self.setRTS(True)
self.setDTR(True)
self.flushInput()
self.flushOutput()
def _reconfigurePort(self):
"""\
Set communication parameters on opened port. For the loop://
protocol all settings are ignored!
"""
# not that's it of any real use, but it helps in the unit tests
if not isinstance(self._baudrate, (int, long)) or not 0 < self._baudrate < 2**32:
raise ValueError("invalid baudrate: %r" % (self._baudrate))
if self.logger:
self.logger.info('_reconfigurePort()')
def close(self):
"""Close port"""
if self._isOpen:
self._isOpen = False
# in case of quick reconnects, give the server some time
time.sleep(0.3)
def makeDeviceName(self, port):
raise SerialException("there is no sensible way to turn numbers into URLs")
def fromURL(self, url):
"""extract host and port from an URL string"""
if url.lower().startswith("loop://"): url = url[7:]
try:
# process options now, directly altering self
for option in url.split('/'):
if '=' in option:
option, value = option.split('=', 1)
else:
value = None
if not option:
pass
elif option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.loop')
self.logger.setLevel(LOGGER_LEVELS[value])
self.logger.debug('enabled logging')
else:
raise ValueError('unknown option: %r' % (option,))
except ValueError, e:
raise SerialException('expected a string in the form "[loop://][option[/option...]]": %s' % e)
# - - - - - - - - - - - - - - - - - - - - - - - -
def inWaiting(self):
"""Return the number of characters currently in the input buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
# attention the logged value can differ from return value in
# threaded environments...
self.logger.debug('inWaiting() -> %d' % (len(self.loop_buffer),))
return len(self.loop_buffer)
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
return less characters as requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self._isOpen: raise portNotOpenError
if self._timeout is not None:
timeout = time.time() + self._timeout
else:
timeout = None
data = bytearray()
while size > 0:
self.buffer_lock.acquire()
try:
block = to_bytes(self.loop_buffer[:size])
del self.loop_buffer[:size]
finally:
self.buffer_lock.release()
data += block
size -= len(block)
# check for timeout now, after data has been read.
# useful for timeout = 0 (non blocking) read
if timeout and time.time() > timeout:
break
return bytes(data)
def write(self, data):
"""\
        Output the given string over the serial port. Can block if the
        connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self._isOpen: raise portNotOpenError
# ensure we're working with bytes
data = to_bytes(data)
        # calculate approximate time that would be used to send the data
time_used_to_send = 10.0*len(data) / self._baudrate
# when a write timeout is configured check if we would be successful
# (not sending anything, not even the part that would have time)
if self._writeTimeout is not None and time_used_to_send > self._writeTimeout:
time.sleep(self._writeTimeout) # must wait so that unit test succeeds
raise writeTimeoutError
self.buffer_lock.acquire()
try:
self.loop_buffer += data
finally:
self.buffer_lock.release()
return len(data)
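    # Editorial worked example for the timing model used in write() above
    # (an approximation this loopback implementation assumes): each byte
    # costs roughly 10 bits on the wire (8 data bits + start + stop bit),
    # so at 9600 baud the port moves ~960 bytes/s and writing 1024 bytes
    # takes about 10.0 * 1024 / 9600 ~= 1.07 seconds. A writeTimeout below
    # that value would therefore raise writeTimeoutError.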
def flushInput(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('flushInput()')
self.buffer_lock.acquire()
try:
del self.loop_buffer[:]
finally:
self.buffer_lock.release()
def flushOutput(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('flushOutput()')
def sendBreak(self, duration=0.25):
"""\
Send break condition. Timed, returns to idle state after given
duration.
"""
if not self._isOpen: raise portNotOpenError
def setBreak(self, level=True):
"""\
        Set break: Controls TXD. When active, no transmitting is
        possible.
"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setBreak(%r)' % (level,))
def setRTS(self, level=True):
"""Set terminal status line: Request To Send"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setRTS(%r) -> state of CTS' % (level,))
self.cts = level
def setDTR(self, level=True):
"""Set terminal status line: Data Terminal Ready"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('setDTR(%r) -> state of DSR' % (level,))
self.dsr = level
def getCTS(self):
"""Read terminal status line: Clear To Send"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('getCTS() -> state of RTS (%r)' % (self.cts,))
return self.cts
def getDSR(self):
"""Read terminal status line: Data Set Ready"""
if not self._isOpen: raise portNotOpenError
if self.logger:
self.logger.info('getDSR() -> state of DTR (%r)' % (self.dsr,))
return self.dsr
def getRI(self):
"""Read ter |
# Import the helper gateway class
from AfricasTalkingGateway import AfricasTalkingGateway, AfricasTalkingGatewayException
from twitment.search import ClassTwitter
# Specify your login credentials
class SMS(object):
def __init__(self):
pass
def send(self,num):
sendtweet_obj = ClassTwitter()
x = sendtweet_obj.wordFrequency.wordslist
username = "CATHERINERAKAMA"
apikey = "676dbd926bbb04fa69ce90ee81d3f5ffee2692aaf80eb5793bd70fe93e77dc2e"
# Specify the numbers that you want to send to in a comma-separated list
# Please ensure you include the country code (+254 for Kenya)
to = num
# And of course we want our recipients to know what we really do
message = x
# Create a new instance of our awesome gateway class
gateway = AfricasTalkingGateway(username, apikey)
# Any gateway errors will be captured by our custom Exception class below,
# so wrap the call in a try-catch block
try:
# Thats it, hit send and we'll take care of the rest.
results = gateway.sendMessage(to, message)
for recipient in results:
# status is either "Success" or "error message"
                print 'Message sent to number=%s;status=%s' % (
                    recipient['number'], recipient['status'])
except AfricasTalkingGatewayException, e:
print 'Encountered an error while sending: %s' % str(e)
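# --- Editorial aside (not in the original script) ---
# Hard-coding the username and API key as above ships credentials with the
# source. A minimal sketch of reading them from the environment instead
# (the variable names here are hypothetical):
#
#   import os
#   username = os.environ.get("AT_USERNAME", "")
#   apikey = os.environ.get("AT_APIKEY", "")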
|
# Author: Bala Venkatesan
# License: Apache 2.0
########################################################################
# Wrote this file to separate out the loading of the data from the
# python file where the actual display happens
########################################################################
import pandas as pd
import csv
########################################################################
# Loading data
########################################################################
statefile = open('./annual_averages_by_state.csv', 'r')
csvreader = csv.reader(statefile)
########################################################################
# initializing a dataframe to parse only required data from file
########################################################################
columns = ["STATE",
"TOTAL_POPULATION",
"WORKFORCE",
"WORK_%_OF_POP",
"EMPLOYED",
"EMP_%_OF_POP",
"UNEMPLOYED",
"UNEMPLOMENT_RATE",
]
data = []
rowIndex = 0
########################################################################
# function that parses the state data for 2012 & 2013 and returns
# a DataFrame with the data read from the file
# the function cleans the data before returning the DataFrame
########################################################################
def state_data():
for row in csvreader:
#######################################################################################
# intialize a bunch of index variables for data clean up
# startat is used to push the iteration to the right in the case of states with 2 words
        # stopat moves correspondingly.
#######################################################################################
index = 0
startat = 0
stopat=10
statename = row[0]
# Initializing pandas series for DataFrame.
values = []
        for index, x in enumerate(row):
            print statename
            print x
            if index == 0:
                values.append(statename.upper())
            else:
                # strip thousands separators ("1,234" -> "1234")
                values.append(x.replace(",", ""))
        data.append(values)  # rowIndex was never incremented; append keeps file order
df = pd.DataFrame(data,columns=columns)
return df
if __name__ == '__main__':
print state_data()
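# --- Editorial sketch (an assumption, not in the original file) ---
# pandas can do the comma-stripping handled manually above in one call;
# assuming the CSV has no header row and its columns match `columns`, an
# equivalent load would be:
#
#   df = pd.read_csv('./annual_averages_by_state.csv',
#                    names=columns, thousands=',')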
|
# coding: utf-8
# Copyright (c) 2012, SciELO <scielo-dev@googlegroups.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
import re
from django import forms
from django.utils.translation import ugettext_lazy as _
class ISSNField(forms.CharField):
default_error_messages = {
'invalid': _('Enter a valid ISSN.')
}
regex = r'[0-9]{4}-[0-9]{3}[0-9X]{1}$'
def clean(self, value):
        if value != u'' and value is not None:
result = re.match(self.regex, value)
if result is None:
raise forms.ValidationError(self.error_messages['invalid'])
return value |
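# --- Illustrative examples (editorial addition) ---
# What the regex above accepts: four digits, a hyphen, three digits and a
# final digit or check character 'X'. For instance:
#
#   re.match(ISSNField.regex, '0378-5955')   # match: plain ISSN
#   re.match(ISSNField.regex, '2049-363X')   # match: 'X' check digit
#   re.match(ISSNField.regex, '03785955')    # None: hyphen required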
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RSdmtools(RPackage):
"""Species Distribution Modelling Tools: Tools for processing data
associated with species distribution modelling exercises
    This package provides a set of tools for post processing the outcomes of
species distribution modeling exercises."""
homepage = "https://cloud.r-project.org/package=SDMTools"
url = "https://cloud.r-project.org/src/contrib/SDMTools_1.1-221.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/SDMTools"
version('1.1-221.1', sha256='3825856263bdb648ca018b27dc6ab8ceaef24691215c197f8d5cd17718b54fbb')
version('1.1-221', sha256='a6da297a670f756ee964ffd99c3b212c55c297d385583fd0e767435dd5cd4ccd')
version('1.1-20', sha256='d6a261ce8f487d5d03b1931039f528f2eb50fb9386e7aae40045c966ff6d4182')
version('1.1-13', sha256='02d94977bfa2f41f1db60e619335ac0ea8109dd98108ff9d21a412f7c4a14a2e')
version('1.1-12', sha256='6dc4a8a046e7fced190402f39a9bae6f863e08c320f0881367c022b2f220f14b')
version('1.1-11', sha256='1caf8fa1914ad6921d76e7b22a8c25cfe55892b0d21aef3b2a7b8f5b79b9388b')
depends_on('r-r-utils', type=('build', 'run'))
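# --- Editorial note ---
# With this recipe on Spack's package path, the library installs via the
# standard CLI (the package name follows Spack's r- prefix convention for
# R packages):
#
#   $ spack install r-sdmtools
#   $ spack install r-sdmtools@1.1-221.1   # pin one of the versions above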
|
#/u/Goldensights
import praw
import time
import datetime
'''USER CONFIG'''
USERNAME = ""
#This is the bot's Username. In order to send mail, it must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
PRINTFILE = 'messages.txt'
#This is the file, in the same directory as the .py file, where the messages are stored
SUBJECTLINE = "Newsletterly"
ITEMTYPE = 't4'
#The type of item to gather. t4 is a PM
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.uG
PASSWORD = bot.pG
USERAGENT = bot.aG
except ImportError:
pass
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def work():
unread = r.get_unread(limit=MAXPOSTS)
results = []
for message in unread:
if ITEMTYPE in message.fullname:
print(message.id, message.subject, end=" ")
if SUBJECTLINE.lower() in message.subject.lower():
print(message.body)
messagedate = datetime.datetime.utcfromtimestamp(message.created_utc)
messagedate = datetime.datetime.strftime(messagedate, "%B %d %Y %H:%M UTC")
results += [message.fullname + " : " + message.author.name, messagedate, message.body, "\n\n"]
else:
print()
message.mark_as_read()
logfile = open(PRINTFILE, "a")
for result in results:
print(result, file=logfile)
logfile.close()
while True:
try:
work()
except Exception as e:
        print('An error has occurred:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
time.sleep(WAIT) |
.popped_tensor_lists[internal_capture])
elif internal_capture.dtype == dtypes.resource:
grad_func_graph.outputs.append(internal_capture)
else:
raise ValueError("Tensor %s is in list of internal_captures but is"
" neither a resource nor is in popped_tensor_lists." %
str(internal_capture))
return grad_func_graph, args
def _grad_fn(ys, xs, args, func_graph):
"""Computes the gradient of `func_graph` in the current graph.
This function builds the gradient graph of the corresponding forward-pass
`func_graph` by differentiating `func_graph`'s outputs w.r.t. its inputs.
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
args: The input arguments.
args[0] - Loop counter
args[1] - Total number of iterations.
args[2:] - Incoming gradients for `ys`.
func_graph: function.FuncGraph. The corresponding forward-pass function.
Returns:
The output gradient Tensors.
"""
grad_ys = args[2:]
# Build the gradient graph. Note that this builds the gradient computation of
# func_graph in the current graph, which requires capturing tensors from
# func_graph. The captured func_graph tensors are resolved to external tensors
# after the forward While op has been rewritten in _resolve_grad_captures.
# TODO(srbs): Mark GradientsHelper as public?
grad_outs = gradients_impl._GradientsHelper(
ys, xs, grad_ys=grad_ys, src_graph=func_graph,
unconnected_gradients="zero")
# TODO(b/118712257): Handle the case when grad_outs has None's e.g. when there
# is a tf.StopGradient in the loop body.
assert all(g is not None for g in grad_outs)
counter = args[0]
total_iters = args[1]
return [counter + 1, total_iters] + grad_outs
def _resolve_grad_captures(body_graph, body_grad_graph, while_op):
"""Returns the tensors to pass as captured inputs to `body_grad_graph`.
`body_grad_graph` may have external references to:
1. Its outer graph containing the input gradients. These are left as-is.
2. Accumulators captured from the forward-pass graph. These should have been
added as `while_op` outputs after the gradient graph was built. We replace
these with the corresponding output of `while_op`, i.e. a tensor in
`body_graph.outer_graph`. In the case of nested control flow or functions,
the gradient logic handling `body_grad_graph.outer_graph` will make sure
the tensor from `body_graph.outer_graph` is also correctly captured.
Args:
body_graph: FuncGraph. The forward-pass body function.
body_grad_graph: FuncGraph. The body gradients function.
while_op: The forward-pass While Operation calling `body_graph`.
Returns:
A list of input tensors to be passed as the captured inputs to
`body_grad_graph`.
"""
new_capture_inputs = []
for t in body_grad_graph.external_captures:
# All values captured by gradient computation should be from the forward
# graph or a captured resource variable (note that input gradients are
# regular non-captured inputs).
if t.graph == body_graph:
# Captured accumulator
t = while_op.outputs[t.graph.outputs.index(t)]
# Note: We rely on the capturing logic of the gradient While op graph to
# correctly capture the tensors in `body_graph.outer_graph`. Both cond_v2
# and while_v2 handle this while building their gradient functions.
assert t.graph == body_graph.outer_graph
else:
# Captured resource variable
assert t.dtype == dtypes.resource
new_capture_inputs.append(t)
return new_capture_inputs
def _get_accumulator(tensor):
r"""Returns TensorList if any containing accumulated values of tensor.
We try to find a pattern of the form:
input_tl tensor
\ /
(TensorListPushBack)
|
output_tl
which satisfies the following conditions:
1. input_tl must be in tensor.graph.inputs.
2. output_tl or Identity(output_tl) must be in tensor.graph.outputs.
  3. tensor.graph.input_index(input_tl) == tensor.graph.output_index(output_tl).
output_tl or Identity(output_tl) (whichever is in tensor.graph.outputs) is
returned if such a pattern is found else None is returned.
Args:
tensor: The Tensor to be accumulated.
Returns:
A variant tensor in the same graph as `tensor` or None if no accumulator is
found.
"""
assert isinstance(tensor.graph, func_graph_module.FuncGraph)
def get_func_graph_output(t):
"""Returns t or Identity(t) whichever exists in graph outputs else None."""
    if t in tensor.graph.outputs:
return t
# tf.defun adds an Identity for each output, check whether that is the case.
    identity_op = t.consumers()[0]
if (identity_op.type == "Identity" and
identity_op.outputs[0] in tensor.graph.outputs):
return identity_op.outputs[0]
return None
for consumer in tensor.consumers():
# Find the consumer that is a TensorListPushBack node whose TensorList input
# is in the list of function inputs.
if (consumer.type != "TensorListPushBack" or
consumer.inputs[0] not in tensor.graph.inputs):
continue
output = get_func_graph_output(consumer.outputs[0])
if output is None:
# The TensorList output of `consumer` is not in the list of function
# outputs.
continue
accum_input_idx = tensor.graph.inputs.index(consumer.inputs[0])
accum_output_idx = tensor.graph.outputs.index(output)
if accum_input_idx == accum_output_idx:
return output
return None
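# --- Editorial sketch of the accumulator pattern matched above ---
# In public TF list-op terms (an illustration, not code from this module),
# the shape _get_accumulator searches for is:
#
#   # input_tl is a function input (a variant TensorList)
#   output_tl = list_ops.tensor_list_push_back(input_tl, tensor)
#   # ...and output_tl (or Identity(output_tl)) is a function output at
#   # the same positional index as input_tl is an input.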
class _WhileBodyGradFuncGraph(util.WhileBodyFuncGraph):
"""FuncGraph for the gradient function of the body of a While op.
Contains the logic for capturing the tensors from the body of the forward
While op which is as follows:
1. If the tensor is of resource type (these are not accumulated):
a. Ensure that the tensor is a loop invariant, i.e., it exists in both loop
inputs and outputs at the same index.
b. Lookup the corresponding resource tensor in the forward outer graph and
try to capture that.
2. If the tensor is not of resource type:
a. Create an accumulator for that tensor and output it from the forward
pass. Note this also requires adding it as an input to the forward pass.
b. Capture the accumulator from the forward pass in this FuncGraph. This
will later be resolved to the correct output of the forward While op.
c. Pop a value from the captured placeholder and use it as the captured
value for the forward pass tensor.
This only allows capturing tensors in the forward graph. A ValueError is
raised if an attempt is made to capture a tensor not in the forward graph.
  To manually capture a tensor that is not in the forward graph, call
`capture` with `whitelisted=True`.
Note: The `captures` dict does not contain the forward tensor since it is not
directly captured. It contains the accumulator corresponding to this forward
tensor.
Attributes:
while_op_needs_rewrite: True if any non-resource intermediates were
captured, meaning the forward While op needs to be rewritten to output the
corresponding accumulators.
empty_tensor_lists: list of EmptyTensorList tensors to be used as initial
input to the new accumulators in the forward graph.
popped_tensor_lists: dict from the captured accumulator placeholder to the
TensorList obtained after popping the intermediate tensor from it. The
values of this dict need to be added to the list of outputs.
"""
def __init__(self, name, forward_cond_graph, forward_body_graph, max_iters):
super(_WhileBodyGradFuncGraph, self).__init__(name)
self.empty_tensor_lists = []
self.popped_tensor_lists = {}
# FuncGraph for the body of the forward While op.
self._forward_graph = forward_body_graph
# FuncGraph for the cond of the forward While op.
self._forward_cond_graph = forward_cond_graph
self._maximum_iterations = max_iters
    # Dict from forward intermediate tensor to its indirectly captured
    # tensor.
    self._indirect_captures = {}
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import get_object_or_404
from django import forms
from django.db.models import Max, Avg, Sum
from opos.models import Customers
from opos.forms import CustomerAddForm, CustomerForm
def is_staff (user):
if user.is_staff or user.is_superuser:
return True
else:
return False
@user_passes_test (is_staff)
def dashboard (request):
c = {}
c['curdebt'] = Customers.objects.all().aggregate(Sum('curdebt'))['curdebt__sum']
c['maxdebt'] = Customers.objects.all().aggregate(Sum('maxdebt'))['maxdebt__sum']
c['highestcurdebt'] = Customers.objects.all().aggregate(Max('curdebt'))['curdebt__max']
from opos.sql import get_total_sale
c['totalsale'] = get_total_sale ()[0]
return render (request, "dashboard.html", c)
@user_passes_test (is_staff)
def customersales(request, customerpk):
from opos.sql import get_customer_ticketlines
customer = get_object_or_404 (Customers, pk=customerpk)
ticketlines = get_customer_ticketlines (customer.pk)
c = {}
c['customer'] = customer
c['ticketlines'] = ticketlines
return render (request, "customer-sales.html", c)
@user_passes_test (is_staff)
def customers (request):
customers = Customers.objects.all ()
c = {}
    c['customers'] = customers
return render (request, "customers.html", c)
@user_passes_test (is_staff)
def customeradd (request):
if request.method == 'POST':
form = CustomerAddForm (request.POST)
if form.is_valid ():
form.save ()
return redirect ('customers')
c = {}
c['customeredit'] = CustomerAddForm ()
return render (request, "customer-add.html", c)
@user_passes_test (is_staff)
def customeredit (request, customerpk):
customer = get_object_or_404 (Customers, pk=customerpk)
if request.method == 'POST':
form = CustomerForm (request.POST, instance=customer)
if form.is_valid ():
form.save ()
return redirect ('customers')
else:
form = CustomerForm (instance=customer)
c = {}
c['customer'] = customer
form.fields['id'] = forms.CharField (widget=forms.widgets.HiddenInput())
c['customeredit'] = form
return render (request, "customer-edit.html", c)
def selfdebtcheck (request):
c = {}
if request.method == 'POST':
card = 'c' + request.POST.get("card")
try:
customer = Customers.objects.get (card=card)
        except Customers.DoesNotExist:
            return render (request, "self-debtcheck.html", c)
c['customer'] = customer
c['leftdebt'] = customer.maxdebt - customer.curdebt
return render (request, "self-debtshow.html", c)
else:
return render (request, "self-debtcheck.html", c)
|
from ajenti.api import *
from ajenti.com import *
class DebianNetworkCfg(Plugin):
implements(IConfigurable)
name = 'Network'
id = 'network'
platform = ['Debian', 'Ubuntu']
def list_files(self):
dir = '/etc/network/'
        return [dir+'*', dir+'*/*', dir+'*/*/*']
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
# First, we will create lists of dictionaries containing the pages
# we want to add into each category.
# Then we will create a dictionary of dictionaries for our categories.
# This might seem a little bit confusing, but it allows us to iterate
# through each data structure, and add the data to our models.
    python_pages = [
{"title": "Official Python Tutorial",
"url": "http://docs.python.org | /2/tutorial/",
"views": 32},
{"title": "How to Think like a Computer Scientist",
"url": "http://www.greenteapress.com/thinkpython/",
"views": 16},
{"title": "Learn Python in 10 Minutes",
"url": "http://www.korokithakis.net/tutorials/python/",
"views": 8}]
django_pages = [
{"title": "Official Django Tutorial",
"url": "https://docs.djangoproject.com/en/1.9/intro/tutorial01/",
"views": 32},
{"title": "Django Rocks",
"url": "http://www.djangorocks.com/",
"views": 16},
{"title": "How to Tango with Django",
"url":"http://www.tangowithdjango.com/",
"views": 8}]
other_pages = [
{"title": "Bottle",
"url": "http://bottlepy.org/docs/dev/",
"views": 32},
{"title": "Flask",
"url": "http://flask.pocoo.org",
"views": 16} ]
cats = {"Python": {"pages": python_pages, "views":128, "likes":64},
"Django": {"pages": django_pages, "views":64, "likes":32},
"Other Frameworks": {"pages": other_pages, "views":32, "likes":16},
"Python User Group": {"pages": [], "views": 34, "likes": 16},
"Pascal": {"pages": [], "views": 32, "likes": 16},
"Perl": {"pages": [], "views": 32, "likes": 16},
"Php": {"pages": [], "views": 32, "likes": 16},
"Prolog": {"pages": [], "views": 32, "likes": 16},
"Programming": {"pages": [], "views": 32, "likes": 16}
}
# The code below goes through the cats dictionary, then adds each category,
# and then adds all the associated pages for that category.
# if you are using Python 2.x then use cats.iteritems() see
# http://docs.quantifiedcode.com/python-anti-patterns/readability/
# for more information about how to iterate over a dictionary properly.
for cat, cat_data in cats.items():
c = add_cat(cat, cat_data["views"], cat_data["likes"])
for p in cat_data["pages"]:
add_page(c, p["title"], p["url"],p["views"])
# print out the categories we have added
for c in Category.objects.all():
for p in Page.objects.filter(category=c):
print("- {0} -{1}".format(str(c),str(p)))
def add_cat(name, views, likes):
c = Category.objects.get_or_create(name=name)[0]
c.views = views
c.likes = likes
c.save()
return c
def add_page(cat, title, url, views=0):
p = Page.objects.get_or_create(category=cat, title=title)[0]
p.url = url
p.views = views
p.save()
return p
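# Editorial note: get_or_create() returns an (object, created) tuple, which
# is why add_cat() and add_page() index [0] above. Equivalently:
#
#   c, created = Category.objects.get_or_create(name=name)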
# Start execution here!
if __name__ == '__main__':
print("Starting Rango population script...")
populate() |
# -*- coding: utf-8 -*-
import codecs
all_tables = {
'ktgg': ['main'],
'zgcpwsw': ['title', 'casecode'],
#
# # 'ktgg',
# # 'cdfy_sfgk',
# # 'newktgg',
# # 'zyktgg',
# # 'zgcpwsw',
# # 'itslaw',
# # 'qyxg_zgcpwsw',
# # 'qyxg_wscpws',
#
'zhixing': ['pname', 'case_code'],
'dishonesty': ['pname', 'case_code', 'exe_code'],
'recruit': ['pubdate_doublet', 'company_name', 'job_functions', 'source'],
'xgxx_shangbiao': ['applicant_name', 'application_no'],
'shgy_zhaobjg': ['title'],
'shgy_zhongbjg': ['title'],
'rmfygg': ['notice_content', 'notice_time', 'notice_type'],
'overseas_investment': ['certificate_no'],
'qyxx_wanfang_zhuanli': ['application_code'],
'tddy': ['landno', 'land_location', 'mortgage_right_name'],
'tdzr': ['land_location', 'landno', 'original_usename'],
'dcos': ['company_name', 'certificate_num'],
'qyxx_enterpriseQualificationForeign': ['company_name', 'certificate_no', 'issue_date'],
    'qyxx_gcjljz': ['company_name', 'certificate_no'],
'qyxx_jzsgxkz': ['company_name', 'certificate_no'],
'qyxx_miit_jlzzdwmd': ['company_name', 'certificate_no'],
'qyxx_food_prod_cert': ['company_name', 'certificate_no'],
    'qyxx_haiguanzongshu': ['company_name', 'customs_code'],
'qyxx_gmpauth_prod_cert': ['company_name', 'certificate_no'],
'qyxx_hzp_pro_prod_cert': ['company_name', 'certificate_no'],
'qyxx_medi_jy_prod_cert': ['company_name', 'certificate_no'],
'qyxx_medi_pro_prod_cert': ['company_name', 'certificate_no'],
'qyxx_industrial_production_permit': ['company_name', 'certificate_no'],
'qyxx_nyscqyzzcx': ['company_name', 'validdate'],
'qyxx_tk': ['company_name', 'certificate_no'],
'qyxx_ck': ['company_name', 'certificate_no'],
'xzcf': ['name', 'public_date', 'punish_code'],
'rjzzq': ['copyright_nationality', 'regnum', 'regdate'],
'qyxx_finance_xkz': ['company_name', 'issue_date', 'id_serial_num'],
'qylogo': ['company_full_name'],
'ssgs_zjzx': ['_id'],
'simutong': ['financing_side', 'invest_side', 'invest_time'],
'tddkgs': ['title', 'main'],
'shgy_tdcr': ['project_name', 'project_location', 'electron_supervise'],
'qyxx_zhuanli': ['application_code', 'reg_effect_date'],
'zhuanli_zhuanyi': ['application_code', 'reg_effect_date'],
'zpzzq': ['copyright_owner', 'regnum'],
'zuzhijigoudm': ['jgdm', 'jgmc'],
# 'sfpm_taobao':['title','auctioneer','disposal_unit'],
# 'domain_name_website_info':['organizer_name','site_certificate_no','domain_name']
}
tablename='SIMUTONG'
if __name__ == '__main__':
# fok = codecs.open(tablename+'_mysql', 'r', encoding='utf-8')
# fdup = codecs.open(tablename+'_dup', 'r', encoding='utf-8')
#
# foks=fok.read()
# for i in fdup.readlines():
# if i.strip() not in foks:
# print i
# break
#
# fdup.seek(0)
# all_list=[]
#
# for i in fdup.readlines():
# all_list.append(i.strip())
# print len(all_list)
# print len(set(all_list))
a=1
b=0
try:
a/b
except Exception as e:
print str(e)
|
#
# Copyright 2014 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Converters between SCIM JSON representation and Keystone"""
import functools
ROLE_SEP = '#'
_EXT_SCHEMA = 'urn:scim:schemas:extension:keystone:%s'
DEFAULT_VERSION = '1.0'
def get_schema(BASE_SCHEMA, path):
if 'v2' in path:
version = '2.0'
else:
version = '1.0'
return BASE_SCHEMA % version
def _remove_dict_nones(f):
def wrapper(*args, **kwargs):
res = f(*args, **kwargs)
return dict(filter(lambda x: x[1], res.items()))
return wrapper
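# Editorial note: the filter above keeps only *truthy* values, so besides
# None it also drops False, 0 and '' (e.g. an 'active': False flag would
# disappear from the resulting document). Illustration:
#
#   @_remove_dict_nones
#   def demo():
#       return {'a': 1, 'b': None, 'c': False}
#   demo()  # -> {'a': 1}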
@_remove_dict_nones
def user_key2scim(ref, path, schema=True):
ref = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)] if schema
else None,
'id': ref.get('id', None),
'userName': ref.get('name', None),
'displayName': ref.get('description', None),
'active': ref.get('enabled', None),
'emails': [{'value': ref['email']}] if 'email' in ref else None,
get_schema(_EXT_SCHEMA, path): {
'domain_id': ref.get('domain_id', None)
}
}
return ref
def listusers_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(user_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def user_scim2key(scim, path):
return {
'domain_id': scim.get(get_schema(_EXT_SCHEMA, path), {})
.get('domain_id', None),
'email': scim.get('emails', [{}])[0].get('value', None),
'id': scim.get('id', None),
        'enabled': scim.get('active', None),
'name': scim.get('userName', None),
'description': scim.get('displayName', None),
'password': scim.get('password', None)
}
@_remove_dict_nones
def role_scim2key(scim):
keystone = {}
keystone['id'] = scim.get('id', None)
if scim.get('domain_id', None):
keystone['name'] = '%s%s%s' % (
scim.get('domain_id'), ROLE_SEP, scim.get('name', None))
else:
keystone['name'] = scim.get('name', None)
return keystone
@_remove_dict_nones
def role_key2scim(ref, path=DEFAULT_VERSION, schema=True):
scim = {
'schemas': [get_schema(_EXT_SCHEMA, path)] if schema else None,
'id': ref.get('id', None)
}
dom_name = ref.get('name', '')
if dom_name.find(ROLE_SEP) > -1:
(domain, name) = dom_name.split(ROLE_SEP, 1)
else:
(domain, name) = (None, dom_name)
scim['name'] = name
scim['domain_id'] = domain
return scim
def listroles_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(role_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def group_scim2key(scim, path):
return {
'domain_id': scim.get(get_schema(_EXT_SCHEMA, path), {})
.get('domain_id', None),
'id': scim.get('id', None),
'name': scim.get('displayName', None)
}
@_remove_dict_nones
def group_key2scim(ref, path, schema=True):
return {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)] if schema
else None,
'id': ref.get('id', None),
'displayName': ref.get('name', None),
get_schema(_EXT_SCHEMA, path): {
'domain_id': ref.get('domain_id', None)
}
}
def listgroups_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(group_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def organization_key2scim(ref, path, schema=True):
return {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)] if schema
else None,
'id': ref.get('id', None),
'name': ref.get('name', None),
'description': ref.get('description', None),
'active': ref.get('enabled', None),
'is_default': ref.get('is_default', None),
get_schema(_EXT_SCHEMA, path): {
'domain_id': ref.get('domain_id', None)
}
}
def listorganizations_key2scim(ref, path, page_info={}):
res = {
'schemas': [get_schema('urn:scim:schemas:core:%s', path),
get_schema(_EXT_SCHEMA, path)],
'Resources': map(functools.partial(organization_key2scim, schema=False,
path=path), ref)
}
res.update(page_info)
return res
@_remove_dict_nones
def organization_scim2key(scim, path):
return {
'domain_id': scim.get(get_schema(_EXT_SCHEMA, path), {})
.get('domain_id', None),
'id': scim.get('id', None),
'enabled': scim.get('active', None),
'name': scim.get('name', None),
'description': scim.get('description', None),
'is_default': scim.get('is_default', None)
}
|
    Dictionary with all the evaluations for the given file.
Keys are the following:
track_name : Name of the track
ds_name : Name of the data set
HitRate_3F : F-measure of hit rate at 3 seconds
HitRate_3P : Precision of hit rate at 3 seconds
HitRate_3R : Recall of hit rate at 3 seconds
HitRate_0.5F : F-measure of hit rate at 0.5 seconds
HitRate_0.5P : Precision of hit rate at 0.5 seconds
HitRate_0.5R : Recall of hit rate at 0.5 seconds
HitRate_t3F : F-measure of hit rate at 3 seconds (trimmed)
HitRate_t3P : Precision of hit rate at 3 seconds (trimmed)
        HitRate_t3R : Recall of hit rate at 3 seconds (trimmed)
HitRate_t0.5F : F-measure of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5P : Precision of hit rate at 0.5 seconds (trimmed)
HitRate_t0.5R : Recall of hit rate at 0.5 seconds (trimmed)
        DevR2E : Median deviation of reference to estimation
        DevE2R : Median deviation of estimation to reference
D : Information gain
PWF : F-measure of pair-wise frame clustering
PWP : Precision of pair-wise frame clustering
PWR : Recall of pair-wise frame clustering
Sf : F-measure normalized entropy score
So : Oversegmentation normalized entropy score
Su : Undersegmentation normalized entropy score
"""
logging.info("Evaluating %s" % os.path.basename(est_file))
res = {}
### Boundaries ###
# Hit Rate
res["HitRate_3P"], res["HitRate_3R"], res["HitRate_3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=False)
res["HitRate_0.5P"], res["HitRate_0.5R"], res["HitRate_0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=False)
res["HitRate_t3P"], res["HitRate_t3R"], res["HitRate_t3F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=3, trim=True)
res["HitRate_t0.5P"], res["HitRate_t0.5R"], res["HitRate_t0.5F"] = \
mir_eval.segment.detection(ann_inter, est_inter, window=.5, trim=True)
# Information gain
res["D"] = compute_information_gain(ann_inter, est_inter, est_file,
bins=bins)
# Median Deviations
res["DevR2E"], res["DevE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=False)
res["DevtR2E"], res["DevtE2R"] = mir_eval.segment.deviation(
ann_inter, est_inter, trim=True)
### Labels ###
if est_labels is not None and len(est_labels) != 0:
try:
# Align labels with intervals
ann_labels = list(ann_labels)
est_labels = list(est_labels)
ann_inter, ann_labels = mir_eval.util.adjust_intervals(ann_inter,
ann_labels)
est_inter, est_labels = mir_eval.util.adjust_intervals(
est_inter, est_labels, t_min=0, t_max=ann_inter.max())
# Pair-wise frame clustering
res["PWP"], res["PWR"], res["PWF"] = mir_eval.segment.pairwise(
ann_inter, ann_labels, est_inter, est_labels)
# Normalized Conditional Entropies
res["So"], res["Su"], res["Sf"] = mir_eval.segment.nce(
ann_inter, ann_labels, est_inter, est_labels)
except:
logging.warning("Labeling evaluation failed in file: %s" %
est_file)
return {}
# Names
base = os.path.basename(est_file)
res["track_id"] = base[:-5]
res["ds_name"] = base.split("_")[0]
return res
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
bins=251, annotator_id=0):
"""Computes the results by using the ground truth dataset identified by
the annotator parameter.
Return
------
results : dict
Dictionary of the results (see function compute_results).
"""
# Get the ds_prefix
ds_prefix = os.path.basename(est_file).split("_")[0]
# Get context
    if ds_prefix in msaf.prefix_dict:
context = msaf.prefix_dict[ds_prefix]
else:
context = "function"
try:
# TODO: Read hierarchical annotations
if config["hier"]:
ref_times, ref_labels, ref_levels = \
msaf.io.read_hier_references(ref_file, annotation_id=0,
                                         exclude_levels=["function"])
else:
ref_inter, ref_labels = jams2.converters.load_jams_range(
ref_file, "sections", annotator=annotator_id, context=context)
except:
logging.warning("No references for file: %s" % ref_file)
return {}
# Read estimations with correct configuration
est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
labels_id, **config)
if len(est_inter) == 0:
logging.warning("No estimations for file: %s" % est_file)
return {}
# Compute the results and return
if config["hier"]:
# Hierarchical
assert len(est_inter) == len(est_labels), "Same number of levels " \
"are required in the boundaries and labels for the hierarchical " \
"evaluation."
est_times = []
est_labels = []
# Sort based on how many segments per level
est_inter = sorted(est_inter, key=lambda level: len(level))
for inter in est_inter:
est_times.append(msaf.utils.intervals_to_times(inter))
# Add fake labels (hierarchical eval does not use labels --yet--)
est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)
# Align the times
utils.align_end_hierarchies(est_times, ref_times)
# Build trees
ref_tree = mir_eval.segment.tree.SegmentTree(ref_times, ref_labels,
ref_levels)
est_tree = mir_eval.segment.tree.SegmentTree(est_times, est_labels)
# Compute evaluations
res = {}
res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
mir_eval.segment.hmeasure(ref_tree, est_tree, window=100)
res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
mir_eval.segment.hmeasure(ref_tree, est_tree, window=150)
res["t_recall30"], res["t_precision30"], res["t_measure30"] = \
mir_eval.segment.hmeasure(ref_tree, est_tree, window=300)
res["track_id"] = os.path.basename(est_file)[:-5]
return res
else:
# Flat
return compute_results(ref_inter, est_inter, ref_labels, est_labels,
bins, est_file)
def compute_information_gain(ann_inter, est_inter, est_file, bins):
"""Computes the information gain of the est_file from the annotated
intervals and the estimated intervals."""
ann_times = utils.intervals_to_times(ann_inter)
est_times = utils.intervals_to_times(est_inter)
try:
D = mir_eval.beat.information_gain(ann_times, est_times, bins=bins)
except:
logging.warning("Couldn't compute the Information Gain for file "
"%s" % est_file)
D = 0
return D
def process_track(file_struct, boundaries_id, labels_id, config, annotator_id=0):
"""Processes a single track.
Parameters
----------
file_struct : object (FileStruct) or str
File struct or full path of the audio file to be evaluated.
boundaries_id : str
Identifier of the boundaries algorithm.
labels_id : str
Identifier of the labels algorithm.
config : dict
Configuration of the algorithms to be evaluated.
annotator_id : int
Number identifiying the annotator.
Returns
-------
one_res : dict
Dictionary of the results (see function compute_results).
"""
# Convert to file_struct if string is passed
if isinstance(file_struct, six.string_types):
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import os
import sys
from telemetry.core import util
from telemetry.results import buildbot_output_formatter
from telemetry.results import chart_json_output_formatter
from telemetry.results import csv_output_formatter
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import gtest_progress_reporter
from telemetry.results import html_output_formatter
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
from telemetry.results import progress_reporter
# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'csv', 'gtest', 'json',
'chartjson', 'csv-pivot-table', 'none')
# Filenames to use for given output formats.
_OUTPUT_FILENAME_LOOKUP = {
'html': 'results.html',
'csv': 'results.csv',
'json': 'results.json',
'chartjson': 'results-chart.json',
'csv-pivot-table': 'results-pivot-table.csv'
}
def AddResultsOptions(parser):
group = optparse.OptionGroup(parser, 'Results options')
group.add_option('--chartjson', action='store_true',
help='Output Chart JSON. Ignores --output-format.')
group.add_option('--output-format', action='append', dest='output_formats',
choices=_OUTPUT_FORMAT_CHOICES, default=[],
help='Output format. Defaults to "%%default". '
'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
group.add_option('-o', '--output',
dest='output_file',
default=None,
help='Redirects output to a file. Defaults to stdout.')
group.add_option('--output-dir', default=util.GetBaseDir(),
help='Where to save output data after the run.')
group.add_option('--output-trace-tag',
default='',
help='Append a tag to the key of each result trace. Use '
'with html, buildbot, csv-pivot-table output formats.')
group.add_option('--reset-results', action='store_true',
help='Delete all stored results.')
group.add_option('--upload-results', action='store_true',
help='Upload the results to cloud storage.')
group.add_option('--upload-bucket', default='internal',
choices=['public', 'partner', 'internal'],
help='Storage bucket to use for the uploaded results. '
'Defaults to internal. Supported values are: '
'public, partner, internal')
group.add_option('--results-label',
default=None,
help='Optional label to use for the results of a run .')
group.add_option('--suppress_gtest_report',
default=False,
help='Whether to suppress GTest progress report.')
parser.add_option_group(group)
def ProcessCommandLineArgs(parser, args):
  # TODO(ariblue): Delete this flag entirely at some future date, when the
# existence of such a flag has been long forgotten.
if args.output_file:
parser.error('This flag is deprecated. Please use --output-dir instead.')
try:
os.makedirs(args.output_dir)
except OSError:
# Do nothing if the output directory already exists. Existing files will
# get overwritten.
pass
args.output_dir = os.path.expanduser(args.output_dir)
def _GetOutputStream(output_format, output_dir):
assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
assert output_format not in ('gtest', 'none'), (
'Cannot set stream for \'gtest\' or \'none\' output formats.')
if output_format == 'buildbot':
return sys.stdout
assert output_format in _OUTPUT_FILENAME_LOOKUP, (
'No known filename for the \'%s\' output format' % output_format)
output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
open(output_file, 'a').close() # Create file if it doesn't exist.
return open(output_file, 'r+')
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
if suppress_gtest_report:
return progress_reporter.ProgressReporter()
return gtest_progress_reporter.GTestProgressReporter(
sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
def CreateResults(benchmark_metadata, options,
value_can_be_added_predicate=lambda v: True):
"""
Args:
options: Contains the options specified in AddResultsOptions.
"""
if not options.output_formats:
options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]
output_formatters = []
for output_format in options.output_formats:
if output_format == 'none' or output_format == "gtest" or options.chartjson:
continue
output_stream = _GetOutputStream(output_format, options.output_dir)
if output_format == 'csv':
output_formatters.append(csv_output_formatter.CsvOutputFormatter(
output_stream))
elif output_format == 'csv-pivot-table':
output_formatters.append(
csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'buildbot':
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'html':
# TODO(chrishenry): We show buildbot output so that users can grep
# through the results easily without needing to open the html
# file. Another option for this is to output the results directly
# in gtest-style results (via some sort of progress reporter),
# as we plan to enable gtest-style output for all output formatters.
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
sys.stdout, trace_tag=options.output_trace_tag))
output_formatters.append(html_output_formatter.HtmlOutputFormatter(
output_stream, benchmark_metadata, options.reset_results,
options.upload_results, options.browser_type,
options.results_label, trace_tag=options.output_trace_tag))
elif output_format == 'json':
output_formatters.append(json_output_formatter.JsonOutputFormatter(
output_stream, benchmark_metadata))
elif output_format == 'chartjson':
output_formatters.append(
chart_json_output_formatter.ChartJsonOutputFormatter(
output_stream, benchmark_metadata))
else:
# Should never be reached. The parser enforces the choices.
raise Exception('Invalid --output-format "%s". Valid choices are: %s'
% (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
# TODO(chrishenry): This is here to not change the output of
# gtest. Let's try enabling skipped tests summary for gtest test
# results too (in a separate patch), and see if we break anything.
output_skipped_tests_summary = 'gtest' in options.output_formats
reporter = _GetProgressReporter(output_skipped_tests_summary,
options.suppress_gtest_report)
return page_test_results.PageTestResults(
output_formatters=output_formatters, progress_reporter=reporter,
output_dir=options.output_dir,
value_can_be_added_predicate=value_can_be_added_predicate)
|
import fechbase
class Records(fechbase.RecordsBase):
def __init__(self):
fechbase.RecordsBase.__init__(self)
self.fields = [
{'name': 'FORM TYPE', 'number': '1'},
{'name': 'FILER FEC CMTE ID', 'number': '2'},
{'name': 'COMMITTEE NAME', 'number': '3'},
{'name': 'STREET 1', 'number': '4'},
{'name': 'STREET 2', 'number': '5'},
{'name': 'CITY', 'number': '6'},
{'name': 'STATE', 'number': '7'},
{'name': 'ZIP', 'number': '8'},
{'name': 'ORGANIZATION TYPE', 'number': '9'},
{'name': 'RPTCODE', 'number': '10'},
{'name': 'OF ELECTION', 'number': '11-'},
{'name': 'STATE (OF ELECTION)', 'number': '12'},
{'name': 'COVERAGE FROM', 'number': '13-'},
            {'name': 'COVERAGE TO', 'number': '14-'},
{'name': 'TOTAL COSTS', 'number': '15'},
{'name': 'FILER', 'number': '16-'},
{'name': 'SIGNED', 'number': '17-'},
{'name': 'TITLE', 'number': '18'},
]
        self.fields_names = self.hash_names(self.fields)
|
"""Test energy_p | rofiler module."""
import unittest
from physalia.energy_profiler import AndroidUseCase
# pylint: disable=missing-docstring
class TestEnergyProfiler(unittest.TestCase):
def test_empty_android_use_case(self):
# pylint: disable=no-self-use
use_case = AndroidUseCase(
name="Test",
app_apk="no/path",
app_pkg="no.package",
app_version="0.0.0",
run=None,
prepare=None,
            cleanup=None
)
use_case.run()
|
"""
Setup file for led-controller
author: Luis Garcia Rodriguez 2017
Licence: GPLv3
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
setup(
name='led-controller',
version='2.0.0',
    description='A simple interface for controlling LEDs in circadian experiments',
# The project's main homepage.
url='https://github.com/polygonaltree/Led-control',
# Author details
author='Luis Garcia Rodriguez',
author_email='luis.garcia@uni-muenster.de',
license='GPLv3',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=[" | my_module"]
install_requires=['pyside2'],
entry_points={
'console_scripts': [
'led-controller=gui:main',
],
},
)
|
        self.cockatoo_white_a = self.cockatoo.variants.create(color='white', looks_alive=True)
self.cockatoo_white_d = self.cockatoo.variants.create(color='white', looks_alive=False)
self.cockatoo_blue_a = self.cockatoo.variants.create(color='blue', looks_alive=True)
self.cockatoo_blue_d = self.cockatoo.variants.create(color='blue', looks_alive=False)
self.custom_settings = {
'SATCHLESS_DELIVERY_PROVIDERS': ['satchless.contrib.delivery.simplepost.PostDeliveryProvider'],
'SATCHLESS_ORDER_PARTITIONERS': ['satchless.contrib.order.partitioner.simple'],
'SATCHLESS_PAYMENT_PROVIDERS': [TestPaymentProviderWithConfirmation],
'SATCHLESS_DJANGO_PAYMENT_TYPES': ['dummy'],
'PAYMENT_VARIANTS': {'dummy': ('payments.dummy.DummyProvider', {'url': '/', })},
}
self.original_settings = self._setup_settings(self.custom_settings)
order_handler.init_queues()
self.anon_client = Client()
PostShippingType.objects.create(price=12, typ='polecony', name='list polecony')
PostShippingType.objects.create(price=20, typ='list', name='List zwykly')
def tearDown(self):
self._teardown_settings(self.original_settings, self.custom_settings)
order_handler.init_queues()
def _test_status(self, url, method='get', *args, **kwargs):
status_code = kwargs.pop('status_code', 200)
client = kwargs.pop('client_instance', Client())
data = kwargs.pop('data', {})
response = getattr(client, method)(url, data=data, follow=False)
self.assertEqual(response.status_code, status_code,
'Incorrect status code for: %s, (%s, %s)! Expected: %s, received: %s. HTML:\n\n%s' % (
url.decode('utf-8'), args, kwargs, status_code, response.status_code,
response.content.decode('utf-8')))
return response
def _get_or_create_cart_for_client(self, client, typ='satchless_cart'):
self._test_status(reverse('satchless-cart-view'), client_instance=self.anon_client)
return TestCart.objects.get(pk=self.anon_client.session[CART_SESSION_KEY % typ], typ=typ)
def _get_order_from_session(self, session):
order_pk = session.get('satchless_order', None)
if order_pk:
return Order.objects.get(pk=order_pk)
return None
def _get_order_items(self, order):
order_items = set()
for group in order.groups.all():
order_items.update(group.items.values_list('product_variant', 'quantity'))
return order_items
def test_order_from_cart_view_creates_proper_order(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order = self._get_order_from_session(self.anon_client.session)
self.assertNotEqual(order, None)
order_items = self._get_order_items(order)
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
def test_order_is_updated_after_cart_changes(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order = self._get_order_from_session(self.anon_client.session)
order_items = self._get_order_items(order)
# compare cart and order
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
# update cart
cart.add_item(self.macaw_blue, 100)
cart.add_item(self.macaw_blue_fake, 100)
self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
old_order = order
order = self._get_order_from_session(self.anon_client.session)
# order should be reused
self.assertEqual(old_order.pk, order.pk)
self.assertNotEqual(order, None)
order_items = self._get_order_items(order)
# compare cart and order
self.assertEqual(set(cart.items.values_list('variant', 'quantity')), order_items)
def test_prepare_order_creates_order_and_redirects_to_checkout_when_cart_is_not_empty(self):
cart = self._get_or_create_cart_for_client(self.anon_client)
cart.replace_item(self.macaw_blue, 1)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
order_pk = self.anon_client.session.get('satchless_order', None)
order = Order.objects.get(pk=order_pk)
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
def test_prepare_order_redirects_to_cart_when_cart_is_empty(self):
self._get_or_create_cart_for_client(self.anon_client)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
# 'satchless_cart' is taken from multistep/urls.py:
# url(r'^prepare-order/$', prepare_order, {'typ': 'satchless_cart'}...)
self.assertRedirects(response, reverse('satchless-cart-view'))
def test_prepare_order_redirects_to_checkout_when_order_exists(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(prepare_order), method='post',
client_instance=self.anon_client, status_code=302)
self.assertRedirects(response, reverse(views.checkout,
kwargs={'order_token':
order.token}))
    def _create_cart(self, client):
cart = self._get_or_create_cart_for_client(client)
cart.replace_item(self.macaw_blue, 1)
cart.replace_item(self.macaw_blue_fake, Decimal('2.45'))
cart.replace_item(self.cockatoo_white_a, Decimal('2.45'))
return cart
def _create_order(self, client):
        self._create_cart(client)
self._test_status(reverse(prepare_order), method='post',
client_instance=client, status_code=302)
return self._get_order_from_session(client.session)
def test_order_is_deleted_when_all_cart_items_are_deleted(self):
order = self._create_order(self.anon_client)
for cart_item in order.cart.items.all():
self.assertTrue(Order.objects.filter(pk=order.pk).exists())
order.cart.replace_item(cart_item.variant, 0)
self.assertFalse(Order.objects.filter(pk=order.pk).exists())
def test_checkout_view(self):
order = self._create_order(self.anon_client)
response = self._test_status(reverse(views.checkout,
kwargs={'order_token':
order.token}),
client_instance=self.anon_client,
status_code=200)
group = order.groups.get()
dtypes = order_handler.get_delivery_types(group)
dtype = dtypes[0][0]
df = response.context['delivery_formset']
data = {'billing_first_name': 'First',
'billing_last_name': 'Last',
'billing_street_address_1': 'Via Rodeo 1',
'billing_city': 'Beverly Hills',
'billing_country': 'US',
'billing_country_area': 'AZ',
'billing_phone': '555-555-5555',
'billing_postal_code': '90210'}
|
from tornado.web import RequestHandler
class BaseHandler(RequestHandler):
def initialize(self):
_settings = self.application.settings
self.db = self.application.db
        #self.redis = _settings["redis"]
self.log = _settings["log"]
|
# Generated by Django 3.1.4 on 2020-12-06 08:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
        ('projects', '0023_auto_20201202_0349'),
]
operations = [
migrations.AlterField(
model_name='review',
name='status',
            field=models.CharField(choices=[('PENDING', 'Pending'), ('REQUESTED', 'Requested'), ('CANCELLED', 'Cancelled'), ('ACCEPTED', 'Accepted'), ('DECLINED', 'Declined'), ('COMPLETED', 'Completed'), ('EXTRACTING', 'Retrieval in progress'), ('EXTRACTED', 'Retrieved'), ('FAILED', 'Retrieval failed'), ('REGISTERED', 'Registered')], default='PENDING', help_text='The status of the review.', max_length=16),
),
]
|
import random
import time
from collections import OrderedDict
from plenum.common.util import randomString
try:
import ujson as json
except ImportError:
import json
import pytest
from plenum.recorder.recorder import Recorder
TestRunningTimeLimitSec = 350
def test_add_to_recorder(recorder):
last_check_time = recorder.get_now_key()
time.sleep(1)
msg1, frm1 = 'm1', 'f1'
msg2, frm2 = 'm2', 'f2'
recorder.add_incoming(msg1, frm1)
time.sleep(3)
recorder.add_incoming(msg2, frm2)
time.sleep(2.1)
msg3, to1, to11 = 'm3', 't1', 't11'
msg4, to2 = 'm4', 't2'
recorder.add_outgoing(msg3, to1, to11)
time.sleep(.4)
recorder.add_outgoing(msg4, to2)
time.sleep(.5)
recorder.add_disconnecteds('a', 'b', 'c')
i = 0
for k, v in recorder.store.iterator(include_value=True):
assert int(k.decode()) > int(last_check_time)
if i == 0:
assert v.decode() == json.dumps([[Recorder.INCOMING_FLAG, msg1, frm1]])
if i == 1:
assert v.decode() == json.dumps([[Recorder.INCOMING_FLAG, msg2, frm2]])
assert int(k) - int(last_check_time) >= 3 * Recorder.TIME_FACTOR
if i == 2:
assert v.decode() == json.dumps([[Recorder.OUTGOING_FLAG, msg3, to1, to11]])
assert int(k) - int(last_check_time) >= 2.1 * Recorder.TIME_FACTOR
if i == 3:
assert v.decode() == json.dumps([[Recorder.OUTGOING_FLAG, msg4, to2]])
assert int(k) - int(last_check_time) >= .4 * Recorder.TIME_FACTOR
if i == 4:
assert v.decode() == json.dumps([[Recorder.DISCONN_FLAG, 'a', 'b', 'c']])
assert int(k) - int(last_check_time) >= .5 * Recorder.TIME_FACTOR
last_check_time = k.decode()
i += 1
def test_get_list_from_recorder(recorder):
msg1, frm1 = 'm1', 'f1'
msg2, frm2 = 'm2', 'f2'
msg3, to1, to11 = 'm3', 't1', 't11'
# Decrease resolution
recorder.TIME_FACTOR = 1
time.sleep(1)
recorder.add_outgoing(msg3, to1, to11)
recorder.add_incoming(msg1, frm1)
recorder.add_incoming(msg2, frm2)
recorder.add_disconnecteds('a', 'b', 'c')
for k, v in recorder.store.iterator(include_value=True):
assert v.decode() == json.dumps([
[Recorder.OUTGOING_FLAG, 'm3', 't1', 't11'],
[Recorder.INCOMING_FLAG, 'm1', 'f1'],
[Recorder.INCOMING_FLAG, 'm2', 'f2'],
[Recorder.DISCONN_FLAG, 'a', 'b', 'c']
])
def test_register_play_targets(recorder):
l1 = []
l2 = []
def add1(arg):
l1.append(arg)
def add2(arg):
l2.append(arg)
assert not recorder.replay_targets
recorder.register_replay_target('1', add1)
assert len(recorder.replay_targets) == 1
with pytest.raises(AssertionError):
recorder.register_replay_target('1', add2)
def test_recorded_parsings(recorder):
incoming = [[randomString(10), randomString(6)] for i in
range(3)]
outgoing = [[randomString(10), randomString(6)] for i in
range(5)]
for m, f in incoming:
recorder.add_incoming(m, f)
time.sleep(0.01)
for m, f in outgoing:
recorder.add_outgoing(m, f)
time.sleep(0.01)
with pytest.raises(AssertionError):
recorder.get_parsed(incoming[0], only_incoming=True, only_outgoing=True)
combined = incoming + outgoing
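    # sublist() treats the two lists as equal up to shared elements: filtering
    # each list down to the other's elements must yield the same sequence.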
def sublist(lst1, lst2):
ls1 = [element for element in lst1 if element in lst2]
        ls2 = [element for element in lst2 if element in lst1]
return ls1 == ls2
for k, v in recorder.store.iterator(include_value=True):
p = Recorder.get_parsed(v)
        assert sublist([i[1:] for i in p], combined)
p = Recorder.get_parsed(v, only_incoming=True)
if p:
assert sublist(p, incoming)
for i in p:
incoming.remove(i)
p = Recorder.get_parsed(v, only_outgoing=True)
if p:
assert sublist(p, outgoing)
            for i in p:
outgoing.remove(i)
assert not incoming
assert not outgoing
def test_recorder_get_next_incoming_only(recorder):
incoming_count = 100
incoming = [(randomString(100), randomString(6)) for _ in
range(incoming_count)]
while incoming:
recorder.add_incoming(*incoming.pop())
time.sleep(random.choice([0, 1]) + random.random())
recorded_incomings = OrderedDict()
keys = []
for k, v in recorder.store.iterator(include_value=True):
v = Recorder.get_parsed(v)
keys.append(int(k))
recorded_incomings[int(k)] = v
assert len(recorded_incomings) == incoming_count
assert sorted(keys) == keys
max_time_to_run = incoming_count * 2 + 10
recorder.start_playing()
start = time.perf_counter()
while recorder.is_playing and (time.perf_counter() < start + max_time_to_run):
vals = recorder.get_next()
if vals:
check = recorded_incomings.popitem(last=False)[1]
assert check == vals
else:
time.sleep(0.01)
assert len(recorded_incomings) == 0
assert not recorder.is_playing
def test_recorder_get_next(recorder):
incoming_count = 100
outgoing_count = 50
incoming = [(randomString(100), randomString(6)) for _ in range(incoming_count)]
outgoing = [(randomString(100), randomString(6)) for _ in range(outgoing_count)]
while incoming or outgoing:
if random.choice([0, 1]) and outgoing:
recorder.add_outgoing(*outgoing.pop())
time.sleep(random.choice([0, 1]) + random.random())
elif incoming:
recorder.add_incoming(*incoming.pop())
time.sleep(random.choice([0, 1]) + random.random())
else:
continue
recorded_incomings = OrderedDict()
for k, v in recorder.store.iterator(include_value=True):
v = Recorder.get_parsed(v, only_incoming=True)
if v:
recorded_incomings[int(k)] = v
assert len(recorded_incomings) == incoming_count
max_time_to_run = incoming_count * 2 + 10
recorder.start_playing()
start = time.perf_counter()
while recorder.is_playing and (time.perf_counter() < start + max_time_to_run):
vals = recorder.get_next()
if vals:
inc = Recorder.filter_incoming(vals)
if inc:
assert recorded_incomings.popitem(last=False)[1] == inc
else:
time.sleep(0.01)
assert len(recorded_incomings) == 0
assert not recorder.is_playing
|
# -*- coding: utf-8 -*-
t1 = ()
print type(t1)
t3 = 1,2,3
print type(t3)
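# Note: parentheses alone don't create a tuple - the trailing comma does,
# as the next two examples show.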
r1 = (1)
print r1
print type(r1)
r1 = (1,)
print r1
print type(r1)
t = (1,2,3)
print t*2
print t+('aaa','bbb')
print t
print
print t[0], t[1:3]
print len(t)
print 1 in t
print range(1,3)
t= (12345,54321,'hhh')
u = t,(1,2,3,4,5)
print u
t2 = [1,2,3]
u2 = t2,(1,2,4)
print u2
t3 = {1:'ggg',2:'hhh'}
u3 = t3,(1,2,3)
print u3
x,y,z=1,2,3
print x
print y
print z
t = 1,2,'hello'
x,y,z = t
|
# -*- coding: utf-8 -*-
from .__version__ import __version__
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class Animal(object):
def run(self):
print('Animal running...')
class Dog(Animal):
def run(self):
print('Dog running...')
def shout(self):
print('Dog wang wang...')
class Cat(Animal):
def run(self):
print('Cat running...')
def shout(self):
print('Cat miao miao...')
class Pig(Animal):
def run(self):
print('Pig running slowly...')
def run_twice(animal):
animal.run()
animal.run()
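# run_twice() relies on duck typing: it calls run() on whatever object it is
# given, so each subclass's override is dispatched at runtime.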
dog = Dog()
cat = Cat()
# run() prints its message and returns None, so call it directly rather than
# wrapping it in print(), which would emit a spurious "None" line.
dog.run()
cat.run()
run_twice(Animal())
run_twice(Dog())
run_twice(Cat())
run_twice(Pig())
|
#!/usr/bin/env python
def pos_neg(a, b, negative):
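    """Return True when both a and b are negative (if negative is True),
    or when exactly one of a and b is negative and the other positive
    (if negative is False)."""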
if negative:
return a < 0 and b < 0
else:
        return (a < 0 and b > 0) or (a > 0 and b < 0)
if __name__ == "__main__":
# run some tests if run as script
# (from the codingbat site -- not all, I got bored)
    assert pos_neg(1, -1, False) is True
assert pos_neg(-1, 1, False) is True
assert pos_neg(-4, -5, True) is True
assert pos_neg(-4, -5, False) is False
assert pos_neg(-4, -5, True) is True
assert pos_neg(-6, -6, False) is False
assert pos_neg(-2, -1, False) is False
assert pos_neg(1, 2, False) is False
assert pos_neg(-5, 6, True) is False
assert pos_neg(-5, -5, True) is True
print "all tests passed"
|
on, TypeConstraint, TypeId
from pants.util.contextutil import temporary_file_path
from pants.util.objects import datatype
logger = logging.getLogger(__name__)
class ExecutionRequest(datatype('ExecutionRequest', ['roots'])):
"""Holds the roots for an execution, which might have been requested by a user.
To create an ExecutionRequest, see `LocalScheduler.build_request` (which performs goal
translation) or `LocalScheduler.execution_request`.
:param roots: Roots for this request.
:type roots: list of tuples of subject and product.
"""
class ExecutionResult(datatype('ExecutionResult', ['error', 'root_products'])):
"""Represents the result of a single execution."""
@classmethod
def finished(cls, root_products):
"""Create a success or partial success result from a finished run.
Runs can either finish with no errors, satisfying all promises, or they can partially finish
    if run in fail-slow mode, producing as many products as possible.
:param root_products: List of ((subject, product), State) tuples.
:rtype: `ExecutionResult`
"""
return cls(error=None, root_products=root_products)
@classmethod
def failure(cls, error):
"""Create a failure result.
    A failure result represents a run with a fatal error. It presents the error but no
products.
:param error: The execution error encountered.
:type error: :class:`pants.base.exceptions.TaskError`
:rtype: `ExecutionResult`
"""
return cls(error=error, root_products=None)
class ExecutionError(Exception):
pass
class WrappedNativeScheduler(object):
def __init__(self, native, build_root, work_dir, ignore_patterns, rule_index):
self._native = native
# TODO: The only (?) case where we use inheritance rather than exact type unions.
has_products_constraint = SubclassesOf(HasProducts)
self._root_subject_types = sorted(rule_index.roots)
# Create the ExternContext, and the native Scheduler.
self._tasks = native.new_tasks()
self._register_rules(rule_index)
self._scheduler = native.new_scheduler(
self._tasks,
self._root_subject_types,
build_root,
work_dir,
ignore_patterns,
Snapshot,
_Snapshots,
FileContent,
FilesContent,
Path,
Dir,
File,
Link,
has_products_constraint,
constraint_for(Address),
constraint_for(Variants),
constraint_for(PathGlobs),
constraint_for(Snapshot),
constraint_for(_Snapshots),
constraint_for(FilesContent),
constraint_for(Dir),
constraint_for(File),
constraint_for(Link),
)
def _root_type_ids(self):
return self._to_ids_buf(sorted(self._root_subject_types))
def graph_trace(self):
with temporary_file_path() as path:
self._native.lib.graph_trace(self._scheduler, bytes(path))
with open(path) as fd:
for line in fd.readlines():
yield line.rstrip()
def assert_ruleset_valid(self):
raw_value = self._native.lib.validator_run(self._scheduler)
value = self._from_value(raw_value)
if isinstance(value, Exception):
raise ValueError(str(value))
def _to_value(self, obj):
return self._native.context.to_value(obj)
def _from_value(self, val):
return self._native.context.from_value(val)
def _to_id(self, typ):
return self._native.context.to_id(typ)
def _to_key(self, obj):
return self._native.context.to_key(obj)
def _from_id(self, cdata):
return self._native.context.from_id(cdata)
def _from_key(self, cdata):
return self._native.context.from_key(cdata)
def _to_constraint(self, type_or_constraint):
return TypeConstraint(self._to_id(constraint_for(type_or_constraint)))
def _to_ids_buf(self, types):
return self._native.to_ids_buf(types)
def _to_utf8_buf(self, string):
return self._native.context.utf8_buf(string)
def _register_rules(self, rule_index):
"""Record the given RuleIndex on `self._tasks`."""
registered = set()
for product_type, rules in rule_index.rules.items():
# TODO: The rules map has heterogeneous keys, so we normalize them to type constraints
# and dedupe them before registering to the native engine:
# see: https://github.com/pantsbuild/pants/issues/4005
output_constraint = self._to_constraint(product_type)
for rule in rules:
key = (output_constraint, rule)
if key in registered:
continue
registered.add(key)
if type(rule) is SingletonRule:
self._register_singleton(output_constraint, rule)
elif type(rule) is TaskRule:
self._register_task(output_constraint, rule)
else:
raise ValueError('Unexpected Rule type: {}'.format(rule))
def _register_singleton(self, output_constraint, rule):
"""Register the given SingletonRule.
A SingletonRule installed for a type will be the only provider for that type.
"""
self._native.lib.tasks_singleton_add(self._tasks,
self._to_value(rule.value),
output_constraint)
def _register_task(self, output_constraint, rule):
"""Register the given TaskRule with the native scheduler."""
input_selects = rule.input_selectors
func = rule.func
self._native.lib.tasks_task_begin(self._tasks, Function(self._to_id(func)), output_constraint)
for selector in input_selects:
selector_type = type(selector)
product_constraint = self._to_constraint(selector.product)
if selector_type is Select:
self._native.lib.tasks_add_select(self._tasks, product_constraint)
elif selector_type is SelectVariant:
key_buf = self._to_utf8_buf(selector.variant_key)
self._native.lib.tasks_add_select_variant(self._tasks,
product_constraint,
key_buf)
      elif selector_type is SelectDependencies:
self._native.lib.tasks_add_select_dependencies(self._tasks,
product_constraint,
                                                       self._to_constraint(selector.dep_product),
self._to_utf8_buf(selector.field),
self._to_ids_buf(selector.field_types))
elif selector_type is SelectTransitive:
self._native.lib.tasks_add_select_transitive(self._tasks,
product_constraint,
self._to_constraint(selector.dep_product),
self._to_utf8_buf(selector.field),
self._to_ids_buf(selector.field_types))
elif selector_type is SelectProjection:
self._native.lib.tasks_add_select_projection(self._tasks,
self._to_constraint(selector.product),
TypeId(self._to_id(selector.projected_subject)),
self._to_utf8_buf(selector.field),
self._to_constraint(selector.input_product))
else:
raise ValueError('Unrecognized Selector type: {}'.format(selector))
self._native.lib.tasks_task_end(self._tasks)
def visualize_graph_to_file(self, filename):
self._native.lib.graph_visualize(self._scheduler, bytes(filename))
def visualize_rule_graph_to_file(self, filename):
self._native.lib.rule_graph_visualize(
self._scheduler,
self._root_type_ids(),
bytes(filename))
def rule_graph_visualization(self):
with temporary_file_path() as path:
self.visualize_rule_graph_to_file(path)
with open(path) as fd:
for line in fd.readlines():
yield line.rstrip()
def rule_subgraph_visualization(self, root_subject_type, product_type):
root_type_id = TypeI |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ChooseFastestBranchDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import optimization
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ChooseFastestBranchDatasetSerializationTest(
dataset_serialization_test_base.DatasetSerializationTestBase):
def testCore(self):
def build_ds(size):
dataset = dataset_ops.Dataset.range(size)
def branch_0(dataset):
return dataset.map(lambda x: x).batch(10)
def branch_1(dataset):
return dataset.batch(10).map(lambda x: x)
return optimization._ChooseFastestBranchDataset( # pylint: disable=protected-access
dataset, [branch_0, branch_1],
ratio_numerator=10)
for size in [100, 1000]:
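      # each branch batches range(size) by 10, so the pipeline is expected to
      # yield size // 10 elements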
self.run_core_tests(lambda: build_ds(size), None, size // 10) # pylint: disable=cell-var-from-loop
def testWithCapture(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithPrefetch(self):
def build_ds():
dataset = dataset_ops.Dataset.range(10)
const_64 = constant_op.constant(1, dtypes.int64)
const_32 = constant_op.constant(1, dtypes.int32)
def branch_0(dataset):
return dataset.map(lambda x: x + const_64)
def branch_1(dataset):
        return dataset.map(lambda x: x + math_ops.cast(const_32, dtypes.int64))
      return optimization._ChooseFastestBranchDataset(
dataset, [branch_0, branch_1], num_elements_per_branch=3)
self.run_core_tests(build_ds, None, 10)
def testWithMoreOutputThanInput(self):
def build_ds():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(1000).batch(100)
def branch(dataset):
return dataset.apply(batching.unbatch())
return optimization._ChooseFastestBranchDataset(
dataset, [branch, branch],
ratio_denominator=10,
num_elements_per_branch=100)
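    # unbatching 10 batches of 100 elements yields 1000 outputs, matching the
    # expected count passed to run_core_tests below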
self.run_core_tests(build_ds, None, 1000)
if __name__ == "__main__":
test.main()
|
from interfaces.labels_map import LabelsMap
from helpers.python_ext import to_str
class LTS:
def __init__(self,
init_states,
model_by_signal:dict,
tau_model:LabelsMap,
                 state_name:str,
input_signals,
output_signals):
self._output_models = model_by_signal
self._tau_model = tau_model
self._init_states = set(init_states)
self._state_name = state_name
self._output_signals = output_signals # TODO: duplication with _output_models?
self._input_signals = input_signals
@property
def state_name(self):
return self._state_name
@property
def input_signals(self):
return self._input_signals
@property
def output_signals(self):
return self._output_signals
@property
def init_states(self):
return self._init_states
@property
def states(self):
# states = set(k[self._state_name] for k in self._tau_model)
# return the range of tau \cup init_states
states = set(map(lambda l_v: l_v[1], self._tau_model.items()))
states.update(self.init_states)
return states
@property
def tau_model(self) -> LabelsMap:
return self._tau_model
@property
def model_by_signal(self):
return self._output_models
@property
def output_models(self) -> dict:
return self._output_models
def __str__(self):
return 'LTS:\n' \
' inputs: {inputs}\n' \
' outputs: {outputs}\n' \
' init_states: {init}\n' \
' states: {states}\n' \
' output_models: {output_models}'.format(init=str(self._init_states),
states=str(self.states),
output_models=str(self.model_by_signal),
inputs=to_str(self._input_signals),
outputs=to_str(self._output_signals))
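# Hypothetical construction sketch (names invented for illustration; a real
# LabelsMap would come from the surrounding synthesis code):
#
#   lts = LTS(init_states={0},
#             model_by_signal={},    # output models, keyed by signal
#             tau_model=tau,         # LabelsMap for the transition function
#             state_name='state',
#             input_signals=[r_signal],
#             output_signals=[g_signal])
#   print(lts)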
|
# Copyright 2013 IBM Corp
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import testtools
from tempest.lib import base as test
from tempest.lib import decorators
from tempest.tests.lib import base
class TestSkipBecauseDecorator(base.TestCase):
def _test_skip_because_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@decorators.skip_because(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
def test_skip_because_bug(self):
self._test_skip_because_helper(bug='12345')
def test_skip_because_bug_and_condition_true(self):
self._test_skip_because_helper(bug='12348', condition=True)
def test_skip_because_bug_and_condition_false(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12349', condition=False)
def test_skip_because_bug_without_bug_never_skips(self):
"""Never skip without a bug parameter."""
self._test_skip_because_helper(expected_to_skip=False,
condition=True)
self._test_skip_because_helper(expected_to_skip=False)
def test_skip_because_invalid_bug_number(self):
"""Raise ValueError if with an invalid bug number"""
self.assertRaises(ValueError, self._test_skip_because_helper,
bug='critical_bug')
class TestIdempotentIdDecorator(base.TestCase):
def _test_helper(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
"""Docstring"""
pass
return foo
def _test_helper_without_doc(self, _id, **decorator_args):
@decorators.idempotent_id(_id)
def foo():
pass
return foo
def test_positive(self):
_id = str(uuid.uuid4())
foo = self._test_helper(_id)
self.assertIn('id-%s' % _id, getattr(foo, '__testtools_attrs'))
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_positive_without_doc(self):
_id = str(uuid.uuid4())
foo = self._test_helper_without_doc(_id)
self.assertTrue(foo.__doc__.startswith('Test idempotent id: %s' % _id))
def test_idempotent_id_not_str(self):
_id = 42
self.assertRaises(TypeError, self._test_helper, _id)
def test_idempotent_id_not_valid_uuid(self):
_id = '42'
self.assertRaises(ValueError, self._test_helper, _id)
class TestSkipUnlessAttrDecorator(base.TestCase):
def _test_skip_unless_attr(self, attr, expected_to_skip=True):
        class TestFoo(test.BaseTestCase):
expected_attr = not expected_to_skip
@decorators.skip_unless_attr(attr)
def test_foo(self):
pass
t = TestFoo('test_foo')
if expected_to_skip:
            self.assertRaises(testtools.TestCase.skipException,
                              t.test_foo)
else:
try:
t.test_foo()
except Exception:
raise testtools.TestCase.failureException()
def test_skip_attr_does_not_exist(self):
self._test_skip_unless_attr('unexpected_attr')
def test_skip_attr_false(self):
self._test_skip_unless_attr('expected_attr')
def test_no_skip_for_attr_exist_and_true(self):
self._test_skip_unless_attr('expected_attr', expected_to_skip=False)
|